//===- ObjCARCOpts.cpp - ObjC ARC Optimization ----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines ObjC ARC optimizations. ARC stands for Automatic
/// Reference Counting and is a system for managing reference counts for objects
/// in Objective-C.
///
/// The optimizations performed include elimination of redundant, partially
/// redundant, and inconsequential reference count operations, elimination of
/// redundant weak pointer operations, and numerous minor simplifications.
///
/// WARNING: This file knows about certain library functions. It recognizes them
/// by name, and hardwires knowledge of their semantics.
///
/// WARNING: This file knows about how certain Objective-C library functions are
/// used. Naive LLVM IR transformations which would otherwise be
/// behavior-preserving may break these assumptions.
///
//===----------------------------------------------------------------------===//
#include "ObjCARC.h"
#include "ARCRuntimeEntryPoints.h"
#include "DependencyAnalysis.h"
#include "ObjCARCAliasAnalysis.h"
#include "ProvenanceAnalysis.h"
#include "BlotMapVector.h"
#include "PtrState.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace llvm::objcarc;

#define DEBUG_TYPE "objc-arc-opts"
/// \defgroup ARCUtilities Utility declarations/definitions specific to ARC.
/// @{

/// \brief This is similar to GetRCIdentityRoot but it stops as soon
/// as it finds a value with multiple uses.
static const Value *FindSingleUseIdentifiedObject(const Value *Arg) {
  if (Arg->hasOneUse()) {
    if (const BitCastInst *BC = dyn_cast<BitCastInst>(Arg))
      return FindSingleUseIdentifiedObject(BC->getOperand(0));
    if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Arg))
      if (GEP->hasAllZeroIndices())
        return FindSingleUseIdentifiedObject(GEP->getPointerOperand());
    if (IsForwarding(GetBasicARCInstKind(Arg)))
      return FindSingleUseIdentifiedObject(
               cast<CallInst>(Arg)->getArgOperand(0));
    if (!IsObjCIdentifiedObject(Arg))
      return nullptr;
    return Arg;
  }

  // If we found an identifiable object whose only uses are trivial, we can
  // still consider this to be a single-use value.
  if (IsObjCIdentifiedObject(Arg)) {
    for (const User *U : Arg->users())
      if (!U->use_empty() || GetRCIdentityRoot(U) != Arg)
        return nullptr;

    return Arg;
  }

  return nullptr;
}

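// A minimal sketch of the kind of chain the function above walks (IR names
// hypothetical; assumes the call result is recognized by
// IsObjCIdentifiedObject):
//
//   %obj  = call i8* @returns_identified_object()
//   %cast = bitcast i8* %obj to i8*       ; the sole use of %obj
//
// FindSingleUseIdentifiedObject(%cast) looks through the cast and returns
// %obj, provided %cast itself has a single use.
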
/// This is a wrapper around getUnderlyingObjCPtr along the lines of
/// GetUnderlyingObjects except that it returns early when it sees the first
/// alloca.
static inline bool AreAnyUnderlyingObjectsAnAlloca(const Value *V) {
  SmallPtrSet<const Value *, 4> Visited;
  SmallVector<const Value *, 4> Worklist;
  Worklist.push_back(V);
  do {
    const Value *P = Worklist.pop_back_val();
    P = GetUnderlyingObjCPtr(P);

    if (isa<AllocaInst>(P))
      return true;

    if (!Visited.insert(P).second)
      continue;

    if (const SelectInst *SI = dyn_cast<const SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (const PHINode *PN = dyn_cast<const PHINode>(P)) {
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        Worklist.push_back(PN->getIncomingValue(i));
      continue;
    }
  } while (!Worklist.empty());

  return false;
}

/// @}
///
/// \defgroup ARCOpt ARC Optimization.
/// @{

// TODO: On code like this:
//
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
// stuff_that_cannot_release()
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
//
// The second retain and autorelease can be deleted.

// TODO: It should be possible to delete
// objc_autoreleasePoolPush and objc_autoreleasePoolPop
// pairs if nothing is actually autoreleased between them. Also, autorelease
// calls followed by objc_autoreleasePoolPop calls (perhaps in ObjC++ code
// after inlining) can be turned into plain release calls.

// TODO: Critical-edge splitting. If the optimal insertion point is
// a critical edge, the current algorithm has to fail, because it doesn't
// know how to split edges. It should be possible to make the optimizer
// think in terms of edges, rather than blocks, and then split critical
// edges on demand.

// TODO: OptimizeSequences could be generalized to be interprocedural.

// TODO: Recognize that a bunch of other objc runtime calls have
// non-escaping arguments and non-releasing arguments, and may be
// non-autoreleasing.

// TODO: Sink autorelease calls as far as possible. Unfortunately we
// usually can't sink them past other calls, which would be the main
// case where it would be useful.

// TODO: The pointer returned from objc_loadWeakRetained is retained.

// TODO: Delete release+retain pairs (rare).

STATISTIC(NumNoops,        "Number of no-op objc calls eliminated");
STATISTIC(NumPartialNoops, "Number of partially no-op objc calls eliminated");
STATISTIC(NumAutoreleases, "Number of autoreleases converted to releases");
STATISTIC(NumRets,         "Number of return value forwarding "
                           "retain+autoreleases eliminated");
STATISTIC(NumRRs,          "Number of retain+release paths eliminated");
STATISTIC(NumPeeps,        "Number of calls peephole-optimized");

#ifndef NDEBUG
STATISTIC(NumRetainsBeforeOpt,
          "Number of retains before optimization");
STATISTIC(NumReleasesBeforeOpt,
          "Number of releases before optimization");
STATISTIC(NumRetainsAfterOpt,
          "Number of retains after optimization");
STATISTIC(NumReleasesAfterOpt,
          "Number of releases after optimization");
#endif

namespace {
  /// \brief Per-BasicBlock state.
  class BBState {
    /// The number of unique control paths from the entry which can reach this
    /// block.
    unsigned TopDownPathCount;

    /// The number of unique control paths to exits from this block.
    unsigned BottomUpPathCount;

    /// The top-down traversal uses this to record information known about a
    /// pointer at the bottom of each block.
    BlotMapVector<const Value *, TopDownPtrState> PerPtrTopDown;

    /// The bottom-up traversal uses this to record information known about a
    /// pointer at the top of each block.
    BlotMapVector<const Value *, BottomUpPtrState> PerPtrBottomUp;

    /// Effective predecessors of the current block ignoring ignorable edges
    /// and ignored backedges.
    SmallVector<BasicBlock *, 2> Preds;

    /// Effective successors of the current block ignoring ignorable edges and
    /// ignored backedges.
    SmallVector<BasicBlock *, 2> Succs;

  public:
    static const unsigned OverflowOccurredValue;

    BBState() : TopDownPathCount(0), BottomUpPathCount(0) { }

    typedef decltype(PerPtrTopDown)::iterator top_down_ptr_iterator;
    typedef decltype(PerPtrTopDown)::const_iterator const_top_down_ptr_iterator;

    top_down_ptr_iterator top_down_ptr_begin() { return PerPtrTopDown.begin(); }
    top_down_ptr_iterator top_down_ptr_end() { return PerPtrTopDown.end(); }
    const_top_down_ptr_iterator top_down_ptr_begin() const {
      return PerPtrTopDown.begin();
    }
    const_top_down_ptr_iterator top_down_ptr_end() const {
      return PerPtrTopDown.end();
    }

    typedef decltype(PerPtrBottomUp)::iterator bottom_up_ptr_iterator;
    typedef decltype(
        PerPtrBottomUp)::const_iterator const_bottom_up_ptr_iterator;

    bottom_up_ptr_iterator bottom_up_ptr_begin() {
      return PerPtrBottomUp.begin();
    }
    bottom_up_ptr_iterator bottom_up_ptr_end() { return PerPtrBottomUp.end(); }
    const_bottom_up_ptr_iterator bottom_up_ptr_begin() const {
      return PerPtrBottomUp.begin();
    }
    const_bottom_up_ptr_iterator bottom_up_ptr_end() const {
      return PerPtrBottomUp.end();
    }

    /// Mark this block as being an entry block, which has one path from the
    /// entry by definition.
    void SetAsEntry() { TopDownPathCount = 1; }

    /// Mark this block as being an exit block, which has one path to an exit by
    /// definition.
    void SetAsExit() { BottomUpPathCount = 1; }

    /// Attempt to find the PtrState object describing the top down state for
    /// pointer Arg. Return a new initialized PtrState describing the top down
    /// state for Arg if we do not find one.
    TopDownPtrState &getPtrTopDownState(const Value *Arg) {
      return PerPtrTopDown[Arg];
    }

    /// Attempt to find the PtrState object describing the bottom up state for
    /// pointer Arg. Return a new initialized PtrState describing the bottom up
    /// state for Arg if we do not find one.
    BottomUpPtrState &getPtrBottomUpState(const Value *Arg) {
      return PerPtrBottomUp[Arg];
    }

    /// Attempt to find the PtrState object describing the bottom up state for
    /// pointer Arg.
    bottom_up_ptr_iterator findPtrBottomUpState(const Value *Arg) {
      return PerPtrBottomUp.find(Arg);
    }

    void clearBottomUpPointers() {
      PerPtrBottomUp.clear();
    }

    void clearTopDownPointers() {
      PerPtrTopDown.clear();
    }

    void InitFromPred(const BBState &Other);
    void InitFromSucc(const BBState &Other);
    void MergePred(const BBState &Other);
    void MergeSucc(const BBState &Other);

    /// Compute the number of possible unique paths from an entry to an exit
    /// which pass through this block. This is only valid after both the
    /// top-down and bottom-up traversals are complete.
    ///
    /// Returns true if overflow occurred. Returns false if overflow did not
    /// occur.
    bool GetAllPathCountWithOverflow(unsigned &PathCount) const {
      if (TopDownPathCount == OverflowOccurredValue ||
          BottomUpPathCount == OverflowOccurredValue)
        return true;
      unsigned long long Product =
        (unsigned long long)TopDownPathCount*BottomUpPathCount;
      // Overflow occurred if any of the upper bits of Product are set or if
      // all the lower bits of Product are set.
      return (Product >> 32) ||
             ((PathCount = Product) == OverflowOccurredValue);
    }

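    // For example, with TopDownPathCount == 8 and BottomUpPathCount == 5 there
    // are 8 * 5 == 40 entry-to-exit paths through this block; the product fits
    // in 32 bits and is not OverflowOccurredValue, so PathCount is set to 40
    // and false is returned.
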
    // Specialized CFG utilities.
    typedef SmallVectorImpl<BasicBlock *>::const_iterator edge_iterator;
    edge_iterator pred_begin() const { return Preds.begin(); }
    edge_iterator pred_end() const { return Preds.end(); }
    edge_iterator succ_begin() const { return Succs.begin(); }
    edge_iterator succ_end() const { return Succs.end(); }

    void addSucc(BasicBlock *Succ) { Succs.push_back(Succ); }
    void addPred(BasicBlock *Pred) { Preds.push_back(Pred); }

    bool isExit() const { return Succs.empty(); }
  };
}

const unsigned BBState::OverflowOccurredValue = 0xffffffff;

void BBState::InitFromPred(const BBState &Other) {
  PerPtrTopDown = Other.PerPtrTopDown;
  TopDownPathCount = Other.TopDownPathCount;
}

void BBState::InitFromSucc(const BBState &Other) {
  PerPtrBottomUp = Other.PerPtrBottomUp;
  BottomUpPathCount = Other.BottomUpPathCount;
}

/// The top-down traversal uses this to merge information about predecessors to
/// form the initial state for a new block.
void BBState::MergePred(const BBState &Other) {
  if (TopDownPathCount == OverflowOccurredValue)
    return;

  // Other.TopDownPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  TopDownPathCount += Other.TopDownPathCount;

  // In order to be consistent, we clear the top down pointers when the
  // addition makes TopDownPathCount equal to OverflowOccurredValue, even
  // though "true" overflow has not occurred.
  if (TopDownPathCount == OverflowOccurredValue) {
    clearTopDownPointers();
    return;
  }

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (TopDownPathCount < Other.TopDownPathCount) {
    TopDownPathCount = OverflowOccurredValue;
    clearTopDownPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the same key,
  // merge the entries. Otherwise, copy the entry and merge it with an empty
  // entry.
  for (auto MI = Other.top_down_ptr_begin(), ME = Other.top_down_ptr_end();
       MI != ME; ++MI) {
    auto Pair = PerPtrTopDown.insert(*MI);
    Pair.first->second.Merge(Pair.second ? TopDownPtrState() : MI->second,
                             /*TopDown=*/true);
  }

  // For each entry in our set, if the other set doesn't have an entry with the
  // same key, force it to merge with an empty entry.
  for (auto MI = top_down_ptr_begin(), ME = top_down_ptr_end(); MI != ME; ++MI)
    if (Other.PerPtrTopDown.find(MI->first) == Other.PerPtrTopDown.end())
      MI->second.Merge(TopDownPtrState(), /*TopDown=*/true);
}

/// The bottom-up traversal uses this to merge information about successors to
/// form the initial state for a new block.
void BBState::MergeSucc(const BBState &Other) {
  if (BottomUpPathCount == OverflowOccurredValue)
    return;

  // Other.BottomUpPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  BottomUpPathCount += Other.BottomUpPathCount;

  // In order to be consistent, we clear the bottom up pointers when the
  // addition makes BottomUpPathCount equal to OverflowOccurredValue, even
  // though "true" overflow has not occurred.
  if (BottomUpPathCount == OverflowOccurredValue) {
    clearBottomUpPointers();
    return;
  }

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (BottomUpPathCount < Other.BottomUpPathCount) {
    BottomUpPathCount = OverflowOccurredValue;
    clearBottomUpPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the
  // same key, merge the entries. Otherwise, copy the entry and merge
  // it with an empty entry.
  for (auto MI = Other.bottom_up_ptr_begin(), ME = Other.bottom_up_ptr_end();
       MI != ME; ++MI) {
    auto Pair = PerPtrBottomUp.insert(*MI);
    Pair.first->second.Merge(Pair.second ? BottomUpPtrState() : MI->second,
                             /*TopDown=*/false);
  }

  // For each entry in our set, if the other set doesn't have an entry
  // with the same key, force it to merge with an empty entry.
  for (auto MI = bottom_up_ptr_begin(), ME = bottom_up_ptr_end(); MI != ME;
       ++MI)
    if (Other.PerPtrBottomUp.find(MI->first) == Other.PerPtrBottomUp.end())
      MI->second.Merge(BottomUpPtrState(), /*TopDown=*/false);
}

namespace {
  /// \brief The main ARC optimization pass.
  class ObjCARCOpt : public FunctionPass {
    bool Changed;
    ProvenanceAnalysis PA;

    /// A cache of references to runtime entry point constants.
    ARCRuntimeEntryPoints EP;

    /// A cache of MDKinds that can be passed into other functions to propagate
    /// MDKind identifiers.
    ARCMDKindCache MDKindCache;

    // This is used to track if a pointer is stored into an alloca.
    DenseSet<const Value *> MultiOwnersSet;

    /// A flag indicating whether this optimization pass should run.
    bool Run;

    /// Flags which determine whether each of the interesting runtime functions
    /// is in fact used in the current function.
    unsigned UsedInThisFunction;

    bool OptimizeRetainRVCall(Function &F, Instruction *RetainRV);
    void OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV,
                                   ARCInstKind &Class);
    void OptimizeIndividualCalls(Function &F);

    void CheckForCFGHazards(const BasicBlock *BB,
                            DenseMap<const BasicBlock *, BBState> &BBStates,
                            BBState &MyStates) const;
    bool VisitInstructionBottomUp(Instruction *Inst, BasicBlock *BB,
                                  BlotMapVector<Value *, RRInfo> &Retains,
                                  BBState &MyStates);
    bool VisitBottomUp(BasicBlock *BB,
                       DenseMap<const BasicBlock *, BBState> &BBStates,
                       BlotMapVector<Value *, RRInfo> &Retains);
    bool VisitInstructionTopDown(Instruction *Inst,
                                 DenseMap<Value *, RRInfo> &Releases,
                                 BBState &MyStates);
    bool VisitTopDown(BasicBlock *BB,
                      DenseMap<const BasicBlock *, BBState> &BBStates,
                      DenseMap<Value *, RRInfo> &Releases);
    bool Visit(Function &F, DenseMap<const BasicBlock *, BBState> &BBStates,
               BlotMapVector<Value *, RRInfo> &Retains,
               DenseMap<Value *, RRInfo> &Releases);

    void MoveCalls(Value *Arg, RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
                   BlotMapVector<Value *, RRInfo> &Retains,
                   DenseMap<Value *, RRInfo> &Releases,
                   SmallVectorImpl<Instruction *> &DeadInsts, Module *M);

    bool ConnectTDBUTraversals(DenseMap<const BasicBlock *, BBState> &BBStates,
                               BlotMapVector<Value *, RRInfo> &Retains,
                               DenseMap<Value *, RRInfo> &Releases, Module *M,
                               SmallVectorImpl<Instruction *> &NewRetains,
                               SmallVectorImpl<Instruction *> &NewReleases,
                               SmallVectorImpl<Instruction *> &DeadInsts,
                               RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
                               Value *Arg, bool KnownSafe,
                               bool &AnyPairsCompletelyEliminated);

    bool PerformCodePlacement(DenseMap<const BasicBlock *, BBState> &BBStates,
                              BlotMapVector<Value *, RRInfo> &Retains,
                              DenseMap<Value *, RRInfo> &Releases, Module *M);

    void OptimizeWeakCalls(Function &F);

    bool OptimizeSequences(Function &F);

    void OptimizeReturns(Function &F);

#ifndef NDEBUG
    void GatherStatistics(Function &F, bool AfterOptimization = false);
#endif

    void getAnalysisUsage(AnalysisUsage &AU) const override;
    bool doInitialization(Module &M) override;
    bool runOnFunction(Function &F) override;
    void releaseMemory() override;

  public:
    static char ID;
    ObjCARCOpt() : FunctionPass(ID) {
      initializeObjCARCOptPass(*PassRegistry::getPassRegistry());
    }
  };
}

char ObjCARCOpt::ID = 0;
INITIALIZE_PASS_BEGIN(ObjCARCOpt,
                      "objc-arc", "ObjC ARC optimization", false, false)
INITIALIZE_PASS_DEPENDENCY(ObjCARCAliasAnalysis)
INITIALIZE_PASS_END(ObjCARCOpt,
                    "objc-arc", "ObjC ARC optimization", false, false)

Pass *llvm::createObjCARCOptPass() {
  return new ObjCARCOpt();
}

void ObjCARCOpt::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<ObjCARCAliasAnalysis>();
  AU.addRequired<AliasAnalysis>();
  // ARC optimization doesn't currently split critical edges.
  AU.setPreservesCFG();
}

/// Turn objc_retainAutoreleasedReturnValue into objc_retain if the operand is
/// not a return value. Or, if it can be paired with an
/// objc_autoreleaseReturnValue, delete the pair and return true.
bool
ObjCARCOpt::OptimizeRetainRVCall(Function &F, Instruction *RetainRV) {
  // Check for the argument being from an immediately preceding call or invoke.
  const Value *Arg = GetArgRCIdentityRoot(RetainRV);
  ImmutableCallSite CS(Arg);
  if (const Instruction *Call = CS.getInstruction()) {
    if (Call->getParent() == RetainRV->getParent()) {
      BasicBlock::const_iterator I = Call;
      ++I;
      while (IsNoopInstruction(I)) ++I;
      if (&*I == RetainRV)
        return false;
    } else if (const InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
      BasicBlock *RetainRVParent = RetainRV->getParent();
      if (II->getNormalDest() == RetainRVParent) {
        BasicBlock::const_iterator I = RetainRVParent->begin();
        while (IsNoopInstruction(I)) ++I;
        if (&*I == RetainRV)
          return false;
      }
    }
  }

  // Check for being preceded by an objc_autoreleaseReturnValue on the same
  // pointer. In this case, we can delete the pair.
  BasicBlock::iterator I = RetainRV, Begin = RetainRV->getParent()->begin();
  if (I != Begin) {
    do --I; while (I != Begin && IsNoopInstruction(I));
    if (GetBasicARCInstKind(I) == ARCInstKind::AutoreleaseRV &&
        GetArgRCIdentityRoot(I) == Arg) {
      Changed = true;
      ++NumPeeps;

      DEBUG(dbgs() << "Erasing autoreleaseRV,retainRV pair: " << *I << "\n"
                   << "Erasing " << *RetainRV << "\n");

      EraseInstruction(I);
      EraseInstruction(RetainRV);
      return true;
    }
  }

  // Turn it to a plain objc_retain.
  Changed = true;
  ++NumPeeps;

  DEBUG(dbgs() << "Transforming objc_retainAutoreleasedReturnValue => "
                  "objc_retain since the operand is not a return value.\n"
                  "Old = " << *RetainRV << "\n");

  Constant *NewDecl = EP.get(ARCRuntimeEntryPoints::EPT_Retain);
  cast<CallInst>(RetainRV)->setCalledFunction(NewDecl);

  DEBUG(dbgs() << "New = " << *RetainRV << "\n");

  return false;
}

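// A sketch of the pair-deletion case handled above (IR names hypothetical):
//
//   %x = call i8* @foo()
//   %y = call i8* @objc_autoreleaseReturnValue(i8* %x)
//   %z = call i8* @objc_retainAutoreleasedReturnValue(i8* %y)
// =>
//   %x = call i8* @foo()
//
// When no such pairing is found and the operand is not an immediately
// preceding return value, the call is instead rewritten in place to a plain
// objc_retain.
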
/// Turn objc_autoreleaseReturnValue into objc_autorelease if the result is not
/// used as a return value.
void ObjCARCOpt::OptimizeAutoreleaseRVCall(Function &F,
                                           Instruction *AutoreleaseRV,
                                           ARCInstKind &Class) {
  // Check for a return of the pointer value.
  const Value *Ptr = GetArgRCIdentityRoot(AutoreleaseRV);
  SmallVector<const Value *, 2> Users;
  Users.push_back(Ptr);
  do {
    Ptr = Users.pop_back_val();
    for (const User *U : Ptr->users()) {
      if (isa<ReturnInst>(U) || GetBasicARCInstKind(U) == ARCInstKind::RetainRV)
        return;
      if (isa<BitCastInst>(U))
        Users.push_back(U);
    }
  } while (!Users.empty());

  Changed = true;
  ++NumPeeps;

  DEBUG(dbgs() << "Transforming objc_autoreleaseReturnValue => "
                  "objc_autorelease since its operand is not used as a return "
                  "value.\n"
                  "Old = " << *AutoreleaseRV << "\n");

  CallInst *AutoreleaseRVCI = cast<CallInst>(AutoreleaseRV);
  Constant *NewDecl = EP.get(ARCRuntimeEntryPoints::EPT_Autorelease);
  AutoreleaseRVCI->setCalledFunction(NewDecl);
  AutoreleaseRVCI->setTailCall(false); // Never tail call objc_autorelease.
  Class = ARCInstKind::Autorelease;

  DEBUG(dbgs() << "New: " << *AutoreleaseRV << "\n");
}

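// A sketch of this rewrite (IR names hypothetical), applied when %v is never
// returned and never consumed by an objc_retainAutoreleasedReturnValue:
//
//   %0 = tail call i8* @objc_autoreleaseReturnValue(i8* %v)
// =>
//   %0 = call i8* @objc_autorelease(i8* %v)   ; tail marker removed
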
/// Visit each call, one at a time, and make simplifications without doing any
/// additional analysis.
void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
  DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeIndividualCalls ==\n");
  // Reset all the flags in preparation for recomputing them.
  UsedInThisFunction = 0;

  // Visit all objc_* calls in F.
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;

    ARCInstKind Class = GetBasicARCInstKind(Inst);

    DEBUG(dbgs() << "Visiting: Class: " << Class << "; " << *Inst << "\n");

    switch (Class) {
    default: break;

    // Delete no-op casts. These function calls have special semantics, but
    // the semantics are entirely implemented via lowering in the front-end,
    // so by the time they reach the optimizer, they are just no-op calls
    // which return their argument.
    //
    // There are gray areas here, as the ability to cast reference-counted
    // pointers to raw void* and back allows code to break ARC assumptions,
    // however these are currently considered to be unimportant.
    case ARCInstKind::NoopCast:
      Changed = true;
      ++NumNoops;
      DEBUG(dbgs() << "Erasing no-op cast: " << *Inst << "\n");
      EraseInstruction(Inst);
      continue;

    // If the pointer-to-weak-pointer is null, it's undefined behavior.
    case ARCInstKind::StoreWeak:
    case ARCInstKind::LoadWeak:
    case ARCInstKind::LoadWeakRetained:
    case ARCInstKind::InitWeak:
    case ARCInstKind::DestroyWeak: {
      CallInst *CI = cast<CallInst>(Inst);
      if (IsNullOrUndef(CI->getArgOperand(0))) {
        Changed = true;
        Type *Ty = CI->getArgOperand(0)->getType();
        new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
                      Constant::getNullValue(Ty),
                      CI);
        llvm::Value *NewValue = UndefValue::get(CI->getType());
        DEBUG(dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
                        "\nOld = " << *CI << "\nNew = " << *NewValue << "\n");
        CI->replaceAllUsesWith(NewValue);
        CI->eraseFromParent();
        continue;
      }
      break;
    }
    case ARCInstKind::CopyWeak:
    case ARCInstKind::MoveWeak: {
      CallInst *CI = cast<CallInst>(Inst);
      if (IsNullOrUndef(CI->getArgOperand(0)) ||
          IsNullOrUndef(CI->getArgOperand(1))) {
        Changed = true;
        Type *Ty = CI->getArgOperand(0)->getType();
        new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
                      Constant::getNullValue(Ty),
                      CI);

        llvm::Value *NewValue = UndefValue::get(CI->getType());
        DEBUG(dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
                        "\nOld = " << *CI << "\nNew = " << *NewValue << "\n");

        CI->replaceAllUsesWith(NewValue);
        CI->eraseFromParent();
        continue;
      }
      break;
    }
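
    // A sketch of the replacement performed for the null-weak cases above
    // (IR names hypothetical):
    //
    //   %v = call i8* @objc_loadWeak(i8** null)
    // =>
    //   store i8* undef, i8** null   ; make the undefined behavior explicit
    //   ; uses of %v are replaced with undef and the call is erased
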
    case ARCInstKind::RetainRV:
      if (OptimizeRetainRVCall(F, Inst))
        continue;
      break;
    case ARCInstKind::AutoreleaseRV:
      OptimizeAutoreleaseRVCall(F, Inst, Class);
      break;
    }

    // objc_autorelease(x) -> objc_release(x) if x is otherwise unused.
    if (IsAutorelease(Class) && Inst->use_empty()) {
      CallInst *Call = cast<CallInst>(Inst);
      const Value *Arg = Call->getArgOperand(0);
      Arg = FindSingleUseIdentifiedObject(Arg);
      if (Arg) {
        Changed = true;
        ++NumAutoreleases;

        // Create the declaration lazily.
        LLVMContext &C = Inst->getContext();

        Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Release);
        CallInst *NewCall = CallInst::Create(Decl, Call->getArgOperand(0), "",
                                             Call);
        NewCall->setMetadata(MDKindCache.ImpreciseReleaseMDKind,
                             MDNode::get(C, None));

        DEBUG(dbgs() << "Replacing autorelease{,RV}(x) with objc_release(x) "
                        "since x is otherwise unused.\nOld: " << *Call << "\nNew: "
                     << *NewCall << "\n");

        EraseInstruction(Call);
        Inst = NewCall;
        Class = ARCInstKind::Release;
      }
    }

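    // A sketch of this peephole (IR names hypothetical), applied when %x is a
    // single-use identified object:
    //
    //   call i8* @objc_autorelease(i8* %x)
    // =>
    //   call void @objc_release(i8* %x), !clang.imprecise_release !0
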
    // For functions which can never be passed stack arguments, add
    // a tail keyword.
    if (IsAlwaysTail(Class)) {
      Changed = true;
      DEBUG(dbgs() << "Adding tail keyword to function since it can never be "
                      "passed stack args: " << *Inst << "\n");
      cast<CallInst>(Inst)->setTailCall();
    }

    // Ensure that functions that can never have a "tail" keyword due to the
    // semantics of ARC truly do not do so.
    if (IsNeverTail(Class)) {
      Changed = true;
      DEBUG(dbgs() << "Removing tail keyword from function: " << *Inst <<
            "\n");
      cast<CallInst>(Inst)->setTailCall(false);
    }

    // Set nounwind as needed.
    if (IsNoThrow(Class)) {
      Changed = true;
      DEBUG(dbgs() << "Found no throw class. Setting nounwind on: " << *Inst
                   << "\n");
      cast<CallInst>(Inst)->setDoesNotThrow();
    }

    if (!IsNoopOnNull(Class)) {
      UsedInThisFunction |= 1 << unsigned(Class);
      continue;
    }

    const Value *Arg = GetArgRCIdentityRoot(Inst);

    // ARC calls with null are no-ops. Delete them.
    if (IsNullOrUndef(Arg)) {
      Changed = true;
      ++NumNoops;
      DEBUG(dbgs() << "ARC calls with null are no-ops. Erasing: " << *Inst
                   << "\n");
      EraseInstruction(Inst);
      continue;
    }

    // Keep track of which of retain, release, autorelease, and retain_block
    // are actually present in this function.
    UsedInThisFunction |= 1 << unsigned(Class);

    // If Arg is a PHI, and one or more incoming values to the
    // PHI are null, and the call is control-equivalent to the PHI, and there
    // are no relevant side effects between the PHI and the call, the call
    // could be pushed up to just those paths with non-null incoming values.
    // For now, don't bother splitting critical edges for this.
    SmallVector<std::pair<Instruction *, const Value *>, 4> Worklist;
    Worklist.push_back(std::make_pair(Inst, Arg));
    do {
      std::pair<Instruction *, const Value *> Pair = Worklist.pop_back_val();
      Inst = Pair.first;
      Arg = Pair.second;

      const PHINode *PN = dyn_cast<PHINode>(Arg);
      if (!PN) continue;

      // Determine if the PHI has any null operands, or any incoming
      // critical edges.
      bool HasNull = false;
      bool HasCriticalEdges = false;
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        Value *Incoming =
          GetRCIdentityRoot(PN->getIncomingValue(i));
        if (IsNullOrUndef(Incoming))
          HasNull = true;
        else if (cast<TerminatorInst>(PN->getIncomingBlock(i)->back())
                   .getNumSuccessors() != 1) {
          HasCriticalEdges = true;
          break;
        }
      }
      // If we have null operands and no critical edges, optimize.
      if (!HasCriticalEdges && HasNull) {
        SmallPtrSet<Instruction *, 4> DependingInstructions;
        SmallPtrSet<const BasicBlock *, 4> Visited;

        // Check that there is nothing that cares about the reference
        // count between the call and the phi.
        switch (Class) {
        case ARCInstKind::Retain:
        case ARCInstKind::RetainBlock:
          // These can always be moved up.
          break;
        case ARCInstKind::Release:
          // These can't be moved across things that care about the retain
          // count.
          FindDependencies(NeedsPositiveRetainCount, Arg,
                           Inst->getParent(), Inst,
                           DependingInstructions, Visited, PA);
          break;
        case ARCInstKind::Autorelease:
          // These can't be moved across autorelease pool scope boundaries.
          FindDependencies(AutoreleasePoolBoundary, Arg,
                           Inst->getParent(), Inst,
                           DependingInstructions, Visited, PA);
          break;
        case ARCInstKind::RetainRV:
        case ARCInstKind::AutoreleaseRV:
          // Don't move these; the RV optimization depends on the autoreleaseRV
          // being tail called, and the retainRV being immediately after a call
          // (which might still happen if we get lucky with codegen layout, but
          // it's not worth taking the chance).
          continue;
        default:
          llvm_unreachable("Invalid dependence flavor");
        }

        if (DependingInstructions.size() == 1 &&
            *DependingInstructions.begin() == PN) {
          Changed = true;
          ++NumPartialNoops;
          // Clone the call into each predecessor that has a non-null value.
          CallInst *CInst = cast<CallInst>(Inst);
          Type *ParamTy = CInst->getArgOperand(0)->getType();
          for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
            Value *Incoming =
              GetRCIdentityRoot(PN->getIncomingValue(i));
            if (!IsNullOrUndef(Incoming)) {
              CallInst *Clone = cast<CallInst>(CInst->clone());
              Value *Op = PN->getIncomingValue(i);
              Instruction *InsertPos = &PN->getIncomingBlock(i)->back();
              if (Op->getType() != ParamTy)
                Op = new BitCastInst(Op, ParamTy, "", InsertPos);
              Clone->setArgOperand(0, Op);
              Clone->insertBefore(InsertPos);

              DEBUG(dbgs() << "Cloning "
                           << *CInst << "\n"
                              "And inserting clone at " << *InsertPos << "\n");
              Worklist.push_back(std::make_pair(Clone, Incoming));
            }
          }
          // Erase the original call.
          DEBUG(dbgs() << "Erasing: " << *CInst << "\n");
          EraseInstruction(CInst);
          continue;
        }
      }
    } while (!Worklist.empty());
  }
}

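// A sketch of the PHI transformation performed above (IR names and block
// labels hypothetical):
//
//   %p = phi i8* [ null, %bb.null ], [ %x, %bb.x ]
//   call void @objc_release(i8* %p)
// =>
//   ; cloned into %bb.x, before its terminator:
//   call void @objc_release(i8* %x)
//
// The original call is erased; the path through %bb.null needs no release
// because ARC calls on null are no-ops.
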
/// If we have a top down pointer in the S_Use state, make sure that there are
/// no CFG hazards by checking the states of various bottom up pointers.
static void CheckForUseCFGHazard(const Sequence SuccSSeq,
                                 const bool SuccSRRIKnownSafe,
                                 TopDownPtrState &S,
                                 bool &SomeSuccHasSame,
                                 bool &AllSuccsHaveSame,
                                 bool &NotAllSeqEqualButKnownSafe,
                                 bool &ShouldContinue) {
  switch (SuccSSeq) {
  case S_CanRelease: {
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe) {
      S.ClearSequenceProgress();
      break;
    }
    S.SetCFGHazardAfflicted(true);
    ShouldContinue = true;
    break;
  }
  case S_Use:
    SomeSuccHasSame = true;
    break;
  case S_Stop:
  case S_Release:
  case S_MovableRelease:
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
      AllSuccsHaveSame = false;
    else
      NotAllSeqEqualButKnownSafe = true;
    break;
  case S_Retain:
    llvm_unreachable("bottom-up pointer in retain state!");
  case S_None:
    llvm_unreachable("This should have been handled earlier.");
  }
}

/// If we have a Top Down pointer in the S_CanRelease state, make sure that
/// there are no CFG hazards by checking the states of various bottom up
/// pointers.
static void CheckForCanReleaseCFGHazard(const Sequence SuccSSeq,
                                        const bool SuccSRRIKnownSafe,
                                        TopDownPtrState &S,
                                        bool &SomeSuccHasSame,
                                        bool &AllSuccsHaveSame,
                                        bool &NotAllSeqEqualButKnownSafe) {
  switch (SuccSSeq) {
  case S_CanRelease:
    SomeSuccHasSame = true;
    break;
  case S_Stop:
  case S_Release:
  case S_MovableRelease:
  case S_Use:
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
      AllSuccsHaveSame = false;
    else
      NotAllSeqEqualButKnownSafe = true;
    break;
  case S_Retain:
    llvm_unreachable("bottom-up pointer in retain state!");
  case S_None:
    llvm_unreachable("This should have been handled earlier.");
  }
}

/// Check for critical edges, loop boundaries, irreducible control flow, or
/// other CFG structures where moving code across the edge would result in it
/// being executed more.
void
ObjCARCOpt::CheckForCFGHazards(const BasicBlock *BB,
                               DenseMap<const BasicBlock *, BBState> &BBStates,
                               BBState &MyStates) const {
  // If any top-down local-use or possible-dec has a succ which is earlier in
  // the sequence, forget it.
  for (auto I = MyStates.top_down_ptr_begin(), E = MyStates.top_down_ptr_end();
       I != E; ++I) {
    TopDownPtrState &S = I->second;
    const Sequence Seq = I->second.GetSeq();

    // We only care about S_Retain, S_CanRelease, and S_Use.
    if (Seq == S_None)
      continue;

    // Make sure that if extra top down states are added in the future that this
    // code is updated to handle it.
    assert((Seq == S_Retain || Seq == S_CanRelease || Seq == S_Use) &&
           "Unknown top down sequence state.");

    const Value *Arg = I->first;
    const TerminatorInst *TI = cast<TerminatorInst>(&BB->back());
    bool SomeSuccHasSame = false;
    bool AllSuccsHaveSame = true;
    bool NotAllSeqEqualButKnownSafe = false;

    succ_const_iterator SI(TI), SE(TI, false);

    for (; SI != SE; ++SI) {
      // If VisitBottomUp has pointer information for this successor, take
      // what we know about it.
      const DenseMap<const BasicBlock *, BBState>::iterator BBI =
        BBStates.find(*SI);
      assert(BBI != BBStates.end());
      const BottomUpPtrState &SuccS = BBI->second.getPtrBottomUpState(Arg);
      const Sequence SuccSSeq = SuccS.GetSeq();

      // If bottom up, the pointer is in an S_None state, clear the sequence
      // progress since the sequence in the bottom up state finished
      // suggesting a mismatch in between retains/releases. This is true for
      // all three cases that we are handling here: S_Retain, S_Use, and
      // S_CanRelease.
      if (SuccSSeq == S_None) {
        S.ClearSequenceProgress();
        continue;
      }

      // If we have S_Use or S_CanRelease, perform our check for cfg hazard
      // checks.
      const bool SuccSRRIKnownSafe = SuccS.IsKnownSafe();

      // *NOTE* We do not use Seq from above here since we are allowing for
      // S.GetSeq() to change while we are visiting basic blocks.
      switch(S.GetSeq()) {
      case S_Use: {
        bool ShouldContinue = false;
        CheckForUseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S, SomeSuccHasSame,
                             AllSuccsHaveSame, NotAllSeqEqualButKnownSafe,
                             ShouldContinue);
        if (ShouldContinue)
          continue;
        break;
      }
      case S_CanRelease: {
        CheckForCanReleaseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S,
                                    SomeSuccHasSame, AllSuccsHaveSame,
                                    NotAllSeqEqualButKnownSafe);
        break;
      }
      case S_Retain:
      case S_None:
      case S_Stop:
      case S_Release:
      case S_MovableRelease:
        break;
      }
    }

    // If the state at the other end of any of the successor edges
    // matches the current state, require all edges to match. This
    // guards against loops in the middle of a sequence.
    if (SomeSuccHasSame && !AllSuccsHaveSame) {
      S.ClearSequenceProgress();
    } else if (NotAllSeqEqualButKnownSafe) {
      // If we would have cleared the state foregoing the fact that we are known
      // safe, stop code motion. This is because whether or not it is safe to
      // remove RR pairs via KnownSafe is an orthogonal concept to whether we
      // are allowed to perform code motion.
      S.SetCFGHazardAfflicted(true);
    }
  }
}

bool ObjCARCOpt::VisitInstructionBottomUp(
    Instruction *Inst, BasicBlock *BB, BlotMapVector<Value *, RRInfo> &Retains,
    BBState &MyStates) {
  bool NestingDetected = false;
  ARCInstKind Class = GetARCInstKind(Inst);
  const Value *Arg = nullptr;

  DEBUG(dbgs() << "Class: " << Class << "\n");

  switch (Class) {
  case ARCInstKind::Release: {
    Arg = GetArgRCIdentityRoot(Inst);

    BottomUpPtrState &S = MyStates.getPtrBottomUpState(Arg);
    NestingDetected |= S.InitBottomUp(MDKindCache, Inst);
    break;
  }
  case ARCInstKind::RetainBlock:
    // In OptimizeIndividualCalls, we have strength reduced all optimizable
    // objc_retainBlocks to objc_retains. Thus at this point any
    // objc_retainBlocks that we see are not optimizable.
    break;
  case ARCInstKind::Retain:
  case ARCInstKind::RetainRV: {
    Arg = GetArgRCIdentityRoot(Inst);

    BottomUpPtrState &S = MyStates.getPtrBottomUpState(Arg);
    S.SetKnownPositiveRefCount();

    Sequence OldSeq = S.GetSeq();
    switch (OldSeq) {
    case S_Stop:
    case S_Release:
    case S_MovableRelease:
    case S_Use:
      // If OldSeq is not S_Use or OldSeq is S_Use and we are tracking an
      // imprecise release, clear our reverse insertion points.
      if (OldSeq != S_Use || S.IsTrackingImpreciseReleases())
        S.ClearReverseInsertPts();
      // FALL THROUGH
    case S_CanRelease:
      // Don't do retain+release tracking for ARCInstKind::RetainRV, because
      // it's better to let it remain as the first instruction after a call.
      if (Class != ARCInstKind::RetainRV)
        Retains[Inst] = S.GetRRInfo();
      S.ClearSequenceProgress();
      break;
    case S_None:
      break;
    case S_Retain:
      llvm_unreachable("bottom-up pointer in retain state!");
    }
    // A retain moving bottom up can be a use.
    break;
  }
  case ARCInstKind::AutoreleasepoolPop:
    // Conservatively, clear MyStates for all known pointers.
    MyStates.clearBottomUpPointers();
    return NestingDetected;
  case ARCInstKind::AutoreleasepoolPush:
  case ARCInstKind::None:
    // These are irrelevant.
    return NestingDetected;
  case ARCInstKind::User:
    // If we have a store into an alloca of a pointer we are tracking, the
    // pointer has multiple owners implying that we must be more conservative.
    //
    // This comes up in the context of a pointer being ``KnownSafe''. In the
    // presence of a block being initialized, the frontend will emit the
    // objc_retain on the original pointer and the release on the pointer
    // loaded from the alloca. Through the provenance analysis, the optimizer
    // will realize that the two are related, but since we only require
    // KnownSafe in one direction, it will match the inner retain on the
    // original pointer with the guard release on the original pointer. This is
    // fixed by ensuring that in the presence of allocas we only
    // unconditionally remove pointers if both our retain and our release are
    // KnownSafe.
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      if (AreAnyUnderlyingObjectsAnAlloca(SI->getPointerOperand())) {
        auto I = MyStates.findPtrBottomUpState(
            GetRCIdentityRoot(SI->getValueOperand()));
        if (I != MyStates.bottom_up_ptr_end())
          MultiOwnersSet.insert(I->first);
      }
    }
    break;
  default:
    break;
  }

  // Consider any other possible effects of this instruction on each
  // pointer being tracked.
  for (auto MI = MyStates.bottom_up_ptr_begin(),
            ME = MyStates.bottom_up_ptr_end();
       MI != ME; ++MI) {
    const Value *Ptr = MI->first;
    if (Ptr == Arg)
      continue; // Handled above.
    BottomUpPtrState &S = MI->second;
    Sequence Seq = S.GetSeq();

    // Check for possible releases.
    if (CanAlterRefCount(Inst, Ptr, PA, Class)) {
      DEBUG(dbgs() << "CanAlterRefCount: Seq: " << Seq << "; " << *Ptr
                   << "\n");
      S.ClearKnownPositiveRefCount();
      switch (Seq) {
      case S_Use:
        S.SetSeq(S_CanRelease);
        continue;
      case S_CanRelease:
      case S_Release:
      case S_MovableRelease:
      case S_Stop:
      case S_None:
        break;
      case S_Retain:
        llvm_unreachable("bottom-up pointer in retain state!");
      }
    }

    // Check for possible direct uses.
    switch (Seq) {
    case S_Release:
    case S_MovableRelease:
      if (CanUse(Inst, Ptr, PA, Class)) {
        DEBUG(dbgs() << "CanUse: Seq: " << Seq << "; " << *Ptr
                     << "\n");
        assert(!S.HasReverseInsertPts());
        // If this is an invoke instruction, we're scanning it as part of
        // one of its successor blocks, since we can't insert code after it
        // in its own block, and we don't want to split critical edges.
        if (isa<InvokeInst>(Inst))
          S.InsertReverseInsertPt(BB->getFirstInsertionPt());
        else
          S.InsertReverseInsertPt(std::next(BasicBlock::iterator(Inst)));
        S.SetSeq(S_Use);
      } else if (Seq == S_Release && IsUser(Class)) {
        DEBUG(dbgs() << "PreciseReleaseUse: Seq: " << Seq << "; " << *Ptr
                     << "\n");
        // Non-movable releases depend on any possible objc pointer use.
        S.SetSeq(S_Stop);
        assert(!S.HasReverseInsertPts());
        // As above; handle invoke specially.
        if (isa<InvokeInst>(Inst))
          S.InsertReverseInsertPt(BB->getFirstInsertionPt());
        else
          S.InsertReverseInsertPt(std::next(BasicBlock::iterator(Inst)));
      }
      break;
    case S_Stop:
      if (CanUse(Inst, Ptr, PA, Class)) {
        DEBUG(dbgs() << "PreciseStopUse: Seq: " << Seq << "; " << *Ptr
                     << "\n");
        S.SetSeq(S_Use);
      }
      break;
    case S_CanRelease:
    case S_Use:
    case S_None:
      break;
    case S_Retain:
      llvm_unreachable("bottom-up pointer in retain state!");
    }
  }

  return NestingDetected;
}

bool ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
                               DenseMap<const BasicBlock *, BBState> &BBStates,
                               BlotMapVector<Value *, RRInfo> &Retains) {
  DEBUG(dbgs() << "\n== ObjCARCOpt::VisitBottomUp ==\n");

  bool NestingDetected = false;
  BBState &MyStates = BBStates[BB];

  // Merge the states from each successor to compute the initial state
  // for the current block.
  BBState::edge_iterator SI(MyStates.succ_begin()),
                         SE(MyStates.succ_end());
  if (SI != SE) {
    const BasicBlock *Succ = *SI;
    DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Succ);
    assert(I != BBStates.end());
    MyStates.InitFromSucc(I->second);
    ++SI;
    for (; SI != SE; ++SI) {
      Succ = *SI;
      I = BBStates.find(Succ);
      assert(I != BBStates.end());
      MyStates.MergeSucc(I->second);
    }
  }

  // Visit all the instructions, bottom-up.
  for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; --I) {
    Instruction *Inst = std::prev(I);

    // Invoke instructions are visited as part of their successors (below).
    if (isa<InvokeInst>(Inst))
      continue;

    DEBUG(dbgs() << "Visiting " << *Inst << "\n");

    NestingDetected |= VisitInstructionBottomUp(Inst, BB, Retains, MyStates);
  }

  // If there's a predecessor with an invoke, visit the invoke as if it were
  // part of this block, since we can't insert code after an invoke in its own
  // block, and we don't want to split critical edges.
  for (BBState::edge_iterator PI(MyStates.pred_begin()),
       PE(MyStates.pred_end()); PI != PE; ++PI) {
    BasicBlock *Pred = *PI;
    if (InvokeInst *II = dyn_cast<InvokeInst>(&Pred->back()))
      NestingDetected |= VisitInstructionBottomUp(II, BB, Retains, MyStates);
  }

  return NestingDetected;
}

bool
ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst,
                                    DenseMap<Value *, RRInfo> &Releases,
                                    BBState &MyStates) {
  bool NestingDetected = false;
  ARCInstKind Class = GetARCInstKind(Inst);
  const Value *Arg = nullptr;

  switch (Class) {
  case ARCInstKind::RetainBlock:
    // In OptimizeIndividualCalls, we have strength reduced all optimizable
    // objc_retainBlocks to objc_retains. Thus at this point any
    // objc_retainBlocks that we see are not optimizable.
    break;
  case ARCInstKind::Retain:
  case ARCInstKind::RetainRV: {
    Arg = GetArgRCIdentityRoot(Inst);
    TopDownPtrState &S = MyStates.getPtrTopDownState(Arg);
    NestingDetected |= S.InitTopDown(Class, Inst);
    // A retain can be a potential use; proceed to the generic checking
    // code below.
    break;
  }
  case ARCInstKind::Release: {
    Arg = GetArgRCIdentityRoot(Inst);

    TopDownPtrState &S = MyStates.getPtrTopDownState(Arg);
    S.ClearKnownPositiveRefCount();

    Sequence OldSeq = S.GetSeq();

    MDNode *ReleaseMetadata =
        Inst->getMetadata(MDKindCache.ImpreciseReleaseMDKind);

    switch (OldSeq) {
    case S_Retain:
    case S_CanRelease:
      if (OldSeq == S_Retain || ReleaseMetadata != nullptr)
        S.ClearReverseInsertPts();
      // FALL THROUGH
    case S_Use:
      S.SetReleaseMetadata(ReleaseMetadata);
      S.SetTailCallRelease(cast<CallInst>(Inst)->isTailCall());
      Releases[Inst] = S.GetRRInfo();
      S.ClearSequenceProgress();
      break;
    case S_None:
      break;
    case S_Stop:
    case S_Release:
    case S_MovableRelease:
      llvm_unreachable("top-down pointer in release state!");
    }
    break;
  }
  case ARCInstKind::AutoreleasepoolPop:
    // Conservatively, clear MyStates for all known pointers.
    MyStates.clearTopDownPointers();
    return NestingDetected;
  case ARCInstKind::AutoreleasepoolPush:
  case ARCInstKind::None:
    // These are irrelevant.
    return NestingDetected;
  default:
    break;
  }

  // Consider any other possible effects of this instruction on each
  // pointer being tracked.
  for (auto MI = MyStates.top_down_ptr_begin(),
            ME = MyStates.top_down_ptr_end();
       MI != ME; ++MI) {
    const Value *Ptr = MI->first;
    if (Ptr == Arg)
      continue; // Handled above.
    TopDownPtrState &S = MI->second;
    Sequence Seq = S.GetSeq();

    // Check for possible releases.
    if (CanAlterRefCount(Inst, Ptr, PA, Class)) {
      DEBUG(dbgs() << "CanAlterRefCount: Seq: " << Seq << "; " << *Ptr
                   << "\n");
      S.ClearKnownPositiveRefCount();
      switch (Seq) {
      case S_Retain:
        S.SetSeq(S_CanRelease);
        assert(!S.HasReverseInsertPts());
        S.InsertReverseInsertPt(Inst);

        // One call can't cause a transition from S_Retain to S_CanRelease
        // and S_CanRelease to S_Use. If we've made the first transition,
        // we're done.
        continue;
      case S_Use:
      case S_CanRelease:
      case S_None:
        break;
      case S_Stop:
      case S_Release:
      case S_MovableRelease:
        llvm_unreachable("top-down pointer in release state!");
      }
    }

    // Check for possible direct uses.
    switch (Seq) {
    case S_CanRelease:
      if (CanUse(Inst, Ptr, PA, Class)) {
        DEBUG(dbgs() << "CanUse: Seq: " << Seq << "; " << *Ptr
                     << "\n");
        S.SetSeq(S_Use);
      }
      break;
    case S_Retain:
    case S_Use:
    case S_None:
      break;
    case S_Stop:
    case S_Release:
    case S_MovableRelease:
      llvm_unreachable("top-down pointer in release state!");
    }
  }

  return NestingDetected;
}

bool
ObjCARCOpt::VisitTopDown(BasicBlock *BB,
                         DenseMap<const BasicBlock *, BBState> &BBStates,
                         DenseMap<Value *, RRInfo> &Releases) {
  DEBUG(dbgs() << "\n== ObjCARCOpt::VisitTopDown ==\n");
  bool NestingDetected = false;
  BBState &MyStates = BBStates[BB];

  // Merge the states from each predecessor to compute the initial state
  // for the current block.
  BBState::edge_iterator PI(MyStates.pred_begin()),
                         PE(MyStates.pred_end());
  if (PI != PE) {
    const BasicBlock *Pred = *PI;
    DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Pred);
    assert(I != BBStates.end());
    MyStates.InitFromPred(I->second);
    ++PI;
    for (; PI != PE; ++PI) {
      Pred = *PI;
      I = BBStates.find(Pred);
      assert(I != BBStates.end());
      MyStates.MergePred(I->second);
    }
  }

  // Visit all the instructions, top-down.
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
    Instruction *Inst = I;

    DEBUG(dbgs() << "Visiting " << *Inst << "\n");

    NestingDetected |= VisitInstructionTopDown(Inst, Releases, MyStates);
  }

  CheckForCFGHazards(BB, BBStates, MyStates);
  return NestingDetected;
}

static void
ComputePostOrders(Function &F,
                  SmallVectorImpl<BasicBlock *> &PostOrder,
                  SmallVectorImpl<BasicBlock *> &ReverseCFGPostOrder,
                  unsigned NoObjCARCExceptionsMDKind,
                  DenseMap<const BasicBlock *, BBState> &BBStates) {
  /// The visited set, for doing DFS walks.
  SmallPtrSet<BasicBlock *, 16> Visited;

  // Do DFS, computing the PostOrder.
  SmallPtrSet<BasicBlock *, 16> OnStack;
  SmallVector<std::pair<BasicBlock *, succ_iterator>, 16> SuccStack;

  // Functions always have exactly one entry block, and we don't have
  // any other block that we treat like an entry block.
  BasicBlock *EntryBB = &F.getEntryBlock();
  BBState &MyStates = BBStates[EntryBB];
  MyStates.SetAsEntry();
  TerminatorInst *EntryTI = cast<TerminatorInst>(&EntryBB->back());
  SuccStack.push_back(std::make_pair(EntryBB, succ_iterator(EntryTI)));
  Visited.insert(EntryBB);
  OnStack.insert(EntryBB);
  do {
  dfs_next_succ:
    BasicBlock *CurrBB = SuccStack.back().first;
    TerminatorInst *TI = cast<TerminatorInst>(&CurrBB->back());
    succ_iterator SE(TI, false);

    while (SuccStack.back().second != SE) {
      BasicBlock *SuccBB = *SuccStack.back().second++;
      if (Visited.insert(SuccBB).second) {
        TerminatorInst *TI = cast<TerminatorInst>(&SuccBB->back());
        SuccStack.push_back(std::make_pair(SuccBB, succ_iterator(TI)));
        BBStates[CurrBB].addSucc(SuccBB);
        BBState &SuccStates = BBStates[SuccBB];
        SuccStates.addPred(CurrBB);
        OnStack.insert(SuccBB);
        goto dfs_next_succ;
      }

      if (!OnStack.count(SuccBB)) {
        BBStates[CurrBB].addSucc(SuccBB);
        BBStates[SuccBB].addPred(CurrBB);
      }
    }
    OnStack.erase(CurrBB);
    PostOrder.push_back(CurrBB);
    SuccStack.pop_back();
  } while (!SuccStack.empty());

  Visited.clear();

  // Do reverse-CFG DFS, computing the reverse-CFG PostOrder.
  // Functions may have many exits, and there are also blocks which we treat
  // as exits due to ignored edges.
  SmallVector<std::pair<BasicBlock *, BBState::edge_iterator>, 16> PredStack;
  for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
    BasicBlock *ExitBB = I;
    BBState &MyStates = BBStates[ExitBB];
    if (!MyStates.isExit())
      continue;

    MyStates.SetAsExit();

    PredStack.push_back(std::make_pair(ExitBB, MyStates.pred_begin()));
    Visited.insert(ExitBB);
    while (!PredStack.empty()) {
    reverse_dfs_next_succ:
      BBState::edge_iterator PE = BBStates[PredStack.back().first].pred_end();
      while (PredStack.back().second != PE) {
        BasicBlock *BB = *PredStack.back().second++;
        if (Visited.insert(BB).second) {
          PredStack.push_back(std::make_pair(BB, BBStates[BB].pred_begin()));
          goto reverse_dfs_next_succ;
        }
      }
      ReverseCFGPostOrder.push_back(PredStack.pop_back_val().first);
    }
  }
}

// Visit the function both top-down and bottom-up.
bool ObjCARCOpt::Visit(Function &F,
                       DenseMap<const BasicBlock *, BBState> &BBStates,
                       BlotMapVector<Value *, RRInfo> &Retains,
                       DenseMap<Value *, RRInfo> &Releases) {
  // Use reverse-postorder traversals, because we magically know that loops
  // will be well behaved, i.e. they won't repeatedly call retain on a single
  // pointer without doing a release. We can't use the ReversePostOrderTraversal
  // class here because we want the reverse-CFG postorder to consider each
  // function exit point, and we want to ignore selected cycle edges.
  SmallVector<BasicBlock *, 16> PostOrder;
  SmallVector<BasicBlock *, 16> ReverseCFGPostOrder;
  ComputePostOrders(F, PostOrder, ReverseCFGPostOrder,
                    MDKindCache.NoObjCARCExceptionsMDKind, BBStates);

  // Use reverse-postorder on the reverse CFG for bottom-up.
  bool BottomUpNestingDetected = false;
  for (SmallVectorImpl<BasicBlock *>::const_reverse_iterator I =
         ReverseCFGPostOrder.rbegin(), E = ReverseCFGPostOrder.rend();
       I != E; ++I)
    BottomUpNestingDetected |= VisitBottomUp(*I, BBStates, Retains);

  // Use reverse-postorder for top-down.
  bool TopDownNestingDetected = false;
  for (SmallVectorImpl<BasicBlock *>::const_reverse_iterator I =
         PostOrder.rbegin(), E = PostOrder.rend();
       I != E; ++I)
    TopDownNestingDetected |= VisitTopDown(*I, BBStates, Releases);

  return TopDownNestingDetected && BottomUpNestingDetected;
}

/// Move the calls in RetainsToMove and ReleasesToMove.
void ObjCARCOpt::MoveCalls(Value *Arg, RRInfo &RetainsToMove,
                           RRInfo &ReleasesToMove,
                           BlotMapVector<Value *, RRInfo> &Retains,
                           DenseMap<Value *, RRInfo> &Releases,
                           SmallVectorImpl<Instruction *> &DeadInsts,
                           Module *M) {
  Type *ArgTy = Arg->getType();
  Type *ParamTy = PointerType::getUnqual(Type::getInt8Ty(ArgTy->getContext()));

  DEBUG(dbgs() << "== ObjCARCOpt::MoveCalls ==\n");

  // Insert the new retain and release calls.
  for (Instruction *InsertPt : ReleasesToMove.ReverseInsertPts) {
    Value *MyArg = ArgTy == ParamTy ? Arg :
                   new BitCastInst(Arg, ParamTy, "", InsertPt);
    Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Retain);
    CallInst *Call = CallInst::Create(Decl, MyArg, "", InsertPt);
    Call->setDoesNotThrow();
    Call->setTailCall();

    DEBUG(dbgs() << "Inserting new Retain: " << *Call << "\n"
                    "At insertion point: " << *InsertPt << "\n");
  }
  for (Instruction *InsertPt : RetainsToMove.ReverseInsertPts) {
    Value *MyArg = ArgTy == ParamTy ? Arg :
                   new BitCastInst(Arg, ParamTy, "", InsertPt);
    Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Release);
    CallInst *Call = CallInst::Create(Decl, MyArg, "", InsertPt);
    // Attach a clang.imprecise_release metadata tag, if appropriate.
    if (MDNode *M = ReleasesToMove.ReleaseMetadata)
      Call->setMetadata(MDKindCache.ImpreciseReleaseMDKind, M);
    Call->setDoesNotThrow();
    if (ReleasesToMove.IsTailCallRelease)
      Call->setTailCall();

    DEBUG(dbgs() << "Inserting new Release: " << *Call << "\n"
                    "At insertion point: " << *InsertPt << "\n");
  }

  // Delete the original retain and release calls.
  for (Instruction *OrigRetain : RetainsToMove.Calls) {
    Retains.blot(OrigRetain);
    DeadInsts.push_back(OrigRetain);
    DEBUG(dbgs() << "Deleting retain: " << *OrigRetain << "\n");
  }
  for (Instruction *OrigRelease : ReleasesToMove.Calls) {
    Releases.erase(OrigRelease);
    DeadInsts.push_back(OrigRelease);
    DEBUG(dbgs() << "Deleting release: " << *OrigRelease << "\n");
  }
}

bool ObjCARCOpt::ConnectTDBUTraversals(
    DenseMap<const BasicBlock *, BBState> &BBStates,
    BlotMapVector<Value *, RRInfo> &Retains,
    DenseMap<Value *, RRInfo> &Releases, Module *M,
    SmallVectorImpl<Instruction *> &NewRetains,
    SmallVectorImpl<Instruction *> &NewReleases,
    SmallVectorImpl<Instruction *> &DeadInsts, RRInfo &RetainsToMove,
    RRInfo &ReleasesToMove, Value *Arg, bool KnownSafe,
    bool &AnyPairsCompletelyEliminated) {
  // If a pair happens in a region where it is known that the reference count
  // is already incremented, we can similarly ignore possible decrements unless
  // we are dealing with a retainable object with multiple provenance sources.
  bool KnownSafeTD = true, KnownSafeBU = true;
  bool MultipleOwners = false;
  bool CFGHazardAfflicted = false;

  // Connect the dots between the top-down-collected RetainsToMove and
  // bottom-up-collected ReleasesToMove to form sets of related calls.
  // This is an iterative process so that we connect multiple releases
  // to multiple retains if needed.
  unsigned OldDelta = 0;
  unsigned NewDelta = 0;
  unsigned OldCount = 0;
  unsigned NewCount = 0;
  bool FirstRelease = true;
  for (;;) {
    for (SmallVectorImpl<Instruction *>::const_iterator
           NI = NewRetains.begin(), NE = NewRetains.end(); NI != NE; ++NI) {
      Instruction *NewRetain = *NI;
      BlotMapVector<Value *, RRInfo>::const_iterator It =
        Retains.find(NewRetain);
      assert(It != Retains.end());
      const RRInfo &NewRetainRRI = It->second;
      KnownSafeTD &= NewRetainRRI.KnownSafe;
      MultipleOwners =
        MultipleOwners || MultiOwnersSet.count(GetArgRCIdentityRoot(NewRetain));
      for (Instruction *NewRetainRelease : NewRetainRRI.Calls) {
        DenseMap<Value *, RRInfo>::const_iterator Jt =
          Releases.find(NewRetainRelease);
        if (Jt == Releases.end())
          return false;
        const RRInfo &NewRetainReleaseRRI = Jt->second;

        // If the release does not have a reference to the retain as well,
        // something happened which is unaccounted for. Do not do anything.
        //
        // This can happen if we catch an additive overflow during path count
        // merging.
        if (!NewRetainReleaseRRI.Calls.count(NewRetain))
          return false;

        if (ReleasesToMove.Calls.insert(NewRetainRelease).second) {

          // If we overflow when we compute the path count, don't remove/move
          // anything.
          const BBState &NRRBBState = BBStates[NewRetainRelease->getParent()];
          unsigned PathCount = BBState::OverflowOccurredValue;
          if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
            return false;
          assert(PathCount != BBState::OverflowOccurredValue &&
                 "PathCount at this point can not be "
                 "OverflowOccurredValue.");
          OldDelta -= PathCount;

          // Merge the ReleaseMetadata and IsTailCallRelease values.
          if (FirstRelease) {
            ReleasesToMove.ReleaseMetadata =
              NewRetainReleaseRRI.ReleaseMetadata;
            ReleasesToMove.IsTailCallRelease =
              NewRetainReleaseRRI.IsTailCallRelease;
            FirstRelease = false;
          } else {
            if (ReleasesToMove.ReleaseMetadata !=
                NewRetainReleaseRRI.ReleaseMetadata)
              ReleasesToMove.ReleaseMetadata = nullptr;
            if (ReleasesToMove.IsTailCallRelease !=
                NewRetainReleaseRRI.IsTailCallRelease)
              ReleasesToMove.IsTailCallRelease = false;
          }

          // Collect the optimal insertion points.
          if (!KnownSafe)
            for (Instruction *RIP : NewRetainReleaseRRI.ReverseInsertPts) {
              if (ReleasesToMove.ReverseInsertPts.insert(RIP).second) {
                // If we overflow when we compute the path count, don't
                // remove/move anything.
                const BBState &RIPBBState = BBStates[RIP->getParent()];
                PathCount = BBState::OverflowOccurredValue;
                if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
                  return false;
                assert(PathCount != BBState::OverflowOccurredValue &&
                       "PathCount at this point can not be "
                       "OverflowOccurredValue.");
                NewDelta -= PathCount;
              }
            }
          NewReleases.push_back(NewRetainRelease);
        }
      }
    }
    NewRetains.clear();
    if (NewReleases.empty()) break;

    // Back the other way.
    for (SmallVectorImpl<Instruction *>::const_iterator
           NI = NewReleases.begin(), NE = NewReleases.end(); NI != NE; ++NI) {
      Instruction *NewRelease = *NI;
      DenseMap<Value *, RRInfo>::const_iterator It =
        Releases.find(NewRelease);
      assert(It != Releases.end());
      const RRInfo &NewReleaseRRI = It->second;
      KnownSafeBU &= NewReleaseRRI.KnownSafe;
      CFGHazardAfflicted |= NewReleaseRRI.CFGHazardAfflicted;
      for (Instruction *NewReleaseRetain : NewReleaseRRI.Calls) {
        BlotMapVector<Value *, RRInfo>::const_iterator Jt =
          Retains.find(NewReleaseRetain);
        if (Jt == Retains.end())
          return false;
        const RRInfo &NewReleaseRetainRRI = Jt->second;

        // If the retain does not have a reference to the release as well,
        // something happened which is unaccounted for. Do not do anything.
        //
        // This can happen if we catch an additive overflow during path count
        // merging.
        if (!NewReleaseRetainRRI.Calls.count(NewRelease))
          return false;

        if (RetainsToMove.Calls.insert(NewReleaseRetain).second) {
          // If we overflow when we compute the path count, don't remove/move
          // anything.
          const BBState &NRRBBState = BBStates[NewReleaseRetain->getParent()];
          unsigned PathCount = BBState::OverflowOccurredValue;
          if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
            return false;
          assert(PathCount != BBState::OverflowOccurredValue &&
                 "PathCount at this point can not be "
                 "OverflowOccurredValue.");
          OldDelta += PathCount;
          OldCount += PathCount;

          // Collect the optimal insertion points.
          if (!KnownSafe)
            for (Instruction *RIP : NewReleaseRetainRRI.ReverseInsertPts) {
              if (RetainsToMove.ReverseInsertPts.insert(RIP).second) {
                // If we overflow when we compute the path count, don't
                // remove/move anything.
                const BBState &RIPBBState = BBStates[RIP->getParent()];

                PathCount = BBState::OverflowOccurredValue;
                if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
                  return false;
                assert(PathCount != BBState::OverflowOccurredValue &&
                       "PathCount at this point can not be "
                       "OverflowOccurredValue.");
                NewDelta += PathCount;
                NewCount += PathCount;
              }
            }
          NewRetains.push_back(NewReleaseRetain);
        }
      }
    }
    NewReleases.clear();
    if (NewRetains.empty()) break;
  }

  // If the pointer is known incremented in 1 direction and we do not have
  // MultipleOwners, we can safely remove the retain/releases. Otherwise we need
  // to be known safe in both directions.
  bool UnconditionallySafe = (KnownSafeTD && KnownSafeBU) ||
    ((KnownSafeTD || KnownSafeBU) && !MultipleOwners);
  if (UnconditionallySafe) {
    RetainsToMove.ReverseInsertPts.clear();
    ReleasesToMove.ReverseInsertPts.clear();
    NewDelta = 0;
  } else {
    // Determine whether the new insertion points we computed preserve the
    // balance of retain and release calls through the program.
    // TODO: If the fully aggressive solution isn't valid, try to find a
    // less aggressive solution which is.
    if (NewDelta != 0)
      return false;
  }

  // At this point, we are not going to remove any RR pairs, but we still are
  // able to move RR pairs. If one of our pointers is afflicted with
  // CFGHazards, we cannot perform such code motion so exit early.
  const bool WillPerformCodeMotion = RetainsToMove.ReverseInsertPts.size() ||
                                     ReleasesToMove.ReverseInsertPts.size();
  if (CFGHazardAfflicted && WillPerformCodeMotion)
    return false;

  // Determine whether the original call points are balanced in the retain and
  // release calls through the program. If not, conservatively don't touch
  // them.
  // TODO: It's theoretically possible to do code motion in this case, as
  // long as the existing imbalances are maintained.
  if (OldDelta != 0)
    return false;

  Changed = true;
  assert(OldCount != 0 && "Unreachable code?");
  NumRRs += OldCount - NewCount;
  // Set to true if we completely removed any RR pairs.
  AnyPairsCompletelyEliminated = NewCount == 0;

  // We can move calls!
  return true;
}


/// Identify pairings between the retains and releases, and delete and/or move
/// them.
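///
/// For illustration only (hand-written IR in the style of this file's other
/// examples, not taken from a testcase), a provably balanced pair such as:
/// \code
///   %0 = call i8* @objc_retain(i8* %p)
///   ...
///   call void @objc_release(i8* %0)
/// \endcode
/// may be deleted outright, while pairs that are only balanced after motion
/// may instead have their calls moved to the computed insertion points.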
bool ObjCARCOpt::PerformCodePlacement(
    DenseMap<const BasicBlock *, BBState> &BBStates,
    BlotMapVector<Value *, RRInfo> &Retains,
    DenseMap<Value *, RRInfo> &Releases, Module *M) {
  DEBUG(dbgs() << "\n== ObjCARCOpt::PerformCodePlacement ==\n");

  bool AnyPairsCompletelyEliminated = false;
  RRInfo RetainsToMove;
  RRInfo ReleasesToMove;
  SmallVector<Instruction *, 4> NewRetains;
  SmallVector<Instruction *, 4> NewReleases;
  SmallVector<Instruction *, 8> DeadInsts;

  // Visit each retain.
  for (BlotMapVector<Value *, RRInfo>::const_iterator I = Retains.begin(),
                                                      E = Retains.end();
       I != E; ++I) {
    Value *V = I->first;
    if (!V) continue; // blotted

    Instruction *Retain = cast<Instruction>(V);

    DEBUG(dbgs() << "Visiting: " << *Retain << "\n");

    Value *Arg = GetArgRCIdentityRoot(Retain);

    // If the object being released is in static or stack storage, we know it's
    // not being managed by ObjC reference counting, so we can delete pairs
    // regardless of what possible decrements or uses lie between them.
    bool KnownSafe = isa<Constant>(Arg) || isa<AllocaInst>(Arg);

    // A constant pointer can't be pointing to an object on the heap. It may
    // be reference-counted, but it won't be deleted.
    if (const LoadInst *LI = dyn_cast<LoadInst>(Arg))
      if (const GlobalVariable *GV =
            dyn_cast<GlobalVariable>(
              GetRCIdentityRoot(LI->getPointerOperand())))
        if (GV->isConstant())
          KnownSafe = true;
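
    // For example (illustrative IR; @SomeConstantGlobal is a hypothetical
    // 'constant' global), a pointer obtained as
    //   %p = load i8** @SomeConstantGlobal
    // may be reference-counted but will never be deallocated, so pairs on it
    // are safe to delete.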

    // Connect the dots between the top-down-collected RetainsToMove and
    // bottom-up-collected ReleasesToMove to form sets of related calls.
    NewRetains.push_back(Retain);
    bool PerformMoveCalls =
      ConnectTDBUTraversals(BBStates, Retains, Releases, M, NewRetains,
                            NewReleases, DeadInsts, RetainsToMove,
                            ReleasesToMove, Arg, KnownSafe,
                            AnyPairsCompletelyEliminated);

    if (PerformMoveCalls) {
      // Ok, everything checks out and we're all set. Let's move/delete some
      // code!
      MoveCalls(Arg, RetainsToMove, ReleasesToMove,
                Retains, Releases, DeadInsts, M);
    }

    // Clean up state for next retain.
    NewReleases.clear();
    NewRetains.clear();
    RetainsToMove.clear();
    ReleasesToMove.clear();
  }

  // Now that we're done moving everything, we can delete the newly dead
  // instructions, as we no longer need them as insert points.
  while (!DeadInsts.empty())
    EraseInstruction(DeadInsts.pop_back_val());

  return AnyPairsCompletelyEliminated;
}

/// Weak pointer optimizations.
void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
  DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeWeakCalls ==\n");

  // First, do memdep-style RLE and S2L optimizations. We can't use memdep
  // itself because it uses AliasAnalysis and we need to do provenance
  // queries instead.
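  //
  // For example (illustrative IR; @x is a hypothetical weak variable), the
  // second load below is fully redundant and can be replaced with %1:
  //   %1 = call i8* @objc_loadWeak(i8** @x)
  //   %2 = call i8* @objc_loadWeak(i8** @x)
  // Likewise, a load that MustAliases a preceding objc_storeWeak can be
  // forwarded the stored value.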
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;

    DEBUG(dbgs() << "Visiting: " << *Inst << "\n");

    ARCInstKind Class = GetBasicARCInstKind(Inst);
    if (Class != ARCInstKind::LoadWeak &&
        Class != ARCInstKind::LoadWeakRetained)
      continue;

    // Delete objc_loadWeak calls with no users.
    if (Class == ARCInstKind::LoadWeak && Inst->use_empty()) {
      Inst->eraseFromParent();
      continue;
    }

    // TODO: For now, just look for an earlier available version of this value
    // within the same block. Theoretically, we could do memdep-style non-local
    // analysis too, but that would want caching. A better approach would be to
    // use the technique that EarlyCSE uses.
    inst_iterator Current = std::prev(I);
    BasicBlock *CurrentBB = Current.getBasicBlockIterator();
    for (BasicBlock::iterator B = CurrentBB->begin(),
                              J = Current.getInstructionIterator();
         J != B; --J) {
      Instruction *EarlierInst = &*std::prev(J);
      ARCInstKind EarlierClass = GetARCInstKind(EarlierInst);
      switch (EarlierClass) {
      case ARCInstKind::LoadWeak:
      case ARCInstKind::LoadWeakRetained: {
        // If this is loading from the same pointer, replace this load's value
        // with that one.
        CallInst *Call = cast<CallInst>(Inst);
        CallInst *EarlierCall = cast<CallInst>(EarlierInst);
        Value *Arg = Call->getArgOperand(0);
        Value *EarlierArg = EarlierCall->getArgOperand(0);
        switch (PA.getAA()->alias(Arg, EarlierArg)) {
        case AliasAnalysis::MustAlias:
          Changed = true;
          // If the load has a builtin retain, insert a plain retain for it.
          if (Class == ARCInstKind::LoadWeakRetained) {
            Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Retain);
            CallInst *CI = CallInst::Create(Decl, EarlierCall, "", Call);
            CI->setTailCall();
          }
          // Zap the fully redundant load.
          Call->replaceAllUsesWith(EarlierCall);
          Call->eraseFromParent();
          goto clobbered;
        case AliasAnalysis::MayAlias:
        case AliasAnalysis::PartialAlias:
          goto clobbered;
        case AliasAnalysis::NoAlias:
          break;
        }
        break;
      }
      case ARCInstKind::StoreWeak:
      case ARCInstKind::InitWeak: {
        // If this is storing to the same pointer and has the same size etc.
        // replace this load's value with the stored value.
        CallInst *Call = cast<CallInst>(Inst);
        CallInst *EarlierCall = cast<CallInst>(EarlierInst);
        Value *Arg = Call->getArgOperand(0);
        Value *EarlierArg = EarlierCall->getArgOperand(0);
        switch (PA.getAA()->alias(Arg, EarlierArg)) {
        case AliasAnalysis::MustAlias:
          Changed = true;
          // If the load has a builtin retain, insert a plain retain for it.
          if (Class == ARCInstKind::LoadWeakRetained) {
            Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Retain);
            CallInst *CI = CallInst::Create(Decl, EarlierCall, "", Call);
            CI->setTailCall();
          }
          // Zap the fully redundant load.
          Call->replaceAllUsesWith(EarlierCall->getArgOperand(1));
          Call->eraseFromParent();
          goto clobbered;
        case AliasAnalysis::MayAlias:
        case AliasAnalysis::PartialAlias:
          goto clobbered;
        case AliasAnalysis::NoAlias:
          break;
        }
        break;
      }
      case ARCInstKind::MoveWeak:
      case ARCInstKind::CopyWeak:
        // TODO: Grab the copied value.
        goto clobbered;
      case ARCInstKind::AutoreleasepoolPush:
      case ARCInstKind::None:
      case ARCInstKind::IntrinsicUser:
      case ARCInstKind::User:
        // Weak pointers are only modified through the weak entry points
        // (and arbitrary calls, which could call the weak entry points).
        break;
      default:
        // Anything else could modify the weak pointer.
        goto clobbered;
      }
    }
  clobbered:;
  }

  // Then, for each destroyWeak with an alloca operand, check to see if
  // the alloca and all its users can be zapped.
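  //
  // For example (illustrative IR; %wp is a hypothetical stack slot), in:
  //   %wp = alloca i8*
  //   call i8* @objc_initWeak(i8** %wp, i8* %obj)
  //   call void @objc_destroyWeak(i8** %wp)
  // the weak slot never escapes, so the alloca and both calls can be deleted.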
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;
    ARCInstKind Class = GetBasicARCInstKind(Inst);
    if (Class != ARCInstKind::DestroyWeak)
      continue;

    CallInst *Call = cast<CallInst>(Inst);
    Value *Arg = Call->getArgOperand(0);
    if (AllocaInst *Alloca = dyn_cast<AllocaInst>(Arg)) {
      for (User *U : Alloca->users()) {
        const Instruction *UserInst = cast<Instruction>(U);
        switch (GetBasicARCInstKind(UserInst)) {
        case ARCInstKind::InitWeak:
        case ARCInstKind::StoreWeak:
        case ARCInstKind::DestroyWeak:
          continue;
        default:
          goto done;
        }
      }
      Changed = true;
      for (auto UI = Alloca->user_begin(), UE = Alloca->user_end(); UI != UE;) {
        CallInst *UserInst = cast<CallInst>(*UI++);
        switch (GetBasicARCInstKind(UserInst)) {
        case ARCInstKind::InitWeak:
        case ARCInstKind::StoreWeak:
          // These functions return their second argument.
          UserInst->replaceAllUsesWith(UserInst->getArgOperand(1));
          break;
        case ARCInstKind::DestroyWeak:
          // No return value.
          break;
        default:
          llvm_unreachable("alloca really is used!");
        }
        UserInst->eraseFromParent();
      }
      Alloca->eraseFromParent();
    done:;
    }
  }
}

/// Identify program paths which execute sequences of retains and releases which
/// can be eliminated.
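///
/// For example (illustrative IR, not from a testcase), nested pairs such as:
/// \code
///   %0 = call i8* @objc_retain(i8* %p)
///   %1 = call i8* @objc_retain(i8* %p)
///   call void @objc_release(i8* %p)
///   call void @objc_release(i8* %p)
/// \endcode
/// may take more than one pass to fully match up, which is why the caller
/// keeps re-running this function while it both eliminates pairs and detects
/// nesting.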
bool ObjCARCOpt::OptimizeSequences(Function &F) {
  // Releases, Retains - These are used to store the results of the main flow
  // analysis. These use Value* as the key instead of Instruction* so that the
  // map stays valid when we get around to rewriting code and calls get
  // replaced by arguments.
  DenseMap<Value *, RRInfo> Releases;
  BlotMapVector<Value *, RRInfo> Retains;

  // This is used during the traversal of the function to track the
  // states for each identified object at each block.
  DenseMap<const BasicBlock *, BBState> BBStates;

  // Analyze the CFG of the function, and all instructions.
  bool NestingDetected = Visit(F, BBStates, Retains, Releases);

  // Transform.
  bool AnyPairsCompletelyEliminated = PerformCodePlacement(BBStates, Retains,
                                                           Releases,
                                                           F.getParent());

  // Cleanup.
  MultiOwnersSet.clear();

  return AnyPairsCompletelyEliminated && NestingDetected;
}

/// Check if there is a dependent call earlier that does not have anything in
/// between the Retain and the call that can affect the reference count of their
/// shared pointer argument. Note that Retain need not be in BB.
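///
/// For example (illustrative IR), this is satisfied by:
/// \code
///   %call = call i8* @something(...)
///   %0 = call i8* @objc_retain(i8* %call)
/// \endcode
/// where the only dependency found for the retain is %call itself, the call
/// that produced the pointer.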
static bool
HasSafePathToPredecessorCall(const Value *Arg, Instruction *Retain,
                             SmallPtrSetImpl<Instruction *> &DepInsts,
                             SmallPtrSetImpl<const BasicBlock *> &Visited,
                             ProvenanceAnalysis &PA) {
  FindDependencies(CanChangeRetainCount, Arg, Retain->getParent(), Retain,
                   DepInsts, Visited, PA);
  if (DepInsts.size() != 1)
    return false;

  auto *Call = dyn_cast_or_null<CallInst>(*DepInsts.begin());

  // Check that the pointer is the return value of the call.
  if (!Call || Arg != Call)
    return false;

  // Check that the call is a regular call.
  ARCInstKind Class = GetBasicARCInstKind(Call);
  if (Class != ARCInstKind::CallOrUser && Class != ARCInstKind::Call)
    return false;

  return true;
}

/// Find a dependent retain that precedes the given autorelease for which there
/// is nothing in between the two instructions that can affect the ref count of
/// Arg.
static CallInst *
FindPredecessorRetainWithSafePath(const Value *Arg, BasicBlock *BB,
                                  Instruction *Autorelease,
                                  SmallPtrSetImpl<Instruction *> &DepInsts,
                                  SmallPtrSetImpl<const BasicBlock *> &Visited,
                                  ProvenanceAnalysis &PA) {
  FindDependencies(CanChangeRetainCount, Arg,
                   BB, Autorelease, DepInsts, Visited, PA);
  if (DepInsts.size() != 1)
    return nullptr;

  auto *Retain = dyn_cast_or_null<CallInst>(*DepInsts.begin());

  // Check that we found a retain with the same argument.
  if (!Retain || !IsRetain(GetBasicARCInstKind(Retain)) ||
      GetArgRCIdentityRoot(Retain) != Arg) {
    return nullptr;
  }

  return Retain;
}

/// Look for an ``autorelease'' instruction dependent on Arg such that there are
/// no instructions dependent on Arg that need a positive ref count in between
/// the autorelease and the ret.
static CallInst *
FindPredecessorAutoreleaseWithSafePath(const Value *Arg, BasicBlock *BB,
                                       ReturnInst *Ret,
                                       SmallPtrSetImpl<Instruction *> &DepInsts,
                                       SmallPtrSetImpl<const BasicBlock *> &V,
                                       ProvenanceAnalysis &PA) {
  FindDependencies(NeedsPositiveRetainCount, Arg,
                   BB, Ret, DepInsts, V, PA);
  if (DepInsts.size() != 1)
    return nullptr;

  auto *Autorelease = dyn_cast_or_null<CallInst>(*DepInsts.begin());
  if (!Autorelease)
    return nullptr;
  ARCInstKind AutoreleaseClass = GetBasicARCInstKind(Autorelease);
  if (!IsAutorelease(AutoreleaseClass))
    return nullptr;
  if (GetArgRCIdentityRoot(Autorelease) != Arg)
    return nullptr;

  return Autorelease;
}

/// Look for this pattern:
/// \code
///    %call = call i8* @something(...)
///    %2 = call i8* @objc_retain(i8* %call)
///    %3 = call i8* @objc_autorelease(i8* %2)
///    ret i8* %3
/// \endcode
/// And delete the retain and autorelease.
void ObjCARCOpt::OptimizeReturns(Function &F) {
  if (!F.getReturnType()->isPointerTy())
    return;

  DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeReturns ==\n");

  SmallPtrSet<Instruction *, 4> DependingInstructions;
  SmallPtrSet<const BasicBlock *, 4> Visited;
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI) {
    BasicBlock *BB = FI;
    ReturnInst *Ret = dyn_cast<ReturnInst>(&BB->back());

    DEBUG(dbgs() << "Visiting: " << *Ret << "\n");

    if (!Ret)
      continue;

    const Value *Arg = GetRCIdentityRoot(Ret->getOperand(0));

    // Look for an ``autorelease'' instruction that is a predecessor of Ret and
    // dependent on Arg such that there are no instructions dependent on Arg
    // that need a positive ref count in between the autorelease and Ret.
    CallInst *Autorelease =
      FindPredecessorAutoreleaseWithSafePath(Arg, BB, Ret,
                                             DependingInstructions, Visited,
                                             PA);
    DependingInstructions.clear();
    Visited.clear();

    if (!Autorelease)
      continue;

    CallInst *Retain =
      FindPredecessorRetainWithSafePath(Arg, BB, Autorelease,
                                        DependingInstructions, Visited, PA);
    DependingInstructions.clear();
    Visited.clear();

    if (!Retain)
      continue;

    // Check that there is nothing that can affect the reference count
    // between the retain and the call. Note that Retain need not be in BB.
    bool HasSafePathToCall = HasSafePathToPredecessorCall(Arg, Retain,
                                                          DependingInstructions,
                                                          Visited, PA);
    DependingInstructions.clear();
    Visited.clear();

    if (!HasSafePathToCall)
      continue;

    // If so, we can zap the retain and autorelease.
    Changed = true;
    ++NumRets;
    DEBUG(dbgs() << "Erasing: " << *Retain << "\nErasing: "
          << *Autorelease << "\n");
    EraseInstruction(Retain);
    EraseInstruction(Autorelease);
  }
}

#ifndef NDEBUG
void
ObjCARCOpt::GatherStatistics(Function &F, bool AfterOptimization) {
  llvm::Statistic &NumRetains =
    AfterOptimization? NumRetainsAfterOpt : NumRetainsBeforeOpt;
  llvm::Statistic &NumReleases =
    AfterOptimization? NumReleasesAfterOpt : NumReleasesBeforeOpt;

  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;
    switch (GetBasicARCInstKind(Inst)) {
    default:
      break;
    case ARCInstKind::Retain:
      ++NumRetains;
      break;
    case ARCInstKind::Release:
      ++NumReleases;
      break;
    }
  }
}
#endif

bool ObjCARCOpt::doInitialization(Module &M) {
  if (!EnableARCOpts)
    return false;

  // If nothing in the Module uses ARC, don't do anything.
  Run = ModuleHasARC(M);
  if (!Run)
    return false;

  // Identify the imprecise release metadata kind.
  MDKindCache.ImpreciseReleaseMDKind =
      M.getContext().getMDKindID("clang.imprecise_release");
  MDKindCache.CopyOnEscapeMDKind =
      M.getContext().getMDKindID("clang.arc.copy_on_escape");
  MDKindCache.NoObjCARCExceptionsMDKind =
      M.getContext().getMDKindID("clang.arc.no_objc_arc_exceptions");

  // Intuitively, objc_retain and others are nocapture, however in practice
  // they are not, because they return their argument value. And objc_release
  // calls finalizers which can have arbitrary side effects.

  // Initialize our runtime entry point cache.
  EP.init(&M);

  return false;
}

bool ObjCARCOpt::runOnFunction(Function &F) {
  if (!EnableARCOpts)
    return false;

  // If nothing in the Module uses ARC, don't do anything.
  if (!Run)
    return false;

  Changed = false;

  DEBUG(dbgs() << "<<< ObjCARCOpt: Visiting Function: " << F.getName() << " >>>"
        "\n");

  PA.setAA(&getAnalysis<AliasAnalysis>());

#ifndef NDEBUG
  if (AreStatisticsEnabled()) {
    GatherStatistics(F, false);
  }
#endif

  // This pass performs several distinct transformations. As a compile-time aid
  // when compiling code that isn't ObjC, skip these if the relevant ObjC
  // library functions aren't declared.

  // Preliminary optimizations. This also computes UsedInThisFunction.
  OptimizeIndividualCalls(F);
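
  // Note: UsedInThisFunction is a bitmask with one bit per ARCInstKind, so a
  // test like (UsedInThisFunction & (1 << unsigned(ARCInstKind::LoadWeak)))
  // asks whether any objc_loadWeak call was seen, gating the passes below.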

  // Optimizations for weak pointers.
  if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::LoadWeak)) |
                            (1 << unsigned(ARCInstKind::LoadWeakRetained)) |
                            (1 << unsigned(ARCInstKind::StoreWeak)) |
                            (1 << unsigned(ARCInstKind::InitWeak)) |
                            (1 << unsigned(ARCInstKind::CopyWeak)) |
                            (1 << unsigned(ARCInstKind::MoveWeak)) |
                            (1 << unsigned(ARCInstKind::DestroyWeak))))
    OptimizeWeakCalls(F);

  // Optimizations for retain+release pairs.
  if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::Retain)) |
                            (1 << unsigned(ARCInstKind::RetainRV)) |
                            (1 << unsigned(ARCInstKind::RetainBlock))))
    if (UsedInThisFunction & (1 << unsigned(ARCInstKind::Release)))
      // Run OptimizeSequences until it either stops making changes or
      // no retain+release pair nesting is detected.
      while (OptimizeSequences(F)) {}

  // Optimizations if objc_autorelease is used.
  if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::Autorelease)) |
                            (1 << unsigned(ARCInstKind::AutoreleaseRV))))
    OptimizeReturns(F);

  // Gather statistics after optimization.
#ifndef NDEBUG
  if (AreStatisticsEnabled()) {
    GatherStatistics(F, true);
  }
#endif

  DEBUG(dbgs() << "\n");

  return Changed;
}

void ObjCARCOpt::releaseMemory() {
  PA.clear();
}