//===- ObjCARCOpts.cpp - ObjC ARC Optimization ----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines ObjC ARC optimizations. ARC stands for Automatic
/// Reference Counting and is a system for managing reference counts for
/// objects in Objective-C.
///
/// The optimizations performed include elimination of redundant, partially
/// redundant, and inconsequential reference count operations, elimination of
/// redundant weak pointer operations, and numerous minor simplifications.
///
/// WARNING: This file knows about certain library functions. It recognizes them
/// by name, and hardwires knowledge of their semantics.
///
/// WARNING: This file knows about how certain Objective-C library functions are
/// used. Naive LLVM IR transformations which would otherwise be
/// behavior-preserving may break these assumptions.
///
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "objc-arc-opts"
#include "ObjCARC.h"
#include "DependencyAnalysis.h"
#include "ObjCARCAliasAnalysis.h"
#include "ProvenanceAnalysis.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace llvm::objcarc;
/// \defgroup MiscUtils Miscellaneous utilities that are not ARC specific.
/// @{

namespace {
  /// \brief An associative container with fast insertion-order (deterministic)
  /// iteration over its elements. Plus the special blot operation.
  template<class KeyT, class ValueT>
  class MapVector {
    /// Map keys to indices in Vector.
    typedef DenseMap<KeyT, size_t> MapTy;
    MapTy Map;

    /// Keys and values.
    typedef std::vector<std::pair<KeyT, ValueT> > VectorTy;
    VectorTy Vector;

  public:
    typedef typename VectorTy::iterator iterator;
    typedef typename VectorTy::const_iterator const_iterator;
    iterator begin() { return Vector.begin(); }
    iterator end() { return Vector.end(); }
    const_iterator begin() const { return Vector.begin(); }
    const_iterator end() const { return Vector.end(); }
#ifdef XDEBUG
    ~MapVector() {
      assert(Vector.size() >= Map.size()); // May differ due to blotting.
      for (typename MapTy::const_iterator I = Map.begin(), E = Map.end();
           I != E; ++I) {
        assert(I->second < Vector.size());
        assert(Vector[I->second].first == I->first);
      }
      for (typename VectorTy::const_iterator I = Vector.begin(),
           E = Vector.end(); I != E; ++I)
        assert(!I->first ||
               (Map.count(I->first) &&
                Map[I->first] == size_t(I - Vector.begin())));
    }
#endif
    ValueT &operator[](const KeyT &Arg) {
      std::pair<typename MapTy::iterator, bool> Pair =
        Map.insert(std::make_pair(Arg, size_t(0)));
      if (Pair.second) {
        size_t Num = Vector.size();
        Pair.first->second = Num;
        Vector.push_back(std::make_pair(Arg, ValueT()));
        return Vector[Num].second;
      }
      return Vector[Pair.first->second].second;
    }
    std::pair<iterator, bool>
    insert(const std::pair<KeyT, ValueT> &InsertPair) {
      std::pair<typename MapTy::iterator, bool> Pair =
        Map.insert(std::make_pair(InsertPair.first, size_t(0)));
      if (Pair.second) {
        size_t Num = Vector.size();
        Pair.first->second = Num;
        Vector.push_back(InsertPair);
        return std::make_pair(Vector.begin() + Num, true);
      }
      return std::make_pair(Vector.begin() + Pair.first->second, false);
    }
    iterator find(const KeyT &Key) {
      typename MapTy::iterator It = Map.find(Key);
      if (It == Map.end()) return Vector.end();
      return Vector.begin() + It->second;
    }

    const_iterator find(const KeyT &Key) const {
      typename MapTy::const_iterator It = Map.find(Key);
      if (It == Map.end()) return Vector.end();
      return Vector.begin() + It->second;
    }
    /// This is similar to erase, but instead of removing the element from the
    /// vector, it just zeros out the key in the vector. This leaves iterators
    /// intact, but clients must be prepared for zeroed-out keys when iterating.
    void blot(const KeyT &Key) {
      typename MapTy::iterator It = Map.find(Key);
      if (It == Map.end()) return;
      Vector[It->second].first = KeyT();
      Map.erase(It);
    }

    void clear() {
      Map.clear();
      Vector.clear();
    }
  };
}
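
// Illustrative sketch only (not part of the original file): unlike erase(),
// blot() keeps the vector slot alive, so iterators held across the call stay
// valid; clients just have to skip the zeroed-out key. Assuming hypothetical
// Value pointers A and B:
//
//   MapVector<const Value *, unsigned> MV;
//   MV[A] = 1;
//   MV[B] = 2;
//   MapVector<const Value *, unsigned>::iterator I = MV.begin();
//   MV.blot(A);               // A's slot remains; its key becomes null.
//   for (; I != MV.end(); ++I)
//     if (I->first)           // Skip blotted entries while iterating.
//       Visit(I->first, I->second);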
/// @}
///
/// \defgroup ARCUtilities Utility declarations/definitions specific to ARC.
/// @{

/// \brief This is similar to StripPointerCastsAndObjCCalls but it stops as soon
/// as it finds a value with multiple uses.
static const Value *FindSingleUseIdentifiedObject(const Value *Arg) {
  if (Arg->hasOneUse()) {
    if (const BitCastInst *BC = dyn_cast<BitCastInst>(Arg))
      return FindSingleUseIdentifiedObject(BC->getOperand(0));
    if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Arg))
      if (GEP->hasAllZeroIndices())
        return FindSingleUseIdentifiedObject(GEP->getPointerOperand());
    if (IsForwarding(GetBasicInstructionClass(Arg)))
      return FindSingleUseIdentifiedObject(
               cast<CallInst>(Arg)->getArgOperand(0));
    if (!IsObjCIdentifiedObject(Arg))
      return 0;
    return Arg;
  }
  // If we found an identifiable object whose uses, though multiple, are all
  // trivial, we can still consider this to be a single-use value.
  if (IsObjCIdentifiedObject(Arg)) {
    for (Value::const_use_iterator UI = Arg->use_begin(), UE = Arg->use_end();
         UI != UE; ++UI) {
      const User *U = *UI;
      if (!U->use_empty() || StripPointerCastsAndObjCCalls(U) != Arg)
        return 0;
    }

    return Arg;
  }

  return 0;
}
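
// Illustrative sketch only (hypothetical IR, not from this file): passed %2
// below, the walk strips the single-use bitcast and the all-zero GEP to reach
// the identified object %obj; each value along the chain must have exactly
// one use, or the walk stops and returns null:
//
//   %1 = getelementptr i8* %obj, i64 0     ; single use
//   %2 = bitcast i8* %1 to i8*             ; single use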
/// \brief Test whether the given retainable object pointer escapes.
///
/// This differs from regular escape analysis in that a use as an
/// argument to a call is not considered an escape.
///
static bool DoesRetainableObjPtrEscape(const User *Ptr) {
  DEBUG(dbgs() << "DoesRetainableObjPtrEscape: Target: " << *Ptr << "\n");

  // Walk the def-use chains.
  SmallVector<const Value *, 4> Worklist;
  Worklist.push_back(Ptr);
  // If Ptr has any operands add them as well.
  for (User::const_op_iterator I = Ptr->op_begin(), E = Ptr->op_end(); I != E;
       ++I)
    Worklist.push_back(*I);

  // Ensure we do not visit any value twice.
  SmallPtrSet<const Value *, 8> VisitedSet;

  do {
    const Value *V = Worklist.pop_back_val();

    DEBUG(dbgs() << "Visiting: " << *V << "\n");

    for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end();
         UI != UE; ++UI) {
      const User *UUser = *UI;

      DEBUG(dbgs() << "User: " << *UUser << "\n");

      // Special - Use by a call (callee or argument) is not considered
      // to be an escape.
      switch (GetBasicInstructionClass(UUser)) {
      case IC_StoreWeak:
      case IC_InitWeak:
      case IC_StoreStrong:
      case IC_Autorelease:
      case IC_AutoreleaseRV: {
        DEBUG(dbgs() << "User copies pointer arguments. Pointer Escapes!\n");
        // These special functions make copies of their pointer arguments.
        return true;
      }
      case IC_IntrinsicUser:
        // Use by the use intrinsic is not an escape.
        continue;
      case IC_User:
      case IC_None:
        // Use by an instruction which copies the value is an escape if the
        // result is an escape.
        if (isa<BitCastInst>(UUser) || isa<GetElementPtrInst>(UUser) ||
            isa<PHINode>(UUser) || isa<SelectInst>(UUser)) {
          if (VisitedSet.insert(UUser)) {
            DEBUG(dbgs() << "User copies value. Ptr escapes if result escapes."
                  " Adding to list.\n");
            Worklist.push_back(UUser);
          } else {
            DEBUG(dbgs() << "Already visited node.\n");
          }
          continue;
        }

        // Use by a load is not an escape.
        if (isa<LoadInst>(UUser))
          continue;

        // Use by a store is not an escape if the use is the address.
        if (const StoreInst *SI = dyn_cast<StoreInst>(UUser))
          if (V != SI->getValueOperand())
            continue;
        break;
      default:
        // Regular calls and other stuff are not considered escapes.
        continue;
      }

      // Otherwise, conservatively assume an escape.
      DEBUG(dbgs() << "Assuming ptr escapes.\n");
      return true;
    }
  } while (!Worklist.empty());

  // No escapes found.
  DEBUG(dbgs() << "Ptr does not escape.\n");
  return false;
}
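
// Illustrative sketch only (hypothetical IR, not from this file): for %x
// below, the call use is ignored and the bitcast escapes only if its result
// does; it is the store of %x as a *value* that finally counts as an escape:
//
//   call void @use_pointer(i8* %x)     ; call argument: not an escape
//   %y = bitcast i8* %x to i8*         ; escapes only if %y escapes
//   store i8* %x, i8** %slot           ; value operand of a store: escape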
/// This is a wrapper around getUnderlyingObjCPtr along the lines of
/// GetUnderlyingObjects except that it returns early when it sees the first
/// alloca.
static inline bool AreAnyUnderlyingObjectsAnAlloca(const Value *V) {
  SmallPtrSet<const Value *, 4> Visited;
  SmallVector<const Value *, 4> Worklist;
  Worklist.push_back(V);
  do {
    const Value *P = Worklist.pop_back_val();
    P = GetUnderlyingObjCPtr(P);

    if (isa<AllocaInst>(P))
      return true;

    if (!Visited.insert(P))
      continue;

    if (const SelectInst *SI = dyn_cast<const SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (const PHINode *PN = dyn_cast<const PHINode>(P)) {
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        Worklist.push_back(PN->getIncomingValue(i));
      continue;
    }
  } while (!Worklist.empty());

  return false;
}

/// @}
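
// Illustrative sketch only (hypothetical IR, not from this file): this
// returns true for %p, because one arm of the select is rooted at an alloca:
//
//   %buf = alloca i8
//   %p = select i1 %c, i8* %buf, i8* %heapptr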
/// \defgroup ARCOpt ARC Optimization.
/// @{

// TODO: On code like this:
//
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
// stuff_that_cannot_release()
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
//
// The second retain and autorelease can be deleted.

// TODO: It should be possible to delete
// objc_autoreleasePoolPush and objc_autoreleasePoolPop
// pairs if nothing is actually autoreleased between them. Also, autorelease
// calls followed by objc_autoreleasePoolPop calls (perhaps in ObjC++ code
// after inlining) can be turned into plain release calls.

// TODO: Critical-edge splitting. If the optimal insertion point is
// a critical edge, the current algorithm has to fail, because it doesn't
// know how to split edges. It should be possible to make the optimizer
// think in terms of edges, rather than blocks, and then split critical
// edges on demand.

// TODO: OptimizeSequences could be generalized to be interprocedural.

// TODO: Recognize that a bunch of other objc runtime calls have
// non-escaping arguments and non-releasing arguments, and may be
// non-autoreleasing.

// TODO: Sink autorelease calls as far as possible. Unfortunately we
// usually can't sink them past other calls, which would be the main
// case where it would be useful.

// TODO: The pointer returned from objc_loadWeakRetained is retained.

// TODO: Delete release+retain pairs (rare).
STATISTIC(NumNoops,        "Number of no-op objc calls eliminated");
STATISTIC(NumPartialNoops, "Number of partially no-op objc calls eliminated");
STATISTIC(NumAutoreleases, "Number of autoreleases converted to releases");
STATISTIC(NumRets,         "Number of return value forwarding "
                           "retain+autoreleases eliminated");
STATISTIC(NumRRs,          "Number of retain+release paths eliminated");
STATISTIC(NumPeeps,        "Number of calls peephole-optimized");

STATISTIC(NumRetainsBeforeOpt,
          "Number of retains before optimization");
STATISTIC(NumReleasesBeforeOpt,
          "Number of releases before optimization");
STATISTIC(NumRetainsAfterOpt,
          "Number of retains after optimization");
STATISTIC(NumReleasesAfterOpt,
          "Number of releases after optimization");
namespace {
  /// \brief A sequence of states that a pointer may go through in which an
  /// objc_retain and objc_release are actually needed.
  enum Sequence {
    S_None,
    S_Retain,         ///< objc_retain(x).
    S_CanRelease,     ///< foo(x) -- x could possibly see a ref count decrement.
    S_Use,            ///< any use of x.
    S_Stop,           ///< like S_Release, but code motion is stopped.
    S_Release,        ///< objc_release(x).
    S_MovableRelease  ///< objc_release(x), !clang.imprecise_release.
  };

  raw_ostream &operator<<(raw_ostream &OS, const Sequence S)
    LLVM_ATTRIBUTE_UNUSED;
  raw_ostream &operator<<(raw_ostream &OS, const Sequence S) {
    switch (S) {
    case S_None:
      return OS << "S_None";
    case S_Retain:
      return OS << "S_Retain";
    case S_CanRelease:
      return OS << "S_CanRelease";
    case S_Use:
      return OS << "S_Use";
    case S_Release:
      return OS << "S_Release";
    case S_MovableRelease:
      return OS << "S_MovableRelease";
    case S_Stop:
      return OS << "S_Stop";
    }
    llvm_unreachable("Unknown sequence type.");
  }
}
static Sequence MergeSeqs(Sequence A, Sequence B, bool TopDown) {
  // The easy cases.
  if (A == B)
    return A;
  if (A == S_None || B == S_None)
    return S_None;

  if (A > B) std::swap(A, B);
  if (TopDown) {
    // Choose the side which is further along in the sequence.
    if ((A == S_Retain || A == S_CanRelease) &&
        (B == S_CanRelease || B == S_Use))
      return B;
  } else {
    // Choose the side which is further along in the sequence.
    if ((A == S_Use || A == S_CanRelease) &&
        (B == S_Use || B == S_Release || B == S_Stop || B == S_MovableRelease))
      return A;
    // If both sides are releases, choose the more conservative one.
    if (A == S_Stop && (B == S_Release || B == S_MovableRelease))
      return A;
    if (A == S_Release && B == S_MovableRelease)
      return A;
  }

  return S_None;
}
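
// Illustrative worked examples (not from the original source): merging
// top-down, MergeSeqs(S_Retain, S_Use, true) yields S_Use, the state further
// along the sequence. Merging bottom-up, MergeSeqs(S_Use, S_MovableRelease,
// false) yields S_Use, while MergeSeqs(S_Retain, S_Use, false) yields S_None,
// because the two paths share no common position in a bottom-up sequence.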
namespace {
  /// \brief Unidirectional information about either a
  /// retain-decrement-use-release sequence or release-use-decrement-retain
  /// reverse sequence.
  struct RRInfo {
    /// After an objc_retain, the reference count of the referenced
    /// object is known to be positive. Similarly, before an objc_release, the
    /// reference count of the referenced object is known to be positive. If
    /// there are retain-release pairs in code regions where the retain count
    /// is known to be positive, they can be eliminated, regardless of any side
    /// effects between them.
    ///
    /// Also, a retain+release pair nested within another retain+release
    /// pair all on the known same pointer value can be eliminated, regardless
    /// of any intervening side effects.
    ///
    /// KnownSafe is true when either of these conditions is satisfied.
    bool KnownSafe;

    /// True if the objc_release calls are all marked with the "tail" keyword.
    bool IsTailCallRelease;

    /// If the Calls are objc_release calls and they all have a
    /// clang.imprecise_release tag, this is the metadata tag.
    MDNode *ReleaseMetadata;

    /// For a top-down sequence, the set of objc_retains or
    /// objc_retainBlocks. For bottom-up, the set of objc_releases.
    SmallPtrSet<Instruction *, 2> Calls;

    /// The set of optimal insert positions for moving calls in the opposite
    /// sequence.
    SmallPtrSet<Instruction *, 2> ReverseInsertPts;

    /// If this is true, we cannot perform code motion but can still remove
    /// retain/release pairs.
    bool CFGHazardAfflicted;

    RRInfo() :
      KnownSafe(false), IsTailCallRelease(false), ReleaseMetadata(0),
      CFGHazardAfflicted(false) {}

    void clear();

    /// Conservatively merge the two RRInfo. Returns true if a partial merge has
    /// occurred, false otherwise.
    bool Merge(const RRInfo &Other);
  };
}
void RRInfo::clear() {
  KnownSafe = false;
  IsTailCallRelease = false;
  ReleaseMetadata = 0;
  Calls.clear();
  ReverseInsertPts.clear();
  CFGHazardAfflicted = false;
}

bool RRInfo::Merge(const RRInfo &Other) {
  // Conservatively merge the ReleaseMetadata information.
  if (ReleaseMetadata != Other.ReleaseMetadata)
    ReleaseMetadata = 0;

  // Conservatively merge the boolean state.
  KnownSafe &= Other.KnownSafe;
  IsTailCallRelease &= Other.IsTailCallRelease;
  CFGHazardAfflicted |= Other.CFGHazardAfflicted;

  // Merge the call sets.
  Calls.insert(Other.Calls.begin(), Other.Calls.end());

  // Merge the insert point sets. If there are any differences,
  // that makes this a partial merge.
  bool Partial = ReverseInsertPts.size() != Other.ReverseInsertPts.size();
  for (SmallPtrSet<Instruction *, 2>::const_iterator
       I = Other.ReverseInsertPts.begin(),
       E = Other.ReverseInsertPts.end(); I != E; ++I)
    Partial |= ReverseInsertPts.insert(*I);
  return Partial;
}
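
// Illustrative worked example (not from the original source): if this RRInfo
// holds insert points {A} and Other holds {B}, the sizes match, but inserting
// B into our set returns true, so Merge() reports a partial merge; the caller
// records the PtrState as Partial, and a later merge on that path will drop
// the sequence rather than attempt partial RR elimination.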
namespace {
  /// \brief This class summarizes several per-pointer runtime properties which
  /// are propagated through the flow graph.
  class PtrState {
    /// True if the reference count is known to be incremented.
    bool KnownPositiveRefCount;

    /// True if we've seen an opportunity for partial RR elimination, such as
    /// pushing calls into a CFG triangle or into one side of a CFG diamond.
    bool Partial;

    /// The current position in the sequence.
    Sequence Seq : 8;

    /// Unidirectional information about the current sequence.
    ///
    /// TODO: Encapsulate this better.
    RRInfo RRI;

  public:
    PtrState() : KnownPositiveRefCount(false), Partial(false),
                 Seq(S_None) {}
    bool IsKnownSafe() const {
      return RRI.KnownSafe;
    }

    void SetKnownSafe(const bool NewValue) {
      RRI.KnownSafe = NewValue;
    }

    bool IsTailCallRelease() const {
      return RRI.IsTailCallRelease;
    }

    void SetTailCallRelease(const bool NewValue) {
      RRI.IsTailCallRelease = NewValue;
    }

    bool IsTrackingImpreciseReleases() const {
      return RRI.ReleaseMetadata != 0;
    }

    const MDNode *GetReleaseMetadata() const {
      return RRI.ReleaseMetadata;
    }

    void SetReleaseMetadata(MDNode *NewValue) {
      RRI.ReleaseMetadata = NewValue;
    }

    bool IsCFGHazardAfflicted() const {
      return RRI.CFGHazardAfflicted;
    }

    void SetCFGHazardAfflicted(const bool NewValue) {
      RRI.CFGHazardAfflicted = NewValue;
    }
    void SetKnownPositiveRefCount() {
      DEBUG(dbgs() << "Setting Known Positive.\n");
      KnownPositiveRefCount = true;
    }

    void ClearKnownPositiveRefCount() {
      DEBUG(dbgs() << "Clearing Known Positive.\n");
      KnownPositiveRefCount = false;
    }

    bool HasKnownPositiveRefCount() const {
      return KnownPositiveRefCount;
    }

    void SetSeq(Sequence NewSeq) {
      DEBUG(dbgs() << "Old: " << Seq << "; New: " << NewSeq << "\n");
      Seq = NewSeq;
    }

    Sequence GetSeq() const {
      return Seq;
    }

    void ClearSequenceProgress() {
      ResetSequenceProgress(S_None);
    }

    void ResetSequenceProgress(Sequence NewSeq) {
      DEBUG(dbgs() << "Resetting sequence progress.\n");
      SetSeq(NewSeq);
      Partial = false;
      RRI.clear();
    }

    void Merge(const PtrState &Other, bool TopDown);
    void InsertCall(Instruction *I) {
      RRI.Calls.insert(I);
    }

    void InsertReverseInsertPt(Instruction *I) {
      RRI.ReverseInsertPts.insert(I);
    }

    void ClearReverseInsertPts() {
      RRI.ReverseInsertPts.clear();
    }

    bool HasReverseInsertPts() const {
      return !RRI.ReverseInsertPts.empty();
    }

    const RRInfo &GetRRInfo() const {
      return RRI;
    }
  };
}
void
PtrState::Merge(const PtrState &Other, bool TopDown) {
  Seq = MergeSeqs(Seq, Other.Seq, TopDown);
  KnownPositiveRefCount &= Other.KnownPositiveRefCount;

  // If we're not in a sequence (anymore), drop all associated state.
  if (Seq == S_None) {
    Partial = false;
    RRI.clear();
  } else if (Partial || Other.Partial) {
    // If we're doing a merge on a path that's previously seen a partial
    // merge, conservatively drop the sequence, to avoid doing partial
    // RR elimination. If the branch predicates for the two merges differ,
    // mixing them is unsafe.
    ClearSequenceProgress();
  } else {
    // Otherwise merge the other PtrState's RRInfo into our RRInfo. At this
    // point, we know that currently we are not partial. Stash whether or not
    // the merge operation caused us to undergo a partial merging of reverse
    // insertion points.
    Partial = RRI.Merge(Other.RRI);
  }
}
namespace {
  /// \brief Per-BasicBlock state.
  class BBState {
    /// The number of unique control paths from the entry which can reach this
    /// block.
    unsigned TopDownPathCount;

    /// The number of unique control paths to exits from this block.
    unsigned BottomUpPathCount;

    /// A type for PerPtrTopDown and PerPtrBottomUp.
    typedef MapVector<const Value *, PtrState> MapTy;

    /// The top-down traversal uses this to record information known about a
    /// pointer at the bottom of each block.
    MapTy PerPtrTopDown;

    /// The bottom-up traversal uses this to record information known about a
    /// pointer at the top of each block.
    MapTy PerPtrBottomUp;

    /// Effective predecessors of the current block ignoring ignorable edges and
    /// ignored backedges.
    SmallVector<BasicBlock *, 2> Preds;
    /// Effective successors of the current block ignoring ignorable edges and
    /// ignored backedges.
    SmallVector<BasicBlock *, 2> Succs;

  public:
    BBState() : TopDownPathCount(0), BottomUpPathCount(0) {}
    typedef MapTy::iterator ptr_iterator;
    typedef MapTy::const_iterator ptr_const_iterator;

    ptr_iterator top_down_ptr_begin() { return PerPtrTopDown.begin(); }
    ptr_iterator top_down_ptr_end() { return PerPtrTopDown.end(); }
    ptr_const_iterator top_down_ptr_begin() const {
      return PerPtrTopDown.begin();
    }
    ptr_const_iterator top_down_ptr_end() const {
      return PerPtrTopDown.end();
    }

    ptr_iterator bottom_up_ptr_begin() { return PerPtrBottomUp.begin(); }
    ptr_iterator bottom_up_ptr_end() { return PerPtrBottomUp.end(); }
    ptr_const_iterator bottom_up_ptr_begin() const {
      return PerPtrBottomUp.begin();
    }
    ptr_const_iterator bottom_up_ptr_end() const {
      return PerPtrBottomUp.end();
    }

    /// Mark this block as being an entry block, which has one path from the
    /// entry by definition.
    void SetAsEntry() { TopDownPathCount = 1; }

    /// Mark this block as being an exit block, which has one path to an exit by
    /// definition.
    void SetAsExit() { BottomUpPathCount = 1; }
    /// Attempt to find the PtrState object describing the top down state for
    /// pointer Arg. Return a new initialized PtrState describing the top down
    /// state for Arg if we do not find one.
    PtrState &getPtrTopDownState(const Value *Arg) {
      return PerPtrTopDown[Arg];
    }

    /// Attempt to find the PtrState object describing the bottom up state for
    /// pointer Arg. Return a new initialized PtrState describing the bottom up
    /// state for Arg if we do not find one.
    PtrState &getPtrBottomUpState(const Value *Arg) {
      return PerPtrBottomUp[Arg];
    }

    /// Attempt to find the PtrState object describing the bottom up state for
    /// pointer Arg.
    ptr_iterator findPtrBottomUpState(const Value *Arg) {
      return PerPtrBottomUp.find(Arg);
    }

    void clearBottomUpPointers() {
      PerPtrBottomUp.clear();
    }

    void clearTopDownPointers() {
      PerPtrTopDown.clear();
    }
    void InitFromPred(const BBState &Other);
    void InitFromSucc(const BBState &Other);
    void MergePred(const BBState &Other);
    void MergeSucc(const BBState &Other);

    /// Compute the number of possible unique paths from an entry to an exit
    /// which pass through this block. This is only valid after both the
    /// top-down and bottom-up traversals are complete.
    ///
    /// Returns true if overflow occurred. Returns false if overflow did not
    /// occur.
    bool GetAllPathCountWithOverflow(unsigned &PathCount) const {
      assert(TopDownPathCount != 0);
      assert(BottomUpPathCount != 0);
      unsigned long long Product =
        (unsigned long long)TopDownPathCount*BottomUpPathCount;
      PathCount = Product;
      // Overflow occurred if any of the upper bits of Product are set.
      return Product >> 32;
    }
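
    // Illustrative worked example (not from the original source): with
    // TopDownPathCount == 1 << 16 and BottomUpPathCount == 1 << 16, Product
    // is 1ULL << 32. Its upper 32 bits are nonzero, so the function returns
    // true and callers fall back to conservative behavior rather than trust
    // the truncated PathCount.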
    // Specialized CFG utilities.
    typedef SmallVectorImpl<BasicBlock *>::const_iterator edge_iterator;
    edge_iterator pred_begin() { return Preds.begin(); }
    edge_iterator pred_end() { return Preds.end(); }
    edge_iterator succ_begin() { return Succs.begin(); }
    edge_iterator succ_end() { return Succs.end(); }

    void addSucc(BasicBlock *Succ) { Succs.push_back(Succ); }
    void addPred(BasicBlock *Pred) { Preds.push_back(Pred); }

    bool isExit() const { return Succs.empty(); }
  };
}
void BBState::InitFromPred(const BBState &Other) {
  PerPtrTopDown = Other.PerPtrTopDown;
  TopDownPathCount = Other.TopDownPathCount;
}

void BBState::InitFromSucc(const BBState &Other) {
  PerPtrBottomUp = Other.PerPtrBottomUp;
  BottomUpPathCount = Other.BottomUpPathCount;
}
/// The top-down traversal uses this to merge information about predecessors to
/// form the initial state for a new block.
void BBState::MergePred(const BBState &Other) {
  // Other.TopDownPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  TopDownPathCount += Other.TopDownPathCount;

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (TopDownPathCount < Other.TopDownPathCount) {
    clearTopDownPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the same key,
  // merge the entries. Otherwise, copy the entry and merge it with an empty
  // entry.
  for (ptr_const_iterator MI = Other.top_down_ptr_begin(),
       ME = Other.top_down_ptr_end(); MI != ME; ++MI) {
    std::pair<ptr_iterator, bool> Pair = PerPtrTopDown.insert(*MI);
    Pair.first->second.Merge(Pair.second ? PtrState() : MI->second,
                             /*TopDown=*/true);
  }

  // For each entry in our set, if the other set doesn't have an entry with the
  // same key, force it to merge with an empty entry.
  for (ptr_iterator MI = top_down_ptr_begin(),
       ME = top_down_ptr_end(); MI != ME; ++MI)
    if (Other.PerPtrTopDown.find(MI->first) == Other.PerPtrTopDown.end())
      MI->second.Merge(PtrState(), /*TopDown=*/true);
}
/// The bottom-up traversal uses this to merge information about successors to
/// form the initial state for a new block.
void BBState::MergeSucc(const BBState &Other) {
  // Other.BottomUpPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  BottomUpPathCount += Other.BottomUpPathCount;

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (BottomUpPathCount < Other.BottomUpPathCount) {
    clearBottomUpPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the
  // same key, merge the entries. Otherwise, copy the entry and merge
  // it with an empty entry.
  for (ptr_const_iterator MI = Other.bottom_up_ptr_begin(),
       ME = Other.bottom_up_ptr_end(); MI != ME; ++MI) {
    std::pair<ptr_iterator, bool> Pair = PerPtrBottomUp.insert(*MI);
    Pair.first->second.Merge(Pair.second ? PtrState() : MI->second,
                             /*TopDown=*/false);
  }

  // For each entry in our set, if the other set doesn't have an entry
  // with the same key, force it to merge with an empty entry.
  for (ptr_iterator MI = bottom_up_ptr_begin(),
       ME = bottom_up_ptr_end(); MI != ME; ++MI)
    if (Other.PerPtrBottomUp.find(MI->first) == Other.PerPtrBottomUp.end())
      MI->second.Merge(PtrState(), /*TopDown=*/false);
}
// Only enable ARC Annotations if we are building a debug version of
// the compiler.
#ifndef NDEBUG
#define ARC_ANNOTATIONS
#endif

// Define some macros along the lines of DEBUG and some helper functions to make
// it cleaner to create annotations in the source code and to no-op when not
// building in debug mode.
#ifdef ARC_ANNOTATIONS

#include "llvm/Support/CommandLine.h"

/// Enable/disable ARC sequence annotations.
static cl::opt<bool>
EnableARCAnnotations("enable-objc-arc-annotations", cl::init(false),
                     cl::desc("Enable emission of arc data flow analysis "
                              "annotations"));
static cl::opt<bool>
DisableCheckForCFGHazards("disable-objc-arc-checkforcfghazards", cl::init(false),
                          cl::desc("Disable check for cfg hazards when "
                                   "annotating"));
static cl::opt<std::string>
ARCAnnotationTargetIdentifier("objc-arc-annotation-target-identifier",
                              cl::init(""),
                              cl::desc("filter out all data flow annotations "
                                       "but those that apply to the given "
                                       "target llvm identifier."));
/// This function appends a unique ARCAnnotationProvenanceSourceMDKind id to an
/// instruction so that we can track backwards when post processing via the llvm
/// arc annotation processor tool.
static MDString *AppendMDNodeToSourcePtr(unsigned NodeId,
                                         Value *Ptr) {
  MDString *Hash = 0;

  // If pointer is a result of an instruction and it does not have a source
  // MDNode attached to it, attach a new MDNode onto it. If pointer is a result
  // of an instruction and does have a source MDNode attached to it, return a
  // reference to said Node. Otherwise just return 0.
  if (Instruction *Inst = dyn_cast<Instruction>(Ptr)) {
    MDNode *Node;
    if (!(Node = Inst->getMetadata(NodeId))) {
      // We do not have any node. Generate and attach the hash MDString to the
      // instruction.

      // We just use an MDString to ensure that this metadata gets written out
      // of line at the module level and to provide a very simple format
      // encoding the information herein. Both of these make it simpler to
      // parse the annotations with a simple external program.
      std::string Str;
      raw_string_ostream os(Str);
      os << "(" << Inst->getParent()->getParent()->getName() << ",%"
         << Inst->getName() << ")";

      Hash = MDString::get(Inst->getContext(), os.str());
      Inst->setMetadata(NodeId, MDNode::get(Inst->getContext(), Hash));
    } else {
      // We have a node. Grab its hash and return it.
      assert(Node->getNumOperands() == 1 &&
             "An ARCAnnotationProvenanceSourceMDKind can only have 1 operand.");
      Hash = cast<MDString>(Node->getOperand(0));
    }
  } else if (Argument *Arg = dyn_cast<Argument>(Ptr)) {
    std::string str;
    raw_string_ostream os(str);
    os << "(" << Arg->getParent()->getName() << ",%" << Arg->getName()
       << ")";
    Hash = MDString::get(Arg->getContext(), os.str());
  }

  return Hash;
}
static std::string SequenceToString(Sequence A) {
  std::string str;
  raw_string_ostream os(str);
  os << A;
  return os.str();
}

/// Helper function to change a Sequence into a String object using our overload
/// for raw_ostream so we only have printing code in one location.
static MDString *SequenceToMDString(LLVMContext &Context,
                                    Sequence A) {
  return MDString::get(Context, SequenceToString(A));
}
/// A simple function to generate a MDNode which describes the change in state
/// for Value *Ptr caused by Instruction *Inst.
static void AppendMDNodeToInstForPtr(unsigned NodeId,
                                     Instruction *Inst,
                                     Value *Ptr,
                                     MDString *PtrSourceMDNodeID,
                                     Sequence OldSeq,
                                     Sequence NewSeq) {
  MDNode *Node = 0;
  Value *tmp[3] = {PtrSourceMDNodeID,
                   SequenceToMDString(Inst->getContext(),
                                      OldSeq),
                   SequenceToMDString(Inst->getContext(),
                                      NewSeq)};
  Node = MDNode::get(Inst->getContext(),
                     ArrayRef<Value*>(tmp, 3));

  Inst->setMetadata(NodeId, Node);
}
/// Add to the beginning of the basic block llvm.ptr.annotations which show the
/// state of a pointer at the entrance to a basic block.
static void GenerateARCBBEntranceAnnotation(const char *Name, BasicBlock *BB,
                                            Value *Ptr, Sequence Seq) {
  // If we have a target identifier, make sure that we match it before
  // emitting an annotation.
  if (!ARCAnnotationTargetIdentifier.empty() &&
      !Ptr->getName().equals(ARCAnnotationTargetIdentifier))
    return;

  Module *M = BB->getParent()->getParent();
  LLVMContext &C = M->getContext();
  Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
  Type *I8XX = PointerType::getUnqual(I8X);
  Type *Params[] = {I8XX, I8XX};
  FunctionType *FTy = FunctionType::get(Type::getVoidTy(C),
                                        ArrayRef<Type*>(Params, 2),
                                        /*isVarArg=*/false);
  Constant *Callee = M->getOrInsertFunction(Name, FTy);

  IRBuilder<> Builder(BB, BB->getFirstInsertionPt());

  Value *PtrName;
  StringRef Tmp = Ptr->getName();
  if (0 == (PtrName = M->getGlobalVariable(Tmp, true))) {
    Value *ActualPtrName = Builder.CreateGlobalStringPtr(Tmp,
                                                         Tmp + "_STR");
    PtrName = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
                                 cast<Constant>(ActualPtrName), Tmp);
  }

  Value *S;
  std::string SeqStr = SequenceToString(Seq);
  if (0 == (S = M->getGlobalVariable(SeqStr, true))) {
    Value *ActualPtrName = Builder.CreateGlobalStringPtr(SeqStr,
                                                         SeqStr + "_STR");
    S = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
                           cast<Constant>(ActualPtrName), SeqStr);
  }

  Builder.CreateCall2(Callee, PtrName, S);
}
/// Add to the end of the basic block llvm.ptr.annotations which show the state
/// of the pointer at the bottom of the basic block.
static void GenerateARCBBTerminatorAnnotation(const char *Name, BasicBlock *BB,
                                              Value *Ptr, Sequence Seq) {
  // If we have a target identifier, make sure that we match it before emitting
  // an annotation.
  if (!ARCAnnotationTargetIdentifier.empty() &&
      !Ptr->getName().equals(ARCAnnotationTargetIdentifier))
    return;

  Module *M = BB->getParent()->getParent();
  LLVMContext &C = M->getContext();
  Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
  Type *I8XX = PointerType::getUnqual(I8X);
  Type *Params[] = {I8XX, I8XX};
  FunctionType *FTy = FunctionType::get(Type::getVoidTy(C),
                                        ArrayRef<Type*>(Params, 2),
                                        /*isVarArg=*/false);
  Constant *Callee = M->getOrInsertFunction(Name, FTy);

  IRBuilder<> Builder(BB, llvm::prior(BB->end()));

  Value *PtrName;
  StringRef Tmp = Ptr->getName();
  if (0 == (PtrName = M->getGlobalVariable(Tmp, true))) {
    Value *ActualPtrName = Builder.CreateGlobalStringPtr(Tmp,
                                                         Tmp + "_STR");
    PtrName = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
                                 cast<Constant>(ActualPtrName), Tmp);
  }

  Value *S;
  std::string SeqStr = SequenceToString(Seq);
  if (0 == (S = M->getGlobalVariable(SeqStr, true))) {
    Value *ActualPtrName = Builder.CreateGlobalStringPtr(SeqStr,
                                                         SeqStr + "_STR");
    S = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
                           cast<Constant>(ActualPtrName), SeqStr);
  }

  Builder.CreateCall2(Callee, PtrName, S);
}
/// Adds a source annotation to pointer and a state change annotation to Inst
/// referencing the source annotation and the old/new state of pointer.
static void GenerateARCAnnotation(unsigned InstMDId,
                                  unsigned PtrMDId,
                                  Instruction *Inst,
                                  Value *Ptr,
                                  Sequence OldSeq,
                                  Sequence NewSeq) {
  if (EnableARCAnnotations) {
    // If we have a target identifier, make sure that we match it before
    // emitting an annotation.
    if (!ARCAnnotationTargetIdentifier.empty() &&
        !Ptr->getName().equals(ARCAnnotationTargetIdentifier))
      return;

    // First generate the source annotation on our pointer. This will return an
    // MDString* if Ptr actually comes from an instruction implying we can put
    // in a source annotation. If AppendMDNodeToSourcePtr returns 0 (i.e. NULL),
    // then we know that our pointer is from an Argument so we put a reference
    // to the argument number.
    //
    // The point of this is to make it easy for the
    // llvm-arc-annotation-processor tool to cross reference where the source
    // pointer is in the LLVM IR, since the LLVM IR parser does not submit such
    // information via debug info for backends to use (since why would anyone
    // need such a thing from LLVM IR besides in non-standard cases).
    MDString *SourcePtrMDNode =
      AppendMDNodeToSourcePtr(PtrMDId, Ptr);
    AppendMDNodeToInstForPtr(InstMDId, Inst, Ptr, SourcePtrMDNode, OldSeq,
                             NewSeq);
  }
}
// The actual interface for accessing the above functionality is defined via
// some simple macros which are defined below. We do this so that the user does
// not need to pass in what metadata id is needed, resulting in cleaner code.
// Additionally it provides an easy way to conditionally no-op all annotation
// support in a non-debug build.

/// Use this macro to annotate a sequence state change when processing
/// instructions bottom up.
#define ANNOTATE_BOTTOMUP(inst, ptr, old, new)                          \
  GenerateARCAnnotation(ARCAnnotationBottomUpMDKind,                    \
                        ARCAnnotationProvenanceSourceMDKind, (inst),    \
                        const_cast<Value*>(ptr), (old), (new))
/// Use this macro to annotate a sequence state change when processing
/// instructions top down.
#define ANNOTATE_TOPDOWN(inst, ptr, old, new)                           \
  GenerateARCAnnotation(ARCAnnotationTopDownMDKind,                     \
                        ARCAnnotationProvenanceSourceMDKind, (inst),    \
                        const_cast<Value*>(ptr), (old), (new))

#define ANNOTATE_BB(_states, _bb, _name, _type, _direction)                   \
  do {                                                                        \
    if (EnableARCAnnotations) {                                               \
      for(BBState::ptr_const_iterator I = (_states)._direction##_ptr_begin(), \
          E = (_states)._direction##_ptr_end(); I != E; ++I) {                \
        Value *Ptr = const_cast<Value*>(I->first);                            \
        Sequence Seq = I->second.GetSeq();                                    \
        GenerateARCBB ## _type ## Annotation(_name, (_bb), Ptr, Seq);         \
      }                                                                       \
    }                                                                         \
  } while (0)

#define ANNOTATE_BOTTOMUP_BBSTART(_states, _basicblock)                       \
  ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.bottomup.bbstart",   \
              Entrance, bottom_up)
#define ANNOTATE_BOTTOMUP_BBEND(_states, _basicblock)                         \
  ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.bottomup.bbend",     \
              Terminator, bottom_up)
#define ANNOTATE_TOPDOWN_BBSTART(_states, _basicblock)                        \
  ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.topdown.bbstart",    \
              Entrance, top_down)
#define ANNOTATE_TOPDOWN_BBEND(_states, _basicblock)                          \
  ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.topdown.bbend",      \
              Terminator, top_down)
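
// Illustrative sketch only (not from the original file): a typical use inside
// a dataflow visitor looks like
//
//   ANNOTATE_BOTTOMUP(Inst, Arg, S.GetSeq(), NewSeq);
//
// which expands to a GenerateARCAnnotation call carrying the bottom-up
// metadata kind, and compiles down to nothing when ARC_ANNOTATIONS is not
// defined (see the no-op definitions below).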
#else // !ARC_ANNOTATION

// If annotations are off, no-op.
#define ANNOTATE_BOTTOMUP(inst, ptr, old, new)
#define ANNOTATE_TOPDOWN(inst, ptr, old, new)
#define ANNOTATE_BOTTOMUP_BBSTART(states, basicblock)
#define ANNOTATE_BOTTOMUP_BBEND(states, basicblock)
#define ANNOTATE_TOPDOWN_BBSTART(states, basicblock)
#define ANNOTATE_TOPDOWN_BBEND(states, basicblock)

#endif // !ARC_ANNOTATION
namespace {
  /// \brief The main ARC optimization pass.
  class ObjCARCOpt : public FunctionPass {
    bool Changed;
    ProvenanceAnalysis PA;

    // This is used to track if a pointer is stored into an alloca.
    DenseSet<const Value *> MultiOwnersSet;

    /// A flag indicating whether this optimization pass should run.
    bool Run;

    /// Declarations for ObjC runtime functions, for use in creating calls to
    /// them. These are initialized lazily to avoid cluttering up the Module
    /// with unused declarations.

    /// Declaration for ObjC runtime function objc_autoreleaseReturnValue.
    Constant *AutoreleaseRVCallee;
    /// Declaration for ObjC runtime function objc_release.
    Constant *ReleaseCallee;
    /// Declaration for ObjC runtime function objc_retain.
    Constant *RetainCallee;
    /// Declaration for ObjC runtime function objc_retainBlock.
    Constant *RetainBlockCallee;
    /// Declaration for ObjC runtime function objc_autorelease.
    Constant *AutoreleaseCallee;

    /// Flags which determine whether each of the interesting runtime functions
    /// is in fact used in the current function.
    unsigned UsedInThisFunction;

    /// The Metadata Kind for clang.imprecise_release metadata.
    unsigned ImpreciseReleaseMDKind;

    /// The Metadata Kind for clang.arc.copy_on_escape metadata.
    unsigned CopyOnEscapeMDKind;

    /// The Metadata Kind for clang.arc.no_objc_arc_exceptions metadata.
    unsigned NoObjCARCExceptionsMDKind;

#ifdef ARC_ANNOTATIONS
    /// The Metadata Kind for llvm.arc.annotation.bottomup metadata.
    unsigned ARCAnnotationBottomUpMDKind;
    /// The Metadata Kind for llvm.arc.annotation.topdown metadata.
    unsigned ARCAnnotationTopDownMDKind;
    /// The Metadata Kind for llvm.arc.annotation.provenancesource metadata.
    unsigned ARCAnnotationProvenanceSourceMDKind;
#endif // ARC_ANNOTATIONS
    Constant *getAutoreleaseRVCallee(Module *M);
    Constant *getReleaseCallee(Module *M);
    Constant *getRetainCallee(Module *M);
    Constant *getRetainBlockCallee(Module *M);
    Constant *getAutoreleaseCallee(Module *M);

    bool IsRetainBlockOptimizable(const Instruction *Inst);

    bool OptimizeRetainRVCall(Function &F, Instruction *RetainRV);
    void OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV,
                                   InstructionClass &Class);
    bool OptimizeRetainBlockCall(Function &F, Instruction *RetainBlock,
                                 InstructionClass &Class);
    void OptimizeIndividualCalls(Function &F);

    void CheckForCFGHazards(const BasicBlock *BB,
                            DenseMap<const BasicBlock *, BBState> &BBStates,
                            BBState &MyStates) const;
    bool VisitInstructionBottomUp(Instruction *Inst,
                                  BasicBlock *BB,
                                  MapVector<Value *, RRInfo> &Retains,
                                  BBState &MyStates);
    bool VisitBottomUp(BasicBlock *BB,
                       DenseMap<const BasicBlock *, BBState> &BBStates,
                       MapVector<Value *, RRInfo> &Retains);
    bool VisitInstructionTopDown(Instruction *Inst,
                                 DenseMap<Value *, RRInfo> &Releases,
                                 BBState &MyStates);
    bool VisitTopDown(BasicBlock *BB,
                      DenseMap<const BasicBlock *, BBState> &BBStates,
                      DenseMap<Value *, RRInfo> &Releases);
    bool Visit(Function &F,
               DenseMap<const BasicBlock *, BBState> &BBStates,
               MapVector<Value *, RRInfo> &Retains,
               DenseMap<Value *, RRInfo> &Releases);

    void MoveCalls(Value *Arg, RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
                   MapVector<Value *, RRInfo> &Retains,
                   DenseMap<Value *, RRInfo> &Releases,
                   SmallVectorImpl<Instruction *> &DeadInsts,
                   Module *M);

    bool ConnectTDBUTraversals(DenseMap<const BasicBlock *, BBState> &BBStates,
                               MapVector<Value *, RRInfo> &Retains,
                               DenseMap<Value *, RRInfo> &Releases,
                               Module *M,
                               SmallVector<Instruction *, 4> &NewRetains,
                               SmallVector<Instruction *, 4> &NewReleases,
                               SmallVector<Instruction *, 8> &DeadInsts,
                               RRInfo &RetainsToMove,
                               RRInfo &ReleasesToMove,
                               Value *Arg,
                               bool KnownSafe,
                               bool &AnyPairsCompletelyEliminated);

    bool PerformCodePlacement(DenseMap<const BasicBlock *, BBState> &BBStates,
                              MapVector<Value *, RRInfo> &Retains,
                              DenseMap<Value *, RRInfo> &Releases,
                              Module *M);

    void OptimizeWeakCalls(Function &F);

    bool OptimizeSequences(Function &F);

    void OptimizeReturns(Function &F);

#ifndef NDEBUG
    void GatherStatistics(Function &F, bool AfterOptimization = false);
#endif

    virtual void getAnalysisUsage(AnalysisUsage &AU) const;
    virtual bool doInitialization(Module &M);
    virtual bool runOnFunction(Function &F);
    virtual void releaseMemory();

  public:
    static char ID;
    ObjCARCOpt() : FunctionPass(ID) {
      initializeObjCARCOptPass(*PassRegistry::getPassRegistry());
    }
  };
}
char ObjCARCOpt::ID = 0;
INITIALIZE_PASS_BEGIN(ObjCARCOpt,
                      "objc-arc", "ObjC ARC optimization", false, false)
INITIALIZE_PASS_DEPENDENCY(ObjCARCAliasAnalysis)
INITIALIZE_PASS_END(ObjCARCOpt,
                    "objc-arc", "ObjC ARC optimization", false, false)

Pass *llvm::createObjCARCOptPass() {
  return new ObjCARCOpt();
}

void ObjCARCOpt::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<ObjCARCAliasAnalysis>();
  AU.addRequired<AliasAnalysis>();
  // ARC optimization doesn't currently split critical edges.
  AU.setPreservesCFG();
}
bool ObjCARCOpt::IsRetainBlockOptimizable(const Instruction *Inst) {
  // Without the magic metadata tag, we have to assume this might be an
  // objc_retainBlock call inserted to convert a block pointer to an id,
  // in which case it really is needed.
  if (!Inst->getMetadata(CopyOnEscapeMDKind))
    return false;

  // If the pointer "escapes" (not including being used in a call),
  // the copy may be needed.
  if (DoesRetainableObjPtrEscape(Inst))
    return false;

  // Otherwise, it's not needed.
  return true;
}
Constant *ObjCARCOpt::getAutoreleaseRVCallee(Module *M) {
  if (!AutoreleaseRVCallee) {
    LLVMContext &C = M->getContext();
    Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
    Type *Params[] = { I8X };
    FunctionType *FTy = FunctionType::get(I8X, Params, /*isVarArg=*/false);
    AttributeSet Attribute =
      AttributeSet().addAttribute(M->getContext(), AttributeSet::FunctionIndex,
                                  Attribute::NoUnwind);
    AutoreleaseRVCallee =
      M->getOrInsertFunction("objc_autoreleaseReturnValue", FTy,
                             Attribute);
  }
  return AutoreleaseRVCallee;
}
Constant *ObjCARCOpt::getReleaseCallee(Module *M) {
  if (!ReleaseCallee) {
    LLVMContext &C = M->getContext();
    Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) };
    AttributeSet Attribute =
      AttributeSet().addAttribute(M->getContext(), AttributeSet::FunctionIndex,
                                  Attribute::NoUnwind);
    ReleaseCallee =
      M->getOrInsertFunction(
        "objc_release",
        FunctionType::get(Type::getVoidTy(C), Params, /*isVarArg=*/false),
        Attribute);
  }
  return ReleaseCallee;
}
Constant *ObjCARCOpt::getRetainCallee(Module *M) {
  if (!RetainCallee) {
    LLVMContext &C = M->getContext();
    Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) };
    AttributeSet Attribute =
      AttributeSet().addAttribute(M->getContext(), AttributeSet::FunctionIndex,
                                  Attribute::NoUnwind);
    RetainCallee =
      M->getOrInsertFunction(
        "objc_retain",
        FunctionType::get(Params[0], Params, /*isVarArg=*/false),
        Attribute);
  }
  return RetainCallee;
}
Constant *ObjCARCOpt::getRetainBlockCallee(Module *M) {
  if (!RetainBlockCallee) {
    LLVMContext &C = M->getContext();
    Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) };
    // objc_retainBlock is not nounwind because it calls user copy constructors
    // which could theoretically throw.
    RetainBlockCallee =
      M->getOrInsertFunction(
        "objc_retainBlock",
        FunctionType::get(Params[0], Params, /*isVarArg=*/false),
        AttributeSet());
  }
  return RetainBlockCallee;
}
Constant *ObjCARCOpt::getAutoreleaseCallee(Module *M) {
  if (!AutoreleaseCallee) {
    LLVMContext &C = M->getContext();
    Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) };
    AttributeSet Attribute =
      AttributeSet().addAttribute(M->getContext(), AttributeSet::FunctionIndex,
                                  Attribute::NoUnwind);
    AutoreleaseCallee =
      M->getOrInsertFunction(
        "objc_autorelease",
        FunctionType::get(Params[0], Params, /*isVarArg=*/false),
        Attribute);
  }
  return AutoreleaseCallee;
}
/// Turn objc_retainAutoreleasedReturnValue into objc_retain if the operand is
/// not a return value. Or, if it can be paired with an
/// objc_autoreleaseReturnValue, delete the pair and return true.
bool
ObjCARCOpt::OptimizeRetainRVCall(Function &F, Instruction *RetainRV) {
  // Check for the argument being from an immediately preceding call or invoke.
  const Value *Arg = GetObjCArg(RetainRV);
  ImmutableCallSite CS(Arg);
  if (const Instruction *Call = CS.getInstruction()) {
    if (Call->getParent() == RetainRV->getParent()) {
      BasicBlock::const_iterator I = Call;
      ++I;
      while (IsNoopInstruction(I)) ++I;
      if (&*I == RetainRV)
        return false;
    } else if (const InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
      BasicBlock *RetainRVParent = RetainRV->getParent();
      if (II->getNormalDest() == RetainRVParent) {
        BasicBlock::const_iterator I = RetainRVParent->begin();
        while (IsNoopInstruction(I)) ++I;
        if (&*I == RetainRV)
          return false;
      }
    }
  }

  // Check for being preceded by an objc_autoreleaseReturnValue on the same
  // pointer. In this case, we can delete the pair.
  BasicBlock::iterator I = RetainRV, Begin = RetainRV->getParent()->begin();
  if (I != Begin) {
    do --I; while (I != Begin && IsNoopInstruction(I));
    if (GetBasicInstructionClass(I) == IC_AutoreleaseRV &&
        GetObjCArg(I) == Arg) {
      Changed = true;
      ++NumPeeps;

      DEBUG(dbgs() << "Erasing autoreleaseRV,retainRV pair: " << *I << "\n"
                   << "Erasing " << *RetainRV << "\n");

      EraseInstruction(I);
      EraseInstruction(RetainRV);
      return true;
    }
  }

  // Turn it to a plain objc_retain.
  Changed = true;
  ++NumPeeps;

  DEBUG(dbgs() << "Transforming objc_retainAutoreleasedReturnValue => "
                  "objc_retain since the operand is not a return value.\n"
                  "Old = " << *RetainRV << "\n");

  cast<CallInst>(RetainRV)->setCalledFunction(getRetainCallee(F.getParent()));

  DEBUG(dbgs() << "New = " << *RetainRV << "\n");

  return false;
}
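
// Illustrative sketch only (hypothetical IR, not from this file): when the
// operand is not the immediately preceding return value,
//
//   %2 = call i8* @objc_retainAutoreleasedReturnValue(i8* %1)
// becomes
//   %2 = call i8* @objc_retain(i8* %1)
//
// whereas an adjacent autoreleaseRV/retainRV pair on the same pointer is
// deleted outright.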
/// Turn objc_autoreleaseReturnValue into objc_autorelease if the result is not
/// used as a return value.
void
ObjCARCOpt::OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV,
                                      InstructionClass &Class) {
  // Check for a return of the pointer value.
  const Value *Ptr = GetObjCArg(AutoreleaseRV);
  SmallVector<const Value *, 2> Users;
  Users.push_back(Ptr);
  do {
    Ptr = Users.pop_back_val();
    for (Value::const_use_iterator UI = Ptr->use_begin(), UE = Ptr->use_end();
         UI != UE; ++UI) {
      const User *I = *UI;
      if (isa<ReturnInst>(I) || GetBasicInstructionClass(I) == IC_RetainRV)
        return;
      if (isa<BitCastInst>(I))
        Users.push_back(I);
    }
  } while (!Users.empty());

  Changed = true;
  ++NumPeeps;

  DEBUG(dbgs() << "Transforming objc_autoreleaseReturnValue => "
                  "objc_autorelease since its operand is not used as a return "
                  "value.\n"
                  "Old = " << *AutoreleaseRV << "\n");

  CallInst *AutoreleaseRVCI = cast<CallInst>(AutoreleaseRV);
  AutoreleaseRVCI->
    setCalledFunction(getAutoreleaseCallee(F.getParent()));
  AutoreleaseRVCI->setTailCall(false); // Never tail call objc_autorelease.
  Class = IC_Autorelease;

  DEBUG(dbgs() << "New: " << *AutoreleaseRV << "\n");
}
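
// Illustrative sketch only (hypothetical IR, not from this file): if no use
// of %0 reaches a ret or an objc_retainAutoreleasedReturnValue,
//
//   %1 = tail call i8* @objc_autoreleaseReturnValue(i8* %0)
// becomes
//   %1 = call i8* @objc_autorelease(i8* %0)
//
// with the tail marker dropped, since objc_autorelease must never be tail
// called.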
// \brief Attempt to strength reduce objc_retainBlock calls to objc_retain
// calls.
//
// Specifically: If an objc_retainBlock call has the copy_on_escape metadata and
// does not escape (following the rules of block escaping), strength reduce the
// objc_retainBlock to an objc_retain.
//
// TODO: If an objc_retainBlock call is dominated outright by a previous
// objc_retainBlock call, strength reduce the objc_retainBlock to an
// objc_retain.
bool
ObjCARCOpt::OptimizeRetainBlockCall(Function &F, Instruction *Inst,
                                    InstructionClass &Class) {
  assert(GetBasicInstructionClass(Inst) == Class);
  assert(IC_RetainBlock == Class);

  // If we can not optimize Inst, return false.
  if (!IsRetainBlockOptimizable(Inst))
    return false;

  Changed = true;
  ++NumPeeps;

  DEBUG(dbgs() << "Strength reduced retainBlock => retain.\n");
  DEBUG(dbgs() << "Old: " << *Inst << "\n");
  CallInst *RetainBlock = cast<CallInst>(Inst);
  RetainBlock->setCalledFunction(getRetainCallee(F.getParent()));
  // Remove copy_on_escape metadata.
  RetainBlock->setMetadata(CopyOnEscapeMDKind, 0);
  Class = IC_Retain;
  DEBUG(dbgs() << "New: " << *Inst << "\n");
  return true;
}
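
// Illustrative sketch only (hypothetical IR, not from this file): for a
// non-escaping block pointer carrying the copy_on_escape tag,
//
//   %1 = call i8* @objc_retainBlock(i8* %block), !clang.arc.copy_on_escape !0
// becomes
//   %1 = call i8* @objc_retain(i8* %block)
//
// with the metadata tag dropped along with the strength reduction.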
/// Visit each call, one at a time, and make simplifications without doing any
/// additional analysis.
void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
  DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeIndividualCalls ==\n");

  // Reset all the flags in preparation for recomputing them.
  UsedInThisFunction = 0;

  // Visit all objc_* calls in F.
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;

    InstructionClass Class = GetBasicInstructionClass(Inst);

    DEBUG(dbgs() << "Visiting: Class: " << Class << "; " << *Inst << "\n");

    switch (Class) {
    default: break;

    // Delete no-op casts. These function calls have special semantics, but
    // the semantics are entirely implemented via lowering in the front-end,
    // so by the time they reach the optimizer, they are just no-op calls
    // which return their argument.
    //
    // There are gray areas here, as the ability to cast reference-counted
    // pointers to raw void* and back allows code to break ARC assumptions,
    // however these are currently considered to be unimportant.
    case IC_NoopCast:
      Changed = true;
      ++NumNoops;
      DEBUG(dbgs() << "Erasing no-op cast: " << *Inst << "\n");
      EraseInstruction(Inst);
      continue;
    // If the pointer-to-weak-pointer is null, it's undefined behavior.
    case IC_StoreWeak:
    case IC_LoadWeak:
    case IC_LoadWeakRetained:
    case IC_InitWeak:
    case IC_DestroyWeak: {
      CallInst *CI = cast<CallInst>(Inst);
      if (IsNullOrUndef(CI->getArgOperand(0))) {
        Changed = true;
        Type *Ty = CI->getArgOperand(0)->getType();
        new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
                      Constant::getNullValue(Ty),
                      CI);
        llvm::Value *NewValue = UndefValue::get(CI->getType());
        DEBUG(dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
                       "\nOld = " << *CI << "\nNew = " << *NewValue << "\n");
        CI->replaceAllUsesWith(NewValue);
        CI->eraseFromParent();
        continue;
      }
      break;
    }
    case IC_CopyWeak:
    case IC_MoveWeak: {
      CallInst *CI = cast<CallInst>(Inst);
      if (IsNullOrUndef(CI->getArgOperand(0)) ||
          IsNullOrUndef(CI->getArgOperand(1))) {
        Changed = true;
        Type *Ty = CI->getArgOperand(0)->getType();
        new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
                      Constant::getNullValue(Ty),
                      CI);

        llvm::Value *NewValue = UndefValue::get(CI->getType());
        DEBUG(dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
                       "\nOld = " << *CI << "\nNew = " << *NewValue << "\n");

        CI->replaceAllUsesWith(NewValue);
        CI->eraseFromParent();
        continue;
      }
      break;
    }
    case IC_RetainBlock:
      // If we strength reduce an objc_retainBlock to an objc_retain, continue
      // onto the objc_retain peephole optimizations. Otherwise break.
      OptimizeRetainBlockCall(F, Inst, Class);
      break;
    case IC_RetainRV:
      if (OptimizeRetainRVCall(F, Inst))
        continue;
      break;
    case IC_AutoreleaseRV:
      OptimizeAutoreleaseRVCall(F, Inst, Class);
      break;
    }
    // objc_autorelease(x) -> objc_release(x) if x is otherwise unused.
    if (IsAutorelease(Class) && Inst->use_empty()) {
      CallInst *Call = cast<CallInst>(Inst);
      const Value *Arg = Call->getArgOperand(0);
      Arg = FindSingleUseIdentifiedObject(Arg);
      if (Arg) {
        Changed = true;
        ++NumAutoreleases;

        // Create the declaration lazily.
        LLVMContext &C = Inst->getContext();
        CallInst *NewCall =
          CallInst::Create(getReleaseCallee(F.getParent()),
                           Call->getArgOperand(0), "", Call);
        NewCall->setMetadata(ImpreciseReleaseMDKind, MDNode::get(C, None));

        DEBUG(dbgs() << "Replacing autorelease{,RV}(x) with objc_release(x) "
              "since x is otherwise unused.\nOld: " << *Call << "\nNew: "
              << *NewCall << "\n");

        EraseInstruction(Call);
        Inst = NewCall;
        Class = IC_Release;
      }
    }
    // For functions which can never be passed stack arguments, add
    // a tail keyword.
    if (IsAlwaysTail(Class)) {
      Changed = true;
      DEBUG(dbgs() << "Adding tail keyword to function since it can never be "
                      "passed stack args: " << *Inst << "\n");
      cast<CallInst>(Inst)->setTailCall();
    }

    // Ensure that functions that can never have a "tail" keyword due to the
    // semantics of ARC truly do not do so.
    if (IsNeverTail(Class)) {
      Changed = true;
      DEBUG(dbgs() << "Removing tail keyword from function: " << *Inst <<
            "\n");
      cast<CallInst>(Inst)->setTailCall(false);
    }

    // Set nounwind as needed.
    if (IsNoThrow(Class)) {
      Changed = true;
      DEBUG(dbgs() << "Found no throw class. Setting nounwind on: " << *Inst
            << "\n");
      cast<CallInst>(Inst)->setDoesNotThrow();
    }

    if (!IsNoopOnNull(Class)) {
      UsedInThisFunction |= 1 << Class;
      continue;
    }
    const Value *Arg = GetObjCArg(Inst);

    // ARC calls with null are no-ops. Delete them.
    if (IsNullOrUndef(Arg)) {
      Changed = true;
      ++NumNoops;
      DEBUG(dbgs() << "ARC calls with null are no-ops. Erasing: " << *Inst
            << "\n");
      EraseInstruction(Inst);
      continue;
    }

    // Keep track of which of retain, release, autorelease, and retain_block
    // are actually present in this function.
    UsedInThisFunction |= 1 << Class;

    // If Arg is a PHI, and one or more incoming values to the
    // PHI are null, and the call is control-equivalent to the PHI, and there
    // are no relevant side effects between the PHI and the call, the call
    // could be pushed up to just those paths with non-null incoming values.
    // For now, don't bother splitting critical edges for this.
    SmallVector<std::pair<Instruction *, const Value *>, 4> Worklist;
    Worklist.push_back(std::make_pair(Inst, Arg));
    do {
      std::pair<Instruction *, const Value *> Pair = Worklist.pop_back_val();
      Inst = Pair.first;
      Arg = Pair.second;
      const PHINode *PN = dyn_cast<PHINode>(Arg);
      if (!PN) continue;

      // Determine if the PHI has any null operands, or any incoming
      // critical edges.
      bool HasNull = false;
      bool HasCriticalEdges = false;
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        Value *Incoming =
          StripPointerCastsAndObjCCalls(PN->getIncomingValue(i));
        if (IsNullOrUndef(Incoming))
          HasNull = true;
        else if (cast<TerminatorInst>(PN->getIncomingBlock(i)->back())
                   .getNumSuccessors() != 1) {
          HasCriticalEdges = true;
          break;
        }
      }
      // If we have null operands and no critical edges, optimize.
      if (!HasCriticalEdges && HasNull) {
        SmallPtrSet<Instruction *, 4> DependingInstructions;
        SmallPtrSet<const BasicBlock *, 4> Visited;

        // Check that there is nothing that cares about the reference
        // count between the call and the phi.
        switch (Class) {
        case IC_Retain:
        case IC_RetainBlock:
          // These can always be moved up.
          break;
        case IC_Release:
          // These can't be moved across things that care about the retain
          // count.
          FindDependencies(NeedsPositiveRetainCount, Arg,
                           Inst->getParent(), Inst,
                           DependingInstructions, Visited, PA);
          break;
        case IC_Autorelease:
          // These can't be moved across autorelease pool scope boundaries.
          FindDependencies(AutoreleasePoolBoundary, Arg,
                           Inst->getParent(), Inst,
                           DependingInstructions, Visited, PA);
          break;
        case IC_RetainRV:
        case IC_AutoreleaseRV:
          // Don't move these; the RV optimization depends on the autoreleaseRV
          // being tail called, and the retainRV being immediately after a call
          // (which might still happen if we get lucky with codegen layout, but
          // it's not worth taking the chance).
          continue;
        default:
          llvm_unreachable("Invalid dependence flavor");
        }
        if (DependingInstructions.size() == 1 &&
            *DependingInstructions.begin() == PN) {
          Changed = true;
          ++NumPartialNoops;
          // Clone the call into each predecessor that has a non-null value.
          CallInst *CInst = cast<CallInst>(Inst);
          Type *ParamTy = CInst->getArgOperand(0)->getType();
          for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
            Value *Incoming =
              StripPointerCastsAndObjCCalls(PN->getIncomingValue(i));
            if (!IsNullOrUndef(Incoming)) {
              CallInst *Clone = cast<CallInst>(CInst->clone());
              Value *Op = PN->getIncomingValue(i);
              Instruction *InsertPos = &PN->getIncomingBlock(i)->back();
              if (Op->getType() != ParamTy)
                Op = new BitCastInst(Op, ParamTy, "", InsertPos);
              Clone->setArgOperand(0, Op);
              Clone->insertBefore(InsertPos);

              DEBUG(dbgs() << "Cloning "
                    << *CInst << "\n"
                       "And inserting clone at " << *InsertPos << "\n");
              Worklist.push_back(std::make_pair(Clone, Incoming));
            }
          }
          // Erase the original call.
          DEBUG(dbgs() << "Erasing: " << *CInst << "\n");
          EraseInstruction(CInst);
          continue;
        }
      }
    } while (!Worklist.empty());
  }
}
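
// Illustrative sketch only (hypothetical IR, not from this file): given a phi
// with one null incoming value and no side effects between the phi and the
// call,
//
//   %p = phi i8* [ %x, %bb1 ], [ null, %bb2 ]
//   call void @objc_release(i8* %p)
//
// the release is cloned into %bb1 only (the non-null path) and the original
// call is erased, since objc_release(null) is a no-op.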
/// If we have a top down pointer in the S_Use state, make sure that there are
/// no CFG hazards by checking the states of various bottom up pointers.
static void CheckForUseCFGHazard(const Sequence SuccSSeq,
                                 const bool SuccSRRIKnownSafe,
                                 PtrState &S,
                                 bool &SomeSuccHasSame,
                                 bool &AllSuccsHaveSame,
                                 bool &NotAllSeqEqualButKnownSafe,
                                 bool &ShouldContinue) {
  switch (SuccSSeq) {
  case S_CanRelease: {
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe) {
      S.ClearSequenceProgress();
      break;
    }
    S.SetCFGHazardAfflicted(true);
    ShouldContinue = true;
    break;
  }
  case S_Use:
    SomeSuccHasSame = true;
    break;
  case S_Stop:
  case S_Release:
  case S_MovableRelease:
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
      AllSuccsHaveSame = false;
    else
      NotAllSeqEqualButKnownSafe = true;
    break;
  case S_Retain:
    llvm_unreachable("bottom-up pointer in retain state!");
  case S_None:
    llvm_unreachable("This should have been handled earlier.");
  }
}
/// If we have a Top Down pointer in the S_CanRelease state, make sure that
/// there are no CFG hazards by checking the states of various bottom up
/// pointers.
static void CheckForCanReleaseCFGHazard(const Sequence SuccSSeq,
                                        const bool SuccSRRIKnownSafe,
                                        PtrState &S,
                                        bool &SomeSuccHasSame,
                                        bool &AllSuccsHaveSame,
                                        bool &NotAllSeqEqualButKnownSafe) {
  switch (SuccSSeq) {
  case S_CanRelease:
    SomeSuccHasSame = true;
    break;
  case S_Stop:
  case S_Release:
  case S_MovableRelease:
  case S_Use:
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
      AllSuccsHaveSame = false;
    else
      NotAllSeqEqualButKnownSafe = true;
    break;
  case S_Retain:
    llvm_unreachable("bottom-up pointer in retain state!");
  case S_None:
    llvm_unreachable("This should have been handled earlier.");
  }
}
1831 /// Check for critical edges, loop boundaries, irreducible control flow, or
1832 /// other CFG structures where moving code across the edge would result in it
1833 /// being executed more.
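/// For example (hypothetical scenario): sinking a retain across a loop
/// back-edge, or hoisting a release above one, would make it execute once
/// per iteration rather than once, unbalancing the reference count.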
1835 ObjCARCOpt::CheckForCFGHazards(const BasicBlock *BB,
1836 DenseMap<const BasicBlock *, BBState> &BBStates,
1837 BBState &MyStates) const {
1838 // If any top-down local-use or possible-dec has a succ which is earlier in
1839 // the sequence, forget it.
1840 for (BBState::ptr_iterator I = MyStates.top_down_ptr_begin(),
1841 E = MyStates.top_down_ptr_end(); I != E; ++I) {
1842 PtrState &S = I->second;
1843 const Sequence Seq = I->second.GetSeq();
1845 // We only care about S_Retain, S_CanRelease, and S_Use.
1849 // Make sure that if extra top down states are added in the future that this
1850 // code is updated to handle it.
1851 assert((Seq == S_Retain || Seq == S_CanRelease || Seq == S_Use) &&
1852 "Unknown top down sequence state.");
1854 const Value *Arg = I->first;
1855 const TerminatorInst *TI = cast<TerminatorInst>(&BB->back());
1856 bool SomeSuccHasSame = false;
1857 bool AllSuccsHaveSame = true;
1858 bool NotAllSeqEqualButKnownSafe = false;
1860 succ_const_iterator SI(TI), SE(TI, false);
1862 for (; SI != SE; ++SI) {
1863 // If VisitBottomUp has pointer information for this successor, take
1864 // what we know about it.
1865 const DenseMap<const BasicBlock *, BBState>::iterator BBI =
BBStates.find(*SI);
1867 assert(BBI != BBStates.end());
1868 const PtrState &SuccS = BBI->second.getPtrBottomUpState(Arg);
1869 const Sequence SuccSSeq = SuccS.GetSeq();
1871 // If the bottom-up pointer is in the S_None state, clear the sequence
1872 // progress, since the sequence in the bottom-up state finished,
1873 // suggesting a mismatch between retains and releases. This is true for
1874 // all three cases that we are handling here: S_Retain, S_Use, and S_CanRelease.
1876 if (SuccSSeq == S_None) {
1877 S.ClearSequenceProgress();
1881 // If we have S_Use or S_CanRelease, perform our check for CFG hazards
1883 const bool SuccSRRIKnownSafe = SuccS.IsKnownSafe();
1885 // *NOTE* We do not use Seq from above here since we are allowing for
1886 // S.GetSeq() to change while we are visiting basic blocks.
1887 switch (S.GetSeq()) {
1889 bool ShouldContinue = false;
1890 CheckForUseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S, SomeSuccHasSame,
1891 AllSuccsHaveSame, NotAllSeqEqualButKnownSafe,
ShouldContinue);
1897 case S_CanRelease: {
1898 CheckForCanReleaseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S,
1899 SomeSuccHasSame, AllSuccsHaveSame,
1900 NotAllSeqEqualButKnownSafe);
1907 case S_MovableRelease:
1912 // If the state at the other end of any of the successor edges
1913 // matches the current state, require all edges to match. This
1914 // guards against loops in the middle of a sequence.
1915 if (SomeSuccHasSame && !AllSuccsHaveSame) {
1916 S.ClearSequenceProgress();
1917 } else if (NotAllSeqEqualButKnownSafe) {
1918 // If we would have cleared the state were it not for the fact that we are
1919 // known safe, stop code motion. Whether or not it is safe to remove RR
1920 // pairs via KnownSafe is an orthogonal concept to whether we are allowed
1921 // to perform code motion.
1922 S.SetCFGHazardAfflicted(true);
1928 ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst, BasicBlock *BB,
1930 MapVector<Value *, RRInfo> &Retains,
1931 BBState &MyStates) {
1932 bool NestingDetected = false;
1933 InstructionClass Class = GetInstructionClass(Inst);
1934 const Value *Arg = 0;
1936 DEBUG(dbgs() << "Class: " << Class << "\n");
1940 Arg = GetObjCArg(Inst);
1942 PtrState &S = MyStates.getPtrBottomUpState(Arg);
1944 // Check for two releases in a row on the same pointer. If we see one, make
1945 // a note, and we'll circle back to revisit it after we've
1946 // hopefully eliminated the second release, which may allow us to
1947 // eliminate the first release too.
1948 // Theoretically we could implement removal of nested retain+release
1949 // pairs by making PtrState hold a stack of states, but this is
1950 // simple and avoids adding overhead for the non-nested case.
1951 if (S.GetSeq() == S_Release || S.GetSeq() == S_MovableRelease) {
1952 DEBUG(dbgs() << "Found nested releases (i.e. a release pair)\n");
1953 NestingDetected = true;
1956 MDNode *ReleaseMetadata = Inst->getMetadata(ImpreciseReleaseMDKind);
1957 Sequence NewSeq = ReleaseMetadata ? S_MovableRelease : S_Release;
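// Sketch of the two flavors this distinguishes (metadata node hypothetical):
//   call void @objc_release(i8* %x), !clang.imprecise_release !0
// begins an S_MovableRelease sequence, while a plain
//   call void @objc_release(i8* %x)
// begins a non-movable S_Release sequence.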
1958 ANNOTATE_BOTTOMUP(Inst, Arg, S.GetSeq(), NewSeq);
1959 S.ResetSequenceProgress(NewSeq);
1960 S.SetReleaseMetadata(ReleaseMetadata);
1961 S.SetKnownSafe(S.HasKnownPositiveRefCount());
1962 S.SetTailCallRelease(cast<CallInst>(Inst)->isTailCall());
1964 S.SetKnownPositiveRefCount();
1967 case IC_RetainBlock:
1968 // In OptimizeIndividualCalls, we have strength reduced all optimizable
1969 // objc_retainBlocks to objc_retains. Thus at this point any
1970 // objc_retainBlocks that we see are not optimizable.
1974 Arg = GetObjCArg(Inst);
1976 PtrState &S = MyStates.getPtrBottomUpState(Arg);
1977 S.SetKnownPositiveRefCount();
1979 Sequence OldSeq = S.GetSeq();
1983 case S_MovableRelease:
1985 // If OldSeq is not S_Use or OldSeq is S_Use and we are tracking an
1986 // imprecise release, clear our reverse insertion points.
1987 if (OldSeq != S_Use || S.IsTrackingImpreciseReleases())
1988 S.ClearReverseInsertPts();
1991 // Don't do retain+release tracking for IC_RetainRV, because it's
1992 // better to let it remain as the first instruction after a call.
1993 if (Class != IC_RetainRV)
1994 Retains[Inst] = S.RRI;
1995 S.ClearSequenceProgress();
2000 llvm_unreachable("bottom-up pointer in retain state!");
2002 ANNOTATE_BOTTOMUP(Inst, Arg, OldSeq, S.GetSeq());
2003 // A retain moving bottom up can be a use.
2006 case IC_AutoreleasepoolPop:
2007 // Conservatively, clear MyStates for all known pointers.
2008 MyStates.clearBottomUpPointers();
2009 return NestingDetected;
2010 case IC_AutoreleasepoolPush:
2012 // These are irrelevant.
2013 return NestingDetected;
2015 // If we have a store into an alloca of a pointer we are tracking, the
2016 // pointer has multiple owners implying that we must be more conservative.
2018 // This comes up in the context of a pointer being ``KnownSafe''. In the
2019 // presence of a block being initialized, the frontend will emit the
2020 // objc_retain on the original pointer and the release on the pointer loaded
2021 // from the alloca. Through provenance analysis, the optimizer will
2022 // realize that the two are related, but since we only require KnownSafe in
2023 // one direction, it will match the inner retain on the original pointer with
2024 // the guard release on the original pointer. This is fixed by ensuring that
2025 // in the presence of allocas we only unconditionally remove pointers if
2026 // both our retain and our release are KnownSafe.
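// Hypothetical sketch of the pattern described above:
//   %slot = alloca i8*
//   store i8* %obj, i8** %slot
//   call i8* @objc_retain(i8* %obj)
//   ...
//   %loaded = load i8** %slot
//   call void @objc_release(i8* %loaded)
// Provenance analysis relates %obj and %loaded, so the store marks the
// pointer as having multiple owners.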
2027 if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
2028 if (AreAnyUnderlyingObjectsAnAlloca(SI->getPointerOperand())) {
2029 BBState::ptr_iterator I = MyStates.findPtrBottomUpState(
2030 StripPointerCastsAndObjCCalls(SI->getValueOperand()));
2031 if (I != MyStates.bottom_up_ptr_end())
2032 MultiOwnersSet.insert(I->first);
2040 // Consider any other possible effects of this instruction on each
2041 // pointer being tracked.
2042 for (BBState::ptr_iterator MI = MyStates.bottom_up_ptr_begin(),
2043 ME = MyStates.bottom_up_ptr_end(); MI != ME; ++MI) {
2044 const Value *Ptr = MI->first;
2046 continue; // Handled above.
2047 PtrState &S = MI->second;
2048 Sequence Seq = S.GetSeq();
2050 // Check for possible releases.
2051 if (CanAlterRefCount(Inst, Ptr, PA, Class)) {
2052 DEBUG(dbgs() << "CanAlterRefCount: Seq: " << Seq << "; " << *Ptr
<< "\n");
2054 S.ClearKnownPositiveRefCount();
2057 S.SetSeq(S_CanRelease);
2058 ANNOTATE_BOTTOMUP(Inst, Ptr, Seq, S.GetSeq());
2062 case S_MovableRelease:
2067 llvm_unreachable("bottom-up pointer in retain state!");
2071 // Check for possible direct uses.
2074 case S_MovableRelease:
2075 if (CanUse(Inst, Ptr, PA, Class)) {
2076 DEBUG(dbgs() << "CanUse: Seq: " << Seq << "; " << *Ptr
<< "\n");
2078 assert(!S.HasReverseInsertPts());
2079 // If this is an invoke instruction, we're scanning it as part of
2080 // one of its successor blocks, since we can't insert code after it
2081 // in its own block, and we don't want to split critical edges.
2082 if (isa<InvokeInst>(Inst))
2083 S.InsertReverseInsertPt(BB->getFirstInsertionPt());
2085 S.InsertReverseInsertPt(llvm::next(BasicBlock::iterator(Inst)));
2087 ANNOTATE_BOTTOMUP(Inst, Ptr, Seq, S_Use);
2088 } else if (Seq == S_Release && IsUser(Class)) {
2089 DEBUG(dbgs() << "PreciseReleaseUse: Seq: " << Seq << "; " << *Ptr
<< "\n");
2091 // Non-movable releases depend on any possible objc pointer use.
2093 ANNOTATE_BOTTOMUP(Inst, Ptr, S_Release, S_Stop);
2094 assert(!S.HasReverseInsertPts());
2095 // As above; handle invoke specially.
2096 if (isa<InvokeInst>(Inst))
2097 S.InsertReverseInsertPt(BB->getFirstInsertionPt());
2099 S.InsertReverseInsertPt(llvm::next(BasicBlock::iterator(Inst)));
2103 if (CanUse(Inst, Ptr, PA, Class)) {
2104 DEBUG(dbgs() << "PreciseStopUse: Seq: " << Seq << "; " << *Ptr
<< "\n");
2107 ANNOTATE_BOTTOMUP(Inst, Ptr, Seq, S_Use);
2115 llvm_unreachable("bottom-up pointer in retain state!");
2119 return NestingDetected;
2123 ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
2124 DenseMap<const BasicBlock *, BBState> &BBStates,
2125 MapVector<Value *, RRInfo> &Retains) {
2127 DEBUG(dbgs() << "\n== ObjCARCOpt::VisitBottomUp ==\n");
2129 bool NestingDetected = false;
2130 BBState &MyStates = BBStates[BB];
2132 // Merge the states from each successor to compute the initial state
2133 // for the current block.
2134 BBState::edge_iterator SI(MyStates.succ_begin()),
2135 SE(MyStates.succ_end());
2137 const BasicBlock *Succ = *SI;
2138 DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Succ);
2139 assert(I != BBStates.end());
2140 MyStates.InitFromSucc(I->second);
2142 for (; SI != SE; ++SI) {
Succ = *SI;
2144 I = BBStates.find(Succ);
2145 assert(I != BBStates.end());
2146 MyStates.MergeSucc(I->second);
2150 // If ARC Annotations are enabled, output the current state of pointers at the
2151 // bottom of the basic block.
2152 ANNOTATE_BOTTOMUP_BBEND(MyStates, BB);
2154 // Visit all the instructions, bottom-up.
2155 for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; --I) {
2156 Instruction *Inst = llvm::prior(I);
2158 // Invoke instructions are visited as part of their successors (below).
2159 if (isa<InvokeInst>(Inst))
2162 DEBUG(dbgs() << "Visiting " << *Inst << "\n");
2164 NestingDetected |= VisitInstructionBottomUp(Inst, BB, Retains, MyStates);
2167 // If there's a predecessor with an invoke, visit the invoke as if it were
2168 // part of this block, since we can't insert code after an invoke in its own
2169 // block, and we don't want to split critical edges.
2170 for (BBState::edge_iterator PI(MyStates.pred_begin()),
2171 PE(MyStates.pred_end()); PI != PE; ++PI) {
2172 BasicBlock *Pred = *PI;
2173 if (InvokeInst *II = dyn_cast<InvokeInst>(&Pred->back()))
2174 NestingDetected |= VisitInstructionBottomUp(II, BB, Retains, MyStates);
2177 // If ARC Annotations are enabled, output the current state of pointers at the
2178 // top of the basic block.
2179 ANNOTATE_BOTTOMUP_BBSTART(MyStates, BB);
2181 return NestingDetected;
2185 ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst,
2186 DenseMap<Value *, RRInfo> &Releases,
2187 BBState &MyStates) {
2188 bool NestingDetected = false;
2189 InstructionClass Class = GetInstructionClass(Inst);
2190 const Value *Arg = 0;
2193 case IC_RetainBlock:
2194 // In OptimizeIndividualCalls, we have strength reduced all optimizable
2195 // objc_retainBlocks to objc_retains. Thus at this point any
2196 // objc_retainBlocks that we see are not optimizable.
2200 Arg = GetObjCArg(Inst);
2202 PtrState &S = MyStates.getPtrTopDownState(Arg);
2204 // Don't do retain+release tracking for IC_RetainRV, because it's
2205 // better to let it remain as the first instruction after a call.
2206 if (Class != IC_RetainRV) {
2207 // Check for two retains in a row on the same pointer. If we see one, make
2208 // a note, and we'll circle back to revisit it after we've
2209 // hopefully eliminated the second retain, which may allow us to
2210 // eliminate the first retain too.
2211 // Theoretically we could implement removal of nested retain+release
2212 // pairs by making PtrState hold a stack of states, but this is
2213 // simple and avoids adding overhead for the non-nested case.
2214 if (S.GetSeq() == S_Retain)
2215 NestingDetected = true;
2217 ANNOTATE_TOPDOWN(Inst, Arg, S.GetSeq(), S_Retain);
2218 S.ResetSequenceProgress(S_Retain);
2219 S.SetKnownSafe(S.HasKnownPositiveRefCount());
2223 S.SetKnownPositiveRefCount();
2225 // A retain can be a potential use; proceed to the generic checking
2230 Arg = GetObjCArg(Inst);
2232 PtrState &S = MyStates.getPtrTopDownState(Arg);
2233 S.ClearKnownPositiveRefCount();
2235 Sequence OldSeq = S.GetSeq();
2237 MDNode *ReleaseMetadata = Inst->getMetadata(ImpreciseReleaseMDKind);
2242 if (OldSeq == S_Retain || ReleaseMetadata != 0)
2243 S.ClearReverseInsertPts();
2246 S.SetReleaseMetadata(ReleaseMetadata);
2247 S.SetTailCallRelease(cast<CallInst>(Inst)->isTailCall());
2248 Releases[Inst] = S.RRI;
2249 ANNOTATE_TOPDOWN(Inst, Arg, S.GetSeq(), S_None);
2250 S.ClearSequenceProgress();
2256 case S_MovableRelease:
2257 llvm_unreachable("top-down pointer in release state!");
2261 case IC_AutoreleasepoolPop:
2262 // Conservatively, clear MyStates for all known pointers.
2263 MyStates.clearTopDownPointers();
2264 return NestingDetected;
2265 case IC_AutoreleasepoolPush:
2267 // These are irrelevant.
2268 return NestingDetected;
2273 // Consider any other possible effects of this instruction on each
2274 // pointer being tracked.
2275 for (BBState::ptr_iterator MI = MyStates.top_down_ptr_begin(),
2276 ME = MyStates.top_down_ptr_end(); MI != ME; ++MI) {
2277 const Value *Ptr = MI->first;
2279 continue; // Handled above.
2280 PtrState &S = MI->second;
2281 Sequence Seq = S.GetSeq();
2283 // Check for possible releases.
2284 if (CanAlterRefCount(Inst, Ptr, PA, Class)) {
2285 DEBUG(dbgs() << "CanAlterRefCount: Seq: " << Seq << "; " << *Ptr
<< "\n");
2287 S.ClearKnownPositiveRefCount();
2290 S.SetSeq(S_CanRelease);
2291 ANNOTATE_TOPDOWN(Inst, Ptr, Seq, S_CanRelease);
2292 assert(!S.HasReverseInsertPts());
2293 S.InsertReverseInsertPt(Inst);
2295 // One call can't cause a transition from S_Retain to S_CanRelease
2296 // and S_CanRelease to S_Use. If we've made the first transition, we're done.
2305 case S_MovableRelease:
2306 llvm_unreachable("top-down pointer in release state!");
2310 // Check for possible direct uses.
2313 if (CanUse(Inst, Ptr, PA, Class)) {
2314 DEBUG(dbgs() << "CanUse: Seq: " << Seq << "; " << *Ptr
<< "\n");
2317 ANNOTATE_TOPDOWN(Inst, Ptr, Seq, S_Use);
2326 case S_MovableRelease:
2327 llvm_unreachable("top-down pointer in release state!");
2331 return NestingDetected;
2335 ObjCARCOpt::VisitTopDown(BasicBlock *BB,
2336 DenseMap<const BasicBlock *, BBState> &BBStates,
2337 DenseMap<Value *, RRInfo> &Releases) {
2338 DEBUG(dbgs() << "\n== ObjCARCOpt::VisitTopDown ==\n");
2339 bool NestingDetected = false;
2340 BBState &MyStates = BBStates[BB];
2342 // Merge the states from each predecessor to compute the initial state
2343 // for the current block.
2344 BBState::edge_iterator PI(MyStates.pred_begin()),
2345 PE(MyStates.pred_end());
2347 const BasicBlock *Pred = *PI;
2348 DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Pred);
2349 assert(I != BBStates.end());
2350 MyStates.InitFromPred(I->second);
2352 for (; PI != PE; ++PI) {
Pred = *PI;
2354 I = BBStates.find(Pred);
2355 assert(I != BBStates.end());
2356 MyStates.MergePred(I->second);
2360 // If ARC Annotations are enabled, output the current state of pointers at the
2361 // top of the basic block.
2362 ANNOTATE_TOPDOWN_BBSTART(MyStates, BB);
2364 // Visit all the instructions, top-down.
2365 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
2366 Instruction *Inst = I;
2368 DEBUG(dbgs() << "Visiting " << *Inst << "\n");
2370 NestingDetected |= VisitInstructionTopDown(Inst, Releases, MyStates);
2373 // If ARC Annotations are enabled, output the current state of pointers at the
2374 // bottom of the basic block.
2375 ANNOTATE_TOPDOWN_BBEND(MyStates, BB);
2377 #ifdef ARC_ANNOTATIONS
2378 if (!(EnableARCAnnotations && DisableCheckForCFGHazards))
#endif // ARC_ANNOTATIONS
2380 CheckForCFGHazards(BB, BBStates, MyStates);
2381 return NestingDetected;
2385 ComputePostOrders(Function &F,
2386 SmallVectorImpl<BasicBlock *> &PostOrder,
2387 SmallVectorImpl<BasicBlock *> &ReverseCFGPostOrder,
2388 unsigned NoObjCARCExceptionsMDKind,
2389 DenseMap<const BasicBlock *, BBState> &BBStates) {
2390 /// The visited set, for doing DFS walks.
2391 SmallPtrSet<BasicBlock *, 16> Visited;
2393 // Do DFS, computing the PostOrder.
2394 SmallPtrSet<BasicBlock *, 16> OnStack;
2395 SmallVector<std::pair<BasicBlock *, succ_iterator>, 16> SuccStack;
2397 // Functions always have exactly one entry block, and we don't have
2398 // any other block that we treat like an entry block.
2399 BasicBlock *EntryBB = &F.getEntryBlock();
2400 BBState &MyStates = BBStates[EntryBB];
2401 MyStates.SetAsEntry();
2402 TerminatorInst *EntryTI = cast<TerminatorInst>(&EntryBB->back());
2403 SuccStack.push_back(std::make_pair(EntryBB, succ_iterator(EntryTI)));
2404 Visited.insert(EntryBB);
2405 OnStack.insert(EntryBB);
2408 BasicBlock *CurrBB = SuccStack.back().first;
2409 TerminatorInst *TI = cast<TerminatorInst>(&CurrBB->back());
2410 succ_iterator SE(TI, false);
2412 while (SuccStack.back().second != SE) {
2413 BasicBlock *SuccBB = *SuccStack.back().second++;
2414 if (Visited.insert(SuccBB)) {
2415 TerminatorInst *TI = cast<TerminatorInst>(&SuccBB->back());
2416 SuccStack.push_back(std::make_pair(SuccBB, succ_iterator(TI)));
2417 BBStates[CurrBB].addSucc(SuccBB);
2418 BBState &SuccStates = BBStates[SuccBB];
2419 SuccStates.addPred(CurrBB);
2420 OnStack.insert(SuccBB);
2424 if (!OnStack.count(SuccBB)) {
2425 BBStates[CurrBB].addSucc(SuccBB);
2426 BBStates[SuccBB].addPred(CurrBB);
2429 OnStack.erase(CurrBB);
2430 PostOrder.push_back(CurrBB);
2431 SuccStack.pop_back();
2432 } while (!SuccStack.empty());
2436 // Do reverse-CFG DFS, computing the reverse-CFG PostOrder.
2437 // Functions may have many exits, and there are also blocks which we treat
2438 // as exits due to ignored edges.
2439 SmallVector<std::pair<BasicBlock *, BBState::edge_iterator>, 16> PredStack;
2440 for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
2441 BasicBlock *ExitBB = I;
2442 BBState &MyStates = BBStates[ExitBB];
2443 if (!MyStates.isExit())
2446 MyStates.SetAsExit();
2448 PredStack.push_back(std::make_pair(ExitBB, MyStates.pred_begin()));
2449 Visited.insert(ExitBB);
2450 while (!PredStack.empty()) {
2451 reverse_dfs_next_succ:
2452 BBState::edge_iterator PE = BBStates[PredStack.back().first].pred_end();
2453 while (PredStack.back().second != PE) {
2454 BasicBlock *BB = *PredStack.back().second++;
2455 if (Visited.insert(BB)) {
2456 PredStack.push_back(std::make_pair(BB, BBStates[BB].pred_begin()));
2457 goto reverse_dfs_next_succ;
2460 ReverseCFGPostOrder.push_back(PredStack.pop_back_val().first);
2465 // Visit the function both top-down and bottom-up.
2467 ObjCARCOpt::Visit(Function &F,
2468 DenseMap<const BasicBlock *, BBState> &BBStates,
2469 MapVector<Value *, RRInfo> &Retains,
2470 DenseMap<Value *, RRInfo> &Releases) {
2472 // Use reverse-postorder traversals, because we magically know that loops
2473 // will be well behaved, i.e. they won't repeatedly call retain on a single
2474 // pointer without doing a release. We can't use the ReversePostOrderTraversal
2475 // class here because we want the reverse-CFG postorder to consider each
2476 // function exit point, and we want to ignore selected cycle edges.
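// Visiting in reverse postorder means that, ignored back edges aside, each
// block is processed only after all of its successors (bottom-up) or
// predecessors (top-down) have been visited and merged.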
2477 SmallVector<BasicBlock *, 16> PostOrder;
2478 SmallVector<BasicBlock *, 16> ReverseCFGPostOrder;
2479 ComputePostOrders(F, PostOrder, ReverseCFGPostOrder,
2480 NoObjCARCExceptionsMDKind,
2483 // Use reverse-postorder on the reverse CFG for bottom-up.
2484 bool BottomUpNestingDetected = false;
2485 for (SmallVectorImpl<BasicBlock *>::const_reverse_iterator I =
2486 ReverseCFGPostOrder.rbegin(), E = ReverseCFGPostOrder.rend();
2488 BottomUpNestingDetected |= VisitBottomUp(*I, BBStates, Retains);
2490 // Use reverse-postorder for top-down.
2491 bool TopDownNestingDetected = false;
2492 for (SmallVectorImpl<BasicBlock *>::const_reverse_iterator I =
2493 PostOrder.rbegin(), E = PostOrder.rend();
2495 TopDownNestingDetected |= VisitTopDown(*I, BBStates, Releases);
2497 return TopDownNestingDetected && BottomUpNestingDetected;
2500 /// Move the calls in RetainsToMove and ReleasesToMove.
2501 void ObjCARCOpt::MoveCalls(Value *Arg,
2502 RRInfo &RetainsToMove,
2503 RRInfo &ReleasesToMove,
2504 MapVector<Value *, RRInfo> &Retains,
2505 DenseMap<Value *, RRInfo> &Releases,
2506 SmallVectorImpl<Instruction *> &DeadInsts,
Module *M) {
2508 Type *ArgTy = Arg->getType();
2509 Type *ParamTy = PointerType::getUnqual(Type::getInt8Ty(ArgTy->getContext()));
2511 DEBUG(dbgs() << "== ObjCARCOpt::MoveCalls ==\n");
2513 // Insert the new retain and release calls.
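// Note the crossover: new retains are created at the insertion points
// recorded for the releases (roughly, just before the first point at which
// the object may be released), and new releases at the points recorded for
// the retains (roughly, just after the last use).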
2514 for (SmallPtrSet<Instruction *, 2>::const_iterator
2515 PI = ReleasesToMove.ReverseInsertPts.begin(),
2516 PE = ReleasesToMove.ReverseInsertPts.end(); PI != PE; ++PI) {
2517 Instruction *InsertPt = *PI;
2518 Value *MyArg = ArgTy == ParamTy ? Arg :
2519 new BitCastInst(Arg, ParamTy, "", InsertPt);
2521 CallInst *Call = CallInst::Create(getRetainCallee(M), MyArg, "", InsertPt);
2522 Call->setDoesNotThrow();
2523 Call->setTailCall();
2525 DEBUG(dbgs() << "Inserting new Retain: " << *Call << "\n"
2526 "At insertion point: " << *InsertPt << "\n");
2528 for (SmallPtrSet<Instruction *, 2>::const_iterator
2529 PI = RetainsToMove.ReverseInsertPts.begin(),
2530 PE = RetainsToMove.ReverseInsertPts.end(); PI != PE; ++PI) {
2531 Instruction *InsertPt = *PI;
2532 Value *MyArg = ArgTy == ParamTy ? Arg :
2533 new BitCastInst(Arg, ParamTy, "", InsertPt);
2534 CallInst *Call = CallInst::Create(getReleaseCallee(M), MyArg, "", InsertPt);
2536 // Attach a clang.imprecise_release metadata tag, if appropriate.
2537 if (MDNode *M = ReleasesToMove.ReleaseMetadata)
2538 Call->setMetadata(ImpreciseReleaseMDKind, M);
2539 Call->setDoesNotThrow();
2540 if (ReleasesToMove.IsTailCallRelease)
2541 Call->setTailCall();
2543 DEBUG(dbgs() << "Inserting new Release: " << *Call << "\n"
2544 "At insertion point: " << *InsertPt << "\n");
2547 // Delete the original retain and release calls.
2548 for (SmallPtrSet<Instruction *, 2>::const_iterator
2549 AI = RetainsToMove.Calls.begin(),
2550 AE = RetainsToMove.Calls.end(); AI != AE; ++AI) {
2551 Instruction *OrigRetain = *AI;
2552 Retains.blot(OrigRetain);
2553 DeadInsts.push_back(OrigRetain);
2554 DEBUG(dbgs() << "Deleting retain: " << *OrigRetain << "\n");
2556 for (SmallPtrSet<Instruction *, 2>::const_iterator
2557 AI = ReleasesToMove.Calls.begin(),
2558 AE = ReleasesToMove.Calls.end(); AI != AE; ++AI) {
2559 Instruction *OrigRelease = *AI;
2560 Releases.erase(OrigRelease);
2561 DeadInsts.push_back(OrigRelease);
2562 DEBUG(dbgs() << "Deleting release: " << *OrigRelease << "\n");
2568 ObjCARCOpt::ConnectTDBUTraversals(DenseMap<const BasicBlock *, BBState>
&BBStates,
2570 MapVector<Value *, RRInfo> &Retains,
2571 DenseMap<Value *, RRInfo> &Releases,
Module *M,
2573 SmallVector<Instruction *, 4> &NewRetains,
2574 SmallVector<Instruction *, 4> &NewReleases,
2575 SmallVector<Instruction *, 8> &DeadInsts,
2576 RRInfo &RetainsToMove,
2577 RRInfo &ReleasesToMove,
Value *Arg,
bool KnownSafe,
2580 bool &AnyPairsCompletelyEliminated) {
2581 // If a pair happens in a region where it is known that the reference count
2582 // is already incremented, we can similarly ignore possible decrements unless
2583 // we are dealing with a retainable object with multiple provenance sources.
2584 bool KnownSafeTD = true, KnownSafeBU = true;
2585 bool MultipleOwners = false;
2586 bool CFGHazardAfflicted = false;
2588 // Connect the dots between the top-down-collected RetainsToMove and
2589 // bottom-up-collected ReleasesToMove to form sets of related calls.
2590 // This is an iterative process so that we connect multiple releases
2591 // to multiple retains if needed.
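// For example (hypothetical CFG): with one retain above a diamond and a
// release in each arm, the first pass pairs the retain with one release;
// each newly added release can pull in further retains, so we alternate
// directions until neither set grows.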
2592 unsigned OldDelta = 0;
2593 unsigned NewDelta = 0;
2594 unsigned OldCount = 0;
2595 unsigned NewCount = 0;
2596 bool FirstRelease = true;
2598 for (SmallVectorImpl<Instruction *>::const_iterator
2599 NI = NewRetains.begin(), NE = NewRetains.end(); NI != NE; ++NI) {
2600 Instruction *NewRetain = *NI;
2601 MapVector<Value *, RRInfo>::const_iterator It = Retains.find(NewRetain);
2602 assert(It != Retains.end());
2603 const RRInfo &NewRetainRRI = It->second;
2604 KnownSafeTD &= NewRetainRRI.KnownSafe;
2606 MultipleOwners = MultipleOwners || MultiOwnersSet.count(GetObjCArg(NewRetain));
2607 for (SmallPtrSet<Instruction *, 2>::const_iterator
2608 LI = NewRetainRRI.Calls.begin(),
2609 LE = NewRetainRRI.Calls.end(); LI != LE; ++LI) {
2610 Instruction *NewRetainRelease = *LI;
2611 DenseMap<Value *, RRInfo>::const_iterator Jt =
2612 Releases.find(NewRetainRelease);
2613 if (Jt == Releases.end())
2615 const RRInfo &NewRetainReleaseRRI = Jt->second;
2616 assert(NewRetainReleaseRRI.Calls.count(NewRetain));
2617 if (ReleasesToMove.Calls.insert(NewRetainRelease)) {
2619 // If we overflow when we compute the path count, don't remove/move anything.
2621 const BBState &NRRBBState = BBStates[NewRetainRelease->getParent()];
unsigned PathCount;
2623 if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
2625 OldDelta -= PathCount;
2627 // Merge the ReleaseMetadata and IsTailCallRelease values.
2629 ReleasesToMove.ReleaseMetadata =
2630 NewRetainReleaseRRI.ReleaseMetadata;
2631 ReleasesToMove.IsTailCallRelease =
2632 NewRetainReleaseRRI.IsTailCallRelease;
2633 FirstRelease = false;
2635 if (ReleasesToMove.ReleaseMetadata !=
2636 NewRetainReleaseRRI.ReleaseMetadata)
2637 ReleasesToMove.ReleaseMetadata = 0;
2638 if (ReleasesToMove.IsTailCallRelease !=
2639 NewRetainReleaseRRI.IsTailCallRelease)
2640 ReleasesToMove.IsTailCallRelease = false;
2643 // Collect the optimal insertion points.
2645 for (SmallPtrSet<Instruction *, 2>::const_iterator
2646 RI = NewRetainReleaseRRI.ReverseInsertPts.begin(),
2647 RE = NewRetainReleaseRRI.ReverseInsertPts.end();
2649 Instruction *RIP = *RI;
2650 if (ReleasesToMove.ReverseInsertPts.insert(RIP)) {
2651 // If we overflow when we compute the path count, don't
2652 // remove/move anything.
2653 const BBState &RIPBBState = BBStates[RIP->getParent()];
2654 if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
2656 NewDelta -= PathCount;
2659 NewReleases.push_back(NewRetainRelease);
2664 if (NewReleases.empty()) break;
2666 // Back the other way.
2667 for (SmallVectorImpl<Instruction *>::const_iterator
2668 NI = NewReleases.begin(), NE = NewReleases.end(); NI != NE; ++NI) {
2669 Instruction *NewRelease = *NI;
2670 DenseMap<Value *, RRInfo>::const_iterator It =
2671 Releases.find(NewRelease);
2672 assert(It != Releases.end());
2673 const RRInfo &NewReleaseRRI = It->second;
2674 KnownSafeBU &= NewReleaseRRI.KnownSafe;
2675 CFGHazardAfflicted |= NewReleaseRRI.CFGHazardAfflicted;
2676 for (SmallPtrSet<Instruction *, 2>::const_iterator
2677 LI = NewReleaseRRI.Calls.begin(),
2678 LE = NewReleaseRRI.Calls.end(); LI != LE; ++LI) {
2679 Instruction *NewReleaseRetain = *LI;
2680 MapVector<Value *, RRInfo>::const_iterator Jt =
2681 Retains.find(NewReleaseRetain);
2682 if (Jt == Retains.end())
2684 const RRInfo &NewReleaseRetainRRI = Jt->second;
2685 assert(NewReleaseRetainRRI.Calls.count(NewRelease));
2686 if (RetainsToMove.Calls.insert(NewReleaseRetain)) {
2688 // If we overflow when we compute the path count, don't remove/move anything.
2690 const BBState &NRRBBState = BBStates[NewReleaseRetain->getParent()];
unsigned PathCount;
2692 if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
2694 OldDelta += PathCount;
2695 OldCount += PathCount;
2697 // Collect the optimal insertion points.
2699 for (SmallPtrSet<Instruction *, 2>::const_iterator
2700 RI = NewReleaseRetainRRI.ReverseInsertPts.begin(),
2701 RE = NewReleaseRetainRRI.ReverseInsertPts.end();
2703 Instruction *RIP = *RI;
2704 if (RetainsToMove.ReverseInsertPts.insert(RIP)) {
2705 // If we overflow when we compute the path count, don't
2706 // remove/move anything.
2707 const BBState &RIPBBState = BBStates[RIP->getParent()];
2708 if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
2710 NewDelta += PathCount;
2711 NewCount += PathCount;
2714 NewRetains.push_back(NewReleaseRetain);
2718 NewReleases.clear();
2719 if (NewRetains.empty()) break;
2722 // If the pointer is known incremented in one direction and we do not have
2723 // MultipleOwners, we can safely remove the retain/releases. Otherwise we need
2724 // to be known safe in both directions.
2725 bool UnconditionallySafe = (KnownSafeTD && KnownSafeBU) ||
2726 ((KnownSafeTD || KnownSafeBU) && !MultipleOwners);
2727 if (UnconditionallySafe) {
2728 RetainsToMove.ReverseInsertPts.clear();
2729 ReleasesToMove.ReverseInsertPts.clear();
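// With the reverse insertion points cleared, MoveCalls will insert no new
// calls and simply delete the originals, eliminating the pair outright.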
2732 // Determine whether the new insertion points we computed preserve the
2733 // balance of retain and release calls through the program.
2734 // TODO: If the fully aggressive solution isn't valid, try to find a
2735 // less aggressive solution which is.
2739 // At this point, we are not going to remove any RR pairs, but we still are
2740 // able to move RR pairs. If one of our pointers is afflicted with
2741 // CFGHazards, we cannot perform such code motion so exit early.
2742 const bool WillPerformCodeMotion = RetainsToMove.ReverseInsertPts.size() ||
2743 ReleasesToMove.ReverseInsertPts.size();
2744 if (CFGHazardAfflicted && WillPerformCodeMotion)
2748 // Determine whether the original call points are balanced in the retain and
2749 // release calls through the program. If not, conservatively don't touch anything.
2751 // TODO: It's theoretically possible to do code motion in this case, as
2752 // long as the existing imbalances are maintained.
2756 #ifdef ARC_ANNOTATIONS
2757 // Do not move calls if ARC annotations are requested.
2758 if (EnableARCAnnotations)
return false;
2760 #endif // ARC_ANNOTATIONS
2763 assert(OldCount != 0 && "Unreachable code?");
2764 NumRRs += OldCount - NewCount;
2765 // Set to true if we completely removed any RR pairs.
2766 AnyPairsCompletelyEliminated = NewCount == 0;
2768 // We can move calls!
2772 /// Identify pairings between the retains and releases, and delete and/or move them.
2775 ObjCARCOpt::PerformCodePlacement(DenseMap<const BasicBlock *, BBState>
&BBStates,
2777 MapVector<Value *, RRInfo> &Retains,
2778 DenseMap<Value *, RRInfo> &Releases,
Module *M) {
2780 DEBUG(dbgs() << "\n== ObjCARCOpt::PerformCodePlacement ==\n");
2782 bool AnyPairsCompletelyEliminated = false;
2783 RRInfo RetainsToMove;
2784 RRInfo ReleasesToMove;
2785 SmallVector<Instruction *, 4> NewRetains;
2786 SmallVector<Instruction *, 4> NewReleases;
2787 SmallVector<Instruction *, 8> DeadInsts;
2789 // Visit each retain.
2790 for (MapVector<Value *, RRInfo>::const_iterator I = Retains.begin(),
2791 E = Retains.end(); I != E; ++I) {
2792 Value *V = I->first;
2793 if (!V) continue; // blotted
2795 Instruction *Retain = cast<Instruction>(V);
2797 DEBUG(dbgs() << "Visiting: " << *Retain << "\n");
2799 Value *Arg = GetObjCArg(Retain);
2801 // If the object being released is in static or stack storage, we know it's
2802 // not being managed by ObjC reference counting, so we can delete pairs
2803 // regardless of what possible decrements or uses lie between them.
2804 bool KnownSafe = isa<Constant>(Arg) || isa<AllocaInst>(Arg);
2806 // A constant pointer can't be pointing to an object on the heap. It may
2807 // be reference-counted, but it won't be deleted.
2808 if (const LoadInst *LI = dyn_cast<LoadInst>(Arg))
2809 if (const GlobalVariable *GV =
2810 dyn_cast<GlobalVariable>(
2811 StripPointerCastsAndObjCCalls(LI->getPointerOperand())))
2812 if (GV->isConstant())
2815 // Connect the dots between the top-down-collected RetainsToMove and
2816 // bottom-up-collected ReleasesToMove to form sets of related calls.
2817 NewRetains.push_back(Retain);
2818 bool PerformMoveCalls =
2819 ConnectTDBUTraversals(BBStates, Retains, Releases, M, NewRetains,
2820 NewReleases, DeadInsts, RetainsToMove,
2821 ReleasesToMove, Arg, KnownSafe,
2822 AnyPairsCompletelyEliminated);
2824 if (PerformMoveCalls) {
2825 // Ok, everything checks out and we're all set. Let's move/delete some calls.
2827 MoveCalls(Arg, RetainsToMove, ReleasesToMove,
2828 Retains, Releases, DeadInsts, M);
2831 // Clean up state for next retain.
2832 NewReleases.clear();
2834 RetainsToMove.clear();
2835 ReleasesToMove.clear();
2838 // Now that we're done moving everything, we can delete the newly dead
2839 // instructions, as we no longer need them as insert points.
2840 while (!DeadInsts.empty())
2841 EraseInstruction(DeadInsts.pop_back_val());
2843 return AnyPairsCompletelyEliminated;
2846 /// Weak pointer optimizations.
2847 void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
2848 DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeWeakCalls ==\n");
2850 // First, do memdep-style RLE (redundant load elimination) and S2L
2851 // (store-to-load forwarding) optimizations. We can't use memdep itself
// because it uses AliasAnalysis and we need to do provenance queries instead.
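// For instance (hypothetical IR), a reload of the same weak pointer with no
// intervening weak-pointer writes:
//   %a = call i8* @objc_loadWeak(i8** %p)
//   %b = call i8* @objc_loadWeak(i8** %p)
// allows the uses of %b to be replaced with %a (plus a plain retain if the
// second load was objc_loadWeakRetained).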
2853 for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
2854 Instruction *Inst = &*I++;
2856 DEBUG(dbgs() << "Visiting: " << *Inst << "\n");
2858 InstructionClass Class = GetBasicInstructionClass(Inst);
2859 if (Class != IC_LoadWeak && Class != IC_LoadWeakRetained)
2862 // Delete objc_loadWeak calls with no users.
2863 if (Class == IC_LoadWeak && Inst->use_empty()) {
2864 Inst->eraseFromParent();
2868 // TODO: For now, just look for an earlier available version of this value
2869 // within the same block. Theoretically, we could do memdep-style non-local
2870 // analysis too, but that would want caching. A better approach would be to
2871 // use the technique that EarlyCSE uses.
2872 inst_iterator Current = llvm::prior(I);
2873 BasicBlock *CurrentBB = Current.getBasicBlockIterator();
2874 for (BasicBlock::iterator B = CurrentBB->begin(),
2875 J = Current.getInstructionIterator();
J != B; --J) {
2877 Instruction *EarlierInst = &*llvm::prior(J);
2878 InstructionClass EarlierClass = GetInstructionClass(EarlierInst);
2879 switch (EarlierClass) {
case IC_LoadWeak:
2881 case IC_LoadWeakRetained: {
2882 // If this is loading from the same pointer, replace this load's value
// with the value loaded earlier.
2884 CallInst *Call = cast<CallInst>(Inst);
2885 CallInst *EarlierCall = cast<CallInst>(EarlierInst);
2886 Value *Arg = Call->getArgOperand(0);
2887 Value *EarlierArg = EarlierCall->getArgOperand(0);
2888 switch (PA.getAA()->alias(Arg, EarlierArg)) {
2889 case AliasAnalysis::MustAlias:
2891 // If the load has a builtin retain, insert a plain retain for it.
2892 if (Class == IC_LoadWeakRetained) {
2894 CallInst::Create(getRetainCallee(F.getParent()), EarlierCall, "", Call);
2898 // Zap the fully redundant load.
2899 Call->replaceAllUsesWith(EarlierCall);
2900 Call->eraseFromParent();
2902 case AliasAnalysis::MayAlias:
2903 case AliasAnalysis::PartialAlias:
2905 case AliasAnalysis::NoAlias:
2912 // If this is storing to the same pointer and has the same size etc.,
2913 // replace this load's value with the stored value.
2914 CallInst *Call = cast<CallInst>(Inst);
2915 CallInst *EarlierCall = cast<CallInst>(EarlierInst);
2916 Value *Arg = Call->getArgOperand(0);
2917 Value *EarlierArg = EarlierCall->getArgOperand(0);
2918 switch (PA.getAA()->alias(Arg, EarlierArg)) {
2919 case AliasAnalysis::MustAlias:
2921 // If the load has a builtin retain, insert a plain retain for it.
2922 if (Class == IC_LoadWeakRetained) {
2924 CallInst::Create(getRetainCallee(F.getParent()), EarlierCall, "", Call);
2928 // Zap the fully redundant load.
2929 Call->replaceAllUsesWith(EarlierCall->getArgOperand(1));
2930 Call->eraseFromParent();
2932 case AliasAnalysis::MayAlias:
2933 case AliasAnalysis::PartialAlias:
2935 case AliasAnalysis::NoAlias:
2942 // TODO: Grab the copied value.
2944 case IC_AutoreleasepoolPush:
2946 case IC_IntrinsicUser:
2948 // Weak pointers are only modified through the weak entry points
2949 // (and arbitrary calls, which could call the weak entry points).
2952 // Anything else could modify the weak pointer.
2959 // Then, for each destroyWeak with an alloca operand, check to see if
2960 // the alloca and all its users can be zapped.
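// E.g. (hypothetical sketch), a __weak local that is only ever initialized
// and destroyed:
//   %w = alloca i8*
//   call i8* @objc_initWeak(i8** %w, i8* %v)
//   call void @objc_destroyWeak(i8** %w)
// can be deleted in its entirety.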
2961 for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
2962 Instruction *Inst = &*I++;
2963 InstructionClass Class = GetBasicInstructionClass(Inst);
2964 if (Class != IC_DestroyWeak)
2967 CallInst *Call = cast<CallInst>(Inst);
2968 Value *Arg = Call->getArgOperand(0);
2969 if (AllocaInst *Alloca = dyn_cast<AllocaInst>(Arg)) {
2970 for (Value::use_iterator UI = Alloca->use_begin(),
2971 UE = Alloca->use_end(); UI != UE; ++UI) {
2972 const Instruction *UserInst = cast<Instruction>(*UI);
2973 switch (GetBasicInstructionClass(UserInst)) {
2976 case IC_DestroyWeak:
2983 for (Value::use_iterator UI = Alloca->use_begin(),
2984 UE = Alloca->use_end(); UI != UE; ) {
2985 CallInst *UserInst = cast<CallInst>(*UI++);
2986 switch (GetBasicInstructionClass(UserInst)) {
2989 // These functions return their second argument.
2990 UserInst->replaceAllUsesWith(UserInst->getArgOperand(1));
2992 case IC_DestroyWeak:
2996 llvm_unreachable("alloca really is used!");
2998 UserInst->eraseFromParent();
3000 Alloca->eraseFromParent();
3006 /// Identify program paths which execute sequences of retains and releases that
3007 /// can be eliminated.
3008 bool ObjCARCOpt::OptimizeSequences(Function &F) {
3009 // Releases, Retains - These are used to store the results of the main flow
3010 // analysis. These use Value* as the key instead of Instruction* so that the
3011 // map stays valid when we get around to rewriting code and calls get
3012 // replaced by arguments.
3013 DenseMap<Value *, RRInfo> Releases;
3014 MapVector<Value *, RRInfo> Retains;
3016 // This is used during the traversal of the function to track the
3017 // states for each identified object at each block.
3018 DenseMap<const BasicBlock *, BBState> BBStates;
3020 // Analyze the CFG of the function, and all instructions.
3021 bool NestingDetected = Visit(F, BBStates, Retains, Releases);
3024 bool AnyPairsCompletelyEliminated = PerformCodePlacement(BBStates, Retains,
Releases, F.getParent());
3029 MultiOwnersSet.clear();
3031 return AnyPairsCompletelyEliminated && NestingDetected;
3034 /// Check for an earlier dependent call with nothing in between it and the
3035 /// Retain that can affect the reference count of their shared pointer
3036 /// argument. Note that Retain need not be in BB.
3038 HasSafePathToPredecessorCall(const Value *Arg, Instruction *Retain,
3039 SmallPtrSet<Instruction *, 4> &DepInsts,
3040 SmallPtrSet<const BasicBlock *, 4> &Visited,
3041 ProvenanceAnalysis &PA) {
3042 FindDependencies(CanChangeRetainCount, Arg, Retain->getParent(), Retain,
3043 DepInsts, Visited, PA);
3044 if (DepInsts.size() != 1)
3048 CallInst *Call = dyn_cast_or_null<CallInst>(*DepInsts.begin());
3050 // Check that the pointer is the return value of the call.
3051 if (!Call || Arg != Call)
3054 // Check that the call is a regular call.
3055 InstructionClass Class = GetBasicInstructionClass(Call);
3056 if (Class != IC_CallOrUser && Class != IC_Call)
3062 /// Find a dependent retain that precedes the given autorelease for which there
3063 /// is nothing in between the two instructions that can affect the ref count of Arg.
3066 FindPredecessorRetainWithSafePath(const Value *Arg, BasicBlock *BB,
3067 Instruction *Autorelease,
3068 SmallPtrSet<Instruction *, 4> &DepInsts,
3069 SmallPtrSet<const BasicBlock *, 4> &Visited,
3070 ProvenanceAnalysis &PA) {
3071 FindDependencies(CanChangeRetainCount, Arg,
3072 BB, Autorelease, DepInsts, Visited, PA);
3073 if (DepInsts.size() != 1)
3077 CallInst *Retain = dyn_cast_or_null<CallInst>(*DepInsts.begin());
3079 // Check that we found a retain with the same argument.
if (!Retain ||
3081 !IsRetain(GetBasicInstructionClass(Retain)) ||
3082 GetObjCArg(Retain) != Arg) {
3089 /// Look for an ``autorelease'' instruction dependent on Arg such that there are
3090 /// no instructions dependent on Arg that need a positive ref count in between
3091 /// the autorelease and the ret.
3093 FindPredecessorAutoreleaseWithSafePath(const Value *Arg, BasicBlock *BB,
3095 SmallPtrSet<Instruction *, 4> &DepInsts,
3096 SmallPtrSet<const BasicBlock *, 4> &V,
3097 ProvenanceAnalysis &PA) {
3098 FindDependencies(NeedsPositiveRetainCount, Arg,
3099 BB, Ret, DepInsts, V, PA);
3100 if (DepInsts.size() != 1)
3103 CallInst *Autorelease =
3104 dyn_cast_or_null<CallInst>(*DepInsts.begin());
3107 InstructionClass AutoreleaseClass = GetBasicInstructionClass(Autorelease);
3108 if (!IsAutorelease(AutoreleaseClass))
3110 if (GetObjCArg(Autorelease) != Arg)
3116 /// Look for this pattern:
3118 /// %call = call i8* @something(...)
3119 /// %2 = call i8* @objc_retain(i8* %call)
3120 /// %3 = call i8* @objc_autorelease(i8* %2)
/// ret i8* %3
3123 /// And delete the retain and autorelease.
3124 void ObjCARCOpt::OptimizeReturns(Function &F) {
3125 if (!F.getReturnType()->isPointerTy())
3128 DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeReturns ==\n");
3130 SmallPtrSet<Instruction *, 4> DependingInstructions;
3131 SmallPtrSet<const BasicBlock *, 4> Visited;
3132 for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI) {
3133 BasicBlock *BB = FI;
3134 ReturnInst *Ret = dyn_cast<ReturnInst>(&BB->back());
if (!Ret) continue;
3136 DEBUG(dbgs() << "Visiting: " << *Ret << "\n");
3141 const Value *Arg = StripPointerCastsAndObjCCalls(Ret->getOperand(0));
3143 // Look for an ``autorelease'' instruction that is a predecessor of Ret and
3144 // dependent on Arg such that there are no instructions dependent on Arg
3145 // that need a positive ref count in between the autorelease and Ret.
3146 CallInst *Autorelease =
3147 FindPredecessorAutoreleaseWithSafePath(Arg, BB, Ret,
3148 DependingInstructions, Visited,
3150 DependingInstructions.clear();
3157 CallInst *Retain = FindPredecessorRetainWithSafePath(Arg, BB, Autorelease,
3158 DependingInstructions, Visited, PA);
3159 DependingInstructions.clear();
3165 // Check that there is nothing that can affect the reference count
3166 // between the retain and the call. Note that Retain need not be in BB.
3167 bool HasSafePathToCall = HasSafePathToPredecessorCall(Arg, Retain,
3168 DependingInstructions,
3170 DependingInstructions.clear();
3173 if (!HasSafePathToCall)
3176 // If so, we can zap the retain and autorelease.
3179 DEBUG(dbgs() << "Erasing: " << *Retain << "\nErasing: "
3180 << *Autorelease << "\n");
3181 EraseInstruction(Retain);
3182 EraseInstruction(Autorelease);
3188 ObjCARCOpt::GatherStatistics(Function &F, bool AfterOptimization) {
3189 llvm::Statistic &NumRetains =
3190 AfterOptimization? NumRetainsAfterOpt : NumRetainsBeforeOpt;
3191 llvm::Statistic &NumReleases =
3192 AfterOptimization? NumReleasesAfterOpt : NumReleasesBeforeOpt;
3194 for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
3195 Instruction *Inst = &*I++;
3196 switch (GetBasicInstructionClass(Inst)) {
3210 bool ObjCARCOpt::doInitialization(Module &M) {
3214 // If nothing in the Module uses ARC, don't do anything.
3215 Run = ModuleHasARC(M);
3219 // Identify the imprecise release metadata kind.
3220 ImpreciseReleaseMDKind =
3221 M.getContext().getMDKindID("clang.imprecise_release");
3222 CopyOnEscapeMDKind =
3223 M.getContext().getMDKindID("clang.arc.copy_on_escape");
3224 NoObjCARCExceptionsMDKind =
3225 M.getContext().getMDKindID("clang.arc.no_objc_arc_exceptions");
3226 #ifdef ARC_ANNOTATIONS
3227 ARCAnnotationBottomUpMDKind =
3228 M.getContext().getMDKindID("llvm.arc.annotation.bottomup");
3229 ARCAnnotationTopDownMDKind =
3230 M.getContext().getMDKindID("llvm.arc.annotation.topdown");
3231 ARCAnnotationProvenanceSourceMDKind =
3232 M.getContext().getMDKindID("llvm.arc.annotation.provenancesource");
3233 #endif // ARC_ANNOTATIONS
3235 // Intuitively, objc_retain and others are nocapture; however, in practice
3236 // they are not, because they return their argument value. And objc_release
3237 // calls finalizers which can have arbitrary side effects.
3239 // These are initialized lazily.
3240 AutoreleaseRVCallee = 0;
3243 RetainBlockCallee = 0;
3244 AutoreleaseCallee = 0;
3249 bool ObjCARCOpt::runOnFunction(Function &F) {
3253 // If nothing in the Module uses ARC, don't do anything.
3259 DEBUG(dbgs() << "<<< ObjCARCOpt: Visiting Function: " << F.getName() << " >>>"
<< "\n");
3262 PA.setAA(&getAnalysis<AliasAnalysis>());
3265 if (AreStatisticsEnabled()) {
3266 GatherStatistics(F, false);
3270 // This pass performs several distinct transformations. As a compile-time aid
3271 // when compiling code that isn't ObjC, skip these if the relevant ObjC
3272 // library functions aren't declared.
3274 // Preliminary optimizations. This also computes UsedInThisFunction.
3275 OptimizeIndividualCalls(F);
3277 // Optimizations for weak pointers.
3278 if (UsedInThisFunction & ((1 << IC_LoadWeak) |
3279 (1 << IC_LoadWeakRetained) |
3280 (1 << IC_StoreWeak) |
3281 (1 << IC_InitWeak) |
3282 (1 << IC_CopyWeak) |
3283 (1 << IC_MoveWeak) |
3284 (1 << IC_DestroyWeak)))
3285 OptimizeWeakCalls(F);
3287 // Optimizations for retain+release pairs.
3288 if (UsedInThisFunction & ((1 << IC_Retain) |
3289 (1 << IC_RetainRV) |
3290 (1 << IC_RetainBlock)))
3291 if (UsedInThisFunction & (1 << IC_Release))
3292 // Run OptimizeSequences until it either stops making changes or
3293 // no retain+release pair nesting is detected.
3294 while (OptimizeSequences(F)) {}
3296 // Optimizations if objc_autorelease is used.
3297 if (UsedInThisFunction & ((1 << IC_Autorelease) |
3298 (1 << IC_AutoreleaseRV)))
OptimizeReturns(F);
3301 // Gather statistics after optimization.
3303 if (AreStatisticsEnabled()) {
3304 GatherStatistics(F, true);
3308 DEBUG(dbgs() << "\n");
3313 void ObjCARCOpt::releaseMemory() {