//===- ObjCARCOpts.cpp - ObjC ARC Optimization ----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines ObjC ARC optimizations. ARC stands for Automatic
/// Reference Counting and is a system for managing reference counts for objects
/// in Objective C.
///
/// The optimizations performed include elimination of redundant, partially
/// redundant, and inconsequential reference count operations, elimination of
/// redundant weak pointer operations, and numerous minor simplifications.
///
/// WARNING: This file knows about certain library functions. It recognizes them
/// by name, and hardwires knowledge of their semantics.
///
/// WARNING: This file knows about how certain Objective-C library functions are
/// used. Naive LLVM IR transformations which would otherwise be
/// behavior-preserving may break these assumptions.
///
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "objc-arc-opts"
#include "ObjCARC.h"
#include "ARCRuntimeEntryPoints.h"
#include "DependencyAnalysis.h"
#include "ObjCARCAliasAnalysis.h"
#include "ProvenanceAnalysis.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace llvm::objcarc;

/// \defgroup MiscUtils Miscellaneous utilities that are not ARC specific.
/// @{

/// \brief An associative container with fast insertion-order (deterministic)
/// iteration over its elements. Plus the special blot operation.
template<class KeyT, class ValueT>
class MapVector {
  /// Map keys to indices in Vector.
  typedef DenseMap<KeyT, size_t> MapTy;
  MapTy Map;

  typedef std::vector<std::pair<KeyT, ValueT> > VectorTy;
  VectorTy Vector;

public:
  typedef typename VectorTy::iterator iterator;
  typedef typename VectorTy::const_iterator const_iterator;
  iterator begin() { return Vector.begin(); }
  iterator end() { return Vector.end(); }
  const_iterator begin() const { return Vector.begin(); }
  const_iterator end() const { return Vector.end(); }
#ifdef XDEBUG
  ~MapVector() {
    assert(Vector.size() >= Map.size()); // May differ due to blotting.
    for (typename MapTy::const_iterator I = Map.begin(), E = Map.end();
         I != E; ++I) {
      assert(I->second < Vector.size());
      assert(Vector[I->second].first == I->first);
    }
    for (typename VectorTy::const_iterator I = Vector.begin(),
         E = Vector.end(); I != E; ++I)
      assert(!I->first ||
             (Map.count(I->first) &&
              Map[I->first] == size_t(I - Vector.begin())));
  }
#endif
  ValueT &operator[](const KeyT &Arg) {
    std::pair<typename MapTy::iterator, bool> Pair =
      Map.insert(std::make_pair(Arg, size_t(0)));
    if (Pair.second) {
      size_t Num = Vector.size();
      Pair.first->second = Num;
      Vector.push_back(std::make_pair(Arg, ValueT()));
      return Vector[Num].second;
    }
    return Vector[Pair.first->second].second;
  }
  std::pair<iterator, bool>
  insert(const std::pair<KeyT, ValueT> &InsertPair) {
    std::pair<typename MapTy::iterator, bool> Pair =
      Map.insert(std::make_pair(InsertPair.first, size_t(0)));
    if (Pair.second) {
      size_t Num = Vector.size();
      Pair.first->second = Num;
      Vector.push_back(InsertPair);
      return std::make_pair(Vector.begin() + Num, true);
    }
    return std::make_pair(Vector.begin() + Pair.first->second, false);
  }
  iterator find(const KeyT &Key) {
    typename MapTy::iterator It = Map.find(Key);
    if (It == Map.end()) return Vector.end();
    return Vector.begin() + It->second;
  }

  const_iterator find(const KeyT &Key) const {
    typename MapTy::const_iterator It = Map.find(Key);
    if (It == Map.end()) return Vector.end();
    return Vector.begin() + It->second;
  }
  /// This is similar to erase, but instead of removing the element from the
  /// vector, it just zeros out the key in the vector. This leaves iterators
  /// intact, but clients must be prepared for zeroed-out keys when iterating.
  void blot(const KeyT &Key) {
    typename MapTy::iterator It = Map.find(Key);
    if (It == Map.end()) return;
    Vector[It->second].first = KeyT();
    Map.erase(It);
  }
};
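// Example (a sketch of intended usage; the names KeyA and KeyB are
// hypothetical, not from this file): blot() preserves insertion order and
// iterator validity where erase() would not.
//
//   MapVector<const Value *, unsigned> MV;
//   MV[KeyA] = 1;
//   MV[KeyB] = 2;
//   MV.blot(KeyA);
//   // Iteration still visits two slots in insertion order; the first slot
//   // now holds a default-constructed (null) key that clients must skip.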
/// \defgroup ARCUtilities Utility declarations/definitions specific to ARC.
/// @{

/// \brief This is similar to StripPointerCastsAndObjCCalls but it stops as soon
/// as it finds a value with multiple uses.
static const Value *FindSingleUseIdentifiedObject(const Value *Arg) {
  if (Arg->hasOneUse()) {
    if (const BitCastInst *BC = dyn_cast<BitCastInst>(Arg))
      return FindSingleUseIdentifiedObject(BC->getOperand(0));
    if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Arg))
      if (GEP->hasAllZeroIndices())
        return FindSingleUseIdentifiedObject(GEP->getPointerOperand());
    if (IsForwarding(GetBasicInstructionClass(Arg)))
      return FindSingleUseIdentifiedObject(
               cast<CallInst>(Arg)->getArgOperand(0));
    if (!IsObjCIdentifiedObject(Arg))
      return 0;
    return Arg;
  }

  // If we found an identifiable object that has multiple uses, but they are
  // all trivial uses, we can still consider this to be a single-use value.
  if (IsObjCIdentifiedObject(Arg)) {
    for (Value::const_use_iterator UI = Arg->use_begin(), UE = Arg->use_end();
         UI != UE; ++UI) {
      const User *U = *UI;
      if (!U->use_empty() || StripPointerCastsAndObjCCalls(U) != Arg)
        return 0;
    }

    return Arg;
  }

  return 0;
}
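// Example (illustrative, not from the original file): given
//   %c = bitcast i8* %obj to i8*   ; %c itself has exactly one use
// where %obj is an ObjC-identified object (say, the result of an alloc-family
// call) and the bitcast is %obj's only use,
// FindSingleUseIdentifiedObject(%c) recurses through the bitcast and
// returns %obj.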
/// \brief Test whether the given retainable object pointer escapes.
///
/// This differs from regular escape analysis in that a use as an
/// argument to a call is not considered an escape.
///
static bool DoesRetainableObjPtrEscape(const User *Ptr) {
  DEBUG(dbgs() << "DoesRetainableObjPtrEscape: Target: " << *Ptr << "\n");

  // Walk the def-use chains.
  SmallVector<const Value *, 4> Worklist;
  Worklist.push_back(Ptr);
  // If Ptr has any operands add them as well.
  for (User::const_op_iterator I = Ptr->op_begin(), E = Ptr->op_end(); I != E;
       ++I)
    Worklist.push_back(*I);

  // Ensure we do not visit any value twice.
  SmallPtrSet<const Value *, 8> VisitedSet;

  do {
    const Value *V = Worklist.pop_back_val();

    DEBUG(dbgs() << "Visiting: " << *V << "\n");

    for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end();
         UI != UE; ++UI) {
      const User *UUser = *UI;

      DEBUG(dbgs() << "User: " << *UUser << "\n");

      // Special - Use by a call (callee or argument) is not considered
      // to be an escape.
      switch (GetBasicInstructionClass(UUser)) {
      case IC_StoreWeak:
      case IC_InitWeak:
      case IC_StoreStrong:
      case IC_Autorelease:
      case IC_AutoreleaseRV: {
        DEBUG(dbgs() << "User copies pointer arguments. Pointer Escapes!\n");
        // These special functions make copies of their pointer arguments.
        return true;
      }
      case IC_IntrinsicUser:
        // Use by the use intrinsic is not an escape.
        continue;
      case IC_User:
      case IC_None:
        // Use by an instruction which copies the value is an escape if the
        // result is an escape.
        if (isa<BitCastInst>(UUser) || isa<GetElementPtrInst>(UUser) ||
            isa<PHINode>(UUser) || isa<SelectInst>(UUser)) {

          if (VisitedSet.insert(UUser)) {
            DEBUG(dbgs() << "User copies value. Ptr escapes if result escapes."
                            " Adding to list.\n");
            Worklist.push_back(UUser);
          } else {
            DEBUG(dbgs() << "Already visited node.\n");
          }
          continue;
        }

        // Use by a load is not an escape.
        if (isa<LoadInst>(UUser))
          continue;

        // Use by a store is not an escape if the use is the address.
        if (const StoreInst *SI = dyn_cast<StoreInst>(UUser))
          if (V != SI->getValueOperand())
            continue;
        break;
      default:
        // Regular calls and other stuff are not considered escapes.
        continue;
      }

      // Otherwise, conservatively assume an escape.
      DEBUG(dbgs() << "Assuming ptr escapes.\n");
      return true;
    }
  } while (!Worklist.empty());

  // No escapes found.
  DEBUG(dbgs() << "Ptr does not escape.\n");
  return false;
}
/// This is a wrapper around getUnderlyingObjCPtr along the lines of
/// GetUnderlyingObjects except that it returns early when it sees the first
/// alloca.
static inline bool AreAnyUnderlyingObjectsAnAlloca(const Value *V) {
  SmallPtrSet<const Value *, 4> Visited;
  SmallVector<const Value *, 4> Worklist;
  Worklist.push_back(V);
  do {
    const Value *P = Worklist.pop_back_val();
    P = GetUnderlyingObjCPtr(P);

    if (isa<AllocaInst>(P))
      return true;

    if (!Visited.insert(P))
      continue;

    if (const SelectInst *SI = dyn_cast<const SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (const PHINode *PN = dyn_cast<const PHINode>(P)) {
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        Worklist.push_back(PN->getIncomingValue(i));
      continue;
    }
  } while (!Worklist.empty());

  return false;
}

/// @}
/// \defgroup ARCOpt ARC Optimization.
/// @{

// TODO: On code like this:
//
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
// stuff_that_cannot_release()
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
//
// The second retain and autorelease can be deleted.

// TODO: It should be possible to delete
// objc_autoreleasePoolPush and objc_autoreleasePoolPop
// pairs if nothing is actually autoreleased between them. Also, autorelease
// calls followed by objc_autoreleasePoolPop calls (perhaps in ObjC++ code
// after inlining) can be turned into plain release calls.

// TODO: Critical-edge splitting. If the optimal insertion point is
// a critical edge, the current algorithm has to fail, because it doesn't
// know how to split edges. It should be possible to make the optimizer
// think in terms of edges, rather than blocks, and then split critical
// edges on demand.

// TODO: OptimizeSequences could be generalized to be Interprocedural.

// TODO: Recognize that a bunch of other objc runtime calls have
// non-escaping arguments and non-releasing arguments, and may be
// non-autoreleasing.

// TODO: Sink autorelease calls as far as possible. Unfortunately we
// usually can't sink them past other calls, which would be the main
// case where it would be useful.

// TODO: The pointer returned from objc_loadWeakRetained is retained.

// TODO: Delete release+retain pairs (rare).
STATISTIC(NumNoops,        "Number of no-op objc calls eliminated");
STATISTIC(NumPartialNoops, "Number of partially no-op objc calls eliminated");
STATISTIC(NumAutoreleases, "Number of autoreleases converted to releases");
STATISTIC(NumRets,         "Number of return value forwarding "
                           "retain+autoreleases eliminated");
STATISTIC(NumRRs,          "Number of retain+release paths eliminated");
STATISTIC(NumPeeps,        "Number of calls peephole-optimized");

STATISTIC(NumRetainsBeforeOpt,
          "Number of retains before optimization");
STATISTIC(NumReleasesBeforeOpt,
          "Number of releases before optimization");
STATISTIC(NumRetainsAfterOpt,
          "Number of retains after optimization");
STATISTIC(NumReleasesAfterOpt,
          "Number of releases after optimization");
/// \brief A sequence of states that a pointer may go through in which an
/// objc_retain and objc_release are actually needed.
enum Sequence {
  S_None,
  S_Retain,         ///< objc_retain(x).
  S_CanRelease,     ///< foo(x) -- x could possibly see a ref count decrement.
  S_Use,            ///< any use of x.
  S_Stop,           ///< like S_Release, but code motion is stopped.
  S_Release,        ///< objc_release(x).
  S_MovableRelease  ///< objc_release(x), !clang.imprecise_release.
};

raw_ostream &operator<<(raw_ostream &OS, const Sequence S)
  LLVM_ATTRIBUTE_UNUSED;
raw_ostream &operator<<(raw_ostream &OS, const Sequence S) {
  switch (S) {
  case S_None:
    return OS << "S_None";
  case S_Retain:
    return OS << "S_Retain";
  case S_CanRelease:
    return OS << "S_CanRelease";
  case S_Use:
    return OS << "S_Use";
  case S_Release:
    return OS << "S_Release";
  case S_MovableRelease:
    return OS << "S_MovableRelease";
  case S_Stop:
    return OS << "S_Stop";
  }
  llvm_unreachable("Unknown sequence type.");
}
static Sequence MergeSeqs(Sequence A, Sequence B, bool TopDown) {
  // The easy cases.
  if (A == B)
    return A;
  if (A == S_None || B == S_None)
    return S_None;

  if (A > B) std::swap(A, B);
  if (TopDown) {
    // Choose the side which is further along in the sequence.
    if ((A == S_Retain || A == S_CanRelease) &&
        (B == S_CanRelease || B == S_Use))
      return B;
  } else {
    // Choose the side which is further along in the sequence.
    if ((A == S_Use || A == S_CanRelease) &&
        (B == S_Use || B == S_Release || B == S_Stop || B == S_MovableRelease))
      return A;
    // If both sides are releases, choose the more conservative one.
    if (A == S_Stop && (B == S_Release || B == S_MovableRelease))
      return A;
    if (A == S_Release && B == S_MovableRelease)
      return A;
  }

  return S_None;
}
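// Example (illustrative, not from the original file): merging the bottom-up
// states S_Use and S_MovableRelease yields S_Use, the state that is further
// along in the bottom-up sequence, while merging S_Retain with anything other
// than itself bottom-up falls through to S_None.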
/// \brief Unidirectional information about either a
/// retain-decrement-use-release sequence or release-use-decrement-retain
/// reverse sequence.
struct RRInfo {
  /// After an objc_retain, the reference count of the referenced
  /// object is known to be positive. Similarly, before an objc_release, the
  /// reference count of the referenced object is known to be positive. If
  /// there are retain-release pairs in code regions where the retain count
  /// is known to be positive, they can be eliminated, regardless of any side
  /// effects between them.
  ///
  /// Also, a retain+release pair nested within another retain+release
  /// pair all on the known same pointer value can be eliminated, regardless
  /// of any intervening side effects.
  ///
  /// KnownSafe is true when either of these conditions is satisfied.
  bool KnownSafe;

  /// True if the objc_release calls are all marked with the "tail" keyword.
  bool IsTailCallRelease;

  /// If the Calls are objc_release calls and they all have a
  /// clang.imprecise_release tag, this is the metadata tag.
  MDNode *ReleaseMetadata;

  /// For a top-down sequence, the set of objc_retains or
  /// objc_retainBlocks. For bottom-up, the set of objc_releases.
  SmallPtrSet<Instruction *, 2> Calls;

  /// The set of optimal insert positions for moving calls in the opposite
  /// sequence direction.
  SmallPtrSet<Instruction *, 2> ReverseInsertPts;

  /// If this is true, we cannot perform code motion but can still remove
  /// retain/release pairs.
  bool CFGHazardAfflicted;

  RRInfo() :
    KnownSafe(false), IsTailCallRelease(false), ReleaseMetadata(0),
    CFGHazardAfflicted(false) {}

  void clear();

  /// Conservatively merge the two RRInfo. Returns true if a partial merge has
  /// occurred, false otherwise.
  bool Merge(const RRInfo &Other);
};
void RRInfo::clear() {
  KnownSafe = false;
  IsTailCallRelease = false;
  ReleaseMetadata = 0;
  Calls.clear();
  ReverseInsertPts.clear();
  CFGHazardAfflicted = false;
}

bool RRInfo::Merge(const RRInfo &Other) {
  // Conservatively merge the ReleaseMetadata information.
  if (ReleaseMetadata != Other.ReleaseMetadata)
    ReleaseMetadata = 0;

  // Conservatively merge the boolean state.
  KnownSafe &= Other.KnownSafe;
  IsTailCallRelease &= Other.IsTailCallRelease;
  CFGHazardAfflicted |= Other.CFGHazardAfflicted;

  // Merge the call sets.
  Calls.insert(Other.Calls.begin(), Other.Calls.end());

  // Merge the insert point sets. If there are any differences,
  // that makes this a partial merge.
  bool Partial = ReverseInsertPts.size() != Other.ReverseInsertPts.size();
  for (SmallPtrSet<Instruction *, 2>::const_iterator
         I = Other.ReverseInsertPts.begin(),
         E = Other.ReverseInsertPts.end(); I != E; ++I)
    Partial |= ReverseInsertPts.insert(*I);

  return Partial;
}
/// \brief This class summarizes several per-pointer runtime properties which
/// are propagated through the flow graph.
class PtrState {
  /// True if the reference count is known to be incremented.
  bool KnownPositiveRefCount;

  /// True if we've seen an opportunity for partial RR elimination, such as
  /// pushing calls into a CFG triangle or into one side of a CFG diamond.
  bool Partial;

  /// The current position in the sequence.
  Sequence Seq : 8;

  /// Unidirectional information about the current sequence.
  RRInfo RRI;

public:
  PtrState() : KnownPositiveRefCount(false), Partial(false),
               Seq(S_None) {}

  bool IsKnownSafe() const {
    return RRI.KnownSafe;
  }

  void SetKnownSafe(const bool NewValue) {
    RRI.KnownSafe = NewValue;
  }

  bool IsTailCallRelease() const {
    return RRI.IsTailCallRelease;
  }

  void SetTailCallRelease(const bool NewValue) {
    RRI.IsTailCallRelease = NewValue;
  }

  bool IsTrackingImpreciseReleases() const {
    return RRI.ReleaseMetadata != 0;
  }

  const MDNode *GetReleaseMetadata() const {
    return RRI.ReleaseMetadata;
  }

  void SetReleaseMetadata(MDNode *NewValue) {
    RRI.ReleaseMetadata = NewValue;
  }

  bool IsCFGHazardAfflicted() const {
    return RRI.CFGHazardAfflicted;
  }

  void SetCFGHazardAfflicted(const bool NewValue) {
    RRI.CFGHazardAfflicted = NewValue;
  }

  void SetKnownPositiveRefCount() {
    DEBUG(dbgs() << "Setting Known Positive.\n");
    KnownPositiveRefCount = true;
  }

  void ClearKnownPositiveRefCount() {
    DEBUG(dbgs() << "Clearing Known Positive.\n");
    KnownPositiveRefCount = false;
  }

  bool HasKnownPositiveRefCount() const {
    return KnownPositiveRefCount;
  }

  void SetSeq(Sequence NewSeq) {
    DEBUG(dbgs() << "Old: " << Seq << "; New: " << NewSeq << "\n");
    Seq = NewSeq;
  }

  Sequence GetSeq() const {
    return Seq;
  }

  void ClearSequenceProgress() {
    ResetSequenceProgress(S_None);
  }

  void ResetSequenceProgress(Sequence NewSeq) {
    DEBUG(dbgs() << "Resetting sequence progress.\n");
    SetSeq(NewSeq);
    Partial = false;
    RRI.clear();
  }

  void Merge(const PtrState &Other, bool TopDown);

  void InsertCall(Instruction *I) {
    RRI.Calls.insert(I);
  }

  void InsertReverseInsertPt(Instruction *I) {
    RRI.ReverseInsertPts.insert(I);
  }

  void ClearReverseInsertPts() {
    RRI.ReverseInsertPts.clear();
  }

  bool HasReverseInsertPts() const {
    return !RRI.ReverseInsertPts.empty();
  }

  const RRInfo &GetRRInfo() const {
    return RRI;
  }
};
void
PtrState::Merge(const PtrState &Other, bool TopDown) {
  Seq = MergeSeqs(Seq, Other.Seq, TopDown);
  KnownPositiveRefCount &= Other.KnownPositiveRefCount;

  // If we're not in a sequence (anymore), drop all associated state.
  if (Seq == S_None) {
    Partial = false;
    RRI.clear();
  } else if (Partial || Other.Partial) {
    // If we're doing a merge on a path that's previously seen a partial
    // merge, conservatively drop the sequence, to avoid doing partial
    // RR elimination. If the branch predicates for the two merges differ,
    // mixing them is unsafe.
    ClearSequenceProgress();
  } else {
    // Otherwise merge the other PtrState's RRInfo into our RRInfo. At this
    // point, we know that currently we are not partial. Stash whether or not
    // the merge operation caused us to undergo a partial merging of reverse
    // insertion points.
    Partial = RRI.Merge(Other.RRI);
  }
}
/// \brief Per-BasicBlock state.
class BBState {
  /// The number of unique control paths from the entry which can reach this
  /// block.
  unsigned TopDownPathCount;

  /// The number of unique control paths to exits from this block.
  unsigned BottomUpPathCount;

  /// A type for PerPtrTopDown and PerPtrBottomUp.
  typedef MapVector<const Value *, PtrState> MapTy;

  /// The top-down traversal uses this to record information known about a
  /// pointer at the bottom of each block.
  MapTy PerPtrTopDown;

  /// The bottom-up traversal uses this to record information known about a
  /// pointer at the top of each block.
  MapTy PerPtrBottomUp;

  /// Effective predecessors of the current block ignoring ignorable edges and
  /// ignored backedges.
  SmallVector<BasicBlock *, 2> Preds;
  /// Effective successors of the current block ignoring ignorable edges and
  /// ignored backedges.
  SmallVector<BasicBlock *, 2> Succs;

public:
  static const unsigned OverflowOccurredValue;

  BBState() : TopDownPathCount(0), BottomUpPathCount(0) { }

  typedef MapTy::iterator ptr_iterator;
  typedef MapTy::const_iterator ptr_const_iterator;

  ptr_iterator top_down_ptr_begin() { return PerPtrTopDown.begin(); }
  ptr_iterator top_down_ptr_end() { return PerPtrTopDown.end(); }
  ptr_const_iterator top_down_ptr_begin() const {
    return PerPtrTopDown.begin();
  }
  ptr_const_iterator top_down_ptr_end() const {
    return PerPtrTopDown.end();
  }

  ptr_iterator bottom_up_ptr_begin() { return PerPtrBottomUp.begin(); }
  ptr_iterator bottom_up_ptr_end() { return PerPtrBottomUp.end(); }
  ptr_const_iterator bottom_up_ptr_begin() const {
    return PerPtrBottomUp.begin();
  }
  ptr_const_iterator bottom_up_ptr_end() const {
    return PerPtrBottomUp.end();
  }

  /// Mark this block as being an entry block, which has one path from the
  /// entry by definition.
  void SetAsEntry() { TopDownPathCount = 1; }

  /// Mark this block as being an exit block, which has one path to an exit by
  /// definition.
  void SetAsExit() { BottomUpPathCount = 1; }
  /// Attempt to find the PtrState object describing the top down state for
  /// pointer Arg. Return a new initialized PtrState describing the top down
  /// state for Arg if we do not find one.
  PtrState &getPtrTopDownState(const Value *Arg) {
    return PerPtrTopDown[Arg];
  }

  /// Attempt to find the PtrState object describing the bottom up state for
  /// pointer Arg. Return a new initialized PtrState describing the bottom up
  /// state for Arg if we do not find one.
  PtrState &getPtrBottomUpState(const Value *Arg) {
    return PerPtrBottomUp[Arg];
  }

  /// Attempt to find the PtrState object describing the bottom up state for
  /// pointer Arg.
  ptr_iterator findPtrBottomUpState(const Value *Arg) {
    return PerPtrBottomUp.find(Arg);
  }

  void clearBottomUpPointers() {
    PerPtrBottomUp.clear();
  }

  void clearTopDownPointers() {
    PerPtrTopDown.clear();
  }

  void InitFromPred(const BBState &Other);
  void InitFromSucc(const BBState &Other);
  void MergePred(const BBState &Other);
  void MergeSucc(const BBState &Other);

  /// Compute the number of possible unique paths from an entry to an exit
  /// which pass through this block. This is only valid after both the
  /// top-down and bottom-up traversals are complete.
  ///
  /// Returns true if overflow occurred. Returns false if overflow did not
  /// occur.
  bool GetAllPathCountWithOverflow(unsigned &PathCount) const {
    if (TopDownPathCount == OverflowOccurredValue ||
        BottomUpPathCount == OverflowOccurredValue)
      return true;
    unsigned long long Product =
      (unsigned long long)TopDownPathCount*BottomUpPathCount;
    // Overflow occurred if any of the upper bits of Product are set, or if
    // the lower 32 bits of Product are all set (colliding with
    // OverflowOccurredValue).
    return (Product >> 32) ||
           ((PathCount = Product) == OverflowOccurredValue);
  }
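  // Worked example (illustrative, not from the original file): with
  // TopDownPathCount = 0x10000 (2^16) and BottomUpPathCount = 0x10000,
  // Product = 2^32, so Product >> 32 == 1 and the query reports overflow.
  // With counts 3 and 5, Product = 15, the upper 32 bits are zero,
  // 15 != OverflowOccurredValue (0xffffffff), and PathCount is set to 15.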
  // Specialized CFG utilities.
  typedef SmallVectorImpl<BasicBlock *>::const_iterator edge_iterator;
  edge_iterator pred_begin() const { return Preds.begin(); }
  edge_iterator pred_end() const { return Preds.end(); }
  edge_iterator succ_begin() const { return Succs.begin(); }
  edge_iterator succ_end() const { return Succs.end(); }

  void addSucc(BasicBlock *Succ) { Succs.push_back(Succ); }
  void addPred(BasicBlock *Pred) { Preds.push_back(Pred); }

  bool isExit() const { return Succs.empty(); }
};

const unsigned BBState::OverflowOccurredValue = 0xffffffff;
void BBState::InitFromPred(const BBState &Other) {
  PerPtrTopDown = Other.PerPtrTopDown;
  TopDownPathCount = Other.TopDownPathCount;
}

void BBState::InitFromSucc(const BBState &Other) {
  PerPtrBottomUp = Other.PerPtrBottomUp;
  BottomUpPathCount = Other.BottomUpPathCount;
}
/// The top-down traversal uses this to merge information about predecessors to
/// form the initial state for a new block.
void BBState::MergePred(const BBState &Other) {
  if (TopDownPathCount == OverflowOccurredValue)
    return;

  // Other.TopDownPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  TopDownPathCount += Other.TopDownPathCount;

  // In order to be consistent, we clear the top down pointers when
  // TopDownPathCount becomes OverflowOccurredValue as a result of the
  // addition, even though "true" overflow has not occurred.
  if (TopDownPathCount == OverflowOccurredValue) {
    clearTopDownPointers();
    return;
  }

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (TopDownPathCount < Other.TopDownPathCount) {
    TopDownPathCount = OverflowOccurredValue;
    clearTopDownPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the same key,
  // merge the entries. Otherwise, copy the entry and merge it with an empty
  // entry.
  for (ptr_const_iterator MI = Other.top_down_ptr_begin(),
       ME = Other.top_down_ptr_end(); MI != ME; ++MI) {
    std::pair<ptr_iterator, bool> Pair = PerPtrTopDown.insert(*MI);
    Pair.first->second.Merge(Pair.second ? PtrState() : MI->second,
                             /*TopDown=*/true);
  }

  // For each entry in our set, if the other set doesn't have an entry with the
  // same key, force it to merge with an empty entry.
  for (ptr_iterator MI = top_down_ptr_begin(),
       ME = top_down_ptr_end(); MI != ME; ++MI)
    if (Other.PerPtrTopDown.find(MI->first) == Other.PerPtrTopDown.end())
      MI->second.Merge(PtrState(), /*TopDown=*/true);
}
/// The bottom-up traversal uses this to merge information about successors to
/// form the initial state for a new block.
void BBState::MergeSucc(const BBState &Other) {
  if (BottomUpPathCount == OverflowOccurredValue)
    return;

  // Other.BottomUpPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  BottomUpPathCount += Other.BottomUpPathCount;

  // In order to be consistent, we clear the bottom up pointers when
  // BottomUpPathCount becomes OverflowOccurredValue as a result of the
  // addition, even though "true" overflow has not occurred.
  if (BottomUpPathCount == OverflowOccurredValue) {
    clearBottomUpPointers();
    return;
  }

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (BottomUpPathCount < Other.BottomUpPathCount) {
    BottomUpPathCount = OverflowOccurredValue;
    clearBottomUpPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the
  // same key, merge the entries. Otherwise, copy the entry and merge
  // it with an empty entry.
  for (ptr_const_iterator MI = Other.bottom_up_ptr_begin(),
       ME = Other.bottom_up_ptr_end(); MI != ME; ++MI) {
    std::pair<ptr_iterator, bool> Pair = PerPtrBottomUp.insert(*MI);
    Pair.first->second.Merge(Pair.second ? PtrState() : MI->second,
                             /*TopDown=*/false);
  }

  // For each entry in our set, if the other set doesn't have an entry
  // with the same key, force it to merge with an empty entry.
  for (ptr_iterator MI = bottom_up_ptr_begin(),
       ME = bottom_up_ptr_end(); MI != ME; ++MI)
    if (Other.PerPtrBottomUp.find(MI->first) == Other.PerPtrBottomUp.end())
      MI->second.Merge(PtrState(), /*TopDown=*/false);
}
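// Worked example (illustrative, not from the original file): in a diamond CFG
//   Entry -> A, Entry -> B, A -> Exit, B -> Exit
// Entry has TopDownPathCount = 1, A and B each inherit 1 via InitFromPred,
// and Exit sums its two predecessors via MergePred to get TopDownPathCount = 2.
// The bottom-up counts mirror this, so GetAllPathCountWithOverflow reports
// 1 path through A or B individually and 2 paths through Entry or Exit.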
// Only enable ARC Annotations if we are building a debug version of
// the compiler.
#ifndef NDEBUG
#define ARC_ANNOTATIONS
#endif

// Define some macros along the lines of DEBUG and some helper functions to
// make it cleaner to create annotations in the source code and to no-op when
// not building in debug mode.
#ifdef ARC_ANNOTATIONS

#include "llvm/Support/CommandLine.h"

/// Enable/disable ARC sequence annotations.
static cl::opt<bool>
EnableARCAnnotations("enable-objc-arc-annotations", cl::init(false),
                     cl::desc("Enable emission of arc data flow analysis "
                              "annotations"));
static cl::opt<bool>
DisableCheckForCFGHazards("disable-objc-arc-checkforcfghazards", cl::init(false),
                          cl::desc("Disable check for cfg hazards when "
                                   "annotating"));
static cl::opt<std::string>
ARCAnnotationTargetIdentifier("objc-arc-annotation-target-identifier",
                              cl::init(""),
                              cl::desc("filter out all data flow annotations "
                                       "but those that apply to the given "
                                       "target llvm identifier."));
/// This function appends a unique ARCAnnotationProvenanceSourceMDKind id to an
/// instruction so that we can track backwards when post processing via the
/// llvm arc annotation processor tool.
static MDString *AppendMDNodeToSourcePtr(unsigned NodeId,
                                         Value *Ptr) {
  MDString *Hash = 0;

  // If the pointer is a result of an instruction and it does not have a source
  // MDNode attached to it, attach a new MDNode onto it. If the pointer is a
  // result of an instruction and does have a source MDNode attached to it,
  // return a reference to said Node. Otherwise just return 0.
  if (Instruction *Inst = dyn_cast<Instruction>(Ptr)) {
    MDNode *Node = 0;
    if (!(Node = Inst->getMetadata(NodeId))) {
      // We do not have any node. Generate and attach the hash MDString to the
      // instruction.

      // We just use an MDString to ensure that this metadata gets written out
      // of line at the module level and to provide a very simple format
      // encoding the information herein. Both of these make it simpler to
      // parse the annotations with a simple external program.
      std::string Str;
      raw_string_ostream os(Str);
      os << "(" << Inst->getParent()->getParent()->getName() << ",%"
         << Inst->getName() << ")";

      Hash = MDString::get(Inst->getContext(), os.str());
      Inst->setMetadata(NodeId, MDNode::get(Inst->getContext(),Hash));
    } else {
      // We have a node. Grab its hash and return it.
      assert(Node->getNumOperands() == 1 &&
             "An ARCAnnotationProvenanceSourceMDKind can only have 1 operand.");
      Hash = cast<MDString>(Node->getOperand(0));
    }
  } else if (Argument *Arg = dyn_cast<Argument>(Ptr)) {
    std::string str;
    raw_string_ostream os(str);
    os << "(" << Arg->getParent()->getName() << ",%" << Arg->getName()
       << ")";
    Hash = MDString::get(Arg->getContext(), os.str());
  }

  return Hash;
}
static std::string SequenceToString(Sequence A) {
  std::string str;
  raw_string_ostream os(str);
  os << A;
  return os.str();
}

/// Helper function to change a Sequence into a String object using our overload
/// for raw_ostream so we only have printing code in one location.
static MDString *SequenceToMDString(LLVMContext &Context,
                                    Sequence A) {
  return MDString::get(Context, SequenceToString(A));
}
/// A simple function to generate a MDNode which describes the change in state
/// for Value *Ptr caused by Instruction *Inst.
static void AppendMDNodeToInstForPtr(unsigned NodeId,
                                     Instruction *Inst,
                                     Value *Ptr,
                                     MDString *PtrSourceMDNodeID,
                                     Sequence OldSeq,
                                     Sequence NewSeq) {
  MDNode *Node = 0;
  Value *tmp[3] = {PtrSourceMDNodeID,
                   SequenceToMDString(Inst->getContext(),
                                      OldSeq),
                   SequenceToMDString(Inst->getContext(),
                                      NewSeq)};
  Node = MDNode::get(Inst->getContext(),
                     ArrayRef<Value*>(tmp, 3));

  Inst->setMetadata(NodeId, Node);
}
/// Add to the beginning of the basic block llvm.ptr.annotations which show the
/// state of a pointer at the entrance to a basic block.
static void GenerateARCBBEntranceAnnotation(const char *Name, BasicBlock *BB,
                                            Value *Ptr, Sequence Seq) {
  // If we have a target identifier, make sure that we match it before
  // emitting an annotation.
  if (!ARCAnnotationTargetIdentifier.empty() &&
      !Ptr->getName().equals(ARCAnnotationTargetIdentifier))
    return;

  Module *M = BB->getParent()->getParent();
  LLVMContext &C = M->getContext();
  Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
  Type *I8XX = PointerType::getUnqual(I8X);
  Type *Params[] = {I8XX, I8XX};
  FunctionType *FTy = FunctionType::get(Type::getVoidTy(C),
                                        ArrayRef<Type*>(Params, 2),
                                        /*isVarArg=*/false);
  Constant *Callee = M->getOrInsertFunction(Name, FTy);

  IRBuilder<> Builder(BB, BB->getFirstInsertionPt());

  Value *PtrName;
  StringRef Tmp = Ptr->getName();
  if (0 == (PtrName = M->getGlobalVariable(Tmp, true))) {
    Value *ActualPtrName = Builder.CreateGlobalStringPtr(Tmp,
                                                         Twine(Tmp) + "_STR");
    PtrName = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
                                 cast<Constant>(ActualPtrName), Tmp);
  }

  Value *S;
  std::string SeqStr = SequenceToString(Seq);
  if (0 == (S = M->getGlobalVariable(SeqStr, true))) {
    Value *ActualPtrName = Builder.CreateGlobalStringPtr(SeqStr,
                                                         SeqStr + "_STR");
    S = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
                           cast<Constant>(ActualPtrName), SeqStr);
  }

  Builder.CreateCall2(Callee, PtrName, S);
}
/// Add to the end of the basic block llvm.ptr.annotations which show the state
/// of the pointer at the bottom of the basic block.
static void GenerateARCBBTerminatorAnnotation(const char *Name, BasicBlock *BB,
                                              Value *Ptr, Sequence Seq) {
  // If we have a target identifier, make sure that we match it before emitting
  // an annotation.
  if (!ARCAnnotationTargetIdentifier.empty() &&
      !Ptr->getName().equals(ARCAnnotationTargetIdentifier))
    return;

  Module *M = BB->getParent()->getParent();
  LLVMContext &C = M->getContext();
  Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
  Type *I8XX = PointerType::getUnqual(I8X);
  Type *Params[] = {I8XX, I8XX};
  FunctionType *FTy = FunctionType::get(Type::getVoidTy(C),
                                        ArrayRef<Type*>(Params, 2),
                                        /*isVarArg=*/false);
  Constant *Callee = M->getOrInsertFunction(Name, FTy);

  IRBuilder<> Builder(BB, llvm::prior(BB->end()));

  Value *PtrName;
  StringRef Tmp = Ptr->getName();
  if (0 == (PtrName = M->getGlobalVariable(Tmp, true))) {
    Value *ActualPtrName = Builder.CreateGlobalStringPtr(Tmp,
                                                         Twine(Tmp) + "_STR");
    PtrName = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
                                 cast<Constant>(ActualPtrName), Tmp);
  }

  Value *S;
  std::string SeqStr = SequenceToString(Seq);
  if (0 == (S = M->getGlobalVariable(SeqStr, true))) {
    Value *ActualPtrName = Builder.CreateGlobalStringPtr(SeqStr,
                                                         SeqStr + "_STR");
    S = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
                           cast<Constant>(ActualPtrName), SeqStr);
  }

  Builder.CreateCall2(Callee, PtrName, S);
}
/// Adds a source annotation to pointer and a state change annotation to Inst
/// referencing the source annotation and the old/new state of pointer.
static void GenerateARCAnnotation(unsigned InstMDId,
                                  unsigned PtrMDId,
                                  Instruction *Inst,
                                  Value *Ptr,
                                  Sequence OldSeq,
                                  Sequence NewSeq) {
  if (EnableARCAnnotations) {
    // If we have a target identifier, make sure that we match it before
    // emitting an annotation.
    if (!ARCAnnotationTargetIdentifier.empty() &&
        !Ptr->getName().equals(ARCAnnotationTargetIdentifier))
      return;

    // First generate the source annotation on our pointer. This will return an
    // MDString* if Ptr actually comes from an instruction implying we can put
    // in a source annotation. If AppendMDNodeToSourcePtr returns 0 (i.e. NULL),
    // then we know that our pointer is from an Argument so we put a reference
    // to the argument number.
    //
    // The point of this is to make it easy for the
    // llvm-arc-annotation-processor tool to cross reference where the source
    // pointer is in the LLVM IR, since the LLVM IR parser does not submit such
    // information via debug info for backends to use (there is little reason
    // for anything else to need it).
    MDString *SourcePtrMDNode =
      AppendMDNodeToSourcePtr(PtrMDId, Ptr);
    AppendMDNodeToInstForPtr(InstMDId, Inst, Ptr, SourcePtrMDNode, OldSeq,
                             NewSeq);
  }
}
// The actual interface for accessing the above functionality is defined via
// some simple macros which are defined below. We do this so that the user does
// not need to pass in what metadata id is needed, resulting in cleaner code.
// Additionally it provides an easy way to conditionally no-op all annotation
// support in a non-debug build.

/// Use this macro to annotate a sequence state change when processing
/// instructions bottom up.
#define ANNOTATE_BOTTOMUP(inst, ptr, old, new)                          \
  GenerateARCAnnotation(ARCAnnotationBottomUpMDKind,                    \
                        ARCAnnotationProvenanceSourceMDKind, (inst),    \
                        const_cast<Value*>(ptr), (old), (new))
/// Use this macro to annotate a sequence state change when processing
/// instructions top down.
#define ANNOTATE_TOPDOWN(inst, ptr, old, new)                           \
  GenerateARCAnnotation(ARCAnnotationTopDownMDKind,                     \
                        ARCAnnotationProvenanceSourceMDKind, (inst),    \
                        const_cast<Value*>(ptr), (old), (new))

#define ANNOTATE_BB(_states, _bb, _name, _type, _direction)                   \
  do {                                                                        \
    if (EnableARCAnnotations) {                                               \
      for (BBState::ptr_const_iterator I = (_states)._direction##_ptr_begin(),\
           E = (_states)._direction##_ptr_end(); I != E; ++I) {               \
        Value *Ptr = const_cast<Value*>(I->first);                            \
        Sequence Seq = I->second.GetSeq();                                    \
        GenerateARCBB ## _type ## Annotation(_name, (_bb), Ptr, Seq);         \
      }                                                                       \
    }                                                                         \
  } while (0)

#define ANNOTATE_BOTTOMUP_BBSTART(_states, _basicblock)                       \
  ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.bottomup.bbstart",   \
              Entrance, bottom_up)
#define ANNOTATE_BOTTOMUP_BBEND(_states, _basicblock)                         \
  ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.bottomup.bbend",     \
              Terminator, bottom_up)
#define ANNOTATE_TOPDOWN_BBSTART(_states, _basicblock)                        \
  ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.topdown.bbstart",    \
              Entrance, top_down)
#define ANNOTATE_TOPDOWN_BBEND(_states, _basicblock)                          \
  ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.topdown.bbend",      \
              Terminator, top_down)
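// Example (grounded in this file's own usage): a bottom-up visitor records a
// state change on Inst for pointer Arg via
//   ANNOTATE_BOTTOMUP(Inst, Arg, S.GetSeq(), NewSeq);
// which expands to a GenerateARCAnnotation call here, and to nothing at all
// when ARC_ANNOTATIONS is not defined.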
#else // !ARC_ANNOTATION

// If annotations are off, noop.
#define ANNOTATE_BOTTOMUP(inst, ptr, old, new)
#define ANNOTATE_TOPDOWN(inst, ptr, old, new)
#define ANNOTATE_BOTTOMUP_BBSTART(states, basicblock)
#define ANNOTATE_BOTTOMUP_BBEND(states, basicblock)
#define ANNOTATE_TOPDOWN_BBSTART(states, basicblock)
#define ANNOTATE_TOPDOWN_BBEND(states, basicblock)

#endif // !ARC_ANNOTATION
/// \brief The main ARC optimization pass.
class ObjCARCOpt : public FunctionPass {
  bool Changed;
  ProvenanceAnalysis PA;
  ARCRuntimeEntryPoints EP;

  // This is used to track if a pointer is stored into an alloca.
  DenseSet<const Value *> MultiOwnersSet;

  /// A flag indicating whether this optimization pass should run.
  bool Run;

  /// Flags which determine whether each of the interesting runtime functions
  /// is in fact used in the current function.
  unsigned UsedInThisFunction;

  /// The Metadata Kind for clang.imprecise_release metadata.
  unsigned ImpreciseReleaseMDKind;

  /// The Metadata Kind for clang.arc.copy_on_escape metadata.
  unsigned CopyOnEscapeMDKind;

  /// The Metadata Kind for clang.arc.no_objc_arc_exceptions metadata.
  unsigned NoObjCARCExceptionsMDKind;

#ifdef ARC_ANNOTATIONS
  /// The Metadata Kind for llvm.arc.annotation.bottomup metadata.
  unsigned ARCAnnotationBottomUpMDKind;
  /// The Metadata Kind for llvm.arc.annotation.topdown metadata.
  unsigned ARCAnnotationTopDownMDKind;
  /// The Metadata Kind for llvm.arc.annotation.provenancesource metadata.
  unsigned ARCAnnotationProvenanceSourceMDKind;
#endif // ARC_ANNOTATIONS

  bool IsRetainBlockOptimizable(const Instruction *Inst);

  bool OptimizeRetainRVCall(Function &F, Instruction *RetainRV);
  void OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV,
                                 InstructionClass &Class);
  bool OptimizeRetainBlockCall(Function &F, Instruction *RetainBlock,
                               InstructionClass &Class);
  void OptimizeIndividualCalls(Function &F);

  void CheckForCFGHazards(const BasicBlock *BB,
                          DenseMap<const BasicBlock *, BBState> &BBStates,
                          BBState &MyStates) const;
  bool VisitInstructionBottomUp(Instruction *Inst,
                                BasicBlock *BB,
                                MapVector<Value *, RRInfo> &Retains,
                                BBState &MyStates);
  bool VisitBottomUp(BasicBlock *BB,
                     DenseMap<const BasicBlock *, BBState> &BBStates,
                     MapVector<Value *, RRInfo> &Retains);
  bool VisitInstructionTopDown(Instruction *Inst,
                               DenseMap<Value *, RRInfo> &Releases,
                               BBState &MyStates);
  bool VisitTopDown(BasicBlock *BB,
                    DenseMap<const BasicBlock *, BBState> &BBStates,
                    DenseMap<Value *, RRInfo> &Releases);
  bool Visit(Function &F,
             DenseMap<const BasicBlock *, BBState> &BBStates,
             MapVector<Value *, RRInfo> &Retains,
             DenseMap<Value *, RRInfo> &Releases);

  void MoveCalls(Value *Arg, RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
                 MapVector<Value *, RRInfo> &Retains,
                 DenseMap<Value *, RRInfo> &Releases,
                 SmallVectorImpl<Instruction *> &DeadInsts,
                 Module *M);

  bool ConnectTDBUTraversals(DenseMap<const BasicBlock *, BBState> &BBStates,
                             MapVector<Value *, RRInfo> &Retains,
                             DenseMap<Value *, RRInfo> &Releases,
                             Module *M,
                             SmallVectorImpl<Instruction *> &NewRetains,
                             SmallVectorImpl<Instruction *> &NewReleases,
                             SmallVectorImpl<Instruction *> &DeadInsts,
                             RRInfo &RetainsToMove,
                             RRInfo &ReleasesToMove,
                             Value *Arg,
                             bool KnownSafe,
                             bool &AnyPairsCompletelyEliminated);

  bool PerformCodePlacement(DenseMap<const BasicBlock *, BBState> &BBStates,
                            MapVector<Value *, RRInfo> &Retains,
                            DenseMap<Value *, RRInfo> &Releases,
                            Module *M);

  void OptimizeWeakCalls(Function &F);

  bool OptimizeSequences(Function &F);

  void OptimizeReturns(Function &F);

  void GatherStatistics(Function &F, bool AfterOptimization = false);

  virtual void getAnalysisUsage(AnalysisUsage &AU) const;
  virtual bool doInitialization(Module &M);
  virtual bool runOnFunction(Function &F);
  virtual void releaseMemory();

public:
  static char ID;
  ObjCARCOpt() : FunctionPass(ID) {
    initializeObjCARCOptPass(*PassRegistry::getPassRegistry());
  }
};
char ObjCARCOpt::ID = 0;
INITIALIZE_PASS_BEGIN(ObjCARCOpt,
                      "objc-arc", "ObjC ARC optimization", false, false)
INITIALIZE_PASS_DEPENDENCY(ObjCARCAliasAnalysis)
INITIALIZE_PASS_END(ObjCARCOpt,
                    "objc-arc", "ObjC ARC optimization", false, false)

Pass *llvm::createObjCARCOptPass() {
  return new ObjCARCOpt();
}
void ObjCARCOpt::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<ObjCARCAliasAnalysis>();
  AU.addRequired<AliasAnalysis>();
  // ARC optimization doesn't currently split critical edges.
  AU.setPreservesCFG();
}
bool ObjCARCOpt::IsRetainBlockOptimizable(const Instruction *Inst) {
  // Without the magic metadata tag, we have to assume this might be an
  // objc_retainBlock call inserted to convert a block pointer to an id,
  // in which case it really is needed.
  if (!Inst->getMetadata(CopyOnEscapeMDKind))
    return false;

  // If the pointer "escapes" (not including being used in a call),
  // the copy may be needed.
  if (DoesRetainableObjPtrEscape(Inst))
    return false;

  // Otherwise, it's not needed.
  return true;
}
/// Turn objc_retainAutoreleasedReturnValue into objc_retain if the operand is
/// not a return value. Or, if it can be paired with an
/// objc_autoreleaseReturnValue, delete the pair and return true.
bool
ObjCARCOpt::OptimizeRetainRVCall(Function &F, Instruction *RetainRV) {
  // Check for the argument being from an immediately preceding call or invoke.
  const Value *Arg = GetObjCArg(RetainRV);
  ImmutableCallSite CS(Arg);
  if (const Instruction *Call = CS.getInstruction()) {
    if (Call->getParent() == RetainRV->getParent()) {
      BasicBlock::const_iterator I = Call;
      ++I;
      while (IsNoopInstruction(I)) ++I;
      if (&*I == RetainRV)
        return false;
    } else if (const InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
      BasicBlock *RetainRVParent = RetainRV->getParent();
      if (II->getNormalDest() == RetainRVParent) {
        BasicBlock::const_iterator I = RetainRVParent->begin();
        while (IsNoopInstruction(I)) ++I;
        if (&*I == RetainRV)
          return false;
      }
    }
  }

  // Check for being preceded by an objc_autoreleaseReturnValue on the same
  // pointer. In this case, we can delete the pair.
  BasicBlock::iterator I = RetainRV, Begin = RetainRV->getParent()->begin();
  if (I != Begin) {
    do --I; while (I != Begin && IsNoopInstruction(I));
    if (GetBasicInstructionClass(I) == IC_AutoreleaseRV &&
        GetObjCArg(I) == Arg) {
      Changed = true;
      ++NumPeeps;

      DEBUG(dbgs() << "Erasing autoreleaseRV,retainRV pair: " << *I << "\n"
                   << "Erasing " << *RetainRV << "\n");

      EraseInstruction(I);
      EraseInstruction(RetainRV);
      return true;
    }
  }

  // Turn it to a plain objc_retain.
  Changed = true;
  ++NumPeeps;

  DEBUG(dbgs() << "Transforming objc_retainAutoreleasedReturnValue => "
                  "objc_retain since the operand is not a return value.\n"
                  "Old = " << *RetainRV << "\n");

  Constant *NewDecl = EP.get(ARCRuntimeEntryPoints::EPT_Retain);
  cast<CallInst>(RetainRV)->setCalledFunction(NewDecl);

  DEBUG(dbgs() << "New = " << *RetainRV << "\n");

  return false;
}
/// Turn objc_autoreleaseReturnValue into objc_autorelease if the result is not
/// used as a return value.
void
ObjCARCOpt::OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV,
                                      InstructionClass &Class) {
  // Check for a return of the pointer value.
  const Value *Ptr = GetObjCArg(AutoreleaseRV);
  SmallVector<const Value *, 2> Users;
  Users.push_back(Ptr);
  do {
    Ptr = Users.pop_back_val();
    for (Value::const_use_iterator UI = Ptr->use_begin(), UE = Ptr->use_end();
         UI != UE; ++UI) {
      const User *I = *UI;
      if (isa<ReturnInst>(I) || GetBasicInstructionClass(I) == IC_RetainRV)
        return;
      if (isa<BitCastInst>(I))
        Users.push_back(I);
    }
  } while (!Users.empty());

  Changed = true;
  ++NumPeeps;

  DEBUG(dbgs() << "Transforming objc_autoreleaseReturnValue => "
                  "objc_autorelease since its operand is not used as a return "
                  "value.\n"
                  "Old = " << *AutoreleaseRV << "\n");

  CallInst *AutoreleaseRVCI = cast<CallInst>(AutoreleaseRV);
  Constant *NewDecl = EP.get(ARCRuntimeEntryPoints::EPT_Autorelease);
  AutoreleaseRVCI->setCalledFunction(NewDecl);
  AutoreleaseRVCI->setTailCall(false); // Never tail call objc_autorelease.
  Class = IC_Autorelease;

  DEBUG(dbgs() << "New: " << *AutoreleaseRV << "\n");
}
// \brief Attempt to strength reduce objc_retainBlock calls to objc_retain
// calls.
//
// Specifically: If an objc_retainBlock call has the copy_on_escape metadata and
// does not escape (following the rules of block escaping), strength reduce the
// objc_retainBlock to an objc_retain.
//
// TODO: If an objc_retainBlock call is dominated by a previous
// objc_retainBlock call, strength reduce the objc_retainBlock to an
// objc_retain.
bool
ObjCARCOpt::OptimizeRetainBlockCall(Function &F, Instruction *Inst,
                                    InstructionClass &Class) {
  assert(GetBasicInstructionClass(Inst) == Class);
  assert(IC_RetainBlock == Class);

  // If we cannot optimize Inst, return false.
  if (!IsRetainBlockOptimizable(Inst))
    return false;

  Changed = true;
  ++NumPeeps;

  DEBUG(dbgs() << "Strength reduced retainBlock => retain.\n");
  DEBUG(dbgs() << "Old: " << *Inst << "\n");
  CallInst *RetainBlock = cast<CallInst>(Inst);
  Constant *NewDecl = EP.get(ARCRuntimeEntryPoints::EPT_Retain);
  RetainBlock->setCalledFunction(NewDecl);
  // Remove copy_on_escape metadata.
  RetainBlock->setMetadata(CopyOnEscapeMDKind, 0);
  Class = IC_Retain;
  DEBUG(dbgs() << "New: " << *Inst << "\n");
  return true;
}
/// Visit each call, one at a time, and make simplifications without doing any
/// additional analysis.
void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
  DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeIndividualCalls ==\n");

  // Reset all the flags in preparation for recomputing them.
  UsedInThisFunction = 0;

  // Visit all objc_* calls in F.
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;

    InstructionClass Class = GetBasicInstructionClass(Inst);

    DEBUG(dbgs() << "Visiting: Class: " << Class << "; " << *Inst << "\n");

    switch (Class) {
    default: break;

    // Delete no-op casts. These function calls have special semantics, but
    // the semantics are entirely implemented via lowering in the front-end,
    // so by the time they reach the optimizer, they are just no-op calls
    // which return their argument.
    //
    // There are gray areas here, as the ability to cast reference-counted
    // pointers to raw void* and back allows code to break ARC assumptions,
    // however these are currently considered to be unimportant.
    case IC_NoopCast:
      Changed = true;
      ++NumNoops;
      DEBUG(dbgs() << "Erasing no-op cast: " << *Inst << "\n");
      EraseInstruction(Inst);
      continue;
    // If the pointer-to-weak-pointer is null, it's undefined behavior.
    case IC_LoadWeakRetained:
    case IC_DestroyWeak: {
      CallInst *CI = cast<CallInst>(Inst);
      if (IsNullOrUndef(CI->getArgOperand(0))) {
        Changed = true;
        Type *Ty = CI->getArgOperand(0)->getType();
        new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
                      Constant::getNullValue(Ty),
                      CI);
        llvm::Value *NewValue = UndefValue::get(CI->getType());
        DEBUG(dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
                        "\nOld = " << *CI << "\nNew = " << *NewValue << "\n");
        CI->replaceAllUsesWith(NewValue);
        CI->eraseFromParent();
        continue;
      }
      break;
    }
    case IC_CopyWeak:
    case IC_MoveWeak: {
      CallInst *CI = cast<CallInst>(Inst);
      if (IsNullOrUndef(CI->getArgOperand(0)) ||
          IsNullOrUndef(CI->getArgOperand(1))) {
        Changed = true;
        Type *Ty = CI->getArgOperand(0)->getType();
        new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
                      Constant::getNullValue(Ty),
                      CI);

        llvm::Value *NewValue = UndefValue::get(CI->getType());
        DEBUG(dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
                        "\nOld = " << *CI << "\nNew = " << *NewValue << "\n");

        CI->replaceAllUsesWith(NewValue);
        CI->eraseFromParent();
        continue;
      }
      break;
    }
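    // Example (illustrative, not from the original file): a call such as
    //   %v = call i8* @objc_loadWeakRetained(i8** null)
    // is undefined behavior. The code above materializes that by storing
    // undef through a null pointer of the same type and replacing all uses
    // of the call with undef before erasing it.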
    case IC_RetainRV:
      if (OptimizeRetainRVCall(F, Inst))
        continue;
      break;
    case IC_AutoreleaseRV:
      OptimizeAutoreleaseRVCall(F, Inst, Class);
      break;
    }

    // objc_autorelease(x) -> objc_release(x) if x is otherwise unused.
    if (IsAutorelease(Class) && Inst->use_empty()) {
      CallInst *Call = cast<CallInst>(Inst);
      const Value *Arg = Call->getArgOperand(0);
      Arg = FindSingleUseIdentifiedObject(Arg);
      if (Arg) {
        Changed = true;
        ++NumAutoreleases;

        // Create the declaration lazily.
        LLVMContext &C = Inst->getContext();

        Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Release);
        CallInst *NewCall = CallInst::Create(Decl, Call->getArgOperand(0), "",
                                             Call);
        NewCall->setMetadata(ImpreciseReleaseMDKind, MDNode::get(C, None));

        DEBUG(dbgs() << "Replacing autorelease{,RV}(x) with objc_release(x) "
                        "since x is otherwise unused.\nOld: " << *Call << "\nNew: "
                     << *NewCall << "\n");

        EraseInstruction(Call);
        Inst = NewCall;
        Class = IC_Release;
      }
    }
    // For functions which can never be passed stack arguments, add
    // a tail keyword.
    if (IsAlwaysTail(Class)) {
      Changed = true;
      DEBUG(dbgs() << "Adding tail keyword to function since it can never be "
                      "passed stack args: " << *Inst << "\n");
      cast<CallInst>(Inst)->setTailCall();
    }

    // Ensure that functions that can never have a "tail" keyword due to the
    // semantics of ARC truly do not do so.
    if (IsNeverTail(Class)) {
      Changed = true;
      DEBUG(dbgs() << "Removing tail keyword from function: " << *Inst <<
            "\n");
      cast<CallInst>(Inst)->setTailCall(false);
    }

    // Set nounwind as needed.
    if (IsNoThrow(Class)) {
      Changed = true;
      DEBUG(dbgs() << "Found no throw class. Setting nounwind on: " << *Inst
                   << "\n");
      cast<CallInst>(Inst)->setDoesNotThrow();
    }

    if (!IsNoopOnNull(Class)) {
      UsedInThisFunction |= 1 << Class;
      continue;
    }

    const Value *Arg = GetObjCArg(Inst);

    // ARC calls with null are no-ops. Delete them.
    if (IsNullOrUndef(Arg)) {
      Changed = true;
      ++NumNoops;
      DEBUG(dbgs() << "ARC calls with null are no-ops. Erasing: " << *Inst
                   << "\n");
      EraseInstruction(Inst);
      continue;
    }

    // Keep track of which of retain, release, autorelease, and retain_block
    // are actually present in this function.
    UsedInThisFunction |= 1 << Class;
    // If Arg is a PHI, and one or more incoming values to the
    // PHI are null, and the call is control-equivalent to the PHI, and there
    // are no relevant side effects between the PHI and the call, the call
    // could be pushed up to just those paths with non-null incoming values.
    // For now, don't bother splitting critical edges for this.
    SmallVector<std::pair<Instruction *, const Value *>, 4> Worklist;
    Worklist.push_back(std::make_pair(Inst, Arg));
    do {
      std::pair<Instruction *, const Value *> Pair = Worklist.pop_back_val();
      Inst = Pair.first;
      Arg = Pair.second;

      const PHINode *PN = dyn_cast<PHINode>(Arg);
      if (!PN) continue;

      // Determine if the PHI has any null operands, or any incoming
      // critical edges.
      bool HasNull = false;
      bool HasCriticalEdges = false;
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        Value *Incoming =
          StripPointerCastsAndObjCCalls(PN->getIncomingValue(i));
        if (IsNullOrUndef(Incoming))
          HasNull = true;
        else if (cast<TerminatorInst>(PN->getIncomingBlock(i)->back())
                   .getNumSuccessors() != 1) {
          HasCriticalEdges = true;
          break;
        }
      }

      // If we have null operands and no critical edges, optimize.
      if (!HasCriticalEdges && HasNull) {
        SmallPtrSet<Instruction *, 4> DependingInstructions;
        SmallPtrSet<const BasicBlock *, 4> Visited;

        // Check that there is nothing that cares about the reference
        // count between the call and the phi.
        switch (Class) {
        case IC_Retain:
        case IC_RetainBlock:
          // These can always be moved up.
          break;
        case IC_Release:
          // These can't be moved across things that care about the retain
          // count.
          FindDependencies(NeedsPositiveRetainCount, Arg,
                           Inst->getParent(), Inst,
                           DependingInstructions, Visited, PA);
          break;
        case IC_Autorelease:
          // These can't be moved across autorelease pool scope boundaries.
          FindDependencies(AutoreleasePoolBoundary, Arg,
                           Inst->getParent(), Inst,
                           DependingInstructions, Visited, PA);
          break;
        case IC_RetainRV:
        case IC_AutoreleaseRV:
          // Don't move these; the RV optimization depends on the autoreleaseRV
          // being tail called, and the retainRV being immediately after a call
          // (which might still happen if we get lucky with codegen layout, but
          // it's not worth taking the chance).
          continue;
        default:
          llvm_unreachable("Invalid dependence flavor");
        }

        if (DependingInstructions.size() == 1 &&
            *DependingInstructions.begin() == PN) {
          Changed = true;
          ++NumPartialNoops;
          // Clone the call into each predecessor that has a non-null value.
          CallInst *CInst = cast<CallInst>(Inst);
          Type *ParamTy = CInst->getArgOperand(0)->getType();
          for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
            Value *Incoming =
              StripPointerCastsAndObjCCalls(PN->getIncomingValue(i));
            if (!IsNullOrUndef(Incoming)) {
              CallInst *Clone = cast<CallInst>(CInst->clone());
              Value *Op = PN->getIncomingValue(i);
              Instruction *InsertPos = &PN->getIncomingBlock(i)->back();
              if (Op->getType() != ParamTy)
                Op = new BitCastInst(Op, ParamTy, "", InsertPos);
              Clone->setArgOperand(0, Op);
              Clone->insertBefore(InsertPos);

              DEBUG(dbgs() << "Cloning "
                           << *CInst << "\n"
                              "And inserting clone at " << *InsertPos << "\n");
              Worklist.push_back(std::make_pair(Clone, Incoming));
            }
          }
          // Erase the original call.
          DEBUG(dbgs() << "Erasing: " << *CInst << "\n");
          EraseInstruction(CInst);
          continue;
        }
      }
    } while (!Worklist.empty());
  }
}
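// Example (illustrative, not from the original file): given
//   %p = phi i8* [ null, %bb0 ], [ %obj, %bb1 ]
//   call void @objc_release(i8* %p)
// with nothing relevant between the phi and the call, the release is cloned
// into %bb1 (the predecessor with the non-null value) and the original call
// is erased; the null path never needed a release at all.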
/// If we have a top down pointer in the S_Use state, make sure that there are
/// no CFG hazards by checking the states of various bottom up pointers.
static void CheckForUseCFGHazard(const Sequence SuccSSeq,
                                 const bool SuccSRRIKnownSafe,
                                 PtrState &S,
                                 bool &SomeSuccHasSame,
                                 bool &AllSuccsHaveSame,
                                 bool &NotAllSeqEqualButKnownSafe,
                                 bool &ShouldContinue) {
  switch (SuccSSeq) {
  case S_CanRelease: {
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe) {
      S.ClearSequenceProgress();
      break;
    }
    S.SetCFGHazardAfflicted(true);
    ShouldContinue = true;
    break;
  }
  case S_Use:
    SomeSuccHasSame = true;
    break;
  case S_Stop:
  case S_Release:
  case S_MovableRelease:
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
      AllSuccsHaveSame = false;
    else
      NotAllSeqEqualButKnownSafe = true;
    break;
  case S_Retain:
    llvm_unreachable("bottom-up pointer in retain state!");
  case S_None:
    llvm_unreachable("This should have been handled earlier.");
  }
}
/// If we have a Top Down pointer in the S_CanRelease state, make sure that
/// there are no CFG hazards by checking the states of various bottom up
/// pointers.
static void CheckForCanReleaseCFGHazard(const Sequence SuccSSeq,
                                        const bool SuccSRRIKnownSafe,
                                        PtrState &S,
                                        bool &SomeSuccHasSame,
                                        bool &AllSuccsHaveSame,
                                        bool &NotAllSeqEqualButKnownSafe) {
  switch (SuccSSeq) {
  case S_CanRelease:
    SomeSuccHasSame = true;
    break;
  case S_Stop:
  case S_Release:
  case S_MovableRelease:
  case S_Use:
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
      AllSuccsHaveSame = false;
    else
      NotAllSeqEqualButKnownSafe = true;
    break;
  case S_Retain:
    llvm_unreachable("bottom-up pointer in retain state!");
  case S_None:
    llvm_unreachable("This should have been handled earlier.");
  }
}
1763 /// Check for critical edges, loop boundaries, irreducible control flow, or
1764 /// other CFG structures where moving code across the edge would result in it
1765 /// being executed more.
1767 ObjCARCOpt::CheckForCFGHazards(const BasicBlock *BB,
1768 DenseMap<const BasicBlock *, BBState> &BBStates,
1769 BBState &MyStates) const {
1770 // If any top-down local-use or possible-dec has a succ which is earlier in
1771 // the sequence, forget it.
1772 for (BBState::ptr_iterator I = MyStates.top_down_ptr_begin(),
1773 E = MyStates.top_down_ptr_end(); I != E; ++I) {
1774 PtrState &S = I->second;
1775 const Sequence Seq = I->second.GetSeq();
1777 // We only care about S_Retain, S_CanRelease, and S_Use.
1781 // Make sure that if extra top down states are added in the future that this
1782 // code is updated to handle it.
1783 assert((Seq == S_Retain || Seq == S_CanRelease || Seq == S_Use) &&
1784 "Unknown top down sequence state.");
1786 const Value *Arg = I->first;
1787 const TerminatorInst *TI = cast<TerminatorInst>(&BB->back());
1788 bool SomeSuccHasSame = false;
1789 bool AllSuccsHaveSame = true;
1790 bool NotAllSeqEqualButKnownSafe = false;
1792 succ_const_iterator SI(TI), SE(TI, false);
1794 for (; SI != SE; ++SI) {
1795 // If VisitBottomUp has pointer information for this successor, take
1796 // what we know about it.
1797 const DenseMap<const BasicBlock *, BBState>::iterator BBI =
1799 assert(BBI != BBStates.end());
1800 const PtrState &SuccS = BBI->second.getPtrBottomUpState(Arg);
1801 const Sequence SuccSSeq = SuccS.GetSeq();
1803 // If bottom up, the pointer is in an S_None state, clear the sequence
1804 // progress since the sequence in the bottom up state finished
1805 // suggesting a mismatch in between retains/releases. This is true for
1806 // all three cases that we are handling here: S_Retain, S_Use, and
1808 if (SuccSSeq == S_None) {
1809 S.ClearSequenceProgress();
1813 // If we have S_Use or S_CanRelease, perform our check for cfg hazard
1815 const bool SuccSRRIKnownSafe = SuccS.IsKnownSafe();
1817 // *NOTE* We do not use Seq from above here since we are allowing for
1818 // S.GetSeq() to change while we are visiting basic blocks.
1819 switch(S.GetSeq()) {
1820 case S_Use: {
1821 bool ShouldContinue = false;
1822 CheckForUseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S, SomeSuccHasSame,
1823 AllSuccsHaveSame, NotAllSeqEqualButKnownSafe,
1824 ShouldContinue);
1825 if (ShouldContinue)
1826 continue;
1827 break;
1828 }
1829 case S_CanRelease: {
1830 CheckForCanReleaseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S,
1831 SomeSuccHasSame, AllSuccsHaveSame,
1832 NotAllSeqEqualButKnownSafe);
1833 break;
1834 }
1835 case S_Retain:
1836 case S_None:
1837 case S_Stop:
1838 case S_Release:
1839 case S_MovableRelease:
1840 break;
1841 }
1842 }
1844 // If the state at the other end of any of the successor edges
1845 // matches the current state, require all edges to match. This
1846 // guards against loops in the middle of a sequence.
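// A minimal sketch of the hazard this guards against (illustrative CFG, not
// taken from the code):
//
//          +--------+
//          v        |
//   pre -> loop ----+--> exit
//
// If the state matches only along the back edge, moving calls across the
// loop boundary would change how many times they execute, so we forget the
// sequence rather than move code.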
1847 if (SomeSuccHasSame && !AllSuccsHaveSame) {
1848 S.ClearSequenceProgress();
1849 } else if (NotAllSeqEqualButKnownSafe) {
1850 // If we would have cleared the state were we not known safe, stop code
1851 // motion. This is because whether or not it is safe to remove RR pairs
1852 // via KnownSafe is an orthogonal concept to whether we are allowed to
1853 // perform code motion.
1854 S.SetCFGHazardAfflicted(true);
1860 ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst,
1861 BasicBlock *BB,
1862 MapVector<Value *, RRInfo> &Retains,
1863 BBState &MyStates) {
1864 bool NestingDetected = false;
1865 InstructionClass Class = GetInstructionClass(Inst);
1866 const Value *Arg = 0;
1868 DEBUG(dbgs() << "Class: " << Class << "\n");
1870 switch (Class) {
1871 case IC_Release: {
1872 Arg = GetObjCArg(Inst);
1874 PtrState &S = MyStates.getPtrBottomUpState(Arg);
1876 // Check for two releases in a row on the same pointer. If we see
1877 // them, make a note, and we'll circle back to revisit it after we've
1878 // hopefully eliminated the second release, which may allow us to
1879 // eliminate the first release too.
1880 // Theoretically we could implement removal of nested retain+release
1881 // pairs by making PtrState hold a stack of states, but this is
1882 // simple and avoids adding overhead for the non-nested case.
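// For instance (illustrative IR, names hypothetical), bottom-up we may reach
// a release while S is already in S_Release:
//
//   call void @objc_release(i8* %x)
//   ...
//   call void @objc_release(i8* %x)
//
// Noting the nesting lets a later iteration retry once the inner pair has
// hopefully been eliminated.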
1883 if (S.GetSeq() == S_Release || S.GetSeq() == S_MovableRelease) {
1884 DEBUG(dbgs() << "Found nested releases (i.e. a release pair)\n");
1885 NestingDetected = true;
1888 MDNode *ReleaseMetadata = Inst->getMetadata(ImpreciseReleaseMDKind);
1889 Sequence NewSeq = ReleaseMetadata ? S_MovableRelease : S_Release;
1890 ANNOTATE_BOTTOMUP(Inst, Arg, S.GetSeq(), NewSeq);
1891 S.ResetSequenceProgress(NewSeq);
1892 S.SetReleaseMetadata(ReleaseMetadata);
1893 S.SetKnownSafe(S.HasKnownPositiveRefCount());
1894 S.SetTailCallRelease(cast<CallInst>(Inst)->isTailCall());
1895 S.InsertCall(Inst);
1896 S.SetKnownPositiveRefCount();
1897 break;
1898 }
1899 case IC_RetainBlock:
1900 // In OptimizeIndividualCalls, we have strength reduced all optimizable
1901 // objc_retainBlocks to objc_retains. Thus at this point any
1902 // objc_retainBlocks that we see are not optimizable.
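// For example (illustrative IR), an optimizable
//   %0 = call i8* @objc_retainBlock(i8* %blk)
// would already have been rewritten by OptimizeIndividualCalls into
//   %0 = call i8* @objc_retain(i8* %blk)
// so any objc_retainBlock reaching this point is treated conservatively.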
1903 break;
1904 case IC_Retain:
1905 case IC_RetainRV: {
1906 Arg = GetObjCArg(Inst);
1908 PtrState &S = MyStates.getPtrBottomUpState(Arg);
1909 S.SetKnownPositiveRefCount();
1911 Sequence OldSeq = S.GetSeq();
1912 switch (OldSeq) {
1913 case S_Stop:
1914 case S_Release:
1915 case S_MovableRelease:
1916 case S_Use:
1917 // If OldSeq is not S_Use or OldSeq is S_Use and we are tracking an
1918 // imprecise release, clear our reverse insertion points.
1919 if (OldSeq != S_Use || S.IsTrackingImpreciseReleases())
1920 S.ClearReverseInsertPts();
1921 // FALL THROUGH
1922 case S_CanRelease:
1923 // Don't do retain+release tracking for IC_RetainRV, because it's
1924 // better to let it remain as the first instruction after a call.
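// For example (illustrative IR), in the pattern
//   %call = call i8* @something()
//   %0 = call i8* @objc_retainAutoreleasedReturnValue(i8* %call)
// the retainRV relies on staying adjacent to the call, so we leave it in
// place rather than pairing it here.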
1925 if (Class != IC_RetainRV)
1926 Retains[Inst] = S.GetRRInfo();
1927 S.ClearSequenceProgress();
1928 break;
1929 case S_None:
1930 break;
1931 case S_Retain:
1932 llvm_unreachable("bottom-up pointer in retain state!");
1933 }
1934 ANNOTATE_BOTTOMUP(Inst, Arg, OldSeq, S.GetSeq());
1935 // A retain moving bottom up can be a use.
1936 break;
1937 }
1938 case IC_AutoreleasepoolPop:
1939 // Conservatively, clear MyStates for all known pointers.
1940 MyStates.clearBottomUpPointers();
1941 return NestingDetected;
1942 case IC_AutoreleasepoolPush:
1943 case IC_None:
1944 // These are irrelevant.
1945 return NestingDetected;
1946 case IC_User:
1947 // If we have a store into an alloca of a pointer we are tracking, the
1948 // pointer has multiple owners implying that we must be more conservative.
1950 // This comes up in the context of a pointer being ``KnownSafe''. In the
1951 // presence of a block being initialized, the frontend will emit the
1952 // objc_retain on the original pointer and the release on the pointer loaded
1953 // from the alloca. The optimizer will, through the provenance analysis,
1954 // realize that the two are related, but since we only require KnownSafe in
1955 // one direction, will match the inner retain on the original pointer with
1956 // the guard release on the original pointer. This is fixed by ensuring that
1957 // in the presence of allocas we only unconditionally remove pointers if
1958 // both our retain and our release are KnownSafe.
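// A sketch of the problematic pattern (illustrative IR, names hypothetical):
//
//   %a = alloca i8*
//   store i8* %x, i8** %a
//   %y = load i8** %a            ; %y and %x share provenance
//   call i8* @objc_retain(i8* %x)
//   ...
//   call void @objc_release(i8* %y)
//
// Pairing the retain on %x with the release on %y is only sound if both
// sides are KnownSafe, hence the MultiOwnersSet bookkeeping below.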
1959 if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
1960 if (AreAnyUnderlyingObjectsAnAlloca(SI->getPointerOperand())) {
1961 BBState::ptr_iterator I = MyStates.findPtrBottomUpState(
1962 StripPointerCastsAndObjCCalls(SI->getValueOperand()));
1963 if (I != MyStates.bottom_up_ptr_end())
1964 MultiOwnersSet.insert(I->first);
1972 // Consider any other possible effects of this instruction on each
1973 // pointer being tracked.
1974 for (BBState::ptr_iterator MI = MyStates.bottom_up_ptr_begin(),
1975 ME = MyStates.bottom_up_ptr_end(); MI != ME; ++MI) {
1976 const Value *Ptr = MI->first;
1977 if (Ptr == Arg)
1978 continue; // Handled above.
1979 PtrState &S = MI->second;
1980 Sequence Seq = S.GetSeq();
1982 // Check for possible releases.
1983 if (CanAlterRefCount(Inst, Ptr, PA, Class)) {
1984 DEBUG(dbgs() << "CanAlterRefCount: Seq: " << Seq << "; " << *Ptr
1985 << "\n");
1986 S.ClearKnownPositiveRefCount();
1987 switch (Seq) {
1988 case S_Use:
1989 S.SetSeq(S_CanRelease);
1990 ANNOTATE_BOTTOMUP(Inst, Ptr, Seq, S.GetSeq());
1991 continue;
1992 case S_CanRelease:
1993 case S_Release:
1994 case S_MovableRelease:
1995 case S_Stop:
1996 case S_None:
1997 break;
1998 case S_Retain:
1999 llvm_unreachable("bottom-up pointer in retain state!");
2003 // Check for possible direct uses.
2004 switch (Seq) {
2005 case S_Release:
2006 case S_MovableRelease:
2007 if (CanUse(Inst, Ptr, PA, Class)) {
2008 DEBUG(dbgs() << "CanUse: Seq: " << Seq << "; " << *Ptr
2009 << "\n");
2010 assert(!S.HasReverseInsertPts());
2011 // If this is an invoke instruction, we're scanning it as part of
2012 // one of its successor blocks, since we can't insert code after it
2013 // in its own block, and we don't want to split critical edges.
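// E.g. (illustrative IR) for
//   %r = invoke i8* @foo() to label %normal unwind label %lpad
// the reverse insertion point must be the first insertion point of the
// successor block, never textually after the invoke in its own block.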
2014 if (isa<InvokeInst>(Inst))
2015 S.InsertReverseInsertPt(BB->getFirstInsertionPt());
2016 else
2017 S.InsertReverseInsertPt(llvm::next(BasicBlock::iterator(Inst)));
2018 S.SetSeq(S_Use);
2019 ANNOTATE_BOTTOMUP(Inst, Ptr, Seq, S_Use);
2020 } else if (Seq == S_Release && IsUser(Class)) {
2021 DEBUG(dbgs() << "PreciseReleaseUse: Seq: " << Seq << "; " << *Ptr
2022 << "\n");
2023 // Non-movable releases depend on any possible objc pointer use.
2024 S.SetSeq(S_Stop);
2025 ANNOTATE_BOTTOMUP(Inst, Ptr, S_Release, S_Stop);
2026 assert(!S.HasReverseInsertPts());
2027 // As above; handle invoke specially.
2028 if (isa<InvokeInst>(Inst))
2029 S.InsertReverseInsertPt(BB->getFirstInsertionPt());
2030 else
2031 S.InsertReverseInsertPt(llvm::next(BasicBlock::iterator(Inst)));
2035 if (CanUse(Inst, Ptr, PA, Class)) {
2036 DEBUG(dbgs() << "PreciseStopUse: Seq: " << Seq << "; " << *Ptr
2039 ANNOTATE_BOTTOMUP(Inst, Ptr, Seq, S_Use);
2047 llvm_unreachable("bottom-up pointer in retain state!");
2051 return NestingDetected;
2055 ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
2056 DenseMap<const BasicBlock *, BBState> &BBStates,
2057 MapVector<Value *, RRInfo> &Retains) {
2059 DEBUG(dbgs() << "\n== ObjCARCOpt::VisitBottomUp ==\n");
2061 bool NestingDetected = false;
2062 BBState &MyStates = BBStates[BB];
2064 // Merge the states from each successor to compute the initial state
2065 // for the current block.
2066 BBState::edge_iterator SI(MyStates.succ_begin()),
2067 SE(MyStates.succ_end());
2069 const BasicBlock *Succ = *SI;
2070 DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Succ);
2071 assert(I != BBStates.end());
2072 MyStates.InitFromSucc(I->second);
2074 for (; SI != SE; ++SI) {
2075 Succ = *SI;
2076 I = BBStates.find(Succ);
2077 assert(I != BBStates.end());
2078 MyStates.MergeSucc(I->second);
2082 // If ARC Annotations are enabled, output the current state of pointers at the
2083 // bottom of the basic block.
2084 ANNOTATE_BOTTOMUP_BBEND(MyStates, BB);
2086 // Visit all the instructions, bottom-up.
2087 for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; --I) {
2088 Instruction *Inst = llvm::prior(I);
2090 // Invoke instructions are visited as part of their successors (below).
2091 if (isa<InvokeInst>(Inst))
2094 DEBUG(dbgs() << "Visiting " << *Inst << "\n");
2096 NestingDetected |= VisitInstructionBottomUp(Inst, BB, Retains, MyStates);
2099 // If there's a predecessor with an invoke, visit the invoke as if it were
2100 // part of this block, since we can't insert code after an invoke in its own
2101 // block, and we don't want to split critical edges.
2102 for (BBState::edge_iterator PI(MyStates.pred_begin()),
2103 PE(MyStates.pred_end()); PI != PE; ++PI) {
2104 BasicBlock *Pred = *PI;
2105 if (InvokeInst *II = dyn_cast<InvokeInst>(&Pred->back()))
2106 NestingDetected |= VisitInstructionBottomUp(II, BB, Retains, MyStates);
2109 // If ARC Annotations are enabled, output the current state of pointers at the
2110 // top of the basic block.
2111 ANNOTATE_BOTTOMUP_BBSTART(MyStates, BB);
2113 return NestingDetected;
2117 ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst,
2118 DenseMap<Value *, RRInfo> &Releases,
2119 BBState &MyStates) {
2120 bool NestingDetected = false;
2121 InstructionClass Class = GetInstructionClass(Inst);
2122 const Value *Arg = 0;
2124 switch (Class) {
2125 case IC_RetainBlock:
2126 // In OptimizeIndividualCalls, we have strength reduced all optimizable
2127 // objc_retainBlocks to objc_retains. Thus at this point any
2128 // objc_retainBlocks that we see are not optimizable.
2129 break;
2130 case IC_Retain:
2131 case IC_RetainRV: {
2132 Arg = GetObjCArg(Inst);
2134 PtrState &S = MyStates.getPtrTopDownState(Arg);
2136 // Don't do retain+release tracking for IC_RetainRV, because it's
2137 // better to let it remain as the first instruction after a call.
2138 if (Class != IC_RetainRV) {
2139 // Check for two retains in a row on the same pointer. If we see
2140 // them, make a note, and we'll circle back to revisit it after we've
2141 // hopefully eliminated the second retain, which may allow us to
2142 // eliminate the first retain too.
2143 // Theoretically we could implement removal of nested retain+release
2144 // pairs by making PtrState hold a stack of states, but this is
2145 // simple and avoids adding overhead for the non-nested case.
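// For instance (illustrative IR), top-down we may reach a retain while S is
// already in S_Retain:
//
//   %0 = call i8* @objc_retain(i8* %x)
//   ...
//   %1 = call i8* @objc_retain(i8* %x)
//
// runOnFunction re-runs OptimizeSequences while nesting is detected, giving
// the outer pair another chance once the inner one is eliminated.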
2146 if (S.GetSeq() == S_Retain)
2147 NestingDetected = true;
2149 ANNOTATE_TOPDOWN(Inst, Arg, S.GetSeq(), S_Retain);
2150 S.ResetSequenceProgress(S_Retain);
2151 S.SetKnownSafe(S.HasKnownPositiveRefCount());
2152 S.InsertCall(Inst);
2153 }
2155 S.SetKnownPositiveRefCount();
2157 // A retain can be a potential use; proceed to the generic checking
2158 // code below.
2162 Arg = GetObjCArg(Inst);
2164 PtrState &S = MyStates.getPtrTopDownState(Arg);
2165 S.ClearKnownPositiveRefCount();
2167 Sequence OldSeq = S.GetSeq();
2169 MDNode *ReleaseMetadata = Inst->getMetadata(ImpreciseReleaseMDKind);
2171 switch (OldSeq) {
2172 case S_Retain:
2173 case S_CanRelease:
2174 if (OldSeq == S_Retain || ReleaseMetadata != 0)
2175 S.ClearReverseInsertPts();
2176 // FALL THROUGH
2177 case S_Use:
2178 S.SetReleaseMetadata(ReleaseMetadata);
2179 S.SetTailCallRelease(cast<CallInst>(Inst)->isTailCall());
2180 Releases[Inst] = S.GetRRInfo();
2181 ANNOTATE_TOPDOWN(Inst, Arg, S.GetSeq(), S_None);
2182 S.ClearSequenceProgress();
2183 break;
2184 case S_None:
2185 break;
2186 case S_Stop:
2187 case S_Release:
2188 case S_MovableRelease:
2189 llvm_unreachable("top-down pointer in release state!");
2193 case IC_AutoreleasepoolPop:
2194 // Conservatively, clear MyStates for all known pointers.
2195 MyStates.clearTopDownPointers();
2196 return NestingDetected;
2197 case IC_AutoreleasepoolPush:
2199 // These are irrelevant.
2200 return NestingDetected;
2205 // Consider any other possible effects of this instruction on each
2206 // pointer being tracked.
2207 for (BBState::ptr_iterator MI = MyStates.top_down_ptr_begin(),
2208 ME = MyStates.top_down_ptr_end(); MI != ME; ++MI) {
2209 const Value *Ptr = MI->first;
2210 if (Ptr == Arg)
2211 continue; // Handled above.
2212 PtrState &S = MI->second;
2213 Sequence Seq = S.GetSeq();
2215 // Check for possible releases.
2216 if (CanAlterRefCount(Inst, Ptr, PA, Class)) {
2217 DEBUG(dbgs() << "CanAlterRefCount: Seq: " << Seq << "; " << *Ptr
2218 << "\n");
2219 S.ClearKnownPositiveRefCount();
2220 switch (Seq) {
2221 case S_Retain:
2222 S.SetSeq(S_CanRelease);
2223 ANNOTATE_TOPDOWN(Inst, Ptr, Seq, S_CanRelease);
2224 assert(!S.HasReverseInsertPts());
2225 S.InsertReverseInsertPt(Inst);
2227 // One call can't cause a transition from S_Retain to S_CanRelease
2228 // and S_CanRelease to S_Use. If we've made the first transition,
2229 // we're done.
2230 break;
2231 case S_Use:
2232 case S_CanRelease:
2233 case S_None:
2234 break;
2235 case S_Stop:
2236 case S_Release:
2237 case S_MovableRelease:
2238 llvm_unreachable("top-down pointer in release state!");
2242 // Check for possible direct uses.
2243 switch (Seq) {
2244 case S_CanRelease:
2245 if (CanUse(Inst, Ptr, PA, Class)) {
2246 DEBUG(dbgs() << "CanUse: Seq: " << Seq << "; " << *Ptr
2247 << "\n");
2248 S.SetSeq(S_Use);
2249 ANNOTATE_TOPDOWN(Inst, Ptr, Seq, S_Use);
2258 case S_MovableRelease:
2259 llvm_unreachable("top-down pointer in release state!");
2263 return NestingDetected;
2267 ObjCARCOpt::VisitTopDown(BasicBlock *BB,
2268 DenseMap<const BasicBlock *, BBState> &BBStates,
2269 DenseMap<Value *, RRInfo> &Releases) {
2270 DEBUG(dbgs() << "\n== ObjCARCOpt::VisitTopDown ==\n");
2271 bool NestingDetected = false;
2272 BBState &MyStates = BBStates[BB];
2274 // Merge the states from each predecessor to compute the initial state
2275 // for the current block.
2276 BBState::edge_iterator PI(MyStates.pred_begin()),
2277 PE(MyStates.pred_end());
2279 const BasicBlock *Pred = *PI;
2280 DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Pred);
2281 assert(I != BBStates.end());
2282 MyStates.InitFromPred(I->second);
2284 for (; PI != PE; ++PI) {
2285 Pred = *PI;
2286 I = BBStates.find(Pred);
2287 assert(I != BBStates.end());
2288 MyStates.MergePred(I->second);
2292 // If ARC Annotations are enabled, output the current state of pointers at the
2293 // top of the basic block.
2294 ANNOTATE_TOPDOWN_BBSTART(MyStates, BB);
2296 // Visit all the instructions, top-down.
2297 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
2298 Instruction *Inst = I;
2300 DEBUG(dbgs() << "Visiting " << *Inst << "\n");
2302 NestingDetected |= VisitInstructionTopDown(Inst, Releases, MyStates);
2305 // If ARC Annotations are enabled, output the current state of pointers at the
2306 // bottom of the basic block.
2307 ANNOTATE_TOPDOWN_BBEND(MyStates, BB);
2309 #ifdef ARC_ANNOTATIONS
2310 if (!(EnableARCAnnotations && DisableCheckForCFGHazards))
2311 #endif // ARC_ANNOTATIONS
2312 CheckForCFGHazards(BB, BBStates, MyStates);
2313 return NestingDetected;
2317 ComputePostOrders(Function &F,
2318 SmallVectorImpl<BasicBlock *> &PostOrder,
2319 SmallVectorImpl<BasicBlock *> &ReverseCFGPostOrder,
2320 unsigned NoObjCARCExceptionsMDKind,
2321 DenseMap<const BasicBlock *, BBState> &BBStates) {
2322 /// The visited set, for doing DFS walks.
2323 SmallPtrSet<BasicBlock *, 16> Visited;
2325 // Do DFS, computing the PostOrder.
2326 SmallPtrSet<BasicBlock *, 16> OnStack;
2327 SmallVector<std::pair<BasicBlock *, succ_iterator>, 16> SuccStack;
2329 // Functions always have exactly one entry block, and we don't have
2330 // any other block that we treat like an entry block.
2331 BasicBlock *EntryBB = &F.getEntryBlock();
2332 BBState &MyStates = BBStates[EntryBB];
2333 MyStates.SetAsEntry();
2334 TerminatorInst *EntryTI = cast<TerminatorInst>(&EntryBB->back());
2335 SuccStack.push_back(std::make_pair(EntryBB, succ_iterator(EntryTI)));
2336 Visited.insert(EntryBB);
2337 OnStack.insert(EntryBB);
2340 BasicBlock *CurrBB = SuccStack.back().first;
2341 TerminatorInst *TI = cast<TerminatorInst>(&CurrBB->back());
2342 succ_iterator SE(TI, false);
2344 while (SuccStack.back().second != SE) {
2345 BasicBlock *SuccBB = *SuccStack.back().second++;
2346 if (Visited.insert(SuccBB)) {
2347 TerminatorInst *TI = cast<TerminatorInst>(&SuccBB->back());
2348 SuccStack.push_back(std::make_pair(SuccBB, succ_iterator(TI)));
2349 BBStates[CurrBB].addSucc(SuccBB);
2350 BBState &SuccStates = BBStates[SuccBB];
2351 SuccStates.addPred(CurrBB);
2352 OnStack.insert(SuccBB);
2356 if (!OnStack.count(SuccBB)) {
2357 BBStates[CurrBB].addSucc(SuccBB);
2358 BBStates[SuccBB].addPred(CurrBB);
2361 OnStack.erase(CurrBB);
2362 PostOrder.push_back(CurrBB);
2363 SuccStack.pop_back();
2364 } while (!SuccStack.empty());
2368 // Do reverse-CFG DFS, computing the reverse-CFG PostOrder.
2369 // Functions may have many exits, and there are also blocks which we treat
2370 // as exits due to ignored edges.
2371 SmallVector<std::pair<BasicBlock *, BBState::edge_iterator>, 16> PredStack;
2372 for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
2373 BasicBlock *ExitBB = I;
2374 BBState &MyStates = BBStates[ExitBB];
2375 if (!MyStates.isExit())
2378 MyStates.SetAsExit();
2380 PredStack.push_back(std::make_pair(ExitBB, MyStates.pred_begin()));
2381 Visited.insert(ExitBB);
2382 while (!PredStack.empty()) {
2383 reverse_dfs_next_succ:
2384 BBState::edge_iterator PE = BBStates[PredStack.back().first].pred_end();
2385 while (PredStack.back().second != PE) {
2386 BasicBlock *BB = *PredStack.back().second++;
2387 if (Visited.insert(BB)) {
2388 PredStack.push_back(std::make_pair(BB, BBStates[BB].pred_begin()));
2389 goto reverse_dfs_next_succ;
2392 ReverseCFGPostOrder.push_back(PredStack.pop_back_val().first);
2397 // Visit the function both top-down and bottom-up.
2399 ObjCARCOpt::Visit(Function &F,
2400 DenseMap<const BasicBlock *, BBState> &BBStates,
2401 MapVector<Value *, RRInfo> &Retains,
2402 DenseMap<Value *, RRInfo> &Releases) {
2404 // Use reverse-postorder traversals, because we magically know that loops
2405 // will be well behaved, i.e. they won't repeatedly call retain on a single
2406 // pointer without doing a release. We can't use the ReversePostOrderTraversal
2407 // class here because we want the reverse-CFG postorder to consider each
2408 // function exit point, and we want to ignore selected cycle edges.
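// A sketch of why the direction matters (illustrative CFG):
//
//   entry -> loop -> exit   (with a back edge loop -> loop)
//
// Top-down reverse-postorder visits entry, then loop, then exit; bottom-up
// uses reverse-postorder on the reverse CFG, visiting exit, then loop, then
// entry, so each pass sees state flow in its own direction of analysis.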
2409 SmallVector<BasicBlock *, 16> PostOrder;
2410 SmallVector<BasicBlock *, 16> ReverseCFGPostOrder;
2411 ComputePostOrders(F, PostOrder, ReverseCFGPostOrder,
2412 NoObjCARCExceptionsMDKind,
2413 BBStates);
2415 // Use reverse-postorder on the reverse CFG for bottom-up.
2416 bool BottomUpNestingDetected = false;
2417 for (SmallVectorImpl<BasicBlock *>::const_reverse_iterator I =
2418 ReverseCFGPostOrder.rbegin(), E = ReverseCFGPostOrder.rend();
2419 I != E; ++I)
2420 BottomUpNestingDetected |= VisitBottomUp(*I, BBStates, Retains);
2422 // Use reverse-postorder for top-down.
2423 bool TopDownNestingDetected = false;
2424 for (SmallVectorImpl<BasicBlock *>::const_reverse_iterator I =
2425 PostOrder.rbegin(), E = PostOrder.rend();
2426 I != E; ++I)
2427 TopDownNestingDetected |= VisitTopDown(*I, BBStates, Releases);
2429 return TopDownNestingDetected && BottomUpNestingDetected;
2432 /// Move the calls in RetainsToMove and ReleasesToMove.
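/// For example (an illustrative sketch, not taken from a test case), given a
/// pair whose computed insertion points hug the only use of %x:
///
///   %0 = call i8* @objc_retain(i8* %x)
///   ; ... code not touching %x ...
///   call void @use(i8* %x)
///   call void @objc_release(i8* %x)
///
/// a new retain is created at each point in ReleasesToMove.ReverseInsertPts
/// (just before the use) and a new release at each point in
/// RetainsToMove.ReverseInsertPts (just after it); the original pair is
/// queued on DeadInsts.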
2433 void ObjCARCOpt::MoveCalls(Value *Arg,
2434 RRInfo &RetainsToMove,
2435 RRInfo &ReleasesToMove,
2436 MapVector<Value *, RRInfo> &Retains,
2437 DenseMap<Value *, RRInfo> &Releases,
2438 SmallVectorImpl<Instruction *> &DeadInsts,
2440 Type *ArgTy = Arg->getType();
2441 Type *ParamTy = PointerType::getUnqual(Type::getInt8Ty(ArgTy->getContext()));
2443 DEBUG(dbgs() << "== ObjCARCOpt::MoveCalls ==\n");
2445 // Insert the new retain and release calls.
2446 for (SmallPtrSet<Instruction *, 2>::const_iterator
2447 PI = ReleasesToMove.ReverseInsertPts.begin(),
2448 PE = ReleasesToMove.ReverseInsertPts.end(); PI != PE; ++PI) {
2449 Instruction *InsertPt = *PI;
2450 Value *MyArg = ArgTy == ParamTy ? Arg :
2451 new BitCastInst(Arg, ParamTy, "", InsertPt);
2452 Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Retain);
2453 CallInst *Call = CallInst::Create(Decl, MyArg, "", InsertPt);
2454 Call->setDoesNotThrow();
2455 Call->setTailCall();
2457 DEBUG(dbgs() << "Inserting new Retain: " << *Call << "\n"
2458 "At insertion point: " << *InsertPt << "\n");
2460 for (SmallPtrSet<Instruction *, 2>::const_iterator
2461 PI = RetainsToMove.ReverseInsertPts.begin(),
2462 PE = RetainsToMove.ReverseInsertPts.end(); PI != PE; ++PI) {
2463 Instruction *InsertPt = *PI;
2464 Value *MyArg = ArgTy == ParamTy ? Arg :
2465 new BitCastInst(Arg, ParamTy, "", InsertPt);
2466 Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Release);
2467 CallInst *Call = CallInst::Create(Decl, MyArg, "", InsertPt);
2468 // Attach a clang.imprecise_release metadata tag, if appropriate.
2469 if (MDNode *M = ReleasesToMove.ReleaseMetadata)
2470 Call->setMetadata(ImpreciseReleaseMDKind, M);
2471 Call->setDoesNotThrow();
2472 if (ReleasesToMove.IsTailCallRelease)
2473 Call->setTailCall();
2475 DEBUG(dbgs() << "Inserting new Release: " << *Call << "\n"
2476 "At insertion point: " << *InsertPt << "\n");
2479 // Delete the original retain and release calls.
2480 for (SmallPtrSet<Instruction *, 2>::const_iterator
2481 AI = RetainsToMove.Calls.begin(),
2482 AE = RetainsToMove.Calls.end(); AI != AE; ++AI) {
2483 Instruction *OrigRetain = *AI;
2484 Retains.blot(OrigRetain);
2485 DeadInsts.push_back(OrigRetain);
2486 DEBUG(dbgs() << "Deleting retain: " << *OrigRetain << "\n");
2488 for (SmallPtrSet<Instruction *, 2>::const_iterator
2489 AI = ReleasesToMove.Calls.begin(),
2490 AE = ReleasesToMove.Calls.end(); AI != AE; ++AI) {
2491 Instruction *OrigRelease = *AI;
2492 Releases.erase(OrigRelease);
2493 DeadInsts.push_back(OrigRelease);
2494 DEBUG(dbgs() << "Deleting release: " << *OrigRelease << "\n");
2500 ObjCARCOpt::ConnectTDBUTraversals(DenseMap<const BasicBlock *, BBState>
2501 &BBStates,
2502 MapVector<Value *, RRInfo> &Retains,
2503 DenseMap<Value *, RRInfo> &Releases,
2504 Module *M,
2505 SmallVectorImpl<Instruction *> &NewRetains,
2506 SmallVectorImpl<Instruction *> &NewReleases,
2507 SmallVectorImpl<Instruction *> &DeadInsts,
2508 RRInfo &RetainsToMove,
2509 RRInfo &ReleasesToMove,
2510 Value *Arg,
2511 bool KnownSafe,
2512 bool &AnyPairsCompletelyEliminated) {
2513 // If a pair happens in a region where it is known that the reference count
2514 // is already incremented, we can similarly ignore possible decrements unless
2515 // we are dealing with a retainable object with multiple provenance sources.
2516 bool KnownSafeTD = true, KnownSafeBU = true;
2517 bool MultipleOwners = false;
2518 bool CFGHazardAfflicted = false;
2520 // Connect the dots between the top-down-collected RetainsToMove and
2521 // bottom-up-collected ReleasesToMove to form sets of related calls.
2522 // This is an iterative process so that we connect multiple releases
2523 // to multiple retains if needed.
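// E.g. (illustrative CFG) a release in a join block may pair with retains in
// both predecessors:
//
//   then:  %0 = call i8* @objc_retain(i8* %x)
//   else:  %1 = call i8* @objc_retain(i8* %x)
//   join:  call void @objc_release(i8* %x)
//
// so following one retain to its release can pull in further retains, and
// vice versa, until the two sets stop growing.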
2524 unsigned OldDelta = 0;
2525 unsigned NewDelta = 0;
2526 unsigned OldCount = 0;
2527 unsigned NewCount = 0;
2528 bool FirstRelease = true;
2530 for (SmallVectorImpl<Instruction *>::const_iterator
2531 NI = NewRetains.begin(), NE = NewRetains.end(); NI != NE; ++NI) {
2532 Instruction *NewRetain = *NI;
2533 MapVector<Value *, RRInfo>::const_iterator It = Retains.find(NewRetain);
2534 assert(It != Retains.end());
2535 const RRInfo &NewRetainRRI = It->second;
2536 KnownSafeTD &= NewRetainRRI.KnownSafe;
2537 MultipleOwners =
2538 MultipleOwners || MultiOwnersSet.count(GetObjCArg(NewRetain));
2539 for (SmallPtrSet<Instruction *, 2>::const_iterator
2540 LI = NewRetainRRI.Calls.begin(),
2541 LE = NewRetainRRI.Calls.end(); LI != LE; ++LI) {
2542 Instruction *NewRetainRelease = *LI;
2543 DenseMap<Value *, RRInfo>::const_iterator Jt =
2544 Releases.find(NewRetainRelease);
2545 if (Jt == Releases.end())
2546 return false;
2547 const RRInfo &NewRetainReleaseRRI = Jt->second;
2548 assert(NewRetainReleaseRRI.Calls.count(NewRetain));
2549 if (ReleasesToMove.Calls.insert(NewRetainRelease)) {
2551 // If we overflow when we compute the path count, don't remove/move
2552 // anything.
2553 const BBState &NRRBBState = BBStates[NewRetainRelease->getParent()];
2554 unsigned PathCount = BBState::OverflowOccurredValue;
2555 if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
2556 return false;
2557 assert(PathCount != BBState::OverflowOccurredValue &&
2558 "PathCount at this point can not be "
2559 "OverflowOccurredValue.");
2560 OldDelta -= PathCount;
2562 // Merge the ReleaseMetadata and IsTailCallRelease values.
2563 if (FirstRelease) {
2564 ReleasesToMove.ReleaseMetadata =
2565 NewRetainReleaseRRI.ReleaseMetadata;
2566 ReleasesToMove.IsTailCallRelease =
2567 NewRetainReleaseRRI.IsTailCallRelease;
2568 FirstRelease = false;
2569 } else {
2570 if (ReleasesToMove.ReleaseMetadata !=
2571 NewRetainReleaseRRI.ReleaseMetadata)
2572 ReleasesToMove.ReleaseMetadata = 0;
2573 if (ReleasesToMove.IsTailCallRelease !=
2574 NewRetainReleaseRRI.IsTailCallRelease)
2575 ReleasesToMove.IsTailCallRelease = false;
2578 // Collect the optimal insertion points.
2579 if (!KnownSafe)
2580 for (SmallPtrSet<Instruction *, 2>::const_iterator
2581 RI = NewRetainReleaseRRI.ReverseInsertPts.begin(),
2582 RE = NewRetainReleaseRRI.ReverseInsertPts.end();
2583 RI != RE; ++RI) {
2584 Instruction *RIP = *RI;
2585 if (ReleasesToMove.ReverseInsertPts.insert(RIP)) {
2586 // If we overflow when we compute the path count, don't
2587 // remove/move anything.
2588 const BBState &RIPBBState = BBStates[RIP->getParent()];
2589 PathCount = BBState::OverflowOccurredValue;
2590 if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
2591 return false;
2592 assert(PathCount != BBState::OverflowOccurredValue &&
2593 "PathCount at this point can not be "
2594 "OverflowOccurredValue.");
2595 NewDelta -= PathCount;
2598 NewReleases.push_back(NewRetainRelease);
2603 if (NewReleases.empty()) break;
2605 // Back the other way.
2606 for (SmallVectorImpl<Instruction *>::const_iterator
2607 NI = NewReleases.begin(), NE = NewReleases.end(); NI != NE; ++NI) {
2608 Instruction *NewRelease = *NI;
2609 DenseMap<Value *, RRInfo>::const_iterator It =
2610 Releases.find(NewRelease);
2611 assert(It != Releases.end());
2612 const RRInfo &NewReleaseRRI = It->second;
2613 KnownSafeBU &= NewReleaseRRI.KnownSafe;
2614 CFGHazardAfflicted |= NewReleaseRRI.CFGHazardAfflicted;
2615 for (SmallPtrSet<Instruction *, 2>::const_iterator
2616 LI = NewReleaseRRI.Calls.begin(),
2617 LE = NewReleaseRRI.Calls.end(); LI != LE; ++LI) {
2618 Instruction *NewReleaseRetain = *LI;
2619 MapVector<Value *, RRInfo>::const_iterator Jt =
2620 Retains.find(NewReleaseRetain);
2621 if (Jt == Retains.end())
2622 return false;
2623 const RRInfo &NewReleaseRetainRRI = Jt->second;
2624 assert(NewReleaseRetainRRI.Calls.count(NewRelease));
2625 if (RetainsToMove.Calls.insert(NewReleaseRetain)) {
2627 // If we overflow when we compute the path count, don't remove/move
2628 // anything.
2629 const BBState &NRRBBState = BBStates[NewReleaseRetain->getParent()];
2630 unsigned PathCount = BBState::OverflowOccurredValue;
2631 if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
2632 return false;
2633 assert(PathCount != BBState::OverflowOccurredValue &&
2634 "PathCount at this point can not be "
2635 "OverflowOccurredValue.");
2636 OldDelta += PathCount;
2637 OldCount += PathCount;
2639 // Collect the optimal insertion points.
2640 if (!KnownSafe)
2641 for (SmallPtrSet<Instruction *, 2>::const_iterator
2642 RI = NewReleaseRetainRRI.ReverseInsertPts.begin(),
2643 RE = NewReleaseRetainRRI.ReverseInsertPts.end();
2644 RI != RE; ++RI) {
2645 Instruction *RIP = *RI;
2646 if (RetainsToMove.ReverseInsertPts.insert(RIP)) {
2647 // If we overflow when we compute the path count, don't
2648 // remove/move anything.
2649 const BBState &RIPBBState = BBStates[RIP->getParent()];
2651 PathCount = BBState::OverflowOccurredValue;
2652 if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
2653 return false;
2654 assert(PathCount != BBState::OverflowOccurredValue &&
2655 "PathCount at this point can not be "
2656 "OverflowOccurredValue.");
2657 NewDelta += PathCount;
2658 NewCount += PathCount;
2661 NewRetains.push_back(NewReleaseRetain);
2665 NewReleases.clear();
2666 if (NewRetains.empty()) break;
2669 // If the pointer is known incremented in 1 direction and we do not have
2670 // MultipleOwners, we can safely remove the retain/releases. Otherwise we need
2671 // to be known safe in both directions.
2672 bool UnconditionallySafe = (KnownSafeTD && KnownSafeBU) ||
2673 ((KnownSafeTD || KnownSafeBU) && !MultipleOwners);
2674 if (UnconditionallySafe) {
2675 RetainsToMove.ReverseInsertPts.clear();
2676 ReleasesToMove.ReverseInsertPts.clear();
2679 // Determine whether the new insertion points we computed preserve the
2680 // balance of retain and release calls through the program.
2681 // TODO: If the fully aggressive solution isn't valid, try to find a
2682 // less aggressive solution which is.
2683 if (NewDelta != 0)
2684 return false;
2685 }
2686 // At this point, we are not going to remove any RR pairs, but we still are
2687 // able to move RR pairs. If one of our pointers is afflicted with
2688 // CFGHazards, we cannot perform such code motion so exit early.
2689 const bool WillPerformCodeMotion = RetainsToMove.ReverseInsertPts.size() ||
2690 ReleasesToMove.ReverseInsertPts.size();
2691 if (CFGHazardAfflicted && WillPerformCodeMotion)
2692 return false;
2695 // Determine whether the original call points are balanced in the retain and
2696 // release calls through the program. If not, conservatively don't touch
2697 // them.
2698 // TODO: It's theoretically possible to do code motion in this case, as
2699 // long as the existing imbalances are maintained.
2700 if (OldDelta != 0)
2701 return false;
2703 #ifdef ARC_ANNOTATIONS
2704 // Do not move calls if ARC annotations are requested.
2705 if (EnableARCAnnotations)
2706 return false;
2707 #endif // ARC_ANNOTATIONS
2710 assert(OldCount != 0 && "Unreachable code?");
2711 NumRRs += OldCount - NewCount;
2712 // Set to true if we completely removed any RR pairs.
2713 AnyPairsCompletelyEliminated = NewCount == 0;
2715 // We can move calls!
2716 return true;
2717 }
2719 /// Identify pairings between the retains and releases, and delete and/or move
2720 /// them.
2721 bool
2722 ObjCARCOpt::PerformCodePlacement(DenseMap<const BasicBlock *, BBState>
2723 &BBStates,
2724 MapVector<Value *, RRInfo> &Retains,
2725 DenseMap<Value *, RRInfo> &Releases,
2726 Module *M) {
2727 DEBUG(dbgs() << "\n== ObjCARCOpt::PerformCodePlacement ==\n");
2729 bool AnyPairsCompletelyEliminated = false;
2730 RRInfo RetainsToMove;
2731 RRInfo ReleasesToMove;
2732 SmallVector<Instruction *, 4> NewRetains;
2733 SmallVector<Instruction *, 4> NewReleases;
2734 SmallVector<Instruction *, 8> DeadInsts;
2736 // Visit each retain.
2737 for (MapVector<Value *, RRInfo>::const_iterator I = Retains.begin(),
2738 E = Retains.end(); I != E; ++I) {
2739 Value *V = I->first;
2740 if (!V) continue; // blotted
2742 Instruction *Retain = cast<Instruction>(V);
2744 DEBUG(dbgs() << "Visiting: " << *Retain << "\n");
2746 Value *Arg = GetObjCArg(Retain);
2748 // If the object being released is in static or stack storage, we know it's
2749 // not being managed by ObjC reference counting, so we can delete pairs
2750 // regardless of what possible decrements or uses lie between them.
2751 bool KnownSafe = isa<Constant>(Arg) || isa<AllocaInst>(Arg);
2753 // A constant pointer can't be pointing to an object on the heap. It may
2754 // be reference-counted, but it won't be deleted.
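// For example (illustrative IR), a pointer loaded from a constant global:
//
//   @g = constant i8* ...
//   %p = load i8** @g
//   %0 = call i8* @objc_retain(i8* %p)
//
// may be reference-counted, but can never be deallocated out from under us,
// so it can be treated as KnownSafe as well.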
2755 if (const LoadInst *LI = dyn_cast<LoadInst>(Arg))
2756 if (const GlobalVariable *GV =
2757 dyn_cast<GlobalVariable>(
2758 StripPointerCastsAndObjCCalls(LI->getPointerOperand())))
2759 if (GV->isConstant())
2762 // Connect the dots between the top-down-collected RetainsToMove and
2763 // bottom-up-collected ReleasesToMove to form sets of related calls.
2764 NewRetains.push_back(Retain);
2765 bool PerformMoveCalls =
2766 ConnectTDBUTraversals(BBStates, Retains, Releases, M, NewRetains,
2767 NewReleases, DeadInsts, RetainsToMove,
2768 ReleasesToMove, Arg, KnownSafe,
2769 AnyPairsCompletelyEliminated);
2771 if (PerformMoveCalls) {
2772 // Ok, everything checks out and we're all set. Let's move/delete some
2773 // code!
2774 MoveCalls(Arg, RetainsToMove, ReleasesToMove,
2775 Retains, Releases, DeadInsts, M);
2778 // Clean up state for next retain.
2779 NewReleases.clear();
2780 NewRetains.clear();
2781 RetainsToMove.clear();
2782 ReleasesToMove.clear();
2785 // Now that we're done moving everything, we can delete the newly dead
2786 // instructions, as we no longer need them as insert points.
2787 while (!DeadInsts.empty())
2788 EraseInstruction(DeadInsts.pop_back_val());
2790 return AnyPairsCompletelyEliminated;
2793 /// Weak pointer optimizations.
2794 void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
2795 DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeWeakCalls ==\n");
2797 // First, do memdep-style RLE and S2L optimizations. We can't use memdep
2798 // itself because it uses AliasAnalysis and we need to do provenance
2799 // queries instead.
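// For example (illustrative IR), redundant load elimination (RLE) and
// store-to-load forwarding (S2L) on weak variables:
//
//   %1 = call i8* @objc_loadWeak(i8** %w)
//   %2 = call i8* @objc_loadWeak(i8** %w)    ; RLE: replaceable by %1
//
//   call i8* @objc_storeWeak(i8** %w, i8* %v)
//   %3 = call i8* @objc_loadWeak(i8** %w)    ; S2L: replaceable by %v
//
// assuming nothing in between can modify %w.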
2800 for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
2801 Instruction *Inst = &*I++;
2803 DEBUG(dbgs() << "Visiting: " << *Inst << "\n");
2805 InstructionClass Class = GetBasicInstructionClass(Inst);
2806 if (Class != IC_LoadWeak && Class != IC_LoadWeakRetained)
2809 // Delete objc_loadWeak calls with no users.
2810 if (Class == IC_LoadWeak && Inst->use_empty()) {
2811 Inst->eraseFromParent();
2815 // TODO: For now, just look for an earlier available version of this value
2816 // within the same block. Theoretically, we could do memdep-style non-local
2817 // analysis too, but that would want caching. A better approach would be to
2818 // use the technique that EarlyCSE uses.
2819 inst_iterator Current = llvm::prior(I);
2820 BasicBlock *CurrentBB = Current.getBasicBlockIterator();
2821 for (BasicBlock::iterator B = CurrentBB->begin(),
2822 J = Current.getInstructionIterator();
2824 Instruction *EarlierInst = &*llvm::prior(J);
2825 InstructionClass EarlierClass = GetInstructionClass(EarlierInst);
2826 switch (EarlierClass) {
2828 case IC_LoadWeakRetained: {
2829 // If this is loading from the same pointer, replace this load's value
2830 // with that one.
2831 CallInst *Call = cast<CallInst>(Inst);
2832 CallInst *EarlierCall = cast<CallInst>(EarlierInst);
2833 Value *Arg = Call->getArgOperand(0);
2834 Value *EarlierArg = EarlierCall->getArgOperand(0);
2835 switch (PA.getAA()->alias(Arg, EarlierArg)) {
2836 case AliasAnalysis::MustAlias:
2838 // If the load has a builtin retain, insert a plain retain for it.
2839 if (Class == IC_LoadWeakRetained) {
2840 Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Retain);
2841 CallInst *CI = CallInst::Create(Decl, EarlierCall, "", Call);
2844 // Zap the fully redundant load.
2845 Call->replaceAllUsesWith(EarlierCall);
2846 Call->eraseFromParent();
2848 case AliasAnalysis::MayAlias:
2849 case AliasAnalysis::PartialAlias:
2851 case AliasAnalysis::NoAlias:
2858 // If this is storing to the same pointer and has the same size etc.,
2859 // replace this load's value with the stored value.
2860 CallInst *Call = cast<CallInst>(Inst);
2861 CallInst *EarlierCall = cast<CallInst>(EarlierInst);
2862 Value *Arg = Call->getArgOperand(0);
2863 Value *EarlierArg = EarlierCall->getArgOperand(0);
2864 switch (PA.getAA()->alias(Arg, EarlierArg)) {
2865 case AliasAnalysis::MustAlias:
2867 // If the load has a builtin retain, insert a plain retain for it.
2868 if (Class == IC_LoadWeakRetained) {
2869 Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Retain);
2870 CallInst *CI = CallInst::Create(Decl, EarlierCall, "", Call);
2873 // Zap the fully redundant load.
2874 Call->replaceAllUsesWith(EarlierCall->getArgOperand(1));
2875 Call->eraseFromParent();
2877 case AliasAnalysis::MayAlias:
2878 case AliasAnalysis::PartialAlias:
2880 case AliasAnalysis::NoAlias:
2887 // TODO: Grab the copied value.
2889 case IC_AutoreleasepoolPush:
2891 case IC_IntrinsicUser:
2893 // Weak pointers are only modified through the weak entry points
2894 // (and arbitrary calls, which could call the weak entry points).
2897 // Anything else could modify the weak pointer.
2904 // Then, for each destroyWeak with an alloca operand, check to see if
2905 // the alloca and all its users can be zapped.
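// For example (illustrative IR), a weak variable that lives entirely on the
// stack:
//
//   %w = alloca i8*
//   call i8* @objc_initWeak(i8** %w, i8* %x)
//   ; ... only weak entry points touch %w ...
//   call void @objc_destroyWeak(i8** %w)
//
// can be deleted wholesale, provided every user of %w is one of the weak
// calls checked below.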
2906 for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
2907 Instruction *Inst = &*I++;
2908 InstructionClass Class = GetBasicInstructionClass(Inst);
2909 if (Class != IC_DestroyWeak)
2912 CallInst *Call = cast<CallInst>(Inst);
2913 Value *Arg = Call->getArgOperand(0);
2914 if (AllocaInst *Alloca = dyn_cast<AllocaInst>(Arg)) {
2915 for (Value::use_iterator UI = Alloca->use_begin(),
2916 UE = Alloca->use_end(); UI != UE; ++UI) {
2917 const Instruction *UserInst = cast<Instruction>(*UI);
2918 switch (GetBasicInstructionClass(UserInst)) {
2921 case IC_DestroyWeak:
2928 for (Value::use_iterator UI = Alloca->use_begin(),
2929 UE = Alloca->use_end(); UI != UE; ) {
2930 CallInst *UserInst = cast<CallInst>(*UI++);
2931 switch (GetBasicInstructionClass(UserInst)) {
2934 // These functions return their second argument.
2935 UserInst->replaceAllUsesWith(UserInst->getArgOperand(1));
2937 case IC_DestroyWeak:
2941 llvm_unreachable("alloca really is used!");
2943 UserInst->eraseFromParent();
2945 Alloca->eraseFromParent();
2951 /// Identify program paths which execute sequences of retains and releases
2952 /// that can be eliminated.
2953 bool ObjCARCOpt::OptimizeSequences(Function &F) {
2954 // Releases, Retains - These are used to store the results of the main flow
2955 // analysis. These use Value* as the key instead of Instruction* so that the
2956 // map stays valid when we get around to rewriting code and calls get
2957 // replaced by arguments.
2958 DenseMap<Value *, RRInfo> Releases;
2959 MapVector<Value *, RRInfo> Retains;
2961 // This is used during the traversal of the function to track the
2962 // states for each identified object at each block.
2963 DenseMap<const BasicBlock *, BBState> BBStates;
2965 // Analyze the CFG of the function, and all instructions.
2966 bool NestingDetected = Visit(F, BBStates, Retains, Releases);
2969 bool AnyPairsCompletelyEliminated = PerformCodePlacement(BBStates, Retains,
2970 Releases,
2971 F.getParent());
2974 MultiOwnersSet.clear();
2976 return AnyPairsCompletelyEliminated && NestingDetected;
2979 /// Check if there is a dependent call earlier that does not have anything in
2980 /// between the Retain and the call that can affect the reference count of their
2981 /// shared pointer argument. Note that Retain need not be in BB.
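/// For example (illustrative IR):
///
///   %call = call i8* @something(...)
///   %0 = call i8* @objc_retain(i8* %call)
///
/// qualifies when nothing between the call and the retain can affect the
/// reference count of %call.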
2982 static bool
2983 HasSafePathToPredecessorCall(const Value *Arg, Instruction *Retain,
2984 SmallPtrSet<Instruction *, 4> &DepInsts,
2985 SmallPtrSet<const BasicBlock *, 4> &Visited,
2986 ProvenanceAnalysis &PA) {
2987 FindDependencies(CanChangeRetainCount, Arg, Retain->getParent(), Retain,
2988 DepInsts, Visited, PA);
2989 if (DepInsts.size() != 1)
2990 return false;
2992 CallInst *Call =
2993 dyn_cast_or_null<CallInst>(*DepInsts.begin());
2995 // Check that the pointer is the return value of the call.
2996 if (!Call || Arg != Call)
2999 // Check that the call is a regular call.
3000 InstructionClass Class = GetBasicInstructionClass(Call);
3001 if (Class != IC_CallOrUser && Class != IC_Call)
3007 /// Find a dependent retain that precedes the given autorelease for which there
3008 /// is nothing in between the two instructions that can affect the ref count of
3009 /// Arg.
3010 static CallInst *
3011 FindPredecessorRetainWithSafePath(const Value *Arg, BasicBlock *BB,
3012 Instruction *Autorelease,
3013 SmallPtrSet<Instruction *, 4> &DepInsts,
3014 SmallPtrSet<const BasicBlock *, 4> &Visited,
3015 ProvenanceAnalysis &PA) {
3016 FindDependencies(CanChangeRetainCount, Arg,
3017 BB, Autorelease, DepInsts, Visited, PA);
3018 if (DepInsts.size() != 1)
3019 return 0;
3021 CallInst *Retain =
3022 dyn_cast_or_null<CallInst>(*DepInsts.begin());
3024 // Check that we found a retain with the same argument.
3025 if (!Retain ||
3026 !IsRetain(GetBasicInstructionClass(Retain)) ||
3027 GetObjCArg(Retain) != Arg) {
3034 /// Look for an ``autorelease'' instruction dependent on Arg such that there are
3035 /// no instructions dependent on Arg that need a positive ref count in between
3036 /// the autorelease and the ret.
3037 static CallInst *
3038 FindPredecessorAutoreleaseWithSafePath(const Value *Arg, BasicBlock *BB,
3040 SmallPtrSet<Instruction *, 4> &DepInsts,
3041 SmallPtrSet<const BasicBlock *, 4> &V,
3042 ProvenanceAnalysis &PA) {
3043 FindDependencies(NeedsPositiveRetainCount, Arg,
3044 BB, Ret, DepInsts, V, PA);
3045 if (DepInsts.size() != 1)
3046 return 0;
3048 CallInst *Autorelease =
3049 dyn_cast_or_null<CallInst>(*DepInsts.begin());
3052 InstructionClass AutoreleaseClass = GetBasicInstructionClass(Autorelease);
3053 if (!IsAutorelease(AutoreleaseClass))
3054 return 0;
3055 if (GetObjCArg(Autorelease) != Arg)
3056 return 0;
3058 return Autorelease;
3059 }
3061 /// Look for this pattern:
3063 /// %call = call i8* @something(...)
3064 /// %2 = call i8* @objc_retain(i8* %call)
3065 /// %3 = call i8* @objc_autorelease(i8* %2)
3066 /// ret i8* %3
3068 /// And delete the retain and autorelease.
3069 void ObjCARCOpt::OptimizeReturns(Function &F) {
3070 if (!F.getReturnType()->isPointerTy())
3073 DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeReturns ==\n");
3075 SmallPtrSet<Instruction *, 4> DependingInstructions;
3076 SmallPtrSet<const BasicBlock *, 4> Visited;
3077 for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI) {
3078 BasicBlock *BB = FI;
3079 ReturnInst *Ret = dyn_cast<ReturnInst>(&BB->back());
3081 DEBUG(dbgs() << "Visiting: " << *Ret << "\n");
3086 const Value *Arg = StripPointerCastsAndObjCCalls(Ret->getOperand(0));
3088 // Look for an ``autorelease'' instruction that is a predecessor of Ret and
3089 // dependent on Arg such that there are no instructions dependent on Arg
3090 // that need a positive ref count in between the autorelease and Ret.
3091 CallInst *Autorelease =
3092 FindPredecessorAutoreleaseWithSafePath(Arg, BB, Ret,
3093 DependingInstructions, Visited,
3094 PA);
3095 DependingInstructions.clear();
3101 CallInst *Retain =
3102 FindPredecessorRetainWithSafePath(Arg, BB, Autorelease,
3103 DependingInstructions, Visited, PA);
3104 DependingInstructions.clear();
3110 // Check that there is nothing that can affect the reference count
3111 // between the retain and the call. Note that Retain need not be in BB.
3112 bool HasSafePathToCall = HasSafePathToPredecessorCall(Arg, Retain,
3113 DependingInstructions,
3114 Visited, PA);
3115 DependingInstructions.clear();
3118 if (!HasSafePathToCall)
3121 // If so, we can zap the retain and autorelease.
3124 DEBUG(dbgs() << "Erasing: " << *Retain << "\nErasing: "
3125 << *Autorelease << "\n");
3126 EraseInstruction(Retain);
3127 EraseInstruction(Autorelease);
3133 ObjCARCOpt::GatherStatistics(Function &F, bool AfterOptimization) {
3134 llvm::Statistic &NumRetains =
3135 AfterOptimization? NumRetainsAfterOpt : NumRetainsBeforeOpt;
3136 llvm::Statistic &NumReleases =
3137 AfterOptimization? NumReleasesAfterOpt : NumReleasesBeforeOpt;
3139 for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
3140 Instruction *Inst = &*I++;
3141 switch (GetBasicInstructionClass(Inst)) {
3155 bool ObjCARCOpt::doInitialization(Module &M) {
3159 // If nothing in the Module uses ARC, don't do anything.
3160 Run = ModuleHasARC(M);
3161 if (!Run)
3162 return false;
3164 // Identify the imprecise release metadata kind.
3165 ImpreciseReleaseMDKind =
3166 M.getContext().getMDKindID("clang.imprecise_release");
3167 CopyOnEscapeMDKind =
3168 M.getContext().getMDKindID("clang.arc.copy_on_escape");
3169 NoObjCARCExceptionsMDKind =
3170 M.getContext().getMDKindID("clang.arc.no_objc_arc_exceptions");
3171 #ifdef ARC_ANNOTATIONS
3172 ARCAnnotationBottomUpMDKind =
3173 M.getContext().getMDKindID("llvm.arc.annotation.bottomup");
3174 ARCAnnotationTopDownMDKind =
3175 M.getContext().getMDKindID("llvm.arc.annotation.topdown");
3176 ARCAnnotationProvenanceSourceMDKind =
3177 M.getContext().getMDKindID("llvm.arc.annotation.provenancesource");
3178 #endif // ARC_ANNOTATIONS
3180 // Intuitively, objc_retain and others are nocapture; however, in practice
3181 // they are not, because they return their argument value. And objc_release
3182 // calls finalizers which can have arbitrary side effects.
3184 // Initialize our runtime entry point cache.
3190 bool ObjCARCOpt::runOnFunction(Function &F) {
3194 // If nothing in the Module uses ARC, don't do anything.
3195 if (!Run)
3196 return false;
3198 Changed = false;
3200 DEBUG(dbgs() << "<<< ObjCARCOpt: Visiting Function: " << F.getName() << " >>>"
3201 "\n");
3203 PA.setAA(&getAnalysis<AliasAnalysis>());
3206 if (AreStatisticsEnabled()) {
3207 GatherStatistics(F, false);
3211 // This pass performs several distinct transformations. As a compile-time aid
3212 // when compiling code that isn't ObjC, skip these if the relevant ObjC
3213 // library functions aren't declared.
3215 // Preliminary optimizations. This also computes UsedInThisFunction.
3216 OptimizeIndividualCalls(F);
3218 // Optimizations for weak pointers.
3219 if (UsedInThisFunction & ((1 << IC_LoadWeak) |
3220 (1 << IC_LoadWeakRetained) |
3221 (1 << IC_StoreWeak) |
3222 (1 << IC_InitWeak) |
3223 (1 << IC_CopyWeak) |
3224 (1 << IC_MoveWeak) |
3225 (1 << IC_DestroyWeak)))
3226 OptimizeWeakCalls(F);
3228 // Optimizations for retain+release pairs.
3229 if (UsedInThisFunction & ((1 << IC_Retain) |
3230 (1 << IC_RetainRV) |
3231 (1 << IC_RetainBlock)))
3232 if (UsedInThisFunction & (1 << IC_Release))
3233 // Run OptimizeSequences until it either stops making changes or
3234 // no retain+release pair nesting is detected.
3235 while (OptimizeSequences(F)) {}
3237 // Optimizations if objc_autorelease is used.
3238 if (UsedInThisFunction & ((1 << IC_Autorelease) |
3239 (1 << IC_AutoreleaseRV)))
3240 OptimizeReturns(F);
3242 // Gather statistics after optimization.
3244 if (AreStatisticsEnabled()) {
3245 GatherStatistics(F, true);
3249 DEBUG(dbgs() << "\n");
3254 void ObjCARCOpt::releaseMemory() {