//===- ObjCARCOpts.cpp - ObjC ARC Optimization ----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines ObjC ARC optimizations. ARC stands for Automatic
/// Reference Counting and is a system for managing reference counts for
/// objects in Objective C.
///
/// The optimizations performed include elimination of redundant, partially
/// redundant, and inconsequential reference count operations, elimination of
/// redundant weak pointer operations, and numerous minor simplifications.
///
/// WARNING: This file knows about certain library functions. It recognizes
/// them by name, and hardwires knowledge of their semantics.
///
/// WARNING: This file knows about how certain Objective-C library functions
/// are used. Naive LLVM IR transformations which would otherwise be
/// behavior-preserving may break these assumptions.
///
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "objc-arc-opts"
#include "ObjCARC.h"
#include "DependencyAnalysis.h"
#include "ObjCARCAliasAnalysis.h"
#include "ProvenanceAnalysis.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace llvm::objcarc;
/// \defgroup MiscUtils Miscellaneous utilities that are not ARC specific.

/// \brief An associative container with fast insertion-order (deterministic)
/// iteration over its elements. Plus the special blot operation.
template<class KeyT, class ValueT>
class MapVector {
  /// Map keys to indices in Vector.
  typedef DenseMap<KeyT, size_t> MapTy;
  MapTy Map;

  typedef std::vector<std::pair<KeyT, ValueT> > VectorTy;
  VectorTy Vector;

public:
  typedef typename VectorTy::iterator iterator;
  typedef typename VectorTy::const_iterator const_iterator;
  iterator begin() { return Vector.begin(); }
  iterator end() { return Vector.end(); }
  const_iterator begin() const { return Vector.begin(); }
  const_iterator end() const { return Vector.end(); }
#ifdef XDEBUG
  ~MapVector() {
    assert(Vector.size() >= Map.size()); // May differ due to blotting.
    for (typename MapTy::const_iterator I = Map.begin(), E = Map.end();
         I != E; ++I) {
      assert(I->second < Vector.size());
      assert(Vector[I->second].first == I->first);
    }
    for (typename VectorTy::const_iterator I = Vector.begin(),
         E = Vector.end(); I != E; ++I)
      assert(!I->first ||
             (Map.count(I->first) &&
              Map[I->first] == size_t(I - Vector.begin())));
  }
#endif
  ValueT &operator[](const KeyT &Arg) {
    std::pair<typename MapTy::iterator, bool> Pair =
      Map.insert(std::make_pair(Arg, size_t(0)));
    if (Pair.second) {
      size_t Num = Vector.size();
      Pair.first->second = Num;
      Vector.push_back(std::make_pair(Arg, ValueT()));
      return Vector[Num].second;
    }
    return Vector[Pair.first->second].second;
  }
  std::pair<iterator, bool>
  insert(const std::pair<KeyT, ValueT> &InsertPair) {
    std::pair<typename MapTy::iterator, bool> Pair =
      Map.insert(std::make_pair(InsertPair.first, size_t(0)));
    if (Pair.second) {
      size_t Num = Vector.size();
      Pair.first->second = Num;
      Vector.push_back(InsertPair);
      return std::make_pair(Vector.begin() + Num, true);
    }
    return std::make_pair(Vector.begin() + Pair.first->second, false);
  }
  iterator find(const KeyT &Key) {
    typename MapTy::iterator It = Map.find(Key);
    if (It == Map.end()) return Vector.end();
    return Vector.begin() + It->second;
  }

  const_iterator find(const KeyT &Key) const {
    typename MapTy::const_iterator It = Map.find(Key);
    if (It == Map.end()) return Vector.end();
    return Vector.begin() + It->second;
  }
  /// This is similar to erase, but instead of removing the element from the
  /// vector, it just zeros out the key in the vector. This leaves iterators
  /// intact, but clients must be prepared for zeroed-out keys when iterating.
  void blot(const KeyT &Key) {
    typename MapTy::iterator It = Map.find(Key);
    if (It == Map.end()) return;
    Vector[It->second].first = KeyT();
    Map.erase(It);
  }
};
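// Illustrative use of blot() (an assumed example, not from the original
// source): after blotting a key, iteration still visits its slot, but with a
// default-constructed (null) key that clients must be prepared to skip:
//
//   MapVector<Value *, RRInfo> M;
//   M[V1] = RRInfo();   // Vector: [(V1, ...)]
//   M[V2] = RRInfo();   // Vector: [(V1, ...), (V2, ...)]
//   M.blot(V1);         // Vector: [(null, ...), (V2, ...)]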
/// \defgroup ARCUtilities Utility declarations/definitions specific to ARC.

/// \brief This is similar to StripPointerCastsAndObjCCalls but it stops as soon
/// as it finds a value with multiple uses.
static const Value *FindSingleUseIdentifiedObject(const Value *Arg) {
  if (Arg->hasOneUse()) {
    if (const BitCastInst *BC = dyn_cast<BitCastInst>(Arg))
      return FindSingleUseIdentifiedObject(BC->getOperand(0));
    if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Arg))
      if (GEP->hasAllZeroIndices())
        return FindSingleUseIdentifiedObject(GEP->getPointerOperand());
    if (IsForwarding(GetBasicInstructionClass(Arg)))
      return FindSingleUseIdentifiedObject(
               cast<CallInst>(Arg)->getArgOperand(0));
    if (!IsObjCIdentifiedObject(Arg))
      return 0;
    return Arg;
  }

  // If we found an identifiable object whose only uses are trivial, we can
  // still consider this to be a single-use value.
  if (IsObjCIdentifiedObject(Arg)) {
    for (Value::const_use_iterator UI = Arg->use_begin(), UE = Arg->use_end();
         UI != UE; ++UI) {
      const User *U = *UI;
      if (!U->use_empty() || StripPointerCastsAndObjCCalls(U) != Arg)
        return 0;
    }
    return Arg;
  }

  return 0;
}
/// \brief Test whether the given retainable object pointer escapes.
///
/// This differs from regular escape analysis in that a use as an
/// argument to a call is not considered an escape.
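///
/// For example (illustrative, not from the original comment):
///
///   store i8* %ptr, i8** @global       ; considered an escape
///   call void @use_pointer(i8* %ptr)   ; not considered an escape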
static bool DoesRetainableObjPtrEscape(const User *Ptr) {
  DEBUG(dbgs() << "DoesRetainableObjPtrEscape: Target: " << *Ptr << "\n");

  // Walk the def-use chains.
  SmallVector<const Value *, 4> Worklist;
  Worklist.push_back(Ptr);
  // If Ptr has any operands add them as well.
  for (User::const_op_iterator I = Ptr->op_begin(), E = Ptr->op_end(); I != E;
       ++I) {
    Worklist.push_back(*I);
  }

  // Ensure we do not visit any value twice.
  SmallPtrSet<const Value *, 8> VisitedSet;

  do {
    const Value *V = Worklist.pop_back_val();

    DEBUG(dbgs() << "Visiting: " << *V << "\n");

    for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end();
         UI != UE; ++UI) {
      const User *UUser = *UI;

      DEBUG(dbgs() << "User: " << *UUser << "\n");

      // Special - Use by a call (callee or argument) is not considered
      // an escape.
      switch (GetBasicInstructionClass(UUser)) {
      case IC_StoreWeak:
      case IC_InitWeak:
      case IC_StoreStrong:
      case IC_Autorelease:
      case IC_AutoreleaseRV: {
        DEBUG(dbgs() << "User copies pointer arguments. Pointer Escapes!\n");
        // These special functions make copies of their pointer arguments.
        return true;
      }
      case IC_IntrinsicUser:
        // Use by the use intrinsic is not an escape.
        continue;
      case IC_User:
      case IC_None:
        // Use by an instruction which copies the value is an escape if the
        // result is an escape.
        if (isa<BitCastInst>(UUser) || isa<GetElementPtrInst>(UUser) ||
            isa<PHINode>(UUser) || isa<SelectInst>(UUser)) {
          if (VisitedSet.insert(UUser)) {
            DEBUG(dbgs() << "User copies value. Ptr escapes if result escapes."
                  " Adding to list.\n");
            Worklist.push_back(UUser);
          } else {
            DEBUG(dbgs() << "Already visited node.\n");
          }
          continue;
        }

        // Use by a load is not an escape.
        if (isa<LoadInst>(UUser))
          continue;

        // Use by a store is not an escape if the use is the address.
        if (const StoreInst *SI = dyn_cast<StoreInst>(UUser))
          if (V != SI->getValueOperand())
            continue;

        break;
      default:
        // Regular calls and other stuff are not considered escapes.
        continue;
      }

      // Otherwise, conservatively assume an escape.
      DEBUG(dbgs() << "Assuming ptr escapes.\n");
      return true;
    }
  } while (!Worklist.empty());

  // We did not find an escape.
  DEBUG(dbgs() << "Ptr does not escape.\n");
  return false;
}
/// This is a wrapper around getUnderlyingObjCPtr along the lines of
/// GetUnderlyingObjects except that it returns early when it sees the first
/// alloca.
static inline bool AreAnyUnderlyingObjectsAnAlloca(const Value *V) {
  SmallPtrSet<const Value *, 4> Visited;
  SmallVector<const Value *, 4> Worklist;
  Worklist.push_back(V);
  do {
    const Value *P = Worklist.pop_back_val();
    P = GetUnderlyingObjCPtr(P);

    if (isa<AllocaInst>(P))
      return true;

    if (!Visited.insert(P))
      continue;

    if (const SelectInst *SI = dyn_cast<const SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (const PHINode *PN = dyn_cast<const PHINode>(P)) {
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        Worklist.push_back(PN->getIncomingValue(i));
      continue;
    }
  } while (!Worklist.empty());

  return false;
}
/// \defgroup ARCOpt ARC Optimization.
// TODO: On code like this:
//
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
// stuff_that_cannot_release()
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
//
// The second retain and autorelease can be deleted.
// TODO: It should be possible to delete
// objc_autoreleasePoolPush and objc_autoreleasePoolPop
// pairs if nothing is actually autoreleased between them. Also, autorelease
// calls followed by objc_autoreleasePoolPop calls (perhaps in ObjC++ code
// after inlining) can be turned into plain release calls.
// TODO: Critical-edge splitting. If the optimal insertion point is
// a critical edge, the current algorithm has to fail, because it doesn't
// know how to split edges. It should be possible to make the optimizer
// think in terms of edges, rather than blocks, and then split critical
// edges on demand.
// TODO: OptimizeSequences could be generalized to be interprocedural.
// TODO: Recognize that a bunch of other objc runtime calls have
// non-escaping arguments and non-releasing arguments, and may be
// non-autoreleasing.

// TODO: Sink autorelease calls as far as possible. Unfortunately we
// usually can't sink them past other calls, which would be the main
// case where it would be useful.

// TODO: The pointer returned from objc_loadWeakRetained is retained.

// TODO: Delete release+retain pairs (rare).
STATISTIC(NumNoops,        "Number of no-op objc calls eliminated");
STATISTIC(NumPartialNoops, "Number of partially no-op objc calls eliminated");
STATISTIC(NumAutoreleases, "Number of autoreleases converted to releases");
STATISTIC(NumRets,         "Number of return value forwarding "
                           "retain+autoreleases eliminated");
STATISTIC(NumRRs,          "Number of retain+release paths eliminated");
STATISTIC(NumPeeps,        "Number of calls peephole-optimized");

STATISTIC(NumRetainsBeforeOpt,
          "Number of retains before optimization");
STATISTIC(NumReleasesBeforeOpt,
          "Number of releases before optimization");
STATISTIC(NumRetainsAfterOpt,
          "Number of retains after optimization");
STATISTIC(NumReleasesAfterOpt,
          "Number of releases after optimization");
/// \brief A sequence of states that a pointer may go through in which an
/// objc_retain and objc_release are actually needed.
enum Sequence {
  S_None,
  S_Retain,         ///< objc_retain(x).
  S_CanRelease,     ///< foo(x) -- x could possibly see a ref count decrement.
  S_Use,            ///< any use of x.
  S_Stop,           ///< like S_Release, but code motion is stopped.
  S_Release,        ///< objc_release(x).
  S_MovableRelease  ///< objc_release(x), !clang.imprecise_release.
};
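// Illustrative lifecycle (not from the original comments): in the bottom-up
// direction a pointer typically moves
//   S_Release/S_MovableRelease -> S_Use -> S_CanRelease,
// and a matching objc_retain then completes a candidate retain+release pair
// and resets the state to S_None.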
raw_ostream &operator<<(raw_ostream &OS, const Sequence S)
  LLVM_ATTRIBUTE_UNUSED;
raw_ostream &operator<<(raw_ostream &OS, const Sequence S) {
  switch (S) {
  case S_None:
    return OS << "S_None";
  case S_Retain:
    return OS << "S_Retain";
  case S_CanRelease:
    return OS << "S_CanRelease";
  case S_Use:
    return OS << "S_Use";
  case S_Release:
    return OS << "S_Release";
  case S_MovableRelease:
    return OS << "S_MovableRelease";
  case S_Stop:
    return OS << "S_Stop";
  }
  llvm_unreachable("Unknown sequence type.");
}
static Sequence MergeSeqs(Sequence A, Sequence B, bool TopDown) {
  // The easy cases.
  if (A == B)
    return A;
  if (A == S_None || B == S_None)
    return S_None;

  if (A > B) std::swap(A, B);
  if (TopDown) {
    // Choose the side which is further along in the sequence.
    if ((A == S_Retain || A == S_CanRelease) &&
        (B == S_CanRelease || B == S_Use))
      return B;
  } else {
    // Choose the side which is further along in the sequence.
    if ((A == S_Use || A == S_CanRelease) &&
        (B == S_Use || B == S_Release || B == S_Stop || B == S_MovableRelease))
      return A;
    // If both sides are releases, choose the more conservative one.
    if (A == S_Stop && (B == S_Release || B == S_MovableRelease))
      return A;
    if (A == S_Release && B == S_MovableRelease)
      return A;
  }

  return S_None;
}
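// For example (illustrative, not from the original comments): merging
// S_CanRelease with S_Use bottom-up yields S_CanRelease (the side further
// along in the reversed sequence), while merging S_Retain with S_Release in
// either direction yields S_None, since the two paths disagree about where
// the pointer is in its lifetime.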
/// \brief Unidirectional information about either a
/// retain-decrement-use-release sequence or release-use-decrement-retain
/// reverse sequence.
struct RRInfo {
  /// After an objc_retain, the reference count of the referenced
  /// object is known to be positive. Similarly, before an objc_release, the
  /// reference count of the referenced object is known to be positive. If
  /// there are retain-release pairs in code regions where the retain count
  /// is known to be positive, they can be eliminated, regardless of any side
  /// effects between them.
  ///
  /// Also, a retain+release pair nested within another retain+release
  /// pair all on the known same pointer value can be eliminated, regardless
  /// of any intervening side effects.
  ///
  /// KnownSafe is true when either of these conditions is satisfied.
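  ///
  /// For example (illustrative, not from the original comment), in:
  ///
  ///   %0 = tail call i8* @objc_retain(i8* %p)
  ///   %1 = tail call i8* @objc_retain(i8* %p)
  ///   call void @callee()
  ///   call void @objc_release(i8* %p)
  ///   call void @objc_release(i8* %p)
  ///
  /// the inner retain+release pair on %p can be eliminated even though
  /// @callee may have side effects.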
  bool KnownSafe;

  /// True if the objc_release calls are all marked with the "tail" keyword.
  bool IsTailCallRelease;

  /// If the Calls are objc_release calls and they all have a
  /// clang.imprecise_release tag, this is the metadata tag.
  MDNode *ReleaseMetadata;

  /// For a top-down sequence, the set of objc_retains or
  /// objc_retainBlocks. For bottom-up, the set of objc_releases.
  SmallPtrSet<Instruction *, 2> Calls;

  /// The set of optimal insert positions for moving calls in the opposite
  /// sequence.
  SmallPtrSet<Instruction *, 2> ReverseInsertPts;

  /// Does this pointer have multiple owners?
  ///
  /// In the presence of multiple owners with the same provenance caused by
  /// allocas, we cannot assume that the frontend will emit balanced code
  /// since it could put the release on the pointer loaded from the
  /// alloca. This confuses the optimizer so we must be more conservative in
  /// that case.
  bool MultipleOwners;

  RRInfo() :
    KnownSafe(false), IsTailCallRelease(false), ReleaseMetadata(0),
    MultipleOwners(false) {}
  void clear();

  bool IsTrackingImpreciseReleases() {
    return ReleaseMetadata != 0;
  }
};
void RRInfo::clear() {
  KnownSafe = false;
  IsTailCallRelease = false;
  MultipleOwners = false;
  ReleaseMetadata = 0;
  Calls.clear();
  ReverseInsertPts.clear();
}
/// \brief This class summarizes several per-pointer runtime properties which
/// are propagated through the flow graph.
class PtrState {
  /// True if the reference count is known to be incremented.
  bool KnownPositiveRefCount;

  /// True if we've seen an opportunity for partial RR elimination, such as
  /// pushing calls into a CFG triangle or into one side of a CFG diamond.
  bool Partial;

  /// The current position in the sequence.
  Sequence Seq : 8;

public:
  /// Unidirectional information about the current sequence.
  ///
  /// TODO: Encapsulate this better.
  RRInfo RRI;

  PtrState() : KnownPositiveRefCount(false), Partial(false),
               Seq(S_None) {}
  void SetKnownPositiveRefCount() {
    DEBUG(dbgs() << "Setting Known Positive.\n");
    KnownPositiveRefCount = true;
  }

  void ClearKnownPositiveRefCount() {
    DEBUG(dbgs() << "Clearing Known Positive.\n");
    KnownPositiveRefCount = false;
  }

  bool HasKnownPositiveRefCount() const {
    return KnownPositiveRefCount;
  }

  void SetSeq(Sequence NewSeq) {
    DEBUG(dbgs() << "Old: " << Seq << "; New: " << NewSeq << "\n");
    Seq = NewSeq;
  }

  Sequence GetSeq() const {
    return Seq;
  }

  void ClearSequenceProgress() {
    ResetSequenceProgress(S_None);
  }

  void ResetSequenceProgress(Sequence NewSeq) {
    DEBUG(dbgs() << "Resetting sequence progress.\n");
    SetSeq(NewSeq);
    Partial = false;
    RRI.clear();
  }

  void Merge(const PtrState &Other, bool TopDown);
};
void
PtrState::Merge(const PtrState &Other, bool TopDown) {
  Seq = MergeSeqs(Seq, Other.Seq, TopDown);
  KnownPositiveRefCount = KnownPositiveRefCount && Other.KnownPositiveRefCount;

  // If we're not in a sequence (anymore), drop all associated state.
  if (Seq == S_None) {
    Partial = false;
    RRI.clear();
  } else if (Partial || Other.Partial) {
    // If we're doing a merge on a path that's previously seen a partial
    // merge, conservatively drop the sequence, to avoid doing partial
    // RR elimination. If the branch predicates for the two merges differ,
    // mixing them is unsafe.
    ClearSequenceProgress();
  } else {
    // Conservatively merge the ReleaseMetadata information.
    if (RRI.ReleaseMetadata != Other.RRI.ReleaseMetadata)
      RRI.ReleaseMetadata = 0;

    RRI.KnownSafe = RRI.KnownSafe && Other.RRI.KnownSafe;
    RRI.IsTailCallRelease = RRI.IsTailCallRelease &&
                            Other.RRI.IsTailCallRelease;
    RRI.Calls.insert(Other.RRI.Calls.begin(), Other.RRI.Calls.end());
    RRI.MultipleOwners |= Other.RRI.MultipleOwners;

    // Merge the insert point sets. If there are any differences,
    // that makes this a partial merge.
    Partial = RRI.ReverseInsertPts.size() != Other.RRI.ReverseInsertPts.size();
    for (SmallPtrSet<Instruction *, 2>::const_iterator
         I = Other.RRI.ReverseInsertPts.begin(),
         E = Other.RRI.ReverseInsertPts.end(); I != E; ++I)
      Partial |= RRI.ReverseInsertPts.insert(*I);
  }
}
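// For example (illustrative, not from the original comments): if only one
// predecessor recorded a reverse insert point for a pointer, the merged state
// is marked Partial; a later merge that sees Partial conservatively calls
// ClearSequenceProgress() rather than risk moving a release onto only one
// side of a CFG diamond.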
/// \brief Per-BasicBlock state.
class BBState {
  /// The number of unique control paths from the entry which can reach this
  /// block.
  unsigned TopDownPathCount;

  /// The number of unique control paths to exits from this block.
  unsigned BottomUpPathCount;

  /// A type for PerPtrTopDown and PerPtrBottomUp.
  typedef MapVector<const Value *, PtrState> MapTy;

  /// The top-down traversal uses this to record information known about a
  /// pointer at the bottom of each block.
  MapTy PerPtrTopDown;

  /// The bottom-up traversal uses this to record information known about a
  /// pointer at the top of each block.
  MapTy PerPtrBottomUp;

  /// Effective predecessors of the current block ignoring ignorable edges and
  /// ignored backedges.
  SmallVector<BasicBlock *, 2> Preds;
  /// Effective successors of the current block ignoring ignorable edges and
  /// ignored backedges.
  SmallVector<BasicBlock *, 2> Succs;

public:
  BBState() : TopDownPathCount(0), BottomUpPathCount(0) {}

  typedef MapTy::iterator ptr_iterator;
  typedef MapTy::const_iterator ptr_const_iterator;

  ptr_iterator top_down_ptr_begin() { return PerPtrTopDown.begin(); }
  ptr_iterator top_down_ptr_end() { return PerPtrTopDown.end(); }
  ptr_const_iterator top_down_ptr_begin() const {
    return PerPtrTopDown.begin();
  }
  ptr_const_iterator top_down_ptr_end() const {
    return PerPtrTopDown.end();
  }

  ptr_iterator bottom_up_ptr_begin() { return PerPtrBottomUp.begin(); }
  ptr_iterator bottom_up_ptr_end() { return PerPtrBottomUp.end(); }
  ptr_const_iterator bottom_up_ptr_begin() const {
    return PerPtrBottomUp.begin();
  }
  ptr_const_iterator bottom_up_ptr_end() const {
    return PerPtrBottomUp.end();
  }
  /// Mark this block as being an entry block, which has one path from the
  /// entry by definition.
  void SetAsEntry() { TopDownPathCount = 1; }

  /// Mark this block as being an exit block, which has one path to an exit by
  /// definition.
  void SetAsExit() { BottomUpPathCount = 1; }
  /// Attempt to find the PtrState object describing the top down state for
  /// pointer Arg. Return a new initialized PtrState describing the top down
  /// state for Arg if we do not find one.
  PtrState &getPtrTopDownState(const Value *Arg) {
    return PerPtrTopDown[Arg];
  }

  /// Attempt to find the PtrState object describing the bottom up state for
  /// pointer Arg. Return a new initialized PtrState describing the bottom up
  /// state for Arg if we do not find one.
  PtrState &getPtrBottomUpState(const Value *Arg) {
    return PerPtrBottomUp[Arg];
  }

  /// Attempt to find the PtrState object describing the bottom up state for
  /// pointer Arg.
  ptr_iterator findPtrBottomUpState(const Value *Arg) {
    return PerPtrBottomUp.find(Arg);
  }

  void clearBottomUpPointers() {
    PerPtrBottomUp.clear();
  }

  void clearTopDownPointers() {
    PerPtrTopDown.clear();
  }
  void InitFromPred(const BBState &Other);
  void InitFromSucc(const BBState &Other);
  void MergePred(const BBState &Other);
  void MergeSucc(const BBState &Other);

  /// Return the number of possible unique paths from an entry to an exit
  /// which pass through this block. This is only valid after both the
  /// top-down and bottom-up traversals are complete.
  unsigned GetAllPathCount() const {
    assert(TopDownPathCount != 0);
    assert(BottomUpPathCount != 0);
    return TopDownPathCount * BottomUpPathCount;
  }
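  // For example (illustrative, not from the original comments): in a diamond
  // CFG A -> {B, C} -> D, block B has TopDownPathCount == 1 and
  // BottomUpPathCount == 1, while D has TopDownPathCount == 2 and
  // BottomUpPathCount == 1, so GetAllPathCount() for D returns 2.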
  // Specialized CFG utilities.
  typedef SmallVectorImpl<BasicBlock *>::const_iterator edge_iterator;
  edge_iterator pred_begin() { return Preds.begin(); }
  edge_iterator pred_end() { return Preds.end(); }
  edge_iterator succ_begin() { return Succs.begin(); }
  edge_iterator succ_end() { return Succs.end(); }

  void addSucc(BasicBlock *Succ) { Succs.push_back(Succ); }
  void addPred(BasicBlock *Pred) { Preds.push_back(Pred); }

  bool isExit() const { return Succs.empty(); }
};
void BBState::InitFromPred(const BBState &Other) {
  PerPtrTopDown = Other.PerPtrTopDown;
  TopDownPathCount = Other.TopDownPathCount;
}

void BBState::InitFromSucc(const BBState &Other) {
  PerPtrBottomUp = Other.PerPtrBottomUp;
  BottomUpPathCount = Other.BottomUpPathCount;
}
/// The top-down traversal uses this to merge information about predecessors to
/// form the initial state for a new block.
void BBState::MergePred(const BBState &Other) {
  // Other.TopDownPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  TopDownPathCount += Other.TopDownPathCount;

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (TopDownPathCount < Other.TopDownPathCount) {
    clearTopDownPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the same key,
  // merge the entries. Otherwise, copy the entry and merge it with an empty
  // entry.
  for (ptr_const_iterator MI = Other.top_down_ptr_begin(),
       ME = Other.top_down_ptr_end(); MI != ME; ++MI) {
    std::pair<ptr_iterator, bool> Pair = PerPtrTopDown.insert(*MI);
    Pair.first->second.Merge(Pair.second ? PtrState() : MI->second,
                             /*TopDown=*/true);
  }

  // For each entry in our set, if the other set doesn't have an entry with the
  // same key, force it to merge with an empty entry.
  for (ptr_iterator MI = top_down_ptr_begin(),
       ME = top_down_ptr_end(); MI != ME; ++MI)
    if (Other.PerPtrTopDown.find(MI->first) == Other.PerPtrTopDown.end())
      MI->second.Merge(PtrState(), /*TopDown=*/true);
}
/// The bottom-up traversal uses this to merge information about successors to
/// form the initial state for a new block.
void BBState::MergeSucc(const BBState &Other) {
  // Other.BottomUpPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  BottomUpPathCount += Other.BottomUpPathCount;

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (BottomUpPathCount < Other.BottomUpPathCount) {
    clearBottomUpPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the
  // same key, merge the entries. Otherwise, copy the entry and merge
  // it with an empty entry.
  for (ptr_const_iterator MI = Other.bottom_up_ptr_begin(),
       ME = Other.bottom_up_ptr_end(); MI != ME; ++MI) {
    std::pair<ptr_iterator, bool> Pair = PerPtrBottomUp.insert(*MI);
    Pair.first->second.Merge(Pair.second ? PtrState() : MI->second,
                             /*TopDown=*/false);
  }

  // For each entry in our set, if the other set doesn't have an entry
  // with the same key, force it to merge with an empty entry.
  for (ptr_iterator MI = bottom_up_ptr_begin(),
       ME = bottom_up_ptr_end(); MI != ME; ++MI)
    if (Other.PerPtrBottomUp.find(MI->first) == Other.PerPtrBottomUp.end())
      MI->second.Merge(PtrState(), /*TopDown=*/false);
}
// Only enable ARC Annotations if we are building a debug version of
// the compiler.
#ifndef NDEBUG
#define ARC_ANNOTATIONS
#endif
// Define some macros along the lines of DEBUG and some helper functions to make
// it cleaner to create annotations in the source code and to no-op when not
// building in debug mode.
#ifdef ARC_ANNOTATIONS

#include "llvm/Support/CommandLine.h"
/// Enable/disable ARC sequence annotations.
static cl::opt<bool>
EnableARCAnnotations("enable-objc-arc-annotations", cl::init(false),
                     cl::desc("Enable emission of arc data flow analysis "
                              "annotations"));
static cl::opt<bool>
DisableCheckForCFGHazards("disable-objc-arc-checkforcfghazards", cl::init(false),
                          cl::desc("Disable check for cfg hazards when "
                                   "annotating"));
static cl::opt<std::string>
ARCAnnotationTargetIdentifier("objc-arc-annotation-target-identifier",
                              cl::init(""),
                              cl::desc("filter out all data flow annotations "
                                       "but those that apply to the given "
                                       "target llvm identifier."));
/// This function appends a unique ARCAnnotationProvenanceSourceMDKind id to an
/// instruction so that we can track backwards when post processing via the llvm
/// arc annotation processor tool.
static MDString *AppendMDNodeToSourcePtr(unsigned NodeId,
                                         Value *Ptr) {
  MDString *Hash = 0;

  // If pointer is a result of an instruction and it does not have a source
  // MDNode attached to it, attach a new MDNode onto it. If pointer is a result
  // of an instruction and does have a source MDNode attached to it, return a
  // reference to said Node. Otherwise just return 0.
  if (Instruction *Inst = dyn_cast<Instruction>(Ptr)) {
    MDNode *Node;
    if (!(Node = Inst->getMetadata(NodeId))) {
      // We do not have any node. Generate and attach the hash MDString to the
      // instruction.

      // We just use an MDString to ensure that this metadata gets written out
      // of line at the module level and to provide a very simple format
      // encoding the information herein. Both of these make it simpler to
      // parse the annotations by a simple external program.
      std::string Str;
      raw_string_ostream os(Str);
      os << "(" << Inst->getParent()->getParent()->getName() << ",%"
         << Inst->getName() << ")";

      Hash = MDString::get(Inst->getContext(), os.str());
      Inst->setMetadata(NodeId, MDNode::get(Inst->getContext(),Hash));
    } else {
      // We have a node. Grab its hash and return it.
      assert(Node->getNumOperands() == 1 &&
             "An ARCAnnotationProvenanceSourceMDKind can only have 1 operand.");
      Hash = cast<MDString>(Node->getOperand(0));
    }
  } else if (Argument *Arg = dyn_cast<Argument>(Ptr)) {
    std::string str;
    raw_string_ostream os(str);
    os << "(" << Arg->getParent()->getName() << ",%" << Arg->getName()
       << ")";
    Hash = MDString::get(Arg->getContext(), os.str());
  }

  return Hash;
}
static std::string SequenceToString(Sequence A) {
  std::string str;
  raw_string_ostream os(str);
  os << A;
  return os.str();
}

/// Helper function to change a Sequence into a String object using our overload
/// for raw_ostream so we only have printing code in one location.
static MDString *SequenceToMDString(LLVMContext &Context,
                                    Sequence A) {
  return MDString::get(Context, SequenceToString(A));
}
/// A simple function to generate a MDNode which describes the change in state
/// for Value *Ptr caused by Instruction *Inst.
static void AppendMDNodeToInstForPtr(unsigned NodeId,
                                     Instruction *Inst,
                                     Value *Ptr,
                                     MDString *PtrSourceMDNodeID,
                                     Sequence OldSeq,
                                     Sequence NewSeq) {
  MDNode *Node = 0;
  Value *tmp[3] = {PtrSourceMDNodeID,
                   SequenceToMDString(Inst->getContext(),
                                      OldSeq),
                   SequenceToMDString(Inst->getContext(),
                                      NewSeq)};
  Node = MDNode::get(Inst->getContext(),
                     ArrayRef<Value*>(tmp, 3));

  Inst->setMetadata(NodeId, Node);
}
/// Add to the beginning of the basic block llvm.ptr.annotations which show the
/// state of a pointer at the entrance to a basic block.
static void GenerateARCBBEntranceAnnotation(const char *Name, BasicBlock *BB,
                                            Value *Ptr, Sequence Seq) {
  // If we have a target identifier, make sure that we match it before
  // emitting an annotation.
  if(!ARCAnnotationTargetIdentifier.empty() &&
     !Ptr->getName().equals(ARCAnnotationTargetIdentifier))
    return;

  Module *M = BB->getParent()->getParent();
  LLVMContext &C = M->getContext();
  Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
  Type *I8XX = PointerType::getUnqual(I8X);
  Type *Params[] = {I8XX, I8XX};
  FunctionType *FTy = FunctionType::get(Type::getVoidTy(C),
                                        ArrayRef<Type*>(Params, 2),
                                        /*isVarArg=*/false);
  Constant *Callee = M->getOrInsertFunction(Name, FTy);

  IRBuilder<> Builder(BB, BB->getFirstInsertionPt());

  Value *PtrName;
  StringRef Tmp = Ptr->getName();
  if (0 == (PtrName = M->getGlobalVariable(Tmp, true))) {
    Value *ActualPtrName = Builder.CreateGlobalStringPtr(Tmp,
                                                         Tmp + "_STR");
    PtrName = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
                                 cast<Constant>(ActualPtrName), Tmp);
  }

  Value *S;
  std::string SeqStr = SequenceToString(Seq);
  if (0 == (S = M->getGlobalVariable(SeqStr, true))) {
    Value *ActualPtrName = Builder.CreateGlobalStringPtr(SeqStr,
                                                         SeqStr + "_STR");
    S = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
                           cast<Constant>(ActualPtrName), SeqStr);
  }

  Builder.CreateCall2(Callee, PtrName, S);
}
/// Add to the end of the basic block llvm.ptr.annotations which show the state
/// of the pointer at the bottom of the basic block.
static void GenerateARCBBTerminatorAnnotation(const char *Name, BasicBlock *BB,
                                              Value *Ptr, Sequence Seq) {
  // If we have a target identifier, make sure that we match it before emitting
  // an annotation.
  if(!ARCAnnotationTargetIdentifier.empty() &&
     !Ptr->getName().equals(ARCAnnotationTargetIdentifier))
    return;

  Module *M = BB->getParent()->getParent();
  LLVMContext &C = M->getContext();
  Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
  Type *I8XX = PointerType::getUnqual(I8X);
  Type *Params[] = {I8XX, I8XX};
  FunctionType *FTy = FunctionType::get(Type::getVoidTy(C),
                                        ArrayRef<Type*>(Params, 2),
                                        /*isVarArg=*/false);
  Constant *Callee = M->getOrInsertFunction(Name, FTy);

  IRBuilder<> Builder(BB, llvm::prior(BB->end()));

  Value *PtrName;
  StringRef Tmp = Ptr->getName();
  if (0 == (PtrName = M->getGlobalVariable(Tmp, true))) {
    Value *ActualPtrName = Builder.CreateGlobalStringPtr(Tmp,
                                                         Tmp + "_STR");
    PtrName = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
                                 cast<Constant>(ActualPtrName), Tmp);
  }

  Value *S;
  std::string SeqStr = SequenceToString(Seq);
  if (0 == (S = M->getGlobalVariable(SeqStr, true))) {
    Value *ActualPtrName = Builder.CreateGlobalStringPtr(SeqStr,
                                                         SeqStr + "_STR");
    S = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
                           cast<Constant>(ActualPtrName), SeqStr);
  }
  Builder.CreateCall2(Callee, PtrName, S);
}
/// Adds a source annotation to pointer and a state change annotation to Inst
/// referencing the source annotation and the old/new state of pointer.
static void GenerateARCAnnotation(unsigned InstMDId,
                                  unsigned PtrMDId,
                                  Instruction *Inst,
                                  Value *Ptr,
                                  Sequence OldSeq,
                                  Sequence NewSeq) {
  if (EnableARCAnnotations) {
    // If we have a target identifier, make sure that we match it before
    // emitting an annotation.
    if(!ARCAnnotationTargetIdentifier.empty() &&
       !Ptr->getName().equals(ARCAnnotationTargetIdentifier))
      return;

    // First generate the source annotation on our pointer. This will return an
    // MDString* if Ptr actually comes from an instruction implying we can put
    // in a source annotation. If AppendMDNodeToSourcePtr returns 0 (i.e. NULL),
    // then we know that our pointer is from an Argument so we put a reference
    // to the argument number.
    //
    // The point of this is to make it easy for the
    // llvm-arc-annotation-processor tool to cross reference where the source
    // pointer is in the LLVM IR since the LLVM IR parser does not submit such
    // information via debug info for backends to use (since why would anyone
    // need such a thing from LLVM IR besides in non-standard cases
    // [i.e. debugging]).
    MDString *SourcePtrMDNode =
      AppendMDNodeToSourcePtr(PtrMDId, Ptr);
    AppendMDNodeToInstForPtr(InstMDId, Inst, Ptr, SourcePtrMDNode, OldSeq,
                             NewSeq);
  }
}
// The actual interface for accessing the above functionality is defined via
// some simple macros which are defined below. We do this so that the user does
// not need to pass in the required metadata id, resulting in cleaner code, and
// additionally since it provides an easy way to conditionally no-op all
// annotation support in a non-debug build.

/// Use this macro to annotate a sequence state change when processing
/// instructions bottom up.
#define ANNOTATE_BOTTOMUP(inst, ptr, old, new)                          \
  GenerateARCAnnotation(ARCAnnotationBottomUpMDKind,                    \
                        ARCAnnotationProvenanceSourceMDKind, (inst),    \
                        const_cast<Value*>(ptr), (old), (new))
/// Use this macro to annotate a sequence state change when processing
/// instructions top down.
#define ANNOTATE_TOPDOWN(inst, ptr, old, new)                           \
  GenerateARCAnnotation(ARCAnnotationTopDownMDKind,                     \
                        ARCAnnotationProvenanceSourceMDKind, (inst),    \
                        const_cast<Value*>(ptr), (old), (new))
#define ANNOTATE_BB(_states, _bb, _name, _type, _direction)                   \
  do {                                                                        \
    if (EnableARCAnnotations) {                                               \
      for(BBState::ptr_const_iterator I = (_states)._direction##_ptr_begin(), \
          E = (_states)._direction##_ptr_end(); I != E; ++I) {                \
        Value *Ptr = const_cast<Value*>(I->first);                            \
        Sequence Seq = I->second.GetSeq();                                    \
        GenerateARCBB ## _type ## Annotation(_name, (_bb), Ptr, Seq);         \
      }                                                                       \
    }                                                                         \
  } while (0)

#define ANNOTATE_BOTTOMUP_BBSTART(_states, _basicblock)                       \
  ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.bottomup.bbstart",   \
              Entrance, bottom_up)
#define ANNOTATE_BOTTOMUP_BBEND(_states, _basicblock)                         \
  ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.bottomup.bbend",     \
              Terminator, bottom_up)
#define ANNOTATE_TOPDOWN_BBSTART(_states, _basicblock)                        \
  ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.topdown.bbstart",    \
              Entrance, top_down)
#define ANNOTATE_TOPDOWN_BBEND(_states, _basicblock)                          \
  ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.topdown.bbend",      \
              Terminator, top_down)
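// Typical use of these macros (an illustrative sketch, not from the original
// source), e.g. around a bottom-up visit of a basic block:
//
//   ANNOTATE_BOTTOMUP_BBEND(MyStates, BB);
//   ... visit the instructions of BB in reverse ...
//   ANNOTATE_BOTTOMUP_BBSTART(MyStates, BB);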
#else // !ARC_ANNOTATION

// If annotations are off, noop.
#define ANNOTATE_BOTTOMUP(inst, ptr, old, new)
#define ANNOTATE_TOPDOWN(inst, ptr, old, new)
#define ANNOTATE_BOTTOMUP_BBSTART(states, basicblock)
#define ANNOTATE_BOTTOMUP_BBEND(states, basicblock)
#define ANNOTATE_TOPDOWN_BBSTART(states, basicblock)
#define ANNOTATE_TOPDOWN_BBEND(states, basicblock)

#endif // !ARC_ANNOTATION
/// \brief The main ARC optimization pass.
class ObjCARCOpt : public FunctionPass {
  bool Changed;
  ProvenanceAnalysis PA;

  /// A flag indicating whether this optimization pass should run.
  bool Run;

  /// Declarations for ObjC runtime functions, for use in creating calls to
  /// them. These are initialized lazily to avoid cluttering up the Module
  /// with unused declarations.

  /// Declaration for ObjC runtime function objc_autoreleaseReturnValue.
  Constant *AutoreleaseRVCallee;
  /// Declaration for ObjC runtime function objc_release.
  Constant *ReleaseCallee;
  /// Declaration for ObjC runtime function objc_retain.
  Constant *RetainCallee;
  /// Declaration for ObjC runtime function objc_retainBlock.
  Constant *RetainBlockCallee;
  /// Declaration for ObjC runtime function objc_autorelease.
  Constant *AutoreleaseCallee;

  /// Flags which determine whether each of the interesting runtime functions
  /// is in fact used in the current function.
  unsigned UsedInThisFunction;

  /// The Metadata Kind for clang.imprecise_release metadata.
  unsigned ImpreciseReleaseMDKind;

  /// The Metadata Kind for clang.arc.copy_on_escape metadata.
  unsigned CopyOnEscapeMDKind;

  /// The Metadata Kind for clang.arc.no_objc_arc_exceptions metadata.
  unsigned NoObjCARCExceptionsMDKind;

#ifdef ARC_ANNOTATIONS
  /// The Metadata Kind for llvm.arc.annotation.bottomup metadata.
  unsigned ARCAnnotationBottomUpMDKind;
  /// The Metadata Kind for llvm.arc.annotation.topdown metadata.
  unsigned ARCAnnotationTopDownMDKind;
  /// The Metadata Kind for llvm.arc.annotation.provenancesource metadata.
  unsigned ARCAnnotationProvenanceSourceMDKind;
#endif // ARC_ANNOTATIONS
  Constant *getAutoreleaseRVCallee(Module *M);
  Constant *getReleaseCallee(Module *M);
  Constant *getRetainCallee(Module *M);
  Constant *getRetainBlockCallee(Module *M);
  Constant *getAutoreleaseCallee(Module *M);

  bool IsRetainBlockOptimizable(const Instruction *Inst);

  bool OptimizeRetainRVCall(Function &F, Instruction *RetainRV);
  void OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV,
                                 InstructionClass &Class);
  bool OptimizeRetainBlockCall(Function &F, Instruction *RetainBlock,
                               InstructionClass &Class);
  void OptimizeIndividualCalls(Function &F);

  void CheckForCFGHazards(const BasicBlock *BB,
                          DenseMap<const BasicBlock *, BBState> &BBStates,
                          BBState &MyStates) const;
  bool VisitInstructionBottomUp(Instruction *Inst,
                                BasicBlock *BB,
                                MapVector<Value *, RRInfo> &Retains,
                                BBState &MyStates);
  bool VisitBottomUp(BasicBlock *BB,
                     DenseMap<const BasicBlock *, BBState> &BBStates,
                     MapVector<Value *, RRInfo> &Retains);
  bool VisitInstructionTopDown(Instruction *Inst,
                               DenseMap<Value *, RRInfo> &Releases,
                               BBState &MyStates);
  bool VisitTopDown(BasicBlock *BB,
                    DenseMap<const BasicBlock *, BBState> &BBStates,
                    DenseMap<Value *, RRInfo> &Releases);
  bool Visit(Function &F,
             DenseMap<const BasicBlock *, BBState> &BBStates,
             MapVector<Value *, RRInfo> &Retains,
             DenseMap<Value *, RRInfo> &Releases);

  void MoveCalls(Value *Arg, RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
                 MapVector<Value *, RRInfo> &Retains,
                 DenseMap<Value *, RRInfo> &Releases,
                 SmallVectorImpl<Instruction *> &DeadInsts,
                 Module *M);

  bool ConnectTDBUTraversals(DenseMap<const BasicBlock *, BBState> &BBStates,
                             MapVector<Value *, RRInfo> &Retains,
                             DenseMap<Value *, RRInfo> &Releases,
                             Module *M,
                             SmallVector<Instruction *, 4> &NewRetains,
                             SmallVector<Instruction *, 4> &NewReleases,
                             SmallVector<Instruction *, 8> &DeadInsts,
                             RRInfo &RetainsToMove,
                             RRInfo &ReleasesToMove,
                             Value *Arg,
                             bool KnownSafe,
                             bool &AnyPairsCompletelyEliminated);

  bool PerformCodePlacement(DenseMap<const BasicBlock *, BBState> &BBStates,
                            MapVector<Value *, RRInfo> &Retains,
                            DenseMap<Value *, RRInfo> &Releases,
                            Module *M);

  void OptimizeWeakCalls(Function &F);

  bool OptimizeSequences(Function &F);

  void OptimizeReturns(Function &F);

#ifndef NDEBUG
  void GatherStatistics(Function &F, bool AfterOptimization = false);
#endif

  virtual void getAnalysisUsage(AnalysisUsage &AU) const;
  virtual bool doInitialization(Module &M);
  virtual bool runOnFunction(Function &F);
  virtual void releaseMemory();

public:
  static char ID;
  ObjCARCOpt() : FunctionPass(ID) {
    initializeObjCARCOptPass(*PassRegistry::getPassRegistry());
  }
};
char ObjCARCOpt::ID = 0;
INITIALIZE_PASS_BEGIN(ObjCARCOpt,
                      "objc-arc", "ObjC ARC optimization", false, false)
INITIALIZE_PASS_DEPENDENCY(ObjCARCAliasAnalysis)
INITIALIZE_PASS_END(ObjCARCOpt,
                    "objc-arc", "ObjC ARC optimization", false, false)

Pass *llvm::createObjCARCOptPass() {
  return new ObjCARCOpt();
}
void ObjCARCOpt::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<ObjCARCAliasAnalysis>();
  AU.addRequired<AliasAnalysis>();
  // ARC optimization doesn't currently split critical edges.
  AU.setPreservesCFG();
}
bool ObjCARCOpt::IsRetainBlockOptimizable(const Instruction *Inst) {
  // Without the magic metadata tag, we have to assume this might be an
  // objc_retainBlock call inserted to convert a block pointer to an id,
  // in which case it really is needed.
  if (!Inst->getMetadata(CopyOnEscapeMDKind))
    return false;

  // If the pointer "escapes" (not including being used in a call),
  // the copy may be needed.
  if (DoesRetainableObjPtrEscape(Inst))
    return false;

  // Otherwise, it's not needed.
  return true;
}
Constant *ObjCARCOpt::getAutoreleaseRVCallee(Module *M) {
  if (!AutoreleaseRVCallee) {
    LLVMContext &C = M->getContext();
    Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
    Type *Params[] = { I8X };
    FunctionType *FTy = FunctionType::get(I8X, Params, /*isVarArg=*/false);
    AttributeSet Attribute =
      AttributeSet().addAttribute(M->getContext(), AttributeSet::FunctionIndex,
                                  Attribute::NoUnwind);
    AutoreleaseRVCallee =
      M->getOrInsertFunction("objc_autoreleaseReturnValue", FTy,
                             Attribute);
  }
  return AutoreleaseRVCallee;
}
Constant *ObjCARCOpt::getReleaseCallee(Module *M) {
  if (!ReleaseCallee) {
    LLVMContext &C = M->getContext();
    Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) };
    AttributeSet Attribute =
      AttributeSet().addAttribute(M->getContext(), AttributeSet::FunctionIndex,
                                  Attribute::NoUnwind);
    ReleaseCallee =
      M->getOrInsertFunction(
        "objc_release",
        FunctionType::get(Type::getVoidTy(C), Params, /*isVarArg=*/false),
        Attribute);
  }
  return ReleaseCallee;
}
Constant *ObjCARCOpt::getRetainCallee(Module *M) {
  if (!RetainCallee) {
    LLVMContext &C = M->getContext();
    Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) };
    AttributeSet Attribute =
      AttributeSet().addAttribute(M->getContext(), AttributeSet::FunctionIndex,
                                  Attribute::NoUnwind);
    RetainCallee =
      M->getOrInsertFunction(
        "objc_retain",
        FunctionType::get(Params[0], Params, /*isVarArg=*/false),
        Attribute);
  }
  return RetainCallee;
}
Constant *ObjCARCOpt::getRetainBlockCallee(Module *M) {
  if (!RetainBlockCallee) {
    LLVMContext &C = M->getContext();
    Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) };
    // objc_retainBlock is not nounwind because it calls user copy constructors
    // which could theoretically throw.
    RetainBlockCallee =
      M->getOrInsertFunction(
        "objc_retainBlock",
        FunctionType::get(Params[0], Params, /*isVarArg=*/false),
        AttributeSet());
  }
  return RetainBlockCallee;
}
Constant *ObjCARCOpt::getAutoreleaseCallee(Module *M) {
  if (!AutoreleaseCallee) {
    LLVMContext &C = M->getContext();
    Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) };
    AttributeSet Attribute =
      AttributeSet().addAttribute(M->getContext(), AttributeSet::FunctionIndex,
                                  Attribute::NoUnwind);
    AutoreleaseCallee =
      M->getOrInsertFunction(
        "objc_autorelease",
        FunctionType::get(Params[0], Params, /*isVarArg=*/false),
        Attribute);
  }
  return AutoreleaseCallee;
}
/// Turn objc_retainAutoreleasedReturnValue into objc_retain if the operand is
/// not a return value. Or, if it can be paired with an
/// objc_autoreleaseReturnValue, delete the pair and return true.
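///
/// For example (illustrative, not from the original comment), in:
///
///   %2 = call i8* @objc_autoreleaseReturnValue(i8* %1)
///   %3 = call i8* @objc_retainAutoreleasedReturnValue(i8* %2)
///
/// the two calls cancel out and both can be erased.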
bool
ObjCARCOpt::OptimizeRetainRVCall(Function &F, Instruction *RetainRV) {
  // Check for the argument being from an immediately preceding call or invoke.
  const Value *Arg = GetObjCArg(RetainRV);
  ImmutableCallSite CS(Arg);
  if (const Instruction *Call = CS.getInstruction()) {
    if (Call->getParent() == RetainRV->getParent()) {
      BasicBlock::const_iterator I = Call;
      ++I;
      while (IsNoopInstruction(I)) ++I;
      if (&*I == RetainRV)
        return false;
    } else if (const InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
      BasicBlock *RetainRVParent = RetainRV->getParent();
      if (II->getNormalDest() == RetainRVParent) {
        BasicBlock::const_iterator I = RetainRVParent->begin();
        while (IsNoopInstruction(I)) ++I;
        if (&*I == RetainRV)
          return false;
      }
    }
  }

  // Check for being preceded by an objc_autoreleaseReturnValue on the same
  // pointer. In this case, we can delete the pair.
  BasicBlock::iterator I = RetainRV, Begin = RetainRV->getParent()->begin();
  if (I != Begin) {
    do --I; while (I != Begin && IsNoopInstruction(I));
    if (GetBasicInstructionClass(I) == IC_AutoreleaseRV &&
        GetObjCArg(I) == Arg) {
      Changed = true;
      ++NumPeeps;

      DEBUG(dbgs() << "Erasing autoreleaseRV,retainRV pair: " << *I << "\n"
                   << "Erasing " << *RetainRV << "\n");

      EraseInstruction(I);
      EraseInstruction(RetainRV);
      return true;
    }
  }

  // Turn it to a plain objc_retain.
  Changed = true;
  ++NumPeeps;

  DEBUG(dbgs() << "Transforming objc_retainAutoreleasedReturnValue => "
                  "objc_retain since the operand is not a return value.\n"
                  "Old = " << *RetainRV << "\n");

  cast<CallInst>(RetainRV)->setCalledFunction(getRetainCallee(F.getParent()));

  DEBUG(dbgs() << "New = " << *RetainRV << "\n");

  return false;
}
/// Turn objc_autoreleaseReturnValue into objc_autorelease if the result is not
/// used as a return value.
void
ObjCARCOpt::OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV,
                                      InstructionClass &Class) {
  // Check for a return of the pointer value.
  const Value *Ptr = GetObjCArg(AutoreleaseRV);
  SmallVector<const Value *, 2> Users;
  Users.push_back(Ptr);
  do {
    Ptr = Users.pop_back_val();
    for (Value::const_use_iterator UI = Ptr->use_begin(), UE = Ptr->use_end();
         UI != UE; ++UI) {
      const User *I = *UI;
      if (isa<ReturnInst>(I) || GetBasicInstructionClass(I) == IC_RetainRV)
        return;
      if (isa<BitCastInst>(I))
        Users.push_back(I);
    }
  } while (!Users.empty());

  Changed = true;
  ++NumPeeps;

  DEBUG(dbgs() << "Transforming objc_autoreleaseReturnValue => "
                  "objc_autorelease since its operand is not used as a return "
                  "value.\n"
                  "Old = " << *AutoreleaseRV << "\n");

  CallInst *AutoreleaseRVCI = cast<CallInst>(AutoreleaseRV);
  AutoreleaseRVCI->
    setCalledFunction(getAutoreleaseCallee(F.getParent()));
  AutoreleaseRVCI->setTailCall(false); // Never tail call objc_autorelease.
  Class = IC_Autorelease;

  DEBUG(dbgs() << "New: " << *AutoreleaseRV << "\n");
}
// \brief Attempt to strength reduce objc_retainBlock calls to objc_retain
// calls.
//
// Specifically: If an objc_retainBlock call has the copy_on_escape metadata and
// does not escape (following the rules of block escaping), strength reduce the
// objc_retainBlock to an objc_retain.
//
// TODO: If an objc_retainBlock call is dominated by a previous
// objc_retainBlock call, strength reduce the objc_retainBlock to an
// objc_retain.
bool
ObjCARCOpt::OptimizeRetainBlockCall(Function &F, Instruction *Inst,
                                    InstructionClass &Class) {
  assert(GetBasicInstructionClass(Inst) == Class);
  assert(IC_RetainBlock == Class);
  // If we cannot optimize Inst, return false.
  if (!IsRetainBlockOptimizable(Inst))
    return false;

  Changed = true;
  ++NumPeeps;

  DEBUG(dbgs() << "Strength reduced retainBlock => retain.\n");
  DEBUG(dbgs() << "Old: " << *Inst << "\n");
  CallInst *RetainBlock = cast<CallInst>(Inst);
  RetainBlock->setCalledFunction(getRetainCallee(F.getParent()));
  // Remove copy_on_escape metadata.
  RetainBlock->setMetadata(CopyOnEscapeMDKind, 0);
  Class = IC_Retain;
  DEBUG(dbgs() << "New: " << *Inst << "\n");
  return true;
}
/// Visit each call, one at a time, and make simplifications without doing any
/// additional analysis.
void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
  DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeIndividualCalls ==\n");

  // Reset all the flags in preparation for recomputing them.
  UsedInThisFunction = 0;

  // Visit all objc_* calls in F.
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;

    InstructionClass Class = GetBasicInstructionClass(Inst);

    DEBUG(dbgs() << "Visiting: Class: " << Class << "; " << *Inst << "\n");

    switch (Class) {
    default: break;

    // Delete no-op casts. These function calls have special semantics, but
    // the semantics are entirely implemented via lowering in the front-end,
    // so by the time they reach the optimizer, they are just no-op calls
    // which return their argument.
    //
    // There are gray areas here, as the ability to cast reference-counted
    // pointers to raw void* and back allows code to break ARC assumptions,
    // however these are currently considered to be unimportant.
    case IC_NoopCast:
      Changed = true;
      ++NumNoops;
      DEBUG(dbgs() << "Erasing no-op cast: " << *Inst << "\n");
      EraseInstruction(Inst);
      continue;
    // If the pointer-to-weak-pointer is null, it's undefined behavior.
    case IC_StoreWeak:
    case IC_InitWeak:
    case IC_LoadWeakRetained:
    case IC_LoadWeak:
    case IC_DestroyWeak: {
      CallInst *CI = cast<CallInst>(Inst);
      if (IsNullOrUndef(CI->getArgOperand(0))) {
        Changed = true;
        Type *Ty = CI->getArgOperand(0)->getType();
        new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
                      Constant::getNullValue(Ty),
                      CI);
        llvm::Value *NewValue = UndefValue::get(CI->getType());
        DEBUG(dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
                        "\nOld = " << *CI << "\nNew = " << *NewValue << "\n");
        CI->replaceAllUsesWith(NewValue);
        CI->eraseFromParent();
        continue;
      }
      break;
    }
    case IC_CopyWeak:
    case IC_MoveWeak: {
      CallInst *CI = cast<CallInst>(Inst);
      if (IsNullOrUndef(CI->getArgOperand(0)) ||
          IsNullOrUndef(CI->getArgOperand(1))) {
        Changed = true;
        Type *Ty = CI->getArgOperand(0)->getType();
        new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
                      Constant::getNullValue(Ty),
                      CI);

        llvm::Value *NewValue = UndefValue::get(CI->getType());
        DEBUG(dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
                        "\nOld = " << *CI << "\nNew = " << *NewValue << "\n");

        CI->replaceAllUsesWith(NewValue);
        CI->eraseFromParent();
        continue;
      }
      break;
    }
    case IC_RetainBlock:
      // If we strength reduce an objc_retainBlock to an objc_retain, continue
      // onto the objc_retain peephole optimizations. Otherwise break.
      OptimizeRetainBlockCall(F, Inst, Class);
      break;
    case IC_RetainRV:
      if (OptimizeRetainRVCall(F, Inst))
        continue;
      break;
    case IC_AutoreleaseRV:
      OptimizeAutoreleaseRVCall(F, Inst, Class);
      break;
    }
    // objc_autorelease(x) -> objc_release(x) if x is otherwise unused.
    if (IsAutorelease(Class) && Inst->use_empty()) {
      CallInst *Call = cast<CallInst>(Inst);
      const Value *Arg = Call->getArgOperand(0);
      Arg = FindSingleUseIdentifiedObject(Arg);
      if (Arg) {
        Changed = true;
        ++NumAutoreleases;

        // Create the declaration lazily.
        LLVMContext &C = Inst->getContext();
        CallInst *NewCall =
          CallInst::Create(getReleaseCallee(F.getParent()),
                           Call->getArgOperand(0), "", Call);
        NewCall->setMetadata(ImpreciseReleaseMDKind, MDNode::get(C, None));

        DEBUG(dbgs() << "Replacing autorelease{,RV}(x) with objc_release(x) "
              "since x is otherwise unused.\nOld: " << *Call << "\nNew: "
              << *NewCall << "\n");

        EraseInstruction(Call);
        Inst = NewCall;
        Class = IC_Release;
      }
    }
    // For functions which can never be passed stack arguments, add
    // a tail keyword.
    if (IsAlwaysTail(Class)) {
      Changed = true;
      DEBUG(dbgs() << "Adding tail keyword to function since it can never be "
                      "passed stack args: " << *Inst << "\n");
      cast<CallInst>(Inst)->setTailCall();
    }

    // Ensure that functions that can never have a "tail" keyword due to the
    // semantics of ARC truly do not do so.
    if (IsNeverTail(Class)) {
      Changed = true;
      DEBUG(dbgs() << "Removing tail keyword from function: " << *Inst <<
            "\n");
      cast<CallInst>(Inst)->setTailCall(false);
    }

    // Set nounwind as needed.
    if (IsNoThrow(Class)) {
      Changed = true;
      DEBUG(dbgs() << "Found no throw class. Setting nounwind on: " << *Inst
            << "\n");
      cast<CallInst>(Inst)->setDoesNotThrow();
    }

    if (!IsNoopOnNull(Class)) {
      UsedInThisFunction |= 1 << Class;
      continue;
    }
    const Value *Arg = GetObjCArg(Inst);

    // ARC calls with null are no-ops. Delete them.
    if (IsNullOrUndef(Arg)) {
      Changed = true;
      ++NumNoops;
      DEBUG(dbgs() << "ARC calls with null are no-ops. Erasing: " << *Inst
            << "\n");
      EraseInstruction(Inst);
      continue;
    }

    // Keep track of which of retain, release, autorelease, and retain_block
    // are actually present in this function.
    UsedInThisFunction |= 1 << Class;
    // If Arg is a PHI, and one or more incoming values to the
    // PHI are null, and the call is control-equivalent to the PHI, and there
    // are no relevant side effects between the PHI and the call, the call
    // could be pushed up to just those paths with non-null incoming values.
    // For now, don't bother splitting critical edges for this.
    SmallVector<std::pair<Instruction *, const Value *>, 4> Worklist;
    Worklist.push_back(std::make_pair(Inst, Arg));
    do {
      std::pair<Instruction *, const Value *> Pair = Worklist.pop_back_val();
      Inst = Pair.first;
      Arg = Pair.second;

      const PHINode *PN = dyn_cast<PHINode>(Arg);
      if (!PN) continue;

      // Determine if the PHI has any null operands, or any incoming
      // critical edges.
      bool HasNull = false;
      bool HasCriticalEdges = false;
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        Value *Incoming =
          StripPointerCastsAndObjCCalls(PN->getIncomingValue(i));
        if (IsNullOrUndef(Incoming))
          HasNull = true;
        else if (cast<TerminatorInst>(PN->getIncomingBlock(i)->back())
                   .getNumSuccessors() != 1) {
          HasCriticalEdges = true;
          break;
        }
      }
      // If we have null operands and no critical edges, optimize.
      if (!HasCriticalEdges && HasNull) {
        SmallPtrSet<Instruction *, 4> DependingInstructions;
        SmallPtrSet<const BasicBlock *, 4> Visited;

        // Check that there is nothing that cares about the reference
        // count between the call and the phi.
        switch (Class) {
        case IC_Retain:
        case IC_RetainBlock:
          // These can always be moved up.
          break;
        case IC_Release:
          // These can't be moved across things that care about the retain
          // count.
          FindDependencies(NeedsPositiveRetainCount, Arg,
                           Inst->getParent(), Inst,
                           DependingInstructions, Visited, PA);
          break;
        case IC_Autorelease:
          // These can't be moved across autorelease pool scope boundaries.
          FindDependencies(AutoreleasePoolBoundary, Arg,
                           Inst->getParent(), Inst,
                           DependingInstructions, Visited, PA);
          break;
        case IC_RetainRV:
        case IC_AutoreleaseRV:
          // Don't move these; the RV optimization depends on the autoreleaseRV
          // being tail called, and the retainRV being immediately after a call
          // (which might still happen if we get lucky with codegen layout, but
          // it's not worth taking the chance).
          continue;
        default:
          llvm_unreachable("Invalid dependence flavor");
        }

        if (DependingInstructions.size() == 1 &&
            *DependingInstructions.begin() == PN) {
          Changed = true;
          ++NumPartialNoops;
          // Clone the call into each predecessor that has a non-null value.
          CallInst *CInst = cast<CallInst>(Inst);
          Type *ParamTy = CInst->getArgOperand(0)->getType();
          for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
            Value *Incoming =
              StripPointerCastsAndObjCCalls(PN->getIncomingValue(i));
            if (!IsNullOrUndef(Incoming)) {
              CallInst *Clone = cast<CallInst>(CInst->clone());
              Value *Op = PN->getIncomingValue(i);
              Instruction *InsertPos = &PN->getIncomingBlock(i)->back();
              if (Op->getType() != ParamTy)
                Op = new BitCastInst(Op, ParamTy, "", InsertPos);
              Clone->setArgOperand(0, Op);
              Clone->insertBefore(InsertPos);

              DEBUG(dbgs() << "Cloning "
                    << *CInst << "\n"
                    "And inserting clone at " << *InsertPos << "\n");
              Worklist.push_back(std::make_pair(Clone, Incoming));
            }
          }
          // Erase the original call.
          DEBUG(dbgs() << "Erasing: " << *CInst << "\n");
          EraseInstruction(CInst);
          continue;
        }
      }
    } while (!Worklist.empty());
  }
}
/// If we have a top down pointer in the S_Use state, make sure that there are
/// no CFG hazards by checking the states of various bottom up pointers.
static void CheckForUseCFGHazard(const Sequence SuccSSeq,
                                 const bool SuccSRRIKnownSafe,
                                 PtrState &S,
                                 bool &SomeSuccHasSame,
                                 bool &AllSuccsHaveSame,
                                 bool &ShouldContinue) {
  switch (SuccSSeq) {
  case S_CanRelease: {
    if (!S.RRI.KnownSafe && !SuccSRRIKnownSafe) {
      S.ClearSequenceProgress();
      break;
    }
    ShouldContinue = true;
    break;
  }
  case S_Use:
    SomeSuccHasSame = true;
    break;
  case S_Stop:
  case S_Release:
  case S_MovableRelease:
    if (!S.RRI.KnownSafe && !SuccSRRIKnownSafe)
      AllSuccsHaveSame = false;
    break;
  case S_Retain:
    llvm_unreachable("bottom-up pointer in retain state!");
  case S_None:
    llvm_unreachable("This should have been handled earlier.");
  }
}
/// If we have a Top Down pointer in the S_CanRelease state, make sure that
/// there are no CFG hazards by checking the states of various bottom up
/// pointers.
static void CheckForCanReleaseCFGHazard(const Sequence SuccSSeq,
                                        const bool SuccSRRIKnownSafe,
                                        PtrState &S,
                                        bool &SomeSuccHasSame,
                                        bool &AllSuccsHaveSame) {
  switch (SuccSSeq) {
  case S_CanRelease:
    SomeSuccHasSame = true;
    break;
  case S_Stop:
  case S_Release:
  case S_MovableRelease:
  case S_Use:
    if (!S.RRI.KnownSafe && !SuccSRRIKnownSafe)
      AllSuccsHaveSame = false;
    break;
  case S_Retain:
    llvm_unreachable("bottom-up pointer in retain state!");
  case S_None:
    llvm_unreachable("This should have been handled earlier.");
  }
}
/// Check for critical edges, loop boundaries, irreducible control flow, or
/// other CFG structures where moving code across the edge would result in it
/// being executed more.
void
ObjCARCOpt::CheckForCFGHazards(const BasicBlock *BB,
                               DenseMap<const BasicBlock *, BBState> &BBStates,
                               BBState &MyStates) const {
  // If any top-down local-use or possible-dec has a succ which is earlier in
  // the sequence, forget it.
  for (BBState::ptr_iterator I = MyStates.top_down_ptr_begin(),
       E = MyStates.top_down_ptr_end(); I != E; ++I) {
    PtrState &S = I->second;
    const Sequence Seq = I->second.GetSeq();

    // We only care about S_Retain, S_CanRelease, and S_Use.
    if (Seq == S_None)
      continue;

    // Make sure that if extra top down states are added in the future that this
    // code is updated to handle it.
    assert((Seq == S_Retain || Seq == S_CanRelease || Seq == S_Use) &&
           "Unknown top down sequence state.");
    const Value *Arg = I->first;
    const TerminatorInst *TI = cast<TerminatorInst>(&BB->back());
    bool SomeSuccHasSame = false;
    bool AllSuccsHaveSame = true;

    succ_const_iterator SI(TI), SE(TI, false);

    for (; SI != SE; ++SI) {
1784 // If VisitBottomUp has pointer information for this successor, take
1785 // what we know about it.
1786 const DenseMap<const BasicBlock *, BBState>::iterator BBI =
1788 assert(BBI != BBStates.end());
1789 const PtrState &SuccS = BBI->second.getPtrBottomUpState(Arg);
1790 const Sequence SuccSSeq = SuccS.GetSeq();
1792 // If the bottom-up pointer is in the S_None state, clear the sequence
1793 // progress, since a sequence finishing in the bottom-up state suggests a
1794 // mismatch between the retains and the releases. This is true for all
1795 // three cases that we are handling here: S_Retain, S_Use, and
1796 // S_CanRelease.
1797 if (SuccSSeq == S_None) {
1798 S.ClearSequenceProgress();
1799 continue;
1800 }
1802 // If we have S_Use or S_CanRelease, perform our check for CFG hazards
1803 // against this successor's bottom-up state.
1804 const bool SuccSRRIKnownSafe = SuccS.RRI.KnownSafe;
1806 // *NOTE* We do not use Seq from above here since we are allowing for
1807 // S.GetSeq() to change while we are visiting basic blocks.
1808 switch(S.GetSeq()) {
1809 case S_Use: {
1810 bool ShouldContinue = false;
1811 CheckForUseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S,
1812 SomeSuccHasSame, AllSuccsHaveSame,
1813 ShouldContinue);
1818 case S_CanRelease: {
1819 CheckForCanReleaseCFGHazard(SuccSSeq, SuccSRRIKnownSafe,
1820 S, SomeSuccHasSame, AllSuccsHaveSame);
1828 case S_MovableRelease:
1833 // If the state at the other end of any of the successor edges
1834 // matches the current state, require all edges to match. This
1835 // guards against loops in the middle of a sequence.
1836 if (SomeSuccHasSame && !AllSuccsHaveSame)
1837 S.ClearSequenceProgress();
1841 bool
1842 ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst,
1843 BasicBlock *BB,
1844 MapVector<Value *, RRInfo> &Retains,
1845 BBState &MyStates) {
1846 bool NestingDetected = false;
1847 InstructionClass Class = GetInstructionClass(Inst);
1848 const Value *Arg = 0;
1850 DEBUG(dbgs() << "Class: " << Class << "\n");
1852 switch (Class) {
1853 case IC_Release: {
1854 Arg = GetObjCArg(Inst);
1856 PtrState &S = MyStates.getPtrBottomUpState(Arg);
1858 // If we see two releases in a row on the same pointer, make a note, and
1859 // we'll circle back to revisit it after we've hopefully eliminated the
1860 // second release, which may allow us to eliminate the first release
1861 // too.
1862 // Theoretically we could implement removal of nested retain+release
1863 // pairs by making PtrState hold a stack of states, but this is
1864 // simple and avoids adding overhead for the non-nested case.
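// (Hedged illustration of the nesting this detects.) Scanning bottom-up,
// the first release below is reached while %x is already in a release
// state:
//   call i8* @objc_retain(i8* %x)
//   call i8* @objc_retain(i8* %x)
//   call void @objc_release(i8* %x)
//   call void @objc_release(i8* %x)
// Eliminating the inner pair on one pass can expose the outer pair on
// the next.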
1865 if (S.GetSeq() == S_Release || S.GetSeq() == S_MovableRelease) {
1866 DEBUG(dbgs() << "Found nested releases (i.e. a release pair)\n");
1867 NestingDetected = true;
1868 }
1870 MDNode *ReleaseMetadata = Inst->getMetadata(ImpreciseReleaseMDKind);
1871 Sequence NewSeq = ReleaseMetadata ? S_MovableRelease : S_Release;
1872 ANNOTATE_BOTTOMUP(Inst, Arg, S.GetSeq(), NewSeq);
1873 S.ResetSequenceProgress(NewSeq);
1874 S.RRI.ReleaseMetadata = ReleaseMetadata;
1875 S.RRI.KnownSafe = S.HasKnownPositiveRefCount();
1876 S.RRI.IsTailCallRelease = cast<CallInst>(Inst)->isTailCall();
1877 S.RRI.Calls.insert(Inst);
1878 S.SetKnownPositiveRefCount();
1879 break;
1880 }
1881 case IC_RetainBlock:
1882 // In OptimizeIndividualCalls, we have strength reduced all optimizable
1883 // objc_retainBlocks to objc_retains. Thus at this point any
1884 // objc_retainBlocks that we see are not optimizable.
1885 // FALLTHROUGH
1886 case IC_Retain:
1887 case IC_RetainRV: {
1888 Arg = GetObjCArg(Inst);
1890 PtrState &S = MyStates.getPtrBottomUpState(Arg);
1891 S.SetKnownPositiveRefCount();
1893 Sequence OldSeq = S.GetSeq();
1894 switch (OldSeq) {
1895 case S_Stop:
1896 case S_Release:
1897 case S_MovableRelease:
1898 case S_Use:
1899 // If OldSeq is not S_Use, or OldSeq is S_Use and we are tracking an
1900 // imprecise release, clear our reverse insertion points.
1901 if (OldSeq != S_Use || S.RRI.IsTrackingImpreciseReleases())
1902 S.RRI.ReverseInsertPts.clear();
1903 // FALL THROUGH
1904 case S_CanRelease:
1905 // Don't do retain+release tracking for IC_RetainRV, because it's
1906 // better to let it remain as the first instruction after a call.
1907 if (Class != IC_RetainRV)
1908 Retains[Inst] = S.RRI;
1909 S.ClearSequenceProgress();
1914 llvm_unreachable("bottom-up pointer in retain state!");
1916 ANNOTATE_BOTTOMUP(Inst, Arg, OldSeq, S.GetSeq());
1917 // A retain moving bottom up can be a use.
1918 break;
1919 }
1920 case IC_AutoreleasepoolPop:
1921 // Conservatively, clear MyStates for all known pointers.
1922 MyStates.clearBottomUpPointers();
1923 return NestingDetected;
1924 case IC_AutoreleasepoolPush:
1925 case IC_None:
1926 // These are irrelevant.
1927 return NestingDetected;
1928 case IC_User:
1929 // If we have a store into an alloca of a pointer we are tracking, the
1930 // pointer has multiple owners implying that we must be more conservative.
1932 // This comes up in the context of a pointer being ``KnownSafe''. In the
1933 // presence of a block being initialized, the frontend will emit the
1934 // objc_retain on the original pointer and the release on the pointer loaded
1935 // from the alloca. The optimizer will, through the provenance analysis,
1936 // realize that the two are related, but since we only require KnownSafe in
1937 // one direction, will match the inner retain on the original pointer with
1938 // the guard release on the original pointer. This is fixed by ensuring that
1939 // in the presence of allocas we only unconditionally remove pointers if
1940 // both our retain and our release are KnownSafe.
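// (Hedged sketch of the problematic pattern; names are illustrative.)
//   %slot = alloca i8*
//   %r = call i8* @objc_retain(i8* %p)
//   store i8* %p, i8** %slot            ; block initialization captures %p
//   ...
//   %l = load i8** %slot
//   call void @objc_release(i8* %l)
// The store gives the object multiple owners, so both the retain and the
// release must be KnownSafe before the pair may be removed.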
1941 if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
1942 if (AreAnyUnderlyingObjectsAnAlloca(SI->getPointerOperand())) {
1943 BBState::ptr_iterator I = MyStates.findPtrBottomUpState(
1944 StripPointerCastsAndObjCCalls(SI->getValueOperand()));
1945 if (I != MyStates.bottom_up_ptr_end())
1946 I->second.RRI.MultipleOwners = true;
1954 // Consider any other possible effects of this instruction on each
1955 // pointer being tracked.
1956 for (BBState::ptr_iterator MI = MyStates.bottom_up_ptr_begin(),
1957 ME = MyStates.bottom_up_ptr_end(); MI != ME; ++MI) {
1958 const Value *Ptr = MI->first;
1959 if (Ptr == Arg)
1960 continue; // Handled above.
1961 PtrState &S = MI->second;
1962 Sequence Seq = S.GetSeq();
1964 // Check for possible releases.
1965 if (CanAlterRefCount(Inst, Ptr, PA, Class)) {
1966 DEBUG(dbgs() << "CanAlterRefCount: Seq: " << Seq << "; " << *Ptr
1967 << "\n");
1968 S.ClearKnownPositiveRefCount();
1969 switch (Seq) {
1970 case S_Use:
1971 S.SetSeq(S_CanRelease);
1972 ANNOTATE_BOTTOMUP(Inst, Ptr, Seq, S.GetSeq());
1976 case S_MovableRelease:
1981 llvm_unreachable("bottom-up pointer in retain state!");
1985 // Check for possible direct uses.
1986 switch (Seq) {
1987 case S_Release:
1988 case S_MovableRelease:
1989 if (CanUse(Inst, Ptr, PA, Class)) {
1990 DEBUG(dbgs() << "CanUse: Seq: " << Seq << "; " << *Ptr
1991 << "\n");
1992 assert(S.RRI.ReverseInsertPts.empty());
1993 // If this is an invoke instruction, we're scanning it as part of
1994 // one of its successor blocks, since we can't insert code after it
1995 // in its own block, and we don't want to split critical edges.
1996 if (isa<InvokeInst>(Inst))
1997 S.RRI.ReverseInsertPts.insert(BB->getFirstInsertionPt());
1998 else
1999 S.RRI.ReverseInsertPts.insert(llvm::next(BasicBlock::iterator(Inst)));
2000 S.SetSeq(S_Use);
2001 ANNOTATE_BOTTOMUP(Inst, Ptr, Seq, S_Use);
2002 } else if (Seq == S_Release && IsUser(Class)) {
2003 DEBUG(dbgs() << "PreciseReleaseUse: Seq: " << Seq << "; " << *Ptr
2004 << "\n");
2005 // Non-movable releases depend on any possible objc pointer use.
2006 S.SetSeq(S_Stop);
2007 ANNOTATE_BOTTOMUP(Inst, Ptr, S_Release, S_Stop);
2008 assert(S.RRI.ReverseInsertPts.empty());
2009 // As above; handle invoke specially.
2010 if (isa<InvokeInst>(Inst))
2011 S.RRI.ReverseInsertPts.insert(BB->getFirstInsertionPt());
2012 else
2013 S.RRI.ReverseInsertPts.insert(llvm::next(BasicBlock::iterator(Inst)));
2014 }
2015 break;
2016 case S_Stop:
2017 if (CanUse(Inst, Ptr, PA, Class)) {
2018 DEBUG(dbgs() << "PreciseStopUse: Seq: " << Seq << "; " << *Ptr
2019 << "\n");
2020 S.SetSeq(S_Use);
2021 ANNOTATE_BOTTOMUP(Inst, Ptr, Seq, S_Use);
2029 llvm_unreachable("bottom-up pointer in retain state!");
2033 return NestingDetected;
2036 bool
2037 ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
2038 DenseMap<const BasicBlock *, BBState> &BBStates,
2039 MapVector<Value *, RRInfo> &Retains) {
2041 DEBUG(dbgs() << "\n== ObjCARCOpt::VisitBottomUp ==\n");
2043 bool NestingDetected = false;
2044 BBState &MyStates = BBStates[BB];
2046 // Merge the states from each successor to compute the initial state
2047 // for the current block.
2048 BBState::edge_iterator SI(MyStates.succ_begin()),
2049 SE(MyStates.succ_end());
2050 if (SI != SE) {
2051 const BasicBlock *Succ = *SI;
2052 DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Succ);
2053 assert(I != BBStates.end());
2054 MyStates.InitFromSucc(I->second);
2055 ++SI;
2056 for (; SI != SE; ++SI) {
2057 Succ = *SI;
2058 I = BBStates.find(Succ);
2059 assert(I != BBStates.end());
2060 MyStates.MergeSucc(I->second);
2061 }
2062 }
2064 // If ARC Annotations are enabled, output the current state of pointers at the
2065 // bottom of the basic block.
2066 ANNOTATE_BOTTOMUP_BBEND(MyStates, BB);
2068 // Visit all the instructions, bottom-up.
2069 for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; --I) {
2070 Instruction *Inst = llvm::prior(I);
2072 // Invoke instructions are visited as part of their successors (below).
2073 if (isa<InvokeInst>(Inst))
2074 continue;
2076 DEBUG(dbgs() << "Visiting " << *Inst << "\n");
2078 NestingDetected |= VisitInstructionBottomUp(Inst, BB, Retains, MyStates);
2081 // If there's a predecessor with an invoke, visit the invoke as if it were
2082 // part of this block, since we can't insert code after an invoke in its own
2083 // block, and we don't want to split critical edges.
2084 for (BBState::edge_iterator PI(MyStates.pred_begin()),
2085 PE(MyStates.pred_end()); PI != PE; ++PI) {
2086 BasicBlock *Pred = *PI;
2087 if (InvokeInst *II = dyn_cast<InvokeInst>(&Pred->back()))
2088 NestingDetected |= VisitInstructionBottomUp(II, BB, Retains, MyStates);
2091 // If ARC Annotations are enabled, output the current state of pointers at the
2092 // top of the basic block.
2093 ANNOTATE_BOTTOMUP_BBSTART(MyStates, BB);
2095 return NestingDetected;
2098 bool
2099 ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst,
2100 DenseMap<Value *, RRInfo> &Releases,
2101 BBState &MyStates) {
2102 bool NestingDetected = false;
2103 InstructionClass Class = GetInstructionClass(Inst);
2104 const Value *Arg = 0;
2106 switch (Class) {
2107 case IC_RetainBlock:
2108 // In OptimizeIndividualCalls, we have strength reduced all optimizable
2109 // objc_retainBlocks to objc_retains. Thus at this point any
2110 // objc_retainBlocks that we see are not optimizable.
2111 // FALLTHROUGH
2112 case IC_Retain:
2113 case IC_RetainRV: {
2114 Arg = GetObjCArg(Inst);
2116 PtrState &S = MyStates.getPtrTopDownState(Arg);
2118 // Don't do retain+release tracking for IC_RetainRV, because it's
2119 // better to let it remain as the first instruction after a call.
2120 if (Class != IC_RetainRV) {
2121 // If we see two retains in a row on the same pointer, make a note, and
2122 // we'll circle back to revisit it after we've hopefully eliminated the
2123 // second retain, which may allow us to eliminate the first retain
2124 // too.
2125 // Theoretically we could implement removal of nested retain+release
2126 // pairs by making PtrState hold a stack of states, but this is
2127 // simple and avoids adding overhead for the non-nested case.
2128 if (S.GetSeq() == S_Retain)
2129 NestingDetected = true;
2131 ANNOTATE_TOPDOWN(Inst, Arg, S.GetSeq(), S_Retain);
2132 S.ResetSequenceProgress(S_Retain);
2133 S.RRI.KnownSafe = S.HasKnownPositiveRefCount();
2134 S.RRI.Calls.insert(Inst);
2135 }
2137 S.SetKnownPositiveRefCount();
2139 // A retain can be a potential use; proceed to the generic checking
2140 // code below.
2141 break;
2142 }
2143 case IC_Release: {
2144 Arg = GetObjCArg(Inst);
2146 PtrState &S = MyStates.getPtrTopDownState(Arg);
2147 S.ClearKnownPositiveRefCount();
2149 Sequence OldSeq = S.GetSeq();
2151 MDNode *ReleaseMetadata = Inst->getMetadata(ImpreciseReleaseMDKind);
2152 switch (OldSeq) {
2153 case S_Retain:
2154 case S_CanRelease:
2156 if (OldSeq == S_Retain || ReleaseMetadata != 0)
2157 S.RRI.ReverseInsertPts.clear();
2158 // FALL THROUGH
2159 case S_Use:
2160 S.RRI.ReleaseMetadata = ReleaseMetadata;
2161 S.RRI.IsTailCallRelease = cast<CallInst>(Inst)->isTailCall();
2162 Releases[Inst] = S.RRI;
2163 ANNOTATE_TOPDOWN(Inst, Arg, S.GetSeq(), S_None);
2164 S.ClearSequenceProgress();
2170 case S_MovableRelease:
2171 llvm_unreachable("top-down pointer in release state!");
2175 case IC_AutoreleasepoolPop:
2176 // Conservatively, clear MyStates for all known pointers.
2177 MyStates.clearTopDownPointers();
2178 return NestingDetected;
2179 case IC_AutoreleasepoolPush:
2180 case IC_None:
2181 // These are irrelevant.
2182 return NestingDetected;
2183 default:
2184 break;
2185 }
2187 // Consider any other possible effects of this instruction on each
2188 // pointer being tracked.
2189 for (BBState::ptr_iterator MI = MyStates.top_down_ptr_begin(),
2190 ME = MyStates.top_down_ptr_end(); MI != ME; ++MI) {
2191 const Value *Ptr = MI->first;
2192 if (Ptr == Arg)
2193 continue; // Handled above.
2194 PtrState &S = MI->second;
2195 Sequence Seq = S.GetSeq();
2197 // Check for possible releases.
2198 if (CanAlterRefCount(Inst, Ptr, PA, Class)) {
2199 DEBUG(dbgs() << "CanAlterRefCount: Seq: " << Seq << "; " << *Ptr
2200 << "\n");
2201 S.ClearKnownPositiveRefCount();
2202 switch (Seq) {
2203 case S_Retain:
2204 S.SetSeq(S_CanRelease);
2205 ANNOTATE_TOPDOWN(Inst, Ptr, Seq, S_CanRelease);
2206 assert(S.RRI.ReverseInsertPts.empty());
2207 S.RRI.ReverseInsertPts.insert(Inst);
2209 // One call can't cause a transition from S_Retain to S_CanRelease
2210 // and S_CanRelease to S_Use. If we've made the first transition,
2211 // we're done.
2212 continue;
2219 case S_MovableRelease:
2220 llvm_unreachable("top-down pointer in release state!");
2224 // Check for possible direct uses.
2225 switch (Seq) {
2226 case S_CanRelease:
2227 if (CanUse(Inst, Ptr, PA, Class)) {
2228 DEBUG(dbgs() << "CanUse: Seq: " << Seq << "; " << *Ptr
2229 << "\n");
2230 S.SetSeq(S_Use);
2231 ANNOTATE_TOPDOWN(Inst, Ptr, Seq, S_Use);
2240 case S_MovableRelease:
2241 llvm_unreachable("top-down pointer in release state!");
2245 return NestingDetected;
2248 bool
2249 ObjCARCOpt::VisitTopDown(BasicBlock *BB,
2250 DenseMap<const BasicBlock *, BBState> &BBStates,
2251 DenseMap<Value *, RRInfo> &Releases) {
2252 DEBUG(dbgs() << "\n== ObjCARCOpt::VisitTopDown ==\n");
2253 bool NestingDetected = false;
2254 BBState &MyStates = BBStates[BB];
2256 // Merge the states from each predecessor to compute the initial state
2257 // for the current block.
2258 BBState::edge_iterator PI(MyStates.pred_begin()),
2259 PE(MyStates.pred_end());
2260 if (PI != PE) {
2261 const BasicBlock *Pred = *PI;
2262 DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Pred);
2263 assert(I != BBStates.end());
2264 MyStates.InitFromPred(I->second);
2265 ++PI;
2266 for (; PI != PE; ++PI) {
2267 Pred = *PI;
2268 I = BBStates.find(Pred);
2269 assert(I != BBStates.end());
2270 MyStates.MergePred(I->second);
2271 }
2272 }
2274 // If ARC Annotations are enabled, output the current state of pointers at the
2275 // top of the basic block.
2276 ANNOTATE_TOPDOWN_BBSTART(MyStates, BB);
2278 // Visit all the instructions, top-down.
2279 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
2280 Instruction *Inst = I;
2282 DEBUG(dbgs() << "Visiting " << *Inst << "\n");
2284 NestingDetected |= VisitInstructionTopDown(Inst, Releases, MyStates);
2287 // If ARC Annotations are enabled, output the current state of pointers at the
2288 // bottom of the basic block.
2289 ANNOTATE_TOPDOWN_BBEND(MyStates, BB);
2291 #ifdef ARC_ANNOTATIONS
2292 if (!(EnableARCAnnotations && DisableCheckForCFGHazards))
2293 #endif // ARC_ANNOTATIONS
2294 CheckForCFGHazards(BB, BBStates, MyStates);
2295 return NestingDetected;
2298 static void
2299 ComputePostOrders(Function &F,
2300 SmallVectorImpl<BasicBlock *> &PostOrder,
2301 SmallVectorImpl<BasicBlock *> &ReverseCFGPostOrder,
2302 unsigned NoObjCARCExceptionsMDKind,
2303 DenseMap<const BasicBlock *, BBState> &BBStates) {
2304 /// The visited set, for doing DFS walks.
2305 SmallPtrSet<BasicBlock *, 16> Visited;
2307 // Do DFS, computing the PostOrder.
2308 SmallPtrSet<BasicBlock *, 16> OnStack;
2309 SmallVector<std::pair<BasicBlock *, succ_iterator>, 16> SuccStack;
2311 // Functions always have exactly one entry block, and we don't have
2312 // any other block that we treat like an entry block.
2313 BasicBlock *EntryBB = &F.getEntryBlock();
2314 BBState &MyStates = BBStates[EntryBB];
2315 MyStates.SetAsEntry();
2316 TerminatorInst *EntryTI = cast<TerminatorInst>(&EntryBB->back());
2317 SuccStack.push_back(std::make_pair(EntryBB, succ_iterator(EntryTI)));
2318 Visited.insert(EntryBB);
2319 OnStack.insert(EntryBB);
2320 do {
2321 dfs_next_succ:
2322 BasicBlock *CurrBB = SuccStack.back().first;
2323 TerminatorInst *TI = cast<TerminatorInst>(&CurrBB->back());
2324 succ_iterator SE(TI, false);
2326 while (SuccStack.back().second != SE) {
2327 BasicBlock *SuccBB = *SuccStack.back().second++;
2328 if (Visited.insert(SuccBB)) {
2329 TerminatorInst *TI = cast<TerminatorInst>(&SuccBB->back());
2330 SuccStack.push_back(std::make_pair(SuccBB, succ_iterator(TI)));
2331 BBStates[CurrBB].addSucc(SuccBB);
2332 BBState &SuccStates = BBStates[SuccBB];
2333 SuccStates.addPred(CurrBB);
2334 OnStack.insert(SuccBB);
2335 goto dfs_next_succ;
2336 }
2338 if (!OnStack.count(SuccBB)) {
2339 BBStates[CurrBB].addSucc(SuccBB);
2340 BBStates[SuccBB].addPred(CurrBB);
2341 }
2342 }
2343 OnStack.erase(CurrBB);
2344 PostOrder.push_back(CurrBB);
2345 SuccStack.pop_back();
2346 } while (!SuccStack.empty());
2350 // Do reverse-CFG DFS, computing the reverse-CFG PostOrder.
2351 // Functions may have many exits, and there are also blocks which we treat
2352 // as exits due to ignored edges.
2353 SmallVector<std::pair<BasicBlock *, BBState::edge_iterator>, 16> PredStack;
2354 for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
2355 BasicBlock *ExitBB = I;
2356 BBState &MyStates = BBStates[ExitBB];
2357 if (!MyStates.isExit())
2358 continue;
2360 MyStates.SetAsExit();
2362 PredStack.push_back(std::make_pair(ExitBB, MyStates.pred_begin()));
2363 Visited.insert(ExitBB);
2364 while (!PredStack.empty()) {
2365 reverse_dfs_next_succ:
2366 BBState::edge_iterator PE = BBStates[PredStack.back().first].pred_end();
2367 while (PredStack.back().second != PE) {
2368 BasicBlock *BB = *PredStack.back().second++;
2369 if (Visited.insert(BB)) {
2370 PredStack.push_back(std::make_pair(BB, BBStates[BB].pred_begin()));
2371 goto reverse_dfs_next_succ;
2372 }
2373 }
2374 ReverseCFGPostOrder.push_back(PredStack.pop_back_val().first);
2379 // Visit the function both top-down and bottom-up.
2380 bool
2381 ObjCARCOpt::Visit(Function &F,
2382 DenseMap<const BasicBlock *, BBState> &BBStates,
2383 MapVector<Value *, RRInfo> &Retains,
2384 DenseMap<Value *, RRInfo> &Releases) {
2386 // Use reverse-postorder traversals, because we magically know that loops
2387 // will be well behaved, i.e. they won't repeatedly call retain on a single
2388 // pointer without doing a release. We can't use the ReversePostOrderTraversal
2389 // class here because we want the reverse-CFG postorder to consider each
2390 // function exit point, and we want to ignore selected cycle edges.
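// (Hedged intuition.) Reverse-postorder on the reverse CFG visits each
// block after its successors, which is the order the bottom-up analysis
// wants; plain reverse-postorder visits each block after its
// predecessors, which is what the top-down analysis wants.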
2391 SmallVector<BasicBlock *, 16> PostOrder;
2392 SmallVector<BasicBlock *, 16> ReverseCFGPostOrder;
2393 ComputePostOrders(F, PostOrder, ReverseCFGPostOrder,
2394 NoObjCARCExceptionsMDKind,
2395 BBStates);
2397 // Use reverse-postorder on the reverse CFG for bottom-up.
2398 bool BottomUpNestingDetected = false;
2399 for (SmallVectorImpl<BasicBlock *>::const_reverse_iterator I =
2400 ReverseCFGPostOrder.rbegin(), E = ReverseCFGPostOrder.rend();
2401 I != E; ++I)
2402 BottomUpNestingDetected |= VisitBottomUp(*I, BBStates, Retains);
2404 // Use reverse-postorder for top-down.
2405 bool TopDownNestingDetected = false;
2406 for (SmallVectorImpl<BasicBlock *>::const_reverse_iterator I =
2407 PostOrder.rbegin(), E = PostOrder.rend();
2408 I != E; ++I)
2409 TopDownNestingDetected |= VisitTopDown(*I, BBStates, Releases);
2411 return TopDownNestingDetected && BottomUpNestingDetected;
2414 /// Move the calls in RetainsToMove and ReleasesToMove.
2415 void ObjCARCOpt::MoveCalls(Value *Arg,
2416 RRInfo &RetainsToMove,
2417 RRInfo &ReleasesToMove,
2418 MapVector<Value *, RRInfo> &Retains,
2419 DenseMap<Value *, RRInfo> &Releases,
2420 SmallVectorImpl<Instruction *> &DeadInsts,
2421 Module *M) {
2422 Type *ArgTy = Arg->getType();
2423 Type *ParamTy = PointerType::getUnqual(Type::getInt8Ty(ArgTy->getContext()));
2425 DEBUG(dbgs() << "== ObjCARCOpt::MoveCalls ==\n");
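// (Hedged sketch of the net effect.) A pair that started out as, e.g.:
//   entry: %0 = call i8* @objc_retain(i8* %x)
//   ...
//   exit:  call void @objc_release(i8* %x)
// is re-emitted at the computed ReverseInsertPts, the points that most
// tightly bracket the region that actually needs the +1 reference count;
// the original calls are deleted below.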
2427 // Insert the new retain and release calls.
2428 for (SmallPtrSet<Instruction *, 2>::const_iterator
2429 PI = ReleasesToMove.ReverseInsertPts.begin(),
2430 PE = ReleasesToMove.ReverseInsertPts.end(); PI != PE; ++PI) {
2431 Instruction *InsertPt = *PI;
2432 Value *MyArg = ArgTy == ParamTy ? Arg :
2433 new BitCastInst(Arg, ParamTy, "", InsertPt);
2434 CallInst *Call =
2435 CallInst::Create(getRetainCallee(M), MyArg, "", InsertPt);
2436 Call->setDoesNotThrow();
2437 Call->setTailCall();
2439 DEBUG(dbgs() << "Inserting new Retain: " << *Call << "\n"
2440 "At insertion point: " << *InsertPt << "\n");
2442 for (SmallPtrSet<Instruction *, 2>::const_iterator
2443 PI = RetainsToMove.ReverseInsertPts.begin(),
2444 PE = RetainsToMove.ReverseInsertPts.end(); PI != PE; ++PI) {
2445 Instruction *InsertPt = *PI;
2446 Value *MyArg = ArgTy == ParamTy ? Arg :
2447 new BitCastInst(Arg, ParamTy, "", InsertPt);
2448 CallInst *Call = CallInst::Create(getReleaseCallee(M), MyArg,
2449 "", InsertPt);
2450 // Attach a clang.imprecise_release metadata tag, if appropriate.
2451 if (MDNode *M = ReleasesToMove.ReleaseMetadata)
2452 Call->setMetadata(ImpreciseReleaseMDKind, M);
2453 Call->setDoesNotThrow();
2454 if (ReleasesToMove.IsTailCallRelease)
2455 Call->setTailCall();
2457 DEBUG(dbgs() << "Inserting new Release: " << *Call << "\n"
2458 "At insertion point: " << *InsertPt << "\n");
2461 // Delete the original retain and release calls.
2462 for (SmallPtrSet<Instruction *, 2>::const_iterator
2463 AI = RetainsToMove.Calls.begin(),
2464 AE = RetainsToMove.Calls.end(); AI != AE; ++AI) {
2465 Instruction *OrigRetain = *AI;
2466 Retains.blot(OrigRetain);
2467 DeadInsts.push_back(OrigRetain);
2468 DEBUG(dbgs() << "Deleting retain: " << *OrigRetain << "\n");
2470 for (SmallPtrSet<Instruction *, 2>::const_iterator
2471 AI = ReleasesToMove.Calls.begin(),
2472 AE = ReleasesToMove.Calls.end(); AI != AE; ++AI) {
2473 Instruction *OrigRelease = *AI;
2474 Releases.erase(OrigRelease);
2475 DeadInsts.push_back(OrigRelease);
2476 DEBUG(dbgs() << "Deleting release: " << *OrigRelease << "\n");
2481 bool
2482 ObjCARCOpt::ConnectTDBUTraversals(DenseMap<const BasicBlock *, BBState>
2483 &BBStates,
2484 MapVector<Value *, RRInfo> &Retains,
2485 DenseMap<Value *, RRInfo> &Releases,
2486 Module *M,
2487 SmallVector<Instruction *, 4> &NewRetains,
2488 SmallVector<Instruction *, 4> &NewReleases,
2489 SmallVector<Instruction *, 8> &DeadInsts,
2490 RRInfo &RetainsToMove,
2491 RRInfo &ReleasesToMove,
2492 Value *Arg,
2493 bool KnownSafe,
2494 bool &AnyPairsCompletelyEliminated) {
2495 // If a pair happens in a region where it is known that the reference count
2496 // is already incremented, we can similarly ignore possible decrements unless
2497 // we are dealing with a retainable object with multiple provenance sources.
2498 bool KnownSafeTD = true, KnownSafeBU = true;
2499 bool MultipleOwners = false;
2501 // Connect the dots between the top-down-collected RetainsToMove and
2502 // bottom-up-collected ReleasesToMove to form sets of related calls.
2503 // This is an iterative process so that we connect multiple releases
2504 // to multiple retains if needed.
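// (Hedged example.) In a diamond CFG a single retain may pair with a
// release in each arm:
//         retain(%x)
//         /        \
//   release(%x)  release(%x)
// The forward pass gathers both releases; the backward pass may then
// gather further retains those releases pair with, and so on until the
// sets stop growing.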
2505 unsigned OldDelta = 0;
2506 unsigned NewDelta = 0;
2507 unsigned OldCount = 0;
2508 unsigned NewCount = 0;
2509 bool FirstRelease = true;
2510 for (;;) {
2511 for (SmallVectorImpl<Instruction *>::const_iterator
2512 NI = NewRetains.begin(), NE = NewRetains.end(); NI != NE; ++NI) {
2513 Instruction *NewRetain = *NI;
2514 MapVector<Value *, RRInfo>::const_iterator It = Retains.find(NewRetain);
2515 assert(It != Retains.end());
2516 const RRInfo &NewRetainRRI = It->second;
2517 KnownSafeTD &= NewRetainRRI.KnownSafe;
2518 MultipleOwners |= NewRetainRRI.MultipleOwners;
2519 for (SmallPtrSet<Instruction *, 2>::const_iterator
2520 LI = NewRetainRRI.Calls.begin(),
2521 LE = NewRetainRRI.Calls.end(); LI != LE; ++LI) {
2522 Instruction *NewRetainRelease = *LI;
2523 DenseMap<Value *, RRInfo>::const_iterator Jt =
2524 Releases.find(NewRetainRelease);
2525 if (Jt == Releases.end())
2526 continue;
2527 const RRInfo &NewRetainReleaseRRI = Jt->second;
2528 assert(NewRetainReleaseRRI.Calls.count(NewRetain));
2529 if (ReleasesToMove.Calls.insert(NewRetainRelease)) {
2530 OldDelta -=
2531 BBStates[NewRetainRelease->getParent()].GetAllPathCount();
2533 // Merge the ReleaseMetadata and IsTailCallRelease values.
2534 if (FirstRelease) {
2535 ReleasesToMove.ReleaseMetadata =
2536 NewRetainReleaseRRI.ReleaseMetadata;
2537 ReleasesToMove.IsTailCallRelease =
2538 NewRetainReleaseRRI.IsTailCallRelease;
2539 FirstRelease = false;
2540 } else {
2541 if (ReleasesToMove.ReleaseMetadata !=
2542 NewRetainReleaseRRI.ReleaseMetadata)
2543 ReleasesToMove.ReleaseMetadata = 0;
2544 if (ReleasesToMove.IsTailCallRelease !=
2545 NewRetainReleaseRRI.IsTailCallRelease)
2546 ReleasesToMove.IsTailCallRelease = false;
2547 }
2549 // Collect the optimal insertion points.
2550 if (!KnownSafe)
2551 for (SmallPtrSet<Instruction *, 2>::const_iterator
2552 RI = NewRetainReleaseRRI.ReverseInsertPts.begin(),
2553 RE = NewRetainReleaseRRI.ReverseInsertPts.end();
2554 RI != RE; ++RI) {
2555 Instruction *RIP = *RI;
2556 if (ReleasesToMove.ReverseInsertPts.insert(RIP))
2557 NewDelta -= BBStates[RIP->getParent()].GetAllPathCount();
2558 }
2559 NewReleases.push_back(NewRetainRelease);
2563 NewRetains.clear();
2564 if (NewReleases.empty()) break;
2566 // Back the other way.
2567 for (SmallVectorImpl<Instruction *>::const_iterator
2568 NI = NewReleases.begin(), NE = NewReleases.end(); NI != NE; ++NI) {
2569 Instruction *NewRelease = *NI;
2570 DenseMap<Value *, RRInfo>::const_iterator It =
2571 Releases.find(NewRelease);
2572 assert(It != Releases.end());
2573 const RRInfo &NewReleaseRRI = It->second;
2574 KnownSafeBU &= NewReleaseRRI.KnownSafe;
2575 for (SmallPtrSet<Instruction *, 2>::const_iterator
2576 LI = NewReleaseRRI.Calls.begin(),
2577 LE = NewReleaseRRI.Calls.end(); LI != LE; ++LI) {
2578 Instruction *NewReleaseRetain = *LI;
2579 MapVector<Value *, RRInfo>::const_iterator Jt =
2580 Retains.find(NewReleaseRetain);
2581 if (Jt == Retains.end())
2582 continue;
2583 const RRInfo &NewReleaseRetainRRI = Jt->second;
2584 assert(NewReleaseRetainRRI.Calls.count(NewRelease));
2585 if (RetainsToMove.Calls.insert(NewReleaseRetain)) {
2586 unsigned PathCount =
2587 BBStates[NewReleaseRetain->getParent()].GetAllPathCount();
2588 OldDelta += PathCount;
2589 OldCount += PathCount;
2591 // Collect the optimal insertion points.
2592 if (!KnownSafe)
2593 for (SmallPtrSet<Instruction *, 2>::const_iterator
2594 RI = NewReleaseRetainRRI.ReverseInsertPts.begin(),
2595 RE = NewReleaseRetainRRI.ReverseInsertPts.end();
2596 RI != RE; ++RI) {
2597 Instruction *RIP = *RI;
2598 if (RetainsToMove.ReverseInsertPts.insert(RIP)) {
2599 PathCount = BBStates[RIP->getParent()].GetAllPathCount();
2600 NewDelta += PathCount;
2601 NewCount += PathCount;
2602 }
2603 }
2604 NewRetains.push_back(NewReleaseRetain);
2608 NewReleases.clear();
2609 if (NewRetains.empty()) break;
2610 }
2612 // If the pointer is known to be incremented in at least one direction and
2613 // we do not have MultipleOwners, we can safely remove the retain/releases.
2614 // Otherwise we need KnownSafe in both directions.
2615 bool UnconditionallySafe = (KnownSafeTD && KnownSafeBU) ||
2616 ((KnownSafeTD || KnownSafeBU) && !MultipleOwners);
2617 if (UnconditionallySafe) {
2618 RetainsToMove.ReverseInsertPts.clear();
2619 ReleasesToMove.ReverseInsertPts.clear();
2620 NewCount = 0;
2621 } else {
2622 // Determine whether the new insertion points we computed preserve the
2623 // balance of retain and release calls through the program.
2624 // TODO: If the fully aggressive solution isn't valid, try to find a
2625 // less aggressive solution which is.
2626 if (NewDelta != 0)
2627 return false;
2628 }
2630 // Determine whether the original call points are balanced in the retain and
2631 // release calls through the program. If not, conservatively don't touch
2632 // them.
2633 // TODO: It's theoretically possible to do code motion in this case, as
2634 // long as the existing imbalances are maintained.
2635 if (OldDelta != 0)
2636 return false;
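// (Hedged example, assuming GetAllPathCount counts the CFG paths
// through a block.) In a diamond, a retain in the entry block lies on
// both paths (count 2) while a release in each arm lies on one path
// (count 1 apiece); +2 for the retain and -1 per release cancel to
// zero, i.e. every path keeps matched retain and release counts.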
2638 #ifdef ARC_ANNOTATIONS
2639 // Do not move calls if ARC annotations are requested.
2640 if (EnableARCAnnotations)
2641 return false;
2642 #endif // ARC_ANNOTATIONS
2645 assert(OldCount != 0 && "Unreachable code?");
2646 NumRRs += OldCount - NewCount;
2647 // Set to true if we completely removed any RR pairs.
2648 AnyPairsCompletelyEliminated = NewCount == 0;
2650 // We can move calls!
2651 return true;
2654 /// Identify pairings between the retains and releases, and delete and/or
2655 /// move them.
2656 bool
2657 ObjCARCOpt::PerformCodePlacement(DenseMap<const BasicBlock *, BBState>
2658 &BBStates,
2659 MapVector<Value *, RRInfo> &Retains,
2660 DenseMap<Value *, RRInfo> &Releases,
2661 Module *M) {
2662 DEBUG(dbgs() << "\n== ObjCARCOpt::PerformCodePlacement ==\n");
2664 bool AnyPairsCompletelyEliminated = false;
2665 RRInfo RetainsToMove;
2666 RRInfo ReleasesToMove;
2667 SmallVector<Instruction *, 4> NewRetains;
2668 SmallVector<Instruction *, 4> NewReleases;
2669 SmallVector<Instruction *, 8> DeadInsts;
2671 // Visit each retain.
2672 for (MapVector<Value *, RRInfo>::const_iterator I = Retains.begin(),
2673 E = Retains.end(); I != E; ++I) {
2674 Value *V = I->first;
2675 if (!V) continue; // blotted
2677 Instruction *Retain = cast<Instruction>(V);
2679 DEBUG(dbgs() << "Visiting: " << *Retain << "\n");
2681 Value *Arg = GetObjCArg(Retain);
2683 // If the object being released is in static or stack storage, we know it's
2684 // not being managed by ObjC reference counting, so we can delete pairs
2685 // regardless of what possible decrements or uses lie between them.
2686 bool KnownSafe = isa<Constant>(Arg) || isa<AllocaInst>(Arg);
2688 // A constant pointer can't be pointing to an object on the heap. It may
2689 // be reference-counted, but it won't be deleted.
2690 if (const LoadInst *LI = dyn_cast<LoadInst>(Arg))
2691 if (const GlobalVariable *GV =
2692 dyn_cast<GlobalVariable>(
2693 StripPointerCastsAndObjCCalls(LI->getPointerOperand())))
2694 if (GV->isConstant())
2695 KnownSafe = true;
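// e.g. (hedged): a pointer loaded from a constant global (a CFString
// literal, say) may still be retained and released, but the object is
// never deallocated, so such pairs are removable regardless of what
// lies between them.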
2697 // Connect the dots between the top-down-collected RetainsToMove and
2698 // bottom-up-collected ReleasesToMove to form sets of related calls.
2699 NewRetains.push_back(Retain);
2700 bool PerformMoveCalls =
2701 ConnectTDBUTraversals(BBStates, Retains, Releases, M, NewRetains,
2702 NewReleases, DeadInsts, RetainsToMove,
2703 ReleasesToMove, Arg, KnownSafe,
2704 AnyPairsCompletelyEliminated);
2706 if (PerformMoveCalls) {
2707 // Ok, everything checks out and we're all set. Let's move/delete some
2709 MoveCalls(Arg, RetainsToMove, ReleasesToMove,
2710 Retains, Releases, DeadInsts, M);
2713 // Clean up state for next retain.
2714 NewReleases.clear();
2715 NewRetains.clear();
2716 RetainsToMove.clear();
2717 ReleasesToMove.clear();
2718 }
2720 // Now that we're done moving everything, we can delete the newly dead
2721 // instructions, as we no longer need them as insert points.
2722 while (!DeadInsts.empty())
2723 EraseInstruction(DeadInsts.pop_back_val());
2725 return AnyPairsCompletelyEliminated;
2728 /// Weak pointer optimizations.
2729 void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
2730 DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeWeakCalls ==\n");
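// (Hedged examples of the two rewrites.) RLE: in
//   %1 = call i8* @objc_loadWeak(i8** %p)
//   %2 = call i8* @objc_loadWeak(i8** %p)
// the second load can reuse %1. S2L: a load that follows
//   call i8* @objc_storeWeak(i8** %p, i8* %v)
// can be forwarded %v, provided nothing in between can modify the weak
// pointer.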
2732 // First, do memdep-style RLE and S2L optimizations. We can't use memdep
2733 // itself because it uses AliasAnalysis and we need to do provenance
2734 // queries instead.
2735 for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
2736 Instruction *Inst = &*I++;
2738 DEBUG(dbgs() << "Visiting: " << *Inst << "\n");
2740 InstructionClass Class = GetBasicInstructionClass(Inst);
2741 if (Class != IC_LoadWeak && Class != IC_LoadWeakRetained)
2742 continue;
2744 // Delete objc_loadWeak calls with no users.
2745 if (Class == IC_LoadWeak && Inst->use_empty()) {
2746 Inst->eraseFromParent();
2747 continue;
2748 }
2750 // TODO: For now, just look for an earlier available version of this value
2751 // within the same block. Theoretically, we could do memdep-style non-local
2752 // analysis too, but that would require caching. A better approach would be
2753 // to use the technique that EarlyCSE uses.
2754 inst_iterator Current = llvm::prior(I);
2755 BasicBlock *CurrentBB = Current.getBasicBlockIterator();
2756 for (BasicBlock::iterator B = CurrentBB->begin(),
2757 J = Current.getInstructionIterator();
2758 J != B; --J) {
2759 Instruction *EarlierInst = &*llvm::prior(J);
2760 InstructionClass EarlierClass = GetInstructionClass(EarlierInst);
2761 switch (EarlierClass) {
2762 case IC_LoadWeak:
2763 case IC_LoadWeakRetained: {
2764 // If this is loading from the same pointer, replace this load's value
2765 // with that one.
2766 CallInst *Call = cast<CallInst>(Inst);
2767 CallInst *EarlierCall = cast<CallInst>(EarlierInst);
2768 Value *Arg = Call->getArgOperand(0);
2769 Value *EarlierArg = EarlierCall->getArgOperand(0);
2770 switch (PA.getAA()->alias(Arg, EarlierArg)) {
2771 case AliasAnalysis::MustAlias:
2772 Changed = true;
2773 // If the load has a builtin retain, insert a plain retain for it.
2774 if (Class == IC_LoadWeakRetained) {
2775 CallInst *CI =
2776 CallInst::Create(getRetainCallee(F.getParent()), EarlierCall,
2777 "", Call);
2778 CI->setTailCall();
2779 }
2780 // Zap the fully redundant load.
2781 Call->replaceAllUsesWith(EarlierCall);
2782 Call->eraseFromParent();
2784 case AliasAnalysis::MayAlias:
2785 case AliasAnalysis::PartialAlias:
2787 case AliasAnalysis::NoAlias:
2788 break;
2792 case IC_StoreWeak:
2793 case IC_InitWeak: {
2794 // If this is storing to the same pointer and has the same size etc.,
2795 // replace this load's value with the stored value.
2796 CallInst *Call = cast<CallInst>(Inst);
2797 CallInst *EarlierCall = cast<CallInst>(EarlierInst);
2798 Value *Arg = Call->getArgOperand(0);
2799 Value *EarlierArg = EarlierCall->getArgOperand(0);
2800 switch (PA.getAA()->alias(Arg, EarlierArg)) {
2801 case AliasAnalysis::MustAlias:
2802 Changed = true;
2803 // If the load has a builtin retain, insert a plain retain for it.
2804 if (Class == IC_LoadWeakRetained) {
2805 CallInst *CI =
2806 CallInst::Create(getRetainCallee(F.getParent()), EarlierCall,
2807 "", Call);
2808 CI->setTailCall();
2809 }
2810 // Zap the fully redundant load.
2811 Call->replaceAllUsesWith(EarlierCall->getArgOperand(1));
2812 Call->eraseFromParent();
2814 case AliasAnalysis::MayAlias:
2815 case AliasAnalysis::PartialAlias:
2817 case AliasAnalysis::NoAlias:
2824 // TODO: Grab the copied value.
2826 case IC_AutoreleasepoolPush:
2827 case IC_None:
2828 case IC_IntrinsicUser:
2829 case IC_User:
2830 // Weak pointers are only modified through the weak entry points
2831 // (and arbitrary calls, which could call the weak entry points).
2832 continue;
2833 default:
2834 // Anything else could modify the weak pointer.
2841 // Then, for each destroyWeak with an alloca operand, check to see if
2842 // the alloca and all its users can be zapped.
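// (Hedged sketch.) This removes self-contained weak slots such as:
//   %w = alloca i8*
//   call i8* @objc_initWeak(i8** %w, i8* %x)
//   ...
//   call void @objc_destroyWeak(i8** %w)
// when every user of the alloca is itself a weak entry point, so the
// slot's value is never truly consumed.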
2843 for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
2844 Instruction *Inst = &*I++;
2845 InstructionClass Class = GetBasicInstructionClass(Inst);
2846 if (Class != IC_DestroyWeak)
2847 continue;
2849 CallInst *Call = cast<CallInst>(Inst);
2850 Value *Arg = Call->getArgOperand(0);
2851 if (AllocaInst *Alloca = dyn_cast<AllocaInst>(Arg)) {
2852 for (Value::use_iterator UI = Alloca->use_begin(),
2853 UE = Alloca->use_end(); UI != UE; ++UI) {
2854 const Instruction *UserInst = cast<Instruction>(*UI);
2855 switch (GetBasicInstructionClass(UserInst)) {
2856 case IC_InitWeak:
2857 case IC_StoreWeak:
2858 case IC_DestroyWeak:
2865 for (Value::use_iterator UI = Alloca->use_begin(),
2866 UE = Alloca->use_end(); UI != UE; ) {
2867 CallInst *UserInst = cast<CallInst>(*UI++);
2868 switch (GetBasicInstructionClass(UserInst)) {
2869 case IC_InitWeak:
2870 case IC_StoreWeak:
2871 // These functions return their second argument.
2872 UserInst->replaceAllUsesWith(UserInst->getArgOperand(1));
2873 break;
2874 case IC_DestroyWeak:
2875 break;
2876 default:
2878 llvm_unreachable("alloca really is used!");
2879 }
2880 UserInst->eraseFromParent();
2882 Alloca->eraseFromParent();
2888 /// Identify program paths which execute sequences of retains and releases
2889 /// that can be eliminated.
2890 bool ObjCARCOpt::OptimizeSequences(Function &F) {
2891 // Releases, Retains - These are used to store the results of the main flow
2892 // analysis. These use Value* as the key instead of Instruction* so that the
2893 // map stays valid when we get around to rewriting code and calls get
2894 // replaced by arguments.
2895 DenseMap<Value *, RRInfo> Releases;
2896 MapVector<Value *, RRInfo> Retains;
2898 // This is used during the traversal of the function to track the
2899 // states for each identified object at each block.
2900 DenseMap<const BasicBlock *, BBState> BBStates;
2902 // Analyze the CFG of the function, and all instructions.
2903 bool NestingDetected = Visit(F, BBStates, Retains, Releases);
2906 return PerformCodePlacement(BBStates, Retains, Releases, F.getParent()) &&
2907 NestingDetected;
2910 /// Check for an earlier dependent call that has nothing in between itself
2911 /// and the Retain which can affect the reference count of their shared
2912 /// pointer argument. Note that Retain need not be in BB.
2913 static bool
2914 HasSafePathToPredecessorCall(const Value *Arg, Instruction *Retain,
2915 SmallPtrSet<Instruction *, 4> &DepInsts,
2916 SmallPtrSet<const BasicBlock *, 4> &Visited,
2917 ProvenanceAnalysis &PA) {
2918 FindDependencies(CanChangeRetainCount, Arg, Retain->getParent(), Retain,
2919 DepInsts, Visited, PA);
2920 if (DepInsts.size() != 1)
2921 return false;
2923 CallInst *Call =
2924 dyn_cast_or_null<CallInst>(*DepInsts.begin());
2926 // Check that the pointer is the return value of the call.
2927 if (!Call || Arg != Call)
2930 // Check that the call is a regular call.
2931 InstructionClass Class = GetBasicInstructionClass(Call);
2932 if (Class != IC_CallOrUser && Class != IC_Call)
2933 return false;
2935 return true;
2938 /// Find a dependent retain that precedes the given autorelease for which there
2939 /// is nothing in between the two instructions that can affect the ref count
2940 /// of Arg.
2941 static CallInst *
2942 FindPredecessorRetainWithSafePath(const Value *Arg, BasicBlock *BB,
2943 Instruction *Autorelease,
2944 SmallPtrSet<Instruction *, 4> &DepInsts,
2945 SmallPtrSet<const BasicBlock *, 4> &Visited,
2946 ProvenanceAnalysis &PA) {
2947 FindDependencies(CanChangeRetainCount, Arg,
2948 BB, Autorelease, DepInsts, Visited, PA);
2949 if (DepInsts.size() != 1)
2950 return 0;
2952 CallInst *Retain =
2953 dyn_cast_or_null<CallInst>(*DepInsts.begin());
2955 // Check that we found a retain with the same argument.
2956 if (!Retain ||
2957 !IsRetain(GetBasicInstructionClass(Retain)) ||
2958 GetObjCArg(Retain) != Arg) {
2959 return 0;
2960 }
2965 /// Look for an ``autorelease'' instruction dependent on Arg such that there are
2966 /// no instructions dependent on Arg that need a positive ref count in between
2967 /// the autorelease and the ret.
2968 static CallInst *
2969 FindPredecessorAutoreleaseWithSafePath(const Value *Arg, BasicBlock *BB,
2970 ReturnInst *Ret,
2971 SmallPtrSet<Instruction *, 4> &DepInsts,
2972 SmallPtrSet<const BasicBlock *, 4> &V,
2973 ProvenanceAnalysis &PA) {
2974 FindDependencies(NeedsPositiveRetainCount, Arg,
2975 BB, Ret, DepInsts, V, PA);
2976 if (DepInsts.size() != 1)
2977 return 0;
2979 CallInst *Autorelease =
2980 dyn_cast_or_null<CallInst>(*DepInsts.begin());
2981 if (!Autorelease)
2982 return 0;
2983 InstructionClass AutoreleaseClass = GetBasicInstructionClass(Autorelease);
2984 if (!IsAutorelease(AutoreleaseClass))
2985 return 0;
2986 if (GetObjCArg(Autorelease) != Arg)
2987 return 0;
2992 /// Look for this pattern:
2994 /// %call = call i8* @something(...)
2995 /// %2 = call i8* @objc_retain(i8* %call)
2996 /// %3 = call i8* @objc_autorelease(i8* %2)
2997 /// ret i8* %3
2999 /// And delete the retain and autorelease.
3000 void ObjCARCOpt::OptimizeReturns(Function &F) {
3001 if (!F.getReturnType()->isPointerTy())
3002 return;
3004 DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeReturns ==\n");
3006 SmallPtrSet<Instruction *, 4> DependingInstructions;
3007 SmallPtrSet<const BasicBlock *, 4> Visited;
3008 for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI) {
3009 BasicBlock *BB = FI;
3010 ReturnInst *Ret = dyn_cast<ReturnInst>(&BB->back());
3011 if (!Ret) continue;
3012 DEBUG(dbgs() << "Visiting: " << *Ret << "\n");
3017 const Value *Arg = StripPointerCastsAndObjCCalls(Ret->getOperand(0));
3019 // Look for an ``autorelease'' instruction that is a predecessor of Ret and
3020 // dependent on Arg such that there are no instructions dependent on Arg
3021 // that need a positive ref count in between the autorelease and Ret.
3022 CallInst *Autorelease =
3023 FindPredecessorAutoreleaseWithSafePath(Arg, BB, Ret,
3024 DependingInstructions, Visited,
3025 PA);
3026 DependingInstructions.clear();
3029 if (!Autorelease)
3030 continue;
3032 CallInst *Retain =
3033 FindPredecessorRetainWithSafePath(Arg, BB, Autorelease,
3034 DependingInstructions, Visited, PA);
3035 DependingInstructions.clear();
3038 if (!Retain)
3039 continue;
3041 // Check that there is nothing that can affect the reference count
3042 // between the retain and the call. Note that Retain need not be in BB.
3043 bool HasSafePathToCall = HasSafePathToPredecessorCall(Arg, Retain,
3044 DependingInstructions,
3045 Visited, PA);
3046 DependingInstructions.clear();
3049 if (!HasSafePathToCall)
3050 continue;
3052 // If so, we can zap the retain and autorelease.
3053 Changed = true;
3054 ++NumRets;
3055 DEBUG(dbgs() << "Erasing: " << *Retain << "\nErasing: "
3056 << *Autorelease << "\n");
3057 EraseInstruction(Retain);
3058 EraseInstruction(Autorelease);
3063 void
3064 ObjCARCOpt::GatherStatistics(Function &F, bool AfterOptimization) {
3065 llvm::Statistic &NumRetains =
3066 AfterOptimization? NumRetainsAfterOpt : NumRetainsBeforeOpt;
3067 llvm::Statistic &NumReleases =
3068 AfterOptimization? NumReleasesAfterOpt : NumReleasesBeforeOpt;
3070 for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
3071 Instruction *Inst = &*I++;
3072 switch (GetBasicInstructionClass(Inst)) {
3086 bool ObjCARCOpt::doInitialization(Module &M) {
3090 // If nothing in the Module uses ARC, don't do anything.
3091 Run = ModuleHasARC(M);
3092 if (!Run)
3093 return false;
3095 // Identify the imprecise release metadata kind.
3096 ImpreciseReleaseMDKind =
3097 M.getContext().getMDKindID("clang.imprecise_release");
3098 CopyOnEscapeMDKind =
3099 M.getContext().getMDKindID("clang.arc.copy_on_escape");
3100 NoObjCARCExceptionsMDKind =
3101 M.getContext().getMDKindID("clang.arc.no_objc_arc_exceptions");
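// For reference (hedged example), the frontend tags releases that need
// not be precise like so:
//   call void @objc_release(i8* %x), !clang.imprecise_release !0
// which is what lets this pass treat them as S_MovableRelease.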
3102 #ifdef ARC_ANNOTATIONS
3103 ARCAnnotationBottomUpMDKind =
3104 M.getContext().getMDKindID("llvm.arc.annotation.bottomup");
3105 ARCAnnotationTopDownMDKind =
3106 M.getContext().getMDKindID("llvm.arc.annotation.topdown");
3107 ARCAnnotationProvenanceSourceMDKind =
3108 M.getContext().getMDKindID("llvm.arc.annotation.provenancesource");
3109 #endif // ARC_ANNOTATIONS
3111 // Intuitively, objc_retain and others are nocapture; however, in practice
3112 // they are not, because they return their argument value. And objc_release
3113 // calls finalizers which can have arbitrary side effects.
3115 // These are initialized lazily.
3116 AutoreleaseRVCallee = 0;
3117 ReleaseCallee = 0;
3118 RetainCallee = 0;
3119 RetainBlockCallee = 0;
3120 AutoreleaseCallee = 0;
3122 return false;
3125 bool ObjCARCOpt::runOnFunction(Function &F) {
3129 // If nothing in the Module uses ARC, don't do anything.
3130 if (!Run)
3131 return false;
3135 DEBUG(dbgs() << "<<< ObjCARCOpt: Visiting Function: " << F.getName() << " >>>"
3136 << "\n");
3138 PA.setAA(&getAnalysis<AliasAnalysis>());
3141 if (AreStatisticsEnabled()) {
3142 GatherStatistics(F, false);
3143 }
3146 // This pass performs several distinct transformations. As a compile-time aid
3147 // when compiling code that isn't ObjC, skip these if the relevant ObjC
3148 // library functions aren't declared.
3150 // Preliminary optimizations. This also computes UsedInThisFunction.
3151 OptimizeIndividualCalls(F);
3153 // Optimizations for weak pointers.
3154 if (UsedInThisFunction & ((1 << IC_LoadWeak) |
3155 (1 << IC_LoadWeakRetained) |
3156 (1 << IC_StoreWeak) |
3157 (1 << IC_InitWeak) |
3158 (1 << IC_CopyWeak) |
3159 (1 << IC_MoveWeak) |
3160 (1 << IC_DestroyWeak)))
3161 OptimizeWeakCalls(F);
3163 // Optimizations for retain+release pairs.
3164 if (UsedInThisFunction & ((1 << IC_Retain) |
3165 (1 << IC_RetainRV) |
3166 (1 << IC_RetainBlock)))
3167 if (UsedInThisFunction & (1 << IC_Release))
3168 // Run OptimizeSequences until it either stops making changes or
3169 // no retain+release pair nesting is detected.
3170 while (OptimizeSequences(F)) {}
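// (Hedged illustration.) The iteration matters for nested pairs such as
//   retain; retain; ...; release; release
// where removing the inner pair on one round exposes the outer pair on
// the next; the nesting detected during the visits drives the repeat.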
3172 // Optimizations if objc_autorelease is used.
3173 if (UsedInThisFunction & ((1 << IC_Autorelease) |
3174 (1 << IC_AutoreleaseRV)))
3175 OptimizeReturns(F);
3177 // Gather statistics after optimization.
3179 if (AreStatisticsEnabled()) {
3180 GatherStatistics(F, true);
3181 }
3184 DEBUG(dbgs() << "\n");
3186 return Changed;
3189 void ObjCARCOpt::releaseMemory() {