//===- ObjCARCOpts.cpp - ObjC ARC Optimization ----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// This file defines ObjC ARC optimizations. ARC stands for Automatic
/// Reference Counting and is a system for managing reference counts for objects
/// in Objective C.
///
/// The optimizations performed include elimination of redundant, partially
/// redundant, and inconsequential reference count operations, elimination of
/// redundant weak pointer operations, and numerous minor simplifications.
///
/// WARNING: This file knows about certain library functions. It recognizes them
/// by name, and hardwires knowledge of their semantics.
///
/// WARNING: This file knows about how certain Objective-C library functions are
/// used. Naive LLVM IR transformations which would otherwise be
/// behavior-preserving may break these assumptions.
///
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "objc-arc-opts"
#include "ObjCARC.h"
#include "DependencyAnalysis.h"
#include "ObjCARCAliasAnalysis.h"
#include "ProvenanceAnalysis.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace llvm::objcarc;
/// \defgroup MiscUtils Miscellaneous utilities that are not ARC specific.

/// \brief An associative container with fast insertion-order (deterministic)
/// iteration over its elements. Plus the special blot operation.
template<class KeyT, class ValueT>
class MapVector {
  /// Map keys to indices in Vector.
  typedef DenseMap<KeyT, size_t> MapTy;
  MapTy Map;

  /// Keys and values.
  typedef std::vector<std::pair<KeyT, ValueT> > VectorTy;
  VectorTy Vector;

public:
  typedef typename VectorTy::iterator iterator;
  typedef typename VectorTy::const_iterator const_iterator;
  iterator begin() { return Vector.begin(); }
  iterator end() { return Vector.end(); }
  const_iterator begin() const { return Vector.begin(); }
  const_iterator end() const { return Vector.end(); }
#ifdef XDEBUG
  ~MapVector() {
    assert(Vector.size() >= Map.size()); // May differ due to blotting.
    for (typename MapTy::const_iterator I = Map.begin(), E = Map.end();
         I != E; ++I) {
      assert(I->second < Vector.size());
      assert(Vector[I->second].first == I->first);
    }
    for (typename VectorTy::const_iterator I = Vector.begin(),
         E = Vector.end(); I != E; ++I)
      assert(!I->first ||
             (Map.count(I->first) &&
              Map[I->first] == size_t(I - Vector.begin())));
  }
#endif
  ValueT &operator[](const KeyT &Arg) {
    std::pair<typename MapTy::iterator, bool> Pair =
      Map.insert(std::make_pair(Arg, size_t(0)));
    if (Pair.second) {
      size_t Num = Vector.size();
      Pair.first->second = Num;
      Vector.push_back(std::make_pair(Arg, ValueT()));
      return Vector[Num].second;
    }
    return Vector[Pair.first->second].second;
  }
  std::pair<iterator, bool>
  insert(const std::pair<KeyT, ValueT> &InsertPair) {
    std::pair<typename MapTy::iterator, bool> Pair =
      Map.insert(std::make_pair(InsertPair.first, size_t(0)));
    if (Pair.second) {
      size_t Num = Vector.size();
      Pair.first->second = Num;
      Vector.push_back(InsertPair);
      return std::make_pair(Vector.begin() + Num, true);
    }
    return std::make_pair(Vector.begin() + Pair.first->second, false);
  }
  const_iterator find(const KeyT &Key) const {
    typename MapTy::const_iterator It = Map.find(Key);
    if (It == Map.end()) return Vector.end();
    return Vector.begin() + It->second;
  }
  /// This is similar to erase, but instead of removing the element from the
  /// vector, it just zeros out the key in the vector. This leaves iterators
  /// intact, but clients must be prepared for zeroed-out keys when iterating.
  void blot(const KeyT &Key) {
    typename MapTy::iterator It = Map.find(Key);
    if (It == Map.end()) return;
    Vector[It->second].first = KeyT();
    Map.erase(It);
  }
};
/// \defgroup ARCUtilities Utility declarations/definitions specific to ARC.

/// \brief This is similar to StripPointerCastsAndObjCCalls but it stops as soon
/// as it finds a value with multiple uses.
static const Value *FindSingleUseIdentifiedObject(const Value *Arg) {
  if (Arg->hasOneUse()) {
    if (const BitCastInst *BC = dyn_cast<BitCastInst>(Arg))
      return FindSingleUseIdentifiedObject(BC->getOperand(0));
    if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Arg))
      if (GEP->hasAllZeroIndices())
        return FindSingleUseIdentifiedObject(GEP->getPointerOperand());
    if (IsForwarding(GetBasicInstructionClass(Arg)))
      return FindSingleUseIdentifiedObject(
               cast<CallInst>(Arg)->getArgOperand(0));
    if (!IsObjCIdentifiedObject(Arg))
      return 0;
    return Arg;
  }

  // If we found an identifiable object, but its multiple uses are all
  // trivial, we can still consider this to be a single-use value.
  if (IsObjCIdentifiedObject(Arg)) {
    for (Value::const_use_iterator UI = Arg->use_begin(), UE = Arg->use_end();
         UI != UE; ++UI) {
      const User *U = *UI;
      if (!U->use_empty() || StripPointerCastsAndObjCCalls(U) != Arg)
        return 0;
    }

    return Arg;
  }

  return 0;
}
/// \brief Test whether the given retainable object pointer escapes.
///
/// This differs from regular escape analysis in that a use as an
/// argument to a call is not considered an escape.
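///
/// For example (illustrative IR; names are hypothetical), in
///
///   %x = call i8* @returns_obj()
///   store i8* %x, i8** @g          ; escape: the pointer is written to memory
///   call void @use_it(i8* %x)      ; not an escape: plain use as a call arg
///
/// the store makes %x escape, while the ordinary call does not.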
static bool DoesRetainableObjPtrEscape(const User *Ptr) {
  DEBUG(dbgs() << "DoesRetainableObjPtrEscape: Target: " << *Ptr << "\n");

  // Walk the def-use chains.
  SmallVector<const Value *, 4> Worklist;
  Worklist.push_back(Ptr);
  // If Ptr has any operands add them as well.
  for (User::const_op_iterator I = Ptr->op_begin(), E = Ptr->op_end(); I != E;
       ++I) {
    Worklist.push_back(*I);
  }

  // Ensure we do not visit any value twice.
  SmallPtrSet<const Value *, 8> VisitedSet;

  do {
    const Value *V = Worklist.pop_back_val();

    DEBUG(dbgs() << "Visiting: " << *V << "\n");

    for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end();
         UI != UE; ++UI) {
      const User *UUser = *UI;

      DEBUG(dbgs() << "User: " << *UUser << "\n");

      // Special - Use by a call (callee or argument) is not considered
      // an escape.
      switch (GetBasicInstructionClass(UUser)) {
      case IC_StoreWeak:
      case IC_InitWeak:
      case IC_StoreStrong:
      case IC_Autorelease:
      case IC_AutoreleaseRV: {
        DEBUG(dbgs() << "User copies pointer arguments. Pointer Escapes!\n");
        // These special functions make copies of their pointer arguments.
        return true;
      }
      case IC_IntrinsicUser:
        // Use by the use intrinsic is not an escape.
        continue;
      case IC_User:
      case IC_None:
        // Use by an instruction which copies the value is an escape if the
        // result is an escape.
        if (isa<BitCastInst>(UUser) || isa<GetElementPtrInst>(UUser) ||
            isa<PHINode>(UUser) || isa<SelectInst>(UUser)) {
          if (VisitedSet.insert(UUser)) {
            DEBUG(dbgs() << "User copies value. Ptr escapes if result escapes."
                            " Adding to list.\n");
            Worklist.push_back(UUser);
          } else {
            DEBUG(dbgs() << "Already visited node.\n");
          }
          continue;
        }
        // Use by a load is not an escape.
        if (isa<LoadInst>(UUser))
          continue;
        // Use by a store is not an escape if the use is the address.
        if (const StoreInst *SI = dyn_cast<StoreInst>(UUser))
          if (V != SI->getValueOperand())
            continue;
        break;
      default:
        // Regular calls and other stuff are not considered escapes.
        continue;
      }

      // Otherwise, conservatively assume an escape.
      DEBUG(dbgs() << "Assuming ptr escapes.\n");
      return true;
    }
  } while (!Worklist.empty());

  // If we get here, we did not find any escaping uses.
  DEBUG(dbgs() << "Ptr does not escape.\n");
  return false;
}
/// \defgroup ARCOpt ARC Optimization.

// TODO: On code like this:
//
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
// stuff_that_cannot_release()
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
//
// The second retain and autorelease can be deleted.

// TODO: It should be possible to delete
// objc_autoreleasePoolPush and objc_autoreleasePoolPop
// pairs if nothing is actually autoreleased between them. Also, autorelease
// calls followed by objc_autoreleasePoolPop calls (perhaps in ObjC++ code
// after inlining) can be turned into plain release calls.

// TODO: Critical-edge splitting. If the optimal insertion point is
// a critical edge, the current algorithm has to fail, because it doesn't
// know how to split edges. It should be possible to make the optimizer
// think in terms of edges, rather than blocks, and then split critical
// edges.

// TODO: OptimizeSequences could be generalized to be Interprocedural.

// TODO: Recognize that a bunch of other objc runtime calls have
// non-escaping arguments and non-releasing arguments, and may be
// non-autoreleasing.

// TODO: Sink autorelease calls as far as possible. Unfortunately we
// usually can't sink them past other calls, which would be the main
// case where it would be useful.

// TODO: The pointer returned from objc_loadWeakRetained is retained.

// TODO: Delete release+retain pairs (rare).
STATISTIC(NumNoops,        "Number of no-op objc calls eliminated");
STATISTIC(NumPartialNoops, "Number of partially no-op objc calls eliminated");
STATISTIC(NumAutoreleases, "Number of autoreleases converted to releases");
STATISTIC(NumRets,         "Number of return value forwarding "
                           "retain+autoreleases eliminated");
STATISTIC(NumRRs,          "Number of retain+release paths eliminated");
STATISTIC(NumPeeps,        "Number of calls peephole-optimized");
/// \brief A sequence of states that a pointer may go through in which an
/// objc_retain and objc_release are actually needed.
enum Sequence {
  S_None,
  S_Retain,         ///< objc_retain(x).
  S_CanRelease,     ///< foo(x) -- x could possibly see a ref count decrement.
  S_Use,            ///< any use of x.
  S_Stop,           ///< like S_Release, but code motion is stopped.
  S_Release,        ///< objc_release(x).
  S_MovableRelease  ///< objc_release(x), !clang.imprecise_release.
};
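// For example (illustrative), a top-down walk of
//
//   objc_retain(%x)   ; S_Retain
//   foo(%x)           ; S_CanRelease -- foo could decrement the ref count
//   bar(%x)           ; S_Use
//   objc_release(%x)  ; S_Release
//
// steps the pointer through the full sequence in which the retain and
// release are actually needed.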
raw_ostream &operator<<(raw_ostream &OS, const Sequence S)
  LLVM_ATTRIBUTE_UNUSED;
raw_ostream &operator<<(raw_ostream &OS, const Sequence S) {
  switch (S) {
  case S_None:
    return OS << "S_None";
  case S_Retain:
    return OS << "S_Retain";
  case S_CanRelease:
    return OS << "S_CanRelease";
  case S_Use:
    return OS << "S_Use";
  case S_Release:
    return OS << "S_Release";
  case S_MovableRelease:
    return OS << "S_MovableRelease";
  case S_Stop:
    return OS << "S_Stop";
  }
  llvm_unreachable("Unknown sequence type.");
}
static Sequence MergeSeqs(Sequence A, Sequence B, bool TopDown) {
  // The easy cases.
  if (A == B)
    return A;
  if (A == S_None || B == S_None)
    return S_None;

  if (A > B) std::swap(A, B);
  if (TopDown) {
    // Choose the side which is further along in the sequence.
    if ((A == S_Retain || A == S_CanRelease) &&
        (B == S_CanRelease || B == S_Use))
      return B;
  } else {
    // Choose the side which is further along in the sequence.
    if ((A == S_Use || A == S_CanRelease) &&
        (B == S_Use || B == S_Release || B == S_Stop || B == S_MovableRelease))
      return A;
    // If both sides are releases, choose the more conservative one.
    if (A == S_Stop && (B == S_Release || B == S_MovableRelease))
      return A;
    if (A == S_Release && B == S_MovableRelease)
      return A;
  }

  return S_None;
}
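// For example (illustrative): merging S_Retain with S_CanRelease top-down
// yields S_CanRelease (the side further along in the sequence), while merging
// S_Stop with S_MovableRelease bottom-up yields S_Stop (the more conservative
// of the two releases).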
/// \brief Unidirectional information about either a
/// retain-decrement-use-release sequence or release-use-decrement-retain
/// reverse sequence.
struct RRInfo {
  /// After an objc_retain, the reference count of the referenced
  /// object is known to be positive. Similarly, before an objc_release, the
  /// reference count of the referenced object is known to be positive. If
  /// there are retain-release pairs in code regions where the retain count
  /// is known to be positive, they can be eliminated, regardless of any side
  /// effects between them.
  ///
  /// Also, a retain+release pair nested within another retain+release
  /// pair all on the known same pointer value can be eliminated, regardless
  /// of any intervening side effects.
  ///
  /// KnownSafe is true when either of these conditions is satisfied.
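  ///
  /// For example (illustrative IR), in
  ///
  ///   objc_retain(%x)
  ///   objc_retain(%x)
  ///   ...
  ///   objc_release(%x)
  ///   objc_release(%x)
  ///
  /// the inner retain+release pair is nested within the outer pair on the
  /// same pointer value, so it can be eliminated regardless of any side
  /// effects between the inner calls.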
  bool KnownSafe;

  /// True if the objc_release calls are all marked with the "tail" keyword.
  bool IsTailCallRelease;

  /// If the Calls are objc_release calls and they all have a
  /// clang.imprecise_release tag, this is the metadata tag.
  MDNode *ReleaseMetadata;

  /// For a top-down sequence, the set of objc_retains or
  /// objc_retainBlocks. For bottom-up, the set of objc_releases.
  SmallPtrSet<Instruction *, 2> Calls;

  /// The set of optimal insert positions for moving calls in the opposite
  /// sequence.
  SmallPtrSet<Instruction *, 2> ReverseInsertPts;

  RRInfo() :
    KnownSafe(false), IsTailCallRelease(false), ReleaseMetadata(0) {}

  void clear();

  bool IsTrackingImpreciseReleases() {
    return ReleaseMetadata != 0;
  }
};

void RRInfo::clear() {
  KnownSafe = false;
  IsTailCallRelease = false;
  ReleaseMetadata = 0;
  Calls.clear();
  ReverseInsertPts.clear();
}
/// \brief This class summarizes several per-pointer runtime properties which
/// are propagated through the flow graph.
class PtrState {
  /// True if the reference count is known to be incremented.
  bool KnownPositiveRefCount;

  /// True if we've seen an opportunity for partial RR elimination, such as
  /// pushing calls into a CFG triangle or into one side of a CFG diamond.
  bool Partial;

  /// The current position in the sequence.
  Sequence Seq : 8;

public:
  /// Unidirectional information about the current sequence.
  ///
  /// TODO: Encapsulate this better.
  RRInfo RRI;

  PtrState() : KnownPositiveRefCount(false), Partial(false),
               Seq(S_None) {}

  void SetKnownPositiveRefCount() {
    KnownPositiveRefCount = true;
  }

  void ClearKnownPositiveRefCount() {
    KnownPositiveRefCount = false;
  }

  bool HasKnownPositiveRefCount() const {
    return KnownPositiveRefCount;
  }

  void SetSeq(Sequence NewSeq) {
    DEBUG(dbgs() << "Old: " << Seq << "; New: " << NewSeq << "\n");
    Seq = NewSeq;
  }

  Sequence GetSeq() const {
    return Seq;
  }

  void ClearSequenceProgress() {
    ResetSequenceProgress(S_None);
  }

  void ResetSequenceProgress(Sequence NewSeq) {
    SetSeq(NewSeq);
    Partial = false;
    RRI.clear();
  }

  void Merge(const PtrState &Other, bool TopDown);
};
void
PtrState::Merge(const PtrState &Other, bool TopDown) {
  Seq = MergeSeqs(Seq, Other.Seq, TopDown);
  KnownPositiveRefCount = KnownPositiveRefCount && Other.KnownPositiveRefCount;

  // If we're not in a sequence (anymore), drop all associated state.
  if (Seq == S_None) {
    Partial = false;
    RRI.clear();
  } else if (Partial || Other.Partial) {
    // If we're doing a merge on a path that's previously seen a partial
    // merge, conservatively drop the sequence, to avoid doing partial
    // RR elimination. If the branch predicates for the two merges differ,
    // mixing them is unsafe.
    ClearSequenceProgress();
  } else {
    // Conservatively merge the ReleaseMetadata information.
    if (RRI.ReleaseMetadata != Other.RRI.ReleaseMetadata)
      RRI.ReleaseMetadata = 0;

    RRI.KnownSafe = RRI.KnownSafe && Other.RRI.KnownSafe;
    RRI.IsTailCallRelease = RRI.IsTailCallRelease &&
                            Other.RRI.IsTailCallRelease;
    RRI.Calls.insert(Other.RRI.Calls.begin(), Other.RRI.Calls.end());

    // Merge the insert point sets. If there are any differences,
    // that makes this a partial merge.
    Partial = RRI.ReverseInsertPts.size() != Other.RRI.ReverseInsertPts.size();
    for (SmallPtrSet<Instruction *, 2>::const_iterator
         I = Other.RRI.ReverseInsertPts.begin(),
         E = Other.RRI.ReverseInsertPts.end(); I != E; ++I)
      Partial |= RRI.ReverseInsertPts.insert(*I);
  }
}
/// \brief Per-BasicBlock state.
class BBState {
  /// The number of unique control paths from the entry which can reach this
  /// block.
  unsigned TopDownPathCount;

  /// The number of unique control paths to exits from this block.
  unsigned BottomUpPathCount;

  /// A type for PerPtrTopDown and PerPtrBottomUp.
  typedef MapVector<const Value *, PtrState> MapTy;

  /// The top-down traversal uses this to record information known about a
  /// pointer at the bottom of each block.
  MapTy PerPtrTopDown;

  /// The bottom-up traversal uses this to record information known about a
  /// pointer at the top of each block.
  MapTy PerPtrBottomUp;

  /// Effective predecessors of the current block ignoring ignorable edges and
  /// ignored backedges.
  SmallVector<BasicBlock *, 2> Preds;
  /// Effective successors of the current block ignoring ignorable edges and
  /// ignored backedges.
  SmallVector<BasicBlock *, 2> Succs;

public:
  BBState() : TopDownPathCount(0), BottomUpPathCount(0) {}

  typedef MapTy::iterator ptr_iterator;
  typedef MapTy::const_iterator ptr_const_iterator;

  ptr_iterator top_down_ptr_begin() { return PerPtrTopDown.begin(); }
  ptr_iterator top_down_ptr_end() { return PerPtrTopDown.end(); }
  ptr_const_iterator top_down_ptr_begin() const {
    return PerPtrTopDown.begin();
  }
  ptr_const_iterator top_down_ptr_end() const {
    return PerPtrTopDown.end();
  }

  ptr_iterator bottom_up_ptr_begin() { return PerPtrBottomUp.begin(); }
  ptr_iterator bottom_up_ptr_end() { return PerPtrBottomUp.end(); }
  ptr_const_iterator bottom_up_ptr_begin() const {
    return PerPtrBottomUp.begin();
  }
  ptr_const_iterator bottom_up_ptr_end() const {
    return PerPtrBottomUp.end();
  }

  /// Mark this block as being an entry block, which has one path from the
  /// entry by definition.
  void SetAsEntry() { TopDownPathCount = 1; }

  /// Mark this block as being an exit block, which has one path to an exit by
  /// definition.
  void SetAsExit() { BottomUpPathCount = 1; }

  PtrState &getPtrTopDownState(const Value *Arg) {
    return PerPtrTopDown[Arg];
  }

  PtrState &getPtrBottomUpState(const Value *Arg) {
    return PerPtrBottomUp[Arg];
  }

  void clearBottomUpPointers() {
    PerPtrBottomUp.clear();
  }

  void clearTopDownPointers() {
    PerPtrTopDown.clear();
  }

  void InitFromPred(const BBState &Other);
  void InitFromSucc(const BBState &Other);
  void MergePred(const BBState &Other);
  void MergeSucc(const BBState &Other);

  /// Return the number of possible unique paths from an entry to an exit
  /// which pass through this block. This is only valid after both the
  /// top-down and bottom-up traversals are complete.
  unsigned GetAllPathCount() const {
    assert(TopDownPathCount != 0);
    assert(BottomUpPathCount != 0);
    return TopDownPathCount * BottomUpPathCount;
  }
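  // For example (illustrative): a block reachable along 2 distinct paths from
  // the entry (TopDownPathCount == 2) with 3 distinct paths to exits
  // (BottomUpPathCount == 3) lies on 2 * 3 == 6 unique entry-to-exit paths.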
  // Specialized CFG utilities.
  typedef SmallVectorImpl<BasicBlock *>::const_iterator edge_iterator;
  edge_iterator pred_begin() { return Preds.begin(); }
  edge_iterator pred_end() { return Preds.end(); }
  edge_iterator succ_begin() { return Succs.begin(); }
  edge_iterator succ_end() { return Succs.end(); }

  void addSucc(BasicBlock *Succ) { Succs.push_back(Succ); }
  void addPred(BasicBlock *Pred) { Preds.push_back(Pred); }

  bool isExit() const { return Succs.empty(); }
};
void BBState::InitFromPred(const BBState &Other) {
  PerPtrTopDown = Other.PerPtrTopDown;
  TopDownPathCount = Other.TopDownPathCount;
}

void BBState::InitFromSucc(const BBState &Other) {
  PerPtrBottomUp = Other.PerPtrBottomUp;
  BottomUpPathCount = Other.BottomUpPathCount;
}

/// The top-down traversal uses this to merge information about predecessors to
/// form the initial state for a new block.
void BBState::MergePred(const BBState &Other) {
  // Other.TopDownPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  TopDownPathCount += Other.TopDownPathCount;

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (TopDownPathCount < Other.TopDownPathCount) {
    clearTopDownPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the same
  // key, merge the entries. Otherwise, copy the entry and merge it with an
  // empty entry.
  for (ptr_const_iterator MI = Other.top_down_ptr_begin(),
       ME = Other.top_down_ptr_end(); MI != ME; ++MI) {
    std::pair<ptr_iterator, bool> Pair = PerPtrTopDown.insert(*MI);
    Pair.first->second.Merge(Pair.second ? PtrState() : MI->second,
                             /*TopDown=*/true);
  }

  // For each entry in our set, if the other set doesn't have an entry with the
  // same key, force it to merge with an empty entry.
  for (ptr_iterator MI = top_down_ptr_begin(),
       ME = top_down_ptr_end(); MI != ME; ++MI)
    if (Other.PerPtrTopDown.find(MI->first) == Other.PerPtrTopDown.end())
      MI->second.Merge(PtrState(), /*TopDown=*/true);
}

/// The bottom-up traversal uses this to merge information about successors to
/// form the initial state for a new block.
void BBState::MergeSucc(const BBState &Other) {
  // Other.BottomUpPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  BottomUpPathCount += Other.BottomUpPathCount;

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (BottomUpPathCount < Other.BottomUpPathCount) {
    clearBottomUpPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the
  // same key, merge the entries. Otherwise, copy the entry and merge
  // it with an empty entry.
  for (ptr_const_iterator MI = Other.bottom_up_ptr_begin(),
       ME = Other.bottom_up_ptr_end(); MI != ME; ++MI) {
    std::pair<ptr_iterator, bool> Pair = PerPtrBottomUp.insert(*MI);
    Pair.first->second.Merge(Pair.second ? PtrState() : MI->second,
                             /*TopDown=*/false);
  }

  // For each entry in our set, if the other set doesn't have an entry
  // with the same key, force it to merge with an empty entry.
  for (ptr_iterator MI = bottom_up_ptr_begin(),
       ME = bottom_up_ptr_end(); MI != ME; ++MI)
    if (Other.PerPtrBottomUp.find(MI->first) == Other.PerPtrBottomUp.end())
      MI->second.Merge(PtrState(), /*TopDown=*/false);
}
// Only enable ARC Annotations if we are building a debug version of
// the compiler.
#ifndef NDEBUG
#define ARC_ANNOTATIONS
#endif

// Define some macros along the lines of DEBUG and some helper functions to
// make it cleaner to create annotations in the source code and to no-op when
// not building in debug mode.
#ifdef ARC_ANNOTATIONS

#include "llvm/Support/CommandLine.h"

/// Enable/disable ARC sequence annotations.
static cl::opt<bool>
EnableARCAnnotations("enable-objc-arc-annotations", cl::init(false),
                     cl::desc("Enable emission of arc data flow analysis "
                              "annotations"));
static cl::opt<bool>
DisableCheckForCFGHazards("disable-objc-arc-checkforcfghazards", cl::init(false),
                          cl::desc("Disable check for cfg hazards when "
                                   "annotating"));
static cl::opt<std::string>
ARCAnnotationTargetIdentifier("objc-arc-annotation-target-identifier",
                              cl::init(""),
                              cl::desc("filter out all data flow annotations "
                                       "but those that apply to the given "
                                       "target llvm identifier."));
/// This function appends a unique ARCAnnotationProvenanceSourceMDKind id to an
/// instruction so that we can track backwards when post processing via the
/// llvm arc annotation processor tool.
static MDString *AppendMDNodeToSourcePtr(unsigned NodeId,
                                         Value *Ptr) {
  MDString *Hash = 0;

  // If pointer is a result of an instruction and it does not have a source
  // MDNode attached to it, attach a new MDNode onto it. If pointer is a result
  // of an instruction and does have a source MDNode attached to it, return a
  // reference to said Node. Otherwise just return 0.
  if (Instruction *Inst = dyn_cast<Instruction>(Ptr)) {
    MDNode *Node;
    if (!(Node = Inst->getMetadata(NodeId))) {
      // We do not have any node. Generate and attach the hash MDString to the
      // instruction.

      // We just use an MDString to ensure that this metadata gets written out
      // of line at the module level and to provide a very simple format
      // encoding the information herein. Both of these make it simpler to
      // parse the annotations by a simple external program.
      std::string Str;
      raw_string_ostream os(Str);
      os << "(" << Inst->getParent()->getParent()->getName() << ",%"
         << Inst->getName() << ")";

      Hash = MDString::get(Inst->getContext(), os.str());
      Inst->setMetadata(NodeId, MDNode::get(Inst->getContext(), Hash));
    } else {
      // We have a node. Grab its hash and return it.
      assert(Node->getNumOperands() == 1 &&
             "An ARCAnnotationProvenanceSourceMDKind can only have 1 operand.");
      Hash = cast<MDString>(Node->getOperand(0));
    }
  } else if (Argument *Arg = dyn_cast<Argument>(Ptr)) {
    std::string str;
    raw_string_ostream os(str);
    os << "(" << Arg->getParent()->getName() << ",%" << Arg->getName()
       << ")";
    Hash = MDString::get(Arg->getContext(), os.str());
  }

  return Hash;
}
static std::string SequenceToString(Sequence A) {
  std::string str;
  raw_string_ostream os(str);
  os << A;
  return os.str();
}

/// Helper function to change a Sequence into a String object using our overload
/// for raw_ostream so we only have printing code in one location.
static MDString *SequenceToMDString(LLVMContext &Context,
                                    Sequence A) {
  return MDString::get(Context, SequenceToString(A));
}

/// A simple function to generate an MDNode which describes the change in state
/// for Value *Ptr caused by Instruction *Inst.
static void AppendMDNodeToInstForPtr(unsigned NodeId,
                                     Instruction *Inst,
                                     Value *Ptr,
                                     MDString *PtrSourceMDNodeID,
                                     Sequence OldSeq,
                                     Sequence NewSeq) {
  MDNode *Node = 0;
  Value *tmp[3] = {PtrSourceMDNodeID,
                   SequenceToMDString(Inst->getContext(),
                                      OldSeq),
                   SequenceToMDString(Inst->getContext(),
                                      NewSeq)};
  Node = MDNode::get(Inst->getContext(),
                     ArrayRef<Value*>(tmp, 3));

  Inst->setMetadata(NodeId, Node);
}
/// Add to the beginning of the basic block llvm.ptr.annotations which show the
/// state of a pointer at the entrance to a basic block.
static void GenerateARCBBEntranceAnnotation(const char *Name, BasicBlock *BB,
                                            Value *Ptr, Sequence Seq) {
  // If we have a target identifier, make sure that we match it before
  // emitting an annotation.
  if (!ARCAnnotationTargetIdentifier.empty() &&
      !Ptr->getName().equals(ARCAnnotationTargetIdentifier))
    return;

  Module *M = BB->getParent()->getParent();
  LLVMContext &C = M->getContext();
  Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
  Type *I8XX = PointerType::getUnqual(I8X);
  Type *Params[] = {I8XX, I8XX};
  FunctionType *FTy = FunctionType::get(Type::getVoidTy(C),
                                        ArrayRef<Type*>(Params, 2),
                                        /*isVarArg=*/false);
  Constant *Callee = M->getOrInsertFunction(Name, FTy);

  IRBuilder<> Builder(BB, BB->getFirstInsertionPt());

  Value *PtrName;
  StringRef Tmp = Ptr->getName();
  if (0 == (PtrName = M->getGlobalVariable(Tmp, true))) {
    Value *ActualPtrName = Builder.CreateGlobalStringPtr(Tmp,
                                                         Tmp + "_STR");
    PtrName = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
                                 cast<Constant>(ActualPtrName), Tmp);
  }

  Value *S;
  std::string SeqStr = SequenceToString(Seq);
  if (0 == (S = M->getGlobalVariable(SeqStr, true))) {
    Value *ActualPtrName = Builder.CreateGlobalStringPtr(SeqStr,
                                                         SeqStr + "_STR");
    S = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
                           cast<Constant>(ActualPtrName), SeqStr);
  }

  Builder.CreateCall2(Callee, PtrName, S);
}
/// Add to the end of the basic block llvm.ptr.annotations which show the state
/// of the pointer at the bottom of the basic block.
static void GenerateARCBBTerminatorAnnotation(const char *Name, BasicBlock *BB,
                                              Value *Ptr, Sequence Seq) {
  // If we have a target identifier, make sure that we match it before emitting
  // an annotation.
  if (!ARCAnnotationTargetIdentifier.empty() &&
      !Ptr->getName().equals(ARCAnnotationTargetIdentifier))
    return;

  Module *M = BB->getParent()->getParent();
  LLVMContext &C = M->getContext();
  Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
  Type *I8XX = PointerType::getUnqual(I8X);
  Type *Params[] = {I8XX, I8XX};
  FunctionType *FTy = FunctionType::get(Type::getVoidTy(C),
                                        ArrayRef<Type*>(Params, 2),
                                        /*isVarArg=*/false);
  Constant *Callee = M->getOrInsertFunction(Name, FTy);

  IRBuilder<> Builder(BB, llvm::prior(BB->end()));

  Value *PtrName;
  StringRef Tmp = Ptr->getName();
  if (0 == (PtrName = M->getGlobalVariable(Tmp, true))) {
    Value *ActualPtrName = Builder.CreateGlobalStringPtr(Tmp,
                                                         Tmp + "_STR");
    PtrName = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
                                 cast<Constant>(ActualPtrName), Tmp);
  }

  Value *S;
  std::string SeqStr = SequenceToString(Seq);
  if (0 == (S = M->getGlobalVariable(SeqStr, true))) {
    Value *ActualPtrName = Builder.CreateGlobalStringPtr(SeqStr,
                                                         SeqStr + "_STR");
    S = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
                           cast<Constant>(ActualPtrName), SeqStr);
  }
  Builder.CreateCall2(Callee, PtrName, S);
}
/// Adds a source annotation to pointer and a state change annotation to Inst
/// referencing the source annotation and the old/new state of pointer.
static void GenerateARCAnnotation(unsigned InstMDId,
                                  unsigned PtrMDId,
                                  Instruction *Inst,
                                  Value *Ptr,
                                  Sequence OldSeq,
                                  Sequence NewSeq) {
  if (EnableARCAnnotations) {
    // If we have a target identifier, make sure that we match it before
    // emitting an annotation.
    if (!ARCAnnotationTargetIdentifier.empty() &&
        !Ptr->getName().equals(ARCAnnotationTargetIdentifier))
      return;

    // First generate the source annotation on our pointer. This will return an
    // MDString* if Ptr actually comes from an instruction implying we can put
    // in a source annotation. If AppendMDNodeToSourcePtr returns 0 (i.e. NULL),
    // then we know that our pointer is from an Argument so we put a reference
    // to the argument number.
    //
    // The point of this is to make it easy for the
    // llvm-arc-annotation-processor tool to cross reference where the source
    // pointer is in the LLVM IR since the LLVM IR parser does not submit such
    // information via debug info for backends to use (since why would anyone
    // need such a thing from LLVM IR besides in non-standard cases
    // [i.e. debugging]).
    MDString *SourcePtrMDNode =
      AppendMDNodeToSourcePtr(PtrMDId, Ptr);
    AppendMDNodeToInstForPtr(InstMDId, Inst, Ptr, SourcePtrMDNode, OldSeq,
                             NewSeq);
  }
}
// The actual interface for accessing the above functionality is defined via
// some simple macros which are defined below. We do this so that the user does
// not need to pass in what metadata id is needed resulting in cleaner code and
// additionally since it provides an easy way to conditionally no-op all
// annotation support in a non-debug build.

/// Use this macro to annotate a sequence state change when processing
/// instructions bottom up.
#define ANNOTATE_BOTTOMUP(inst, ptr, old, new)                          \
  GenerateARCAnnotation(ARCAnnotationBottomUpMDKind,                    \
                        ARCAnnotationProvenanceSourceMDKind, (inst),    \
                        const_cast<Value*>(ptr), (old), (new))
/// Use this macro to annotate a sequence state change when processing
/// instructions top down.
#define ANNOTATE_TOPDOWN(inst, ptr, old, new)                           \
  GenerateARCAnnotation(ARCAnnotationTopDownMDKind,                     \
                        ARCAnnotationProvenanceSourceMDKind, (inst),    \
                        const_cast<Value*>(ptr), (old), (new))

#define ANNOTATE_BB(_states, _bb, _name, _type, _direction)                   \
  do {                                                                        \
    if (EnableARCAnnotations) {                                               \
      for (BBState::ptr_const_iterator I = (_states)._direction##_ptr_begin(),\
           E = (_states)._direction##_ptr_end(); I != E; ++I) {               \
        Value *Ptr = const_cast<Value*>(I->first);                            \
        Sequence Seq = I->second.GetSeq();                                    \
        GenerateARCBB ## _type ## Annotation(_name, (_bb), Ptr, Seq);         \
      }                                                                       \
    }                                                                         \
  } while (0)

#define ANNOTATE_BOTTOMUP_BBSTART(_states, _basicblock)                       \
  ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.bottomup.bbstart",   \
              Entrance, bottom_up)
#define ANNOTATE_BOTTOMUP_BBEND(_states, _basicblock)                         \
  ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.bottomup.bbend",     \
              Terminator, bottom_up)
#define ANNOTATE_TOPDOWN_BBSTART(_states, _basicblock)                        \
  ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.topdown.bbstart",    \
              Entrance, top_down)
#define ANNOTATE_TOPDOWN_BBEND(_states, _basicblock)                          \
  ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.topdown.bbend",      \
              Terminator, top_down)
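// For example (illustrative), the visitors below record a state change with:
//
//   ANNOTATE_BOTTOMUP(Inst, Arg, OldSeq, S.GetSeq());
//
// which expands to a GenerateARCAnnotation call here, and to nothing when
// ARC_ANNOTATIONS is not defined.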
#else // !ARC_ANNOTATION

// If annotations are off, noop.
#define ANNOTATE_BOTTOMUP(inst, ptr, old, new)
#define ANNOTATE_TOPDOWN(inst, ptr, old, new)
#define ANNOTATE_BOTTOMUP_BBSTART(states, basicblock)
#define ANNOTATE_BOTTOMUP_BBEND(states, basicblock)
#define ANNOTATE_TOPDOWN_BBSTART(states, basicblock)
#define ANNOTATE_TOPDOWN_BBEND(states, basicblock)

#endif // !ARC_ANNOTATION
/// \brief The main ARC optimization pass.
class ObjCARCOpt : public FunctionPass {
  bool Changed;
  ProvenanceAnalysis PA;

  /// A flag indicating whether this optimization pass should run.
  bool Run;

  /// Declarations for ObjC runtime functions, for use in creating calls to
  /// them. These are initialized lazily to avoid cluttering up the Module
  /// with unused declarations.

  /// Declaration for ObjC runtime function
  /// objc_retainAutoreleasedReturnValue.
  Constant *RetainRVCallee;
  /// Declaration for ObjC runtime function objc_autoreleaseReturnValue.
  Constant *AutoreleaseRVCallee;
  /// Declaration for ObjC runtime function objc_release.
  Constant *ReleaseCallee;
  /// Declaration for ObjC runtime function objc_retain.
  Constant *RetainCallee;
  /// Declaration for ObjC runtime function objc_retainBlock.
  Constant *RetainBlockCallee;
  /// Declaration for ObjC runtime function objc_autorelease.
  Constant *AutoreleaseCallee;

  /// Flags which determine whether each of the interesting runtime functions
  /// is in fact used in the current function.
  unsigned UsedInThisFunction;

  /// The Metadata Kind for clang.imprecise_release metadata.
  unsigned ImpreciseReleaseMDKind;

  /// The Metadata Kind for clang.arc.copy_on_escape metadata.
  unsigned CopyOnEscapeMDKind;

  /// The Metadata Kind for clang.arc.no_objc_arc_exceptions metadata.
  unsigned NoObjCARCExceptionsMDKind;

#ifdef ARC_ANNOTATIONS
  /// The Metadata Kind for llvm.arc.annotation.bottomup metadata.
  unsigned ARCAnnotationBottomUpMDKind;
  /// The Metadata Kind for llvm.arc.annotation.topdown metadata.
  unsigned ARCAnnotationTopDownMDKind;
  /// The Metadata Kind for llvm.arc.annotation.provenancesource metadata.
  unsigned ARCAnnotationProvenanceSourceMDKind;
#endif // ARC_ANNOTATIONS
  Constant *getRetainRVCallee(Module *M);
  Constant *getAutoreleaseRVCallee(Module *M);
  Constant *getReleaseCallee(Module *M);
  Constant *getRetainCallee(Module *M);
  Constant *getRetainBlockCallee(Module *M);
  Constant *getAutoreleaseCallee(Module *M);

  bool IsRetainBlockOptimizable(const Instruction *Inst);

  void OptimizeRetainCall(Function &F, Instruction *Retain);
  bool OptimizeRetainRVCall(Function &F, Instruction *RetainRV);
  void OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV,
                                 InstructionClass &Class);
  bool OptimizeRetainBlockCall(Function &F, Instruction *RetainBlock,
                               InstructionClass &Class);
  void OptimizeIndividualCalls(Function &F);

  void CheckForCFGHazards(const BasicBlock *BB,
                          DenseMap<const BasicBlock *, BBState> &BBStates,
                          BBState &MyStates) const;
  bool VisitInstructionBottomUp(Instruction *Inst,
                                BasicBlock *BB,
                                MapVector<Value *, RRInfo> &Retains,
                                BBState &MyStates);
  bool VisitBottomUp(BasicBlock *BB,
                     DenseMap<const BasicBlock *, BBState> &BBStates,
                     MapVector<Value *, RRInfo> &Retains);
  bool VisitInstructionTopDown(Instruction *Inst,
                               DenseMap<Value *, RRInfo> &Releases,
                               BBState &MyStates);
  bool VisitTopDown(BasicBlock *BB,
                    DenseMap<const BasicBlock *, BBState> &BBStates,
                    DenseMap<Value *, RRInfo> &Releases);
  bool Visit(Function &F,
             DenseMap<const BasicBlock *, BBState> &BBStates,
             MapVector<Value *, RRInfo> &Retains,
             DenseMap<Value *, RRInfo> &Releases);

  void MoveCalls(Value *Arg, RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
                 MapVector<Value *, RRInfo> &Retains,
                 DenseMap<Value *, RRInfo> &Releases,
                 SmallVectorImpl<Instruction *> &DeadInsts,
                 Module *M);

  bool ConnectTDBUTraversals(DenseMap<const BasicBlock *, BBState> &BBStates,
                             MapVector<Value *, RRInfo> &Retains,
                             DenseMap<Value *, RRInfo> &Releases,
                             Module *M,
                             SmallVector<Instruction *, 4> &NewRetains,
                             SmallVector<Instruction *, 4> &NewReleases,
                             SmallVector<Instruction *, 8> &DeadInsts,
                             RRInfo &RetainsToMove,
                             RRInfo &ReleasesToMove,
                             Value *Arg,
                             bool KnownSafe,
                             bool &AnyPairsCompletelyEliminated);

  bool PerformCodePlacement(DenseMap<const BasicBlock *, BBState> &BBStates,
                            MapVector<Value *, RRInfo> &Retains,
                            DenseMap<Value *, RRInfo> &Releases,
                            Module *M);

  void OptimizeWeakCalls(Function &F);

  bool OptimizeSequences(Function &F);

  void OptimizeReturns(Function &F);

  virtual void getAnalysisUsage(AnalysisUsage &AU) const;
  virtual bool doInitialization(Module &M);
  virtual bool runOnFunction(Function &F);
  virtual void releaseMemory();

public:
  static char ID;
  ObjCARCOpt() : FunctionPass(ID) {
    initializeObjCARCOptPass(*PassRegistry::getPassRegistry());
  }
};

char ObjCARCOpt::ID = 0;
INITIALIZE_PASS_BEGIN(ObjCARCOpt,
                      "objc-arc", "ObjC ARC optimization", false, false)
INITIALIZE_PASS_DEPENDENCY(ObjCARCAliasAnalysis)
INITIALIZE_PASS_END(ObjCARCOpt,
                    "objc-arc", "ObjC ARC optimization", false, false)

Pass *llvm::createObjCARCOptPass() {
  return new ObjCARCOpt();
}

void ObjCARCOpt::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<ObjCARCAliasAnalysis>();
  AU.addRequired<AliasAnalysis>();
  // ARC optimization doesn't currently split critical edges.
  AU.setPreservesCFG();
}
bool ObjCARCOpt::IsRetainBlockOptimizable(const Instruction *Inst) {
  // Without the magic metadata tag, we have to assume this might be an
  // objc_retainBlock call inserted to convert a block pointer to an id,
  // in which case it really is needed.
  if (!Inst->getMetadata(CopyOnEscapeMDKind))
    return false;

  // If the pointer "escapes" (not including being used in a call),
  // the copy may be needed.
  if (DoesRetainableObjPtrEscape(Inst))
    return false;

  // Otherwise, it's not needed.
  return true;
}
Constant *ObjCARCOpt::getRetainRVCallee(Module *M) {
  if (!RetainRVCallee) {
    LLVMContext &C = M->getContext();
    Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
    Type *Params[] = { I8X };
    FunctionType *FTy = FunctionType::get(I8X, Params, /*isVarArg=*/false);
    AttributeSet Attribute =
      AttributeSet().addAttribute(M->getContext(), AttributeSet::FunctionIndex,
                                  Attribute::NoUnwind);
    RetainRVCallee =
      M->getOrInsertFunction("objc_retainAutoreleasedReturnValue", FTy,
                             Attribute);
  }
  return RetainRVCallee;
}

Constant *ObjCARCOpt::getAutoreleaseRVCallee(Module *M) {
  if (!AutoreleaseRVCallee) {
    LLVMContext &C = M->getContext();
    Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
    Type *Params[] = { I8X };
    FunctionType *FTy = FunctionType::get(I8X, Params, /*isVarArg=*/false);
    AttributeSet Attribute =
      AttributeSet().addAttribute(M->getContext(), AttributeSet::FunctionIndex,
                                  Attribute::NoUnwind);
    AutoreleaseRVCallee =
      M->getOrInsertFunction("objc_autoreleaseReturnValue", FTy,
                             Attribute);
  }
  return AutoreleaseRVCallee;
}

Constant *ObjCARCOpt::getReleaseCallee(Module *M) {
  if (!ReleaseCallee) {
    LLVMContext &C = M->getContext();
    Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) };
    AttributeSet Attribute =
      AttributeSet().addAttribute(M->getContext(), AttributeSet::FunctionIndex,
                                  Attribute::NoUnwind);
    ReleaseCallee =
      M->getOrInsertFunction(
        "objc_release",
        FunctionType::get(Type::getVoidTy(C), Params, /*isVarArg=*/false),
        Attribute);
  }
  return ReleaseCallee;
}

Constant *ObjCARCOpt::getRetainCallee(Module *M) {
  if (!RetainCallee) {
    LLVMContext &C = M->getContext();
    Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) };
    AttributeSet Attribute =
      AttributeSet().addAttribute(M->getContext(), AttributeSet::FunctionIndex,
                                  Attribute::NoUnwind);
    RetainCallee =
      M->getOrInsertFunction(
        "objc_retain",
        FunctionType::get(Params[0], Params, /*isVarArg=*/false),
        Attribute);
  }
  return RetainCallee;
}

Constant *ObjCARCOpt::getRetainBlockCallee(Module *M) {
  if (!RetainBlockCallee) {
    LLVMContext &C = M->getContext();
    Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) };
    // objc_retainBlock is not nounwind because it calls user copy constructors
    // which could theoretically throw.
    RetainBlockCallee =
      M->getOrInsertFunction(
        "objc_retainBlock",
        FunctionType::get(Params[0], Params, /*isVarArg=*/false),
        AttributeSet());
  }
  return RetainBlockCallee;
}

Constant *ObjCARCOpt::getAutoreleaseCallee(Module *M) {
  if (!AutoreleaseCallee) {
    LLVMContext &C = M->getContext();
    Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) };
    AttributeSet Attribute =
      AttributeSet().addAttribute(M->getContext(), AttributeSet::FunctionIndex,
                                  Attribute::NoUnwind);
    AutoreleaseCallee =
      M->getOrInsertFunction(
        "objc_autorelease",
        FunctionType::get(Params[0], Params, /*isVarArg=*/false),
        Attribute);
  }
  return AutoreleaseCallee;
}
/// Turn objc_retain into objc_retainAutoreleasedReturnValue if the operand is a
/// return value.
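///
/// For example (illustrative IR), in
///
///   %call = call i8* @foo()
///   %0 = call i8* @objc_retain(i8* %call)
///
/// the retain operand is the return value of the immediately preceding call,
/// so the retain is rewritten to call objc_retainAutoreleasedReturnValue.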
void
ObjCARCOpt::OptimizeRetainCall(Function &F, Instruction *Retain) {
  ImmutableCallSite CS(GetObjCArg(Retain));
  const Instruction *Call = CS.getInstruction();
  if (!Call) return;
  if (Call->getParent() != Retain->getParent()) return;

  // Check that the call is next to the retain.
  BasicBlock::const_iterator I = Call;
  ++I;
  while (IsNoopInstruction(I)) ++I;
  if (&*I != Retain)
    return;

  // Turn it into an objc_retainAutoreleasedReturnValue.
  Changed = true;
  ++NumPeeps;

  DEBUG(dbgs() << "Transforming objc_retain => "
                  "objc_retainAutoreleasedReturnValue since the operand is a "
                  "return value.\nOld: " << *Retain << "\n");

  cast<CallInst>(Retain)->setCalledFunction(getRetainRVCallee(F.getParent()));

  DEBUG(dbgs() << "New: " << *Retain << "\n");
}
/// Turn objc_retainAutoreleasedReturnValue into objc_retain if the operand is
/// not a return value. Or, if it can be paired with an
/// objc_autoreleaseReturnValue, delete the pair and return true.
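///
/// For example (illustrative IR), the pair in
///
///   %0 = call i8* @objc_autoreleaseReturnValue(i8* %p)
///   %1 = call i8* @objc_retainAutoreleasedReturnValue(i8* %0)
///
/// cancels out, and both calls can be erased.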
bool
ObjCARCOpt::OptimizeRetainRVCall(Function &F, Instruction *RetainRV) {
  // Check for the argument being from an immediately preceding call or invoke.
  const Value *Arg = GetObjCArg(RetainRV);
  ImmutableCallSite CS(Arg);
  if (const Instruction *Call = CS.getInstruction()) {
    if (Call->getParent() == RetainRV->getParent()) {
      BasicBlock::const_iterator I = Call;
      ++I;
      while (IsNoopInstruction(I)) ++I;
      if (&*I == RetainRV)
        return false;
    } else if (const InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
      BasicBlock *RetainRVParent = RetainRV->getParent();
      if (II->getNormalDest() == RetainRVParent) {
        BasicBlock::const_iterator I = RetainRVParent->begin();
        while (IsNoopInstruction(I)) ++I;
        if (&*I == RetainRV)
          return false;
      }
    }
  }

  // Check for being preceded by an objc_autoreleaseReturnValue on the same
  // pointer. In this case, we can delete the pair.
  BasicBlock::iterator I = RetainRV, Begin = RetainRV->getParent()->begin();
  if (I != Begin) {
    do --I; while (I != Begin && IsNoopInstruction(I));
    if (GetBasicInstructionClass(I) == IC_AutoreleaseRV &&
        GetObjCArg(I) == Arg) {
      Changed = true;
      ++NumPeeps;

      DEBUG(dbgs() << "Erasing autoreleaseRV,retainRV pair: " << *I << "\n"
                   << "Erasing " << *RetainRV << "\n");

      EraseInstruction(I);
      EraseInstruction(RetainRV);
      return true;
    }
  }

  // Turn it into a plain objc_retain.
  Changed = true;
  ++NumPeeps;

  DEBUG(dbgs() << "Transforming objc_retainAutoreleasedReturnValue => "
                  "objc_retain since the operand is not a return value.\n"
                  "Old = " << *RetainRV << "\n");

  cast<CallInst>(RetainRV)->setCalledFunction(getRetainCallee(F.getParent()));

  DEBUG(dbgs() << "New = " << *RetainRV << "\n");

  return false;
}
/// Turn objc_autoreleaseReturnValue into objc_autorelease if the result is not
/// used as a return value.
void
ObjCARCOpt::OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV,
                                      InstructionClass &Class) {
  // Check for a return of the pointer value.
  const Value *Ptr = GetObjCArg(AutoreleaseRV);
  SmallVector<const Value *, 2> Users;
  Users.push_back(Ptr);
  do {
    Ptr = Users.pop_back_val();
    for (Value::const_use_iterator UI = Ptr->use_begin(), UE = Ptr->use_end();
         UI != UE; ++UI) {
      const User *I = *UI;
      if (isa<ReturnInst>(I) || GetBasicInstructionClass(I) == IC_RetainRV)
        return;
      if (isa<BitCastInst>(I))
        Users.push_back(I);
    }
  } while (!Users.empty());

  Changed = true;
  ++NumPeeps;

  DEBUG(dbgs() << "Transforming objc_autoreleaseReturnValue => "
                  "objc_autorelease since its operand is not used as a return "
                  "value.\n"
                  "Old = " << *AutoreleaseRV << "\n");

  CallInst *AutoreleaseRVCI = cast<CallInst>(AutoreleaseRV);
  AutoreleaseRVCI->
    setCalledFunction(getAutoreleaseCallee(F.getParent()));
  AutoreleaseRVCI->setTailCall(false); // Never tail call objc_autorelease.
  Class = IC_Autorelease;

  DEBUG(dbgs() << "New: " << *AutoreleaseRV << "\n");
}
// \brief Attempt to strength reduce objc_retainBlock calls to objc_retain
// calls.
//
// Specifically: If an objc_retainBlock call has the copy_on_escape metadata and
// does not escape (following the rules of block escaping), strength reduce the
// objc_retainBlock to an objc_retain.
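//
// For example (illustrative IR), given
//
//   %0 = call i8* @objc_retainBlock(i8* %block), !clang.arc.copy_on_escape !0
//
// where %block does not escape, the call is rewritten to objc_retain and the
// copy_on_escape metadata is dropped.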
// TODO: If an objc_retainBlock call is dominated by a previous
// objc_retainBlock call, strength reduce the objc_retainBlock to an
// objc_retain.
bool
ObjCARCOpt::OptimizeRetainBlockCall(Function &F, Instruction *Inst,
                                    InstructionClass &Class) {
  assert(GetBasicInstructionClass(Inst) == Class);
  assert(IC_RetainBlock == Class);

  // If we can not optimize Inst, return false.
  if (!IsRetainBlockOptimizable(Inst))
    return false;

  CallInst *RetainBlock = cast<CallInst>(Inst);
  RetainBlock->setCalledFunction(getRetainCallee(F.getParent()));
  // Remove copy_on_escape metadata.
  RetainBlock->setMetadata(CopyOnEscapeMDKind, 0);
  Class = IC_Retain;
  return true;
}
/// Visit each call, one at a time, and make simplifications without doing any
/// additional analysis.
void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
  DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeIndividualCalls ==\n");

  // Reset all the flags in preparation for recomputing them.
  UsedInThisFunction = 0;

  // Visit all objc_* calls in F.
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;

    InstructionClass Class = GetBasicInstructionClass(Inst);

    DEBUG(dbgs() << "Visiting: Class: " << Class << "; " << *Inst << "\n");

    switch (Class) {
    default: break;

    // Delete no-op casts. These function calls have special semantics, but
    // the semantics are entirely implemented via lowering in the front-end,
    // so by the time they reach the optimizer, they are just no-op calls
    // which return their argument.
    //
    // There are gray areas here, as the ability to cast reference-counted
    // pointers to raw void* and back allows code to break ARC assumptions,
    // however these are currently considered to be unimportant.
    case IC_NoopCast:
      Changed = true;
      ++NumNoops;
      DEBUG(dbgs() << "Erasing no-op cast: " << *Inst << "\n");
      EraseInstruction(Inst);
      continue;

    // If the pointer-to-weak-pointer is null, it's undefined behavior.
    case IC_InitWeak:
    case IC_LoadWeak:
    case IC_LoadWeakRetained:
    case IC_StoreWeak:
    case IC_DestroyWeak: {
      CallInst *CI = cast<CallInst>(Inst);
      if (IsNullOrUndef(CI->getArgOperand(0))) {
        Changed = true;
        Type *Ty = CI->getArgOperand(0)->getType();
        new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
                      Constant::getNullValue(Ty),
                      CI);
        llvm::Value *NewValue = UndefValue::get(CI->getType());
        DEBUG(dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
                        "\nOld = " << *CI << "\nNew = " << *NewValue << "\n");
        CI->replaceAllUsesWith(NewValue);
        CI->eraseFromParent();
        continue;
      }
      break;
    }
    case IC_CopyWeak:
    case IC_MoveWeak: {
      CallInst *CI = cast<CallInst>(Inst);
      if (IsNullOrUndef(CI->getArgOperand(0)) ||
          IsNullOrUndef(CI->getArgOperand(1))) {
        Changed = true;
        Type *Ty = CI->getArgOperand(0)->getType();
        new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
                      Constant::getNullValue(Ty),
                      CI);

        llvm::Value *NewValue = UndefValue::get(CI->getType());
        DEBUG(dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
                        "\nOld = " << *CI << "\nNew = " << *NewValue << "\n");

        CI->replaceAllUsesWith(NewValue);
        CI->eraseFromParent();
        continue;
      }
      break;
    }
    case IC_RetainBlock:
      // If we strength reduce an objc_retainBlock to an objc_retain, continue
      // onto the objc_retain peephole optimizations. Otherwise break.
      if (!OptimizeRetainBlockCall(F, Inst, Class))
        break;
      // FALLTHROUGH
    case IC_Retain:
      OptimizeRetainCall(F, Inst);
      break;
    case IC_RetainRV:
      if (OptimizeRetainRVCall(F, Inst))
        continue;
      break;
    case IC_AutoreleaseRV:
      OptimizeAutoreleaseRVCall(F, Inst, Class);
      break;
    }
    // objc_autorelease(x) -> objc_release(x) if x is otherwise unused.
    if (IsAutorelease(Class) && Inst->use_empty()) {
      CallInst *Call = cast<CallInst>(Inst);
      const Value *Arg = Call->getArgOperand(0);
      Arg = FindSingleUseIdentifiedObject(Arg);
      if (Arg) {
        Changed = true;
        ++NumAutoreleases;

        // Create the declaration lazily.
        LLVMContext &C = Inst->getContext();
        CallInst *NewCall =
          CallInst::Create(getReleaseCallee(F.getParent()),
                           Call->getArgOperand(0), "", Call);
        NewCall->setMetadata(ImpreciseReleaseMDKind,
                             MDNode::get(C, ArrayRef<Value *>()));

        DEBUG(dbgs() << "Replacing autorelease{,RV}(x) with objc_release(x) "
                        "since x is otherwise unused.\nOld: " << *Call
                     << "\nNew: " << *NewCall << "\n");

        EraseInstruction(Call);
        Inst = NewCall;
        Class = IC_Release;
      }
    }

    // For functions which can never be passed stack arguments, add
    // a tail keyword.
    if (IsAlwaysTail(Class)) {
      Changed = true;
      DEBUG(dbgs() << "Adding tail keyword to function since it can never be "
                      "passed stack args: " << *Inst << "\n");
      cast<CallInst>(Inst)->setTailCall();
    }

    // Ensure that functions that can never have a "tail" keyword due to the
    // semantics of ARC truly do not do so.
    if (IsNeverTail(Class)) {
      Changed = true;
      DEBUG(dbgs() << "Removing tail keyword from function: " << *Inst
            << "\n");
      cast<CallInst>(Inst)->setTailCall(false);
    }

    // Set nounwind as needed.
    if (IsNoThrow(Class)) {
      Changed = true;
      DEBUG(dbgs() << "Found no throw class. Setting nounwind on: " << *Inst
            << "\n");
      cast<CallInst>(Inst)->setDoesNotThrow();
    }

    if (!IsNoopOnNull(Class)) {
      UsedInThisFunction |= 1 << Class;
      continue;
    }

    const Value *Arg = GetObjCArg(Inst);

    // ARC calls with null are no-ops. Delete them.
    if (IsNullOrUndef(Arg)) {
      Changed = true;
      ++NumNoops;
      DEBUG(dbgs() << "ARC calls with null are no-ops. Erasing: " << *Inst
            << "\n");
      EraseInstruction(Inst);
      continue;
    }

    // Keep track of which of retain, release, autorelease, and retain_block
    // are actually present in this function.
    UsedInThisFunction |= 1 << Class;
    // If Arg is a PHI, and one or more incoming values to the PHI are null,
    // and the call is control-equivalent to the PHI, and there are no relevant
    // side effects between the PHI and the call, the call could be pushed up
    // to just those paths with non-null incoming values. For now, don't bother
    // splitting critical edges for this, as illustrated below.
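    // For example (illustrative IR), in
    //
    //   %p = phi i8* [ null, %bb1 ], [ %x, %bb2 ]
    //   call void @objc_release(i8* %p)
    //
    // the release is a no-op along the %bb1 path, so a clone of the call can
    // be inserted at the end of %bb2 (where the incoming value is non-null)
    // and the original call erased.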
    SmallVector<std::pair<Instruction *, const Value *>, 4> Worklist;
    Worklist.push_back(std::make_pair(Inst, Arg));
    do {
      std::pair<Instruction *, const Value *> Pair = Worklist.pop_back_val();
      Inst = Pair.first;
      Arg = Pair.second;

      const PHINode *PN = dyn_cast<PHINode>(Arg);
      if (!PN) continue;

      // Determine if the PHI has any null operands, or any incoming
      // critical edges.
      bool HasNull = false;
      bool HasCriticalEdges = false;
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        Value *Incoming =
          StripPointerCastsAndObjCCalls(PN->getIncomingValue(i));
        if (IsNullOrUndef(Incoming))
          HasNull = true;
        else if (cast<TerminatorInst>(PN->getIncomingBlock(i)->back())
                   .getNumSuccessors() != 1) {
          HasCriticalEdges = true;
          break;
        }
      }
      // If we have null operands and no critical edges, optimize.
      if (!HasCriticalEdges && HasNull) {
        SmallPtrSet<Instruction *, 4> DependingInstructions;
        SmallPtrSet<const BasicBlock *, 4> Visited;

        // Check that there is nothing that cares about the reference
        // count between the call and the phi.
        switch (Class) {
        case IC_Retain:
        case IC_RetainBlock:
          // These can always be moved up.
          break;
        case IC_Release:
          // These can't be moved across things that care about the retain
          // count.
          FindDependencies(NeedsPositiveRetainCount, Arg,
                           Inst->getParent(), Inst,
                           DependingInstructions, Visited, PA);
          break;
        case IC_Autorelease:
          // These can't be moved across autorelease pool scope boundaries.
          FindDependencies(AutoreleasePoolBoundary, Arg,
                           Inst->getParent(), Inst,
                           DependingInstructions, Visited, PA);
          break;
        case IC_RetainRV:
        case IC_AutoreleaseRV:
          // Don't move these; the RV optimization depends on the autoreleaseRV
          // being tail called, and the retainRV being immediately after a call
          // (which might still happen if we get lucky with codegen layout, but
          // it's not worth taking the chance).
          continue;
        default:
          llvm_unreachable("Invalid dependence flavor");
        }

        if (DependingInstructions.size() == 1 &&
            *DependingInstructions.begin() == PN) {
          Changed = true;
          ++NumPartialNoops;
          // Clone the call into each predecessor that has a non-null value.
          CallInst *CInst = cast<CallInst>(Inst);
          Type *ParamTy = CInst->getArgOperand(0)->getType();
          for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
            Value *Incoming =
              StripPointerCastsAndObjCCalls(PN->getIncomingValue(i));
            if (!IsNullOrUndef(Incoming)) {
              CallInst *Clone = cast<CallInst>(CInst->clone());
              Value *Op = PN->getIncomingValue(i);
              Instruction *InsertPos = &PN->getIncomingBlock(i)->back();
              if (Op->getType() != ParamTy)
                Op = new BitCastInst(Op, ParamTy, "", InsertPos);
              Clone->setArgOperand(0, Op);
              Clone->insertBefore(InsertPos);

              DEBUG(dbgs() << "Cloning "
                    << *CInst << "\n"
                       "And inserting clone at " << *InsertPos << "\n");
              Worklist.push_back(std::make_pair(Clone, Incoming));
            }
          }
          // Erase the original call.
          DEBUG(dbgs() << "Erasing: " << *CInst << "\n");
          EraseInstruction(CInst);
          continue;
        }
      }
    } while (!Worklist.empty());
  }
}
/// If we have a top down pointer in the S_Use state, make sure that there are
/// no CFG hazards by checking the states of various bottom up pointers.
static void CheckForUseCFGHazard(const Sequence SuccSSeq,
                                 const bool SuccSRRIKnownSafe,
                                 PtrState &S,
                                 bool &SomeSuccHasSame,
                                 bool &AllSuccsHaveSame,
                                 bool &ShouldContinue) {
  switch (SuccSSeq) {
  case S_CanRelease: {
    if (!S.RRI.KnownSafe && !SuccSRRIKnownSafe) {
      S.ClearSequenceProgress();
      break;
    }
    ShouldContinue = true;
    break;
  }
  case S_Use:
    SomeSuccHasSame = true;
    break;
  case S_Stop:
  case S_Release:
  case S_MovableRelease:
    if (!S.RRI.KnownSafe && !SuccSRRIKnownSafe)
      AllSuccsHaveSame = false;
    break;
  case S_Retain:
    llvm_unreachable("bottom-up pointer in retain state!");
  case S_None:
    llvm_unreachable("This should have been handled earlier.");
  }
}

/// If we have a Top Down pointer in the S_CanRelease state, make sure that
/// there are no CFG hazards by checking the states of various bottom up
/// pointers.
static void CheckForCanReleaseCFGHazard(const Sequence SuccSSeq,
                                        const bool SuccSRRIKnownSafe,
                                        PtrState &S,
                                        bool &SomeSuccHasSame,
                                        bool &AllSuccsHaveSame) {
  switch (SuccSSeq) {
  case S_CanRelease:
    SomeSuccHasSame = true;
    break;
  case S_Stop:
  case S_Release:
  case S_MovableRelease:
  case S_Use:
    if (!S.RRI.KnownSafe && !SuccSRRIKnownSafe)
      AllSuccsHaveSame = false;
    break;
  case S_Retain:
    llvm_unreachable("bottom-up pointer in retain state!");
  case S_None:
    llvm_unreachable("This should have been handled earlier.");
  }
}
/// Check for critical edges, loop boundaries, irreducible control flow, or
/// other CFG structures where moving code across the edge would result in it
/// being executed more.
void
ObjCARCOpt::CheckForCFGHazards(const BasicBlock *BB,
                               DenseMap<const BasicBlock *, BBState> &BBStates,
                               BBState &MyStates) const {
  // If any top-down local-use or possible-dec has a succ which is earlier in
  // the sequence, forget it.
  for (BBState::ptr_iterator I = MyStates.top_down_ptr_begin(),
       E = MyStates.top_down_ptr_end(); I != E; ++I) {
    PtrState &S = I->second;
    const Sequence Seq = I->second.GetSeq();

    // We only care about S_Retain, S_CanRelease, and S_Use.
    if (Seq == S_None)
      continue;

    // Make sure that if extra top down states are added in the future, this
    // code is updated to handle them.
    assert((Seq == S_Retain || Seq == S_CanRelease || Seq == S_Use) &&
           "Unknown top down sequence state.");

    const Value *Arg = I->first;
    const TerminatorInst *TI = cast<TerminatorInst>(&BB->back());
    bool SomeSuccHasSame = false;
    bool AllSuccsHaveSame = true;

    succ_const_iterator SI(TI), SE(TI, false);

    for (; SI != SE; ++SI) {
      // If VisitBottomUp has pointer information for this successor, take
      // what we know about it.
      const DenseMap<const BasicBlock *, BBState>::iterator BBI =
        BBStates.find(*SI);
      assert(BBI != BBStates.end());
      const PtrState &SuccS = BBI->second.getPtrBottomUpState(Arg);
      const Sequence SuccSSeq = SuccS.GetSeq();

      // If the bottom-up pointer is in an S_None state, clear the sequence
      // progress, since the sequence in the bottom-up state finished,
      // suggesting a mismatch between retains and releases. This is true for
      // all three cases that we are handling here: S_Retain, S_Use, and
      // S_CanRelease.
      if (SuccSSeq == S_None) {
        S.ClearSequenceProgress();
        continue;
      }

      // If we have S_Use or S_CanRelease, perform our check for CFG hazards.
      const bool SuccSRRIKnownSafe = SuccS.RRI.KnownSafe;

      // *NOTE* We do not use Seq from above here since we are allowing for
      // S.GetSeq() to change while we are visiting basic blocks.
      switch(S.GetSeq()) {
      case S_Use: {
        bool ShouldContinue = false;
        CheckForUseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S,
                             SomeSuccHasSame, AllSuccsHaveSame,
                             ShouldContinue);
        if (ShouldContinue)
          continue;
        break;
      }
      case S_CanRelease: {
        CheckForCanReleaseCFGHazard(SuccSSeq, SuccSRRIKnownSafe,
                                    S, SomeSuccHasSame,
                                    AllSuccsHaveSame);
        break;
      }
      case S_Retain:
      case S_None:
      case S_Stop:
      case S_Release:
      case S_MovableRelease:
        break;
      }
    }

    // If the state at the other end of any of the successor edges
    // matches the current state, require all edges to match. This
    // guards against loops in the middle of a sequence.
    if (SomeSuccHasSame && !AllSuccsHaveSame)
      S.ClearSequenceProgress();
  }
}
1811 ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst,
1813 MapVector<Value *, RRInfo> &Retains,
1814 BBState &MyStates) {
1815 bool NestingDetected = false;
1816 InstructionClass Class = GetInstructionClass(Inst);
1817 const Value *Arg = 0;
1819 DEBUG(dbgs() << "Class: " << Class << "\n");
1823 Arg = GetObjCArg(Inst);
1825 PtrState &S = MyStates.getPtrBottomUpState(Arg);
1827 // Check whether we see two releases in a row on the same pointer. If so,
1828 // make a note, and we'll circle back to revisit it after we've
1829 // hopefully eliminated the second release, which may allow us to
1830 // eliminate the first release too.
1831 // Theoretically we could implement removal of nested retain+release
1832 // pairs by making PtrState hold a stack of states, but this is
1833 // simple and avoids adding overhead for the non-nested case.
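// An illustrative example of the nested-release shape noted here
// (hypothetical IR, reduced to the relevant calls):
//
//   call void @objc_release(i8* %x)
//   ; ... code that does not use %x ...
//   call void @objc_release(i8* %x)   ; the bottom-up walk sees this first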
1834 if (S.GetSeq() == S_Release || S.GetSeq() == S_MovableRelease) {
1835 DEBUG(dbgs() << "Found nested releases (i.e. a release pair)\n");
1836 NestingDetected = true;
1839 MDNode *ReleaseMetadata = Inst->getMetadata(ImpreciseReleaseMDKind);
1840 Sequence NewSeq = ReleaseMetadata ? S_MovableRelease : S_Release;
1841 ANNOTATE_BOTTOMUP(Inst, Arg, S.GetSeq(), NewSeq);
1842 S.ResetSequenceProgress(NewSeq);
1843 S.RRI.ReleaseMetadata = ReleaseMetadata;
1844 S.RRI.KnownSafe = S.HasKnownPositiveRefCount();
1845 S.RRI.IsTailCallRelease = cast<CallInst>(Inst)->isTailCall();
1846 S.RRI.Calls.insert(Inst);
1847 S.SetKnownPositiveRefCount();
1850 case IC_RetainBlock:
1851 // In OptimizeIndividualCalls, we have strength reduced all optimizable
1852 // objc_retainBlocks to objc_retains. Thus at this point any
1853 // objc_retainBlocks that we see are not optimizable.
1857 Arg = GetObjCArg(Inst);
1859 PtrState &S = MyStates.getPtrBottomUpState(Arg);
1860 S.SetKnownPositiveRefCount();
1862 Sequence OldSeq = S.GetSeq();
1866 case S_MovableRelease:
1868 // If OldSeq is not S_Use or OldSeq is S_Use and we are tracking an
1869 // imprecise release, clear our reverse insertion points.
1870 if (OldSeq != S_Use || S.RRI.IsTrackingImpreciseReleases())
1871 S.RRI.ReverseInsertPts.clear();
1874 // Don't do retain+release tracking for IC_RetainRV, because it's
1875 // better to let it remain as the first instruction after a call.
1876 if (Class != IC_RetainRV)
1877 Retains[Inst] = S.RRI;
1878 S.ClearSequenceProgress();
1883 llvm_unreachable("bottom-up pointer in retain state!");
1885 ANNOTATE_BOTTOMUP(Inst, Arg, OldSeq, S.GetSeq());
1886 // A retain moving bottom up can be a use.
1889 case IC_AutoreleasepoolPop:
1890 // Conservatively, clear MyStates for all known pointers.
1891 MyStates.clearBottomUpPointers();
1892 return NestingDetected;
1893 case IC_AutoreleasepoolPush:
1895 // These are irrelevant.
1896 return NestingDetected;
1901 // Consider any other possible effects of this instruction on each
1902 // pointer being tracked.
1903 for (BBState::ptr_iterator MI = MyStates.bottom_up_ptr_begin(),
1904 ME = MyStates.bottom_up_ptr_end(); MI != ME; ++MI) {
1905 const Value *Ptr = MI->first;
1907 continue; // Handled above.
1908 PtrState &S = MI->second;
1909 Sequence Seq = S.GetSeq();
1911 // Check for possible releases.
1912 if (CanAlterRefCount(Inst, Ptr, PA, Class)) {
1913 DEBUG(dbgs() << "CanAlterRefCount: Seq: " << Seq << "; " << *Ptr
1915 S.ClearKnownPositiveRefCount();
1918 S.SetSeq(S_CanRelease);
1919 ANNOTATE_BOTTOMUP(Inst, Ptr, Seq, S.GetSeq());
1923 case S_MovableRelease:
1928 llvm_unreachable("bottom-up pointer in retain state!");
1932 // Check for possible direct uses.
1935 case S_MovableRelease:
1936 if (CanUse(Inst, Ptr, PA, Class)) {
1937 DEBUG(dbgs() << "CanUse: Seq: " << Seq << "; " << *Ptr
1939 assert(S.RRI.ReverseInsertPts.empty());
1940 // If this is an invoke instruction, we're scanning it as part of
1941 // one of its successor blocks, since we can't insert code after it
1942 // in its own block, and we don't want to split critical edges.
1943 if (isa<InvokeInst>(Inst))
1944 S.RRI.ReverseInsertPts.insert(BB->getFirstInsertionPt());
1946 S.RRI.ReverseInsertPts.insert(llvm::next(BasicBlock::iterator(Inst)));
1948 ANNOTATE_BOTTOMUP(Inst, Ptr, Seq, S_Use);
1949 } else if (Seq == S_Release && IsUser(Class)) {
1950 DEBUG(dbgs() << "PreciseReleaseUse: Seq: " << Seq << "; " << *Ptr
1952 // Non-movable releases depend on any possible objc pointer use.
1954 ANNOTATE_BOTTOMUP(Inst, Ptr, S_Release, S_Stop);
1955 assert(S.RRI.ReverseInsertPts.empty());
1956 // As above; handle invoke specially.
1957 if (isa<InvokeInst>(Inst))
1958 S.RRI.ReverseInsertPts.insert(BB->getFirstInsertionPt());
1960 S.RRI.ReverseInsertPts.insert(llvm::next(BasicBlock::iterator(Inst)));
1964 if (CanUse(Inst, Ptr, PA, Class)) {
1965 DEBUG(dbgs() << "PreciseStopUse: Seq: " << Seq << "; " << *Ptr
1968 ANNOTATE_BOTTOMUP(Inst, Ptr, Seq, S_Use);
1976 llvm_unreachable("bottom-up pointer in retain state!");
1980 return NestingDetected;
1984 ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
1985 DenseMap<const BasicBlock *, BBState> &BBStates,
1986 MapVector<Value *, RRInfo> &Retains) {
1988 DEBUG(dbgs() << "\n== ObjCARCOpt::VisitBottomUp ==\n");
1990 bool NestingDetected = false;
1991 BBState &MyStates = BBStates[BB];
1993 // Merge the states from each successor to compute the initial state
1994 // for the current block.
1995 BBState::edge_iterator SI(MyStates.succ_begin()),
1996 SE(MyStates.succ_end());
1998 const BasicBlock *Succ = *SI;
1999 DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Succ);
2000 assert(I != BBStates.end());
2001 MyStates.InitFromSucc(I->second);
2003 for (; SI != SE; ++SI) {
2005 I = BBStates.find(Succ);
2006 assert(I != BBStates.end());
2007 MyStates.MergeSucc(I->second);
2011 // If ARC Annotations are enabled, output the current state of pointers at the
2012 // bottom of the basic block.
2013 ANNOTATE_BOTTOMUP_BBEND(MyStates, BB);
2015 // Visit all the instructions, bottom-up.
2016 for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; --I) {
2017 Instruction *Inst = llvm::prior(I);
2019 // Invoke instructions are visited as part of their successors (below).
2020 if (isa<InvokeInst>(Inst))
2023 DEBUG(dbgs() << "Visiting " << *Inst << "\n");
2025 NestingDetected |= VisitInstructionBottomUp(Inst, BB, Retains, MyStates);
2028 // If there's a predecessor with an invoke, visit the invoke as if it were
2029 // part of this block, since we can't insert code after an invoke in its own
2030 // block, and we don't want to split critical edges.
2031 for (BBState::edge_iterator PI(MyStates.pred_begin()),
2032 PE(MyStates.pred_end()); PI != PE; ++PI) {
2033 BasicBlock *Pred = *PI;
2034 if (InvokeInst *II = dyn_cast<InvokeInst>(&Pred->back()))
2035 NestingDetected |= VisitInstructionBottomUp(II, BB, Retains, MyStates);
2038 // If ARC Annotations are enabled, output the current state of pointers at the
2039 // top of the basic block.
2040 ANNOTATE_BOTTOMUP_BBSTART(MyStates, BB);
2042 return NestingDetected;
2046 ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst,
2047 DenseMap<Value *, RRInfo> &Releases,
2048 BBState &MyStates) {
2049 bool NestingDetected = false;
2050 InstructionClass Class = GetInstructionClass(Inst);
2051 const Value *Arg = 0;
2054 case IC_RetainBlock:
2055 // In OptimizeIndividualCalls, we have strength reduced all optimizable
2056 // objc_retainBlocks to objc_retains. Thus at this point any
2057 // objc_retainBlocks that we see are not optimizable.
2061 Arg = GetObjCArg(Inst);
2063 PtrState &S = MyStates.getPtrTopDownState(Arg);
2065 // Don't do retain+release tracking for IC_RetainRV, because it's
2066 // better to let it remain as the first instruction after a call.
2067 if (Class != IC_RetainRV) {
2068 // Check whether we see two retains in a row on the same pointer. If so,
2069 // make a note, and we'll circle back to revisit it after we've
2070 // hopefully eliminated the second retain, which may allow us to
2071 // eliminate the first retain too.
2072 // Theoretically we could implement removal of nested retain+release
2073 // pairs by making PtrState hold a stack of states, but this is
2074 // simple and avoids adding overhead for the non-nested case.
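// Symmetric to the nested-release case in VisitInstructionBottomUp, e.g.
// (hypothetical IR):
//
//   %1 = call i8* @objc_retain(i8* %x)
//   %2 = call i8* @objc_retain(i8* %x)   ; S is already in S_Retain here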
2075 if (S.GetSeq() == S_Retain)
2076 NestingDetected = true;
2078 ANNOTATE_TOPDOWN(Inst, Arg, S.GetSeq(), S_Retain);
2079 S.ResetSequenceProgress(S_Retain);
2080 S.RRI.KnownSafe = S.HasKnownPositiveRefCount();
2081 S.RRI.Calls.insert(Inst);
2084 S.SetKnownPositiveRefCount();
2086 // A retain can be a potential use; proceed to the generic checking code below.
2091 Arg = GetObjCArg(Inst);
2093 PtrState &S = MyStates.getPtrTopDownState(Arg);
2094 S.ClearKnownPositiveRefCount();
2096 Sequence OldSeq = S.GetSeq();
2098 MDNode *ReleaseMetadata = Inst->getMetadata(ImpreciseReleaseMDKind);
2103 if (OldSeq == S_Retain || ReleaseMetadata != 0)
2104 S.RRI.ReverseInsertPts.clear();
2107 S.RRI.ReleaseMetadata = ReleaseMetadata;
2108 S.RRI.IsTailCallRelease = cast<CallInst>(Inst)->isTailCall();
2109 Releases[Inst] = S.RRI;
2110 ANNOTATE_TOPDOWN(Inst, Arg, S.GetSeq(), S_None);
2111 S.ClearSequenceProgress();
2117 case S_MovableRelease:
2118 llvm_unreachable("top-down pointer in release state!");
2122 case IC_AutoreleasepoolPop:
2123 // Conservatively, clear MyStates for all known pointers.
2124 MyStates.clearTopDownPointers();
2125 return NestingDetected;
2126 case IC_AutoreleasepoolPush:
2128 // These are irrelevant.
2129 return NestingDetected;
2134 // Consider any other possible effects of this instruction on each
2135 // pointer being tracked.
2136 for (BBState::ptr_iterator MI = MyStates.top_down_ptr_begin(),
2137 ME = MyStates.top_down_ptr_end(); MI != ME; ++MI) {
2138 const Value *Ptr = MI->first;
2140 continue; // Handled above.
2141 PtrState &S = MI->second;
2142 Sequence Seq = S.GetSeq();
2144 // Check for possible releases.
2145 if (CanAlterRefCount(Inst, Ptr, PA, Class)) {
2146 DEBUG(dbgs() << "CanAlterRefCount: Seq: " << Seq << "; " << *Ptr
2148 S.ClearKnownPositiveRefCount();
2151 S.SetSeq(S_CanRelease);
2152 ANNOTATE_TOPDOWN(Inst, Ptr, Seq, S_CanRelease);
2153 assert(S.RRI.ReverseInsertPts.empty());
2154 S.RRI.ReverseInsertPts.insert(Inst);
2156 // One call can't cause a transition from S_Retain to S_CanRelease
2157 // and S_CanRelease to S_Use. If we've made the first transition,
2166 case S_MovableRelease:
2167 llvm_unreachable("top-down pointer in release state!");
2171 // Check for possible direct uses.
2174 if (CanUse(Inst, Ptr, PA, Class)) {
2175 DEBUG(dbgs() << "CanUse: Seq: " << Seq << "; " << *Ptr
2178 ANNOTATE_TOPDOWN(Inst, Ptr, Seq, S_Use);
2187 case S_MovableRelease:
2188 llvm_unreachable("top-down pointer in release state!");
2192 return NestingDetected;
2196 ObjCARCOpt::VisitTopDown(BasicBlock *BB,
2197 DenseMap<const BasicBlock *, BBState> &BBStates,
2198 DenseMap<Value *, RRInfo> &Releases) {
2199 DEBUG(dbgs() << "\n== ObjCARCOpt::VisitTopDown ==\n");
2200 bool NestingDetected = false;
2201 BBState &MyStates = BBStates[BB];
2203 // Merge the states from each predecessor to compute the initial state
2204 // for the current block.
2205 BBState::edge_iterator PI(MyStates.pred_begin()),
2206 PE(MyStates.pred_end());
2208 const BasicBlock *Pred = *PI;
2209 DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Pred);
2210 assert(I != BBStates.end());
2211 MyStates.InitFromPred(I->second);
2213 for (; PI != PE; ++PI) {
2215 I = BBStates.find(Pred);
2216 assert(I != BBStates.end());
2217 MyStates.MergePred(I->second);
2221 // If ARC Annotations are enabled, output the current state of pointers at the
2222 // top of the basic block.
2223 ANNOTATE_TOPDOWN_BBSTART(MyStates, BB);
2225 // Visit all the instructions, top-down.
2226 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
2227 Instruction *Inst = I;
2229 DEBUG(dbgs() << "Visiting " << *Inst << "\n");
2231 NestingDetected |= VisitInstructionTopDown(Inst, Releases, MyStates);
2234 // If ARC Annotations are enabled, output the current state of pointers at the
2235 // bottom of the basic block.
2236 ANNOTATE_TOPDOWN_BBEND(MyStates, BB);
2238 #ifdef ARC_ANNOTATIONS
2239 if (!(EnableARCAnnotations && DisableCheckForCFGHazards))
2241 CheckForCFGHazards(BB, BBStates, MyStates);
2242 return NestingDetected;
2246 ComputePostOrders(Function &F,
2247 SmallVectorImpl<BasicBlock *> &PostOrder,
2248 SmallVectorImpl<BasicBlock *> &ReverseCFGPostOrder,
2249 unsigned NoObjCARCExceptionsMDKind,
2250 DenseMap<const BasicBlock *, BBState> &BBStates) {
2251 /// The visited set, for doing DFS walks.
2252 SmallPtrSet<BasicBlock *, 16> Visited;
2254 // Do DFS, computing the PostOrder.
2255 SmallPtrSet<BasicBlock *, 16> OnStack;
2256 SmallVector<std::pair<BasicBlock *, succ_iterator>, 16> SuccStack;
2258 // Functions always have exactly one entry block, and we don't have
2259 // any other block that we treat like an entry block.
2260 BasicBlock *EntryBB = &F.getEntryBlock();
2261 BBState &MyStates = BBStates[EntryBB];
2262 MyStates.SetAsEntry();
2263 TerminatorInst *EntryTI = cast<TerminatorInst>(&EntryBB->back());
2264 SuccStack.push_back(std::make_pair(EntryBB, succ_iterator(EntryTI)));
2265 Visited.insert(EntryBB);
2266 OnStack.insert(EntryBB);
2269 BasicBlock *CurrBB = SuccStack.back().first;
2270 TerminatorInst *TI = cast<TerminatorInst>(&CurrBB->back());
2271 succ_iterator SE(TI, false);
2273 while (SuccStack.back().second != SE) {
2274 BasicBlock *SuccBB = *SuccStack.back().second++;
2275 if (Visited.insert(SuccBB)) {
2276 TerminatorInst *TI = cast<TerminatorInst>(&SuccBB->back());
2277 SuccStack.push_back(std::make_pair(SuccBB, succ_iterator(TI)));
2278 BBStates[CurrBB].addSucc(SuccBB);
2279 BBState &SuccStates = BBStates[SuccBB];
2280 SuccStates.addPred(CurrBB);
2281 OnStack.insert(SuccBB);
2285 if (!OnStack.count(SuccBB)) {
2286 BBStates[CurrBB].addSucc(SuccBB);
2287 BBStates[SuccBB].addPred(CurrBB);
2290 OnStack.erase(CurrBB);
2291 PostOrder.push_back(CurrBB);
2292 SuccStack.pop_back();
2293 } while (!SuccStack.empty());
2297 // Do reverse-CFG DFS, computing the reverse-CFG PostOrder.
2298 // Functions may have many exits, and there are also blocks which we treat
2299 // as exits due to ignored edges.
2300 SmallVector<std::pair<BasicBlock *, BBState::edge_iterator>, 16> PredStack;
2301 for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
2302 BasicBlock *ExitBB = I;
2303 BBState &MyStates = BBStates[ExitBB];
2304 if (!MyStates.isExit())
2307 MyStates.SetAsExit();
2309 PredStack.push_back(std::make_pair(ExitBB, MyStates.pred_begin()));
2310 Visited.insert(ExitBB);
2311 while (!PredStack.empty()) {
2312 reverse_dfs_next_succ:
2313 BBState::edge_iterator PE = BBStates[PredStack.back().first].pred_end();
2314 while (PredStack.back().second != PE) {
2315 BasicBlock *BB = *PredStack.back().second++;
2316 if (Visited.insert(BB)) {
2317 PredStack.push_back(std::make_pair(BB, BBStates[BB].pred_begin()));
2318 goto reverse_dfs_next_succ;
2321 ReverseCFGPostOrder.push_back(PredStack.pop_back_val().first);
2326 // Visit the function both top-down and bottom-up.
2328 ObjCARCOpt::Visit(Function &F,
2329 DenseMap<const BasicBlock *, BBState> &BBStates,
2330 MapVector<Value *, RRInfo> &Retains,
2331 DenseMap<Value *, RRInfo> &Releases) {
2333 // Use reverse-postorder traversals, because we magically know that loops
2334 // will be well behaved, i.e. they won't repeatedly call retain on a single
2335 // pointer without doing a release. We can't use the ReversePostOrderTraversal
2336 // class here because we want the reverse-CFG postorder to consider each
2337 // function exit point, and we want to ignore selected cycle edges.
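// As a small worked example (hypothetical CFG): for a diamond
//
//        A
//       / \
//      B   C
//       \ /
//        D
//
// one forward DFS post-order is D, B, C, A, so the reverse-postorder used
// for the top-down pass is A, C, B, D; a reverse-CFG post-order rooted at
// the exit D is A, B, C, D, so its reverse (used for bottom-up) is
// D, C, B, A. Either way, ignoring back edges, each block is processed only
// after all of its predecessors (top-down) or successors (bottom-up).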
2338 SmallVector<BasicBlock *, 16> PostOrder;
2339 SmallVector<BasicBlock *, 16> ReverseCFGPostOrder;
2340 ComputePostOrders(F, PostOrder, ReverseCFGPostOrder,
2341 NoObjCARCExceptionsMDKind,
2344 // Use reverse-postorder on the reverse CFG for bottom-up.
2345 bool BottomUpNestingDetected = false;
2346 for (SmallVectorImpl<BasicBlock *>::const_reverse_iterator I =
2347 ReverseCFGPostOrder.rbegin(), E = ReverseCFGPostOrder.rend();
2349 BottomUpNestingDetected |= VisitBottomUp(*I, BBStates, Retains);
2351 // Use reverse-postorder for top-down.
2352 bool TopDownNestingDetected = false;
2353 for (SmallVectorImpl<BasicBlock *>::const_reverse_iterator I =
2354 PostOrder.rbegin(), E = PostOrder.rend();
2356 TopDownNestingDetected |= VisitTopDown(*I, BBStates, Releases);
2358 return TopDownNestingDetected && BottomUpNestingDetected;
2361 /// Move the calls in RetainsToMove and ReleasesToMove.
2362 void ObjCARCOpt::MoveCalls(Value *Arg,
2363 RRInfo &RetainsToMove,
2364 RRInfo &ReleasesToMove,
2365 MapVector<Value *, RRInfo> &Retains,
2366 DenseMap<Value *, RRInfo> &Releases,
2367 SmallVectorImpl<Instruction *> &DeadInsts,
2369 Type *ArgTy = Arg->getType();
2370 Type *ParamTy = PointerType::getUnqual(Type::getInt8Ty(ArgTy->getContext()));
2372 DEBUG(dbgs() << "== ObjCARCOpt::MoveCalls ==\n");
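// Conceptually (an illustrative sketch): new retains are created at the
// points recorded in ReleasesToMove.ReverseInsertPts, new releases at the
// points in RetainsToMove.ReverseInsertPts, and the original calls are then
// deleted. A pair such as
//
//   %0 = call i8* @objc_retain(i8* %x)
//   ; ... code that cannot touch %x's reference count ...
//   call void @objc_release(i8* %x)
//
// thus has its retain/release region tightened, or is removed outright when
// no insertion points remain.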
2374 // Insert the new retain and release calls.
2375 for (SmallPtrSet<Instruction *, 2>::const_iterator
2376 PI = ReleasesToMove.ReverseInsertPts.begin(),
2377 PE = ReleasesToMove.ReverseInsertPts.end(); PI != PE; ++PI) {
2378 Instruction *InsertPt = *PI;
2379 Value *MyArg = ArgTy == ParamTy ? Arg :
2380 new BitCastInst(Arg, ParamTy, "", InsertPt);
2382 CallInst::Create(getRetainCallee(M), MyArg, "", InsertPt);
2383 Call->setDoesNotThrow();
2384 Call->setTailCall();
2386 DEBUG(dbgs() << "Inserting new Release: " << *Call << "\n"
2387 "At insertion point: " << *InsertPt << "\n");
2389 for (SmallPtrSet<Instruction *, 2>::const_iterator
2390 PI = RetainsToMove.ReverseInsertPts.begin(),
2391 PE = RetainsToMove.ReverseInsertPts.end(); PI != PE; ++PI) {
2392 Instruction *InsertPt = *PI;
2393 Value *MyArg = ArgTy == ParamTy ? Arg :
2394 new BitCastInst(Arg, ParamTy, "", InsertPt);
2395 CallInst *Call = CallInst::Create(getReleaseCallee(M), MyArg,
2397 // Attach a clang.imprecise_release metadata tag, if appropriate.
2398 if (MDNode *M = ReleasesToMove.ReleaseMetadata)
2399 Call->setMetadata(ImpreciseReleaseMDKind, M);
2400 Call->setDoesNotThrow();
2401 if (ReleasesToMove.IsTailCallRelease)
2402 Call->setTailCall();
2404 DEBUG(dbgs() << "Inserting new Release: " << *Call << "\n"
2405 "At insertion point: " << *InsertPt << "\n");
2408 // Delete the original retain and release calls.
2409 for (SmallPtrSet<Instruction *, 2>::const_iterator
2410 AI = RetainsToMove.Calls.begin(),
2411 AE = RetainsToMove.Calls.end(); AI != AE; ++AI) {
2412 Instruction *OrigRetain = *AI;
2413 Retains.blot(OrigRetain);
2414 DeadInsts.push_back(OrigRetain);
2415 DEBUG(dbgs() << "Deleting retain: " << *OrigRetain << "\n");
2417 for (SmallPtrSet<Instruction *, 2>::const_iterator
2418 AI = ReleasesToMove.Calls.begin(),
2419 AE = ReleasesToMove.Calls.end(); AI != AE; ++AI) {
2420 Instruction *OrigRelease = *AI;
2421 Releases.erase(OrigRelease);
2422 DeadInsts.push_back(OrigRelease);
2423 DEBUG(dbgs() << "Deleting release: " << *OrigRelease << "\n");
2429 ObjCARCOpt::ConnectTDBUTraversals(DenseMap<const BasicBlock *, BBState>
2431 MapVector<Value *, RRInfo> &Retains,
2432 DenseMap<Value *, RRInfo> &Releases,
2434 SmallVector<Instruction *, 4> &NewRetains,
2435 SmallVector<Instruction *, 4> &NewReleases,
2436 SmallVector<Instruction *, 8> &DeadInsts,
2437 RRInfo &RetainsToMove,
2438 RRInfo &ReleasesToMove,
2441 bool &AnyPairsCompletelyEliminated) {
2442 // If a pair happens in a region where it is known that the reference count
2443 // is already incremented, we can similarly ignore possible decrements.
2444 bool KnownSafeTD = true, KnownSafeBU = true;
2446 // Connect the dots between the top-down-collected RetainsToMove and
2447 // bottom-up-collected ReleasesToMove to form sets of related calls.
2448 // This is an iterative process so that we connect multiple releases
2449 // to multiple retains if needed.
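// For instance (hypothetical CFG), one retain before a branch may pair with
// a release in each arm:
//
//   entry:
//     %0 = call i8* @objc_retain(i8* %x)
//     br i1 %c, label %then, label %else
//   then:
//     call void @objc_release(i8* %x)
//     br label %merge
//   else:
//     call void @objc_release(i8* %x)
//     br label %merge
//
// and, dually, a release may connect back to additional retains, which is
// why the two walks below alternate until they reach a fixed point.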
2450 unsigned OldDelta = 0;
2451 unsigned NewDelta = 0;
2452 unsigned OldCount = 0;
2453 unsigned NewCount = 0;
2454 bool FirstRelease = true;
2456 for (SmallVectorImpl<Instruction *>::const_iterator
2457 NI = NewRetains.begin(), NE = NewRetains.end(); NI != NE; ++NI) {
2458 Instruction *NewRetain = *NI;
2459 MapVector<Value *, RRInfo>::const_iterator It = Retains.find(NewRetain);
2460 assert(It != Retains.end());
2461 const RRInfo &NewRetainRRI = It->second;
2462 KnownSafeTD &= NewRetainRRI.KnownSafe;
2463 for (SmallPtrSet<Instruction *, 2>::const_iterator
2464 LI = NewRetainRRI.Calls.begin(),
2465 LE = NewRetainRRI.Calls.end(); LI != LE; ++LI) {
2466 Instruction *NewRetainRelease = *LI;
2467 DenseMap<Value *, RRInfo>::const_iterator Jt =
2468 Releases.find(NewRetainRelease);
2469 if (Jt == Releases.end())
2471 const RRInfo &NewRetainReleaseRRI = Jt->second;
2472 assert(NewRetainReleaseRRI.Calls.count(NewRetain));
2473 if (ReleasesToMove.Calls.insert(NewRetainRelease)) {
2475 BBStates[NewRetainRelease->getParent()].GetAllPathCount();
2477 // Merge the ReleaseMetadata and IsTailCallRelease values.
2479 ReleasesToMove.ReleaseMetadata =
2480 NewRetainReleaseRRI.ReleaseMetadata;
2481 ReleasesToMove.IsTailCallRelease =
2482 NewRetainReleaseRRI.IsTailCallRelease;
2483 FirstRelease = false;
2485 if (ReleasesToMove.ReleaseMetadata !=
2486 NewRetainReleaseRRI.ReleaseMetadata)
2487 ReleasesToMove.ReleaseMetadata = 0;
2488 if (ReleasesToMove.IsTailCallRelease !=
2489 NewRetainReleaseRRI.IsTailCallRelease)
2490 ReleasesToMove.IsTailCallRelease = false;
2493 // Collect the optimal insertion points.
2495 for (SmallPtrSet<Instruction *, 2>::const_iterator
2496 RI = NewRetainReleaseRRI.ReverseInsertPts.begin(),
2497 RE = NewRetainReleaseRRI.ReverseInsertPts.end();
2499 Instruction *RIP = *RI;
2500 if (ReleasesToMove.ReverseInsertPts.insert(RIP))
2501 NewDelta -= BBStates[RIP->getParent()].GetAllPathCount();
2503 NewReleases.push_back(NewRetainRelease);
2508 if (NewReleases.empty()) break;
2510 // Back the other way.
2511 for (SmallVectorImpl<Instruction *>::const_iterator
2512 NI = NewReleases.begin(), NE = NewReleases.end(); NI != NE; ++NI) {
2513 Instruction *NewRelease = *NI;
2514 DenseMap<Value *, RRInfo>::const_iterator It =
2515 Releases.find(NewRelease);
2516 assert(It != Releases.end());
2517 const RRInfo &NewReleaseRRI = It->second;
2518 KnownSafeBU &= NewReleaseRRI.KnownSafe;
2519 for (SmallPtrSet<Instruction *, 2>::const_iterator
2520 LI = NewReleaseRRI.Calls.begin(),
2521 LE = NewReleaseRRI.Calls.end(); LI != LE; ++LI) {
2522 Instruction *NewReleaseRetain = *LI;
2523 MapVector<Value *, RRInfo>::const_iterator Jt =
2524 Retains.find(NewReleaseRetain);
2525 if (Jt == Retains.end())
2527 const RRInfo &NewReleaseRetainRRI = Jt->second;
2528 assert(NewReleaseRetainRRI.Calls.count(NewRelease));
2529 if (RetainsToMove.Calls.insert(NewReleaseRetain)) {
2530 unsigned PathCount =
2531 BBStates[NewReleaseRetain->getParent()].GetAllPathCount();
2532 OldDelta += PathCount;
2533 OldCount += PathCount;
2535 // Collect the optimal insertion points.
2537 for (SmallPtrSet<Instruction *, 2>::const_iterator
2538 RI = NewReleaseRetainRRI.ReverseInsertPts.begin(),
2539 RE = NewReleaseRetainRRI.ReverseInsertPts.end();
2541 Instruction *RIP = *RI;
2542 if (RetainsToMove.ReverseInsertPts.insert(RIP)) {
2543 PathCount = BBStates[RIP->getParent()].GetAllPathCount();
2544 NewDelta += PathCount;
2545 NewCount += PathCount;
2548 NewRetains.push_back(NewReleaseRetain);
2552 NewReleases.clear();
2553 if (NewRetains.empty()) break;
2556 // If the pointer is known incremented or nested, we can safely delete the
2557 // pair regardless of what's between them.
2558 if (KnownSafeTD || KnownSafeBU) {
2559 RetainsToMove.ReverseInsertPts.clear();
2560 ReleasesToMove.ReverseInsertPts.clear();
2563 // Determine whether the new insertion points we computed preserve the
2564 // balance of retain and release calls through the program.
2565 // TODO: If the fully aggressive solution isn't valid, try to find a
2566 // less aggressive solution which is.
2571 // Determine whether the original call points are balanced in the retain and
2572 // release calls through the program. If not, conservatively don't touch
2573 // anything.
2574 // TODO: It's theoretically possible to do code motion in this case, as
2575 // long as the existing imbalances are maintained.
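// A sketch of the bookkeeping above (hypothetical counts): a retain in a
// block with path count 2, paired with one release in each of two successor
// blocks of path count 1, contributes +2 and -1 -1 to OldDelta, netting
// zero, i.e. balanced. A nonzero delta means some path through the function
// would gain or lose a reference count operation, so the transformation
// must be rejected.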
2580 assert(OldCount != 0 && "Unreachable code?");
2581 NumRRs += OldCount - NewCount;
2582 // Set to true if we completely removed any RR pairs.
2583 AnyPairsCompletelyEliminated = NewCount == 0;
2585 // We can move calls!
2589 /// Identify pairings between the retains and releases, and delete and/or move them.
2592 ObjCARCOpt::PerformCodePlacement(DenseMap<const BasicBlock *, BBState>
2594 MapVector<Value *, RRInfo> &Retains,
2595 DenseMap<Value *, RRInfo> &Releases,
2597 DEBUG(dbgs() << "\n== ObjCARCOpt::PerformCodePlacement ==\n");
2599 bool AnyPairsCompletelyEliminated = false;
2600 RRInfo RetainsToMove;
2601 RRInfo ReleasesToMove;
2602 SmallVector<Instruction *, 4> NewRetains;
2603 SmallVector<Instruction *, 4> NewReleases;
2604 SmallVector<Instruction *, 8> DeadInsts;
2606 // Visit each retain.
2607 for (MapVector<Value *, RRInfo>::const_iterator I = Retains.begin(),
2608 E = Retains.end(); I != E; ++I) {
2609 Value *V = I->first;
2610 if (!V) continue; // blotted
2612 Instruction *Retain = cast<Instruction>(V);
2614 DEBUG(dbgs() << "Visiting: " << *Retain << "\n");
2616 Value *Arg = GetObjCArg(Retain);
2618 // If the object being released is in static or stack storage, we know it's
2619 // not being managed by ObjC reference counting, so we can delete pairs
2620 // regardless of what possible decrements or uses lie between them.
2621 bool KnownSafe = isa<Constant>(Arg) || isa<AllocaInst>(Arg);
2623 // A constant pointer can't be pointing to an object on the heap. It may
2624 // be reference-counted, but it won't be deleted.
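// For example (illustrative IR): a pointer loaded from a constant global
// cannot be a heap object that gets deallocated out from under us, so it is
// treated as KnownSafe:
//
//   @g = constant i8* ...            ; a constant global
//   %p = load i8** @g                ; %p won't be deleted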
2625 if (const LoadInst *LI = dyn_cast<LoadInst>(Arg))
2626 if (const GlobalVariable *GV =
2627 dyn_cast<GlobalVariable>(
2628 StripPointerCastsAndObjCCalls(LI->getPointerOperand())))
2629 if (GV->isConstant())
2632 // Connect the dots between the top-down-collected RetainsToMove and
2633 // bottom-up-collected ReleasesToMove to form sets of related calls.
2634 NewRetains.push_back(Retain);
2635 bool PerformMoveCalls =
2636 ConnectTDBUTraversals(BBStates, Retains, Releases, M, NewRetains,
2637 NewReleases, DeadInsts, RetainsToMove,
2638 ReleasesToMove, Arg, KnownSafe,
2639 AnyPairsCompletelyEliminated);
2641 #ifdef ARC_ANNOTATIONS
2642 // Do not move calls if ARC annotations are requested. If we were to move
2643 // calls in this case, we would not be able to match the annotations up with the calls they describe.
2644 PerformMoveCalls = PerformMoveCalls && !EnableARCAnnotations;
2645 #endif // ARC_ANNOTATIONS
2647 if (PerformMoveCalls) {
2648 // Ok, everything checks out and we're all set. Let's move/delete some code.
2650 MoveCalls(Arg, RetainsToMove, ReleasesToMove,
2651 Retains, Releases, DeadInsts, M);
2654 // Clean up state for next retain.
2655 NewReleases.clear();
2657 RetainsToMove.clear();
2658 ReleasesToMove.clear();
2661 // Now that we're done moving everything, we can delete the newly dead
2662 // instructions, as we no longer need them as insert points.
2663 while (!DeadInsts.empty())
2664 EraseInstruction(DeadInsts.pop_back_val());
2666 return AnyPairsCompletelyEliminated;
2669 /// Weak pointer optimizations.
2670 void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
2671 DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeWeakCalls ==\n");
2673 // First, do memdep-style RLE (redundant load elimination) and S2L
2674 // (store-to-load forwarding) optimizations. We can't use memdep itself
2675 // because it uses AliasAnalysis and we need to do provenance queries instead.
2676 for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
2677 Instruction *Inst = &*I++;
2679 DEBUG(dbgs() << "Visiting: " << *Inst << "\n");
2681 InstructionClass Class = GetBasicInstructionClass(Inst);
2682 if (Class != IC_LoadWeak && Class != IC_LoadWeakRetained)
2685 // Delete objc_loadWeak calls with no users.
2686 if (Class == IC_LoadWeak && Inst->use_empty()) {
2687 Inst->eraseFromParent();
2691 // TODO: For now, just look for an earlier available version of this value
2692 // within the same block. Theoretically, we could do memdep-style non-local
2693 // analysis too, but that would want caching. A better approach would be to
2694 // use the technique that EarlyCSE uses.
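// A minimal sketch of the RLE case handled below (hypothetical IR):
//
//   %1 = call i8* @objc_loadWeak(i8** %p)
//   ; ... nothing that could modify the weak pointer ...
//   %2 = call i8* @objc_loadWeak(i8** %p)   ; replaceable with %1
//
// The S2L case instead forwards the value stored by an earlier
// objc_storeWeak to a later objc_loadWeak of the same (MustAlias) pointer.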
2695 inst_iterator Current = llvm::prior(I);
2696 BasicBlock *CurrentBB = Current.getBasicBlockIterator();
2697 for (BasicBlock::iterator B = CurrentBB->begin(),
2698 J = Current.getInstructionIterator();
2700 Instruction *EarlierInst = &*llvm::prior(J);
2701 InstructionClass EarlierClass = GetInstructionClass(EarlierInst);
2702 switch (EarlierClass) {
2704 case IC_LoadWeakRetained: {
2705 // If this is loading from the same pointer, replace this load's value
2706 // with the earlier loaded value.
2707 CallInst *Call = cast<CallInst>(Inst);
2708 CallInst *EarlierCall = cast<CallInst>(EarlierInst);
2709 Value *Arg = Call->getArgOperand(0);
2710 Value *EarlierArg = EarlierCall->getArgOperand(0);
2711 switch (PA.getAA()->alias(Arg, EarlierArg)) {
2712 case AliasAnalysis::MustAlias:
2714 // If the load has a builtin retain, insert a plain retain for it.
2715 if (Class == IC_LoadWeakRetained) {
2717 CallInst::Create(getRetainCallee(F.getParent()), EarlierCall,
2721 // Zap the fully redundant load.
2722 Call->replaceAllUsesWith(EarlierCall);
2723 Call->eraseFromParent();
2725 case AliasAnalysis::MayAlias:
2726 case AliasAnalysis::PartialAlias:
2728 case AliasAnalysis::NoAlias:
2735 // If this is storing to the same pointer and has the same size etc.,
2736 // replace this load's value with the stored value.
2737 CallInst *Call = cast<CallInst>(Inst);
2738 CallInst *EarlierCall = cast<CallInst>(EarlierInst);
2739 Value *Arg = Call->getArgOperand(0);
2740 Value *EarlierArg = EarlierCall->getArgOperand(0);
2741 switch (PA.getAA()->alias(Arg, EarlierArg)) {
2742 case AliasAnalysis::MustAlias:
2744 // If the load has a builtin retain, insert a plain retain for it.
2745 if (Class == IC_LoadWeakRetained) {
2747 CallInst::Create(getRetainCallee(F.getParent()), EarlierCall,
2751 // Zap the fully redundant load.
2752 Call->replaceAllUsesWith(EarlierCall->getArgOperand(1));
2753 Call->eraseFromParent();
2755 case AliasAnalysis::MayAlias:
2756 case AliasAnalysis::PartialAlias:
2758 case AliasAnalysis::NoAlias:
2765 // TODO: Grab the copied value.
2767 case IC_AutoreleasepoolPush:
2769 case IC_IntrinsicUser:
2771 // Weak pointers are only modified through the weak entry points
2772 // (and arbitrary calls, which could call the weak entry points).
2775 // Anything else could modify the weak pointer.
2782 // Then, for each destroyWeak with an alloca operand, check to see if
2783 // the alloca and all its users can be zapped.
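// The shape being matched (an illustrative sketch): a weak variable that
// lives entirely in an alloca and is only touched through the weak entry
// points can be removed wholesale, e.g.
//
//   %w = alloca i8*
//   %0 = call i8* @objc_initWeak(i8** %w, i8* %x)
//   call void @objc_destroyWeak(i8** %w)
//
// Here objc_initWeak's result is replaced by its second argument (%x), and
// both calls and the alloca itself are deleted.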
2784 for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
2785 Instruction *Inst = &*I++;
2786 InstructionClass Class = GetBasicInstructionClass(Inst);
2787 if (Class != IC_DestroyWeak)
2790 CallInst *Call = cast<CallInst>(Inst);
2791 Value *Arg = Call->getArgOperand(0);
2792 if (AllocaInst *Alloca = dyn_cast<AllocaInst>(Arg)) {
2793 for (Value::use_iterator UI = Alloca->use_begin(),
2794 UE = Alloca->use_end(); UI != UE; ++UI) {
2795 const Instruction *UserInst = cast<Instruction>(*UI);
2796 switch (GetBasicInstructionClass(UserInst)) {
2799 case IC_DestroyWeak:
2806 for (Value::use_iterator UI = Alloca->use_begin(),
2807 UE = Alloca->use_end(); UI != UE; ) {
2808 CallInst *UserInst = cast<CallInst>(*UI++);
2809 switch (GetBasicInstructionClass(UserInst)) {
2812 // These functions return their second argument.
2813 UserInst->replaceAllUsesWith(UserInst->getArgOperand(1));
2815 case IC_DestroyWeak:
2819 llvm_unreachable("alloca really is used!");
2821 UserInst->eraseFromParent();
2823 Alloca->eraseFromParent();
2829 /// Identify program paths which execute sequences of retains and releases that
2830 /// can be eliminated.
2831 bool ObjCARCOpt::OptimizeSequences(Function &F) {
2832 /// Releases, Retains - These are used to store the results of the main flow
2833 /// analysis. These use Value* as the key instead of Instruction* so that the
2834 /// map stays valid when we get around to rewriting code and calls get
2835 /// replaced by arguments.
2836 DenseMap<Value *, RRInfo> Releases;
2837 MapVector<Value *, RRInfo> Retains;
2839 /// This is used during the traversal of the function to track the
2840 /// states for each identified object at each block.
2841 DenseMap<const BasicBlock *, BBState> BBStates;
2843 // Analyze the CFG of the function, and all instructions.
2844 bool NestingDetected = Visit(F, BBStates, Retains, Releases);
2847 return PerformCodePlacement(BBStates, Retains, Releases, F.getParent()) &&
2848 NestingDetected;
2851 /// Check if there is a dependent call earlier that does not have anything in
2852 /// between the Retain and the call that can affect the reference count of their
2853 /// shared pointer argument. Note that Retain need not be in BB.
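/// The shape being checked (hypothetical IR):
///
///   %call = call i8* @something(...)
///   ; nothing here may affect %call's reference count
///   %0 = call i8* @objc_retain(i8* %call)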
2855 HasSafePathToPredecessorCall(const Value *Arg, Instruction *Retain,
2856 SmallPtrSet<Instruction *, 4> &DepInsts,
2857 SmallPtrSet<const BasicBlock *, 4> &Visited,
2858 ProvenanceAnalysis &PA) {
2859 FindDependencies(CanChangeRetainCount, Arg, Retain->getParent(), Retain,
2860 DepInsts, Visited, PA);
2861 if (DepInsts.size() != 1)
2865 dyn_cast_or_null<CallInst>(*DepInsts.begin());
2867 // Check that the pointer is the return value of the call.
2868 if (!Call || Arg != Call)
2871 // Check that the call is a regular call.
2872 InstructionClass Class = GetBasicInstructionClass(Call);
2873 if (Class != IC_CallOrUser && Class != IC_Call)
2879 /// Find a dependent retain that precedes the given autorelease for which there
2880 /// is nothing in between the two instructions that can affect the ref count of
2881 /// Arg.
2883 FindPredecessorRetainWithSafePath(const Value *Arg, BasicBlock *BB,
2884 Instruction *Autorelease,
2885 SmallPtrSet<Instruction *, 4> &DepInsts,
2886 SmallPtrSet<const BasicBlock *, 4> &Visited,
2887 ProvenanceAnalysis &PA) {
2888 FindDependencies(CanChangeRetainCount, Arg,
2889 BB, Autorelease, DepInsts, Visited, PA);
2890 if (DepInsts.size() != 1)
2894 dyn_cast_or_null<CallInst>(*DepInsts.begin());
2896 // Check that we found a retain with the same argument.
2898 !IsRetain(GetBasicInstructionClass(Retain)) ||
2899 GetObjCArg(Retain) != Arg) {
2906 /// Look for an ``autorelease'' instruction dependent on Arg such that there are
2907 /// no instructions dependent on Arg that need a positive ref count in between
2908 /// the autorelease and the ret.
2910 FindPredecessorAutoreleaseWithSafePath(const Value *Arg, BasicBlock *BB,
2912 SmallPtrSet<Instruction *, 4> &DepInsts,
2913 SmallPtrSet<const BasicBlock *, 4> &V,
2914 ProvenanceAnalysis &PA) {
2915 FindDependencies(NeedsPositiveRetainCount, Arg,
2916 BB, Ret, DepInsts, V, PA);
2917 if (DepInsts.size() != 1)
2920 CallInst *Autorelease =
2921 dyn_cast_or_null<CallInst>(*DepInsts.begin());
2924 InstructionClass AutoreleaseClass = GetBasicInstructionClass(Autorelease);
2925 if (!IsAutorelease(AutoreleaseClass))
2927 if (GetObjCArg(Autorelease) != Arg)
2933 /// Look for this pattern:
2935 /// %call = call i8* @something(...)
2936 /// %2 = call i8* @objc_retain(i8* %call)
2937 /// %3 = call i8* @objc_autorelease(i8* %2)
2938 /// ret i8* %3
2940 /// And delete the retain and autorelease.
2941 void ObjCARCOpt::OptimizeReturns(Function &F) {
2942 if (!F.getReturnType()->isPointerTy())
2945 DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeReturns ==\n");
2947 SmallPtrSet<Instruction *, 4> DependingInstructions;
2948 SmallPtrSet<const BasicBlock *, 4> Visited;
2949 for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI) {
2950 BasicBlock *BB = FI;
2951 ReturnInst *Ret = dyn_cast<ReturnInst>(&BB->back());
2953 DEBUG(dbgs() << "Visiting: " << *Ret << "\n");
2958 const Value *Arg = StripPointerCastsAndObjCCalls(Ret->getOperand(0));
2960 // Look for an ``autorelease'' instruction that is a predecessor of Ret and
2961 // dependent on Arg such that there are no instructions dependent on Arg
2962 // that need a positive ref count in between the autorelease and Ret.
2963 CallInst *Autorelease =
2964 FindPredecessorAutoreleaseWithSafePath(Arg, BB, Ret,
2965 DependingInstructions, Visited,
2968 DependingInstructions.clear();
2972 FindPredecessorRetainWithSafePath(Arg, BB, Autorelease,
2973 DependingInstructions, Visited, PA);
2975 DependingInstructions.clear();
2978 // Check that there is nothing that can affect the reference count
2979 // between the retain and the call. Note that Retain need not be in BB.
2980 if (HasSafePathToPredecessorCall(Arg, Retain, DependingInstructions,
2982 // If so, we can zap the retain and autorelease.
2985 DEBUG(dbgs() << "Erasing: " << *Retain << "\nErasing: "
2986 << *Autorelease << "\n");
2987 EraseInstruction(Retain);
2988 EraseInstruction(Autorelease);
2993 DependingInstructions.clear();
2998 bool ObjCARCOpt::doInitialization(Module &M) {
3002 // If nothing in the Module uses ARC, don't do anything.
3003 Run = ModuleHasARC(M);
3007 // Identify the imprecise release metadata kind.
3008 ImpreciseReleaseMDKind =
3009 M.getContext().getMDKindID("clang.imprecise_release");
3010 CopyOnEscapeMDKind =
3011 M.getContext().getMDKindID("clang.arc.copy_on_escape");
3012 NoObjCARCExceptionsMDKind =
3013 M.getContext().getMDKindID("clang.arc.no_objc_arc_exceptions");
3014 #ifdef ARC_ANNOTATIONS
3015 ARCAnnotationBottomUpMDKind =
3016 M.getContext().getMDKindID("llvm.arc.annotation.bottomup");
3017 ARCAnnotationTopDownMDKind =
3018 M.getContext().getMDKindID("llvm.arc.annotation.topdown");
3019 ARCAnnotationProvenanceSourceMDKind =
3020 M.getContext().getMDKindID("llvm.arc.annotation.provenancesource");
3021 #endif // ARC_ANNOTATIONS
3023 // Intuitively, objc_retain and others are nocapture; however, in practice
3024 // they are not, because they return their argument value. And objc_release
3025 // calls finalizers which can have arbitrary side effects.
3027 // These are initialized lazily.
3029 AutoreleaseRVCallee = 0;
3032 RetainBlockCallee = 0;
3033 AutoreleaseCallee = 0;
3038 bool ObjCARCOpt::runOnFunction(Function &F) {
3042 // If nothing in the Module uses ARC, don't do anything.
3048 DEBUG(dbgs() << "<<< ObjCARCOpt: Visiting Function: " << F.getName() << " >>>"
3051 PA.setAA(&getAnalysis<AliasAnalysis>());
3053 // This pass performs several distinct transformations. As a compile-time aid
3054 // when compiling code that isn't ObjC, skip these if the relevant ObjC
3055 // library functions aren't declared.
3057 // Preliminary optimizations. This also computes UsedInThisFunction.
3058 OptimizeIndividualCalls(F);
3060 // Optimizations for weak pointers.
3061 if (UsedInThisFunction & ((1 << IC_LoadWeak) |
3062 (1 << IC_LoadWeakRetained) |
3063 (1 << IC_StoreWeak) |
3064 (1 << IC_InitWeak) |
3065 (1 << IC_CopyWeak) |
3066 (1 << IC_MoveWeak) |
3067 (1 << IC_DestroyWeak)))
3068 OptimizeWeakCalls(F);
3070 // Optimizations for retain+release pairs.
3071 if (UsedInThisFunction & ((1 << IC_Retain) |
3072 (1 << IC_RetainRV) |
3073 (1 << IC_RetainBlock)))
3074 if (UsedInThisFunction & (1 << IC_Release))
3075 // Run OptimizeSequences until it either stops making changes or
3076 // no retain+release pair nesting is detected.
3077 while (OptimizeSequences(F)) {}
3079 // Optimizations if objc_autorelease is used.
3080 if (UsedInThisFunction & ((1 << IC_Autorelease) |
3081 (1 << IC_AutoreleaseRV)))
3084 DEBUG(dbgs() << "\n");
3089 void ObjCARCOpt::releaseMemory() {