//===- ObjCARCOpts.cpp - ObjC ARC Optimization ---------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines ObjC ARC optimizations. ARC stands for Automatic
/// Reference Counting and is a system for managing reference counts for
/// objects in Objective C.
///
/// The optimizations performed include elimination of redundant, partially
/// redundant, and inconsequential reference count operations, elimination of
/// redundant weak pointer operations, and numerous minor simplifications.
///
/// WARNING: This file knows about certain library functions. It recognizes
/// them by name, and hardwires knowledge of their semantics.
///
/// WARNING: This file knows about how certain Objective-C library functions
/// are used. Naive LLVM IR transformations which would otherwise be
/// behavior-preserving may break these assumptions.
///
//===----------------------------------------------------------------------===//
#include "ObjCARC.h"
#include "ARCRuntimeEntryPoints.h"
#include "BlotMapVector.h"
#include "DependencyAnalysis.h"
#include "ObjCARCAliasAnalysis.h"
#include "ProvenanceAnalysis.h"
#include "PtrState.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace llvm::objcarc;

#define DEBUG_TYPE "objc-arc-opts"
/// \defgroup ARCUtilities Utility declarations/definitions specific to ARC.
/// @{

/// \brief This is similar to GetRCIdentityRoot but it stops as soon
/// as it finds a value with multiple uses.
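/// For illustration (hypothetical IR, not taken from a test case): given
///   %obj  = call i8* @make_object()   ; assumed ObjC-identified object
///   %cast = bitcast i8* %obj to i8*   ; the only use of %obj
/// starting from %cast, the walk steps through the single-use bitcast and
/// returns %obj; any value along the way with more than one non-trivial use
/// terminates the search.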
static const Value *FindSingleUseIdentifiedObject(const Value *Arg) {
  if (Arg->hasOneUse()) {
    if (const BitCastInst *BC = dyn_cast<BitCastInst>(Arg))
      return FindSingleUseIdentifiedObject(BC->getOperand(0));
    if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Arg))
      if (GEP->hasAllZeroIndices())
        return FindSingleUseIdentifiedObject(GEP->getPointerOperand());
    if (IsForwarding(GetBasicARCInstKind(Arg)))
      return FindSingleUseIdentifiedObject(
               cast<CallInst>(Arg)->getArgOperand(0));
    if (!IsObjCIdentifiedObject(Arg))
      return nullptr;
    return Arg;
  }

  // If we found an identifiable object, but it has multiple uses, and they
  // are all trivial uses, we can still consider this to be a single-use
  // value.
  if (IsObjCIdentifiedObject(Arg)) {
    for (const User *U : Arg->users())
      if (!U->use_empty() || GetRCIdentityRoot(U) != Arg)
        return nullptr;

    return Arg;
  }

  return nullptr;
}
/// This is a wrapper around getUnderlyingObjCPtr along the lines of
/// GetUnderlyingObjects except that it returns early when it sees the first
/// alloca.
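/// For example (illustrative IR): given
///   %buf = alloca i8
///   %p   = select i1 %c, i8* %buf, i8* %other
/// this returns true for %p, because one of its underlying objects is an
/// alloca.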
static inline bool AreAnyUnderlyingObjectsAnAlloca(const Value *V) {
  SmallPtrSet<const Value *, 4> Visited;
  SmallVector<const Value *, 4> Worklist;
  Worklist.push_back(V);
  do {
    const Value *P = Worklist.pop_back_val();
    P = GetUnderlyingObjCPtr(P);

    if (isa<AllocaInst>(P))
      return true;

    if (!Visited.insert(P).second)
      continue;

    if (const SelectInst *SI = dyn_cast<const SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (const PHINode *PN = dyn_cast<const PHINode>(P)) {
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        Worklist.push_back(PN->getIncomingValue(i));
      continue;
    }
  } while (!Worklist.empty());

  return false;
}
/// @}
///
/// \defgroup ARCOpt ARC Optimization.
/// @{

// TODO: On code like this:
//
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
// stuff_that_cannot_release()
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
//
// The second retain and autorelease can be deleted.

// TODO: It should be possible to delete
// objc_autoreleasePoolPush and objc_autoreleasePoolPop
// pairs if nothing is actually autoreleased between them. Also, autorelease
// calls followed by objc_autoreleasePoolPop calls (perhaps in ObjC++ code
// after inlining) can be turned into plain release calls.

// TODO: Critical-edge splitting. If the optimal insertion point is
// a critical edge, the current algorithm has to fail, because it doesn't
// know how to split edges. It should be possible to make the optimizer
// think in terms of edges, rather than blocks, and then split critical
// edges on demand.

// TODO: OptimizeSequences could be generalized to be interprocedural.

// TODO: Recognize that a bunch of other objc runtime calls have
// non-escaping arguments and non-releasing arguments, and may be
// non-autoreleasing.

// TODO: Sink autorelease calls as far as possible. Unfortunately we
// usually can't sink them past other calls, which would be the main
// case where it would be useful.

// TODO: The pointer returned from objc_loadWeakRetained is retained.

// TODO: Delete release+retain pairs (rare).
STATISTIC(NumNoops, "Number of no-op objc calls eliminated");
STATISTIC(NumPartialNoops, "Number of partially no-op objc calls eliminated");
STATISTIC(NumAutoreleases, "Number of autoreleases converted to releases");
STATISTIC(NumRets, "Number of return value forwarding "
                   "retain+autoreleases eliminated");
STATISTIC(NumRRs, "Number of retain+release paths eliminated");
STATISTIC(NumPeeps, "Number of calls peephole-optimized");

STATISTIC(NumRetainsBeforeOpt,
          "Number of retains before optimization");
STATISTIC(NumReleasesBeforeOpt,
          "Number of releases before optimization");
STATISTIC(NumRetainsAfterOpt,
          "Number of retains after optimization");
STATISTIC(NumReleasesAfterOpt,
          "Number of releases after optimization");
namespace {
  /// \brief Per-BasicBlock state.
  class BBState {
    /// The number of unique control paths from the entry which can reach
    /// this block.
    unsigned TopDownPathCount;

    /// The number of unique control paths to exits from this block.
    unsigned BottomUpPathCount;

    /// A type for PerPtrTopDown and PerPtrBottomUp.
    typedef BlotMapVector<const Value *, PtrState> MapTy;

    /// The top-down traversal uses this to record information known about a
    /// pointer at the bottom of each block.
    MapTy PerPtrTopDown;

    /// The bottom-up traversal uses this to record information known about a
    /// pointer at the top of each block.
    MapTy PerPtrBottomUp;

    /// Effective predecessors of the current block ignoring ignorable edges
    /// and ignored backedges.
    SmallVector<BasicBlock *, 2> Preds;

    /// Effective successors of the current block ignoring ignorable edges
    /// and ignored backedges.
    SmallVector<BasicBlock *, 2> Succs;

  public:
    static const unsigned OverflowOccurredValue;

    BBState() : TopDownPathCount(0), BottomUpPathCount(0) { }

    typedef MapTy::iterator ptr_iterator;
    typedef MapTy::const_iterator ptr_const_iterator;
    ptr_iterator top_down_ptr_begin() { return PerPtrTopDown.begin(); }
    ptr_iterator top_down_ptr_end() { return PerPtrTopDown.end(); }
    ptr_const_iterator top_down_ptr_begin() const {
      return PerPtrTopDown.begin();
    }
    ptr_const_iterator top_down_ptr_end() const {
      return PerPtrTopDown.end();
    }

    ptr_iterator bottom_up_ptr_begin() { return PerPtrBottomUp.begin(); }
    ptr_iterator bottom_up_ptr_end() { return PerPtrBottomUp.end(); }
    ptr_const_iterator bottom_up_ptr_begin() const {
      return PerPtrBottomUp.begin();
    }
    ptr_const_iterator bottom_up_ptr_end() const {
      return PerPtrBottomUp.end();
    }

    /// Mark this block as being an entry block, which has one path from the
    /// entry by definition.
    void SetAsEntry() { TopDownPathCount = 1; }

    /// Mark this block as being an exit block, which has one path to an
    /// exit by definition.
    void SetAsExit() { BottomUpPathCount = 1; }
    /// Attempt to find the PtrState object describing the top down state for
    /// pointer Arg. Return a new initialized PtrState describing the top
    /// down state for Arg if we do not find one.
    PtrState &getPtrTopDownState(const Value *Arg) {
      return PerPtrTopDown[Arg];
    }

    /// Attempt to find the PtrState object describing the bottom up state
    /// for pointer Arg. Return a new initialized PtrState describing the
    /// bottom up state for Arg if we do not find one.
    PtrState &getPtrBottomUpState(const Value *Arg) {
      return PerPtrBottomUp[Arg];
    }

    /// Attempt to find the PtrState object describing the bottom up state
    /// for pointer Arg.
    ptr_iterator findPtrBottomUpState(const Value *Arg) {
      return PerPtrBottomUp.find(Arg);
    }

    void clearBottomUpPointers() {
      PerPtrBottomUp.clear();
    }

    void clearTopDownPointers() {
      PerPtrTopDown.clear();
    }

    void InitFromPred(const BBState &Other);
    void InitFromSucc(const BBState &Other);
    void MergePred(const BBState &Other);
    void MergeSucc(const BBState &Other);
    /// Compute the number of possible unique paths from an entry to an exit
    /// which pass through this block. This is only valid after both the
    /// top-down and bottom-up traversals are complete.
    ///
    /// Returns true if overflow occurred. Returns false if overflow did not
    /// occur.
    bool GetAllPathCountWithOverflow(unsigned &PathCount) const {
      if (TopDownPathCount == OverflowOccurredValue ||
          BottomUpPathCount == OverflowOccurredValue)
        return true;
      unsigned long long Product =
        (unsigned long long)TopDownPathCount*BottomUpPathCount;
      // Overflow occurred if any of the upper bits of Product are set, or if
      // the lower bits of Product are all set (i.e. equal
      // OverflowOccurredValue).
      return (Product >> 32) ||
             ((PathCount = Product) == OverflowOccurredValue);
    }
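    // Worked example (illustrative, not from the source): in a diamond CFG
    // A -> {B, C} -> D, the top-down path counts are A=1, B=1, C=1, D=2 and
    // the bottom-up counts are D=1, B=1, C=1, A=2, so the number of
    // entry-to-exit paths through B is 1 * 1 = 1, and through A (or D) it is
    // 1 * 2 = 2.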
    // Specialized CFG utilities.
    typedef SmallVectorImpl<BasicBlock *>::const_iterator edge_iterator;
    edge_iterator pred_begin() const { return Preds.begin(); }
    edge_iterator pred_end() const { return Preds.end(); }
    edge_iterator succ_begin() const { return Succs.begin(); }
    edge_iterator succ_end() const { return Succs.end(); }

    void addSucc(BasicBlock *Succ) { Succs.push_back(Succ); }
    void addPred(BasicBlock *Pred) { Preds.push_back(Pred); }

    bool isExit() const { return Succs.empty(); }
  };
}

const unsigned BBState::OverflowOccurredValue = 0xffffffff;
void BBState::InitFromPred(const BBState &Other) {
  PerPtrTopDown = Other.PerPtrTopDown;
  TopDownPathCount = Other.TopDownPathCount;
}

void BBState::InitFromSucc(const BBState &Other) {
  PerPtrBottomUp = Other.PerPtrBottomUp;
  BottomUpPathCount = Other.BottomUpPathCount;
}
/// The top-down traversal uses this to merge information about predecessors
/// to form the initial state for a new block.
void BBState::MergePred(const BBState &Other) {
  if (TopDownPathCount == OverflowOccurredValue)
    return;

  // Other.TopDownPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  TopDownPathCount += Other.TopDownPathCount;

  // To be consistent, we clear the top down pointers when TopDownPathCount
  // becomes OverflowOccurredValue as a result of the addition, even though
  // "true" overflow has not occurred.
  if (TopDownPathCount == OverflowOccurredValue) {
    clearTopDownPointers();
    return;
  }

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (TopDownPathCount < Other.TopDownPathCount) {
    TopDownPathCount = OverflowOccurredValue;
    clearTopDownPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the same
  // key, merge the entries. Otherwise, copy the entry and merge it with an
  // empty entry.
  for (ptr_const_iterator MI = Other.top_down_ptr_begin(),
       ME = Other.top_down_ptr_end(); MI != ME; ++MI) {
    std::pair<ptr_iterator, bool> Pair = PerPtrTopDown.insert(*MI);
    Pair.first->second.Merge(Pair.second ? PtrState() : MI->second,
                             /*TopDown=*/true);
  }

  // For each entry in our set, if the other set doesn't have an entry with
  // the same key, force it to merge with an empty entry.
  for (ptr_iterator MI = top_down_ptr_begin(),
       ME = top_down_ptr_end(); MI != ME; ++MI)
    if (Other.PerPtrTopDown.find(MI->first) == Other.PerPtrTopDown.end())
      MI->second.Merge(PtrState(), /*TopDown=*/true);
}
/// The bottom-up traversal uses this to merge information about successors
/// to form the initial state for a new block.
void BBState::MergeSucc(const BBState &Other) {
  if (BottomUpPathCount == OverflowOccurredValue)
    return;

  // Other.BottomUpPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  BottomUpPathCount += Other.BottomUpPathCount;

  // To be consistent, we clear the bottom up pointers when BottomUpPathCount
  // becomes OverflowOccurredValue as a result of the addition, even though
  // "true" overflow has not occurred.
  if (BottomUpPathCount == OverflowOccurredValue) {
    clearBottomUpPointers();
    return;
  }

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (BottomUpPathCount < Other.BottomUpPathCount) {
    BottomUpPathCount = OverflowOccurredValue;
    clearBottomUpPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the
  // same key, merge the entries. Otherwise, copy the entry and merge
  // it with an empty entry.
  for (ptr_const_iterator MI = Other.bottom_up_ptr_begin(),
       ME = Other.bottom_up_ptr_end(); MI != ME; ++MI) {
    std::pair<ptr_iterator, bool> Pair = PerPtrBottomUp.insert(*MI);
    Pair.first->second.Merge(Pair.second ? PtrState() : MI->second,
                             /*TopDown=*/false);
  }

  // For each entry in our set, if the other set doesn't have an entry
  // with the same key, force it to merge with an empty entry.
  for (ptr_iterator MI = bottom_up_ptr_begin(),
       ME = bottom_up_ptr_end(); MI != ME; ++MI)
    if (Other.PerPtrBottomUp.find(MI->first) == Other.PerPtrBottomUp.end())
      MI->second.Merge(PtrState(), /*TopDown=*/false);
}
// Only enable ARC Annotations if we are building a debug version of
// the compiler.
#ifndef NDEBUG
#define ARC_ANNOTATIONS
#endif

// Define some macros along the lines of DEBUG and some helper functions to
// make it cleaner to create annotations in the source code and to no-op
// when not building in debug mode.
#ifdef ARC_ANNOTATIONS

#include "llvm/Support/CommandLine.h"

/// Enable/disable ARC sequence annotations.
static cl::opt<bool>
EnableARCAnnotations("enable-objc-arc-annotations", cl::init(false),
                     cl::desc("Enable emission of arc data flow analysis "
                              "annotations"));
static cl::opt<bool>
DisableCheckForCFGHazards("disable-objc-arc-checkforcfghazards",
                          cl::init(false),
                          cl::desc("Disable check for cfg hazards when "
                                   "annotating"));
static cl::opt<std::string>
ARCAnnotationTargetIdentifier("objc-arc-annotation-target-identifier",
                              cl::init(""),
                              cl::desc("filter out all data flow annotations "
                                       "but those that apply to the given "
                                       "target llvm identifier."));
/// This function appends a unique ARCAnnotationProvenanceSourceMDKind id to
/// an instruction so that we can track backwards when post processing via
/// the llvm arc annotation processor tool. If the value is an Argument
/// rather than an Instruction, a hash of the Argument's name is returned
/// instead.
static MDString *AppendMDNodeToSourcePtr(unsigned NodeId, Value *Ptr) {
  MDString *Hash = nullptr;

  // If pointer is a result of an instruction and it does not have a source
  // MDNode attached to it, attach a new MDNode onto it. If pointer is a
  // result of an instruction and does have a source MDNode attached to it,
  // return a reference to said Node. Otherwise just return 0.
  if (Instruction *Inst = dyn_cast<Instruction>(Ptr)) {
    MDNode *Node;
    if (!(Node = Inst->getMetadata(NodeId))) {
      // We do not have any node. Generate and attach the hash MDString to
      // the instruction.

      // We just use an MDString to ensure that this metadata gets written
      // out of line at the module level and to provide a very simple format
      // encoding the information herein. Both of these make it simpler to
      // parse the annotations with a simple external program.
      std::string Str;
      raw_string_ostream os(Str);
      os << "(" << Inst->getParent()->getParent()->getName() << ",%"
         << Inst->getName() << ")";

      Hash = MDString::get(Inst->getContext(), os.str());
      Inst->setMetadata(NodeId, MDNode::get(Inst->getContext(), Hash));
    } else {
      // We have a node. Grab its hash and return it.
      assert(Node->getNumOperands() == 1 &&
             "An ARCAnnotationProvenanceSourceMDKind can only have 1 "
             "operand.");
      Hash = cast<MDString>(Node->getOperand(0));
    }
  } else if (Argument *Arg = dyn_cast<Argument>(Ptr)) {
    std::string str;
    raw_string_ostream os(str);
    os << "(" << Arg->getParent()->getName() << ",%" << Arg->getName()
       << ")";
    Hash = MDString::get(Arg->getContext(), os.str());
  }

  return Hash;
}
static std::string SequenceToString(Sequence A) {
  std::string str;
  raw_string_ostream os(str);
  os << A;
  return os.str();
}

/// Helper function to change a Sequence into a String object using our
/// overload for raw_ostream so we only have printing code in one location.
static MDString *SequenceToMDString(LLVMContext &Context,
                                    Sequence A) {
  return MDString::get(Context, SequenceToString(A));
}

/// A simple function to generate a MDNode which describes the change in
/// state for Value *Ptr caused by Instruction *Inst.
static void AppendMDNodeToInstForPtr(unsigned NodeId,
                                     Instruction *Inst,
                                     Value *Ptr,
                                     MDString *PtrSourceMDNodeID,
                                     Sequence OldSeq,
                                     Sequence NewSeq) {
  MDNode *Node = nullptr;
  Metadata *tmp[3] = {PtrSourceMDNodeID,
                      SequenceToMDString(Inst->getContext(), OldSeq),
                      SequenceToMDString(Inst->getContext(), NewSeq)};
  Node = MDNode::get(Inst->getContext(), tmp);

  Inst->setMetadata(NodeId, Node);
}
/// Add to the beginning of the basic block llvm.ptr.annotations which show
/// the state of a pointer at the entrance to a basic block.
static void GenerateARCBBEntranceAnnotation(const char *Name, BasicBlock *BB,
                                            Value *Ptr, Sequence Seq) {
  // If we have a target identifier, make sure that we match it before
  // emitting an annotation.
  if (!ARCAnnotationTargetIdentifier.empty() &&
      !Ptr->getName().equals(ARCAnnotationTargetIdentifier))
    return;

  Module *M = BB->getParent()->getParent();
  LLVMContext &C = M->getContext();
  Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
  Type *I8XX = PointerType::getUnqual(I8X);
  Type *Params[] = {I8XX, I8XX};
  FunctionType *FTy = FunctionType::get(Type::getVoidTy(C), Params,
                                        /*isVarArg=*/false);
  Constant *Callee = M->getOrInsertFunction(Name, FTy);

  IRBuilder<> Builder(BB, BB->getFirstInsertionPt());

  Value *PtrName;
  StringRef Tmp = Ptr->getName();
  if (nullptr == (PtrName = M->getGlobalVariable(Tmp, true))) {
    Value *ActualPtrName = Builder.CreateGlobalStringPtr(Tmp,
                                                         Tmp + "_STR");
    PtrName = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
                                 cast<Constant>(ActualPtrName), Tmp);
  }

  Value *S;
  std::string SeqStr = SequenceToString(Seq);
  if (nullptr == (S = M->getGlobalVariable(SeqStr, true))) {
    Value *ActualPtrName = Builder.CreateGlobalStringPtr(SeqStr,
                                                         SeqStr + "_STR");
    S = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
                           cast<Constant>(ActualPtrName), SeqStr);
  }

  Builder.CreateCall2(Callee, PtrName, S);
}
/// Add to the end of the basic block llvm.ptr.annotations which show the
/// state of the pointer at the bottom of the basic block.
static void GenerateARCBBTerminatorAnnotation(const char *Name, BasicBlock *BB,
                                              Value *Ptr, Sequence Seq) {
  // If we have a target identifier, make sure that we match it before
  // emitting an annotation.
  if (!ARCAnnotationTargetIdentifier.empty() &&
      !Ptr->getName().equals(ARCAnnotationTargetIdentifier))
    return;

  Module *M = BB->getParent()->getParent();
  LLVMContext &C = M->getContext();
  Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
  Type *I8XX = PointerType::getUnqual(I8X);
  Type *Params[] = {I8XX, I8XX};
  FunctionType *FTy = FunctionType::get(Type::getVoidTy(C), Params,
                                        /*isVarArg=*/false);
  Constant *Callee = M->getOrInsertFunction(Name, FTy);

  IRBuilder<> Builder(BB, std::prev(BB->end()));

  Value *PtrName;
  StringRef Tmp = Ptr->getName();
  if (nullptr == (PtrName = M->getGlobalVariable(Tmp, true))) {
    Value *ActualPtrName = Builder.CreateGlobalStringPtr(Tmp,
                                                         Tmp + "_STR");
    PtrName = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
                                 cast<Constant>(ActualPtrName), Tmp);
  }

  Value *S;
  std::string SeqStr = SequenceToString(Seq);
  if (nullptr == (S = M->getGlobalVariable(SeqStr, true))) {
    Value *ActualPtrName = Builder.CreateGlobalStringPtr(SeqStr,
                                                         SeqStr + "_STR");
    S = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
                           cast<Constant>(ActualPtrName), SeqStr);
  }

  Builder.CreateCall2(Callee, PtrName, S);
}
/// Adds a source annotation to pointer and a state change annotation to Inst
/// referencing the source annotation and the old/new state of pointer.
static void GenerateARCAnnotation(unsigned InstMDId,
                                  unsigned PtrMDId,
                                  Instruction *Inst,
                                  Value *Ptr,
                                  Sequence OldSeq,
                                  Sequence NewSeq) {
  if (EnableARCAnnotations) {
    // If we have a target identifier, make sure that we match it before
    // emitting an annotation.
    if (!ARCAnnotationTargetIdentifier.empty() &&
        !Ptr->getName().equals(ARCAnnotationTargetIdentifier))
      return;

    // First generate the source annotation on our pointer. This will return
    // an MDString* if Ptr actually comes from an instruction, implying we
    // can put in a source annotation. If AppendMDNodeToSourcePtr returns 0
    // (i.e. NULL), then we know that our pointer is from an Argument, so we
    // put a reference to the argument number.
    //
    // The point of this is to make it easy for the
    // llvm-arc-annotation-processor tool to cross reference where the source
    // pointer is in the LLVM IR, since the LLVM IR parser does not submit
    // such information via debug info for backends to use (and why would
    // anyone need such a thing from LLVM IR besides in non-standard cases
    // like this?).
    MDString *SourcePtrMDNode =
      AppendMDNodeToSourcePtr(PtrMDId, Ptr);
    AppendMDNodeToInstForPtr(InstMDId, Inst, Ptr, SourcePtrMDNode, OldSeq,
                             NewSeq);
  }
}
// The actual interface for accessing the above functionality is defined via
// some simple macros which are defined below. We do this so that the user
// does not need to pass in what metadata id is needed, resulting in cleaner
// code. Additionally, it provides an easy way to conditionally no-op all
// annotation support in a non-debug build.
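// For example, callers in this file write
//   ANNOTATE_BOTTOMUP(Inst, Arg, S.GetSeq(), NewSeq);
// rather than calling GenerateARCAnnotation directly with the metadata kind
// ids.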
/// Use this macro to annotate a sequence state change when processing
/// instructions bottom up.
#define ANNOTATE_BOTTOMUP(inst, ptr, old, new)                          \
  GenerateARCAnnotation(ARCAnnotationBottomUpMDKind,                    \
                        ARCAnnotationProvenanceSourceMDKind, (inst),    \
                        const_cast<Value*>(ptr), (old), (new))
/// Use this macro to annotate a sequence state change when processing
/// instructions top down.
#define ANNOTATE_TOPDOWN(inst, ptr, old, new)                           \
  GenerateARCAnnotation(ARCAnnotationTopDownMDKind,                     \
                        ARCAnnotationProvenanceSourceMDKind, (inst),    \
                        const_cast<Value*>(ptr), (old), (new))

#define ANNOTATE_BB(_states, _bb, _name, _type, _direction)                   \
  do {                                                                        \
    if (EnableARCAnnotations) {                                               \
      for (BBState::ptr_const_iterator I = (_states)._direction##_ptr_begin(),\
           E = (_states)._direction##_ptr_end(); I != E; ++I) {               \
        Value *Ptr = const_cast<Value*>(I->first);                            \
        Sequence Seq = I->second.GetSeq();                                    \
        GenerateARCBB ## _type ## Annotation(_name, (_bb), Ptr, Seq);         \
      }                                                                       \
    }                                                                         \
  } while (0)

#define ANNOTATE_BOTTOMUP_BBSTART(_states, _basicblock)                       \
  ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.bottomup.bbstart",   \
              Entrance, bottom_up)
#define ANNOTATE_BOTTOMUP_BBEND(_states, _basicblock)                         \
  ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.bottomup.bbend",     \
              Terminator, bottom_up)
#define ANNOTATE_TOPDOWN_BBSTART(_states, _basicblock)                        \
  ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.topdown.bbstart",    \
              Entrance, top_down)
#define ANNOTATE_TOPDOWN_BBEND(_states, _basicblock)                          \
  ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.topdown.bbend",      \
              Terminator, top_down)
#else // !ARC_ANNOTATION

// If annotations are off, noop.
#define ANNOTATE_BOTTOMUP(inst, ptr, old, new)
#define ANNOTATE_TOPDOWN(inst, ptr, old, new)
#define ANNOTATE_BOTTOMUP_BBSTART(states, basicblock)
#define ANNOTATE_BOTTOMUP_BBEND(states, basicblock)
#define ANNOTATE_TOPDOWN_BBSTART(states, basicblock)
#define ANNOTATE_TOPDOWN_BBEND(states, basicblock)

#endif // !ARC_ANNOTATION
namespace {
  /// \brief The main ARC optimization pass.
  class ObjCARCOpt : public FunctionPass {
    bool Changed;
    ProvenanceAnalysis PA;
    ARCRuntimeEntryPoints EP;

    // This is used to track if a pointer is stored into an alloca.
    DenseSet<const Value *> MultiOwnersSet;

    /// A flag indicating whether this optimization pass should run.
    bool Run;

    /// Flags which determine whether each of the interesting runtime
    /// functions is in fact used in the current function.
    unsigned UsedInThisFunction;

    /// The Metadata Kind for clang.imprecise_release metadata.
    unsigned ImpreciseReleaseMDKind;

    /// The Metadata Kind for clang.arc.copy_on_escape metadata.
    unsigned CopyOnEscapeMDKind;

    /// The Metadata Kind for clang.arc.no_objc_arc_exceptions metadata.
    unsigned NoObjCARCExceptionsMDKind;

#ifdef ARC_ANNOTATIONS
    /// The Metadata Kind for llvm.arc.annotation.bottomup metadata.
    unsigned ARCAnnotationBottomUpMDKind;
    /// The Metadata Kind for llvm.arc.annotation.topdown metadata.
    unsigned ARCAnnotationTopDownMDKind;
    /// The Metadata Kind for llvm.arc.annotation.provenancesource metadata.
    unsigned ARCAnnotationProvenanceSourceMDKind;
#endif // ARC_ANNOTATIONS

    bool OptimizeRetainRVCall(Function &F, Instruction *RetainRV);
    void OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV,
                                   ARCInstKind &Class);
    void OptimizeIndividualCalls(Function &F);

    void CheckForCFGHazards(const BasicBlock *BB,
                            DenseMap<const BasicBlock *, BBState> &BBStates,
                            BBState &MyStates) const;
    bool VisitInstructionBottomUp(Instruction *Inst, BasicBlock *BB,
                                  BlotMapVector<Value *, RRInfo> &Retains,
                                  BBState &MyStates);
    bool VisitBottomUp(BasicBlock *BB,
                       DenseMap<const BasicBlock *, BBState> &BBStates,
                       BlotMapVector<Value *, RRInfo> &Retains);
    bool VisitInstructionTopDown(Instruction *Inst,
                                 DenseMap<Value *, RRInfo> &Releases,
                                 BBState &MyStates);
    bool VisitTopDown(BasicBlock *BB,
                      DenseMap<const BasicBlock *, BBState> &BBStates,
                      DenseMap<Value *, RRInfo> &Releases);
    bool Visit(Function &F, DenseMap<const BasicBlock *, BBState> &BBStates,
               BlotMapVector<Value *, RRInfo> &Retains,
               DenseMap<Value *, RRInfo> &Releases);

    void MoveCalls(Value *Arg, RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
                   BlotMapVector<Value *, RRInfo> &Retains,
                   DenseMap<Value *, RRInfo> &Releases,
                   SmallVectorImpl<Instruction *> &DeadInsts, Module *M);

    bool ConnectTDBUTraversals(DenseMap<const BasicBlock *, BBState> &BBStates,
                               BlotMapVector<Value *, RRInfo> &Retains,
                               DenseMap<Value *, RRInfo> &Releases, Module *M,
                               SmallVectorImpl<Instruction *> &NewRetains,
                               SmallVectorImpl<Instruction *> &NewReleases,
                               SmallVectorImpl<Instruction *> &DeadInsts,
                               RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
                               Value *Arg, bool KnownSafe,
                               bool &AnyPairsCompletelyEliminated);

    bool PerformCodePlacement(DenseMap<const BasicBlock *, BBState> &BBStates,
                              BlotMapVector<Value *, RRInfo> &Retains,
                              DenseMap<Value *, RRInfo> &Releases, Module *M);

    void OptimizeWeakCalls(Function &F);

    bool OptimizeSequences(Function &F);

    void OptimizeReturns(Function &F);

#ifndef NDEBUG
    void GatherStatistics(Function &F, bool AfterOptimization = false);
#endif

    void getAnalysisUsage(AnalysisUsage &AU) const override;
    bool doInitialization(Module &M) override;
    bool runOnFunction(Function &F) override;
    void releaseMemory() override;

  public:
    static char ID;
    ObjCARCOpt() : FunctionPass(ID) {
      initializeObjCARCOptPass(*PassRegistry::getPassRegistry());
    }
  };
}
char ObjCARCOpt::ID = 0;
INITIALIZE_PASS_BEGIN(ObjCARCOpt,
                      "objc-arc", "ObjC ARC optimization", false, false)
INITIALIZE_PASS_DEPENDENCY(ObjCARCAliasAnalysis)
INITIALIZE_PASS_END(ObjCARCOpt,
                    "objc-arc", "ObjC ARC optimization", false, false)

Pass *llvm::createObjCARCOptPass() {
  return new ObjCARCOpt();
}

void ObjCARCOpt::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<ObjCARCAliasAnalysis>();
  AU.addRequired<AliasAnalysis>();
  // ARC optimization doesn't currently split critical edges.
  AU.setPreservesCFG();
}
/// Turn objc_retainAutoreleasedReturnValue into objc_retain if the operand
/// is not a return value. Or, if it can be paired with an
/// objc_autoreleaseReturnValue, delete the pair and return true.
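/// For illustration (hypothetical IR): after inlining, the adjacent pair
///   %v = call i8* @objc_autoreleaseReturnValue(i8* %x)
///   %w = call i8* @objc_retainAutoreleasedReturnValue(i8* %x)
/// cancels out and both calls are erased. If instead the operand is not the
/// result of the immediately preceding call or invoke, the retainRV call is
/// rewritten into a plain objc_retain.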
bool
ObjCARCOpt::OptimizeRetainRVCall(Function &F, Instruction *RetainRV) {
  // Check for the argument being from an immediately preceding call or
  // invoke.
  const Value *Arg = GetArgRCIdentityRoot(RetainRV);
  ImmutableCallSite CS(Arg);
  if (const Instruction *Call = CS.getInstruction()) {
    if (Call->getParent() == RetainRV->getParent()) {
      BasicBlock::const_iterator I = Call;
      ++I;
      while (IsNoopInstruction(I)) ++I;
      if (&*I == RetainRV)
        return false;
    } else if (const InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
      BasicBlock *RetainRVParent = RetainRV->getParent();
      if (II->getNormalDest() == RetainRVParent) {
        BasicBlock::const_iterator I = RetainRVParent->begin();
        while (IsNoopInstruction(I)) ++I;
        if (&*I == RetainRV)
          return false;
      }
    }
  }

  // Check for being preceded by an objc_autoreleaseReturnValue on the same
  // pointer. In this case, we can delete the pair.
  BasicBlock::iterator I = RetainRV, Begin = RetainRV->getParent()->begin();
  if (I != Begin) {
    do --I; while (I != Begin && IsNoopInstruction(I));
    if (GetBasicARCInstKind(I) == ARCInstKind::AutoreleaseRV &&
        GetArgRCIdentityRoot(I) == Arg) {
      Changed = true;
      ++NumPeeps;

      DEBUG(dbgs() << "Erasing autoreleaseRV,retainRV pair: " << *I << "\n"
                   << "Erasing " << *RetainRV << "\n");

      EraseInstruction(I);
      EraseInstruction(RetainRV);
      return true;
    }
  }

  // Turn it to a plain objc_retain.
  Changed = true;
  ++NumPeeps;

  DEBUG(dbgs() << "Transforming objc_retainAutoreleasedReturnValue => "
                  "objc_retain since the operand is not a return value.\n"
                  "Old = " << *RetainRV << "\n");

  Constant *NewDecl = EP.get(ARCRuntimeEntryPoints::EPT_Retain);
  cast<CallInst>(RetainRV)->setCalledFunction(NewDecl);

  DEBUG(dbgs() << "New = " << *RetainRV << "\n");

  return false;
}
/// Turn objc_autoreleaseReturnValue into objc_autorelease if the result is
/// not used as a return value.
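/// For illustration (hypothetical IR): when no use of %x reaches a ret or a
/// retainRV call,
///   %v = tail call i8* @objc_autoreleaseReturnValue(i8* %x)
/// is rewritten to
///   %v = call i8* @objc_autorelease(i8* %x)
/// and the tail marker is dropped.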
void ObjCARCOpt::OptimizeAutoreleaseRVCall(Function &F,
                                           Instruction *AutoreleaseRV,
                                           ARCInstKind &Class) {
  // Check for a return of the pointer value.
  const Value *Ptr = GetArgRCIdentityRoot(AutoreleaseRV);
  SmallVector<const Value *, 2> Users;
  Users.push_back(Ptr);
  do {
    Ptr = Users.pop_back_val();
    for (const User *U : Ptr->users()) {
      if (isa<ReturnInst>(U) || GetBasicARCInstKind(U) == ARCInstKind::RetainRV)
        return;
      if (isa<BitCastInst>(U))
        Users.push_back(U);
    }
  } while (!Users.empty());

  Changed = true;
  ++NumPeeps;

  DEBUG(dbgs() << "Transforming objc_autoreleaseReturnValue => "
                  "objc_autorelease since its operand is not used as a return "
                  "value.\n"
                  "Old = " << *AutoreleaseRV << "\n");

  CallInst *AutoreleaseRVCI = cast<CallInst>(AutoreleaseRV);
  Constant *NewDecl = EP.get(ARCRuntimeEntryPoints::EPT_Autorelease);
  AutoreleaseRVCI->setCalledFunction(NewDecl);
  AutoreleaseRVCI->setTailCall(false); // Never tail call objc_autorelease.
  Class = ARCInstKind::Autorelease;

  DEBUG(dbgs() << "New: " << *AutoreleaseRV << "\n");
}
/// Visit each call, one at a time, and make simplifications without doing
/// any additional analysis.
void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
  DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeIndividualCalls ==\n");

  // Reset all the flags in preparation for recomputing them.
  UsedInThisFunction = 0;
  // Visit all objc_* calls in F.
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;

    ARCInstKind Class = GetBasicARCInstKind(Inst);

    DEBUG(dbgs() << "Visiting: Class: " << Class << "; " << *Inst << "\n");

    switch (Class) {
    default: break;

    // Delete no-op casts. These function calls have special semantics, but
    // the semantics are entirely implemented via lowering in the front-end,
    // so by the time they reach the optimizer, they are just no-op calls
    // which return their argument.
    //
    // There are gray areas here, as the ability to cast reference-counted
    // pointers to raw void* and back allows code to break ARC assumptions,
    // however these are currently considered to be unimportant.
    case ARCInstKind::NoopCast:
      Changed = true;
      ++NumNoops;
      DEBUG(dbgs() << "Erasing no-op cast: " << *Inst << "\n");
      EraseInstruction(Inst);
      continue;

    // If the pointer-to-weak-pointer is null, it's undefined behavior.
    case ARCInstKind::StoreWeak:
    case ARCInstKind::LoadWeak:
    case ARCInstKind::LoadWeakRetained:
    case ARCInstKind::InitWeak:
    case ARCInstKind::DestroyWeak: {
      CallInst *CI = cast<CallInst>(Inst);
      if (IsNullOrUndef(CI->getArgOperand(0))) {
        Changed = true;
        Type *Ty = CI->getArgOperand(0)->getType();
        new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
                      Constant::getNullValue(Ty),
                      CI);
        llvm::Value *NewValue = UndefValue::get(CI->getType());
        DEBUG(dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
                        "\nOld = " << *CI << "\nNew = " << *NewValue << "\n");
        CI->replaceAllUsesWith(NewValue);
        CI->eraseFromParent();
        continue;
      }
      break;
    }
    case ARCInstKind::CopyWeak:
    case ARCInstKind::MoveWeak: {
      CallInst *CI = cast<CallInst>(Inst);
      if (IsNullOrUndef(CI->getArgOperand(0)) ||
          IsNullOrUndef(CI->getArgOperand(1))) {
        Changed = true;
        Type *Ty = CI->getArgOperand(0)->getType();
        new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
                      Constant::getNullValue(Ty),
                      CI);

        llvm::Value *NewValue = UndefValue::get(CI->getType());
        DEBUG(dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
                        "\nOld = " << *CI << "\nNew = " << *NewValue << "\n");

        CI->replaceAllUsesWith(NewValue);
        CI->eraseFromParent();
        continue;
      }
      break;
    }
    case ARCInstKind::RetainRV:
      if (OptimizeRetainRVCall(F, Inst))
        continue;
      break;
    case ARCInstKind::AutoreleaseRV:
      OptimizeAutoreleaseRVCall(F, Inst, Class);
      break;
    }
    // objc_autorelease(x) -> objc_release(x) if x is otherwise unused.
    if (IsAutorelease(Class) && Inst->use_empty()) {
      CallInst *Call = cast<CallInst>(Inst);
      const Value *Arg = Call->getArgOperand(0);
      Arg = FindSingleUseIdentifiedObject(Arg);
      if (Arg) {
        Changed = true;
        ++NumAutoreleases;

        // Create the declaration lazily.
        LLVMContext &C = Inst->getContext();

        Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Release);
        CallInst *NewCall = CallInst::Create(Decl, Call->getArgOperand(0), "",
                                             Call);
        NewCall->setMetadata(ImpreciseReleaseMDKind, MDNode::get(C, None));

        DEBUG(dbgs() << "Replacing autorelease{,RV}(x) with objc_release(x) "
                        "since x is otherwise unused.\nOld: " << *Call
                     << "\nNew: " << *NewCall << "\n");

        EraseInstruction(Call);
        Inst = NewCall;
        Class = ARCInstKind::Release;
      }
    }
    // For functions which can never be passed stack arguments, add
    // a tail keyword.
    if (IsAlwaysTail(Class)) {
      Changed = true;
      DEBUG(dbgs() << "Adding tail keyword to function since it can never be "
                      "passed stack args: " << *Inst << "\n");
      cast<CallInst>(Inst)->setTailCall();
    }

    // Ensure that functions that can never have a "tail" keyword due to the
    // semantics of ARC truly do not do so.
    if (IsNeverTail(Class)) {
      Changed = true;
      DEBUG(dbgs() << "Removing tail keyword from function: " << *Inst
                   << "\n");
      cast<CallInst>(Inst)->setTailCall(false);
    }

    // Set nounwind as needed.
    if (IsNoThrow(Class)) {
      Changed = true;
      DEBUG(dbgs() << "Found no throw class. Setting nounwind on: " << *Inst
                   << "\n");
      cast<CallInst>(Inst)->setDoesNotThrow();
    }

    if (!IsNoopOnNull(Class)) {
      UsedInThisFunction |= 1 << unsigned(Class);
      continue;
    }

    const Value *Arg = GetArgRCIdentityRoot(Inst);

    // ARC calls with null are no-ops. Delete them.
    if (IsNullOrUndef(Arg)) {
      Changed = true;
      ++NumNoops;
      DEBUG(dbgs() << "ARC calls with null are no-ops. Erasing: " << *Inst
                   << "\n");
      EraseInstruction(Inst);
      continue;
    }

    // Keep track of which of retain, release, autorelease, and retain_block
    // are actually present in this function.
    UsedInThisFunction |= 1 << unsigned(Class);
    // If Arg is a PHI, and one or more incoming values to the
    // PHI are null, and the call is control-equivalent to the PHI, and there
    // are no relevant side effects between the PHI and the call, the call
    // could be pushed up to just those paths with non-null incoming values.
    // For now, don't bother splitting critical edges for this.
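    // For example (illustrative IR): given
    //   %p = phi i8* [ null, %bb1 ], [ %x, %bb2 ]
    //   call void @objc_release(i8* %p)
    // the release could be cloned into %bb2 alone, because releasing null is
    // a no-op on the %bb1 path.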
    SmallVector<std::pair<Instruction *, const Value *>, 4> Worklist;
    Worklist.push_back(std::make_pair(Inst, Arg));
    do {
      std::pair<Instruction *, const Value *> Pair = Worklist.pop_back_val();
      Inst = Pair.first;
      Arg = Pair.second;

      const PHINode *PN = dyn_cast<PHINode>(Arg);
      if (!PN) continue;

      // Determine if the PHI has any null operands, or any incoming
      // critical edges.
      bool HasNull = false;
      bool HasCriticalEdges = false;
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        Value *Incoming =
          GetRCIdentityRoot(PN->getIncomingValue(i));
        if (IsNullOrUndef(Incoming))
          HasNull = true;
        else if (cast<TerminatorInst>(PN->getIncomingBlock(i)->back())
                   .getNumSuccessors() != 1) {
          HasCriticalEdges = true;
          break;
        }
      }
      // If we have null operands and no critical edges, optimize.
      if (!HasCriticalEdges && HasNull) {
        SmallPtrSet<Instruction *, 4> DependingInstructions;
        SmallPtrSet<const BasicBlock *, 4> Visited;

        // Check that there is nothing that cares about the reference
        // count between the call and the phi.
        switch (Class) {
        case ARCInstKind::Retain:
        case ARCInstKind::RetainBlock:
          // These can always be moved up.
          break;
        case ARCInstKind::Release:
          // These can't be moved across things that care about the retain
          // count.
          FindDependencies(NeedsPositiveRetainCount, Arg,
                           Inst->getParent(), Inst,
                           DependingInstructions, Visited, PA);
          break;
        case ARCInstKind::Autorelease:
          // These can't be moved across autorelease pool scope boundaries.
          FindDependencies(AutoreleasePoolBoundary, Arg,
                           Inst->getParent(), Inst,
                           DependingInstructions, Visited, PA);
          break;
        case ARCInstKind::RetainRV:
        case ARCInstKind::AutoreleaseRV:
          // Don't move these; the RV optimization depends on the
          // autoreleaseRV being tail called, and the retainRV being
          // immediately after a call (which might still happen if we get
          // lucky with codegen layout, but it's not worth taking the
          // chance).
          continue;
        default:
          llvm_unreachable("Invalid dependence flavor");
        }

        if (DependingInstructions.size() == 1 &&
            *DependingInstructions.begin() == PN) {
          Changed = true;
          ++NumPartialNoops;
          // Clone the call into each predecessor that has a non-null value.
          CallInst *CInst = cast<CallInst>(Inst);
          Type *ParamTy = CInst->getArgOperand(0)->getType();
          for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
            Value *Incoming =
              GetRCIdentityRoot(PN->getIncomingValue(i));
            if (!IsNullOrUndef(Incoming)) {
              CallInst *Clone = cast<CallInst>(CInst->clone());
              Value *Op = PN->getIncomingValue(i);
              Instruction *InsertPos = &PN->getIncomingBlock(i)->back();
              if (Op->getType() != ParamTy)
                Op = new BitCastInst(Op, ParamTy, "", InsertPos);
              Clone->setArgOperand(0, Op);
              Clone->insertBefore(InsertPos);

              DEBUG(dbgs() << "Cloning "
                           << *CInst << "\n"
                              "And inserting clone at " << *InsertPos << "\n");
              Worklist.push_back(std::make_pair(Clone, Incoming));
            }
          }
          // Erase the original call.
          DEBUG(dbgs() << "Erasing: " << *CInst << "\n");
          EraseInstruction(CInst);
          continue;
        }
      }
    } while (!Worklist.empty());
  }
}
/// If we have a top down pointer in the S_Use state, make sure that there
/// are no CFG hazards by checking the states of various bottom up pointers.
static void CheckForUseCFGHazard(const Sequence SuccSSeq,
                                 const bool SuccSRRIKnownSafe,
                                 PtrState &S,
                                 bool &SomeSuccHasSame,
                                 bool &AllSuccsHaveSame,
                                 bool &NotAllSeqEqualButKnownSafe,
                                 bool &ShouldContinue) {
  switch (SuccSSeq) {
  case S_CanRelease: {
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe) {
      S.ClearSequenceProgress();
      break;
    }
    S.SetCFGHazardAfflicted(true);
    ShouldContinue = true;
    break;
  }
  case S_Use:
    SomeSuccHasSame = true;
    break;
  case S_Stop:
  case S_Release:
  case S_MovableRelease:
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
      AllSuccsHaveSame = false;
    else
      NotAllSeqEqualButKnownSafe = true;
    break;
  case S_Retain:
    llvm_unreachable("bottom-up pointer in retain state!");
  case S_None:
    llvm_unreachable("This should have been handled earlier.");
  }
}
/// If we have a Top Down pointer in the S_CanRelease state, make sure that
/// there are no CFG hazards by checking the states of various bottom up
/// pointers.
static void CheckForCanReleaseCFGHazard(const Sequence SuccSSeq,
                                        const bool SuccSRRIKnownSafe,
                                        PtrState &S,
                                        bool &SomeSuccHasSame,
                                        bool &AllSuccsHaveSame,
                                        bool &NotAllSeqEqualButKnownSafe) {
  switch (SuccSSeq) {
  case S_CanRelease:
    SomeSuccHasSame = true;
    break;
  case S_Stop:
  case S_Release:
  case S_MovableRelease:
  case S_Use:
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
      AllSuccsHaveSame = false;
    else
      NotAllSeqEqualButKnownSafe = true;
    break;
  case S_Retain:
    llvm_unreachable("bottom-up pointer in retain state!");
  case S_None:
    llvm_unreachable("This should have been handled earlier.");
  }
}
/// Check for critical edges, loop boundaries, irreducible control flow, or
/// other CFG structures where moving code across the edge would result in
/// it being executed more often.
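/// For example (illustrative): if a pointer is in S_Use at the bottom of a
/// block, one successor continues the sequence in S_Use while another has
/// already finished it (S_None), then hoisting a release above the branch
/// would also execute it on the finished path, so the sequence must be
/// forgotten or marked CFG-hazard-afflicted.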
void
ObjCARCOpt::CheckForCFGHazards(const BasicBlock *BB,
                               DenseMap<const BasicBlock *, BBState> &BBStates,
                               BBState &MyStates) const {
  // If any top-down local-use or possible-dec has a succ which is earlier
  // in the sequence, forget it.
  for (BBState::ptr_iterator I = MyStates.top_down_ptr_begin(),
       E = MyStates.top_down_ptr_end(); I != E; ++I) {
    PtrState &S = I->second;
    const Sequence Seq = I->second.GetSeq();

    // We only care about S_Retain, S_CanRelease, and S_Use.
    if (Seq == S_None)
      continue;

    // Make sure that if extra top down states are added in the future that
    // this code is updated to handle it.
    assert((Seq == S_Retain || Seq == S_CanRelease || Seq == S_Use) &&
           "Unknown top down sequence state.");

    const Value *Arg = I->first;
    const TerminatorInst *TI = cast<TerminatorInst>(&BB->back());
    bool SomeSuccHasSame = false;
    bool AllSuccsHaveSame = true;
    bool NotAllSeqEqualButKnownSafe = false;

    succ_const_iterator SI(TI), SE(TI, false);

    for (; SI != SE; ++SI) {
      // If VisitBottomUp has pointer information for this successor, take
      // what we know about it.
      const DenseMap<const BasicBlock *, BBState>::iterator BBI =
        BBStates.find(*SI);
      assert(BBI != BBStates.end());
      const PtrState &SuccS = BBI->second.getPtrBottomUpState(Arg);
      const Sequence SuccSSeq = SuccS.GetSeq();

      // If the pointer is in an S_None state bottom-up, clear the sequence
      // progress, since the sequence in the bottom up state finished,
      // suggesting a mismatch between retains and releases. This is true
      // for all three cases that we are handling here: S_Retain, S_Use, and
      // S_CanRelease.
      if (SuccSSeq == S_None) {
        S.ClearSequenceProgress();
        continue;
      }

      // If we have S_Use or S_CanRelease, perform our CFG hazard checks.
      const bool SuccSRRIKnownSafe = SuccS.IsKnownSafe();

      // *NOTE* We do not use Seq from above here since we are allowing for
      // S.GetSeq() to change while we are visiting basic blocks.
      switch(S.GetSeq()) {
      case S_Use: {
        bool ShouldContinue = false;
        CheckForUseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S, SomeSuccHasSame,
                             AllSuccsHaveSame, NotAllSeqEqualButKnownSafe,
                             ShouldContinue);
        if (ShouldContinue)
          continue;
        break;
      }
      case S_CanRelease: {
        CheckForCanReleaseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S,
                                    SomeSuccHasSame, AllSuccsHaveSame,
                                    NotAllSeqEqualButKnownSafe);
        break;
      }
      case S_Retain:
      case S_None:
      case S_Stop:
      case S_Release:
      case S_MovableRelease:
        break;
      }
    }

    // If the state at the other end of any of the successor edges
    // matches the current state, require all edges to match. This
    // guards against loops in the middle of a sequence.
    if (SomeSuccHasSame && !AllSuccsHaveSame) {
      S.ClearSequenceProgress();
    } else if (NotAllSeqEqualButKnownSafe) {
      // If we would have cleared the state foregoing the fact that we are
      // known safe, stop code motion. This is because whether or not it is
      // safe to remove RR pairs via KnownSafe is an orthogonal concept to
      // whether we are allowed to perform code motion.
      S.SetCFGHazardAfflicted(true);
    }
  }
}
bool ObjCARCOpt::VisitInstructionBottomUp(
    Instruction *Inst, BasicBlock *BB, BlotMapVector<Value *, RRInfo> &Retains,
    BBState &MyStates) {
  bool NestingDetected = false;
  ARCInstKind Class = GetARCInstKind(Inst);
  const Value *Arg = nullptr;

  DEBUG(dbgs() << "Class: " << Class << "\n");

  switch (Class) {
  case ARCInstKind::Release: {
    Arg = GetArgRCIdentityRoot(Inst);

    PtrState &S = MyStates.getPtrBottomUpState(Arg);

    // If we see two releases in a row on the same pointer, make a note;
    // we'll circle back to revisit it after we've hopefully eliminated the
    // second release, which may allow us to eliminate the first release
    // too.
    // Theoretically we could implement removal of nested retain+release
    // pairs by making PtrState hold a stack of states, but this is
    // simple and avoids adding overhead for the non-nested case.
    if (S.GetSeq() == S_Release || S.GetSeq() == S_MovableRelease) {
      DEBUG(dbgs() << "Found nested releases (i.e. a release pair)\n");
      NestingDetected = true;
    }

    MDNode *ReleaseMetadata = Inst->getMetadata(ImpreciseReleaseMDKind);
    Sequence NewSeq = ReleaseMetadata ? S_MovableRelease : S_Release;
    ANNOTATE_BOTTOMUP(Inst, Arg, S.GetSeq(), NewSeq);
    S.ResetSequenceProgress(NewSeq);
    S.SetReleaseMetadata(ReleaseMetadata);
    S.SetKnownSafe(S.HasKnownPositiveRefCount());
    S.SetTailCallRelease(cast<CallInst>(Inst)->isTailCall());
    S.InsertCall(Inst);
    S.SetKnownPositiveRefCount();
    break;
  }
  case ARCInstKind::RetainBlock:
    // In OptimizeIndividualCalls, we have strength reduced all optimizable
    // objc_retainBlocks to objc_retains. Thus at this point any
    // objc_retainBlocks that we see are not optimizable.
    break;
  case ARCInstKind::Retain:
  case ARCInstKind::RetainRV: {
    Arg = GetArgRCIdentityRoot(Inst);

    PtrState &S = MyStates.getPtrBottomUpState(Arg);
    S.SetKnownPositiveRefCount();

    Sequence OldSeq = S.GetSeq();
    switch (OldSeq) {
    case S_Stop:
    case S_Release:
    case S_MovableRelease:
    case S_Use:
      // If OldSeq is not S_Use or OldSeq is S_Use and we are tracking an
      // imprecise release, clear our reverse insertion points.
      if (OldSeq != S_Use || S.IsTrackingImpreciseReleases())
        S.ClearReverseInsertPts();
      // FALL THROUGH
    case S_CanRelease:
      // Don't do retain+release tracking for ARCInstKind::RetainRV, because
      // it's better to let it remain as the first instruction after a call.
      if (Class != ARCInstKind::RetainRV)
        Retains[Inst] = S.GetRRInfo();
      S.ClearSequenceProgress();
      break;
    case S_None:
      break;
    case S_Retain:
      llvm_unreachable("bottom-up pointer in retain state!");
    }
    ANNOTATE_BOTTOMUP(Inst, Arg, OldSeq, S.GetSeq());
    // A retain moving bottom up can be a use.
    break;
  }
  case ARCInstKind::AutoreleasepoolPop:
    // Conservatively, clear MyStates for all known pointers.
    MyStates.clearBottomUpPointers();
    return NestingDetected;
  case ARCInstKind::AutoreleasepoolPush:
  case ARCInstKind::None:
    // These are irrelevant.
    return NestingDetected;
  case ARCInstKind::User:
    // If we have a store into an alloca of a pointer we are tracking, the
    // pointer has multiple owners, implying that we must be more
    // conservative.
    //
    // This comes up in the context of a pointer being ``KnownSafe''. In the
    // presence of a block being initialized, the frontend will emit the
    // objc_retain on the original pointer and the release on the pointer
    // loaded from the alloca. The optimizer will, through the provenance
    // analysis, realize that the two are related, but since we only require
    // KnownSafe in one direction, it will match the inner retain on the
    // original pointer with the guard release on the original pointer. This
    // is fixed by ensuring that in the presence of allocas we only
    // unconditionally remove pointers if both our retain and our release
    // are KnownSafe.
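    //
    // For example (illustrative): when a block literal capturing %obj is
    // initialized, %obj is stored into the block's alloca; the release is
    // then emitted on the value reloaded from that alloca, so %obj
    // effectively has multiple owners.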
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      if (AreAnyUnderlyingObjectsAnAlloca(SI->getPointerOperand())) {
        BBState::ptr_iterator I = MyStates.findPtrBottomUpState(
          GetRCIdentityRoot(SI->getValueOperand()));
        if (I != MyStates.bottom_up_ptr_end())
          MultiOwnersSet.insert(I->first);
      }
    }
    break;
  default:
    break;
  }

  // Consider any other possible effects of this instruction on each
  // pointer being tracked.
  for (BBState::ptr_iterator MI = MyStates.bottom_up_ptr_begin(),
       ME = MyStates.bottom_up_ptr_end(); MI != ME; ++MI) {
    const Value *Ptr = MI->first;
    if (Ptr == Arg)
      continue; // Handled above.
    PtrState &S = MI->second;
    Sequence Seq = S.GetSeq();

    // Check for possible releases.
    if (CanAlterRefCount(Inst, Ptr, PA, Class)) {
      DEBUG(dbgs() << "CanAlterRefCount: Seq: " << Seq << "; " << *Ptr
                   << "\n");
      S.ClearKnownPositiveRefCount();
      switch (Seq) {
      case S_Use:
        S.SetSeq(S_CanRelease);
        ANNOTATE_BOTTOMUP(Inst, Ptr, Seq, S.GetSeq());
        continue;
      case S_CanRelease:
      case S_Release:
      case S_MovableRelease:
      case S_Stop:
      case S_None:
        break;
      case S_Retain:
        llvm_unreachable("bottom-up pointer in retain state!");
      }
    }

    // Check for possible direct uses.
    switch (Seq) {
    case S_Release:
    case S_MovableRelease:
      if (CanUse(Inst, Ptr, PA, Class)) {
        DEBUG(dbgs() << "CanUse: Seq: " << Seq << "; " << *Ptr
                     << "\n");
        assert(!S.HasReverseInsertPts());
        // If this is an invoke instruction, we're scanning it as part of
        // one of its successor blocks, since we can't insert code after it
        // in its own block, and we don't want to split critical edges.
        if (isa<InvokeInst>(Inst))
          S.InsertReverseInsertPt(BB->getFirstInsertionPt());
        else
          S.InsertReverseInsertPt(std::next(BasicBlock::iterator(Inst)));
        S.SetSeq(S_Use);
        ANNOTATE_BOTTOMUP(Inst, Ptr, Seq, S_Use);
      } else if (Seq == S_Release && IsUser(Class)) {
        DEBUG(dbgs() << "PreciseReleaseUse: Seq: " << Seq << "; " << *Ptr
                     << "\n");
        // Non-movable releases depend on any possible objc pointer use.
        S.SetSeq(S_Stop);
        ANNOTATE_BOTTOMUP(Inst, Ptr, S_Release, S_Stop);
        assert(!S.HasReverseInsertPts());
        // As above; handle invoke specially.
        if (isa<InvokeInst>(Inst))
          S.InsertReverseInsertPt(BB->getFirstInsertionPt());
        else
          S.InsertReverseInsertPt(std::next(BasicBlock::iterator(Inst)));
      }
      break;
    case S_Stop:
      if (CanUse(Inst, Ptr, PA, Class)) {
        DEBUG(dbgs() << "PreciseStopUse: Seq: " << Seq << "; " << *Ptr
                     << "\n");
        S.SetSeq(S_Use);
        ANNOTATE_BOTTOMUP(Inst, Ptr, Seq, S_Use);
      }
      break;
    case S_CanRelease:
    case S_Use:
    case S_None:
      break;
    case S_Retain:
      llvm_unreachable("bottom-up pointer in retain state!");
    }
  }

  return NestingDetected;
}
bool ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
                               DenseMap<const BasicBlock *, BBState> &BBStates,
                               BlotMapVector<Value *, RRInfo> &Retains) {
  DEBUG(dbgs() << "\n== ObjCARCOpt::VisitBottomUp ==\n");

  bool NestingDetected = false;
  BBState &MyStates = BBStates[BB];

  // Merge the states from each successor to compute the initial state
  // for the current block.
  BBState::edge_iterator SI(MyStates.succ_begin()),
                         SE(MyStates.succ_end());
  if (SI != SE) {
    const BasicBlock *Succ = *SI;
    DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Succ);
    assert(I != BBStates.end());
    MyStates.InitFromSucc(I->second);
    ++SI;
    for (; SI != SE; ++SI) {
      Succ = *SI;
      I = BBStates.find(Succ);
      assert(I != BBStates.end());
      MyStates.MergeSucc(I->second);
    }
  }

  // If ARC Annotations are enabled, output the current state of pointers at
  // the bottom of the basic block.
  ANNOTATE_BOTTOMUP_BBEND(MyStates, BB);

  // Visit all the instructions, bottom-up.
  for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; --I) {
    Instruction *Inst = std::prev(I);

    // Invoke instructions are visited as part of their successors (below).
    if (isa<InvokeInst>(Inst))
      continue;

    DEBUG(dbgs() << "Visiting " << *Inst << "\n");

    NestingDetected |= VisitInstructionBottomUp(Inst, BB, Retains, MyStates);
  }

  // If there's a predecessor with an invoke, visit the invoke as if it were
  // part of this block, since we can't insert code after an invoke in its
  // own block, and we don't want to split critical edges.
  for (BBState::edge_iterator PI(MyStates.pred_begin()),
       PE(MyStates.pred_end()); PI != PE; ++PI) {
    BasicBlock *Pred = *PI;
    if (InvokeInst *II = dyn_cast<InvokeInst>(&Pred->back()))
      NestingDetected |= VisitInstructionBottomUp(II, BB, Retains, MyStates);
  }

  // If ARC Annotations are enabled, output the current state of pointers at
  // the top of the basic block.
  ANNOTATE_BOTTOMUP_BBSTART(MyStates, BB);

  return NestingDetected;
}
bool
ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst,
                                    DenseMap<Value *, RRInfo> &Releases,
                                    BBState &MyStates) {
  bool NestingDetected = false;
  ARCInstKind Class = GetARCInstKind(Inst);
  const Value *Arg = nullptr;

  switch (Class) {
  case ARCInstKind::RetainBlock:
    // In OptimizeIndividualCalls, we have strength reduced all optimizable
    // objc_retainBlocks to objc_retains. Thus at this point any
    // objc_retainBlocks that we see are not optimizable.
    break;
  case ARCInstKind::Retain:
  case ARCInstKind::RetainRV: {
    Arg = GetArgRCIdentityRoot(Inst);

    PtrState &S = MyStates.getPtrTopDownState(Arg);

    // Don't do retain+release tracking for ARCInstKind::RetainRV, because
    // it's better to let it remain as the first instruction after a call.
    if (Class != ARCInstKind::RetainRV) {
      // If we see two retains in a row on the same pointer, make a note;
      // we'll circle back to revisit it after we've hopefully eliminated
      // the second retain, which may allow us to eliminate the first retain
      // too.
      // Theoretically we could implement removal of nested retain+release
      // pairs by making PtrState hold a stack of states, but this is
      // simple and avoids adding overhead for the non-nested case.
      if (S.GetSeq() == S_Retain)
        NestingDetected = true;

      ANNOTATE_TOPDOWN(Inst, Arg, S.GetSeq(), S_Retain);
      S.ResetSequenceProgress(S_Retain);
      S.SetKnownSafe(S.HasKnownPositiveRefCount());
      S.InsertCall(Inst);
    }

    S.SetKnownPositiveRefCount();

    // A retain can be a potential use; proceed to the generic checking
    // code below.
    break;
  }
  case ARCInstKind::Release: {
    Arg = GetArgRCIdentityRoot(Inst);

    PtrState &S = MyStates.getPtrTopDownState(Arg);
    S.ClearKnownPositiveRefCount();

    Sequence OldSeq = S.GetSeq();

    MDNode *ReleaseMetadata = Inst->getMetadata(ImpreciseReleaseMDKind);

    switch (OldSeq) {
    case S_Retain:
    case S_CanRelease:
      if (OldSeq == S_Retain || ReleaseMetadata != nullptr)
        S.ClearReverseInsertPts();
      // FALL THROUGH
    case S_Use:
      S.SetReleaseMetadata(ReleaseMetadata);
      S.SetTailCallRelease(cast<CallInst>(Inst)->isTailCall());
      Releases[Inst] = S.GetRRInfo();
      ANNOTATE_TOPDOWN(Inst, Arg, S.GetSeq(), S_None);
      S.ClearSequenceProgress();
      break;
    case S_None:
      break;
    case S_Stop:
    case S_Release:
    case S_MovableRelease:
      llvm_unreachable("top-down pointer in release state!");
    }
    break;
  }
  case ARCInstKind::AutoreleasepoolPop:
    // Conservatively, clear MyStates for all known pointers.
    MyStates.clearTopDownPointers();
    return NestingDetected;
  case ARCInstKind::AutoreleasepoolPush:
  case ARCInstKind::None:
    // These are irrelevant.
    return NestingDetected;
  default:
    break;
  }

  // Consider any other possible effects of this instruction on each
  // pointer being tracked.
  for (BBState::ptr_iterator MI = MyStates.top_down_ptr_begin(),
       ME = MyStates.top_down_ptr_end(); MI != ME; ++MI) {
    const Value *Ptr = MI->first;
    if (Ptr == Arg)
      continue; // Handled above.
    PtrState &S = MI->second;
    Sequence Seq = S.GetSeq();

    // Check for possible releases.
    if (CanAlterRefCount(Inst, Ptr, PA, Class)) {
      DEBUG(dbgs() << "CanAlterRefCount: Seq: " << Seq << "; " << *Ptr
                   << "\n");
      S.ClearKnownPositiveRefCount();
      switch (Seq) {
      case S_Retain:
        S.SetSeq(S_CanRelease);
        ANNOTATE_TOPDOWN(Inst, Ptr, Seq, S_CanRelease);
        assert(!S.HasReverseInsertPts());
        S.InsertReverseInsertPt(Inst);

        // One call can't cause a transition from S_Retain to S_CanRelease
        // and S_CanRelease to S_Use. If we've made the first transition,
        // we're done.
        continue;
      case S_Use:
      case S_CanRelease:
      case S_None:
        break;
      case S_Stop:
      case S_Release:
      case S_MovableRelease:
        llvm_unreachable("top-down pointer in release state!");
      }
    }

    // Check for possible direct uses.
    switch (Seq) {
    case S_CanRelease:
      if (CanUse(Inst, Ptr, PA, Class)) {
        DEBUG(dbgs() << "CanUse: Seq: " << Seq << "; " << *Ptr
                     << "\n");
        S.SetSeq(S_Use);
        ANNOTATE_TOPDOWN(Inst, Ptr, Seq, S_Use);
      }
      break;
    case S_Retain:
    case S_Use:
    case S_None:
      break;
    case S_Stop:
    case S_Release:
    case S_MovableRelease:
      llvm_unreachable("top-down pointer in release state!");
    }
  }

  return NestingDetected;
}
1727 ObjCARCOpt::VisitTopDown(BasicBlock *BB,
1728 DenseMap<const BasicBlock *, BBState> &BBStates,
1729 DenseMap<Value *, RRInfo> &Releases) {
1730 DEBUG(dbgs() << "\n== ObjCARCOpt::VisitTopDown ==\n");
1731 bool NestingDetected = false;
1732 BBState &MyStates = BBStates[BB];
1734 // Merge the states from each predecessor to compute the initial state
1735 // for the current block.
1736 BBState::edge_iterator PI(MyStates.pred_begin()),
1737 PE(MyStates.pred_end());
1739 const BasicBlock *Pred = *PI;
1740 DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Pred);
1741 assert(I != BBStates.end());
1742 MyStates.InitFromPred(I->second);
1744 for (; PI != PE; ++PI) {
1746 I = BBStates.find(Pred);
1747 assert(I != BBStates.end());
1748 MyStates.MergePred(I->second);
1752 // If ARC Annotations are enabled, output the current state of pointers at the
1753 // top of the basic block.
1754 ANNOTATE_TOPDOWN_BBSTART(MyStates, BB);
1756 // Visit all the instructions, top-down.
1757 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
1758 Instruction *Inst = I;
1760 DEBUG(dbgs() << "Visiting " << *Inst << "\n");
NestingDetected |= VisitInstructionTopDown(Inst, Releases, MyStates);
}
1765 // If ARC Annotations are enabled, output the current state of pointers at the
1766 // bottom of the basic block.
1767 ANNOTATE_TOPDOWN_BBEND(MyStates, BB);
1769 #ifdef ARC_ANNOTATIONS
if (!(EnableARCAnnotations && DisableCheckForCFGHazards))
#endif // ARC_ANNOTATIONS
  CheckForCFGHazards(BB, BBStates, MyStates);

return NestingDetected;
}
static void
ComputePostOrders(Function &F,
1778 SmallVectorImpl<BasicBlock *> &PostOrder,
1779 SmallVectorImpl<BasicBlock *> &ReverseCFGPostOrder,
1780 unsigned NoObjCARCExceptionsMDKind,
1781 DenseMap<const BasicBlock *, BBState> &BBStates) {
1782 /// The visited set, for doing DFS walks.
1783 SmallPtrSet<BasicBlock *, 16> Visited;
1785 // Do DFS, computing the PostOrder.
1786 SmallPtrSet<BasicBlock *, 16> OnStack;
1787 SmallVector<std::pair<BasicBlock *, succ_iterator>, 16> SuccStack;
1789 // Functions always have exactly one entry block, and we don't have
1790 // any other block that we treat like an entry block.
1791 BasicBlock *EntryBB = &F.getEntryBlock();
1792 BBState &MyStates = BBStates[EntryBB];
1793 MyStates.SetAsEntry();
1794 TerminatorInst *EntryTI = cast<TerminatorInst>(&EntryBB->back());
1795 SuccStack.push_back(std::make_pair(EntryBB, succ_iterator(EntryTI)));
1796 Visited.insert(EntryBB);
1797 OnStack.insert(EntryBB);
do {
dfs_next_succ:
  BasicBlock *CurrBB = SuccStack.back().first;
1801 TerminatorInst *TI = cast<TerminatorInst>(&CurrBB->back());
1802 succ_iterator SE(TI, false);
1804 while (SuccStack.back().second != SE) {
1805 BasicBlock *SuccBB = *SuccStack.back().second++;
1806 if (Visited.insert(SuccBB).second) {
1807 TerminatorInst *TI = cast<TerminatorInst>(&SuccBB->back());
1808 SuccStack.push_back(std::make_pair(SuccBB, succ_iterator(TI)));
1809 BBStates[CurrBB].addSucc(SuccBB);
1810 BBState &SuccStates = BBStates[SuccBB];
1811 SuccStates.addPred(CurrBB);
OnStack.insert(SuccBB);
  goto dfs_next_succ;
}
1816 if (!OnStack.count(SuccBB)) {
1817 BBStates[CurrBB].addSucc(SuccBB);
BBStates[SuccBB].addPred(CurrBB);
  }
}
1821 OnStack.erase(CurrBB);
1822 PostOrder.push_back(CurrBB);
1823 SuccStack.pop_back();
1824 } while (!SuccStack.empty());
1828 // Do reverse-CFG DFS, computing the reverse-CFG PostOrder.
// Functions may have many exits, and there are also blocks which we treat
1830 // as exits due to ignored edges.
1831 SmallVector<std::pair<BasicBlock *, BBState::edge_iterator>, 16> PredStack;
1832 for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
1833 BasicBlock *ExitBB = I;
1834 BBState &MyStates = BBStates[ExitBB];
if (!MyStates.isExit())
  continue;
1838 MyStates.SetAsExit();
1840 PredStack.push_back(std::make_pair(ExitBB, MyStates.pred_begin()));
1841 Visited.insert(ExitBB);
1842 while (!PredStack.empty()) {
1843 reverse_dfs_next_succ:
1844 BBState::edge_iterator PE = BBStates[PredStack.back().first].pred_end();
1845 while (PredStack.back().second != PE) {
1846 BasicBlock *BB = *PredStack.back().second++;
1847 if (Visited.insert(BB).second) {
1848 PredStack.push_back(std::make_pair(BB, BBStates[BB].pred_begin()));
      goto reverse_dfs_next_succ;
      }
    }
    ReverseCFGPostOrder.push_back(PredStack.pop_back_val().first);
  }
}
}
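// Worked example (not part of the original source): for a diamond CFG with
// edges entry->a, entry->b, a->exit, b->exit, the forward DFS above might
// produce PostOrder = {exit, a, b, entry} and the reverse-CFG DFS
// ReverseCFGPostOrder = {entry, a, b, exit}; the interleaving of a and b
// depends on edge iteration order. The visitors below consume both vectors
// back-to-front, i.e. in reverse post-order.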
1857 // Visit the function both top-down and bottom-up.
1858 bool ObjCARCOpt::Visit(Function &F,
1859 DenseMap<const BasicBlock *, BBState> &BBStates,
1860 BlotMapVector<Value *, RRInfo> &Retains,
1861 DenseMap<Value *, RRInfo> &Releases) {
1863 // Use reverse-postorder traversals, because we magically know that loops
1864 // will be well behaved, i.e. they won't repeatedly call retain on a single
1865 // pointer without doing a release. We can't use the ReversePostOrderTraversal
1866 // class here because we want the reverse-CFG postorder to consider each
1867 // function exit point, and we want to ignore selected cycle edges.
1868 SmallVector<BasicBlock *, 16> PostOrder;
1869 SmallVector<BasicBlock *, 16> ReverseCFGPostOrder;
1870 ComputePostOrders(F, PostOrder, ReverseCFGPostOrder,
NoObjCARCExceptionsMDKind,
                  BBStates);
1874 // Use reverse-postorder on the reverse CFG for bottom-up.
1875 bool BottomUpNestingDetected = false;
1876 for (SmallVectorImpl<BasicBlock *>::const_reverse_iterator I =
ReverseCFGPostOrder.rbegin(), E = ReverseCFGPostOrder.rend();
     I != E; ++I)
1879 BottomUpNestingDetected |= VisitBottomUp(*I, BBStates, Retains);
1881 // Use reverse-postorder for top-down.
1882 bool TopDownNestingDetected = false;
1883 for (SmallVectorImpl<BasicBlock *>::const_reverse_iterator I =
PostOrder.rbegin(), E = PostOrder.rend();
     I != E; ++I)
1886 TopDownNestingDetected |= VisitTopDown(*I, BBStates, Releases);
return TopDownNestingDetected && BottomUpNestingDetected;
}
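// Illustrative sketch (hypothetical IR, not from the original source): in
// the simplest case the two traversals pair a retain directly with its
// release, both ReverseInsertPts sets stay empty, and MoveCalls below
// inserts nothing and simply deletes the pair:
//
//   %0 = call i8* @objc_retain(i8* %x)   ; collected into RetainsToMove
//   ... no decrements or uses of %x ...
//   call void @objc_release(i8* %x)      ; collected into ReleasesToMove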
1891 /// Move the calls in RetainsToMove and ReleasesToMove.
1892 void ObjCARCOpt::MoveCalls(Value *Arg, RRInfo &RetainsToMove,
1893 RRInfo &ReleasesToMove,
1894 BlotMapVector<Value *, RRInfo> &Retains,
1895 DenseMap<Value *, RRInfo> &Releases,
SmallVectorImpl<Instruction *> &DeadInsts,
                           Module *M) {
1898 Type *ArgTy = Arg->getType();
1899 Type *ParamTy = PointerType::getUnqual(Type::getInt8Ty(ArgTy->getContext()));
1901 DEBUG(dbgs() << "== ObjCARCOpt::MoveCalls ==\n");
1903 // Insert the new retain and release calls.
1904 for (Instruction *InsertPt : ReleasesToMove.ReverseInsertPts) {
1905 Value *MyArg = ArgTy == ParamTy ? Arg :
1906 new BitCastInst(Arg, ParamTy, "", InsertPt);
1907 Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Retain);
1908 CallInst *Call = CallInst::Create(Decl, MyArg, "", InsertPt);
1909 Call->setDoesNotThrow();
1910 Call->setTailCall();
1912 DEBUG(dbgs() << "Inserting new Retain: " << *Call << "\n"
1913 "At insertion point: " << *InsertPt << "\n");
1915 for (Instruction *InsertPt : RetainsToMove.ReverseInsertPts) {
1916 Value *MyArg = ArgTy == ParamTy ? Arg :
1917 new BitCastInst(Arg, ParamTy, "", InsertPt);
1918 Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Release);
1919 CallInst *Call = CallInst::Create(Decl, MyArg, "", InsertPt);
1920 // Attach a clang.imprecise_release metadata tag, if appropriate.
1921 if (MDNode *M = ReleasesToMove.ReleaseMetadata)
1922 Call->setMetadata(ImpreciseReleaseMDKind, M);
1923 Call->setDoesNotThrow();
1924 if (ReleasesToMove.IsTailCallRelease)
1925 Call->setTailCall();
1927 DEBUG(dbgs() << "Inserting new Release: " << *Call << "\n"
1928 "At insertion point: " << *InsertPt << "\n");
1931 // Delete the original retain and release calls.
1932 for (Instruction *OrigRetain : RetainsToMove.Calls) {
1933 Retains.blot(OrigRetain);
1934 DeadInsts.push_back(OrigRetain);
DEBUG(dbgs() << "Deleting retain: " << *OrigRetain << "\n");
}
1937 for (Instruction *OrigRelease : ReleasesToMove.Calls) {
1938 Releases.erase(OrigRelease);
1939 DeadInsts.push_back(OrigRelease);
DEBUG(dbgs() << "Deleting release: " << *OrigRelease << "\n");
}
}
1945 bool ObjCARCOpt::ConnectTDBUTraversals(
1946 DenseMap<const BasicBlock *, BBState> &BBStates,
1947 BlotMapVector<Value *, RRInfo> &Retains,
1948 DenseMap<Value *, RRInfo> &Releases, Module *M,
1949 SmallVectorImpl<Instruction *> &NewRetains,
1950 SmallVectorImpl<Instruction *> &NewReleases,
1951 SmallVectorImpl<Instruction *> &DeadInsts, RRInfo &RetainsToMove,
1952 RRInfo &ReleasesToMove, Value *Arg, bool KnownSafe,
1953 bool &AnyPairsCompletelyEliminated) {
1954 // If a pair happens in a region where it is known that the reference count
1955 // is already incremented, we can similarly ignore possible decrements unless
1956 // we are dealing with a retainable object with multiple provenance sources.
1957 bool KnownSafeTD = true, KnownSafeBU = true;
1958 bool MultipleOwners = false;
1959 bool CFGHazardAfflicted = false;
1961 // Connect the dots between the top-down-collected RetainsToMove and
1962 // bottom-up-collected ReleasesToMove to form sets of related calls.
1963 // This is an iterative process so that we connect multiple releases
1964 // to multiple retains if needed.
1965 unsigned OldDelta = 0;
1966 unsigned NewDelta = 0;
1967 unsigned OldCount = 0;
1968 unsigned NewCount = 0;
1969 bool FirstRelease = true;
for (;;) {
  for (SmallVectorImpl<Instruction *>::const_iterator
       NI = NewRetains.begin(), NE = NewRetains.end(); NI != NE; ++NI) {
1973 Instruction *NewRetain = *NI;
1974 BlotMapVector<Value *, RRInfo>::const_iterator It =
1975 Retains.find(NewRetain);
1976 assert(It != Retains.end());
1977 const RRInfo &NewRetainRRI = It->second;
1978 KnownSafeTD &= NewRetainRRI.KnownSafe;
MultipleOwners =
    MultipleOwners || MultiOwnersSet.count(GetArgRCIdentityRoot(NewRetain));
1981 for (Instruction *NewRetainRelease : NewRetainRRI.Calls) {
1982 DenseMap<Value *, RRInfo>::const_iterator Jt =
1983 Releases.find(NewRetainRelease);
if (Jt == Releases.end())
  continue;
1986 const RRInfo &NewRetainReleaseRRI = Jt->second;
1988 // If the release does not have a reference to the retain as well,
1989 // something happened which is unaccounted for. Do not do anything.
// This can happen if we catch an additive overflow during path count
// merging.
if (!NewRetainReleaseRRI.Calls.count(NewRetain))
  return false;
1996 if (ReleasesToMove.Calls.insert(NewRetainRelease).second) {
// If we overflow when we compute the path count, don't remove/move
// anything.
2000 const BBState &NRRBBState = BBStates[NewRetainRelease->getParent()];
2001 unsigned PathCount = BBState::OverflowOccurredValue;
if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
  return false;
2004 assert(PathCount != BBState::OverflowOccurredValue &&
2005 "PathCount at this point can not be "
2006 "OverflowOccurredValue.");
2007 OldDelta -= PathCount;
// Merge the ReleaseMetadata and IsTailCallRelease values.
if (FirstRelease) {
  ReleasesToMove.ReleaseMetadata =
    NewRetainReleaseRRI.ReleaseMetadata;
  ReleasesToMove.IsTailCallRelease =
    NewRetainReleaseRRI.IsTailCallRelease;
  FirstRelease = false;
} else {
  if (ReleasesToMove.ReleaseMetadata !=
        NewRetainReleaseRRI.ReleaseMetadata)
    ReleasesToMove.ReleaseMetadata = nullptr;
  if (ReleasesToMove.IsTailCallRelease !=
        NewRetainReleaseRRI.IsTailCallRelease)
    ReleasesToMove.IsTailCallRelease = false;
}
2025 // Collect the optimal insertion points.
if (!KnownSafe)
  for (Instruction *RIP : NewRetainReleaseRRI.ReverseInsertPts) {
2028 if (ReleasesToMove.ReverseInsertPts.insert(RIP).second) {
2029 // If we overflow when we compute the path count, don't
2030 // remove/move anything.
2031 const BBState &RIPBBState = BBStates[RIP->getParent()];
2032 PathCount = BBState::OverflowOccurredValue;
if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
  return false;
2035 assert(PathCount != BBState::OverflowOccurredValue &&
2036 "PathCount at this point can not be "
2037 "OverflowOccurredValue.");
      NewDelta -= PathCount;
      }
    }
  NewReleases.push_back(NewRetainRelease);
  }
  }
  }
  NewRetains.clear();
  if (NewReleases.empty()) break;
2048 // Back the other way.
2049 for (SmallVectorImpl<Instruction *>::const_iterator
2050 NI = NewReleases.begin(), NE = NewReleases.end(); NI != NE; ++NI) {
2051 Instruction *NewRelease = *NI;
2052 DenseMap<Value *, RRInfo>::const_iterator It =
2053 Releases.find(NewRelease);
2054 assert(It != Releases.end());
2055 const RRInfo &NewReleaseRRI = It->second;
2056 KnownSafeBU &= NewReleaseRRI.KnownSafe;
2057 CFGHazardAfflicted |= NewReleaseRRI.CFGHazardAfflicted;
2058 for (Instruction *NewReleaseRetain : NewReleaseRRI.Calls) {
2059 BlotMapVector<Value *, RRInfo>::const_iterator Jt =
2060 Retains.find(NewReleaseRetain);
if (Jt == Retains.end())
  continue;
2063 const RRInfo &NewReleaseRetainRRI = Jt->second;
2065 // If the retain does not have a reference to the release as well,
2066 // something happened which is unaccounted for. Do not do anything.
// This can happen if we catch an additive overflow during path count
// merging.
if (!NewReleaseRetainRRI.Calls.count(NewRelease))
  return false;
2073 if (RetainsToMove.Calls.insert(NewReleaseRetain).second) {
// If we overflow when we compute the path count, don't remove/move
// anything.
2076 const BBState &NRRBBState = BBStates[NewReleaseRetain->getParent()];
2077 unsigned PathCount = BBState::OverflowOccurredValue;
if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
  return false;
2080 assert(PathCount != BBState::OverflowOccurredValue &&
2081 "PathCount at this point can not be "
2082 "OverflowOccurredValue.");
2083 OldDelta += PathCount;
2084 OldCount += PathCount;
2086 // Collect the optimal insertion points.
if (!KnownSafe)
  for (Instruction *RIP : NewReleaseRetainRRI.ReverseInsertPts) {
2089 if (RetainsToMove.ReverseInsertPts.insert(RIP).second) {
2090 // If we overflow when we compute the path count, don't
2091 // remove/move anything.
2092 const BBState &RIPBBState = BBStates[RIP->getParent()];
2094 PathCount = BBState::OverflowOccurredValue;
if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
  return false;
2097 assert(PathCount != BBState::OverflowOccurredValue &&
2098 "PathCount at this point can not be "
2099 "OverflowOccurredValue.");
2100 NewDelta += PathCount;
      NewCount += PathCount;
      }
    }
  NewRetains.push_back(NewReleaseRetain);
  }
  }
  }
  NewReleases.clear();
  if (NewRetains.empty()) break;
}
2112 // If the pointer is known incremented in 1 direction and we do not have
2113 // MultipleOwners, we can safely remove the retain/releases. Otherwise we need
2114 // to be known safe in both directions.
2115 bool UnconditionallySafe = (KnownSafeTD && KnownSafeBU) ||
2116 ((KnownSafeTD || KnownSafeBU) && !MultipleOwners);
2117 if (UnconditionallySafe) {
2118 RetainsToMove.ReverseInsertPts.clear();
ReleasesToMove.ReverseInsertPts.clear();
  NewDelta = 0;
} else {
2122 // Determine whether the new insertion points we computed preserve the
2123 // balance of retain and release calls through the program.
2124 // TODO: If the fully aggressive solution isn't valid, try to find a
// less aggressive solution which is.
if (NewDelta != 0)
  return false;
2129 // At this point, we are not going to remove any RR pairs, but we still are
2130 // able to move RR pairs. If one of our pointers is afflicted with
2131 // CFGHazards, we cannot perform such code motion so exit early.
2132 const bool WillPerformCodeMotion = RetainsToMove.ReverseInsertPts.size() ||
2133 ReleasesToMove.ReverseInsertPts.size();
if (CFGHazardAfflicted && WillPerformCodeMotion)
  return false;
}
2138 // Determine whether the original call points are balanced in the retain and
// release calls through the program. If not, conservatively don't touch
// them.
2141 // TODO: It's theoretically possible to do code motion in this case, as
// long as the existing imbalances are maintained.
if (OldDelta != 0)
  return false;
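// Worked example of the balance bookkeeping (not from the original
// source): a release whose block is reachable along 2 paths contributes
// -2 to OldDelta; two matching retains in single-path blocks contribute
// +1 each, restoring OldDelta to 0. NewDelta tracks the same sum for the
// proposed insertion points. Any nonzero residue means some path would
// see an unbalanced reference count, so the checks above bail out.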
2146 #ifdef ARC_ANNOTATIONS
2147 // Do not move calls if ARC annotations are requested.
if (EnableARCAnnotations)
  return false;
2150 #endif // ARC_ANNOTATIONS
Changed = true;
assert(OldCount != 0 && "Unreachable code?");
2154 NumRRs += OldCount - NewCount;
2155 // Set to true if we completely removed any RR pairs.
2156 AnyPairsCompletelyEliminated = NewCount == 0;
// We can move calls!
return true;
}
/// Identify pairings between the retains and releases, and delete and/or move
/// them.
2164 bool ObjCARCOpt::PerformCodePlacement(
2165 DenseMap<const BasicBlock *, BBState> &BBStates,
2166 BlotMapVector<Value *, RRInfo> &Retains,
2167 DenseMap<Value *, RRInfo> &Releases, Module *M) {
2168 DEBUG(dbgs() << "\n== ObjCARCOpt::PerformCodePlacement ==\n");
2170 bool AnyPairsCompletelyEliminated = false;
2171 RRInfo RetainsToMove;
2172 RRInfo ReleasesToMove;
2173 SmallVector<Instruction *, 4> NewRetains;
2174 SmallVector<Instruction *, 4> NewReleases;
2175 SmallVector<Instruction *, 8> DeadInsts;
2177 // Visit each retain.
for (BlotMapVector<Value *, RRInfo>::const_iterator I = Retains.begin(),
     E = Retains.end(); I != E; ++I) {
2181 Value *V = I->first;
2182 if (!V) continue; // blotted
2184 Instruction *Retain = cast<Instruction>(V);
2186 DEBUG(dbgs() << "Visiting: " << *Retain << "\n");
2188 Value *Arg = GetArgRCIdentityRoot(Retain);
2190 // If the object being released is in static or stack storage, we know it's
2191 // not being managed by ObjC reference counting, so we can delete pairs
2192 // regardless of what possible decrements or uses lie between them.
2193 bool KnownSafe = isa<Constant>(Arg) || isa<AllocaInst>(Arg);
2195 // A constant pointer can't be pointing to an object on the heap. It may
2196 // be reference-counted, but it won't be deleted.
2197 if (const LoadInst *LI = dyn_cast<LoadInst>(Arg))
2198 if (const GlobalVariable *GV =
2199 dyn_cast<GlobalVariable>(
2200 GetRCIdentityRoot(LI->getPointerOperand())))
if (GV->isConstant())
  KnownSafe = true;
2204 // Connect the dots between the top-down-collected RetainsToMove and
2205 // bottom-up-collected ReleasesToMove to form sets of related calls.
2206 NewRetains.push_back(Retain);
2207 bool PerformMoveCalls =
2208 ConnectTDBUTraversals(BBStates, Retains, Releases, M, NewRetains,
2209 NewReleases, DeadInsts, RetainsToMove,
2210 ReleasesToMove, Arg, KnownSafe,
2211 AnyPairsCompletelyEliminated);
2213 if (PerformMoveCalls) {
// Ok, everything checks out and we're all set. Let's move/delete some
// code!
MoveCalls(Arg, RetainsToMove, ReleasesToMove,
          Retains, Releases, DeadInsts, M);
}
2220 // Clean up state for next retain.
NewReleases.clear();
NewRetains.clear();
2223 RetainsToMove.clear();
ReleasesToMove.clear();
}
2227 // Now that we're done moving everything, we can delete the newly dead
2228 // instructions, as we no longer need them as insert points.
2229 while (!DeadInsts.empty())
2230 EraseInstruction(DeadInsts.pop_back_val());
return AnyPairsCompletelyEliminated;
}
2235 /// Weak pointer optimizations.
2236 void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
2237 DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeWeakCalls ==\n");
2239 // First, do memdep-style RLE and S2L optimizations. We can't use memdep
// itself because it uses AliasAnalysis and we need to do provenance
// queries instead.
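// Illustrative sketch (hypothetical IR, not from the original source):
// two loads of the same __weak slot with no intervening instruction that
// can write a weak pointer let the second call reuse the first result:
//
//   %a = call i8* @objc_loadWeak(i8** %w)
//   %b = call i8* @objc_loadWeak(i8** %w)   ; replaced by %a, then erased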
2242 for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
2243 Instruction *Inst = &*I++;
2245 DEBUG(dbgs() << "Visiting: " << *Inst << "\n");
2247 ARCInstKind Class = GetBasicARCInstKind(Inst);
2248 if (Class != ARCInstKind::LoadWeak &&
Class != ARCInstKind::LoadWeakRetained)
  continue;
2252 // Delete objc_loadWeak calls with no users.
2253 if (Class == ARCInstKind::LoadWeak && Inst->use_empty()) {
Inst->eraseFromParent();
Changed = true;
continue;
}
2258 // TODO: For now, just look for an earlier available version of this value
2259 // within the same block. Theoretically, we could do memdep-style non-local
2260 // analysis too, but that would want caching. A better approach would be to
2261 // use the technique that EarlyCSE uses.
2262 inst_iterator Current = std::prev(I);
2263 BasicBlock *CurrentBB = Current.getBasicBlockIterator();
for (BasicBlock::iterator B = CurrentBB->begin(),
                          J = Current.getInstructionIterator();
     J != B; --J) {
  Instruction *EarlierInst = &*std::prev(J);
2268 ARCInstKind EarlierClass = GetARCInstKind(EarlierInst);
2269 switch (EarlierClass) {
2270 case ARCInstKind::LoadWeak:
2271 case ARCInstKind::LoadWeakRetained: {
// If this is loading from the same pointer, replace this load's value
// with that one.
2274 CallInst *Call = cast<CallInst>(Inst);
2275 CallInst *EarlierCall = cast<CallInst>(EarlierInst);
2276 Value *Arg = Call->getArgOperand(0);
2277 Value *EarlierArg = EarlierCall->getArgOperand(0);
2278 switch (PA.getAA()->alias(Arg, EarlierArg)) {
case AliasAnalysis::MustAlias:
  Changed = true;
2281 // If the load has a builtin retain, insert a plain retain for it.
2282 if (Class == ARCInstKind::LoadWeakRetained) {
2283 Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Retain);
CallInst *CI = CallInst::Create(Decl, EarlierCall, "", Call);
CI->setTailCall();
}
2287 // Zap the fully redundant load.
2288 Call->replaceAllUsesWith(EarlierCall);
Call->eraseFromParent();
goto clobbered;
2291 case AliasAnalysis::MayAlias:
case AliasAnalysis::PartialAlias:
  goto clobbered;
case AliasAnalysis::NoAlias:
  break;
}
break;
}
2299 case ARCInstKind::StoreWeak:
2300 case ARCInstKind::InitWeak: {
2301 // If this is storing to the same pointer and has the same size etc.
2302 // replace this load's value with the stored value.
2303 CallInst *Call = cast<CallInst>(Inst);
2304 CallInst *EarlierCall = cast<CallInst>(EarlierInst);
2305 Value *Arg = Call->getArgOperand(0);
2306 Value *EarlierArg = EarlierCall->getArgOperand(0);
2307 switch (PA.getAA()->alias(Arg, EarlierArg)) {
case AliasAnalysis::MustAlias:
  Changed = true;
2310 // If the load has a builtin retain, insert a plain retain for it.
2311 if (Class == ARCInstKind::LoadWeakRetained) {
2312 Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Retain);
CallInst *CI = CallInst::Create(Decl, EarlierCall, "", Call);
CI->setTailCall();
}
2316 // Zap the fully redundant load.
2317 Call->replaceAllUsesWith(EarlierCall->getArgOperand(1));
Call->eraseFromParent();
goto clobbered;
2320 case AliasAnalysis::MayAlias:
case AliasAnalysis::PartialAlias:
  goto clobbered;
case AliasAnalysis::NoAlias:
  break;
}
break;
}
2328 case ARCInstKind::MoveWeak:
2329 case ARCInstKind::CopyWeak:
// TODO: Grab the copied value.
goto clobbered;
2332 case ARCInstKind::AutoreleasepoolPush:
2333 case ARCInstKind::None:
2334 case ARCInstKind::IntrinsicUser:
2335 case ARCInstKind::User:
2336 // Weak pointers are only modified through the weak entry points
// (and arbitrary calls, which could call the weak entry points).
  break;
default:
  // Anything else could modify the weak pointer.
  goto clobbered;
}
}
clobbered:;
}
2347 // Then, for each destroyWeak with an alloca operand, check to see if
2348 // the alloca and all its users can be zapped.
2349 for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
2350 Instruction *Inst = &*I++;
2351 ARCInstKind Class = GetBasicARCInstKind(Inst);
2352 if (Class != ARCInstKind::DestroyWeak)
2355 CallInst *Call = cast<CallInst>(Inst);
2356 Value *Arg = Call->getArgOperand(0);
2357 if (AllocaInst *Alloca = dyn_cast<AllocaInst>(Arg)) {
2358 for (User *U : Alloca->users()) {
2359 const Instruction *UserInst = cast<Instruction>(U);
2360 switch (GetBasicARCInstKind(UserInst)) {
2361 case ARCInstKind::InitWeak:
2362 case ARCInstKind::StoreWeak:
case ARCInstKind::DestroyWeak:
  continue;
default:
  goto done;
}
}
Changed = true;
2370 for (auto UI = Alloca->user_begin(), UE = Alloca->user_end(); UI != UE;) {
2371 CallInst *UserInst = cast<CallInst>(*UI++);
2372 switch (GetBasicARCInstKind(UserInst)) {
2373 case ARCInstKind::InitWeak:
2374 case ARCInstKind::StoreWeak:
2375 // These functions return their second argument.
UserInst->replaceAllUsesWith(UserInst->getArgOperand(1));
  break;
case ARCInstKind::DestroyWeak:
  // No return value.
  break;
default:
  llvm_unreachable("alloca really is used!");
}
UserInst->eraseFromParent();
  }
  Alloca->eraseFromParent();
done:;
}
}
}
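// Illustrative example (hypothetical IR, not from the original source): a
// __weak local whose only users are the weak entry points is dead, so the
// loop above erases all of the following:
//
//   %w = alloca i8*
//   call i8* @objc_initWeak(i8** %w, i8* %x)
//   call void @objc_destroyWeak(i8** %w)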
2392 /// Identify program paths which execute sequences of retains and releases which
2393 /// can be eliminated.
2394 bool ObjCARCOpt::OptimizeSequences(Function &F) {
2395 // Releases, Retains - These are used to store the results of the main flow
2396 // analysis. These use Value* as the key instead of Instruction* so that the
2397 // map stays valid when we get around to rewriting code and calls get
2398 // replaced by arguments.
2399 DenseMap<Value *, RRInfo> Releases;
2400 BlotMapVector<Value *, RRInfo> Retains;
2402 // This is used during the traversal of the function to track the
2403 // states for each identified object at each block.
2404 DenseMap<const BasicBlock *, BBState> BBStates;
2406 // Analyze the CFG of the function, and all instructions.
2407 bool NestingDetected = Visit(F, BBStates, Retains, Releases);
// Transform.
bool AnyPairsCompletelyEliminated = PerformCodePlacement(BBStates, Retains,
                                                         Releases,
                                                         F.getParent());

// Cleanup.
2415 MultiOwnersSet.clear();
return AnyPairsCompletelyEliminated && NestingDetected;
}
2420 /// Check if there is a dependent call earlier that does not have anything in
2421 /// between the Retain and the call that can affect the reference count of their
2422 /// shared pointer argument. Note that Retain need not be in BB.
static bool
HasSafePathToPredecessorCall(const Value *Arg, Instruction *Retain,
2425 SmallPtrSetImpl<Instruction *> &DepInsts,
2426 SmallPtrSetImpl<const BasicBlock *> &Visited,
2427 ProvenanceAnalysis &PA) {
2428 FindDependencies(CanChangeRetainCount, Arg, Retain->getParent(), Retain,
2429 DepInsts, Visited, PA);
if (DepInsts.size() != 1)
  return false;
CallInst *Call = dyn_cast_or_null<CallInst>(*DepInsts.begin());
2436 // Check that the pointer is the return value of the call.
if (!Call || Arg != Call)
  return false;
2440 // Check that the call is a regular call.
2441 ARCInstKind Class = GetBasicARCInstKind(Call);
if (Class != ARCInstKind::CallOrUser && Class != ARCInstKind::Call)
  return false;

return true;
}
2448 /// Find a dependent retain that precedes the given autorelease for which there
2449 /// is nothing in between the two instructions that can affect the ref count of
/// Arg.
static CallInst *
FindPredecessorRetainWithSafePath(const Value *Arg, BasicBlock *BB,
2453 Instruction *Autorelease,
2454 SmallPtrSetImpl<Instruction *> &DepInsts,
2455 SmallPtrSetImpl<const BasicBlock *> &Visited,
2456 ProvenanceAnalysis &PA) {
2457 FindDependencies(CanChangeRetainCount, Arg,
2458 BB, Autorelease, DepInsts, Visited, PA);
if (DepInsts.size() != 1)
  return nullptr;
CallInst *Retain = dyn_cast_or_null<CallInst>(*DepInsts.begin());
2465 // Check that we found a retain with the same argument.
2466 if (!Retain || !IsRetain(GetBasicARCInstKind(Retain)) ||
GetArgRCIdentityRoot(Retain) != Arg) {
  return nullptr;
}

return Retain;
}
2474 /// Look for an ``autorelease'' instruction dependent on Arg such that there are
2475 /// no instructions dependent on Arg that need a positive ref count in between
2476 /// the autorelease and the ret.
static CallInst *
FindPredecessorAutoreleaseWithSafePath(const Value *Arg, BasicBlock *BB,
                                       ReturnInst *Ret,
                                       SmallPtrSetImpl<Instruction *> &DepInsts,
2481 SmallPtrSetImpl<const BasicBlock *> &V,
2482 ProvenanceAnalysis &PA) {
2483 FindDependencies(NeedsPositiveRetainCount, Arg,
2484 BB, Ret, DepInsts, V, PA);
if (DepInsts.size() != 1)
  return nullptr;
2488 CallInst *Autorelease =
dyn_cast_or_null<CallInst>(*DepInsts.begin());
if (!Autorelease)
  return nullptr;
2492 ARCInstKind AutoreleaseClass = GetBasicARCInstKind(Autorelease);
if (!IsAutorelease(AutoreleaseClass))
  return nullptr;
if (GetArgRCIdentityRoot(Autorelease) != Arg)
  return nullptr;

return Autorelease;
}
2501 /// Look for this pattern:
2503 /// %call = call i8* @something(...)
2504 /// %2 = call i8* @objc_retain(i8* %call)
/// %3 = call i8* @objc_autorelease(i8* %2)
/// ret i8* %3
///
2508 /// And delete the retain and autorelease.
2509 void ObjCARCOpt::OptimizeReturns(Function &F) {
if (!F.getReturnType()->isPointerTy())
  return;
2513 DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeReturns ==\n");
2515 SmallPtrSet<Instruction *, 4> DependingInstructions;
2516 SmallPtrSet<const BasicBlock *, 4> Visited;
2517 for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI) {
2518 BasicBlock *BB = FI;
ReturnInst *Ret = dyn_cast<ReturnInst>(&BB->back());
if (!Ret)
  continue;
2521 DEBUG(dbgs() << "Visiting: " << *Ret << "\n");
2526 const Value *Arg = GetRCIdentityRoot(Ret->getOperand(0));
2528 // Look for an ``autorelease'' instruction that is a predecessor of Ret and
2529 // dependent on Arg such that there are no instructions dependent on Arg
2530 // that need a positive ref count in between the autorelease and Ret.
2531 CallInst *Autorelease =
2532 FindPredecessorAutoreleaseWithSafePath(Arg, BB, Ret,
DependingInstructions, Visited,
                                       PA);
DependingInstructions.clear();
Visited.clear();

if (!Autorelease)
  continue;

CallInst *Retain =
2542 FindPredecessorRetainWithSafePath(Arg, BB, Autorelease,
2543 DependingInstructions, Visited, PA);
DependingInstructions.clear();
Visited.clear();

if (!Retain)
  continue;
2550 // Check that there is nothing that can affect the reference count
2551 // between the retain and the call. Note that Retain need not be in BB.
2552 bool HasSafePathToCall = HasSafePathToPredecessorCall(Arg, Retain,
DependingInstructions,
                                                        Visited, PA);
DependingInstructions.clear();
Visited.clear();
if (!HasSafePathToCall)
  continue;
// If so, we can zap the retain and autorelease.
Changed = true;
++NumRets;
2564 DEBUG(dbgs() << "Erasing: " << *Retain << "\nErasing: "
2565 << *Autorelease << "\n");
2566 EraseInstruction(Retain);
EraseInstruction(Autorelease);
  }
}

#ifndef NDEBUG
void
ObjCARCOpt::GatherStatistics(Function &F, bool AfterOptimization) {
2574 llvm::Statistic &NumRetains =
2575 AfterOptimization? NumRetainsAfterOpt : NumRetainsBeforeOpt;
2576 llvm::Statistic &NumReleases =
2577 AfterOptimization? NumReleasesAfterOpt : NumReleasesBeforeOpt;
2579 for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
2580 Instruction *Inst = &*I++;
switch (GetBasicARCInstKind(Inst)) {
default:
  break;
case ARCInstKind::Retain:
  ++NumRetains;
  break;
case ARCInstKind::Release:
  ++NumReleases;
  break;
}
}
}
#endif
bool ObjCARCOpt::doInitialization(Module &M) {
  if (!EnableARCOpts)
    return false;
2599 // If nothing in the Module uses ARC, don't do anything.
Run = ModuleHasARC(M);
if (!Run)
  return false;
2604 // Identify the imprecise release metadata kind.
2605 ImpreciseReleaseMDKind =
2606 M.getContext().getMDKindID("clang.imprecise_release");
2607 CopyOnEscapeMDKind =
2608 M.getContext().getMDKindID("clang.arc.copy_on_escape");
2609 NoObjCARCExceptionsMDKind =
2610 M.getContext().getMDKindID("clang.arc.no_objc_arc_exceptions");
2611 #ifdef ARC_ANNOTATIONS
2612 ARCAnnotationBottomUpMDKind =
2613 M.getContext().getMDKindID("llvm.arc.annotation.bottomup");
2614 ARCAnnotationTopDownMDKind =
2615 M.getContext().getMDKindID("llvm.arc.annotation.topdown");
2616 ARCAnnotationProvenanceSourceMDKind =
2617 M.getContext().getMDKindID("llvm.arc.annotation.provenancesource");
2618 #endif // ARC_ANNOTATIONS
2620 // Intuitively, objc_retain and others are nocapture, however in practice
2621 // they are not, because they return their argument value. And objc_release
2622 // calls finalizers which can have arbitrary side effects.
// Initialize our runtime entry point cache.
EP.init(&M);

return false;
}
bool ObjCARCOpt::runOnFunction(Function &F) {
  if (!EnableARCOpts)
    return false;
// If nothing in the Module uses ARC, don't do anything.
if (!Run)
  return false;

Changed = false;
DEBUG(dbgs() << "<<< ObjCARCOpt: Visiting Function: " << F.getName()
             << " >>>\n");
2643 PA.setAA(&getAnalysis<AliasAnalysis>());
#ifndef NDEBUG
if (AreStatisticsEnabled()) {
  GatherStatistics(F, false);
}
#endif
2651 // This pass performs several distinct transformations. As a compile-time aid
2652 // when compiling code that isn't ObjC, skip these if the relevant ObjC
2653 // library functions aren't declared.
2655 // Preliminary optimizations. This also computes UsedInThisFunction.
2656 OptimizeIndividualCalls(F);
2658 // Optimizations for weak pointers.
2659 if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::LoadWeak)) |
2660 (1 << unsigned(ARCInstKind::LoadWeakRetained)) |
2661 (1 << unsigned(ARCInstKind::StoreWeak)) |
2662 (1 << unsigned(ARCInstKind::InitWeak)) |
2663 (1 << unsigned(ARCInstKind::CopyWeak)) |
2664 (1 << unsigned(ARCInstKind::MoveWeak)) |
2665 (1 << unsigned(ARCInstKind::DestroyWeak))))
2666 OptimizeWeakCalls(F);
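// Illustrative note (not from the original source): UsedInThisFunction is
// a bitmask indexed by ARCInstKind, so each guard here reduces to a test
// of the form
//
//   bool UsesRetain =
//       (UsedInThisFunction & (1 << unsigned(ARCInstKind::Retain))) != 0;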
2668 // Optimizations for retain+release pairs.
2669 if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::Retain)) |
2670 (1 << unsigned(ARCInstKind::RetainRV)) |
2671 (1 << unsigned(ARCInstKind::RetainBlock))))
2672 if (UsedInThisFunction & (1 << unsigned(ARCInstKind::Release)))
2673 // Run OptimizeSequences until it either stops making changes or
2674 // no retain+release pair nesting is detected.
2675 while (OptimizeSequences(F)) {}
2677 // Optimizations if objc_autorelease is used.
2678 if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::Autorelease)) |
(1 << unsigned(ARCInstKind::AutoreleaseRV))))
  OptimizeReturns(F);
2682 // Gather statistics after optimization.
#ifndef NDEBUG
if (AreStatisticsEnabled()) {
  GatherStatistics(F, true);
}
#endif
DEBUG(dbgs() << "\n");

return Changed;
}
2694 void ObjCARCOpt::releaseMemory() {