///
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "objc-arc-opts"
#include "ObjCARC.h"
+#include "ARCRuntimeEntryPoints.h"
#include "DependencyAnalysis.h"
#include "ObjCARCAliasAnalysis.h"
#include "ProvenanceAnalysis.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/IR/CFG.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
-#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
using namespace llvm::objcarc;
+#define DEBUG_TYPE "objc-arc-opts"
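+// DEBUG_TYPE is defined after the #includes so that the macro neither leaks
+// into the headers above nor conflicts with any DEBUG_TYPE they define.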
+
/// \defgroup MiscUtils Miscellaneous utilities that are not ARC specific.
/// @{
return FindSingleUseIdentifiedObject(
cast<CallInst>(Arg)->getArgOperand(0));
if (!IsObjCIdentifiedObject(Arg))
- return 0;
+ return nullptr;
return Arg;
}
-  // If we found an identifiable object but it has multiple uses, but they are
-  // trivial uses, we can still consider this to be a single-use value.
+  // If we found an identifiable object that has multiple uses, all of which
+  // are trivial uses, we can still consider this to be a single-use value.
if (IsObjCIdentifiedObject(Arg)) {
- for (Value::const_use_iterator UI = Arg->use_begin(), UE = Arg->use_end();
- UI != UE; ++UI) {
- const User *U = *UI;
+ for (const User *U : Arg->users())
if (!U->use_empty() || StripPointerCastsAndObjCCalls(U) != Arg)
- return 0;
- }
+ return nullptr;
return Arg;
}
- return 0;
-}
-
-/// \brief Test whether the given retainable object pointer escapes.
-///
-/// This differs from regular escape analysis in that a use as an
-/// argument to a call is not considered an escape.
-///
-static bool DoesRetainableObjPtrEscape(const User *Ptr) {
- DEBUG(dbgs() << "DoesRetainableObjPtrEscape: Target: " << *Ptr << "\n");
-
- // Walk the def-use chains.
- SmallVector<const Value *, 4> Worklist;
- Worklist.push_back(Ptr);
- // If Ptr has any operands add them as well.
- for (User::const_op_iterator I = Ptr->op_begin(), E = Ptr->op_end(); I != E;
- ++I) {
- Worklist.push_back(*I);
- }
-
- // Ensure we do not visit any value twice.
- SmallPtrSet<const Value *, 8> VisitedSet;
-
- do {
- const Value *V = Worklist.pop_back_val();
-
- DEBUG(dbgs() << "Visiting: " << *V << "\n");
-
- for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end();
- UI != UE; ++UI) {
- const User *UUser = *UI;
-
- DEBUG(dbgs() << "User: " << *UUser << "\n");
-
- // Special - Use by a call (callee or argument) is not considered
- // to be an escape.
- switch (GetBasicInstructionClass(UUser)) {
- case IC_StoreWeak:
- case IC_InitWeak:
- case IC_StoreStrong:
- case IC_Autorelease:
- case IC_AutoreleaseRV: {
- DEBUG(dbgs() << "User copies pointer arguments. Pointer Escapes!\n");
- // These special functions make copies of their pointer arguments.
- return true;
- }
- case IC_IntrinsicUser:
- // Use by the use intrinsic is not an escape.
- continue;
- case IC_User:
- case IC_None:
- // Use by an instruction which copies the value is an escape if the
- // result is an escape.
- if (isa<BitCastInst>(UUser) || isa<GetElementPtrInst>(UUser) ||
- isa<PHINode>(UUser) || isa<SelectInst>(UUser)) {
-
- if (VisitedSet.insert(UUser)) {
- DEBUG(dbgs() << "User copies value. Ptr escapes if result escapes."
- " Adding to list.\n");
- Worklist.push_back(UUser);
- } else {
- DEBUG(dbgs() << "Already visited node.\n");
- }
- continue;
- }
- // Use by a load is not an escape.
- if (isa<LoadInst>(UUser))
- continue;
- // Use by a store is not an escape if the use is the address.
- if (const StoreInst *SI = dyn_cast<StoreInst>(UUser))
- if (V != SI->getValueOperand())
- continue;
- break;
- default:
- // Regular calls and other stuff are not considered escapes.
- continue;
- }
- // Otherwise, conservatively assume an escape.
- DEBUG(dbgs() << "Assuming ptr escapes.\n");
- return true;
- }
- } while (!Worklist.empty());
-
- // No escapes found.
- DEBUG(dbgs() << "Ptr does not escape.\n");
- return false;
+ return nullptr;
}
/// This is a wrapper around getUnderlyingObjCPtr along the lines of
bool CFGHazardAfflicted;
RRInfo() :
- KnownSafe(false), IsTailCallRelease(false), ReleaseMetadata(0),
+ KnownSafe(false), IsTailCallRelease(false), ReleaseMetadata(nullptr),
CFGHazardAfflicted(false) {}
void clear();
/// Conservatively merge the two RRInfo. Returns true if a partial merge has
- /// occured, false otherwise.
+ /// occurred, false otherwise.
bool Merge(const RRInfo &Other);
- bool IsTrackingImpreciseReleases() {
- return ReleaseMetadata != 0;
- }
};
}
void RRInfo::clear() {
KnownSafe = false;
IsTailCallRelease = false;
- ReleaseMetadata = 0;
+ ReleaseMetadata = nullptr;
Calls.clear();
ReverseInsertPts.clear();
CFGHazardAfflicted = false;
bool RRInfo::Merge(const RRInfo &Other) {
// Conservatively merge the ReleaseMetadata information.
if (ReleaseMetadata != Other.ReleaseMetadata)
- ReleaseMetadata = 0;
+ ReleaseMetadata = nullptr;
// Conservatively merge the boolean state.
KnownSafe &= Other.KnownSafe;
bool Partial;
/// The current position in the sequence.
- Sequence Seq : 8;
+ unsigned char Seq : 8;
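+    // Stored as 'unsigned char' rather than 'Sequence' to avoid relying on
+    // the implementation-defined representation of an enum bitfield;
+    // GetSeq() casts the raw value back to Sequence.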
- public:
/// Unidirectional information about the current sequence.
- ///
- /// TODO: Encapsulate this better.
RRInfo RRI;
+ public:
PtrState() : KnownPositiveRefCount(false), Partial(false),
Seq(S_None) {}
+
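+    // Accessors wrapping the RRInfo member so clients no longer manipulate
+    // RRI directly (this replaces the old "TODO: Encapsulate this better").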
+ bool IsKnownSafe() const {
+ return RRI.KnownSafe;
+ }
+
+ void SetKnownSafe(const bool NewValue) {
+ RRI.KnownSafe = NewValue;
+ }
+
+ bool IsTailCallRelease() const {
+ return RRI.IsTailCallRelease;
+ }
+
+ void SetTailCallRelease(const bool NewValue) {
+ RRI.IsTailCallRelease = NewValue;
+ }
+
+ bool IsTrackingImpreciseReleases() const {
+ return RRI.ReleaseMetadata != nullptr;
+ }
+
+ const MDNode *GetReleaseMetadata() const {
+ return RRI.ReleaseMetadata;
+ }
+
+ void SetReleaseMetadata(MDNode *NewValue) {
+ RRI.ReleaseMetadata = NewValue;
+ }
+
+ bool IsCFGHazardAfflicted() const {
+ return RRI.CFGHazardAfflicted;
+ }
+
+ void SetCFGHazardAfflicted(const bool NewValue) {
+ RRI.CFGHazardAfflicted = NewValue;
+ }
+
void SetKnownPositiveRefCount() {
DEBUG(dbgs() << "Setting Known Positive.\n");
KnownPositiveRefCount = true;
}
Sequence GetSeq() const {
- return Seq;
+ return static_cast<Sequence>(Seq);
}
void ClearSequenceProgress() {
}
void Merge(const PtrState &Other, bool TopDown);
+
+ void InsertCall(Instruction *I) {
+ RRI.Calls.insert(I);
+ }
+
+ void InsertReverseInsertPt(Instruction *I) {
+ RRI.ReverseInsertPts.insert(I);
+ }
+
+ void ClearReverseInsertPts() {
+ RRI.ReverseInsertPts.clear();
+ }
+
+ bool HasReverseInsertPts() const {
+ return !RRI.ReverseInsertPts.empty();
+ }
+
+ const RRInfo &GetRRInfo() const {
+ return RRI;
+ }
};
}
void
PtrState::Merge(const PtrState &Other, bool TopDown) {
- Seq = MergeSeqs(Seq, Other.Seq, TopDown);
+ Seq = MergeSeqs(GetSeq(), Other.GetSeq(), TopDown);
KnownPositiveRefCount &= Other.KnownPositiveRefCount;
// If we're not in a sequence (anymore), drop all associated state.
SmallVector<BasicBlock *, 2> Succs;
public:
- BBState() : TopDownPathCount(0), BottomUpPathCount(0) {}
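+  // Sentinel path count: rather than wrapping on overflow, the multiply in
+  // GetAllPathCountWithOverflow and the additive merges below saturate to
+  // this value (0xffffffff).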
+ static const unsigned OverflowOccurredValue;
+
+ BBState() : TopDownPathCount(0), BottomUpPathCount(0) { }
typedef MapTy::iterator ptr_iterator;
typedef MapTy::const_iterator ptr_const_iterator;
/// which pass through this block. This is only valid after both the
/// top-down and bottom-up traversals are complete.
///
- /// Returns true if overflow occured. Returns false if overflow did not
+ /// Returns true if overflow occurred. Returns false if overflow did not
/// occur.
bool GetAllPathCountWithOverflow(unsigned &PathCount) const {
- assert(TopDownPathCount != 0);
- assert(BottomUpPathCount != 0);
+ if (TopDownPathCount == OverflowOccurredValue ||
+ BottomUpPathCount == OverflowOccurredValue)
+ return true;
unsigned long long Product =
(unsigned long long)TopDownPathCount*BottomUpPathCount;
- PathCount = Product;
- // Overflow occured if any of the upper bits of Product are set.
- return Product >> 32;
+    // Overflow occurred if any of the upper bits of Product are set, or if
+    // all of the lower 32 bits of Product are set.
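+    // For example, TopDownPathCount == 0x10000 and BottomUpPathCount ==
+    // 0x10000 give Product == 0x100000000, whose upper bits are set; and a
+    // Product whose low 32 bits are all 1s would be indistinguishable from
+    // OverflowOccurredValue itself.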
+ return (Product >> 32) ||
+ ((PathCount = Product) == OverflowOccurredValue);
}
// Specialized CFG utilities.
typedef SmallVectorImpl<BasicBlock *>::const_iterator edge_iterator;
- edge_iterator pred_begin() { return Preds.begin(); }
- edge_iterator pred_end() { return Preds.end(); }
- edge_iterator succ_begin() { return Succs.begin(); }
- edge_iterator succ_end() { return Succs.end(); }
+ edge_iterator pred_begin() const { return Preds.begin(); }
+ edge_iterator pred_end() const { return Preds.end(); }
+ edge_iterator succ_begin() const { return Succs.begin(); }
+ edge_iterator succ_end() const { return Succs.end(); }
void addSucc(BasicBlock *Succ) { Succs.push_back(Succ); }
void addPred(BasicBlock *Pred) { Preds.push_back(Pred); }
bool isExit() const { return Succs.empty(); }
};
+
+ const unsigned BBState::OverflowOccurredValue = 0xffffffff;
}
void BBState::InitFromPred(const BBState &Other) {
/// The top-down traversal uses this to merge information about predecessors to
/// form the initial state for a new block.
void BBState::MergePred(const BBState &Other) {
+ if (TopDownPathCount == OverflowOccurredValue)
+ return;
+
// Other.TopDownPathCount can be 0, in which case it is either dead or a
// loop backedge. Loop backedges are special.
TopDownPathCount += Other.TopDownPathCount;
+  // To be consistent, we clear the top-down pointers when the addition makes
+  // TopDownPathCount equal to OverflowOccurredValue, even though "true"
+  // overflow has not occurred.
+ if (TopDownPathCount == OverflowOccurredValue) {
+ clearTopDownPointers();
+ return;
+ }
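+  // For example, 0xfffffffe + 0x1 sums exactly to OverflowOccurredValue
+  // without the unsigned addition wrapping, yet it must still be treated as
+  // an overflow so that the sentinel stays unambiguous.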
+
// Check for overflow. If we have overflow, fall back to conservative
// behavior.
if (TopDownPathCount < Other.TopDownPathCount) {
+ TopDownPathCount = OverflowOccurredValue;
clearTopDownPointers();
return;
}
/// The bottom-up traversal uses this to merge information about successors to
/// form the initial state for a new block.
void BBState::MergeSucc(const BBState &Other) {
+ if (BottomUpPathCount == OverflowOccurredValue)
+ return;
+
// Other.BottomUpPathCount can be 0, in which case it is either dead or a
// loop backedge. Loop backedges are special.
BottomUpPathCount += Other.BottomUpPathCount;
+  // To be consistent, we clear the bottom-up pointers when the addition makes
+  // BottomUpPathCount equal to OverflowOccurredValue, even though "true"
+  // overflow has not occurred.
+ if (BottomUpPathCount == OverflowOccurredValue) {
+ clearBottomUpPointers();
+ return;
+ }
+
// Check for overflow. If we have overflow, fall back to conservative
// behavior.
if (BottomUpPathCount < Other.BottomUpPathCount) {
+ BottomUpPathCount = OverflowOccurredValue;
clearBottomUpPointers();
return;
}
/// arc annotation processor tool. If the function is an
static MDString *AppendMDNodeToSourcePtr(unsigned NodeId,
Value *Ptr) {
- MDString *Hash = 0;
+ MDString *Hash = nullptr;
-  // If pointer is a result of an instruction and it does not have a source
-  // MDNode it, attach a new MDNode onto it. If pointer is a result of
+  // If pointer is a result of an instruction and it does not have a source
+  // MDNode attached to it, attach a new MDNode onto it. If pointer is a result of
// of line at the module level and to provide a very simple format
-  // encoding the information herein. Both of these makes it simpler to
-  // parse the annotations by a simple external program.
+  // encoding the information herein. Both of these make it simpler for an
+  // external program to parse the annotations.
- std::string Str;
- raw_string_ostream os(Str);
+ string_ostream os;
os << "(" << Inst->getParent()->getParent()->getName() << ",%"
<< Inst->getName() << ")";
Hash = cast<MDString>(Node->getOperand(0));
}
} else if (Argument *Arg = dyn_cast<Argument>(Ptr)) {
- std::string str;
- raw_string_ostream os(str);
+ string_ostream os;
os << "(" << Arg->getParent()->getName() << ",%" << Arg->getName()
<< ")";
Hash = MDString::get(Arg->getContext(), os.str());
}
static std::string SequenceToString(Sequence A) {
- std::string str;
- raw_string_ostream os(str);
+ string_ostream os;
os << A;
return os.str();
}
MDString *PtrSourceMDNodeID,
Sequence OldSeq,
Sequence NewSeq) {
- MDNode *Node = 0;
+ MDNode *Node = nullptr;
Value *tmp[3] = {PtrSourceMDNodeID,
SequenceToMDString(Inst->getContext(),
OldSeq),
Value *PtrName;
StringRef Tmp = Ptr->getName();
- if (0 == (PtrName = M->getGlobalVariable(Tmp, true))) {
+ if (nullptr == (PtrName = M->getGlobalVariable(Tmp, true))) {
Value *ActualPtrName = Builder.CreateGlobalStringPtr(Tmp,
Tmp + "_STR");
PtrName = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
Value *S;
std::string SeqStr = SequenceToString(Seq);
- if (0 == (S = M->getGlobalVariable(SeqStr, true))) {
+ if (nullptr == (S = M->getGlobalVariable(SeqStr, true))) {
Value *ActualPtrName = Builder.CreateGlobalStringPtr(SeqStr,
SeqStr + "_STR");
S = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
/*isVarArg=*/false);
Constant *Callee = M->getOrInsertFunction(Name, FTy);
- IRBuilder<> Builder(BB, llvm::prior(BB->end()));
+ IRBuilder<> Builder(BB, std::prev(BB->end()));
Value *PtrName;
StringRef Tmp = Ptr->getName();
- if (0 == (PtrName = M->getGlobalVariable(Tmp, true))) {
+ if (nullptr == (PtrName = M->getGlobalVariable(Tmp, true))) {
Value *ActualPtrName = Builder.CreateGlobalStringPtr(Tmp,
Tmp + "_STR");
PtrName = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
Value *S;
std::string SeqStr = SequenceToString(Seq);
- if (0 == (S = M->getGlobalVariable(SeqStr, true))) {
+ if (nullptr == (S = M->getGlobalVariable(SeqStr, true))) {
Value *ActualPtrName = Builder.CreateGlobalStringPtr(SeqStr,
SeqStr + "_STR");
S = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
// llvm-arc-annotation-processor tool to cross reference where the source
// pointer is in the LLVM IR since the LLVM IR parser does not submit such
// information via debug info for backends to use (since why would anyone
- // need such a thing from LLVM IR besides in non standard cases
+ // need such a thing from LLVM IR besides in non-standard cases
// [i.e. this]).
MDString *SourcePtrMDNode =
AppendMDNodeToSourcePtr(PtrMDId, Ptr);
class ObjCARCOpt : public FunctionPass {
bool Changed;
ProvenanceAnalysis PA;
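+    /// Cached declarations of the ObjC runtime entry points (objc_retain,
+    /// objc_release, ...), created lazily on first use. This replaces the
+    /// individual Constant* getters; see EP.Initialize(&M) in
+    /// doInitialization and EP.get(...) at the call sites.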
+ ARCRuntimeEntryPoints EP;
// This is used to track if a pointer is stored into an alloca.
DenseSet<const Value *> MultiOwnersSet;
/// A flag indicating whether this optimization pass should run.
bool Run;
- /// Declarations for ObjC runtime functions, for use in creating calls to
- /// them. These are initialized lazily to avoid cluttering up the Module
- /// with unused declarations.
-
- /// Declaration for ObjC runtime function objc_autoreleaseReturnValue.
- Constant *AutoreleaseRVCallee;
- /// Declaration for ObjC runtime function objc_release.
- Constant *ReleaseCallee;
- /// Declaration for ObjC runtime function objc_retain.
- Constant *RetainCallee;
- /// Declaration for ObjC runtime function objc_retainBlock.
- Constant *RetainBlockCallee;
- /// Declaration for ObjC runtime function objc_autorelease.
- Constant *AutoreleaseCallee;
-
-  /// Flags which determine whether each of the interesting runtine functions
+  /// Flags which determine whether each of the interesting runtime functions
/// is in fact used in the current function.
unsigned UsedInThisFunction;
unsigned ARCAnnotationProvenanceSourceMDKind;
-#endif // ARC_ANNOATIONS
+#endif // ARC_ANNOTATIONS
- Constant *getAutoreleaseRVCallee(Module *M);
- Constant *getReleaseCallee(Module *M);
- Constant *getRetainCallee(Module *M);
- Constant *getRetainBlockCallee(Module *M);
- Constant *getAutoreleaseCallee(Module *M);
-
- bool IsRetainBlockOptimizable(const Instruction *Inst);
-
bool OptimizeRetainRVCall(Function &F, Instruction *RetainRV);
void OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV,
InstructionClass &Class);
- bool OptimizeRetainBlockCall(Function &F, Instruction *RetainBlock,
- InstructionClass &Class);
void OptimizeIndividualCalls(Function &F);
void CheckForCFGHazards(const BasicBlock *BB,
MapVector<Value *, RRInfo> &Retains,
DenseMap<Value *, RRInfo> &Releases,
Module *M,
- SmallVector<Instruction *, 4> &NewRetains,
- SmallVector<Instruction *, 4> &NewReleases,
- SmallVector<Instruction *, 8> &DeadInsts,
+ SmallVectorImpl<Instruction *> &NewRetains,
+ SmallVectorImpl<Instruction *> &NewReleases,
+ SmallVectorImpl<Instruction *> &DeadInsts,
RRInfo &RetainsToMove,
RRInfo &ReleasesToMove,
Value *Arg,
void GatherStatistics(Function &F, bool AfterOptimization = false);
#endif
- virtual void getAnalysisUsage(AnalysisUsage &AU) const;
- virtual bool doInitialization(Module &M);
- virtual bool runOnFunction(Function &F);
- virtual void releaseMemory();
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+ bool doInitialization(Module &M) override;
+ bool runOnFunction(Function &F) override;
+ void releaseMemory() override;
public:
static char ID;
AU.setPreservesCFG();
}
-bool ObjCARCOpt::IsRetainBlockOptimizable(const Instruction *Inst) {
- // Without the magic metadata tag, we have to assume this might be an
- // objc_retainBlock call inserted to convert a block pointer to an id,
- // in which case it really is needed.
- if (!Inst->getMetadata(CopyOnEscapeMDKind))
- return false;
-
- // If the pointer "escapes" (not including being used in a call),
- // the copy may be needed.
- if (DoesRetainableObjPtrEscape(Inst))
- return false;
-
- // Otherwise, it's not needed.
- return true;
-}
-
-Constant *ObjCARCOpt::getAutoreleaseRVCallee(Module *M) {
- if (!AutoreleaseRVCallee) {
- LLVMContext &C = M->getContext();
- Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
- Type *Params[] = { I8X };
- FunctionType *FTy = FunctionType::get(I8X, Params, /*isVarArg=*/false);
- AttributeSet Attribute =
- AttributeSet().addAttribute(M->getContext(), AttributeSet::FunctionIndex,
- Attribute::NoUnwind);
- AutoreleaseRVCallee =
- M->getOrInsertFunction("objc_autoreleaseReturnValue", FTy,
- Attribute);
- }
- return AutoreleaseRVCallee;
-}
-
-Constant *ObjCARCOpt::getReleaseCallee(Module *M) {
- if (!ReleaseCallee) {
- LLVMContext &C = M->getContext();
- Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) };
- AttributeSet Attribute =
- AttributeSet().addAttribute(M->getContext(), AttributeSet::FunctionIndex,
- Attribute::NoUnwind);
- ReleaseCallee =
- M->getOrInsertFunction(
- "objc_release",
- FunctionType::get(Type::getVoidTy(C), Params, /*isVarArg=*/false),
- Attribute);
- }
- return ReleaseCallee;
-}
-
-Constant *ObjCARCOpt::getRetainCallee(Module *M) {
- if (!RetainCallee) {
- LLVMContext &C = M->getContext();
- Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) };
- AttributeSet Attribute =
- AttributeSet().addAttribute(M->getContext(), AttributeSet::FunctionIndex,
- Attribute::NoUnwind);
- RetainCallee =
- M->getOrInsertFunction(
- "objc_retain",
- FunctionType::get(Params[0], Params, /*isVarArg=*/false),
- Attribute);
- }
- return RetainCallee;
-}
-
-Constant *ObjCARCOpt::getRetainBlockCallee(Module *M) {
- if (!RetainBlockCallee) {
- LLVMContext &C = M->getContext();
- Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) };
- // objc_retainBlock is not nounwind because it calls user copy constructors
- // which could theoretically throw.
- RetainBlockCallee =
- M->getOrInsertFunction(
- "objc_retainBlock",
- FunctionType::get(Params[0], Params, /*isVarArg=*/false),
- AttributeSet());
- }
- return RetainBlockCallee;
-}
-
-Constant *ObjCARCOpt::getAutoreleaseCallee(Module *M) {
- if (!AutoreleaseCallee) {
- LLVMContext &C = M->getContext();
- Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) };
- AttributeSet Attribute =
- AttributeSet().addAttribute(M->getContext(), AttributeSet::FunctionIndex,
- Attribute::NoUnwind);
- AutoreleaseCallee =
- M->getOrInsertFunction(
- "objc_autorelease",
- FunctionType::get(Params[0], Params, /*isVarArg=*/false),
- Attribute);
- }
- return AutoreleaseCallee;
-}
-
/// Turn objc_retainAutoreleasedReturnValue into objc_retain if the operand is
/// not a return value. Or, if it can be paired with an
/// objc_autoreleaseReturnValue, delete the pair and return true.
"objc_retain since the operand is not a return value.\n"
"Old = " << *RetainRV << "\n");
- cast<CallInst>(RetainRV)->setCalledFunction(getRetainCallee(F.getParent()));
+ Constant *NewDecl = EP.get(ARCRuntimeEntryPoints::EPT_Retain);
+ cast<CallInst>(RetainRV)->setCalledFunction(NewDecl);
DEBUG(dbgs() << "New = " << *RetainRV << "\n");
Users.push_back(Ptr);
do {
Ptr = Users.pop_back_val();
- for (Value::const_use_iterator UI = Ptr->use_begin(), UE = Ptr->use_end();
- UI != UE; ++UI) {
- const User *I = *UI;
- if (isa<ReturnInst>(I) || GetBasicInstructionClass(I) == IC_RetainRV)
+ for (const User *U : Ptr->users()) {
+ if (isa<ReturnInst>(U) || GetBasicInstructionClass(U) == IC_RetainRV)
return;
- if (isa<BitCastInst>(I))
- Users.push_back(I);
+ if (isa<BitCastInst>(U))
+ Users.push_back(U);
}
} while (!Users.empty());
"Old = " << *AutoreleaseRV << "\n");
CallInst *AutoreleaseRVCI = cast<CallInst>(AutoreleaseRV);
- AutoreleaseRVCI->
- setCalledFunction(getAutoreleaseCallee(F.getParent()));
+ Constant *NewDecl = EP.get(ARCRuntimeEntryPoints::EPT_Autorelease);
+ AutoreleaseRVCI->setCalledFunction(NewDecl);
AutoreleaseRVCI->setTailCall(false); // Never tail call objc_autorelease.
Class = IC_Autorelease;
}
-// \brief Attempt to strength reduce objc_retainBlock calls to objc_retain
-// calls.
-//
-// Specifically: If an objc_retainBlock call has the copy_on_escape metadata and
-// does not escape (following the rules of block escaping), strength reduce the
-// objc_retainBlock to an objc_retain.
-//
-// TODO: If an objc_retainBlock call is dominated period by a previous
-// objc_retainBlock call, strength reduce the objc_retainBlock to an
-// objc_retain.
-bool
-ObjCARCOpt::OptimizeRetainBlockCall(Function &F, Instruction *Inst,
- InstructionClass &Class) {
- assert(GetBasicInstructionClass(Inst) == Class);
- assert(IC_RetainBlock == Class);
-
- // If we can not optimize Inst, return false.
- if (!IsRetainBlockOptimizable(Inst))
- return false;
-
- Changed = true;
- ++NumPeeps;
-
- DEBUG(dbgs() << "Strength reduced retainBlock => retain.\n");
- DEBUG(dbgs() << "Old: " << *Inst << "\n");
- CallInst *RetainBlock = cast<CallInst>(Inst);
- RetainBlock->setCalledFunction(getRetainCallee(F.getParent()));
- // Remove copy_on_escape metadata.
- RetainBlock->setMetadata(CopyOnEscapeMDKind, 0);
- Class = IC_Retain;
- DEBUG(dbgs() << "New: " << *Inst << "\n");
- return true;
-}
-
/// Visit each call, one at a time, and make simplifications without doing any
/// additional analysis.
void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
}
break;
}
- case IC_RetainBlock:
- // If we strength reduce an objc_retainBlock to an objc_retain, continue
- // onto the objc_retain peephole optimizations. Otherwise break.
- OptimizeRetainBlockCall(F, Inst, Class);
- break;
case IC_RetainRV:
if (OptimizeRetainRVCall(F, Inst))
continue;
// Create the declaration lazily.
LLVMContext &C = Inst->getContext();
- CallInst *NewCall =
- CallInst::Create(getReleaseCallee(F.getParent()),
- Call->getArgOperand(0), "", Call);
+
+ Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Release);
+ CallInst *NewCall = CallInst::Create(Decl, Call->getArgOperand(0), "",
+ Call);
NewCall->setMetadata(ImpreciseReleaseMDKind, MDNode::get(C, None));
DEBUG(dbgs() << "Replacing autorelease{,RV}(x) with objc_release(x) "
bool &ShouldContinue) {
switch (SuccSSeq) {
case S_CanRelease: {
- if (!S.RRI.KnownSafe && !SuccSRRIKnownSafe) {
+ if (!S.IsKnownSafe() && !SuccSRRIKnownSafe) {
S.ClearSequenceProgress();
break;
}
- S.RRI.CFGHazardAfflicted = true;
+ S.SetCFGHazardAfflicted(true);
ShouldContinue = true;
break;
}
case S_Stop:
case S_Release:
case S_MovableRelease:
- if (!S.RRI.KnownSafe && !SuccSRRIKnownSafe)
+ if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
AllSuccsHaveSame = false;
else
NotAllSeqEqualButKnownSafe = true;
case S_Release:
case S_MovableRelease:
case S_Use:
- if (!S.RRI.KnownSafe && !SuccSRRIKnownSafe)
+ if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
AllSuccsHaveSame = false;
else
NotAllSeqEqualButKnownSafe = true;
-        // If we have S_Use or S_CanRelease, perform our check for cfg hazard
-        // checks.
+        // If we have S_Use or S_CanRelease, perform our CFG hazard checks.
- const bool SuccSRRIKnownSafe = SuccS.RRI.KnownSafe;
+ const bool SuccSRRIKnownSafe = SuccS.IsKnownSafe();
// *NOTE* We do not use Seq from above here since we are allowing for
// S.GetSeq() to change while we are visiting basic blocks.
// safe, stop code motion. This is because whether or not it is safe to
// remove RR pairs via KnownSafe is an orthogonal concept to whether we
// are allowed to perform code motion.
- S.RRI.CFGHazardAfflicted = true;
+ S.SetCFGHazardAfflicted(true);
}
}
}
BBState &MyStates) {
bool NestingDetected = false;
InstructionClass Class = GetInstructionClass(Inst);
- const Value *Arg = 0;
+ const Value *Arg = nullptr;
DEBUG(dbgs() << "Class: " << Class << "\n");
Sequence NewSeq = ReleaseMetadata ? S_MovableRelease : S_Release;
ANNOTATE_BOTTOMUP(Inst, Arg, S.GetSeq(), NewSeq);
S.ResetSequenceProgress(NewSeq);
- S.RRI.ReleaseMetadata = ReleaseMetadata;
- S.RRI.KnownSafe = S.HasKnownPositiveRefCount();
- S.RRI.IsTailCallRelease = cast<CallInst>(Inst)->isTailCall();
- S.RRI.Calls.insert(Inst);
+ S.SetReleaseMetadata(ReleaseMetadata);
+ S.SetKnownSafe(S.HasKnownPositiveRefCount());
+ S.SetTailCallRelease(cast<CallInst>(Inst)->isTailCall());
+ S.InsertCall(Inst);
S.SetKnownPositiveRefCount();
break;
}
case S_Use:
// If OldSeq is not S_Use or OldSeq is S_Use and we are tracking an
// imprecise release, clear our reverse insertion points.
- if (OldSeq != S_Use || S.RRI.IsTrackingImpreciseReleases())
- S.RRI.ReverseInsertPts.clear();
+ if (OldSeq != S_Use || S.IsTrackingImpreciseReleases())
+ S.ClearReverseInsertPts();
// FALL THROUGH
case S_CanRelease:
// Don't do retain+release tracking for IC_RetainRV, because it's
// better to let it remain as the first instruction after a call.
if (Class != IC_RetainRV)
- Retains[Inst] = S.RRI;
+ Retains[Inst] = S.GetRRInfo();
S.ClearSequenceProgress();
break;
case S_None:
// pointer has multiple owners implying that we must be more conservative.
//
// This comes up in the context of a pointer being ``KnownSafe''. In the
- // presense of a block being initialized, the frontend will emit the
+ // presence of a block being initialized, the frontend will emit the
// objc_retain on the original pointer and the release on the pointer loaded
-    // from the alloca. The optimizer will through the provenance analysis
+    // from the alloca. The optimizer will, through the provenance analysis,
// realize that the two are related, but since we only require KnownSafe in
// one direction, will match the inner retain on the original pointer with
// the guard release on the original pointer. This is fixed by ensuring that
- // in the presense of allocas we only unconditionally remove pointers if
+ // in the presence of allocas we only unconditionally remove pointers if
// both our retain and our release are KnownSafe.
if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
if (AreAnyUnderlyingObjectsAnAlloca(SI->getPointerOperand())) {
if (CanUse(Inst, Ptr, PA, Class)) {
DEBUG(dbgs() << "CanUse: Seq: " << Seq << "; " << *Ptr
<< "\n");
- assert(S.RRI.ReverseInsertPts.empty());
+ assert(!S.HasReverseInsertPts());
// If this is an invoke instruction, we're scanning it as part of
// one of its successor blocks, since we can't insert code after it
// in its own block, and we don't want to split critical edges.
if (isa<InvokeInst>(Inst))
- S.RRI.ReverseInsertPts.insert(BB->getFirstInsertionPt());
+ S.InsertReverseInsertPt(BB->getFirstInsertionPt());
else
- S.RRI.ReverseInsertPts.insert(llvm::next(BasicBlock::iterator(Inst)));
+ S.InsertReverseInsertPt(std::next(BasicBlock::iterator(Inst)));
S.SetSeq(S_Use);
ANNOTATE_BOTTOMUP(Inst, Ptr, Seq, S_Use);
} else if (Seq == S_Release && IsUser(Class)) {
// Non-movable releases depend on any possible objc pointer use.
S.SetSeq(S_Stop);
ANNOTATE_BOTTOMUP(Inst, Ptr, S_Release, S_Stop);
- assert(S.RRI.ReverseInsertPts.empty());
+ assert(!S.HasReverseInsertPts());
// As above; handle invoke specially.
if (isa<InvokeInst>(Inst))
- S.RRI.ReverseInsertPts.insert(BB->getFirstInsertionPt());
+ S.InsertReverseInsertPt(BB->getFirstInsertionPt());
else
- S.RRI.ReverseInsertPts.insert(llvm::next(BasicBlock::iterator(Inst)));
+ S.InsertReverseInsertPt(std::next(BasicBlock::iterator(Inst)));
}
break;
case S_Stop:
// Visit all the instructions, bottom-up.
for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; --I) {
- Instruction *Inst = llvm::prior(I);
+ Instruction *Inst = std::prev(I);
// Invoke instructions are visited as part of their successors (below).
if (isa<InvokeInst>(Inst))
BBState &MyStates) {
bool NestingDetected = false;
InstructionClass Class = GetInstructionClass(Inst);
- const Value *Arg = 0;
+ const Value *Arg = nullptr;
switch (Class) {
case IC_RetainBlock:
ANNOTATE_TOPDOWN(Inst, Arg, S.GetSeq(), S_Retain);
S.ResetSequenceProgress(S_Retain);
- S.RRI.KnownSafe = S.HasKnownPositiveRefCount();
- S.RRI.Calls.insert(Inst);
+ S.SetKnownSafe(S.HasKnownPositiveRefCount());
+ S.InsertCall(Inst);
}
S.SetKnownPositiveRefCount();
switch (OldSeq) {
case S_Retain:
case S_CanRelease:
- if (OldSeq == S_Retain || ReleaseMetadata != 0)
- S.RRI.ReverseInsertPts.clear();
+ if (OldSeq == S_Retain || ReleaseMetadata != nullptr)
+ S.ClearReverseInsertPts();
// FALL THROUGH
case S_Use:
- S.RRI.ReleaseMetadata = ReleaseMetadata;
- S.RRI.IsTailCallRelease = cast<CallInst>(Inst)->isTailCall();
- Releases[Inst] = S.RRI;
+ S.SetReleaseMetadata(ReleaseMetadata);
+ S.SetTailCallRelease(cast<CallInst>(Inst)->isTailCall());
+ Releases[Inst] = S.GetRRInfo();
ANNOTATE_TOPDOWN(Inst, Arg, S.GetSeq(), S_None);
S.ClearSequenceProgress();
break;
case S_Retain:
S.SetSeq(S_CanRelease);
ANNOTATE_TOPDOWN(Inst, Ptr, Seq, S_CanRelease);
- assert(S.RRI.ReverseInsertPts.empty());
- S.RRI.ReverseInsertPts.insert(Inst);
+ assert(!S.HasReverseInsertPts());
+ S.InsertReverseInsertPt(Inst);
// One call can't cause a transition from S_Retain to S_CanRelease
// and S_CanRelease to S_Use. If we've made the first transition,
Instruction *InsertPt = *PI;
Value *MyArg = ArgTy == ParamTy ? Arg :
new BitCastInst(Arg, ParamTy, "", InsertPt);
- CallInst *Call =
- CallInst::Create(getRetainCallee(M), MyArg, "", InsertPt);
+ Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Retain);
+ CallInst *Call = CallInst::Create(Decl, MyArg, "", InsertPt);
Call->setDoesNotThrow();
Call->setTailCall();
Instruction *InsertPt = *PI;
Value *MyArg = ArgTy == ParamTy ? Arg :
new BitCastInst(Arg, ParamTy, "", InsertPt);
- CallInst *Call = CallInst::Create(getReleaseCallee(M), MyArg,
- "", InsertPt);
+ Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Release);
+ CallInst *Call = CallInst::Create(Decl, MyArg, "", InsertPt);
// Attach a clang.imprecise_release metadata tag, if appropriate.
if (MDNode *M = ReleasesToMove.ReleaseMetadata)
Call->setMetadata(ImpreciseReleaseMDKind, M);
MapVector<Value *, RRInfo> &Retains,
DenseMap<Value *, RRInfo> &Releases,
Module *M,
- SmallVector<Instruction *, 4> &NewRetains,
- SmallVector<Instruction *, 4> &NewReleases,
- SmallVector<Instruction *, 8> &DeadInsts,
+ SmallVectorImpl<Instruction *> &NewRetains,
+ SmallVectorImpl<Instruction *> &NewReleases,
+ SmallVectorImpl<Instruction *> &DeadInsts,
RRInfo &RetainsToMove,
RRInfo &ReleasesToMove,
Value *Arg,
if (Jt == Releases.end())
return false;
const RRInfo &NewRetainReleaseRRI = Jt->second;
- assert(NewRetainReleaseRRI.Calls.count(NewRetain));
+
+ // If the release does not have a reference to the retain as well,
+ // something happened which is unaccounted for. Do not do anything.
+ //
+ // This can happen if we catch an additive overflow during path count
+ // merging.
+ if (!NewRetainReleaseRRI.Calls.count(NewRetain))
+ return false;
+
if (ReleasesToMove.Calls.insert(NewRetainRelease)) {
// If we overflow when we compute the path count, don't remove/move
// anything.
const BBState &NRRBBState = BBStates[NewRetainRelease->getParent()];
- unsigned PathCount;
+ unsigned PathCount = BBState::OverflowOccurredValue;
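+      // PathCount is seeded with the sentinel so the assert below can verify
+      // that GetAllPathCountWithOverflow actually wrote a real count.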
if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
return false;
+ assert(PathCount != BBState::OverflowOccurredValue &&
+             "PathCount at this point cannot be "
+ "OverflowOccurredValue.");
OldDelta -= PathCount;
// Merge the ReleaseMetadata and IsTailCallRelease values.
} else {
if (ReleasesToMove.ReleaseMetadata !=
NewRetainReleaseRRI.ReleaseMetadata)
- ReleasesToMove.ReleaseMetadata = 0;
+ ReleasesToMove.ReleaseMetadata = nullptr;
if (ReleasesToMove.IsTailCallRelease !=
NewRetainReleaseRRI.IsTailCallRelease)
ReleasesToMove.IsTailCallRelease = false;
// If we overflow when we compute the path count, don't
// remove/move anything.
const BBState &RIPBBState = BBStates[RIP->getParent()];
+ PathCount = BBState::OverflowOccurredValue;
if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
return false;
+ assert(PathCount != BBState::OverflowOccurredValue &&
+             "PathCount at this point cannot be "
+ "OverflowOccurredValue.");
NewDelta -= PathCount;
}
}
if (Jt == Retains.end())
return false;
const RRInfo &NewReleaseRetainRRI = Jt->second;
- assert(NewReleaseRetainRRI.Calls.count(NewRelease));
- if (RetainsToMove.Calls.insert(NewReleaseRetain)) {
+ // If the retain does not have a reference to the release as well,
+ // something happened which is unaccounted for. Do not do anything.
+ //
+ // This can happen if we catch an additive overflow during path count
+ // merging.
+ if (!NewReleaseRetainRRI.Calls.count(NewRelease))
+ return false;
+
+ if (RetainsToMove.Calls.insert(NewReleaseRetain)) {
// If we overflow when we compute the path count, don't remove/move
// anything.
const BBState &NRRBBState = BBStates[NewReleaseRetain->getParent()];
- unsigned PathCount;
+ unsigned PathCount = BBState::OverflowOccurredValue;
if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
return false;
+ assert(PathCount != BBState::OverflowOccurredValue &&
+             "PathCount at this point cannot be "
+ "OverflowOccurredValue.");
OldDelta += PathCount;
OldCount += PathCount;
// If we overflow when we compute the path count, don't
// remove/move anything.
const BBState &RIPBBState = BBStates[RIP->getParent()];
+
+ PathCount = BBState::OverflowOccurredValue;
if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
return false;
+ assert(PathCount != BBState::OverflowOccurredValue &&
+             "PathCount at this point cannot be "
+ "OverflowOccurredValue.");
NewDelta += PathCount;
NewCount += PathCount;
}
// within the same block. Theoretically, we could do memdep-style non-local
// analysis too, but that would want caching. A better approach would be to
// use the technique that EarlyCSE uses.
- inst_iterator Current = llvm::prior(I);
+ inst_iterator Current = std::prev(I);
BasicBlock *CurrentBB = Current.getBasicBlockIterator();
for (BasicBlock::iterator B = CurrentBB->begin(),
J = Current.getInstructionIterator();
J != B; --J) {
- Instruction *EarlierInst = &*llvm::prior(J);
+ Instruction *EarlierInst = &*std::prev(J);
InstructionClass EarlierClass = GetInstructionClass(EarlierInst);
switch (EarlierClass) {
case IC_LoadWeak:
Changed = true;
// If the load has a builtin retain, insert a plain retain for it.
if (Class == IC_LoadWeakRetained) {
- CallInst *CI =
- CallInst::Create(getRetainCallee(F.getParent()), EarlierCall,
- "", Call);
+ Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Retain);
+ CallInst *CI = CallInst::Create(Decl, EarlierCall, "", Call);
CI->setTailCall();
}
// Zap the fully redundant load.
Changed = true;
// If the load has a builtin retain, insert a plain retain for it.
if (Class == IC_LoadWeakRetained) {
- CallInst *CI =
- CallInst::Create(getRetainCallee(F.getParent()), EarlierCall,
- "", Call);
+ Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Retain);
+ CallInst *CI = CallInst::Create(Decl, EarlierCall, "", Call);
CI->setTailCall();
}
// Zap the fully redundant load.
CallInst *Call = cast<CallInst>(Inst);
Value *Arg = Call->getArgOperand(0);
if (AllocaInst *Alloca = dyn_cast<AllocaInst>(Arg)) {
- for (Value::use_iterator UI = Alloca->use_begin(),
- UE = Alloca->use_end(); UI != UE; ++UI) {
- const Instruction *UserInst = cast<Instruction>(*UI);
+ for (User *U : Alloca->users()) {
+ const Instruction *UserInst = cast<Instruction>(U);
switch (GetBasicInstructionClass(UserInst)) {
case IC_InitWeak:
case IC_StoreWeak:
}
}
Changed = true;
- for (Value::use_iterator UI = Alloca->use_begin(),
- UE = Alloca->use_end(); UI != UE; ) {
+ for (auto UI = Alloca->user_begin(), UE = Alloca->user_end(); UI != UE;) {
CallInst *UserInst = cast<CallInst>(*UI++);
switch (GetBasicInstructionClass(UserInst)) {
case IC_InitWeak:
FindDependencies(CanChangeRetainCount, Arg,
BB, Autorelease, DepInsts, Visited, PA);
if (DepInsts.size() != 1)
- return 0;
+ return nullptr;
CallInst *Retain =
dyn_cast_or_null<CallInst>(*DepInsts.begin());
if (!Retain ||
!IsRetain(GetBasicInstructionClass(Retain)) ||
GetObjCArg(Retain) != Arg) {
- return 0;
+ return nullptr;
}
return Retain;
FindDependencies(NeedsPositiveRetainCount, Arg,
BB, Ret, DepInsts, V, PA);
if (DepInsts.size() != 1)
- return 0;
+ return nullptr;
CallInst *Autorelease =
dyn_cast_or_null<CallInst>(*DepInsts.begin());
if (!Autorelease)
- return 0;
+ return nullptr;
InstructionClass AutoreleaseClass = GetBasicInstructionClass(Autorelease);
if (!IsAutorelease(AutoreleaseClass))
- return 0;
+ return nullptr;
if (GetObjCArg(Autorelease) != Arg)
- return 0;
+ return nullptr;
return Autorelease;
}
// they are not, because they return their argument value. And objc_release
// calls finalizers which can have arbitrary side effects.
- // These are initialized lazily.
- AutoreleaseRVCallee = 0;
- ReleaseCallee = 0;
- RetainCallee = 0;
- RetainBlockCallee = 0;
- AutoreleaseCallee = 0;
+ // Initialize our runtime entry point cache.
+ EP.Initialize(&M);
return false;
}