1 //===- Andersens.cpp - Andersen's Interprocedural Alias Analysis ----------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines an implementation of Andersen's interprocedural alias
13 // In pointer analysis terms, this is a subset-based, flow-insensitive,
14 // field-sensitive, and context-insensitive pointer analysis algorithm.
16 // This algorithm is implemented as four stages:
17 // 1. Object identification.
18 // 2. Inclusion constraint identification.
19 // 3. Offline constraint graph optimization
20 // 4. Inclusion constraint solving.
22 // The object identification stage identifies all of the memory objects in the
23 // program, which includes globals, heap allocated objects, and stack allocated
26 // The inclusion constraint identification stage finds all inclusion constraints
27 // in the program by scanning the program, looking for pointer assignments and
28 // other statements that affect the points-to graph. For a statement like "A =
29 // B", this statement is processed to indicate that A can point to anything that
30 // B can point to. Constraints can handle copies, loads, and stores, and
33 // The offline constraint graph optimization portion includes offline variable
34 // substitution algorithms intended to compute pointer and location
35 // equivalences. Pointer equivalences are those pointers that will have the
36 // same points-to sets, and location equivalences are those variables that
37 // always appear together in points-to sets. It also includes an offline
38 // cycle detection algorithm that allows cycles to be collapsed sooner
41 // The inclusion constraint solving phase iteratively propagates the inclusion
42 // constraints until a fixed point is reached. This is an O(N^3) algorithm.
44 // Function constraints are handled as if they were structs with X fields.
45 // Thus, an access to argument X of function Y is an access to node index
46 // getNode(Y) + X. This representation allows handling of indirect calls
47 // without any issues. To wit, an indirect call Y(a,b) is equivalent to
48 // *(Y + 1) = a, *(Y + 2) = b.
49 // The return node for a function is always located at getNode(F) +
50 // CallReturnPos. The arguments start at getNode(F) + CallArgPos.
52 // Future Improvements:
54 //===----------------------------------------------------------------------===//
56 #define DEBUG_TYPE "anders-aa"
57 #include "llvm/Constants.h"
58 #include "llvm/DerivedTypes.h"
59 #include "llvm/Instructions.h"
60 #include "llvm/Module.h"
61 #include "llvm/Pass.h"
62 #include "llvm/Support/ErrorHandling.h"
63 #include "llvm/Support/InstIterator.h"
64 #include "llvm/Support/InstVisitor.h"
65 #include "llvm/Analysis/AliasAnalysis.h"
66 #include "llvm/Analysis/MemoryBuiltins.h"
67 #include "llvm/Analysis/Passes.h"
68 #include "llvm/Support/Debug.h"
69 #include "llvm/System/Atomic.h"
70 #include "llvm/ADT/Statistic.h"
71 #include "llvm/ADT/SparseBitVector.h"
72 #include "llvm/ADT/DenseSet.h"
81 // Determining the actual set of nodes the universal set can consist of is very
82 // expensive because it means propagating around very large sets. We rely on
83 // other analysis being able to determine which nodes can never be pointed to in
84 // order to disambiguate further than "points-to anything".
85 #define FULL_UNIVERSAL 0
// Pass statistics, printed when opt is run with -stats.
89 STATISTIC(NumIters , "Number of iterations to reach convergence");
91 STATISTIC(NumConstraints, "Number of constraints");
92 STATISTIC(NumNodes , "Number of nodes");
93 STATISTIC(NumUnified , "Number of variables unified");
94 STATISTIC(NumErased , "Number of redundant constraints erased");
// Sentinel values: SelfRep marks a union-find node that is its own
// representative; Unvisited marks a node not yet numbered by the SCC DFS.
// (They share the same bit pattern but are used in disjoint contexts.)
96 static const unsigned SelfRep = (unsigned)-1;
97 static const unsigned Unvisited = (unsigned)-1;
98 // Position of the function return node relative to the function node.
99 static const unsigned CallReturnPos = 1;
100 // Position of the function call node relative to the function node.
101 static const unsigned CallFirstArgPos = 2;
// BitmapKeyInfo - DenseMap key traits allowing SparseBitVector pointers to be
// used as hash-map keys (used by Set2PEClass below to map points-to sets to
// pointer-equivalence classes). Empty/tombstone keys are sentinel pointer
// values that can never be real SparseBitVector addresses.
// NOTE(review): this listing elides some lines of isEqual (the LHS == RHS
// fast path and the deep-comparison return), plus closing braces.
104 struct BitmapKeyInfo {
105 static inline SparseBitVector<> *getEmptyKey() {
106 return reinterpret_cast<SparseBitVector<> *>(-1);
108 static inline SparseBitVector<> *getTombstoneKey() {
109 return reinterpret_cast<SparseBitVector<> *>(-2);
111 static unsigned getHashValue(const SparseBitVector<> *bitmap) {
112 return bitmap->getHashValue();
114 static bool isEqual(const SparseBitVector<> *LHS,
115 const SparseBitVector<> *RHS) {
// Sentinel keys compare by pointer identity only; real keys are compared
// by contents (elided from this excerpt).
118 else if (LHS == getEmptyKey() || RHS == getEmptyKey()
119 || LHS == getTombstoneKey() || RHS == getTombstoneKey())
// Andersens - Andersen's inclusion-based points-to analysis. The class is
// simultaneously a ModulePass (driver), an AliasAnalysis implementation
// (query interface), and an InstVisitor (constraint collection over the IR).
// NOTE(review): this excerpt is a numbered listing with lines elided, so
// several statements and closing braces are not visible here.
126 class Andersens : public ModulePass, public AliasAnalysis,
127 private InstVisitor<Andersens> {
130 /// Constraint - Objects of this structure are used to represent the various
131 /// constraints identified by the algorithm. The constraints are 'copy',
132 /// for statements like "A = B", 'load' for statements like "A = *B",
133 /// 'store' for statements like "*A = B", and AddressOf for statements like
134 /// A = alloca; The Offset is applied as *(A + K) = B for stores,
135 /// A = *(B + K) for loads, and A = B + K for copies. It is
136 /// illegal on addressof constraints (because it is statically
137 /// resolvable to A = &C where C = B + K)
140 enum ConstraintType { Copy, Load, Store, AddressOf } Type;
145 Constraint(ConstraintType Ty, unsigned D, unsigned S, unsigned O = 0)
146 : Type(Ty), Dest(D), Src(S), Offset(O) {
147 assert((Offset == 0 || Ty != AddressOf) &&
148 "Offset is illegal on addressof constraints");
151 bool operator==(const Constraint &RHS) const {
152 return RHS.Type == Type
155 && RHS.Offset == Offset;
158 bool operator!=(const Constraint &RHS) const {
159 return !(*this == RHS);
// Arbitrary but total ordering so constraints can be sorted/deduplicated.
162 bool operator<(const Constraint &RHS) const {
163 if (RHS.Type != Type)
164 return RHS.Type < Type;
165 else if (RHS.Dest != Dest)
166 return RHS.Dest < Dest;
167 else if (RHS.Src != Src)
168 return RHS.Src < Src;
169 return RHS.Offset < Offset;
// DenseMap/DenseSet key traits for pairs of node indices (~0U-based
// sentinels for empty/tombstone; XOR hash).
173 // Information DenseSet requires implemented in order to be able to do
176 static inline std::pair<unsigned, unsigned> getEmptyKey() {
177 return std::make_pair(~0U, ~0U);
179 static inline std::pair<unsigned, unsigned> getTombstoneKey() {
180 return std::make_pair(~0U - 1, ~0U - 1);
182 static unsigned getHashValue(const std::pair<unsigned, unsigned> &P) {
183 return P.first ^ P.second;
185 static unsigned isEqual(const std::pair<unsigned, unsigned> &LHS,
186 const std::pair<unsigned, unsigned> &RHS) {
// Key traits letting whole Constraint objects live in a DenseSet (used to
// erase duplicate constraints).
191 struct ConstraintKeyInfo {
192 static inline Constraint getEmptyKey() {
193 return Constraint(Constraint::Copy, ~0U, ~0U, ~0U);
195 static inline Constraint getTombstoneKey() {
196 return Constraint(Constraint::Copy, ~0U - 1, ~0U - 1, ~0U - 1);
198 static unsigned getHashValue(const Constraint &C) {
199 return C.Src ^ C.Dest ^ C.Type ^ C.Offset;
201 static bool isEqual(const Constraint &LHS,
202 const Constraint &RHS) {
203 return LHS.Type == RHS.Type && LHS.Dest == RHS.Dest
204 && LHS.Src == RHS.Src && LHS.Offset == RHS.Offset;
208 // Node class - This class is used to represent a node in the constraint
209 // graph. Due to various optimizations, it is not always the case that
210 // there is a mapping from a Node to a Value. In particular, we add
211 // artificial Node's that represent the set of pointed-to variables shared
212 // for each location equivalent Node.
// Shared monotonically-increasing timestamp source for all Nodes.
215 static volatile sys::cas_flag Counter;
219 SparseBitVector<> *Edges;
220 SparseBitVector<> *PointsTo;
221 SparseBitVector<> *OldPointsTo;
222 std::list<Constraint> Constraints;
224 // Pointer and location equivalence labels
225 unsigned PointerEquivLabel;
226 unsigned LocationEquivLabel;
227 // Predecessor edges, both real and implicit
228 SparseBitVector<> *PredEdges;
229 SparseBitVector<> *ImplicitPredEdges;
230 // Set of nodes that point to us, only used for location equivalence.
231 SparseBitVector<> *PointedToBy;
232 // Number of incoming edges, used during variable substitution to early
233 // free the points-to sets
235 // True if our points-to set is in the Set2PEClass map
237 // True if our node has no indirect constraints (complex or otherwise)
239 // True if the node is address taken, *or* it is part of a group of nodes
240 // that must be kept together. This is set to true for functions and
241 // their arg nodes, which must be kept at the same position relative to
242 // their base function node.
245 // Nodes in cycles (or in equivalence classes) are united together using a
246 // standard union-find representation with path compression. NodeRep
247 // gives the index into GraphNodes for the representative Node.
250 // Modification timestamp. Assigned from Counter.
251 // Used for work list prioritization.
254 explicit Node(bool direct = true) :
255 Val(0), Edges(0), PointsTo(0), OldPointsTo(0),
256 PointerEquivLabel(0), LocationEquivLabel(0), PredEdges(0),
257 ImplicitPredEdges(0), PointedToBy(0), NumInEdges(0),
258 StoredInHash(false), Direct(direct), AddressTaken(false),
259 NodeRep(SelfRep), Timestamp(0) { }
// setValue - Attach an LLVM Value to this node; may only be done once.
261 Node *setValue(Value *V) {
262 assert(Val == 0 && "Value already set for this node!");
267 /// getValue - Return the LLVM value corresponding to this node.
269 Value *getValue() const { return Val; }
271 /// addPointerTo - Add a pointer to the list of pointees of this node,
272 /// returning true if this caused a new pointer to be added, or false if
273 /// we already knew about the points-to relation.
274 bool addPointerTo(unsigned Node) {
275 return PointsTo->test_and_set(Node);
278 /// intersects - Return true if the points-to set of this node intersects
279 /// with the points-to set of the specified node.
280 bool intersects(Node *N) const;
282 /// intersectsIgnoring - Return true if the points-to set of this node
283 /// intersects with the points-to set of the specified node on any nodes
284 /// except for the specified node to ignore.
285 bool intersectsIgnoring(Node *N, unsigned) const;
287 // Timestamp a node (used for work list prioritization)
289 Timestamp = sys::AtomicIncrement(&Counter);
// isRep - True when this node is its own union-find representative
// (SelfRep is (unsigned)-1, hence negative when viewed as int).
294 return( (int) NodeRep < 0 );
// WorkListElement - A (node, timestamp-at-insertion) pair queued for
// processing; the saved timestamp lets stale entries be skipped on pop.
298 struct WorkListElement {
301 WorkListElement(Node* n, unsigned t) : node(n), Timestamp(t) {}
303 // Note that we reverse the sense of the comparison because we
304 // actually want to give low timestamps the priority over high,
305 // whereas priority is typically interpreted as a greater value is
306 // given high priority.
307 bool operator<(const WorkListElement& that) const {
308 return( this->Timestamp > that.Timestamp );
312 // Priority-queue based work list specialized for Nodes.
314 std::priority_queue<WorkListElement> Q;
317 void insert(Node* n) {
318 Q.push( WorkListElement(n, n->Timestamp) );
321 // We automatically discard non-representative nodes and nodes
322 // that were in the work list twice (we keep a copy of the
323 // timestamp in the work list so we can detect this situation by
324 // comparing against the node's current timestamp).
326 while( !Q.empty() ) {
327 WorkListElement x = Q.top(); Q.pop();
328 Node* INode = x.node;
330 if( INode->isRep() &&
331 INode->Timestamp == x.Timestamp ) {
343 /// GraphNodes - This vector is populated as part of the object
344 /// identification stage of the analysis, which populates this vector with a
345 /// node for each memory object and fills in the ValueNodes map.
346 std::vector<Node> GraphNodes;
348 /// ValueNodes - This map indicates the Node that a particular Value* is
349 /// represented by. This contains entries for all pointers.
350 DenseMap<Value*, unsigned> ValueNodes;
352 /// ObjectNodes - This map contains entries for each memory object in the
353 /// program: globals, alloca's and mallocs.
354 DenseMap<Value*, unsigned> ObjectNodes;
356 /// ReturnNodes - This map contains an entry for each function in the
357 /// program that returns a value.
358 DenseMap<Function*, unsigned> ReturnNodes;
360 /// VarargNodes - This map contains the entry used to represent all pointers
361 /// passed through the varargs portion of a function call for a particular
362 /// function. An entry is not present in this map for functions that do not
363 /// take variable arguments.
364 DenseMap<Function*, unsigned> VarargNodes;
367 /// Constraints - This vector contains a list of all of the constraints
368 /// identified by the program.
369 std::vector<Constraint> Constraints;
371 // Map from graph node to maximum K value that is allowed (for functions,
372 // this is equivalent to the number of arguments + CallFirstArgPos)
373 std::map<unsigned, unsigned> MaxK;
375 /// This enum defines the GraphNodes indices that correspond to important
383 // Stack for Tarjan's
384 std::stack<unsigned> SCCStack;
385 // Map from Graph Node to DFS number
386 std::vector<unsigned> Node2DFS;
387 // Map from Graph Node to Deleted from graph.
388 std::vector<bool> Node2Deleted;
389 // Same as Node Maps, but implemented as std::map because it is faster to
391 std::map<unsigned, unsigned> Tarjan2DFS;
392 std::map<unsigned, bool> Tarjan2Deleted;
393 // Current DFS number
398 WorkList *CurrWL, *NextWL; // "current" and "next" work lists
400 // Offline variable substitution related things
402 // Temporary rep storage, used because we can't collapse SCC's in the
403 // predecessor graph by uniting the variables permanently, we can only do so
404 // for the successor graph.
405 std::vector<unsigned> VSSCCRep;
406 // Mapping from node to whether we have visited it during SCC finding yet.
407 std::vector<bool> Node2Visited;
408 // During variable substitution, we create unknowns to represent the unknown
409 // value that is a dereference of a variable. These nodes are known as
410 // "ref" nodes (since they represent the value of dereferences).
411 unsigned FirstRefNode;
412 // During HVN, we represent address-taken nodes as if they were
413 // unknown (since HVN, unlike HU, does not evaluate unions).
414 unsigned FirstAdrNode;
415 // Current pointer equivalence class number
417 // Mapping from points-to sets to equivalence classes
418 typedef DenseMap<SparseBitVector<> *, unsigned, BitmapKeyInfo> BitVectorMap;
419 BitVectorMap Set2PEClass;
420 // Mapping from pointer equivalences to the representative node. -1 if we
421 // have no representative node for this pointer equivalence class yet.
422 std::vector<int> PEClass2Node;
423 // Mapping from pointer equivalences to representative node. This includes
424 // pointer equivalent but not location equivalent variables. -1 if we have
425 // no representative node for this pointer equivalence class yet.
426 std::vector<int> PENLEClass2Node;
427 // Union/Find for HCD
428 std::vector<unsigned> HCDSCCRep;
429 // HCD's offline-detected cycles; "Statically DeTected"
430 // -1 if not part of such a cycle, otherwise a representative node.
431 std::vector<int> SDT;
432 // Whether to use SDT (UniteNodes can use it during solving, but not before)
437 Andersens() : ModulePass(&ID) {}
// runOnModule - Pass driver: collect constraints, solve, then free what is
// no longer needed for answering alias queries.
439 bool runOnModule(Module &M) {
440 InitializeAliasAnalysis(this);
442 CollectConstraints(M);
444 #define DEBUG_TYPE "anders-aa-constraints"
445 DEBUG(PrintConstraints());
447 #define DEBUG_TYPE "anders-aa"
449 DEBUG(PrintPointsToGraph());
451 // Free the constraints list, as we don't need it to respond to alias
// swap-with-temporary actually releases the vector's capacity,
// unlike clear().
453 std::vector<Constraint>().swap(Constraints);
454 //These are needed for Print() (-analyze in opt)
455 //ObjectNodes.clear();
456 //ReturnNodes.clear();
457 //VarargNodes.clear();
461 void releaseMemory() {
462 // FIXME: Until we have transitively required passes working correctly,
463 // this cannot be enabled! Otherwise, using -count-aa with the pass
464 // causes memory to be freed too early. :(
466 // The memory objects and ValueNodes data structures are the only ones that
467 // are still live after construction.
468 std::vector<Node>().swap(GraphNodes);
473 virtual void getAnalysisUsage(AnalysisUsage &AU) const {
474 AliasAnalysis::getAnalysisUsage(AU);
475 AU.setPreservesAll(); // Does not transform code
478 /// getAdjustedAnalysisPointer - This method is used when a pass implements
479 /// an analysis interface through multiple inheritance. If needed, it
480 /// should override this to adjust the this pointer as needed for the
481 /// specified pass info.
482 virtual void *getAdjustedAnalysisPointer(const PassInfo *PI) {
483 if (PI->isPassID(&AliasAnalysis::ID))
484 return (AliasAnalysis*)this;
488 //------------------------------------------------
489 // Implement the AliasAnalysis API
491 AliasResult alias(const Value *V1, unsigned V1Size,
492 const Value *V2, unsigned V2Size);
493 virtual ModRefResult getModRefInfo(CallSite CS, Value *P, unsigned Size);
494 virtual ModRefResult getModRefInfo(CallSite CS1, CallSite CS2);
495 bool pointsToConstantMemory(const Value *P);
// deleteValue/copyValue - Keep our ValueNodes map in sync with IR updates,
// then chain to the next alias analysis in the group.
497 virtual void deleteValue(Value *V) {
499 getAnalysis<AliasAnalysis>().deleteValue(V);
502 virtual void copyValue(Value *From, Value *To) {
503 ValueNodes[To] = ValueNodes[From];
504 getAnalysis<AliasAnalysis>().copyValue(From, To);
508 /// getNode - Return the node corresponding to the specified pointer scalar.
510 unsigned getNode(Value *V) {
// Non-global constants are resolved structurally rather than through
// the ValueNodes map.
511 if (Constant *C = dyn_cast<Constant>(V))
512 if (!isa<GlobalValue>(C))
513 return getNodeForConstantPointer(C);
515 DenseMap<Value*, unsigned>::iterator I = ValueNodes.find(V);
516 if (I == ValueNodes.end()) {
520 llvm_unreachable("Value does not have a node in the points-to graph!");
525 /// getObject - Return the node corresponding to the memory object for the
526 /// specified global or allocation instruction.
527 unsigned getObject(Value *V) const {
528 DenseMap<Value*, unsigned>::const_iterator I = ObjectNodes.find(V);
529 assert(I != ObjectNodes.end() &&
530 "Value does not have an object in the points-to graph!");
534 /// getReturnNode - Return the node representing the return value for the
535 /// specified function.
536 unsigned getReturnNode(Function *F) const {
537 DenseMap<Function*, unsigned>::const_iterator I = ReturnNodes.find(F);
538 assert(I != ReturnNodes.end() && "Function does not return a value!");
542 /// getVarargNode - Return the node representing the variable arguments
543 /// formal for the specified function.
544 unsigned getVarargNode(Function *F) const {
545 DenseMap<Function*, unsigned>::const_iterator I = VarargNodes.find(F);
546 assert(I != VarargNodes.end() && "Function does not take var args!");
550 /// getNodeValue - Get the node for the specified LLVM value and set the
551 /// value for it to be the specified value.
552 unsigned getNodeValue(Value &V) {
553 unsigned Index = getNode(&V);
554 GraphNodes[Index].setValue(&V);
// Union-find over graph nodes (definitions appear later in the file).
558 unsigned UniteNodes(unsigned First, unsigned Second,
559 bool UnionByRank = true);
560 unsigned FindNode(unsigned Node);
561 unsigned FindNode(unsigned Node) const;
// Main phases of the analysis, in execution order.
563 void IdentifyObjects(Module &M);
564 void CollectConstraints(Module &M);
565 bool AnalyzeUsesOfFunction(Value *);
566 void CreateConstraintGraph();
567 void OptimizeConstraints();
568 unsigned FindEquivalentNode(unsigned, unsigned);
569 void ClumpAddressTaken();
570 void RewriteConstraints();
574 void Search(unsigned Node);
575 void UnitePointerEquivalences();
576 void SolveConstraints();
577 bool QueryNode(unsigned Node);
578 void Condense(unsigned Node);
579 void HUValNum(unsigned Node);
580 void HVNValNum(unsigned Node);
581 unsigned getNodeForConstantPointer(Constant *C);
582 unsigned getNodeForConstantPointerTarget(Constant *C);
583 void AddGlobalInitializerConstraints(unsigned, Constant *C);
585 void AddConstraintsForNonInternalLinkage(Function *F);
586 void AddConstraintsForCall(CallSite CS, Function *F);
587 bool AddConstraintsForExternalCall(CallSite CS, Function *F);
// Debug-printing helpers.
590 void PrintNode(const Node *N) const;
591 void PrintConstraints() const ;
592 void PrintConstraint(const Constraint &) const;
593 void PrintLabels() const;
594 void PrintPointsToGraph() const;
596 //===------------------------------------------------------------------===//
597 // Instruction visitation methods for adding constraints
599 friend class InstVisitor<Andersens>;
600 void visitReturnInst(ReturnInst &RI);
601 void visitInvokeInst(InvokeInst &II) { visitCallSite(CallSite(&II)); }
602 void visitCallInst(CallInst &CI) {
// Recognized malloc-like calls are modeled as allocations, not calls.
603 if (isMalloc(&CI)) visitAlloc(CI);
604 else visitCallSite(CallSite(&CI));
606 void visitCallSite(CallSite CS);
607 void visitAllocaInst(AllocaInst &I);
608 void visitAlloc(Instruction &I);
609 void visitLoadInst(LoadInst &LI);
610 void visitStoreInst(StoreInst &SI);
611 void visitGetElementPtrInst(GetElementPtrInst &GEP);
612 void visitPHINode(PHINode &PN);
613 void visitCastInst(CastInst &CI);
614 void visitICmpInst(ICmpInst &ICI) {} // NOOP!
615 void visitFCmpInst(FCmpInst &ICI) {} // NOOP!
616 void visitSelectInst(SelectInst &SI);
617 void visitVAArg(VAArgInst &I);
618 void visitInstruction(Instruction &I);
620 //===------------------------------------------------------------------===//
621 // Implement Analyze interface
623 void print(raw_ostream &O, const Module*) const {
624 PrintPointsToGraph();
// Pass ID, command-line registration ("-anders-aa"), and membership in the
// AliasAnalysis analysis group.
629 char Andersens::ID = 0;
630 static RegisterPass<Andersens>
631 X("anders-aa", "Andersen's Interprocedural Alias Analysis (experimental)",
633 static RegisterAnalysisGroup<AliasAnalysis> Y(X);
635 // Initialize Timestamp Counter (static).
636 volatile llvm::sys::cas_flag Andersens::Node::Counter = 0;
638 ModulePass *llvm::createAndersensPass() { return new Andersens(); }
640 //===----------------------------------------------------------------------===//
641 // AliasAnalysis Interface Implementation
642 //===----------------------------------------------------------------------===//
// alias - Answer an alias query from the solved points-to graph: if the two
// pointers' points-to sets share no object (ignoring the null object), they
// cannot alias; otherwise defer to the next analysis in the chain.
644 AliasAnalysis::AliasResult Andersens::alias(const Value *V1, unsigned V1Size,
645 const Value *V2, unsigned V2Size) {
646 Node *N1 = &GraphNodes[FindNode(getNode(const_cast<Value*>(V1)))];
647 Node *N2 = &GraphNodes[FindNode(getNode(const_cast<Value*>(V2)))];
649 // Check to see if the two pointers are known to not alias. They don't alias
650 // if their points-to sets do not intersect.
651 if (!N1->intersectsIgnoring(N2, NullObject))
// NOTE(review): the "return NoAlias;" consequent of the test above is
// elided from this excerpt (original line 652).
654 return AliasAnalysis::alias(V1, V1Size, V2, V2Size);
// getModRefInfo - For calls to external declarations, prove NoModRef when the
// queried pointer provably never escapes to the outside world; otherwise
// chain to the next alias analysis.
657 AliasAnalysis::ModRefResult
658 Andersens::getModRefInfo(CallSite CS, Value *P, unsigned Size) {
659 // The only thing useful that we can contribute for mod/ref information is
660 // when calling external function calls: if we know that memory never escapes
661 // from the program, it cannot be modified by an external call.
663 // NOTE: This is not really safe, at least not when the entire program is not
664 // available. The deal is that the external function could call back into the
665 // program and modify stuff. We ignore this technical niggle for now. This
666 // is, after all, a "research quality" implementation of Andersen's analysis.
667 if (Function *F = CS.getCalledFunction())
668 if (F->isDeclaration()) {
669 Node *N1 = &GraphNodes[FindNode(getNode(P))];
671 if (N1->PointsTo->empty())
// NOTE(review): in this excerpt the two escape tests below appear
// back-to-back; the preprocessor lines selecting between them
// (likely a FULL_UNIVERSAL conditional) seem to be elided — confirm
// against the complete source.
674 if (!UniversalSet->PointsTo->test(FindNode(getNode(P))))
675 return NoModRef; // Universal set does not contain P
677 if (!N1->PointsTo->test(UniversalSet))
678 return NoModRef; // P doesn't point to the universal set.
682 return AliasAnalysis::getModRefInfo(CS, P, Size);
// getModRefInfo (call-vs-call form) - This analysis adds no call-vs-call
// precision; delegate directly to the next analysis in the chain.
685 AliasAnalysis::ModRefResult
686 Andersens::getModRefInfo(CallSite CS1, CallSite CS2) {
687 return AliasAnalysis::getModRefInfo(CS1,CS2);
690 /// pointsToConstantMemory - If we can determine that this pointer only points
691 /// to constant memory, return true. In practice, this means that if the
692 /// pointer can only point to constant globals, functions, or the null pointer,
695 bool Andersens::pointsToConstantMemory(const Value *P) {
696 Node *N = &GraphNodes[FindNode(getNode(const_cast<Value*>(P)))];
// Walk every object P may point to; bail to the chained analysis on the
// first pointee that is not provably constant.
699 for (SparseBitVector<>::iterator bi = N->PointsTo->begin();
700 bi != N->PointsTo->end();
// NOTE(review): the loop increment and the "unsigned i = *bi;" binding
// used below are elided from this excerpt.
703 Node *Pointee = &GraphNodes[i];
704 if (Value *V = Pointee->getValue()) {
705 if (!isa<GlobalValue>(V) || (isa<GlobalVariable>(V) &&
706 !cast<GlobalVariable>(V)->isConstant()))
707 return AliasAnalysis::pointsToConstantMemory(P);
710 return AliasAnalysis::pointsToConstantMemory(P);
717 //===----------------------------------------------------------------------===//
718 // Object Identification Phase
719 //===----------------------------------------------------------------------===//
721 /// IdentifyObjects - This stage scans the program, adding an entry to the
722 /// GraphNodes list for each memory object in the program (global, stack or
723 /// heap), and populates the ValueNodes and ObjectNodes maps for these objects.
725 void Andersens::IdentifyObjects(Module &M) {
726 unsigned NumObjects = 0;
728 // Object #0 is always the universal set: the object that we don't know
730 assert(NumObjects == UniversalSet && "Something changed!");
733 // Object #1 always represents the null pointer.
734 assert(NumObjects == NullPtr && "Something changed!");
737 // Object #2 always represents the null object (the object pointed to by null)
738 assert(NumObjects == NullObject && "Something changed!");
741 // Add all the globals first.
742 for (Module::global_iterator I = M.global_begin(), E = M.global_end();
// Each global gets two nodes: the memory object and the pointer to it.
744 ObjectNodes[I] = NumObjects++;
745 ValueNodes[I] = NumObjects++;
748 // Add nodes for all of the functions and the instructions inside of them.
749 for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
750 // The function itself is a memory object.
751 unsigned First = NumObjects;
752 ValueNodes[F] = NumObjects++;
753 if (F->getFunctionType()->getReturnType()->isPointerTy())
754 ReturnNodes[F] = NumObjects++;
755 if (F->getFunctionType()->isVarArg())
756 VarargNodes[F] = NumObjects++;
759 // Add nodes for all of the incoming pointer arguments.
760 for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end();
763 if (I->getType()->isPointerTy())
764 ValueNodes[I] = NumObjects++;
// MaxK bounds the field offset K usable on this function's node group
// (function node + return/vararg/argument nodes).
766 MaxK[First] = NumObjects - First;
768 // Scan the function body, creating a memory object for each heap/stack
769 // allocation in the body of the function and a node to represent all
770 // pointer values defined by instructions and used as operands.
771 for (inst_iterator II = inst_begin(F), E = inst_end(F); II != E; ++II) {
772 // If this is a heap or stack allocation, create a node for the memory
774 if (II->getType()->isPointerTy()) {
775 ValueNodes[&*II] = NumObjects++;
776 if (AllocaInst *AI = dyn_cast<AllocaInst>(&*II))
777 ObjectNodes[AI] = NumObjects++;
778 else if (isMalloc(&*II))
779 ObjectNodes[&*II] = NumObjects++;
782 // Calls to inline asm need to be added as well because the callee isn't
783 // referenced anywhere else.
784 if (CallInst *CI = dyn_cast<CallInst>(&*II)) {
785 Value *Callee = CI->getCalledValue();
786 if (isa<InlineAsm>(Callee))
787 ValueNodes[Callee] = NumObjects++;
792 // Now that we know how many objects to create, make them all now!
793 GraphNodes.resize(NumObjects);
794 NumNodes += NumObjects;
797 //===----------------------------------------------------------------------===//
798 // Constraint Identification Phase
799 //===----------------------------------------------------------------------===//
801 /// getNodeForConstantPointer - Return the node corresponding to the constant
// Null/undef map to the null-pointer node; globals map to their value node;
// GEP/bitcast constant expressions are looked through recursively.
// NOTE(review): several "return ..." consequents are elided from this
// excerpt (original lines 805, 807, 809, 815, 818, 820-822).
803 unsigned Andersens::getNodeForConstantPointer(Constant *C) {
804 assert(C->getType()->isPointerTy() && "Not a constant pointer!");
806 if (isa<ConstantPointerNull>(C) || isa<UndefValue>(C))
808 else if (GlobalValue *GV = dyn_cast<GlobalValue>(C))
810 else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
811 switch (CE->getOpcode()) {
812 case Instruction::GetElementPtr:
813 return getNodeForConstantPointer(CE->getOperand(0));
814 case Instruction::IntToPtr:
816 case Instruction::BitCast:
817 return getNodeForConstantPointer(CE->getOperand(0));
819 errs() << "Constant Expr not yet handled: " << *CE << "\n";
823 llvm_unreachable("Unknown constant pointer!");
828 /// getNodeForConstantPointerTarget - Return the node POINTED TO by the
829 /// specified constant pointer.
// Mirrors getNodeForConstantPointer, but resolves to the object node
// (getObject) instead of the pointer's value node.
830 unsigned Andersens::getNodeForConstantPointerTarget(Constant *C) {
831 assert(C->getType()->isPointerTy() && "Not a constant pointer!");
833 if (isa<ConstantPointerNull>(C))
835 else if (GlobalValue *GV = dyn_cast<GlobalValue>(C))
836 return getObject(GV);
837 else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
838 switch (CE->getOpcode()) {
839 case Instruction::GetElementPtr:
840 return getNodeForConstantPointerTarget(CE->getOperand(0));
841 case Instruction::IntToPtr:
843 case Instruction::BitCast:
844 return getNodeForConstantPointerTarget(CE->getOperand(0));
846 errs() << "Constant Expr not yet handled: " << *CE << "\n";
850 llvm_unreachable("Unknown constant pointer!");
855 /// AddGlobalInitializerConstraints - Add inclusion constraints for the memory
856 /// object N, which contains values indicated by C.
// Scalar pointer initializers add a Copy constraint from the constant's
// node; aggregates recurse over their elements, flattening all fields into
// the single object node NodeIndex.
857 void Andersens::AddGlobalInitializerConstraints(unsigned NodeIndex,
859 if (C->getType()->isSingleValueType()) {
860 if (C->getType()->isPointerTy())
861 Constraints.push_back(Constraint(Constraint::Copy, NodeIndex,
862 getNodeForConstantPointer(C)));
863 } else if (C->isNullValue()) {
864 Constraints.push_back(Constraint(Constraint::Copy, NodeIndex,
867 } else if (!isa<UndefValue>(C)) {
868 // If this is an array or struct, include constraints for each element.
869 assert(isa<ConstantArray>(C) || isa<ConstantStruct>(C));
870 for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i)
871 AddGlobalInitializerConstraints(NodeIndex,
872 cast<Constant>(C->getOperand(i)));
876 /// AddConstraintsForNonInternalLinkage - If this function does not have
877 /// internal linkage, realize that we can't trust anything passed into or
878 /// returned by this function.
879 void Andersens::AddConstraintsForNonInternalLinkage(Function *F) {
880 for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E; ++I)
881 if (I->getType()->isPointerTy())
882 // If this is an argument of an externally accessible function, the
883 // incoming pointer might point to anything.
884 Constraints.push_back(Constraint(Constraint::Copy, getNode(I),
888 /// AddConstraintsForCall - If this is a call to a "known" function, add the
889 /// constraints and return true. If this is a call to an unknown function,
891 bool Andersens::AddConstraintsForExternalCall(CallSite CS, Function *F) {
892 assert(F->isDeclaration() && "Not an external function!");
894 // These functions don't induce any points-to constraints.
// NOTE(review): this allow-list contains duplicates ("fgetc", "ftell" and
// "rewind" each appear twice), and "create" is suspicious — the POSIX call
// is spelled "creat". Worth confirming against the upstream source.
895 if (F->getName() == "atoi" || F->getName() == "atof" ||
896 F->getName() == "atol" || F->getName() == "atoll" ||
897 F->getName() == "remove" || F->getName() == "unlink" ||
898 F->getName() == "rename" || F->getName() == "memcmp" ||
899 F->getName() == "llvm.memset" ||
900 F->getName() == "strcmp" || F->getName() == "strncmp" ||
901 F->getName() == "execl" || F->getName() == "execlp" ||
902 F->getName() == "execle" || F->getName() == "execv" ||
903 F->getName() == "execvp" || F->getName() == "chmod" ||
904 F->getName() == "puts" || F->getName() == "write" ||
905 F->getName() == "open" || F->getName() == "create" ||
906 F->getName() == "truncate" || F->getName() == "chdir" ||
907 F->getName() == "mkdir" || F->getName() == "rmdir" ||
908 F->getName() == "read" || F->getName() == "pipe" ||
909 F->getName() == "wait" || F->getName() == "time" ||
910 F->getName() == "stat" || F->getName() == "fstat" ||
911 F->getName() == "lstat" || F->getName() == "strtod" ||
912 F->getName() == "strtof" || F->getName() == "strtold" ||
913 F->getName() == "fopen" || F->getName() == "fdopen" ||
914 F->getName() == "freopen" ||
915 F->getName() == "fflush" || F->getName() == "feof" ||
916 F->getName() == "fileno" || F->getName() == "clearerr" ||
917 F->getName() == "rewind" || F->getName() == "ftell" ||
918 F->getName() == "ferror" || F->getName() == "fgetc" ||
919 F->getName() == "fgetc" || F->getName() == "_IO_getc" ||
920 F->getName() == "fwrite" || F->getName() == "fread" ||
921 F->getName() == "fgets" || F->getName() == "ungetc" ||
922 F->getName() == "fputc" ||
923 F->getName() == "fputs" || F->getName() == "putc" ||
924 F->getName() == "ftell" || F->getName() == "rewind" ||
925 F->getName() == "_IO_putc" || F->getName() == "fseek" ||
926 F->getName() == "fgetpos" || F->getName() == "fsetpos" ||
927 F->getName() == "printf" || F->getName() == "fprintf" ||
928 F->getName() == "sprintf" || F->getName() == "vprintf" ||
929 F->getName() == "vfprintf" || F->getName() == "vsprintf" ||
930 F->getName() == "scanf" || F->getName() == "fscanf" ||
931 F->getName() == "sscanf" || F->getName() == "__assert_fail" ||
932 F->getName() == "modf")
936 // These functions do induce points-to edges.
937 if (F->getName() == "llvm.memcpy" ||
938 F->getName() == "llvm.memmove" ||
939 F->getName() == "memmove") {
941 const FunctionType *FTy = F->getFunctionType();
942 if (FTy->getNumParams() > 1 &&
943 FTy->getParamType(0)->isPointerTy() &&
944 FTy->getParamType(1)->isPointerTy()) {
946 // *Dest = *Src, which requires an artificial graph node to represent the
947 // constraint. It is broken up into *Dest = temp, temp = *Src
948 unsigned FirstArg = getNode(CS.getArgument(0));
949 unsigned SecondArg = getNode(CS.getArgument(1));
// Fresh intermediate node; must be fetched before push_back resizes.
950 unsigned TempArg = GraphNodes.size();
951 GraphNodes.push_back(Node());
952 Constraints.push_back(Constraint(Constraint::Store,
954 Constraints.push_back(Constraint(Constraint::Load,
955 TempArg, SecondArg));
956 // In addition, Dest = Src
957 Constraints.push_back(Constraint(Constraint::Copy,
958 FirstArg, SecondArg));
// Result aliases the first argument for these "returns its input" APIs.
964 if (F->getName() == "realloc" || F->getName() == "strchr" ||
965 F->getName() == "strrchr" || F->getName() == "strstr" ||
966 F->getName() == "strtok") {
967 const FunctionType *FTy = F->getFunctionType();
968 if (FTy->getNumParams() > 0 &&
969 FTy->getParamType(0)->isPointerTy()) {
970 Constraints.push_back(Constraint(Constraint::Copy,
971 getNode(CS.getInstruction()),
972 getNode(CS.getArgument(0))));
982 /// AnalyzeUsesOfFunction - Look at all of the users of the specified function.
983 /// If this is used by anything complex (i.e., the address escapes), return
/// true; benign uses (direct calls, loads, null comparisons) return false.
985 bool Andersens::AnalyzeUsesOfFunction(Value *V) {
// Non-pointer values are conservatively treated as escaping.
987 if (!V->getType()->isPointerTy()) return true;
989 for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ++UI)
990 if (isa<LoadInst>(*UI)) {
992 } else if (StoreInst *SI = dyn_cast<StoreInst>(*UI)) {
// Storing *through* V (V is the pointer operand) is fine; storing V
// itself into memory lets the address escape.
993 if (V == SI->getOperand(1)) {
995 } else if (SI->getOperand(1)) {
996 return true; // Storing the pointer
998 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(*UI)) {
// The GEP result aliases V, so recursively check its uses too.
999 if (AnalyzeUsesOfFunction(GEP)) return true;
1000 } else if (isFreeCall(*UI)) {
1002 } else if (CallInst *CI = dyn_cast<CallInst>(*UI)) {
1003 // Make sure that this is just the function being called, not that it is
1004 // passing into the function.
// Starts at operand 1 to skip the callee slot — assumes this LLVM
// version's CallInst operand layout; confirm before porting.
1005 for (unsigned i = 1, e = CI->getNumOperands(); i != e; ++i)
1006 if (CI->getOperand(i) == V) return true;
1007 } else if (InvokeInst *II = dyn_cast<InvokeInst>(*UI)) {
1008 // Make sure that this is just the function being called, not that it is
1009 // passing into the function.
// Starts at operand 3 to skip the callee and destination-block operands
// of InvokeInst in this LLVM version — confirm the layout when porting.
1010 for (unsigned i = 3, e = II->getNumOperands(); i != e; ++i)
1011 if (II->getOperand(i) == V) return true;
1012 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(*UI)) {
// GEP/bitcast constant expressions alias V; anything else escapes.
1013 if (CE->getOpcode() == Instruction::GetElementPtr ||
1014 CE->getOpcode() == Instruction::BitCast) {
1015 if (AnalyzeUsesOfFunction(CE))
1020 } else if (ICmpInst *ICI = dyn_cast<ICmpInst>(*UI)) {
1021 if (!isa<ConstantPointerNull>(ICI->getOperand(1)))
1022 return true; // Allow comparison against null.
1029 /// CollectConstraints - This stage scans the program, adding a constraint to
1030 /// the Constraints list for each instruction in the program that induces a
1031 /// constraint, and setting up the initial points-to graph.
1033 void Andersens::CollectConstraints(Module &M) {
1034 // First, the universal set points to itself.
1035 Constraints.push_back(Constraint(Constraint::AddressOf, UniversalSet,
1037 Constraints.push_back(Constraint(Constraint::Store, UniversalSet,
1040 // Next, the null pointer points to the null object.
1041 Constraints.push_back(Constraint(Constraint::AddressOf, NullPtr, NullObject));
1043 // Next, add any constraints on global variables and their initializers.
1044 for (Module::global_iterator I = M.global_begin(), E = M.global_end();
1046 // Associate the address of the global object as pointing to the memory for
1047 // the global: &G = <G memory>
1048 unsigned ObjectIndex = getObject(I);
1049 Node *Object = &GraphNodes[ObjectIndex];
1050 Object->setValue(I);
1051 Constraints.push_back(Constraint(Constraint::AddressOf, getNodeValue(*I),
1054 if (I->hasDefinitiveInitializer()) {
1055 AddGlobalInitializerConstraints(ObjectIndex, I->getInitializer());
1057 // If it doesn't have an initializer (i.e. it's defined in another
1058 // translation unit), it points to the universal set.
1059 Constraints.push_back(Constraint(Constraint::Copy, ObjectIndex,
// Now process every function: set up its return/vararg/argument nodes and
// then either visit its body (defined) or model it conservatively
// (external declaration).
1064 for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
1065 // Set up the return value node.
1066 if (F->getFunctionType()->getReturnType()->isPointerTy())
1067 GraphNodes[getReturnNode(F)].setValue(F);
1068 if (F->getFunctionType()->isVarArg())
1069 GraphNodes[getVarargNode(F)].setValue(F);
1071 // Set up incoming argument nodes.
1072 for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end();
1074 if (I->getType()->isPointerTy())
1077 // At some point we should just add constraints for the escaping functions
1078 // at solve time, but this slows down solving. For now, we simply mark
1079 // address taken functions as escaping and treat them as external.
1080 if (!F->hasLocalLinkage() || AnalyzeUsesOfFunction(F))
1081 AddConstraintsForNonInternalLinkage(F);
1083 if (!F->isDeclaration()) {
1084 // Scan the function body, creating a memory object for each heap/stack
1085 // allocation in the body of the function and a node to represent all
1086 // pointer values defined by instructions and used as operands.
1089 // External functions that return pointers return the universal set.
1090 if (F->getFunctionType()->getReturnType()->isPointerTy())
1091 Constraints.push_back(Constraint(Constraint::Copy,
1095 // Any pointers that are passed into the function have the universal set
1096 // stored into them.
1097 for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end();
1099 if (I->getType()->isPointerTy()) {
1100 // Pointers passed into external functions could have anything stored
1102 Constraints.push_back(Constraint(Constraint::Store, getNode(I),
1104 // Memory objects passed into external function calls can have the
1105 // universal set point to them.
1107 Constraints.push_back(Constraint(Constraint::Copy,
1111 Constraints.push_back(Constraint(Constraint::Copy,
1117 // If this is an external varargs function, it can also store pointers
1118 // into any pointers passed through the varargs section.
1119 if (F->getFunctionType()->isVarArg())
1120 Constraints.push_back(Constraint(Constraint::Store, getVarargNode(F),
// Record the total number of constraints generated (analysis statistic).
1124 NumConstraints += Constraints.size();
// Catch-all visitor: every opcode with no points-to effect falls through the
// switch; anything unrecognized is a hard error so new opcodes can't be
// silently mishandled. (Some original lines — e.g. the NDEBUG guard around
// the early return — are elided in this excerpt.)
1128 void Andersens::visitInstruction(Instruction &I) {
1130 return; // This function is just a big assert.
1132 if (isa<BinaryOperator>(I))
1134 // Most instructions don't have any effect on pointer values.
1135 switch (I.getOpcode()) {
1136 case Instruction::Br:
1137 case Instruction::Switch:
1138 case Instruction::Unwind:
1139 case Instruction::Unreachable:
1140 case Instruction::ICmp:
1141 case Instruction::FCmp:
1144 // Is this something we aren't handling yet?
1145 errs() << "Unknown instruction: " << I;
1146 llvm_unreachable(0);
1150 void Andersens::visitAllocaInst(AllocaInst &I) {
// Common handler for allocation instructions: create a fresh memory object
// for the allocation and make the result pointer point at it.
1154 void Andersens::visitAlloc(Instruction &I) {
1155 unsigned ObjectIndex = getObject(&I);
1156 GraphNodes[ObjectIndex].setValue(&I);
1157 Constraints.push_back(Constraint(Constraint::AddressOf, getNodeValue(I),
// Returning a pointer copies it into the enclosing function's return node.
1161 void Andersens::visitReturnInst(ReturnInst &RI) {
1162 if (RI.getNumOperands() && RI.getOperand(0)->getType()->isPointerTy())
1163 // return V --> <Copy/retval{F}/v>
1164 Constraints.push_back(Constraint(Constraint::Copy,
1165 getReturnNode(RI.getParent()->getParent()),
1166 getNode(RI.getOperand(0))));
// Loading a pointer from memory induces a Load constraint; loads of
// non-pointer values are irrelevant to the points-to graph.
1169 void Andersens::visitLoadInst(LoadInst &LI) {
1170 if (LI.getType()->isPointerTy())
1171 // P1 = load P2 --> <Load/P1/P2>
1172 Constraints.push_back(Constraint(Constraint::Load, getNodeValue(LI),
1173 getNode(LI.getOperand(0))));
// Storing a pointer into memory induces a Store constraint; note the
// destination (operand 1) comes first in the constraint.
1176 void Andersens::visitStoreInst(StoreInst &SI) {
1177 if (SI.getOperand(0)->getType()->isPointerTy())
1178 // store P1, P2 --> <Store/P2/P1>
1179 Constraints.push_back(Constraint(Constraint::Store,
1180 getNode(SI.getOperand(1)),
1181 getNode(SI.getOperand(0))));
// A GEP result is modeled as a plain copy of its base pointer: the derived
// pointer points to whatever the base points to.
1184 void Andersens::visitGetElementPtrInst(GetElementPtrInst &GEP) {
1185 // P1 = getelementptr P2, ... --> <Copy/P1/P2>
1186 Constraints.push_back(Constraint(Constraint::Copy, getNodeValue(GEP),
1187 getNode(GEP.getOperand(0))));
// A pointer PHI merges all incoming pointers: one Copy per incoming value.
1190 void Andersens::visitPHINode(PHINode &PN) {
1191 if (PN.getType()->isPointerTy()) {
1192 unsigned PNN = getNodeValue(PN);
1193 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
1194 // P1 = phi P2, P3 --> <Copy/P1/P2>, <Copy/P1/P3>, ...
1195 Constraints.push_back(Constraint(Constraint::Copy, PNN,
1196 getNode(PN.getIncomingValue(i))));
// Casts: pointer-to-pointer is a copy; int-to-pointer conservatively yields
// the universal set; pointer-to-int feeds the pointer into the universal
// set, since the integer could be turned back into the pointer later.
1200 void Andersens::visitCastInst(CastInst &CI) {
1201 Value *Op = CI.getOperand(0);
1202 if (CI.getType()->isPointerTy()) {
1203 if (Op->getType()->isPointerTy()) {
1204 // P1 = cast P2 --> <Copy/P1/P2>
1205 Constraints.push_back(Constraint(Constraint::Copy, getNodeValue(CI),
1206 getNode(CI.getOperand(0))));
1208 // P1 = cast int --> <Copy/P1/Univ>
1210 Constraints.push_back(Constraint(Constraint::Copy, getNodeValue(CI),
1216 } else if (Op->getType()->isPointerTy()) {
1217 // int = cast P1 --> <Copy/Univ/P1>
1219 Constraints.push_back(Constraint(Constraint::Copy,
1221 getNode(CI.getOperand(0))));
// Ensure the source pointer has a node even when no constraint is added
// on this path (getNode creates the node as a side effect).
1223 getNode(CI.getOperand(0));
// A pointer select may produce either operand, so the result copies both.
1228 void Andersens::visitSelectInst(SelectInst &SI) {
1229 if (SI.getType()->isPointerTy()) {
1230 unsigned SIN = getNodeValue(SI);
1231 // P1 = select C, P2, P3 ---> <Copy/P1/P2>, <Copy/P1/P3>
1232 Constraints.push_back(Constraint(Constraint::Copy, SIN,
1233 getNode(SI.getOperand(1))));
1234 Constraints.push_back(Constraint(Constraint::Copy, SIN,
1235 getNode(SI.getOperand(2))));
// va_arg is unsupported: hitting this visitor aborts the analysis rather
// than silently producing unsound constraints.
1239 void Andersens::visitVAArg(VAArgInst &I) {
1240 llvm_unreachable("vaarg not handled yet!");
1243 /// AddConstraintsForCall - Add constraints for a call with actual arguments
1244 /// specified by CS to the function specified by F. Note that the types of
1245 /// arguments might not match up in the case where this is an indirect call and
1246 /// the function pointer has been casted. If this is the case, do something
/// reasonable: F may be NULL for indirect calls, in which case constraints
/// go through the called value's node with field offsets.
1248 void Andersens::AddConstraintsForCall(CallSite CS, Function *F) {
1249 Value *CallValue = CS.getCalledValue();
// IsDeref: true for indirect calls (its uses are elided in this excerpt).
1250 bool IsDeref = F == NULL;
1252 // If this is a call to an external function, try to handle it directly to get
1253 // some taste of context sensitivity.
1254 if (F && F->isDeclaration() && AddConstraintsForExternalCall(CS, F))
// Hook up the call's result. Functions are modeled as structs (see the file
// header): CallReturnPos is the field offset of the return-value slot, so
// indirect calls Load through the function pointer at that offset while
// direct calls Copy from the callee's node plus the offset.
1257 if (CS.getType()->isPointerTy()) {
1258 unsigned CSN = getNode(CS.getInstruction());
1259 if (!F || F->getFunctionType()->getReturnType()->isPointerTy()) {
1261 Constraints.push_back(Constraint(Constraint::Load, CSN,
1262 getNode(CallValue), CallReturnPos));
1264 Constraints.push_back(Constraint(Constraint::Copy, CSN,
1265 getNode(CallValue) + CallReturnPos));
1267 // If the function returns a non-pointer value, handle this just like we
1268 // treat a nonpointer cast to pointer.
1269 Constraints.push_back(Constraint(Constraint::Copy, CSN,
1272 } else if (F && F->getFunctionType()->getReturnType()->isPointerTy()) {
1274 Constraints.push_back(Constraint(Constraint::Copy,
1276 getNode(CallValue) + CallReturnPos));
1278 Constraints.push_back(Constraint(Constraint::Copy,
1279 getNode(CallValue) + CallReturnPos,
// Now wire actual arguments to formal parameters.
1286 CallSite::arg_iterator ArgI = CS.arg_begin(), ArgE = CS.arg_end();
1287 bool external = !F || F->isDeclaration();
// Direct call with a known callee: pair actuals with formals positionally.
1290 Function::arg_iterator AI = F->arg_begin(), AE = F->arg_end();
1291 for (; AI != AE && ArgI != ArgE; ++AI, ++ArgI)
1294 if (external && (*ArgI)->getType()->isPointerTy())
1296 // Add constraint that ArgI can now point to anything due to
1297 // escaping, as can everything it points to. The second portion of
1298 // this should be taken care of by universal = *universal
1299 Constraints.push_back(Constraint(Constraint::Copy,
1304 if (AI->getType()->isPointerTy()) {
1305 if ((*ArgI)->getType()->isPointerTy()) {
1306 // Copy the actual argument into the formal argument.
1307 Constraints.push_back(Constraint(Constraint::Copy, getNode(AI),
1310 Constraints.push_back(Constraint(Constraint::Copy, getNode(AI),
1313 } else if ((*ArgI)->getType()->isPointerTy()) {
1315 Constraints.push_back(Constraint(Constraint::Copy,
1319 Constraints.push_back(Constraint(Constraint::Copy,
// Indirect call: store each pointer actual into the callee's argument
// fields (CallFirstArgPos onward) through the function-pointer node.
1327 unsigned ArgPos = CallFirstArgPos;
1328 for (; ArgI != ArgE; ++ArgI) {
1329 if ((*ArgI)->getType()->isPointerTy()) {
1330 // Copy the actual argument into the formal argument.
1331 Constraints.push_back(Constraint(Constraint::Store,
1333 getNode(*ArgI), ArgPos++));
1335 Constraints.push_back(Constraint(Constraint::Store,
1336 getNode (CallValue),
1337 UniversalSet, ArgPos++));
1341 // Copy all pointers passed through the varargs section to the varargs node.
1342 if (F && F->getFunctionType()->isVarArg())
1343 for (; ArgI != ArgE; ++ArgI)
1344 if ((*ArgI)->getType()->isPointerTy())
1345 Constraints.push_back(Constraint(Constraint::Copy, getVarargNode(F),
1347 // If more arguments are passed in than we track, just drop them on the floor.
// Entry point for both CallInst and InvokeInst: ensure the result has a
// node, then dispatch with the callee (or NULL for indirect calls).
1350 void Andersens::visitCallSite(CallSite CS) {
1351 if (CS.getType()->isPointerTy())
1352 getNodeValue(*CS.getInstruction());
1354 if (Function *F = CS.getCalledFunction()) {
1355 AddConstraintsForCall(CS, F);
1357 AddConstraintsForCall(CS, NULL);
1361 //===----------------------------------------------------------------------===//
1362 // Constraint Solving Phase
1363 //===----------------------------------------------------------------------===//
1365 /// intersects - Return true if the points-to set of this node intersects
1366 /// with the points-to set of the specified node.
1367 bool Andersens::Node::intersects(Node *N) const {
1368 return PointsTo->intersects(N->PointsTo);
1371 /// intersectsIgnoring - Return true if the points-to set of this node
1372 /// intersects with the points-to set of the specified node on any nodes
1373 /// except for the specified node to ignore.
1374 bool Andersens::Node::intersectsIgnoring(Node *N, unsigned Ignoring) const {
1375 // TODO: If we are only going to call this with the same value for Ignoring,
1376 // we should move the special values out of the points-to bitmap.
// Implemented by temporarily clearing the Ignoring bit from both bitmaps,
// intersecting, then restoring the bits that were set. Although the method
// is const, it mutates the sets through the PointsTo pointers, so it is
// not safe under concurrent readers.
1377 bool WeHadIt = PointsTo->test(Ignoring);
1378 bool NHadIt = N->PointsTo->test(Ignoring);
1379 bool Result = false;
1381 PointsTo->reset(Ignoring);
1383 N->PointsTo->reset(Ignoring);
1384 Result = PointsTo->intersects(N->PointsTo);
// Restore only the bits that were originally set (guards elided in this
// excerpt use WeHadIt / NHadIt).
1386 PointsTo->set(Ignoring);
1388 N->PointsTo->set(Ignoring);
1393 /// Clump together address taken variables so that the points-to sets use up
1394 /// less space and can be operated on faster.
/// Renumbers nodes in three passes — special nodes first, then address-taken
/// nodes, then the rest — so address-taken indices are densely packed, which
/// keeps the sparse bitmaps small. All side tables and constraints are then
/// remapped through the Translate vector.
1396 void Andersens::ClumpAddressTaken() {
// Temporarily switch the DEBUG() category so renumbering output can be
// enabled separately; restored at the end of the function.
1398 #define DEBUG_TYPE "anders-aa-renumber"
1399 std::vector<unsigned> Translate;
1400 std::vector<Node> NewGraphNodes;
1402 Translate.resize(GraphNodes.size());
1403 unsigned NewPos = 0;
// Mark every node whose address is taken (it appears as the Src of an
// AddressOf constraint).
1405 for (unsigned i = 0; i < Constraints.size(); ++i) {
1406 Constraint &C = Constraints[i];
1407 if (C.Type == Constraint::AddressOf) {
1408 GraphNodes[C.Src].AddressTaken = true;
// Pass 1: special nodes keep the lowest indices.
1411 for (unsigned i = 0; i < NumberSpecialNodes; ++i) {
1412 unsigned Pos = NewPos++;
1414 NewGraphNodes.push_back(GraphNodes[i]);
1415 DEBUG(dbgs() << "Renumbering node " << i << " to node " << Pos << "\n");
1418 // I believe this ends up being faster than making two vectors and splicing
// Pass 2: address-taken nodes next, packed contiguously.
1420 for (unsigned i = NumberSpecialNodes; i < GraphNodes.size(); ++i) {
1421 if (GraphNodes[i].AddressTaken) {
1422 unsigned Pos = NewPos++;
1424 NewGraphNodes.push_back(GraphNodes[i]);
1425 DEBUG(dbgs() << "Renumbering node " << i << " to node " << Pos << "\n");
// Pass 3: everything else.
1429 for (unsigned i = NumberSpecialNodes; i < GraphNodes.size(); ++i) {
1430 if (!GraphNodes[i].AddressTaken) {
1431 unsigned Pos = NewPos++;
1433 NewGraphNodes.push_back(GraphNodes[i]);
1434 DEBUG(dbgs() << "Renumbering node " << i << " to node " << Pos << "\n");
// Remap every table that stores node indices.
1438 for (DenseMap<Value*, unsigned>::iterator Iter = ValueNodes.begin();
1439 Iter != ValueNodes.end();
1441 Iter->second = Translate[Iter->second];
1443 for (DenseMap<Value*, unsigned>::iterator Iter = ObjectNodes.begin();
1444 Iter != ObjectNodes.end();
1446 Iter->second = Translate[Iter->second];
1448 for (DenseMap<Function*, unsigned>::iterator Iter = ReturnNodes.begin();
1449 Iter != ReturnNodes.end();
1451 Iter->second = Translate[Iter->second];
1453 for (DenseMap<Function*, unsigned>::iterator Iter = VarargNodes.begin();
1454 Iter != VarargNodes.end();
1456 Iter->second = Translate[Iter->second];
1458 for (unsigned i = 0; i < Constraints.size(); ++i) {
1459 Constraint &C = Constraints[i];
1460 C.Src = Translate[C.Src];
1461 C.Dest = Translate[C.Dest];
1464 GraphNodes.swap(NewGraphNodes);
1466 #define DEBUG_TYPE "anders-aa"
1469 /// The technique used here is described in "Exploiting Pointer and Location
1470 /// Equivalence to Optimize Pointer Analysis. In the 14th International Static
1471 /// Analysis Symposium (SAS), August 2007." It is known as the "HVN" algorithm,
1472 /// and is equivalent to value numbering the collapsed constraint graph without
1473 /// evaluating unions. This is used as a pre-pass to HU in order to resolve
1474 /// first order pointer dereferences and speed up/reduce memory usage of HU.
1475 /// Running both is equivalent to HRU without the iteration
1476 /// HVN in more detail:
1477 /// Imagine the set of constraints was simply straight line code with no loops
1478 /// (we eliminate cycles, so there are no loops), such as:
1484 /// Applying value numbering to this code tells us:
1487 /// For HVN, this is as far as it goes. We assign new value numbers to every
1488 /// "address node", and every "reference node".
1489 /// To get the optimal result for this, we use a DFS + SCC (since all nodes in a
1490 /// cycle must have the same value number since the = operation is really
1491 /// inclusion, not overwrite), and value number nodes we receive points-to sets
1492 /// before we value our own node.
1493 /// The advantage of HU over HVN is that HU considers the inclusion property, so
1494 /// that if you have
1501 /// HU will determine that G == F == E. HVN will not, because it cannot prove
1502 /// that the points to information ends up being the same because they all
1503 /// receive &D from E anyway.
1505 void Andersens::HVN() {
1506 DEBUG(dbgs() << "Beginning HVN\n");
1507 // Build a predecessor graph. This is like our constraint graph with the
1508 // edges going in the opposite direction, and there are edges for all the
1509 // constraints, instead of just copy constraints. We also build implicit
1510 // edges for constraints are implied but not explicit. I.E for the constraint
1511 // a = &b, we add implicit edges *a = b. This helps us capture more cycles
// Node index spaces: [0, FirstRefNode) are real variables, ref nodes live
// at index + FirstRefNode ("*x"), address nodes at index + FirstAdrNode
// ("&x").
1512 for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
1513 Constraint &C = Constraints[i];
1514 if (C.Type == Constraint::AddressOf) {
1515 GraphNodes[C.Src].AddressTaken = true;
1516 GraphNodes[C.Src].Direct = false;
1519 unsigned AdrNode = C.Src + FirstAdrNode;
1520 if (!GraphNodes[C.Dest].PredEdges)
1521 GraphNodes[C.Dest].PredEdges = new SparseBitVector<>;
1522 GraphNodes[C.Dest].PredEdges->set(AdrNode);
1525 unsigned RefNode = C.Dest + FirstRefNode;
1526 if (!GraphNodes[RefNode].ImplicitPredEdges)
1527 GraphNodes[RefNode].ImplicitPredEdges = new SparseBitVector<>;
1528 GraphNodes[RefNode].ImplicitPredEdges->set(C.Src);
1529 } else if (C.Type == Constraint::Load) {
1530 if (C.Offset == 0) {
1532 if (!GraphNodes[C.Dest].PredEdges)
1533 GraphNodes[C.Dest].PredEdges = new SparseBitVector<>;
1534 GraphNodes[C.Dest].PredEdges->set(C.Src + FirstRefNode);
// Loads with a non-zero offset can't be value-numbered here; the
// destination is no longer "direct".
1536 GraphNodes[C.Dest].Direct = false;
1538 } else if (C.Type == Constraint::Store) {
1539 if (C.Offset == 0) {
1541 unsigned RefNode = C.Dest + FirstRefNode;
1542 if (!GraphNodes[RefNode].PredEdges)
1543 GraphNodes[RefNode].PredEdges = new SparseBitVector<>;
1544 GraphNodes[RefNode].PredEdges->set(C.Src);
1547 // Dest = Src edge and *Dest = *Src edge
1548 if (!GraphNodes[C.Dest].PredEdges)
1549 GraphNodes[C.Dest].PredEdges = new SparseBitVector<>;
1550 GraphNodes[C.Dest].PredEdges->set(C.Src);
1551 unsigned RefNode = C.Dest + FirstRefNode;
1552 if (!GraphNodes[RefNode].ImplicitPredEdges)
1553 GraphNodes[RefNode].ImplicitPredEdges = new SparseBitVector<>;
1554 GraphNodes[RefNode].ImplicitPredEdges->set(C.Src + FirstRefNode);
1558 // Do SCC finding first to condense our predecessor graph
1560 Node2DFS.insert(Node2DFS.begin(), GraphNodes.size(), 0);
1561 Node2Deleted.insert(Node2Deleted.begin(), GraphNodes.size(), false);
1562 Node2Visited.insert(Node2Visited.begin(), GraphNodes.size(), false);
// Value-number every variable node (HVNValNum does DFS + SCC collapse and
// assigns PointerEquivLabels).
1564 for (unsigned i = 0; i < FirstRefNode; ++i) {
1565 unsigned Node = VSSCCRep[i];
1566 if (!Node2Visited[Node])
// Tear down the per-pass tables; the label sets owned by Set2PEClass are
// released here.
1569 for (BitVectorMap::iterator Iter = Set2PEClass.begin();
1570 Iter != Set2PEClass.end();
1573 Set2PEClass.clear();
1575 Node2Deleted.clear();
1576 Node2Visited.clear();
1577 DEBUG(dbgs() << "Finished HVN\n");
1581 /// This is the workhorse of HVN value numbering. We combine SCC finding at the
1582 /// same time because it's easy.
/// Tarjan-style DFS over predecessor edges: collapse any SCC into NodeIndex,
/// then assign this node a pointer-equivalence label derived from its
/// predecessors' labels (0 = provably points to nothing).
1583 void Andersens::HVNValNum(unsigned NodeIndex) {
1584 unsigned MyDFS = DFSNumber++;
1585 Node *N = &GraphNodes[NodeIndex];
1586 Node2Visited[NodeIndex] = true;
1587 Node2DFS[NodeIndex] = MyDFS;
1589 // First process all our explicit edges
1591 for (SparseBitVector<>::iterator Iter = N->PredEdges->begin();
1592 Iter != N->PredEdges->end();
1594 unsigned j = VSSCCRep[*Iter];
1595 if (!Node2Deleted[j]) {
1596 if (!Node2Visited[j])
// Propagate lowlink: adopt the smaller DFS number seen on this path.
1598 if (Node2DFS[NodeIndex] > Node2DFS[j])
1599 Node2DFS[NodeIndex] = Node2DFS[j];
1603 // Now process all the implicit edges
1604 if (N->ImplicitPredEdges)
1605 for (SparseBitVector<>::iterator Iter = N->ImplicitPredEdges->begin();
1606 Iter != N->ImplicitPredEdges->end();
1608 unsigned j = VSSCCRep[*Iter];
1609 if (!Node2Deleted[j]) {
1610 if (!Node2Visited[j])
1612 if (Node2DFS[NodeIndex] > Node2DFS[j])
1613 Node2DFS[NodeIndex] = Node2DFS[j];
1617 // See if we found any cycles
// SCC root: pop members, merge their edge sets into this node, and make
// NodeIndex the representative in VSSCCRep.
1618 if (MyDFS == Node2DFS[NodeIndex]) {
1619 while (!SCCStack.empty() && Node2DFS[SCCStack.top()] >= MyDFS) {
1620 unsigned CycleNodeIndex = SCCStack.top();
1621 Node *CycleNode = &GraphNodes[CycleNodeIndex];
1622 VSSCCRep[CycleNodeIndex] = NodeIndex;
// The merged node is direct only if every member was direct.
1624 N->Direct &= CycleNode->Direct;
1626 if (CycleNode->PredEdges) {
1628 N->PredEdges = new SparseBitVector<>;
1629 *(N->PredEdges) |= CycleNode->PredEdges;
1630 delete CycleNode->PredEdges;
1631 CycleNode->PredEdges = NULL;
1633 if (CycleNode->ImplicitPredEdges) {
1634 if (!N->ImplicitPredEdges)
1635 N->ImplicitPredEdges = new SparseBitVector<>;
1636 *(N->ImplicitPredEdges) |= CycleNode->ImplicitPredEdges;
1637 delete CycleNode->ImplicitPredEdges;
1638 CycleNode->ImplicitPredEdges = NULL;
1644 Node2Deleted[NodeIndex] = true;
// Non-direct nodes always get a fresh equivalence class.
1647 GraphNodes[NodeIndex].PointerEquivLabel = PEClass++;
1651 // Collect labels of successor nodes
1652 bool AllSame = true;
1653 unsigned First = ~0;
1654 SparseBitVector<> *Labels = new SparseBitVector<>;
1658 for (SparseBitVector<>::iterator Iter = N->PredEdges->begin();
1659 Iter != N->PredEdges->end();
1661 unsigned j = VSSCCRep[*Iter];
1662 unsigned Label = GraphNodes[j].PointerEquivLabel;
1663 // Ignore labels that are equal to us or non-pointers
1664 if (j == NodeIndex || Label == 0)
1666 if (First == (unsigned)~0)
1668 else if (First != Label)
1673 // We either have a non-pointer, a copy of an existing node, or a new node.
1674 // Assign the appropriate pointer equivalence label.
1675 if (Labels->empty()) {
1676 GraphNodes[NodeIndex].PointerEquivLabel = 0;
1677 } else if (AllSame) {
1678 GraphNodes[NodeIndex].PointerEquivLabel = First;
// Hash-cons the label set: identical sets share one equivalence class.
1680 GraphNodes[NodeIndex].PointerEquivLabel = Set2PEClass[Labels];
1681 if (GraphNodes[NodeIndex].PointerEquivLabel == 0) {
1682 unsigned EquivClass = PEClass++;
1683 Set2PEClass[Labels] = EquivClass;
1684 GraphNodes[NodeIndex].PointerEquivLabel = EquivClass;
1691 SCCStack.push(NodeIndex);
1695 /// The technique used here is described in "Exploiting Pointer and Location
1696 /// Equivalence to Optimize Pointer Analysis. In the 14th International Static
1697 /// Analysis Symposium (SAS), August 2007." It is known as the "HU" algorithm,
1698 /// and is equivalent to value numbering the collapsed constraint graph
1699 /// including evaluating unions.
1700 void Andersens::HU() {
1701 DEBUG(dbgs() << "Beginning HU\n");
1702 // Build a predecessor graph. This is like our constraint graph with the
1703 // edges going in the opposite direction, and there are edges for all the
1704 // constraints, instead of just copy constraints. We also build implicit
1705 // edges for constraints are implied but not explicit. I.E for the constraint
1706 // a = &b, we add implicit edges *a = b. This helps us capture more cycles
1707 for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
1708 Constraint &C = Constraints[i];
1709 if (C.Type == Constraint::AddressOf) {
1710 GraphNodes[C.Src].AddressTaken = true;
1711 GraphNodes[C.Src].Direct = false;
// Unlike HVN, HU seeds actual points-to/pointed-to-by bits so that unions
// are evaluated during labeling.
1713 GraphNodes[C.Dest].PointsTo->set(C.Src);
1715 unsigned RefNode = C.Dest + FirstRefNode;
1716 if (!GraphNodes[RefNode].ImplicitPredEdges)
1717 GraphNodes[RefNode].ImplicitPredEdges = new SparseBitVector<>;
1718 GraphNodes[RefNode].ImplicitPredEdges->set(C.Src);
1719 GraphNodes[C.Src].PointedToBy->set(C.Dest);
1720 } else if (C.Type == Constraint::Load) {
1721 if (C.Offset == 0) {
1723 if (!GraphNodes[C.Dest].PredEdges)
1724 GraphNodes[C.Dest].PredEdges = new SparseBitVector<>;
1725 GraphNodes[C.Dest].PredEdges->set(C.Src + FirstRefNode);
// Offset loads can't be modeled here; mark the destination non-direct.
1727 GraphNodes[C.Dest].Direct = false;
1729 } else if (C.Type == Constraint::Store) {
1730 if (C.Offset == 0) {
1732 unsigned RefNode = C.Dest + FirstRefNode;
1733 if (!GraphNodes[RefNode].PredEdges)
1734 GraphNodes[RefNode].PredEdges = new SparseBitVector<>;
1735 GraphNodes[RefNode].PredEdges->set(C.Src);
1738 // Dest = Src edge and *Dest = *Src edge
1739 if (!GraphNodes[C.Dest].PredEdges)
1740 GraphNodes[C.Dest].PredEdges = new SparseBitVector<>;
1741 GraphNodes[C.Dest].PredEdges->set(C.Src);
1742 unsigned RefNode = C.Dest + FirstRefNode;
1743 if (!GraphNodes[RefNode].ImplicitPredEdges)
1744 GraphNodes[RefNode].ImplicitPredEdges = new SparseBitVector<>;
1745 GraphNodes[RefNode].ImplicitPredEdges->set(C.Src + FirstRefNode);
1749 // Do SCC finding first to condense our predecessor graph
1751 Node2DFS.insert(Node2DFS.begin(), GraphNodes.size(), 0);
1752 Node2Deleted.insert(Node2Deleted.begin(), GraphNodes.size(), false);
1753 Node2Visited.insert(Node2Visited.begin(), GraphNodes.size(), false);
// Condense pass: only union-find representatives (FindNode(i) == i) are
// walked; Condense collapses predecessor-graph SCCs.
1755 for (unsigned i = 0; i < FirstRefNode; ++i) {
1756 if (FindNode(i) == i) {
1757 unsigned Node = VSSCCRep[i];
1758 if (!Node2Visited[Node])
1763 // Reset tables for actual labeling
1765 Node2Visited.clear();
1766 Node2Deleted.clear();
1767 // Pre-grow our densemap so that we don't get really bad behavior
1768 Set2PEClass.resize(GraphNodes.size());
1770 // Visit the condensed graph and generate pointer equivalence labels.
1771 Node2Visited.insert(Node2Visited.begin(), GraphNodes.size(), false);
1772 for (unsigned i = 0; i < FirstRefNode; ++i) {
1773 if (FindNode(i) == i) {
1774 unsigned Node = VSSCCRep[i];
1775 if (!Node2Visited[Node])
1779 // PEClass nodes will be deleted by the deleting of N->PointsTo in our caller.
1780 Set2PEClass.clear();
1781 DEBUG(dbgs() << "Finished HU\n");
1785 /// Implementation of standard Tarjan SCC algorithm as modified by Nuutila.
/// Collapses predecessor-graph SCCs into a single representative node,
/// merging points-to sets and edge bitmaps, and counts incoming edges for
/// the subsequent HU labeling pass.
1786 void Andersens::Condense(unsigned NodeIndex) {
1787 unsigned MyDFS = DFSNumber++;
1788 Node *N = &GraphNodes[NodeIndex];
1789 Node2Visited[NodeIndex] = true;
1790 Node2DFS[NodeIndex] = MyDFS;
1792 // First process all our explicit edges
1794 for (SparseBitVector<>::iterator Iter = N->PredEdges->begin();
1795 Iter != N->PredEdges->end();
1797 unsigned j = VSSCCRep[*Iter];
1798 if (!Node2Deleted[j]) {
1799 if (!Node2Visited[j])
// Propagate lowlink: adopt the smaller DFS number seen on this path.
1801 if (Node2DFS[NodeIndex] > Node2DFS[j])
1802 Node2DFS[NodeIndex] = Node2DFS[j];
1806 // Now process all the implicit edges
1807 if (N->ImplicitPredEdges)
1808 for (SparseBitVector<>::iterator Iter = N->ImplicitPredEdges->begin();
1809 Iter != N->ImplicitPredEdges->end();
1811 unsigned j = VSSCCRep[*Iter];
1812 if (!Node2Deleted[j]) {
1813 if (!Node2Visited[j])
1815 if (Node2DFS[NodeIndex] > Node2DFS[j])
1816 Node2DFS[NodeIndex] = Node2DFS[j];
1820 // See if we found any cycles
// SCC root: pop members off the stack, merge their state into this node.
1821 if (MyDFS == Node2DFS[NodeIndex]) {
1822 while (!SCCStack.empty() && Node2DFS[SCCStack.top()] >= MyDFS) {
1823 unsigned CycleNodeIndex = SCCStack.top();
1824 Node *CycleNode = &GraphNodes[CycleNodeIndex];
1825 VSSCCRep[CycleNodeIndex] = NodeIndex;
1827 N->Direct &= CycleNode->Direct;
// Merge and free the member's points-to set and edge bitmaps; the
// representative owns the union from here on.
1829 *(N->PointsTo) |= CycleNode->PointsTo;
1830 delete CycleNode->PointsTo;
1831 CycleNode->PointsTo = NULL;
1832 if (CycleNode->PredEdges) {
1834 N->PredEdges = new SparseBitVector<>;
1835 *(N->PredEdges) |= CycleNode->PredEdges;
1836 delete CycleNode->PredEdges;
1837 CycleNode->PredEdges = NULL;
1839 if (CycleNode->ImplicitPredEdges) {
1840 if (!N->ImplicitPredEdges)
1841 N->ImplicitPredEdges = new SparseBitVector<>;
1842 *(N->ImplicitPredEdges) |= CycleNode->ImplicitPredEdges;
1843 delete CycleNode->ImplicitPredEdges;
1844 CycleNode->ImplicitPredEdges = NULL;
1849 Node2Deleted[NodeIndex] = true;
1851 // Set up number of incoming edges for other nodes
// NumInEdges lets HUValNum free a node's points-to set as soon as its last
// consumer has been processed.
1853 for (SparseBitVector<>::iterator Iter = N->PredEdges->begin();
1854 Iter != N->PredEdges->end();
1856 ++GraphNodes[VSSCCRep[*Iter]].NumInEdges;
1858 SCCStack.push(NodeIndex);
// HU labeling pass: compute this node's points-to set from its (already
// condensed) predecessors and assign a pointer-equivalence label, sharing
// labels between nodes whose sets hash-cons to the same entry.
1862 void Andersens::HUValNum(unsigned NodeIndex) {
1863 Node *N = &GraphNodes[NodeIndex];
1864 Node2Visited[NodeIndex] = true;
1866 // Eliminate dereferences of non-pointers for those non-pointers we have
1867 // already identified. These are ref nodes whose non-ref node:
1868 // 1. Has already been visited and determined to point to nothing (and thus, a
1869 // dereference of it must point to nothing)
1870 // 2. Any direct node with no predecessor edges in our graph and with no
1871 // points-to set (since it can't point to anything either, being that it
1872 // receives no points-to sets and has none).
1873 if (NodeIndex >= FirstRefNode) {
1874 unsigned j = VSSCCRep[FindNode(NodeIndex - FirstRefNode)];
1875 if ((Node2Visited[j] && !GraphNodes[j].PointerEquivLabel)
1876 || (GraphNodes[j].Direct && !GraphNodes[j].PredEdges
1877 && GraphNodes[j].PointsTo->empty())){
1881 // Process all our explicit edges
1883 for (SparseBitVector<>::iterator Iter = N->PredEdges->begin();
1884 Iter != N->PredEdges->end();
1886 unsigned j = VSSCCRep[*Iter];
1887 if (!Node2Visited[j])
1890 // If this edge turned out to be the same as us, or got no pointer
1891 // equivalence label (and thus points to nothing) , just decrement our
1892 // incoming edges and continue.
1893 if (j == NodeIndex || GraphNodes[j].PointerEquivLabel == 0) {
1894 --GraphNodes[j].NumInEdges;
// Union the predecessor's points-to set into ours (HU evaluates unions,
// unlike HVN).
1898 *(N->PointsTo) |= GraphNodes[j].PointsTo;
1900 // If we didn't end up storing this in the hash, and we're done with all
1901 // the edges, we don't need the points-to set anymore.
1902 --GraphNodes[j].NumInEdges;
1903 if (!GraphNodes[j].NumInEdges && !GraphNodes[j].StoredInHash) {
1904 delete GraphNodes[j].PointsTo;
1905 GraphNodes[j].PointsTo = NULL;
1908 // If this isn't a direct node, generate a fresh variable.
// The fresh variable is encoded as our own ref-node index, which no other
// node can produce, guaranteeing a unique set.
1910 N->PointsTo->set(FirstRefNode + NodeIndex);
1913 // See If we have something equivalent to us, if not, generate a new
1914 // equivalence class.
1915 if (N->PointsTo->empty()) {
// Hash-cons the points-to set so nodes with identical sets share a label.
1920 N->PointerEquivLabel = Set2PEClass[N->PointsTo];
1921 if (N->PointerEquivLabel == 0) {
1922 unsigned EquivClass = PEClass++;
1923 N->StoredInHash = true;
1924 Set2PEClass[N->PointsTo] = EquivClass;
1925 N->PointerEquivLabel = EquivClass;
1928 N->PointerEquivLabel = PEClass++;
1933 /// Rewrite our list of constraints so that pointer equivalent nodes are
1934 /// replaced by their pointer equivalence class representative.
1935 void Andersens::RewriteConstraints() {
// Rewrite the global constraint list in terms of pointer-equivalence
// class representatives: drop constraints touching label-0 (provable
// non-pointer) nodes, unite equivalent nodes via FindEquivalentNode,
// and de-duplicate the surviving constraints.
1936 std::vector<Constraint> NewConstraints;
1937 DenseSet<Constraint, ConstraintKeyInfo> Seen;
1939 PEClass2Node.clear();
1940 PENLEClass2Node.clear();
1942 // We may have from 1 to GraphNodes + 1 equivalence classes.
1943 PEClass2Node.insert(PEClass2Node.begin(), GraphNodes.size() + 1, -1);
1944 PENLEClass2Node.insert(PENLEClass2Node.begin(), GraphNodes.size() + 1, -1);
1946 // Rewrite constraints, ignoring non-pointer constraints, uniting equivalent
1947 // nodes, and rewriting constraints to use the representative nodes.
1948 for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
1949 Constraint &C = Constraints[i];
1950 unsigned RHSNode = FindNode(C.Src);
1951 unsigned LHSNode = FindNode(C.Dest);
// Labels live on the offline-SCC representative of each node.
1952 unsigned RHSLabel = GraphNodes[VSSCCRep[RHSNode]].PointerEquivLabel;
1953 unsigned LHSLabel = GraphNodes[VSSCCRep[LHSNode]].PointerEquivLabel;
1955 // First we try to eliminate constraints for things we can prove don't point
1957 if (LHSLabel == 0) {
1958 DEBUG(PrintNode(&GraphNodes[LHSNode]));
1959 DEBUG(dbgs() << " is a non-pointer, ignoring constraint.\n");
1962 if (RHSLabel == 0) {
1963 DEBUG(PrintNode(&GraphNodes[RHSNode]));
1964 DEBUG(dbgs() << " is a non-pointer, ignoring constraint.\n");
1967 // This constraint may be useless, and it may become useless as we translate
// A self-copy adds nothing to the solution; check both before and
// after translating to equivalence-class representatives.
1969 if (C.Src == C.Dest && C.Type == Constraint::Copy)
1972 C.Src = FindEquivalentNode(RHSNode, RHSLabel);
1973 C.Dest = FindEquivalentNode(FindNode(LHSNode), LHSLabel);
1974 if ((C.Src == C.Dest && C.Type == Constraint::Copy)
1979 NewConstraints.push_back(C);
1981 Constraints.swap(NewConstraints);
// PEClass2Node is only meaningful during this rewrite; PENLEClass2Node
// is kept alive for UnitePointerEquivalences().
1982 PEClass2Node.clear();
1985 /// See if we have a node that is pointer equivalent to the one being asked
1986 /// about, and if so, unite them and return the equivalent node. Otherwise,
1987 /// return the original node.
1988 unsigned Andersens::FindEquivalentNode(unsigned NodeIndex,
1989 unsigned NodeLabel) {
1990 if (!GraphNodes[NodeIndex].AddressTaken) {
// Non-address-taken nodes with the same label may be merged right
// away; address-taken nodes are only recorded in PENLEClass2Node and
// are merged later by UnitePointerEquivalences().
1991 if (PEClass2Node[NodeLabel] != -1) {
1992 // We found an existing node with the same pointer label, so unify them.
1993 // We specifically request that Union-By-Rank not be used so that
1994 // PEClass2Node[NodeLabel] U= NodeIndex and not the other way around.
1995 return UniteNodes(PEClass2Node[NodeLabel], NodeIndex, false);
// First node seen with this label: make it the class representative.
1997 PEClass2Node[NodeLabel] = NodeIndex;
1998 PENLEClass2Node[NodeLabel] = NodeIndex;
2000 } else if (PENLEClass2Node[NodeLabel] == -1) {
2001 PENLEClass2Node[NodeLabel] = NodeIndex;
2007 void Andersens::PrintLabels() const {
// Debug dump: for each graph node print its identity — plain, REF(x)
// for the dereference shadow, or ADR(x) for the address-of shadow —
// plus its pointer-equivalence label, offline SCC rep, and directness.
2008 for (unsigned i = 0; i < GraphNodes.size(); ++i) {
2009 if (i < FirstRefNode) {
2010 PrintNode(&GraphNodes[i]);
2011 } else if (i < FirstAdrNode) {
2012 DEBUG(dbgs() << "REF(");
2013 PrintNode(&GraphNodes[i-FirstRefNode]);
2014 DEBUG(dbgs() <<")");
2016 DEBUG(dbgs() << "ADR(");
2017 PrintNode(&GraphNodes[i-FirstAdrNode]);
2018 DEBUG(dbgs() <<")");
2021 DEBUG(dbgs() << " has pointer label " << GraphNodes[i].PointerEquivLabel
2022 << " and SCC rep " << VSSCCRep[i]
2023 << " and is " << (GraphNodes[i].Direct ? "Direct" : "Not direct")
2028 /// The technique used here is described in "The Ant and the
2029 /// Grasshopper: Fast and Accurate Pointer Analysis for Millions of
2030 /// Lines of Code. In Programming Language Design and Implementation
2031 /// (PLDI), June 2007." It is known as the "HCD" (Hybrid Cycle
2032 /// Detection) algorithm. It is called a hybrid because it performs an
2033 /// offline analysis and uses its results during the solving (online)
2034 /// phase. This is just the offline portion; the results of this
/// operation are stored in SDT and are later used in SolveConstraints()
2036 /// and UniteNodes().
2037 void Andersens::HCD() {
2038 DEBUG(dbgs() << "Starting HCD.\n");
2039 HCDSCCRep.resize(GraphNodes.size());
2041 for (unsigned i = 0; i < GraphNodes.size(); ++i) {
2042 GraphNodes[i].Edges = new SparseBitVector<>;
// Build the offline constraint graph.  Loads/stores route through ref
// nodes (index + FirstRefNode); copies become direct edges; AddressOf
// contributes no edges here.
2046 for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
2047 Constraint &C = Constraints[i];
2048 assert (C.Src < GraphNodes.size() && C.Dest < GraphNodes.size());
2049 if (C.Type == Constraint::AddressOf) {
2051 } else if (C.Type == Constraint::Load) {
2053 GraphNodes[C.Dest].Edges->set(C.Src + FirstRefNode);
2054 } else if (C.Type == Constraint::Store) {
2056 GraphNodes[C.Dest + FirstRefNode].Edges->set(C.Src);
2058 GraphNodes[C.Dest].Edges->set(C.Src);
2062 Node2DFS.insert(Node2DFS.begin(), GraphNodes.size(), 0);
2063 Node2Deleted.insert(Node2Deleted.begin(), GraphNodes.size(), false);
2064 Node2Visited.insert(Node2Visited.begin(), GraphNodes.size(), false);
// One SDT slot per non-ref node (half the graph); -1 means Search()
// found no offline cycle involving that node's ref node.
2065 SDT.insert(SDT.begin(), GraphNodes.size() / 2, -1);
// Run the SCC search from every not-yet-processed representative.
2068 for (unsigned i = 0; i < GraphNodes.size(); ++i) {
2069 unsigned Node = HCDSCCRep[i];
2070 if (!Node2Deleted[Node])
// The offline edge sets are only needed for this analysis; free them.
2074 for (unsigned i = 0; i < GraphNodes.size(); ++i)
2075 if (GraphNodes[i].Edges != NULL) {
2076 delete GraphNodes[i].Edges;
2077 GraphNodes[i].Edges = NULL;
2080 while( !SCCStack.empty() )
2084 Node2Visited.clear();
2085 Node2Deleted.clear();
2087 DEBUG(dbgs() << "HCD complete.\n");
2090 // Component of HCD:
2091 // Use Nuutila's variant of Tarjan's algorithm to detect
2092 // Strongly-Connected Components (SCCs). For non-trivial SCCs
2093 // containing ref nodes, insert the appropriate information in SDT.
2094 void Andersens::Search(unsigned Node) {
// Nuutila-variant Tarjan DFS over the offline HCD graph.  Collapses
// each SCC onto one representative (HCDSCCRep) and, for SCCs that
// contain a ref node, records the mapping in SDT for the online phase.
2095 unsigned MyDFS = DFSNumber++;
2097 Node2Visited[Node] = true;
2098 Node2DFS[Node] = MyDFS;
2100 for (SparseBitVector<>::iterator Iter = GraphNodes[Node].Edges->begin(),
2101 End = GraphNodes[Node].Edges->end();
2104 unsigned J = HCDSCCRep[*Iter];
2105 assert(GraphNodes[J].isRep() && "Debug check; must be representative");
2106 if (!Node2Deleted[J]) {
2107 if (!Node2Visited[J])
// Propagate the minimum DFS number (lowlink) up to ourselves.
2109 if (Node2DFS[Node] > Node2DFS[J])
2110 Node2DFS[Node] = Node2DFS[J];
2114 if( MyDFS != Node2DFS[Node] ) {
// Not an SCC root: stay on the stack for our root to collect.
2115 SCCStack.push(Node);
2119 // This node is the root of a SCC, so process it.
2121 // If the SCC is "non-trivial" (not a singleton) and contains a reference
2122 // node, we place this SCC into SDT. We unite the nodes in any case.
2123 if (!SCCStack.empty() && Node2DFS[SCCStack.top()] >= MyDFS) {
2124 SparseBitVector<> SCC;
2128 bool Ref = (Node >= FirstRefNode);
2130 Node2Deleted[Node] = true;
// Pop every member of this SCC and point it at our representative.
2133 unsigned P = SCCStack.top(); SCCStack.pop();
2134 Ref |= (P >= FirstRefNode);
2136 HCDSCCRep[P] = Node;
2137 } while (!SCCStack.empty() && Node2DFS[SCCStack.top()] >= MyDFS);
// Map each ref member (keyed by its underlying variable) to the
// lowest-numbered non-ref member so the online solver can collapse
// this cycle as soon as it materializes.
2140 unsigned Rep = SCC.find_first();
2141 assert(Rep < FirstRefNode && "The SCC didn't have a non-Ref node!");
2143 SparseBitVector<>::iterator i = SCC.begin();
2145 // Skip over the non-ref nodes
2146 while( *i < FirstRefNode )
2149 while( i != SCC.end() )
2150 SDT[ (*i++) - FirstRefNode ] = Rep;
2156 /// Optimize the constraints by performing offline variable substitution and
2157 /// other optimizations.
2158 void Andersens::OptimizeConstraints() {
// Driver for the offline optimizations: clump address-taken nodes, add
// REF/ADR shadow nodes, run the value-numbering passes with constraint
// rewriting after each, run HCD, then shrink the graph back down.
2159 DEBUG(dbgs() << "Beginning constraint optimization\n");
2163 // Function related nodes need to stay in the same relative position and can't
2164 // be location equivalent.
2165 for (std::map<unsigned, unsigned>::iterator Iter = MaxK.begin();
2168 for (unsigned i = Iter->first;
2169 i != Iter->first + Iter->second;
2171 GraphNodes[i].AddressTaken = true;
2172 GraphNodes[i].Direct = false;
// Append REF and ADR shadow nodes: indices [FirstRefNode, FirstAdrNode)
// are dereferences, [FirstAdrNode, end) are address-of nodes.
2176 ClumpAddressTaken();
2177 FirstRefNode = GraphNodes.size();
2178 FirstAdrNode = FirstRefNode + GraphNodes.size();
2179 GraphNodes.insert(GraphNodes.end(), 2 * GraphNodes.size(),
2181 VSSCCRep.resize(GraphNodes.size());
2182 for (unsigned i = 0; i < GraphNodes.size(); ++i) {
// Release the predecessor-edge scaffolding from the previous pass.
2186 for (unsigned i = 0; i < GraphNodes.size(); ++i) {
2187 Node *N = &GraphNodes[i];
2188 delete N->PredEdges;
2189 N->PredEdges = NULL;
2190 delete N->ImplicitPredEdges;
2191 N->ImplicitPredEdges = NULL;
2194 #define DEBUG_TYPE "anders-aa-labels"
2195 DEBUG(PrintLabels());
2197 #define DEBUG_TYPE "anders-aa"
2198 RewriteConstraints();
2199 // Delete the adr nodes.
2200 GraphNodes.resize(FirstRefNode * 2);
// Reset per-node state on representatives before the next pass.
2203 for (unsigned i = 0; i < GraphNodes.size(); ++i) {
2204 Node *N = &GraphNodes[i];
2205 if (FindNode(i) == i) {
2206 N->PointsTo = new SparseBitVector<>;
2207 N->PointedToBy = new SparseBitVector<>;
2211 N->PointerEquivLabel = 0;
2215 #define DEBUG_TYPE "anders-aa-labels"
2216 DEBUG(PrintLabels());
2218 #define DEBUG_TYPE "anders-aa"
2219 RewriteConstraints();
2220 for (unsigned i = 0; i < GraphNodes.size(); ++i) {
2221 if (FindNode(i) == i) {
2222 Node *N = &GraphNodes[i];
2225 delete N->PredEdges;
2226 N->PredEdges = NULL;
2227 delete N->ImplicitPredEdges;
2228 N->ImplicitPredEdges = NULL;
2229 delete N->PointedToBy;
2230 N->PointedToBy = NULL;
2234 // perform Hybrid Cycle Detection (HCD)
2238 // No longer any need for the upper half of GraphNodes (for ref nodes).
2239 GraphNodes.erase(GraphNodes.begin() + FirstRefNode, GraphNodes.end());
2243 DEBUG(dbgs() << "Finished constraint optimization\n");
2248 /// Unite pointer but not location equivalent variables, now that the constraint
2250 void Andersens::UnitePointerEquivalences() {
// Merge the pointer-equivalent nodes that RewriteConstraints could not
// touch (address-taken ones): each is united with the representative
// recorded for its label in PENLEClass2Node.
2251 DEBUG(dbgs() << "Uniting remaining pointer equivalences\n");
2252 for (unsigned i = 0; i < GraphNodes.size(); ++i) {
2253 if (GraphNodes[i].AddressTaken && GraphNodes[i].isRep()) {
2254 unsigned Label = GraphNodes[i].PointerEquivLabel;
// Label 0 (non-pointer) and unrepresented labels are skipped.
2256 if (Label && PENLEClass2Node[Label] != -1)
2257 UniteNodes(i, PENLEClass2Node[Label]);
2260 DEBUG(dbgs() << "Finished remaining pointer equivalences\n");
2261 PENLEClass2Node.clear();
2264 /// Create the constraint graph used for solving points-to analysis.
2266 void Andersens::CreateConstraintGraph() {
// Seed the online solver's graph: AddressOf constraints become initial
// points-to bits; Load, Store, and offsetted constraints are attached
// to the node whose points-to set must be scanned to resolve them;
// plain zero-offset copies become direct edges.
2267 for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
2268 Constraint &C = Constraints[i];
2269 assert (C.Src < GraphNodes.size() && C.Dest < GraphNodes.size());
2270 if (C.Type == Constraint::AddressOf)
2271 GraphNodes[C.Dest].PointsTo->set(C.Src);
2272 else if (C.Type == Constraint::Load)
2273 GraphNodes[C.Src].Constraints.push_back(C);
2274 else if (C.Type == Constraint::Store)
2275 GraphNodes[C.Dest].Constraints.push_back(C);
2276 else if (C.Offset != 0)
2277 GraphNodes[C.Src].Constraints.push_back(C);
2279 GraphNodes[C.Src].Edges->set(C.Dest);
2283 // Perform DFS and cycle detection.
2284 bool Andersens::QueryNode(unsigned Node) {
// Online (lazy) cycle detection: Tarjan-style DFS from Node over the
// live constraint graph, uniting any non-trivial SCC found and
// cleaning up edges made redundant by merges.  Returns true if any
// node was merged here or in a recursive call.
2285 assert(GraphNodes[Node].isRep() && "Querying a non-rep node");
2286 unsigned OurDFS = ++DFSNumber;
2287 SparseBitVector<> ToErase;
2288 SparseBitVector<> NewEdges;
2289 Tarjan2DFS[Node] = OurDFS;
2291 // Changed denotes a change from a recursive call that we will bubble up.
2292 // Merged is set if we actually merge a node ourselves.
2293 bool Changed = false, Merged = false;
2295 for (SparseBitVector<>::iterator bi = GraphNodes[Node].Edges->begin();
2296 bi != GraphNodes[Node].Edges->end();
2298 unsigned RepNode = FindNode(*bi);
2299 // If this edge points to a non-representative node but we are
2300 // already planning to add an edge to its representative, we have no
2301 // need for this edge anymore.
2302 if (RepNode != *bi && NewEdges.test(RepNode)){
2307 // Continue about our DFS.
2308 if (!Tarjan2Deleted[RepNode]){
2309 if (Tarjan2DFS[RepNode] == 0) {
2310 Changed |= QueryNode(RepNode);
2311 // May have been changed by QueryNode
2312 RepNode = FindNode(RepNode);
// Standard lowlink update: adopt the smaller DFS number.
2314 if (Tarjan2DFS[RepNode] < Tarjan2DFS[Node])
2315 Tarjan2DFS[Node] = Tarjan2DFS[RepNode];
2318 // We may have just discovered that this node is part of a cycle, in
2319 // which case we can also erase it.
2320 if (RepNode != *bi) {
2322 NewEdges.set(RepNode);
// Apply the edge rewrites computed during the scan in one batch.
2326 GraphNodes[Node].Edges->intersectWithComplement(ToErase);
2327 GraphNodes[Node].Edges |= NewEdges;
2329 // If this node is a root of a non-trivial SCC, place it on our
2330 // worklist to be processed.
2331 if (OurDFS == Tarjan2DFS[Node]) {
2332 while (!SCCStack.empty() && Tarjan2DFS[SCCStack.top()] >= OurDFS) {
2333 Node = UniteNodes(Node, SCCStack.top());
2338 Tarjan2Deleted[Node] = true;
// The merged representative must be reprocessed by the solver.
2341 NextWL->insert(&GraphNodes[Node]);
2343 SCCStack.push(Node);
2346 return(Changed | Merged);
2349 /// SolveConstraints - This stage iteratively processes the constraints list
2350 /// propagating constraints (adding edges to the Nodes in the points-to graph)
2351 /// until a fixed point is reached.
2353 /// We use a variant of the technique called "Lazy Cycle Detection", which is
2354 /// described in "The Ant and the Grasshopper: Fast and Accurate Pointer
2355 /// Analysis for Millions of Lines of Code. In Programming Language Design and
2356 /// Implementation (PLDI), June 2007."
2357 /// The paper describes performing cycle detection one node at a time, which can
2358 /// be expensive if there are no cycles, but there are long chains of nodes that
2359 /// it heuristically believes are cycles (because it will DFS from each node
2360 /// without state from previous nodes).
2361 /// Instead, we use the heuristic to build a worklist of nodes to check, then
2362 /// cycle detect them all at the same time to do this more cheaply. This
/// catches cycles slightly later than the original technique did, but does
/// so significantly more cheaply.
2366 void Andersens::SolveConstraints() {
// Online solving phase: worklist-driven propagation of points-to bits
// along constraint-graph edges until a fixed point, with HCD-supplied
// offline cycle info (SDT) and lazy cycle detection (QueryNode) used to
// collapse cycles as they appear.
2370 OptimizeConstraints();
2372 #define DEBUG_TYPE "anders-aa-constraints"
2373 DEBUG(PrintConstraints());
2375 #define DEBUG_TYPE "anders-aa"
2377 for (unsigned i = 0; i < GraphNodes.size(); ++i) {
2378 Node *N = &GraphNodes[i];
2379 N->PointsTo = new SparseBitVector<>;
2380 N->OldPointsTo = new SparseBitVector<>;
2381 N->Edges = new SparseBitVector<>;
2383 CreateConstraintGraph();
2384 UnitePointerEquivalences();
2385 assert(SCCStack.empty() && "SCC Stack should be empty by now!");
2387 Node2Deleted.clear();
2388 Node2DFS.insert(Node2DFS.begin(), GraphNodes.size(), 0);
2389 Node2Deleted.insert(Node2Deleted.begin(), GraphNodes.size(), false);
2391 DenseSet<Constraint, ConstraintKeyInfo> Seen;
2392 DenseSet<std::pair<unsigned,unsigned>, PairKeyInfo> EdgesChecked;
2394 // Order graph and add initial nodes to work list.
2395 for (unsigned i = 0; i < GraphNodes.size(); ++i) {
2396 Node *INode = &GraphNodes[i];
2398 // Add to work list if it's a representative and can contribute to the
2399 // calculation right now.
2400 if (INode->isRep() && !INode->PointsTo->empty()
2401 && (!INode->Edges->empty() || !INode->Constraints.empty())) {
2403 CurrWL->insert(INode);
2406 std::queue<unsigned int> TarjanWL;
2408 // "Rep and special variables" - in order for HCD to maintain conservative
2409 // results when !FULL_UNIVERSAL, we need to treat the special variables in
2410 // the same way that the !FULL_UNIVERSAL tweak does throughout the rest of
2411 // the analysis - it's ok to add edges from the special nodes, but never
2412 // *to* the special nodes.
2413 std::vector<unsigned int> RSV;
// Main fixed-point loop: one iteration per worklist generation.
2415 while( !CurrWL->empty() ) {
2416 DEBUG(dbgs() << "Starting iteration #" << ++NumIters << "\n");
2419 unsigned CurrNodeIndex;
2421 // Actual cycle checking code. We cycle check all of the lazy cycle
2422 // candidates from the last iteration in one go.
2423 if (!TarjanWL.empty()) {
2427 Tarjan2Deleted.clear();
2428 while (!TarjanWL.empty()) {
2429 unsigned int ToTarjan = TarjanWL.front();
2431 if (!Tarjan2Deleted[ToTarjan]
2432 && GraphNodes[ToTarjan].isRep()
2433 && Tarjan2DFS[ToTarjan] == 0)
2434 QueryNode(ToTarjan);
2438 // Add to work list if it's a representative and can contribute to the
2439 // calculation right now.
2440 while( (CurrNode = CurrWL->pop()) != NULL ) {
2441 CurrNodeIndex = CurrNode - &GraphNodes[0];
2445 // Figure out the changed points to bits
// Only the delta (new bits since last visit) needs processing.
2446 SparseBitVector<> CurrPointsTo;
2447 CurrPointsTo.intersectWithComplement(CurrNode->PointsTo,
2448 CurrNode->OldPointsTo);
2449 if (CurrPointsTo.empty())
2452 *(CurrNode->OldPointsTo) |= CurrPointsTo;
2454 // Check the offline-computed equivalencies from HCD.
// SDT[i] >= 0 means HCD proved *i is in a cycle with SDT[i]: every
// new pointee can be united with that representative immediately.
2458 if (SDT[CurrNodeIndex] >= 0) {
2460 Rep = FindNode(SDT[CurrNodeIndex]);
2465 for (SparseBitVector<>::iterator bi = CurrPointsTo.begin();
2466 bi != CurrPointsTo.end(); ++bi) {
2467 unsigned Node = FindNode(*bi);
// Special nodes are never merged into; remember them in RSV so the
// complex-constraint code below can still add edges *from* them.
2469 if (Node < NumberSpecialNodes) {
2470 RSV.push_back(Node);
2474 Rep = UniteNodes(Rep,Node);
2480 NextWL->insert(&GraphNodes[Rep]);
2482 if ( ! CurrNode->isRep() )
2488 /* Now process the constraints for this node. */
2489 for (std::list<Constraint>::iterator li = CurrNode->Constraints.begin();
2490 li != CurrNode->Constraints.end(); ) {
2491 li->Src = FindNode(li->Src);
2492 li->Dest = FindNode(li->Dest);
2494 // Delete redundant constraints
2495 if( Seen.count(*li) ) {
2496 std::list<Constraint>::iterator lk = li; li++;
2498 CurrNode->Constraints.erase(lk);
2504 // Src and Dest will be the vars we are going to process.
2505 // This may look a bit ugly, but what it does is allow us to process
2506 // both store and load constraints with the same code.
2507 // Load constraints say that every member of our RHS solution has K
2508 // added to it, and that variable gets an edge to LHS. We also union
2509 // RHS+K's solution into the LHS solution.
2510 // Store constraints say that every member of our LHS solution has K
2511 // added to it, and that variable gets an edge from RHS. We also union
2512 // RHS's solution into the LHS+K solution.
2515 unsigned K = li->Offset;
2516 unsigned CurrMember;
2517 if (li->Type == Constraint::Load) {
2520 } else if (li->Type == Constraint::Store) {
2524 // TODO Handle offseted copy constraint
2529 // See if we can use Hybrid Cycle Detection (that is, check
2530 // if it was a statically detected offline equivalence that
2531 // involves pointers; if so, remove the redundant constraints).
2532 if( SCC && K == 0 ) {
2536 if (GraphNodes[*Src].Edges->test_and_set(*Dest))
2537 if (GraphNodes[*Dest].PointsTo |= *(GraphNodes[*Src].PointsTo))
2538 NextWL->insert(&GraphNodes[*Dest]);
// Edges from the saved special variables must still be added even
// though those variables were never merged into the SCC rep.
2540 for (unsigned i=0; i < RSV.size(); ++i) {
2541 CurrMember = RSV[i];
2543 if (*Dest < NumberSpecialNodes)
2545 if (GraphNodes[*Src].Edges->test_and_set(*Dest))
2546 if (GraphNodes[*Dest].PointsTo |= *(GraphNodes[*Src].PointsTo))
2547 NextWL->insert(&GraphNodes[*Dest]);
2550 // since all future elements of the points-to set will be
2551 // equivalent to the current ones, the complex constraints
2552 // become redundant.
2554 std::list<Constraint>::iterator lk = li; li++;
2556 // In this case, we can still erase the constraints when the
2557 // elements of the points-to sets are referenced by *Dest,
2558 // but not when they are referenced by *Src (i.e. for a Load
2559 // constraint). This is because if another special variable is
2560 // put into the points-to set later, we still need to add the
2561 // new edge from that special variable.
2562 if( lk->Type != Constraint::Load)
2564 GraphNodes[CurrNodeIndex].Constraints.erase(lk);
// No offline shortcut: resolve the complex constraint against each
// newly discovered pointee in the delta.
2566 const SparseBitVector<> &Solution = CurrPointsTo;
2568 for (SparseBitVector<>::iterator bi = Solution.begin();
2569 bi != Solution.end();
2573 // Need to increment the member by K since that is where we are
2574 // supposed to copy to/from. Note that in positive weight cycles,
2575 // which occur in address taking of fields, K can go past
2576 // MaxK[CurrMember] elements, even though that is all it could point
2578 if (K > 0 && K > MaxK[CurrMember])
2581 CurrMember = FindNode(CurrMember + K);
2583 // Add an edge to the graph, so we can just do regular
2584 // bitmap ior next time. It may also let us notice a cycle.
2586 if (*Dest < NumberSpecialNodes)
2589 if (GraphNodes[*Src].Edges->test_and_set(*Dest))
2590 if (GraphNodes[*Dest].PointsTo |= *(GraphNodes[*Src].PointsTo))
2591 NextWL->insert(&GraphNodes[*Dest]);
2597 SparseBitVector<> NewEdges;
2598 SparseBitVector<> ToErase;
2600 // Now all we have left to do is propagate points-to info along the
2601 // edges, erasing the redundant edges.
2602 for (SparseBitVector<>::iterator bi = CurrNode->Edges->begin();
2603 bi != CurrNode->Edges->end();
2606 unsigned DestVar = *bi;
2607 unsigned Rep = FindNode(DestVar);
2609 // If we ended up with this node as our destination, or we've already
2610 // got an edge for the representative, delete the current edge.
2611 if (Rep == CurrNodeIndex ||
2612 (Rep != DestVar && NewEdges.test(Rep))) {
2613 ToErase.set(DestVar);
2617 std::pair<unsigned,unsigned> edge(CurrNodeIndex,Rep);
2619 // This is where we do lazy cycle detection.
2620 // If this is a cycle candidate (equal points-to sets and this
2621 // particular edge has not been cycle-checked previously), add to the
2622 // list to check for cycles on the next iteration.
2623 if (!EdgesChecked.count(edge) &&
2624 *(GraphNodes[Rep].PointsTo) == *(CurrNode->PointsTo)) {
2625 EdgesChecked.insert(edge);
2628 // Union the points-to sets into the dest
2630 if (Rep >= NumberSpecialNodes)
2632 if (GraphNodes[Rep].PointsTo |= CurrPointsTo) {
2633 NextWL->insert(&GraphNodes[Rep]);
2635 // If this edge's destination was collapsed, rewrite the edge.
2636 if (Rep != DestVar) {
2637 ToErase.set(DestVar);
2641 CurrNode->Edges->intersectWithComplement(ToErase);
2642 CurrNode->Edges |= NewEdges;
2645 // Switch to other work list.
// Double-buffered worklists: drain CurrWL while filling NextWL.
2646 WorkList* t = CurrWL; CurrWL = NextWL; NextWL = t;
// Solver done: free the per-node scratch state.
2651 Node2Deleted.clear();
2652 for (unsigned i = 0; i < GraphNodes.size(); ++i) {
2653 Node *N = &GraphNodes[i];
2654 delete N->OldPointsTo;
2661 //===----------------------------------------------------------------------===//
2663 //===----------------------------------------------------------------------===//
2665 // Unite nodes First and Second, returning the one which is now the
2666 // representative node. First and Second are indexes into GraphNodes
2667 unsigned Andersens::UniteNodes(unsigned First, unsigned Second,
2669 assert (First < GraphNodes.size() && Second < GraphNodes.size() &&
2670 "Attempting to merge nodes that don't exist");
2672 Node *FirstNode = &GraphNodes[First];
2673 Node *SecondNode = &GraphNodes[Second];
2675 assert (SecondNode->isRep() && FirstNode->isRep() &&
2676 "Trying to unite two non-representative nodes!");
2677 if (First == Second)
2681 int RankFirst = (int) FirstNode ->NodeRep;
2682 int RankSecond = (int) SecondNode->NodeRep;
2684 // Rank starts at -1 and gets decremented as it increases.
2685 // Translation: higher rank, lower NodeRep value, which is always negative.
2686 if (RankFirst > RankSecond) {
2687 unsigned t = First; First = Second; Second = t;
2688 Node* tp = FirstNode; FirstNode = SecondNode; SecondNode = tp;
2689 } else if (RankFirst == RankSecond) {
2690 FirstNode->NodeRep = (unsigned) (RankFirst - 1);
2694 SecondNode->NodeRep = First;
2696 if (First >= NumberSpecialNodes)
2698 if (FirstNode->PointsTo && SecondNode->PointsTo)
2699 FirstNode->PointsTo |= *(SecondNode->PointsTo);
2700 if (FirstNode->Edges && SecondNode->Edges)
2701 FirstNode->Edges |= *(SecondNode->Edges);
2702 if (!SecondNode->Constraints.empty())
2703 FirstNode->Constraints.splice(FirstNode->Constraints.begin(),
2704 SecondNode->Constraints);
2705 if (FirstNode->OldPointsTo) {
2706 delete FirstNode->OldPointsTo;
2707 FirstNode->OldPointsTo = new SparseBitVector<>;
2710 // Destroy interesting parts of the merged-from node.
2711 delete SecondNode->OldPointsTo;
2712 delete SecondNode->Edges;
2713 delete SecondNode->PointsTo;
2714 SecondNode->Edges = NULL;
2715 SecondNode->PointsTo = NULL;
2716 SecondNode->OldPointsTo = NULL;
2719 DEBUG(dbgs() << "Unified Node ");
2720 DEBUG(PrintNode(FirstNode));
2721 DEBUG(dbgs() << " and Node ");
2722 DEBUG(PrintNode(SecondNode));
2723 DEBUG(dbgs() << "\n");
2726 if (SDT[Second] >= 0) {
2728 SDT[First] = SDT[Second];
2730 UniteNodes( FindNode(SDT[First]), FindNode(SDT[Second]) );
2731 First = FindNode(First);
2738 // Find the index into GraphNodes of the node representing Node, performing
2739 // path compression along the way
2740 unsigned Andersens::FindNode(unsigned NodeIndex) {
2741 assert (NodeIndex < GraphNodes.size()
2742 && "Attempting to find a node that can't exist")
2743 Node *N = &GraphNodes[NodeIndex];
// Union-find lookup with path compression: cache the final
// representative directly on this node before returning it.
2747 return (N->NodeRep = FindNode(N->NodeRep));
2750 // Find the index into GraphNodes of the node representing Node,
2751 // don't perform path compression along the way (for Print)
2752 unsigned Andersens::FindNode(unsigned NodeIndex) const {
2753 assert (NodeIndex < GraphNodes.size()
2754 && "Attempting to find a node that can't exist");
2755 const Node *N = &GraphNodes[NodeIndex];
// Const variant used by the printers: same lookup, but no path
// compression since the graph may not be mutated here.
2759 return FindNode(N->NodeRep);
2762 //===----------------------------------------------------------------------===//
2764 //===----------------------------------------------------------------------===//
2766 void Andersens::PrintNode(const Node *N) const {
// Debug pretty-printer for one node: special nodes get fixed names,
// value-backed nodes print function-qualified value names, and
// return/vararg/object nodes are annotated accordingly.
2767 if (N == &GraphNodes[UniversalSet]) {
2768 dbgs() << "<universal>";
2770 } else if (N == &GraphNodes[NullPtr]) {
2771 dbgs() << "<nullptr>";
2773 } else if (N == &GraphNodes[NullObject]) {
2777 if (!N->getValue()) {
// Nodes created by the analysis itself have no IR value; print the
// node's address so distinct artificial nodes are distinguishable.
2778 dbgs() << "artificial" << (intptr_t) N;
2782 assert(N->getValue() != 0 && "Never set node label!");
2783 Value *V = N->getValue();
2784 if (Function *F = dyn_cast<Function>(V)) {
2785 if (F->getFunctionType()->getReturnType()->isPointerTy() &&
2786 N == &GraphNodes[getReturnNode(F)]) {
2787 dbgs() << F->getName() << ":retval";
2789 } else if (F->getFunctionType()->isVarArg() &&
2790 N == &GraphNodes[getVarargNode(F)]) {
2791 dbgs() << F->getName() << ":vararg";
// Prefix local values with the name of their enclosing function.
2796 if (Instruction *I = dyn_cast<Instruction>(V))
2797 dbgs() << I->getParent()->getParent()->getName() << ":";
2798 else if (Argument *Arg = dyn_cast<Argument>(V))
2799 dbgs() << Arg->getParent()->getName() << ":";
2802 dbgs() << V->getName();
2804 dbgs() << "(unnamed)";
// Memory objects (globals, allocas, mallocs) have a separate object
// node distinct from the pointer value's node.
2806 if (isa<GlobalValue>(V) || isa<AllocaInst>(V) || isMalloc(V))
2807 if (N == &GraphNodes[getObject(V)])
2810 void Andersens::PrintConstraint(const Constraint &C) const {
// Debug pretty-printer for one constraint in dereference notation:
// stores wrap the destination in *(...), loads wrap the source, and
// non-zero offsets print as " + Offset" on the relevant side.
2811 if (C.Type == Constraint::Store) {
2816 PrintNode(&GraphNodes[C.Dest]);
2817 if (C.Type == Constraint::Store && C.Offset != 0)
2818 dbgs() << " + " << C.Offset << ")";
2820 if (C.Type == Constraint::Load) {
2825 else if (C.Type == Constraint::AddressOf)
2827 PrintNode(&GraphNodes[C.Src]);
2828 if (C.Offset != 0 && C.Type != Constraint::Store)
2829 dbgs() << " + " << C.Offset;
2830 if (C.Type == Constraint::Load && C.Offset != 0)
2835 void Andersens::PrintConstraints() const {
// Debug dump of the entire constraint list, one line per constraint.
2836 dbgs() << "Constraints:\n";
2838 for (unsigned i = 0, e = Constraints.size(); i != e; ++i)
2839 PrintConstraint(Constraints[i]);
2842 void Andersens::PrintPointsToGraph() const {
2843 dbgs() << "Points-to graph:\n";
2844 for (unsigned i = 0, e = GraphNodes.size(); i != e; ++i) {
2845 const Node *N = &GraphNodes[i];
2846 if (FindNode(i) != i) {
2848 dbgs() << "\t--> same as ";
2849 PrintNode(&GraphNodes[FindNode(i)]);
2852 dbgs() << "[" << (N->PointsTo->count()) << "] ";
2857 for (SparseBitVector<>::iterator bi = N->PointsTo->begin();
2858 bi != N->PointsTo->end();
2862 PrintNode(&GraphNodes[*bi]);