1 //===- Andersens.cpp - Andersen's Interprocedural Alias Analysis ----------===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by the LLVM research group and is distributed under
6 // the University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines an implementation of Andersen's interprocedural alias
13 // In pointer analysis terms, this is a subset-based, flow-insensitive,
14 // field-sensitive, and context-insensitive pointer algorithm.
16 // This algorithm is implemented as four stages:
17 // 1. Object identification.
18 // 2. Inclusion constraint identification.
19 // 3. Offline constraint graph optimization
20 // 4. Inclusion constraint solving.
22 // The object identification stage identifies all of the memory objects in the
23 // program, which includes globals, heap allocated objects, and stack allocated
26 // The inclusion constraint identification stage finds all inclusion constraints
27 // in the program by scanning the program, looking for pointer assignments and
28 // other statements that affect the points-to graph.  For a statement like "A =
29 // B", this statement is processed to indicate that A can point to anything that
30 // B can point to. Constraints can handle copies, loads, and stores, and
33 // The offline constraint graph optimization portion includes offline variable
34 // substitution algorithms intended to compute pointer and location
35 // equivalences. Pointer equivalences are those pointers that will have the
36 // same points-to sets, and location equivalences are those variables that
37 // always appear together in points-to sets.
39 // The inclusion constraint solving phase iteratively propagates the inclusion
40 // constraints until a fixed point is reached. This is an O(N^3) algorithm.
42 // Function constraints are handled as if they were structs with X fields.
43 // Thus, an access to argument X of function Y is an access to node index
44 // getNode(Y) + X. This representation allows handling of indirect calls
45 // without any issues. To wit, an indirect call Y(a,b) is equivalent to
46 // *(Y + 1) = a, *(Y + 2) = b.
47 // The return node for a function is always located at getNode(F) +
48 // CallReturnPos. The arguments start at getNode(F) + CallArgPos.
50 // Future Improvements:
51 // Offline detection of online cycles. Use of BDD's.
52 //===----------------------------------------------------------------------===//
54 #define DEBUG_TYPE "anders-aa"
55 #include "llvm/Constants.h"
56 #include "llvm/DerivedTypes.h"
57 #include "llvm/Instructions.h"
58 #include "llvm/Module.h"
59 #include "llvm/Pass.h"
60 #include "llvm/Support/Compiler.h"
61 #include "llvm/Support/InstIterator.h"
62 #include "llvm/Support/InstVisitor.h"
63 #include "llvm/Analysis/AliasAnalysis.h"
64 #include "llvm/Analysis/Passes.h"
65 #include "llvm/Support/Debug.h"
66 #include "llvm/ADT/Statistic.h"
67 #include "llvm/ADT/SparseBitVector.h"
68 #include "llvm/ADT/DenseMap.h"
// Statistics reported via -stats for this pass.
76 STATISTIC(NumIters , "Number of iterations to reach convergence");
77 STATISTIC(NumConstraints, "Number of constraints");
78 STATISTIC(NumNodes , "Number of nodes");
79 STATISTIC(NumUnified , "Number of variables unified");
// Sentinel value: presumably marks a node as its own union-find
// representative — TODO confirm against the NodeRep/FindNode solver code.
82 const unsigned SelfRep = (unsigned)-1;
// Sentinel DFS number for nodes not yet reached during SCC visitation
// (assumption based on the Node2DFS usage declared below — verify).
83 const unsigned Unvisited = (unsigned)-1;
84 // Position of the function return node relative to the function node.
85 const unsigned CallReturnPos = 1;
86 // Position of the function call node relative to the function node.
87 const unsigned CallFirstArgPos = 2;
89 struct BitmapKeyInfo {
90 static inline SparseBitVector<> *getEmptyKey() {
91 return reinterpret_cast<SparseBitVector<> *>(-1);
93 static inline SparseBitVector<> *getTombstoneKey() {
94 return reinterpret_cast<SparseBitVector<> *>(-2);
96 static unsigned getHashValue(const SparseBitVector<> *bitmap) {
97 return bitmap->getHashValue();
99 static bool isEqual(const SparseBitVector<> *LHS,
100 const SparseBitVector<> *RHS) {
103 else if (LHS == getEmptyKey() || RHS == getEmptyKey()
104 || LHS == getTombstoneKey() || RHS == getTombstoneKey())
110 static bool isPod() { return true; }
113 class VISIBILITY_HIDDEN Andersens : public ModulePass, public AliasAnalysis,
114 private InstVisitor<Andersens> {
117 /// Constraint - Objects of this structure are used to represent the various
118 /// constraints identified by the algorithm. The constraints are 'copy',
119 /// for statements like "A = B", 'load' for statements like "A = *B",
120 /// 'store' for statements like "*A = B", and AddressOf for statements like
121 /// A = alloca; The Offset is applied as *(A + K) = B for stores,
122 /// A = *(B + K) for loads, and A = B + K for copies. It is
123 /// illegal on addressof constraints (because it is statically
124 /// resolvable to A = &C where C = B + K)
127 enum ConstraintType { Copy, Load, Store, AddressOf } Type;
132 Constraint(ConstraintType Ty, unsigned D, unsigned S, unsigned O = 0)
133 : Type(Ty), Dest(D), Src(S), Offset(O) {
134 assert(Offset == 0 || Ty != AddressOf &&
135 "Offset is illegal on addressof constraints");
139 // Node class - This class is used to represent a node in the constraint
140 // graph. Due to various optimizations, it is not always the case that
141 // there is a mapping from a Node to a Value. In particular, we add
142 // artificial Node's that represent the set of pointed-to variables shared
143 // for each location equivalent Node.
146 SparseBitVector<> *Edges;
147 SparseBitVector<> *PointsTo;
148 SparseBitVector<> *OldPointsTo;
150 std::list<Constraint> Constraints;
152 // Pointer and location equivalence labels
153 unsigned PointerEquivLabel;
154 unsigned LocationEquivLabel;
155 // Predecessor edges, both real and implicit
156 SparseBitVector<> *PredEdges;
157 SparseBitVector<> *ImplicitPredEdges;
158 // Set of nodes that point to us, only use for location equivalence.
159 SparseBitVector<> *PointedToBy;
160 // Number of incoming edges, used during variable substitution to early
161 // free the points-to sets
163 // True if our points-to set is in the Set2PEClass map
165 // True if our node has no indirect constraints (complex or otherwise)
167 // True if the node is address taken, *or* it is part of a group of nodes
168 // that must be kept together. This is set to true for functions and
169 // their arg nodes, which must be kept at the same position relative to
170 // their base function node.
173 // Nodes in cycles (or in equivalence classes) are united together using a
174 // standard union-find representation with path compression. NodeRep
175 // gives the index into GraphNodes for the representative Node.
179 Node(bool direct = true) :
180 Val(0), Edges(0), PointsTo(0), OldPointsTo(0), Changed(false),
181 PointerEquivLabel(0), LocationEquivLabel(0), PredEdges(0),
182 ImplicitPredEdges(0), PointedToBy(0), NumInEdges(0),
183 StoredInHash(false), Direct(direct), AddressTaken(false),
186 Node *setValue(Value *V) {
187 assert(Val == 0 && "Value already set for this node!");
192 /// getValue - Return the LLVM value corresponding to this node.
194 Value *getValue() const { return Val; }
196 /// addPointerTo - Add a pointer to the list of pointees of this node,
197 /// returning true if this caused a new pointer to be added, or false if
198 /// we already knew about the points-to relation.
199 bool addPointerTo(unsigned Node) {
200 return PointsTo->test_and_set(Node);
203 /// intersects - Return true if the points-to set of this node intersects
204 /// with the points-to set of the specified node.
205 bool intersects(Node *N) const;
207 /// intersectsIgnoring - Return true if the points-to set of this node
208 /// intersects with the points-to set of the specified node on any nodes
209 /// except for the specified node to ignore.
210 bool intersectsIgnoring(Node *N, unsigned) const;
213 /// GraphNodes - This vector is populated as part of the object
214 /// identification stage of the analysis, which populates this vector with a
215 /// node for each memory object and fills in the ValueNodes map.
216 std::vector<Node> GraphNodes;
218 /// ValueNodes - This map indicates the Node that a particular Value* is
219 /// represented by. This contains entries for all pointers.
220 DenseMap<Value*, unsigned> ValueNodes;
222 /// ObjectNodes - This map contains entries for each memory object in the
223 /// program: globals, alloca's and mallocs.
224 DenseMap<Value*, unsigned> ObjectNodes;
226 /// ReturnNodes - This map contains an entry for each function in the
227 /// program that returns a value.
228 DenseMap<Function*, unsigned> ReturnNodes;
230 /// VarargNodes - This map contains the entry used to represent all pointers
231 /// passed through the varargs portion of a function call for a particular
232 /// function. An entry is not present in this map for functions that do not
233 /// take variable arguments.
234 DenseMap<Function*, unsigned> VarargNodes;
237 /// Constraints - This vector contains a list of all of the constraints
238 /// identified by the program.
239 std::vector<Constraint> Constraints;
241 // Map from graph node to maximum K value that is allowed (for functions,
242 // this is equivalent to the number of arguments + CallFirstArgPos)
243 std::map<unsigned, unsigned> MaxK;
245 /// This enum defines the GraphNodes indices that correspond to important
253 // Stack for Tarjan's
254 std::stack<unsigned> SCCStack;
255 // Topological Index -> Graph node
256 std::vector<unsigned> Topo2Node;
257 // Graph Node -> Topological Index;
258 std::vector<unsigned> Node2Topo;
259 // Map from Graph Node to DFS number
260 std::vector<unsigned> Node2DFS;
261 // Map from Graph Node to Deleted from graph.
262 std::vector<bool> Node2Deleted;
263 // Current DFS and RPO numbers
267 // Offline variable substitution related things
269 // Temporary rep storage, used because we can't collapse SCC's in the
270 // predecessor graph by uniting the variables permanently, we can only do so
271 // for the successor graph.
272 std::vector<unsigned> VSSCCRep;
273 // Mapping from node to whether we have visited it during SCC finding yet.
274 std::vector<bool> Node2Visited;
275 // During variable substitution, we create unknowns to represent the unknown
276 // value that is a dereference of a variable. These nodes are known as
277 // "ref" nodes (since they represent the value of dereferences).
278 unsigned FirstRefNode;
279 // During HVN, we represent address taken nodes as if they were
280 // unknown (since HVN, unlike HU, does not evaluate unions).
281 unsigned FirstAdrNode;
282 // Current pointer equivalence class number
284 // Mapping from points-to sets to equivalence classes
285 typedef DenseMap<SparseBitVector<> *, unsigned, BitmapKeyInfo> BitVectorMap;
286 BitVectorMap Set2PEClass;
287 // Mapping from pointer equivalences to the representative node. -1 if we
288 // have no representative node for this pointer equivalence class yet.
289 std::vector<int> PEClass2Node;
290 // Mapping from pointer equivalences to representative node. This includes
291 // pointer equivalent but not location equivalent variables. -1 if we have
292 // no representative node for this pointer equivalence class yet.
293 std::vector<int> PENLEClass2Node;
297 Andersens() : ModulePass((intptr_t)&ID) {}
299 bool runOnModule(Module &M) {
300 InitializeAliasAnalysis(this);
302 CollectConstraints(M);
304 #define DEBUG_TYPE "anders-aa-constraints"
305 DEBUG(PrintConstraints());
307 #define DEBUG_TYPE "anders-aa"
309 DEBUG(PrintPointsToGraph());
311 // Free the constraints list, as we don't need it to respond to alias
316 std::vector<Constraint>().swap(Constraints);
320 void releaseMemory() {
321 // FIXME: Until we have transitively required passes working correctly,
322 // this cannot be enabled! Otherwise, using -count-aa with the pass
323 // causes memory to be freed too early. :(
325 // The memory objects and ValueNodes data structures are the only ones that
326 // are still live after construction.
327 std::vector<Node>().swap(GraphNodes);
332 virtual void getAnalysisUsage(AnalysisUsage &AU) const {
333 AliasAnalysis::getAnalysisUsage(AU);
334 AU.setPreservesAll(); // Does not transform code
337 //------------------------------------------------
338 // Implement the AliasAnalysis API
340 AliasResult alias(const Value *V1, unsigned V1Size,
341 const Value *V2, unsigned V2Size);
342 virtual ModRefResult getModRefInfo(CallSite CS, Value *P, unsigned Size);
343 virtual ModRefResult getModRefInfo(CallSite CS1, CallSite CS2);
344 void getMustAliases(Value *P, std::vector<Value*> &RetVals);
345 bool pointsToConstantMemory(const Value *P);
347 virtual void deleteValue(Value *V) {
349 getAnalysis<AliasAnalysis>().deleteValue(V);
352 virtual void copyValue(Value *From, Value *To) {
353 ValueNodes[To] = ValueNodes[From];
354 getAnalysis<AliasAnalysis>().copyValue(From, To);
358 /// getNode - Return the node corresponding to the specified pointer scalar.
360 unsigned getNode(Value *V) {
361 if (Constant *C = dyn_cast<Constant>(V))
362 if (!isa<GlobalValue>(C))
363 return getNodeForConstantPointer(C);
365 DenseMap<Value*, unsigned>::iterator I = ValueNodes.find(V);
366 if (I == ValueNodes.end()) {
370 assert(0 && "Value does not have a node in the points-to graph!");
375 /// getObject - Return the node corresponding to the memory object for the
376 /// specified global or allocation instruction.
377 unsigned getObject(Value *V) {
378 DenseMap<Value*, unsigned>::iterator I = ObjectNodes.find(V);
379 assert(I != ObjectNodes.end() &&
380 "Value does not have an object in the points-to graph!");
384 /// getReturnNode - Return the node representing the return value for the
385 /// specified function.
386 unsigned getReturnNode(Function *F) {
387 DenseMap<Function*, unsigned>::iterator I = ReturnNodes.find(F);
388 assert(I != ReturnNodes.end() && "Function does not return a value!");
392 /// getVarargNode - Return the node representing the variable arguments
393 /// formal for the specified function.
394 unsigned getVarargNode(Function *F) {
395 DenseMap<Function*, unsigned>::iterator I = VarargNodes.find(F);
396 assert(I != VarargNodes.end() && "Function does not take var args!");
400 /// getNodeValue - Get the node for the specified LLVM value and set the
401 /// value for it to be the specified value.
402 unsigned getNodeValue(Value &V) {
403 unsigned Index = getNode(&V);
404 GraphNodes[Index].setValue(&V);
408 unsigned UniteNodes(unsigned First, unsigned Second);
409 unsigned FindNode(unsigned Node);
411 void IdentifyObjects(Module &M);
412 void CollectConstraints(Module &M);
413 bool AnalyzeUsesOfFunction(Value *);
414 void CreateConstraintGraph();
415 void OptimizeConstraints();
416 unsigned FindEquivalentNode(unsigned, unsigned);
417 void ClumpAddressTaken();
418 void RewriteConstraints();
421 void UnitePointerEquivalences();
422 void SolveConstraints();
423 void QueryNode(unsigned Node);
424 void Condense(unsigned Node);
425 void HUValNum(unsigned Node);
426 void HVNValNum(unsigned Node);
427 unsigned getNodeForConstantPointer(Constant *C);
428 unsigned getNodeForConstantPointerTarget(Constant *C);
429 void AddGlobalInitializerConstraints(unsigned, Constant *C);
431 void AddConstraintsForNonInternalLinkage(Function *F);
432 void AddConstraintsForCall(CallSite CS, Function *F);
433 bool AddConstraintsForExternalCall(CallSite CS, Function *F);
436 void PrintNode(Node *N);
437 void PrintConstraints();
438 void PrintConstraint(const Constraint &);
440 void PrintPointsToGraph();
442 //===------------------------------------------------------------------===//
443 // Instruction visitation methods for adding constraints
445 friend class InstVisitor<Andersens>;
446 void visitReturnInst(ReturnInst &RI);
447 void visitInvokeInst(InvokeInst &II) { visitCallSite(CallSite(&II)); }
448 void visitCallInst(CallInst &CI) { visitCallSite(CallSite(&CI)); }
449 void visitCallSite(CallSite CS);
450 void visitAllocationInst(AllocationInst &AI);
451 void visitLoadInst(LoadInst &LI);
452 void visitStoreInst(StoreInst &SI);
453 void visitGetElementPtrInst(GetElementPtrInst &GEP);
454 void visitPHINode(PHINode &PN);
455 void visitCastInst(CastInst &CI);
456 void visitICmpInst(ICmpInst &ICI) {} // NOOP!
457 void visitFCmpInst(FCmpInst &ICI) {} // NOOP!
458 void visitSelectInst(SelectInst &SI);
459 void visitVAArg(VAArgInst &I);
460 void visitInstruction(Instruction &I);
// Pass registration: exposes this pass as -anders-aa and registers it as an
// implementation of the AliasAnalysis analysis group.
464 char Andersens::ID = 0;
465 RegisterPass<Andersens> X("anders-aa",
466 "Andersen's Interprocedural Alias Analysis");
467 RegisterAnalysisGroup<AliasAnalysis> Y(X);
// Factory entry point for clients that create the pass programmatically.
470 ModulePass *llvm::createAndersensPass() { return new Andersens(); }
472 //===----------------------------------------------------------------------===//
473 // AliasAnalysis Interface Implementation
474 //===----------------------------------------------------------------------===//
476 AliasAnalysis::AliasResult Andersens::alias(const Value *V1, unsigned V1Size,
477 const Value *V2, unsigned V2Size) {
478 Node *N1 = &GraphNodes[FindNode(getNode(const_cast<Value*>(V1)))];
479 Node *N2 = &GraphNodes[FindNode(getNode(const_cast<Value*>(V2)))];
481 // Check to see if the two pointers are known to not alias. They don't alias
482 // if their points-to sets do not intersect.
483 if (!N1->intersectsIgnoring(N2, NullObject))
486 return AliasAnalysis::alias(V1, V1Size, V2, V2Size);
489 AliasAnalysis::ModRefResult
490 Andersens::getModRefInfo(CallSite CS, Value *P, unsigned Size) {
491 // The only thing useful that we can contribute for mod/ref information is
492 // when calling external function calls: if we know that memory never escapes
493 // from the program, it cannot be modified by an external call.
495 // NOTE: This is not really safe, at least not when the entire program is not
496 // available. The deal is that the external function could call back into the
497 // program and modify stuff. We ignore this technical niggle for now. This
498 // is, after all, a "research quality" implementation of Andersen's analysis.
499 if (Function *F = CS.getCalledFunction())
500 if (F->isDeclaration()) {
501 Node *N1 = &GraphNodes[FindNode(getNode(P))];
503 if (N1->PointsTo->empty())
506 if (!N1->PointsTo->test(UniversalSet))
507 return NoModRef; // P doesn't point to the universal set.
510 return AliasAnalysis::getModRefInfo(CS, P, Size);
513 AliasAnalysis::ModRefResult
514 Andersens::getModRefInfo(CallSite CS1, CallSite CS2) {
515 return AliasAnalysis::getModRefInfo(CS1,CS2);
518 /// getMustAlias - We can provide must alias information if we know that a
519 /// pointer can only point to a specific function or the null pointer.
520 /// Unfortunately we cannot determine must-alias information for global
521 /// variables or any other memory memory objects because we do not track whether
522 /// a pointer points to the beginning of an object or a field of it.
523 void Andersens::getMustAliases(Value *P, std::vector<Value*> &RetVals) {
524 Node *N = &GraphNodes[FindNode(getNode(P))];
525 if (N->PointsTo->count() == 1) {
526 Node *Pointee = &GraphNodes[N->PointsTo->find_first()];
527 // If a function is the only object in the points-to set, then it must be
528 // the destination. Note that we can't handle global variables here,
529 // because we don't know if the pointer is actually pointing to a field of
530 // the global or to the beginning of it.
531 if (Value *V = Pointee->getValue()) {
532 if (Function *F = dyn_cast<Function>(V))
533 RetVals.push_back(F);
535 // If the object in the points-to set is the null object, then the null
536 // pointer is a must alias.
537 if (Pointee == &GraphNodes[NullObject])
538 RetVals.push_back(Constant::getNullValue(P->getType()));
541 AliasAnalysis::getMustAliases(P, RetVals);
544 /// pointsToConstantMemory - If we can determine that this pointer only points
545 /// to constant memory, return true. In practice, this means that if the
546 /// pointer can only point to constant globals, functions, or the null pointer,
549 bool Andersens::pointsToConstantMemory(const Value *P) {
550 Node *N = &GraphNodes[FindNode(getNode((Value*)P))];
553 for (SparseBitVector<>::iterator bi = N->PointsTo->begin();
554 bi != N->PointsTo->end();
557 Node *Pointee = &GraphNodes[i];
558 if (Value *V = Pointee->getValue()) {
559 if (!isa<GlobalValue>(V) || (isa<GlobalVariable>(V) &&
560 !cast<GlobalVariable>(V)->isConstant()))
561 return AliasAnalysis::pointsToConstantMemory(P);
564 return AliasAnalysis::pointsToConstantMemory(P);
571 //===----------------------------------------------------------------------===//
572 // Object Identification Phase
573 //===----------------------------------------------------------------------===//
575 /// IdentifyObjects - This stage scans the program, adding an entry to the
576 /// GraphNodes list for each memory object in the program (global stack or
577 /// heap), and populates the ValueNodes and ObjectNodes maps for these objects.
579 void Andersens::IdentifyObjects(Module &M) {
580 unsigned NumObjects = 0;
582 // Object #0 is always the universal set: the object that we don't know
584 assert(NumObjects == UniversalSet && "Something changed!");
587 // Object #1 always represents the null pointer.
588 assert(NumObjects == NullPtr && "Something changed!");
591 // Object #2 always represents the null object (the object pointed to by null)
592 assert(NumObjects == NullObject && "Something changed!");
595 // Add all the globals first.
596 for (Module::global_iterator I = M.global_begin(), E = M.global_end();
598 ObjectNodes[I] = NumObjects++;
599 ValueNodes[I] = NumObjects++;
602 // Add nodes for all of the functions and the instructions inside of them.
603 for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
604 // The function itself is a memory object.
605 unsigned First = NumObjects;
606 ValueNodes[F] = NumObjects++;
607 if (isa<PointerType>(F->getFunctionType()->getReturnType()))
608 ReturnNodes[F] = NumObjects++;
609 if (F->getFunctionType()->isVarArg())
610 VarargNodes[F] = NumObjects++;
613 // Add nodes for all of the incoming pointer arguments.
614 for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end();
617 if (isa<PointerType>(I->getType()))
618 ValueNodes[I] = NumObjects++;
620 MaxK[First] = NumObjects - First;
622 // Scan the function body, creating a memory object for each heap/stack
623 // allocation in the body of the function and a node to represent all
624 // pointer values defined by instructions and used as operands.
625 for (inst_iterator II = inst_begin(F), E = inst_end(F); II != E; ++II) {
626 // If this is an heap or stack allocation, create a node for the memory
628 if (isa<PointerType>(II->getType())) {
629 ValueNodes[&*II] = NumObjects++;
630 if (AllocationInst *AI = dyn_cast<AllocationInst>(&*II))
631 ObjectNodes[AI] = NumObjects++;
636 // Now that we know how many objects to create, make them all now!
637 GraphNodes.resize(NumObjects);
638 NumNodes += NumObjects;
641 //===----------------------------------------------------------------------===//
642 // Constraint Identification Phase
643 //===----------------------------------------------------------------------===//
645 /// getNodeForConstantPointer - Return the node corresponding to the constant
647 unsigned Andersens::getNodeForConstantPointer(Constant *C) {
648 assert(isa<PointerType>(C->getType()) && "Not a constant pointer!");
650 if (isa<ConstantPointerNull>(C) || isa<UndefValue>(C))
652 else if (GlobalValue *GV = dyn_cast<GlobalValue>(C))
654 else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
655 switch (CE->getOpcode()) {
656 case Instruction::GetElementPtr:
657 return getNodeForConstantPointer(CE->getOperand(0));
658 case Instruction::IntToPtr:
660 case Instruction::BitCast:
661 return getNodeForConstantPointer(CE->getOperand(0));
663 cerr << "Constant Expr not yet handled: " << *CE << "\n";
667 assert(0 && "Unknown constant pointer!");
672 /// getNodeForConstantPointerTarget - Return the node POINTED TO by the
673 /// specified constant pointer.
674 unsigned Andersens::getNodeForConstantPointerTarget(Constant *C) {
675 assert(isa<PointerType>(C->getType()) && "Not a constant pointer!");
677 if (isa<ConstantPointerNull>(C))
679 else if (GlobalValue *GV = dyn_cast<GlobalValue>(C))
680 return getObject(GV);
681 else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
682 switch (CE->getOpcode()) {
683 case Instruction::GetElementPtr:
684 return getNodeForConstantPointerTarget(CE->getOperand(0));
685 case Instruction::IntToPtr:
687 case Instruction::BitCast:
688 return getNodeForConstantPointerTarget(CE->getOperand(0));
690 cerr << "Constant Expr not yet handled: " << *CE << "\n";
694 assert(0 && "Unknown constant pointer!");
699 /// AddGlobalInitializerConstraints - Add inclusion constraints for the memory
700 /// object N, which contains values indicated by C.
701 void Andersens::AddGlobalInitializerConstraints(unsigned NodeIndex,
703 if (C->getType()->isFirstClassType()) {
704 if (isa<PointerType>(C->getType()))
705 Constraints.push_back(Constraint(Constraint::Copy, NodeIndex,
706 getNodeForConstantPointer(C)));
707 } else if (C->isNullValue()) {
708 Constraints.push_back(Constraint(Constraint::Copy, NodeIndex,
711 } else if (!isa<UndefValue>(C)) {
712 // If this is an array or struct, include constraints for each element.
713 assert(isa<ConstantArray>(C) || isa<ConstantStruct>(C));
714 for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i)
715 AddGlobalInitializerConstraints(NodeIndex,
716 cast<Constant>(C->getOperand(i)));
720 /// AddConstraintsForNonInternalLinkage - If this function does not have
721 /// internal linkage, realize that we can't trust anything passed into or
722 /// returned by this function.
723 void Andersens::AddConstraintsForNonInternalLinkage(Function *F) {
724 for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E; ++I)
725 if (isa<PointerType>(I->getType()))
726 // If this is an argument of an externally accessible function, the
727 // incoming pointer might point to anything.
728 Constraints.push_back(Constraint(Constraint::Copy, getNode(I),
732 /// AddConstraintsForCall - If this is a call to a "known" function, add the
733 /// constraints and return true. If this is a call to an unknown function,
735 bool Andersens::AddConstraintsForExternalCall(CallSite CS, Function *F) {
736 assert(F->isDeclaration() && "Not an external function!");
738 // These functions don't induce any points-to constraints.
739 if (F->getName() == "atoi" || F->getName() == "atof" ||
740 F->getName() == "atol" || F->getName() == "atoll" ||
741 F->getName() == "remove" || F->getName() == "unlink" ||
742 F->getName() == "rename" || F->getName() == "memcmp" ||
743 F->getName() == "llvm.memset.i32" ||
744 F->getName() == "llvm.memset.i64" ||
745 F->getName() == "strcmp" || F->getName() == "strncmp" ||
746 F->getName() == "execl" || F->getName() == "execlp" ||
747 F->getName() == "execle" || F->getName() == "execv" ||
748 F->getName() == "execvp" || F->getName() == "chmod" ||
749 F->getName() == "puts" || F->getName() == "write" ||
750 F->getName() == "open" || F->getName() == "create" ||
751 F->getName() == "truncate" || F->getName() == "chdir" ||
752 F->getName() == "mkdir" || F->getName() == "rmdir" ||
753 F->getName() == "read" || F->getName() == "pipe" ||
754 F->getName() == "wait" || F->getName() == "time" ||
755 F->getName() == "stat" || F->getName() == "fstat" ||
756 F->getName() == "lstat" || F->getName() == "strtod" ||
757 F->getName() == "strtof" || F->getName() == "strtold" ||
758 F->getName() == "fopen" || F->getName() == "fdopen" ||
759 F->getName() == "freopen" ||
760 F->getName() == "fflush" || F->getName() == "feof" ||
761 F->getName() == "fileno" || F->getName() == "clearerr" ||
762 F->getName() == "rewind" || F->getName() == "ftell" ||
763 F->getName() == "ferror" || F->getName() == "fgetc" ||
764 F->getName() == "fgetc" || F->getName() == "_IO_getc" ||
765 F->getName() == "fwrite" || F->getName() == "fread" ||
766 F->getName() == "fgets" || F->getName() == "ungetc" ||
767 F->getName() == "fputc" ||
768 F->getName() == "fputs" || F->getName() == "putc" ||
769 F->getName() == "ftell" || F->getName() == "rewind" ||
770 F->getName() == "_IO_putc" || F->getName() == "fseek" ||
771 F->getName() == "fgetpos" || F->getName() == "fsetpos" ||
772 F->getName() == "printf" || F->getName() == "fprintf" ||
773 F->getName() == "sprintf" || F->getName() == "vprintf" ||
774 F->getName() == "vfprintf" || F->getName() == "vsprintf" ||
775 F->getName() == "scanf" || F->getName() == "fscanf" ||
776 F->getName() == "sscanf" || F->getName() == "__assert_fail" ||
777 F->getName() == "modf")
781 // These functions do induce points-to edges.
782 if (F->getName() == "llvm.memcpy.i32" || F->getName() == "llvm.memcpy.i64" ||
783 F->getName() == "llvm.memmove.i32" ||F->getName() == "llvm.memmove.i64" ||
784 F->getName() == "memmove") {
786 // *Dest = *Src, which requires an artificial graph node to represent the
787 // constraint. It is broken up into *Dest = temp, temp = *Src
788 unsigned FirstArg = getNode(CS.getArgument(0));
789 unsigned SecondArg = getNode(CS.getArgument(1));
790 unsigned TempArg = GraphNodes.size();
791 GraphNodes.push_back(Node());
792 Constraints.push_back(Constraint(Constraint::Store,
794 Constraints.push_back(Constraint(Constraint::Load,
795 TempArg, SecondArg));
800 if (F->getName() == "realloc" || F->getName() == "strchr" ||
801 F->getName() == "strrchr" || F->getName() == "strstr" ||
802 F->getName() == "strtok") {
803 Constraints.push_back(Constraint(Constraint::Copy,
804 getNode(CS.getInstruction()),
805 getNode(CS.getArgument(0))));
814 /// AnalyzeUsesOfFunction - Look at all of the users of the specified function.
815 /// If this is used by anything complex (i.e., the address escapes), return
// NOTE(review): this function body appears to have lost several lines in
// extraction (the load/store branch bodies and closing braces are missing),
// so the branch-by-branch escape semantics below could not be verified from
// this view — confirm against the upstream revision before editing.
817 bool Andersens::AnalyzeUsesOfFunction(Value *V) {
// Non-pointer values are conservatively treated as escaping.
819 if (!isa<PointerType>(V->getType())) return true;
821 for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ++UI)
822 if (dyn_cast<LoadInst>(*UI)) {
824 } else if (StoreInst *SI = dyn_cast<StoreInst>(*UI)) {
825 if (V == SI->getOperand(1)) {
827 } else if (SI->getOperand(1)) {
828 return true; // Storing the pointer
830 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(*UI)) {
// GEPs are analyzed recursively: escape of the derived pointer escapes V.
831 if (AnalyzeUsesOfFunction(GEP)) return true;
832 } else if (CallInst *CI = dyn_cast<CallInst>(*UI)) {
833 // Make sure that this is just the function being called, not that it is
834 // passing into the function.
835 for (unsigned i = 1, e = CI->getNumOperands(); i != e; ++i)
836 if (CI->getOperand(i) == V) return true;
837 } else if (InvokeInst *II = dyn_cast<InvokeInst>(*UI)) {
838 // Make sure that this is just the function being called, not that it is
839 // passing into the function.
// Operands 0-2 of an invoke are the callee and the two successor blocks,
// so argument scanning starts at index 3 here.
840 for (unsigned i = 3, e = II->getNumOperands(); i != e; ++i)
841 if (II->getOperand(i) == V) return true;
842 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(*UI)) {
// Constant GEP/bitcast users are analyzed recursively like instructions.
843 if (CE->getOpcode() == Instruction::GetElementPtr ||
844 CE->getOpcode() == Instruction::BitCast) {
845 if (AnalyzeUsesOfFunction(CE))
850 } else if (ICmpInst *ICI = dyn_cast<ICmpInst>(*UI)) {
851 if (!isa<ConstantPointerNull>(ICI->getOperand(1)))
852 return true; // Allow comparison against null.
853 } else if (dyn_cast<FreeInst>(*UI)) {
861 /// CollectConstraints - This stage scans the program, adding a constraint to
862 /// the Constraints list for each instruction in the program that induces a
863 /// constraint, and setting up the initial points-to graph.
// NOTE(review): this listing is an excerpt; gaps in the embedded original line
// numbers (e.g. 868, 870-871, 884-885) indicate elided source lines.
865 void Andersens::CollectConstraints(Module &M) {
866 // First, the universal set points to itself.
867 Constraints.push_back(Constraint(Constraint::AddressOf, UniversalSet,
869 Constraints.push_back(Constraint(Constraint::Store, UniversalSet,
872 // Next, the null pointer points to the null object.
873 Constraints.push_back(Constraint(Constraint::AddressOf, NullPtr, NullObject));
875 // Next, add any constraints on global variables and their initializers.
876 for (Module::global_iterator I = M.global_begin(), E = M.global_end();
878 // Associate the address of the global object as pointing to the memory for
879 // the global: &G = <G memory>
880 unsigned ObjectIndex = getObject(I);
881 Node *Object = &GraphNodes[ObjectIndex];
883 Constraints.push_back(Constraint(Constraint::AddressOf, getNodeValue(*I),
886 if (I->hasInitializer()) {
887 AddGlobalInitializerConstraints(ObjectIndex, I->getInitializer());
889 // If it doesn't have an initializer (i.e. it's defined in another
890 // translation unit), it points to the universal set.
891 Constraints.push_back(Constraint(Constraint::Copy, ObjectIndex,
// Now process every function: set up its return/vararg/argument nodes, then
// either scan its body (definitions) or conservatively tie it to the
// universal set (declarations).
896 for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
897 // Set up the return value node.
898 if (isa<PointerType>(F->getFunctionType()->getReturnType()))
899 GraphNodes[getReturnNode(F)].setValue(F);
900 if (F->getFunctionType()->isVarArg())
901 GraphNodes[getVarargNode(F)].setValue(F);
903 // Set up incoming argument nodes.
904 for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end();
906 if (isa<PointerType>(I->getType()))
909 // At some point we should just add constraints for the escaping functions
910 // at solve time, but this slows down solving. For now, we simply mark
911 // address taken functions as escaping and treat them as external.
912 if (!F->hasInternalLinkage() || AnalyzeUsesOfFunction(F))
913 AddConstraintsForNonInternalLinkage(F);
915 if (!F->isDeclaration()) {
916 // Scan the function body, creating a memory object for each heap/stack
917 // allocation in the body of the function and a node to represent all
918 // pointer values defined by instructions and used as operands.
921 // External functions that return pointers return the universal set.
922 if (isa<PointerType>(F->getFunctionType()->getReturnType()))
923 Constraints.push_back(Constraint(Constraint::Copy,
927 // Any pointers that are passed into the function have the universal set
929 for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end();
931 if (isa<PointerType>(I->getType())) {
932 // Pointers passed into external functions could have anything stored
934 Constraints.push_back(Constraint(Constraint::Store, getNode(I),
936 // Memory objects passed into external function calls can have the
937 // universal set point to them.
938 Constraints.push_back(Constraint(Constraint::Copy,
943 // If this is an external varargs function, it can also store pointers
944 // into any pointers passed through the varargs section.
945 if (F->getFunctionType()->isVarArg())
946 Constraints.push_back(Constraint(Constraint::Store, getVarargNode(F),
// Track the total number of constraints generated (statistics counter).
950 NumConstraints += Constraints.size();
954 void Andersens::visitInstruction(Instruction &I) {
// Catch-all visitor: anything without a dedicated visit* method lands here.
// Instructions known to have no effect on pointer values fall through the
// switch; anything else is reported as unhandled.
956 return; // This function is just a big assert.
958 if (isa<BinaryOperator>(I))
960 // Most instructions don't have any effect on pointer values.
961 switch (I.getOpcode()) {
962 case Instruction::Br:
963 case Instruction::Switch:
964 case Instruction::Unwind:
965 case Instruction::Unreachable:
966 case Instruction::Free:
967 case Instruction::ICmp:
968 case Instruction::FCmp:
971 // Is this something we aren't handling yet?
972 cerr << "Unknown instruction: " << I;
977 void Andersens::visitAllocationInst(AllocationInst &AI) {
// Create a memory object for this alloca/malloc and make the resulting
// pointer value point at it: &Obj = AI.
978 unsigned ObjectIndex = getObject(&AI);
979 GraphNodes[ObjectIndex].setValue(&AI);
980 Constraints.push_back(Constraint(Constraint::AddressOf, getNodeValue(AI),
984 void Andersens::visitReturnInst(ReturnInst &RI) {
// Only pointer-typed return values induce a constraint: the returned value
// flows into the enclosing function's return-value node.
985 if (RI.getNumOperands() && isa<PointerType>(RI.getOperand(0)->getType()))
986 // return V --> <Copy/retval{F}/v>
987 Constraints.push_back(Constraint(Constraint::Copy,
988 getReturnNode(RI.getParent()->getParent()),
989 getNode(RI.getOperand(0))));
992 void Andersens::visitLoadInst(LoadInst &LI) {
// Loads of non-pointer values are irrelevant to the points-to graph.
993 if (isa<PointerType>(LI.getType()))
994 // P1 = load P2 --> <Load/P1/P2>
995 Constraints.push_back(Constraint(Constraint::Load, getNodeValue(LI),
996 getNode(LI.getOperand(0))));
999 void Andersens::visitStoreInst(StoreInst &SI) {
// Only stores of pointer values matter; storing a non-pointer cannot change
// any points-to set.
1000 if (isa<PointerType>(SI.getOperand(0)->getType()))
1001 // store P1, P2 --> <Store/P2/P1>
1002 Constraints.push_back(Constraint(Constraint::Store,
1003 getNode(SI.getOperand(1)),
1004 getNode(SI.getOperand(0))));
1007 void Andersens::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// The GEP result aliases its base pointer: modeled as a plain copy of
// operand 0 (the indices are not consulted here).
1008 // P1 = getelementptr P2, ... --> <Copy/P1/P2>
1009 Constraints.push_back(Constraint(Constraint::Copy, getNodeValue(GEP),
1010 getNode(GEP.getOperand(0))));
1013 void Andersens::visitPHINode(PHINode &PN) {
// A pointer-typed phi merges every incoming value into the phi's node.
1014 if (isa<PointerType>(PN.getType())) {
1015 unsigned PNN = getNodeValue(PN);
1016 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
1017 // P1 = phi P2, P3 --> <Copy/P1/P2>, <Copy/P1/P3>, ...
1018 Constraints.push_back(Constraint(Constraint::Copy, PNN,
1019 getNode(PN.getIncomingValue(i))));
1023 void Andersens::visitCastInst(CastInst &CI) {
// Casts: pointer-to-pointer is a copy; int-to-pointer conservatively copies
// from the universal set; pointer-to-int flows into the universal set.
1024 Value *Op = CI.getOperand(0);
1025 if (isa<PointerType>(CI.getType())) {
1026 if (isa<PointerType>(Op->getType())) {
1027 // P1 = cast P2 --> <Copy/P1/P2>
1028 Constraints.push_back(Constraint(Constraint::Copy, getNodeValue(CI),
1029 getNode(CI.getOperand(0))));
1031 // P1 = cast int --> <Copy/P1/Univ>
1033 Constraints.push_back(Constraint(Constraint::Copy, getNodeValue(CI),
1039 } else if (isa<PointerType>(Op->getType())) {
1040 // int = cast P1 --> <Copy/Univ/P1>
1042 Constraints.push_back(Constraint(Constraint::Copy,
1044 getNode(CI.getOperand(0))));
// Ensure a node exists for the pointer operand even when no constraint is
// added on this path.
1046 getNode(CI.getOperand(0));
1051 void Andersens::visitSelectInst(SelectInst &SI) {
// A pointer select can produce either arm, so both flow into the result.
1052 if (isa<PointerType>(SI.getType())) {
1053 unsigned SIN = getNodeValue(SI);
1054 // P1 = select C, P2, P3 ---> <Copy/P1/P2>, <Copy/P1/P3>
1055 Constraints.push_back(Constraint(Constraint::Copy, SIN,
1056 getNode(SI.getOperand(1))));
1057 Constraints.push_back(Constraint(Constraint::Copy, SIN,
1058 getNode(SI.getOperand(2))));
1062 void Andersens::visitVAArg(VAArgInst &I) {
// va_arg is not modeled by this analysis; abort if one is encountered.
1063 assert(0 && "vaarg not handled yet!");
1066 /// AddConstraintsForCall - Add constraints for a call with actual arguments
1067 /// specified by CS to the function specified by F. Note that the types of
1068 /// arguments might not match up in the case where this is an indirect call and
1069 /// the function pointer has been casted. If this is the case, do something
1071 void Andersens::AddConstraintsForCall(CallSite CS, Function *F) {
1072 Value *CallValue = CS.getCalledValue();
// F == NULL means the callee is unknown (an indirect call); argument and
// return flow then go through the called-value node plus field offsets.
1073 bool IsDeref = F == NULL;
1075 // If this is a call to an external function, try to handle it directly to get
1076 // some taste of context sensitivity.
1077 if (F && F->isDeclaration() && AddConstraintsForExternalCall(CS, F))
// Model the returned pointer (if the call produces one).
1080 if (isa<PointerType>(CS.getType())) {
1081 unsigned CSN = getNode(CS.getInstruction());
1082 if (!F || isa<PointerType>(F->getFunctionType()->getReturnType())) {
1084 Constraints.push_back(Constraint(Constraint::Load, CSN,
1085 getNode(CallValue), CallReturnPos));
1087 Constraints.push_back(Constraint(Constraint::Copy, CSN,
1088 getNode(CallValue) + CallReturnPos));
1090 // If the function returns a non-pointer value, handle this just like we
1091 // treat a nonpointer cast to pointer.
1092 Constraints.push_back(Constraint(Constraint::Copy, CSN,
1095 } else if (F && isa<PointerType>(F->getFunctionType()->getReturnType())) {
1096 Constraints.push_back(Constraint(Constraint::Copy,
1098 getNode(CallValue) + CallReturnPos));
// Bind actual arguments to formal parameters.
1101 CallSite::arg_iterator ArgI = CS.arg_begin(), ArgE = CS.arg_end();
1104 Function::arg_iterator AI = F->arg_begin(), AE = F->arg_end();
1105 for (; AI != AE && ArgI != ArgE; ++AI, ++ArgI)
1106 if (isa<PointerType>(AI->getType())) {
1107 if (isa<PointerType>((*ArgI)->getType())) {
1108 // Copy the actual argument into the formal argument.
1109 Constraints.push_back(Constraint(Constraint::Copy, getNode(AI),
1112 Constraints.push_back(Constraint(Constraint::Copy, getNode(AI),
1115 } else if (isa<PointerType>((*ArgI)->getType())) {
1116 Constraints.push_back(Constraint(Constraint::Copy,
// Indirect-call path: store each pointer actual into the callee's argument
// slot (an offset from the called-value node).
1122 unsigned ArgPos = CallFirstArgPos;
1123 for (; ArgI != ArgE; ++ArgI) {
1124 if (isa<PointerType>((*ArgI)->getType())) {
1125 // Copy the actual argument into the formal argument.
1126 Constraints.push_back(Constraint(Constraint::Store,
1128 getNode(*ArgI), ArgPos++));
1130 Constraints.push_back(Constraint(Constraint::Store,
1131 getNode (CallValue),
1132 UniversalSet, ArgPos++));
1136 // Copy all pointers passed through the varargs section to the varargs node.
1137 if (F && F->getFunctionType()->isVarArg())
1138 for (; ArgI != ArgE; ++ArgI)
1139 if (isa<PointerType>((*ArgI)->getType()))
1140 Constraints.push_back(Constraint(Constraint::Copy, getVarargNode(F),
1142 // If more arguments are passed in than we track, just drop them on the floor.
1145 void Andersens::visitCallSite(CallSite CS) {
// Create a node for the call's pointer result (if any), then add the call
// constraints; F is NULL when the callee is not a known Function.
1146 if (isa<PointerType>(CS.getType()))
1147 getNodeValue(*CS.getInstruction());
1149 if (Function *F = CS.getCalledFunction()) {
1150 AddConstraintsForCall(CS, F);
1152 AddConstraintsForCall(CS, NULL);
1156 //===----------------------------------------------------------------------===//
1157 // Constraint Solving Phase
1158 //===----------------------------------------------------------------------===//
1160 /// intersects - Return true if the points-to set of this node intersects
1161 /// with the points-to set of the specified node.
1162 bool Andersens::Node::intersects(Node *N) const {
// Delegates directly to SparseBitVector::intersects.
1163 return PointsTo->intersects(N->PointsTo);
1166 /// intersectsIgnoring - Return true if the points-to set of this node
1167 /// intersects with the points-to set of the specified node on any nodes
1168 /// except for the specified node to ignore.
1169 bool Andersens::Node::intersectsIgnoring(Node *N, unsigned Ignoring) const {
1170 // TODO: If we are only going to call this with the same value for Ignoring,
1171 // we should move the special values out of the points-to bitmap.
// Temporarily clear the Ignoring bit in both sets, test for intersection,
// then restore the bit in whichever set(s) originally had it.
1172 bool WeHadIt = PointsTo->test(Ignoring);
1173 bool NHadIt = N->PointsTo->test(Ignoring);
1174 bool Result = false;
1176 PointsTo->reset(Ignoring);
1178 N->PointsTo->reset(Ignoring);
1179 Result = PointsTo->intersects(N->PointsTo);
1181 PointsTo->set(Ignoring);
1183 N->PointsTo->set(Ignoring);
// Debug helper: dump a sparse bitmap to the debug output stream.
1187 void dumpToDOUT(SparseBitVector<> *bitmap) {
1189 dump(*bitmap, DOUT);
1194 /// Clump together address taken variables so that the points-to sets use up
1195 /// less space and can be operated on faster.
1197 void Andersens::ClumpAddressTaken() {
1199 #define DEBUG_TYPE "anders-aa-renumber"
// Translate maps each old node index to its new (renumbered) index;
// NewGraphNodes holds the nodes in their new order.
1200 std::vector<unsigned> Translate;
1201 std::vector<Node> NewGraphNodes;
1203 Translate.resize(GraphNodes.size());
1204 unsigned NewPos = 0;
// First mark every node whose address is taken (the Src of an AddressOf).
1206 for (unsigned i = 0; i < Constraints.size(); ++i) {
1207 Constraint &C = Constraints[i];
1208 if (C.Type == Constraint::AddressOf) {
1209 GraphNodes[C.Src].AddressTaken = true;
// Special nodes keep their (low) positions.
1212 for (unsigned i = 0; i < NumberSpecialNodes; ++i) {
1213 unsigned Pos = NewPos++;
1215 NewGraphNodes.push_back(GraphNodes[i]);
1216 DOUT << "Renumbering node " << i << " to node " << Pos << "\n";
1219 // I believe this ends up being faster than making two vectors and splicing
// Address-taken nodes are packed next, then all remaining nodes.
1221 for (unsigned i = NumberSpecialNodes; i < GraphNodes.size(); ++i) {
1222 if (GraphNodes[i].AddressTaken) {
1223 unsigned Pos = NewPos++;
1225 NewGraphNodes.push_back(GraphNodes[i]);
1226 DOUT << "Renumbering node " << i << " to node " << Pos << "\n";
1230 for (unsigned i = NumberSpecialNodes; i < GraphNodes.size(); ++i) {
1231 if (!GraphNodes[i].AddressTaken) {
1232 unsigned Pos = NewPos++;
1234 NewGraphNodes.push_back(GraphNodes[i]);
1235 DOUT << "Renumbering node " << i << " to node " << Pos << "\n";
// Rewrite every index-bearing side table and the constraints themselves
// through the Translate map.
1239 for (DenseMap<Value*, unsigned>::iterator Iter = ValueNodes.begin();
1240 Iter != ValueNodes.end();
1242 Iter->second = Translate[Iter->second];
1244 for (DenseMap<Value*, unsigned>::iterator Iter = ObjectNodes.begin();
1245 Iter != ObjectNodes.end();
1247 Iter->second = Translate[Iter->second];
1249 for (DenseMap<Function*, unsigned>::iterator Iter = ReturnNodes.begin();
1250 Iter != ReturnNodes.end();
1252 Iter->second = Translate[Iter->second];
1254 for (DenseMap<Function*, unsigned>::iterator Iter = VarargNodes.begin();
1255 Iter != VarargNodes.end();
1257 Iter->second = Translate[Iter->second];
1259 for (unsigned i = 0; i < Constraints.size(); ++i) {
1260 Constraint &C = Constraints[i];
1261 C.Src = Translate[C.Src];
1262 C.Dest = Translate[C.Dest];
1265 GraphNodes.swap(NewGraphNodes);
1267 #define DEBUG_TYPE "anders-aa"
1270 /// The technique used here is described in "Exploiting Pointer and Location
1271 /// Equivalence to Optimize Pointer Analysis. In the 14th International Static
1272 /// Analysis Symposium (SAS), August 2007." It is known as the "HVN" algorithm,
1273 /// and is equivalent to value numbering the collapsed constraint graph without
1274 /// evaluating unions. This is used as a pre-pass to HU in order to resolve
1275 /// first order pointer dereferences and speed up/reduce memory usage of HU.
1276 /// Running both is equivalent to HRU without the iteration.
1277 /// HVN in more detail:
1278 /// Imagine the set of constraints was simply straight line code with no loops
1279 /// (we eliminate cycles, so there are no loops), such as:
1285 /// Applying value numbering to this code tells us:
1288 /// For HVN, this is as far as it goes. We assign new value numbers to every
1289 /// "address node", and every "reference node".
1290 /// To get the optimal result for this, we use a DFS + SCC (since all nodes in a
1291 /// cycle must have the same value number since the = operation is really
1292 /// inclusion, not overwrite), and value number nodes we receive points-to sets
1293 /// before we value our own node.
1294 /// The advantage of HU over HVN is that HU considers the inclusion property, so
1295 /// that if you have
1302 /// HU will determine that G == F == E. HVN will not, because it cannot prove
1303 /// that the points to information ends up being the same because they all
1304 /// receive &D from E anyway.
1306 void Andersens::HVN() {
1307 DOUT << "Beginning HVN\n";
1308 // Build a predecessor graph. This is like our constraint graph with the
1309 // edges going in the opposite direction, and there are edges for all the
1310 // constraints, instead of just copy constraints. We also build implicit
1311 // edges for constraints that are implied but not explicit. I.e. for the
1312 // constraint a = &b, we add implicit edges *a = b. This helps us capture
// more cycles.
1313 for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
1314 Constraint &C = Constraints[i];
1315 if (C.Type == Constraint::AddressOf) {
1316 GraphNodes[C.Src].AddressTaken = true;
1317 GraphNodes[C.Src].Direct = false;
// Dest receives an edge from the "address node" of Src...
1320 unsigned AdrNode = C.Src + FirstAdrNode;
1321 if (!GraphNodes[C.Dest].PredEdges)
1322 GraphNodes[C.Dest].PredEdges = new SparseBitVector<>;
1323 GraphNodes[C.Dest].PredEdges->set(AdrNode);
// ...and the implicit *Dest = Src edge goes on Dest's "ref node".
1326 unsigned RefNode = C.Dest + FirstRefNode;
1327 if (!GraphNodes[RefNode].ImplicitPredEdges)
1328 GraphNodes[RefNode].ImplicitPredEdges = new SparseBitVector<>;
1329 GraphNodes[RefNode].ImplicitPredEdges->set(C.Src);
1330 } else if (C.Type == Constraint::Load) {
1331 if (C.Offset == 0) {
1333 if (!GraphNodes[C.Dest].PredEdges)
1334 GraphNodes[C.Dest].PredEdges = new SparseBitVector<>;
1335 GraphNodes[C.Dest].PredEdges->set(C.Src + FirstRefNode);
// Loads at a nonzero offset are not tracked precisely here.
1337 GraphNodes[C.Dest].Direct = false;
1339 } else if (C.Type == Constraint::Store) {
1340 if (C.Offset == 0) {
1342 unsigned RefNode = C.Dest + FirstRefNode;
1343 if (!GraphNodes[RefNode].PredEdges)
1344 GraphNodes[RefNode].PredEdges = new SparseBitVector<>;
1345 GraphNodes[RefNode].PredEdges->set(C.Src);
1348 // Dest = Src edge and *Dest = *Src edge
1349 if (!GraphNodes[C.Dest].PredEdges)
1350 GraphNodes[C.Dest].PredEdges = new SparseBitVector<>;
1351 GraphNodes[C.Dest].PredEdges->set(C.Src);
1352 unsigned RefNode = C.Dest + FirstRefNode;
1353 if (!GraphNodes[RefNode].ImplicitPredEdges)
1354 GraphNodes[RefNode].ImplicitPredEdges = new SparseBitVector<>;
1355 GraphNodes[RefNode].ImplicitPredEdges->set(C.Src + FirstRefNode);
1359 // Do SCC finding first to condense our predecessor graph
1361 Node2DFS.insert(Node2DFS.begin(), GraphNodes.size(), 0);
1362 Node2Deleted.insert(Node2Deleted.begin(), GraphNodes.size(), false);
1363 Node2Visited.insert(Node2Visited.begin(), GraphNodes.size(), false);
// Value-number every non-ref representative node.
1365 for (unsigned i = 0; i < FirstRefNode; ++i) {
1366 unsigned Node = VSSCCRep[i];
1367 if (!Node2Visited[Node])
// Free the per-run side tables; the label map owns heap bitmaps.
1370 for (BitVectorMap::iterator Iter = Set2PEClass.begin();
1371 Iter != Set2PEClass.end();
1374 Set2PEClass.clear();
1376 Node2Deleted.clear();
1377 Node2Visited.clear();
1378 DOUT << "Finished HVN\n";
1382 /// This is the workhorse of HVN value numbering. We combine SCC finding at the
1383 /// same time because it's easy.
1384 void Andersens::HVNValNum(unsigned NodeIndex) {
// Standard Tarjan-style DFS: record our DFS number, recurse on predecessor
// edges, and collapse any SCC we are the root of.
1385 unsigned MyDFS = DFSNumber++;
1386 Node *N = &GraphNodes[NodeIndex];
1387 Node2Visited[NodeIndex] = true;
1388 Node2DFS[NodeIndex] = MyDFS;
1390 // First process all our explicit edges
1392 for (SparseBitVector<>::iterator Iter = N->PredEdges->begin();
1393 Iter != N->PredEdges->end();
1395 unsigned j = VSSCCRep[*Iter];
1396 if (!Node2Deleted[j]) {
1397 if (!Node2Visited[j])
1399 if (Node2DFS[NodeIndex] > Node2DFS[j])
1400 Node2DFS[NodeIndex] = Node2DFS[j];
1404 // Now process all the implicit edges
1405 if (N->ImplicitPredEdges)
1406 for (SparseBitVector<>::iterator Iter = N->ImplicitPredEdges->begin();
1407 Iter != N->ImplicitPredEdges->end();
1409 unsigned j = VSSCCRep[*Iter];
1410 if (!Node2Deleted[j]) {
1411 if (!Node2Visited[j])
1413 if (Node2DFS[NodeIndex] > Node2DFS[j])
1414 Node2DFS[NodeIndex] = Node2DFS[j];
1418 // See if we found any cycles
1419 if (MyDFS == Node2DFS[NodeIndex]) {
// We are the SCC root: unite every stacked cycle member into this node,
// merging their edge sets.
1420 while (!SCCStack.empty() && Node2DFS[SCCStack.top()] >= MyDFS) {
1421 unsigned CycleNodeIndex = SCCStack.top();
1422 Node *CycleNode = &GraphNodes[CycleNodeIndex];
1423 VSSCCRep[CycleNodeIndex] = NodeIndex;
1425 N->Direct &= CycleNode->Direct;
1427 if (CycleNode->PredEdges) {
1429 N->PredEdges = new SparseBitVector<>;
1430 *(N->PredEdges) |= CycleNode->PredEdges;
1431 delete CycleNode->PredEdges;
1432 CycleNode->PredEdges = NULL;
1434 if (CycleNode->ImplicitPredEdges) {
1435 if (!N->ImplicitPredEdges)
1436 N->ImplicitPredEdges = new SparseBitVector<>;
1437 *(N->ImplicitPredEdges) |= CycleNode->ImplicitPredEdges;
1438 delete CycleNode->ImplicitPredEdges;
1439 CycleNode->ImplicitPredEdges = NULL;
1445 Node2Deleted[NodeIndex] = true;
// Non-direct nodes always get a fresh equivalence label.
1448 GraphNodes[NodeIndex].PointerEquivLabel = PEClass++;
1452 // Collect labels of successor nodes
1453 bool AllSame = true;
1454 unsigned First = ~0;
1455 SparseBitVector<> *Labels = new SparseBitVector<>;
1459 for (SparseBitVector<>::iterator Iter = N->PredEdges->begin();
1460 Iter != N->PredEdges->end();
1462 unsigned j = VSSCCRep[*Iter];
1463 unsigned Label = GraphNodes[j].PointerEquivLabel;
1464 // Ignore labels that are equal to us or non-pointers
1465 if (j == NodeIndex || Label == 0)
1467 if (First == (unsigned)~0)
1469 else if (First != Label)
1474 // We either have a non-pointer, a copy of an existing node, or a new node.
1475 // Assign the appropriate pointer equivalence label.
1476 if (Labels->empty()) {
1477 GraphNodes[NodeIndex].PointerEquivLabel = 0;
1478 } else if (AllSame) {
1479 GraphNodes[NodeIndex].PointerEquivLabel = First;
1481 GraphNodes[NodeIndex].PointerEquivLabel = Set2PEClass[Labels];
1482 if (GraphNodes[NodeIndex].PointerEquivLabel == 0) {
// First time we see this label set: mint a new equivalence class.
1483 unsigned EquivClass = PEClass++;
1484 Set2PEClass[Labels] = EquivClass;
1485 GraphNodes[NodeIndex].PointerEquivLabel = EquivClass;
1492 SCCStack.push(NodeIndex);
1496 /// The technique used here is described in "Exploiting Pointer and Location
1497 /// Equivalence to Optimize Pointer Analysis. In the 14th International Static
1498 /// Analysis Symposium (SAS), August 2007." It is known as the "HU" algorithm,
1499 /// and is equivalent to value numbering the collapsed constraint graph
1500 /// including evaluating unions.
1501 void Andersens::HU() {
1502 DOUT << "Beginning HU\n";
1503 // Build a predecessor graph. This is like our constraint graph with the
1504 // edges going in the opposite direction, and there are edges for all the
1505 // constraints, instead of just copy constraints. We also build implicit
1506 // edges for constraints that are implied but not explicit. I.e. for the
1507 // constraint a = &b, we add implicit edges *a = b. This helps us capture
// more cycles.
1508 for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
1509 Constraint &C = Constraints[i];
1510 if (C.Type == Constraint::AddressOf) {
1511 GraphNodes[C.Src].AddressTaken = true;
1512 GraphNodes[C.Src].Direct = false;
// Unlike HVN, HU seeds actual points-to bits so unions are evaluated.
1514 GraphNodes[C.Dest].PointsTo->set(C.Src);
1516 unsigned RefNode = C.Dest + FirstRefNode;
1517 if (!GraphNodes[RefNode].ImplicitPredEdges)
1518 GraphNodes[RefNode].ImplicitPredEdges = new SparseBitVector<>;
1519 GraphNodes[RefNode].ImplicitPredEdges->set(C.Src);
1520 GraphNodes[C.Src].PointedToBy->set(C.Dest);
1521 } else if (C.Type == Constraint::Load) {
1522 if (C.Offset == 0) {
1524 if (!GraphNodes[C.Dest].PredEdges)
1525 GraphNodes[C.Dest].PredEdges = new SparseBitVector<>;
1526 GraphNodes[C.Dest].PredEdges->set(C.Src + FirstRefNode);
1528 GraphNodes[C.Dest].Direct = false;
1530 } else if (C.Type == Constraint::Store) {
1531 if (C.Offset == 0) {
1533 unsigned RefNode = C.Dest + FirstRefNode;
1534 if (!GraphNodes[RefNode].PredEdges)
1535 GraphNodes[RefNode].PredEdges = new SparseBitVector<>;
1536 GraphNodes[RefNode].PredEdges->set(C.Src);
1539 // Dest = Src edge and *Dest = *Src edge
1540 if (!GraphNodes[C.Dest].PredEdges)
1541 GraphNodes[C.Dest].PredEdges = new SparseBitVector<>;
1542 GraphNodes[C.Dest].PredEdges->set(C.Src);
1543 unsigned RefNode = C.Dest + FirstRefNode;
1544 if (!GraphNodes[RefNode].ImplicitPredEdges)
1545 GraphNodes[RefNode].ImplicitPredEdges = new SparseBitVector<>;
1546 GraphNodes[RefNode].ImplicitPredEdges->set(C.Src + FirstRefNode);
1550 // Do SCC finding first to condense our predecessor graph
1552 Node2DFS.insert(Node2DFS.begin(), GraphNodes.size(), 0);
1553 Node2Deleted.insert(Node2Deleted.begin(), GraphNodes.size(), false);
1554 Node2Visited.insert(Node2Visited.begin(), GraphNodes.size(), false);
// Condense only union-find representatives.
1556 for (unsigned i = 0; i < FirstRefNode; ++i) {
1557 if (FindNode(i) == i) {
1558 unsigned Node = VSSCCRep[i];
1559 if (!Node2Visited[Node])
1564 // Reset tables for actual labeling
1566 Node2Visited.clear();
1567 Node2Deleted.clear();
1568 // Pre-grow our densemap so that we don't get really bad behavior
1569 Set2PEClass.resize(GraphNodes.size());
1571 // Visit the condensed graph and generate pointer equivalence labels.
1572 Node2Visited.insert(Node2Visited.begin(), GraphNodes.size(), false);
1573 for (unsigned i = 0; i < FirstRefNode; ++i) {
1574 if (FindNode(i) == i) {
1575 unsigned Node = VSSCCRep[i];
1576 if (!Node2Visited[Node])
1580 // PEClass nodes will be deleted by the deleting of N->PointsTo in our caller.
1581 Set2PEClass.clear();
1582 DOUT << "Finished HU\n";
1586 /// Implementation of standard Tarjan SCC algorithm as modified by Nuutilla.
1587 void Andersens::Condense(unsigned NodeIndex) {
// DFS over explicit then implicit predecessor edges, collapsing any SCC
// rooted here into NodeIndex (merging points-to sets and edge sets).
1588 unsigned MyDFS = DFSNumber++;
1589 Node *N = &GraphNodes[NodeIndex];
1590 Node2Visited[NodeIndex] = true;
1591 Node2DFS[NodeIndex] = MyDFS;
1593 // First process all our explicit edges
1595 for (SparseBitVector<>::iterator Iter = N->PredEdges->begin();
1596 Iter != N->PredEdges->end();
1598 unsigned j = VSSCCRep[*Iter];
1599 if (!Node2Deleted[j]) {
1600 if (!Node2Visited[j])
1602 if (Node2DFS[NodeIndex] > Node2DFS[j])
1603 Node2DFS[NodeIndex] = Node2DFS[j];
1607 // Now process all the implicit edges
1608 if (N->ImplicitPredEdges)
1609 for (SparseBitVector<>::iterator Iter = N->ImplicitPredEdges->begin();
1610 Iter != N->ImplicitPredEdges->end();
1612 unsigned j = VSSCCRep[*Iter];
1613 if (!Node2Deleted[j]) {
1614 if (!Node2Visited[j])
1616 if (Node2DFS[NodeIndex] > Node2DFS[j])
1617 Node2DFS[NodeIndex] = Node2DFS[j];
1621 // See if we found any cycles
1622 if (MyDFS == Node2DFS[NodeIndex]) {
1623 while (!SCCStack.empty() && Node2DFS[SCCStack.top()] >= MyDFS) {
1624 unsigned CycleNodeIndex = SCCStack.top();
1625 Node *CycleNode = &GraphNodes[CycleNodeIndex];
1626 VSSCCRep[CycleNodeIndex] = NodeIndex;
1628 N->Direct &= CycleNode->Direct;
// Merge the cycle member's points-to set and edges into the root, then
// free the member's now-redundant storage.
1630 *(N->PointsTo) |= CycleNode->PointsTo;
1631 delete CycleNode->PointsTo;
1632 CycleNode->PointsTo = NULL;
1633 if (CycleNode->PredEdges) {
1635 N->PredEdges = new SparseBitVector<>;
1636 *(N->PredEdges) |= CycleNode->PredEdges;
1637 delete CycleNode->PredEdges;
1638 CycleNode->PredEdges = NULL;
1640 if (CycleNode->ImplicitPredEdges) {
1641 if (!N->ImplicitPredEdges)
1642 N->ImplicitPredEdges = new SparseBitVector<>;
1643 *(N->ImplicitPredEdges) |= CycleNode->ImplicitPredEdges;
1644 delete CycleNode->ImplicitPredEdges;
1645 CycleNode->ImplicitPredEdges = NULL;
1650 Node2Deleted[NodeIndex] = true;
1652 // Set up number of incoming edges for other nodes
1654 for (SparseBitVector<>::iterator Iter = N->PredEdges->begin();
1655 Iter != N->PredEdges->end();
1657 ++GraphNodes[VSSCCRep[*Iter]].NumInEdges;
1659 SCCStack.push(NodeIndex);
// HU labeling pass: compute each node's points-to set over the condensed
// graph and assign pointer-equivalence labels from those sets.
1663 void Andersens::HUValNum(unsigned NodeIndex) {
1664 Node *N = &GraphNodes[NodeIndex];
1665 Node2Visited[NodeIndex] = true;
1667 // Eliminate dereferences of non-pointers for those non-pointers we have
1668 // already identified. These are ref nodes whose non-ref node:
1669 // 1. Has already been visited determined to point to nothing (and thus, a
1670 // dereference of it must point to nothing)
1671 // 2. Any direct node with no predecessor edges in our graph and with no
1672 // points-to set (since it can't point to anything either, being that it
1673 // receives no points-to sets and has none).
1674 if (NodeIndex >= FirstRefNode) {
1675 unsigned j = VSSCCRep[FindNode(NodeIndex - FirstRefNode)];
1676 if ((Node2Visited[j] && !GraphNodes[j].PointerEquivLabel)
1677 || (GraphNodes[j].Direct && !GraphNodes[j].PredEdges
1678 && GraphNodes[j].PointsTo->empty())){
1682 // Process all our explicit edges
1684 for (SparseBitVector<>::iterator Iter = N->PredEdges->begin();
1685 Iter != N->PredEdges->end();
1687 unsigned j = VSSCCRep[*Iter];
1688 if (!Node2Visited[j])
1691 // If this edge turned out to be the same as us, or got no pointer
1692 // equivalence label (and thus points to nothing) , just decrement our
1693 // incoming edges and continue.
1694 if (j == NodeIndex || GraphNodes[j].PointerEquivLabel == 0) {
1695 --GraphNodes[j].NumInEdges;
// Union the predecessor's points-to set into ours (this is the "evaluate
// unions" step that distinguishes HU from HVN).
1699 *(N->PointsTo) |= GraphNodes[j].PointsTo;
1701 // If we didn't end up storing this in the hash, and we're done with all
1702 // the edges, we don't need the points-to set anymore.
1703 --GraphNodes[j].NumInEdges;
1704 if (!GraphNodes[j].NumInEdges && !GraphNodes[j].StoredInHash) {
1705 delete GraphNodes[j].PointsTo;
1706 GraphNodes[j].PointsTo = NULL;
1709 // If this isn't a direct node, generate a fresh variable.
1711 N->PointsTo->set(FirstRefNode + NodeIndex);
1714 // See If we have something equivalent to us, if not, generate a new
1715 // equivalence class.
1716 if (N->PointsTo->empty()) {
1721 N->PointerEquivLabel = Set2PEClass[N->PointsTo];
1722 if (N->PointerEquivLabel == 0) {
1723 unsigned EquivClass = PEClass++;
1724 N->StoredInHash = true;
1725 Set2PEClass[N->PointsTo] = EquivClass;
1726 N->PointerEquivLabel = EquivClass;
1729 N->PointerEquivLabel = PEClass++;
1734 /// Rewrite our list of constraints so that pointer equivalent nodes are
1735 /// replaced by their pointer equivalence class representative.
1736 void Andersens::RewriteConstraints() {
1737 std::vector<Constraint> NewConstraints;
1739 PEClass2Node.clear();
1740 PENLEClass2Node.clear();
1742 // We may have from 1 to Graphnodes + 1 equivalence classes.
1743 PEClass2Node.insert(PEClass2Node.begin(), GraphNodes.size() + 1, -1);
1744 PENLEClass2Node.insert(PENLEClass2Node.begin(), GraphNodes.size() + 1, -1);
1746 // Rewrite constraints, ignoring non-pointer constraints, uniting equivalent
1747 // nodes, and rewriting constraints to use the representative nodes.
1748 for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
1749 Constraint &C = Constraints[i];
1750 unsigned RHSNode = FindNode(C.Src);
1751 unsigned LHSNode = FindNode(C.Dest);
1752 unsigned RHSLabel = GraphNodes[VSSCCRep[RHSNode]].PointerEquivLabel;
1753 unsigned LHSLabel = GraphNodes[VSSCCRep[LHSNode]].PointerEquivLabel;
1755 // First we try to eliminate constraints for things we can prove don't point
// to anything: label 0 means "non-pointer", so the constraint is dead.
1757 if (LHSLabel == 0) {
1758 DEBUG(PrintNode(&GraphNodes[LHSNode]));
1759 DOUT << " is a non-pointer, ignoring constraint.\n";
1762 if (RHSLabel == 0) {
1763 DEBUG(PrintNode(&GraphNodes[RHSNode]));
1764 DOUT << " is a non-pointer, ignoring constraint.\n";
1767 // This constraint may be useless, and it may become useless as we translate
// it: a self-copy is a no-op, so drop it both before and after rewriting.
1769 if (C.Src == C.Dest && C.Type == Constraint::Copy)
1772 C.Src = FindEquivalentNode(RHSNode, RHSLabel);
1773 C.Dest = FindEquivalentNode(FindNode(LHSNode), LHSLabel);
1774 if (C.Src == C.Dest && C.Type == Constraint::Copy)
1777 NewConstraints.push_back(C);
1779 Constraints.swap(NewConstraints);
1780 PEClass2Node.clear();
1783 /// See if we have a node that is pointer equivalent to the one being asked
1784 /// about, and if so, unite them and return the equivalent node. Otherwise,
1785 /// return the original node.
1786 unsigned Andersens::FindEquivalentNode(unsigned NodeIndex,
1787 unsigned NodeLabel) {
// Address-taken nodes must keep their identity (location equivalence), so
// only non-address-taken nodes may be united by pointer label.
1788 if (!GraphNodes[NodeIndex].AddressTaken) {
1789 if (PEClass2Node[NodeLabel] != -1) {
1790 // We found an existing node with the same pointer label, so unify them.
1791 return UniteNodes(PEClass2Node[NodeLabel], NodeIndex);
1793 PEClass2Node[NodeLabel] = NodeIndex;
1794 PENLEClass2Node[NodeLabel] = NodeIndex;
1796 } else if (PENLEClass2Node[NodeLabel] == -1) {
1797 PENLEClass2Node[NodeLabel] = NodeIndex;
// Debug dump: print each node's pointer-equivalence label, SCC rep, and
// directness. Ref/adr nodes are printed via their base node's index offset.
1803 void Andersens::PrintLabels() {
1804 for (unsigned i = 0; i < GraphNodes.size(); ++i) {
1805 if (i < FirstRefNode) {
1806 PrintNode(&GraphNodes[i]);
1807 } else if (i < FirstAdrNode) {
1809 PrintNode(&GraphNodes[i-FirstRefNode]);
1813 PrintNode(&GraphNodes[i-FirstAdrNode]);
1817 DOUT << " has pointer label " << GraphNodes[i].PointerEquivLabel
1818 << " and SCC rep " << VSSCCRep[i]
1819 << " and is " << (GraphNodes[i].Direct ? "Direct" : "Not direct")
1824 /// Optimize the constraints by performing offline variable substitution and
1825 /// other optimizations.
1826 void Andersens::OptimizeConstraints() {
1827 DOUT << "Beginning constraint optimization\n";
1829 // Function related nodes need to stay in the same relative position and can't
1830 // be location equivalent.
1831 for (std::map<unsigned, unsigned>::iterator Iter = MaxK.begin();
1834 for (unsigned i = Iter->first;
1835 i != Iter->first + Iter->second;
1837 GraphNodes[i].AddressTaken = true;
1838 GraphNodes[i].Direct = false;
// Renumber so address-taken nodes are contiguous, then append the ref and
// adr shadow nodes used by HVN/HU.
1842 ClumpAddressTaken();
1843 FirstRefNode = GraphNodes.size();
1844 FirstAdrNode = FirstRefNode + GraphNodes.size();
1845 GraphNodes.insert(GraphNodes.end(), 2 * GraphNodes.size(),
1847 VSSCCRep.resize(GraphNodes.size());
1848 for (unsigned i = 0; i < GraphNodes.size(); ++i) {
// HVN pass, then free its predecessor-edge scaffolding before HU.
1852 for (unsigned i = 0; i < GraphNodes.size(); ++i) {
1853 Node *N = &GraphNodes[i];
1854 delete N->PredEdges;
1855 N->PredEdges = NULL;
1856 delete N->ImplicitPredEdges;
1857 N->ImplicitPredEdges = NULL;
1860 #define DEBUG_TYPE "anders-aa-labels"
1861 DEBUG(PrintLabels());
1863 #define DEBUG_TYPE "anders-aa"
1864 RewriteConstraints();
1865 // Delete the adr nodes.
1866 GraphNodes.resize(FirstRefNode * 2);
// Reset representative nodes for the HU pass.
1869 for (unsigned i = 0; i < GraphNodes.size(); ++i) {
1870 Node *N = &GraphNodes[i];
1871 if (FindNode(i) == i) {
1872 N->PointsTo = new SparseBitVector<>;
1873 N->PointedToBy = new SparseBitVector<>;
1877 N->PointerEquivLabel = 0;
1881 #define DEBUG_TYPE "anders-aa-labels"
1882 DEBUG(PrintLabels());
1884 #define DEBUG_TYPE "anders-aa"
1885 RewriteConstraints();
1886 for (unsigned i = 0; i < GraphNodes.size(); ++i) {
1887 if (FindNode(i) == i) {
1888 Node *N = &GraphNodes[i];
1890 delete N->PredEdges;
1891 delete N->ImplicitPredEdges;
1892 delete N->PointedToBy;
// Drop the ref nodes now that offline substitution is complete.
1895 GraphNodes.erase(GraphNodes.begin() + FirstRefNode, GraphNodes.end());
1896 DOUT << "Finished constraint optimization\n";
1901 /// Unite pointer but not location equivalent variables, now that the constraint
// (the remainder of the sentence above is elided in this excerpt)
1903 void Andersens::UnitePointerEquivalences() {
1904 DOUT << "Uniting remaining pointer equivalences\n";
// Pointer-equivalent variables are guaranteed to end up with the same
// points-to sets (see file header), so merge every address-taken
// representative into the canonical node recorded for its label.
1905 for (unsigned i = 0; i < GraphNodes.size(); ++i) {
1906 if (GraphNodes[i].AddressTaken && GraphNodes[i].NodeRep == SelfRep) {
1907 unsigned Label = GraphNodes[i].PointerEquivLabel;
// Label 0 means "not pointer-equivalent to anything"; a -1 entry apparently
// means no canonical node was recorded for that class.
1909 if (Label && PENLEClass2Node[Label] != -1)
1910 UniteNodes(i, PENLEClass2Node[Label]);
1913 DOUT << "Finished remaining pointer equivalences\n";
// The label -> node map is only needed during this pass; release it.
1914 PENLEClass2Node.clear();
1917 /// Create the constraint graph used for solving points-to analysis.
1919 void Andersens::CreateConstraintGraph() {
// Distribute each constraint to the node that must react when its points-to
// set changes during solving:
//  - AddressOf is applied immediately (Dest's set gets Src);
//  - Load constraints are queued on Src, Store constraints on Dest;
//  - offsetted copies are also queued on Src.
1920 for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
1921 Constraint &C = Constraints[i];
1922 assert (C.Src < GraphNodes.size() && C.Dest < GraphNodes.size());
1923 if (C.Type == Constraint::AddressOf)
1924 GraphNodes[C.Dest].PointsTo->set(C.Src);
1925 else if (C.Type == Constraint::Load)
1926 GraphNodes[C.Src].Constraints.push_back(C);
1927 else if (C.Type == Constraint::Store)
1928 GraphNodes[C.Dest].Constraints.push_back(C);
1929 else if (C.Offset != 0)
1930 GraphNodes[C.Src].Constraints.push_back(C);
// NOTE(review): original line 1931 (presumably the final `else`) is elided
// here; the set() below is the zero-offset copy case, becoming a plain
// graph edge Src -> Dest.
1932 GraphNodes[C.Src].Edges->set(C.Dest);
1936 // Perform cycle detection, DFS, and RPO finding.
// Tarjan-style SCC visit rooted at Node: walks successor edges, collapses
// any cycle found by uniting its members, rewrites edges whose target has
// been merged into a representative, and assigns a topological number.
1937 void Andersens::QueryNode(unsigned Node) {
1938 assert(GraphNodes[Node].NodeRep == SelfRep && "Querying a non-rep node");
1939 unsigned OurDFS = ++DFSNumber;
// ToErase collects stale edges (to merged/duplicate targets); NewEdges
// collects their rewritten representative targets.
1940 SparseBitVector<> ToErase;
1941 SparseBitVector<> NewEdges;
1942 Node2DFS[Node] = OurDFS;
1944 for (SparseBitVector<>::iterator bi = GraphNodes[Node].Edges->begin();
1945 bi != GraphNodes[Node].Edges->end();
1947 unsigned RepNode = FindNode(*bi);
1948 // If we are going to add an edge to repnode, we have no need for the edge
1950 if (RepNode != *bi && NewEdges.test(RepNode)){
1955 // Continue about our DFS.
1956 if (!Node2Deleted[RepNode]){
1957 if (Node2DFS[RepNode] == 0) {
1959 // May have been changed by query
1960 RepNode = FindNode(RepNode);
// Standard low-link update: inherit the smallest DFS number reachable.
1962 if (Node2DFS[RepNode] < Node2DFS[Node])
1963 Node2DFS[Node] = Node2DFS[RepNode];
1965 // We may have just discovered that e belongs to a cycle, in which case we
1966 // can also erase it.
1967 if (RepNode != *bi) {
1969 NewEdges.set(RepNode);
// Apply the edge rewrites accumulated during the walk.
1973 GraphNodes[Node].Edges->intersectWithComplement(ToErase);
1974 GraphNodes[Node].Edges |= NewEdges;
// SCC root test: if our DFS number survived, everything on SCCStack with a
// DFS number >= ours is in our cycle -- unite it into Node.
1976 // If this node is a root of a non-trivial SCC, place it on our worklist to be
1978 if (OurDFS == Node2DFS[Node]) {
1979 bool Changed = false;
1980 while (!SCCStack.empty() && Node2DFS[SCCStack.top()] >= OurDFS) {
1981 Node = UniteNodes(Node, FindNode(SCCStack.top()));
1986 Node2Deleted[Node] = true;
// Record the reverse-post-order slot of the (possibly merged) rep.
1989 Topo2Node.at(GraphNodes.size() - RPONumber) = Node;
1990 Node2Topo[Node] = GraphNodes.size() - RPONumber;
1992 GraphNodes[Node].Changed = true;
// Not (yet) a root: stay on the stack for our SCC root to claim.
1994 SCCStack.push(Node);
1999 /// SolveConstraints - This stage iteratively processes the constraints list
2000 /// propagating constraints (adding edges to the Nodes in the points-to graph)
2001 /// until a fixed point is reached.
// NOTE(review): this excerpt has elided lines (leading original line numbers
// jump, e.g. 2005 -> 2007); comments describe only the visible code.
2003 void Andersens::SolveConstraints() {
2004 bool Changed = true;
2005 unsigned Iteration = 0;
// --- Setup: run the offline optimizations, allocate fresh solver state,
// build the constraint graph, and merge pointer-equivalent variables. ---
2007 OptimizeConstraints();
2009 #define DEBUG_TYPE "anders-aa-constraints"
2010 DEBUG(PrintConstraints());
2012 #define DEBUG_TYPE "anders-aa"
2014 for (unsigned i = 0; i < GraphNodes.size(); ++i) {
2015 Node *N = &GraphNodes[i];
2016 N->PointsTo = new SparseBitVector<>;
2017 N->OldPointsTo = new SparseBitVector<>;
2018 N->Edges = new SparseBitVector<>;
2020 CreateConstraintGraph();
2021 UnitePointerEquivalences();
2022 assert(SCCStack.empty() && "SCC Stack should be empty by now!");
// Sized-to-graph worklist bookkeeping: topological order maps, DFS numbers,
// and per-node "finished" flags, all initialized to the unvisited state.
2023 Topo2Node.insert(Topo2Node.begin(), GraphNodes.size(), Unvisited);
2024 Node2Topo.insert(Node2Topo.begin(), GraphNodes.size(), Unvisited);
2026 Node2Deleted.clear();
2027 Node2DFS.insert(Node2DFS.begin(), GraphNodes.size(), 0);
2028 Node2Deleted.insert(Node2Deleted.begin(), GraphNodes.size(), false);
2031 // Order graph and mark starting nodes as changed.
2032 for (unsigned i = 0; i < GraphNodes.size(); ++i) {
2033 unsigned N = FindNode(i);
2034 Node *INode = &GraphNodes[i];
2035 if (Node2DFS[N] == 0) {
2037 // Mark as changed if it's a representation and can contribute to the
2038 // calculation right now.
2039 if (INode->NodeRep == SelfRep && !INode->PointsTo->empty()
2040 && (!INode->Edges->empty() || !INode->Constraints.empty()))
2041 INode->Changed = true;
// --- Main fixed-point loop: visit nodes in topological order, propagating
// only the *delta* of each node's points-to set. ---
2048 DOUT << "Starting iteration #" << Iteration++ << "\n";
2049 // TODO: In the microoptimization category, we could just make Topo2Node
2050 // a fast map and thus only contain the visited nodes.
2051 for (unsigned i = 0; i < GraphNodes.size(); ++i) {
2052 unsigned CurrNodeIndex = Topo2Node[i];
2055 // We may not revisit all nodes on every iteration
2056 if (CurrNodeIndex == Unvisited)
2058 CurrNode = &GraphNodes[CurrNodeIndex];
2059 // See if this is a node we need to process on this iteration
2060 if (!CurrNode->Changed || CurrNode->NodeRep != SelfRep)
2062 CurrNode->Changed = false;
2064 // Figure out the changed points to bits
// Delta = PointsTo - OldPointsTo; only new bits need to be propagated.
2065 SparseBitVector<> CurrPointsTo;
2066 CurrPointsTo.intersectWithComplement(CurrNode->PointsTo,
2067 CurrNode->OldPointsTo);
2068 if (CurrPointsTo.empty()){
2071 *(CurrNode->OldPointsTo) |= CurrPointsTo;
2073 /* Now process the constraints for this node. */
2074 for (std::list<Constraint>::iterator li = CurrNode->Constraints.begin();
2075 li != CurrNode->Constraints.end(); ) {
// Normalize endpoints to their current representatives first.
2076 li->Src = FindNode(li->Src);
2077 li->Dest = FindNode(li->Dest);
2079 // TODO: We could delete redundant constraints here.
2080 // Src and Dest will be the vars we are going to process.
2081 // This may look a bit ugly, but what it does is allow us to process
2082 // both store and load constraints with the same code.
2083 // Load constraints say that every member of our RHS solution has K
2084 // added to it, and that variable gets an edge to LHS. We also union
2085 // RHS+K's solution into the LHS solution.
2086 // Store constraints say that every member of our LHS solution has K
2087 // added to it, and that variable gets an edge from RHS. We also union
2088 // RHS's solution into the LHS+K solution.
2091 unsigned K = li->Offset;
2092 unsigned CurrMember;
// (the Src/Dest pointer setup in each arm falls in elided lines)
2093 if (li->Type == Constraint::Load) {
2096 } else if (li->Type == Constraint::Store) {
2100 // TODO Handle offseted copy constraint
2104 // TODO: hybrid cycle detection would go here, we should check
2105 // if it was a statically detected offline equivalence that
2106 // involves pointers , and if so, remove the redundant constraints.
2108 const SparseBitVector<> &Solution = CurrPointsTo;
2110 for (SparseBitVector<>::iterator bi = Solution.begin();
2111 bi != Solution.end();
2115 // Need to increment the member by K since that is where we are
2116 // supposed to copy to/from. Note that in positive weight cycles,
2117 // which occur in address taking of fields, K can go past
2118 // MaxK[CurrMember] elements, even though that is all it could point
2120 if (K > 0 && K > MaxK[CurrMember])
2123 CurrMember = FindNode(CurrMember + K);
2125 // Add an edge to the graph, so we can just do regular bitmap ior next
2126 // time. It may also let us notice a cycle.
2127 if (GraphNodes[*Src].Edges->test_and_set(*Dest)) {
2128 if (GraphNodes[*Dest].PointsTo |= *(GraphNodes[*Src].PointsTo)) {
2129 GraphNodes[*Dest].Changed = true;
2130 // If we changed a node we've already processed, we need another
2132 if (Node2Topo[*Dest] <= i)
// --- Propagate the delta along the node's copy edges, dropping edges whose
// target was merged away or duplicated. ---
2139 SparseBitVector<> NewEdges;
2140 SparseBitVector<> ToErase;
2142 // Now all we have left to do is propagate points-to info along the
2143 // edges, erasing the redundant edges.
2146 for (SparseBitVector<>::iterator bi = CurrNode->Edges->begin();
2147 bi != CurrNode->Edges->end();
2150 unsigned DestVar = *bi;
2151 unsigned Rep = FindNode(DestVar);
2153 // If we ended up with this node as our destination, or we've already
2154 // got an edge for the representative, delete the current edge.
2155 if (Rep == CurrNodeIndex ||
2156 (Rep != DestVar && NewEdges.test(Rep))) {
2157 ToErase.set(DestVar);
2160 // Union the points-to sets into the dest
2161 if (GraphNodes[Rep].PointsTo |= CurrPointsTo) {
2162 GraphNodes[Rep].Changed = true;
2163 if (Node2Topo[Rep] <= i)
2166 // If this edge's destination was collapsed, rewrite the edge.
2167 if (Rep != DestVar) {
2168 ToErase.set(DestVar);
2172 CurrNode->Edges->intersectWithComplement(ToErase);
2173 CurrNode->Edges |= NewEdges;
// --- Between iterations: reset ordering state and re-run cycle detection /
// RPO numbering over the (possibly merged) graph. ---
2176 DFSNumber = RPONumber = 0;
2177 Node2Deleted.clear();
2181 Topo2Node.insert(Topo2Node.begin(), GraphNodes.size(), Unvisited);
2182 Node2Topo.insert(Node2Topo.begin(), GraphNodes.size(), Unvisited);
2183 Node2DFS.insert(Node2DFS.begin(), GraphNodes.size(), 0);
2184 Node2Deleted.insert(Node2Deleted.begin(), GraphNodes.size(), false);
2185 // Rediscover the DFS/Topo ordering, and cycle detect.
2186 for (unsigned j = 0; j < GraphNodes.size(); j++) {
2187 unsigned JRep = FindNode(j);
2188 if (Node2DFS[JRep] == 0)
// --- Teardown: free per-node scratch state used only while solving. ---
2198 Node2Deleted.clear();
2199 for (unsigned i = 0; i < GraphNodes.size(); ++i) {
2200 Node *N = &GraphNodes[i];
2201 delete N->OldPointsTo;
2206 //===----------------------------------------------------------------------===//
//                     Union-Find (node merging) Implementation
2208 //===----------------------------------------------------------------------===//
2210 // Unite nodes First and Second, returning the one which is now the
2211 // representative node. First and Second are indexes into GraphNodes
2212 unsigned Andersens::UniteNodes(unsigned First, unsigned Second) {
2213 assert (First < GraphNodes.size() && Second < GraphNodes.size() &&
2214 "Attempting to merge nodes that don't exist");
2215 // TODO: implement union by rank
2216 Node *FirstNode = &GraphNodes[First];
2217 Node *SecondNode = &GraphNodes[Second];
// Both inputs must already be representatives of their classes.
2219 assert (SecondNode->NodeRep == SelfRep && FirstNode->NodeRep == SelfRep &&
2220 "Trying to unite two non-representative nodes!");
2221 if (First == Second)
// (early return for the self-merge case is elided from this excerpt)
2224 SecondNode->NodeRep = First;
// Fold Second's solver state into First: pending "changed" flag, points-to
// set, outgoing edges, and unprocessed complex constraints (splice moves
// them without copying).
2225 FirstNode->Changed |= SecondNode->Changed;
2226 if (FirstNode->PointsTo && SecondNode->PointsTo)
2227 FirstNode->PointsTo |= *(SecondNode->PointsTo);
2228 if (FirstNode->Edges && SecondNode->Edges)
2229 FirstNode->Edges |= *(SecondNode->Edges);
2230 if (!FirstNode->Constraints.empty() && !SecondNode->Constraints.empty())
2231 FirstNode->Constraints.splice(FirstNode->Constraints.begin(),
2232 SecondNode->Constraints);
// Reset OldPointsTo so the merged node's whole points-to set is treated as
// new delta on its next visit.
2233 if (FirstNode->OldPointsTo) {
2234 delete FirstNode->OldPointsTo;
2235 FirstNode->OldPointsTo = new SparseBitVector<>;
2238 // Destroy interesting parts of the merged-from node.
2239 delete SecondNode->OldPointsTo;
2240 delete SecondNode->Edges;
2241 delete SecondNode->PointsTo;
2242 SecondNode->Edges = NULL;
2243 SecondNode->PointsTo = NULL;
2244 SecondNode->OldPointsTo = NULL;
2247 DOUT << "Unified Node ";
2248 DEBUG(PrintNode(FirstNode));
2249 DOUT << " and Node ";
2250 DEBUG(PrintNode(SecondNode));
2257 // Find the index into GraphNodes of the node representing Node, performing
2258 // path compression along the way
2259 unsigned Andersens::FindNode(unsigned NodeIndex) {
2260 assert (NodeIndex < GraphNodes.size()
2261 && "Attempting to find a node that can't exist");
2262 Node *N = &GraphNodes[NodeIndex];
// A node whose NodeRep is SelfRep is its own representative; the direct
// `return NodeIndex` for that case falls in an elided line.
2263 if (N->NodeRep == SelfRep)
// Otherwise recurse and cache the result (union-find path compression):
// every node on the chain ends up pointing directly at the root.
2266 return (N->NodeRep = FindNode(N->NodeRep));
2269 //===----------------------------------------------------------------------===//
//                               Debugging Output
2271 //===----------------------------------------------------------------------===//
// Print a human-readable name for graph node N to cerr (several
// early-return and punctuation lines are elided from this excerpt).
2273 void Andersens::PrintNode(Node *N) {
// The well-known special nodes get symbolic names.
2274 if (N == &GraphNodes[UniversalSet]) {
2275 cerr << "<universal>";
2277 } else if (N == &GraphNodes[NullPtr]) {
2278 cerr << "<nullptr>";
2280 } else if (N == &GraphNodes[NullObject]) {
// Nodes with no associated Value are analysis-created; print their address
// as a stand-in identity.
2284 if (!N->getValue()) {
2285 cerr << "artificial" << (intptr_t) N;
2289 assert(N->getValue() != 0 && "Never set node label!");
2290 Value *V = N->getValue();
// A Function stands for several nodes (retval / vararg slots per the file
// header's "function as struct" scheme); disambiguate which one this is.
2291 if (Function *F = dyn_cast<Function>(V)) {
2292 if (isa<PointerType>(F->getFunctionType()->getReturnType()) &&
2293 N == &GraphNodes[getReturnNode(F)]) {
2294 cerr << F->getName() << ":retval";
2296 } else if (F->getFunctionType()->isVarArg() &&
2297 N == &GraphNodes[getVarargNode(F)]) {
2298 cerr << F->getName() << ":vararg";
// Prefix locals/arguments with their enclosing function's name.
2303 if (Instruction *I = dyn_cast<Instruction>(V))
2304 cerr << I->getParent()->getParent()->getName() << ":";
2305 else if (Argument *Arg = dyn_cast<Argument>(V))
2306 cerr << Arg->getParent()->getName() << ":";
2309 cerr << V->getName();
2311 cerr << "(unnamed)";
// Globals and allocations also have a distinct memory-object node; the
// marker printed for that case falls in an elided line.
2313 if (isa<GlobalValue>(V) || isa<AllocationInst>(V))
2314 if (N == &GraphNodes[getObject(V)])
// Render one constraint to cerr. Stores print their Dest dereferenced as
// "*(Dest + K)" and Loads their Src as "*(Src + K)"; the lines emitting the
// surrounding "*(" / "=" punctuation are elided from this excerpt.
2317 void Andersens::PrintConstraint(const Constraint &C) {
2318 if (C.Type == Constraint::Store) {
2323 PrintNode(&GraphNodes[C.Dest]);
// Close the "*(Dest + K)" form only when a store actually has an offset.
2324 if (C.Type == Constraint::Store && C.Offset != 0)
2325 cerr << " + " << C.Offset << ")";
2327 if (C.Type == Constraint::Load) {
2332 else if (C.Type == Constraint::AddressOf)
2334 PrintNode(&GraphNodes[C.Src]);
// Store offsets were already printed on the Dest side above; print the
// offset on the Src side for the other constraint kinds.
2335 if (C.Offset != 0 && C.Type != Constraint::Store)
2336 cerr << " + " << C.Offset;
2337 if (C.Type == Constraint::Load && C.Offset != 0)
// Dump the entire global constraint list to cerr, one constraint per entry,
// via PrintConstraint.
2342 void Andersens::PrintConstraints() {
2343 cerr << "Constraints:\n";
2345 for (unsigned i = 0, e = Constraints.size(); i != e; ++i)
2346 PrintConstraint(Constraints[i]);
2349 void Andersens::PrintPointsToGraph() {
2350 cerr << "Points-to graph:\n";
2351 for (unsigned i = 0, e = GraphNodes.size(); i != e; ++i) {
2352 Node *N = &GraphNodes[i];
2353 if (FindNode (i) != i) {
2355 cerr << "\t--> same as ";
2356 PrintNode(&GraphNodes[FindNode(i)]);
2359 cerr << "[" << (N->PointsTo->count()) << "] ";
2364 for (SparseBitVector<>::iterator bi = N->PointsTo->begin();
2365 bi != N->PointsTo->end();
2369 PrintNode(&GraphNodes[*bi]);