1 //===- DataStructure.cpp - Implement the core data structure analysis -----===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by the LLVM research group and is distributed under
6 // the University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the core data structure functionality.
12 //===----------------------------------------------------------------------===//
14 #include "llvm/Analysis/DataStructure/DSGraphTraits.h"
15 #include "llvm/Constants.h"
16 #include "llvm/Function.h"
17 #include "llvm/GlobalVariable.h"
18 #include "llvm/Instructions.h"
19 #include "llvm/DerivedTypes.h"
20 #include "llvm/Target/TargetData.h"
21 #include "llvm/Assembly/Writer.h"
22 #include "llvm/Support/CommandLine.h"
23 #include "llvm/Support/Debug.h"
24 #include "llvm/ADT/DepthFirstIterator.h"
25 #include "llvm/ADT/STLExtras.h"
26 #include "llvm/ADT/SCCIterator.h"
27 #include "llvm/ADT/Statistic.h"
28 #include "llvm/Support/Streams.h"
29 #include "llvm/Support/Timer.h"
// COLLAPSE_ARRAYS_AGGRESSIVELY controls the extra folding branch compiled into
// DSNode::MergeNodes below (guarded by #if); it is disabled here.
34 #define COLLAPSE_ARRAYS_AGGRESSIVELY 0
// -stats counters for the "dsa" group: node folding, call-node merging,
// allocation, and the various dead-node-elimination paths.
37 Statistic NumFolds ("dsa", "Number of nodes completely folded");
38 Statistic NumCallNodesMerged("dsa", "Number of call nodes merged");
39 Statistic NumNodeAllocated ("dsa", "Number of nodes allocated");
40 Statistic NumDNE ("dsa", "Number of nodes removed by reachability");
41 Statistic NumTrivialDNE ("dsa", "Number of nodes trivially removed");
42 Statistic NumTrivialGlobalDNE("dsa", "Number of globals trivially removed");
// -dsa-field-limit: nodes that would need more pointer-sized fields than this
// are collapsed instead of tracked field-by-field (see DSNode::mergeTypeInfo).
43 static cl::opt<unsigned>
44 DSAFieldLimit("dsa-field-limit", cl::Hidden,
45 cl::desc("Number of fields to track before collapsing a node"),
// TIME_REGION expands to a NamedRegionTimer when timing is compiled in, and
// to nothing otherwise (the two definitions are selected by a preprocessor
// conditional not visible here).
50 #define TIME_REGION(VARNAME, DESC) \
51 NamedRegionTimer VARNAME(DESC)
53 #define TIME_REGION(VARNAME, DESC)
58 /// isForwarding - Return true if this NodeHandle is forwarding to another
/// node. A handle with a null node pointer never forwards.
60 bool DSNodeHandle::isForwarding() const {
// Short-circuit on null N before dereferencing.
61 return N && N->isForwarding();
/// HandleForwarding - Slow path taken when this handle points at a node that
/// has been forwarded: follow the forwarding link, fold the forwarded offset
/// into this handle, and release the reference to the obsolete node.
64 DSNode *DSNodeHandle::HandleForwarding() const {
65 assert(N->isForwarding() && "Can only be invoked if forwarding!");
// Debug-only check that the forwarding chain terminates: record each node
// visited and assert if one repeats.
67 { //assert not looping
69 std::set<DSNode*> seen;
70 while(NH && NH->isForwarding()) {
71 assert(seen.find(NH) == seen.end() && "Loop detected");
77 // Handle node forwarding here!
// getNode() on the forwarding handle recursively collapses multi-hop chains.
78 DSNode *Next = N->ForwardNH.getNode(); // Cause recursive shrinkage
79 Offset += N->ForwardNH.getOffset();
// Drop our reference to the obsolete node; if we were its last referrer,
// its forwarding link can be severed entirely.
81 if (--N->NumReferrers == 0) {
82 // Removing the last referrer to the node, sever the forwarding link
// If the new target is smaller than our accumulated offset, it must have
// been collapsed down to a <=1 byte node — anything else is a bug.
88 if (N->Size <= Offset) {
89 assert(N->Size <= 1 && "Forwarded to shrunk but not collapsed node?");
95 //===----------------------------------------------------------------------===//
96 // DSScalarMap Implementation
97 //===----------------------------------------------------------------------===//
/// AddGlobal - Create (or locate) the scalar-map entry for GV. If GV belongs
/// to a global equivalence class, the class leader's entry is used so that all
/// equivalenced globals share one node handle.
99 DSNodeHandle &DSScalarMap::AddGlobal(GlobalValue *GV) {
100 assert(ValueMap.count(GV) == 0 && "GV already exists!");
102 // If the node doesn't exist, check to see if it's a global that is
103 // equated to another global in the program.
104 EquivalenceClasses<GlobalValue*>::iterator ECI = GlobalECs.findValue(GV);
105 if (ECI != GlobalECs.end()) {
106 GlobalValue *Leader = *GlobalECs.findLeader(ECI);
// If an entry already exists for the class, return it instead of inserting.
109 iterator I = ValueMap.find(GV);
110 if (I != ValueMap.end())
115 // Okay, this is either not an equivalenced global or it is the leader, it
116 // will be inserted into the scalar map now.
117 GlobalSet.insert(GV);
// Insert a fresh, null node handle and hand back a reference the caller
// can fill in.
119 return ValueMap.insert(std::make_pair(GV, DSNodeHandle())).first->second;
123 //===----------------------------------------------------------------------===//
124 // DSNode Implementation
125 //===----------------------------------------------------------------------===//
/// DSNode ctor - Create an empty node, optionally seeding it with type info
/// for T and registering it with parent graph G.
127 DSNode::DSNode(const Type *T, DSGraph *G)
128 : NumReferrers(0), Size(0), ParentGraph(G), Ty(Type::VoidTy), NodeType(0) {
129 // Add the type entry if it is specified...
130 if (T) mergeTypeInfo(T, 0);
131 if (G) G->addNode(this);
135 // DSNode copy constructor... do not copy over the referrers list!
// NumReferrers starts at zero in the copy; Size, type, globals and flags are
// taken from N. When NullLinks is requested, the link vector is sized to
// match N but left full of null handles instead of copying the edges.
136 DSNode::DSNode(const DSNode &N, DSGraph *G, bool NullLinks)
137 : NumReferrers(0), Size(N.Size), ParentGraph(G),
138 Ty(N.Ty), Globals(N.Globals), NodeType(N.NodeType) {
142 Links.resize(N.Links.size()); // Create the appropriate number of null links
147 /// getTargetData - Get the target data object used to construct this node.
/// Simply delegates to the owning graph.
149 const TargetData &DSNode::getTargetData() const {
150 return ParentGraph->getTargetData();
/// assertOK - Internal consistency check: verify the node's type/size
/// invariants and that every global recorded on this node maps back to this
/// node through the parent graph's scalar map.
153 void DSNode::assertOK() const {
// NOTE(review): the `Ty == Type::VoidTy` conjunct on the next line is
// redundant — it is the negation of the first disjunct — but harmless.
154 assert((Ty != Type::VoidTy ||
155 Ty == Type::VoidTy && (Size == 0 ||
156 (NodeType & DSNode::Array))) &&
159 assert(ParentGraph && "Node has no parent?");
160 const DSScalarMap &SM = ParentGraph->getScalarMap();
// Every global attached to this node must be registered in the scalar map,
// and its map entry must point back at this node.
161 for (unsigned i = 0, e = Globals.size(); i != e; ++i) {
162 assert(SM.global_count(Globals[i]));
163 assert(SM.find(Globals[i])->second.getNode() == this);
167 /// forwardNode - Mark this node as being obsolete, and all references to it
168 /// should be forwarded to the specified node and offset.
170 void DSNode::forwardNode(DSNode *To, unsigned Offset) {
171 assert(this != To && "Cannot forward a node to itself!");
172 assert(ForwardNH.isNull() && "Already forwarding from this node!");
173 if (To->Size <= 1) Offset = 0;
174 assert((Offset < To->Size || (Offset == To->Size && Offset == 0)) &&
175 "Forwarded offset is wrong!");
176 ForwardNH.setTo(To, Offset);
181 // Remove this node from the parent graph's Nodes list.
182 ParentGraph->unlinkNode(this);
186 // addGlobal - Add an entry for a global value to the Globals list. This also
187 // marks the node with the 'G' flag if it does not already have it.
189 void DSNode::addGlobal(GlobalValue *GV) {
190 // First, check to make sure this is the leader if the global is in an
191 // equivalence class.
192 GV = getParentGraph()->getScalarMap().getLeaderForGlobal(GV);
194 // Keep the list sorted.
195 std::vector<GlobalValue*>::iterator I =
196 std::lower_bound(Globals.begin(), Globals.end(), GV);
// Only insert when absent — Globals is maintained as a sorted, unique list.
198 if (I == Globals.end() || *I != GV) {
199 Globals.insert(I, GV);
200 NodeType |= GlobalNode;
204 // removeGlobal - Remove the specified global that is explicitly in the globals
// list. Located by binary search (the list is sorted); asserts if absent.
206 void DSNode::removeGlobal(GlobalValue *GV) {
207 std::vector<GlobalValue*>::iterator I =
208 std::lower_bound(Globals.begin(), Globals.end(), GV);
209 assert(I != Globals.end() && *I == GV && "Global not in node!");
213 /// foldNodeCompletely - If we determine that this node has some funny
214 /// behavior happening to it that we cannot represent, we fold it down to a
215 /// single, completely pessimistic, node. This node is represented as a
216 /// single byte with a single TypeEntry of "void".
218 void DSNode::foldNodeCompletely() {
219 if (isNodeCompletelyFolded()) return; // If this node is already folded...
223 // If this node has a size that is <= 1, we don't need to create a forwarding
// node: just mark this node itself as a collapsed (array) node in place.
225 if (getSize() <= 1) {
226 NodeType |= DSNode::Array;
229 assert(Links.size() <= 1 && "Size is 1, but has more links?");
232 // Create the node we are going to forward to. This is required because
233 // some referrers may have an offset that is > 0. By forcing them to
234 // forward, the forwarder has the opportunity to correct the offset.
235 DSNode *DestNode = new DSNode(0, ParentGraph);
236 DestNode->NodeType = NodeType|DSNode::Array;
237 DestNode->Ty = Type::VoidTy;
// Transfer the globals list to the new node without copying.
239 DestNode->Globals.swap(Globals);
241 // Start forwarding to the destination node...
242 forwardNode(DestNode, 0);
// Collapse all outgoing links into DestNode's single link slot 0. NH keeps
// a handle on DestNode so it is tracked across the merges below.
244 if (!Links.empty()) {
245 DestNode->Links.reserve(1);
247 DSNodeHandle NH(DestNode);
248 DestNode->Links.push_back(Links[0]);
250 // If we have links, merge all of our outgoing links together...
251 for (unsigned i = Links.size()-1; i != 0; --i)
252 NH.getNode()->Links[0].mergeWith(Links[i]);
// The folded node keeps exactly one link slot.
255 DestNode->Links.resize(1);
260 /// isNodeCompletelyFolded - Return true if this node has been completely
261 /// folded down to something that can never be expanded, effectively losing
262 /// all of the field sensitivity that may be present in the node.
264 bool DSNode::isNodeCompletelyFolded() const {
// A folded node is canonically a 1-byte void node with the Array bit set.
265 return getSize() == 1 && Ty == Type::VoidTy && isArray();
268 /// addFullGlobalsList - Compute the full set of global values that are
269 /// represented by this node. Unlike getGlobalsList(), this requires fair
270 /// amount of work to compute, so don't treat this method call as free.
271 void DSNode::addFullGlobalsList(std::vector<GlobalValue*> &List) const {
272 if (globals_begin() == globals_end()) return;
274 EquivalenceClasses<GlobalValue*> &EC = getParentGraph()->getGlobalECs();
// For each recorded global (class leaders), expand to every member of its
// equivalence class; globals with no class are handled elsewhere.
276 for (globals_iterator I = globals_begin(), E = globals_end(); I != E; ++I) {
277 EquivalenceClasses<GlobalValue*>::iterator ECI = EC.findValue(*I);
281 List.insert(List.end(), EC.member_begin(ECI), EC.member_end());
285 /// addFullFunctionList - Identical to addFullGlobalsList, but only return the
286 /// functions in the full list.
287 void DSNode::addFullFunctionList(std::vector<Function*> &List) const {
288 if (globals_begin() == globals_end()) return;
290 EquivalenceClasses<GlobalValue*> &EC = getParentGraph()->getGlobalECs();
292 for (globals_iterator I = globals_begin(), E = globals_end(); I != E; ++I) {
293 EquivalenceClasses<GlobalValue*>::iterator ECI = EC.findValue(*I);
// Not in any equivalence class: the global itself is the only candidate.
294 if (ECI == EC.end()) {
295 if (Function *F = dyn_cast<Function>(*I))
// Otherwise walk the whole class, keeping only the Functions.
298 for (EquivalenceClasses<GlobalValue*>::member_iterator MI =
299 EC.member_begin(ECI), E = EC.member_end(); MI != E; ++MI)
300 if (Function *F = dyn_cast<Function>(*MI))
307 /// TypeElementWalker Class - Used for implementation of physical subtyping...
/// Performs a depth-first, left-to-right walk over the leaf (first-class)
/// elements of a type, tracking each leaf's byte offset via TargetData.
309 class TypeElementWalker {
// StackState - One frame of the DFS: the type being walked, its base byte
// offset, and the index of the child currently being visited.
314 StackState(const Type *T, unsigned Off = 0)
315 : Ty(T), Offset(Off), Idx(0) {}
318 std::vector<StackState> Stack;
319 const TargetData &TD;
321 TypeElementWalker(const Type *T, const TargetData &td) : TD(td) {
// isDone/getCurrentType/getCurrentOffset - Observers for the current leaf.
326 bool isDone() const { return Stack.empty(); }
327 const Type *getCurrentType() const { return Stack.back().Ty; }
328 unsigned getCurrentOffset() const { return Stack.back().Offset; }
330 void StepToNextType() {
331 PopStackAndAdvance();
336 /// PopStackAndAdvance - Pop the current element off of the stack and
337 /// advance the underlying element to the next contained member.
338 void PopStackAndAdvance() {
339 assert(!Stack.empty() && "Cannot pop an empty stack!");
// Keep popping exhausted frames; stop at the first frame that still has a
// sibling to advance to.
341 while (!Stack.empty()) {
342 StackState &SS = Stack.back();
343 if (const StructType *ST = dyn_cast<StructType>(SS.Ty)) {
345 if (SS.Idx != ST->getNumElements()) {
346 const StructLayout *SL = TD.getStructLayout(ST);
// Advance the offset by the gap between consecutive member offsets.
348 unsigned(SL->MemberOffsets[SS.Idx]-SL->MemberOffsets[SS.Idx-1]);
351 Stack.pop_back(); // At the end of the structure
// Non-struct aggregates here must be arrays; elements are evenly spaced.
353 const ArrayType *AT = cast<ArrayType>(SS.Ty);
355 if (SS.Idx != AT->getNumElements()) {
356 SS.Offset += unsigned(TD.getTypeSize(AT->getElementType()));
359 Stack.pop_back(); // At the end of the array
364 /// StepToLeaf - Used by physical subtyping to move to the first leaf node
365 /// on the type stack.
367 if (Stack.empty()) return;
// Descend through aggregates until the top of stack is a first-class type.
368 while (!Stack.empty() && !Stack.back().Ty->isFirstClassType()) {
369 StackState &SS = Stack.back();
370 if (const StructType *ST = dyn_cast<StructType>(SS.Ty)) {
// Empty structs contribute no leaves — skip past them entirely.
371 if (ST->getNumElements() == 0) {
373 PopStackAndAdvance();
375 // Step into the structure...
376 assert(SS.Idx < ST->getNumElements());
377 const StructLayout *SL = TD.getStructLayout(ST);
378 Stack.push_back(StackState(ST->getElementType(SS.Idx),
379 SS.Offset+unsigned(SL->MemberOffsets[SS.Idx])));
382 const ArrayType *AT = cast<ArrayType>(SS.Ty);
// Likewise, zero-length arrays contain no leaves.
383 if (AT->getNumElements() == 0) {
385 PopStackAndAdvance();
387 // Step into the array...
388 assert(SS.Idx < AT->getNumElements());
389 Stack.push_back(StackState(AT->getElementType(),
391 unsigned(TD.getTypeSize(AT->getElementType()))));
397 } // end anonymous namespace
399 /// ElementTypesAreCompatible - Check to see if the specified types are
400 /// "physically" compatible. If so, return true, else return false. We only
401 /// have to check the fields in T1: T2 may be larger than T1. If AllowLargerT1
402 /// is true, then we also allow a larger T1.
404 static bool ElementTypesAreCompatible(const Type *T1, const Type *T2,
405 bool AllowLargerT1, const TargetData &TD){
// Walk both types leaf-by-leaf in parallel, comparing offset and leaf type.
406 TypeElementWalker T1W(T1, TD), T2W(T2, TD);
408 while (!T1W.isDone() && !T2W.isDone()) {
// Corresponding leaves must sit at the same byte offset...
409 if (T1W.getCurrentOffset() != T2W.getCurrentOffset())
// (these inner T1/T2 deliberately shadow the parameters: they are the
// current leaf types, not the whole aggregates)
412 const Type *T1 = T1W.getCurrentType();
413 const Type *T2 = T2W.getCurrentType();
// ...and be identical or losslessly bit-castable.
414 if (T1 != T2 && !T1->canLosslesslyBitCastTo(T2))
417 T1W.StepToNextType();
418 T2W.StepToNextType();
// T1 exhausted means every T1 field matched; a leftover T1 tail is only OK
// when the caller permits a larger T1.
421 return AllowLargerT1 || T1W.isDone();
425 /// mergeTypeInfo - This method merges the specified type into the current node
426 /// at the specified offset. This may update the current node's type record if
427 /// this gives more information to the node, it may do nothing to the node if
428 /// this information is already known, or it may merge the node completely (and
429 /// return true) if the information is incompatible with what is already known.
431 /// This method returns true if the node is completely folded, otherwise false.
433 bool DSNode::mergeTypeInfo(const Type *NewTy, unsigned Offset,
434 bool FoldIfIncompatible) {
435 DOUT << "merging " << *NewTy << " at " << Offset << " with " << *Ty << "\n";
436 const TargetData &TD = getTargetData();
437 // Check to make sure the Size member is up-to-date. Size can be one of the
439 // Size = 0, Ty = Void: Nothing is known about this node.
440 // Size = 0, Ty = FnTy: FunctionPtr doesn't have a size, so we use zero
441 // Size = 1, Ty = Void, Array = 1: The node is collapsed
442 // Otherwise, sizeof(Ty) = Size
// NOTE(review): the `Size == 0 && !Ty->isSized()` clause below appears twice
// (lines 445 and 447) — redundant but harmless in an assert.
444 assert(((Size == 0 && Ty == Type::VoidTy && !isArray()) ||
445 (Size == 0 && !Ty->isSized() && !isArray()) ||
446 (Size == 1 && Ty == Type::VoidTy && isArray()) ||
447 (Size == 0 && !Ty->isSized() && !isArray()) ||
448 (TD.getTypeSize(Ty) == Size)) &&
449 "Size member of DSNode doesn't match the type structure!");
450 assert(NewTy != Type::VoidTy && "Cannot merge void type into DSNode!");
452 if (Offset == 0 && NewTy == Ty)
453 return false; // This should be a common case, handle it efficiently
455 // Return true immediately if the node is completely folded.
456 if (isNodeCompletelyFolded()) return true;
458 // If this is an array type, eliminate the outside arrays because they won't
459 // be used anyway. This greatly reduces the size of large static arrays used
460 // as global variables, for example.
462 bool WillBeArray = false;
463 while (const ArrayType *AT = dyn_cast<ArrayType>(NewTy)) {
464 // FIXME: we might want to keep small arrays, but must be careful about
465 // things like: [2 x [10000 x int*]]
466 NewTy = AT->getElementType();
470 // Figure out how big the new type we're merging in is...
471 unsigned NewTySize = NewTy->isSized() ? (unsigned)TD.getTypeSize(NewTy) : 0;
473 // Otherwise check to see if we can fold this type into the current node. If
474 // we can't, we fold the node completely, if we can, we potentially update our
// Case 1: node has no type yet — adopt NewTy wholesale (offset must be 0).
477 if (Ty == Type::VoidTy) {
478 // If this is the first type that this node has seen, just accept it without
480 assert(Offset == 0 && !isArray() &&
481 "Cannot have an offset into a void node!");
483 // If this node would have to have an unreasonable number of fields, just
484 // collapse it. This can occur for fortran common blocks, which have stupid
485 // things like { [100000000 x double], [1000000 x double] }.
486 unsigned NumFields = (NewTySize+DS::PointerSize-1) >> DS::PointerShift;
487 if (NumFields > DSAFieldLimit) {
488 foldNodeCompletely();
494 if (WillBeArray) NodeType |= Array;
497 // Calculate the number of outgoing links from this node.
498 Links.resize(NumFields);
// Case 2: the incoming type extends past the node's current size — the node
// must grow (or be collapsed if growth is illegal/unreasonable).
502 // Handle node expansion case here...
503 if (Offset+NewTySize > Size) {
504 // It is illegal to grow this node if we have treated it as an array of
507 if (FoldIfIncompatible) foldNodeCompletely();
511 // If this node would have to have an unreasonable number of fields, just
512 // collapse it. This can occur for fortran common blocks, which have stupid
513 // things like { [100000000 x double], [1000000 x double] }.
514 unsigned NumFields = (NewTySize+Offset+DS::PointerSize-1) >> DS::PointerShift;
515 if (NumFields > DSAFieldLimit) {
516 foldNodeCompletely();
521 //handle some common cases:
522 // Ty: struct { t1, t2, t3, t4, ..., tn}
523 // NewTy: struct { offset, stuff...}
524 // try merge with NewTy: struct {t1, t2, stuff...} if offset lands exactly
// Growth heuristic A: both struct — splice NewTy's members in at the field
// boundary matching Offset, then retry the merge at offset 0.
526 if (isa<StructType>(NewTy) && isa<StructType>(Ty)) {
527 DOUT << "Ty: " << *Ty << "\nNewTy: " << *NewTy << "@" << Offset << "\n";
528 const StructType *STy = cast<StructType>(Ty);
529 const StructLayout &SL = *TD.getStructLayout(STy);
530 unsigned i = SL.getElementContainingOffset(Offset);
531 //Either we hit it exactly or give up
532 if (SL.MemberOffsets[i] != Offset) {
533 if (FoldIfIncompatible) foldNodeCompletely();
536 std::vector<const Type*> nt;
537 for (unsigned x = 0; x < i; ++x)
538 nt.push_back(STy->getElementType(x));
539 STy = cast<StructType>(NewTy);
540 nt.insert(nt.end(), STy->element_begin(), STy->element_end());
// Build the synthetic combined struct and recurse with it at offset 0.
542 STy = StructType::get(nt);
543 DOUT << "Trying with: " << *STy << "\n";
544 return mergeTypeInfo(STy, 0);
547 //Ty: struct { t1, t2, t3 ... tn}
549 //try merge with NewTy: struct : {t1, t2, T} if offset lands on a field
// Growth heuristic B: only Ty is a struct — keep its leading fields up to
// the offset, append NewTy, and retry at offset 0.
551 if (isa<StructType>(Ty)) {
552 DOUT << "Ty: " << *Ty << "\nNewTy: " << *NewTy << "@" << Offset << "\n";
553 const StructType *STy = cast<StructType>(Ty);
554 const StructLayout &SL = *TD.getStructLayout(STy);
555 unsigned i = SL.getElementContainingOffset(Offset);
556 //Either we hit it exactly or give up
557 if (SL.MemberOffsets[i] != Offset) {
558 if (FoldIfIncompatible) foldNodeCompletely();
561 std::vector<const Type*> nt;
562 for (unsigned x = 0; x < i; ++x)
563 nt.push_back(STy->getElementType(x));
566 STy = StructType::get(nt);
567 DOUT << "Trying with: " << *STy << "\n";
568 return mergeTypeInfo(STy, 0);
// No heuristic applies for a non-zero-offset growth: collapse.
572 "UNIMP: Trying to merge a growth type into "
573 "offset != 0: Collapsing!");
575 if (FoldIfIncompatible) foldNodeCompletely();
581 // Okay, the situation is nice and simple, we are trying to merge a type in
582 // at offset 0 that is bigger than our current type. Implement this by
583 // switching to the new type and then merge in the smaller one, which should
584 // hit the other code path here. If the other code path decides it's not
585 // ok, it will collapse the node as appropriate.
588 const Type *OldTy = Ty;
591 if (WillBeArray) NodeType |= Array;
594 // Must grow links to be the appropriate size...
595 Links.resize(NumFields);
597 // Merge in the old type now... which is guaranteed to be smaller than the
599 return mergeTypeInfo(OldTy, 0);
// Case 3: NewTy fits inside the current type — locate the sub-type of Ty
// that begins at Offset by descending through structs/arrays.
602 assert(Offset <= Size &&
603 "Cannot merge something into a part of our type that doesn't exist!");
605 // Find the section of Ty that NewTy overlaps with... first we find the
606 // type that starts at offset Offset.
609 const Type *SubType = Ty;
611 assert(Offset-O < TD.getTypeSize(SubType) && "Offset out of range!");
613 switch (SubType->getTypeID()) {
614 case Type::StructTyID: {
615 const StructType *STy = cast<StructType>(SubType);
616 const StructLayout &SL = *TD.getStructLayout(STy);
617 unsigned i = SL.getElementContainingOffset(Offset-O);
619 // The offset we are looking for must be in the i'th element...
620 SubType = STy->getElementType(i);
621 O += (unsigned)SL.MemberOffsets[i];
624 case Type::ArrayTyID: {
// For arrays, step into the element containing the offset; O is rounded
// down to that element's start.
625 SubType = cast<ArrayType>(SubType)->getElementType();
626 unsigned ElSize = (unsigned)TD.getTypeSize(SubType);
627 unsigned Remainder = (Offset-O) % ElSize;
628 O = Offset-Remainder;
// Descent hit a non-aggregate before reaching the offset: incompatible.
632 if (FoldIfIncompatible) foldNodeCompletely();
637 assert(O == Offset && "Could not achieve the correct offset!");
639 // If we found our type exactly, early exit
640 if (SubType == NewTy) return false;
642 // Differing function types don't require us to merge. They are not values
644 if (isa<FunctionType>(SubType) &&
645 isa<FunctionType>(NewTy)) return false;
647 unsigned SubTypeSize = SubType->isSized() ?
648 (unsigned)TD.getTypeSize(SubType) : 0;
650 // Ok, we are getting desperate now. Check for physical subtyping, where we
651 // just require each element in the node to be compatible.
// Size caps (< 256) bound the cost of the element-by-element walk.
652 if (NewTySize <= SubTypeSize && NewTySize && NewTySize < 256 &&
653 SubTypeSize && SubTypeSize < 256 &&
654 ElementTypesAreCompatible(NewTy, SubType, !isArray(), TD))
657 // Okay, so we found the leader type at the offset requested. Search the list
658 // of types that starts at this offset. If SubType is currently an array or
659 // structure, the type desired may actually be the first element of the
// Repeatedly step into the first element of the aggregate at this offset,
// as long as doing so does not drop below NewTySize.
662 unsigned PadSize = SubTypeSize; // Size, including pad memory which is ignored
663 while (SubType != NewTy) {
664 const Type *NextSubType = 0;
665 unsigned NextSubTypeSize = 0;
666 unsigned NextPadSize = 0;
667 switch (SubType->getTypeID()) {
668 case Type::StructTyID: {
669 const StructType *STy = cast<StructType>(SubType);
670 const StructLayout &SL = *TD.getStructLayout(STy);
// The first field's padded extent runs to the second field's offset (or
// the whole struct when there is only one field).
671 if (SL.MemberOffsets.size() > 1)
672 NextPadSize = (unsigned)SL.MemberOffsets[1];
674 NextPadSize = SubTypeSize;
675 NextSubType = STy->getElementType(0);
676 NextSubTypeSize = (unsigned)TD.getTypeSize(NextSubType);
679 case Type::ArrayTyID:
680 NextSubType = cast<ArrayType>(SubType)->getElementType();
681 NextSubTypeSize = (unsigned)TD.getTypeSize(NextSubType);
682 NextPadSize = NextSubTypeSize;
688 if (NextSubType == 0)
689 break; // In the default case, break out of the loop
691 if (NextPadSize < NewTySize)
692 break; // Don't allow shrinking to a smaller type than NewTySize
693 SubType = NextSubType;
694 SubTypeSize = NextSubTypeSize;
695 PadSize = NextPadSize;
698 // If we found the type exactly, return it...
699 if (SubType == NewTy)
702 // Check to see if we have a compatible, but different type...
703 if (NewTySize == SubTypeSize) {
704 // Check to see if this type is obviously convertible... int -> uint f.e.
705 if (NewTy->canLosslesslyBitCastTo(SubType))
708 // Check to see if we have a pointer & integer mismatch going on here,
709 // loading a pointer as a long, for example.
// NOTE(review): relies on && binding tighter than || — correct as written,
// though parentheses would silence compiler warnings.
711 if (SubType->isInteger() && isa<PointerType>(NewTy) ||
712 NewTy->isInteger() && isa<PointerType>(SubType))
714 } else if (NewTySize > SubTypeSize && NewTySize <= PadSize) {
715 // We are accessing the field, plus some structure padding. Ignore the
716 // structure padding.
// Incompatible: emit a symbolic-type debug dump (module taken from any
// return-node's function, if one exists), then collapse if requested.
721 if (getParentGraph()->retnodes_begin() != getParentGraph()->retnodes_end())
722 M = getParentGraph()->retnodes_begin()->first->getParent();
724 DOUT << "MergeTypeInfo Folding OrigTy: ";
725 DEBUG(WriteTypeSymbolic(std::cerr, Ty, M) << "\n due to:";
726 WriteTypeSymbolic(std::cerr, NewTy, M) << " @ " << Offset << "!\n"
728 WriteTypeSymbolic(std::cerr, SubType, M) << "\n\n");
730 if (FoldIfIncompatible) foldNodeCompletely();
736 /// addEdgeTo - Add an edge from the current node to the specified node. This
737 /// can cause merging of nodes in the graph.
739 void DSNode::addEdgeTo(unsigned Offset, const DSNodeHandle &NH) {
740 if (NH.isNull()) return; // Nothing to do
// Folded nodes have a single link slot; Offset handling continues below.
742 if (isNodeCompletelyFolded())
745 DSNodeHandle &ExistingEdge = getLink(Offset);
746 if (!ExistingEdge.isNull()) {
747 // Merge the two nodes...
748 ExistingEdge.mergeWith(NH);
749 } else { // No merging to perform...
750 setLink(Offset, NH); // Just force a link in there...
755 /// MergeSortedVectors - Efficiently merge a vector into another vector where
756 /// duplicates are not allowed and both are sorted. This assumes that 'T's are
757 /// efficiently copyable and have sane comparison semantics.
759 static void MergeSortedVectors(std::vector<GlobalValue*> &Dest,
760 const std::vector<GlobalValue*> &Src) {
761 // By far, the most common cases will be the simple ones. In these cases,
762 // avoid having to allocate a temporary vector...
764 if (Src.empty()) { // Nothing to merge in...
766 } else if (Dest.empty()) { // Just copy the result in...
768 } else if (Src.size() == 1) { // Insert a single element...
769 const GlobalValue *V = Src[0];
770 std::vector<GlobalValue*>::iterator I =
771 std::lower_bound(Dest.begin(), Dest.end(), V);
772 if (I == Dest.end() || *I != Src[0]) // If not already contained...
773 Dest.insert(I, Src[0]);
// Symmetric single-element case: replace Dest by Src, then re-insert the
// saved Dest element if Src did not already contain it.
774 } else if (Dest.size() == 1) {
775 GlobalValue *Tmp = Dest[0]; // Save value in temporary...
776 Dest = Src; // Copy over list...
777 std::vector<GlobalValue*>::iterator I =
778 std::lower_bound(Dest.begin(), Dest.end(), Tmp);
779 if (I == Dest.end() || *I != Tmp) // If not already contained...
// General case: classic sorted merge followed by duplicate removal.
783 // Make a copy to the side of Dest...
784 std::vector<GlobalValue*> Old(Dest);
786 // Make space for all of the type entries now...
787 Dest.resize(Dest.size()+Src.size());
789 // Merge the two sorted ranges together... into Dest.
790 std::merge(Old.begin(), Old.end(), Src.begin(), Src.end(), Dest.begin());
792 // Now erase any duplicate entries that may have accumulated into the
793 // vectors (because they were in both of the input sets)
794 Dest.erase(std::unique(Dest.begin(), Dest.end()), Dest.end());
/// mergeGlobals - Merge the sorted globals list RHS into this node's sorted,
/// duplicate-free Globals list.
798 void DSNode::mergeGlobals(const std::vector<GlobalValue*> &RHS) {
799 MergeSortedVectors(Globals, RHS);
802 // MergeNodes - Helper function for DSNode::mergeWith().
803 // This function does the hard work of merging two nodes, CurNodeH
804 // and NH after filtering out trivial cases and making sure that
805 // CurNodeH.offset >= NH.offset.
808 // Since merging may cause either node to go away, we must always
809 // use the node-handles to refer to the nodes. These node handles are
810 // automatically updated during merging, so will always provide access
811 // to the correct node after a merge.
813 void DSNode::MergeNodes(DSNodeHandle& CurNodeH, DSNodeHandle& NH) {
814 assert(CurNodeH.getOffset() >= NH.getOffset() &&
815 "This should have been enforced in the caller.");
816 assert(CurNodeH.getNode()->getParentGraph()==NH.getNode()->getParentGraph() &&
817 "Cannot merge two nodes that are not in the same graph!");
819 // Now we know that Offset >= NH.Offset, so convert it so our "Offset" (with
820 // respect to NH.Offset) is now zero. NOffset is the distance from the base
821 // of our object that N starts from.
823 unsigned NOffset = CurNodeH.getOffset()-NH.getOffset();
824 unsigned NSize = NH.getNode()->getSize();
826 // If the two nodes are of different size, and the smaller node has the array
827 // bit set, collapse!
// This aggressive collapse path is compiled out by default (see the
// COLLAPSE_ARRAYS_AGGRESSIVELY definition at the top of the file).
828 if (NSize != CurNodeH.getNode()->getSize()) {
829 #if COLLAPSE_ARRAYS_AGGRESSIVELY
830 if (NSize < CurNodeH.getNode()->getSize()) {
831 if (NH.getNode()->isArray())
832 NH.getNode()->foldNodeCompletely();
833 } else if (CurNodeH.getNode()->isArray()) {
834 NH.getNode()->foldNodeCompletely();
839 // Merge the type entries of the two nodes together...
840 if (NH.getNode()->Ty != Type::VoidTy)
841 CurNodeH.getNode()->mergeTypeInfo(NH.getNode()->Ty, NOffset);
842 assert(!CurNodeH.getNode()->isDeadNode());
844 // If we are merging a node with a completely folded node, then both nodes are
845 // now completely folded.
847 if (CurNodeH.getNode()->isNodeCompletelyFolded()) {
848 if (!NH.getNode()->isNodeCompletelyFolded()) {
849 NH.getNode()->foldNodeCompletely();
850 assert(NH.getNode() && NH.getOffset() == 0 &&
851 "folding did not make offset 0?");
// Folding normalizes the handle: offset 0, size 1.
852 NOffset = NH.getOffset();
853 NSize = NH.getNode()->getSize();
854 assert(NOffset == 0 && NSize == 1);
856 } else if (NH.getNode()->isNodeCompletelyFolded()) {
857 CurNodeH.getNode()->foldNodeCompletely();
858 assert(CurNodeH.getNode() && CurNodeH.getOffset() == 0 &&
859 "folding did not make offset 0?");
860 NSize = NH.getNode()->getSize();
861 NOffset = NH.getOffset();
862 assert(NOffset == 0 && NSize == 1);
// Folding above may already have merged the two handles onto one node — in
// that case (or if N vanished) there is nothing left to do.
865 DSNode *N = NH.getNode();
866 if (CurNodeH.getNode() == N || N == 0) return;
867 assert(!CurNodeH.getNode()->isDeadNode());
869 // Merge the NodeType information.
870 CurNodeH.getNode()->NodeType |= N->NodeType;
872 // Start forwarding to the new node!
873 N->forwardNode(CurNodeH.getNode(), NOffset);
874 assert(!CurNodeH.getNode()->isDeadNode());
876 // Make all of the outgoing links of N now be outgoing links of CurNodeH.
878 for (unsigned i = 0; i < N->getNumLinks(); ++i) {
879 DSNodeHandle &Link = N->getLink(i << DS::PointerShift);
880 if (Link.getNode()) {
881 // Compute the offset into the current node at which to
882 // merge this link. In the common case, this is a linear
883 // relation to the offset in the original node (with
884 // wrapping), but if the current node gets collapsed due to
885 // recursive merging, we must make sure to merge in all remaining
886 // links at offset zero.
887 unsigned MergeOffset = 0;
888 DSNode *CN = CurNodeH.getNode();
890 MergeOffset = ((i << DS::PointerShift)+NOffset) % CN->getSize();
891 CN->addEdgeTo(MergeOffset, Link);
895 // Now that there are no outgoing edges, all of the Links are dead.
898 // Merge the globals list...
899 if (!N->Globals.empty()) {
900 CurNodeH.getNode()->mergeGlobals(N->Globals);
902 // Delete the globals from the old node...
// swap with an empty temporary actually releases the vector's storage,
// which clear() would not.
903 std::vector<GlobalValue*>().swap(N->Globals);
908 /// mergeWith - Merge this node and the specified node, moving all links to and
909 /// from the argument node into the current node, deleting the node argument.
910 /// Offset indicates what offset the specified node is to be merged into the
913 /// The specified node may be a null pointer (in which case, we update it to
914 /// point to this node).
916 void DSNode::mergeWith(const DSNodeHandle &NH, unsigned Offset) {
917 DSNode *N = NH.getNode();
// Trivial case: merging a handle to ourselves at the same offset is a no-op.
918 if (N == this && NH.getOffset() == Offset)
921 // If the RHS is a null node, make it point to this node!
923 NH.mergeWith(DSNodeHandle(this, Offset));
927 assert(!N->isDeadNode() && !isDeadNode());
928 assert(!hasNoReferrers() && "Should not try to fold a useless node!");
931 // We cannot merge two pieces of the same node together, collapse the node
// (same node at different offsets — field structure cannot be preserved).
933 DOUT << "Attempting to merge two chunks of the same node together!\n";
934 foldNodeCompletely();
938 // If both nodes are not at offset 0, make sure that we are merging the node
939 // at an later offset into the node with the zero offset.
// These two recursive calls flip the argument order so that MergeNodes'
// precondition (CurNodeH.offset >= NH.offset) and size preference hold.
941 if (Offset < NH.getOffset()) {
942 N->mergeWith(DSNodeHandle(this, Offset), NH.getOffset());
944 } else if (Offset == NH.getOffset() && getSize() < N->getSize()) {
945 // If the offsets are the same, merge the smaller node into the bigger node
946 N->mergeWith(DSNodeHandle(this, Offset), NH.getOffset());
950 // Ok, now we can merge the two nodes. Use a static helper that works with
951 // two node handles, since "this" may get merged away at intermediate steps.
952 DSNodeHandle CurNodeH(this, Offset);
953 DSNodeHandle NHCopy(NH);
954 if (CurNodeH.getOffset() >= NHCopy.getOffset())
955 DSNode::MergeNodes(CurNodeH, NHCopy);
957 DSNode::MergeNodes(NHCopy, CurNodeH);
961 //===----------------------------------------------------------------------===//
962 // ReachabilityCloner Implementation
963 //===----------------------------------------------------------------------===//
/// getClonedNH - Return the destination-graph node handle corresponding to
/// SrcNH, cloning the source node (and, recursively, everything reachable
/// from it) into the destination graph if it has not been mapped yet.
965 DSNodeHandle ReachabilityCloner::getClonedNH(const DSNodeHandle &SrcNH) {
966 if (SrcNH.isNull()) return DSNodeHandle();
967 const DSNode *SN = SrcNH.getNode();
// NodeMap caches the clone for each source node; note NH is a reference
// into the map, so recursive clones/merges below keep it up to date.
969 DSNodeHandle &NH = NodeMap[SN];
970 if (!NH.isNull()) { // Node already mapped?
971 DSNode *NHN = NH.getNode();
972 return DSNodeHandle(NHN, NH.getOffset()+SrcNH.getOffset());
975 // If SrcNH has globals and the destination graph has one of the same globals,
976 // merge this node with the destination node, which is much more efficient.
977 if (SN->globals_begin() != SN->globals_end()) {
978 DSScalarMap &DestSM = Dest.getScalarMap();
979 for (DSNode::globals_iterator I = SN->globals_begin(),E = SN->globals_end();
981 GlobalValue *GV = *I;
982 DSScalarMap::iterator GI = DestSM.find(GV);
983 if (GI != DestSM.end() && !GI->second.isNull()) {
984 // We found one, use merge instead!
985 merge(GI->second, Src.getNodeForValue(GV));
// merge() populates NodeMap[SN], so NH is now valid.
986 assert(!NH.isNull() && "Didn't merge node!");
987 DSNode *NHN = NH.getNode();
988 return DSNodeHandle(NHN, NH.getOffset()+SrcNH.getOffset());
// No shortcut applied: clone the node itself (links nulled out, to be
// filled below), keeping only the node-type bits the cloner allows.
993 DSNode *DN = new DSNode(*SN, &Dest, true /* Null out all links */);
994 DN->maskNodeTypes(BitsToKeep);
997 // Next, recursively clone all outgoing links as necessary. Note that
998 // adding these links can cause the node to collapse itself at any time, and
999 // the current node may be merged with arbitrary other nodes. For this
1000 // reason, we must always go through NH.
1002 for (unsigned i = 0, e = SN->getNumLinks(); i != e; ++i) {
1003 const DSNodeHandle &SrcEdge = SN->getLink(i << DS::PointerShift);
1004 if (!SrcEdge.isNull()) {
1005 const DSNodeHandle &DestEdge = getClonedNH(SrcEdge);
1006 // Compute the offset into the current node at which to
1007 // merge this link. In the common case, this is a linear
1008 // relation to the offset in the original node (with
1009 // wrapping), but if the current node gets collapsed due to
1010 // recursive merging, we must make sure to merge in all remaining
1011 // links at offset zero.
1012 unsigned MergeOffset = 0;
1013 DSNode *CN = NH.getNode();
1014 if (CN->getSize() != 1)
1015 MergeOffset = ((i << DS::PointerShift)+NH.getOffset()) % CN->getSize();
1016 CN->addEdgeTo(MergeOffset, DestEdge);
1020 // If this node contains any globals, make sure they end up in the scalar
1021 // map with the correct offset.
1022 for (DSNode::globals_iterator I = SN->globals_begin(), E = SN->globals_end();
1024 GlobalValue *GV = *I;
1025 const DSNodeHandle &SrcGNH = Src.getNodeForValue(GV);
1026 DSNodeHandle &DestGNH = NodeMap[SrcGNH.getNode()];
1027 assert(DestGNH.getNode() == NH.getNode() &&"Global mapping inconsistent");
1028 Dest.getNodeForValue(GV).mergeWith(DSNodeHandle(DestGNH.getNode(),
1029 DestGNH.getOffset()+SrcGNH.getOffset()));
// Propagate the source node's globals list onto the cloned node.
1031 NH.getNode()->mergeGlobals(SN->getGlobalsList());
// Compose the cached clone offset with the requested source offset.
1033 return DSNodeHandle(NH.getNode(), NH.getOffset()+SrcNH.getOffset());
void ReachabilityCloner::merge(const DSNodeHandle &NH,
                               const DSNodeHandle &SrcNH) {
  // Merge the Src-graph node SrcNH refers to into the existing Dest-graph
  // node NH refers to, recording the result in NodeMap so the source node is
  // never cloned twice.  Attempts an in-place merge to avoid allocating a
  // temporary DSNode when possible.
  if (SrcNH.isNull()) return;  // Noop

  // If there is no destination node, just clone the source and assign the
  // destination node to be it.
  NH.mergeWith(getClonedNH(SrcNH));

  // Okay, at this point, we know that we have both a destination and a source
  // node that need to be merged.  Check to see if the source node has already
  const DSNode *SN = SrcNH.getNode();
  DSNodeHandle &SCNH = NodeMap[SN];  // SourceClonedNodeHandle
  if (!SCNH.isNull()) {   // Node already cloned?
    DSNode *SCNHN = SCNH.getNode();
    NH.mergeWith(DSNodeHandle(SCNHN,
                              SCNH.getOffset()+SrcNH.getOffset()));
    return;  // Nothing to do!

  // Okay, so the source node has not already been cloned.  Instead of creating
  // a new DSNode, only to merge it into the one we already have, try to perform
  // the merge in-place.  The only case we cannot handle here is when the offset
  // into the existing node is less than the offset into the virtual node we are
  // merging in.  In this case, we have to extend the existing node, which
  // requires an allocation anyway.
  DSNode *DN = NH.getNode();   // Make sure the Offset is up-to-date
  if (NH.getOffset() >= SrcNH.getOffset()) {
    if (!DN->isNodeCompletelyFolded()) {
      // Make sure the destination node is folded if the source node is folded.
      if (SN->isNodeCompletelyFolded()) {
        DN->foldNodeCompletely();
      } else if (SN->getSize() != DN->getSize()) {
        // If the two nodes are of different size, and the smaller node has the
        // array bit set, collapse!
#if COLLAPSE_ARRAYS_AGGRESSIVELY
        if (SN->getSize() < DN->getSize()) {
          if (SN->isArray()) {
            DN->foldNodeCompletely();
        } else if (DN->isArray()) {
          DN->foldNodeCompletely();

      // Merge the type entries of the two nodes together...
      if (SN->getType() != Type::VoidTy && !DN->isNodeCompletelyFolded()) {
        DN->mergeTypeInfo(SN->getType(), NH.getOffset()-SrcNH.getOffset());

    assert(!DN->isDeadNode());

    // Merge the NodeType information.
    DN->mergeNodeFlags(SN->getNodeFlags() & BitsToKeep);

    // Before we start merging outgoing links and updating the scalar map, make
    // sure it is known that this is the representative node for the src node.
    SCNH = DSNodeHandle(DN, NH.getOffset()-SrcNH.getOffset());

    // If the source node contains any globals, make sure they end up in the
    // scalar map with the correct offset.
    if (SN->globals_begin() != SN->globals_end()) {
      // Update the globals in the destination node itself.
      DN->mergeGlobals(SN->getGlobalsList());

      // Update the scalar map for the graph we are merging the source node
      for (DSNode::globals_iterator I = SN->globals_begin(),
             E = SN->globals_end(); I != E; ++I) {
        GlobalValue *GV = *I;
        const DSNodeHandle &SrcGNH = Src.getNodeForValue(GV);
        DSNodeHandle &DestGNH = NodeMap[SrcGNH.getNode()];
        assert(DestGNH.getNode()==NH.getNode() &&"Global mapping inconsistent");
        Dest.getNodeForValue(GV).mergeWith(DSNodeHandle(DestGNH.getNode(),
                                      DestGNH.getOffset()+SrcGNH.getOffset()));
      NH.getNode()->mergeGlobals(SN->getGlobalsList());
    // We cannot handle this case without allocating a temporary node.  Fall
    // back on being simple.
    DSNode *NewDN = new DSNode(*SN, &Dest, true /* Null out all links */);
    NewDN->maskNodeTypes(BitsToKeep);

    unsigned NHOffset = NH.getOffset();
    NH.mergeWith(DSNodeHandle(NewDN, SrcNH.getOffset()));

    assert(NH.getNode() &&
           (NH.getOffset() > NHOffset ||
            (NH.getOffset() == 0 && NH.getNode()->isNodeCompletelyFolded())) &&
           "Merging did not adjust the offset!");

    // Before we start merging outgoing links and updating the scalar map, make
    // sure it is known that this is the representative node for the src node.
    SCNH = DSNodeHandle(NH.getNode(), NH.getOffset()-SrcNH.getOffset());

    // If the source node contained any globals, make sure to create entries
    // in the scalar map for them!
    for (DSNode::globals_iterator I = SN->globals_begin(),
           E = SN->globals_end(); I != E; ++I) {
      GlobalValue *GV = *I;
      const DSNodeHandle &SrcGNH = Src.getNodeForValue(GV);
      DSNodeHandle &DestGNH = NodeMap[SrcGNH.getNode()];
      assert(DestGNH.getNode()==NH.getNode() &&"Global mapping inconsistent");
      assert(SrcGNH.getNode() == SN && "Global mapping inconsistent");
      Dest.getNodeForValue(GV).mergeWith(DSNodeHandle(DestGNH.getNode(),
                                    DestGNH.getOffset()+SrcGNH.getOffset()));

  // Next, recursively merge all outgoing links as necessary.  Note that
  // adding these links can cause the destination node to collapse itself at
  // any time, and the current node may be merged with arbitrary other nodes.
  // For this reason, we must always go through NH.
  for (unsigned i = 0, e = SN->getNumLinks(); i != e; ++i) {
    const DSNodeHandle &SrcEdge = SN->getLink(i << DS::PointerShift);
    if (!SrcEdge.isNull()) {
      // Compute the offset into the current node at which to
      // merge this link.  In the common case, this is a linear
      // relation to the offset in the original node (with
      // wrapping), but if the current node gets collapsed due to
      // recursive merging, we must make sure to merge in all remaining
      // links at offset zero.
      DSNode *CN = SCNH.getNode();
      unsigned MergeOffset =
        ((i << DS::PointerShift)+SCNH.getOffset()) % CN->getSize();

      DSNodeHandle Tmp = CN->getLink(MergeOffset);
      if (!Tmp.isNull()) {
        // Perform the recursive merging.  Make sure to create a temporary NH,
        // because the Link can disappear in the process of recursive merging.
        merge(Tmp, SrcEdge);
        Tmp.mergeWith(getClonedNH(SrcEdge));
        // Merging this could cause all kinds of recursive things to happen,
        // culminating in the current node being eliminated.  Since this is
        // possible, make sure to reacquire the link from 'CN'.
        unsigned MergeOffset = 0;
        CN = SCNH.getNode();
        MergeOffset = ((i << DS::PointerShift)+SCNH.getOffset()) %CN->getSize();
        CN->getLink(MergeOffset).mergeWith(Tmp);
1193 /// mergeCallSite - Merge the nodes reachable from the specified src call
1194 /// site into the nodes reachable from DestCS.
1195 void ReachabilityCloner::mergeCallSite(DSCallSite &DestCS,
1196 const DSCallSite &SrcCS) {
1197 merge(DestCS.getRetVal(), SrcCS.getRetVal());
1198 unsigned MinArgs = DestCS.getNumPtrArgs();
1199 if (SrcCS.getNumPtrArgs() < MinArgs) MinArgs = SrcCS.getNumPtrArgs();
1201 for (unsigned a = 0; a != MinArgs; ++a)
1202 merge(DestCS.getPtrArg(a), SrcCS.getPtrArg(a));
1204 for (unsigned a = MinArgs, e = SrcCS.getNumPtrArgs(); a != e; ++a)
1205 DestCS.addPtrArg(getClonedNH(SrcCS.getPtrArg(a)));
1209 //===----------------------------------------------------------------------===//
1210 // DSCallSite Implementation
1211 //===----------------------------------------------------------------------===//
1213 // Define here to avoid including iOther.h and BasicBlock.h in DSGraph.h
1214 Function &DSCallSite::getCaller() const {
1215 return *Site.getInstruction()->getParent()->getParent();
1218 void DSCallSite::InitNH(DSNodeHandle &NH, const DSNodeHandle &Src,
1219 ReachabilityCloner &RC) {
1220 NH = RC.getClonedNH(Src);
1223 //===----------------------------------------------------------------------===//
1224 // DSGraph Implementation
1225 //===----------------------------------------------------------------------===//
1227 /// getFunctionNames - Return a space separated list of the name of the
1228 /// functions in this graph (if any)
1229 std::string DSGraph::getFunctionNames() const {
1230 switch (getReturnNodes().size()) {
1231 case 0: return "Globals graph";
1232 case 1: return retnodes_begin()->first->getName();
1235 for (DSGraph::retnodes_iterator I = retnodes_begin();
1236 I != retnodes_end(); ++I)
1237 Return += I->first->getName() + " ";
1238 Return.erase(Return.end()-1, Return.end()); // Remove last space character
// Copy-construct a DSGraph from G, cloning its nodes, calls, and scalar map
// into this graph.  CloneFlags controls what gets stripped during cloning
// (e.g. alloca or mod/ref bits); see cloneInto for details.
DSGraph::DSGraph(const DSGraph &G, EquivalenceClasses<GlobalValue*> &ECs,
                 unsigned CloneFlags)
  : GlobalsGraph(0), ScalarMap(ECs), TD(G.TD) {
  PrintAuxCalls = false;
  cloneInto(G, CloneFlags);
DSGraph::~DSGraph() {
  // Tear down in a safe order: clear the call lists and return-node map
  // first so they no longer hold node handles...
  FunctionCalls.clear();
  AuxFunctionCalls.clear();
  ReturnNodes.clear();

  // Drop all intra-node references, so that assertions don't fail...
  for (node_iterator NI = node_begin(), E = node_end(); NI != E; ++NI)
    NI->dropAllReferences();

  // Free all of the nodes.
1265 // dump - Allow inspection of graph in a debugger.
1266 void DSGraph::dump() const { print(llvm_cerr); }
/// remapLinks - Change all of the Links in the current node according to the
/// specified mapping.  For every outgoing link whose target appears in
/// OldNodeMap, retarget the link to the mapped node, composing the link's
/// offset with the mapped handle's offset.
///
void DSNode::remapLinks(DSGraph::NodeMapTy &OldNodeMap) {
  for (unsigned i = 0, e = Links.size(); i != e; ++i)
    if (DSNode *N = Links[i].getNode()) {
      DSGraph::NodeMapTy::const_iterator ONMI = OldNodeMap.find(N);
      if (ONMI != OldNodeMap.end()) {
        DSNode *ONMIN = ONMI->second.getNode();
        Links[i].setTo(ONMIN, Links[i].getOffset()+ONMI->second.getOffset());
/// addObjectToGraph - This method can be used to add global, stack, and heap
/// objects to the graph.  This can be used when updating DSGraphs due to the
/// introduction of new temporary objects.  The new object is not pointed to
/// and does not point to any other objects in the graph.
DSNode *DSGraph::addObjectToGraph(Value *Ptr, bool UseDeclaredType) {
  assert(isa<PointerType>(Ptr->getType()) && "Ptr is not a pointer!");
  const Type *Ty = cast<PointerType>(Ptr->getType())->getElementType();
  // Create a fresh node, typed by the pointee type only when requested.
  DSNode *N = new DSNode(UseDeclaredType ? Ty : 0, this);
  assert(ScalarMap[Ptr].isNull() && "Object already in this graph!");

  // Mark the node according to the kind of value that produced the object.
  if (GlobalValue *GV = dyn_cast<GlobalValue>(Ptr)) {
  } else if (isa<MallocInst>(Ptr)) {
    N->setHeapNodeMarker();
  } else if (isa<AllocaInst>(Ptr)) {
    N->setAllocaNodeMarker();
    assert(0 && "Illegal memory object input!");
/// cloneInto - Clone the specified DSGraph into the current graph.  The
/// translated ScalarMap for the old function is filled into the ScalarMap
/// for the graph, and the translated ReturnNodes map is returned into
/// The CloneFlags member controls various aspects of the cloning process.
///
void DSGraph::cloneInto(const DSGraph &G, unsigned CloneFlags) {
  TIME_REGION(X, "cloneInto");
  assert(&G != this && "Cannot clone graph into itself!");

  NodeMapTy OldNodeMap;

  // Remove alloca or mod/ref bits as specified...
  unsigned BitsToClear = ((CloneFlags & StripAllocaBit)? DSNode::AllocaNode : 0)
    | ((CloneFlags & StripModRefBits)? (DSNode::Modified | DSNode::Read) : 0)
    | ((CloneFlags & StripIncompleteBit)? DSNode::Incomplete : 0);
  BitsToClear |= DSNode::DEAD;  // Clear dead flag...

  // First pass: copy every node of G into this graph, recording the
  // old-node -> new-node correspondence in OldNodeMap.
  for (node_const_iterator I = G.node_begin(), E = G.node_end(); I != E; ++I) {
    assert(!I->isForwarding() &&
           "Forward nodes shouldn't be in node list!");
    DSNode *New = new DSNode(*I, this);
    New->maskNodeTypes(~BitsToClear);
    OldNodeMap[I] = New;

  Timer::addPeakMemoryMeasurement();

  // Rewrite the links in the new nodes to point into the current graph now.
  // Note that we don't loop over the node's list to do this.  The problem is
  // that remapping links can cause recursive merging to happen, which means
  // that node_iterator's can get easily invalidated!  Because of this, we
  // loop over the OldNodeMap, which contains all of the new nodes as the
  // .second element of the map elements.  Also note that if we remap a node
  // more than once, we won't break anything.
  for (NodeMapTy::iterator I = OldNodeMap.begin(), E = OldNodeMap.end();
    I->second.getNode()->remapLinks(OldNodeMap);

  // Copy the scalar map... merging all of the global nodes...
  for (DSScalarMap::const_iterator I = G.ScalarMap.begin(),
         E = G.ScalarMap.end(); I != E; ++I) {
    DSNodeHandle &MappedNode = OldNodeMap[I->second.getNode()];
    DSNodeHandle &H = ScalarMap.getRawEntryRef(I->first);
    DSNode *MappedNodeN = MappedNode.getNode();
    H.mergeWith(DSNodeHandle(MappedNodeN,
                             I->second.getOffset()+MappedNode.getOffset()));

  if (!(CloneFlags & DontCloneCallNodes)) {
    // Copy the function calls list.
    for (fc_iterator I = G.fc_begin(), E = G.fc_end(); I != E; ++I)
      FunctionCalls.push_back(DSCallSite(*I, OldNodeMap));

  if (!(CloneFlags & DontCloneAuxCallNodes)) {
    // Copy the auxiliary function calls list.
    for (afc_iterator I = G.afc_begin(), E = G.afc_end(); I != E; ++I)
      AuxFunctionCalls.push_back(DSCallSite(*I, OldNodeMap));

  // Map the return node pointers over...
  for (retnodes_iterator I = G.retnodes_begin(),
         E = G.retnodes_end(); I != E; ++I) {
    const DSNodeHandle &Ret = I->second;
    DSNodeHandle &MappedRet = OldNodeMap[Ret.getNode()];
    DSNode *MappedRetN = MappedRet.getNode();
    ReturnNodes.insert(std::make_pair(I->first,
                                      DSNodeHandle(MappedRetN,
                                     MappedRet.getOffset()+Ret.getOffset())));
/// spliceFrom - Logically perform the operation of cloning the RHS graph into
/// this graph, then clearing the RHS graph.  Instead of performing this as
/// two separate operations, do it as a single, much faster, one.
///
void DSGraph::spliceFrom(DSGraph &RHS) {
  // Change all of the nodes in RHS to think we are their parent.
  for (NodeListTy::iterator I = RHS.Nodes.begin(), E = RHS.Nodes.end();
    I->setParentGraph(this);
  // Take all of the nodes.  splice is constant time and leaves RHS empty.
  Nodes.splice(Nodes.end(), RHS.Nodes);

  // Take all of the calls.
  FunctionCalls.splice(FunctionCalls.end(), RHS.FunctionCalls);
  AuxFunctionCalls.splice(AuxFunctionCalls.end(), RHS.AuxFunctionCalls);

  // Take all of the return nodes.
  if (ReturnNodes.empty()) {
    // Nothing here yet: steal RHS's map wholesale with an O(1) swap.
    ReturnNodes.swap(RHS.ReturnNodes);
    ReturnNodes.insert(RHS.ReturnNodes.begin(), RHS.ReturnNodes.end());
    RHS.ReturnNodes.clear();

  // Merge the scalar map in.
  ScalarMap.spliceFrom(RHS.ScalarMap);
/// spliceFrom - Copy all entries from RHS, then clear RHS.
///
void DSScalarMap::spliceFrom(DSScalarMap &RHS) {
  // Special case if this is empty.
  if (ValueMap.empty()) {
    // Steal RHS's tables wholesale with O(1) swaps.
    ValueMap.swap(RHS.ValueMap);
    GlobalSet.swap(RHS.GlobalSet);
    // Non-empty: merge entry-by-entry, then empty RHS.
    GlobalSet.insert(RHS.GlobalSet.begin(), RHS.GlobalSet.end());
    for (ValueMapTy::iterator I = RHS.ValueMap.begin(), E = RHS.ValueMap.end();
      ValueMap[I->first].mergeWith(I->second);
    RHS.ValueMap.clear();
1428 /// getFunctionArgumentsForCall - Given a function that is currently in this
1429 /// graph, return the DSNodeHandles that correspond to the pointer-compatible
1430 /// function arguments. The vector is filled in with the return value (or
1431 /// null if it is not pointer compatible), followed by all of the
1432 /// pointer-compatible arguments.
1433 void DSGraph::getFunctionArgumentsForCall(Function *F,
1434 std::vector<DSNodeHandle> &Args) const {
1435 Args.push_back(getReturnNodeFor(*F));
1436 for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
1438 if (isPointerType(AI->getType())) {
1439 Args.push_back(getNodeForValue(AI));
1440 assert(!Args.back().isNull() && "Pointer argument w/o scalarmap entry!?");
// HackedGraphSCCFinder - This is used to find nodes that have a path from the
// node to a node cloned by the ReachabilityCloner object contained.  To be
// extra obnoxious it ignores edges from nodes that are globals, and truncates
// search at RC marked nodes.  This is designed as an object so that
// intermediate results can be memoized across invocations of
// PathExistsToClonedNode.
struct HackedGraphSCCFinder {
  ReachabilityCloner &RC;       // Cloner whose cloned-node set bounds the search.
  std::vector<const DSNode*> SCCStack;  // Stack of nodes in the in-progress SCCs.
  // Maps each visited node to (visit id, reaches-a-cloned-node flag).
  std::map<const DSNode*, std::pair<unsigned, bool> > NodeInfo;

  HackedGraphSCCFinder(ReachabilityCloner &rc) : RC(rc), CurNodeId(1) {
    // Remove null pointer as a special case.
    NodeInfo[0] = std::make_pair(0, false);

  std::pair<unsigned, bool> &VisitForSCCs(const DSNode *N);

  // PathExistsToClonedNode - True if some path from N (not passing through a
  // global node) reaches a node already cloned by RC.
  bool PathExistsToClonedNode(const DSNode *N) {
    return VisitForSCCs(N).second;

  // PathExistsToClonedNode - True if the call site's return value or any of
  // its pointer arguments can reach a cloned node.
  bool PathExistsToClonedNode(const DSCallSite &CS) {
    if (PathExistsToClonedNode(CS.getRetVal().getNode()))
    for (unsigned i = 0, e = CS.getNumPtrArgs(); i != e; ++i)
      if (PathExistsToClonedNode(CS.getPtrArg(i).getNode()))
// VisitForSCCs - Depth-first visit of N computing whether it can reach a node
// cloned by RC.  Uses an explicit SCC stack and per-node visit ids so that
// every member of a cycle gets the same answer; results are memoized in
// NodeInfo.
std::pair<unsigned, bool> &HackedGraphSCCFinder::
VisitForSCCs(const DSNode *N) {
  std::map<const DSNode*, std::pair<unsigned, bool> >::iterator
    NodeInfoIt = NodeInfo.lower_bound(N);
  // Already visited (or in progress)?  Return the memoized entry.
  if (NodeInfoIt != NodeInfo.end() && NodeInfoIt->first == N)
    return NodeInfoIt->second;

  unsigned Min = CurNodeId++;
  unsigned MyId = Min;
  std::pair<unsigned, bool> &ThisNodeInfo =
    NodeInfo.insert(NodeInfoIt,
                    std::make_pair(N, std::make_pair(MyId, false)))->second;

  // Base case: if we find a global, this doesn't reach the cloned graph
  if (N->isGlobalNode()) {
    ThisNodeInfo.second = false;
    return ThisNodeInfo;

  // Base case: if this does reach the cloned graph portion... it does. :)
  if (RC.hasClonedNode(N)) {
    ThisNodeInfo.second = true;
    return ThisNodeInfo;

  SCCStack.push_back(N);

  // Otherwise, check all successors.
  bool AnyDirectSuccessorsReachClonedNodes = false;
  for (DSNode::const_edge_iterator EI = N->edge_begin(), EE = N->edge_end();
    if (DSNode *Succ = EI->getNode()) {
      std::pair<unsigned, bool> &SuccInfo = VisitForSCCs(Succ);
      if (SuccInfo.first < Min) Min = SuccInfo.first;
      AnyDirectSuccessorsReachClonedNodes |= SuccInfo.second;

    return ThisNodeInfo;  // Part of a large SCC.  Leave self on stack.

  if (SCCStack.back() == N) {  // Special case single node SCC.
    SCCStack.pop_back();
    ThisNodeInfo.second = AnyDirectSuccessorsReachClonedNodes;
    return ThisNodeInfo;

  // Find out if any direct successors of any node reach cloned nodes.
  if (!AnyDirectSuccessorsReachClonedNodes)
    for (unsigned i = SCCStack.size()-1; SCCStack[i] != N; --i)
      for (DSNode::const_edge_iterator EI = N->edge_begin(), EE = N->edge_end();
        if (DSNode *N = EI->getNode())
          if (NodeInfo[N].second) {
            AnyDirectSuccessorsReachClonedNodes = true;

  // If any successor reaches a cloned node, mark all nodes in this SCC as
  // reaching the cloned node.
  if (AnyDirectSuccessorsReachClonedNodes)
    while (SCCStack.back() != N) {
      NodeInfo[SCCStack.back()].second = true;
      SCCStack.pop_back();
  SCCStack.pop_back();
  ThisNodeInfo.second = true;
  return ThisNodeInfo;
/// mergeInCallFromOtherGraph - This graph merges in the minimal number of
/// nodes from G2 into 'this' graph, merging the bindings specified by the
/// call site (in this graph) with the bindings specified by the vector in G2.
/// The two DSGraphs must be different.
///
void DSGraph::mergeInGraph(const DSCallSite &CS,
                           std::vector<DSNodeHandle> &Args,
                           const DSGraph &Graph, unsigned CloneFlags) {
  TIME_REGION(X, "mergeInGraph");

  assert((CloneFlags & DontCloneCallNodes) &&
         "Doesn't support copying of call nodes!");

  // If this is not a recursive call, clone the graph into this graph...
  if (&Graph == this) {
    // Self-recursive call: no cloning needed, just merge bindings in place.
    // Merge the return value with the return value of the context.
    Args[0].mergeWith(CS.getRetVal());

    // Resolve all of the function arguments.
    for (unsigned i = 0, e = CS.getNumPtrArgs(); i != e; ++i) {
      // Stop if the callee has fewer pointer arguments than the call site.
      if (i == Args.size()-1)
      // Add the link from the argument scalar to the provided value.
      Args[i+1].mergeWith(CS.getPtrArg(i));

  // Clone the callee's graph into the current graph, keeping track of where
  // scalars in the old graph _used_ to point, and of the new nodes matching
  // nodes of the old graph.
  ReachabilityCloner RC(*this, Graph, CloneFlags);

  // Map the return node pointer over.
  if (!CS.getRetVal().isNull())
    RC.merge(CS.getRetVal(), Args[0]);

  // Map over all of the arguments.
  for (unsigned i = 0, e = CS.getNumPtrArgs(); i != e; ++i) {
    if (i == Args.size()-1)
    // Add the link from the argument scalar to the provided value.
    RC.merge(CS.getPtrArg(i), Args[i+1]);

  // We generally don't want to copy global nodes or aux calls from the callee
  // graph to the caller graph.  However, we have to copy them if there is a
  // path from the node to a node we have already copied which does not go
  // through another global.  Compute the set of node that can reach globals and
  // aux call nodes to copy over, then do it.
  std::vector<const DSCallSite*> AuxCallToCopy;
  std::vector<GlobalValue*> GlobalsToCopy;

  // NodesReachCopiedNodes - Memoize results for efficiency.  Contains a
  // true/false value for every visited node that reaches a copied node without
  // going through a global.
  HackedGraphSCCFinder SCCFinder(RC);

  if (!(CloneFlags & DontCloneAuxCallNodes))
    for (afc_iterator I = Graph.afc_begin(), E = Graph.afc_end(); I!=E; ++I)
      if (SCCFinder.PathExistsToClonedNode(*I))
        AuxCallToCopy.push_back(&*I);
//       else if (I->isIndirectCall()){
//  //If the call node doesn't have any callees, clone it
//  std::vector< Function *> List;
//  I->getCalleeNode()->addFullFunctionList(List);
//  if (!List.size())
//    AuxCallToCopy.push_back(&*I);

  const DSScalarMap &GSM = Graph.getScalarMap();
  for (DSScalarMap::global_iterator GI = GSM.global_begin(),
         E = GSM.global_end(); GI != E; ++GI) {
    DSNode *GlobalNode = Graph.getNodeForValue(*GI).getNode();
    for (DSNode::edge_iterator EI = GlobalNode->edge_begin(),
           EE = GlobalNode->edge_end(); EI != EE; ++EI)
      if (SCCFinder.PathExistsToClonedNode(EI->getNode())) {
        GlobalsToCopy.push_back(*GI);

  // Copy aux calls that are needed.
  for (unsigned i = 0, e = AuxCallToCopy.size(); i != e; ++i)
    AuxFunctionCalls.push_back(DSCallSite(*AuxCallToCopy[i], RC));

  // Copy globals that are needed.
  for (unsigned i = 0, e = GlobalsToCopy.size(); i != e; ++i)
    RC.getClonedNH(Graph.getNodeForValue(GlobalsToCopy[i]));
1644 /// mergeInGraph - The method is used for merging graphs together. If the
1645 /// argument graph is not *this, it makes a clone of the specified graph, then
1646 /// merges the nodes specified in the call site with the formal arguments in the
1649 void DSGraph::mergeInGraph(const DSCallSite &CS, Function &F,
1650 const DSGraph &Graph, unsigned CloneFlags) {
1651 // Set up argument bindings.
1652 std::vector<DSNodeHandle> Args;
1653 Graph.getFunctionArgumentsForCall(&F, Args);
1655 mergeInGraph(CS, Args, Graph, CloneFlags);
1658 /// getCallSiteForArguments - Get the arguments and return value bindings for
1659 /// the specified function in the current graph.
1661 DSCallSite DSGraph::getCallSiteForArguments(Function &F) const {
1662 std::vector<DSNodeHandle> Args;
1664 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I)
1665 if (isPointerType(I->getType()))
1666 Args.push_back(getNodeForValue(I));
1668 return DSCallSite(CallSite(), getReturnNodeFor(F), &F, Args);
/// getDSCallSiteForCallSite - Given an LLVM CallSite object that is live in
/// the context of this graph, return the DSCallSite for it.
DSCallSite DSGraph::getDSCallSiteForCallSite(CallSite CS) const {
  // Bind the return value only if the call produces a pointer.
  DSNodeHandle RetVal;
  Instruction *I = CS.getInstruction();
  if (isPointerType(I->getType()))
    RetVal = getNodeForValue(I);

  std::vector<DSNodeHandle> Args;
  Args.reserve(CS.arg_end()-CS.arg_begin());

  // Calculate the arguments vector...
  // Null pointer constants get an empty handle; other pointers get their
  // scalar-map node.
  for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end(); I != E; ++I)
    if (isPointerType((*I)->getType()))
      if (isa<ConstantPointerNull>(*I))
        Args.push_back(DSNodeHandle());
        Args.push_back(getNodeForValue(*I));

  // Add a new function call entry...
  // Direct calls record the callee Function; indirect calls record the node
  // of the called value.
  if (Function *F = CS.getCalledFunction())
    return DSCallSite(CS, RetVal, F, Args);
    return DSCallSite(CS, RetVal,
                      getNodeForValue(CS.getCalledValue()).getNode(), Args);
1700 // markIncompleteNodes - Mark the specified node as having contents that are not
1701 // known with the current analysis we have performed. Because a node makes all
1702 // of the nodes it can reach incomplete if the node itself is incomplete, we
1703 // must recursively traverse the data structure graph, marking all reachable
1704 // nodes as incomplete.
1706 static void markIncompleteNode(DSNode *N) {
1707 // Stop recursion if no node, or if node already marked...
1708 if (N == 0 || N->isIncomplete()) return;
1710 // Actually mark the node
1711 N->setIncompleteMarker();
1713 // Recursively process children...
1714 for (DSNode::edge_iterator I = N->edge_begin(),E = N->edge_end(); I != E; ++I)
1715 if (DSNode *DSN = I->getNode())
1716 markIncompleteNode(DSN);
1719 static void markIncomplete(DSCallSite &Call) {
1720 // Then the return value is certainly incomplete!
1721 markIncompleteNode(Call.getRetVal().getNode());
1723 // All objects pointed to by function arguments are incomplete!
1724 for (unsigned i = 0, e = Call.getNumPtrArgs(); i != e; ++i)
1725 markIncompleteNode(Call.getPtrArg(i).getNode());
1728 // markIncompleteNodes - Traverse the graph, identifying nodes that may be
1729 // modified by other functions that have not been resolved yet. This marks
1730 // nodes that are reachable through three sources of "unknownness":
1732 // Global Variables, Function Calls, and Incoming Arguments
1734 // For any node that may have unknown components (because something outside the
1735 // scope of current analysis may have modified it), the 'Incomplete' flag is
1736 // added to the NodeType.
// markIncompleteNodes - Walk the graph and mark as incomplete every node
// reachable from formal arguments (when MarkFormalArgs is set), from
// unresolved call sites, and from non-constant or external globals (unless
// IgnoreGlobals is set).
void DSGraph::markIncompleteNodes(unsigned Flags) {
  // Mark any incoming arguments as incomplete.
  if (Flags & DSGraph::MarkFormalArgs)
    for (ReturnNodesTy::iterator FI = ReturnNodes.begin(), E =ReturnNodes.end();
      Function &F = *FI->first;
      for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end();
        if (isPointerType(I->getType()))
          markIncompleteNode(getNodeForValue(I).getNode());
      markIncompleteNode(FI->second.getNode());

  // Mark stuff passed into functions calls as being incomplete.
  if (!shouldPrintAuxCalls())
    for (std::list<DSCallSite>::iterator I = FunctionCalls.begin(),
           E = FunctionCalls.end(); I != E; ++I)
  for (std::list<DSCallSite>::iterator I = AuxFunctionCalls.begin(),
         E = AuxFunctionCalls.end(); I != E; ++I)

  // Mark all global nodes as incomplete.
  for (DSScalarMap::global_iterator I = ScalarMap.global_begin(),
         E = ScalarMap.global_end(); I != E; ++I)
    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(*I))
      if (!GV->hasInitializer() ||    // Always mark external globals incomp.
          (!GV->isConstant() && (Flags & DSGraph::IgnoreGlobals) == 0))
        markIncompleteNode(ScalarMap[GV].getNode());
1770 static inline void killIfUselessEdge(DSNodeHandle &Edge) {
1771 if (DSNode *N = Edge.getNode()) // Is there an edge?
1772 if (N->getNumReferrers() == 1) // Does it point to a lonely node?
1773 // No interesting info?
1774 if ((N->getNodeFlags() & ~DSNode::Incomplete) == 0 &&
1775 N->getType() == Type::VoidTy && !N->isNodeCompletelyFolded())
1776 Edge.setTo(0, 0); // Kill the edge!
1779 static inline bool nodeContainsExternalFunction(const DSNode *N) {
1780 std::vector<Function*> Funcs;
1781 N->addFullFunctionList(Funcs);
1782 for (unsigned i = 0, e = Funcs.size(); i != e; ++i)
1783 if (Funcs[i]->isExternal()) return true;
// removeIdenticalCalls - Simplify the call list: drop unreachable call sites,
// merge call sites that can never be resolved separately (same callee that is
// or contains an external function, or too many duplicates), and finally
// remove exact duplicates from the sorted list.
static void removeIdenticalCalls(std::list<DSCallSite> &Calls) {
  // Remove trivially identical function calls
  Calls.sort();  // Sort by callee as primary key!

  // Scan the call list cleaning it up as necessary...
  DSNodeHandle LastCalleeNode;
  Function *LastCalleeFunc = 0;
  unsigned NumDuplicateCalls = 0;
  bool LastCalleeContainsExternalFunction = false;

  unsigned NumDeleted = 0;
  for (std::list<DSCallSite>::iterator I = Calls.begin(), E = Calls.end();
    DSCallSite &CS = *I;
    // Advance I now so the current entry can be erased safely.
    std::list<DSCallSite>::iterator OldIt = I++;

    if (!CS.isIndirectCall()) {
    DSNode *Callee = CS.getCalleeNode();

    // If the Callee is a useless edge, this must be an unreachable call site,
    if (Callee->getNumReferrers() == 1 && Callee->isComplete() &&
        Callee->getGlobalsList().empty()) {  // No useful info?
      DOUT << "WARNING: Useless call site found.\n";

    // If the last call site in the list has the same callee as this one, and
    // if the callee contains an external function, it will never be
    // resolvable, just merge the call sites.
    if (!LastCalleeNode.isNull() && LastCalleeNode.getNode() == Callee) {
      LastCalleeContainsExternalFunction =
        nodeContainsExternalFunction(Callee);

      std::list<DSCallSite>::iterator PrevIt = OldIt;
      PrevIt->mergeWith(CS);

      // No need to keep this call anymore.
      LastCalleeNode = Callee;

    // If the return value or any arguments point to a void node with no
    // information at all in it, and the call node is the only node to point
    // to it, remove the edge to the node (killing the node).
    killIfUselessEdge(CS.getRetVal());
    for (unsigned a = 0, e = CS.getNumPtrArgs(); a != e; ++a)
      killIfUselessEdge(CS.getPtrArg(a));

    // If this call site calls the same function as the last call site, and if
    // the function pointer contains an external function, this node will
    // never be resolved.  Merge the arguments of the call node because no
    // information will be lost.
    if ((CS.isDirectCall()   && CS.getCalleeFunc() == LastCalleeFunc) ||
        (CS.isIndirectCall() && CS.getCalleeNode() == LastCalleeNode)) {
      ++NumDuplicateCalls;
      if (NumDuplicateCalls == 1) {
        // First duplicate: compute whether the callee is/contains external.
        LastCalleeContainsExternalFunction =
          nodeContainsExternalFunction(LastCalleeNode);
        LastCalleeContainsExternalFunction = LastCalleeFunc->isExternal();

      // It is not clear why, but enabling this code makes DSA really
      // sensitive to node forwarding.  Basically, with this enabled, DSA
      // performs different number of inlinings based on which nodes are
      // forwarding or not.  This is clearly a problem, so this code is
      // disabled until this can be resolved.
      if (LastCalleeContainsExternalFunction
          // This should be more than enough context sensitivity!
          // FIXME: Evaluate how many times this is tripped!
          NumDuplicateCalls > 20

        std::list<DSCallSite>::iterator PrevIt = OldIt;
        PrevIt->mergeWith(CS);

        // No need to keep this call anymore.

      if (CS.isDirectCall()) {
        LastCalleeFunc = CS.getCalleeFunc();
        LastCalleeNode = CS.getCalleeNode();
      NumDuplicateCalls = 0;

    if (I != Calls.end() && CS == *I) {

  // Resort now that we simplified things.

  // Now that we are in sorted order, eliminate duplicates.
  std::list<DSCallSite>::iterator CI = Calls.begin(), CE = Calls.end();
    std::list<DSCallSite>::iterator OldIt = CI++;
    if (CI == CE) break;

    // If this call site is now the same as the previous one, we can delete it
    if (*OldIt == *CI) {

  //Calls.erase(std::unique(Calls.begin(), Calls.end()), Calls.end());

  // Track the number of call nodes merged away...
  NumCallNodesMerged += NumDeleted;

  DOUT << "Merged " << NumDeleted << " call nodes.\n";
1939 // removeTriviallyDeadNodes - After the graph has been constructed, this method
1940 // removes all unreachable nodes that are created because they got merged with
1941 // other nodes in the graph. These nodes will all be trivially unreachable, so
1942 // we don't have to perform any non-trivial analysis here.
1944 void DSGraph::removeTriviallyDeadNodes() {
// Purpose: cheaply prune nodes that became dead as a mere by-product of
// merging — forwarded nodes, unreferenced global stubs, and flag-free nodes
// with no referrers.  No reachability analysis happens here; that is
// removeDeadNodes()'s job.
// NOTE(review): the listing below elides some original source lines (the
// embedded line numbers are discontinuous), so several closing braces and
// loop-body statements are not visible in this view.
1945 TIME_REGION(X, "removeTriviallyDeadNodes");
1948 /// NOTE: This code is disabled. This slows down DSA on 177.mesa
1951 // Loop over all of the nodes in the graph, calling getNode on each field.
1952 // This will cause all nodes to update their forwarding edges, causing
1953 // forwarded nodes to be delete-able.
1954 { TIME_REGION(X, "removeTriviallyDeadNodes:node_iterate");
1955 for (node_iterator NI = node_begin(), E = node_end(); NI != E; ++NI) {
// Touching every link via getNode() snaps forwarding references, so the
// forwarded-from nodes lose their last referrers and become removable.
1957 for (unsigned l = 0, e = N.getNumLinks(); l != e; ++l)
1958 N.getLink(l*N.getPointerSize()).getNode();
1962 // NOTE: This code is disabled. Though it should, in theory, allow us to
1963 // remove more nodes down below, the scan of the scalar map is incredibly
1964 // expensive for certain programs (with large SCCs). In the future, if we can
1965 // make the scalar map scan more efficient, then we can reenable this.
1966 { TIME_REGION(X, "removeTriviallyDeadNodes:scalarmap");
1968 // Likewise, forward any edges from the scalar nodes. While we are at it,
1969 // clean house a bit.
1970 for (DSScalarMap::iterator I = ScalarMap.begin(),E = ScalarMap.end();I != E;){
1971 I->second.getNode();
// The globals graph is the one graph whose GlobalsGraph pointer is null;
// remember that so its global nodes are exempt from pruning below.
1976 bool isGlobalsGraph = !GlobalsGraph;
1978 for (NodeListTy::iterator NI = Nodes.begin(), E = Nodes.end(); NI != E; ) {
1981 // Do not remove *any* global nodes in the globals graph.
1982 // This is a special case because such nodes may not have I, M, R flags set.
1983 if (Node.isGlobalNode() && isGlobalsGraph) {
1988 if (Node.isComplete() && !Node.isModified() && !Node.isRead()) {
1989 // This is a useless node if it has no mod/ref info (checked above),
1990 // outgoing edges (which it cannot, as it is not modified in this
1991 // context), and it has no incoming edges. If it is a global node it may
1992 // have all of these properties and still have incoming edges, due to the
1993 // scalar map, so we check those now.
1995 if (Node.getNumReferrers() == Node.getGlobalsList().size()) {
1996 const std::vector<GlobalValue*> &Globals = Node.getGlobalsList();
1998 // Loop through and make sure all of the globals are referring directly
2000 for (unsigned j = 0, e = Globals.size(); j != e; ++j) {
2001 DSNode *N = getNodeForValue(Globals[j]).getNode();
2002 assert(N == &Node && "ScalarMap doesn't match globals list!");
2005 // Make sure NumReferrers still agrees, if so, the node is truly dead.
2006 if (Node.getNumReferrers() == Globals.size()) {
// The scalar-map entries are the node's only referrers: erase them,
// then kill the node and account for it.
2007 for (unsigned j = 0, e = Globals.size(); j != e; ++j)
2008 ScalarMap.erase(Globals[j]);
2009 Node.makeNodeDead();
2010 ++NumTrivialGlobalDNE;
2015 if (Node.getNodeFlags() == 0 && Node.hasNoReferrers()) {
2016 // This node is dead!
2017 NI = Nodes.erase(NI); // Erase & remove from node list.
// Finally canonicalize both call lists, merging duplicate call sites.
2024 removeIdenticalCalls(FunctionCalls);
2025 removeIdenticalCalls(AuxFunctionCalls);
2029 /// markReachableNodes - This method recursively traverses the specified
2030 /// DSNodes, marking any nodes which are reachable. All reachable nodes it adds
2031 /// to the set, which allows it to only traverse visited nodes once.
2033 void DSNode::markReachableNodes(hash_set<const DSNode*> &ReachableNodes) const {
2034 if (this == 0) return;
2035 assert(getForwardNode() == 0 && "Cannot mark a forwarded node!");
2036 if (ReachableNodes.insert(this).second) // Is newly reachable?
2037 for (DSNode::const_edge_iterator I = edge_begin(), E = edge_end();
2039 I->getNode()->markReachableNodes(ReachableNodes);
2042 void DSCallSite::markReachableNodes(hash_set<const DSNode*> &Nodes) const {
2043 getRetVal().getNode()->markReachableNodes(Nodes);
2044 if (isIndirectCall()) getCalleeNode()->markReachableNodes(Nodes);
2046 for (unsigned i = 0, e = getNumPtrArgs(); i != e; ++i)
2047 getPtrArg(i).getNode()->markReachableNodes(Nodes);
2050 // CanReachAliveNodes - Simple graph walker that recursively traverses the graph
2051 // looking for a node that is marked alive. If an alive node is found, return
2052 // true, otherwise return false. If an alive node is reachable, this node is
2053 // marked as alive...
2055 static bool CanReachAliveNodes(DSNode *N, hash_set<const DSNode*> &Alive,
2056 hash_set<const DSNode*> &Visited,
2057 bool IgnoreGlobals) {
2058 if (N == 0) return false;
2059 assert(N->getForwardNode() == 0 && "Cannot mark a forwarded node!");
2061 // If this is a global node, it will end up in the globals graph anyway, so we
2062 // don't need to worry about it.
2063 if (IgnoreGlobals && N->isGlobalNode()) return false;
2065 // If we know that this node is alive, return so!
2066 if (Alive.count(N)) return true;
2068 // Otherwise, we don't think the node is alive yet, check for infinite
2070 if (Visited.count(N)) return false; // Found a cycle
2071 Visited.insert(N); // No recursion, insert into Visited...
2073 for (DSNode::edge_iterator I = N->edge_begin(),E = N->edge_end(); I != E; ++I)
2074 if (CanReachAliveNodes(I->getNode(), Alive, Visited, IgnoreGlobals)) {
2075 N->markReachableNodes(Alive);
2081 // CallSiteUsesAliveArgs - Return true if the specified call site can reach any
// alive nodes (marking everything it reaches alive as a side effect).
2084 static bool CallSiteUsesAliveArgs(const DSCallSite &CS,
2085 hash_set<const DSNode*> &Alive,
2086 hash_set<const DSNode*> &Visited,
2087 bool IgnoreGlobals) {
2088 if (CanReachAliveNodes(CS.getRetVal().getNode(), Alive, Visited,
2091 if (CS.isIndirectCall() &&
2092 CanReachAliveNodes(CS.getCalleeNode(), Alive, Visited, IgnoreGlobals))
2094 for (unsigned i = 0, e = CS.getNumPtrArgs(); i != e; ++i)
2095 if (CanReachAliveNodes(CS.getPtrArg(i).getNode(), Alive, Visited,
2101 // removeDeadNodes - Use a more powerful reachability analysis to eliminate
2102 // subgraphs that are unreachable. This often occurs because the data
2103 // structure doesn't "escape" into its caller, and thus should be eliminated
2104 // from the caller's graph entirely. This is only appropriate to use when
// inlining graphs.
2107 void DSGraph::removeDeadNodes(unsigned Flags) {
// Flags is a bitmask; the only bit consulted here is
// DSGraph::RemoveUnreachableGlobals (final-pass cleanup of globals).
// NOTE(review): the listing below elides some original source lines (the
// embedded line numbers are discontinuous); closing braces and a few
// statements are missing from this view.
2108 DEBUG(AssertGraphOK(); if (GlobalsGraph) GlobalsGraph->AssertGraphOK());
2110 // Reduce the amount of work we have to do... remove dummy nodes left over by
2112 removeTriviallyDeadNodes();
2114 TIME_REGION(X, "removeDeadNodes");
2116 // FIXME: Merge non-trivially identical call nodes...
2118 // Alive - a set that holds all nodes found to be reachable/alive.
2119 hash_set<const DSNode*> Alive;
// (global value, node) pairs; pruned below until only unreachable globals
// remain in the list.
2120 std::vector<std::pair<Value*, DSNode*> > GlobalNodes;
2122 // Copy and merge all information about globals to the GlobalsGraph if this is
2123 // not a final pass (where unreachable globals are removed).
2125 // Strip all alloca bits since the current function is only for the BU pass.
2126 // Strip all incomplete bits since they are short-lived properties and they
2127 // will be correctly computed when rematerializing nodes into the functions.
2129 ReachabilityCloner GGCloner(*GlobalsGraph, *this, DSGraph::StripAllocaBit |
2130 DSGraph::StripIncompleteBit);
2132 // Mark all nodes reachable by (non-global) scalar nodes as alive...
2133 { TIME_REGION(Y, "removeDeadNodes:scalarscan");
2134 for (DSScalarMap::iterator I = ScalarMap.begin(), E = ScalarMap.end();
2136 if (isa<GlobalValue>(I->first)) { // Keep track of global nodes
2137 assert(!I->second.isNull() && "Null global node?");
2138 assert(I->second.getNode()->isGlobalNode() && "Should be a global node!");
2139 GlobalNodes.push_back(std::make_pair(I->first, I->second.getNode()));
2141 // Make sure that all globals are cloned over as roots.
2142 if (!(Flags & DSGraph::RemoveUnreachableGlobals) && GlobalsGraph) {
2143 DSGraph::ScalarMapTy::iterator SMI =
2144 GlobalsGraph->getScalarMap().find(I->first);
2145 if (SMI != GlobalsGraph->getScalarMap().end())
2146 GGCloner.merge(SMI->second, I->second);
2148 GGCloner.getClonedNH(I->second);
2151 I->second.getNode()->markReachableNodes(Alive);
2155 // The return values are alive as well.
2156 for (ReturnNodesTy::iterator I = ReturnNodes.begin(), E = ReturnNodes.end();
2158 I->second.getNode()->markReachableNodes(Alive);
2160 // Mark any nodes reachable by primary calls as alive...
2161 for (fc_iterator I = fc_begin(), E = fc_end(); I != E; ++I)
2162 I->markReachableNodes(Alive);
2165 // Now find globals and aux call nodes that are already live or reach a live
2166 // value (which makes them live in turn), and continue till no more are found.
2169 hash_set<const DSNode*> Visited;
2170 hash_set<const DSCallSite*> AuxFCallsAlive;
2173 // If any global node points to a non-global that is "alive", the global is
2174 // "alive" as well... Remove it from the GlobalNodes list so we only have
2175 // unreachable globals in the list.
2178 if (!(Flags & DSGraph::RemoveUnreachableGlobals))
2179 for (unsigned i = 0; i != GlobalNodes.size(); ++i)
2180 if (CanReachAliveNodes(GlobalNodes[i].second, Alive, Visited,
2181 Flags & DSGraph::RemoveUnreachableGlobals)) {
2182 std::swap(GlobalNodes[i--], GlobalNodes.back()); // Move to end to...
2183 GlobalNodes.pop_back(); // erase efficiently
2187 // Mark only unresolvable call nodes for moving to the GlobalsGraph since
2188 // call nodes that get resolved will be difficult to remove from that graph.
2189 // The final unresolved call nodes must be handled specially at the end of
2190 // the BU pass (i.e., in main or other roots of the call graph).
2191 for (afc_iterator CI = afc_begin(), E = afc_end(); CI != E; ++CI)
2192 if (!AuxFCallsAlive.count(&*CI) &&
2193 (CI->isIndirectCall()
2194 || CallSiteUsesAliveArgs(*CI, Alive, Visited,
2195 Flags & DSGraph::RemoveUnreachableGlobals))) {
2196 CI->markReachableNodes(Alive);
2197 AuxFCallsAlive.insert(&*CI);
2202 // Move dead aux function calls to the end of the list
2203 for (std::list<DSCallSite>::iterator CI = AuxFunctionCalls.begin(),
2204 E = AuxFunctionCalls.end(); CI != E; )
2205 if (AuxFCallsAlive.count(&*CI))
2208 // Copy and merge global nodes and dead aux call nodes into the
2209 // GlobalsGraph, and all nodes reachable from those nodes. Update their
2210 // target pointers using the GGCloner.
2212 if (!(Flags & DSGraph::RemoveUnreachableGlobals))
2213 GlobalsGraph->AuxFunctionCalls.push_back(DSCallSite(*CI, GGCloner));
2215 AuxFunctionCalls.erase(CI++);
2218 // We are finally done with the GGCloner so we can destroy it.
2221 // At this point, any nodes which are visited, but not alive, are nodes
2222 // which can be removed. Loop over all nodes, eliminating completely
2223 // unreachable nodes.
2225 std::vector<DSNode*> DeadNodes;
2226 DeadNodes.reserve(Nodes.size());
2227 for (NodeListTy::iterator NI = Nodes.begin(), E = Nodes.end(); NI != E;) {
2229 assert(!N->isForwarding() && "Forwarded node in nodes list?");
2231 if (!Alive.count(N)) {
// Dead node: collect it and drop its references now; the actual delete
// happens after the loop, once all referrer counts have reached zero.
2233 assert(!N->isForwarding() && "Cannot remove a forwarding node!");
2234 DeadNodes.push_back(N);
2235 N->dropAllReferences();
2240 // Remove all unreachable globals from the ScalarMap.
2241 // If flag RemoveUnreachableGlobals is set, GlobalNodes has only dead nodes.
2242 // In either case, the dead nodes will not be in the set Alive.
2243 for (unsigned i = 0, e = GlobalNodes.size(); i != e; ++i)
2244 if (!Alive.count(GlobalNodes[i].second))
2245 ScalarMap.erase(GlobalNodes[i].first);
2247 assert((Flags & DSGraph::RemoveUnreachableGlobals) && "non-dead global");
2249 // Delete all dead nodes now since their referrer counts are zero.
2250 for (unsigned i = 0, e = DeadNodes.size(); i != e; ++i)
2251 delete DeadNodes[i];
2253 DEBUG(AssertGraphOK(); GlobalsGraph->AssertGraphOK());
2256 void DSGraph::AssertNodeContainsGlobal(const DSNode *N, GlobalValue *GV) const {
2257 assert(std::find(N->globals_begin(),N->globals_end(), GV) !=
2258 N->globals_end() && "Global value not in node!");
2261 void DSGraph::AssertCallSiteInGraph(const DSCallSite &CS) const {
2262 if (CS.isIndirectCall()) {
2263 AssertNodeInGraph(CS.getCalleeNode());
2265 if (CS.getNumPtrArgs() && CS.getCalleeNode() == CS.getPtrArg(0).getNode() &&
2266 CS.getCalleeNode() && CS.getCalleeNode()->getGlobals().empty())
2267 DOUT << "WARNING: WEIRD CALL SITE FOUND!\n";
2270 AssertNodeInGraph(CS.getRetVal().getNode());
2271 for (unsigned j = 0, e = CS.getNumPtrArgs(); j != e; ++j)
2272 AssertNodeInGraph(CS.getPtrArg(j).getNode());
2275 void DSGraph::AssertCallNodesInGraph() const {
2276 for (fc_iterator I = fc_begin(), E = fc_end(); I != E; ++I)
2277 AssertCallSiteInGraph(*I);
2279 void DSGraph::AssertAuxCallNodesInGraph() const {
2280 for (afc_iterator I = afc_begin(), E = afc_end(); I != E; ++I)
2281 AssertCallSiteInGraph(*I);
2284 void DSGraph::AssertGraphOK() const {
2285 for (node_const_iterator NI = node_begin(), E = node_end(); NI != E; ++NI)
2288 for (ScalarMapTy::const_iterator I = ScalarMap.begin(),
2289 E = ScalarMap.end(); I != E; ++I) {
2290 assert(!I->second.isNull() && "Null node in scalarmap!");
2291 AssertNodeInGraph(I->second.getNode());
2292 if (GlobalValue *GV = dyn_cast<GlobalValue>(I->first)) {
2293 assert(I->second.getNode()->isGlobalNode() &&
2294 "Global points to node, but node isn't global?");
2295 AssertNodeContainsGlobal(I->second.getNode(), GV);
2298 AssertCallNodesInGraph();
2299 AssertAuxCallNodesInGraph();
2301 // Check that all pointer arguments to any functions in this graph have
2303 for (ReturnNodesTy::const_iterator RI = ReturnNodes.begin(),
2304 E = ReturnNodes.end();
2306 Function &F = *RI->first;
2307 for (Function::arg_iterator AI = F.arg_begin(); AI != F.arg_end(); ++AI)
2308 if (isPointerType(AI->getType()))
2309 assert(!getNodeForValue(AI).isNull() &&
2310 "Pointer argument must be in the scalar map!");
2314 /// computeNodeMapping - Given roots in two different DSGraphs, traverse the
2315 /// nodes reachable from the two graphs, computing the mapping of nodes from the
2316 /// first to the second graph. This mapping may be many-to-one (i.e. the first
2317 /// graph may have multiple nodes representing one node in the second graph),
2318 /// but it will not work if there is a one-to-many or many-to-many mapping.
2320 void DSGraph::computeNodeMapping(const DSNodeHandle &NH1,
2321 const DSNodeHandle &NH2, NodeMapTy &NodeMap,
2322 bool StrictChecking) {
2323 DSNode *N1 = NH1.getNode(), *N2 = NH2.getNode();
2324 if (N1 == 0 || N2 == 0) return;
2326 DSNodeHandle &Entry = NodeMap[N1];
2327 if (!Entry.isNull()) {
2328 // Termination of recursion!
2329 if (StrictChecking) {
2330 assert(Entry.getNode() == N2 && "Inconsistent mapping detected!");
2331 assert((Entry.getOffset() == (NH2.getOffset()-NH1.getOffset()) ||
2332 Entry.getNode()->isNodeCompletelyFolded()) &&
2333 "Inconsistent mapping detected!");
2338 Entry.setTo(N2, NH2.getOffset()-NH1.getOffset());
2340 // Loop over all of the fields that N1 and N2 have in common, recursively
2341 // mapping the edges together now.
2342 int N2Idx = NH2.getOffset()-NH1.getOffset();
2343 unsigned N2Size = N2->getSize();
2344 if (N2Size == 0) return; // No edges to map to.
2346 for (unsigned i = 0, e = N1->getSize(); i < e; i += DS::PointerSize) {
2347 const DSNodeHandle &N1NH = N1->getLink(i);
2348 // Don't call N2->getLink if not needed (avoiding crash if N2Idx is not
2350 if (!N1NH.isNull()) {
2351 if (unsigned(N2Idx)+i < N2Size)
2352 computeNodeMapping(N1NH, N2->getLink(N2Idx+i), NodeMap);
2354 computeNodeMapping(N1NH,
2355 N2->getLink(unsigned(N2Idx+i) % N2Size), NodeMap);
2361 /// computeGToGGMapping - Compute the mapping from nodes in this graph to the
2362 /// corresponding nodes in the globals graph.
2363 void DSGraph::computeGToGGMapping(NodeMapTy &NodeMap) {
2364 DSGraph &GG = *getGlobalsGraph();
2366 DSScalarMap &SM = getScalarMap();
2367 for (DSScalarMap::global_iterator I = SM.global_begin(),
2368 E = SM.global_end(); I != E; ++I)
2369 DSGraph::computeNodeMapping(SM[*I], GG.getNodeForValue(*I), NodeMap);
2372 /// computeGGToGMapping - Compute the mapping of nodes in the global graph to
2373 /// nodes in this graph. Note that any uses of this method are probably bugs,
2374 /// unless it is known that the globals graph has been merged into this graph!
2375 void DSGraph::computeGGToGMapping(InvNodeMapTy &InvNodeMap) {
2377 computeGToGGMapping(NodeMap);
2379 while (!NodeMap.empty()) {
2380 InvNodeMap.insert(std::make_pair(NodeMap.begin()->second,
2381 NodeMap.begin()->first));
2382 NodeMap.erase(NodeMap.begin());
2387 /// computeCalleeCallerMapping - Given a call from a function in the current
2388 /// graph to the 'Callee' function (which lives in 'CalleeGraph'), compute the
2389 /// mapping of nodes from the callee to nodes in the caller.
2390 void DSGraph::computeCalleeCallerMapping(DSCallSite CS, const Function &Callee,
2391 DSGraph &CalleeGraph,
2392 NodeMapTy &NodeMap) {
2394 DSCallSite CalleeArgs =
2395 CalleeGraph.getCallSiteForArguments(const_cast<Function&>(Callee));
2397 computeNodeMapping(CalleeArgs.getRetVal(), CS.getRetVal(), NodeMap);
2399 unsigned NumArgs = CS.getNumPtrArgs();
2400 if (NumArgs > CalleeArgs.getNumPtrArgs())
2401 NumArgs = CalleeArgs.getNumPtrArgs();
2403 for (unsigned i = 0; i != NumArgs; ++i)
2404 computeNodeMapping(CalleeArgs.getPtrArg(i), CS.getPtrArg(i), NodeMap);
2406 // Map the nodes that are pointed to by globals.
2407 DSScalarMap &CalleeSM = CalleeGraph.getScalarMap();
2408 DSScalarMap &CallerSM = getScalarMap();
2410 if (CalleeSM.global_size() >= CallerSM.global_size()) {
2411 for (DSScalarMap::global_iterator GI = CallerSM.global_begin(),
2412 E = CallerSM.global_end(); GI != E; ++GI)
2413 if (CalleeSM.global_count(*GI))
2414 computeNodeMapping(CalleeSM[*GI], CallerSM[*GI], NodeMap);
2416 for (DSScalarMap::global_iterator GI = CalleeSM.global_begin(),
2417 E = CalleeSM.global_end(); GI != E; ++GI)
2418 if (CallerSM.global_count(*GI))
2419 computeNodeMapping(CalleeSM[*GI], CallerSM[*GI], NodeMap);
2423 /// updateFromGlobalGraph - This function rematerializes global nodes and
2424 /// nodes reachable from them from the globals graph into the current graph.
2426 void DSGraph::updateFromGlobalGraph() {
2427 TIME_REGION(X, "updateFromGlobalGraph");
2428 ReachabilityCloner RC(*this, *GlobalsGraph, 0);
2430 // Clone the non-up-to-date global nodes into this graph.
2431 for (DSScalarMap::global_iterator I = getScalarMap().global_begin(),
2432 E = getScalarMap().global_end(); I != E; ++I) {
2433 DSScalarMap::iterator It = GlobalsGraph->ScalarMap.find(*I);
2434 if (It != GlobalsGraph->ScalarMap.end())
2435 RC.merge(getNodeForValue(*I), It->second);