1 //===- TopDownClosure.cpp - Compute the top-down interprocedure closure ---===//
3 // This file implements the TDDataStructures class, which represents the
4 // Top-down Interprocedural closure of the data structure graph over the
5 // program. This is useful (but not strictly necessary?) for applications
6 // like pointer analysis.
8 //===----------------------------------------------------------------------===//
10 #include "llvm/Analysis/DataStructure.h"
11 #include "llvm/Module.h"
12 #include "llvm/DerivedTypes.h"
13 #include "Support/Debug.h"
14 #include "Support/Statistic.h"
15 #include "DSCallSiteIterator.h"
// Register the pass with the PassManager so it can be requested by name
// ("-tddatastructure") on the command line and required by analysis clients.
RegisterAnalysis<TDDataStructures> // Register the pass
Y("tddatastructure", "Top-down Data Structure Analysis");

// Counts how many caller graphs are inlined into callee graphs during the
// top-down propagation (reported with -stats).
Statistic<> NumTDInlines("tddatastructures", "Number of graphs inlined");
24 /// FunctionHasCompleteArguments - This function returns true if it is safe not
25 /// to mark arguments to the function complete.
27 /// FIXME: Need to check if all callers have been found, or rather if a
28 /// funcpointer escapes!
30 static bool FunctionHasCompleteArguments(Function &F) {
31 return F.hasInternalLinkage();
34 // run - Calculate the top down data structure graphs for each function in the
37 bool TDDataStructures::run(Module &M) {
38 BUDataStructures &BU = getAnalysis<BUDataStructures>();
39 GlobalsGraph = new DSGraph(BU.getGlobalsGraph());
41 // Figure out which functions must not mark their arguments complete because
42 // they are accessible outside this compilation unit.
43 for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
44 if (!FunctionHasCompleteArguments(*I))
45 ArgsRemainIncomplete.insert(I);
47 // We want to traverse the call graph in reverse post-order. To do this, we
48 // calculate a post-order traversal, then reverse it.
49 hash_set<DSGraph*> VisitedGraph;
50 std::vector<DSGraph*> PostOrder;
51 const BUDataStructures::ActualCalleesTy &ActualCallees =
52 getAnalysis<BUDataStructures>().getActualCallees();
54 // Calculate top-down from main...
55 if (Function *F = M.getMainFunction())
56 ComputePostOrder(*F, VisitedGraph, PostOrder, ActualCallees);
58 // Next calculate the graphs for each unreachable function...
59 for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
60 ComputePostOrder(*I, VisitedGraph, PostOrder, ActualCallees);
62 VisitedGraph.clear(); // Release memory!
64 // Visit each of the graphs in reverse post-order now!
65 while (!PostOrder.empty()) {
66 inlineGraphIntoCallees(*PostOrder.back());
70 ArgsRemainIncomplete.clear();
75 DSGraph &TDDataStructures::getOrCreateDSGraph(Function &F) {
76 DSGraph *&G = DSInfo[&F];
77 if (G == 0) { // Not created yet? Clone BU graph...
78 G = new DSGraph(getAnalysis<BUDataStructures>().getDSGraph(F));
79 G->getAuxFunctionCalls().clear();
80 G->setPrintAuxCalls();
81 G->setGlobalsGraph(GlobalsGraph);
87 void TDDataStructures::ComputePostOrder(Function &F,hash_set<DSGraph*> &Visited,
88 std::vector<DSGraph*> &PostOrder,
89 const BUDataStructures::ActualCalleesTy &ActualCallees) {
90 if (F.isExternal()) return;
91 DSGraph &G = getOrCreateDSGraph(F);
92 if (Visited.count(&G)) return;
95 // Recursively traverse all of the callee graphs.
96 const std::vector<DSCallSite> &FunctionCalls = G.getFunctionCalls();
98 for (unsigned i = 0, e = FunctionCalls.size(); i != e; ++i) {
99 std::pair<BUDataStructures::ActualCalleesTy::const_iterator,
100 BUDataStructures::ActualCalleesTy::const_iterator>
101 IP = ActualCallees.equal_range(&FunctionCalls[i].getCallInst());
103 for (BUDataStructures::ActualCalleesTy::const_iterator I = IP.first;
105 ComputePostOrder(*I->second, Visited, PostOrder, ActualCallees);
108 PostOrder.push_back(&G);
115 // releaseMemory - If the pass pipeline is done with this pass, we can release
116 // our memory... here...
118 // FIXME: This should be releaseMemory and will work fine, except that LoadVN
119 // has no way to extend the lifetime of the pass, which screws up ds-aa.
121 void TDDataStructures::releaseMyMemory() {
122 for (hash_map<Function*, DSGraph*>::iterator I = DSInfo.begin(),
123 E = DSInfo.end(); I != E; ++I) {
124 I->second->getReturnNodes().erase(I->first);
125 if (I->second->getReturnNodes().empty())
129 // Empty map so next time memory is released, data structures are not
136 void TDDataStructures::inlineGraphIntoCallees(DSGraph &Graph) {
137 // Recompute the Incomplete markers and eliminate unreachable nodes.
138 Graph.removeTriviallyDeadNodes();
139 Graph.maskIncompleteMarkers();
141 // If any of the functions has incomplete incoming arguments, don't mark any
142 // of them as complete.
143 bool HasIncompleteArgs = false;
144 const DSGraph::ReturnNodesTy &GraphReturnNodes = Graph.getReturnNodes();
145 for (DSGraph::ReturnNodesTy::const_iterator I = GraphReturnNodes.begin(),
146 E = GraphReturnNodes.end(); I != E; ++I)
147 if (ArgsRemainIncomplete.count(I->first)) {
148 HasIncompleteArgs = true;
152 // Now fold in the necessary globals from the GlobalsGraph. A global G
153 // must be folded in if it exists in the current graph (i.e., is not dead)
154 // and it was not inlined from any of my callers. If it was inlined from
155 // a caller, it would have been fully consistent with the GlobalsGraph
156 // in the caller so folding in is not necessary. Otherwise, this node came
157 // solely from this function's BU graph and so has to be made consistent.
159 Graph.updateFromGlobalGraph();
161 // Recompute the Incomplete markers. Depends on whether args are complete
163 = HasIncompleteArgs ? DSGraph::MarkFormalArgs : DSGraph::IgnoreFormalArgs;
164 Graph.markIncompleteNodes(Flags | DSGraph::IgnoreGlobals);
166 // Delete dead nodes. Treat globals that are unreachable as dead also.
167 Graph.removeDeadNodes(DSGraph::RemoveUnreachableGlobals);
169 // We are done with computing the current TD Graph! Now move on to
170 // inlining the current graph into the graphs for its callees, if any.
172 const std::vector<DSCallSite> &FunctionCalls = Graph.getFunctionCalls();
173 if (FunctionCalls.empty()) {
174 DEBUG(std::cerr << " [TD] No callees for: " << Graph.getFunctionNames()
179 // Now that we have information about all of the callees, propagate the
180 // current graph into the callees. Clone only the reachable subgraph at
181 // each call-site, not the entire graph (even though the entire graph
182 // would be cloned only once, this should still be better on average).
184 DEBUG(std::cerr << " [TD] Inlining '" << Graph.getFunctionNames() <<"' into "
185 << FunctionCalls.size() << " call nodes.\n");
187 const BUDataStructures::ActualCalleesTy &ActualCallees =
188 getAnalysis<BUDataStructures>().getActualCallees();
190 // Loop over all the call sites and all the callees at each call site.
191 // Clone and merge the reachable subgraph from the call into callee's graph.
193 for (unsigned i = 0, e = FunctionCalls.size(); i != e; ++i) {
194 // For each function in the invoked function list at this call site...
195 std::pair<BUDataStructures::ActualCalleesTy::const_iterator,
196 BUDataStructures::ActualCalleesTy::const_iterator>
197 IP = ActualCallees.equal_range(&FunctionCalls[i].getCallInst());
199 // Multiple callees may have the same graph, so try to inline and merge
200 // only once for each <callSite,calleeGraph> pair, not once for each
201 // <callSite,calleeFunction> pair; the latter will be correct but slower.
202 hash_set<DSGraph*> GraphsSeen;
204 // Loop over each actual callee at this call site
205 for (BUDataStructures::ActualCalleesTy::const_iterator I = IP.first;
206 I != IP.second; ++I) {
207 DSGraph& CalleeGraph = getDSGraph(*I->second);
208 assert(&CalleeGraph != &Graph && "TD need not inline graph into self!");
210 // if this callee graph is already done at this site, skip this callee
211 if (GraphsSeen.find(&CalleeGraph) != GraphsSeen.end())
213 GraphsSeen.insert(&CalleeGraph);
215 // Get the root nodes for cloning the reachable subgraph into each callee:
216 // -- all global nodes that appear in both the caller and the callee
217 // -- return value at this call site, if any
218 // -- actual arguments passed at this call site
219 // -- callee node at this call site, if this is an indirect call (this may
220 // not be needed for merging, but allows us to create CS and therefore
221 // simplify the merging below).
222 hash_set<const DSNode*> RootNodeSet;
223 for (DSGraph::ScalarMapTy::const_iterator
224 SI = CalleeGraph.getScalarMap().begin(),
225 SE = CalleeGraph.getScalarMap().end(); SI != SE; ++SI)
226 if (GlobalValue* GV = dyn_cast<GlobalValue>(SI->first)) {
227 DSGraph::ScalarMapTy::const_iterator GI=Graph.getScalarMap().find(GV);
228 if (GI != Graph.getScalarMap().end())
229 RootNodeSet.insert(GI->second.getNode());
232 if (const DSNode* RetNode = FunctionCalls[i].getRetVal().getNode())
233 RootNodeSet.insert(RetNode);
235 for (unsigned j=0, N=FunctionCalls[i].getNumPtrArgs(); j < N; ++j)
236 if (const DSNode* ArgTarget = FunctionCalls[i].getPtrArg(j).getNode())
237 RootNodeSet.insert(ArgTarget);
239 if (FunctionCalls[i].isIndirectCall())
240 RootNodeSet.insert(FunctionCalls[i].getCalleeNode());
242 DEBUG(std::cerr << " [TD] Resolving arguments for callee graph '"
243 << CalleeGraph.getFunctionNames()
244 << "': " << I->second->getFunctionType()->getNumParams()
245 << " args\n at call site (DSCallSite*) 0x"
246 << &FunctionCalls[i] << "\n");
248 DSGraph::NodeMapTy NodeMapInCallee; // map from nodes to clones in callee
249 DSGraph::NodeMapTy CompletedMap; // unused map for nodes not to do
250 CalleeGraph.cloneReachableSubgraph(Graph, RootNodeSet,
251 NodeMapInCallee, CompletedMap,
252 DSGraph::StripModRefBits |
253 DSGraph::KeepAllocaBit);
255 // Transform our call site info into the cloned version for CalleeGraph
256 DSCallSite CS(FunctionCalls[i], NodeMapInCallee);
258 // Get the formal argument and return nodes for the called function
259 // and merge them with the cloned subgraph. Global nodes were merged
260 // already by cloneReachableSubgraph() above.
261 CalleeGraph.getCallSiteForArguments(*I->second).mergeWith(CS);
267 DEBUG(std::cerr << " [TD] Done inlining into callees for: "
268 << Graph.getFunctionNames() << " [" << Graph.getGraphSize() << "+"
269 << Graph.getFunctionCalls().size() << "]\n");