1 package Analysis.Disjoint;
3 import Analysis.CallGraph.*;
4 import Analysis.Liveness;
5 import Analysis.ArrayReferencees;
8 import IR.Tree.Modifiers;
13 public class DisjointAnalysis {
// ---- data from the compiler (injected via init()) ----
public CallGraph callGraph;
public Liveness liveness;
public ArrayReferencees arrayReferencees;
public TypeUtil typeUtil;
public int allocationDepth;

// used to identify HeapRegionNode objects
// A unique ID equates an object in one
// ownership graph with an object in another
// graph that logically represents the same
// start at 10 and increment to reserve some
// IDs for special purposes
static protected int uniqueIDcount = 10;

// An out-of-scope method created by the
// analysis that has no parameters, and
// appears to allocate the command line
// arguments, then invoke the source code's
// main method. The purpose of this is to
// provide the analysis with an explicit
// top-level context with no parameters
protected MethodDescriptor mdAnalysisEntry;
protected FlatMethod fmAnalysisEntry;

// main method defined by source program
protected MethodDescriptor mdSourceEntry;

// the set of task and/or method descriptors
// reachable in call graph
protected Set<Descriptor>

// current descriptors to visit in fixed-point
// interprocedural analysis, prioritized by
// dependency in the call graph
protected PriorityQueue<DescriptorQWrapper>

// a duplication of the above structure, but
// for efficient testing of inclusion
protected HashSet<Descriptor>
descriptorsToVisitSet;

// storage for priorities (it doesn't make sense
// to add a priority field to the Descriptor class,
// so the mapping is kept local to this analysis)
protected Hashtable<Descriptor, Integer>
mapDescriptorToPriority;

// maps a descriptor to its current partial result
// from the intraprocedural fixed-point analysis--
// then the interprocedural analysis settles, this
// mapping will have the final results for each
protected Hashtable<Descriptor, ReachGraph>
mapDescriptorToCompleteReachGraph;

// maps a descriptor to its known dependents: namely
// methods or tasks that call the descriptor's method
// AND are part of this analysis (reachable from main)
protected Hashtable< Descriptor, Set<Descriptor> >
mapDescriptorToSetDependents;

// maps each flat new to one analysis abstraction
// allocate site object, these exist outside reach graphs
protected Hashtable<FlatNew, AllocSite>
mapFlatNewToAllocSite;

// maps intergraph heap region IDs to intergraph
// allocation sites that created them, a redundant
// structure for efficiency in some operations
protected Hashtable<Integer, AllocSite>

// maps a method to its initial heap model (IHM) that
// is the set of reachability graphs from every caller
// site, all merged together. The reason that we keep
// them separate is that any one call site's contribution
// to the IHM may change along the path to the fixed point
protected Hashtable< Descriptor, Hashtable< FlatCall, ReachGraph > >
mapDescriptorToIHMcontributions;

// TODO -- CHANGE EDGE/TYPE/FIELD storage!
public static final String arrayElementFieldName = "___element_";
static protected Hashtable<TypeDescriptor, FieldDescriptor>

// for controlling DOT file output
protected boolean writeFinalDOTs;
protected boolean writeAllIncrementalDOTs;

// supporting DOT output--when we want to write every
// partial method result, keep a tally for generating
protected Hashtable<Descriptor, Integer>
mapDescriptorToNumUpdates;
// allocate various structures that are not local
// to a single class method--should be done once
// (called from init() before the analysis runs)
protected void allocateStructures() {
  descriptorsToAnalyze = new HashSet<Descriptor>();

  mapDescriptorToCompleteReachGraph =
    new Hashtable<Descriptor, ReachGraph>();

  mapDescriptorToNumUpdates =
    new Hashtable<Descriptor, Integer>();

  mapDescriptorToSetDependents =
    new Hashtable< Descriptor, Set<Descriptor> >();

  mapFlatNewToAllocSite =
    new Hashtable<FlatNew, AllocSite>();

  mapDescriptorToIHMcontributions =
    new Hashtable< Descriptor, Hashtable< FlatCall, ReachGraph > >();

  mapHrnIdToAllocSite =
    new Hashtable<Integer, AllocSite>();

  mapTypeToArrayField =
    new Hashtable <TypeDescriptor, FieldDescriptor>();

  descriptorsToVisitQ =
    new PriorityQueue<DescriptorQWrapper>();

  descriptorsToVisitSet =
    new HashSet<Descriptor>();

  mapDescriptorToPriority =
    new Hashtable<Descriptor, Integer>();
// this analysis generates a disjoint reachability
// graph for every reachable method in the program;
// the constructor simply forwards the compiler data
// to init(), which does all of the work
public DisjointAnalysis( State s,
                         ) throws java.io.IOException {
  init( s, tu, cg, l, ar );
// stores the compiler data, configures ReachGraph statics,
// allocates the analysis structures, runs the interprocedural
// fixed-point computation, then reports timing and writes any
// requested DOT/alias output
protected void init( State state,
                     ArrayReferencees arrayReferencees
                     ) throws java.io.IOException {

  this.typeUtil = typeUtil;
  this.callGraph = callGraph;
  this.liveness = liveness;
  this.arrayReferencees = arrayReferencees;
  this.allocationDepth = state.DISJOINTALLOCDEPTH;
  // writeFinalDOTs: only final graphs; writeAllIncrementalDOTs: every partial result
  this.writeFinalDOTs = state.DISJOINTWRITEDOTS && !state.DISJOINTWRITEALL;
  this.writeAllIncrementalDOTs = state.DISJOINTWRITEDOTS && state.DISJOINTWRITEALL;

  // set some static configuration for ReachGraphs
  ReachGraph.allocationDepth = allocationDepth;
  ReachGraph.typeUtil = typeUtil;

  allocateStructures();

  double timeStartAnalysis = (double) System.nanoTime();

  // start interprocedural fixed-point computation

  double timeEndAnalysis = (double) System.nanoTime();
  // nanoseconds -> seconds
  double dt = (timeEndAnalysis - timeStartAnalysis)/(Math.pow( 10.0, 9.0 ) );
  String treport = String.format( "The reachability analysis took %.3f sec.", dt );
  String justtime = String.format( "%.2f", dt );
  System.out.println( treport );

  if( writeFinalDOTs && !writeAllIncrementalDOTs ) {
  if( state.DISJOINTWRITEIHMS ) {
  if( state.DISJOINTALIASFILE != null ) {
  // not supporting tasks yet...
  writeAllAliasesJava( aliasFile,
                       state.DISJOINTALIASTAB,
// fixed-point computation over the call graph--when a
// method's callees are updated, it must be reanalyzed
protected void analyzeMethods() throws java.io.IOException {

  // This analysis does not support Bamboo at the moment,
  // but if it does in the future we would initialize the
  // set of descriptors to analyze as the program-reachable
  // tasks and the methods callable by them. For Java,
  // just methods reachable from the main method.
  System.out.println( "No Bamboo support yet..." );

  // add all methods transitively reachable from the
  // source's main to set for analysis
  mdSourceEntry = typeUtil.getMain();
  descriptorsToAnalyze.add( mdSourceEntry );
  descriptorsToAnalyze.addAll(
    callGraph.getAllMethods( mdSourceEntry )

  // fabricate an empty calling context that will call
  // the source's main, but call graph doesn't know
  // about it, so explicitly add it
  makeAnalysisEntryMethod( mdSourceEntry );
  descriptorsToAnalyze.add( mdAnalysisEntry );

  // topologically sort according to the call graph so
  // leaf calls are ordered first, smarter analysis order
  LinkedList<Descriptor> sortedDescriptors =
    topologicalSort( descriptorsToAnalyze );

  // add sorted descriptors to priority queue, and duplicate
  // the queue as a set for efficiently testing whether some
  // method is marked for analysis
  // (p is the running priority counter for queue ordering)
  Iterator<Descriptor> dItr = sortedDescriptors.iterator();
  while( dItr.hasNext() ) {
    Descriptor d = dItr.next();
    mapDescriptorToPriority.put( d, new Integer( p ) );
    descriptorsToVisitQ.add( new DescriptorQWrapper( p, d ) );
    descriptorsToVisitSet.add( d );

  // analyze methods from the priority queue until it is empty
  while( !descriptorsToVisitQ.isEmpty() ) {
    Descriptor d = descriptorsToVisitQ.poll().getDescriptor();
    assert descriptorsToVisitSet.contains( d );
    descriptorsToVisitSet.remove( d );

    // because the task or method descriptor just extracted
    // was in the "to visit" set it either hasn't been analyzed
    // yet, or some method that it depends on has been
    // updated. Recompute a complete reachability graph for
    // this task/method and compare it to any previous result.
    // If there is a change detected, add any methods/tasks
    // that depend on this one to the "to visit" set.

    System.out.println( "Analyzing " + d );

    ReachGraph rg = analyzeMethod( d );
    ReachGraph rgPrev = getPartial( d );

    if( !rg.equals( rgPrev ) ) {
      // results for d changed, so enqueue dependents
      // of d for further analysis
      Iterator<Descriptor> depsItr = getDependents( d ).iterator();
      while( depsItr.hasNext() ) {
        Descriptor dNext = depsItr.next();
// intraprocedural fixed-point analysis of one method/task:
// iterates a work set of flat nodes, merging predecessor graphs
// and applying the per-node transfer function until no graph
// changes, then merges all return-node graphs into the result
protected ReachGraph analyzeMethod( Descriptor d )
  throws java.io.IOException {

  // get the flat code for this descriptor
  // (the fabricated analysis entry has no State-registered code)
  if( d == mdAnalysisEntry ) {
    fm = fmAnalysisEntry;
    fm = state.getMethodFlat( d );

  // intraprocedural work set
  Set<FlatNode> flatNodesToVisit = new HashSet<FlatNode>();
  flatNodesToVisit.add( fm );

  // mapping of current partial results
  Hashtable<FlatNode, ReachGraph> mapFlatNodeToReachGraph =
    new Hashtable<FlatNode, ReachGraph>();

  // the set of return nodes partial results that will be combined as
  // the final, conservative approximation of the entire method
  HashSet<FlatReturnNode> setReturns = new HashSet<FlatReturnNode>();

  while( !flatNodesToVisit.isEmpty() ) {
    FlatNode fn = (FlatNode) flatNodesToVisit.iterator().next();
    flatNodesToVisit.remove( fn );

    //System.out.println( "  "+fn );

    // effect transfer function defined by this node,
    // then compare it to the old graph at this node
    // to see if anything was updated.

    ReachGraph rg = new ReachGraph();

    // start by merging all node's parents' graphs
    for( int i = 0; i < fn.numPrev(); ++i ) {
      FlatNode pn = fn.getPrev( i );
      if( mapFlatNodeToReachGraph.containsKey( pn ) ) {
        ReachGraph rgParent = mapFlatNodeToReachGraph.get( pn );
        rg.merge( rgParent );

    // modify rg with appropriate transfer function
    rg = analyzeFlatNode( d, fm, fn, setReturns, rg );

    // optional debug capture of intermediate graphs
    if( takeDebugSnapshots &&
        d.getSymbol().equals( descSymbolDebug )
      debugSnapshot( rg, fn );

    // if the results of the new graph are different from
    // the current graph at this node, replace the graph
    // with the update and enqueue the children
    ReachGraph rgPrev = mapFlatNodeToReachGraph.get( fn );
    if( !rg.equals( rgPrev ) ) {
      mapFlatNodeToReachGraph.put( fn, rg );

      for( int i = 0; i < fn.numNext(); i++ ) {
        FlatNode nn = fn.getNext( i );
        flatNodesToVisit.add( nn );

  // end by merging all return nodes into a complete
  // ownership graph that represents all possible heap
  // states after the flat method returns
  ReachGraph completeGraph = new ReachGraph();

  assert !setReturns.isEmpty();
  Iterator retItr = setReturns.iterator();
  while( retItr.hasNext() ) {
    FlatReturnNode frn = (FlatReturnNode) retItr.next();

    assert mapFlatNodeToReachGraph.containsKey( frn );
    ReachGraph rgRet = mapFlatNodeToReachGraph.get( frn );

    completeGraph.merge( rgRet );

  return completeGraph;
// applies the transfer function for a single flat IR node to the
// incoming reachability graph rg, dispatching on the node kind;
// returns the updated graph (untouched for node kinds that do
// not affect the heap). Also records return nodes in setRetNodes
// and schedules callees for (re)analysis at call sites.
analyzeFlatNode( Descriptor d,
                 FlatMethod fmContaining,
                 HashSet<FlatReturnNode> setRetNodes,
                 ) throws java.io.IOException {

  // any variables that are no longer live should be
  // nullified in the graph to reduce edges
  //rg.nullifyDeadVars( liveness.getLiveInTemps( fmContaining, fn ) );

  // use node type to decide what transfer function
  // to apply to the reachability graph
  switch( fn.kind() ) {

  case FKind.FlatMethod: {
    // construct this method's initial heap model (IHM)
    // since we're working on the FlatMethod, we know
    // the incoming ReachGraph 'rg' is empty

    Hashtable<FlatCall, ReachGraph> heapsFromCallers =
      getIHMcontributions( d );

    Set entrySet = heapsFromCallers.entrySet();
    Iterator itr = entrySet.iterator();
    while( itr.hasNext() ) {
      Map.Entry me = (Map.Entry) itr.next();
      FlatCall fc = (FlatCall) me.getKey();
      ReachGraph rgContrib = (ReachGraph) me.getValue();

      assert fc.getMethod().equals( d );

      // some call sites are in same method context though,
      // and all of them should be merged together first,
      // then heaps from different contexts should be merged
      // THIS ASSUMES DIFFERENT CONTEXTS NEED SPECIAL CONSIDERATION!
      // such as, do allocation sites need to be aged?

      rg.merge_diffMethodContext( rgContrib );

  case FKind.FlatOpNode:
    FlatOpNode fon = (FlatOpNode) fn;
    // only plain assignment x = y moves references
    if( fon.getOp().getOp() == Operation.ASSIGN ) {
      rg.assignTempXEqualToTempY( lhs, rhs );

  case FKind.FlatCastNode:
    FlatCastNode fcn = (FlatCastNode) fn;
    TypeDescriptor td = fcn.getType();
    rg.assignTempXEqualToCastedTempY( lhs, rhs, td );

  case FKind.FlatFieldNode:
    FlatFieldNode ffn = (FlatFieldNode) fn;
    fld = ffn.getField();
    // immutable non-array fields cannot carry heap references
    if( !fld.getType().isImmutable() || fld.getType().isArray() ) {
      rg.assignTempXEqualToTempYFieldF( lhs, rhs, fld );

  case FKind.FlatSetFieldNode:
    FlatSetFieldNode fsfn = (FlatSetFieldNode) fn;
    fld = fsfn.getField();
    if( !fld.getType().isImmutable() || fld.getType().isArray() ) {
      rg.assignTempXFieldFEqualToTempY( lhs, fld, rhs );

  case FKind.FlatElementNode:
    FlatElementNode fen = (FlatElementNode) fn;
    if( !lhs.getType().isImmutable() || lhs.getType().isArray() ) {
      assert rhs.getType() != null;
      assert rhs.getType().isArray();

      // array elements are modeled as a synthetic field of the array
      TypeDescriptor tdElement = rhs.getType().dereference();
      FieldDescriptor fdElement = getArrayField( tdElement );

      rg.assignTempXEqualToTempYFieldF( lhs, rhs, fdElement );

  case FKind.FlatSetElementNode:
    FlatSetElementNode fsen = (FlatSetElementNode) fn;
    if( arrayReferencees.doesNotCreateNewReaching( fsen ) ) {
      // skip this node if it cannot create new reachability paths
    if( !rhs.getType().isImmutable() || rhs.getType().isArray() ) {
      assert lhs.getType() != null;
      assert lhs.getType().isArray();

      TypeDescriptor tdElement = lhs.getType().dereference();
      FieldDescriptor fdElement = getArrayField( tdElement );

      rg.assignTempXFieldFEqualToTempY( lhs, fdElement, rhs );

    FlatNew fnn = (FlatNew) fn;
    if( !lhs.getType().isImmutable() || lhs.getType().isArray() ) {
      AllocSite as = getAllocSiteFromFlatNewPRIVATE( fnn );
      rg.assignTempEqualToNewAlloc( lhs, as );

  case FKind.FlatCall: {
    MethodDescriptor mdCaller = fmContaining.getMethod();
    FlatCall fc = (FlatCall) fn;
    MethodDescriptor mdCallee = fc.getMethod();
    FlatMethod fmCallee = state.getMethodFlat( mdCallee );

    // dump extra DOT files for a specific caller/callee pair under debug
    boolean writeDebugDOTs =
      mdCaller.getSymbol().equals( state.DISJOINTDEBUGCALLER ) &&
      mdCallee.getSymbol().equals( state.DISJOINTDEBUGCALLEE );

    // calculate the heap this call site can reach--note this is
    // not used for the current call site transform, we are
    // grabbing this heap model for future analysis of the callees,
    // so if different results emerge we will return to this site
    ReachGraph heapForThisCall_old =
      getIHMcontribution( mdCallee, fc );

    // the computation of the callee-reachable heap
    // is useful for making the callee starting point
    // and for applying the call site transfer function
    Set<HeapRegionNode> callerNodesCopiedToCallee =
      new HashSet<HeapRegionNode>();
    Set<RefEdge> callerEdgesCopiedToCallee =
      new HashSet<RefEdge>();

    ReachGraph heapForThisCall_cur =
      rg.makeCalleeView( fc,
                         callerNodesCopiedToCallee,
                         callerEdgesCopiedToCallee,

    if( !heapForThisCall_cur.equals( heapForThisCall_old ) ) {
      // if heap at call site changed, update the contribution,
      // and reschedule the callee for analysis
      addIHMcontribution( mdCallee, fc, heapForThisCall_cur );

    // the transformation for a call site should update the
    // current heap abstraction with any effects from the callee,
    // or if the method is virtual, the effects from any possible
    // callees, so find the set of callees...
    Set<MethodDescriptor> setPossibleCallees =
      new HashSet<MethodDescriptor>();

    if( mdCallee.isStatic() ) {
      setPossibleCallees.add( mdCallee );
      // virtual dispatch: collect every override reachable from
      // the receiver's static type
      TypeDescriptor typeDesc = fc.getThis().getType();
      setPossibleCallees.addAll( callGraph.getMethods( mdCallee,

    ReachGraph rgMergeOfEffects = new ReachGraph();

    Iterator<MethodDescriptor> mdItr = setPossibleCallees.iterator();
    while( mdItr.hasNext() ) {
      MethodDescriptor mdPossible = mdItr.next();
      FlatMethod fmPossible = state.getMethodFlat( mdPossible );

      addDependent( mdPossible, // callee

      // don't alter the working graph (rg) until we compute a
      // result for every possible callee, merge them all together,
      // then set rg to that
      ReachGraph rgCopy = new ReachGraph();

      ReachGraph rgEffect = getPartial( mdPossible );

      if( rgEffect == null ) {
        // if this method has never been analyzed just schedule it
        // for analysis and skip over this call site for now
        enqueue( mdPossible );
        rgCopy.resolveMethodCall( fc,
                                  callerNodesCopiedToCallee,
                                  callerEdgesCopiedToCallee,

      rgMergeOfEffects.merge( rgCopy );

    // now that we've taken care of building heap models for
    // callee analysis, finish this transformation
    rg = rgMergeOfEffects;

  case FKind.FlatReturnNode:
    FlatReturnNode frn = (FlatReturnNode) fn;
    rhs = frn.getReturnTemp();
    if( rhs != null && !rhs.getType().isImmutable() ) {
      rg.assignReturnEqualToTemp( rhs );
    setRetNodes.add( frn );

  // dead variables were removed before the above transfer function
  // was applied, so eliminate heap regions and edges that are no
  // longer part of the abstractly-live heap graph, and sweep up
  // and reachability effects that are altered by the reduction
  //rg.abstractGarbageCollect();

  // at this point rg should be the correct update
  // by an above transfer function, or untouched if
  // the flat node type doesn't affect the heap
// this method should generate integers strictly greater than zero!
// special "shadow" regions are made from a heap region by negating
static public Integer generateUniqueHeapRegionNodeID() {
  // NOTE(review): uniqueIDcount is presumably incremented before this
  // return (the counter starts at 10 to reserve low IDs) -- confirm
  // against the full source
  return new Integer( uniqueIDcount );
// returns the synthetic FieldDescriptor used to model the elements
// of an array of the given element type, lazily creating and caching
// one per element type in mapTypeToArrayField
static public FieldDescriptor getArrayField( TypeDescriptor tdElement ) {
  FieldDescriptor fdElement = mapTypeToArrayField.get( tdElement );
  if( fdElement == null ) {
    fdElement = new FieldDescriptor( new Modifiers( Modifiers.PUBLIC ),
                                     arrayElementFieldName,
    mapTypeToArrayField.put( tdElement, fdElement );
// writes one DOT graph per analyzed descriptor from the final
// (settled) reachability results; I/O failures are ignored
private void writeFinalGraphs() {
  Set entrySet = mapDescriptorToCompleteReachGraph.entrySet();
  Iterator itr = entrySet.iterator();
  while( itr.hasNext() ) {
    Map.Entry me = (Map.Entry) itr.next();
    Descriptor d = (Descriptor) me.getKey();
    ReachGraph rg = (ReachGraph) me.getValue();

    rg.writeGraph( "COMPLETE"+d,
                   true,   // write labels (variables)
                   true,   // selectively hide intermediate temp vars
                   false,  // prune unreachable heap regions
                   false,  // show back edges to confirm graph validity
                   true,   // hide subset reachability states
                   true ); // hide edge taints
  } catch( IOException e ) {}
// writes one DOT graph per (method, call site) initial-heap-model
// contribution; I/O failures are ignored
private void writeFinalIHMs() {
  Iterator d2IHMsItr = mapDescriptorToIHMcontributions.entrySet().iterator();
  while( d2IHMsItr.hasNext() ) {
    Map.Entry me1 = (Map.Entry) d2IHMsItr.next();
    Descriptor d = (Descriptor) me1.getKey();
    Hashtable<FlatCall, ReachGraph> IHMs = (Hashtable<FlatCall, ReachGraph>) me1.getValue();

    Iterator fc2rgItr = IHMs.entrySet().iterator();
    while( fc2rgItr.hasNext() ) {
      Map.Entry me2 = (Map.Entry) fc2rgItr.next();
      FlatCall fc = (FlatCall) me2.getKey();
      ReachGraph rg = (ReachGraph) me2.getValue();

      rg.writeGraph( "IHMPARTFOR"+d+"FROM"+fc,
                     true,   // write labels (variables)
                     false,  // selectively hide intermediate temp vars
                     false,  // prune unreachable heap regions
                     false,  // show back edges to confirm graph validity
                     true,   // hide subset reachability states
                     true ); // hide edge taints
    } catch( IOException e ) {}
// return just the allocation site associated with one FlatNew node;
// lazily creates the AllocSite on first request, assigning unique
// heap region IDs for each age, the summary node, and the site
// summary node, and registering every ID in mapHrnIdToAllocSite
protected AllocSite getAllocSiteFromFlatNewPRIVATE( FlatNew fnew ) {
  if( !mapFlatNewToAllocSite.containsKey( fnew ) ) {
    (AllocSite) Canonical.makeCanonical( new AllocSite( allocationDepth,

    // the newest nodes are single objects
    for( int i = 0; i < allocationDepth; ++i ) {
      Integer id = generateUniqueHeapRegionNodeID();
      as.setIthOldest( i, id );
      mapHrnIdToAllocSite.put( id, as );

    // the oldest node is a summary node
    as.setSummary( generateUniqueHeapRegionNodeID() );

    // and one special node is older than all
    // nodes and shadow nodes for the site
    as.setSiteSummary( generateUniqueHeapRegionNodeID() );

    mapFlatNewToAllocSite.put( fnew, as );

  return mapFlatNewToAllocSite.get( fnew );
// return all allocation sites in the method (there is one allocation
// site per FlatNew node in a method); results are computed once per
// descriptor and cached in mapDescriptorToAllocSiteSet
protected HashSet<AllocSite> getAllocSiteSet(Descriptor d) {
  if( !mapDescriptorToAllocSiteSet.containsKey(d) ) {
    buildAllocSiteSet(d);

  return mapDescriptorToAllocSiteSet.get(d);
// walks the FlatMethod IR graph of descriptor d, collecting the
// AllocSite for every FlatNew node encountered, and caches the
// resulting set in mapDescriptorToAllocSiteSet
protected void buildAllocSiteSet(Descriptor d) {
  HashSet<AllocSite> s = new HashSet<AllocSite>();

  FlatMethod fm = state.getMethodFlat( d );

  // visit every node in this FlatMethod's IR graph
  // and make a set of the allocation sites from the
  // FlatNew node's visited
  HashSet<FlatNode> visited = new HashSet<FlatNode>();
  HashSet<FlatNode> toVisit = new HashSet<FlatNode>();

  while( !toVisit.isEmpty() ) {
    FlatNode n = toVisit.iterator().next();

    if( n instanceof FlatNew ) {
      s.add(getAllocSiteFromFlatNewPRIVATE( (FlatNew) n) );

    for( int i = 0; i < n.numNext(); ++i ) {
      FlatNode child = n.getNext( i );
      if( !visited.contains( child ) ) {
        toVisit.add( child );

  mapDescriptorToAllocSiteSet.put( d, s );
// collects allocation sites carrying a disjoint-analysis flag,
// searching dIn and (via the call graph) everything it can call
protected HashSet<AllocSite> getFlaggedAllocSites(Descriptor dIn) {

  HashSet<AllocSite> out = new HashSet<AllocSite>();
  HashSet<Descriptor> toVisit = new HashSet<Descriptor>();
  HashSet<Descriptor> visited = new HashSet<Descriptor>();

  while( !toVisit.isEmpty() ) {
    Descriptor d = toVisit.iterator().next();

    HashSet<AllocSite> asSet = getAllocSiteSet(d);
    Iterator asItr = asSet.iterator();
    while( asItr.hasNext() ) {
      AllocSite as = (AllocSite) asItr.next();
      // a non-null ID marks the site as flagged for this analysis
      if( as.getDisjointAnalysisId() != null ) {

    // enqueue callees of this method to be searched for
    // allocation sites also
    Set callees = callGraph.getCalleeSet(d);
    if( callees != null ) {
      Iterator methItr = callees.iterator();
      while( methItr.hasNext() ) {
        MethodDescriptor md = (MethodDescriptor) methItr.next();

        if( !visited.contains(md) ) {
// collects allocation sites whose allocated class has flags,
// searching the task and every method transitively callable from it
protected HashSet<AllocSite>
getFlaggedAllocSitesReachableFromTaskPRIVATE(TaskDescriptor td) {

  HashSet<AllocSite> asSetTotal = new HashSet<AllocSite>();
  HashSet<Descriptor> toVisit = new HashSet<Descriptor>();
  HashSet<Descriptor> visited = new HashSet<Descriptor>();

  // traverse this task and all methods reachable from this task
  while( !toVisit.isEmpty() ) {
    Descriptor d = toVisit.iterator().next();

    HashSet<AllocSite> asSet = getAllocSiteSet(d);
    Iterator asItr = asSet.iterator();
    while( asItr.hasNext() ) {
      AllocSite as = (AllocSite) asItr.next();
      TypeDescriptor typed = as.getType();
      if( typed != null ) {
        ClassDescriptor cd = typed.getClassDesc();
        // only classes declared with flags qualify
        if( cd != null && cd.hasFlags() ) {

    // enqueue callees of this method to be searched for
    // allocation sites also
    Set callees = callGraph.getCalleeSet(d);
    if( callees != null ) {
      Iterator methItr = callees.iterator();
      while( methItr.hasNext() ) {
        MethodDescriptor md = (MethodDescriptor) methItr.next();

        if( !visited.contains(md) ) {
// builds a human-readable histogram string: for each count of
// unique alias contexts, how many methods had that many contexts
protected String computeAliasContextHistogram() {

  Hashtable<Integer, Integer> mapNumContexts2NumDesc =
    new Hashtable<Integer, Integer>();

  Iterator itr = mapDescriptorToAllDescriptors.entrySet().iterator();
  while( itr.hasNext() ) {
    Map.Entry me = (Map.Entry) itr.next();
    HashSet<Descriptor> s = (HashSet<Descriptor>) me.getValue();

    Integer i = mapNumContexts2NumDesc.get( s.size() );
    i = new Integer( 0 );
    mapNumContexts2NumDesc.put( s.size(), i + 1 );

  itr = mapNumContexts2NumDesc.entrySet().iterator();
  while( itr.hasNext() ) {
    Map.Entry me = (Map.Entry) itr.next();
    Integer c0 = (Integer) me.getKey();
    Integer d0 = (Integer) me.getValue();

    s += String.format( "%4d methods had %4d unique alias contexts.\n", d0, c0 );

  // NOTE(review): "analayzed" is a typo in the output string below;
  // fixing it would change the program's runtime output
  s += String.format( "\n%4d total methods analayzed.\n", total );
// number of method/task descriptors included in this analysis run
protected int numMethodsAnalyzed() {
  return descriptorsToAnalyze.size();
// Take in source entry which is the program's compiled entry and
// create a new analysis entry, a method that takes no parameters
// and appears to allocate the command line arguments and call the
// source entry with them. The purpose of this analysis entry is
// to provide a top-level method context with no parameters left.
// Results are stored in mdAnalysisEntry / fmAnalysisEntry.
protected void makeAnalysisEntryMethod( MethodDescriptor mdSourceEntry ) {

  Modifiers mods = new Modifiers();
  mods.addModifier( Modifiers.PUBLIC );
  mods.addModifier( Modifiers.STATIC );

  TypeDescriptor returnType =
    new TypeDescriptor( TypeDescriptor.VOID );

  this.mdAnalysisEntry =
    new MethodDescriptor( mods,
                          "analysisEntryMethod"

  // fabricated temp holding the command-line args array
  TempDescriptor cmdLineArgs =
    new TempDescriptor( "args",
                        mdSourceEntry.getParamType( 0 )

    new FlatNew( mdSourceEntry.getParamType( 0 ),

  TempDescriptor[] sourceEntryArgs = new TempDescriptor[1];
  sourceEntryArgs[0] = cmdLineArgs;

    new FlatCall( mdSourceEntry,

  FlatReturnNode frn = new FlatReturnNode( null );

  FlatExit fe = new FlatExit();

  this.fmAnalysisEntry =
    new FlatMethod( mdAnalysisEntry,

  this.fmAnalysisEntry.addNext( fn );
// topologically sorts the descriptors by DFS over the call graph
// (see dfsVisit) so that leaf callees come first in the result
protected LinkedList<Descriptor> topologicalSort( Set<Descriptor> toSort ) {

  Set   <Descriptor> discovered = new HashSet   <Descriptor>();
  LinkedList<Descriptor> sorted = new LinkedList<Descriptor>();

  Iterator<Descriptor> itr = toSort.iterator();
  while( itr.hasNext() ) {
    Descriptor d = itr.next();

    if( !discovered.contains( d ) ) {
      dfsVisit( d, toSort, sorted, discovered );
// While we're doing DFS on call graph, remember
// dependencies for efficient queuing of methods
// during interprocedural analysis:
//
// a dependent of a method descriptor d for this analysis is:
//   1) a method or task that invokes d
//   2) in the descriptorsToAnalyze set
protected void dfsVisit( Descriptor d,
                         Set       <Descriptor> toSort,
                         LinkedList<Descriptor> sorted,
                         Set       <Descriptor> discovered ) {
  discovered.add( d );

  // only methods have callers, tasks never do
  if( d instanceof MethodDescriptor ) {

    MethodDescriptor md = (MethodDescriptor) d;

    // the call graph is not aware that we have a fabricated
    // analysis entry that calls the program source's entry
    if( md == mdSourceEntry ) {
      if( !discovered.contains( mdAnalysisEntry ) ) {
        addDependent( mdSourceEntry,  // callee
                      mdAnalysisEntry // caller
        dfsVisit( mdAnalysisEntry, toSort, sorted, discovered );

    // otherwise call graph guides DFS
    Iterator itr = callGraph.getCallerSet( md ).iterator();
    while( itr.hasNext() ) {
      Descriptor dCaller = (Descriptor) itr.next();

      // only consider callers in the original set to analyze
      if( !toSort.contains( dCaller ) ) {

      if( !discovered.contains( dCaller ) ) {
        addDependent( md, // callee

        dfsVisit( dCaller, toSort, sorted, discovered );

  // post-order insertion at the front yields leaf-callees-first order
  sorted.addFirst( d );
// schedules d for (re)analysis at its recorded priority, unless it
// is already pending (descriptorsToVisitSet mirrors the queue)
protected void enqueue( Descriptor d ) {
  if( !descriptorsToVisitSet.contains( d ) ) {
    Integer priority = mapDescriptorToPriority.get( d );
    descriptorsToVisitQ.add( new DescriptorQWrapper( priority,
    descriptorsToVisitSet.add( d );
// current (possibly partial) reachability result for d, or null
// if d has not been analyzed yet
protected ReachGraph getPartial( Descriptor d ) {
  return mapDescriptorToCompleteReachGraph.get( d );
// records the latest reachability result for d; under the
// write-all-incremental-DOTs flag, also writes a uniquely
// numbered snapshot of the graph for this descriptor
protected void setPartial( Descriptor d, ReachGraph rg ) {
  mapDescriptorToCompleteReachGraph.put( d, rg );

  // when the flag for writing out every partial
  // result is set, we should spit out the graph,
  // but in order to give it a unique name we need
  // to track how many partial results for this
  // descriptor we've already written out
  if( writeAllIncrementalDOTs ) {
    if( !mapDescriptorToNumUpdates.containsKey( d ) ) {
      mapDescriptorToNumUpdates.put( d, new Integer( 0 ) );
    Integer n = mapDescriptorToNumUpdates.get( d );

    rg.writeGraph( d+"COMPLETE"+String.format( "%05d", n ),
                   true,  // write labels (variables)
                   true,  // selectively hide intermediate temp vars
                   true,  // prune unreachable heap regions
                   false, // show back edges to confirm graph validity
                   false, // show parameter indices (unmaintained!)
                   true,  // hide subset reachability states
                   true); // hide edge taints
    } catch( IOException e ) {}

    mapDescriptorToNumUpdates.put( d, n + 1 );
// a dependent of a method descriptor d for this analysis is:
//   1) a method or task that invokes d
//   2) in the descriptorsToAnalyze set
// registers caller as a dependent of callee so callers get
// re-enqueued when the callee's result changes
protected void addDependent( Descriptor callee, Descriptor caller ) {
  Set<Descriptor> deps = mapDescriptorToSetDependents.get( callee );
  if( deps == null ) {
    deps = new HashSet<Descriptor>();
  mapDescriptorToSetDependents.put( callee, deps );
// dependents of callee, never null -- an empty set is created
// (and cached) on first lookup
protected Set<Descriptor> getDependents( Descriptor callee ) {
  Set<Descriptor> deps = mapDescriptorToSetDependents.get( callee );
  if( deps == null ) {
    deps = new HashSet<Descriptor>();
    mapDescriptorToSetDependents.put( callee, deps );
// per-call-site initial-heap-model contributions for d, never
// null -- an empty table is created (and cached) on first lookup
public Hashtable<FlatCall, ReachGraph> getIHMcontributions( Descriptor d ) {

  Hashtable<FlatCall, ReachGraph> heapsFromCallers =
    mapDescriptorToIHMcontributions.get( d );

  if( heapsFromCallers == null ) {
    heapsFromCallers = new Hashtable<FlatCall, ReachGraph>();
    mapDescriptorToIHMcontributions.put( d, heapsFromCallers );

  return heapsFromCallers;
// IHM contribution of one call site fc to method d; an unseen
// site yields (and caches) an empty ReachGraph rather than null
public ReachGraph getIHMcontribution( Descriptor d,

  Hashtable<FlatCall, ReachGraph> heapsFromCallers =
    getIHMcontributions( d );

  if( !heapsFromCallers.containsKey( fc ) ) {
    heapsFromCallers.put( fc, new ReachGraph() );

  return heapsFromCallers.get( fc );
// records (or replaces) the IHM contribution of call site fc to
// method d with the given reachability graph
public void addIHMcontribution( Descriptor d,

  Hashtable<FlatCall, ReachGraph> heapsFromCallers =
    getIHMcontributions( d );

  heapsFromCallers.put( fc, rg );
// ---- debug-snapshot configuration ----
// get successive captures of the analysis state
boolean takeDebugSnapshots = false;
// only capture while analyzing the method with this symbol
String descSymbolDebug = "addBar";
boolean stopAfterCapture = true;

// increments every visit to debugSnapshot, don't fiddle with it
int debugCounter = 0;

// the value of debugCounter to start reporting the debugCounter
// to the screen to let user know what debug iteration we're at
int numStartCountReport = 0;

// the frequency of debugCounter values to print out, 0 no report
int freqCountReport = 0;

// the debugCounter value at which to start taking snapshots
int iterStartCapture = 0;

// the number of snapshots to take
int numIterToCapture = 300;
// writes a numbered DOT snapshot of rg while inside the configured
// capture window of debugCounter values, optionally reporting the
// counter to the console and stopping the analysis after the window
void debugSnapshot( ReachGraph rg, FlatNode fn ) {
  // past the capture window: nothing to do
  if( debugCounter > iterStartCapture + numIterToCapture ) {

  // periodic progress report of the counter value
  if( debugCounter > numStartCountReport &&
      freqCountReport > 0 &&
      debugCounter % freqCountReport == 0
    System.out.println( "    @@@ debug counter = "+

  if( debugCounter > iterStartCapture ) {
    System.out.println( "    @@@ capturing debug "+
                        (debugCounter - iterStartCapture)+
      String.format( "snap%04d",
                     debugCounter - iterStartCapture );
    graphName = graphName + fn;
    rg.writeGraph( graphName,
                   true,  // write labels (variables)
                   false, // selectively hide intermediate temp vars
                   false, // prune unreachable heap regions
                   false, // show back edges to confirm graph validity
                   true,  // hide subset reachability states
                   true );// hide edge taints
  } catch( Exception e ) {
    System.out.println( "Error writing debug capture." );

  if( debugCounter == iterStartCapture + numIterToCapture &&
    System.out.println( "Stopping analysis after debug captures." );