public class DisjointAnalysis {
+
+ ///////////////////////////////////////////
+ //
+ // Public interface to discover possible
+ // aliases in the program under analysis
+ //
+ ///////////////////////////////////////////
+
+ // Returns the set of flagged allocation sites reachable from task td.
+ // Guarded: throws via checkAnalysisComplete() if the interprocedural
+ // analysis has not yet finished running.
+ public HashSet<AllocSite>
+ getFlaggedAllocationSitesReachableFromTask(TaskDescriptor td) {
+ checkAnalysisComplete();
+ return getFlaggedAllocationSitesReachableFromTaskPRIVATE(td);
+ }
+
+ // Maps a FlatNew IR node to its analysis-level AllocSite abstraction.
+ // Guarded: analysis must be complete before calling.
+ public AllocSite getAllocationSiteFromFlatNew(FlatNew fn) {
+ checkAnalysisComplete();
+ return getAllocSiteFromFlatNewPRIVATE(fn);
+ }
+
+ // Looks up the AllocSite that created the heap region node with the
+ // given ID. Returns null if the ID is unknown to mapHrnIdToAllocSite.
+ public AllocSite getAllocationSiteFromHeapRegionNodeID(Integer id) {
+ checkAnalysisComplete();
+ return mapHrnIdToAllocSite.get(id);
+ }
+
+ // Reports heap regions through which parameters paramIndex1 and
+ // paramIndex2 of taskOrMethod may alias; empty set means no alias.
+ // Asserts that a completed reach graph exists for taskOrMethod.
+ public Set<HeapRegionNode> createsPotentialAliases(Descriptor taskOrMethod,
+ int paramIndex1,
+ int paramIndex2) {
+ checkAnalysisComplete();
+ ReachGraph rg=mapDescriptorToCompleteReachGraph.get(taskOrMethod);
+ FlatMethod fm=state.getMethodFlat(taskOrMethod);
+ assert(rg != null);
+ return rg.mayReachSharedObjects(fm, paramIndex1, paramIndex2);
+ }
+
+ // Reports heap regions through which parameter paramIndex of
+ // taskOrMethod may alias objects from allocation site alloc.
+ public Set<HeapRegionNode> createsPotentialAliases(Descriptor taskOrMethod,
+ int paramIndex, AllocSite alloc) {
+ checkAnalysisComplete();
+ ReachGraph rg = mapDescriptorToCompleteReachGraph.get(taskOrMethod);
+ FlatMethod fm=state.getMethodFlat(taskOrMethod);
+ assert (rg != null);
+ return rg.mayReachSharedObjects(fm, paramIndex, alloc);
+ }
+
+ // Argument-order-symmetric convenience overload of the
+ // (paramIndex, alloc) version above; delegates identically.
+ public Set<HeapRegionNode> createsPotentialAliases(Descriptor taskOrMethod,
+ AllocSite alloc, int paramIndex) {
+ checkAnalysisComplete();
+ ReachGraph rg = mapDescriptorToCompleteReachGraph.get(taskOrMethod);
+ FlatMethod fm=state.getMethodFlat(taskOrMethod);
+ assert (rg != null);
+ return rg.mayReachSharedObjects(fm, paramIndex, alloc);
+ }
+
+ // Reports heap regions through which objects from allocation sites
+ // alloc1 and alloc2 may alias in the context of taskOrMethod.
+ // Note: unlike the parameter-based overloads, no FlatMethod is needed.
+ public Set<HeapRegionNode> createsPotentialAliases(Descriptor taskOrMethod,
+ AllocSite alloc1, AllocSite alloc2) {
+ checkAnalysisComplete();
+ ReachGraph rg = mapDescriptorToCompleteReachGraph.get(taskOrMethod);
+ assert (rg != null);
+ return rg.mayReachSharedObjects(alloc1, alloc2);
+ }
+
+ // Renders a set of heap region nodes as a human-readable listing,
+ // one node per line; when a node has an allocation site its verbose
+ // description is appended after the node.
+ // Fix: accumulate in a StringBuilder instead of repeated String
+ // concatenation, which copied the whole buffer on every iteration
+ // (O(n^2) for large node sets). Output text is byte-identical.
+ public String prettyPrintNodeSet(Set<HeapRegionNode> s) {
+ checkAnalysisComplete();
+
+ StringBuilder out = new StringBuilder("{\n");
+
+ Iterator<HeapRegionNode> i = s.iterator();
+ while (i.hasNext()) {
+ HeapRegionNode n = i.next();
+
+ AllocSite as = n.getAllocSite();
+ if (as == null) {
+ out.append(" ").append(n.toString()).append(",\n");
+ } else {
+ out.append(" ").append(n.toString()).append(": ")
+ .append(as.toStringVerbose()).append(",\n");
+ }
+ }
+
+ out.append("}\n");
+ return out.toString();
+ }
+
+ // use the methods given above to check every possible alias
+ // between task parameters and flagged allocation sites reachable
+ // from the task
+ //
+ // Writes results to outputFile. In non-tabular mode every alias is
+ // written verbosely; in tabular mode aliases are only counted in
+ // numAlias (the summary write that consumed numAlias/justTime/numLines
+ // is currently commented out at the bottom of this method).
+ // NOTE(review): bw is not closed in a finally block, so the writer
+ // (and buffered output) leaks if any write throws -- consider
+ // wrapping in try/finally.
+ public void writeAllAliases(String outputFile,
+ String timeReport,
+ String justTime,
+ boolean tabularOutput,
+ int numLines
+)
+ throws java.io.IOException {
+ checkAnalysisComplete();
+
+ BufferedWriter bw = new BufferedWriter(new FileWriter(outputFile));
+
+ if (!tabularOutput) {
+ bw.write("Conducting ownership analysis with allocation depth = "
+ + allocationDepth + "\n");
+ bw.write(timeReport + "\n");
+ }
+
+ int numAlias = 0;
+
+ // look through every task for potential aliases
+ Iterator taskItr = state.getTaskSymbolTable().getDescriptorsIterator();
+ while (taskItr.hasNext()) {
+ TaskDescriptor td = (TaskDescriptor) taskItr.next();
+
+ if (!tabularOutput) {
+ bw.write("\n---------" + td + "--------\n");
+ }
+
+ HashSet<AllocSite> allocSites = getFlaggedAllocationSitesReachableFromTask(td);
+
+ Set<HeapRegionNode> common;
+
+ // for each task parameter, check for aliases with
+ // other task parameters and every allocation site
+ // reachable from this task
+ boolean foundSomeAlias = false;
+
+ FlatMethod fm = state.getMethodFlat(td);
+ for (int i = 0; i < fm.numParameters(); ++i) {
+
+ // for the ith parameter check for aliases to all
+ // higher numbered parameters (j > i avoids checking
+ // the same unordered pair twice)
+ for (int j = i + 1; j < fm.numParameters(); ++j) {
+ common = createsPotentialAliases(td, i, j);
+ if (!common.isEmpty()) {
+ foundSomeAlias = true;
+ if (!tabularOutput) {
+ bw.write("Potential alias between parameters " + i
+ + " and " + j + ".\n");
+ bw.write(prettyPrintNodeSet(common) + "\n");
+ } else {
+ ++numAlias;
+ }
+ }
+ }
+
+ // for the ith parameter, check for aliases against
+ // the set of allocation sites reachable from this
+ // task context
+ Iterator allocItr = allocSites.iterator();
+ while (allocItr.hasNext()) {
+ AllocSite as = (AllocSite) allocItr.next();
+ common = createsPotentialAliases(td, i, as);
+ if (!common.isEmpty()) {
+ foundSomeAlias = true;
+ if (!tabularOutput) {
+ bw.write("Potential alias between parameter " + i
+ + " and " + as.getFlatNew() + ".\n");
+ bw.write(prettyPrintNodeSet(common) + "\n");
+ } else {
+ ++numAlias;
+ }
+ }
+ }
+ }
+
+ // for each allocation site check for aliases with
+ // other allocation sites in the context of execution
+ // of this task; outerChecked ensures each unordered
+ // site pair is examined only once
+ HashSet<AllocSite> outerChecked = new HashSet<AllocSite>();
+ Iterator allocItr1 = allocSites.iterator();
+ while (allocItr1.hasNext()) {
+ AllocSite as1 = (AllocSite) allocItr1.next();
+
+ Iterator allocItr2 = allocSites.iterator();
+ while (allocItr2.hasNext()) {
+ AllocSite as2 = (AllocSite) allocItr2.next();
+
+ if (!outerChecked.contains(as2)) {
+ common = createsPotentialAliases(td, as1, as2);
+
+ if (!common.isEmpty()) {
+ foundSomeAlias = true;
+ if (!tabularOutput) {
+ bw.write("Potential alias between "
+ + as1.getFlatNew() + " and "
+ + as2.getFlatNew() + ".\n");
+ bw.write(prettyPrintNodeSet(common) + "\n");
+ } else {
+ ++numAlias;
+ }
+ }
+ }
+ }
+
+ outerChecked.add(as1);
+ }
+
+ if (!foundSomeAlias) {
+ if (!tabularOutput) {
+ bw.write("No aliases between flagged objects in Task " + td
+ + ".\n");
+ }
+ }
+ }
+
+ /*
+ if (!tabularOutput) {
+ bw.write("\n" + computeAliasContextHistogram());
+ } else {
+ bw.write(" & " + numAlias + " & " + justTime + " & " + numLines
+ + " & " + numMethodsAnalyzed() + " \\\\\n");
+ }
+ */
+
+ bw.close();
+ }
+
+ // this version of writeAllAliases is for Java programs that have no tasks
+ //
+ // Only alias pairs among flagged allocation sites reachable from the
+ // program's main method are reported. Parameters justTime,
+ // tabularOutput and numLines are currently unused here -- presumably
+ // retained for signature symmetry with writeAllAliases; TODO confirm.
+ // NOTE(review): bw is not closed in a finally block, so it leaks if
+ // a write throws.
+ public void writeAllAliasesJava(String outputFile,
+ String timeReport,
+ String justTime,
+ boolean tabularOutput,
+ int numLines
+)
+ throws java.io.IOException {
+ checkAnalysisComplete();
+
+ // this path is only valid for non-task (plain Java) programs
+ assert !state.TASK;
+
+ BufferedWriter bw = new BufferedWriter(new FileWriter(outputFile));
+
+ bw.write("Conducting ownership analysis with allocation depth = "
+ + allocationDepth + "\n");
+ bw.write(timeReport + "\n\n");
+
+ boolean foundSomeAlias = false;
+
+ Descriptor d = typeUtil.getMain();
+ HashSet<AllocSite> allocSites = getFlaggedAllocationSites(d);
+
+ // for each allocation site check for aliases with
+ // other allocation sites in the context of execution
+ // of the main method; outerChecked ensures each
+ // unordered site pair is examined only once
+ HashSet<AllocSite> outerChecked = new HashSet<AllocSite>();
+ Iterator allocItr1 = allocSites.iterator();
+ while (allocItr1.hasNext()) {
+ AllocSite as1 = (AllocSite) allocItr1.next();
+
+ Iterator allocItr2 = allocSites.iterator();
+ while (allocItr2.hasNext()) {
+ AllocSite as2 = (AllocSite) allocItr2.next();
+
+ if (!outerChecked.contains(as2)) {
+ Set<HeapRegionNode> common = createsPotentialAliases(d,
+ as1, as2);
+
+ if (!common.isEmpty()) {
+ foundSomeAlias = true;
+ bw.write("Potential alias between "
+ + as1.getDisjointAnalysisId() + " and "
+ + as2.getDisjointAnalysisId() + ".\n");
+ bw.write(prettyPrintNodeSet(common) + "\n");
+ }
+ }
+ }
+
+ outerChecked.add(as1);
+ }
+
+ if (!foundSomeAlias) {
+ bw.write("No aliases between flagged objects found.\n");
+ }
+
+// bw.write("\n" + computeAliasContextHistogram());
+ bw.close();
+ }
+
+ ///////////////////////////////////////////
+ //
+ // end public interface
+ //
+ ///////////////////////////////////////////
+
+ // Guard for all public-interface methods: results are only valid
+ // once the interprocedural fixed point has settled.
+ // Fix: the thrown java.lang.Error is fatal, so the message should
+ // not be prefixed "Warning:" -- that mislabels the severity.
+ protected void checkAnalysisComplete() {
+ if( !analysisComplete ) {
+ throw new Error("Error: public interface method called while analysis is running.");
+ }
+ }
// data from the compiler
public ArrayReferencees arrayReferencees;
public TypeUtil typeUtil;
public int allocationDepth;
+
+ // data structure for public interface
+ private Hashtable<Descriptor, HashSet<AllocSite> > mapDescriptorToAllocSiteSet;
+
+
+ // for public interface methods to warn that they
+ // are grabbing results during analysis
+ private boolean analysisComplete;
// used to identify HeapRegionNode objects
// heap region
// start at 10 and increment to reserve some
// IDs for special purposes
- static private int uniqueIDcount = 10;
+ static protected int uniqueIDcount = 10;
// An out-of-scope method created by the
// main method. The purpose of this is to
// provide the analysis with an explicit
// top-level context with no parameters
- private MethodDescriptor mdAnalysisEntry;
- private FlatMethod fmAnalysisEntry;
+ protected MethodDescriptor mdAnalysisEntry;
+ protected FlatMethod fmAnalysisEntry;
// main method defined by source program
- private MethodDescriptor mdSourceEntry;
+ protected MethodDescriptor mdSourceEntry;
// the set of task and/or method descriptors
// reachable in call graph
- private Set<Descriptor> descriptorsToAnalyze;
-
+ protected Set<Descriptor>
+ descriptorsToAnalyze;
+
+ // current descriptors to visit in fixed-point
+ // interprocedural analysis, prioritized by
+ // dependency in the call graph
+ protected PriorityQueue<DescriptorQWrapper>
+ descriptorsToVisitQ;
+
+ // a duplication of the above structure, but
+ // for efficient testing of inclusion
+ protected HashSet<Descriptor>
+ descriptorsToVisitSet;
+
+ // storage for priorities (it doesn't make sense
+ // to add a priority field to the Descriptor class,
+ // so keep the mapping local to this analysis)
+ protected Hashtable<Descriptor, Integer>
+ mapDescriptorToPriority;
+
+
+ // maps a descriptor to its current partial result
+ // from the intraprocedural fixed-point analysis--
+ // then the interprocedural analysis settles, this
+ // mapping will have the final results for each
+ // method descriptor
+ protected Hashtable<Descriptor, ReachGraph>
+ mapDescriptorToCompleteReachGraph;
+
+ // maps a descriptor to its known dependents: namely
+ // methods or tasks that call the descriptor's method
+ // AND are part of this analysis (reachable from main)
+ protected Hashtable< Descriptor, Set<Descriptor> >
+ mapDescriptorToSetDependents;
+
+ // maps each flat new to one analysis abstraction
+ // allocate site object, these exist outside reach graphs
+ protected Hashtable<FlatNew, AllocSite>
+ mapFlatNewToAllocSite;
+
+ // maps intergraph heap region IDs to intergraph
+ // allocation sites that created them, a redundant
+ // structure for efficiency in some operations
+ protected Hashtable<Integer, AllocSite>
+ mapHrnIdToAllocSite;
+
+ // maps a method to its initial heap model (IHM) that
+ // is the set of reachability graphs from every caller
+ // site, all merged together. The reason that we keep
+ // them separate is that any one call site's contribution
+ // to the IHM may change along the path to the fixed point
+ protected Hashtable< Descriptor, Hashtable< FlatCall, ReachGraph > >
+ mapDescriptorToIHMcontributions;
+
+ // TODO -- CHANGE EDGE/TYPE/FIELD storage!
+ public static final String arrayElementFieldName = "___element_";
+ static protected Hashtable<TypeDescriptor, FieldDescriptor>
+ mapTypeToArrayField;
// for controlling DOT file output
- private boolean writeFinalDOTs;
- private boolean writeAllIncrementalDOTs;
+ protected boolean writeFinalDOTs;
+ protected boolean writeAllIncrementalDOTs;
+
+ // supporting DOT output--when we want to write every
+ // partial method result, keep a tally for generating
+ // unique filenames
+ protected Hashtable<Descriptor, Integer>
+ mapDescriptorToNumUpdates;
+
+ //map task descriptor to initial task parameter
+ protected Hashtable<Descriptor, ReachGraph>
+ mapDescriptorToReachGraph;
+
+
+ // allocate various structures that are not local
+ // to a single class method--should be done once
+ //
+ // Instantiates every shared map/queue/set used by the analysis:
+ // the work set, per-descriptor result and dependency maps, the
+ // FlatNew->AllocSite and heap-region-ID->AllocSite maps, IHM
+ // contributions, the priority queue plus its membership set, and
+ // DOT/ReachGraph bookkeeping tables. Called once from init().
+ protected void allocateStructures() {
+ descriptorsToAnalyze = new HashSet<Descriptor>();
+
+ mapDescriptorToCompleteReachGraph =
+ new Hashtable<Descriptor, ReachGraph>();
+
+ mapDescriptorToNumUpdates =
+ new Hashtable<Descriptor, Integer>();
+
+ mapDescriptorToSetDependents =
+ new Hashtable< Descriptor, Set<Descriptor> >();
+
+ mapFlatNewToAllocSite =
+ new Hashtable<FlatNew, AllocSite>();
+
+ mapDescriptorToIHMcontributions =
+ new Hashtable< Descriptor, Hashtable< FlatCall, ReachGraph > >();
+
+ mapHrnIdToAllocSite =
+ new Hashtable<Integer, AllocSite>();
+
+ mapTypeToArrayField =
+ new Hashtable <TypeDescriptor, FieldDescriptor>();
+
+ descriptorsToVisitQ =
+ new PriorityQueue<DescriptorQWrapper>();
+
+ descriptorsToVisitSet =
+ new HashSet<Descriptor>();
+
+ mapDescriptorToPriority =
+ new Hashtable<Descriptor, Integer>();
+
+ mapDescriptorToAllocSiteSet =
+ new Hashtable<Descriptor, HashSet<AllocSite> >();
+
+ mapDescriptorToReachGraph =
+ new Hashtable<Descriptor, ReachGraph>();
+ }
+
// this analysis generates a disjoint reachability
// graph for every reachable method in the program
- public DisjointAnalysis( State s,
- TypeUtil tu,
- CallGraph cg,
- Liveness l,
+ public DisjointAnalysis( State s,
+ TypeUtil tu,
+ CallGraph cg,
+ Liveness l,
ArrayReferencees ar
) throws java.io.IOException {
init( s, tu, cg, l, ar );
}
- private void init( State state,
- TypeUtil typeUtil,
- CallGraph callGraph,
- Liveness liveness,
- ArrayReferencees arrayReferencees
- ) throws java.io.IOException {
+ protected void init( State state,
+ TypeUtil typeUtil,
+ CallGraph callGraph,
+ Liveness liveness,
+ ArrayReferencees arrayReferencees
+ ) throws java.io.IOException {
+
+ analysisComplete = false;
this.state = state;
this.typeUtil = typeUtil;
this.allocationDepth = state.DISJOINTALLOCDEPTH;
this.writeFinalDOTs = state.DISJOINTWRITEDOTS && !state.DISJOINTWRITEALL;
this.writeAllIncrementalDOTs = state.DISJOINTWRITEDOTS && state.DISJOINTWRITEALL;
-
// set some static configuration for ReachGraphs
ReachGraph.allocationDepth = allocationDepth;
ReachGraph.typeUtil = typeUtil;
-
- // This analysis does not support Bamboo at the moment,
- // but if it does in the future we would initialize the
- // set of descriptors to analyze as the program-reachable
- // tasks and the methods callable by them. For Java,
- // just methods reachable from the main method.
- assert !state.TASK;
- descriptorsToAnalyze = new HashSet<Descriptor>();
-
+ allocateStructures();
double timeStartAnalysis = (double) System.nanoTime();
// start interprocedural fixed-point computation
analyzeMethods();
+ analysisComplete=true;
double timeEndAnalysis = (double) System.nanoTime();
double dt = (timeEndAnalysis - timeStartAnalysis)/(Math.pow( 10.0, 9.0 ) );
System.out.println( treport );
if( writeFinalDOTs && !writeAllIncrementalDOTs ) {
- //writeFinalContextGraphs();
- }
+ writeFinalGraphs();
+ }
+
+ if( state.DISJOINTWRITEIHMS ) {
+ writeFinalIHMs();
+ }
if( state.DISJOINTALIASFILE != null ) {
if( state.TASK ) {
// not supporting tasks yet...
+ writeAllAliases(state.OWNERSHIPALIASFILE, treport, justtime, state.OWNERSHIPALIASTAB, state.lines);
} else {
/*
writeAllAliasesJava( aliasFile,
}
-
// fixed-point computation over the call graph--when a
// method's callees are updated, it must be reanalyzed
- private void analyzeMethods() throws java.io.IOException {
-
- mdSourceEntry = typeUtil.getMain();
-
- // add all methods transitively reachable from the
- // source's main to set for analysis
- descriptorsToAnalyze.add( mdSourceEntry );
- descriptorsToAnalyze.addAll(
- callGraph.getAllMethods( mdSourceEntry )
- );
+ protected void analyzeMethods() throws java.io.IOException {
+
+ if( state.TASK ) {
+ // This analysis does not support Bamboo at the moment,
+ // but if it does in the future we would initialize the
+ // set of descriptors to analyze as the program-reachable
+ // tasks and the methods callable by them. For Java,
+ // just methods reachable from the main method.
+ System.out.println( "Bamboo..." );
+ Iterator taskItr = state.getTaskSymbolTable().getDescriptorsIterator();
+
+ while (taskItr.hasNext()) {
+ TaskDescriptor td = (TaskDescriptor) taskItr.next();
+ if (!descriptorsToAnalyze.contains(td)) {
+ descriptorsToAnalyze.add(td);
+ descriptorsToAnalyze.addAll(callGraph.getAllMethods(td));
+ }
+ }
- // fabricate an empty calling context that will call
- // the source's main, but call graph doesn't know
- // about it, so explicitly add it
- makeAnalysisEntryMethod( mdSourceEntry );
- descriptorsToAnalyze.add( mdAnalysisEntry );
+ } else {
+ // add all methods transitively reachable from the
+ // source's main to set for analysis
+ mdSourceEntry = typeUtil.getMain();
+ descriptorsToAnalyze.add( mdSourceEntry );
+ descriptorsToAnalyze.addAll(
+ callGraph.getAllMethods( mdSourceEntry )
+ );
+
+ // fabricate an empty calling context that will call
+ // the source's main, but call graph doesn't know
+ // about it, so explicitly add it
+ makeAnalysisEntryMethod( mdSourceEntry );
+ descriptorsToAnalyze.add( mdAnalysisEntry );
+ }
// topologically sort according to the call graph so
// leaf calls are ordered first, smarter analysis order
topologicalSort( descriptorsToAnalyze );
// add sorted descriptors to priority queue, and duplicate
- // the queue as a set for testing whether some method
- // is marked for analysis
- PriorityQueue<DescriptorQWrapper> descriptorsToVisitQ
- = new PriorityQueue<DescriptorQWrapper>();
-
- HashSet<Descriptor> descriptorsToVisitSet
- = new HashSet<Descriptor>();
-
- Hashtable<Descriptor, Integer> mapDescriptorToPriority
- = new Hashtable<Descriptor, Integer>();
-
+ // the queue as a set for efficiently testing whether some
+ // method is marked for analysis
int p = 0;
Iterator<Descriptor> dItr = sortedDescriptors.iterator();
while( dItr.hasNext() ) {
// because the task or method descriptor just extracted
// was in the "to visit" set it either hasn't been analyzed
// yet, or some method that it depends on has been
- // updated. Recompute a complete ownership graph for
+ // updated. Recompute a complete reachability graph for
// this task/method and compare it to any previous result.
// If there is a change detected, add any methods/tasks
// that depend on this one to the "to visit" set.
System.out.println( "Analyzing " + d );
- // get the flat code for this method descriptor
- FlatMethod fm;
- if( d == mdAnalysisEntry ) {
- fm = fmAnalysisEntry;
- } else {
- fm = state.getMethodFlat( d );
- }
+ ReachGraph rg = analyzeMethod( d );
+ ReachGraph rgPrev = getPartial( d );
+ if( !rg.equals( rgPrev ) ) {
+ setPartial( d, rg );
- /*
- ReachGraph og = analyzeFlatMethod(mc, fm);
- ReachGraph ogPrev = mapDescriptorToCompleteReachabilityGraph.get(mc);
- if( !og.equals(ogPrev) ) {
- setGraphForDescriptor(mc, og);
-
- Iterator<Descriptor> depsItr = iteratorDependents( mc );
+ // results for d changed, so enqueue dependents
+ // of d for further analysis
+ Iterator<Descriptor> depsItr = getDependents( d ).iterator();
while( depsItr.hasNext() ) {
- Descriptor mcNext = depsItr.next();
-
- if( !descriptorsToVisitSet.contains( mcNext ) ) {
- descriptorsToVisitQ.add( new DescriptorQWrapper( mapDescriptorToPriority.get( mcNext.getDescriptor() ),
- mcNext ) );
- descriptorsToVisitSet.add( mcNext );
- }
+ Descriptor dNext = depsItr.next();
+ enqueue( dNext );
}
- }
- */
+ }
}
}
+ protected ReachGraph analyzeMethod( Descriptor d )
+ throws java.io.IOException {
+ // get the flat code for this descriptor
+ FlatMethod fm;
+ if( d == mdAnalysisEntry ) {
+ fm = fmAnalysisEntry;
+ } else {
+ fm = state.getMethodFlat( d );
+ }
+
+ // intraprocedural work set
+ Set<FlatNode> flatNodesToVisit = new HashSet<FlatNode>();
+ flatNodesToVisit.add( fm );
+
+ // mapping of current partial results
+ Hashtable<FlatNode, ReachGraph> mapFlatNodeToReachGraph =
+ new Hashtable<FlatNode, ReachGraph>();
-
- /*
- // keep passing the Descriptor of the method along for debugging
- // and dot file writing
- private ReachGraph
- analyzeFlatMethod(MethodContext mc,
- FlatMethod flatm) throws java.io.IOException {
-
- // initialize flat nodes to visit as the flat method
- // because it is the entry point
-
- flatNodesToVisit = new HashSet<FlatNode>();
- flatNodesToVisit.add(flatm);
-
- // initilize the mapping of flat nodes in this flat method to
- // ownership graph results to an empty mapping
- mapFlatNodeToReachabilityGraph = new Hashtable<FlatNode, ReachGraph>();
-
- // initialize the set of return nodes that will be combined as
- // the final ownership graph result to return as an empty set
- returnNodesToCombineForCompleteReachabilityGraph = new HashSet<FlatReturnNode>();
-
+ // the set of return nodes partial results that will be combined as
+ // the final, conservative approximation of the entire method
+ HashSet<FlatReturnNode> setReturns = new HashSet<FlatReturnNode>();
while( !flatNodesToVisit.isEmpty() ) {
FlatNode fn = (FlatNode) flatNodesToVisit.iterator().next();
- flatNodesToVisit.remove(fn);
+ flatNodesToVisit.remove( fn );
//System.out.println( " "+fn );
- // perform this node's contributions to the ownership
- // graph on a new copy, then compare it to the old graph
- // at this node to see if anything was updated.
- ReachGraph og = new ReachGraph();
+ // effect transfer function defined by this node,
+ // then compare it to the old graph at this node
+ // to see if anything was updated.
+
+ ReachGraph rg = new ReachGraph();
+ TaskDescriptor taskDesc;
+ if(fn instanceof FlatMethod && (taskDesc=((FlatMethod)fn).getTask())!=null){
+ if(mapDescriptorToReachGraph.containsKey(taskDesc)){
+ // retrieve existing reach graph if it is not first time
+ rg=mapDescriptorToReachGraph.get(taskDesc);
+ }else{
+ // create initial reach graph for a task
+ rg=createInitialTaskReachGraph((FlatMethod)fn);
+ rg.globalSweep();
+ mapDescriptorToReachGraph.put(taskDesc, rg);
+ }
+ }
// start by merging all node's parents' graphs
for( int i = 0; i < fn.numPrev(); ++i ) {
- FlatNode pn = fn.getPrev(i);
- if( mapFlatNodeToReachabilityGraph.containsKey(pn) ) {
- ReachabilityGraph ogParent = mapFlatNodeToReachabilityGraph.get(pn);
- og.merge(ogParent);
+ FlatNode pn = fn.getPrev( i );
+ if( mapFlatNodeToReachGraph.containsKey( pn ) ) {
+ ReachGraph rgParent = mapFlatNodeToReachGraph.get( pn );
+// System.out.println("parent="+pn+"->"+rgParent);
+ rg.merge( rgParent );
}
}
- // apply the analysis of the flat node to the
- // ownership graph made from the merge of the
- // parent graphs
- og = analyzeFlatNode(mc,
- flatm,
- fn,
- returnNodesToCombineForCompleteReachabilityGraph,
- og);
-
-
-
if( takeDebugSnapshots &&
- mc.getDescriptor().getSymbol().equals( mcDescSymbolDebug ) ) {
- debugSnapshot(og,fn);
+ d.getSymbol().equals( descSymbolDebug )
+ ) {
+ debugSnapshot( rg, fn, true );
}
+
+
+ // modify rg with appropriate transfer function
+ rg = analyzeFlatNode( d, fm, fn, setReturns, rg );
+ if( takeDebugSnapshots &&
+ d.getSymbol().equals( descSymbolDebug )
+ ) {
+ debugSnapshot( rg, fn, false );
+ }
+
+
// if the results of the new graph are different from
// the current graph at this node, replace the graph
- // with the update and enqueue the children for
- // processing
- ReachGraph ogPrev = mapFlatNodeToReachabilityGraph.get(fn);
- if( !og.equals(ogPrev) ) {
- mapFlatNodeToReachabilityGraph.put(fn, og);
+ // with the update and enqueue the children
+ ReachGraph rgPrev = mapFlatNodeToReachGraph.get( fn );
+ if( !rg.equals( rgPrev ) ) {
+ mapFlatNodeToReachGraph.put( fn, rg );
for( int i = 0; i < fn.numNext(); i++ ) {
- FlatNode nn = fn.getNext(i);
- flatNodesToVisit.add(nn);
+ FlatNode nn = fn.getNext( i );
+ flatNodesToVisit.add( nn );
}
}
}
// ownership graph that represents all possible heap
// states after the flat method returns
ReachGraph completeGraph = new ReachGraph();
- Iterator retItr = returnNodesToCombineForCompleteReachabilityGraph.iterator();
+
+ assert !setReturns.isEmpty();
+ Iterator retItr = setReturns.iterator();
while( retItr.hasNext() ) {
FlatReturnNode frn = (FlatReturnNode) retItr.next();
- assert mapFlatNodeToReachabilityGraph.containsKey(frn);
- ReachGraph ogr = mapFlatNodeToReachabilityGraph.get(frn);
- completeGraph.merge(ogr);
- }
+ assert mapFlatNodeToReachGraph.containsKey( frn );
+ ReachGraph rgRet = mapFlatNodeToReachGraph.get( frn );
+
+ completeGraph.merge( rgRet );
+ }
return completeGraph;
}
+
+ protected ReachGraph
+ analyzeFlatNode( Descriptor d,
+ FlatMethod fmContaining,
+ FlatNode fn,
+ HashSet<FlatReturnNode> setRetNodes,
+ ReachGraph rg
+ ) throws java.io.IOException {
- private ReachGraph
- analyzeFlatNode(MethodContext mc,
- FlatMethod fmContaining,
- FlatNode fn,
- HashSet<FlatReturnNode> setRetNodes,
- ReachGraph og) throws java.io.IOException {
-
-
+
// any variables that are no longer live should be
// nullified in the graph to reduce edges
- // NOTE: it is not clear we need this. It costs a
- // liveness calculation for every method, so only
- // turn it on if we find we actually need it.
- //og.nullifyDeadVars( liveness.getLiveInTemps( fmContaining, fn ) );
+ //rg.nullifyDeadVars( liveness.getLiveInTemps( fmContaining, fn ) );
- TempDescriptor lhs;
- TempDescriptor rhs;
+ TempDescriptor lhs;
+ TempDescriptor rhs;
FieldDescriptor fld;
- // use node type to decide what alterations to make
- // to the ownership graph
+ // use node type to decide what transfer function
+ // to apply to the reachability graph
switch( fn.kind() ) {
- case FKind.FlatMethod:
- FlatMethod fm = (FlatMethod) fn;
-
- // there should only be one FlatMethod node as the
- // parent of all other FlatNode objects, so take
- // the opportunity to construct the initial graph by
- // adding parameters labels to new heap regions
- // AND this should be done once globally so that the
- // parameter IDs are consistent between analysis
- // iterations, so if this step has been done already
- // just merge in the cached version
- ReachGraph ogInitParamAlloc = mapMethodContextToInitialParamAllocGraph.get(mc);
- if( ogInitParamAlloc == null ) {
-
- // if the method context has aliased parameters, make sure
- // there is a blob region for all those param to reference
- Set<Integer> aliasedParamIndices = mc.getAliasedParamIndices();
-
- if( !aliasedParamIndices.isEmpty() ) {
- og.makeAliasedParamHeapRegionNode(fm);
- }
-
- // set up each parameter
- for( int i = 0; i < fm.numParameters(); ++i ) {
- TempDescriptor tdParam = fm.getParameter( i );
- TypeDescriptor typeParam = tdParam.getType();
- Integer paramIndex = new Integer( i );
+ case FKind.FlatMethod: {
+ // construct this method's initial heap model (IHM)
+ // since we're working on the FlatMethod, we know
+ // the incoming ReachGraph 'rg' is empty
- if( typeParam.isImmutable() && !typeParam.isArray() ) {
- // don't bother with this primitive parameter, it
- // cannot affect reachability
- continue;
- }
+ Hashtable<FlatCall, ReachGraph> heapsFromCallers =
+ getIHMcontributions( d );
- if( aliasedParamIndices.contains( paramIndex ) ) {
- // use the alias blob but give parameters their
- // own primary obj region
- og.assignTempEqualToAliasedParam( tdParam,
- paramIndex, fm );
- } else {
- // this parameter is not aliased to others, give it
- // a fresh primary obj and secondary object
- og.assignTempEqualToParamAlloc( tdParam,
- mc.getDescriptor() instanceof TaskDescriptor,
- paramIndex, fm );
- }
- }
-
- // add additional edges for aliased regions if necessary
- if( !aliasedParamIndices.isEmpty() ) {
- og.addParam2ParamAliasEdges( fm, aliasedParamIndices );
- }
-
- // clean up reachability on initial parameter shapes
- og.globalSweep();
+ Set entrySet = heapsFromCallers.entrySet();
+ Iterator itr = entrySet.iterator();
+ while( itr.hasNext() ) {
+ Map.Entry me = (Map.Entry) itr.next();
+ FlatCall fc = (FlatCall) me.getKey();
+ ReachGraph rgContrib = (ReachGraph) me.getValue();
- // this maps tokens to parameter indices and vice versa
- // for when this method is a callee
- og.prepareParamTokenMaps( fm );
+ assert fc.getMethod().equals( d );
- // cache the graph
- ReachGraph ogResult = new ReachGraph();
- ogResult.merge(og);
- mapMethodContextToInitialParamAllocGraph.put(mc, ogResult);
+ // some call sites are in same method context though,
+ // and all of them should be merged together first,
+ // then heaps from different contexts should be merged
+ // THIS ASSUMES DIFFERENT CONTEXTS NEED SPECIAL CONSIDERATION!
+ // such as, do allocation sites need to be aged?
- } else {
- // or just leverage the cached copy
- og.merge(ogInitParamAlloc);
- }
- break;
+ rg.merge_diffMethodContext( rgContrib );
+ }
+ } break;
case FKind.FlatOpNode:
FlatOpNode fon = (FlatOpNode) fn;
if( fon.getOp().getOp() == Operation.ASSIGN ) {
lhs = fon.getDest();
rhs = fon.getLeft();
- og.assignTempXEqualToTempY(lhs, rhs);
+ rg.assignTempXEqualToTempY( lhs, rhs );
}
break;
TypeDescriptor td = fcn.getType();
assert td != null;
- og.assignTempXEqualToCastedTempY(lhs, rhs, td);
+ rg.assignTempXEqualToCastedTempY( lhs, rhs, td );
break;
case FKind.FlatFieldNode:
rhs = ffn.getSrc();
fld = ffn.getField();
if( !fld.getType().isImmutable() || fld.getType().isArray() ) {
- og.assignTempXEqualToTempYFieldF(lhs, rhs, fld);
- }
-
- meAnalysis.analyzeFlatFieldNode(mc, og, rhs, fld);
-
+ rg.assignTempXEqualToTempYFieldF( lhs, rhs, fld );
+ }
break;
case FKind.FlatSetFieldNode:
fld = fsfn.getField();
rhs = fsfn.getSrc();
if( !fld.getType().isImmutable() || fld.getType().isArray() ) {
- og.assignTempXFieldFEqualToTempY(lhs, fld, rhs);
- }
-
- meAnalysis.analyzeFlatSetFieldNode(mc, og, lhs, fld);
-
+ rg.assignTempXFieldFEqualToTempY( lhs, fld, rhs );
+ }
break;
case FKind.FlatElementNode:
TypeDescriptor tdElement = rhs.getType().dereference();
FieldDescriptor fdElement = getArrayField( tdElement );
- og.assignTempXEqualToTempYFieldF(lhs, rhs, fdElement);
+ rg.assignTempXEqualToTempYFieldF( lhs, rhs, fdElement );
}
break;
TypeDescriptor tdElement = lhs.getType().dereference();
FieldDescriptor fdElement = getArrayField( tdElement );
- og.assignTempXFieldFEqualToTempY(lhs, fdElement, rhs);
+ rg.assignTempXFieldFEqualToTempY( lhs, fdElement, rhs );
}
break;
-
+
case FKind.FlatNew:
FlatNew fnn = (FlatNew) fn;
lhs = fnn.getDst();
if( !lhs.getType().isImmutable() || lhs.getType().isArray() ) {
- AllocSite as = getAllocSiteFromFlatNewPRIVATE(fnn);
-
- if (mapMethodContextToLiveInAllocSiteSet != null){
- HashSet<AllocSite> alllocSet=mapMethodContextToLiveInAllocSiteSet.get(mc);
- if(alllocSet!=null){
- for (Iterator iterator = alllocSet.iterator(); iterator
- .hasNext();) {
- AllocSite allocationSite = (AllocSite) iterator
- .next();
- if(allocationSite.flatNew.equals(as.flatNew)){
- as.setFlag(true);
- }
- }
- }
- }
-
- og.assignTempEqualToNewAlloc(lhs, as);
+ AllocSite as = getAllocSiteFromFlatNewPRIVATE( fnn );
+ rg.assignTempEqualToNewAlloc( lhs, as );
}
break;
- case FKind.FlatCall:
- FlatCall fc = (FlatCall) fn;
- MethodDescriptor md = fc.getMethod();
- FlatMethod flatm = state.getMethodFlat(md);
- ReachGraph ogMergeOfAllPossibleCalleeResults = new ReachGraph();
-
- if( md.isStatic() ) {
- // a static method is simply always the same, makes life easy
- ogMergeOfAllPossibleCalleeResults = og;
+ case FKind.FlatCall: {
+ //TODO: temporal fix for task descriptor case
+ //MethodDescriptor mdCaller = fmContaining.getMethod();
+ Descriptor mdCaller;
+ if(fmContaining.getMethod()!=null){
+ mdCaller = fmContaining.getMethod();
+ }else{
+ mdCaller = fmContaining.getTask();
+ }
+ FlatCall fc = (FlatCall) fn;
+ MethodDescriptor mdCallee = fc.getMethod();
+ FlatMethod fmCallee = state.getMethodFlat( mdCallee );
+
+ boolean writeDebugDOTs =
+ mdCaller.getSymbol().equals( state.DISJOINTDEBUGCALLER ) &&
+ mdCallee.getSymbol().equals( state.DISJOINTDEBUGCALLEE );
+
+
+ // calculate the heap this call site can reach--note this is
+ // not used for the current call site transform, we are
+ // grabbing this heap model for future analysis of the callees,
+ // so if different results emerge we will return to this site
+ ReachGraph heapForThisCall_old =
+ getIHMcontribution( mdCallee, fc );
+
+ // the computation of the callee-reachable heap
+ // is useful for making the callee starting point
+ // and for applying the call site transfer function
+ Set<Integer> callerNodeIDsCopiedToCallee =
+ new HashSet<Integer>();
+
+ ReachGraph heapForThisCall_cur =
+ rg.makeCalleeView( fc,
+ fmCallee,
+ callerNodeIDsCopiedToCallee,
+ writeDebugDOTs
+ );
+
+ if( !heapForThisCall_cur.equals( heapForThisCall_old ) ) {
+ // if heap at call site changed, update the contribution,
+ // and reschedule the callee for analysis
+ addIHMcontribution( mdCallee, fc, heapForThisCall_cur );
+ enqueue( mdCallee );
+ }
- Set<Integer> aliasedParamIndices =
- ogMergeOfAllPossibleCalleeResults.calculateAliasedParamSet(fc, md.isStatic(), flatm);
- MethodContext mcNew = new MethodContext( md, aliasedParamIndices );
- Set contexts = mapDescriptorToAllMethodContexts.get( md );
- assert contexts != null;
- contexts.add( mcNew );
- addDependent( mc, mcNew );
- ReachGraph onlyPossibleCallee = mapMethodContextToCompleteReachabilityGraph.get( mcNew );
-
- if( onlyPossibleCallee == null ) {
- // if this method context has never been analyzed just schedule it for analysis
- // and skip over this call site for now
- if( !methodContextsToVisitSet.contains( mcNew ) ) {
- methodContextsToVisitQ.add( new MethodContextQWrapper( mapDescriptorToPriority.get( md ),
- mcNew ) );
- methodContextsToVisitSet.add( mcNew );
- }
-
- } else {
- ogMergeOfAllPossibleCalleeResults.resolveMethodCall(fc, md.isStatic(), flatm, onlyPossibleCallee, mc, null);
- }
-
- meAnalysis.createNewMapping(mcNew);
- meAnalysis.analyzeFlatCall(ogMergeOfAllPossibleCalleeResults, mcNew, mc, fc);
-
+ // the transformation for a call site should update the
+ // current heap abstraction with any effects from the callee,
+ // or if the method is virtual, the effects from any possible
+ // callees, so find the set of callees...
+ Set<MethodDescriptor> setPossibleCallees =
+ new HashSet<MethodDescriptor>();
+ if( mdCallee.isStatic() ) {
+ setPossibleCallees.add( mdCallee );
} else {
- // if the method descriptor is virtual, then there could be a
- // set of possible methods that will actually be invoked, so
- // find all of them and merge all of their results together
TypeDescriptor typeDesc = fc.getThis().getType();
- Set possibleCallees = callGraph.getMethods(md, typeDesc);
-
- Iterator i = possibleCallees.iterator();
- while( i.hasNext() ) {
- MethodDescriptor possibleMd = (MethodDescriptor) i.next();
- FlatMethod pflatm = state.getMethodFlat(possibleMd);
-
- // don't alter the working graph (og) until we compute a result for every
- // possible callee, merge them all together, then set og to that
- ReachGraph ogCopy = new ReachGraph();
- ogCopy.merge(og);
-
- Set<Integer> aliasedParamIndices =
- ogCopy.calculateAliasedParamSet(fc, possibleMd.isStatic(), pflatm);
-
- MethodContext mcNew = new MethodContext( possibleMd, aliasedParamIndices );
- Set contexts = mapDescriptorToAllMethodContexts.get( md );
- assert contexts != null;
- contexts.add( mcNew );
-
-
- meAnalysis.createNewMapping(mcNew);
-
-
- addDependent( mc, mcNew );
-
- ReachGraph ogPotentialCallee = mapMethodContextToCompleteReachabilityGraph.get( mcNew );
+ setPossibleCallees.addAll( callGraph.getMethods( mdCallee,
+ typeDesc )
+ );
+ }
- if( ogPotentialCallee == null ) {
- // if this method context has never been analyzed just schedule it for analysis
- // and skip over this call site for now
- if( !methodContextsToVisitSet.contains( mcNew ) ) {
- methodContextsToVisitQ.add( new MethodContextQWrapper( mapDescriptorToPriority.get( md ),
- mcNew ) );
- methodContextsToVisitSet.add( mcNew );
- }
-
- } else {
- ogCopy.resolveMethodCall(fc, possibleMd.isStatic(), pflatm, ogPotentialCallee, mc, null);
- }
-
- ogMergeOfAllPossibleCalleeResults.merge(ogCopy);
-
- meAnalysis.analyzeFlatCall(ogMergeOfAllPossibleCalleeResults, mcNew, mc, fc);
- }
-
+ ReachGraph rgMergeOfEffects = new ReachGraph();
+
+ Iterator<MethodDescriptor> mdItr = setPossibleCallees.iterator();
+ while( mdItr.hasNext() ) {
+ MethodDescriptor mdPossible = mdItr.next();
+ FlatMethod fmPossible = state.getMethodFlat( mdPossible );
+
+ addDependent( mdPossible, // callee
+ d ); // caller
+
+ // don't alter the working graph (rg) until we compute a
+ // result for every possible callee, merge them all together,
+ // then set rg to that
+ ReachGraph rgCopy = new ReachGraph();
+ rgCopy.merge( rg );
+
+ ReachGraph rgEffect = getPartial( mdPossible );
+
+ if( rgEffect == null ) {
+ // if this method has never been analyzed just schedule it
+ // for analysis and skip over this call site for now
+ enqueue( mdPossible );
+ } else {
+ rgCopy.resolveMethodCall( fc,
+ fmPossible,
+ rgEffect,
+ callerNodeIDsCopiedToCallee,
+ writeDebugDOTs
+ );
+ }
+
+ rgMergeOfEffects.merge( rgCopy );
}
- og = ogMergeOfAllPossibleCalleeResults;
- break;
+
+ // now that we've taken care of building heap models for
+ // callee analysis, finish this transformation
+ rg = rgMergeOfEffects;
+ } break;
+
case FKind.FlatReturnNode:
FlatReturnNode frn = (FlatReturnNode) fn;
rhs = frn.getReturnTemp();
if( rhs != null && !rhs.getType().isImmutable() ) {
- og.assignReturnEqualToTemp(rhs);
+ rg.assignReturnEqualToTemp( rhs );
}
- setRetNodes.add(frn);
+ setRetNodes.add( frn );
break;
- }
+ } // end switch
- if( methodEffects ) {
- Hashtable<FlatNode, ReachabilityGraph> table=mapMethodContextToFlatNodeReachabilityGraph.get(mc);
- if(table==null){
- table=new Hashtable<FlatNode, ReachabilityGraph>();
- }
- table.put(fn, og);
- mapMethodContextToFlatNodeReachabilityGraph.put(mc, table);
- }
-
- return og;
+
+ // dead variables were removed before the above transfer function
+ // was applied, so eliminate heap regions and edges that are no
+ // longer part of the abstractly-live heap graph, and sweep up
+ // and reachability effects that are altered by the reduction
+ //rg.abstractGarbageCollect();
+ //rg.globalSweep();
+
+
+ // at this point rg should be the correct update
+ // by an above transfer function, or untouched if
+ // the flat node type doesn't affect the heap
+ return rg;
}
-
+
// this method should generate integers strictly greater than zero!
// special "shadow" regions are made from a heap region by negating
// the ID
static public Integer generateUniqueHeapRegionNodeID() {
++uniqueIDcount;
- return new Integer(uniqueIDcount);
+ return new Integer( uniqueIDcount );
}
+
static public FieldDescriptor getArrayField( TypeDescriptor tdElement ) {
FieldDescriptor fdElement = mapTypeToArrayField.get( tdElement );
if( fdElement == null ) {
- fdElement = new FieldDescriptor(new Modifiers(Modifiers.PUBLIC),
- tdElement,
- arrayElementFieldName,
- null,
- false);
+ fdElement = new FieldDescriptor( new Modifiers( Modifiers.PUBLIC ),
+ tdElement,
+ arrayElementFieldName,
+ null,
+ false );
mapTypeToArrayField.put( tdElement, fdElement );
}
return fdElement;
}
- private void setGraphForMethodContext(MethodContext mc, ReachabilityGraph og) {
-
- mapMethodContextToCompleteReachabilityGraph.put(mc, og);
-
- if( writeFinalDOTs && writeAllIncrementalDOTs ) {
- if( !mapMethodContextToNumUpdates.containsKey(mc) ) {
- mapMethodContextToNumUpdates.put(mc, new Integer(0) );
- }
- Integer n = mapMethodContextToNumUpdates.get(mc);
- try {
- og.writeGraph(mc+"COMPLETE"+String.format("%05d", n),
- true, // write labels (variables)
- true, // selectively hide intermediate temp vars
- true, // prune unreachable heap regions
- false, // show back edges to confirm graph validity
- false, // show parameter indices (unmaintained!)
- true, // hide subset reachability states
- true); // hide edge taints
- } catch( IOException e ) {}
- mapMethodContextToNumUpdates.put(mc, n + 1);
- }
- }
-
-
- private void addDependent( MethodContext caller, MethodContext callee ) {
- HashSet<MethodContext> deps = mapMethodContextToDependentContexts.get( callee );
- if( deps == null ) {
- deps = new HashSet<MethodContext>();
+
+ private void writeFinalGraphs() {
+ Set entrySet = mapDescriptorToCompleteReachGraph.entrySet();
+ Iterator itr = entrySet.iterator();
+ while( itr.hasNext() ) {
+ Map.Entry me = (Map.Entry) itr.next();
+ Descriptor d = (Descriptor) me.getKey();
+ ReachGraph rg = (ReachGraph) me.getValue();
+
+ try {
+ rg.writeGraph( "COMPLETE"+d,
+ true, // write labels (variables)
+ true, // selectively hide intermediate temp vars
+ true, // prune unreachable heap regions
+ false, // hide subset reachability states
+ true ); // hide edge taints
+ } catch( IOException e ) {}
}
- deps.add( caller );
- mapMethodContextToDependentContexts.put( callee, deps );
}
- private Iterator<MethodContext> iteratorDependents( MethodContext callee ) {
- HashSet<MethodContext> deps = mapMethodContextToDependentContexts.get( callee );
- if( deps == null ) {
- deps = new HashSet<MethodContext>();
- mapMethodContextToDependentContexts.put( callee, deps );
+ private void writeFinalIHMs() {
+ Iterator d2IHMsItr = mapDescriptorToIHMcontributions.entrySet().iterator();
+ while( d2IHMsItr.hasNext() ) {
+ Map.Entry me1 = (Map.Entry) d2IHMsItr.next();
+ Descriptor d = (Descriptor) me1.getKey();
+ Hashtable<FlatCall, ReachGraph> IHMs = (Hashtable<FlatCall, ReachGraph>) me1.getValue();
+
+ Iterator fc2rgItr = IHMs.entrySet().iterator();
+ while( fc2rgItr.hasNext() ) {
+ Map.Entry me2 = (Map.Entry) fc2rgItr.next();
+ FlatCall fc = (FlatCall) me2.getKey();
+ ReachGraph rg = (ReachGraph) me2.getValue();
+
+ try {
+ rg.writeGraph( "IHMPARTFOR"+d+"FROM"+fc,
+ true, // write labels (variables)
+ false, // selectively hide intermediate temp vars
+ false, // prune unreachable heap regions
+ false, // hide subset reachability states
+ true ); // hide edge taints
+ } catch( IOException e ) {}
+ }
}
- return deps.iterator();
}
+
- private void writeFinalContextGraphs() {
- Set entrySet = mapMethodContextToCompleteReachabilityGraph.entrySet();
- Iterator itr = entrySet.iterator();
- while( itr.hasNext() ) {
- Map.Entry me = (Map.Entry) itr.next();
- MethodContext mc = (MethodContext) me.getKey();
- ReachabilityGraph og = (ReachabilityGraph) me.getValue();
-
- try {
- og.writeGraph(mc+"COMPLETE",
- true, // write labels (variables)
- true, // selectively hide intermediate temp vars
- true, // prune unreachable heap regions
- false, // show back edges to confirm graph validity
- false, // show parameter indices (unmaintained!)
- true, // hide subset reachability states
- true); // hide edge taints
- } catch( IOException e ) {}
- }
- }
-
-
// return just the allocation site associated with one FlatNew node
- private AllocSite getAllocSiteFromFlatNewPRIVATE(FlatNew fn) {
+ protected AllocSite getAllocSiteFromFlatNewPRIVATE( FlatNew fnew ) {
- if( !mapFlatNewToAllocSite.containsKey(fn) ) {
- AllocSite as = new AllocSite(allocationDepth, fn, fn.getDisjointAnalysisId());
+ if( !mapFlatNewToAllocSite.containsKey( fnew ) ) {
+ AllocSite as =
+ (AllocSite) Canonical.makeCanonical( new AllocSite( allocationDepth,
+ fnew,
+ fnew.getDisjointId()
+ )
+ );
// the newest nodes are single objects
for( int i = 0; i < allocationDepth; ++i ) {
Integer id = generateUniqueHeapRegionNodeID();
- as.setIthOldest(i, id);
+ as.setIthOldest( i, id );
mapHrnIdToAllocSite.put( id, as );
}
// the oldest node is a summary node
- Integer idSummary = generateUniqueHeapRegionNodeID();
- as.setSummary(idSummary);
+ as.setSummary( generateUniqueHeapRegionNodeID() );
- mapFlatNewToAllocSite.put(fn, as);
+ mapFlatNewToAllocSite.put( fnew, as );
}
- return mapFlatNewToAllocSite.get(fn);
+ return mapFlatNewToAllocSite.get( fnew );
}
+ /*
// return all allocation sites in the method (there is one allocation
// site per FlatNew node in a method)
- private HashSet<AllocSite> getAllocSiteSet(Descriptor d) {
+ protected HashSet<AllocSite> getAllocSiteSet(Descriptor d) {
if( !mapDescriptorToAllocSiteSet.containsKey(d) ) {
buildAllocSiteSet(d);
}
return mapDescriptorToAllocSiteSet.get(d);
}
+ */
- private void buildAllocSiteSet(Descriptor d) {
+ /*
+ protected void buildAllocSiteSet(Descriptor d) {
HashSet<AllocSite> s = new HashSet<AllocSite>();
FlatMethod fm = state.getMethodFlat( d );
mapDescriptorToAllocSiteSet.put( d, s );
}
-
-
- private HashSet<AllocSite> getFlaggedAllocSites(Descriptor dIn) {
+ */
+ /*
+ protected HashSet<AllocSite> getFlaggedAllocSites(Descriptor dIn) {
HashSet<AllocSite> out = new HashSet<AllocSite>();
HashSet<Descriptor> toVisit = new HashSet<Descriptor>();
return out;
}
+ */
-
- private HashSet<AllocSite>
+ /*
+ protected HashSet<AllocSite>
getFlaggedAllocSitesReachableFromTaskPRIVATE(TaskDescriptor td) {
HashSet<AllocSite> asSetTotal = new HashSet<AllocSite>();
}
*/
- private LinkedList<Descriptor> topologicalSort( Set<Descriptor> toSort ) {
+
+ /*
+ protected String computeAliasContextHistogram() {
+
+ Hashtable<Integer, Integer> mapNumContexts2NumDesc =
+ new Hashtable<Integer, Integer>();
+
+ Iterator itr = mapDescriptorToAllDescriptors.entrySet().iterator();
+ while( itr.hasNext() ) {
+ Map.Entry me = (Map.Entry) itr.next();
+ HashSet<Descriptor> s = (HashSet<Descriptor>) me.getValue();
+
+ Integer i = mapNumContexts2NumDesc.get( s.size() );
+ if( i == null ) {
+ i = new Integer( 0 );
+ }
+ mapNumContexts2NumDesc.put( s.size(), i + 1 );
+ }
+
+ String s = "";
+ int total = 0;
+
+ itr = mapNumContexts2NumDesc.entrySet().iterator();
+ while( itr.hasNext() ) {
+ Map.Entry me = (Map.Entry) itr.next();
+ Integer c0 = (Integer) me.getKey();
+ Integer d0 = (Integer) me.getValue();
+ total += d0;
+ s += String.format( "%4d methods had %4d unique alias contexts.\n", d0, c0 );
+ }
+
+ s += String.format( "\n%4d total methods analyzed.\n", total );
+
+ return s;
+ }
+
+ protected int numMethodsAnalyzed() {
+ return descriptorsToAnalyze.size();
+ }
+ */
+
+
+
+
+ // Take in source entry which is the program's compiled entry and
+ // create a new analysis entry, a method that takes no parameters
+ // and appears to allocate the command line arguments and call the
+ // source entry with them. The purpose of this analysis entry is
+ // to provide a top-level method context with no parameters left.
+ protected void makeAnalysisEntryMethod( MethodDescriptor mdSourceEntry ) {
+
+ Modifiers mods = new Modifiers();
+ mods.addModifier( Modifiers.PUBLIC );
+ mods.addModifier( Modifiers.STATIC );
+
+ TypeDescriptor returnType =
+ new TypeDescriptor( TypeDescriptor.VOID );
+
+ this.mdAnalysisEntry =
+ new MethodDescriptor( mods,
+ returnType,
+ "analysisEntryMethod"
+ );
+
+ TempDescriptor cmdLineArgs =
+ new TempDescriptor( "args",
+ mdSourceEntry.getParamType( 0 )
+ );
+
+ FlatNew fn =
+ new FlatNew( mdSourceEntry.getParamType( 0 ),
+ cmdLineArgs,
+ false // is global
+ );
+
+ TempDescriptor[] sourceEntryArgs = new TempDescriptor[1];
+ sourceEntryArgs[0] = cmdLineArgs;
+
+ FlatCall fc =
+ new FlatCall( mdSourceEntry,
+ null, // dst temp
+ null, // this temp
+ sourceEntryArgs
+ );
+
+ FlatReturnNode frn = new FlatReturnNode( null );
+
+ FlatExit fe = new FlatExit();
+
+ this.fmAnalysisEntry =
+ new FlatMethod( mdAnalysisEntry,
+ fe
+ );
+
+ this.fmAnalysisEntry.addNext( fn );
+ fn.addNext( fc );
+ fc.addNext( frn );
+ frn.addNext( fe );
+ }
+
+
+ protected LinkedList<Descriptor> topologicalSort( Set<Descriptor> toSort ) {
Set <Descriptor> discovered = new HashSet <Descriptor>();
LinkedList<Descriptor> sorted = new LinkedList<Descriptor>();
return sorted;
}
- private void dfsVisit( Descriptor d,
- Set <Descriptor> toSort,
- LinkedList<Descriptor> sorted,
- Set <Descriptor> discovered ) {
+ // While we're doing DFS on call graph, remember
+ // dependencies for efficient queuing of methods
+ // during interprocedural analysis:
+ //
+ // a dependent of a method descriptor d for this analysis is:
+ // 1) a method or task that invokes d
+ // 2) in the descriptorsToAnalyze set
+ protected void dfsVisit( Descriptor d,
+ Set <Descriptor> toSort,
+ LinkedList<Descriptor> sorted,
+ Set <Descriptor> discovered ) {
discovered.add( d );
// only methods have callers, tasks never do
// analysis entry that calls the program source's entry
if( md == mdSourceEntry ) {
if( !discovered.contains( mdAnalysisEntry ) ) {
+ addDependent( mdSourceEntry, // callee
+ mdAnalysisEntry // caller
+ );
dfsVisit( mdAnalysisEntry, toSort, sorted, discovered );
}
}
}
if( !discovered.contains( dCaller ) ) {
+ addDependent( md, // callee
+ dCaller // caller
+ );
+
dfsVisit( dCaller, toSort, sorted, discovered );
}
}
}
- /*
- private String computeAliasContextHistogram() {
-
- Hashtable<Integer, Integer> mapNumContexts2NumDesc =
- new Hashtable<Integer, Integer>();
-
- Iterator itr = mapDescriptorToAllMethodContexts.entrySet().iterator();
- while( itr.hasNext() ) {
- Map.Entry me = (Map.Entry) itr.next();
- HashSet<MethodContext> s = (HashSet<MethodContext>) me.getValue();
-
- Integer i = mapNumContexts2NumDesc.get( s.size() );
- if( i == null ) {
- i = new Integer( 0 );
+ protected void enqueue( Descriptor d ) {
+ if( !descriptorsToVisitSet.contains( d ) ) {
+ Integer priority = mapDescriptorToPriority.get( d );
+ descriptorsToVisitQ.add( new DescriptorQWrapper( priority,
+ d )
+ );
+ descriptorsToVisitSet.add( d );
+ }
+ }
+
+
+ protected ReachGraph getPartial( Descriptor d ) {
+ return mapDescriptorToCompleteReachGraph.get( d );
+ }
+
+ protected void setPartial( Descriptor d, ReachGraph rg ) {
+ mapDescriptorToCompleteReachGraph.put( d, rg );
+
+ // when the flag for writing out every partial
+ // result is set, we should spit out the graph,
+ // but in order to give it a unique name we need
+ // to track how many partial results for this
+ // descriptor we've already written out
+ if( writeAllIncrementalDOTs ) {
+ if( !mapDescriptorToNumUpdates.containsKey( d ) ) {
+ mapDescriptorToNumUpdates.put( d, new Integer( 0 ) );
}
- mapNumContexts2NumDesc.put( s.size(), i + 1 );
- }
+ Integer n = mapDescriptorToNumUpdates.get( d );
+ /*
+ try {
+ rg.writeGraph( d+"COMPLETE"+String.format( "%05d", n ),
+ true, // write labels (variables)
+ true, // selectively hide intermediate temp vars
+ true, // prune unreachable heap regions
+ false, // show back edges to confirm graph validity
+ false, // show parameter indices (unmaintained!)
+ true, // hide subset reachability states
+ true); // hide edge taints
+ } catch( IOException e ) {}
+ */
+ mapDescriptorToNumUpdates.put( d, n + 1 );
+ }
+ }
- String s = "";
- int total = 0;
- itr = mapNumContexts2NumDesc.entrySet().iterator();
- while( itr.hasNext() ) {
- Map.Entry me = (Map.Entry) itr.next();
- Integer c0 = (Integer) me.getKey();
- Integer d0 = (Integer) me.getValue();
- total += d0;
- s += String.format( "%4d methods had %4d unique alias contexts.\n", d0, c0 );
+ // a dependent of a method descriptor d for this analysis is:
+ // 1) a method or task that invokes d
+ // 2) in the descriptorsToAnalyze set
+ protected void addDependent( Descriptor callee, Descriptor caller ) {
+ Set<Descriptor> deps = mapDescriptorToSetDependents.get( callee );
+ if( deps == null ) {
+ deps = new HashSet<Descriptor>();
+ }
+ deps.add( caller );
+ mapDescriptorToSetDependents.put( callee, deps );
+ }
+
+ protected Set<Descriptor> getDependents( Descriptor callee ) {
+ Set<Descriptor> deps = mapDescriptorToSetDependents.get( callee );
+ if( deps == null ) {
+ deps = new HashSet<Descriptor>();
+ mapDescriptorToSetDependents.put( callee, deps );
}
+ return deps;
+ }
- s += String.format( "\n%4d total methods analayzed.\n", total );
+
+ public Hashtable<FlatCall, ReachGraph> getIHMcontributions( Descriptor d ) {
- return s;
+ Hashtable<FlatCall, ReachGraph> heapsFromCallers =
+ mapDescriptorToIHMcontributions.get( d );
+
+ if( heapsFromCallers == null ) {
+ heapsFromCallers = new Hashtable<FlatCall, ReachGraph>();
+ mapDescriptorToIHMcontributions.put( d, heapsFromCallers );
+ }
+
+ return heapsFromCallers;
}
- private int numMethodsAnalyzed() {
- return descriptorsToAnalyze.size();
+ public ReachGraph getIHMcontribution( Descriptor d,
+ FlatCall fc
+ ) {
+ Hashtable<FlatCall, ReachGraph> heapsFromCallers =
+ getIHMcontributions( d );
+
+ if( !heapsFromCallers.containsKey( fc ) ) {
+ heapsFromCallers.put( fc, new ReachGraph() );
+ }
+
+ return heapsFromCallers.get( fc );
}
- */
+ public void addIHMcontribution( Descriptor d,
+ FlatCall fc,
+ ReachGraph rg
+ ) {
+ Hashtable<FlatCall, ReachGraph> heapsFromCallers =
+ getIHMcontributions( d );
- /*
- // insert a call to debugSnapshot() somewhere in the analysis
- // to get successive captures of the analysis state
+ heapsFromCallers.put( fc, rg );
+ }
+
+private AllocSite createParameterAllocSite(ReachGraph rg, TempDescriptor tempDesc) {
+
+ // create temp descriptor for each parameter variable
+ FlatNew flatNew = new FlatNew(tempDesc.getType(), tempDesc, false);
+ // create allocation site
+ AllocSite as = (AllocSite) Canonical.makeCanonical(new AllocSite( allocationDepth, flatNew, flatNew.getDisjointId()));
+ for (int i = 0; i < allocationDepth; ++i) {
+ Integer id = generateUniqueHeapRegionNodeID();
+ as.setIthOldest(i, id);
+ mapHrnIdToAllocSite.put(id, as);
+ }
+ // the oldest node is a summary node
+ as.setSummary( generateUniqueHeapRegionNodeID() );
+
+ rg.age(as);
+
+ return as;
+
+}
+
+private ReachGraph createInitialTaskReachGraph(FlatMethod fm) {
+ ReachGraph rg = new ReachGraph();
+ TaskDescriptor taskDesc = fm.getTask();
+
+ for (int idx = 0; idx < taskDesc.numParameters(); idx++) {
+ Descriptor paramDesc = taskDesc.getParameter(idx);
+ TypeDescriptor paramTypeDesc = taskDesc.getParamType(idx);
+
+ // setup data structure
+ Set<HashMap<HeapRegionNode, FieldDescriptor>> workSet =
+ new HashSet<HashMap<HeapRegionNode, FieldDescriptor>>();
+ Hashtable<TypeDescriptor, HeapRegionNode> mapTypeToExistingSummaryNode =
+ new Hashtable<TypeDescriptor, HeapRegionNode>();
+ Set<String> doneSet = new HashSet<String>();
+
+ TempDescriptor tempDesc = fm.getParameter(idx);
+
+ AllocSite as = createParameterAllocSite(rg, tempDesc);
+ VariableNode lnX = rg.getVariableNodeFromTemp(tempDesc);
+ Integer idNewest = as.getIthOldest(0);
+ HeapRegionNode hrnNewest = rg.id2hrn.get(idNewest);
+ // make a new reference to allocated node
+ RefEdge edgeNew = new RefEdge(lnX, // source
+ hrnNewest, // dest
+ taskDesc.getParamType(idx), // type
+ null, // field name
+ hrnNewest.getAlpha(), // beta
+ ExistPredSet.factory(rg.predTrue) // predicates
+ );
+ rg.addRefEdge(lnX, hrnNewest, edgeNew);
+
+ // set-up a work set for class field
+ ClassDescriptor classDesc = paramTypeDesc.getClassDesc();
+ for (Iterator it = classDesc.getFields(); it.hasNext();) {
+ FieldDescriptor fd = (FieldDescriptor) it.next();
+ TypeDescriptor fieldType = fd.getType();
+ if (!fieldType.isImmutable() || fieldType.isArray()) {
+ HashMap<HeapRegionNode, FieldDescriptor> newMap = new HashMap<HeapRegionNode, FieldDescriptor>();
+ newMap.put(hrnNewest, fd);
+ workSet.add(newMap);
+ }
+ }
+
+ int uniqueIdentifier = 0;
+ while (!workSet.isEmpty()) {
+ HashMap<HeapRegionNode, FieldDescriptor> map = workSet
+ .iterator().next();
+ workSet.remove(map);
+
+ Set<HeapRegionNode> key = map.keySet();
+ HeapRegionNode srcHRN = key.iterator().next();
+ FieldDescriptor fd = map.get(srcHRN);
+ TypeDescriptor type = fd.getType();
+ String doneSetIdentifier = srcHRN.getIDString() + "_" + fd;
+
+ if (!doneSet.contains(doneSetIdentifier)) {
+ doneSet.add(doneSetIdentifier);
+ if (!mapTypeToExistingSummaryNode.containsKey(type)) {
+ // create new summary Node
+ TempDescriptor td = new TempDescriptor("temp"
+ + uniqueIdentifier, type);
+
+ AllocSite allocSite;
+ if(type.equals(paramTypeDesc)){
+ //corresponding allocsite has already been created for a parameter variable.
+ allocSite=as;
+ }else{
+ allocSite = createParameterAllocSite(rg, td);
+ }
+ String strDesc = allocSite.toStringForDOT()
+ + "\\nsummary";
+ HeapRegionNode hrnSummary =
+ rg.createNewHeapRegionNode(allocSite.getSummary(), // id or null to generate a new one
+ false, // single object?
+ true, // summary?
+ false, // flagged?
+ false, // out-of-context?
+ allocSite.getType(), // type
+ allocSite, // allocation site
+ null, // inherent reach
+ srcHRN.getAlpha(), // current reach
+ ExistPredSet.factory(), // predicates
+ strDesc // description
+ );
+ rg.id2hrn.put(allocSite.getSummary(),hrnSummary);
+
+ // make a new reference to summary node
+ RefEdge edgeToSummary = new RefEdge(srcHRN, // source
+ hrnSummary, // dest
+ fd.getType(), // type
+ fd.getSymbol(), // field name
+ srcHRN.getAlpha(), // beta
+ ExistPredSet.factory(rg.predTrue) // predicates
+ );
+
+ rg.addRefEdge(srcHRN, hrnSummary, edgeToSummary);
+
+ uniqueIdentifier++;
+
+ mapTypeToExistingSummaryNode.put(type, hrnSummary);
+
+ // set-up a work set for fields of the class
+ if(!type.isImmutable()){
+ classDesc = type.getClassDesc();
+ for (Iterator it = classDesc.getFields(); it.hasNext();) {
+ FieldDescriptor typeFieldDesc = (FieldDescriptor) it.next();
+ TypeDescriptor fieldType = typeFieldDesc.getType();
+ if (!fieldType.isImmutable()) {
+ doneSetIdentifier = hrnSummary.getIDString() + "_" + typeFieldDesc;
+ if(!doneSet.contains(doneSetIdentifier)){
+ // add new work item
+ HashMap<HeapRegionNode, FieldDescriptor> newMap =
+ new HashMap<HeapRegionNode, FieldDescriptor>();
+ newMap.put(hrnSummary, typeFieldDesc);
+ workSet.add(newMap);
+ }
+ }
+ }
+ }
+
+ }else{
+ // if there exists corresponding summary node
+ HeapRegionNode hrnDst=mapTypeToExistingSummaryNode.get(type);
+
+ RefEdge edgeToSummary = new RefEdge(srcHRN, // source
+ hrnDst, // dest
+ fd.getType(), // type
+ fd.getSymbol(), // field name
+ srcHRN.getAlpha(), // beta
+ ExistPredSet.factory(rg.predTrue) // predicates
+ );
+ rg.addRefEdge(srcHRN, hrnDst, edgeToSummary);
+
+ }
+ }
+ }
+ }
+// debugSnapshot(rg, fm, true);
+ return rg;
+}
+
+// return all allocation sites in the method (there is one allocation
+// site per FlatNew node in a method)
+private HashSet<AllocSite> getAllocationSiteSet(Descriptor d) {
+ if( !mapDescriptorToAllocSiteSet.containsKey(d) ) {
+ buildAllocationSiteSet(d);
+ }
+
+ return mapDescriptorToAllocSiteSet.get(d);
+
+}
+
+private void buildAllocationSiteSet(Descriptor d) {
+ HashSet<AllocSite> s = new HashSet<AllocSite>();
+
+ FlatMethod fm;
+ if( d instanceof MethodDescriptor ) {
+ fm = state.getMethodFlat( (MethodDescriptor) d);
+ } else {
+ assert d instanceof TaskDescriptor;
+ fm = state.getMethodFlat( (TaskDescriptor) d);
+ }
+
+ // visit every node in this FlatMethod's IR graph
+ // and make a set of the allocation sites from the
+ // FlatNew node's visited
+ HashSet<FlatNode> visited = new HashSet<FlatNode>();
+ HashSet<FlatNode> toVisit = new HashSet<FlatNode>();
+ toVisit.add(fm);
+
+ while( !toVisit.isEmpty() ) {
+ FlatNode n = toVisit.iterator().next();
+
+ if( n instanceof FlatNew ) {
+ s.add(getAllocSiteFromFlatNewPRIVATE( (FlatNew) n) );
+ }
+
+ toVisit.remove(n);
+ visited.add(n);
+
+ for( int i = 0; i < n.numNext(); ++i ) {
+ FlatNode child = n.getNext(i);
+ if( !visited.contains(child) ) {
+ toVisit.add(child);
+ }
+ }
+ }
+
+ mapDescriptorToAllocSiteSet.put(d, s);
+ }
+
+ private HashSet<AllocSite> getFlaggedAllocationSites(Descriptor dIn) {
+
+ HashSet<AllocSite> out = new HashSet<AllocSite>();
+ HashSet<Descriptor> toVisit = new HashSet<Descriptor>();
+ HashSet<Descriptor> visited = new HashSet<Descriptor>();
+
+ toVisit.add(dIn);
+
+ while (!toVisit.isEmpty()) {
+ Descriptor d = toVisit.iterator().next();
+ toVisit.remove(d);
+ visited.add(d);
+
+ HashSet<AllocSite> asSet = getAllocationSiteSet(d);
+ Iterator asItr = asSet.iterator();
+ while (asItr.hasNext()) {
+ AllocSite as = (AllocSite) asItr.next();
+ if (as.getDisjointAnalysisId() != null) {
+ out.add(as);
+ }
+ }
+
+ // enqueue callees of this method to be searched for
+ // allocation sites also
+ Set callees = callGraph.getCalleeSet(d);
+ if (callees != null) {
+ Iterator methItr = callees.iterator();
+ while (methItr.hasNext()) {
+ MethodDescriptor md = (MethodDescriptor) methItr.next();
+
+ if (!visited.contains(md)) {
+ toVisit.add(md);
+ }
+ }
+ }
+ }
+
+ return out;
+ }
+
+
+private HashSet<AllocSite>
+getFlaggedAllocationSitesReachableFromTaskPRIVATE(TaskDescriptor td) {
+
+ HashSet<AllocSite> asSetTotal = new HashSet<AllocSite>();
+ HashSet<Descriptor> toVisit = new HashSet<Descriptor>();
+ HashSet<Descriptor> visited = new HashSet<Descriptor>();
+
+ toVisit.add(td);
+
+ // traverse this task and all methods reachable from this task
+ while( !toVisit.isEmpty() ) {
+ Descriptor d = toVisit.iterator().next();
+ toVisit.remove(d);
+ visited.add(d);
+
+ HashSet<AllocSite> asSet = getAllocationSiteSet(d);
+ Iterator asItr = asSet.iterator();
+ while( asItr.hasNext() ) {
+ AllocSite as = (AllocSite) asItr.next();
+ TypeDescriptor typed = as.getType();
+ if( typed != null ) {
+ ClassDescriptor cd = typed.getClassDesc();
+ if( cd != null && cd.hasFlags() ) {
+ asSetTotal.add(as);
+ }
+ }
+ }
+
+ // enqueue callees of this method to be searched for
+ // allocation sites also
+ Set callees = callGraph.getCalleeSet(d);
+ if( callees != null ) {
+ Iterator methItr = callees.iterator();
+ while( methItr.hasNext() ) {
+ MethodDescriptor md = (MethodDescriptor) methItr.next();
+
+ if( !visited.contains(md) ) {
+ toVisit.add(md);
+ }
+ }
+ }
+ }
+
+ return asSetTotal;
+}
+
+
+
+
+ // get successive captures of the analysis state
boolean takeDebugSnapshots = false;
- String mcDescSymbolDebug = "setRoute";
+ String descSymbolDebug = "addSomething";
boolean stopAfterCapture = true;
// increments every visit to debugSnapshot, don't fiddle with it
- // IMPORTANT NOTE FOR SETTING THE FOLLOWING VALUES: this
- // counter increments just after every node is analyzed
- // from the body of the method whose symbol is specified
- // above.
int debugCounter = 0;
// the value of debugCounter to start reporting the debugCounter
int freqCountReport = 0;
// the debugCounter value at which to start taking snapshots
- int iterStartCapture = 0;
+ int iterStartCapture = 25;
// the number of snapshots to take
int numIterToCapture = 300;
- void debugSnapshot(ReachabilityGraph og, FlatNode fn) {
+
+ void debugSnapshot( ReachGraph rg, FlatNode fn, boolean in ) {
if( debugCounter > iterStartCapture + numIterToCapture ) {
return;
}
- ++debugCounter;
- if( debugCounter > numStartCountReport &&
- freqCountReport > 0 &&
- debugCounter % freqCountReport == 0 ) {
- System.out.println(" @@@ debug counter = "+debugCounter);
+ if( in ) {
+ ++debugCounter;
+ }
+
+ if( debugCounter > numStartCountReport &&
+ freqCountReport > 0 &&
+ debugCounter % freqCountReport == 0
+ ) {
+ System.out.println( " @@@ debug counter = "+
+ debugCounter );
}
+
if( debugCounter > iterStartCapture ) {
- System.out.println(" @@@ capturing debug "+(debugCounter-iterStartCapture)+" @@@");
- String graphName = String.format("snap%04d",debugCounter-iterStartCapture);
+ System.out.println( " @@@ capturing debug "+
+ (debugCounter - iterStartCapture)+
+ " @@@" );
+ String graphName;
+ if( in ) {
+ graphName = String.format( "snap%04din",
+ debugCounter - iterStartCapture );
+ } else {
+ graphName = String.format( "snap%04dout",
+ debugCounter - iterStartCapture );
+ }
if( fn != null ) {
- graphName = graphName+fn;
+ graphName = graphName + fn;
}
try {
- og.writeGraph(graphName,
- true, // write labels (variables)
- true, // selectively hide intermediate temp vars
- true, // prune unreachable heap regions
- false, // show back edges to confirm graph validity
- false, // show parameter indices (unmaintained!)
- true, // hide subset reachability states
- true); // hide edge taints
+ rg.writeGraph( graphName,
+ true, // write labels (variables)
+ true, // selectively hide intermediate temp vars
+ true, // prune unreachable heap regions
+ false, // hide subset reachability states
+ true );// hide edge taints
} catch( Exception e ) {
- System.out.println("Error writing debug capture.");
- System.exit(0);
+ System.out.println( "Error writing debug capture." );
+ System.exit( 0 );
}
}
- if( debugCounter == iterStartCapture + numIterToCapture && stopAfterCapture ) {
- System.out.println("Stopping analysis after debug captures.");
- System.exit(0);
+ if( debugCounter == iterStartCapture + numIterToCapture &&
+ stopAfterCapture
+ ) {
+ System.out.println( "Stopping analysis after debug captures." );
+ System.exit( 0 );
}
}
- */
-
-
- // Take in sourceEntry which is the program's compiled entry and
- // create a new analysis entry, a method that takes no parameters
- // and appears to allocate the command line arguments and call the
- // sourceEntry with them. The purpose of this analysis entry is to
- // provide a top-level method context with no parameters left.
- private void makeAnalysisEntryMethod( MethodDescriptor sourceEntry ) {
-
- Modifiers mods = new Modifiers();
- mods.addModifier( Modifiers.PUBLIC );
- mods.addModifier( Modifiers.STATIC );
- TypeDescriptor returnType =
- new TypeDescriptor( TypeDescriptor.VOID );
-
- this.mdAnalysisEntry =
- new MethodDescriptor( mods,
- returnType,
- "analysisEntryMethod"
- );
-
- FlatExit fe = new FlatExit();
-
- TempDescriptor cmdLineArgs = new TempDescriptor( "args" );
-
- FlatNew fn = new FlatNew( sourceEntry.getParamType( 0 ),
- cmdLineArgs,
- false // is global
- );
-
- //FlatCall fc = new
-
- this.fmAnalysisEntry = new FlatMethod( mdAnalysisEntry, fe );
- }
}