import Analysis.ArrayReferencees;
import Analysis.OoOJava.Accessible;
import Analysis.OoOJava.RBlockRelationAnalysis;
+import Analysis.FlatIRGraph.*;
import IR.*;
import IR.Flat.*;
import IR.Tree.Modifiers;
// Returns the model allocation site for the String[] object that this
// analysis constructs to stand in for the command-line args array
// (see constructedCmdLineArgsNew).
public Alloc getCmdLineArgsAlloc() {
return getAllocationSiteFromFlatNew( constructedCmdLineArgsNew );
}
+ // Returns the model allocation site for a single String element of the
+ // constructed command-line args array (see constructedCmdLineArgNew).
+ public Alloc getCmdLineArgAlloc() {
+ return getAllocationSiteFromFlatNew( constructedCmdLineArgNew );
+ }
+ // Returns the model allocation site for the backing value array of a
+ // command-line arg String (see constructedCmdLineArgBytesNew).
+ public Alloc getCmdLineArgBytesAlloc() {
+ return getAllocationSiteFromFlatNew( constructedCmdLineArgBytesNew );
+ }
+ // Returns the single alloc site used to model every String object the
+ // runtime creates implicitly for string literals; the site is cached in
+ // newStringLiteralAlloc by initImplicitStringsModel().
+ public Alloc getNewStringLiteralAlloc() {
+ return newStringLiteralAlloc;
+ }
+ // Returns the single alloc site used to model the backing value array of
+ // every implicitly-created string literal object; cached in
+ // newStringLiteralBytesAlloc by initImplicitStringsModel().
+ public Alloc getNewStringLiteralBytesAlloc() {
+ return newStringLiteralBytesAlloc;
+ }
+
///////////////////////////////////////////
//
// end public interface
protected EffectsAnalysis effectsAnalysis;
protected BuildStateMachines buildStateMachines;
+ protected boolean doDefiniteReachAnalysis = false;
+ protected DefiniteReachAnalysis definiteReachAnalysis;
+
// data structure for public interface
private Hashtable< Descriptor, HashSet<AllocSite> >
protected Hashtable<Descriptor, ReachGraph>
mapDescriptorToInitialContext;
+ // mapping of current partial results for a given node. Note that
+ // to reanalyze a method we discard all partial results because a
+ // null reach graph indicates the node needs to be visited on the
+ // way to the fixed point.
+ // The reason for a persistent mapping is so after the analysis we
+ // can ask for the graph of any node at the fixed point, but this
+ // option is only enabled with a compiler flag.
+ protected Hashtable<FlatNode, ReachGraph> mapFlatNodeToReachGraphPersist;
+ protected Hashtable<FlatNode, ReachGraph> mapFlatNodeToReachGraph;
+
+
// make the result for back edges analysis-wide STRICTLY
// MONOTONIC as well, but notice we use FlatNode as the
// key for this map: in case we want to consider other
// know if something is pointing to the cmd line args
// and we can verify by checking the allocation site field.
protected FlatNew constructedCmdLineArgsNew;
+ protected FlatNew constructedCmdLineArgNew;
+ protected FlatNew constructedCmdLineArgBytesNew;
+
+ // similar to above, the runtime allocates new strings
+ // for literal nodes, so make up an alloc to model that
+ protected AllocSite newStringLiteralAlloc;
+ protected AllocSite newStringLiteralBytesAlloc;
+
+ // both of the above models also need the FieldDescriptor of the
+ // String's value field so the string object can reference its
+ // backing array
+ protected TypeDescriptor stringType;
+ protected TypeDescriptor stringBytesType;
+ protected FieldDescriptor stringBytesField;
+
+
+ // Builds the model for strings the runtime creates implicitly: each
+ // string literal conceptually allocates a String object plus its backing
+ // value array, but neither allocation appears as a FlatNew in user code.
+ // So we fabricate dummy FlatNew sites here and cache their alloc sites
+ // (newStringLiteralAlloc / newStringLiteralBytesAlloc) for the
+ // FlatLiteralNode transfer function to use.
+ protected void initImplicitStringsModel() {
+
+ ClassDescriptor cdString = typeUtil.getClass( typeUtil.StringClass );
+ assert cdString != null;
+
+
+ stringType =
+ new TypeDescriptor( cdString );
+
+ // NOTE(review): despite the "Bytes" naming, the value array is modeled
+ // as a char[] here -- presumably matching the runtime String's value
+ // field type; confirm against the runtime library.
+ stringBytesType =
+ new TypeDescriptor(TypeDescriptor.CHAR).makeArray( state );
+
+
+ // locate the String value field by symbol name so stores/loads through
+ // it can be modeled; the assert below guarantees we found it
+ stringBytesField = null;
+ Iterator sFieldsItr = cdString.getFields();
+ while( sFieldsItr.hasNext() ) {
+ FieldDescriptor fd = (FieldDescriptor) sFieldsItr.next();
+ if( fd.getSymbol().equals( typeUtil.StringClassValueField ) ) {
+ stringBytesField = fd;
+ break;
+ }
+ }
+ assert stringBytesField != null;
+
+
+ // dummy destination temp -- never read; exists only because FlatNew
+ // requires a destination
+ TempDescriptor throwAway1 =
+ new TempDescriptor("stringLiteralTemp_dummy1",
+ stringType
+ );
+ FlatNew fnStringLiteral =
+ new FlatNew(stringType,
+ throwAway1,
+ false // is global
+ );
+ newStringLiteralAlloc
+ = getAllocSiteFromFlatNewPRIVATE( fnStringLiteral );
+
+
+ // same pattern for the literal's backing value array
+ TempDescriptor throwAway2 =
+ new TempDescriptor("stringLiteralTemp_dummy2",
+ stringBytesType
+ );
+ FlatNew fnStringLiteralBytes =
+ new FlatNew(stringBytesType,
+ throwAway2,
+ false // is global
+ );
+ newStringLiteralBytesAlloc
+ = getAllocSiteFromFlatNewPRIVATE( fnStringLiteralBytes );
+ }
mapDescriptorToInitialContext =
new Hashtable<Descriptor, ReachGraph>();
+ mapFlatNodeToReachGraphPersist =
+ new Hashtable<FlatNode, ReachGraph>();
+
mapBackEdgeToMonotone =
new Hashtable<FlatNode, ReachGraph>();
mapDescriptorToReachGraph =
new Hashtable<Descriptor, ReachGraph>();
- pm = new PointerMethod();
-
fc2enclosing = new Hashtable<FlatCall, Descriptor>();
}
ReachGraph.typeUtil = typeUtil;
ReachGraph.state = state;
+ ReachGraph.initOutOfScopeTemps();
+
ReachGraph.debugCallSiteVisitStartCapture
= state.DISJOINTDEBUGCALLVISITTOSTART;
= state.DISJOINTDEBUGCALLSTOPAFTER;
ReachGraph.debugCallSiteVisitCounter
- = 0; // count visits from 1, is incremented before first visit
+ = 0; // count visits from 1, is incremented before first visit
-
+ pm = new PointerMethod();
+
+ if( state.DO_DEFINITE_REACH_ANALYSIS ) {
+ doDefiniteReachAnalysis = true;
+ definiteReachAnalysis = new DefiniteReachAnalysis( pm );
+ }
if( suppressOutput ) {
System.out.println("* Running disjoint reachability analysis with output suppressed! *");
}
+
allocateStructures();
+ initImplicitStringsModel();
+
+
+
double timeStartAnalysis = (double) System.nanoTime();
// start interprocedural fixed-point computation
writeFinalGraphs();
}
- if( state.DISJOINTWRITEIHMS && !suppressOutput ) {
+ if( state.DISJOINTWRITEIHMS ) {
writeFinalIHMs();
}
- if( state.DISJOINTWRITEINITCONTEXTS && !suppressOutput ) {
+ if( state.DISJOINTWRITEINITCONTEXTS ) {
writeInitialContexts();
}
+ if( state.DISJOINT_WRITE_ALL_NODE_FINAL_GRAPHS ) {
+ writeFinalGraphsForEveryNode();
+ }
+
if( state.DISJOINTALIASFILE != null && !suppressOutput ) {
if( state.TASK ) {
writeAllSharing(state.DISJOINTALIASFILE, treport, justtime, state.DISJOINTALIASTAB, state.lines);
}
+
// now, depending on the interprocedural mode for visiting
// methods, set up the needed data structures
flatNodesToVisitQ.add(fm);
}
- // mapping of current partial results
- Hashtable<FlatNode, ReachGraph> mapFlatNodeToReachGraph =
+ // start a new mapping of partial results
+ mapFlatNodeToReachGraph =
new Hashtable<FlatNode, ReachGraph>();
// the set of return nodes partial results that will be combined as
if( !rg.equals(rgPrev) ) {
mapFlatNodeToReachGraph.put(fn, rg);
+ // we don't necessarily want to keep the reach graph for every
+ // node in the program unless a client or the user wants it
+ if( state.DISJOINT_WRITE_ALL_NODE_FINAL_GRAPHS ) {
+ mapFlatNodeToReachGraphPersist.put(fn, rg);
+ }
+
for( int i = 0; i < pm.numNext(fn); i++ ) {
FlatNode nn = pm.getNext(fn, i);
// states after the flat method returns
ReachGraph completeGraph = new ReachGraph();
+ if( setReturns.isEmpty() ) {
+ System.out.println( "d = "+d );
+
+ }
assert !setReturns.isEmpty();
Iterator retItr = setReturns.iterator();
while( retItr.hasNext() ) {
stopAfterCapture
) {
System.out.println("!!! Stopping analysis after debug snap captures. !!!");
- System.exit(0);
+ System.exit(-1);
}
}
FlatSESEEnterNode sese;
FlatSESEExitNode fsexn;
+ Set<EdgeKey> edgeKeysForLoad;
+ Set<EdgeKey> edgeKeysRemoved;
+ Set<EdgeKey> edgeKeysAdded;
+
//Stores the flatnode's reach graph at enter
ReachGraph rgOnEnter = new ReachGraph();
rgOnEnter.merge(rg);
fn2rgAtEnter.put(fn, rgOnEnter);
+
+ boolean didDefReachTransfer = false;
+
+
// use node type to decide what transfer function
// to apply to the reachability graph
true, // write labels (variables)
true, // selectively hide intermediate temp vars
true, // prune unreachable heap regions
- true, // hide reachability altogether
+ false, // hide reachability altogether
true, // hide subset reachability states
true, // hide predicates
- false); // hide edge taints
+ true); //false); // hide edge taints
+ } break;
+
+
+ case FKind.FlatGenDefReachNode: {
+ FlatGenDefReachNode fgdrn = (FlatGenDefReachNode) fn;
+ if( doDefiniteReachAnalysis ) {
+ definiteReachAnalysis.writeState( fn, fgdrn.getOutputName() );
+ }
} break;
FlatCall fc = (FlatCall) me.getKey();
ReachGraph rgContrib = (ReachGraph) me.getValue();
- assert fc.getMethod().equals(d);
-
+ // note that "fc.getMethod()" like (Object.toString)
+ // might not be equal to "d" like (String.toString)
+ // because the mapping gets set up when we resolve
+ // virtual dispatch
rg.merge(rgContrib);
}
rg.merge(rgPrevContext);
mapDescriptorToInitialContext.put(d, rg);
+ if( doDefiniteReachAnalysis ) {
+ FlatMethod fm = (FlatMethod) fn;
+ Set<TempDescriptor> params = new HashSet<TempDescriptor>();
+ for( int i = 0; i < fm.numParameters(); ++i ) {
+ params.add( fm.getParameter( i ) );
+ }
+ definiteReachAnalysis.methodEntry( fn, params );
+ didDefReachTransfer = true;
+ }
} break;
case FKind.FlatOpNode:
if( doEffectsAnalysis && fmContaining != fmAnalysisEntry ) {
if(rblockRel.isPotentialStallSite(fn)) {
// x gets status of y
-// if(!rg.isAccessible(rhs)){
if(!accessible.isAccessible(fn, rhs)) {
rg.makeInaccessible(lhs);
}
// transfer func
rg.assignTempXEqualToTempY(lhs, rhs);
+
+ if( doDefiniteReachAnalysis ) {
+ definiteReachAnalysis.copy( fn, lhs, rhs );
+ didDefReachTransfer = true;
+ }
}
break;
if( doEffectsAnalysis && fmContaining != fmAnalysisEntry ) {
if(rblockRel.isPotentialStallSite(fn)) {
// x gets status of y
-// if(!rg.isAccessible(rhs)){
if(!accessible.isAccessible(fn,rhs)) {
rg.makeInaccessible(lhs);
}
// transfer func
rg.assignTempXEqualToCastedTempY(lhs, rhs, td);
+
+ if( doDefiniteReachAnalysis ) {
+ definiteReachAnalysis.copy( fn, lhs, rhs );
+ didDefReachTransfer = true;
+ }
break;
case FKind.FlatFieldNode:
if(rblockRel.isPotentialStallSite(fn)) {
// x=y.f, stall y if not accessible
// contributes read effects on stall site of y
-// if(!rg.isAccessible(rhs)) {
if(!accessible.isAccessible(fn,rhs)) {
rg.taintStallSite(fn, rhs);
}
}
}
+ edgeKeysForLoad = null;
+ if( doDefiniteReachAnalysis ) {
+ edgeKeysForLoad = new HashSet<EdgeKey>();
+ }
+
if( shouldAnalysisTrack(fld.getType() ) ) {
// transfer func
- rg.assignTempXEqualToTempYFieldF(lhs, rhs, fld, fn);
+ rg.assignTempXEqualToTempYFieldF( lhs, rhs, fld, fn, edgeKeysForLoad );
+
+ if( doDefiniteReachAnalysis ) {
+ definiteReachAnalysis.load( fn, lhs, rhs, fld, edgeKeysForLoad );
+ didDefReachTransfer = true;
+ }
}
// after transfer, use updated graph to
boolean strongUpdate = false;
+ edgeKeysRemoved = null;
+ edgeKeysAdded = null;
+ if( doDefiniteReachAnalysis ) {
+ edgeKeysRemoved = new HashSet<EdgeKey>();
+ edgeKeysAdded = new HashSet<EdgeKey>();
+ }
+
// before transfer func, possibly inject
// stall-site taints
if( doEffectsAnalysis && fmContaining != fmAnalysisEntry ) {
if(rblockRel.isPotentialStallSite(fn)) {
// x.y=f , stall x and y if they are not accessible
// also contribute write effects on stall site of x
-// if(!rg.isAccessible(lhs)) {
if(!accessible.isAccessible(fn,lhs)) {
rg.taintStallSite(fn, lhs);
}
-// if(!rg.isAccessible(rhs)) {
if(!accessible.isAccessible(fn,rhs)) {
rg.taintStallSite(fn, rhs);
}
if( shouldAnalysisTrack(fld.getType() ) ) {
// transfer func
- strongUpdate = rg.assignTempXFieldFEqualToTempY(lhs, fld, rhs, fn);
+ strongUpdate = rg.assignTempXFieldFEqualToTempY( lhs,
+ fld,
+ rhs,
+ fn,
+ edgeKeysRemoved,
+ edgeKeysAdded );
+ if( doDefiniteReachAnalysis ) {
+ definiteReachAnalysis.store( fn,
+ lhs,
+ fld,
+ rhs,
+ edgeKeysRemoved,
+ edgeKeysAdded );
+ didDefReachTransfer = true;
+ }
}
// use transformed graph to do effects analysis
// x=y.f, stall y if not accessible
// contributes read effects on stall site of y
// after this, x and y are accessbile.
-// if(!rg.isAccessible(rhs)) {
if(!accessible.isAccessible(fn,rhs)) {
rg.taintStallSite(fn, rhs);
}
}
}
+ edgeKeysForLoad = null;
+ if( doDefiniteReachAnalysis ) {
+ edgeKeysForLoad = new HashSet<EdgeKey>();
+ }
+
if( shouldAnalysisTrack(lhs.getType() ) ) {
// transfer func
- rg.assignTempXEqualToTempYFieldF(lhs, rhs, fdElement, fn);
+ rg.assignTempXEqualToTempYFieldF( lhs, rhs, fdElement, fn, edgeKeysForLoad );
+
+ if( doDefiniteReachAnalysis ) {
+ definiteReachAnalysis.load( fn, lhs, rhs, fdElement, edgeKeysForLoad );
+ didDefReachTransfer = true;
+ }
}
// use transformed graph to do effects analysis
lhs = fsen.getDst();
rhs = fsen.getSrc();
-
+
assert lhs.getType() != null;
assert lhs.getType().isArray();
tdElement = lhs.getType().dereference();
fdElement = getArrayField(tdElement);
+ edgeKeysRemoved = null;
+ edgeKeysAdded = null;
+ if( doDefiniteReachAnalysis ) {
+ edgeKeysRemoved = new HashSet<EdgeKey>();
+ edgeKeysAdded = new HashSet<EdgeKey>();
+ }
+
// before transfer func, possibly inject
// stall-site taints
if( doEffectsAnalysis && fmContaining != fmAnalysisEntry ) {
if(rblockRel.isPotentialStallSite(fn)) {
// x.y=f , stall x and y if they are not accessible
// also contribute write effects on stall site of x
-// if(!rg.isAccessible(lhs)) {
if(!accessible.isAccessible(fn,lhs)) {
rg.taintStallSite(fn, lhs);
}
-// if(!rg.isAccessible(rhs)) {
if(!accessible.isAccessible(fn,rhs)) {
rg.taintStallSite(fn, rhs);
}
// transfer func, BUT
// skip this node if it cannot create new reachability paths
if( !arrayReferencees.doesNotCreateNewReaching(fsen) ) {
- rg.assignTempXFieldFEqualToTempY(lhs, fdElement, rhs, fn);
+ rg.assignTempXFieldFEqualToTempY( lhs,
+ fdElement,
+ rhs,
+ fn,
+ edgeKeysRemoved,
+ edgeKeysAdded );
+ }
+
+ if( doDefiniteReachAnalysis ) {
+ definiteReachAnalysis.store( fn,
+ lhs,
+ fdElement,
+ rhs,
+ edgeKeysRemoved,
+ edgeKeysAdded );
+ didDefReachTransfer = true;
}
}
// transfer func
rg.assignTempEqualToNewAlloc(lhs, as);
+
+ if( doDefiniteReachAnalysis ) {
+ definiteReachAnalysis.newObject( fn, lhs );
+ didDefReachTransfer = true;
+ }
}
break;
+
+ case FKind.FlatLiteralNode:
+ // BIG NOTE: this transfer function is only here for
+ // points-to information for String literals. That's it.
+ // Effects and disjoint reachability and all of that don't
+ // care about references to literals.
+ FlatLiteralNode fln = (FlatLiteralNode) fn;
+
+ if( fln.getType().equals( stringType ) ) {
+ rg.assignTempEqualToStringLiteral( fln.getDst(),
+ newStringLiteralAlloc,
+ newStringLiteralBytesAlloc,
+ stringBytesField );
+ }
+ break;
+
+
case FKind.FlatSESEEnterNode:
sese = (FlatSESEEnterNode) fn;
FlatMethod fmCallee = state.getMethodFlat(mdCallee);
-
- // all this jimma jamma to debug call sites is WELL WORTH the
- // effort, so many bugs or buggy info goes crazy through call
- // sites
- boolean debugCallSite = false;
- if( state.DISJOINTDEBUGCALLEE != null &&
- state.DISJOINTDEBUGCALLER != null ) {
-
- boolean debugCalleeMatches = false;
- boolean debugCallerMatches = false;
-
- ClassDescriptor cdCallee = mdCallee.getClassDesc();
- if( cdCallee != null ) {
- debugCalleeMatches =
- state.DISJOINTDEBUGCALLEE.equals( cdCallee.getSymbol()+
- "."+
- mdCallee.getSymbol()
- );
- }
-
-
- if( mdCaller instanceof MethodDescriptor ) {
- ClassDescriptor cdCaller = ((MethodDescriptor)mdCaller).getClassDesc();
- if( cdCaller != null ) {
- debugCallerMatches =
- state.DISJOINTDEBUGCALLER.equals( cdCaller.getSymbol()+
- "."+
- mdCaller.getSymbol()
- );
- }
- } else {
- // for bristlecone style tasks
- debugCallerMatches =
- state.DISJOINTDEBUGCALLER.equals( mdCaller.getSymbol() );
- }
-
- debugCallSite = debugCalleeMatches && debugCallerMatches;
+ if( doDefiniteReachAnalysis ) {
+ definiteReachAnalysis.methodCall( fn, fc.getReturnTemp() );
+ didDefReachTransfer = true;
}
-
-
- boolean writeDebugDOTs = false;
- boolean stopAfter = false;
- if( debugCallSite ) {
- ++ReachGraph.debugCallSiteVisitCounter;
- System.out.println(" $$$ Debug call site visit "+
- ReachGraph.debugCallSiteVisitCounter+
- " $$$"
- );
- if(
- (ReachGraph.debugCallSiteVisitCounter >=
- ReachGraph.debugCallSiteVisitStartCapture) &&
-
- (ReachGraph.debugCallSiteVisitCounter <
- ReachGraph.debugCallSiteVisitStartCapture +
- ReachGraph.debugCallSiteNumVisitsToCapture)
- ) {
- writeDebugDOTs = true;
- System.out.println(" $$$ Capturing this call site visit $$$");
- if( ReachGraph.debugCallSiteStopAfter &&
- (ReachGraph.debugCallSiteVisitCounter ==
- ReachGraph.debugCallSiteVisitStartCapture +
- ReachGraph.debugCallSiteNumVisitsToCapture - 1)
- ) {
- stopAfter = true;
- }
- }
- }
-
-
- // calculate the heap this call site can reach--note this is
- // not used for the current call site transform, we are
- // grabbing this heap model for future analysis of the callees,
- // so if different results emerge we will return to this site
- ReachGraph heapForThisCall_old =
- getIHMcontribution(mdCallee, fc);
-
- // the computation of the callee-reachable heap
- // is useful for making the callee starting point
- // and for applying the call site transfer function
- Set<Integer> callerNodeIDsCopiedToCallee =
- new HashSet<Integer>();
-
- ReachGraph heapForThisCall_cur =
- rg.makeCalleeView(fc,
- fmCallee,
- callerNodeIDsCopiedToCallee,
- writeDebugDOTs
- );
-
- // enforce that a call site contribution can only
- // monotonically increase
- heapForThisCall_cur.merge(heapForThisCall_old);
-
- if( !heapForThisCall_cur.equals(heapForThisCall_old) ) {
- // if heap at call site changed, update the contribution,
- // and reschedule the callee for analysis
- addIHMcontribution(mdCallee, fc, heapForThisCall_cur);
-
- // map a FlatCall to its enclosing method/task descriptor
- // so we can write that info out later
- fc2enclosing.put(fc, mdCaller);
-
- if( state.DISJOINTDEBUGSCHEDULING ) {
- System.out.println(" context changed, scheduling callee: "+mdCallee);
- }
-
- if( state.DISJOINTDVISITSTACKEESONTOP ) {
- calleesToEnqueue.add(mdCallee);
- } else {
- enqueue(mdCallee);
- }
-
- }
-
// the transformation for a call site should update the
// current heap abstraction with any effects from the callee,
// or if the method is virtual, the effects from any possible
);
}
+
+ DebugCallSiteData dcsd = new DebugCallSiteData();
+
ReachGraph rgMergeOfPossibleCallers = new ReachGraph();
+
Iterator<MethodDescriptor> mdItr = setPossibleCallees.iterator();
while( mdItr.hasNext() ) {
MethodDescriptor mdPossible = mdItr.next();
addDependent(mdPossible, // callee
d); // caller
+
+ // decide for each possible resolution of the method whether we
+ // want to debug this call site
+ decideDebugCallSite( dcsd, mdCaller, mdPossible );
+
+
+
+ // calculate the heap this call site can reach--note this is
+ // not used for the current call site transform, we are
+ // grabbing this heap model for future analysis of the callees,
+ // so if different results emerge we will return to this site
+ ReachGraph heapForThisCall_old =
+ getIHMcontribution(mdPossible, fc);
+
+ // the computation of the callee-reachable heap
+ // is useful for making the callee starting point
+ // and for applying the call site transfer function
+ Set<Integer> callerNodeIDsCopiedToCallee =
+ new HashSet<Integer>();
+
+
+ ReachGraph heapForThisCall_cur =
+ rg.makeCalleeView(fc,
+ fmPossible,
+ callerNodeIDsCopiedToCallee,
+ dcsd.writeDebugDOTs
+ );
+
+
+ // enforce that a call site contribution can only
+ // monotonically increase
+ heapForThisCall_cur.merge(heapForThisCall_old);
+
+ if( !heapForThisCall_cur.equals(heapForThisCall_old) ) {
+ // if heap at call site changed, update the contribution,
+ // and reschedule the callee for analysis
+ addIHMcontribution(mdPossible, fc, heapForThisCall_cur);
+
+ // map a FlatCall to its enclosing method/task descriptor
+ // so we can write that info out later
+ fc2enclosing.put(fc, mdCaller);
+
+ if( state.DISJOINTDEBUGSCHEDULING ) {
+ System.out.println(" context changed at callsite: "+fc+", scheduling callee: "+mdPossible);
+ }
+
+ if( state.DISJOINTDVISITSTACKEESONTOP ) {
+ calleesToEnqueue.add(mdPossible);
+ } else {
+ enqueue(mdPossible);
+ }
+ }
+
+
+
+
// don't alter the working graph (rg) until we compute a
// result for every possible callee, merge them all together,
// then set rg to that
System.out.println(" callee hasn't been analyzed, scheduling: "+mdPossible);
}
+
} else {
+
// calculate the method call transform
rgPossibleCaller.resolveMethodCall(fc,
fmPossible,
rgPossibleCallee,
callerNodeIDsCopiedToCallee,
- writeDebugDOTs
+ dcsd.writeDebugDOTs
);
+
if( doEffectsAnalysis && fmContaining != fmAnalysisEntry ) {
-// if( !rgPossibleCallee.isAccessible( ReachGraph.tdReturn ) ) {
if( !accessible.isAccessible(fn, ReachGraph.tdReturn) ) {
rgPossibleCaller.makeInaccessible(fc.getReturnTemp() );
}
rgMergeOfPossibleCallers.merge(rgPossibleCaller);
}
+
- if( stopAfter ) {
- System.out.println("$$$ Exiting after requested captures of call site. $$$");
- System.exit(0);
- }
+ statusDebugCallSite( dcsd );
+
// now that we've taken care of building heap models for
// before transfer, do effects analysis support
if( doEffectsAnalysis && fmContaining != fmAnalysisEntry ) {
-// if(!rg.isAccessible(rhs)){
if(!accessible.isAccessible(fn,rhs)) {
rg.makeInaccessible(ReachGraph.tdReturn);
}
} // end switch
+
+ if( doDefiniteReachAnalysis && !didDefReachTransfer ) {
+ definiteReachAnalysis.otherStatement( fn );
+ }
+
+
+
// dead variables were removed before the above transfer function
// was applied, so eliminate heap regions and edges that are no
// longer part of the abstractly-live heap graph, and sweep up
fn2rgAtExit.put(fn, rgOnExit);
+
// at this point rg should be the correct update
// by an above transfer function, or untouched if
// the flat node type doesn't affect the heap
}
}
+ // Dumps the fixed-point reach graph of every flat node captured in
+ // mapFlatNodeToReachGraphPersist (populated only when the
+ // DISJOINT_WRITE_ALL_NODE_FINAL_GRAPHS flag is on) as a "NODEFINAL..."
+ // graph file per node.
+ // NOTE(review): raw Set/Iterator/Map.Entry types with casts; could use
+ // Set<Map.Entry<FlatNode,ReachGraph>> generics -- purely cosmetic.
+ private void writeFinalGraphsForEveryNode() {
+ Set entrySet = mapFlatNodeToReachGraphPersist.entrySet();
+ Iterator itr = entrySet.iterator();
+ while( itr.hasNext() ) {
+ Map.Entry me = (Map.Entry) itr.next();
+ FlatNode fn = (FlatNode) me.getKey();
+ ReachGraph rg = (ReachGraph) me.getValue();
+
+ rg.writeGraph("NODEFINAL"+fn,
+ true, // write labels (variables)
+ false, // selectively hide intermediate temp vars
+ true, // prune unreachable heap regions
+ true, // hide all reachability
+ true, // hide subset reachability states
+ true, // hide predicates
+ true); // hide edge taints
+ }
+ }
+
protected ReachGraph getPartial(Descriptor d) {
return mapDescriptorToCompleteReachGraph.get(d);
mods.addModifier(Modifiers.PUBLIC);
mods.addModifier(Modifiers.STATIC);
- TypeDescriptor returnType =
- new TypeDescriptor(TypeDescriptor.VOID);
-
+ TypeDescriptor returnType = new TypeDescriptor(TypeDescriptor.VOID);
+
this.mdAnalysisEntry =
new MethodDescriptor(mods,
returnType,
"analysisEntryMethod"
);
+ TypeDescriptor argsType = mdSourceEntry.getParamType(0);
TempDescriptor cmdLineArgs =
- new TempDescriptor("args",
- mdSourceEntry.getParamType(0)
+ new TempDescriptor("analysisEntryTemp_args",
+ argsType
);
-
- FlatNew fn =
- new FlatNew(mdSourceEntry.getParamType(0),
+ FlatNew fnArgs =
+ new FlatNew(argsType,
cmdLineArgs,
false // is global
);
- this.constructedCmdLineArgsNew = fn;
+ this.constructedCmdLineArgsNew = fnArgs;
+
+ TypeDescriptor argType = argsType.dereference();
+ TempDescriptor anArg =
+ new TempDescriptor("analysisEntryTemp_arg",
+ argType
+ );
+ FlatNew fnArg =
+ new FlatNew(argType,
+ anArg,
+ false // is global
+ );
+ this.constructedCmdLineArgNew = fnArg;
+
+ TypeDescriptor typeIndex = new TypeDescriptor(TypeDescriptor.INT);
+ TempDescriptor index =
+ new TempDescriptor("analysisEntryTemp_index",
+ typeIndex
+ );
+ FlatLiteralNode fli =
+ new FlatLiteralNode(typeIndex,
+ new Integer( 0 ),
+ index
+ );
+
+ FlatSetElementNode fse =
+ new FlatSetElementNode(cmdLineArgs,
+ index,
+ anArg
+ );
+
+ TypeDescriptor typeSize = new TypeDescriptor(TypeDescriptor.INT);
+ TempDescriptor sizeBytes =
+ new TempDescriptor("analysisEntryTemp_size",
+ typeSize
+ );
+ FlatLiteralNode fls =
+ new FlatLiteralNode(typeSize,
+ new Integer( 1 ),
+ sizeBytes
+ );
+
+ TempDescriptor strBytes =
+ new TempDescriptor("analysisEntryTemp_strBytes",
+ stringBytesType
+ );
+ FlatNew fnBytes =
+ new FlatNew(stringBytesType,
+ strBytes,
+ //sizeBytes,
+ false // is global
+ );
+ this.constructedCmdLineArgBytesNew = fnBytes;
+
+ FlatSetFieldNode fsf =
+ new FlatSetFieldNode(anArg,
+ stringBytesField,
+ strBytes
+ );
+
+ // throw this in so you can always see what the initial heap context
+ // looks like if you want to; it's cheap
+ FlatGenReachNode fgen = new FlatGenReachNode( "argContext" );
TempDescriptor[] sourceEntryArgs = new TempDescriptor[1];
sourceEntryArgs[0] = cmdLineArgs;
-
FlatCall fc =
new FlatCall(mdSourceEntry,
null, // dst temp
fe
);
- this.fmAnalysisEntry.addNext(fn);
- fn.addNext(fc);
- fc.addNext(frn);
- frn.addNext(fe);
+ List<FlatNode> nodes = new LinkedList<FlatNode>();
+ nodes.add( fnArgs );
+ nodes.add( fnArg );
+ nodes.add( fli );
+ nodes.add( fse );
+ nodes.add( fls );
+ nodes.add( fnBytes );
+ nodes.add( fsf );
+ nodes.add( fgen );
+ nodes.add( fc );
+ nodes.add( frn );
+ nodes.add( fe );
+
+ FlatNode current = this.fmAnalysisEntry;
+ for( FlatNode next: nodes ) {
+ current.addNext( next );
+ current = next;
+ }
+
+
+ // jjenista - this is useful for looking at the FlatIRGraph of the
+ // analysis entry method constructed above if you have to modify it.
+ // The usual method of writing FlatIRGraphs out doesn't work because
+ // this flat method is private to the model of this analysis only.
+ //try {
+ // FlatIRGraph flatMethodWriter =
+ // new FlatIRGraph( state, false, false, false );
+ // flatMethodWriter.writeFlatIRGraph( fmAnalysisEntry, "analysisEntry" );
+ //} catch( IOException e ) {}
}
Hashtable<FlatCall, ReachGraph> heapsFromCallers =
getIHMcontributions(d);
- heapsFromCallers.put(fc, rg);
+ // ensure inputs to initial contexts increase monotonically
+ ReachGraph merged = new ReachGraph();
+ merged.merge( rg );
+ merged.merge( heapsFromCallers.get( fc ) );
+
+ heapsFromCallers.put( fc, merged );
+
}
return fn2rgAtEnter.get(fn);
}
+
+
+ // Mutable scratch record threaded through decideDebugCallSite() and
+ // statusDebugCallSite() to track whether the current call site is under
+ // debug, whether any possible callee matched, and whether debug DOT
+ // graphs should be written / the analysis should stop after capture.
+ protected class DebugCallSiteData {
+ public boolean debugCallSite; // last callee considered matched the debug caller/callee pair
+ public boolean didOneDebug; // at least one possible callee matched this visit
+ public boolean writeDebugDOTs; // emit debug DOT graphs for this visit
+ public boolean stopAfter; // terminate the analysis after the capture window
+
+ public DebugCallSiteData() {
+ debugCallSite = false;
+ didOneDebug = false;
+ writeDebugDOTs = false;
+ stopAfter = false;
+ }
+ }
+
+ // Decides, for one possible resolution of a call site, whether the
+ // (caller, callee) pair matches the DISJOINTDEBUGCALLER /
+ // DISJOINTDEBUGCALLEE compiler flags; records the verdict in dcsd.
+ // Called once per possible callee, so dcsd.didOneDebug accumulates
+ // across resolutions of a virtual dispatch.
+ protected void decideDebugCallSite( DebugCallSiteData dcsd,
+ Descriptor taskOrMethodCaller,
+ MethodDescriptor mdCallee ) {
+
+ // all this machinery to debug call sites is WELL WORTH the effort --
+ // many bugs (or buggy analysis info) surface through call sites
+
+ // both flags must be set for call-site debugging to be active at all
+ if( state.DISJOINTDEBUGCALLEE == null ||
+ state.DISJOINTDEBUGCALLER == null ) {
+ return;
+ }
+
+
+ boolean debugCalleeMatches = false;
+ boolean debugCallerMatches = false;
+
+ // callee flag is matched against "Class.method"
+ ClassDescriptor cdCallee = mdCallee.getClassDesc();
+ if( cdCallee != null ) {
+ debugCalleeMatches =
+ state.DISJOINTDEBUGCALLEE.equals( cdCallee.getSymbol()+
+ "."+
+ mdCallee.getSymbol()
+ );
+ }
+
+
+ if( taskOrMethodCaller instanceof MethodDescriptor ) {
+ ClassDescriptor cdCaller = ((MethodDescriptor)taskOrMethodCaller).getClassDesc();
+ if( cdCaller != null ) {
+ debugCallerMatches =
+ state.DISJOINTDEBUGCALLER.equals( cdCaller.getSymbol()+
+ "."+
+ taskOrMethodCaller.getSymbol()
+ );
+ }
+ } else {
+ // for bristlecone style tasks
+ debugCallerMatches =
+ state.DISJOINTDEBUGCALLER.equals( taskOrMethodCaller.getSymbol() );
+ }
+
+
+ dcsd.debugCallSite = debugCalleeMatches && debugCallerMatches;
+
+
+ // only write DOTs while the visit counter is inside the configured
+ // capture window [start, start + numVisits)
+ dcsd.writeDebugDOTs =
+
+ dcsd.debugCallSite &&
+
+ (ReachGraph.debugCallSiteVisitCounter >=
+ ReachGraph.debugCallSiteVisitStartCapture) &&
+
+ (ReachGraph.debugCallSiteVisitCounter <
+ ReachGraph.debugCallSiteVisitStartCapture +
+ ReachGraph.debugCallSiteNumVisitsToCapture);
+
+
+
+ if( dcsd.debugCallSite ) {
+ dcsd.didOneDebug = true;
+ }
+ }
+
+ // After a call-site visit, reports debug status, advances the global
+ // visit counter, and (when the configured capture window has been fully
+ // consumed with stop-after enabled) terminates the analysis.
+ // NOTE(review): writeDebugDOTs is recomputed here AFTER the transform
+ // already consumed it for this visit, and the counter is incremented
+ // here rather than in decideDebugCallSite -- presumably intentional
+ // sequencing (decide uses the pre-increment count); confirm.
+ protected void statusDebugCallSite( DebugCallSiteData dcsd ) {
+
+ dcsd.writeDebugDOTs = false;
+ dcsd.stopAfter = false;
+
+ if( dcsd.didOneDebug ) {
+ System.out.println(" $$$ Debug call site visit "+
+ ReachGraph.debugCallSiteVisitCounter+
+ " $$$"
+ );
+ if(
+ (ReachGraph.debugCallSiteVisitCounter >=
+ ReachGraph.debugCallSiteVisitStartCapture) &&
+
+ (ReachGraph.debugCallSiteVisitCounter <
+ ReachGraph.debugCallSiteVisitStartCapture +
+ ReachGraph.debugCallSiteNumVisitsToCapture)
+ ) {
+ dcsd.writeDebugDOTs = true;
+ System.out.println(" $$$ Capturing this call site visit $$$");
+ // stop only on the LAST visit of the capture window
+ if( ReachGraph.debugCallSiteStopAfter &&
+ (ReachGraph.debugCallSiteVisitCounter ==
+ ReachGraph.debugCallSiteVisitStartCapture +
+ ReachGraph.debugCallSiteNumVisitsToCapture - 1)
+ ) {
+ dcsd.stopAfter = true;
+ }
+ }
+
+ ++ReachGraph.debugCallSiteVisitCounter;
+ }
+
+ if( dcsd.stopAfter ) {
+ System.out.println("$$$ Exiting after requested captures of call site. $$$");
+ // NOTE(review): exit code -1 signals failure to the shell even though
+ // this stop was explicitly requested -- confirm nonzero is intended
+ System.exit(-1);
+ }
+ }
+
+
+
+
+
// get successive captures of the analysis state, use compiler
// flags to control
boolean takeDebugSnapshots = false;
true, // selectively hide intermediate temp vars
true, // prune unreachable heap regions
false, // hide reachability
- false, // hide subset reachability states
+ true, // hide subset reachability states
true, // hide predicates
true); // hide edge taints
}
return rgAtEnter.canPointTo( x );
}
+
+
+ // Points-to query on the reach graph at ENTRY to programPoint: for each
+ // object x may point to, the set of objects reachable through field f.
+ // Returns null when no graph was recorded for that node.
+ public Hashtable< Alloc, Set<Alloc> > canPointToAt( TempDescriptor x,
+ FieldDescriptor f,
+ FlatNode programPoint ) {
+
+ ReachGraph rgAtEnter = fn2rgAtEnter.get( programPoint );
+ if( rgAtEnter == null ) {
+ return null;
+ }
+
+ return rgAtEnter.canPointTo( x, f.getSymbol(), f.getType() );
+ }
+
+
+ // Like canPointToAt, but for array variable x: queries the element
+ // pseudo-field of the array's component type on the graph at ENTRY to
+ // programPoint. Requires x to have an array type; returns null when no
+ // graph was recorded for that node.
+ public Hashtable< Alloc, Set<Alloc> > canPointToAtElement( TempDescriptor x,
+ FlatNode programPoint ) {
+
+ ReachGraph rgAtEnter = fn2rgAtEnter.get( programPoint );
+ if( rgAtEnter == null ) {
+ return null;
+ }
+
+ assert x.getType() != null;
+ assert x.getType().isArray();
+
+ return rgAtEnter.canPointTo( x, arrayElementFieldName, x.getType().dereference() );
+ }
// Points-to query on the reach graph at EXIT from programPoint: the set
// of objects x may point to after the node executes. Returns null when
// no exit graph was recorded for that node.
public Set<Alloc> canPointToAfter( TempDescriptor x,
FlatNode programPoint ) {
ReachGraph rgAtExit = fn2rgAtExit.get( programPoint );
+
if( rgAtExit == null ) {
return null;
}
return rgAtExit.canPointTo( x );
}
-
- public Hashtable< Alloc, Set<Alloc> > canPointToAt( TempDescriptor x,
- FieldDescriptor f,
- FlatNode programPoint ) {
- ReachGraph rgAtEnter = fn2rgAtEnter.get( programPoint );
- if( rgAtEnter == null ) {
+ public Hashtable< Alloc, Set<Alloc> > canPointToAfter( TempDescriptor x,
+ FieldDescriptor f,
+ FlatNode programPoint ) {
+
+ ReachGraph rgAtExit = fn2rgAtExit.get( programPoint );
+ if( rgAtExit == null ) {
return null;
}
- return rgAtEnter.canPointTo( x, f.getSymbol() );
+ return rgAtExit.canPointTo( x, f.getSymbol(), f.getType() );
}
- public Hashtable< Alloc, Set<Alloc> > canPointToAtElement( TempDescriptor x,
- FlatNode programPoint ) {
+ public Hashtable< Alloc, Set<Alloc> > canPointToAfterElement( TempDescriptor x,
+ FlatNode programPoint ) {
- ReachGraph rgAtEnter = fn2rgAtEnter.get( programPoint );
- if( rgAtEnter == null ) {
+ ReachGraph rgAtExit = fn2rgAtExit.get( programPoint );
+ if( rgAtExit == null ) {
return null;
}
assert x.getType() != null;
assert x.getType().isArray();
- return rgAtEnter.canPointTo( x, arrayElementFieldName );
+ return rgAtExit.canPointTo( x, arrayElementFieldName, x.getType().dereference() );
}
}