1 package Analysis.Scheduling;
3 import java.io.FileOutputStream;
4 import java.io.PrintStream;
5 import java.util.Hashtable;
6 import java.util.Iterator;
7 import java.util.Random;
8 import java.util.Vector;
10 import Analysis.OwnershipAnalysis.OwnershipAnalysis;
11 import Analysis.TaskStateAnalysis.FEdge;
12 import Analysis.TaskStateAnalysis.FlagState;
13 import Analysis.TaskStateAnalysis.TaskAnalysis;
14 import IR.ClassDescriptor;
16 import IR.TaskDescriptor;
19 public class MCImplSynthesis {
// Collaborating analyses used to generate, simulate and refine per-core
// schedulings. NOTE(review): this listing has dropped lines (the embedded
// original line numbers skip), so some fields referenced below — e.g.
// state, coreNum, probThreshold, gid/tryindex counters — are not visible
// in this view; confirm against the full file.
22 ScheduleAnalysis scheduleAnalysis;
23 TaskAnalysis taskAnalysis;
24 OwnershipAnalysis ownershipAnalysis;
25 ScheduleSimulator scheduleSimulator;
// Upper bound on how many candidate schedulings are kept per search
// round (initialized to 1000 in the constructor).
28 int scheduleThreshold;
// Constructor: wires the task/ownership analyses into a ScheduleAnalysis
// and a ScheduleSimulator sized for state.CORENUM cores, and sets the
// default search bounds. NOTE(review): parameter list and constructor
// arguments are only partially visible here (listing has dropped lines,
// e.g. orig 32, 34, 39, 42-43).
31 public MCImplSynthesis(State state,
33 OwnershipAnalysis oa) {
35 this.coreNum = state.CORENUM;
36 this.taskAnalysis = ta;
37 this.ownershipAnalysis = oa;
38 this.scheduleAnalysis = new ScheduleAnalysis(state,
40 scheduleAnalysis.setCoreNum(this.coreNum);
41 this.scheduleSimulator = new ScheduleSimulator(this.coreNum,
// Defaults: keep at most 1000 candidate schedulings per round; a
// probThreshold of 0 means the random early-exit in synthesis() is
// never taken.
44 this.scheduleThreshold = 1000;
45 this.probThreshold = 0;
// Simple accessors for the search parameters.
// Core count is delegated to the ScheduleAnalysis (the authoritative copy).
48 public int getCoreNum() {
49 return this.scheduleAnalysis.getCoreNum();
52 public int getScheduleThreshold() {
53 return scheduleThreshold;
56 public void setScheduleThreshold(int scheduleThreshold) {
57 this.scheduleThreshold = scheduleThreshold;
// NOTE(review): getProbThreshold's return statement is not visible in
// this listing (orig lines 61-63 dropped); presumably returns
// this.probThreshold — confirm against the full file.
60 public int getProbThreshold() {
64 public void setProbThreshold(int probThreshold) {
65 this.probThreshold = probThreshold;
// Main entry point: generates candidate schedulings, simulates them,
// and iteratively optimizes the best one until no further improved
// schedule graphs are produced. Returns the best Vector<Schedule> found.
// While running, System.out is redirected to a per-core-count file
// "SimulatorResult_<coreNum>.out" and restored on exit.
// NOTE(review): this listing has dropped lines — the `try` openers for
// both try/catch pairs, the `do {` opening the main loop, several
// closing braces, and some statements (e.g. orig 91-93, 106-107,
// 127-130, 134, 139, 146, 150, 158, 162, 164-169, 172, 174, 177, 180,
// 184, 187-190, 194-197) are not visible here.
68 public Vector<Schedule> synthesis() {
69 // Print stuff to the original output and error streams.
70 // The stuff printed through the 'origOut' and 'origErr' references
71 // should go to the console on most systems while the messages
72 // printed through the 'System.out' and 'System.err' will end up in
73 // the files we created for them.
74 //origOut.println ("\nRedirect: Round #2");
75 //System.out.println ("Test output via 'SimulatorResult.out'.");
76 //origOut.println ("Test output via 'origOut' reference.");
78 // Save the current standard input, output, and error streams
79 // for later restoration.
80 PrintStream origOut = System.out;
82 // Create a new output stream for the standard output.
83 PrintStream stdout = null;
85 stdout = new PrintStream(
86 new FileOutputStream(this.state.outputdir + "SimulatorResult_"
87 + this.coreNum + ".out"));
88 } catch (Exception e) {
89 // Sigh. Couldn't open the file.
90 System.out.println("Redirect: Unable to open output file!");
94 // Print stuff to the original output and error streams.
95 // On most systems all of this will end up on your console when you
96 // run this application.
97 //origOut.println ("\nRedirect: Round #1");
98 //System.out.println ("Test output via 'System.out'.");
99 //origOut.println ("Test output via 'origOut' reference.");
101 // Set the System out and err streams to use our replacements.
102 System.setOut(stdout);
104 Vector<Schedule> scheduling = null;
105 Vector<ScheduleNode> schedulinggraph = null;
108 // generate multiple schedulings
109 this.scheduleAnalysis.setScheduleThreshold(this.scheduleThreshold);
110 this.scheduleAnalysis.schedule();
112 Vector<Vector<ScheduleNode>> scheduleGraphs = null;
113 Vector<Vector<ScheduleNode>> newscheduleGraphs =
114 this.scheduleAnalysis.getScheduleGraphs();
115 Vector<Vector<Schedule>> schedulings = new Vector<Vector<Schedule>>();
116 Vector<Integer> selectedSchedulings = new Vector<Integer>();
117 Vector<Vector<SimExecutionEdge>> selectedSimExeGraphs =
118 new Vector<Vector<SimExecutionEdge>>();
// Collect every task that takes more than one parameter; these need
// special handling when schedules are generated (ally cores).
120 // check all multi-parameter tasks
121 Vector<TaskDescriptor> multiparamtds = new Vector<TaskDescriptor>();
122 Iterator it_tasks = this.state.getTaskSymbolTable().getDescriptorsIterator();
123 while(it_tasks.hasNext()) {
124 TaskDescriptor td = (TaskDescriptor)it_tasks.next();
125 if(td.numParameters() > 1) {
126 multiparamtds.addElement(td);
131 int bestexetime = Integer.MAX_VALUE;
132 Random rand = new Random();
133 // simulate the generated schedulings and try to optimize it
// Main optimize loop body (the `do {` opener is not visible in this
// listing): turn each candidate schedule graph into a concrete
// scheduling, simulate all of them, and keep the fastest.
135 System.out.print("+++++++++++++++++++++++++++++++++++++++++++++++++++\n");
136 System.out.print("Simulate and optimize round: #" + tryindex + ": \n");
137 gid += newscheduleGraphs.size();
138 scheduleGraphs = newscheduleGraphs;
140 // get scheduling layouts from schedule graphs
141 for(int i = 0; i < scheduleGraphs.size(); i++) {
142 Vector<ScheduleNode> scheduleGraph = scheduleGraphs.elementAt(i);
143 Vector<Schedule> tmpscheduling =
144 generateScheduling(scheduleGraph, multiparamtds);
145 schedulings.add(tmpscheduling);
147 selectedSchedulings.clear();
148 selectedSimExeGraphs.clear();
// simulate() fills selectedSchedulings/selectedSimExeGraphs with the
// winning candidates and returns the winner's execution time.
149 int tmpexetime = this.scheduleSimulator.simulate(schedulings,
151 selectedSimExeGraphs);
152 if(tmpexetime < bestexetime) {
153 bestexetime = tmpexetime;
154 scheduling = schedulings.elementAt(selectedSchedulings.elementAt(0));
155 schedulinggraph = newscheduleGraphs.elementAt(selectedSchedulings.elementAt(0));
156 System.out.print("end of: #" + tryindex + " (bestexetime: " + bestexetime + ")\n");
157 System.out.print("+++++++++++++++++++++++++++++++++++++++++++++++++++\n");
159 } else if(tmpexetime == bestexetime) {
160 System.out.print("end of: #" + tryindex + " (bestexetime: " + bestexetime + ")\n");
161 System.out.print("+++++++++++++++++++++++++++++++++++++++++++++++++++\n");
// Random early-exit: with probability probThreshold% stop refining a
// scheduling that did not improve (probThreshold defaults to 0, i.e.
// never). The body of this `if` is not visible in this listing.
163 if((Math.abs(rand.nextInt()) % 100) < this.probThreshold) {
170 // try to optimize the best one scheduling
171 newscheduleGraphs = optimizeScheduling(scheduleGraphs,
173 selectedSimExeGraphs,
175 this.scheduleThreshold);
176 }while(newscheduleGraphs != null); // TODO: could it possibly lead to endless loop?
// Drop references to the (potentially large) intermediate graphs so
// they can be garbage-collected.
178 scheduleGraphs = null;
179 newscheduleGraphs = null;
181 selectedSchedulings = null;
182 selectedSimExeGraphs = null;
183 multiparamtds = null;
// Dump the selected scheduling as a Graphviz .dot file for inspection.
185 String path = this.state.outputdir + "scheduling_selected.dot";
186 SchedulingUtil.printScheduleGraph(path, schedulinggraph);
188 // Close the streams.
191 System.setOut(origOut);
192 } catch (Exception e) {
193 origOut.println("Redirect: Unable to close files!");
// For each selected schedule graph, find its simulated critical path and
// try to optimize along it; returns the union of all improved schedule
// graphs, or null when nothing could be improved (which terminates the
// do-while in synthesis()). Single-core layouts are never optimized.
// NOTE(review): several lines are missing from this listing (e.g. the
// trailing params of the signature, orig 205-208, 219-221, and loop
// closers), so the exact argument lists are not fully visible.
199 private Vector<Vector<ScheduleNode>> optimizeScheduling(Vector<Vector<ScheduleNode>> scheduleGraphs,
200 Vector<Integer> selectedScheduleGraphs,
201 Vector<Vector<SimExecutionEdge>> selectedSimExeGraphs,
// Nothing to parallelize on a single core.
204 if(this.coreNum == 1) {
209 Vector<Vector<ScheduleNode>> optimizeschedulegraphs = null;
213 for(int i = 0; i < selectedScheduleGraphs.size(); i++) {
214 Vector<ScheduleNode> schedulegraph = scheduleGraphs.elementAt(
215 selectedScheduleGraphs.elementAt(i));
216 Vector<SimExecutionEdge> simexegraph = selectedSimExeGraphs.elementAt(i);
217 Vector<SimExecutionEdge> criticalPath = analyzeCriticalPath(simexegraph);
218 Vector<Vector<ScheduleNode>> tmposchedulegraphs = optimizeCriticalPath(schedulegraph,
// Accumulate any improved graphs; lgid/left appear to track graph ids
// and a remaining-candidates budget — confirm against the full file.
222 if(tmposchedulegraphs != null) {
223 if(optimizeschedulegraphs == null) {
224 optimizeschedulegraphs = new Vector<Vector<ScheduleNode>>();
226 optimizeschedulegraphs.addAll(tmposchedulegraphs);
227 lgid += tmposchedulegraphs.size();
228 left -= tmposchedulegraphs.size();
230 schedulegraph = null;
233 tmposchedulegraphs = null;
237 schedulegraph = null;
240 tmposchedulegraphs = null;
243 return optimizeschedulegraphs;
// Extracts the critical (longest-weight) path of a simulated execution
// graph, starting from the source node of its first edge, then annotates
// each edge on that path with its earliest possible ("best") start time.
// NOTE(review): the return statement is not visible in this listing
// (orig lines 252-255 dropped); presumably returns criticalPath —
// confirm against the full file.
246 private Vector<SimExecutionEdge> analyzeCriticalPath(Vector<SimExecutionEdge> simexegraph) {
247 // first figure out the critical path
248 Vector<SimExecutionEdge> criticalPath = new Vector<SimExecutionEdge>();
249 SimExecutionNode senode = (SimExecutionNode)simexegraph.elementAt(0).getSource();
250 getCriticalPath(senode, criticalPath);
251 computeBestStartPoint(criticalPath);
// For every edge (task execution) on the critical path, computes the
// earliest time it could start: the latest finish time over all its
// predicate edges, or its source node's timepoint when it has only
// in-edges, or 0 when it is a root. Also records which predicate was the
// last to finish (edge + node) for later optimization decisions.
// NOTE(review): some lines are dropped from this listing (e.g. the
// declaration/initialization of `starttime` at orig 262-263, and the
// assignment under the tmptime comparison at orig 269).
256 private void computeBestStartPoint(Vector<SimExecutionEdge> criticalPath) {
257 // calculate the earliest start time of each task on the critial path
258 for(int i = 0; i < criticalPath.size(); i++) {
259 SimExecutionEdge seedge = criticalPath.elementAt(i);
260 Vector<SimExecutionEdge> predicates = seedge.getPredicates();
261 if(predicates.size() > 0) {
264 // check the latest finish time of all the predicates
265 for(int j = 0; j < predicates.size(); j++) {
266 SimExecutionEdge predicate = predicates.elementAt(j);
267 int tmptime = predicate.getBestStartPoint() + predicate.getWeight();
268 if(tmptime > starttime) {
270 seedge.setLastpredicateEdge(predicate);
// A predicate with a task descriptor finishes at its target node; a
// bare (transfer?) edge is anchored at its source — confirm semantics
// against SimExecutionEdge.
271 if(predicate.getTd() != null) {
272 seedge.setLastpredicateNode((SimExecutionNode)predicate.getTarget());
275 seedge.setLastpredicateNode((SimExecutionNode)predicate.getSource());
279 seedge.setBestStartPoint(starttime);
280 } else if(seedge.getSource().getInedgeVector().size() > 0) {
281 // should have only one in edge
282 int starttime = ((SimExecutionNode)seedge.getSource()).getTimepoint();
283 seedge.setBestStartPoint(starttime);
// No predicates and no in-edges: a root task can start immediately.
286 seedge.setBestStartPoint(0);
292 // TODO: currently only get one critical path. It's possible that there are
293 // multiple critical paths and some of them can not be optimized while others
294 // can. Need to fix up for this situation.
// Recursive longest-path search from senode: fills criticalPath with the
// heaviest edge sequence reachable from senode and returns its total
// weight. NOTE(review): lines are dropped from this listing — the base
// case's return (orig 299-301), the recursive-call argument lists, the
// `sum`/`tmpsum` comparison opener (orig 317) and its update (orig
// 321-323) are not visible here.
295 private int getCriticalPath(SimExecutionNode senode,
296 Vector<SimExecutionEdge> criticalPath) {
297 Vector<SimExecutionEdge> edges = (Vector<SimExecutionEdge>)senode.getEdgeVector();
// Base case: leaf node, path weight 0 (return not visible in listing).
298 if((edges == null) || (edges.size() == 0)) {
302 Vector<SimExecutionEdge> subcriticalpath = new Vector<SimExecutionEdge>();
// Seed with the first out-edge's downstream critical path...
303 SimExecutionEdge edge = edges.elementAt(0);
304 int sum = edge.getWeight() + getCriticalPath((SimExecutionNode)edge.getTarget(),
306 criticalPath.clear();
307 criticalPath.add(edge);
308 criticalPath.addAll(subcriticalpath);
// ...then try every other out-edge and keep whichever yields a longer
// total path.
309 for(int i = 1; i < edges.size(); i++) {
310 edge = edges.elementAt(i);
311 subcriticalpath.clear();
312 int tmpsum = edge.getWeight()
313 + getCriticalPath((SimExecutionNode)edge.getTarget(),
316 // find a longer path
318 criticalPath.clear();
319 criticalPath.add(edge);
320 criticalPath.addAll(subcriticalpath);
324 subcriticalpath.clear();
325 subcriticalpath = null;
// Attempts to shorten a simulated critical path. Strategy:
//  1. Find edges whose actual start time is later than their computed
//     best start time (i.e. delayed by core placement, not by data
//     dependencies), keeping only those whose last predicate is already
//     fixed, and group them by core and then by task type.
//  2. If spare cores exist at the relevant timepoint, hand the groups to
//     innerOptimizeCriticalPath to move work onto the spare cores.
//  3. Otherwise try reordering tasks on the same core (earliest-start
//     task first), or re-flush predicate/start-time info and recurse.
// Returns new candidate schedule graphs, or null if nothing improved.
// NOTE(review): this listing drops many lines (signature tail, several
// `else`/brace lines, the bodies at orig 364-365, 381-384, 415,
// 422-429, 438-442, 449-459, 503-506, 517-533), so the exact control
// flow between the visible fragments cannot be fully confirmed here.
329 private Vector<Vector<ScheduleNode>> optimizeCriticalPath(Vector<ScheduleNode> scheduleGraph,
330 Vector<SimExecutionEdge> criticalPath,
333 Vector<Vector<ScheduleNode>> optimizeschedulegraphs = null;
337 // for test, print out the criticalPath
338 if(this.state.PRINTCRITICALPATH) {
339 SchedulingUtil.printCriticalPath(this.state.outputdir + "criticalpath_" + lgid + ".dot",
343 // first check all seedges whose real start point is late than predicted
344 // earliest start time and group them
345 int opcheckpoint = Integer.MAX_VALUE;
346 Vector<Integer> sparecores = null;
347 // first group according to core index,
348 // then group according to task type
349 Hashtable<Integer, Hashtable<TaskDescriptor, Vector<SimExecutionEdge>>> tooptimize =
350 new Hashtable<Integer, Hashtable<TaskDescriptor, Vector<SimExecutionEdge>>>();
351 for(int i = 0; i < criticalPath.size(); i++) {
352 SimExecutionEdge seedge = criticalPath.elementAt(i);
353 int starttime = seedge.getBestStartPoint();
354 if(starttime < ((SimExecutionNode)seedge.getSource()).getTimepoint()) {
355 // no restrictions due to data dependencies
356 // have potential to be parallelled and start execution earlier
357 seedge.setFixedTime(false);
358 // consider to optimize it only when its predicates can NOT
359 // be optimized, otherwise first considering optimize its predicates
360 SimExecutionEdge lastpredicateedge = seedge.getLastpredicateEdge();
361 if(lastpredicateedge.isFixedTime()) {
362 if(opcheckpoint >= starttime) {
363 // only consider the tasks with smallest best start time
364 if(opcheckpoint > starttime) {
366 opcheckpoint = starttime;
367 SimExecutionNode lastpredicatenode = seedge.getLastpredicateNode();
368 int timepoint = lastpredicatenode.getTimepoint();
// A predicate edge without a task descriptor contributes its own
// weight on top of the node timepoint — confirm against the full file.
369 if(lastpredicateedge.getTd() == null) {
371 timepoint += lastpredicateedge.getWeight();
373 // mapping to critical path
374 for(int index = 0; index < criticalPath.size(); index++) {
375 SimExecutionEdge tmpseedge = criticalPath.elementAt(index);
376 SimExecutionNode tmpsenode =
377 (SimExecutionNode)tmpseedge.getTarget();
378 if(tmpsenode.getTimepoint() > timepoint) {
379 // get the spare core info
380 sparecores = tmpsenode.getSpareCores();
// Group the delayed edge by (core, task type) for later optimization.
385 int corenum = seedge.getCoreNum();
386 if(!tooptimize.containsKey(corenum)) {
387 tooptimize.put(corenum,
388 new Hashtable<TaskDescriptor, Vector<SimExecutionEdge>>());
390 if(!tooptimize.get(corenum).containsKey(seedge.getTd())) {
391 tooptimize.get(corenum).put(seedge.getTd(),
392 new Vector<SimExecutionEdge>());
394 tooptimize.get(corenum).get(seedge.getTd()).add(seedge);
400 if(tooptimize.size() > 0) {
401 Iterator<Integer> it_cores = tooptimize.keySet().iterator();
402 // check if it is possible to optimize these tasks
403 if((sparecores == null) || (sparecores.size() == 0)) {
404 // lack of spare cores
405 while(it_cores.hasNext()) {
406 int corenum = it_cores.next();
407 Hashtable<TaskDescriptor, Vector<SimExecutionEdge>> candidatetasks =
408 tooptimize.get(corenum);
409 if(candidatetasks.keySet().size() > 1) {
410 // there are multiple tasks could be optimized to start from
411 // this timepoint, try to change original execution order
// Pick the task type whose earliest instance starts soonest.
// NOTE(review): candidatetasks.get(td) with td still null at the
// first iteration looks suspicious (orig 415/422-429 dropped —
// td is presumably assigned inside the missing lines); verify
// against the full file.
412 Iterator<TaskDescriptor> it_tds = candidatetasks.keySet().iterator();
413 TaskDescriptor td = null;
414 int starttime = Integer.MAX_VALUE;
416 TaskDescriptor tmptd = it_tds.next();
417 Vector<SimExecutionEdge> seedges = candidatetasks.get(td);
418 int tmptime = ((SimExecutionNode)seedges.elementAt(0).getSource()).getTimepoint();
419 for(int i = 1; i < seedges.size(); i++) {
420 int ttime = ((SimExecutionNode)seedges.elementAt(i).getSource()).getTimepoint();
421 if(ttime < tmptime) {
425 if(tmptime < starttime) {
430 }while(it_tds.hasNext());
432 // TODO: only consider non-multi-param tasks currently
433 if(td.numParameters() == 1) {
434 Hashtable<Integer, Hashtable<TaskDescriptor, Vector<SimExecutionEdge>>> tooptimize2 =
435 new Hashtable<Integer, Hashtable<TaskDescriptor, Vector<SimExecutionEdge>>>();
436 tooptimize2.put(corenum, candidatetasks);
437 Vector<Vector<ScheduleNode>> ops = innerOptimizeCriticalPath(scheduleGraph,
443 if(optimizeschedulegraphs == null) {
444 optimizeschedulegraphs = new Vector<Vector<ScheduleNode>>();
446 optimizeschedulegraphs.addAll(ops);
447 lgid += optimizeschedulegraphs.size();
448 left -= optimizeschedulegraphs.size();
455 candidatetasks = null;
460 return optimizeschedulegraphs;
463 // flush the dependences and earliest start time
// Re-anchor each delayed edge's predicate to the critical-path edge
// that actually gates it, then recompute best start points and recurse.
464 it_cores = tooptimize.keySet().iterator();
465 while(it_cores.hasNext()) {
466 Hashtable<TaskDescriptor, Vector<SimExecutionEdge>> candidatetasks =
467 tooptimize.get(it_cores.next());
468 Iterator<Vector<SimExecutionEdge>> it_edgevecs =
469 candidatetasks.values().iterator();
470 while(it_edgevecs.hasNext()) {
471 Vector<SimExecutionEdge> edgevec = it_edgevecs.next();
472 for(int j = 0; j < edgevec.size(); j++) {
473 SimExecutionEdge edge = edgevec.elementAt(j);
474 SimExecutionEdge lastpredicateedge = edge.getLastpredicateEdge();
475 SimExecutionNode lastpredicatenode = edge.getLastpredicateNode();
476 // if(edge.getCoreNum() != lastpredicate.getCoreNum()) // should never hit this
477 int timepoint = lastpredicatenode.getTimepoint();
478 if(lastpredicateedge.getTd() == null) {
480 timepoint += lastpredicateedge.getWeight();
482 // mapping to critical path
483 for(int index = 0; index < criticalPath.size(); index++) {
484 SimExecutionEdge tmpseedge = criticalPath.elementAt(index);
485 SimExecutionNode tmpsenode =
486 (SimExecutionNode)tmpseedge.getTarget();
487 if(tmpsenode.getTimepoint() > timepoint) {
488 // update the predicate info
489 edge.getPredicates().remove(lastpredicateedge);
490 edge.addPredicate(criticalPath.elementAt(index));
497 candidatetasks = null;
501 computeBestStartPoint(criticalPath);
502 Vector<Vector<ScheduleNode>> ops = optimizeCriticalPath(scheduleGraph,
507 if(optimizeschedulegraphs == null) {
508 optimizeschedulegraphs = new Vector<Vector<ScheduleNode>>();
510 optimizeschedulegraphs.addAll(ops);
514 // there are spare cores, try to reorganize the tasks to the spare
516 Vector<Vector<ScheduleNode>> ops = innerOptimizeCriticalPath(scheduleGraph,
522 if(optimizeschedulegraphs == null) {
523 optimizeschedulegraphs = new Vector<Vector<ScheduleNode>>();
525 optimizeschedulegraphs.addAll(ops);
534 return optimizeschedulegraphs;
// Produces new candidate schedule graphs by cloning the current graph,
// splitting the ClassNodes that host the delayed tasks out of their
// ScheduleNodes, and enumerating ways to recombine the split nodes with
// the root nodes (spare cores). Each valid combination yields one new
// schedule graph, bounded by the remaining `left` budget.
// NOTE(review): this listing drops lines (signature tail, orig 540-545,
// 580-585, 590-598, 601-602, 609-610, 615, 621-634, 639), so loop
// closers and some statements are not visible here.
537 private Vector<Vector<ScheduleNode>> innerOptimizeCriticalPath(Vector<ScheduleNode> scheduleGraph,
538 Hashtable<Integer, Hashtable<TaskDescriptor, Vector<SimExecutionEdge>>> tooptimize,
539 Vector<Integer> sparecores,
544 Vector<Vector<ScheduleNode>> optimizeschedulegraphs = null;
546 // first clone the whole graph
547 Vector<ScheduleNode> newscheduleGraph =
548 cloneScheduleGraph(scheduleGraph, lgid);
// Roots = the spare cores' nodes (or all nodes when no spare-core list
// was provided); split nodes may be combined onto these.
550 // these nodes are root nodes
551 Vector<ScheduleNode> roots = new Vector<ScheduleNode>();
552 for(int i = 0; i < newscheduleGraph.size(); i++) {
553 if((sparecores == null) || (sparecores.contains(i))) {
554 roots.add(newscheduleGraph.elementAt(i));
558 // map the tasks associated to SimExecutionedges to original
559 // ClassNode in the ScheduleGraph and split them from previous
561 Vector<ScheduleNode> tocombines = new Vector<ScheduleNode>();
562 Iterator<Integer> it_cores = tooptimize.keySet().iterator();
563 while(it_cores.hasNext()) {
564 int corenum = it_cores.next();
565 Hashtable<TaskDescriptor, Vector<SimExecutionEdge>> candidatetasks =
566 tooptimize.get(corenum);
567 Iterator<TaskDescriptor> it_tds = candidatetasks.keySet().iterator();
568 while(it_tds.hasNext()) {
569 TaskDescriptor td = it_tds.next();
570 int numtosplit = candidatetasks.get(td).size();
571 // TODO: currently do not consider multi-param tasks
572 if(td.numParameters() == 1) {
573 ClassDescriptor cd = td.getParamType(0).getClassDesc();
574 ScheduleNode snode = newscheduleGraph.elementAt(corenum); // corresponding ScheduleNode
575 Iterator<ClassNode> it_cnodes = snode.getClassNodesIterator();
576 Vector<ClassNode> tosplit = new Vector<ClassNode>();
// Collect up to numtosplit ClassNodes of the task's parameter class
// (collection body at orig 580-585 not visible in this listing).
577 while((numtosplit > 0) && (it_cnodes.hasNext())) {
578 ClassNode cnode = it_cnodes.next();
579 if(cnode.getClassDescriptor().equals(cd)) {
586 for(int i = 0; i < tosplit.size(); i++) {
587 ScheduleNode splitnode = snode.spliteClassNode(tosplit.elementAt(i));
588 newscheduleGraph.add(splitnode);
589 tocombines.add(splitnode);
594 candidatetasks = null;
// Nothing was split: no new graphs to propose.
599 if(tocombines.size() == 0) {
600 return optimizeschedulegraphs;
603 SchedulingUtil.assignCids(newscheduleGraph);
605 // get all the ScheduleEdge
606 Vector<ScheduleEdge> scheduleEdges = new Vector<ScheduleEdge>();
607 for(int i= 0; i < newscheduleGraph.size(); i++) {
608 scheduleEdges.addAll((Vector<ScheduleEdge>)newscheduleGraph.elementAt(i).getEdgeVector());
611 Vector<Vector<ScheduleNode>> rootNodes =
612 SchedulingUtil.rangeScheduleNodes(roots);
613 Vector<Vector<ScheduleNode>> nodes2combine =
614 SchedulingUtil.rangeScheduleNodes(tocombines);
// Enumerate root/split combinations while the candidate budget lasts;
// each combination is materialized into a full schedule graph.
616 CombinationUtil.CombineGenerator cGen =
617 CombinationUtil.allocateCombineGenerator(rootNodes, nodes2combine);
618 while ((left > 0) && (cGen.nextGen())) {
619 Vector<Vector<CombinationUtil.Combine>> combine = cGen.getCombine();
620 Vector<ScheduleNode> sNodes = SchedulingUtil.generateScheduleGraph(this.state,
626 if(optimizeschedulegraphs == null) {
627 optimizeschedulegraphs = new Vector<Vector<ScheduleNode>>();
629 optimizeschedulegraphs.add(sNodes);
// Release intermediate structures for garbage collection.
635 nodes2combine = null;
636 newscheduleGraph = null;
637 scheduleEdges.clear();
638 scheduleEdges = null;
640 return optimizeschedulegraphs;
// Deep-copies a schedule graph (delegating the node/edge cloning to
// SchedulingUtil.cloneScheduleGraph) and re-assigns cids on the copy.
// The sn2hash/sn2sn tables map originals to their clones for the helper.
// NOTE(review): the cloneScheduleGraph(...) argument list (orig 657-662)
// and the return statement (orig 666 onward) are not visible in this
// listing; presumably returns `result` — confirm against the full file.
643 private Vector<ScheduleNode> cloneScheduleGraph(Vector<ScheduleNode> scheduleGraph,
645 Vector<ScheduleNode> result = new Vector<ScheduleNode>();
647 // get all the ScheduleEdge
648 Vector<ScheduleEdge> scheduleEdges = new Vector<ScheduleEdge>();
649 for(int i= 0; i < scheduleGraph.size(); i++) {
650 scheduleEdges.addAll((Vector<ScheduleEdge>)scheduleGraph.elementAt(i).getEdgeVector());
652 Hashtable<ScheduleNode, Hashtable<ClassNode, ClassNode>> sn2hash =
653 new Hashtable<ScheduleNode, Hashtable<ClassNode, ClassNode>>();
654 Hashtable<ScheduleNode, ScheduleNode> sn2sn =
655 new Hashtable<ScheduleNode, ScheduleNode>();
656 SchedulingUtil.cloneScheduleGraph(scheduleGraph,
663 SchedulingUtil.assignCids(result);
664 scheduleEdges.clear();
665 scheduleEdges = null;
// Turns a schedule graph into one Schedule per core: maps each
// ScheduleNode to a core index, registers every task reachable through
// the node's ClassNodes' flag states, records the startup core, wires
// NEWEDGE/TRANSEDGE targets, and finally fills in ally/target cores for
// multi-parameter tasks. NOTE(review): this method continues beyond the
// end of this listing (it is cut off mid-body), and many interior lines
// are dropped — comments below only describe the visible fragments.
674 private Vector<Schedule> generateScheduling(Vector<ScheduleNode> scheduleGraph,
675 Vector<TaskDescriptor> multiparamtds) {
676 Hashtable<TaskDescriptor, Vector<Schedule>> td2cores =
677 new Hashtable<TaskDescriptor, Vector<Schedule>>(); // multiparam tasks reside on which cores
678 Vector<Schedule> scheduling = new Vector<Schedule>(scheduleGraph.size());
679 // for each ScheduleNode create a schedule node representing a core
680 Hashtable<ScheduleNode, Integer> sn2coreNum =
681 new Hashtable<ScheduleNode, Integer>();
683 for(j = 0; j < scheduleGraph.size(); j++) {
684 sn2coreNum.put(scheduleGraph.elementAt(j), j);
687 boolean setstartupcore = false;
688 Schedule startup = null;
689 int gid = scheduleGraph.elementAt(0).getGid();
690 for(j = 0; j < scheduleGraph.size(); j++) {
691 Schedule tmpSchedule = new Schedule(j, gid);
692 ScheduleNode sn = scheduleGraph.elementAt(j);
// Register every task reachable from this core's ClassNodes via their
// flag-state transition edges, and remember which cores host each task.
694 Vector<ClassNode> cNodes = sn.getClassNodes();
695 for(int k = 0; k < cNodes.size(); k++) {
696 Iterator it_flags = cNodes.elementAt(k).getFlags();
697 while(it_flags.hasNext()) {
698 FlagState fs = (FlagState)it_flags.next();
699 Iterator it_edges = fs.edges();
700 while(it_edges.hasNext()) {
701 TaskDescriptor td = ((FEdge)it_edges.next()).getTask();
702 tmpSchedule.addTask(td);
703 if(!td2cores.containsKey(td)) {
704 td2cores.put(td, new Vector<Schedule>());
706 Vector<Schedule> tmpcores = td2cores.get(td);
707 if(!tmpcores.contains(tmpSchedule)) {
708 tmpcores.add(tmpSchedule);
711 // if the FlagState can be fed to some multi-param tasks,
712 // need to record corresponding ally cores later
713 if(td.numParameters() > 1) {
714 tmpSchedule.addFState4TD(td, fs);
// The core hosting the StartupClass task is recorded exactly once.
716 if(td.getParamType(0).getClassDesc().getSymbol().equals(
717 TypeUtil.StartupClass)) {
718 assert(!setstartupcore);
720 startup = tmpSchedule;
721 setstartupcore = true;
729 // For each of the ScheduleEdge out of this ScheduleNode, add the target ScheduleNode into the queue inside sn
730 Iterator it_edges = sn.edges();
731 while(it_edges.hasNext()) {
732 ScheduleEdge se = (ScheduleEdge)it_edges.next();
733 ScheduleNode target = (ScheduleNode)se.getTarget();
734 Integer targetcore = sn2coreNum.get(target);
735 switch(se.getType()) {
// NEWEDGE: new objects of this flag state are produced on the target
// core, once per unit of the edge's new-rate.
736 case ScheduleEdge.NEWEDGE: {
737 for(int k = 0; k < se.getNewRate(); k++) {
738 tmpSchedule.addTargetCore(se.getFstate(), targetcore);
743 case ScheduleEdge.TRANSEDGE: {
745 tmpSchedule.addTargetCore(se.getFstate(),
747 se.getTargetFState());
748 // check if missed some FlagState associated with some multi-parameter
749 // task, which has been cloned when splitting a ClassNode
750 FlagState fs = se.getSourceFState();
751 FlagState tfs = se.getTargetFState();
752 Iterator it = tfs.edges();
753 while(it.hasNext()) {
754 TaskDescriptor td = ((FEdge)it.next()).getTask();
755 if(td.numParameters() > 1) {
756 if(tmpSchedule.getTasks().contains(td)) {
757 tmpSchedule.addFState4TD(td, fs);
// Self-loop edges kept inside the ScheduleNode: targets stay on this
// core (index j) rather than a different one.
765 it_edges = sn.getScheduleEdgesIterator();
766 while(it_edges.hasNext()) {
767 ScheduleEdge se = (ScheduleEdge)it_edges.next();
768 switch(se.getType()) {
769 case ScheduleEdge.NEWEDGE: {
770 for(int k = 0; k < se.getNewRate(); k++) {
771 tmpSchedule.addTargetCore(se.getFstate(), j);
776 case ScheduleEdge.TRANSEDGE: {
778 tmpSchedule.addTargetCore(se.getFstate(),
780 se.getTargetFState());
786 scheduling.add(tmpSchedule);
789 int number = this.coreNum;
790 if(scheduling.size() < number) {
791 number = scheduling.size();
794 // set up all the associate ally cores
795 if(multiparamtds.size() > 0) {
796 for(j = 0; j < multiparamtds.size(); ++j) {
797 TaskDescriptor td = multiparamtds.elementAt(j);
799 (Vector<FEdge>) this.taskAnalysis.getFEdgesFromTD(td);
800 Vector<Schedule> cores = td2cores.get(td);
801 for(int k = 0; k < cores.size(); ++k) {
802 Schedule tmpSchedule = cores.elementAt(k);
804 for(int h = 0; h < fes.size(); ++h) {
805 FEdge tmpfe = fes.elementAt(h);
806 FlagState tmpfs = (FlagState)tmpfe.getTarget();
807 Vector<TaskDescriptor> tmptds = new Vector<TaskDescriptor>();
// No explicit target core for this flag state yet: fan out to every
// other core that hosts a consuming task (ally for multi-param,
// target for single-param tasks).
808 if((tmpSchedule.getTargetCoreTable() == null)
809 || (!tmpSchedule.getTargetCoreTable().containsKey(tmpfs))) {
810 // add up all possible cores' info
811 Iterator it_edges = tmpfs.edges();
812 while(it_edges.hasNext()) {
813 TaskDescriptor tmptd = ((FEdge)it_edges.next()).getTask();
814 if(!tmptds.contains(tmptd)) {
816 Vector<Schedule> tmpcores = td2cores.get(tmptd);
817 for(int m = 0; m < tmpcores.size(); ++m) {
818 if(m != tmpSchedule.getCoreNum()) {
819 // if the FlagState can be fed to some multi-param tasks,
820 // need to record corresponding ally cores later
821 if(tmptd.numParameters() > 1) {
822 tmpSchedule.addAllyCore(tmpfs,
823 tmpcores.elementAt(m).getCoreNum());
825 tmpSchedule.addTargetCore(tmpfs,
826 tmpcores.elementAt(m).getCoreNum());
// Task hosted on multiple cores: every host core becomes an ally for
// each of the task's recorded flag states.
838 if(cores.size() > 1) {
839 Vector<FlagState> tmpfss = tmpSchedule.getFStates4TD(td);
840 for(int h = 0; h < tmpfss.size(); ++h) {
841 for(int l = 0; l < cores.size(); ++l) {
843 tmpSchedule.addAllyCore(tmpfss.elementAt(h),
844 cores.elementAt(l).getCoreNum());