1 package Analysis.Scheduling;
3 import java.io.FileOutputStream;
4 import java.io.PrintStream;
5 import java.util.Hashtable;
6 import java.util.Iterator;
7 import java.util.Vector;
9 import Analysis.OwnershipAnalysis.OwnershipAnalysis;
10 import Analysis.TaskStateAnalysis.FEdge;
11 import Analysis.TaskStateAnalysis.FlagState;
12 import Analysis.TaskStateAnalysis.TaskAnalysis;
13 import IR.ClassDescriptor;
15 import IR.TaskDescriptor;
18 public class MCImplSynthesis {
// NOTE(review): this file is an elided view -- original line numbers are
// baked into each line and several lines (field declarations such as
// 'state'/'coreNum', parameter 'TaskAnalysis ta', closing braces) are
// missing between the visible ones. Comments below only describe what the
// visible code shows.

// Analyses and simulator used to synthesize a multi-core implementation:
// schedule generation, task/flag-state analysis, ownership analysis, and
// a simulator that estimates execution time of candidate schedulings.
21 ScheduleAnalysis scheduleAnalysis;
22 TaskAnalysis taskAnalysis;
23 OwnershipAnalysis ownershipAnalysis;
24 ScheduleSimulator scheduleSimulator;
// Cap on the number of candidate schedulings considered per round
// (forwarded to ScheduleAnalysis in synthesis()).
27 int scheduleThreshold;
// Constructor: wires the analyses and the simulator to a fixed core count
// taken from state.CORENUM, and sets the default scheduleThreshold (1000).
// NOTE(review): the parameter list and some constructor arguments are
// elided here -- 'ta' is assigned but its parameter is not visible.
29 public MCImplSynthesis(State state,
31 OwnershipAnalysis oa) {
33 this.coreNum = state.CORENUM;
34 this.taskAnalysis = ta;
35 this.ownershipAnalysis = oa;
36 this.scheduleAnalysis = new ScheduleAnalysis(state,
38 scheduleAnalysis.setCoreNum(this.coreNum);
39 this.scheduleSimulator = new ScheduleSimulator(this.coreNum,
// Default cap of 1000 candidate schedulings per generation round.
42 this.scheduleThreshold = 1000;
// Number of target cores; delegates to the ScheduleAnalysis instance.
45 public int getCoreNum() {
46 return this.scheduleAnalysis.getCoreNum();
// Accessor for the candidate-scheduling cap.
49 public int getScheduleThreshold() {
50 return scheduleThreshold;
// Mutator for the candidate-scheduling cap.
53 public void setScheduleThreshold(int scheduleThreshold) {
54 this.scheduleThreshold = scheduleThreshold;
// Main synthesis driver. Visible behavior:
//  1. redirects System.out to "SimulatorResult_<coreNum>.out" so simulator
//     chatter goes to a file instead of the console;
//  2. asks ScheduleAnalysis to generate candidate schedule graphs (capped
//     by scheduleThreshold);
//  3. in a do/while loop (the 'do {' opener is elided in this view):
//     converts each schedule graph to a scheduling layout, simulates all of
//     them, keeps the best (lowest estimated execution time) seen so far,
//     then asks optimizeScheduling() for improved variants; loops while new
//     variants are produced (see the in-code TODO about termination);
//  4. dumps the winning schedule graph to "scheduling_selected.dot" and
//     restores the original System.out.
// Returns the best Vector<Schedule> found (one Schedule per core).
// NOTE(review): the try blocks, 'do {', and several call arguments
// (e.g. to simulate() and optimizeScheduling()) are elided in this view.
57 public Vector<Schedule> synthesis() {
58 // Print stuff to the original output and error streams.
59 // The stuff printed through the 'origOut' and 'origErr' references
60 // should go to the console on most systems while the messages
61 // printed through the 'System.out' and 'System.err' will end up in
62 // the files we created for them.
63 //origOut.println ("\nRedirect: Round #2");
64 //System.out.println ("Test output via 'SimulatorResult.out'.");
65 //origOut.println ("Test output via 'origOut' reference.");
67 // Save the current standard input, output, and error streams
68 // for later restoration.
69 PrintStream origOut = System.out;
71 // Create a new output stream for the standard output.
72 PrintStream stdout = null;
// NOTE(review): the opening 'try {' for this stream creation is elided.
74 stdout = new PrintStream(
75 new FileOutputStream(this.state.outputdir + "SimulatorResult_"
76 + this.coreNum + ".out"));
77 } catch (Exception e) {
78 // Sigh. Couldn't open the file.
79 System.out.println("Redirect: Unable to open output file!");
83 // Print stuff to the original output and error streams.
84 // On most systems all of this will end up on your console when you
85 // run this application.
86 //origOut.println ("\nRedirect: Round #1");
87 //System.out.println ("Test output via 'System.out'.");
88 //origOut.println ("Test output via 'origOut' reference.");
90 // Set the System out and err streams to use our replacements.
91 System.setOut(stdout);
// Best scheduling (and its graph) found so far; null until the first
// simulation round completes.
93 Vector<Schedule> scheduling = null;
94 Vector<ScheduleNode> schedulinggraph = null;
97 // generate multiple schedulings
98 this.scheduleAnalysis.setScheduleThreshold(this.scheduleThreshold);
99 this.scheduleAnalysis.schedule();
101 Vector<Vector<ScheduleNode>> scheduleGraphs = null;
102 Vector<Vector<ScheduleNode>> newscheduleGraphs =
103 this.scheduleAnalysis.getScheduleGraphs();
104 Vector<Vector<Schedule>> schedulings = new Vector<Vector<Schedule>>();
// Indices (into 'schedulings') and simulated execution graphs of the
// schedulings the simulator selects as best each round.
105 Vector<Integer> selectedSchedulings = new Vector<Integer>();
106 Vector<Vector<SimExecutionEdge>> selectedSimExeGraphs =
107 new Vector<Vector<SimExecutionEdge>>();
109 // check all multi-parameter tasks
110 Vector<TaskDescriptor> multiparamtds = new Vector<TaskDescriptor>();
111 Iterator it_tasks = this.state.getTaskSymbolTable().getDescriptorsIterator();
112 while(it_tasks.hasNext()) {
113 TaskDescriptor td = (TaskDescriptor)it_tasks.next();
114 if(td.numParameters() > 1) {
115 multiparamtds.addElement(td);
// Track the best (smallest) simulated execution time across all rounds.
120 int bestexetime = Integer.MAX_VALUE;
121 // simulate the generated schedulings and try to optimize it
// NOTE(review): the 'do {' that this '}while' (original line 157) closes
// is elided here, as are the declarations of 'tryindex' and 'gid'.
123 System.out.print("+++++++++++++++++++++++++++++++++++++++++++++++++++\n");
124 System.out.print("Simulate and optimize round: #" + tryindex + ": \n");
125 gid += newscheduleGraphs.size();
126 scheduleGraphs = newscheduleGraphs;
128 // get scheduling layouts from schedule graphs
129 for(int i = 0; i < scheduleGraphs.size(); i++) {
130 Vector<ScheduleNode> scheduleGraph = scheduleGraphs.elementAt(i);
131 Vector<Schedule> tmpscheduling =
132 generateScheduling(scheduleGraph, multiparamtds);
133 schedulings.add(tmpscheduling);
135 selectedSchedulings.clear();
136 selectedSimExeGraphs.clear();
// Simulate every candidate; the simulator reports the best execution
// time and fills in which schedulings achieved it.
137 int tmpexetime = this.scheduleSimulator.simulate(schedulings,
139 selectedSimExeGraphs);
140 if(tmpexetime < bestexetime) {
141 bestexetime = tmpexetime;
// Remember the first selected scheduling and its graph as the new best.
142 scheduling = schedulings.elementAt(selectedSchedulings.elementAt(0));
143 schedulinggraph = newscheduleGraphs.elementAt(selectedSchedulings.elementAt(0));
144 System.out.print("end of: #" + tryindex + " (bestexetime: " + bestexetime + ")\n");
145 System.out.print("+++++++++++++++++++++++++++++++++++++++++++++++++++\n");
151 // try to optimize the best one scheduling
152 newscheduleGraphs = optimizeScheduling(scheduleGraphs,
154 selectedSimExeGraphs,
156 this.scheduleThreshold);
157 }while(newscheduleGraphs != null); // TODO: could it possibly lead to endless loop?
// Release intermediate structures for GC.
159 scheduleGraphs = null;
160 newscheduleGraphs = null;
162 selectedSchedulings = null;
163 selectedSimExeGraphs = null;
164 multiparamtds = null;
// Dump the winning schedule graph in Graphviz dot format.
166 String path = "scheduling_selected.dot";
167 SchedulingUtil.printScheduleGraph(path, schedulinggraph);
169 // Close the streams.
// Restore the console stream saved at the top of the method.
172 System.setOut(origOut);
173 } catch (Exception e) {
174 origOut.println("Redirect: Unable to close files!");
// For each schedule graph the simulator selected, extract the critical
// path of its simulated execution (analyzeCriticalPath) and try to shorten
// it (optimizeCriticalPath), accumulating any improved schedule graphs.
// Returns null when no candidate produced an improvement (which terminates
// the do/while loop in synthesis()); single-core runs bail out immediately
// since there is nothing to parallelize.
// NOTE(review): the trailing parameters (original lines 183-184), the body
// of the coreNum==1 early-exit, the declarations of 'lgid'/'left', and the
// remaining optimizeCriticalPath() arguments are elided in this view.
180 private Vector<Vector<ScheduleNode>> optimizeScheduling(Vector<Vector<ScheduleNode>> scheduleGraphs,
181 Vector<Integer> selectedScheduleGraphs,
182 Vector<Vector<SimExecutionEdge>> selectedSimExeGraphs,
// With a single core there is no alternative placement to explore.
185 if(this.coreNum == 1) {
190 Vector<Vector<ScheduleNode>> optimizeschedulegraphs = null;
// Round counter used only to name the critical-path debug dot files.
193 int num2try = (gid - 1) / this.scheduleThreshold;
195 for(int i = 0; i < selectedScheduleGraphs.size(); i++) {
196 Vector<ScheduleNode> schedulegraph = scheduleGraphs.elementAt(
197 selectedScheduleGraphs.elementAt(i));
198 Vector<SimExecutionEdge> simexegraph = selectedSimExeGraphs.elementAt(i);
199 Vector<SimExecutionEdge> criticalPath = analyzeCriticalPath(simexegraph);
200 // for test, print out the criticalPath
201 if(this.state.PRINTCRITICALPATH) {
202 SchedulingUtil.printCriticalPath("criticalpath_" + num2try + ".dot", criticalPath);
205 Vector<Vector<ScheduleNode>> tmposchedulegraphs = optimizeCriticalPath(schedulegraph,
209 if(tmposchedulegraphs != null) {
// Lazily allocate the result vector on the first improvement found.
210 if(optimizeschedulegraphs == null) {
211 optimizeschedulegraphs = new Vector<Vector<ScheduleNode>>();
213 optimizeschedulegraphs.addAll(tmposchedulegraphs);
// 'lgid'/'left' appear to track graph-id allocation and the remaining
// candidate budget -- their declarations are elided; TODO confirm.
214 lgid += tmposchedulegraphs.size();
215 left -= tmposchedulegraphs.size();
// Drop per-iteration references (both the improved and not-improved arms
// null these out; intervening lines are elided).
217 schedulegraph = null;
220 tmposchedulegraphs = null;
224 schedulegraph = null;
227 tmposchedulegraphs = null;
230 return optimizeschedulegraphs;
// Computes the critical (longest-weight) path of a simulated execution
// graph, starting from the source node of the graph's first edge, then
// annotates each edge on the path with its earliest possible start time
// (computeBestStartPoint).
// NOTE(review): the 'return criticalPath;' and closing brace are elided in
// this view (original lines 239-241).
233 private Vector<SimExecutionEdge> analyzeCriticalPath(Vector<SimExecutionEdge> simexegraph) {
234 // first figure out the critical path
235 Vector<SimExecutionEdge> criticalPath = new Vector<SimExecutionEdge>();
// Assumes the graph's entry node is the source of edge 0 -- TODO confirm
// this invariant is guaranteed by the simulator.
236 SimExecutionNode senode = (SimExecutionNode)simexegraph.elementAt(0).getSource();
237 getCriticalPath(senode, criticalPath);
238 computeBestStartPoint(criticalPath);
// For every edge (task execution) on the critical path, computes the
// earliest time it could start: the latest finish time
// (bestStartPoint + weight) over all its predicate edges, recording which
// predicate was the limiting one. Edges with no predicates fall back to
// their source node's recorded timepoint, or 0 for the path's entry edge.
// Mutates the edges in place via setBestStartPoint / setLastpredicate*.
// NOTE(review): the declaration of the loop-local 'starttime' accumulator
// (original lines 249-250) and several else/closing-brace lines are elided
// in this view.
243 private void computeBestStartPoint(Vector<SimExecutionEdge> criticalPath) {
244 // calculate the earliest start time of each task on the critial path
245 for(int i = 0; i < criticalPath.size(); i++) {
246 SimExecutionEdge seedge = criticalPath.elementAt(i);
247 Vector<SimExecutionEdge> predicates = seedge.getPredicates();
248 if(predicates.size() > 0) {
251 // check the latest finish time of all the predicates
252 for(int j = 0; j < predicates.size(); j++) {
253 SimExecutionEdge predicate = predicates.elementAt(j);
// Finish time of this predicate = its earliest start + its duration.
254 int tmptime = predicate.getBestStartPoint() + predicate.getWeight();
255 if(tmptime > starttime) {
257 seedge.setLastpredicateEdge(predicate);
// A predicate with a task descriptor is a real task execution: its
// finish point is its target node; otherwise (e.g. a transfer edge)
// use its source node. TODO confirm the null-Td meaning.
258 if(predicate.getTd() != null) {
259 seedge.setLastpredicateNode((SimExecutionNode)predicate.getTarget());
262 seedge.setLastpredicateNode((SimExecutionNode)predicate.getSource());
266 seedge.setBestStartPoint(starttime);
267 } else if(seedge.getSource().getInedgeVector().size() > 0) {
268 // should have only one in edge
// No explicit predicates: earliest start is the source node's timepoint.
269 int starttime = ((SimExecutionNode)seedge.getSource()).getTimepoint();
270 seedge.setBestStartPoint(starttime);
// Entry edge of the whole path: can start at time 0.
273 seedge.setBestStartPoint(0);
279 // TODO: currently only get one critical path. It's possible that there are
280 // multiple critical paths and some of them can not be optimized while others
281 // can. Need to fix up for this situation.
// Recursive longest-path search: returns the total weight of the heaviest
// outgoing path from 'senode' and fills 'criticalPath' with its edges.
// Tries edge 0 first, then compares each remaining out-edge's subpath and
// keeps the longer one.
// NOTE(review): the base-case 'return 0;', the second recursion argument
// ('subcriticalpath'), the 'tmpsum > sum' comparison, and the final
// 'return sum;' lines are elided in this view -- the visible code implies
// them but they cannot be confirmed here.
282 private int getCriticalPath(SimExecutionNode senode,
283 Vector<SimExecutionEdge> criticalPath) {
284 Vector<SimExecutionEdge> edges = (Vector<SimExecutionEdge>)senode.getEdgeVector();
// Base case: a sink node contributes no further weight.
285 if((edges == null) || (edges.size() == 0)) {
289 Vector<SimExecutionEdge> subcriticalpath = new Vector<SimExecutionEdge>();
// Seed the answer with the path through the first out-edge.
290 SimExecutionEdge edge = edges.elementAt(0);
291 int sum = edge.getWeight() + getCriticalPath((SimExecutionNode)edge.getTarget(),
293 criticalPath.clear();
294 criticalPath.add(edge);
295 criticalPath.addAll(subcriticalpath);
// Compare against the paths through the remaining out-edges.
296 for(int i = 1; i < edges.size(); i++) {
297 edge = edges.elementAt(i);
298 subcriticalpath.clear();
299 int tmpsum = edge.getWeight()
300 + getCriticalPath((SimExecutionNode)edge.getTarget(),
303 // find a longer path
305 criticalPath.clear();
306 criticalPath.add(edge);
307 criticalPath.addAll(subcriticalpath);
311 subcriticalpath.clear();
312 subcriticalpath = null;
// Attempts to shorten the critical path of one schedule graph. Visible
// strategy:
//  1. Scan the critical path for edges whose simulated start time is later
//     than their computed earliest start (bestStartPoint) -- these could in
//     principle run earlier. Only edges whose limiting predicate is itself
//     un-optimizable (isFixedTime) are collected, and only those sharing
//     the smallest best start time; they are grouped by core index, then
//     by task type, into 'tooptimize'.
//  2. If there are no spare cores at that time point: for a core with
//     multiple candidate task types, pick the task whose earliest real
//     start is smallest and try reordering via innerOptimizeCriticalPath;
//     otherwise rewrite each candidate edge's predicate to the first
//     critical-path edge finishing after its limiting predicate, recompute
//     earliest starts, and recurse on the updated path.
//  3. If there ARE spare cores, hand the whole group to
//     innerOptimizeCriticalPath to migrate work onto them.
// Returns the accumulated improved schedule graphs, or null if none.
// NOTE(review): trailing parameters (original lines 318-319), several
// else-branches, brace lines, and call arguments are elided in this view;
// 'lgid' and 'left' are referenced but declared in elided lines.
316 private Vector<Vector<ScheduleNode>> optimizeCriticalPath(Vector<ScheduleNode> scheduleGraph,
317 Vector<SimExecutionEdge> criticalPath,
320 Vector<Vector<ScheduleNode>> optimizeschedulegraphs = null;
324 // first check all seedges whose real start point is late than predicted
325 // earliest start time and group them
// Smallest best start time seen so far among optimizable edges.
326 int opcheckpoint = Integer.MAX_VALUE;
327 Vector<Integer> sparecores = null;
328 // first group according to core index,
329 // then group according to task type
330 Hashtable<Integer, Hashtable<TaskDescriptor, Vector<SimExecutionEdge>>> tooptimize =
331 new Hashtable<Integer, Hashtable<TaskDescriptor, Vector<SimExecutionEdge>>>();
332 for(int i = 0; i < criticalPath.size(); i++) {
333 SimExecutionEdge seedge = criticalPath.elementAt(i);
334 int starttime = seedge.getBestStartPoint();
335 if(starttime < ((SimExecutionNode)seedge.getSource()).getTimepoint()) {
336 // no restrictions due to data dependencies
337 // have potential to be parallelled and start execution earlier
338 seedge.setFixedTime(false);
339 // consider to optimize it only when its predicates can NOT
340 // be optimized, otherwise first considering optimize its predicates
341 if(seedge.getLastpredicateEdge().isFixedTime()) {
342 if(opcheckpoint >= starttime) {
343 // only consider the tasks with smallest best start time
344 if(opcheckpoint > starttime) {
// Strictly smaller start time: restart the group at this time point.
// NOTE(review): the tooptimize.clear() one would expect here is in an
// elided line -- confirm against the full source.
346 opcheckpoint = starttime;
347 sparecores = seedge.getLastpredicateNode().getSpareCores();
349 int corenum = seedge.getCoreNum();
// Nested-table insert: core index -> task type -> edges.
350 if(!tooptimize.containsKey(corenum)) {
351 tooptimize.put(corenum,
352 new Hashtable<TaskDescriptor, Vector<SimExecutionEdge>>());
354 if(!tooptimize.get(corenum).containsKey(seedge.getTd())) {
355 tooptimize.get(corenum).put(seedge.getTd(),
356 new Vector<SimExecutionEdge>());
358 tooptimize.get(corenum).get(seedge.getTd()).add(seedge);
364 if(tooptimize.size() > 0) {
365 Iterator<Integer> it_cores = tooptimize.keySet().iterator();
366 // check if it is possible to optimize these tasks
367 if((sparecores == null) || (sparecores.size() == 0)) {
368 // lack of spare cores
369 while(it_cores.hasNext()) {
370 int corenum = it_cores.next();
371 Hashtable<TaskDescriptor, Vector<SimExecutionEdge>> candidatetasks =
372 tooptimize.get(corenum);
373 if(candidatetasks.keySet().size() > 1) {
374 // there are multiple tasks could be optimized to start from
375 // this timepoint, try to change original execution order
376 Iterator<TaskDescriptor> it_tds = candidatetasks.keySet().iterator();
377 TaskDescriptor td = null;
378 int starttime = Integer.MAX_VALUE;
// do/while over task types (the 'do {' opener is elided): find the task
// whose earliest real start time is smallest.
// NOTE(review): line 381 looks up candidatetasks.get(td) while 'tmptd'
// was just read from the iterator -- possibly a bug in the original
// (td is null on the first pass); confirm against the full source.
380 TaskDescriptor tmptd = it_tds.next();
381 Vector<SimExecutionEdge> seedges = candidatetasks.get(td);
382 int tmptime = ((SimExecutionNode)seedges.elementAt(0).getSource()).getTimepoint();
383 for(int i = 1; i < seedges.size(); i++) {
384 int ttime = ((SimExecutionNode)seedges.elementAt(i).getSource()).getTimepoint();
385 if(ttime < tmptime) {
389 if(tmptime < starttime) {
394 }while(it_tds.hasNext());
396 // TODO: only consider non-multi-param tasks currently
397 if(td.numParameters() == 1) {
// Re-wrap just this core's candidates and try an execution-order change.
398 Hashtable<Integer, Hashtable<TaskDescriptor, Vector<SimExecutionEdge>>> tooptimize2 =
399 new Hashtable<Integer, Hashtable<TaskDescriptor, Vector<SimExecutionEdge>>>();
400 tooptimize2.put(corenum, candidatetasks);
401 Vector<Vector<ScheduleNode>> ops = innerOptimizeCriticalPath(scheduleGraph,
406 if(optimizeschedulegraphs == null) {
407 optimizeschedulegraphs = new Vector<Vector<ScheduleNode>>();
409 optimizeschedulegraphs.addAll(ops);
410 lgid += optimizeschedulegraphs.size();
411 left -= optimizeschedulegraphs.size();
418 candidatetasks = null;
423 return optimizeschedulegraphs;
426 // flush the dependences and earliest start time
// No reordering possible: instead relax each candidate edge's predicate
// to a later critical-path edge and recompute earliest start times.
427 it_cores = tooptimize.keySet().iterator();
428 while(it_cores.hasNext()) {
429 Hashtable<TaskDescriptor, Vector<SimExecutionEdge>> candidatetasks =
430 tooptimize.get(it_cores.next());
431 Iterator<Vector<SimExecutionEdge>> it_edgevecs =
432 candidatetasks.values().iterator();
433 while(it_edgevecs.hasNext()) {
434 Vector<SimExecutionEdge> edgevec = it_edgevecs.next();
435 for(int j = 0; j < edgevec.size(); j++) {
436 SimExecutionEdge edge = edgevec.elementAt(j);
437 SimExecutionEdge lastpredicateedge = edge.getLastpredicateEdge();
438 SimExecutionNode lastpredicatenode = edge.getLastpredicateNode();
439 // if(edge.getCoreNum() != lastpredicate.getCoreNum()) // should never hit this
440 int timepoint = lastpredicatenode.getTimepoint();
// A null-Td predicate appears to be a non-task (transfer) edge whose
// duration must be added explicitly -- TODO confirm.
441 if(lastpredicateedge.getTd() == null) {
443 timepoint += lastpredicateedge.getWeight();
445 // mapping to critical path
// Find the first critical-path edge finishing after 'timepoint' and make
// it the new predicate, replacing the old limiting one.
446 for(int index = 0; index < criticalPath.size(); index++) {
447 SimExecutionEdge tmpseedge = criticalPath.elementAt(index);
448 SimExecutionNode tmpsenode =
449 (SimExecutionNode)tmpseedge.getTarget();
450 if(tmpsenode.getTimepoint() > timepoint) {
451 // update the predicate info
452 edge.getPredicates().remove(lastpredicateedge);
453 edge.addPredicate(criticalPath.elementAt(index));
460 candidatetasks = null;
// Recompute earliest starts under the relaxed predicates and recurse.
464 computeBestStartPoint(criticalPath);
465 Vector<Vector<ScheduleNode>> ops = optimizeCriticalPath(scheduleGraph,
470 if(optimizeschedulegraphs == null) {
471 optimizeschedulegraphs = new Vector<Vector<ScheduleNode>>();
473 optimizeschedulegraphs.addAll(ops);
477 // there are spare cores, try to reorganize the tasks to the spare
479 Vector<Vector<ScheduleNode>> ops = innerOptimizeCriticalPath(scheduleGraph,
484 if(optimizeschedulegraphs == null) {
485 optimizeschedulegraphs = new Vector<Vector<ScheduleNode>>();
487 optimizeschedulegraphs.addAll(ops);
496 return optimizeschedulegraphs;
// Generates alternative schedule graphs by relocating candidate tasks:
//  1. clone the schedule graph;
//  2. for each (core, single-param task) in 'tooptimize', find up to N
//     ClassNodes of the task's parameter class on that core's ScheduleNode
//     (N = number of candidate edges) and split them into stand-alone
//     ScheduleNodes ('tocombines');
//  3. enumerate ways to recombine the split nodes with the root nodes
//     (CombinationUtil.CombineGenerator), emitting a new schedule graph per
//     combination until the 'left' budget is exhausted.
// Returns the generated graphs, or null if nothing could be split.
// NOTE(review): trailing parameters (original lines 501-504, presumably
// including 'lgid' and 'left'), some brace lines, and the remaining
// generateScheduleGraph() arguments are elided in this view.
499 private Vector<Vector<ScheduleNode>> innerOptimizeCriticalPath(Vector<ScheduleNode> scheduleGraph,
500 Hashtable<Integer, Hashtable<TaskDescriptor, Vector<SimExecutionEdge>>> tooptimize,
505 Vector<Vector<ScheduleNode>> optimizeschedulegraphs = null;
507 // first clone the whole graph
508 Vector<ScheduleNode> newscheduleGraph =
509 cloneScheduleGraph(scheduleGraph, lgid);
511 // these nodes are root nodes
512 Vector<ScheduleNode> roots = new Vector<ScheduleNode>();
513 for(int i = 0; i < newscheduleGraph.size(); i++) {
514 roots.add(newscheduleGraph.elementAt(i));
517 // map the tasks associated to SimExecutionedges to original
518 // ClassNode in the ScheduleGraph and split them from previous
520 Vector<ScheduleNode> tocombines = new Vector<ScheduleNode>();
521 Iterator<Integer> it_cores = tooptimize.keySet().iterator();
522 while(it_cores.hasNext()) {
523 int corenum = it_cores.next();
524 Hashtable<TaskDescriptor, Vector<SimExecutionEdge>> candidatetasks =
525 tooptimize.get(corenum);
526 Iterator<TaskDescriptor> it_tds = candidatetasks.keySet().iterator();
527 while(it_tds.hasNext()) {
528 TaskDescriptor td = it_tds.next();
// One split per candidate edge of this task type.
529 int numtosplit = candidatetasks.get(td).size();
530 // TODO: currently do not consider multi-param tasks
531 if(td.numParameters() == 1) {
532 ClassDescriptor cd = td.getParamType(0).getClassDesc();
// Assumes the schedule graph is indexed by core number -- TODO confirm.
533 ScheduleNode snode = newscheduleGraph.elementAt(corenum); // corresponding ScheduleNode
534 Iterator<ClassNode> it_cnodes = snode.getClassNodesIterator();
535 Vector<ClassNode> tosplit = new Vector<ClassNode>();
// Collect up to 'numtosplit' ClassNodes whose class matches the task's
// parameter class (the collection/decrement lines are elided here).
536 while((numtosplit > 0) && (it_cnodes.hasNext())) {
537 ClassNode cnode = it_cnodes.next();
538 if(cnode.getClassDescriptor().equals(cd)) {
// Split each collected ClassNode off into its own ScheduleNode; these
// become the nodes to recombine below.
545 for(int i = 0; i < tosplit.size(); i++) {
546 ScheduleNode splitnode = snode.spliteClassNode(tosplit.elementAt(i));
547 newscheduleGraph.add(splitnode);
548 tocombines.add(splitnode);
553 candidatetasks = null;
// Nothing was split: no alternatives to generate (returns null).
558 if(tocombines.size() == 0) {
559 return optimizeschedulegraphs;
562 SchedulingUtil.assignCids(newscheduleGraph);
564 // get all the ScheduleEdge
565 Vector<ScheduleEdge> scheduleEdges = new Vector<ScheduleEdge>();
566 for(int i= 0; i < newscheduleGraph.size(); i++) {
567 scheduleEdges.addAll((Vector<ScheduleEdge>)newscheduleGraph.elementAt(i).getEdgeVector());
570 Vector<Vector<ScheduleNode>> rootNodes =
571 SchedulingUtil.rangeScheduleNodes(roots);
572 Vector<Vector<ScheduleNode>> nodes2combine =
573 SchedulingUtil.rangeScheduleNodes(tocombines);
// Enumerate recombinations until the candidate budget 'left' runs out.
575 CombinationUtil.CombineGenerator cGen =
576 CombinationUtil.allocateCombineGenerator(rootNodes, nodes2combine);
577 while ((left > 0) && (cGen.nextGen())) {
578 Vector<Vector<CombinationUtil.Combine>> combine = cGen.getCombine();
579 Vector<ScheduleNode> sNodes = SchedulingUtil.generateScheduleGraph(this.state,
585 if(optimizeschedulegraphs == null) {
586 optimizeschedulegraphs = new Vector<Vector<ScheduleNode>>();
588 optimizeschedulegraphs.add(sNodes);
// Release working structures for GC.
594 nodes2combine = null;
595 newscheduleGraph = null;
596 scheduleEdges.clear();
597 scheduleEdges = null;
599 return optimizeschedulegraphs;
// Deep-copies a schedule graph via SchedulingUtil.cloneScheduleGraph,
// building the node/ClassNode correspondence tables the utility needs,
// then reassigns cids on the copy. 'result' receives the cloned nodes.
// NOTE(review): the second parameter (original line 603, presumably the
// gid passed as 'lgid' by the caller), the remaining cloneScheduleGraph()
// arguments, and the 'return result;' are elided in this view.
602 private Vector<ScheduleNode> cloneScheduleGraph(Vector<ScheduleNode> scheduleGraph,
604 Vector<ScheduleNode> result = new Vector<ScheduleNode>();
606 // get all the ScheduleEdge
607 Vector<ScheduleEdge> scheduleEdges = new Vector<ScheduleEdge>();
608 for(int i= 0; i < scheduleGraph.size(); i++) {
609 scheduleEdges.addAll((Vector<ScheduleEdge>)scheduleGraph.elementAt(i).getEdgeVector());
// Correspondence maps filled in by the clone utility: original node ->
// (original ClassNode -> cloned ClassNode), and original -> cloned node.
611 Hashtable<ScheduleNode, Hashtable<ClassNode, ClassNode>> sn2hash =
612 new Hashtable<ScheduleNode, Hashtable<ClassNode, ClassNode>>();
613 Hashtable<ScheduleNode, ScheduleNode> sn2sn =
614 new Hashtable<ScheduleNode, ScheduleNode>();
615 SchedulingUtil.cloneScheduleGraph(scheduleGraph,
622 SchedulingUtil.assignCids(result);
623 scheduleEdges.clear();
624 scheduleEdges = null;
633 private Vector<Schedule> generateScheduling(Vector<ScheduleNode> scheduleGraph,
634 Vector<TaskDescriptor> multiparamtds) {
635 Hashtable<TaskDescriptor, Vector<Schedule>> td2cores =
636 new Hashtable<TaskDescriptor, Vector<Schedule>>(); // multiparam tasks reside on which cores
637 Vector<Schedule> scheduling = new Vector<Schedule>(scheduleGraph.size());
638 // for each ScheduleNode create a schedule node representing a core
639 Hashtable<ScheduleNode, Integer> sn2coreNum =
640 new Hashtable<ScheduleNode, Integer>();
642 for(j = 0; j < scheduleGraph.size(); j++) {
643 sn2coreNum.put(scheduleGraph.elementAt(j), j);
646 boolean setstartupcore = false;
647 Schedule startup = null;
648 int gid = scheduleGraph.elementAt(0).getGid();
649 for(j = 0; j < scheduleGraph.size(); j++) {
650 Schedule tmpSchedule = new Schedule(j, gid);
651 ScheduleNode sn = scheduleGraph.elementAt(j);
653 Vector<ClassNode> cNodes = sn.getClassNodes();
654 for(int k = 0; k < cNodes.size(); k++) {
655 Iterator it_flags = cNodes.elementAt(k).getFlags();
656 while(it_flags.hasNext()) {
657 FlagState fs = (FlagState)it_flags.next();
658 Iterator it_edges = fs.edges();
659 while(it_edges.hasNext()) {
660 TaskDescriptor td = ((FEdge)it_edges.next()).getTask();
661 tmpSchedule.addTask(td);
662 if(!td2cores.containsKey(td)) {
663 td2cores.put(td, new Vector<Schedule>());
665 Vector<Schedule> tmpcores = td2cores.get(td);
666 if(!tmpcores.contains(tmpSchedule)) {
667 tmpcores.add(tmpSchedule);
670 // if the FlagState can be fed to some multi-param tasks,
671 // need to record corresponding ally cores later
672 if(td.numParameters() > 1) {
673 tmpSchedule.addFState4TD(td, fs);
675 if(td.getParamType(0).getClassDesc().getSymbol().equals(
676 TypeUtil.StartupClass)) {
677 assert(!setstartupcore);
679 startup = tmpSchedule;
680 setstartupcore = true;
688 // For each of the ScheduleEdge out of this ScheduleNode, add the target ScheduleNode into the queue inside sn
689 Iterator it_edges = sn.edges();
690 while(it_edges.hasNext()) {
691 ScheduleEdge se = (ScheduleEdge)it_edges.next();
692 ScheduleNode target = (ScheduleNode)se.getTarget();
693 Integer targetcore = sn2coreNum.get(target);
694 switch(se.getType()) {
695 case ScheduleEdge.NEWEDGE: {
696 for(int k = 0; k < se.getNewRate(); k++) {
697 tmpSchedule.addTargetCore(se.getFstate(), targetcore);
702 case ScheduleEdge.TRANSEDGE: {
704 tmpSchedule.addTargetCore(se.getFstate(),
706 se.getTargetFState());
707 // check if missed some FlagState associated with some multi-parameter
708 // task, which has been cloned when splitting a ClassNode
709 FlagState fs = se.getSourceFState();
710 FlagState tfs = se.getTargetFState();
711 Iterator it = tfs.edges();
712 while(it.hasNext()) {
713 TaskDescriptor td = ((FEdge)it.next()).getTask();
714 if(td.numParameters() > 1) {
715 if(tmpSchedule.getTasks().contains(td)) {
716 tmpSchedule.addFState4TD(td, fs);
724 it_edges = sn.getScheduleEdgesIterator();
725 while(it_edges.hasNext()) {
726 ScheduleEdge se = (ScheduleEdge)it_edges.next();
727 switch(se.getType()) {
728 case ScheduleEdge.NEWEDGE: {
729 for(int k = 0; k < se.getNewRate(); k++) {
730 tmpSchedule.addTargetCore(se.getFstate(), j);
735 case ScheduleEdge.TRANSEDGE: {
737 tmpSchedule.addTargetCore(se.getFstate(),
739 se.getTargetFState());
745 scheduling.add(tmpSchedule);
748 int number = this.coreNum;
749 if(scheduling.size() < number) {
750 number = scheduling.size();
753 // set up all the associate ally cores
754 if(multiparamtds.size() > 0) {
755 for(j = 0; j < multiparamtds.size(); ++j) {
756 TaskDescriptor td = multiparamtds.elementAt(j);
758 (Vector<FEdge>) this.taskAnalysis.getFEdgesFromTD(td);
759 Vector<Schedule> cores = td2cores.get(td);
760 for(int k = 0; k < cores.size(); ++k) {
761 Schedule tmpSchedule = cores.elementAt(k);
763 for(int h = 0; h < fes.size(); ++h) {
764 FEdge tmpfe = fes.elementAt(h);
765 FlagState tmpfs = (FlagState)tmpfe.getTarget();
766 Vector<TaskDescriptor> tmptds = new Vector<TaskDescriptor>();
767 if((tmpSchedule.getTargetCoreTable() == null)
768 || (!tmpSchedule.getTargetCoreTable().containsKey(tmpfs))) {
769 // add up all possible cores' info
770 Iterator it_edges = tmpfs.edges();
771 while(it_edges.hasNext()) {
772 TaskDescriptor tmptd = ((FEdge)it_edges.next()).getTask();
773 if(!tmptds.contains(tmptd)) {
775 Vector<Schedule> tmpcores = td2cores.get(tmptd);
776 for(int m = 0; m < tmpcores.size(); ++m) {
777 if(m != tmpSchedule.getCoreNum()) {
778 // if the FlagState can be fed to some multi-param tasks,
779 // need to record corresponding ally cores later
780 if(tmptd.numParameters() > 1) {
781 tmpSchedule.addAllyCore(tmpfs,
782 tmpcores.elementAt(m).getCoreNum());
784 tmpSchedule.addTargetCore(tmpfs,
785 tmpcores.elementAt(m).getCoreNum());
797 if(cores.size() > 1) {
798 Vector<FlagState> tmpfss = tmpSchedule.getFStates4TD(td);
799 for(int h = 0; h < tmpfss.size(); ++h) {
800 for(int l = 0; l < cores.size(); ++l) {
802 tmpSchedule.addAllyCore(tmpfss.elementAt(h),
803 cores.elementAt(l).getCoreNum());