1 //===-- ScheduleDAG.cpp - Implement a trivial DAG scheduler ---------------===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by Chris Lattner and is distributed under the
6 // University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This implements a simple two pass scheduler. The first pass attempts to push
11 // backward any lengthy instructions and critical paths. The second pass packs
12 // instructions into semi-optimal time slots.
14 //===----------------------------------------------------------------------===//
16 #define DEBUG_TYPE "sched"
17 #include "llvm/CodeGen/MachineConstantPool.h"
18 #include "llvm/CodeGen/MachineFunction.h"
19 #include "llvm/CodeGen/SelectionDAGISel.h"
20 #include "llvm/CodeGen/SelectionDAG.h"
21 #include "llvm/CodeGen/SSARegMap.h"
22 #include "llvm/Target/TargetMachine.h"
23 #include "llvm/Target/TargetInstrInfo.h"
24 #include "llvm/Target/TargetLowering.h"
25 #include "llvm/Support/CommandLine.h"
26 #include "llvm/Support/Debug.h"
31 // Style of scheduling to use.
32 enum ScheduleChoices {
38 cl::opt<ScheduleChoices> ScheduleStyle("sched",
39 cl::desc("Choose scheduling style"),
40 cl::init(noScheduling),
42 clEnumValN(noScheduling, "none",
43 "Trivial emission with no analysis"),
44 clEnumValN(simpleScheduling, "simple",
45 "Minimize critical path and maximize processor utilization"),
51 ViewDAGs("view-sched-dags", cl::Hidden,
52 cl::desc("Pop up a window to show sched dags as they are processed"));
54 static const bool ViewDAGs = 0;
58 //===----------------------------------------------------------------------===//
60 /// BitsIterator - Provides iteration through individual bits in a bit vector.
65 T Bits; // Bits left to iterate through
69 BitsIterator(T Initial) : Bits(Initial) {}
71 /// Next - Returns the next bit set or zero if exhausted.
73 // Get the rightmost bit set
74 T Result = Bits & -Bits;
77 // Return single bit or zero
82 //===----------------------------------------------------------------------===//
85 //===----------------------------------------------------------------------===//
87 /// ResourceTally - Manages the use of resources over time intervals. Each
88 /// item (slot) in the tally vector represents the resources used at a given
89 /// moment. A bit set to 1 indicates that a resource is in use, otherwise
90 /// available. An assumption is made that the tally is large enough to schedule
91 /// all current instructions (asserts otherwise.)
96 std::vector<T> Tally; // Resources used per slot
97 typedef typename std::vector<T>::iterator Iter;
100 /// AllInUse - Test to see if all of the resources in the slot are busy (set.)
101 inline bool AllInUse(Iter Cursor, unsigned ResourceSet) {
102 return (*Cursor & ResourceSet) == ResourceSet;
105 /// Skip - Skip over slots that use all of the specified resource (all are
107 Iter Skip(Iter Cursor, unsigned ResourceSet) {
108 assert(ResourceSet && "At least one resource bit needs to bet set");
110 // Continue to the end
112 // Break out if one of the resource bits is not set
113 if (!AllInUse(Cursor, ResourceSet)) return Cursor;
116 assert(Cursor < Tally.end() && "Tally is not large enough for schedule");
120 /// FindSlots - Starting from Begin, locate N consecutive slots where at least
121 /// one of the resource bits is available. Returns the address of first slot.
122 Iter FindSlots(Iter Begin, unsigned N, unsigned ResourceSet,
123 unsigned &Resource) {
127 // Try all possible slots forward
130 Cursor = Skip(Cursor, ResourceSet);
131 // Determine end of interval
132 Iter End = Cursor + N;
133 assert(End <= Tally.end() && "Tally is not large enough for schedule");
135 // Iterate thru each resource
136 BitsIterator<T> Resources(ResourceSet & ~*Cursor);
137 while (unsigned Res = Resources.Next()) {
138 // Check if resource is available for next N slots
139 // Break out if resource is busy
140 Iter Interval = Cursor;
141 for (; Interval < End && !(*Interval & Res); Interval++) {}
143 // If available for interval, return where and which resource
144 if (Interval == End) {
148 // Otherwise, check if worth checking other resources
149 if (AllInUse(Interval, ResourceSet)) {
150 // Start looking beyond interval
159 /// Reserve - Mark busy (set) the specified N slots.
160 void Reserve(Iter Begin, unsigned N, unsigned Resource) {
161 // Determine end of interval
162 Iter End = Begin + N;
163 assert(End <= Tally.end() && "Tally is not large enough for schedule");
165 // Set resource bit in each slot
166 for (; Begin < End; Begin++)
171 /// Initialize - Resize and zero the tally to the specified number of time
173 inline void Initialize(unsigned N) {
174 Tally.assign(N, 0); // Initialize tally to all zeros.
177 // FindAndReserve - Locate and mark busy (set) N bits started at slot I, using
178 // ResourceSet for choices.
179 unsigned FindAndReserve(unsigned I, unsigned N, unsigned ResourceSet) {
180 // Which resource used
182 // Find slots for instruction.
183 Iter Where = FindSlots(Tally.begin() + I, N, ResourceSet, Resource);
185 Reserve(Where, N, Resource);
186 // Return time slot (index)
187 return Where - Tally.begin();
191 //===----------------------------------------------------------------------===//
194 //===----------------------------------------------------------------------===//
196 /// Node group - This struct is used to manage flagged node groups.
199 class NodeGroup : public std::vector<NodeInfo *> {
201 int Pending; // Number of visits pending before
206 NodeGroup() : Pending(0) {}
209 inline NodeInfo *getLeader() { return empty() ? NULL : front(); }
210 inline int getPending() const { return Pending; }
211 inline void setPending(int P) { Pending = P; }
212 inline int addPending(int I) { return Pending += I; }
214 static void Add(NodeInfo *D, NodeInfo *U);
215 static unsigned CountInternalUses(NodeInfo *D, NodeInfo *U);
217 //===----------------------------------------------------------------------===//
220 //===----------------------------------------------------------------------===//
222 /// NodeInfo - This struct tracks information used to schedule the a node.
226 int Pending; // Number of visits pending before
229 SDNode *Node; // DAG node
230 unsigned Latency; // Cycles to complete instruction
231 unsigned ResourceSet; // Bit vector of usable resources
232 unsigned Slot; // Node's time slot
233 NodeGroup *Group; // Grouping information
234 unsigned VRBase; // Virtual register base
237 NodeInfo(SDNode *N = NULL)
248 inline bool isInGroup() const {
249 assert(!Group || !Group->empty() && "Group with no members");
250 return Group != NULL;
252 inline bool isGroupLeader() const {
253 return isInGroup() && Group->getLeader() == this;
255 inline int getPending() const {
256 return Group ? Group->getPending() : Pending;
258 inline void setPending(int P) {
259 if (Group) Group->setPending(P);
262 inline int addPending(int I) {
263 if (Group) return Group->addPending(I);
264 else return Pending += I;
267 typedef std::vector<NodeInfo *>::iterator NIIterator;
268 //===----------------------------------------------------------------------===//
271 //===----------------------------------------------------------------------===//
273 /// NodeGroupIterator - Iterates over all the nodes indicated by the node info.
274 /// If the node is in a group then iterate over the members of the group,
275 /// otherwise just the node info.
277 class NodeGroupIterator {
279 NodeInfo *NI; // Node info
280 NIIterator NGI; // Node group iterator
281 NIIterator NGE; // Node group iterator end
285 NodeGroupIterator(NodeInfo *N) : NI(N) {
286 // If the node is in a group then set up the group iterator. Otherwise
287 // the group iterators will trip first time out.
288 if (N->isInGroup()) {
290 NodeGroup *Group = NI->Group;
291 NGI = Group->begin();
293 // Prevent this node from being used (will be in members list
298 /// next - Return the next node info, otherwise NULL.
302 if (NGI != NGE) return *NGI++;
303 // Use node as the result (may be NULL)
304 NodeInfo *Result = NI;
307 // Return node or NULL
311 //===----------------------------------------------------------------------===//
314 //===----------------------------------------------------------------------===//
316 /// NodeGroupOpIterator - Iterates over all the operands of a node. If the node
317 /// is a member of a group, this iterates over all the operands of all the
318 /// members of the group.
320 class NodeGroupOpIterator {
322 NodeInfo *NI; // Node containing operands
323 NodeGroupIterator GI; // Node group iterator
324 SDNode::op_iterator OI; // Operand iterator
325 SDNode::op_iterator OE; // Operand iterator end
327 /// CheckNode - Test if node has more operands. If not get the next node
328 /// skipping over nodes that have no operands.
330 // Only if operands are exhausted first
332 // Get next node info
333 NodeInfo *NI = GI.next();
334 // Exit if nodes are exhausted
337 SDNode *Node = NI->Node;
338 // Set up the operand iterators
339 OI = Node->op_begin();
346 NodeGroupOpIterator(NodeInfo *N) : NI(N), GI(N) {}
348 /// isEnd - Returns true when not more operands are available.
350 inline bool isEnd() { CheckNode(); return OI == OE; }
352 /// next - Returns the next available operand.
354 inline SDOperand next() {
355 assert(OI != OE && "Not checking for end of NodeGroupOpIterator correctly");
359 //===----------------------------------------------------------------------===//
362 //===----------------------------------------------------------------------===//
364 /// SimpleSched - Simple two pass scheduler.
368 // TODO - get ResourceSet from TII
370 RSInteger = 0x3, // Two integer units
371 RSFloat = 0xC, // Two float units
372 RSLoadStore = 0x30, // Two load store units
373 RSOther = 0 // Processing unit independent
376 MachineBasicBlock *BB; // Current basic block
377 SelectionDAG &DAG; // DAG of the current basic block
378 const TargetMachine &TM; // Target processor
379 const TargetInstrInfo &TII; // Target instruction information
380 const MRegisterInfo &MRI; // Target processor register information
381 SSARegMap *RegMap; // Virtual/real register map
382 MachineConstantPool *ConstPool; // Target constant pool
383 unsigned NodeCount; // Number of nodes in DAG
384 NodeInfo *Info; // Info for nodes being scheduled
385 std::map<SDNode *, NodeInfo *> Map; // Map nodes to info
386 std::vector<NodeInfo*> Ordering; // Emit ordering of nodes
387 ResourceTally<unsigned> Tally; // Resource usage tally
388 unsigned NSlots; // Total latency
389 std::map<SDNode *, unsigned> VRMap; // Node to VR map
390 static const unsigned NotFound = ~0U; // Search marker
395 SimpleSched(SelectionDAG &D, MachineBasicBlock *bb)
396 : BB(bb), DAG(D), TM(D.getTarget()), TII(*TM.getInstrInfo()),
397 MRI(*TM.getRegisterInfo()), RegMap(BB->getParent()->getSSARegMap()),
398 ConstPool(BB->getParent()->getConstantPool()),
400 assert(&TII && "Target doesn't provide instr info?");
401 assert(&MRI && "Target doesn't provide register info?");
404 // Run - perform scheduling.
405 MachineBasicBlock *Run() {
411 /// getNI - Returns the node info for the specified node.
413 inline NodeInfo *getNI(SDNode *Node) { return Map[Node]; }
415 /// getVR - Returns the virtual register number of the node.
417 inline unsigned getVR(SDOperand Op) {
418 NodeInfo *NI = getNI(Op.Val);
419 assert(NI->VRBase != 0 && "Node emitted out of order - late");
420 return NI->VRBase + Op.ResNo;
423 static bool isFlagDefiner(SDNode *A);
424 static bool isFlagUser(SDNode *A);
425 static bool isDefiner(NodeInfo *A, NodeInfo *B);
426 static bool isPassiveNode(SDNode *Node);
427 void IncludeNode(NodeInfo *NI);
430 void GatherNodeInfo();
431 bool isStrongDependency(NodeInfo *A, NodeInfo *B);
432 bool isWeakDependency(NodeInfo *A, NodeInfo *B);
433 void ScheduleBackward();
434 void ScheduleForward();
436 void EmitNode(NodeInfo *NI);
437 static unsigned CountResults(SDNode *Node);
438 static unsigned CountOperands(SDNode *Node);
439 unsigned CreateVirtualRegisters(MachineInstr *MI,
441 const TargetInstrDescriptor &II);
442 unsigned EmitDAG(SDOperand A);
444 void printSI(std::ostream &O, NodeInfo *NI) const;
445 void print(std::ostream &O) const;
446 inline void dump(const char *tag) const { std::cerr << tag; dump(); }
449 //===----------------------------------------------------------------------===//
453 //===----------------------------------------------------------------------===//
456 //===----------------------------------------------------------------------===//
457 /// Add - Adds a definer and user pair to a node group.
// NOTE(review): this view of the file is missing several physical lines
// (original 458, 463a, 471-473 bodies, 483, 485, 493, 495-497, 505, 507,
// 513-515), including the `else if`/`else` keywords and closing braces that
// separate the four cases below: (1) both nodes already grouped -> merge
// groups, (2) only definer grouped -> absorb user, (3) only user grouped ->
// absorb definer, (4) neither grouped -> create a fresh group.  The pending
// count bookkeeping subtracts edges internal to the merged group so that
// only external uses keep the group "pending".
459 void NodeGroup::Add(NodeInfo *D, NodeInfo *U) {
460 // Get current groups
461 NodeGroup *DGroup = D->Group;
462 NodeGroup *UGroup = U->Group;
463 // If both are members of groups
464 if (DGroup && UGroup) {
465 // There may have been another edge connecting
466 if (DGroup == UGroup) return;
467 // Add the pending users count
468 DGroup->addPending(UGroup->getPending());
469 // For each member of the users group
470 NodeGroupIterator UNGI(U);
471 while (NodeInfo *UNI = UNGI.next() ) {
474 // For each member of the definers group
475 NodeGroupIterator DNGI(D);
476 while (NodeInfo *DNI = DNGI.next() ) {
477 // Remove internal edges
478 DGroup->addPending(-CountInternalUses(DNI, UNI));
481 // Merge the two lists
482 DGroup->insert(DGroup->end(), UGroup->begin(), UGroup->end());
484 // Make user member of definers group
// Case (2): only the definer has a group; fold the lone user into it.
486 // Add users uses to definers group pending
487 DGroup->addPending(U->Node->use_size());
488 // For each member of the definers group
489 NodeGroupIterator DNGI(D);
490 while (NodeInfo *DNI = DNGI.next() ) {
491 // Remove internal edges
492 DGroup->addPending(-CountInternalUses(DNI, U));
494 DGroup->push_back(U);
496 // Make definer member of users group
// Case (3): only the user has a group; the definer must lead, so it is
// inserted at the front of the user's group.
498 // Add definers uses to users group pending
499 UGroup->addPending(D->Node->use_size());
500 // For each member of the users group
501 NodeGroupIterator UNGI(U);
502 while (NodeInfo *UNI = UNGI.next() ) {
503 // Remove internal edges
504 UGroup->addPending(-CountInternalUses(D, UNI));
506 UGroup->insert(UGroup->begin(), D);
// Case (4): neither node grouped yet - build a new two-member group whose
// pending count is all external uses of both nodes.
508 D->Group = U->Group = DGroup = new NodeGroup();
509 DGroup->addPending(D->Node->use_size() + U->Node->use_size() -
510 CountInternalUses(D, U));
511 DGroup->push_back(D);
512 DGroup->push_back(U);
516 /// CountInternalUses - Returns the number of edges between the two nodes.
// NOTE(review): the declaration/initialization of the counter N (original
// line 519) and the final `return N;` / closing brace are not visible in
// this lossy view; the visible loop walks every use of D's SDNode and
// counts how many of those uses are U's SDNode.
518 unsigned NodeGroup::CountInternalUses(NodeInfo *D, NodeInfo *U) {
520 for (SDNode:: use_iterator UI = D->Node->use_begin(),
521 E = D->Node->use_end(); UI != E; UI++) {
522 if (*UI == U->Node) N++;
526 //===----------------------------------------------------------------------===//
529 //===----------------------------------------------------------------------===//
530 /// isFlagDefiner - Returns true if the node defines a flag result.
// A flag result, when present, is always the node's last value; a node with
// no values cannot define a flag (the N && ... guard).  Closing brace is on
// a line not visible in this view.
531 bool SimpleSched::isFlagDefiner(SDNode *A) {
532 unsigned N = A->getNumValues();
533 return N && A->getValueType(N - 1) == MVT::Flag;
536 /// isFlagUser - Returns true if the node uses a flag result.
// Mirror of isFlagDefiner: a flag operand, when present, is always the last
// operand.  Closing brace is on a line not visible in this view.
538 bool SimpleSched::isFlagUser(SDNode *A) {
539 unsigned N = A->getNumOperands();
540 return N && A->getOperand(N - 1).getValueType() == MVT::Flag;
543 /// isDefiner - Return true if node A is a definer for B.
// Group-aware: iterates every node in A's group and every operand of every
// node in B's group; true if any A-node appears as an operand value of a
// B-node.  The trailing `return false;` and closing braces are on lines not
// visible in this view.
545 bool SimpleSched::isDefiner(NodeInfo *A, NodeInfo *B) {
546 // While there are A nodes
547 NodeGroupIterator NII(A);
548 while (NodeInfo *NI = NII.next()) {
550 SDNode *Node = NI->Node;
551 // While there operands in nodes of B
552 NodeGroupOpIterator NGOI(B);
553 while (!NGOI.isEnd()) {
554 SDOperand Op = NGOI.next();
555 // If node from A defines a node in B
556 if (Node == Op.Val) return true;
562 /// isPassiveNode - Return true if the node is a non-scheduled leaf.
// Passive nodes (constants, registers, addresses, etc.) are emitted as
// operands of their users rather than scheduled on their own.  The final
// `return false;` for all other node kinds is on a line not visible in
// this view.
564 bool SimpleSched::isPassiveNode(SDNode *Node) {
565 if (isa<ConstantSDNode>(Node)) return true;
566 if (isa<RegisterSDNode>(Node)) return true;
567 if (isa<GlobalAddressSDNode>(Node)) return true;
568 if (isa<BasicBlockSDNode>(Node)) return true;
569 if (isa<FrameIndexSDNode>(Node)) return true;
570 if (isa<ConstantPoolSDNode>(Node)) return true;
571 if (isa<ExternalSymbolSDNode>(Node)) return true;
575 /// IncludeNode - Add node to NodeInfo vector.
// Pending-count protocol: a negative pending count marks "already in the
// Ordering list"; each call decrements the count (the decrement itself, the
// Count-reaches-zero test, and the "mark as added" assignment are on lines
// not visible in this lossy view).  When the count hits zero all users have
// been visited and the node - or its group leader - joins Ordering.
577 void SimpleSched::IncludeNode(NodeInfo *NI) {
579 SDNode *Node = NI->Node;
// Entry tokens are handled separately by VisitAll; never ordered here.
581 if (Node->getOpcode() == ISD::EntryToken) return;
582 // Check current count for node
583 int Count = NI->getPending();
584 // If the node is already in list
585 if (Count < 0) return;
586 // Decrement count to indicate a visit
588 // If count has gone to zero then add node to list
591 if (NI->isInGroup()) {
592 Ordering.push_back(NI->Group->getLeader());
594 Ordering.push_back(NI);
596 // indicate node has been added
599 // Mark as visited with new count
600 NI->setPending(Count);
603 /// VisitAll - Visit each node breadth-wise to produce an initial ordering.
604 /// Note that the ordering in the Nodes vector is reversed.
// Worklist BFS from the DAG root: Ordering grows while it is being indexed
// (size() is re-read each iteration on purpose), then the whole vector is
// reversed in place so definers precede users.  The final line of the swap
// (Ordering[j] = tmp;) is on a line not visible in this view.
605 void SimpleSched::VisitAll() {
606 // Add first element to list
607 Ordering.push_back(getNI(DAG.getRoot().Val));
609 // Iterate through all nodes that have been added
610 for (unsigned i = 0; i < Ordering.size(); i++) { // note: size() varies
611 // Visit all operands
612 NodeGroupOpIterator NGI(Ordering[i]);
613 while (!NGI.isEnd()) {
615 SDOperand Op = NGI.next();
617 SDNode *Node = Op.Val;
618 // Ignore passive nodes
619 if (isPassiveNode(Node)) continue;
621 IncludeNode(getNI(Node));
625 // Add entry node last (IncludeNode filters entry nodes)
626 if (DAG.getEntryNode().Val != DAG.getRoot().Val)
627 Ordering.push_back(getNI(DAG.getEntryNode().Val));
629 // FIXME - Reverse the order
630 for (unsigned i = 0, N = Ordering.size(), Half = N >> 1; i < Half; i++) {
631 unsigned j = N - i - 1;
632 NodeInfo *tmp = Ordering[i];
633 Ordering[i] = Ordering[j];
638 /// GatherNodeInfo - Get latency and resource information about each node.
// Pass 1: allocate one NodeInfo per DAG node and assign a resource set (and,
// on lines not visible in this lossy view, a latency) using a placeholder
// classification - calls/ints on integer units, loads/stores on load-store
// units, FP on float units - until real target tables exist (see FIXMEs).
// Pass 2: walk each node's operands from the end (flag operands are always
// last) and glue flag-connected definer/user pairs into NodeGroups.
640 void SimpleSched::GatherNodeInfo() {
641 // Allocate node information
642 Info = new NodeInfo[NodeCount];
643 // Get base of all nodes table
644 SelectionDAG::allnodes_iterator AllNodes = DAG.allnodes_begin();
646 // For each node being scheduled
647 for (unsigned i = 0, N = NodeCount; i < N; i++) {
648 // Get next node from DAG all nodes table
649 SDNode *Node = AllNodes[i];
650 // Fast reference to node schedule info
651 NodeInfo* NI = &Info[i];
656 // Set pending visit count
657 NI->setPending(Node->use_size());
659 MVT::ValueType VT = Node->getValueType(0);
660 if (Node->isTargetOpcode()) {
661 MachineOpCode TOpc = Node->getTargetOpcode();
662 // FIXME: This is an ugly (but temporary!) hack to test the scheduler
663 // before we have real target info.
664 // FIXME NI->Latency = std::max(1, TII.maxLatency(TOpc));
665 // FIXME NI->ResourceSet = TII.resources(TOpc);
666 if (TII.isCall(TOpc)) {
667 NI->ResourceSet = RSInteger;
669 } else if (TII.isLoad(TOpc)) {
670 NI->ResourceSet = RSLoadStore;
672 } else if (TII.isStore(TOpc)) {
673 NI->ResourceSet = RSLoadStore;
675 } else if (MVT::isInteger(VT)) {
676 NI->ResourceSet = RSInteger;
678 } else if (MVT::isFloatingPoint(VT)) {
679 NI->ResourceSet = RSFloat;
682 NI->ResourceSet = RSOther;
// Non-target nodes get the same type-based classification.
686 if (MVT::isInteger(VT)) {
687 NI->ResourceSet = RSInteger;
689 } else if (MVT::isFloatingPoint(VT)) {
690 NI->ResourceSet = RSFloat;
693 NI->ResourceSet = RSOther;
698 // Add one slot for the instruction itself
701 // Sum up all the latencies for max tally size
702 NSlots += NI->Latency;
705 // Put flagged nodes into groups
706 for (unsigned i = 0, N = NodeCount; i < N; i++) {
707 NodeInfo* NI = &Info[i];
708 SDNode *Node = NI->Node;
710 // For each operand (in reverse to only look at flags)
// NOTE: this inner N deliberately shadows the outer loop bound.
711 for (unsigned N = Node->getNumOperands(); 0 < N--;) {
713 SDOperand Op = Node->getOperand(N);
714 // No more flags to walk
715 if (Op.getValueType() != MVT::Flag) break;
717 NodeGroup::Add(getNI(Op.Val), NI);
722 /// isStrongDependency - Return true if node A has results used by node B.
723 /// I.E., B must wait for latency of A.
// Thin wrapper: a def-use edge is exactly what forces B to wait out A's
// full latency.  Closing brace is on a line not visible in this view.
724 bool SimpleSched::isStrongDependency(NodeInfo *A, NodeInfo *B) {
725 // If A defines for B then it's a strong dependency
726 return isDefiner(A, B);
729 /// isWeakDependency Return true if node A produces a result that will
730 /// conflict with operands of B.
// A weak dependency only constrains ordering (B may not start before A),
// not latency.  The #else / #endif lines of the preprocessor conditional
// are not visible in this lossy view; the live branch is the simpler
// EntryToken-only test, since SSA form makes anti-dependencies impossible.
731 bool SimpleSched::isWeakDependency(NodeInfo *A, NodeInfo *B) {
732 // TODO check for conflicting real registers and aliases
733 #if 0 // FIXME - Since we are in SSA form and not checking register aliasing
734 return A->Node->getOpcode() == ISD::EntryToken || isStrongDependency(B, A);
736 return A->Node->getOpcode() == ISD::EntryToken;
740 /// ScheduleBackward - Schedule instructions so that any long latency
741 /// instructions and the critical path get pushed back in time. Time is run in
742 /// reverse to allow code reuse of the Tally and eliminate the overhead of
743 /// biasing every slot indices against NSlots.
// List scheduling in reverse program order: for each node (last to first)
// find the earliest reverse-time slot permitted by already-placed
// successors, reserve a resource in the Tally, then insertion-sort the node
// into Ordering by slot.  The headers of the two inner `for (j ...)` loops
// and several closing braces are on lines not visible in this lossy view.
744 void SimpleSched::ScheduleBackward() {
745 // Size and clear the resource tally
746 Tally.Initialize(NSlots);
747 // Get number of nodes to schedule
748 unsigned N = Ordering.size();
750 // For each node being scheduled
751 for (unsigned i = N; 0 < i--;) {
752 NodeInfo *NI = Ordering[i];
754 unsigned Slot = NotFound;
756 // Compare against those previously scheduled nodes
759 // Get following instruction
760 NodeInfo *Other = Ordering[j];
762 // Check dependency against previously inserted nodes
763 if (isStrongDependency(NI, Other)) {
// Strong dependency: must be separated by the full latency of Other.
764 Slot = Other->Slot + Other->Latency;
766 } else if (isWeakDependency(NI, Other)) {
772 // If independent of others (or first entry)
773 if (Slot == NotFound) Slot = 0;
775 // Find a slot where the needed resources are available
777 Slot = Tally.FindAndReserve(Slot, NI->Latency, NI->ResourceSet);
782 // Insert sort based on slot
785 // Get following instruction
786 NodeInfo *Other = Ordering[j];
787 // Should we look further
788 if (Slot >= Other->Slot) break;
789 // Shuffle other into ordering
790 Ordering[j - 1] = Other;
792 // Insert node in proper slot
793 if (j != i + 1) Ordering[j - 1] = NI;
797 /// ScheduleForward - Schedule instructions to maximize packing.
// Mirror image of ScheduleBackward run in forward program order: dependency
// tests are reversed (Other must define for NI), the insertion sort shuffles
// toward lower indices (j + 1 instead of j - 1), and slots now count forward
// in time.  As above, the inner `for (j ...)` loop headers and some closing
// braces are on lines not visible in this lossy view.
799 void SimpleSched::ScheduleForward() {
800 // Size and clear the resource tally
801 Tally.Initialize(NSlots);
802 // Get number of nodes to schedule
803 unsigned N = Ordering.size();
805 // For each node being scheduled
806 for (unsigned i = 0; i < N; i++) {
807 NodeInfo *NI = Ordering[i];
809 unsigned Slot = NotFound;
811 // Compare against those previously scheduled nodes
814 // Get following instruction
815 NodeInfo *Other = Ordering[j];
817 // Check dependency against previously inserted nodes
818 if (isStrongDependency(Other, NI)) {
819 Slot = Other->Slot + Other->Latency;
821 } else if (isWeakDependency(Other, NI)) {
827 // If independent of others (or first entry)
828 if (Slot == NotFound) Slot = 0;
830 // Find a slot where the needed resources are available
832 Slot = Tally.FindAndReserve(Slot, NI->Latency, NI->ResourceSet);
837 // Insert sort based on slot
840 // Get following instruction
841 NodeInfo *Other = Ordering[j];
842 // Should we look further
843 if (Slot >= Other->Slot) break;
844 // Shuffle other into ordering
845 Ordering[j + 1] = Other;
847 // Insert node in proper slot
848 if (j != i) Ordering[j + 1] = NI;
852 /// EmitAll - Emit all nodes in schedule sorted order.
// Walks Ordering and emits machine code for each entry; for a grouped node
// only the group leader triggers emission, and the group iterator then emits
// every member.  NOTE(review): the visible text shows the NGI/EmitNode pair
// twice (lines 861-862 and 866-867) with branch structure on lines 863/868+
// missing from this lossy view - the surrounding if/else that selects
// between the two cannot be reconstructed here; code left byte-identical.
854 void SimpleSched::EmitAll() {
855 // For each node in the ordering
856 for (unsigned i = 0, N = Ordering.size(); i < N; i++) {
857 // Get the scheduling info
858 NodeInfo *NI = Ordering[i];
860 // Iterate through nodes
861 NodeGroupIterator NGI(Ordering[i]);
862 while (NodeInfo *NI = NGI.next()) EmitNode(NI);
864 if (NI->isInGroup()) {
865 if (NI->isGroupLeader()) {
866 NodeGroupIterator NGI(Ordering[i]);
867 while (NodeInfo *NI = NGI.next()) EmitNode(NI);
876 /// CountResults - The results of target nodes have register or immediate
877 /// operands first, then an optional chain, and optional flag operands (which do
878 /// not go into the machine instrs.)
// Strips trailing Flag results, then one trailing chain (MVT::Other), and
// returns the count of real machine-instruction results.  The loop's
// decrement line and the final `return N;` are on lines not visible in
// this view.
879 unsigned SimpleSched::CountResults(SDNode *Node) {
880 unsigned N = Node->getNumValues();
881 while (N && Node->getValueType(N - 1) == MVT::Flag)
883 if (N && Node->getValueType(N - 1) == MVT::Other)
884 --N; // Skip over chain result.
888 /// CountOperands The inputs to target nodes have any actual inputs first,
889 /// followed by an optional chain operand, then flag operands. Compute the
890 /// number of actual operands that will go into the machine instr.
// Operand-side twin of CountResults: strip trailing flags, then one chain.
// The flag loop's decrement and the final `return N;` are on lines not
// visible in this view.
891 unsigned SimpleSched::CountOperands(SDNode *Node) {
892 unsigned N = Node->getNumOperands();
893 while (N && Node->getOperand(N - 1).getValueType() == MVT::Flag)
895 if (N && Node->getOperand(N - 1).getValueType() == MVT::Other)
896 --N; // Ignore chain if it exists.
900 /// CreateVirtualRegisters - Add result register values for things that are
901 /// defined by this instruction.
// Creates NumResults consecutive virtual registers (classes taken from the
// instruction descriptor's per-operand info) and adds each as a Def operand
// of MI.  The first created register is the node's VRBase; the `return
// ResultReg;` that yields it is on a line not visible in this view.
902 unsigned SimpleSched::CreateVirtualRegisters(MachineInstr *MI,
904 const TargetInstrDescriptor &II) {
905 // Create the result registers for this node and add the result regs to
906 // the machine instruction.
907 const TargetOperandInfo *OpInfo = II.OpInfo;
908 unsigned ResultReg = RegMap->createVirtualRegister(OpInfo[0].RegClass);
909 MI->addRegOperand(ResultReg, MachineOperand::Def);
910 for (unsigned i = 1; i != NumResults; ++i) {
911 assert(OpInfo[i].RegClass && "Isn't a register operand!");
912 MI->addRegOperand(RegMap->createVirtualRegister(OpInfo[i].RegClass),
913 MachineOperand::Def);
918 /// EmitNode - Generate machine code for an node and needed dependencies.
// Two regimes: (1) target opcodes become a MachineInstr - result vregs are
// created, each SDOperand is translated to the matching MachineOperand kind,
// and the instruction is appended to BB (or handed to the target for custom
// insertion); (2) a few target-independent opcodes (EntryToken/TokenFactor,
// CopyToReg, CopyFromReg) are expanded by hand.  NOTE(review): many closing
// braces, `break` statements, and `case`/`else` lines are missing from this
// lossy view; code below is left byte-identical.
920 void SimpleSched::EmitNode(NodeInfo *NI) {
921 unsigned VRBase = 0; // First virtual register for node
922 SDNode *Node = NI->Node;
924 // If machine instruction
925 if (Node->isTargetOpcode()) {
926 unsigned Opc = Node->getTargetOpcode();
927 const TargetInstrDescriptor &II = TII.get(Opc);
929 unsigned NumResults = CountResults(Node);
930 unsigned NodeOperands = CountOperands(Node);
931 unsigned NumMIOperands = NodeOperands + NumResults;
// numOperands == -1 marks variadic instructions in the .td descriptor.
933 assert((unsigned(II.numOperands) == NumMIOperands || II.numOperands == -1)&&
934 "#operands for dag node doesn't match .td file!");
937 // Create the new machine instruction.
938 MachineInstr *MI = new MachineInstr(Opc, NumMIOperands, true, true);
940 // Add result register values for things that are defined by this
942 if (NumResults) VRBase = CreateVirtualRegisters(MI, NumResults, II);
944 // Emit all of the actual operands of this instruction, adding them to the
945 // instruction as appropriate.
946 for (unsigned i = 0; i != NodeOperands; ++i) {
947 if (Node->getOperand(i).isTargetOpcode()) {
948 // Note that this case is redundant with the final else block, but we
949 // include it because it is the most common and it makes the logic
951 assert(Node->getOperand(i).getValueType() != MVT::Other &&
952 Node->getOperand(i).getValueType() != MVT::Flag &&
953 "Chain and flag operands should occur at end of operand list!");
955 // Get/emit the operand.
956 unsigned VReg = getVR(Node->getOperand(i));
957 MI->addRegOperand(VReg, MachineOperand::Use);
959 // Verify that it is right.
960 assert(MRegisterInfo::isVirtualRegister(VReg) && "Not a vreg?");
961 assert(II.OpInfo[i+NumResults].RegClass &&
962 "Don't have operand info for this instruction!");
963 assert(RegMap->getRegClass(VReg) == II.OpInfo[i+NumResults].RegClass &&
964 "Register class of operand and regclass of use don't agree!");
// Each leaf/passive operand kind maps to its MachineOperand flavor below.
965 } else if (ConstantSDNode *C =
966 dyn_cast<ConstantSDNode>(Node->getOperand(i))) {
967 MI->addZeroExtImm64Operand(C->getValue());
968 } else if (RegisterSDNode*R =
969 dyn_cast<RegisterSDNode>(Node->getOperand(i))) {
970 MI->addRegOperand(R->getReg(), MachineOperand::Use);
971 } else if (GlobalAddressSDNode *TGA =
972 dyn_cast<GlobalAddressSDNode>(Node->getOperand(i))) {
973 MI->addGlobalAddressOperand(TGA->getGlobal(), false, 0);
974 } else if (BasicBlockSDNode *BB =
975 dyn_cast<BasicBlockSDNode>(Node->getOperand(i))) {
976 MI->addMachineBasicBlockOperand(BB->getBasicBlock());
977 } else if (FrameIndexSDNode *FI =
978 dyn_cast<FrameIndexSDNode>(Node->getOperand(i))) {
979 MI->addFrameIndexOperand(FI->getIndex());
980 } else if (ConstantPoolSDNode *CP =
981 dyn_cast<ConstantPoolSDNode>(Node->getOperand(i))) {
982 unsigned Idx = ConstPool->getConstantPoolIndex(CP->get());
983 MI->addConstantPoolIndexOperand(Idx);
984 } else if (ExternalSymbolSDNode *ES =
985 dyn_cast<ExternalSymbolSDNode>(Node->getOperand(i))) {
986 MI->addExternalSymbolOperand(ES->getSymbol(), false);
// Fallback (final else, on a missing line): any other already-emitted node.
988 assert(Node->getOperand(i).getValueType() != MVT::Other &&
989 Node->getOperand(i).getValueType() != MVT::Flag &&
990 "Chain and flag operands should occur at end of operand list!");
991 unsigned VReg = getVR(Node->getOperand(i));
992 MI->addRegOperand(VReg, MachineOperand::Use);
994 // Verify that it is right.
995 assert(MRegisterInfo::isVirtualRegister(VReg) && "Not a vreg?");
996 assert(II.OpInfo[i+NumResults].RegClass &&
997 "Don't have operand info for this instruction!");
998 assert(RegMap->getRegClass(VReg) == II.OpInfo[i+NumResults].RegClass &&
999 "Register class of operand and regclass of use don't agree!");
1003 // Now that we have emitted all operands, emit this instruction itself.
1004 if ((II.Flags & M_USES_CUSTOM_DAG_SCHED_INSERTION) == 0) {
1005 BB->insert(BB->end(), MI);
1007 // Insert this instruction into the end of the basic block, potentially
1008 // taking some custom action.
1009 BB = DAG.getTargetLoweringInfo().InsertAtEndOfBasicBlock(MI, BB);
// Regime (2): hand-expanded target-independent nodes.
1012 switch (Node->getOpcode()) {
1015 assert(0 && "This target-independent node should have been selected!");
1016 case ISD::EntryToken: // fall thru
1017 case ISD::TokenFactor:
1019 case ISD::CopyToReg: {
1020 unsigned Val = getVR(Node->getOperand(2));
1021 MRI.copyRegToReg(*BB, BB->end(),
1022 cast<RegisterSDNode>(Node->getOperand(1))->getReg(), Val,
1023 RegMap->getRegClass(Val));
1026 case ISD::CopyFromReg: {
1027 unsigned SrcReg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
1029 // Figure out the register class to create for the destreg.
1030 const TargetRegisterClass *TRC = 0;
1031 if (MRegisterInfo::isVirtualRegister(SrcReg)) {
1032 TRC = RegMap->getRegClass(SrcReg);
1034 // Pick the register class of the right type that contains this physreg.
1035 for (MRegisterInfo::regclass_iterator I = MRI.regclass_begin(),
1036 E = MRI.regclass_end(); I != E; ++I)
1037 if ((*I)->getType() == Node->getValueType(0) &&
1038 (*I)->contains(SrcReg)) {
1042 assert(TRC && "Couldn't find register class for reg copy!");
1045 // Create the reg, emit the copy.
1046 VRBase = RegMap->createVirtualRegister(TRC);
1047 MRI.copyRegToReg(*BB, BB->end(), VRBase, SrcReg, TRC);
// Record the node's first result vreg so later users can look it up.
1053 assert(NI->VRBase == 0 && "Node emitted out of order - early");
1054 NI->VRBase = VRBase;
/// EmitDAG - Generate machine code for the DAG rooted at Op, recursively
/// emitting any not-yet-emitted operands first (a DFS over the selected DAG).
/// Returns the base virtual register assigned to the node plus Op.ResNo, so
/// multi-result nodes hand back the register of the requested result.
/// Results are memoized in VRMap so each SDNode is emitted exactly once.
unsigned SimpleSched::EmitDAG(SDOperand Op) {
// Already emitted?  Return the cached base register, offset by the result
// number being requested from this (possibly multi-valued) node.
std::map<SDNode *, unsigned>::iterator OpI = VRMap.lower_bound(Op.Val);
if (OpI != VRMap.end() && OpI->first == Op.Val)
return OpI->second + Op.ResNo;
// Reserve the map slot up front (value filled in later) so that recursive
// EmitDAG calls reuse this entry instead of re-inserting the node.
// NOTE(review): OpSlot is not assigned in the lines visible here; presumably
// it is set to ResultReg before the final return — confirm against the
// full source.
unsigned &OpSlot = VRMap.insert(OpI, std::make_pair(Op.Val, 0))->second;
// Base virtual register for this node's results (0 if it defines none).
unsigned ResultReg = 0;
if (Op.isTargetOpcode()) {
// Target machine instruction: build a MachineInstr from the .td-described
// instruction descriptor.
unsigned Opc = Op.getTargetOpcode();
const TargetInstrDescriptor &II = TII.get(Opc);
unsigned NumResults = CountResults(Op.Val);
unsigned NodeOperands = CountOperands(Op.Val);
unsigned NumMIOperands = NodeOperands + NumResults;
// -1 (variable_ops) descriptors bypass the arity check.
assert((unsigned(II.numOperands) == NumMIOperands || II.numOperands == -1)&&
"#operands for dag node doesn't match .td file!");
// Create the new machine instruction.
MachineInstr *MI = new MachineInstr(Opc, NumMIOperands, true, true);
// Add result register values for things that are defined by this
// instruction.
if (NumResults) ResultReg = CreateVirtualRegisters(MI, NumResults, II);
// If there is a token chain operand, emit it first, as a hack to avoid
// really bad cases.
if (Op.getNumOperands() > NodeOperands &&
Op.getOperand(NodeOperands).getValueType() == MVT::Other) {
EmitDAG(Op.getOperand(NodeOperands));
// Emit all of the actual operands of this instruction, adding them to the
// instruction as appropriate.
for (unsigned i = 0; i != NodeOperands; ++i) {
if (Op.getOperand(i).isTargetOpcode()) {
// Note that this case is redundant with the final else block, but we
// include it because it is the most common and it makes the logic
// simpler here.
assert(Op.getOperand(i).getValueType() != MVT::Other &&
Op.getOperand(i).getValueType() != MVT::Flag &&
"Chain and flag operands should occur at end of operand list!");
// Emit the operand subtree and feed its result register in as a use.
unsigned VReg = EmitDAG(Op.getOperand(i));
MI->addRegOperand(VReg, MachineOperand::Use);
// Verify that it is right.
assert(MRegisterInfo::isVirtualRegister(VReg) && "Not a vreg?");
assert(II.OpInfo[i+NumResults].RegClass &&
"Don't have operand info for this instruction!");
// On mismatch, dump both the defining node and the user to cerr before
// the assert below fires, to aid debugging of bad selections.
if (RegMap->getRegClass(VReg) != II.OpInfo[i+NumResults].RegClass) {
std::cerr << "OP: ";
Op.getOperand(i).Val->dump(&DAG); std::cerr << "\nUSE: ";
Op.Val->dump(&DAG); std::cerr << "\n";
assert(RegMap->getRegClass(VReg) == II.OpInfo[i+NumResults].RegClass &&
"Register class of operand and regclass of use don't agree!");
// Leaf operand kinds: each maps to the corresponding MachineOperand form.
} else if (ConstantSDNode *C =
dyn_cast<ConstantSDNode>(Op.getOperand(i))) {
MI->addZeroExtImm64Operand(C->getValue());
} else if (RegisterSDNode*R =dyn_cast<RegisterSDNode>(Op.getOperand(i))) {
MI->addRegOperand(R->getReg(), MachineOperand::Use);
} else if (GlobalAddressSDNode *TGA =
dyn_cast<GlobalAddressSDNode>(Op.getOperand(i))) {
MI->addGlobalAddressOperand(TGA->getGlobal(), false, 0);
} else if (BasicBlockSDNode *BB =
dyn_cast<BasicBlockSDNode>(Op.getOperand(i))) {
MI->addMachineBasicBlockOperand(BB->getBasicBlock());
} else if (FrameIndexSDNode *FI =
dyn_cast<FrameIndexSDNode>(Op.getOperand(i))) {
MI->addFrameIndexOperand(FI->getIndex());
} else if (ConstantPoolSDNode *CP =
dyn_cast<ConstantPoolSDNode>(Op.getOperand(i))) {
// Materialize the constant into this function's constant pool and
// reference it by index.
unsigned Idx = ConstPool->getConstantPoolIndex(CP->get());
MI->addConstantPoolIndexOperand(Idx);
} else if (ExternalSymbolSDNode *ES =
dyn_cast<ExternalSymbolSDNode>(Op.getOperand(i))) {
MI->addExternalSymbolOperand(ES->getSymbol(), false);
// Fallback: any other non-target node is emitted recursively and used
// through its virtual register (same checks as the target-opcode case).
assert(Op.getOperand(i).getValueType() != MVT::Other &&
Op.getOperand(i).getValueType() != MVT::Flag &&
"Chain and flag operands should occur at end of operand list!");
unsigned VReg = EmitDAG(Op.getOperand(i));
MI->addRegOperand(VReg, MachineOperand::Use);
// Verify that it is right.
assert(MRegisterInfo::isVirtualRegister(VReg) && "Not a vreg?");
assert(II.OpInfo[i+NumResults].RegClass &&
"Don't have operand info for this instruction!");
assert(RegMap->getRegClass(VReg) == II.OpInfo[i+NumResults].RegClass &&
"Register class of operand and regclass of use don't agree!");
// Finally, if this node has any flag operands, we *must* emit them last, to
// avoid emitting operations that might clobber the flags.
if (Op.getNumOperands() > NodeOperands) {
unsigned i = NodeOperands;
if (Op.getOperand(i).getValueType() == MVT::Other)
++i; // the chain is already selected.
for (unsigned N = Op.getNumOperands(); i < N; i++) {
assert(Op.getOperand(i).getValueType() == MVT::Flag &&
"Must be flag operands!");
EmitDAG(Op.getOperand(i));
// Now that we have emitted all operands, emit this instruction itself.
if ((II.Flags & M_USES_CUSTOM_DAG_SCHED_INSERTION) == 0) {
BB->insert(BB->end(), MI);
// Insert this instruction into the end of the basic block, potentially
// taking some custom action (the target hook may even switch to a new
// block, so BB is reassigned).
BB = DAG.getTargetLoweringInfo().InsertAtEndOfBasicBlock(MI, BB);
// Non-target node: handle the few target-independent opcodes that are
// allowed to survive instruction selection.
switch (Op.getOpcode()) {
assert(0 && "This target-independent node should have been selected!");
case ISD::EntryToken: break;
case ISD::TokenFactor:
// A TokenFactor just forces emission of all of its chained inputs.
for (unsigned i = 0, N = Op.getNumOperands(); i < N; i++) {
EmitDAG(Op.getOperand(i));
case ISD::CopyToReg: {
// Operands: (chain, destreg, value[, flag]).  Track the optional flag
// operand so the chain is not emitted twice when it aliases the flag
// producer.
SDOperand FlagOp; FlagOp.ResNo = 0;
if (Op.getNumOperands() == 4) {
FlagOp = Op.getOperand(3);
if (Op.getOperand(0).Val != FlagOp.Val) {
EmitDAG(Op.getOperand(0)); // Emit the chain.
unsigned Val = EmitDAG(Op.getOperand(2));
// Copy the computed value into the requested destination register.
MRI.copyRegToReg(*BB, BB->end(),
cast<RegisterSDNode>(Op.getOperand(1))->getReg(), Val,
RegMap->getRegClass(Val));
case ISD::CopyFromReg: {
EmitDAG(Op.getOperand(0)); // Emit the chain.
unsigned SrcReg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
// Figure out the register class to create for the destreg.
const TargetRegisterClass *TRC = 0;
if (MRegisterInfo::isVirtualRegister(SrcReg)) {
// Virtual source: reuse its existing register class.
TRC = RegMap->getRegClass(SrcReg);
// Pick the register class of the right type that contains this physreg.
for (MRegisterInfo::regclass_iterator I = MRI.regclass_begin(),
E = MRI.regclass_end(); I != E; ++I)
if ((*I)->getType() == Op.Val->getValueType(0) &&
(*I)->contains(SrcReg)) {
assert(TRC && "Couldn't find register class for reg copy!");
// Create the reg, emit the copy.
ResultReg = RegMap->createVirtualRegister(TRC);
MRI.copyRegToReg(*BB, BB->end(), ResultReg, SrcReg, TRC);
// Hand back the register for the requested result number.
return ResultReg+Op.ResNo;
/// Schedule - Order the DAG's nodes according to the style selected on the
/// command line (-sched), then emit the machine instructions.  The default
/// ("none") path simply emits via a depth-first walk with no reordering.
void SimpleSched::Schedule() {
switch (ScheduleStyle) {
case simpleScheduling:
NodeCount = DAG.allnodes_size();
// Don't waste time scheduling if the DAG is only an entry and a return.
if (NodeCount > 3) {
// Gather latency and resource requirements for each node.
// Breadth first walk of DAG
DEBUG(dump("Pre-"));
// Push back long instructions and critical path
DEBUG(dump("Mid-"));
// Pack instructions to maximize resource utilization
DEBUG(dump("Post-"));
// Emit in scheduled order
// Fallback/no-scheduling path: emit instructions using a DFS from the
// exit root.
EmitDAG(DAG.getRoot());
/// printSI - Print scheduling info for a single node: its resource set,
/// latency, assigned time slot, arity (operand/value counts) and operation
/// name, with "<#"/"#>"-style markers for flag definers and flag users.
void SimpleSched::printSI(std::ostream &O, NodeInfo *NI) const {
using namespace std;
SDNode *Node = NI->Node;
<< ", RS=" << NI->ResourceSet
<< ", Lat=" << NI->Latency
<< ", Slot=" << NI->Slot
<< ", ARITY=(" << Node->getNumOperands() << ","
<< Node->getNumValues() << ")"
<< " " << Node->getOperationName(&DAG);
if (isFlagDefiner(Node)) O << "<#";
if (isFlagUser(Node)) O << ">#";
/// print - Print the computed node ordering to the specified output stream,
/// walking the Ordering vector and, for each group leader, the members of
/// its NodeGroup.
void SimpleSched::print(std::ostream &O) const {
using namespace std;
for (unsigned i = 0, N = Ordering.size(); i < N; i++) {
NodeInfo *NI = Ordering[i];
// Group leaders also print the nodes grouped with them.
if (NI->isGroupLeader()) {
NodeGroup *Group = NI->Group;
for (NIIterator NII = Group->begin(), E = Group->end();
/// dump - Print the current node ordering to std::cerr (debugging aid).
void SimpleSched::dump() const {
1315 //===----------------------------------------------------------------------===//
1318 //===----------------------------------------------------------------------===//
1319 /// ScheduleAndEmitDAG - Pick a safe ordering and emit instructions for each
1320 /// target node in the graph.
1321 void SelectionDAGISel::ScheduleAndEmitDAG(SelectionDAG &SD) {
1322 if (ViewDAGs) SD.viewGraph();
1323 BB = SimpleSched(SD, BB).Run();