//===-- TargetInstrInfo.cpp - Target Instruction Information -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <cctype>
#include <cstring>

using namespace llvm;

static cl::opt<bool> DisableHazardRecognizer(
  "disable-sched-hazard", cl::Hidden, cl::init(false),
  cl::desc("Disable hazard detection during preRA scheduling"));
TargetInstrInfo::~TargetInstrInfo() {
}

const TargetRegisterClass*
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                             const TargetRegisterInfo *TRI,
                             const MachineFunction &MF) const {
  if (OpNum >= MCID.getNumOperands())
    return nullptr;

  short RegClass = MCID.OpInfo[OpNum].RegClass;
  if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
    return TRI->getPointerRegClass(MF, RegClass);

  // Instructions like INSERT_SUBREG do not have fixed register classes.
  if (RegClass < 0)
    return nullptr;

  // Otherwise just look it up normally.
  return TRI->getRegClass(RegClass);
}

/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  llvm_unreachable("Target didn't implement insertNoop!");
}

/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overloaded in the target code to do that.
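///
/// For example (illustrative only, assuming the target's comment string is
/// "#" and getMaxInstLength() returns 4), the string
///   "# setup\n addl %eax, %ebx\n nop"
/// contains one comment line and two instructions, so the approximate
/// length returned would be 2 * 4 = 8 bytes.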
unsigned TargetInstrInfo::getInlineAsmLength(const char *Str,
                                             const MCAsmInfo &MAI) const {
  // Count the number of instructions in the asm.
  bool atInsnStart = true;
  unsigned Length = 0;
  for (; *Str; ++Str) {
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0)
      atInsnStart = true;
    // A comment at the start of an instruction does not count as an
    // instruction, so clear atInsnStart before the measuring check below.
    if (atInsnStart && strncmp(Str, MAI.getCommentString(),
                               strlen(MAI.getCommentString())) == 0)
      atInsnStart = false;
    if (atInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
      Length += MAI.getMaxInstLength();
      atInsnStart = false;
    }
  }

  return Length;
}

/// ReplaceTailWithBranchTo - Delete the instruction Tail and everything
/// after it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Save off the debug loc before erasing the instruction.
  DebugLoc DL = Tail->getDebugLoc();

  // Remove all the dead instructions from the end of MBB.
  MBB->erase(Tail, MBB->end());

  // If MBB isn't immediately before NewDest, insert a branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    InsertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
  MBB->addSuccessor(NewDest);
}

MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr *MI,
                                                      bool NewMI,
                                                      unsigned Idx1,
                                                      unsigned Idx2) const {
  const MCInstrDesc &MCID = MI->getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI->getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its own.
    return nullptr;

  unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
  unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
  assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
         CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
         "TargetInstrInfo::commuteInstructionImpl(): not commutable operands.");
  assert(MI->getOperand(Idx1).isReg() && MI->getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");

  unsigned Reg0 = HasDef ? MI->getOperand(0).getReg() : 0;
  unsigned Reg1 = MI->getOperand(Idx1).getReg();
  unsigned Reg2 = MI->getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI->getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI->getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI->getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI->getOperand(Idx1).isKill();
  bool Reg2IsKill = MI->getOperand(Idx2).isKill();
  bool Reg1IsUndef = MI->getOperand(Idx1).isUndef();
  bool Reg2IsUndef = MI->getOperand(Idx2).isUndef();
  bool Reg1IsInternal = MI->getOperand(Idx1).isInternalRead();
  bool Reg2IsInternal = MI->getOperand(Idx2).isInternalRead();
  // If the destination is tied to either of the commuted source registers,
  // then it must be updated.
  if (HasDef && Reg0 == Reg1 &&
      MI->getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI->getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  if (NewMI) {
    // Create a new instruction.
    MachineFunction &MF = *MI->getParent()->getParent();
    MI = MF.CloneMachineInstr(MI);
  }

  if (HasDef) {
    MI->getOperand(0).setReg(Reg0);
    MI->getOperand(0).setSubReg(SubReg0);
  }
  MI->getOperand(Idx2).setReg(Reg1);
  MI->getOperand(Idx1).setReg(Reg2);
  MI->getOperand(Idx2).setSubReg(SubReg1);
  MI->getOperand(Idx1).setSubReg(SubReg2);
  MI->getOperand(Idx2).setIsKill(Reg1IsKill);
  MI->getOperand(Idx1).setIsKill(Reg2IsKill);
  MI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
  MI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
  MI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
  MI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
  return MI;
}

MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr *MI,
                                                  bool NewMI,
                                                  unsigned OpIdx1,
                                                  unsigned OpIdx2) const {
  // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
  // any commutable operand, which is done in the findCommutedOpIndices()
  // method called below.
  if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
      !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
    assert(MI->isCommutable() &&
           "Precondition violation: MI must be commutable.");
    return nullptr;
  }
  return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}

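// Usage sketch for commuteInstruction() above (illustrative; the caller and
// values are hypothetical): a pass that does not care which operands get
// swapped can pass the CommuteAnyOperandIndex sentinel and let
// findCommutedOpIndices() choose:
//
//   MachineInstr *CommutedMI =
//       TII->commuteInstruction(MI, /*NewMI=*/false,
//                               TargetInstrInfo::CommuteAnyOperandIndex,
//                               TargetInstrInfo::CommuteAnyOperandIndex);
//   if (CommutedMI) {
//     // With NewMI == false, MI itself was commuted in place.
//   }
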
bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
                                           unsigned &ResultIdx2,
                                           unsigned CommutableOpIdx1,
                                           unsigned CommutableOpIdx2) {
  if (ResultIdx1 == CommuteAnyOperandIndex &&
      ResultIdx2 == CommuteAnyOperandIndex) {
    ResultIdx1 = CommutableOpIdx1;
    ResultIdx2 = CommutableOpIdx2;
  } else if (ResultIdx1 == CommuteAnyOperandIndex) {
    if (ResultIdx2 == CommutableOpIdx1)
      ResultIdx1 = CommutableOpIdx2;
    else if (ResultIdx2 == CommutableOpIdx2)
      ResultIdx1 = CommutableOpIdx1;
    else
      return false;
  } else if (ResultIdx2 == CommuteAnyOperandIndex) {
    if (ResultIdx1 == CommutableOpIdx1)
      ResultIdx2 = CommutableOpIdx2;
    else if (ResultIdx1 == CommutableOpIdx2)
      ResultIdx2 = CommutableOpIdx1;
    else
      return false;
  }

  // Check that the result operand indices match the given commutable
  // operand indices.
  return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
         (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);
}

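// Worked example for fixCommutedOpIndices() above (illustrative): suppose the
// commutable operands of an instruction are at indices 1 and 2. If a caller
// passes ResultIdx1 = CommuteAnyOperandIndex and ResultIdx2 = 2, the second
// branch resolves ResultIdx1 to 1, and the final check succeeds because
// {1, 2} matches the commutable pair. Passing a fixed ResultIdx2 = 3 instead
// would fail the resolution and return false.
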
bool TargetInstrInfo::findCommutedOpIndices(MachineInstr *MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2) const {
  assert(!MI->isBundle() &&
         "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI->getDesc();
  if (!MCID.isCommutable())
    return false;

  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  unsigned CommutableOpIdx1 = MCID.getNumDefs();
  unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
                            CommutableOpIdx1, CommutableOpIdx2))
    return false;

  // The commutable operands must be register operands.
  if (!MI->getOperand(SrcOpIdx1).isReg() ||
      !MI->getOperand(SrcOpIdx2).isReg())
    return false;
  return true;
}

bool
TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
  if (!MI->isTerminator()) return false;

  // Conditional branch is a special case.
  if (MI->isBranch() && !MI->isBarrier())
    return true;
  if (!MI->isPredicable())
    return true;
  return !isPredicated(MI);
}

bool TargetInstrInfo::PredicateInstruction(
    MachineInstr *MI, ArrayRef<MachineOperand> Pred) const {
  bool MadeChange = false;

  assert(!MI->isBundle() &&
         "TargetInstrInfo::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI->getDesc();
  if (!MI->isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI->getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate()) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}

bool TargetInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
         oe = MI->memoperands_end();
       o != oe;
       ++o) {
    if ((*o)->isLoad()) {
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast_or_null<FixedStackPseudoSourceValue>(
              (*o)->getPseudoValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
    }
  }
  return false;
}

bool TargetInstrInfo::hasStoreToStackSlot(const MachineInstr *MI,
                                          const MachineMemOperand *&MMO,
                                          int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
         oe = MI->memoperands_end();
       o != oe;
       ++o) {
    if ((*o)->isStore()) {
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast_or_null<FixedStackPseudoSourceValue>(
              (*o)->getPseudoValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
    }
  }
  return false;
}

bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
                                        unsigned SubIdx, unsigned &Size,
                                        unsigned &Offset,
                                        const MachineFunction &MF) const {
  if (!SubIdx) {
    Size = RC->getSize();
    Offset = 0;
    return true;
  }
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
  // Convert bit size to byte size to be consistent with
  // MCRegisterClass::getSize().
  if (BitSize % 8)
    return false;

  int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
  if (BitOffset < 0 || BitOffset % 8)
    return false;

  Size = BitSize / 8;
  Offset = (unsigned)BitOffset / 8;

  assert(RC->getSize() >= (Offset + Size) && "bad subregister range");

  if (!MF.getDataLayout().isLittleEndian()) {
    Offset = RC->getSize() - (Offset + Size);
  }
  return true;
}

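// Worked example for getStackSlotRange() above (illustrative numbers): for a
// 16-byte register class and a subregister index spanning bits [32, 64),
// BitSize is 32 and BitOffset is 32, giving Size = 4 and Offset = 4. On a
// big-endian target the offset is then flipped: Offset = 16 - (4 + 4) = 8.
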
void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    unsigned DestReg,
                                    unsigned SubIdx,
                                    const MachineInstr *Orig,
                                    const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}

bool
TargetInstrInfo::produceSameValue(const MachineInstr *MI0,
                                  const MachineInstr *MI1,
                                  const MachineRegisterInfo *MRI) const {
  return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr *TargetInstrInfo::duplicate(MachineInstr *Orig,
                                         MachineFunction &MF) const {
  assert(!Orig->isNotDuplicable() &&
         "Instruction cannot be duplicated");
  return MF.CloneMachineInstr(Orig);
}

// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr *MI,
                                              unsigned FoldIdx) {
  assert(MI->isCopy() && "MI must be a COPY instruction");
  if (MI->getNumOperands() != 2)
    return nullptr;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI->getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI->getOperand(1 - FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return nullptr;

  unsigned FoldReg = FoldOp.getReg();
  unsigned LiveReg = LiveOp.getReg();

  assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
         "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (TargetRegisterInfo::isPhysicalRegister(LiveOp.getReg()))
    return RC->contains(LiveOp.getReg()) ? RC : nullptr;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return nullptr;
}

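// For example (hypothetical registers): given "%vreg1 = COPY %EAX" and
// FoldIdx == 0, the def side is folded, so the COPY may become a store of
// %EAX to the chosen stack slot, provided %EAX is a member of %vreg1's
// register class; foldMemoryOperand() below performs that rewrite.
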
void TargetInstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
  llvm_unreachable("Not a MachO target");
}

static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr *MI,
                                    ArrayRef<unsigned> Ops, int FrameIndex,
                                    const TargetInstrInfo &TII) {
  unsigned StartIdx = 0;
  switch (MI->getOpcode()) {
  case TargetOpcode::STACKMAP:
    StartIdx = 2; // Skip ID, nShadowBytes.
    break;
  case TargetOpcode::PATCHPOINT: {
    // For PatchPoint, the call args are not foldable.
    PatchPointOpers opers(MI);
    StartIdx = opers.getVarIdx();
    break;
  }
  default:
    llvm_unreachable("unexpected stackmap opcode");
  }

  // Return nullptr if any operands requested for folding are not foldable (not
  // part of the stackmap's live values).
  for (unsigned Op : Ops) {
    if (Op < StartIdx)
      return nullptr;
  }

  MachineInstr *NewMI =
      MF.CreateMachineInstr(TII.get(MI->getOpcode()), MI->getDebugLoc(), true);
  MachineInstrBuilder MIB(MF, NewMI);

  // No need to fold the return value, the metadata, or the function arguments.
  for (unsigned i = 0; i < StartIdx; ++i)
    MIB.addOperand(MI->getOperand(i));

  for (unsigned i = StartIdx; i < MI->getNumOperands(); ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (std::find(Ops.begin(), Ops.end(), i) != Ops.end()) {
      unsigned SpillSize;
      unsigned SpillOffset;
      // Compute the spill slot size and offset.
      const TargetRegisterClass *RC =
          MF.getRegInfo().getRegClass(MO.getReg());
      bool Valid =
          TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
      if (!Valid)
        report_fatal_error("cannot spill patchpoint subregister operand");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(SpillSize);
      MIB.addFrameIndex(FrameIndex);
      MIB.addImm(SpillOffset);
    } else
      MIB.addOperand(MO);
  }
  return NewMI;
}

/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
/// slot into the specified machine instruction for the specified operand(s).
/// If this is possible, a new instruction is returned with the specified
/// operand folded, otherwise NULL is returned. The client is responsible for
/// removing the old instruction and adding the new one in the instruction
/// stream.
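///
/// Usage sketch (illustrative; OpIdx and FI are hypothetical values): a
/// spilling client might write
///   if (MachineInstr *FoldedMI = TII->foldMemoryOperand(MI, OpIdx, FI)) {
///     // The fold succeeded; MI is now dead and can be erased.
///   }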
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                                 ArrayRef<unsigned> Ops,
                                                 int FI) const {
  unsigned Flags = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    if (MI->getOperand(Ops[i]).isDef())
      Flags |= MachineMemOperand::MOStore;
    else
      Flags |= MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI->getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  MachineInstr *NewMI = nullptr;

  if (MI->getOpcode() == TargetOpcode::STACKMAP ||
      MI->getOpcode() == TargetOpcode::PATCHPOINT) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
    if (NewMI)
      MBB->insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI);
  }

  if (NewMI) {
    NewMI->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    const MachineFrameInfo &MFI = *MF.getFrameInfo();
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, FI), Flags, MFI.getObjectSize(FI),
        MFI.getObjectAlignment(FI));
    NewMI->addMemOperand(MF, MMO);

    return NewMI;
  }

  // Straight COPY may fold as load/store.
  if (!MI->isCopy() || Ops.size() != 1)
    return nullptr;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return nullptr;

  const MachineOperand &MO = MI->getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
  return --Pos;
}

bool TargetInstrInfo::hasReassociableOperands(
    const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
  const MachineOperand &Op1 = Inst.getOperand(1);
  const MachineOperand &Op2 = Inst.getOperand(2);
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  // We need virtual register definitions for the operands that we will
  // reassociate.
  MachineInstr *MI1 = nullptr;
  MachineInstr *MI2 = nullptr;
  if (Op1.isReg() && TargetRegisterInfo::isVirtualRegister(Op1.getReg()))
    MI1 = MRI.getUniqueVRegDef(Op1.getReg());
  if (Op2.isReg() && TargetRegisterInfo::isVirtualRegister(Op2.getReg()))
    MI2 = MRI.getUniqueVRegDef(Op2.getReg());

  // And they need to be in the trace (otherwise, they won't have a depth).
  return MI1 && MI2 && MI1->getParent() == MBB && MI2->getParent() == MBB;
}

bool TargetInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
                                             bool &Commuted) const {
  const MachineBasicBlock *MBB = Inst.getParent();
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
  MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
  unsigned AssocOpcode = Inst.getOpcode();

  // If only one operand has the same opcode and it's the second source
  // operand, the operands must be commuted.
  Commuted = MI1->getOpcode() != AssocOpcode && MI2->getOpcode() == AssocOpcode;
  if (Commuted)
    std::swap(MI1, MI2);

  // 1. The previous instruction must be the same type as Inst.
  // 2. The previous instruction must have virtual register definitions for its
  //    operands in the same basic block as Inst.
  // 3. The previous instruction's result must only be used by Inst.
  return MI1->getOpcode() == AssocOpcode &&
         hasReassociableOperands(*MI1, MBB) &&
         MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
}

// 1. The operation must be associative and commutative.
// 2. The instruction must have virtual register definitions for its
//    operands in the same basic block.
// 3. The instruction must have a reassociable sibling.
bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
                                               bool &Commuted) const {
  return isAssociativeAndCommutative(Inst) &&
         hasReassociableOperands(Inst, Inst.getParent()) &&
         hasReassociableSibling(Inst, Commuted);
}

// The concept of the reassociation pass is that these operations can benefit
// from this kind of transformation:
//
// A = ? op ?
// B = A op X (Prev)
// C = B op Y (Root)
// -->
// A = ? op ?
// B = X op Y
// C = A op B
//
// breaking the dependency between A and B, allowing them to be executed in
// parallel (or back-to-back in a pipeline) instead of depending on each other.
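//
// For example (illustrative, with generic adds):
//   B = add A, X
//   C = add B, Y
// is rewritten as
//   B' = add X, Y
//   C  = add A, B'
// so the new first add no longer waits on the computation of A.
//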
// FIXME: This has the potential to be expensive (compile time) while not
// improving the code at all. Some ways to limit the overhead:
// 1. Track successful transforms; bail out if hit rate gets too low.
// 2. Only enable at -O3 or some other non-default optimization level.
// 3. Pre-screen pattern candidates here: if an operand of the previous
//    instruction is known to not increase the critical path, then don't match
//    that pattern.
bool TargetInstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root,
    SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &Patterns) const {
  bool Commute;
  if (isReassociationCandidate(Root, Commute)) {
    // We found a sequence of instructions that may be suitable for a
    // reassociation of operands to increase ILP. Specify each commutation
    // possibility for the Prev instruction in the sequence and let the
    // machine combiner decide if changing the operands is worthwhile.
    if (Commute) {
      Patterns.push_back(MachineCombinerPattern::MC_REASSOC_AX_YB);
      Patterns.push_back(MachineCombinerPattern::MC_REASSOC_XA_YB);
    } else {
      Patterns.push_back(MachineCombinerPattern::MC_REASSOC_AX_BY);
      Patterns.push_back(MachineCombinerPattern::MC_REASSOC_XA_BY);
    }
    return true;
  }

  return false;
}

/// Attempt the reassociation transformation to reduce critical path length.
/// See the above comments before getMachineCombinerPatterns().
void TargetInstrInfo::reassociateOps(
    MachineInstr &Root, MachineInstr &Prev,
    MachineCombinerPattern::MC_PATTERN Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  MachineFunction *MF = Root.getParent()->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);

  // This array encodes the operand index for each parameter because the
  // operands may be commuted. Each row corresponds to a pattern value,
  // and each column specifies the index of A, B, X, Y.
  unsigned OpIdx[4][4] = {
    { 1, 1, 2, 2 },
    { 1, 2, 2, 1 },
    { 2, 1, 1, 2 },
    { 2, 2, 1, 1 }
  };

  MachineOperand &OpA = Prev.getOperand(OpIdx[Pattern][0]);
  MachineOperand &OpB = Root.getOperand(OpIdx[Pattern][1]);
  MachineOperand &OpX = Prev.getOperand(OpIdx[Pattern][2]);
  MachineOperand &OpY = Root.getOperand(OpIdx[Pattern][3]);
  MachineOperand &OpC = Root.getOperand(0);

  unsigned RegA = OpA.getReg();
  unsigned RegB = OpB.getReg();
  unsigned RegX = OpX.getReg();
  unsigned RegY = OpY.getReg();
  unsigned RegC = OpC.getReg();

  if (TargetRegisterInfo::isVirtualRegister(RegA))
    MRI.constrainRegClass(RegA, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegB))
    MRI.constrainRegClass(RegB, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegX))
    MRI.constrainRegClass(RegX, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegY))
    MRI.constrainRegClass(RegY, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegC))
    MRI.constrainRegClass(RegC, RC);

  // Create a new virtual register for the result of (X op Y) instead of
  // recycling RegB because the MachineCombiner's computation of the critical
  // path requires a new register definition rather than an existing one.
  unsigned NewVR = MRI.createVirtualRegister(RC);
  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));

  unsigned Opcode = Root.getOpcode();
  bool KillA = OpA.isKill();
  bool KillX = OpX.isKill();
  bool KillY = OpY.isKill();

  // Create new instructions for insertion.
  MachineInstrBuilder MIB1 =
      BuildMI(*MF, Prev.getDebugLoc(), TII->get(Opcode), NewVR)
          .addReg(RegX, getKillRegState(KillX))
          .addReg(RegY, getKillRegState(KillY));
  MachineInstrBuilder MIB2 =
      BuildMI(*MF, Root.getDebugLoc(), TII->get(Opcode), RegC)
          .addReg(RegA, getKillRegState(KillA))
          .addReg(NewVR, getKillRegState(true));

  setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);

  // Record new instructions for insertion and old instructions for deletion.
  InsInstrs.push_back(MIB1);
  InsInstrs.push_back(MIB2);
  DelInstrs.push_back(&Prev);
  DelInstrs.push_back(&Root);
}

void TargetInstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, MachineCombinerPattern::MC_PATTERN Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const {
  MachineRegisterInfo &MRI = Root.getParent()->getParent()->getRegInfo();

  // Select the previous instruction in the sequence based on the input
  // pattern.
  MachineInstr *Prev = nullptr;
  switch (Pattern) {
  case MachineCombinerPattern::MC_REASSOC_AX_BY:
  case MachineCombinerPattern::MC_REASSOC_XA_BY:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(1).getReg());
    break;
  case MachineCombinerPattern::MC_REASSOC_AX_YB:
  case MachineCombinerPattern::MC_REASSOC_XA_YB:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
    break;
  default:
    break;
  }

  assert(Prev && "Unknown pattern for machine combiner");

  reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg);
}

/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                                 ArrayRef<unsigned> Ops,
                                                 MachineInstr *LoadMI) const {
  assert(LoadMI->canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    assert(MI->getOperand(Ops[i]).isUse() && "Folding load into def!");
#endif
  MachineBasicBlock &MBB = *MI->getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = nullptr;

  int FrameIndex = 0;
  if ((MI->getOpcode() == TargetOpcode::STACKMAP ||
       MI->getOpcode() == TargetOpcode::PATCHPOINT) &&
      isLoadFromStackSlot(LoadMI, FrameIndex)) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
    if (NewMI)
      NewMI = MBB.insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI);
  }

  if (!NewMI) return nullptr;

  // Copy the memoperands from the load to the folded instruction.
  if (MI->memoperands_empty()) {
    NewMI->setMemRefs(LoadMI->memoperands_begin(),
                      LoadMI->memoperands_end());
  } else {
    // Handle the rare case of folding multiple loads.
    NewMI->setMemRefs(MI->memoperands_begin(),
                      MI->memoperands_end());
    for (MachineInstr::mmo_iterator I = LoadMI->memoperands_begin(),
           E = LoadMI->memoperands_end(); I != E; ++I) {
      NewMI->addMemOperand(MF, *I);
    }
  }
  return NewMI;
}

bool TargetInstrInfo::
isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
                                         AliasAnalysis *AA) const {
  const MachineFunction &MF = *MI->getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI->getNumOperands() || !MI->getOperand(0).isReg())
    return false;
  unsigned DefReg = MI->getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register. Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // rematerialized.
  if (TargetRegisterInfo::isVirtualRegister(DefReg) &&
      MI->getOperand(0).getSubReg() && MI->readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    return true;

  // Avoid instructions obviously unsafe for remat.
  if (MI->isNotDuplicable() || MI->mayStore() ||
      MI->hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI->isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MI->mayLoad() && !MI->isInvariantLoad(AA))
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.isConstantPhysReg(Reg, MF))
          return false;
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def. There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}

int TargetInstrInfo::getSPAdjust(const MachineInstr *MI) const {
  const MachineFunction *MF = MI->getParent()->getParent();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  bool StackGrowsDown =
      TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();

  if (MI->getOpcode() != FrameSetupOpcode &&
      MI->getOpcode() != FrameDestroyOpcode)
    return 0;

  int SPAdj = MI->getOperand(0).getImm();
  SPAdj = TFI->alignSPAdjust(SPAdj);

  if ((!StackGrowsDown && MI->getOpcode() == FrameSetupOpcode) ||
      (StackGrowsDown && MI->getOpcode() == FrameDestroyOpcode))
    SPAdj = -SPAdj;

  return SPAdj;
}

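// Worked example for getSPAdjust() above (illustrative): on a target whose
// stack grows down, a call frame setup instruction with immediate 16 returns
// +16 here, while the matching call frame destroy instruction returns -16
// (both after any rounding applied by alignSPAdjust).
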
/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
                                           const MachineBasicBlock *MBB,
                                           const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI->isTerminator() || MI->isPosition())
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  return MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
}

// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}

// Default implementation of CreateTargetHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}

// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetMIHazardRecognizer(const InstrItineraryData *II,
                               const ScheduleDAG *DAG) const {
  return new ScoreboardHazardRecognizer(II, DAG, "misched");
}

// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}

//===----------------------------------------------------------------------===//
// SelectionDAG latency interface.
//===----------------------------------------------------------------------===//

int
TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                   SDNode *DefNode, unsigned DefIdx,
                                   SDNode *UseNode, unsigned UseIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return -1;

  if (!DefNode->isMachineOpcode())
    return -1;

  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
  if (!UseNode->isMachineOpcode())
    return ItinData->getOperandCycle(DefClass, DefIdx);
  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                     SDNode *N) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  if (!N->isMachineOpcode())
    return 1;

  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
}

//===----------------------------------------------------------------------===//
// MachineInstr latency interface.
//===----------------------------------------------------------------------===//

unsigned
TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                const MachineInstr *MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Class = MI->getDesc().getSchedClass();
  int UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (UOps >= 0)
    return UOps;

  // The # of u-ops is dynamically determined. The specific target should
  // override this function to return the right number.
  return 1;
}

/// Return the default expected latency for a def based on its opcode.
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
                                            const MachineInstr *DefMI) const {
  if (DefMI->isTransient())
    return 0;
  if (DefMI->mayLoad())
    return SchedModel.LoadLatency;
  if (isHighLatencyDef(DefMI->getOpcode()))
    return SchedModel.HighLatency;
  return 1;
}

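// For example (hypothetical scheduling model): with LoadLatency = 4 and
// HighLatency = 10, a COPY (transient) gets latency 0, a load gets 4, an
// opcode the target reports through isHighLatencyDef() gets 10, and anything
// else defaults to 1.
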
unsigned TargetInstrInfo::getPredicationCost(const MachineInstr *) const {
  return 1;
}

unsigned TargetInstrInfo::
getInstrLatency(const InstrItineraryData *ItinData,
                const MachineInstr *MI,
                unsigned *PredCost) const {
  // Default to one cycle for no itinerary. However, an "empty" itinerary may
  // still have a MinLatency property, which getStageLatency checks.
  if (!ItinData)
    return MI->mayLoad() ? 2 : 1;

  return ItinData->getStageLatency(MI->getDesc().getSchedClass());
}

bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
                                       const MachineInstr *DefMI,
                                       unsigned DefIdx) const {
  const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DefClass = DefMI->getDesc().getSchedClass();
  int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
  return (DefCycle != -1 && DefCycle <= 1);
}

/// Both DefMI and UseMI must be valid. By default, call directly to the
/// itinerary. This may be overridden by the target.
int TargetInstrInfo::
getOperandLatency(const InstrItineraryData *ItinData,
                  const MachineInstr *DefMI, unsigned DefIdx,
                  const MachineInstr *UseMI, unsigned UseIdx) const {
  unsigned DefClass = DefMI->getDesc().getSchedClass();
  unsigned UseClass = UseMI->getDesc().getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

/// If we can determine the operand latency from the def only, without
/// itinerary lookup, do so. Otherwise return -1.
int TargetInstrInfo::computeDefOperandLatency(
    const InstrItineraryData *ItinData,
    const MachineInstr *DefMI) const {

  // Let the target hook getInstrLatency handle missing itineraries.
  if (!ItinData)
    return getInstrLatency(ItinData, DefMI);

  if (ItinData->isEmpty())
    return defaultDefLatency(ItinData->SchedModel, DefMI);

  // ...operand lookup required
  return -1;
}

/// computeOperandLatency - Compute and return the latency of the given data
/// dependent def and use when the operand indices are already known. UseMI
/// may be NULL for an unknown use.
///
/// Depending on the subtarget's itinerary properties, this may or may not
/// need to call getOperandLatency(). For most subtargets, we don't need
/// DefIdx or UseIdx to compute min latency.
unsigned TargetInstrInfo::
computeOperandLatency(const InstrItineraryData *ItinData,
                      const MachineInstr *DefMI, unsigned DefIdx,
                      const MachineInstr *UseMI, unsigned UseIdx) const {

  int DefLatency = computeDefOperandLatency(ItinData, DefMI);
  if (DefLatency >= 0)
    return DefLatency;

  assert(ItinData && !ItinData->isEmpty() && "computeDefOperandLatency fail");

  int OperLatency = 0;
  if (UseMI)
    OperLatency = getOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx);
  else {
    unsigned DefClass = DefMI->getDesc().getSchedClass();
    OperLatency = ItinData->getOperandCycle(DefClass, DefIdx);
  }
  if (OperLatency >= 0)
    return OperLatency;

  // No operand latency was found.
  unsigned InstrLatency = getInstrLatency(ItinData, DefMI);

  // Expected latency is the max of the stage latency and itinerary props.
  InstrLatency = std::max(InstrLatency,
                          defaultDefLatency(ItinData->SchedModel, DefMI));
  return InstrLatency;
}

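// Flow sketch for computeOperandLatency() above (illustrative): with a
// present, non-empty itinerary, computeDefOperandLatency() returns -1, so the
// def/use operand latency is looked up; if the itinerary has no cycle for
// that operand either, the result falls back to
// std::max(getInstrLatency(...), defaultDefLatency(...)).
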
bool TargetInstrInfo::getRegSequenceInputs(
    const MachineInstr &MI, unsigned DefIdx,
    SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
  assert((MI.isRegSequence() || MI.isRegSequenceLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isRegSequence())
    return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);

  // We are looking at:
  // Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
  assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
  for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
       OpIdx += 2) {
    const MachineOperand &MOReg = MI.getOperand(OpIdx);
    const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
    assert(MOSubIdx.isImm() &&
           "One of the subindices of the reg_sequence is not an immediate");
    // Record Reg:SubReg, SubIdx.
    InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
                                            (unsigned)MOSubIdx.getImm()));
  }
  return true;
}

bool TargetInstrInfo::getExtractSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPairAndIdx &InputReg) const {
  assert((MI.isExtractSubreg() || MI.isExtractSubregLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isExtractSubreg())
    return getExtractSubregLikeInputs(MI, DefIdx, InputReg);

  // We are looking at:
  // Def = EXTRACT_SUBREG v0.sub1, sub0.
  assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
  const MachineOperand &MOReg = MI.getOperand(1);
  const MachineOperand &MOSubIdx = MI.getOperand(2);
  assert(MOSubIdx.isImm() &&
         "The subindex of the extract_subreg is not an immediate");

  InputReg.Reg = MOReg.getReg();
  InputReg.SubReg = MOReg.getSubReg();
  InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}

bool TargetInstrInfo::getInsertSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
  assert((MI.isInsertSubreg() || MI.isInsertSubregLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isInsertSubreg())
    return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);

  // We are looking at:
  // Def = INSERT_SUBREG v0, v1, sub0.
  assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
  const MachineOperand &MOBaseReg = MI.getOperand(1);
  const MachineOperand &MOInsertedReg = MI.getOperand(2);
  const MachineOperand &MOSubIdx = MI.getOperand(3);
  assert(MOSubIdx.isImm() &&
         "The subindex of the insert_subreg is not an immediate");

  BaseReg.Reg = MOBaseReg.getReg();
  BaseReg.SubReg = MOBaseReg.getSubReg();

  InsertedReg.Reg = MOInsertedReg.getReg();
  InsertedReg.SubReg = MOInsertedReg.getSubReg();
  InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}