1 //===-- HexagonInstrInfo.cpp - Hexagon Instruction Information ------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains the Hexagon implementation of the TargetInstrInfo class.
12 //===----------------------------------------------------------------------===//
14 #include "HexagonInstrInfo.h"
16 #include "HexagonRegisterInfo.h"
17 #include "HexagonSubtarget.h"
18 #include "llvm/ADT/STLExtras.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/CodeGen/DFAPacketizer.h"
21 #include "llvm/CodeGen/MachineFrameInfo.h"
22 #include "llvm/CodeGen/MachineInstrBuilder.h"
23 #include "llvm/CodeGen/MachineMemOperand.h"
24 #include "llvm/CodeGen/MachineRegisterInfo.h"
25 #include "llvm/CodeGen/PseudoSourceValue.h"
26 #include "llvm/Support/Debug.h"
27 #include "llvm/Support/MathExtras.h"
28 #include "llvm/Support/raw_ostream.h"
29 #define GET_INSTRINFO_CTOR
30 #define GET_INSTRMAP_INFO
31 #include "HexagonGenInstrInfo.inc"
32 #include "HexagonGenDFAPacketizer.inc"
/// Constants for Hexagon instructions.
// Valid signed immediate-offset ranges for base+offset memory access,
// keyed by access size (D = doubleword, W = word, H = halfword, B = byte).
const int Hexagon_MEMW_OFFSET_MAX = 4095;
const int Hexagon_MEMW_OFFSET_MIN = -4096;
const int Hexagon_MEMD_OFFSET_MAX = 8191;
const int Hexagon_MEMD_OFFSET_MIN = -8192;
const int Hexagon_MEMH_OFFSET_MAX = 2047;
const int Hexagon_MEMH_OFFSET_MIN = -2048;
const int Hexagon_MEMB_OFFSET_MAX = 1023;
const int Hexagon_MEMB_OFFSET_MIN = -1024;
// Immediate range for add-immediate (ADDI).
const int Hexagon_ADDI_OFFSET_MAX = 32767;
const int Hexagon_ADDI_OFFSET_MIN = -32768;
// Valid increment ranges for post-increment (auto-increment) addressing,
// again keyed by access size.
const int Hexagon_MEMD_AUTOINC_MAX = 56;
const int Hexagon_MEMD_AUTOINC_MIN = -64;
const int Hexagon_MEMW_AUTOINC_MAX = 28;
const int Hexagon_MEMW_AUTOINC_MIN = -32;
const int Hexagon_MEMH_AUTOINC_MAX = 14;
const int Hexagon_MEMH_AUTOINC_MIN = -16;
const int Hexagon_MEMB_AUTOINC_MAX = 7;
const int Hexagon_MEMB_AUTOINC_MIN = -8;
/// Constructor: hands the target's call-frame setup/destroy pseudo opcodes
/// to the TableGen'erated base class and wires the register info and
/// subtarget into this instance.
HexagonInstrInfo::HexagonInstrInfo(HexagonSubtarget &ST)
  : HexagonGenInstrInfo(Hexagon::ADJCALLSTACKDOWN, Hexagon::ADJCALLSTACKUP),
    RI(ST, *this), Subtarget(ST) {
/// isLoadFromStackSlot - If the specified machine instruction is a direct
/// load from a stack slot, return the virtual or physical register number of
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
unsigned HexagonInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                               int &FrameIndex) const {
  switch (MI->getOpcode()) {
    // Loads matched here have the shape (dst, imm-offset, frame-index);
    // only a zero offset counts as a direct stack-slot load.
    if (MI->getOperand(2).isFI() &&
        MI->getOperand(1).isImm() && (MI->getOperand(1).getImm() == 0)) {
      FrameIndex = MI->getOperand(2).getIndex();
      return MI->getOperand(0).getReg();
92 /// isStoreToStackSlot - If the specified machine instruction is a direct
93 /// store to a stack slot, return the virtual or physical register number of
94 /// the source reg along with the FrameIndex of the loaded stack slot. If
95 /// not, return 0. This predicate must return 0 if the instruction has
96 /// any side effects other than storing to the stack slot.
97 unsigned HexagonInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
98 int &FrameIndex) const {
99 switch (MI->getOpcode()) {
105 if (MI->getOperand(2).isFI() &&
106 MI->getOperand(1).isImm() && (MI->getOperand(1).getImm() == 0)) {
107 FrameIndex = MI->getOperand(0).getIndex();
108 return MI->getOperand(2).getReg();
// Insert branch code at the end of MBB: an unconditional JMP, a conditional
// JMP_t/JMP_f, or both (conditional to TBB, unconditional to FBB).
// Cond encodes the condition; a leading immediate 0 (inserted by
// ReverseBranchCondition) selects the false-sense opcode JMP_f.
HexagonInstrInfo::InsertBranch(MachineBasicBlock &MBB,MachineBasicBlock *TBB,
                               MachineBasicBlock *FBB,
                               const SmallVectorImpl<MachineOperand> &Cond,
  int BOpc = Hexagon::JMP;
  int BccOpc = Hexagon::JMP_t;
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  // Check if ReverseBranchCondition has asked to reverse this branch
  // If we want to reverse the branch an odd number of times, we want
  if (!Cond.empty() && Cond[0].isImm() && Cond[0].getImm() == 0) {
    BccOpc = Hexagon::JMP_f;
  // Due to a bug in TailMerging/CFG Optimization, we need to add a
  // special case handling of a predicated jump followed by an
  // unconditional jump. If not, Tail Merging and CFG Optimization go
  // into an infinite loop.
  MachineBasicBlock *NewTBB, *NewFBB;
  SmallVector<MachineOperand, 4> Cond;
  MachineInstr *Term = MBB.getFirstTerminator();
  if (isPredicated(Term) && !AnalyzeBranch(MBB, NewTBB, NewFBB, Cond,
    MachineBasicBlock *NextBB =
      llvm::next(MachineFunction::iterator(&MBB));
    if (NewTBB == NextBB) {
      // The predicated jump already targets the fall-through block:
      // invert the condition and re-insert as a single branch to TBB.
      ReverseBranchCondition(Cond);
      return InsertBranch(MBB, TBB, 0, Cond, DL);
  // Unconditional-branch-only case.
  BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB);
  // Single conditional branch: predicate register taken from Cond.
  get(BccOpc)).addReg(Cond[regPos].getReg()).addMBB(TBB);
  // Two-way branch: conditional to TBB, then unconditional to FBB.
  BuildMI(&MBB, DL, get(BccOpc)).addReg(Cond[regPos].getReg()).addMBB(TBB);
  BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB);
// Analyze the terminators of MBB per the TargetInstrInfo contract: fill in
// TBB/FBB/Cond for the branch patterns this target understands, return true
// if the block cannot be analyzed.  With AllowModify set, redundant branches
// may be erased.
bool HexagonInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::instr_iterator I = MBB.instr_end();
  if (I == MBB.instr_begin())
  // A basic block may looks like this:
  // It has two succs but does not have a terminator
  // Don't know how to handle it.
  } while (I != MBB.instr_begin());
  // Skip over debug values to reach the real terminators.
  while (I->isDebugValue()) {
    if (I == MBB.instr_begin())
  // Delete the JMP if it's equivalent to a fall-through.
  if (AllowModify && I->getOpcode() == Hexagon::JMP &&
      MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
    DEBUG(dbgs()<< "\nErasing the jump to successor block\n";);
    I->eraseFromParent();
    if (I == MBB.instr_begin())
  if (!isUnpredicatedTerminator(I))
  // Get the last instruction in the block.
  MachineInstr *LastInst = I;
  MachineInstr *SecondLastInst = NULL;
  // Find one more terminator if present.
  if (&*I != LastInst && !I->isBundle() && isUnpredicatedTerminator(I)) {
    // This is a third branch.
    if (I == MBB.instr_begin())
  int LastOpcode = LastInst->getOpcode();
  // PredOpcodeHasJMP_c/PredOpcodeHasNot classify conditional-jump opcodes
  // and their inverted (false-sense) variants.
  bool LastOpcodeHasJMP_c = PredOpcodeHasJMP_c(LastOpcode);
  bool LastOpcodeHasNot = PredOpcodeHasNot(LastOpcode);
  // If there is only one terminator instruction, process it.
  if (LastInst && !SecondLastInst) {
    if (LastOpcode == Hexagon::JMP) {
      // Lone unconditional branch.
      TBB = LastInst->getOperand(0).getMBB();
    if (LastOpcode == Hexagon::ENDLOOP0) {
      // Hardware-loop end: the loop operand doubles as the condition.
      TBB = LastInst->getOperand(0).getMBB();
      Cond.push_back(LastInst->getOperand(0));
    if (LastOpcodeHasJMP_c) {
      // Lone conditional branch; a leading imm 0 in Cond records the
      // inverted (false) sense.
      TBB = LastInst->getOperand(1).getMBB();
      if (LastOpcodeHasNot) {
        Cond.push_back(MachineOperand::CreateImm(0));
      Cond.push_back(LastInst->getOperand(0));
    // Otherwise, don't know what this is.
  int SecLastOpcode = SecondLastInst->getOpcode();
  bool SecLastOpcodeHasJMP_c = PredOpcodeHasJMP_c(SecLastOpcode);
  bool SecLastOpcodeHasNot = PredOpcodeHasNot(SecLastOpcode);
  // Conditional branch followed by unconditional: the canonical two-way.
  if (SecLastOpcodeHasJMP_c && (LastOpcode == Hexagon::JMP)) {
    TBB = SecondLastInst->getOperand(1).getMBB();
    if (SecLastOpcodeHasNot)
      Cond.push_back(MachineOperand::CreateImm(0));
    Cond.push_back(SecondLastInst->getOperand(0));
    FBB = LastInst->getOperand(0).getMBB();
  // If the block ends with two Hexagon:JMPs, handle it. The second one is not
  // executed, so remove it.
  if (SecLastOpcode == Hexagon::JMP && LastOpcode == Hexagon::JMP) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I->eraseFromParent();
  // If the block ends with an ENDLOOP, and JMP, handle it.
  if (SecLastOpcode == Hexagon::ENDLOOP0 &&
      LastOpcode == Hexagon::JMP) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    Cond.push_back(SecondLastInst->getOperand(0));
    FBB = LastInst->getOperand(0).getMBB();
  // Otherwise, can't handle this.
// Remove up to two branch instructions (JMP / JMP_t / JMP_f) from the end of
// MBB and return how many were removed, per the TargetInstrInfo contract.
unsigned HexagonInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  int BOpc = Hexagon::JMP;
  int BccOpc = Hexagon::JMP_t;
  int BccOpcNot = Hexagon::JMP_f;
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return 0;
  // Last instruction is not a branch this target inserted: nothing to do.
  if (I->getOpcode() != BOpc && I->getOpcode() != BccOpc &&
      I->getOpcode() != BccOpcNot)
  // Remove the branch.
  I->eraseFromParent();
  if (I == MBB.begin()) return 1;
  // A second (necessarily conditional) branch may precede the removed one.
  if (I->getOpcode() != BccOpc && I->getOpcode() != BccOpcNot)
  // Remove the branch.
  I->eraseFromParent();
/// \brief For a comparison instruction, return the source registers in
/// \p SrcReg and \p SrcReg2 if having two register operands, and the value it
/// compares against in CmpValue. Return true if the comparison instruction
bool HexagonInstrInfo::analyzeCompare(const MachineInstr *MI,
                                      unsigned &SrcReg, unsigned &SrcReg2,
                                      int &Mask, int &Value) const {
  unsigned Opc = MI->getOpcode();
  // Set mask and the first source register.
  // Word/doubleword compares: first source is always operand 1.
  case Hexagon::CMPEHexagon4rr:
  case Hexagon::CMPEQri:
  case Hexagon::CMPEQrr:
  case Hexagon::CMPGT64rr:
  case Hexagon::CMPGTU64rr:
  case Hexagon::CMPGTUri:
  case Hexagon::CMPGTUrr:
  case Hexagon::CMPGTri:
  case Hexagon::CMPGTrr:
    SrcReg = MI->getOperand(1).getReg();
  // Byte compares (V4).
  case Hexagon::CMPbEQri_V4:
  case Hexagon::CMPbEQrr_sbsb_V4:
  case Hexagon::CMPbEQrr_ubub_V4:
  case Hexagon::CMPbGTUri_V4:
  case Hexagon::CMPbGTUrr_V4:
  case Hexagon::CMPbGTrr_V4:
    SrcReg = MI->getOperand(1).getReg();
  // Halfword compares (V4).
  case Hexagon::CMPhEQri_V4:
  case Hexagon::CMPhEQrr_shl_V4:
  case Hexagon::CMPhEQrr_xor_V4:
  case Hexagon::CMPhGTUri_V4:
  case Hexagon::CMPhGTUrr_V4:
  case Hexagon::CMPhGTrr_shl_V4:
    SrcReg = MI->getOperand(1).getReg();
  // Set the value/second source register.
  // Register-register forms: second source is operand 2.
  case Hexagon::CMPEHexagon4rr:
  case Hexagon::CMPEQrr:
  case Hexagon::CMPGT64rr:
  case Hexagon::CMPGTU64rr:
  case Hexagon::CMPGTUrr:
  case Hexagon::CMPGTrr:
  case Hexagon::CMPbEQrr_sbsb_V4:
  case Hexagon::CMPbEQrr_ubub_V4:
  case Hexagon::CMPbGTUrr_V4:
  case Hexagon::CMPbGTrr_V4:
  case Hexagon::CMPhEQrr_shl_V4:
  case Hexagon::CMPhEQrr_xor_V4:
  case Hexagon::CMPhGTUrr_V4:
  case Hexagon::CMPhGTrr_V4:
    SrcReg2 = MI->getOperand(2).getReg();
  // Register-immediate forms: the compared-against value is operand 2.
  case Hexagon::CMPEQri:
  case Hexagon::CMPGTUri:
  case Hexagon::CMPGTri:
  case Hexagon::CMPbEQri_V4:
  case Hexagon::CMPbGTUri_V4:
  case Hexagon::CMPhEQri_V4:
  case Hexagon::CMPhGTUri_V4:
    Value = MI->getOperand(2).getImm();
// Emit a register-to-register copy between physical registers, choosing the
// transfer opcode by register class.  Falls through to llvm_unreachable for
// any class pair not handled below.
void HexagonInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I, DebugLoc DL,
                                   unsigned DestReg, unsigned SrcReg,
                                   bool KillSrc) const {
  // NOTE(review): the TFR/TFR64/OR_pp copies below drop KillSrc instead of
  // forwarding it via getKillRegState — confirm whether that is intentional.
  if (Hexagon::IntRegsRegClass.contains(SrcReg, DestReg)) {
    BuildMI(MBB, I, DL, get(Hexagon::TFR), DestReg).addReg(SrcReg);
  if (Hexagon::DoubleRegsRegClass.contains(SrcReg, DestReg)) {
    BuildMI(MBB, I, DL, get(Hexagon::TFR64), DestReg).addReg(SrcReg);
  if (Hexagon::PredRegsRegClass.contains(SrcReg, DestReg)) {
    // Map Pd = Ps to Pd = or(Ps, Ps).
    BuildMI(MBB, I, DL, get(Hexagon::OR_pp),
            DestReg).addReg(SrcReg).addReg(SrcReg);
  if (Hexagon::DoubleRegsRegClass.contains(DestReg) &&
      Hexagon::IntRegsRegClass.contains(SrcReg)) {
    // We can have an overlap between single and double reg: r1:0 = r0.
    if(SrcReg == RI.getSubReg(DestReg, Hexagon::subreg_loreg)) {
      // Source already occupies the low subregister: only zero the high half.
      BuildMI(MBB, I, DL, get(Hexagon::TFRI), (RI.getSubReg(DestReg,
              Hexagon::subreg_hireg))).addImm(0);
      // r1:0 = r1 or no overlap.
      BuildMI(MBB, I, DL, get(Hexagon::TFR), (RI.getSubReg(DestReg,
              Hexagon::subreg_loreg))).addReg(SrcReg);
      BuildMI(MBB, I, DL, get(Hexagon::TFRI), (RI.getSubReg(DestReg,
              Hexagon::subreg_hireg))).addImm(0);
  // Integer register into a control register.
  if (Hexagon::CRRegsRegClass.contains(DestReg) &&
      Hexagon::IntRegsRegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(Hexagon::TFCR), DestReg).addReg(SrcReg);
  // Predicate register into an integer register.
  if (Hexagon::PredRegsRegClass.contains(SrcReg) &&
      Hexagon::IntRegsRegClass.contains(DestReg)) {
    BuildMI(MBB, I, DL, get(Hexagon::TFR_RsPd), DestReg).
      addReg(SrcReg, getKillRegState(KillSrc));
  // Integer register into a predicate register.
  if (Hexagon::IntRegsRegClass.contains(SrcReg) &&
      Hexagon::PredRegsRegClass.contains(DestReg)) {
    BuildMI(MBB, I, DL, get(Hexagon::TFR_PdRs), DestReg).
      addReg(SrcReg, getKillRegState(KillSrc));
  llvm_unreachable("Unimplemented");
// Spill SrcReg to stack slot FI, selecting the store opcode by register
// class and attaching a MachineMemOperand describing the stack access.
void HexagonInstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    unsigned SrcReg, bool isKill, int FI,
                    const TargetRegisterClass *RC,
                    const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBB.findDebugLoc(I);
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);
  // Memory operand lets later passes reason about this stack store.
  MachineMemOperand *MMO =
      MF.getMachineMemOperand(
                      MachinePointerInfo(PseudoSourceValue::getFixedStack(FI)),
                      MachineMemOperand::MOStore,
                      MFI.getObjectSize(FI),
  if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(Hexagon::STriw))
          .addFrameIndex(FI).addImm(0)
          .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
  } else if (Hexagon::DoubleRegsRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(Hexagon::STrid))
          .addFrameIndex(FI).addImm(0)
          .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
  } else if (Hexagon::PredRegsRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(Hexagon::STriw_pred))
          .addFrameIndex(FI).addImm(0)
          .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
    llvm_unreachable("Unimplemented");
// Store a register to an arbitrary address.  Not implemented for Hexagon;
// reaching this is a compiler bug.
void HexagonInstrInfo::storeRegToAddr(
                                 MachineFunction &MF, unsigned SrcReg,
                                 SmallVectorImpl<MachineOperand> &Addr,
                                 const TargetRegisterClass *RC,
                                 SmallVectorImpl<MachineInstr*> &NewMIs) const
  llvm_unreachable("Unimplemented");
514 void HexagonInstrInfo::
515 loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
516 unsigned DestReg, int FI,
517 const TargetRegisterClass *RC,
518 const TargetRegisterInfo *TRI) const {
519 DebugLoc DL = MBB.findDebugLoc(I);
520 MachineFunction &MF = *MBB.getParent();
521 MachineFrameInfo &MFI = *MF.getFrameInfo();
522 unsigned Align = MFI.getObjectAlignment(FI);
524 MachineMemOperand *MMO =
525 MF.getMachineMemOperand(
526 MachinePointerInfo(PseudoSourceValue::getFixedStack(FI)),
527 MachineMemOperand::MOLoad,
528 MFI.getObjectSize(FI),
530 if (RC == &Hexagon::IntRegsRegClass) {
531 BuildMI(MBB, I, DL, get(Hexagon::LDriw), DestReg)
532 .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
533 } else if (RC == &Hexagon::DoubleRegsRegClass) {
534 BuildMI(MBB, I, DL, get(Hexagon::LDrid), DestReg)
535 .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
536 } else if (RC == &Hexagon::PredRegsRegClass) {
537 BuildMI(MBB, I, DL, get(Hexagon::LDriw_pred), DestReg)
538 .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
540 llvm_unreachable("Can't store this register to stack slot");
// Load a register from an arbitrary address.  Not implemented for Hexagon;
// reaching this is a compiler bug.
void HexagonInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
                                        SmallVectorImpl<MachineOperand> &Addr,
                                        const TargetRegisterClass *RC,
                                 SmallVectorImpl<MachineInstr*> &NewMIs) const {
  llvm_unreachable("Unimplemented");
// Fold a memory operand into an instruction.  Not implemented yet; the
// target declines all folding requests.
MachineInstr *HexagonInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                        const SmallVectorImpl<unsigned> &Ops,
  // Hexagon_TODO: Implement.
// Build a DBG_VALUE instruction describing a variable that lives in a frame
// slot at the given offset, with MDPtr carrying the variable metadata.
HexagonInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
                                           int FrameIx, uint64_t Offset,
  MachineInstrBuilder MIB = BuildMI(MF, DL, get(Hexagon::DBG_VALUE))
    .addImm(0).addImm(Offset).addMetadata(MDPtr);
// Create a new virtual register of the register class appropriate for the
// given value type (predicate, 32-bit int/float, or 64-bit int/double).
unsigned HexagonInstrInfo::createVR(MachineFunction* MF, MVT VT) const {
  MachineRegisterInfo &RegInfo = MF->getRegInfo();
  const TargetRegisterClass *TRC;
    TRC = &Hexagon::PredRegsRegClass;
  } else if (VT == MVT::i32 || VT == MVT::f32) {
    TRC = &Hexagon::IntRegsRegClass;
  } else if (VT == MVT::i64 || VT == MVT::f64) {
    TRC = &Hexagon::DoubleRegsRegClass;
    llvm_unreachable("Cannot handle this register class");
  unsigned NewReg = RegInfo.createVirtualRegister(TRC);
// Return true if MI can take a constant extender.  Checks the TSFlags bit
// first, then a legacy opcode list.
bool HexagonInstrInfo::isExtendable(const MachineInstr *MI) const {
  // Constant extenders are allowed only for V4 and above.
  if (!Subtarget.hasV4TOps())
  // TSFlags encodes extendability as a single bit.
  const MCInstrDesc &MID = MI->getDesc();
  const uint64_t F = MID.TSFlags;
  if ((F >> HexagonII::ExtendablePos) & HexagonII::ExtendableMask)
  // TODO: This is largely obsolete now. Will need to be removed
  // in consecutive patches.
  switch(MI->getOpcode()) {
    // TFR_FI Remains a special case.
    case Hexagon::TFR_FI:
611 // This returns true in two cases:
612 // - The OP code itself indicates that this is an extended instruction.
613 // - One of MOs has been marked with HMOTF_ConstExtended flag.
614 bool HexagonInstrInfo::isExtended(const MachineInstr *MI) const {
615 // First check if this is permanently extended op code.
616 const uint64_t F = MI->getDesc().TSFlags;
617 if ((F >> HexagonII::ExtendedPos) & HexagonII::ExtendedMask)
619 // Use MO operand flags to determine if one of MI's operands
620 // has HMOTF_ConstExtended flag set.
621 for (MachineInstr::const_mop_iterator I = MI->operands_begin(),
622 E = MI->operands_end(); I != E; ++I) {
623 if (I->getTargetFlags() && HexagonII::HMOTF_ConstExtended)
// Thin wrapper: defer entirely to the MCInstrDesc branch property.
bool HexagonInstrInfo::isBranch (const MachineInstr *MI) const {
  return MI->getDesc().isBranch();
// Return true if MI is a new-value store (a .new store that consumes a value
// produced in the same packet).  Pure opcode table, grouped by access size.
bool HexagonInstrInfo::isNewValueStore(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
    default: return false;
    // Byte stores: plain, indexed, GP-relative, post-increment, predicated
    // (cPt/cNotPt and dot-new cdnPt/cdnNotPt forms), and absolute-address.
    case Hexagon::STrib_nv_V4:
    case Hexagon::STrib_indexed_nv_V4:
    case Hexagon::STrib_indexed_shl_nv_V4:
    case Hexagon::STrib_shl_nv_V4:
    case Hexagon::STb_GP_nv_V4:
    case Hexagon::POST_STbri_nv_V4:
    case Hexagon::STrib_cPt_nv_V4:
    case Hexagon::STrib_cdnPt_nv_V4:
    case Hexagon::STrib_cNotPt_nv_V4:
    case Hexagon::STrib_cdnNotPt_nv_V4:
    case Hexagon::STrib_indexed_cPt_nv_V4:
    case Hexagon::STrib_indexed_cdnPt_nv_V4:
    case Hexagon::STrib_indexed_cNotPt_nv_V4:
    case Hexagon::STrib_indexed_cdnNotPt_nv_V4:
    case Hexagon::STrib_indexed_shl_cPt_nv_V4:
    case Hexagon::STrib_indexed_shl_cdnPt_nv_V4:
    case Hexagon::STrib_indexed_shl_cNotPt_nv_V4:
    case Hexagon::STrib_indexed_shl_cdnNotPt_nv_V4:
    case Hexagon::POST_STbri_cPt_nv_V4:
    case Hexagon::POST_STbri_cdnPt_nv_V4:
    case Hexagon::POST_STbri_cNotPt_nv_V4:
    case Hexagon::POST_STbri_cdnNotPt_nv_V4:
    case Hexagon::STb_GP_cPt_nv_V4:
    case Hexagon::STb_GP_cNotPt_nv_V4:
    case Hexagon::STb_GP_cdnPt_nv_V4:
    case Hexagon::STb_GP_cdnNotPt_nv_V4:
    case Hexagon::STrib_abs_nv_V4:
    case Hexagon::STrib_abs_cPt_nv_V4:
    case Hexagon::STrib_abs_cdnPt_nv_V4:
    case Hexagon::STrib_abs_cNotPt_nv_V4:
    case Hexagon::STrib_abs_cdnNotPt_nv_V4:
    // Halfword stores: same addressing-mode/predication matrix as above.
    case Hexagon::STrih_nv_V4:
    case Hexagon::STrih_indexed_nv_V4:
    case Hexagon::STrih_indexed_shl_nv_V4:
    case Hexagon::STrih_shl_nv_V4:
    case Hexagon::STh_GP_nv_V4:
    case Hexagon::POST_SThri_nv_V4:
    case Hexagon::STrih_cPt_nv_V4:
    case Hexagon::STrih_cdnPt_nv_V4:
    case Hexagon::STrih_cNotPt_nv_V4:
    case Hexagon::STrih_cdnNotPt_nv_V4:
    case Hexagon::STrih_indexed_cPt_nv_V4:
    case Hexagon::STrih_indexed_cdnPt_nv_V4:
    case Hexagon::STrih_indexed_cNotPt_nv_V4:
    case Hexagon::STrih_indexed_cdnNotPt_nv_V4:
    case Hexagon::STrih_indexed_shl_cPt_nv_V4:
    case Hexagon::STrih_indexed_shl_cdnPt_nv_V4:
    case Hexagon::STrih_indexed_shl_cNotPt_nv_V4:
    case Hexagon::STrih_indexed_shl_cdnNotPt_nv_V4:
    case Hexagon::POST_SThri_cPt_nv_V4:
    case Hexagon::POST_SThri_cdnPt_nv_V4:
    case Hexagon::POST_SThri_cNotPt_nv_V4:
    case Hexagon::POST_SThri_cdnNotPt_nv_V4:
    case Hexagon::STh_GP_cPt_nv_V4:
    case Hexagon::STh_GP_cNotPt_nv_V4:
    case Hexagon::STh_GP_cdnPt_nv_V4:
    case Hexagon::STh_GP_cdnNotPt_nv_V4:
    case Hexagon::STrih_abs_nv_V4:
    case Hexagon::STrih_abs_cPt_nv_V4:
    case Hexagon::STrih_abs_cdnPt_nv_V4:
    case Hexagon::STrih_abs_cNotPt_nv_V4:
    case Hexagon::STrih_abs_cdnNotPt_nv_V4:
    // Word stores: same addressing-mode/predication matrix as above.
    case Hexagon::STriw_nv_V4:
    case Hexagon::STriw_indexed_nv_V4:
    case Hexagon::STriw_indexed_shl_nv_V4:
    case Hexagon::STriw_shl_nv_V4:
    case Hexagon::STw_GP_nv_V4:
    case Hexagon::POST_STwri_nv_V4:
    case Hexagon::STriw_cPt_nv_V4:
    case Hexagon::STriw_cdnPt_nv_V4:
    case Hexagon::STriw_cNotPt_nv_V4:
    case Hexagon::STriw_cdnNotPt_nv_V4:
    case Hexagon::STriw_indexed_cPt_nv_V4:
    case Hexagon::STriw_indexed_cdnPt_nv_V4:
    case Hexagon::STriw_indexed_cNotPt_nv_V4:
    case Hexagon::STriw_indexed_cdnNotPt_nv_V4:
    case Hexagon::STriw_indexed_shl_cPt_nv_V4:
    case Hexagon::STriw_indexed_shl_cdnPt_nv_V4:
    case Hexagon::STriw_indexed_shl_cNotPt_nv_V4:
    case Hexagon::STriw_indexed_shl_cdnNotPt_nv_V4:
    case Hexagon::POST_STwri_cPt_nv_V4:
    case Hexagon::POST_STwri_cdnPt_nv_V4:
    case Hexagon::POST_STwri_cNotPt_nv_V4:
    case Hexagon::POST_STwri_cdnNotPt_nv_V4:
    case Hexagon::STw_GP_cPt_nv_V4:
    case Hexagon::STw_GP_cNotPt_nv_V4:
    case Hexagon::STw_GP_cdnPt_nv_V4:
    case Hexagon::STw_GP_cdnNotPt_nv_V4:
    case Hexagon::STriw_abs_nv_V4:
    case Hexagon::STriw_abs_cPt_nv_V4:
    case Hexagon::STriw_abs_cdnPt_nv_V4:
    case Hexagon::STriw_abs_cNotPt_nv_V4:
    case Hexagon::STriw_abs_cdnNotPt_nv_V4:
// An instruction is a new-value instruction if it is either a new-value
// jump or a new-value store.
bool HexagonInstrInfo::isNewValueInst(const MachineInstr *MI) const {
  if (isNewValueJump(MI))
  if (isNewValueStore(MI))
// Recognize the pseudo call that saves callee-saved registers via a
// runtime helper (V4 optimization).
bool HexagonInstrInfo::isSaveCalleeSavedRegsCall(const MachineInstr *MI) const {
  return MI->getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4;
// Return true if MI may be predicated.  Beyond the MCInstrDesc flag, the
// immediate operand of each memory/ALU form must fit the narrower field of
// the predicated encoding, hence the per-opcode range checks below.
bool HexagonInstrInfo::isPredicable(MachineInstr *MI) const {
  bool isPred = MI->getDesc().isPredicable();
  const int Opc = MI->getOpcode();
    return isInt<12>(MI->getOperand(1).getImm());
  // Stores: offset must be a properly aligned unsigned 6-bit field,
  // scaled by the access size (3 = dword, 2 = word, 1 = halfword).
  case Hexagon::STrid_indexed:
    return isShiftedUInt<6,3>(MI->getOperand(1).getImm());
  case Hexagon::STriw_indexed:
  case Hexagon::STriw_nv_V4:
    return isShiftedUInt<6,2>(MI->getOperand(1).getImm());
  case Hexagon::STrih_indexed:
  case Hexagon::STrih_nv_V4:
    return isShiftedUInt<6,1>(MI->getOperand(1).getImm());
  case Hexagon::STrib_indexed:
  case Hexagon::STrib_nv_V4:
    return isUInt<6>(MI->getOperand(1).getImm());
  // Loads: same scaled-unsigned-6-bit rule, offset is operand 2.
  case Hexagon::LDrid_indexed:
    return isShiftedUInt<6,3>(MI->getOperand(2).getImm());
  case Hexagon::LDriw_indexed:
    return isShiftedUInt<6,2>(MI->getOperand(2).getImm());
  case Hexagon::LDriuh:
  case Hexagon::LDrih_indexed:
  case Hexagon::LDriuh_indexed:
    return isShiftedUInt<6,1>(MI->getOperand(2).getImm());
  case Hexagon::LDriub:
  case Hexagon::LDrib_indexed:
  case Hexagon::LDriub_indexed:
    return isUInt<6>(MI->getOperand(2).getImm());
  // Post-increment loads: signed 4-bit increment, scaled by access size.
  case Hexagon::POST_LDrid:
    return isShiftedInt<4,3>(MI->getOperand(3).getImm());
  case Hexagon::POST_LDriw:
    return isShiftedInt<4,2>(MI->getOperand(3).getImm());
  case Hexagon::POST_LDrih:
  case Hexagon::POST_LDriuh:
    return isShiftedInt<4,1>(MI->getOperand(3).getImm());
  case Hexagon::POST_LDrib:
  case Hexagon::POST_LDriub:
    return isInt<4>(MI->getOperand(3).getImm());
  // Store-immediate: both the offset and the stored value are constrained.
  case Hexagon::STrib_imm_V4:
  case Hexagon::STrih_imm_V4:
  case Hexagon::STriw_imm_V4:
    return (isUInt<6>(MI->getOperand(1).getImm()) &&
            isInt<6>(MI->getOperand(2).getImm()));
  case Hexagon::ADD_ri:
    return isInt<8>(MI->getOperand(2).getImm());
  return Subtarget.hasV4TOps();
// This function performs the following inversions:
//   true-predicated opcode  -> false-predicated opcode, and vice versa.
// Prefers the TableGen relation maps; the switch below covers the few
// opcodes not yet migrated to the maps.
unsigned HexagonInstrInfo::getInvertedPredicatedOpcode(const int Opc) const {
  InvPredOpcode = isPredicatedTrue(Opc) ? Hexagon::getFalsePredOpcode(Opc)
                                        : Hexagon::getTruePredOpcode(Opc);
  if (InvPredOpcode >= 0) // Valid instruction with the inverted predicate.
    return InvPredOpcode;
  default: llvm_unreachable("Unexpected predicated instruction");
  case Hexagon::COMBINE_rr_cPt:
    return Hexagon::COMBINE_rr_cNotPt;
  case Hexagon::COMBINE_rr_cNotPt:
    return Hexagon::COMBINE_rr_cPt;
  case Hexagon::DEALLOC_RET_cPt_V4:
    return Hexagon::DEALLOC_RET_cNotPt_V4;
  case Hexagon::DEALLOC_RET_cNotPt_V4:
    return Hexagon::DEALLOC_RET_cPt_V4;
// Map an unpredicated opcode to its predicated form with the requested
// sense (true or inverted).  Prefers the TableGen relation map; the switch
// covers opcodes not yet migrated.
int HexagonInstrInfo::
getMatchingCondBranchOpcode(int Opc, bool invertPredicate) const {
  enum Hexagon::PredSense inPredSense;
  inPredSense = invertPredicate ? Hexagon::PredSense_false :
                                  Hexagon::PredSense_true;
  int CondOpcode = Hexagon::getPredOpcode(Opc, inPredSense);
  if (CondOpcode >= 0) // Valid Conditional opcode/instruction
  // This switch case will be removed once all the instructions have been
  // modified to use relation maps.
  case Hexagon::TFRI_f:
    return !invertPredicate ? Hexagon::TFRI_cPt_f :
                              Hexagon::TFRI_cNotPt_f;
  case Hexagon::COMBINE_rr:
    return !invertPredicate ? Hexagon::COMBINE_rr_cPt :
                              Hexagon::COMBINE_rr_cNotPt;
  case Hexagon::STriw_f:
    return !invertPredicate ? Hexagon::STriw_cPt :
                              Hexagon::STriw_cNotPt;
  case Hexagon::STriw_indexed_f:
    return !invertPredicate ? Hexagon::STriw_indexed_cPt :
                              Hexagon::STriw_indexed_cNotPt;
  case Hexagon::DEALLOC_RET_V4:
    return !invertPredicate ? Hexagon::DEALLOC_RET_cPt_V4 :
                              Hexagon::DEALLOC_RET_cNotPt_V4;
  llvm_unreachable("Unexpected predicable instruction");
// Convert MI in place to its predicated form: swap in the predicated
// opcode, then splice the predicate register into the operand list as the
// first input operand (right after the explicit defs), shifting the
// existing inputs right by one.  GlobalAddress operands cannot be shifted
// via ChangeTo* calls, so they are removed and re-added at the end.
bool HexagonInstrInfo::
PredicateInstruction(MachineInstr *MI,
                     const SmallVectorImpl<MachineOperand> &Cond) const {
  int Opc = MI->getOpcode();
  assert (isPredicable(MI) && "Expected predicable instruction");
  // A leading immediate 0 in Cond marks an inverted-sense predicate.
  bool invertJump = (!Cond.empty() && Cond[0].isImm() &&
                     (Cond[0].getImm() == 0));
  // This will change MI's opcode to its predicate version.
  // However, its operand list is still the old one, i.e. the
  // non-predicate one.
  MI->setDesc(get(getMatchingCondBranchOpcode(Opc, invertJump)));
  unsigned int GAIdx = 0;
  // Indicates whether the current MI has a GlobalAddress operand
  bool hasGAOpnd = false;
  std::vector<MachineOperand> tmpOpnds;
  // Indicates whether we need to shift operands to right.
  bool needShift = true;
  // The predicate is ALWAYS the FIRST input operand !!!
  if (MI->getNumOperands() == 0) {
    // The non-predicate version of MI does not take any operands,
    // i.e. no outs and no ins. In this condition, the predicate
    // operand will be directly placed at Operands[0]. No operand
  else if ( MI->getOperand(MI->getNumOperands()-1).isReg()
         && MI->getOperand(MI->getNumOperands()-1).isDef()
         && !MI->getOperand(MI->getNumOperands()-1).isImplicit()) {
    // The non-predicate version of MI does not have any input operands.
    // In this condition, we extend the length of Operands[] by one and
    // copy the original last operand to the newly allocated slot.
    // At this moment, it is just a place holder. Later, we will put
    // predicate operand directly into it. No operand shift is needed.
    // Example: r0=BARRIER (this is a faked insn used here for illustration)
    MI->addOperand(MI->getOperand(MI->getNumOperands()-1));
    oper = MI->getNumOperands() - 2;
    // We need to right shift all input operands by one. Duplicate the
    // last operand into the newly allocated slot.
    MI->addOperand(MI->getOperand(MI->getNumOperands()-1));
  // Operands[ MI->getNumOperands() - 2 ] has been copied into
  // Operands[ MI->getNumOperands() - 1 ], so we start from
  // Operands[ MI->getNumOperands() - 3 ].
  // oper is a signed int.
  // It is ok if "MI->getNumOperands()-3" is -3, -2, or -1.
  for (oper = MI->getNumOperands() - 3; oper >= 0; --oper)
    MachineOperand &MO = MI->getOperand(oper);
    // Opnd[0] Opnd[1] Opnd[2] Opnd[3] Opnd[4] Opnd[5] Opnd[6] Opnd[7]
    // <Def0> <Def1> <Use0> <Use1> <ImpDef0> <ImpDef1> <ImpUse0> <ImpUse1>
    // Predicate Operand here
    if (MO.isReg() && !MO.isUse() && !MO.isImplicit()) {
    // Shift each operand kind right by one, preserving all its flags.
    MI->getOperand(oper+1).ChangeToRegister(MO.getReg(), MO.isDef(),
                                            MO.isImplicit(), MO.isKill(),
                                            MO.isDead(), MO.isUndef(),
    else if (MO.isImm()) {
      MI->getOperand(oper+1).ChangeToImmediate(MO.getImm());
    else if (MO.isGlobal()) {
      // MI can not have more than one GlobalAddress operand.
      assert(hasGAOpnd == false && "MI can only have one GlobalAddress opnd");
      // There is no member function called "ChangeToGlobalAddress" in the
      // MachineOperand class (not like "ChangeToRegister" and
      // "ChangeToImmediate"). So we have to remove them from Operands[] list
      // first, and then add them back after we have inserted the predicate
      // operand. tmpOpnds[] is to remember these operands before we remove
      tmpOpnds.push_back(MO);
      // Operands[oper] is a GlobalAddress operand;
      // Operands[oper+1] has been copied into Operands[oper+2];
      assert(false && "Unexpected operand type");
  // regPos selects the predicate register out of Cond (inverted sense
  // stores it at index 1, behind the leading 0 immediate).
  int regPos = invertJump ? 1 : 0;
  MachineOperand PredMO = Cond[regPos];
  // [oper] now points to the last explicit Def. Predicate operand must be
  // located at [oper+1]. See diagram above.
  // This assumes that the predicate is always the first operand,
  // i.e. Operands[0+numResults], in the set of inputs
  // It is better to have an assert here to check this. But I don't know how
  // to write this assert because findFirstPredOperandIdx() would return -1
  if (oper < -1) oper = -1;
  MI->getOperand(oper+1).ChangeToRegister(PredMO.getReg(), PredMO.isDef(),
                                          PredMO.isImplicit(), false,
                                          PredMO.isDead(), PredMO.isUndef(),
  // The predicate register now has an extra use; drop stale kill flags.
  MachineRegisterInfo &RegInfo = MI->getParent()->getParent()->getRegInfo();
  RegInfo.clearKillFlags(PredMO.getReg());
  // Operands[GAIdx] is the original GlobalAddress operand, which is
  // already copied into tmpOpnds[0].
  // Operands[GAIdx] now stores a copy of Operands[GAIdx-1]
  // Operands[GAIdx+1] has already been copied into Operands[GAIdx+2],
  // so we start from [GAIdx+2]
  for (i = GAIdx + 2; i < MI->getNumOperands(); ++i)
    tmpOpnds.push_back(MI->getOperand(i));
  // Remove all operands in range [ (GAIdx+1) ... (MI->getNumOperands()-1) ]
  // It is very important that we always remove from the end of Operands[]
  // MI->getNumOperands() is at least 2 if program goes to here.
  for (i = MI->getNumOperands() - 1; i > GAIdx; --i)
    MI->RemoveOperand(i);
  for (i = 0; i < tmpOpnds.size(); ++i)
    MI->addOperand(tmpOpnds[i]);
// Single-block if-conversion profitability query (TargetInstrInfo hook);
// Hexagon does not veto based on cycle counts here.
isProfitableToIfCvt(MachineBasicBlock &MBB,
                    unsigned ExtraPredCycles,
                    const BranchProbability &Probability) const {
// Two-block (diamond) if-conversion profitability query; same policy as
// the single-block overload above.
isProfitableToIfCvt(MachineBasicBlock &TMBB,
                    unsigned NumTCycles,
                    unsigned ExtraTCycles,
                    MachineBasicBlock &FMBB,
                    unsigned NumFCycles,
                    unsigned ExtraFCycles,
                    const BranchProbability &Probability) const {
// Returns true if an instruction is predicated irrespective of the predicate
// sense. For example, all of the following will return true.
// if (p0) R1 = add(R2, R3)
// if (!p0) R1 = add(R2, R3)
// if (p0.new) R1 = add(R2, R3)
// if (!p0.new) R1 = add(R2, R3)
bool HexagonInstrInfo::isPredicated(const MachineInstr *MI) const {
  // Predication status is a single bit in the TableGen'erated TSFlags.
  const uint64_t F = MI->getDesc().TSFlags;
  return ((F >> HexagonII::PredicatedPos) & HexagonII::PredicatedMask);
// Opcode-based variant of isPredicated: same TSFlags test, looked up from
// the instruction table instead of a MachineInstr.
bool HexagonInstrInfo::isPredicated(unsigned Opcode) const {
  const uint64_t F = get(Opcode).TSFlags;
  return ((F >> HexagonII::PredicatedPos) & HexagonII::PredicatedMask);
// True if MI is predicated on the TRUE sense of its predicate (i.e. the
// PredicatedFalse TSFlags bit is clear).  Caller must pass a predicated MI.
bool HexagonInstrInfo::isPredicatedTrue(const MachineInstr *MI) const {
  const uint64_t F = MI->getDesc().TSFlags;
  assert(isPredicated(MI));
  return (!((F >> HexagonII::PredicatedFalsePos) &
            HexagonII::PredicatedFalseMask));
// Opcode-based variant of isPredicatedTrue.
bool HexagonInstrInfo::isPredicatedTrue(unsigned Opcode) const {
  const uint64_t F = get(Opcode).TSFlags;
  // Make sure that the instruction is predicated.
  assert((F>> HexagonII::PredicatedPos) & HexagonII::PredicatedMask);
  return (!((F >> HexagonII::PredicatedFalsePos) &
            HexagonII::PredicatedFalseMask));
1109 bool HexagonInstrInfo::isPredicatedNew(const MachineInstr *MI) const {
1110 const uint64_t F = MI->getDesc().TSFlags;
1112 assert(isPredicated(MI));
1113 return ((F >> HexagonII::PredicatedNewPos) & HexagonII::PredicatedNewMask);
1116 bool HexagonInstrInfo::isPredicatedNew(unsigned Opcode) const {
1117 const uint64_t F = get(Opcode).TSFlags;
1119 assert(isPredicated(Opcode));
1120 return ((F >> HexagonII::PredicatedNewPos) & HexagonII::PredicatedNewMask);
1124 HexagonInstrInfo::DefinesPredicate(MachineInstr *MI,
1125 std::vector<MachineOperand> &Pred) const {
1126 for (unsigned oper = 0; oper < MI->getNumOperands(); ++oper) {
1127 MachineOperand MO = MI->getOperand(oper);
1128 if (MO.isReg() && MO.isDef()) {
1129 const TargetRegisterClass* RC = RI.getMinimalPhysRegClass(MO.getReg());
1130 if (RC == &Hexagon::PredRegsRegClass) {
1142 SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
1143 const SmallVectorImpl<MachineOperand> &Pred2) const {
1150 // We indicate that we want to reverse the branch by
1151 // inserting a 0 at the beginning of the Cond vector.
1153 bool HexagonInstrInfo::
1154 ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
1155 if (!Cond.empty() && Cond[0].isImm() && Cond[0].getImm() == 0) {
1156 Cond.erase(Cond.begin());
1158 Cond.insert(Cond.begin(), MachineOperand::CreateImm(0));
1164 bool HexagonInstrInfo::
1165 isProfitableToDupForIfCvt(MachineBasicBlock &MBB,unsigned NumInstrs,
1166 const BranchProbability &Probability) const {
1167 return (NumInstrs <= 4);
1170 bool HexagonInstrInfo::isDeallocRet(const MachineInstr *MI) const {
1171 switch (MI->getOpcode()) {
1172 default: return false;
1173 case Hexagon::DEALLOC_RET_V4 :
1174 case Hexagon::DEALLOC_RET_cPt_V4 :
1175 case Hexagon::DEALLOC_RET_cNotPt_V4 :
1176 case Hexagon::DEALLOC_RET_cdnPnt_V4 :
1177 case Hexagon::DEALLOC_RET_cNotdnPnt_V4 :
1178 case Hexagon::DEALLOC_RET_cdnPt_V4 :
1179 case Hexagon::DEALLOC_RET_cNotdnPt_V4 :
1185 bool HexagonInstrInfo::
1186 isValidOffset(const int Opcode, const int Offset) const {
1187 // This function is to check whether the "Offset" is in the correct range of
1188 // the given "Opcode". If "Offset" is not in the correct range, "ADD_ri" is
1189 // inserted to calculate the final address. Due to this reason, the function
1190 // assumes that the "Offset" has correct alignment.
1191 // We used to assert if the offset was not properly aligned, however,
1192 // there are cases where a misaligned pointer recast can cause this
1193 // problem, and we need to allow for it. The front end warns of such
1194 // misaligns with respect to load size.
1198 case Hexagon::LDriw:
1199 case Hexagon::LDriw_indexed:
1200 case Hexagon::LDriw_f:
1201 case Hexagon::STriw_indexed:
1202 case Hexagon::STriw:
1203 case Hexagon::STriw_f:
1204 return (Offset >= Hexagon_MEMW_OFFSET_MIN) &&
1205 (Offset <= Hexagon_MEMW_OFFSET_MAX);
1207 case Hexagon::LDrid:
1208 case Hexagon::LDrid_indexed:
1209 case Hexagon::LDrid_f:
1210 case Hexagon::STrid:
1211 case Hexagon::STrid_indexed:
1212 case Hexagon::STrid_f:
1213 return (Offset >= Hexagon_MEMD_OFFSET_MIN) &&
1214 (Offset <= Hexagon_MEMD_OFFSET_MAX);
1216 case Hexagon::LDrih:
1217 case Hexagon::LDriuh:
1218 case Hexagon::STrih:
1219 return (Offset >= Hexagon_MEMH_OFFSET_MIN) &&
1220 (Offset <= Hexagon_MEMH_OFFSET_MAX);
1222 case Hexagon::LDrib:
1223 case Hexagon::STrib:
1224 case Hexagon::LDriub:
1225 return (Offset >= Hexagon_MEMB_OFFSET_MIN) &&
1226 (Offset <= Hexagon_MEMB_OFFSET_MAX);
1228 case Hexagon::ADD_ri:
1229 case Hexagon::TFR_FI:
1230 return (Offset >= Hexagon_ADDI_OFFSET_MIN) &&
1231 (Offset <= Hexagon_ADDI_OFFSET_MAX);
1233 case Hexagon::MemOPw_ADDi_V4 :
1234 case Hexagon::MemOPw_SUBi_V4 :
1235 case Hexagon::MemOPw_ADDr_V4 :
1236 case Hexagon::MemOPw_SUBr_V4 :
1237 case Hexagon::MemOPw_ANDr_V4 :
1238 case Hexagon::MemOPw_ORr_V4 :
1239 return (0 <= Offset && Offset <= 255);
1241 case Hexagon::MemOPh_ADDi_V4 :
1242 case Hexagon::MemOPh_SUBi_V4 :
1243 case Hexagon::MemOPh_ADDr_V4 :
1244 case Hexagon::MemOPh_SUBr_V4 :
1245 case Hexagon::MemOPh_ANDr_V4 :
1246 case Hexagon::MemOPh_ORr_V4 :
1247 return (0 <= Offset && Offset <= 127);
1249 case Hexagon::MemOPb_ADDi_V4 :
1250 case Hexagon::MemOPb_SUBi_V4 :
1251 case Hexagon::MemOPb_ADDr_V4 :
1252 case Hexagon::MemOPb_SUBr_V4 :
1253 case Hexagon::MemOPb_ANDr_V4 :
1254 case Hexagon::MemOPb_ORr_V4 :
1255 return (0 <= Offset && Offset <= 63);
1257 // LDri_pred and STriw_pred are pseudo operations, so it has to take offset of
1258 // any size. Later pass knows how to handle it.
1259 case Hexagon::STriw_pred:
1260 case Hexagon::LDriw_pred:
1263 case Hexagon::LOOP0_i:
1264 return isUInt<10>(Offset);
1266 // INLINEASM is very special.
1267 case Hexagon::INLINEASM:
1271 llvm_unreachable("No offset range is defined for this opcode. "
1272 "Please define it in the above switch statement!");
1277 // Check if the Offset is a valid auto-inc imm by Load/Store Type.
1279 bool HexagonInstrInfo::
1280 isValidAutoIncImm(const EVT VT, const int Offset) const {
1282 if (VT == MVT::i64) {
1283 return (Offset >= Hexagon_MEMD_AUTOINC_MIN &&
1284 Offset <= Hexagon_MEMD_AUTOINC_MAX &&
1285 (Offset & 0x7) == 0);
1287 if (VT == MVT::i32) {
1288 return (Offset >= Hexagon_MEMW_AUTOINC_MIN &&
1289 Offset <= Hexagon_MEMW_AUTOINC_MAX &&
1290 (Offset & 0x3) == 0);
1292 if (VT == MVT::i16) {
1293 return (Offset >= Hexagon_MEMH_AUTOINC_MIN &&
1294 Offset <= Hexagon_MEMH_AUTOINC_MAX &&
1295 (Offset & 0x1) == 0);
1297 if (VT == MVT::i8) {
1298 return (Offset >= Hexagon_MEMB_AUTOINC_MIN &&
1299 Offset <= Hexagon_MEMB_AUTOINC_MAX);
1301 llvm_unreachable("Not an auto-inc opc!");
1305 bool HexagonInstrInfo::
1306 isMemOp(const MachineInstr *MI) const {
1307 switch (MI->getOpcode())
1309 default: return false;
1310 case Hexagon::MemOPw_ADDi_V4 :
1311 case Hexagon::MemOPw_SUBi_V4 :
1312 case Hexagon::MemOPw_ADDr_V4 :
1313 case Hexagon::MemOPw_SUBr_V4 :
1314 case Hexagon::MemOPw_ANDr_V4 :
1315 case Hexagon::MemOPw_ORr_V4 :
1316 case Hexagon::MemOPh_ADDi_V4 :
1317 case Hexagon::MemOPh_SUBi_V4 :
1318 case Hexagon::MemOPh_ADDr_V4 :
1319 case Hexagon::MemOPh_SUBr_V4 :
1320 case Hexagon::MemOPh_ANDr_V4 :
1321 case Hexagon::MemOPh_ORr_V4 :
1322 case Hexagon::MemOPb_ADDi_V4 :
1323 case Hexagon::MemOPb_SUBi_V4 :
1324 case Hexagon::MemOPb_ADDr_V4 :
1325 case Hexagon::MemOPb_SUBr_V4 :
1326 case Hexagon::MemOPb_ANDr_V4 :
1327 case Hexagon::MemOPb_ORr_V4 :
1328 case Hexagon::MemOPb_SETBITi_V4:
1329 case Hexagon::MemOPh_SETBITi_V4:
1330 case Hexagon::MemOPw_SETBITi_V4:
1331 case Hexagon::MemOPb_CLRBITi_V4:
1332 case Hexagon::MemOPh_CLRBITi_V4:
1333 case Hexagon::MemOPw_CLRBITi_V4:
1340 bool HexagonInstrInfo::
1341 isSpillPredRegOp(const MachineInstr *MI) const {
1342 switch (MI->getOpcode()) {
1343 default: return false;
1344 case Hexagon::STriw_pred :
1345 case Hexagon::LDriw_pred :
1350 bool HexagonInstrInfo::isNewValueJumpCandidate(const MachineInstr *MI) const {
1351 switch (MI->getOpcode()) {
1352 default: return false;
1353 case Hexagon::CMPEQrr:
1354 case Hexagon::CMPEQri:
1355 case Hexagon::CMPGTrr:
1356 case Hexagon::CMPGTri:
1357 case Hexagon::CMPGTUrr:
1358 case Hexagon::CMPGTUri:
1363 bool HexagonInstrInfo::
1364 isConditionalTransfer (const MachineInstr *MI) const {
1365 switch (MI->getOpcode()) {
1366 default: return false;
1367 case Hexagon::TFR_cPt:
1368 case Hexagon::TFR_cNotPt:
1369 case Hexagon::TFRI_cPt:
1370 case Hexagon::TFRI_cNotPt:
1371 case Hexagon::TFR_cdnPt:
1372 case Hexagon::TFR_cdnNotPt:
1373 case Hexagon::TFRI_cdnPt:
1374 case Hexagon::TFRI_cdnNotPt:
1379 bool HexagonInstrInfo::isConditionalALU32 (const MachineInstr* MI) const {
1380 const HexagonRegisterInfo& QRI = getRegisterInfo();
1381 switch (MI->getOpcode())
1383 default: return false;
1384 case Hexagon::ADD_ri_cPt:
1385 case Hexagon::ADD_ri_cNotPt:
1386 case Hexagon::ADD_rr_cPt:
1387 case Hexagon::ADD_rr_cNotPt:
1388 case Hexagon::XOR_rr_cPt:
1389 case Hexagon::XOR_rr_cNotPt:
1390 case Hexagon::AND_rr_cPt:
1391 case Hexagon::AND_rr_cNotPt:
1392 case Hexagon::OR_rr_cPt:
1393 case Hexagon::OR_rr_cNotPt:
1394 case Hexagon::SUB_rr_cPt:
1395 case Hexagon::SUB_rr_cNotPt:
1396 case Hexagon::COMBINE_rr_cPt:
1397 case Hexagon::COMBINE_rr_cNotPt:
1399 case Hexagon::ASLH_cPt_V4:
1400 case Hexagon::ASLH_cNotPt_V4:
1401 case Hexagon::ASRH_cPt_V4:
1402 case Hexagon::ASRH_cNotPt_V4:
1403 case Hexagon::SXTB_cPt_V4:
1404 case Hexagon::SXTB_cNotPt_V4:
1405 case Hexagon::SXTH_cPt_V4:
1406 case Hexagon::SXTH_cNotPt_V4:
1407 case Hexagon::ZXTB_cPt_V4:
1408 case Hexagon::ZXTB_cNotPt_V4:
1409 case Hexagon::ZXTH_cPt_V4:
1410 case Hexagon::ZXTH_cNotPt_V4:
1411 return QRI.Subtarget.hasV4TOps();
1415 bool HexagonInstrInfo::
1416 isConditionalLoad (const MachineInstr* MI) const {
1417 const HexagonRegisterInfo& QRI = getRegisterInfo();
1418 switch (MI->getOpcode())
1420 default: return false;
1421 case Hexagon::LDrid_cPt :
1422 case Hexagon::LDrid_cNotPt :
1423 case Hexagon::LDrid_indexed_cPt :
1424 case Hexagon::LDrid_indexed_cNotPt :
1425 case Hexagon::LDriw_cPt :
1426 case Hexagon::LDriw_cNotPt :
1427 case Hexagon::LDriw_indexed_cPt :
1428 case Hexagon::LDriw_indexed_cNotPt :
1429 case Hexagon::LDrih_cPt :
1430 case Hexagon::LDrih_cNotPt :
1431 case Hexagon::LDrih_indexed_cPt :
1432 case Hexagon::LDrih_indexed_cNotPt :
1433 case Hexagon::LDrib_cPt :
1434 case Hexagon::LDrib_cNotPt :
1435 case Hexagon::LDrib_indexed_cPt :
1436 case Hexagon::LDrib_indexed_cNotPt :
1437 case Hexagon::LDriuh_cPt :
1438 case Hexagon::LDriuh_cNotPt :
1439 case Hexagon::LDriuh_indexed_cPt :
1440 case Hexagon::LDriuh_indexed_cNotPt :
1441 case Hexagon::LDriub_cPt :
1442 case Hexagon::LDriub_cNotPt :
1443 case Hexagon::LDriub_indexed_cPt :
1444 case Hexagon::LDriub_indexed_cNotPt :
1446 case Hexagon::POST_LDrid_cPt :
1447 case Hexagon::POST_LDrid_cNotPt :
1448 case Hexagon::POST_LDriw_cPt :
1449 case Hexagon::POST_LDriw_cNotPt :
1450 case Hexagon::POST_LDrih_cPt :
1451 case Hexagon::POST_LDrih_cNotPt :
1452 case Hexagon::POST_LDrib_cPt :
1453 case Hexagon::POST_LDrib_cNotPt :
1454 case Hexagon::POST_LDriuh_cPt :
1455 case Hexagon::POST_LDriuh_cNotPt :
1456 case Hexagon::POST_LDriub_cPt :
1457 case Hexagon::POST_LDriub_cNotPt :
1458 return QRI.Subtarget.hasV4TOps();
1459 case Hexagon::LDrid_indexed_shl_cPt_V4 :
1460 case Hexagon::LDrid_indexed_shl_cNotPt_V4 :
1461 case Hexagon::LDrib_indexed_shl_cPt_V4 :
1462 case Hexagon::LDrib_indexed_shl_cNotPt_V4 :
1463 case Hexagon::LDriub_indexed_shl_cPt_V4 :
1464 case Hexagon::LDriub_indexed_shl_cNotPt_V4 :
1465 case Hexagon::LDrih_indexed_shl_cPt_V4 :
1466 case Hexagon::LDrih_indexed_shl_cNotPt_V4 :
1467 case Hexagon::LDriuh_indexed_shl_cPt_V4 :
1468 case Hexagon::LDriuh_indexed_shl_cNotPt_V4 :
1469 case Hexagon::LDriw_indexed_shl_cPt_V4 :
1470 case Hexagon::LDriw_indexed_shl_cNotPt_V4 :
1471 return QRI.Subtarget.hasV4TOps();
1475 // Returns true if an instruction is a conditional store.
1477 // Note: It doesn't include conditional new-value stores as they can't be
1478 // converted to .new predicate.
1480 // p.new NV store [ if(p0.new)memw(R0+#0)=R2.new ]
1482 // / \ (not OK. it will cause new-value store to be
1483 // / X conditional on p0.new while R2 producer is
1486 // p.new store p.old NV store
1487 // [if(p0.new)memw(R0+#0)=R2] [if(p0)memw(R0+#0)=R2.new]
1493 // [if (p0)memw(R0+#0)=R2]
1495 // The above diagram shows the steps involoved in the conversion of a predicated
1496 // store instruction to its .new predicated new-value form.
1498 // The following set of instructions further explains the scenario where
1499 // conditional new-value store becomes invalid when promoted to .new predicate
1502 // { 1) if (p0) r0 = add(r1, r2)
1503 // 2) p0 = cmp.eq(r3, #0) }
1505 // 3) if (p0) memb(r1+#0) = r0 --> this instruction can't be grouped with
1506 // the first two instructions because in instr 1, r0 is conditional on old value
1507 // of p0 but its use in instr 3 is conditional on p0 modified by instr 2 which
1508 // is not valid for new-value stores.
1509 bool HexagonInstrInfo::
1510 isConditionalStore (const MachineInstr* MI) const {
1511 const HexagonRegisterInfo& QRI = getRegisterInfo();
1512 switch (MI->getOpcode())
1514 default: return false;
1515 case Hexagon::STrib_imm_cPt_V4 :
1516 case Hexagon::STrib_imm_cNotPt_V4 :
1517 case Hexagon::STrib_indexed_shl_cPt_V4 :
1518 case Hexagon::STrib_indexed_shl_cNotPt_V4 :
1519 case Hexagon::STrib_cPt :
1520 case Hexagon::STrib_cNotPt :
1521 case Hexagon::POST_STbri_cPt :
1522 case Hexagon::POST_STbri_cNotPt :
1523 case Hexagon::STrid_indexed_cPt :
1524 case Hexagon::STrid_indexed_cNotPt :
1525 case Hexagon::STrid_indexed_shl_cPt_V4 :
1526 case Hexagon::POST_STdri_cPt :
1527 case Hexagon::POST_STdri_cNotPt :
1528 case Hexagon::STrih_cPt :
1529 case Hexagon::STrih_cNotPt :
1530 case Hexagon::STrih_indexed_cPt :
1531 case Hexagon::STrih_indexed_cNotPt :
1532 case Hexagon::STrih_imm_cPt_V4 :
1533 case Hexagon::STrih_imm_cNotPt_V4 :
1534 case Hexagon::STrih_indexed_shl_cPt_V4 :
1535 case Hexagon::STrih_indexed_shl_cNotPt_V4 :
1536 case Hexagon::POST_SThri_cPt :
1537 case Hexagon::POST_SThri_cNotPt :
1538 case Hexagon::STriw_cPt :
1539 case Hexagon::STriw_cNotPt :
1540 case Hexagon::STriw_indexed_cPt :
1541 case Hexagon::STriw_indexed_cNotPt :
1542 case Hexagon::STriw_imm_cPt_V4 :
1543 case Hexagon::STriw_imm_cNotPt_V4 :
1544 case Hexagon::STriw_indexed_shl_cPt_V4 :
1545 case Hexagon::STriw_indexed_shl_cNotPt_V4 :
1546 case Hexagon::POST_STwri_cPt :
1547 case Hexagon::POST_STwri_cNotPt :
1548 return QRI.Subtarget.hasV4TOps();
1550 // V4 global address store before promoting to dot new.
1551 case Hexagon::STd_GP_cPt_V4 :
1552 case Hexagon::STd_GP_cNotPt_V4 :
1553 case Hexagon::STb_GP_cPt_V4 :
1554 case Hexagon::STb_GP_cNotPt_V4 :
1555 case Hexagon::STh_GP_cPt_V4 :
1556 case Hexagon::STh_GP_cNotPt_V4 :
1557 case Hexagon::STw_GP_cPt_V4 :
1558 case Hexagon::STw_GP_cNotPt_V4 :
1559 return QRI.Subtarget.hasV4TOps();
1561 // Predicated new value stores (i.e. if (p0) memw(..)=r0.new) are excluded
1562 // from the "Conditional Store" list. Because a predicated new value store
1563 // would NOT be promoted to a double dot new store. See diagram below:
1564 // This function returns yes for those stores that are predicated but not
1565 // yet promoted to predicate dot new instructions.
1567 // +---------------------+
1568 // /-----| if (p0) memw(..)=r0 |---------\~
1569 // || +---------------------+ ||
1570 // promote || /\ /\ || promote
1572 // \||/ demote || \||/
1574 // +-------------------------+ || +-------------------------+
1575 // | if (p0.new) memw(..)=r0 | || | if (p0) memw(..)=r0.new |
1576 // +-------------------------+ || +-------------------------+
1579 // promote || \/ NOT possible
1583 // +-----------------------------+
1584 // | if (p0.new) memw(..)=r0.new |
1585 // +-----------------------------+
1586 // Double Dot New Store
1592 bool HexagonInstrInfo::isNewValueJump(const MachineInstr *MI) const {
1593 if (isNewValue(MI) && isBranch(MI))
1598 bool HexagonInstrInfo::isPostIncrement (const MachineInstr* MI) const {
1599 return (getAddrMode(MI) == HexagonII::PostInc);
1602 bool HexagonInstrInfo::isNewValue(const MachineInstr* MI) const {
1603 const uint64_t F = MI->getDesc().TSFlags;
1604 return ((F >> HexagonII::NewValuePos) & HexagonII::NewValueMask);
1607 // Returns true, if any one of the operands is a dot new
1608 // insn, whether it is predicated dot new or register dot new.
1609 bool HexagonInstrInfo::isDotNewInst (const MachineInstr* MI) const {
1610 return (isNewValueInst(MI) ||
1611 (isPredicated(MI) && isPredicatedNew(MI)));
1614 // Return .new predicate version for an instruction.
1615 int HexagonInstrInfo::GetDotNewPredOp(MachineInstr *MI,
1616 const MachineBranchProbabilityInfo
1619 int NewOpcode = Hexagon::getPredNewOpcode(MI->getOpcode());
1620 if (NewOpcode >= 0) // Valid predicate new instruction
1623 switch (MI->getOpcode()) {
1624 default: llvm_unreachable("Unknown .new type");
1626 case Hexagon::JMP_t:
1627 case Hexagon::JMP_f:
1628 return getDotNewPredJumpOp(MI, MBPI);
1630 case Hexagon::JMPR_t:
1631 return Hexagon::JMPR_tnew_tV3;
1633 case Hexagon::JMPR_f:
1634 return Hexagon::JMPR_fnew_tV3;
1636 case Hexagon::JMPret_t:
1637 return Hexagon::JMPret_tnew_tV3;
1639 case Hexagon::JMPret_f:
1640 return Hexagon::JMPret_fnew_tV3;
1643 // Conditional combine
1644 case Hexagon::COMBINE_rr_cPt :
1645 return Hexagon::COMBINE_rr_cdnPt;
1646 case Hexagon::COMBINE_rr_cNotPt :
1647 return Hexagon::COMBINE_rr_cdnNotPt;
1652 unsigned HexagonInstrInfo::getAddrMode(const MachineInstr* MI) const {
1653 const uint64_t F = MI->getDesc().TSFlags;
1655 return((F >> HexagonII::AddrModePos) & HexagonII::AddrModeMask);
1658 /// immediateExtend - Changes the instruction in place to one using an immediate
1660 void HexagonInstrInfo::immediateExtend(MachineInstr *MI) const {
1661 assert((isExtendable(MI)||isConstExtended(MI)) &&
1662 "Instruction must be extendable");
1663 // Find which operand is extendable.
1664 short ExtOpNum = getCExtOpNum(MI);
1665 MachineOperand &MO = MI->getOperand(ExtOpNum);
1666 // This needs to be something we understand.
1667 assert((MO.isMBB() || MO.isImm()) &&
1668 "Branch with unknown extendable field type");
1669 // Mark given operand as extended.
1670 MO.addTargetFlag(HexagonII::HMOTF_ConstExtended);
1673 DFAPacketizer *HexagonInstrInfo::
1674 CreateTargetScheduleState(const TargetMachine *TM,
1675 const ScheduleDAG *DAG) const {
1676 const InstrItineraryData *II = TM->getInstrItineraryData();
1677 return TM->getSubtarget<HexagonGenSubtargetInfo>().createDFAPacketizer(II);
1680 bool HexagonInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
1681 const MachineBasicBlock *MBB,
1682 const MachineFunction &MF) const {
1683 // Debug info is never a scheduling boundary. It's necessary to be explicit
1684 // due to the special treatment of IT instructions below, otherwise a
1685 // dbg_value followed by an IT will result in the IT instruction being
1686 // considered a scheduling hazard, which is wrong. It should be the actual
1687 // instruction preceding the dbg_value instruction(s), just like it is
1688 // when debug info is not present.
1689 if (MI->isDebugValue())
1692 // Terminators and labels can't be scheduled around.
1693 if (MI->getDesc().isTerminator() || MI->isLabel() || MI->isInlineAsm())
1699 bool HexagonInstrInfo::isConstExtended(MachineInstr *MI) const {
1701 // Constant extenders are allowed only for V4 and above.
1702 if (!Subtarget.hasV4TOps())
1705 const uint64_t F = MI->getDesc().TSFlags;
1706 unsigned isExtended = (F >> HexagonII::ExtendedPos) & HexagonII::ExtendedMask;
1707 if (isExtended) // Instruction must be extended.
1710 unsigned isExtendable = (F >> HexagonII::ExtendablePos)
1711 & HexagonII::ExtendableMask;
1715 short ExtOpNum = getCExtOpNum(MI);
1716 const MachineOperand &MO = MI->getOperand(ExtOpNum);
1717 // Use MO operand flags to determine if MO
1718 // has the HMOTF_ConstExtended flag set.
1719 if (MO.getTargetFlags() && HexagonII::HMOTF_ConstExtended)
1721 // If this is a Machine BB address we are talking about, and it is
1722 // not marked as extended, say so.
1726 // We could be using an instruction with an extendable immediate and shoehorn
1727 // a global address into it. If it is a global address it will be constant
1728 // extended. We do this for COMBINE.
1729 // We currently only handle isGlobal() because it is the only kind of
1730 // object we are going to end up with here for now.
1731 // In the future we probably should add isSymbol(), etc.
1732 if (MO.isGlobal() || MO.isSymbol())
1735 // If the extendable operand is not 'Immediate' type, the instruction should
1736 // have 'isExtended' flag set.
1737 assert(MO.isImm() && "Extendable operand must be Immediate type");
1739 int MinValue = getMinValue(MI);
1740 int MaxValue = getMaxValue(MI);
1741 int ImmValue = MO.getImm();
1743 return (ImmValue < MinValue || ImmValue > MaxValue);
1746 // Returns the opcode to use when converting MI, which is a conditional jump,
1747 // into a conditional instruction which uses the .new value of the predicate.
1748 // We also use branch probabilities to add a hint to the jump.
1750 HexagonInstrInfo::getDotNewPredJumpOp(MachineInstr *MI,
1752 MachineBranchProbabilityInfo *MBPI) const {
1754 // We assume that block can have at most two successors.
1756 MachineBasicBlock *Src = MI->getParent();
1757 MachineOperand *BrTarget = &MI->getOperand(1);
1758 MachineBasicBlock *Dst = BrTarget->getMBB();
1760 const BranchProbability Prediction = MBPI->getEdgeProbability(Src, Dst);
1761 if (Prediction >= BranchProbability(1,2))
1764 switch (MI->getOpcode()) {
1765 case Hexagon::JMP_t:
1766 return taken ? Hexagon::JMP_tnew_t : Hexagon::JMP_tnew_nt;
1767 case Hexagon::JMP_f:
1768 return taken ? Hexagon::JMP_fnew_t : Hexagon::JMP_fnew_nt;
1771 llvm_unreachable("Unexpected jump instruction.");
1774 // Returns true if a particular operand is extendable for an instruction.
1775 bool HexagonInstrInfo::isOperandExtended(const MachineInstr *MI,
1776 unsigned short OperandNum) const {
1777 // Constant extenders are allowed only for V4 and above.
1778 if (!Subtarget.hasV4TOps())
1781 const uint64_t F = MI->getDesc().TSFlags;
1783 return ((F >> HexagonII::ExtendableOpPos) & HexagonII::ExtendableOpMask)
1787 // Returns Operand Index for the constant extended instruction.
1788 unsigned short HexagonInstrInfo::getCExtOpNum(const MachineInstr *MI) const {
1789 const uint64_t F = MI->getDesc().TSFlags;
1790 return ((F >> HexagonII::ExtendableOpPos) & HexagonII::ExtendableOpMask);
1793 // Returns the min value that doesn't need to be extended.
1794 int HexagonInstrInfo::getMinValue(const MachineInstr *MI) const {
1795 const uint64_t F = MI->getDesc().TSFlags;
1796 unsigned isSigned = (F >> HexagonII::ExtentSignedPos)
1797 & HexagonII::ExtentSignedMask;
1798 unsigned bits = (F >> HexagonII::ExtentBitsPos)
1799 & HexagonII::ExtentBitsMask;
1801 if (isSigned) // if value is signed
1802 return -1 << (bits - 1);
1807 // Returns the max value that doesn't need to be extended.
1808 int HexagonInstrInfo::getMaxValue(const MachineInstr *MI) const {
1809 const uint64_t F = MI->getDesc().TSFlags;
1810 unsigned isSigned = (F >> HexagonII::ExtentSignedPos)
1811 & HexagonII::ExtentSignedMask;
1812 unsigned bits = (F >> HexagonII::ExtentBitsPos)
1813 & HexagonII::ExtentBitsMask;
1815 if (isSigned) // if value is signed
1816 return ~(-1 << (bits - 1));
1818 return ~(-1 << bits);
1821 // Returns true if an instruction can be converted into a non-extended
1822 // equivalent instruction.
1823 bool HexagonInstrInfo::NonExtEquivalentExists (const MachineInstr *MI) const {
1826 // Check if the instruction has a register form that uses register in place
1827 // of the extended operand, if so return that as the non-extended form.
1828 if (Hexagon::getRegForm(MI->getOpcode()) >= 0)
1831 if (MI->getDesc().mayLoad() || MI->getDesc().mayStore()) {
1832 // Check addressing mode and retreive non-ext equivalent instruction.
1834 switch (getAddrMode(MI)) {
1835 case HexagonII::Absolute :
1836 // Load/store with absolute addressing mode can be converted into
1837 // base+offset mode.
1838 NonExtOpcode = Hexagon::getBasedWithImmOffset(MI->getOpcode());
1840 case HexagonII::BaseImmOffset :
1841 // Load/store with base+offset addressing mode can be converted into
1842 // base+register offset addressing mode. However left shift operand should
1844 NonExtOpcode = Hexagon::getBaseWithRegOffset(MI->getOpcode());
1849 if (NonExtOpcode < 0)
1856 // Returns opcode of the non-extended equivalent instruction.
1857 short HexagonInstrInfo::getNonExtOpcode (const MachineInstr *MI) const {
1859 // Check if the instruction has a register form that uses register in place
1860 // of the extended operand, if so return that as the non-extended form.
1861 short NonExtOpcode = Hexagon::getRegForm(MI->getOpcode());
1862 if (NonExtOpcode >= 0)
1863 return NonExtOpcode;
1865 if (MI->getDesc().mayLoad() || MI->getDesc().mayStore()) {
1866 // Check addressing mode and retreive non-ext equivalent instruction.
1867 switch (getAddrMode(MI)) {
1868 case HexagonII::Absolute :
1869 return Hexagon::getBasedWithImmOffset(MI->getOpcode());
1870 case HexagonII::BaseImmOffset :
1871 return Hexagon::getBaseWithRegOffset(MI->getOpcode());
1879 bool HexagonInstrInfo::PredOpcodeHasJMP_c(Opcode_t Opcode) const {
1880 return (Opcode == Hexagon::JMP_t) ||
1881 (Opcode == Hexagon::JMP_f) ||
1882 (Opcode == Hexagon::JMP_tnew_t) ||
1883 (Opcode == Hexagon::JMP_fnew_t) ||
1884 (Opcode == Hexagon::JMP_tnew_nt) ||
1885 (Opcode == Hexagon::JMP_fnew_nt);
1888 bool HexagonInstrInfo::PredOpcodeHasNot(Opcode_t Opcode) const {
1889 return (Opcode == Hexagon::JMP_f) ||
1890 (Opcode == Hexagon::JMP_fnew_t) ||
1891 (Opcode == Hexagon::JMP_fnew_nt);