1 //===-- HexagonInstrInfo.cpp - Hexagon Instruction Information ------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains the Hexagon implementation of the TargetInstrInfo class.
12 //===----------------------------------------------------------------------===//
14 #include "HexagonInstrInfo.h"
16 #include "HexagonRegisterInfo.h"
17 #include "HexagonSubtarget.h"
18 #include "llvm/ADT/STLExtras.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/CodeGen/DFAPacketizer.h"
21 #include "llvm/CodeGen/MachineFrameInfo.h"
22 #include "llvm/CodeGen/MachineInstrBuilder.h"
23 #include "llvm/CodeGen/MachineMemOperand.h"
24 #include "llvm/CodeGen/MachineRegisterInfo.h"
25 #include "llvm/CodeGen/PseudoSourceValue.h"
26 #include "llvm/Support/Debug.h"
27 #include "llvm/Support/MathExtras.h"
28 #define GET_INSTRINFO_CTOR
29 #define GET_INSTRMAP_INFO
30 #include "HexagonGenInstrInfo.inc"
31 #include "HexagonGenDFAPacketizer.inc"
/// Constants for Hexagon instructions.
///
/// Offset-range limits used when validating instruction immediates. The
/// *_OFFSET_* pairs bound the signed immediate of base+offset memory
/// accesses per access size (MEMB/MEMH/MEMW/MEMD = 1/2/4/8 bytes) and of
/// ADDI; the *_AUTOINC_* pairs bound post-increment amounts, whose range
/// visibly scales with the access size (steps of 8 for MEMD down to steps
/// of 1 for MEMB).
///
/// Fix: the previous text had every line corrupted with a stray leading
/// line-number artifact (e.g. "38 const int ..."), which is not valid C++;
/// the artifacts are removed with names and values unchanged.
const int Hexagon_MEMW_OFFSET_MAX = 4095;
const int Hexagon_MEMW_OFFSET_MIN = -4096;
const int Hexagon_MEMD_OFFSET_MAX = 8191;
const int Hexagon_MEMD_OFFSET_MIN = -8192;
const int Hexagon_MEMH_OFFSET_MAX = 2047;
const int Hexagon_MEMH_OFFSET_MIN = -2048;
const int Hexagon_MEMB_OFFSET_MAX = 1023;
const int Hexagon_MEMB_OFFSET_MIN = -1024;
const int Hexagon_ADDI_OFFSET_MAX = 32767;
const int Hexagon_ADDI_OFFSET_MIN = -32768;
const int Hexagon_MEMD_AUTOINC_MAX = 56;
const int Hexagon_MEMD_AUTOINC_MIN = -64;
const int Hexagon_MEMW_AUTOINC_MAX = 28;
const int Hexagon_MEMW_AUTOINC_MIN = -32;
const int Hexagon_MEMH_AUTOINC_MAX = 14;
const int Hexagon_MEMH_AUTOINC_MIN = -16;
const int Hexagon_MEMB_AUTOINC_MAX = 7;
const int Hexagon_MEMB_AUTOINC_MIN = -8;
// Constructor: forwards the call-frame setup/destroy pseudo opcodes
// (ADJCALLSTACKDOWN/ADJCALLSTACKUP) to the TableGen'erated base class and
// initializes the register-info object and the Subtarget reference from ST.
// NOTE(review): this dump elides lines — the constructor body and closing
// brace are not visible here.
58 HexagonInstrInfo::HexagonInstrInfo(HexagonSubtarget &ST)
59 : HexagonGenInstrInfo(Hexagon::ADJCALLSTACKDOWN, Hexagon::ADJCALLSTACKUP),
60 RI(ST, *this), Subtarget(ST) {
64 /// isLoadFromStackSlot - If the specified machine instruction is a direct
65 /// load from a stack slot, return the virtual or physical register number of
66 /// the destination along with the FrameIndex of the loaded stack slot. If
67 /// not, return 0. This predicate must return 0 if the instruction has
68 /// any side effects other than loading from the stack slot.
69 unsigned HexagonInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
70 int &FrameIndex) const {
73 switch (MI->getOpcode()) {
// NOTE(review): the case labels for the matched load opcodes, the default
// branch, and the trailing "return 0" are elided in this dump.
// Matches the shape "Rdest = load(FI + 0)": operand 0 is the destination
// register, operand 1 the immediate offset (required to be 0), operand 2
// the frame index.
80 if (MI->getOperand(2).isFI() &&
81 MI->getOperand(1).isImm() && (MI->getOperand(1).getImm() == 0)) {
82 FrameIndex = MI->getOperand(2).getIndex();
83 return MI->getOperand(0).getReg();
91 /// isStoreToStackSlot - If the specified machine instruction is a direct
92 /// store to a stack slot, return the virtual or physical register number of
93 /// the source reg along with the FrameIndex of the loaded stack slot. If
94 /// not, return 0. This predicate must return 0 if the instruction has
95 /// any side effects other than storing to the stack slot.
96 unsigned HexagonInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
97 int &FrameIndex) const {
98 switch (MI->getOpcode()) {
// NOTE(review): the case labels for the matched store opcodes, the default
// branch, and the trailing "return 0" are elided in this dump.
// NOTE(review): the guard tests operand 2 as a frame index, but the body
// reads the frame index from operand 0 and the stored register from
// operand 2 — these disagree. Verify the ST* operand order; the guard
// likely should test MI->getOperand(0).isFI().
104 if (MI->getOperand(2).isFI() &&
105 MI->getOperand(1).isImm() && (MI->getOperand(1).getImm() == 0)) {
106 FrameIndex = MI->getOperand(0).getIndex();
107 return MI->getOperand(2).getReg();
// InsertBranch - Emit an unconditional JMP, a conditional JMP_t/JMP_f, or a
// conditional+unconditional pair at the end of MBB, per the standard
// TargetInstrInfo contract (TBB = taken target, FBB = fall-through target,
// Cond = condition operands produced by AnalyzeBranch).
// NOTE(review): this dump elides many lines of this function (return type
// line, several closing braces, the one-branch/two-branch dispatch), so the
// fragments below are not contiguous.
116 HexagonInstrInfo::InsertBranch(MachineBasicBlock &MBB,MachineBasicBlock *TBB,
117 MachineBasicBlock *FBB,
118 const SmallVectorImpl<MachineOperand> &Cond,
121 int BOpc = Hexagon::JMP;
122 int BccOpc = Hexagon::JMP_t;
124 assert(TBB && "InsertBranch must not be told to insert a fallthrough");
127 // Check if ReverseBranchCondition has asked to reverse this branch
128 // If we want to reverse the branch an odd number of times, we want
// A leading immediate 0 in Cond encodes an inverted condition: use JMP_f.
130 if (!Cond.empty() && Cond[0].isImm() && Cond[0].getImm() == 0) {
131 BccOpc = Hexagon::JMP_f;
137 // Due to a bug in TailMerging/CFG Optimization, we need to add a
138 // special case handling of a predicated jump followed by an
139 // unconditional jump. If not, Tail Merging and CFG Optimization go
140 // into an infinite loop.
141 MachineBasicBlock *NewTBB, *NewFBB;
// NOTE(review): this local Cond shadows the parameter of the same name; in
// context it holds the re-analyzed condition for the special case above.
142 SmallVector<MachineOperand, 4> Cond;
143 MachineInstr *Term = MBB.getFirstTerminator();
144 if (isPredicated(Term) && !AnalyzeBranch(MBB, NewTBB, NewFBB, Cond,
146 MachineBasicBlock *NextBB =
147 llvm::next(MachineFunction::iterator(&MBB));
// If the conditional target is the layout successor, invert the condition
// and re-insert as a single (inverted) conditional branch.
148 if (NewTBB == NextBB) {
149 ReverseBranchCondition(Cond);
151 return InsertBranch(MBB, TBB, 0, Cond, DL);
154 BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB);
157 get(BccOpc)).addReg(Cond[regPos].getReg()).addMBB(TBB);
// Two-way branch: conditional to TBB followed by unconditional to FBB.
162 BuildMI(&MBB, DL, get(BccOpc)).addReg(Cond[regPos].getReg()).addMBB(TBB);
163 BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB);
// AnalyzeBranch - Examine the terminators of MBB and, when the control flow
// is understood, fill in TBB/FBB/Cond per the TargetInstrInfo contract.
// Recognized endings: unconditional JMP, conditional JMP_t/JMP_f,
// hardware-loop ENDLOOP0, and their two-terminator combinations. With
// AllowModify set it may also delete redundant branches.
// NOTE(review): this dump elides many lines (returns, loop bodies, closing
// braces); the fragments below are not contiguous.
169 bool HexagonInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
170 MachineBasicBlock *&TBB,
171 MachineBasicBlock *&FBB,
172 SmallVectorImpl<MachineOperand> &Cond,
173 bool AllowModify) const {
177 // If the block has no terminators, it just falls into the block after it.
178 MachineBasicBlock::instr_iterator I = MBB.instr_end();
179 if (I == MBB.instr_begin())
182 // A basic block may looks like this:
192 // It has two succs but does not have a terminator
193 // Don't know how to handle it.
198 } while (I != MBB.instr_begin());
// Skip over debug values so they don't affect branch analysis.
203 while (I->isDebugValue()) {
204 if (I == MBB.instr_begin())
209 // Delete the JMP if it's equivalent to a fall-through.
210 if (AllowModify && I->getOpcode() == Hexagon::JMP &&
211 MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
212 DEBUG(dbgs()<< "\nErasing the jump to successor block\n";);
213 I->eraseFromParent();
215 if (I == MBB.instr_begin())
219 if (!isUnpredicatedTerminator(I))
222 // Get the last instruction in the block.
223 MachineInstr *LastInst = I;
224 MachineInstr *SecondLastInst = NULL;
225 // Find one more terminator if present.
227 if (&*I != LastInst && !I->isBundle() && isUnpredicatedTerminator(I)) {
231 // This is a third branch.
234 if (I == MBB.instr_begin())
239 int LastOpcode = LastInst->getOpcode();
241 bool LastOpcodeHasJMP_c = PredOpcodeHasJMP_c(LastOpcode);
242 bool LastOpcodeHasNot = PredOpcodeHasNot(LastOpcode);
244 // If there is only one terminator instruction, process it.
245 if (LastInst && !SecondLastInst) {
246 if (LastOpcode == Hexagon::JMP) {
247 TBB = LastInst->getOperand(0).getMBB();
250 if (LastOpcode == Hexagon::ENDLOOP0) {
251 TBB = LastInst->getOperand(0).getMBB();
252 Cond.push_back(LastInst->getOperand(0));
255 if (LastOpcodeHasJMP_c) {
256 TBB = LastInst->getOperand(1).getMBB();
// A leading immediate 0 in Cond marks an inverted (JMP_f-style) condition;
// this matches the encoding consumed by InsertBranch above.
257 if (LastOpcodeHasNot) {
258 Cond.push_back(MachineOperand::CreateImm(0));
260 Cond.push_back(LastInst->getOperand(0));
263 // Otherwise, don't know what this is.
267 int SecLastOpcode = SecondLastInst->getOpcode();
269 bool SecLastOpcodeHasJMP_c = PredOpcodeHasJMP_c(SecLastOpcode);
270 bool SecLastOpcodeHasNot = PredOpcodeHasNot(SecLastOpcode);
// Conditional branch followed by unconditional branch.
271 if (SecLastOpcodeHasJMP_c && (LastOpcode == Hexagon::JMP)) {
272 TBB = SecondLastInst->getOperand(1).getMBB();
273 if (SecLastOpcodeHasNot)
274 Cond.push_back(MachineOperand::CreateImm(0));
275 Cond.push_back(SecondLastInst->getOperand(0));
276 FBB = LastInst->getOperand(0).getMBB();
280 // If the block ends with two Hexagon:JMPs, handle it. The second one is not
281 // executed, so remove it.
282 if (SecLastOpcode == Hexagon::JMP && LastOpcode == Hexagon::JMP) {
283 TBB = SecondLastInst->getOperand(0).getMBB();
286 I->eraseFromParent();
290 // If the block ends with an ENDLOOP, and JMP, handle it.
291 if (SecLastOpcode == Hexagon::ENDLOOP0 &&
292 LastOpcode == Hexagon::JMP) {
293 TBB = SecondLastInst->getOperand(0).getMBB();
294 Cond.push_back(SecondLastInst->getOperand(0));
295 FBB = LastInst->getOperand(0).getMBB();
299 // Otherwise, can't handle this.
// RemoveBranch - Erase up to two branch instructions (JMP / JMP_t / JMP_f)
// from the end of MBB and return how many were removed, per the
// TargetInstrInfo contract.
// NOTE(review): this dump elides lines (the early "return 0/1/2" paths and
// iterator adjustments), so the fragments below are not contiguous.
304 unsigned HexagonInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
305 int BOpc = Hexagon::JMP;
306 int BccOpc = Hexagon::JMP_t;
307 int BccOpcNot = Hexagon::JMP_f;
309 MachineBasicBlock::iterator I = MBB.end();
310 if (I == MBB.begin()) return 0;
// The last instruction is not a recognized branch: nothing to remove.
312 if (I->getOpcode() != BOpc && I->getOpcode() != BccOpc &&
313 I->getOpcode() != BccOpcNot)
316 // Remove the branch.
317 I->eraseFromParent();
321 if (I == MBB.begin()) return 1;
// Only a conditional branch may precede the removed terminator.
323 if (I->getOpcode() != BccOpc && I->getOpcode() != BccOpcNot)
326 // Remove the branch.
327 I->eraseFromParent();
332 /// \brief For a comparison instruction, return the source registers in
333 /// \p SrcReg and \p SrcReg2 if having two register operands, and the value it
334 /// compares against in CmpValue. Return true if the comparison instruction
// NOTE(review): doc sentence is truncated here in this dump; the Mask
// setting statements and several returns are elided between the fragments
// below.
336 bool HexagonInstrInfo::analyzeCompare(const MachineInstr *MI,
337 unsigned &SrcReg, unsigned &SrcReg2,
338 int &Mask, int &Value) const {
339 unsigned Opc = MI->getOpcode();
341 // Set mask and the first source register.
// Word/doubleword compares: operand 1 is the first source register.
343 case Hexagon::CMPEHexagon4rr:
344 case Hexagon::CMPEQri:
345 case Hexagon::CMPEQrr:
346 case Hexagon::CMPGT64rr:
347 case Hexagon::CMPGTU64rr:
348 case Hexagon::CMPGTUri:
349 case Hexagon::CMPGTUrr:
350 case Hexagon::CMPGTri:
351 case Hexagon::CMPGTrr:
352 SrcReg = MI->getOperand(1).getReg();
// Byte compares (V4).
355 case Hexagon::CMPbEQri_V4:
356 case Hexagon::CMPbEQrr_sbsb_V4:
357 case Hexagon::CMPbEQrr_ubub_V4:
358 case Hexagon::CMPbGTUri_V4:
359 case Hexagon::CMPbGTUrr_V4:
360 case Hexagon::CMPbGTrr_V4:
361 SrcReg = MI->getOperand(1).getReg();
// Halfword compares (V4).
364 case Hexagon::CMPhEQri_V4:
365 case Hexagon::CMPhEQrr_shl_V4:
366 case Hexagon::CMPhEQrr_xor_V4:
367 case Hexagon::CMPhGTUri_V4:
368 case Hexagon::CMPhGTUrr_V4:
369 case Hexagon::CMPhGTrr_shl_V4:
370 SrcReg = MI->getOperand(1).getReg();
375 // Set the value/second source register.
// Register-register forms: operand 2 is the second source register.
377 case Hexagon::CMPEHexagon4rr:
378 case Hexagon::CMPEQrr:
379 case Hexagon::CMPGT64rr:
380 case Hexagon::CMPGTU64rr:
381 case Hexagon::CMPGTUrr:
382 case Hexagon::CMPGTrr:
383 case Hexagon::CMPbEQrr_sbsb_V4:
384 case Hexagon::CMPbEQrr_ubub_V4:
385 case Hexagon::CMPbGTUrr_V4:
386 case Hexagon::CMPbGTrr_V4:
387 case Hexagon::CMPhEQrr_shl_V4:
388 case Hexagon::CMPhEQrr_xor_V4:
389 case Hexagon::CMPhGTUrr_V4:
390 case Hexagon::CMPhGTrr_shl_V4:
391 SrcReg2 = MI->getOperand(2).getReg();
// Register-immediate forms: operand 2 is the compared-against value.
394 case Hexagon::CMPEQri:
395 case Hexagon::CMPGTUri:
396 case Hexagon::CMPGTri:
397 case Hexagon::CMPbEQri_V4:
398 case Hexagon::CMPbGTUri_V4:
399 case Hexagon::CMPhEQri_V4:
400 case Hexagon::CMPhGTUri_V4:
402 Value = MI->getOperand(2).getImm();
// copyPhysReg - Emit the instruction(s) that copy SrcReg into DestReg, chosen
// by the register classes involved: TFR (int), TFR64 (double), OR_pp (pred),
// TFCR (control), and TFR_RsPd/TFR_PdRs for pred<->int transfers. Unhandled
// class combinations hit llvm_unreachable.
// NOTE(review): this dump elides lines (the "return;" after each case and
// several closing braces), so the fragments below are not contiguous.
410 void HexagonInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
411 MachineBasicBlock::iterator I, DebugLoc DL,
412 unsigned DestReg, unsigned SrcReg,
413 bool KillSrc) const {
414 if (Hexagon::IntRegsRegClass.contains(SrcReg, DestReg)) {
415 BuildMI(MBB, I, DL, get(Hexagon::TFR), DestReg).addReg(SrcReg);
418 if (Hexagon::DoubleRegsRegClass.contains(SrcReg, DestReg)) {
419 BuildMI(MBB, I, DL, get(Hexagon::TFR64), DestReg).addReg(SrcReg);
422 if (Hexagon::PredRegsRegClass.contains(SrcReg, DestReg)) {
423 // Map Pd = Ps to Pd = or(Ps, Ps).
424 BuildMI(MBB, I, DL, get(Hexagon::OR_pp),
425 DestReg).addReg(SrcReg).addReg(SrcReg);
428 if (Hexagon::DoubleRegsRegClass.contains(DestReg) &&
429 Hexagon::IntRegsRegClass.contains(SrcReg)) {
430 // We can have an overlap between single and double reg: r1:0 = r0.
431 if(SrcReg == RI.getSubReg(DestReg, Hexagon::subreg_loreg)) {
// Low subreg already holds SrcReg; only zero the high subreg.
433 BuildMI(MBB, I, DL, get(Hexagon::TFRI), (RI.getSubReg(DestReg,
434 Hexagon::subreg_hireg))).addImm(0);
436 // r1:0 = r1 or no overlap.
437 BuildMI(MBB, I, DL, get(Hexagon::TFR), (RI.getSubReg(DestReg,
438 Hexagon::subreg_loreg))).addReg(SrcReg);
439 BuildMI(MBB, I, DL, get(Hexagon::TFRI), (RI.getSubReg(DestReg,
440 Hexagon::subreg_hireg))).addImm(0);
444 if (Hexagon::CRRegsRegClass.contains(DestReg) &&
445 Hexagon::IntRegsRegClass.contains(SrcReg)) {
446 BuildMI(MBB, I, DL, get(Hexagon::TFCR), DestReg).addReg(SrcReg);
449 if (Hexagon::PredRegsRegClass.contains(SrcReg) &&
450 Hexagon::IntRegsRegClass.contains(DestReg)) {
451 BuildMI(MBB, I, DL, get(Hexagon::TFR_RsPd), DestReg).
452 addReg(SrcReg, getKillRegState(KillSrc));
455 if (Hexagon::IntRegsRegClass.contains(SrcReg) &&
456 Hexagon::PredRegsRegClass.contains(DestReg)) {
457 BuildMI(MBB, I, DL, get(Hexagon::TFR_PdRs), DestReg).
458 addReg(SrcReg, getKillRegState(KillSrc));
462 llvm_unreachable("Unimplemented");
// storeRegToStackSlot - Spill SrcReg to stack slot FI, selecting the store
// opcode by register class: STriw (int), STrid (double), STriw_pred (pred).
// A MachineMemOperand is attached so later passes see the stack access.
466 void HexagonInstrInfo::
467 storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
468 unsigned SrcReg, bool isKill, int FI,
469 const TargetRegisterClass *RC,
470 const TargetRegisterInfo *TRI) const {
472 DebugLoc DL = MBB.findDebugLoc(I);
473 MachineFunction &MF = *MBB.getParent();
474 MachineFrameInfo &MFI = *MF.getFrameInfo();
475 unsigned Align = MFI.getObjectAlignment(FI);
// NOTE(review): the final argument of getMachineMemOperand (presumably
// Align, computed above) is elided in this dump.
477 MachineMemOperand *MMO =
478 MF.getMachineMemOperand(
479 MachinePointerInfo(PseudoSourceValue::getFixedStack(FI)),
480 MachineMemOperand::MOStore,
481 MFI.getObjectSize(FI),
484 if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) {
485 BuildMI(MBB, I, DL, get(Hexagon::STriw))
486 .addFrameIndex(FI).addImm(0)
487 .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
488 } else if (Hexagon::DoubleRegsRegClass.hasSubClassEq(RC)) {
489 BuildMI(MBB, I, DL, get(Hexagon::STrid))
490 .addFrameIndex(FI).addImm(0)
491 .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
492 } else if (Hexagon::PredRegsRegClass.hasSubClassEq(RC)) {
493 BuildMI(MBB, I, DL, get(Hexagon::STriw_pred))
494 .addFrameIndex(FI).addImm(0)
495 .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
497 llvm_unreachable("Unimplemented");
// storeRegToAddr - Deliberately unimplemented on Hexagon; aborts if reached.
502 void HexagonInstrInfo::storeRegToAddr(
503 MachineFunction &MF, unsigned SrcReg,
505 SmallVectorImpl<MachineOperand> &Addr,
506 const TargetRegisterClass *RC,
507 SmallVectorImpl<MachineInstr*> &NewMIs) const
509 llvm_unreachable("Unimplemented");
// loadRegFromStackSlot - Reload DestReg from stack slot FI, selecting the
// load opcode by register class: LDriw (int), LDrid (double), LDriw_pred
// (pred). Mirrors storeRegToStackSlot above.
513 void HexagonInstrInfo::
514 loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
515 unsigned DestReg, int FI,
516 const TargetRegisterClass *RC,
517 const TargetRegisterInfo *TRI) const {
518 DebugLoc DL = MBB.findDebugLoc(I);
519 MachineFunction &MF = *MBB.getParent();
520 MachineFrameInfo &MFI = *MF.getFrameInfo();
521 unsigned Align = MFI.getObjectAlignment(FI);
// NOTE(review): the final argument of getMachineMemOperand (presumably
// Align, computed above) is elided in this dump.
523 MachineMemOperand *MMO =
524 MF.getMachineMemOperand(
525 MachinePointerInfo(PseudoSourceValue::getFixedStack(FI)),
526 MachineMemOperand::MOLoad,
527 MFI.getObjectSize(FI),
529 if (RC == &Hexagon::IntRegsRegClass) {
530 BuildMI(MBB, I, DL, get(Hexagon::LDriw), DestReg)
531 .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
532 } else if (RC == &Hexagon::DoubleRegsRegClass) {
533 BuildMI(MBB, I, DL, get(Hexagon::LDrid), DestReg)
534 .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
535 } else if (RC == &Hexagon::PredRegsRegClass) {
536 BuildMI(MBB, I, DL, get(Hexagon::LDriw_pred), DestReg)
537 .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
// NOTE(review): message looks copy-pasted from the store path — this is the
// LOAD function, so it should read "Can't load this register from stack
// slot". Runtime string left untouched in this documentation-only pass.
539 llvm_unreachable("Can't store this register to stack slot");
// loadRegFromAddr - Deliberately unimplemented on Hexagon; aborts if reached.
544 void HexagonInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
545 SmallVectorImpl<MachineOperand> &Addr,
546 const TargetRegisterClass *RC,
547 SmallVectorImpl<MachineInstr*> &NewMIs) const {
548 llvm_unreachable("Unimplemented");
// foldMemoryOperandImpl - Memory-operand folding is not implemented for
// Hexagon (see the TODO); the elided body presumably declines to fold.
// NOTE(review): parameter list tail and the return statement are elided in
// this dump.
552 MachineInstr *HexagonInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
554 const SmallVectorImpl<unsigned> &Ops,
556 // Hexagon_TODO: Implement.
// emitFrameIndexDebugValue - Build a DBG_VALUE describing a variable that
// lives at FrameIx + Offset; MDPtr carries the variable metadata.
// NOTE(review): the return type line, the frame-index operand (FrameIx
// appears unused in the visible fragment), and the return are elided in
// this dump.
561 HexagonInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
562 int FrameIx, uint64_t Offset,
565 MachineInstrBuilder MIB = BuildMI(MF, DL, get(Hexagon::DBG_VALUE))
566 .addImm(0).addImm(Offset).addMetadata(MDPtr);
// createVR - Create a new virtual register of the register class matching
// value type VT: PredRegs (presumably for i1 — the guarding condition is
// elided in this dump), IntRegs for i32/f32, DoubleRegs for i64/f64.
// Returns the new virtual register number.
570 unsigned HexagonInstrInfo::createVR(MachineFunction* MF, MVT VT) const {
572 MachineRegisterInfo &RegInfo = MF->getRegInfo();
573 const TargetRegisterClass *TRC;
575 TRC = &Hexagon::PredRegsRegClass;
576 } else if (VT == MVT::i32 || VT == MVT::f32) {
577 TRC = &Hexagon::IntRegsRegClass;
578 } else if (VT == MVT::i64 || VT == MVT::f64) {
579 TRC = &Hexagon::DoubleRegsRegClass;
581 llvm_unreachable("Cannot handle this register class");
584 unsigned NewReg = RegInfo.createVirtualRegister(TRC);
// isExtendable - Return true if MI can take a constant extender. Checks the
// TableGen'erated Extendable bit in TSFlags first, then a legacy opcode
// switch (marked obsolete below).
// NOTE(review): several returns and the switch's remaining cases are elided
// in this dump.
588 bool HexagonInstrInfo::isExtendable(const MachineInstr *MI) const {
589 // Constant extenders are allowed only for V4 and above.
590 if (!Subtarget.hasV4TOps())
593 const MCInstrDesc &MID = MI->getDesc();
594 const uint64_t F = MID.TSFlags;
// Bit test: extract the Extendable field from the TSFlags word.
595 if ((F >> HexagonII::ExtendablePos) & HexagonII::ExtendableMask)
598 // TODO: This is largely obsolete now. Will need to be removed
599 // in consecutive patches.
600 switch(MI->getOpcode()) {
601 // TFR_FI Remains a special case.
602 case Hexagon::TFR_FI:
610 // This returns true in two cases:
611 // - The OP code itself indicates that this is an extended instruction.
612 // - One of MOs has been marked with HMOTF_ConstExtended flag.
613 bool HexagonInstrInfo::isExtended(const MachineInstr *MI) const {
614 // First check if this is permanently extended op code.
615 const uint64_t F = MI->getDesc().TSFlags;
616 if ((F >> HexagonII::ExtendedPos) & HexagonII::ExtendedMask)
618 // Use MO operand flags to determine if one of MI's operands
619 // has HMOTF_ConstExtended flag set.
620 for (MachineInstr::const_mop_iterator I = MI->operands_begin(),
621 E = MI->operands_end(); I != E; ++I) {
622 if (I->getTargetFlags() && HexagonII::HMOTF_ConstExtended)
// isBranch - Thin wrapper over the MCInstrDesc branch flag.
// NOTE(review): the closing brace is elided in this dump.
628 bool HexagonInstrInfo::isBranch (const MachineInstr *MI) const {
629 return MI->getDesc().isBranch();
// isNewValueStore - Return true if MI is one of the V4 new-value store
// opcodes (.new stores), enumerated by access size: bytes, then halfwords,
// then words; each group covers base, indexed, shifted-indexed, GP-relative,
// post-increment, absolute, and all predicated (cPt/cNotPt/cdn*) variants.
// NOTE(review): the per-group "return true;" lines and closing braces are
// elided in this dump.
632 bool HexagonInstrInfo::isNewValueStore(const MachineInstr *MI) const {
633 switch (MI->getOpcode()) {
634 default: return false;
// Byte (.b) new-value stores.
636 case Hexagon::STrib_nv_V4:
637 case Hexagon::STrib_indexed_nv_V4:
638 case Hexagon::STrib_indexed_shl_nv_V4:
639 case Hexagon::STrib_shl_nv_V4:
640 case Hexagon::STb_GP_nv_V4:
641 case Hexagon::POST_STbri_nv_V4:
642 case Hexagon::STrib_cPt_nv_V4:
643 case Hexagon::STrib_cdnPt_nv_V4:
644 case Hexagon::STrib_cNotPt_nv_V4:
645 case Hexagon::STrib_cdnNotPt_nv_V4:
646 case Hexagon::STrib_indexed_cPt_nv_V4:
647 case Hexagon::STrib_indexed_cdnPt_nv_V4:
648 case Hexagon::STrib_indexed_cNotPt_nv_V4:
649 case Hexagon::STrib_indexed_cdnNotPt_nv_V4:
650 case Hexagon::STrib_indexed_shl_cPt_nv_V4:
651 case Hexagon::STrib_indexed_shl_cdnPt_nv_V4:
652 case Hexagon::STrib_indexed_shl_cNotPt_nv_V4:
653 case Hexagon::STrib_indexed_shl_cdnNotPt_nv_V4:
654 case Hexagon::POST_STbri_cPt_nv_V4:
655 case Hexagon::POST_STbri_cdnPt_nv_V4:
656 case Hexagon::POST_STbri_cNotPt_nv_V4:
657 case Hexagon::POST_STbri_cdnNotPt_nv_V4:
658 case Hexagon::STb_GP_cPt_nv_V4:
659 case Hexagon::STb_GP_cNotPt_nv_V4:
660 case Hexagon::STb_GP_cdnPt_nv_V4:
661 case Hexagon::STb_GP_cdnNotPt_nv_V4:
662 case Hexagon::STrib_abs_nv_V4:
663 case Hexagon::STrib_abs_cPt_nv_V4:
664 case Hexagon::STrib_abs_cdnPt_nv_V4:
665 case Hexagon::STrib_abs_cNotPt_nv_V4:
666 case Hexagon::STrib_abs_cdnNotPt_nv_V4:
// Halfword (.h) new-value stores.
669 case Hexagon::STrih_nv_V4:
670 case Hexagon::STrih_indexed_nv_V4:
671 case Hexagon::STrih_indexed_shl_nv_V4:
672 case Hexagon::STrih_shl_nv_V4:
673 case Hexagon::STh_GP_nv_V4:
674 case Hexagon::POST_SThri_nv_V4:
675 case Hexagon::STrih_cPt_nv_V4:
676 case Hexagon::STrih_cdnPt_nv_V4:
677 case Hexagon::STrih_cNotPt_nv_V4:
678 case Hexagon::STrih_cdnNotPt_nv_V4:
679 case Hexagon::STrih_indexed_cPt_nv_V4:
680 case Hexagon::STrih_indexed_cdnPt_nv_V4:
681 case Hexagon::STrih_indexed_cNotPt_nv_V4:
682 case Hexagon::STrih_indexed_cdnNotPt_nv_V4:
683 case Hexagon::STrih_indexed_shl_cPt_nv_V4:
684 case Hexagon::STrih_indexed_shl_cdnPt_nv_V4:
685 case Hexagon::STrih_indexed_shl_cNotPt_nv_V4:
686 case Hexagon::STrih_indexed_shl_cdnNotPt_nv_V4:
687 case Hexagon::POST_SThri_cPt_nv_V4:
688 case Hexagon::POST_SThri_cdnPt_nv_V4:
689 case Hexagon::POST_SThri_cNotPt_nv_V4:
690 case Hexagon::POST_SThri_cdnNotPt_nv_V4:
691 case Hexagon::STh_GP_cPt_nv_V4:
692 case Hexagon::STh_GP_cNotPt_nv_V4:
693 case Hexagon::STh_GP_cdnPt_nv_V4:
694 case Hexagon::STh_GP_cdnNotPt_nv_V4:
695 case Hexagon::STrih_abs_nv_V4:
696 case Hexagon::STrih_abs_cPt_nv_V4:
697 case Hexagon::STrih_abs_cdnPt_nv_V4:
698 case Hexagon::STrih_abs_cNotPt_nv_V4:
699 case Hexagon::STrih_abs_cdnNotPt_nv_V4:
// Word (.w) new-value stores.
702 case Hexagon::STriw_nv_V4:
703 case Hexagon::STriw_indexed_nv_V4:
704 case Hexagon::STriw_indexed_shl_nv_V4:
705 case Hexagon::STriw_shl_nv_V4:
706 case Hexagon::STw_GP_nv_V4:
707 case Hexagon::POST_STwri_nv_V4:
708 case Hexagon::STriw_cPt_nv_V4:
709 case Hexagon::STriw_cdnPt_nv_V4:
710 case Hexagon::STriw_cNotPt_nv_V4:
711 case Hexagon::STriw_cdnNotPt_nv_V4:
712 case Hexagon::STriw_indexed_cPt_nv_V4:
713 case Hexagon::STriw_indexed_cdnPt_nv_V4:
714 case Hexagon::STriw_indexed_cNotPt_nv_V4:
715 case Hexagon::STriw_indexed_cdnNotPt_nv_V4:
716 case Hexagon::STriw_indexed_shl_cPt_nv_V4:
717 case Hexagon::STriw_indexed_shl_cdnPt_nv_V4:
718 case Hexagon::STriw_indexed_shl_cNotPt_nv_V4:
719 case Hexagon::STriw_indexed_shl_cdnNotPt_nv_V4:
720 case Hexagon::POST_STwri_cPt_nv_V4:
721 case Hexagon::POST_STwri_cdnPt_nv_V4:
722 case Hexagon::POST_STwri_cNotPt_nv_V4:
723 case Hexagon::POST_STwri_cdnNotPt_nv_V4:
724 case Hexagon::STw_GP_cPt_nv_V4:
725 case Hexagon::STw_GP_cNotPt_nv_V4:
726 case Hexagon::STw_GP_cdnPt_nv_V4:
727 case Hexagon::STw_GP_cdnNotPt_nv_V4:
728 case Hexagon::STriw_abs_nv_V4:
729 case Hexagon::STriw_abs_cPt_nv_V4:
730 case Hexagon::STriw_abs_cdnPt_nv_V4:
731 case Hexagon::STriw_abs_cNotPt_nv_V4:
732 case Hexagon::STriw_abs_cdnNotPt_nv_V4:
// isPostIncrement - Return true if MI is a post-increment load or store,
// enumerated by access size for loads (byte/ubyte/half/uhalf/word/double)
// and stores (byte/half/word/double), each with its predicated variants.
// NOTE(review): the per-group "return true;" lines and closing braces are
// elided in this dump.
737 bool HexagonInstrInfo::isPostIncrement (const MachineInstr* MI) const {
738 switch (MI->getOpcode())
740 default: return false;
// Load signed byte.
742 case Hexagon::POST_LDrib:
743 case Hexagon::POST_LDrib_cPt:
744 case Hexagon::POST_LDrib_cNotPt:
745 case Hexagon::POST_LDrib_cdnPt_V4:
746 case Hexagon::POST_LDrib_cdnNotPt_V4:
748 // Load unsigned byte
749 case Hexagon::POST_LDriub:
750 case Hexagon::POST_LDriub_cPt:
751 case Hexagon::POST_LDriub_cNotPt:
752 case Hexagon::POST_LDriub_cdnPt_V4:
753 case Hexagon::POST_LDriub_cdnNotPt_V4:
// Load signed halfword.
756 case Hexagon::POST_LDrih:
757 case Hexagon::POST_LDrih_cPt:
758 case Hexagon::POST_LDrih_cNotPt:
759 case Hexagon::POST_LDrih_cdnPt_V4:
760 case Hexagon::POST_LDrih_cdnNotPt_V4:
762 // Load unsigned halfword
763 case Hexagon::POST_LDriuh:
764 case Hexagon::POST_LDriuh_cPt:
765 case Hexagon::POST_LDriuh_cNotPt:
766 case Hexagon::POST_LDriuh_cdnPt_V4:
767 case Hexagon::POST_LDriuh_cdnNotPt_V4:
// Load word.
770 case Hexagon::POST_LDriw:
771 case Hexagon::POST_LDriw_cPt:
772 case Hexagon::POST_LDriw_cNotPt:
773 case Hexagon::POST_LDriw_cdnPt_V4:
774 case Hexagon::POST_LDriw_cdnNotPt_V4:
// Load doubleword.
777 case Hexagon::POST_LDrid:
778 case Hexagon::POST_LDrid_cPt:
779 case Hexagon::POST_LDrid_cNotPt:
780 case Hexagon::POST_LDrid_cdnPt_V4:
781 case Hexagon::POST_LDrid_cdnNotPt_V4:
// Store byte.
784 case Hexagon::POST_STbri:
785 case Hexagon::POST_STbri_cPt:
786 case Hexagon::POST_STbri_cNotPt:
787 case Hexagon::POST_STbri_cdnPt_V4:
788 case Hexagon::POST_STbri_cdnNotPt_V4:
// Store halfword.
791 case Hexagon::POST_SThri:
792 case Hexagon::POST_SThri_cPt:
793 case Hexagon::POST_SThri_cNotPt:
794 case Hexagon::POST_SThri_cdnPt_V4:
795 case Hexagon::POST_SThri_cdnNotPt_V4:
// Store word.
798 case Hexagon::POST_STwri:
799 case Hexagon::POST_STwri_cPt:
800 case Hexagon::POST_STwri_cNotPt:
801 case Hexagon::POST_STwri_cdnPt_V4:
802 case Hexagon::POST_STwri_cdnNotPt_V4:
// Store doubleword.
805 case Hexagon::POST_STdri:
806 case Hexagon::POST_STdri_cPt:
807 case Hexagon::POST_STdri_cNotPt:
808 case Hexagon::POST_STdri_cdnPt_V4:
809 case Hexagon::POST_STdri_cdnNotPt_V4:
// isNewValueInst - True if MI is either a new-value jump or a new-value
// store.
// NOTE(review): the "return true;" after each test and the final return are
// elided in this dump.
814 bool HexagonInstrInfo::isNewValueInst(const MachineInstr *MI) const {
815 if (isNewValueJump(MI))
818 if (isNewValueStore(MI))
// isSaveCalleeSavedRegsCall - True for the V4 pseudo that calls the
// save-callee-saved-registers runtime routine.
// NOTE(review): the closing brace is elided in this dump.
824 bool HexagonInstrInfo::isSaveCalleeSavedRegsCall(const MachineInstr *MI) const {
825 return MI->getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4;
// isPredicable - An instruction is predicable only if the MCInstrDesc says
// so AND, for immediate-carrying opcodes, the immediate fits the narrower
// field available in the predicated encoding (hence the isInt/isUInt/
// isShiftedUInt range checks below, scaled by the access size).
// NOTE(review): this dump elides the switch header, most case labels, and
// some early returns; the range-check fragments below are grouped by the
// visible case labels.
828 bool HexagonInstrInfo::isPredicable(MachineInstr *MI) const {
829 bool isPred = MI->getDesc().isPredicable();
834 const int Opc = MI->getOpcode();
838 return isInt<12>(MI->getOperand(1).getImm());
// Stores: offset must be an unsigned 6-bit value shifted by log2(size).
841 case Hexagon::STrid_indexed:
842 return isShiftedUInt<6,3>(MI->getOperand(1).getImm());
845 case Hexagon::STriw_indexed:
846 case Hexagon::STriw_nv_V4:
847 return isShiftedUInt<6,2>(MI->getOperand(1).getImm());
850 case Hexagon::STrih_indexed:
851 case Hexagon::STrih_nv_V4:
852 return isShiftedUInt<6,1>(MI->getOperand(1).getImm());
855 case Hexagon::STrib_indexed:
856 case Hexagon::STrib_nv_V4:
857 return isUInt<6>(MI->getOperand(1).getImm());
// Loads: offset (operand 2) has the same shifted-unsigned-6-bit form.
860 case Hexagon::LDrid_indexed:
861 return isShiftedUInt<6,3>(MI->getOperand(2).getImm());
864 case Hexagon::LDriw_indexed:
865 return isShiftedUInt<6,2>(MI->getOperand(2).getImm());
868 case Hexagon::LDriuh:
869 case Hexagon::LDrih_indexed:
870 case Hexagon::LDriuh_indexed:
871 return isShiftedUInt<6,1>(MI->getOperand(2).getImm());
874 case Hexagon::LDriub:
875 case Hexagon::LDrib_indexed:
876 case Hexagon::LDriub_indexed:
877 return isUInt<6>(MI->getOperand(2).getImm());
// Post-increment loads: increment (operand 3) is a signed shifted 4-bit
// value.
879 case Hexagon::POST_LDrid:
880 return isShiftedInt<4,3>(MI->getOperand(3).getImm());
882 case Hexagon::POST_LDriw:
883 return isShiftedInt<4,2>(MI->getOperand(3).getImm());
885 case Hexagon::POST_LDrih:
886 case Hexagon::POST_LDriuh:
887 return isShiftedInt<4,1>(MI->getOperand(3).getImm());
889 case Hexagon::POST_LDrib:
890 case Hexagon::POST_LDriub:
891 return isInt<4>(MI->getOperand(3).getImm());
// V4 store-immediate: both the address offset and the stored immediate must
// fit 6 bits.
893 case Hexagon::STrib_imm_V4:
894 case Hexagon::STrih_imm_V4:
895 case Hexagon::STriw_imm_V4:
896 return (isUInt<6>(MI->getOperand(1).getImm()) &&
897 isInt<6>(MI->getOperand(2).getImm()));
899 case Hexagon::ADD_ri:
900 return isInt<8>(MI->getOperand(2).getImm());
// NOTE(review): visible tail — some opcode(s) are predicable only on V4+.
908 return Subtarget.hasV4TOps();
914 // This function performs the following inversions:
919 // however, these inversions are NOT included:
921 // cdnPt -X-> cdnNotPt
922 // cdnNotPt -X-> cdnPt
923 // cPt_nv -X-> cNotPt_nv (new value stores)
924 // cNotPt_nv -X-> cPt_nv (new value stores)
926 // because only the following transformations are allowed:
928 // cNotPt ---> cdnNotPt
930 // cNotPt ---> cNotPt_nv
932 unsigned HexagonInstrInfo::getInvertedPredicatedOpcode(const int Opc) const {
934 InvPredOpcode = isPredicatedTrue(Opc) ? Hexagon::getFalsePredOpcode(Opc)
935 : Hexagon::getTruePredOpcode(Opc);
936 if (InvPredOpcode >= 0) // Valid instruction with the inverted predicate.
937 return InvPredOpcode;
940 default: llvm_unreachable("Unexpected predicated instruction");
941 case Hexagon::TFR_cPt:
942 return Hexagon::TFR_cNotPt;
943 case Hexagon::TFR_cNotPt:
944 return Hexagon::TFR_cPt;
946 case Hexagon::TFRI_cPt:
947 return Hexagon::TFRI_cNotPt;
948 case Hexagon::TFRI_cNotPt:
949 return Hexagon::TFRI_cPt;
952 return Hexagon::JMP_f;
954 return Hexagon::JMP_t;
956 case Hexagon::ADD_ri_cPt:
957 return Hexagon::ADD_ri_cNotPt;
958 case Hexagon::ADD_ri_cNotPt:
959 return Hexagon::ADD_ri_cPt;
961 case Hexagon::ADD_rr_cPt:
962 return Hexagon::ADD_rr_cNotPt;
963 case Hexagon::ADD_rr_cNotPt:
964 return Hexagon::ADD_rr_cPt;
966 case Hexagon::XOR_rr_cPt:
967 return Hexagon::XOR_rr_cNotPt;
968 case Hexagon::XOR_rr_cNotPt:
969 return Hexagon::XOR_rr_cPt;
971 case Hexagon::AND_rr_cPt:
972 return Hexagon::AND_rr_cNotPt;
973 case Hexagon::AND_rr_cNotPt:
974 return Hexagon::AND_rr_cPt;
976 case Hexagon::OR_rr_cPt:
977 return Hexagon::OR_rr_cNotPt;
978 case Hexagon::OR_rr_cNotPt:
979 return Hexagon::OR_rr_cPt;
981 case Hexagon::SUB_rr_cPt:
982 return Hexagon::SUB_rr_cNotPt;
983 case Hexagon::SUB_rr_cNotPt:
984 return Hexagon::SUB_rr_cPt;
986 case Hexagon::COMBINE_rr_cPt:
987 return Hexagon::COMBINE_rr_cNotPt;
988 case Hexagon::COMBINE_rr_cNotPt:
989 return Hexagon::COMBINE_rr_cPt;
991 case Hexagon::ASLH_cPt_V4:
992 return Hexagon::ASLH_cNotPt_V4;
993 case Hexagon::ASLH_cNotPt_V4:
994 return Hexagon::ASLH_cPt_V4;
996 case Hexagon::ASRH_cPt_V4:
997 return Hexagon::ASRH_cNotPt_V4;
998 case Hexagon::ASRH_cNotPt_V4:
999 return Hexagon::ASRH_cPt_V4;
1001 case Hexagon::SXTB_cPt_V4:
1002 return Hexagon::SXTB_cNotPt_V4;
1003 case Hexagon::SXTB_cNotPt_V4:
1004 return Hexagon::SXTB_cPt_V4;
1006 case Hexagon::SXTH_cPt_V4:
1007 return Hexagon::SXTH_cNotPt_V4;
1008 case Hexagon::SXTH_cNotPt_V4:
1009 return Hexagon::SXTH_cPt_V4;
1011 case Hexagon::ZXTB_cPt_V4:
1012 return Hexagon::ZXTB_cNotPt_V4;
1013 case Hexagon::ZXTB_cNotPt_V4:
1014 return Hexagon::ZXTB_cPt_V4;
1016 case Hexagon::ZXTH_cPt_V4:
1017 return Hexagon::ZXTH_cNotPt_V4;
1018 case Hexagon::ZXTH_cNotPt_V4:
1019 return Hexagon::ZXTH_cPt_V4;
1022 case Hexagon::JMPR_t:
1023 return Hexagon::JMPR_f;
1024 case Hexagon::JMPR_f:
1025 return Hexagon::JMPR_t;
1027 // V4 indexed+scaled load.
1028 case Hexagon::LDrid_indexed_shl_cPt_V4:
1029 return Hexagon::LDrid_indexed_shl_cNotPt_V4;
1030 case Hexagon::LDrid_indexed_shl_cNotPt_V4:
1031 return Hexagon::LDrid_indexed_shl_cPt_V4;
1033 case Hexagon::LDrib_indexed_shl_cPt_V4:
1034 return Hexagon::LDrib_indexed_shl_cNotPt_V4;
1035 case Hexagon::LDrib_indexed_shl_cNotPt_V4:
1036 return Hexagon::LDrib_indexed_shl_cPt_V4;
1038 case Hexagon::LDriub_indexed_shl_cPt_V4:
1039 return Hexagon::LDriub_indexed_shl_cNotPt_V4;
1040 case Hexagon::LDriub_indexed_shl_cNotPt_V4:
1041 return Hexagon::LDriub_indexed_shl_cPt_V4;
1043 case Hexagon::LDrih_indexed_shl_cPt_V4:
1044 return Hexagon::LDrih_indexed_shl_cNotPt_V4;
1045 case Hexagon::LDrih_indexed_shl_cNotPt_V4:
1046 return Hexagon::LDrih_indexed_shl_cPt_V4;
1048 case Hexagon::LDriuh_indexed_shl_cPt_V4:
1049 return Hexagon::LDriuh_indexed_shl_cNotPt_V4;
1050 case Hexagon::LDriuh_indexed_shl_cNotPt_V4:
1051 return Hexagon::LDriuh_indexed_shl_cPt_V4;
1053 case Hexagon::LDriw_indexed_shl_cPt_V4:
1054 return Hexagon::LDriw_indexed_shl_cNotPt_V4;
1055 case Hexagon::LDriw_indexed_shl_cNotPt_V4:
1056 return Hexagon::LDriw_indexed_shl_cPt_V4;
1059 case Hexagon::POST_STbri_cPt:
1060 return Hexagon::POST_STbri_cNotPt;
1061 case Hexagon::POST_STbri_cNotPt:
1062 return Hexagon::POST_STbri_cPt;
1064 case Hexagon::STrib_cPt:
1065 return Hexagon::STrib_cNotPt;
1066 case Hexagon::STrib_cNotPt:
1067 return Hexagon::STrib_cPt;
1069 case Hexagon::STrib_indexed_cPt:
1070 return Hexagon::STrib_indexed_cNotPt;
1071 case Hexagon::STrib_indexed_cNotPt:
1072 return Hexagon::STrib_indexed_cPt;
1074 case Hexagon::STrib_imm_cPt_V4:
1075 return Hexagon::STrib_imm_cNotPt_V4;
1076 case Hexagon::STrib_imm_cNotPt_V4:
1077 return Hexagon::STrib_imm_cPt_V4;
1079 case Hexagon::STrib_indexed_shl_cPt_V4:
1080 return Hexagon::STrib_indexed_shl_cNotPt_V4;
1081 case Hexagon::STrib_indexed_shl_cNotPt_V4:
1082 return Hexagon::STrib_indexed_shl_cPt_V4;
1085 case Hexagon::POST_SThri_cPt:
1086 return Hexagon::POST_SThri_cNotPt;
1087 case Hexagon::POST_SThri_cNotPt:
1088 return Hexagon::POST_SThri_cPt;
1090 case Hexagon::STrih_cPt:
1091 return Hexagon::STrih_cNotPt;
1092 case Hexagon::STrih_cNotPt:
1093 return Hexagon::STrih_cPt;
1095 case Hexagon::STrih_indexed_cPt:
1096 return Hexagon::STrih_indexed_cNotPt;
1097 case Hexagon::STrih_indexed_cNotPt:
1098 return Hexagon::STrih_indexed_cPt;
1100 case Hexagon::STrih_imm_cPt_V4:
1101 return Hexagon::STrih_imm_cNotPt_V4;
1102 case Hexagon::STrih_imm_cNotPt_V4:
1103 return Hexagon::STrih_imm_cPt_V4;
1105 case Hexagon::STrih_indexed_shl_cPt_V4:
1106 return Hexagon::STrih_indexed_shl_cNotPt_V4;
1107 case Hexagon::STrih_indexed_shl_cNotPt_V4:
1108 return Hexagon::STrih_indexed_shl_cPt_V4;
1111 case Hexagon::POST_STwri_cPt:
1112 return Hexagon::POST_STwri_cNotPt;
1113 case Hexagon::POST_STwri_cNotPt:
1114 return Hexagon::POST_STwri_cPt;
1116 case Hexagon::STriw_cPt:
1117 return Hexagon::STriw_cNotPt;
1118 case Hexagon::STriw_cNotPt:
1119 return Hexagon::STriw_cPt;
1121 case Hexagon::STriw_indexed_cPt:
1122 return Hexagon::STriw_indexed_cNotPt;
1123 case Hexagon::STriw_indexed_cNotPt:
1124 return Hexagon::STriw_indexed_cPt;
1126 case Hexagon::STriw_indexed_shl_cPt_V4:
1127 return Hexagon::STriw_indexed_shl_cNotPt_V4;
1128 case Hexagon::STriw_indexed_shl_cNotPt_V4:
1129 return Hexagon::STriw_indexed_shl_cPt_V4;
1131 case Hexagon::STriw_imm_cPt_V4:
1132 return Hexagon::STriw_imm_cNotPt_V4;
1133 case Hexagon::STriw_imm_cNotPt_V4:
1134 return Hexagon::STriw_imm_cPt_V4;
1137 case Hexagon::POST_STdri_cPt:
1138 return Hexagon::POST_STdri_cNotPt;
1139 case Hexagon::POST_STdri_cNotPt:
1140 return Hexagon::POST_STdri_cPt;
1142 case Hexagon::STrid_cPt:
1143 return Hexagon::STrid_cNotPt;
1144 case Hexagon::STrid_cNotPt:
1145 return Hexagon::STrid_cPt;
1147 case Hexagon::STrid_indexed_cPt:
1148 return Hexagon::STrid_indexed_cNotPt;
1149 case Hexagon::STrid_indexed_cNotPt:
1150 return Hexagon::STrid_indexed_cPt;
1152 case Hexagon::STrid_indexed_shl_cPt_V4:
1153 return Hexagon::STrid_indexed_shl_cNotPt_V4;
1154 case Hexagon::STrid_indexed_shl_cNotPt_V4:
1155 return Hexagon::STrid_indexed_shl_cPt_V4;
1157 // V4 Store to global address.
1158 case Hexagon::STd_GP_cPt_V4:
1159 return Hexagon::STd_GP_cNotPt_V4;
1160 case Hexagon::STd_GP_cNotPt_V4:
1161 return Hexagon::STd_GP_cPt_V4;
1163 case Hexagon::STb_GP_cPt_V4:
1164 return Hexagon::STb_GP_cNotPt_V4;
1165 case Hexagon::STb_GP_cNotPt_V4:
1166 return Hexagon::STb_GP_cPt_V4;
1168 case Hexagon::STh_GP_cPt_V4:
1169 return Hexagon::STh_GP_cNotPt_V4;
1170 case Hexagon::STh_GP_cNotPt_V4:
1171 return Hexagon::STh_GP_cPt_V4;
1173 case Hexagon::STw_GP_cPt_V4:
1174 return Hexagon::STw_GP_cNotPt_V4;
1175 case Hexagon::STw_GP_cNotPt_V4:
1176 return Hexagon::STw_GP_cPt_V4;
1179 case Hexagon::LDrid_cPt:
1180 return Hexagon::LDrid_cNotPt;
1181 case Hexagon::LDrid_cNotPt:
1182 return Hexagon::LDrid_cPt;
1184 case Hexagon::LDriw_cPt:
1185 return Hexagon::LDriw_cNotPt;
1186 case Hexagon::LDriw_cNotPt:
1187 return Hexagon::LDriw_cPt;
1189 case Hexagon::LDrih_cPt:
1190 return Hexagon::LDrih_cNotPt;
1191 case Hexagon::LDrih_cNotPt:
1192 return Hexagon::LDrih_cPt;
1194 case Hexagon::LDriuh_cPt:
1195 return Hexagon::LDriuh_cNotPt;
1196 case Hexagon::LDriuh_cNotPt:
1197 return Hexagon::LDriuh_cPt;
1199 case Hexagon::LDrib_cPt:
1200 return Hexagon::LDrib_cNotPt;
1201 case Hexagon::LDrib_cNotPt:
1202 return Hexagon::LDrib_cPt;
1204 case Hexagon::LDriub_cPt:
1205 return Hexagon::LDriub_cNotPt;
1206 case Hexagon::LDriub_cNotPt:
1207 return Hexagon::LDriub_cPt;
1210 case Hexagon::LDrid_indexed_cPt:
1211 return Hexagon::LDrid_indexed_cNotPt;
1212 case Hexagon::LDrid_indexed_cNotPt:
1213 return Hexagon::LDrid_indexed_cPt;
1215 case Hexagon::LDriw_indexed_cPt:
1216 return Hexagon::LDriw_indexed_cNotPt;
1217 case Hexagon::LDriw_indexed_cNotPt:
1218 return Hexagon::LDriw_indexed_cPt;
1220 case Hexagon::LDrih_indexed_cPt:
1221 return Hexagon::LDrih_indexed_cNotPt;
1222 case Hexagon::LDrih_indexed_cNotPt:
1223 return Hexagon::LDrih_indexed_cPt;
1225 case Hexagon::LDriuh_indexed_cPt:
1226 return Hexagon::LDriuh_indexed_cNotPt;
1227 case Hexagon::LDriuh_indexed_cNotPt:
1228 return Hexagon::LDriuh_indexed_cPt;
1230 case Hexagon::LDrib_indexed_cPt:
1231 return Hexagon::LDrib_indexed_cNotPt;
1232 case Hexagon::LDrib_indexed_cNotPt:
1233 return Hexagon::LDrib_indexed_cPt;
1235 case Hexagon::LDriub_indexed_cPt:
1236 return Hexagon::LDriub_indexed_cNotPt;
1237 case Hexagon::LDriub_indexed_cNotPt:
1238 return Hexagon::LDriub_indexed_cPt;
1241 case Hexagon::POST_LDrid_cPt:
1242 return Hexagon::POST_LDrid_cNotPt;
1243 case Hexagon::POST_LDriw_cNotPt:
1244 return Hexagon::POST_LDriw_cPt;
1246 case Hexagon::POST_LDrih_cPt:
1247 return Hexagon::POST_LDrih_cNotPt;
1248 case Hexagon::POST_LDrih_cNotPt:
1249 return Hexagon::POST_LDrih_cPt;
1251 case Hexagon::POST_LDriuh_cPt:
1252 return Hexagon::POST_LDriuh_cNotPt;
1253 case Hexagon::POST_LDriuh_cNotPt:
1254 return Hexagon::POST_LDriuh_cPt;
1256 case Hexagon::POST_LDrib_cPt:
1257 return Hexagon::POST_LDrib_cNotPt;
1258 case Hexagon::POST_LDrib_cNotPt:
1259 return Hexagon::POST_LDrib_cPt;
1261 case Hexagon::POST_LDriub_cPt:
1262 return Hexagon::POST_LDriub_cNotPt;
1263 case Hexagon::POST_LDriub_cNotPt:
1264 return Hexagon::POST_LDriub_cPt;
1267 case Hexagon::DEALLOC_RET_cPt_V4:
1268 return Hexagon::DEALLOC_RET_cNotPt_V4;
1269 case Hexagon::DEALLOC_RET_cNotPt_V4:
1270 return Hexagon::DEALLOC_RET_cPt_V4;
1275 int HexagonInstrInfo::
1276 getMatchingCondBranchOpcode(int Opc, bool invertPredicate) const {
1277 enum Hexagon::PredSense inPredSense;
1278 inPredSense = invertPredicate ? Hexagon::PredSense_false :
1279 Hexagon::PredSense_true;
1280 int CondOpcode = Hexagon::getPredOpcode(Opc, inPredSense);
1281 if (CondOpcode >= 0) // Valid Conditional opcode/instruction
1284 // This switch case will be removed once all the instructions have been
1285 // modified to use relation maps.
1288 return !invertPredicate ? Hexagon::TFR_cPt :
1289 Hexagon::TFR_cNotPt;
1290 case Hexagon::TFRI_f:
1291 return !invertPredicate ? Hexagon::TFRI_cPt_f :
1292 Hexagon::TFRI_cNotPt_f;
1294 return !invertPredicate ? Hexagon::TFRI_cPt :
1295 Hexagon::TFRI_cNotPt;
1297 return !invertPredicate ? Hexagon::JMP_t :
1300 case Hexagon::COMBINE_rr:
1301 return !invertPredicate ? Hexagon::COMBINE_rr_cPt :
1302 Hexagon::COMBINE_rr_cNotPt;
1304 return !invertPredicate ? Hexagon::ASLH_cPt_V4 :
1305 Hexagon::ASLH_cNotPt_V4;
1307 return !invertPredicate ? Hexagon::ASRH_cPt_V4 :
1308 Hexagon::ASRH_cNotPt_V4;
1310 return !invertPredicate ? Hexagon::SXTB_cPt_V4 :
1311 Hexagon::SXTB_cNotPt_V4;
1313 return !invertPredicate ? Hexagon::SXTH_cPt_V4 :
1314 Hexagon::SXTH_cNotPt_V4;
1316 return !invertPredicate ? Hexagon::ZXTB_cPt_V4 :
1317 Hexagon::ZXTB_cNotPt_V4;
1319 return !invertPredicate ? Hexagon::ZXTH_cPt_V4 :
1320 Hexagon::ZXTH_cNotPt_V4;
1323 return !invertPredicate ? Hexagon::JMPR_t :
1326 // V4 indexed+scaled load.
1327 case Hexagon::LDrid_indexed_shl_V4:
1328 return !invertPredicate ? Hexagon::LDrid_indexed_shl_cPt_V4 :
1329 Hexagon::LDrid_indexed_shl_cNotPt_V4;
1330 case Hexagon::LDrib_indexed_shl_V4:
1331 return !invertPredicate ? Hexagon::LDrib_indexed_shl_cPt_V4 :
1332 Hexagon::LDrib_indexed_shl_cNotPt_V4;
1333 case Hexagon::LDriub_indexed_shl_V4:
1334 return !invertPredicate ? Hexagon::LDriub_indexed_shl_cPt_V4 :
1335 Hexagon::LDriub_indexed_shl_cNotPt_V4;
1336 case Hexagon::LDrih_indexed_shl_V4:
1337 return !invertPredicate ? Hexagon::LDrih_indexed_shl_cPt_V4 :
1338 Hexagon::LDrih_indexed_shl_cNotPt_V4;
1339 case Hexagon::LDriuh_indexed_shl_V4:
1340 return !invertPredicate ? Hexagon::LDriuh_indexed_shl_cPt_V4 :
1341 Hexagon::LDriuh_indexed_shl_cNotPt_V4;
1342 case Hexagon::LDriw_indexed_shl_V4:
1343 return !invertPredicate ? Hexagon::LDriw_indexed_shl_cPt_V4 :
1344 Hexagon::LDriw_indexed_shl_cNotPt_V4;
1346 // V4 Load from global address
1347 case Hexagon::LDd_GP_V4:
1348 return !invertPredicate ? Hexagon::LDd_GP_cPt_V4 :
1349 Hexagon::LDd_GP_cNotPt_V4;
1350 case Hexagon::LDb_GP_V4:
1351 return !invertPredicate ? Hexagon::LDb_GP_cPt_V4 :
1352 Hexagon::LDb_GP_cNotPt_V4;
1353 case Hexagon::LDub_GP_V4:
1354 return !invertPredicate ? Hexagon::LDub_GP_cPt_V4 :
1355 Hexagon::LDub_GP_cNotPt_V4;
1356 case Hexagon::LDh_GP_V4:
1357 return !invertPredicate ? Hexagon::LDh_GP_cPt_V4 :
1358 Hexagon::LDh_GP_cNotPt_V4;
1359 case Hexagon::LDuh_GP_V4:
1360 return !invertPredicate ? Hexagon::LDuh_GP_cPt_V4 :
1361 Hexagon::LDuh_GP_cNotPt_V4;
1362 case Hexagon::LDw_GP_V4:
1363 return !invertPredicate ? Hexagon::LDw_GP_cPt_V4 :
1364 Hexagon::LDw_GP_cNotPt_V4;
1367 case Hexagon::POST_STbri:
1368 return !invertPredicate ? Hexagon::POST_STbri_cPt :
1369 Hexagon::POST_STbri_cNotPt;
1370 case Hexagon::STrib:
1371 return !invertPredicate ? Hexagon::STrib_cPt :
1372 Hexagon::STrib_cNotPt;
1373 case Hexagon::STrib_indexed:
1374 return !invertPredicate ? Hexagon::STrib_indexed_cPt :
1375 Hexagon::STrib_indexed_cNotPt;
1376 case Hexagon::STrib_imm_V4:
1377 return !invertPredicate ? Hexagon::STrib_imm_cPt_V4 :
1378 Hexagon::STrib_imm_cNotPt_V4;
1379 case Hexagon::STrib_indexed_shl_V4:
1380 return !invertPredicate ? Hexagon::STrib_indexed_shl_cPt_V4 :
1381 Hexagon::STrib_indexed_shl_cNotPt_V4;
1383 case Hexagon::POST_SThri:
1384 return !invertPredicate ? Hexagon::POST_SThri_cPt :
1385 Hexagon::POST_SThri_cNotPt;
1386 case Hexagon::STrih:
1387 return !invertPredicate ? Hexagon::STrih_cPt :
1388 Hexagon::STrih_cNotPt;
1389 case Hexagon::STrih_indexed:
1390 return !invertPredicate ? Hexagon::STrih_indexed_cPt :
1391 Hexagon::STrih_indexed_cNotPt;
1392 case Hexagon::STrih_imm_V4:
1393 return !invertPredicate ? Hexagon::STrih_imm_cPt_V4 :
1394 Hexagon::STrih_imm_cNotPt_V4;
1395 case Hexagon::STrih_indexed_shl_V4:
1396 return !invertPredicate ? Hexagon::STrih_indexed_shl_cPt_V4 :
1397 Hexagon::STrih_indexed_shl_cNotPt_V4;
1399 case Hexagon::POST_STwri:
1400 return !invertPredicate ? Hexagon::POST_STwri_cPt :
1401 Hexagon::POST_STwri_cNotPt;
1402 case Hexagon::STriw:
1403 return !invertPredicate ? Hexagon::STriw_cPt :
1404 Hexagon::STriw_cNotPt;
1405 case Hexagon::STriw_indexed:
1406 return !invertPredicate ? Hexagon::STriw_indexed_cPt :
1407 Hexagon::STriw_indexed_cNotPt;
1408 case Hexagon::STriw_indexed_shl_V4:
1409 return !invertPredicate ? Hexagon::STriw_indexed_shl_cPt_V4 :
1410 Hexagon::STriw_indexed_shl_cNotPt_V4;
1411 case Hexagon::STriw_imm_V4:
1412 return !invertPredicate ? Hexagon::STriw_imm_cPt_V4 :
1413 Hexagon::STriw_imm_cNotPt_V4;
1415 case Hexagon::POST_STdri:
1416 return !invertPredicate ? Hexagon::POST_STdri_cPt :
1417 Hexagon::POST_STdri_cNotPt;
1418 case Hexagon::STrid:
1419 return !invertPredicate ? Hexagon::STrid_cPt :
1420 Hexagon::STrid_cNotPt;
1421 case Hexagon::STrid_indexed:
1422 return !invertPredicate ? Hexagon::STrid_indexed_cPt :
1423 Hexagon::STrid_indexed_cNotPt;
1424 case Hexagon::STrid_indexed_shl_V4:
1425 return !invertPredicate ? Hexagon::STrid_indexed_shl_cPt_V4 :
1426 Hexagon::STrid_indexed_shl_cNotPt_V4;
1428 // V4 Store to global address
1429 case Hexagon::STd_GP_V4:
1430 return !invertPredicate ? Hexagon::STd_GP_cPt_V4 :
1431 Hexagon::STd_GP_cNotPt_V4;
1432 case Hexagon::STb_GP_V4:
1433 return !invertPredicate ? Hexagon::STb_GP_cPt_V4 :
1434 Hexagon::STb_GP_cNotPt_V4;
1435 case Hexagon::STh_GP_V4:
1436 return !invertPredicate ? Hexagon::STh_GP_cPt_V4 :
1437 Hexagon::STh_GP_cNotPt_V4;
1438 case Hexagon::STw_GP_V4:
1439 return !invertPredicate ? Hexagon::STw_GP_cPt_V4 :
1440 Hexagon::STw_GP_cNotPt_V4;
1443 case Hexagon::LDrid:
1444 return !invertPredicate ? Hexagon::LDrid_cPt :
1445 Hexagon::LDrid_cNotPt;
1446 case Hexagon::LDriw:
1447 return !invertPredicate ? Hexagon::LDriw_cPt :
1448 Hexagon::LDriw_cNotPt;
1449 case Hexagon::LDrih:
1450 return !invertPredicate ? Hexagon::LDrih_cPt :
1451 Hexagon::LDrih_cNotPt;
1452 case Hexagon::LDriuh:
1453 return !invertPredicate ? Hexagon::LDriuh_cPt :
1454 Hexagon::LDriuh_cNotPt;
1455 case Hexagon::LDrib:
1456 return !invertPredicate ? Hexagon::LDrib_cPt :
1457 Hexagon::LDrib_cNotPt;
1458 case Hexagon::LDriub:
1459 return !invertPredicate ? Hexagon::LDriub_cPt :
1460 Hexagon::LDriub_cNotPt;
1462 case Hexagon::LDrid_indexed:
1463 return !invertPredicate ? Hexagon::LDrid_indexed_cPt :
1464 Hexagon::LDrid_indexed_cNotPt;
1465 case Hexagon::LDriw_indexed:
1466 return !invertPredicate ? Hexagon::LDriw_indexed_cPt :
1467 Hexagon::LDriw_indexed_cNotPt;
1468 case Hexagon::LDrih_indexed:
1469 return !invertPredicate ? Hexagon::LDrih_indexed_cPt :
1470 Hexagon::LDrih_indexed_cNotPt;
1471 case Hexagon::LDriuh_indexed:
1472 return !invertPredicate ? Hexagon::LDriuh_indexed_cPt :
1473 Hexagon::LDriuh_indexed_cNotPt;
1474 case Hexagon::LDrib_indexed:
1475 return !invertPredicate ? Hexagon::LDrib_indexed_cPt :
1476 Hexagon::LDrib_indexed_cNotPt;
1477 case Hexagon::LDriub_indexed:
1478 return !invertPredicate ? Hexagon::LDriub_indexed_cPt :
1479 Hexagon::LDriub_indexed_cNotPt;
1480 // Post Increment Load.
1481 case Hexagon::POST_LDrid:
1482 return !invertPredicate ? Hexagon::POST_LDrid_cPt :
1483 Hexagon::POST_LDrid_cNotPt;
1484 case Hexagon::POST_LDriw:
1485 return !invertPredicate ? Hexagon::POST_LDriw_cPt :
1486 Hexagon::POST_LDriw_cNotPt;
1487 case Hexagon::POST_LDrih:
1488 return !invertPredicate ? Hexagon::POST_LDrih_cPt :
1489 Hexagon::POST_LDrih_cNotPt;
1490 case Hexagon::POST_LDriuh:
1491 return !invertPredicate ? Hexagon::POST_LDriuh_cPt :
1492 Hexagon::POST_LDriuh_cNotPt;
1493 case Hexagon::POST_LDrib:
1494 return !invertPredicate ? Hexagon::POST_LDrib_cPt :
1495 Hexagon::POST_LDrib_cNotPt;
1496 case Hexagon::POST_LDriub:
1497 return !invertPredicate ? Hexagon::POST_LDriub_cPt :
1498 Hexagon::POST_LDriub_cNotPt;
1500 case Hexagon::DEALLOC_RET_V4:
1501 return !invertPredicate ? Hexagon::DEALLOC_RET_cPt_V4 :
1502 Hexagon::DEALLOC_RET_cNotPt_V4;
1504 llvm_unreachable("Unexpected predicable instruction");
// Convert MI into its predicated form under the condition in Cond.
// The opcode is first rewritten to its conditional variant via
// getMatchingCondBranchOpcode(); the predicate register (Cond) is then
// inserted as the first input operand, shifting the existing input
// operands one slot to the right.  GlobalAddress operands get special
// handling because MachineOperand has no in-place "change to global
// address" operation.
// NOTE(review): several statements of this routine appear elided in this
// excerpt (declaration of `oper`/`i`, some closing braces and else
// branches) — compare against the upstream file before editing.
bool HexagonInstrInfo::
PredicateInstruction(MachineInstr *MI,
                     const SmallVectorImpl<MachineOperand> &Cond) const {
  int Opc = MI->getOpcode();
  assert (isPredicable(MI) && "Expected predicable instruction");
  // Cond[0] == immediate 0 encodes the inverted (false) predicate sense.
  bool invertJump = (!Cond.empty() && Cond[0].isImm() &&
                     (Cond[0].getImm() == 0));

  // This will change MI's opcode to its predicate version.
  // However, its operand list is still the old one, i.e. the
  // non-predicate one.
  MI->setDesc(get(getMatchingCondBranchOpcode(Opc, invertJump)));

  // Index of the GlobalAddress operand, if any (see hasGAOpnd).
  unsigned int GAIdx = 0;

  // Indicates whether the current MI has a GlobalAddress operand
  bool hasGAOpnd = false;
  std::vector<MachineOperand> tmpOpnds;

  // Indicates whether we need to shift operands to right.
  bool needShift = true;

  // The predicate is ALWAYS the FIRST input operand !!!
  if (MI->getNumOperands() == 0) {
    // The non-predicate version of MI does not take any operands,
    // i.e. no outs and no ins. In this condition, the predicate
    // operand will be directly placed at Operands[0]. No operand
  }
  else if ( MI->getOperand(MI->getNumOperands()-1).isReg()
            && MI->getOperand(MI->getNumOperands()-1).isDef()
            && !MI->getOperand(MI->getNumOperands()-1).isImplicit()) {
    // The non-predicate version of MI does not have any input operands.
    // In this condition, we extend the length of Operands[] by one and
    // copy the original last operand to the newly allocated slot.
    // At this moment, it is just a place holder. Later, we will put
    // predicate operand directly into it. No operand shift is needed.
    // Example: r0=BARRIER (this is a faked insn used here for illustration)
    MI->addOperand(MI->getOperand(MI->getNumOperands()-1));
    oper = MI->getNumOperands() - 2;
  }
  // We need to right shift all input operands by one. Duplicate the
  // last operand into the newly allocated slot.
  MI->addOperand(MI->getOperand(MI->getNumOperands()-1));

  // Operands[ MI->getNumOperands() - 2 ] has been copied into
  // Operands[ MI->getNumOperands() - 1 ], so we start from
  // Operands[ MI->getNumOperands() - 3 ].
  // oper is a signed int.
  // It is ok if "MI->getNumOperands()-3" is -3, -2, or -1.
  for (oper = MI->getNumOperands() - 3; oper >= 0; --oper)
  {
    MachineOperand &MO = MI->getOperand(oper);

    // Opnd[0] Opnd[1] Opnd[2] Opnd[3] Opnd[4] Opnd[5] Opnd[6] Opnd[7]
    // <Def0>  <Def1>  <Use0>  <Use1>  <ImpDef0> <ImpDef1> <ImpUse0> <ImpUse1>
    //
    // Explicit defs stay put; everything after them shifts right to make
    // room for the predicate operand.
    // Predicate Operand here
    if (MO.isReg() && !MO.isUse() && !MO.isImplicit()) {
      // Hit the last explicit def; stop shifting.
      MI->getOperand(oper+1).ChangeToRegister(MO.getReg(), MO.isDef(),
                                              MO.isImplicit(), MO.isKill(),
                                              MO.isDead(), MO.isUndef(),
    }
    else if (MO.isImm()) {
      MI->getOperand(oper+1).ChangeToImmediate(MO.getImm());
    }
    else if (MO.isGlobal()) {
      // MI can not have more than one GlobalAddress operand.
      assert(hasGAOpnd == false && "MI can only have one GlobalAddress opnd");

      // There is no member function called "ChangeToGlobalAddress" in the
      // MachineOperand class (not like "ChangeToRegister" and
      // "ChangeToImmediate"). So we have to remove them from Operands[] list
      // first, and then add them back after we have inserted the predicate
      // operand. tmpOpnds[] is to remember these operands before we remove
      tmpOpnds.push_back(MO);

      // Operands[oper] is a GlobalAddress operand;
      // Operands[oper+1] has been copied into Operands[oper+2];
    }
    else {
      assert(false && "Unexpected operand type");
    }
  }

  // Which Cond slot holds the predicate register depends on the sense.
  int regPos = invertJump ? 1 : 0;
  MachineOperand PredMO = Cond[regPos];

  // [oper] now points to the last explicit Def. Predicate operand must be
  // located at [oper+1]. See diagram above.
  // This assumes that the predicate is always the first operand,
  // i.e. Operands[0+numResults], in the set of inputs
  // It is better to have an assert here to check this. But I don't know how
  // to write this assert because findFirstPredOperandIdx() would return -1
  if (oper < -1) oper = -1;

  MI->getOperand(oper+1).ChangeToRegister(PredMO.getReg(), PredMO.isDef(),
                                          PredMO.isImplicit(), false,
                                          PredMO.isDead(), PredMO.isUndef(),

  MachineRegisterInfo &RegInfo = MI->getParent()->getParent()->getRegInfo();
  RegInfo.clearKillFlags(PredMO.getReg());

  // Operands[GAIdx] is the original GlobalAddress operand, which is
  // already copied into tmpOpnds[0].
  // Operands[GAIdx] now stores a copy of Operands[GAIdx-1]
  // Operands[GAIdx+1] has already been copied into Operands[GAIdx+2],
  // so we start from [GAIdx+2]
  for (i = GAIdx + 2; i < MI->getNumOperands(); ++i)
    tmpOpnds.push_back(MI->getOperand(i));

  // Remove all operands in range [ (GAIdx+1) ... (MI->getNumOperands()-1) ]
  // It is very important that we always remove from the end of Operands[]
  // MI->getNumOperands() is at least 2 if program goes to here.
  for (i = MI->getNumOperands() - 1; i > GAIdx; --i)
    MI->RemoveOperand(i);

  // Re-append the saved GlobalAddress (and trailing) operands.
  for (i = 0; i < tmpOpnds.size(); ++i)
    MI->addOperand(tmpOpnds[i]);
1660 isProfitableToIfCvt(MachineBasicBlock &MBB,
1662 unsigned ExtraPredCycles,
1663 const BranchProbability &Probability) const {
1670 isProfitableToIfCvt(MachineBasicBlock &TMBB,
1671 unsigned NumTCycles,
1672 unsigned ExtraTCycles,
1673 MachineBasicBlock &FMBB,
1674 unsigned NumFCycles,
1675 unsigned ExtraFCycles,
1676 const BranchProbability &Probability) const {
1680 // Returns true if an instruction is predicated irrespective of the predicate
1681 // sense. For example, all of the following will return true.
1682 // if (p0) R1 = add(R2, R3)
1683 // if (!p0) R1 = add(R2, R3)
1684 // if (p0.new) R1 = add(R2, R3)
1685 // if (!p0.new) R1 = add(R2, R3)
1686 bool HexagonInstrInfo::isPredicated(const MachineInstr *MI) const {
1687 const uint64_t F = MI->getDesc().TSFlags;
1689 return ((F >> HexagonII::PredicatedPos) & HexagonII::PredicatedMask);
1692 bool HexagonInstrInfo::isPredicated(unsigned Opcode) const {
1693 const uint64_t F = get(Opcode).TSFlags;
1695 return ((F >> HexagonII::PredicatedPos) & HexagonII::PredicatedMask);
1698 bool HexagonInstrInfo::isPredicatedTrue(const MachineInstr *MI) const {
1699 const uint64_t F = MI->getDesc().TSFlags;
1701 assert(isPredicated(MI));
1702 return (!((F >> HexagonII::PredicatedFalsePos) &
1703 HexagonII::PredicatedFalseMask));
1706 bool HexagonInstrInfo::isPredicatedTrue(unsigned Opcode) const {
1707 const uint64_t F = get(Opcode).TSFlags;
1709 // Make sure that the instruction is predicated.
1710 assert((F>> HexagonII::PredicatedPos) & HexagonII::PredicatedMask);
1711 return (!((F >> HexagonII::PredicatedFalsePos) &
1712 HexagonII::PredicatedFalseMask));
1715 bool HexagonInstrInfo::isPredicatedNew(const MachineInstr *MI) const {
1716 const uint64_t F = MI->getDesc().TSFlags;
1718 assert(isPredicated(MI));
1719 return ((F >> HexagonII::PredicatedNewPos) & HexagonII::PredicatedNewMask);
1722 bool HexagonInstrInfo::isPredicatedNew(unsigned Opcode) const {
1723 const uint64_t F = get(Opcode).TSFlags;
1725 assert(isPredicated(Opcode));
1726 return ((F >> HexagonII::PredicatedNewPos) & HexagonII::PredicatedNewMask);
1730 HexagonInstrInfo::DefinesPredicate(MachineInstr *MI,
1731 std::vector<MachineOperand> &Pred) const {
1732 for (unsigned oper = 0; oper < MI->getNumOperands(); ++oper) {
1733 MachineOperand MO = MI->getOperand(oper);
1734 if (MO.isReg() && MO.isDef()) {
1735 const TargetRegisterClass* RC = RI.getMinimalPhysRegClass(MO.getReg());
1736 if (RC == &Hexagon::PredRegsRegClass) {
1748 SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
1749 const SmallVectorImpl<MachineOperand> &Pred2) const {
1756 // We indicate that we want to reverse the branch by
1757 // inserting a 0 at the beginning of the Cond vector.
1759 bool HexagonInstrInfo::
1760 ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
1761 if (!Cond.empty() && Cond[0].isImm() && Cond[0].getImm() == 0) {
1762 Cond.erase(Cond.begin());
1764 Cond.insert(Cond.begin(), MachineOperand::CreateImm(0));
1770 bool HexagonInstrInfo::
1771 isProfitableToDupForIfCvt(MachineBasicBlock &MBB,unsigned NumInstrs,
1772 const BranchProbability &Probability) const {
1773 return (NumInstrs <= 4);
1776 bool HexagonInstrInfo::isDeallocRet(const MachineInstr *MI) const {
1777 switch (MI->getOpcode()) {
1778 default: return false;
1779 case Hexagon::DEALLOC_RET_V4 :
1780 case Hexagon::DEALLOC_RET_cPt_V4 :
1781 case Hexagon::DEALLOC_RET_cNotPt_V4 :
1782 case Hexagon::DEALLOC_RET_cdnPnt_V4 :
1783 case Hexagon::DEALLOC_RET_cNotdnPnt_V4 :
1784 case Hexagon::DEALLOC_RET_cdnPt_V4 :
1785 case Hexagon::DEALLOC_RET_cNotdnPt_V4 :
1791 bool HexagonInstrInfo::
1792 isValidOffset(const int Opcode, const int Offset) const {
1793 // This function is to check whether the "Offset" is in the correct range of
1794 // the given "Opcode". If "Offset" is not in the correct range, "ADD_ri" is
1795 // inserted to calculate the final address. Due to this reason, the function
1796 // assumes that the "Offset" has correct alignment.
1797 // We used to assert if the offset was not properly aligned, however,
1798 // there are cases where a misaligned pointer recast can cause this
1799 // problem, and we need to allow for it. The front end warns of such
1800 // misaligns with respect to load size.
1804 case Hexagon::LDriw:
1805 case Hexagon::LDriw_indexed:
1806 case Hexagon::LDriw_f:
1807 case Hexagon::STriw_indexed:
1808 case Hexagon::STriw:
1809 case Hexagon::STriw_f:
1810 return (Offset >= Hexagon_MEMW_OFFSET_MIN) &&
1811 (Offset <= Hexagon_MEMW_OFFSET_MAX);
1813 case Hexagon::LDrid:
1814 case Hexagon::LDrid_indexed:
1815 case Hexagon::LDrid_f:
1816 case Hexagon::STrid:
1817 case Hexagon::STrid_indexed:
1818 case Hexagon::STrid_f:
1819 return (Offset >= Hexagon_MEMD_OFFSET_MIN) &&
1820 (Offset <= Hexagon_MEMD_OFFSET_MAX);
1822 case Hexagon::LDrih:
1823 case Hexagon::LDriuh:
1824 case Hexagon::STrih:
1825 return (Offset >= Hexagon_MEMH_OFFSET_MIN) &&
1826 (Offset <= Hexagon_MEMH_OFFSET_MAX);
1828 case Hexagon::LDrib:
1829 case Hexagon::STrib:
1830 case Hexagon::LDriub:
1831 return (Offset >= Hexagon_MEMB_OFFSET_MIN) &&
1832 (Offset <= Hexagon_MEMB_OFFSET_MAX);
1834 case Hexagon::ADD_ri:
1835 case Hexagon::TFR_FI:
1836 return (Offset >= Hexagon_ADDI_OFFSET_MIN) &&
1837 (Offset <= Hexagon_ADDI_OFFSET_MAX);
1839 case Hexagon::MemOPw_ADDi_V4 :
1840 case Hexagon::MemOPw_SUBi_V4 :
1841 case Hexagon::MemOPw_ADDr_V4 :
1842 case Hexagon::MemOPw_SUBr_V4 :
1843 case Hexagon::MemOPw_ANDr_V4 :
1844 case Hexagon::MemOPw_ORr_V4 :
1845 return (0 <= Offset && Offset <= 255);
1847 case Hexagon::MemOPh_ADDi_V4 :
1848 case Hexagon::MemOPh_SUBi_V4 :
1849 case Hexagon::MemOPh_ADDr_V4 :
1850 case Hexagon::MemOPh_SUBr_V4 :
1851 case Hexagon::MemOPh_ANDr_V4 :
1852 case Hexagon::MemOPh_ORr_V4 :
1853 return (0 <= Offset && Offset <= 127);
1855 case Hexagon::MemOPb_ADDi_V4 :
1856 case Hexagon::MemOPb_SUBi_V4 :
1857 case Hexagon::MemOPb_ADDr_V4 :
1858 case Hexagon::MemOPb_SUBr_V4 :
1859 case Hexagon::MemOPb_ANDr_V4 :
1860 case Hexagon::MemOPb_ORr_V4 :
1861 return (0 <= Offset && Offset <= 63);
1863 // LDri_pred and STriw_pred are pseudo operations, so it has to take offset of
1864 // any size. Later pass knows how to handle it.
1865 case Hexagon::STriw_pred:
1866 case Hexagon::LDriw_pred:
1869 case Hexagon::LOOP0_i:
1870 return isUInt<10>(Offset);
1872 // INLINEASM is very special.
1873 case Hexagon::INLINEASM:
1877 llvm_unreachable("No offset range is defined for this opcode. "
1878 "Please define it in the above switch statement!");
1883 // Check if the Offset is a valid auto-inc imm by Load/Store Type.
1885 bool HexagonInstrInfo::
1886 isValidAutoIncImm(const EVT VT, const int Offset) const {
1888 if (VT == MVT::i64) {
1889 return (Offset >= Hexagon_MEMD_AUTOINC_MIN &&
1890 Offset <= Hexagon_MEMD_AUTOINC_MAX &&
1891 (Offset & 0x7) == 0);
1893 if (VT == MVT::i32) {
1894 return (Offset >= Hexagon_MEMW_AUTOINC_MIN &&
1895 Offset <= Hexagon_MEMW_AUTOINC_MAX &&
1896 (Offset & 0x3) == 0);
1898 if (VT == MVT::i16) {
1899 return (Offset >= Hexagon_MEMH_AUTOINC_MIN &&
1900 Offset <= Hexagon_MEMH_AUTOINC_MAX &&
1901 (Offset & 0x1) == 0);
1903 if (VT == MVT::i8) {
1904 return (Offset >= Hexagon_MEMB_AUTOINC_MIN &&
1905 Offset <= Hexagon_MEMB_AUTOINC_MAX);
1907 llvm_unreachable("Not an auto-inc opc!");
1911 bool HexagonInstrInfo::
1912 isMemOp(const MachineInstr *MI) const {
1913 switch (MI->getOpcode())
1915 default: return false;
1916 case Hexagon::MemOPw_ADDi_V4 :
1917 case Hexagon::MemOPw_SUBi_V4 :
1918 case Hexagon::MemOPw_ADDr_V4 :
1919 case Hexagon::MemOPw_SUBr_V4 :
1920 case Hexagon::MemOPw_ANDr_V4 :
1921 case Hexagon::MemOPw_ORr_V4 :
1922 case Hexagon::MemOPh_ADDi_V4 :
1923 case Hexagon::MemOPh_SUBi_V4 :
1924 case Hexagon::MemOPh_ADDr_V4 :
1925 case Hexagon::MemOPh_SUBr_V4 :
1926 case Hexagon::MemOPh_ANDr_V4 :
1927 case Hexagon::MemOPh_ORr_V4 :
1928 case Hexagon::MemOPb_ADDi_V4 :
1929 case Hexagon::MemOPb_SUBi_V4 :
1930 case Hexagon::MemOPb_ADDr_V4 :
1931 case Hexagon::MemOPb_SUBr_V4 :
1932 case Hexagon::MemOPb_ANDr_V4 :
1933 case Hexagon::MemOPb_ORr_V4 :
1934 case Hexagon::MemOPb_SETBITi_V4:
1935 case Hexagon::MemOPh_SETBITi_V4:
1936 case Hexagon::MemOPw_SETBITi_V4:
1937 case Hexagon::MemOPb_CLRBITi_V4:
1938 case Hexagon::MemOPh_CLRBITi_V4:
1939 case Hexagon::MemOPw_CLRBITi_V4:
1946 bool HexagonInstrInfo::
1947 isSpillPredRegOp(const MachineInstr *MI) const {
1948 switch (MI->getOpcode()) {
1949 default: return false;
1950 case Hexagon::STriw_pred :
1951 case Hexagon::LDriw_pred :
1956 bool HexagonInstrInfo::isNewValueJumpCandidate(const MachineInstr *MI) const {
1957 switch (MI->getOpcode()) {
1958 default: return false;
1959 case Hexagon::CMPEQrr:
1960 case Hexagon::CMPEQri:
1961 case Hexagon::CMPGTrr:
1962 case Hexagon::CMPGTri:
1963 case Hexagon::CMPGTUrr:
1964 case Hexagon::CMPGTUri:
1969 bool HexagonInstrInfo::
1970 isConditionalTransfer (const MachineInstr *MI) const {
1971 switch (MI->getOpcode()) {
1972 default: return false;
1973 case Hexagon::TFR_cPt:
1974 case Hexagon::TFR_cNotPt:
1975 case Hexagon::TFRI_cPt:
1976 case Hexagon::TFRI_cNotPt:
1977 case Hexagon::TFR_cdnPt:
1978 case Hexagon::TFR_cdnNotPt:
1979 case Hexagon::TFRI_cdnPt:
1980 case Hexagon::TFRI_cdnNotPt:
1985 bool HexagonInstrInfo::isConditionalALU32 (const MachineInstr* MI) const {
1986 const HexagonRegisterInfo& QRI = getRegisterInfo();
1987 switch (MI->getOpcode())
1989 default: return false;
1990 case Hexagon::ADD_ri_cPt:
1991 case Hexagon::ADD_ri_cNotPt:
1992 case Hexagon::ADD_rr_cPt:
1993 case Hexagon::ADD_rr_cNotPt:
1994 case Hexagon::XOR_rr_cPt:
1995 case Hexagon::XOR_rr_cNotPt:
1996 case Hexagon::AND_rr_cPt:
1997 case Hexagon::AND_rr_cNotPt:
1998 case Hexagon::OR_rr_cPt:
1999 case Hexagon::OR_rr_cNotPt:
2000 case Hexagon::SUB_rr_cPt:
2001 case Hexagon::SUB_rr_cNotPt:
2002 case Hexagon::COMBINE_rr_cPt:
2003 case Hexagon::COMBINE_rr_cNotPt:
2005 case Hexagon::ASLH_cPt_V4:
2006 case Hexagon::ASLH_cNotPt_V4:
2007 case Hexagon::ASRH_cPt_V4:
2008 case Hexagon::ASRH_cNotPt_V4:
2009 case Hexagon::SXTB_cPt_V4:
2010 case Hexagon::SXTB_cNotPt_V4:
2011 case Hexagon::SXTH_cPt_V4:
2012 case Hexagon::SXTH_cNotPt_V4:
2013 case Hexagon::ZXTB_cPt_V4:
2014 case Hexagon::ZXTB_cNotPt_V4:
2015 case Hexagon::ZXTH_cPt_V4:
2016 case Hexagon::ZXTH_cNotPt_V4:
2017 return QRI.Subtarget.hasV4TOps();
2021 bool HexagonInstrInfo::
2022 isConditionalLoad (const MachineInstr* MI) const {
2023 const HexagonRegisterInfo& QRI = getRegisterInfo();
2024 switch (MI->getOpcode())
2026 default: return false;
2027 case Hexagon::LDrid_cPt :
2028 case Hexagon::LDrid_cNotPt :
2029 case Hexagon::LDrid_indexed_cPt :
2030 case Hexagon::LDrid_indexed_cNotPt :
2031 case Hexagon::LDriw_cPt :
2032 case Hexagon::LDriw_cNotPt :
2033 case Hexagon::LDriw_indexed_cPt :
2034 case Hexagon::LDriw_indexed_cNotPt :
2035 case Hexagon::LDrih_cPt :
2036 case Hexagon::LDrih_cNotPt :
2037 case Hexagon::LDrih_indexed_cPt :
2038 case Hexagon::LDrih_indexed_cNotPt :
2039 case Hexagon::LDrib_cPt :
2040 case Hexagon::LDrib_cNotPt :
2041 case Hexagon::LDrib_indexed_cPt :
2042 case Hexagon::LDrib_indexed_cNotPt :
2043 case Hexagon::LDriuh_cPt :
2044 case Hexagon::LDriuh_cNotPt :
2045 case Hexagon::LDriuh_indexed_cPt :
2046 case Hexagon::LDriuh_indexed_cNotPt :
2047 case Hexagon::LDriub_cPt :
2048 case Hexagon::LDriub_cNotPt :
2049 case Hexagon::LDriub_indexed_cPt :
2050 case Hexagon::LDriub_indexed_cNotPt :
2052 case Hexagon::POST_LDrid_cPt :
2053 case Hexagon::POST_LDrid_cNotPt :
2054 case Hexagon::POST_LDriw_cPt :
2055 case Hexagon::POST_LDriw_cNotPt :
2056 case Hexagon::POST_LDrih_cPt :
2057 case Hexagon::POST_LDrih_cNotPt :
2058 case Hexagon::POST_LDrib_cPt :
2059 case Hexagon::POST_LDrib_cNotPt :
2060 case Hexagon::POST_LDriuh_cPt :
2061 case Hexagon::POST_LDriuh_cNotPt :
2062 case Hexagon::POST_LDriub_cPt :
2063 case Hexagon::POST_LDriub_cNotPt :
2064 return QRI.Subtarget.hasV4TOps();
2065 case Hexagon::LDrid_indexed_shl_cPt_V4 :
2066 case Hexagon::LDrid_indexed_shl_cNotPt_V4 :
2067 case Hexagon::LDrib_indexed_shl_cPt_V4 :
2068 case Hexagon::LDrib_indexed_shl_cNotPt_V4 :
2069 case Hexagon::LDriub_indexed_shl_cPt_V4 :
2070 case Hexagon::LDriub_indexed_shl_cNotPt_V4 :
2071 case Hexagon::LDrih_indexed_shl_cPt_V4 :
2072 case Hexagon::LDrih_indexed_shl_cNotPt_V4 :
2073 case Hexagon::LDriuh_indexed_shl_cPt_V4 :
2074 case Hexagon::LDriuh_indexed_shl_cNotPt_V4 :
2075 case Hexagon::LDriw_indexed_shl_cPt_V4 :
2076 case Hexagon::LDriw_indexed_shl_cNotPt_V4 :
2077 return QRI.Subtarget.hasV4TOps();
2081 // Returns true if an instruction is a conditional store.
2083 // Note: It doesn't include conditional new-value stores as they can't be
2084 // converted to .new predicate.
2086 // p.new NV store [ if(p0.new)memw(R0+#0)=R2.new ]
2088 // / \ (not OK. it will cause new-value store to be
2089 // / X conditional on p0.new while R2 producer is
2092 // p.new store p.old NV store
2093 // [if(p0.new)memw(R0+#0)=R2] [if(p0)memw(R0+#0)=R2.new]
2099 // [if (p0)memw(R0+#0)=R2]
2101 // The above diagram shows the steps involoved in the conversion of a predicated
2102 // store instruction to its .new predicated new-value form.
2104 // The following set of instructions further explains the scenario where
2105 // conditional new-value store becomes invalid when promoted to .new predicate
2108 // { 1) if (p0) r0 = add(r1, r2)
2109 // 2) p0 = cmp.eq(r3, #0) }
2111 // 3) if (p0) memb(r1+#0) = r0 --> this instruction can't be grouped with
2112 // the first two instructions because in instr 1, r0 is conditional on old value
2113 // of p0 but its use in instr 3 is conditional on p0 modified by instr 2 which
2114 // is not valid for new-value stores.
2115 bool HexagonInstrInfo::
2116 isConditionalStore (const MachineInstr* MI) const {
2117 const HexagonRegisterInfo& QRI = getRegisterInfo();
2118 switch (MI->getOpcode())
2120 default: return false;
2121 case Hexagon::STrib_imm_cPt_V4 :
2122 case Hexagon::STrib_imm_cNotPt_V4 :
2123 case Hexagon::STrib_indexed_shl_cPt_V4 :
2124 case Hexagon::STrib_indexed_shl_cNotPt_V4 :
2125 case Hexagon::STrib_cPt :
2126 case Hexagon::STrib_cNotPt :
2127 case Hexagon::POST_STbri_cPt :
2128 case Hexagon::POST_STbri_cNotPt :
2129 case Hexagon::STrid_indexed_cPt :
2130 case Hexagon::STrid_indexed_cNotPt :
2131 case Hexagon::STrid_indexed_shl_cPt_V4 :
2132 case Hexagon::POST_STdri_cPt :
2133 case Hexagon::POST_STdri_cNotPt :
2134 case Hexagon::STrih_cPt :
2135 case Hexagon::STrih_cNotPt :
2136 case Hexagon::STrih_indexed_cPt :
2137 case Hexagon::STrih_indexed_cNotPt :
2138 case Hexagon::STrih_imm_cPt_V4 :
2139 case Hexagon::STrih_imm_cNotPt_V4 :
2140 case Hexagon::STrih_indexed_shl_cPt_V4 :
2141 case Hexagon::STrih_indexed_shl_cNotPt_V4 :
2142 case Hexagon::POST_SThri_cPt :
2143 case Hexagon::POST_SThri_cNotPt :
2144 case Hexagon::STriw_cPt :
2145 case Hexagon::STriw_cNotPt :
2146 case Hexagon::STriw_indexed_cPt :
2147 case Hexagon::STriw_indexed_cNotPt :
2148 case Hexagon::STriw_imm_cPt_V4 :
2149 case Hexagon::STriw_imm_cNotPt_V4 :
2150 case Hexagon::STriw_indexed_shl_cPt_V4 :
2151 case Hexagon::STriw_indexed_shl_cNotPt_V4 :
2152 case Hexagon::POST_STwri_cPt :
2153 case Hexagon::POST_STwri_cNotPt :
2154 return QRI.Subtarget.hasV4TOps();
2156 // V4 global address store before promoting to dot new.
2157 case Hexagon::STd_GP_cPt_V4 :
2158 case Hexagon::STd_GP_cNotPt_V4 :
2159 case Hexagon::STb_GP_cPt_V4 :
2160 case Hexagon::STb_GP_cNotPt_V4 :
2161 case Hexagon::STh_GP_cPt_V4 :
2162 case Hexagon::STh_GP_cNotPt_V4 :
2163 case Hexagon::STw_GP_cPt_V4 :
2164 case Hexagon::STw_GP_cNotPt_V4 :
2165 return QRI.Subtarget.hasV4TOps();
2167 // Predicated new value stores (i.e. if (p0) memw(..)=r0.new) are excluded
2168 // from the "Conditional Store" list. Because a predicated new value store
2169 // would NOT be promoted to a double dot new store. See diagram below:
2170 // This function returns yes for those stores that are predicated but not
2171 // yet promoted to predicate dot new instructions.
2173 // +---------------------+
2174 // /-----| if (p0) memw(..)=r0 |---------\~
2175 // || +---------------------+ ||
2176 // promote || /\ /\ || promote
2178 // \||/ demote || \||/
2180 // +-------------------------+ || +-------------------------+
2181 // | if (p0.new) memw(..)=r0 | || | if (p0) memw(..)=r0.new |
2182 // +-------------------------+ || +-------------------------+
2185 // promote || \/ NOT possible
2189 // +-----------------------------+
2190 // | if (p0.new) memw(..)=r0.new |
2191 // +-----------------------------+
2192 // Double Dot New Store
2198 bool HexagonInstrInfo::isNewValueJump(const MachineInstr *MI) const {
2199 if (isNewValue(MI) && isBranch(MI))
2204 bool HexagonInstrInfo::isNewValue(const MachineInstr* MI) const {
2205 const uint64_t F = MI->getDesc().TSFlags;
2206 return ((F >> HexagonII::NewValuePos) & HexagonII::NewValueMask);
2209 // Returns true, if any one of the operands is a dot new
2210 // insn, whether it is predicated dot new or register dot new.
2211 bool HexagonInstrInfo::isDotNewInst (const MachineInstr* MI) const {
2212 return (isNewValueInst(MI) ||
2213 (isPredicated(MI) && isPredicatedNew(MI)));
2216 unsigned HexagonInstrInfo::getAddrMode(const MachineInstr* MI) const {
2217 const uint64_t F = MI->getDesc().TSFlags;
2219 return((F >> HexagonII::AddrModePos) & HexagonII::AddrModeMask);
2222 /// immediateExtend - Changes the instruction in place to one using an immediate
2224 void HexagonInstrInfo::immediateExtend(MachineInstr *MI) const {
2225 assert((isExtendable(MI)||isConstExtended(MI)) &&
2226 "Instruction must be extendable");
2227 // Find which operand is extendable.
2228 short ExtOpNum = getCExtOpNum(MI);
2229 MachineOperand &MO = MI->getOperand(ExtOpNum);
2230 // This needs to be something we understand.
2231 assert((MO.isMBB() || MO.isImm()) &&
2232 "Branch with unknown extendable field type");
2233 // Mark given operand as extended.
2234 MO.addTargetFlag(HexagonII::HMOTF_ConstExtended);
2237 DFAPacketizer *HexagonInstrInfo::
2238 CreateTargetScheduleState(const TargetMachine *TM,
2239 const ScheduleDAG *DAG) const {
2240 const InstrItineraryData *II = TM->getInstrItineraryData();
2241 return TM->getSubtarget<HexagonGenSubtargetInfo>().createDFAPacketizer(II);
2244 bool HexagonInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
2245 const MachineBasicBlock *MBB,
2246 const MachineFunction &MF) const {
2247 // Debug info is never a scheduling boundary. It's necessary to be explicit
2248 // due to the special treatment of IT instructions below, otherwise a
2249 // dbg_value followed by an IT will result in the IT instruction being
2250 // considered a scheduling hazard, which is wrong. It should be the actual
2251 // instruction preceding the dbg_value instruction(s), just like it is
2252 // when debug info is not present.
2253 if (MI->isDebugValue())
2256 // Terminators and labels can't be scheduled around.
2257 if (MI->getDesc().isTerminator() || MI->isLabel() || MI->isInlineAsm())
2263 bool HexagonInstrInfo::isConstExtended(MachineInstr *MI) const {
2265 // Constant extenders are allowed only for V4 and above.
2266 if (!Subtarget.hasV4TOps())
2269 const uint64_t F = MI->getDesc().TSFlags;
2270 unsigned isExtended = (F >> HexagonII::ExtendedPos) & HexagonII::ExtendedMask;
2271 if (isExtended) // Instruction must be extended.
2274 unsigned isExtendable = (F >> HexagonII::ExtendablePos)
2275 & HexagonII::ExtendableMask;
2279 short ExtOpNum = getCExtOpNum(MI);
2280 const MachineOperand &MO = MI->getOperand(ExtOpNum);
2281 // Use MO operand flags to determine if MO
2282 // has the HMOTF_ConstExtended flag set.
2283 if (MO.getTargetFlags() && HexagonII::HMOTF_ConstExtended)
2285 // If this is a Machine BB address we are talking about, and it is
2286 // not marked as extended, say so.
2290 // We could be using an instruction with an extendable immediate and shoehorn
2291 // a global address into it. If it is a global address it will be constant
2292 // extended. We do this for COMBINE.
2293 // We currently only handle isGlobal() because it is the only kind of
2294 // object we are going to end up with here for now.
2295 // In the future we probably should add isSymbol(), etc.
2296 if (MO.isGlobal() || MO.isSymbol())
2299 // If the extendable operand is not 'Immediate' type, the instruction should
2300 // have 'isExtended' flag set.
2301 assert(MO.isImm() && "Extendable operand must be Immediate type");
2303 int MinValue = getMinValue(MI);
2304 int MaxValue = getMaxValue(MI);
2305 int ImmValue = MO.getImm();
2307 return (ImmValue < MinValue || ImmValue > MaxValue);
2310 // Returns the opcode to use when converting MI, which is a conditional jump,
2311 // into a conditional instruction which uses the .new value of the predicate.
2312 // We also use branch probabilities to add a hint to the jump.
2314 HexagonInstrInfo::getDotNewPredJumpOp(MachineInstr *MI,
2316 MachineBranchProbabilityInfo *MBPI) const {
2318 // We assume that block can have at most two successors.
2320 MachineBasicBlock *Src = MI->getParent();
2321 MachineOperand *BrTarget = &MI->getOperand(1);
2322 MachineBasicBlock *Dst = BrTarget->getMBB();
2324 const BranchProbability Prediction = MBPI->getEdgeProbability(Src, Dst);
2325 if (Prediction >= BranchProbability(1,2))
2328 switch (MI->getOpcode()) {
2329 case Hexagon::JMP_t:
2330 return taken ? Hexagon::JMP_tnew_t : Hexagon::JMP_tnew_nt;
2331 case Hexagon::JMP_f:
2332 return taken ? Hexagon::JMP_fnew_t : Hexagon::JMP_fnew_nt;
2335 llvm_unreachable("Unexpected jump instruction.");
2338 // Returns true if a particular operand is extendable for an instruction.
2339 bool HexagonInstrInfo::isOperandExtended(const MachineInstr *MI,
2340 unsigned short OperandNum) const {
2341 // Constant extenders are allowed only for V4 and above.
2342 if (!Subtarget.hasV4TOps())
2345 const uint64_t F = MI->getDesc().TSFlags;
2347 return ((F >> HexagonII::ExtendableOpPos) & HexagonII::ExtendableOpMask)
2351 // Returns Operand Index for the constant extended instruction.
2352 unsigned short HexagonInstrInfo::getCExtOpNum(const MachineInstr *MI) const {
2353 const uint64_t F = MI->getDesc().TSFlags;
2354 return ((F >> HexagonII::ExtendableOpPos) & HexagonII::ExtendableOpMask);
2357 // Returns the min value that doesn't need to be extended.
2358 int HexagonInstrInfo::getMinValue(const MachineInstr *MI) const {
2359 const uint64_t F = MI->getDesc().TSFlags;
2360 unsigned isSigned = (F >> HexagonII::ExtentSignedPos)
2361 & HexagonII::ExtentSignedMask;
2362 unsigned bits = (F >> HexagonII::ExtentBitsPos)
2363 & HexagonII::ExtentBitsMask;
2365 if (isSigned) // if value is signed
2366 return -1 << (bits - 1);
2371 // Returns the max value that doesn't need to be extended.
2372 int HexagonInstrInfo::getMaxValue(const MachineInstr *MI) const {
2373 const uint64_t F = MI->getDesc().TSFlags;
2374 unsigned isSigned = (F >> HexagonII::ExtentSignedPos)
2375 & HexagonII::ExtentSignedMask;
2376 unsigned bits = (F >> HexagonII::ExtentBitsPos)
2377 & HexagonII::ExtentBitsMask;
2379 if (isSigned) // if value is signed
2380 return ~(-1 << (bits - 1));
2382 return ~(-1 << bits);
2385 // Returns true if an instruction can be converted into a non-extended
2386 // equivalent instruction.
2387 bool HexagonInstrInfo::NonExtEquivalentExists (const MachineInstr *MI) const {
2390 // Check if the instruction has a register form that uses register in place
2391 // of the extended operand, if so return that as the non-extended form.
2392 if (Hexagon::getRegForm(MI->getOpcode()) >= 0)
2395 if (MI->getDesc().mayLoad() || MI->getDesc().mayStore()) {
2396 // Check addressing mode and retreive non-ext equivalent instruction.
2398 switch (getAddrMode(MI)) {
2399 case HexagonII::Absolute :
2400 // Load/store with absolute addressing mode can be converted into
2401 // base+offset mode.
2402 NonExtOpcode = Hexagon::getBasedWithImmOffset(MI->getOpcode());
2404 case HexagonII::BaseImmOffset :
2405 // Load/store with base+offset addressing mode can be converted into
2406 // base+register offset addressing mode. However left shift operand should
2408 NonExtOpcode = Hexagon::getBaseWithRegOffset(MI->getOpcode());
2413 if (NonExtOpcode < 0)
2420 // Returns opcode of the non-extended equivalent instruction.
2421 short HexagonInstrInfo::getNonExtOpcode (const MachineInstr *MI) const {
2423 // Check if the instruction has a register form that uses register in place
2424 // of the extended operand, if so return that as the non-extended form.
2425 short NonExtOpcode = Hexagon::getRegForm(MI->getOpcode());
2426 if (NonExtOpcode >= 0)
2427 return NonExtOpcode;
2429 if (MI->getDesc().mayLoad() || MI->getDesc().mayStore()) {
2430 // Check addressing mode and retreive non-ext equivalent instruction.
2431 switch (getAddrMode(MI)) {
2432 case HexagonII::Absolute :
2433 return Hexagon::getBasedWithImmOffset(MI->getOpcode());
2434 case HexagonII::BaseImmOffset :
2435 return Hexagon::getBaseWithRegOffset(MI->getOpcode());
2443 bool HexagonInstrInfo::PredOpcodeHasJMP_c(Opcode_t Opcode) const {
2444 return (Opcode == Hexagon::JMP_t) ||
2445 (Opcode == Hexagon::JMP_f) ||
2446 (Opcode == Hexagon::JMP_tnew_t) ||
2447 (Opcode == Hexagon::JMP_fnew_t) ||
2448 (Opcode == Hexagon::JMP_tnew_nt) ||
2449 (Opcode == Hexagon::JMP_fnew_nt);
2452 bool HexagonInstrInfo::PredOpcodeHasNot(Opcode_t Opcode) const {
2453 return (Opcode == Hexagon::JMP_f) ||
2454 (Opcode == Hexagon::JMP_fnew_t) ||
2455 (Opcode == Hexagon::JMP_fnew_nt);