//===- ARMBaseInstrInfo.cpp - ARM Instruction Information -------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Base ARM implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//
#include "ARMBaseInstrInfo.h"
#include "ARMConstantPoolValue.h"
#include "ARMHazardRecognizer.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMRegisterInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalValue.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/ADT/STLExtras.h"

#define GET_INSTRINFO_CTOR
#include "ARMGenInstrInfo.inc"

using namespace llvm;

static cl::opt<bool>
EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
               cl::desc("Enable ARM 2-addr to 3-addr conv"));

static cl::opt<bool>
WidenVMOVS("widen-vmovs", cl::Hidden, cl::init(true),
           cl::desc("Widen ARM vmovs to vmovd when possible"));

/// ARM_MLxEntry - Record information about MLA / MLS instructions.
struct ARM_MLxEntry {
  unsigned MLxOpc;     // MLA / MLS opcode
  unsigned MulOpc;     // Expanded multiplication opcode
  unsigned AddSubOpc;  // Expanded add / sub opcode
  bool NegAcc;         // True if the acc is negated before the add / sub.
  bool HasLane;        // True if instruction has an extra "lane" operand.
};
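
// For example, the first entry of the table below records that a VMLAS
// (FP scalar multiply-accumulate, d = d + a * b) may be expanded into a
// VMULS followed by a VADDS, with no accumulator negation and no lane
// operand.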
static const ARM_MLxEntry ARM_MLxTable[] = {
  // MLxOpc,          MulOpc,           AddSubOpc,       NegAcc, HasLane
  // fp scalar ops
  { ARM::VMLAS,       ARM::VMULS,       ARM::VADDS,      false,  false },
  { ARM::VMLSS,       ARM::VMULS,       ARM::VSUBS,      false,  false },
  { ARM::VMLAD,       ARM::VMULD,       ARM::VADDD,      false,  false },
  { ARM::VMLSD,       ARM::VMULD,       ARM::VSUBD,      false,  false },
  { ARM::VNMLAS,      ARM::VNMULS,      ARM::VSUBS,      true,   false },
  { ARM::VNMLSS,      ARM::VMULS,       ARM::VSUBS,      true,   false },
  { ARM::VNMLAD,      ARM::VNMULD,      ARM::VSUBD,      true,   false },
  { ARM::VNMLSD,      ARM::VMULD,       ARM::VSUBD,      true,   false },

  // fp SIMD ops
  { ARM::VMLAfd,      ARM::VMULfd,      ARM::VADDfd,     false,  false },
  { ARM::VMLSfd,      ARM::VMULfd,      ARM::VSUBfd,     false,  false },
  { ARM::VMLAfq,      ARM::VMULfq,      ARM::VADDfq,     false,  false },
  { ARM::VMLSfq,      ARM::VMULfq,      ARM::VSUBfq,     false,  false },
  { ARM::VMLAslfd,    ARM::VMULslfd,    ARM::VADDfd,     false,  true  },
  { ARM::VMLSslfd,    ARM::VMULslfd,    ARM::VSUBfd,     false,  true  },
  { ARM::VMLAslfq,    ARM::VMULslfq,    ARM::VADDfq,     false,  true  },
  { ARM::VMLSslfq,    ARM::VMULslfq,    ARM::VSUBfq,     false,  true  },
};

ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget& STI)
  : ARMGenInstrInfo(ARM::ADJCALLSTACKDOWN, ARM::ADJCALLSTACKUP),
    Subtarget(STI) {
  for (unsigned i = 0, e = array_lengthof(ARM_MLxTable); i != e; ++i) {
    if (!MLxEntryMap.insert(std::make_pair(ARM_MLxTable[i].MLxOpc, i)).second)
      assert(false && "Duplicated entries?");
    MLxHazardOpcodes.insert(ARM_MLxTable[i].AddSubOpc);
    MLxHazardOpcodes.insert(ARM_MLxTable[i].MulOpc);
  }
}

// Use a ScoreboardHazardRecognizer for prepass ARM scheduling. TargetInstrImpl
// currently defaults to no prepass hazard recognizer.
ScheduleHazardRecognizer *ARMBaseInstrInfo::
CreateTargetHazardRecognizer(const TargetMachine *TM,
                             const ScheduleDAG *DAG) const {
  if (usePreRAHazardRecognizer()) {
    const InstrItineraryData *II = TM->getInstrItineraryData();
    return new ScoreboardHazardRecognizer(II, DAG, "pre-RA-sched");
  }
  return TargetInstrInfoImpl::CreateTargetHazardRecognizer(TM, DAG);
}

ScheduleHazardRecognizer *ARMBaseInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  if (Subtarget.isThumb2() || Subtarget.hasVFP2())
    return (ScheduleHazardRecognizer *)
      new ARMHazardRecognizer(II, *this, getRegisterInfo(), Subtarget, DAG);
  return TargetInstrInfoImpl::CreateTargetPostRAHazardRecognizer(II, DAG);
}

MachineInstr *
ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                        MachineBasicBlock::iterator &MBBI,
                                        LiveVariables *LV) const {
  // FIXME: Thumb2 support.
  if (!EnableARM3Addr)
    return NULL;

  MachineInstr *MI = MBBI;
  MachineFunction &MF = *MI->getParent()->getParent();
  uint64_t TSFlags = MI->getDesc().TSFlags;
  bool isPre = false;
  switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
  default: return NULL;
  case ARMII::IndexModePre:
    isPre = true;
    break;
  case ARMII::IndexModePost:
    break;
  }

  // Try splitting an indexed load/store to an un-indexed one plus an add/sub
  // operation.
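  // For instance (illustrative operands): a post-indexed "LDR r0, [r1], #4"
  // splits into an unindexed "LDR r0, [r1]" followed by "ADD r1, r1, #4".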
  unsigned MemOpc = getUnindexedOpcode(MI->getOpcode());
  if (MemOpc == 0)
    return NULL;

  MachineInstr *UpdateMI = NULL;
  MachineInstr *MemMI = NULL;
  unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
  const MCInstrDesc &MCID = MI->getDesc();
  unsigned NumOps = MCID.getNumOperands();
  bool isLoad = !MI->mayStore();
  const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0);
  const MachineOperand &Base = MI->getOperand(2);
  const MachineOperand &Offset = MI->getOperand(NumOps-3);
  unsigned WBReg = WB.getReg();
  unsigned BaseReg = Base.getReg();
  unsigned OffReg = Offset.getReg();
  unsigned OffImm = MI->getOperand(NumOps-2).getImm();
  ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI->getOperand(NumOps-1).getImm();
  switch (AddrMode) {
  default: llvm_unreachable("Unknown indexed op!");
  case ARMII::AddrMode2: {
    bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM2Offset(OffImm);
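    // Note: an ARM so_imm is an 8-bit value rotated right by an even amount,
    // so (illustrative values) 0x3FC can be encoded while 0x101 cannot.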
    if (OffReg == 0) {
      if (ARM_AM::getSOImmVal(Amt) == -1)
        // Can't encode it in a so_imm operand. This transformation will
        // add more than 1 instruction. Abandon!
        return NULL;
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    } else if (Amt != 0) {
      ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
      unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrsi : ARM::ADDrsi), WBReg)
        .addReg(BaseReg).addReg(OffReg).addReg(0).addImm(SOOpc)
        .addImm(Pred).addReg(0).addReg(0);
    } else
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  case ARMII::AddrMode3 : {
    bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM3Offset(OffImm);
    if (OffReg == 0)
      // Immediate is 8-bits. It's guaranteed to fit in a so_imm operand.
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    else
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  }

  std::vector<MachineInstr*> NewMIs;
  if (isPre) {
    if (isLoad)
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc), MI->getOperand(0).getReg())
        .addReg(WBReg).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
    NewMIs.push_back(MemMI);
    NewMIs.push_back(UpdateMI);
  } else {
    if (isLoad)
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc), MI->getOperand(0).getReg())
        .addReg(BaseReg).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
    if (WB.isDead())
      UpdateMI->getOperand(0).setIsDead();
    NewMIs.push_back(UpdateMI);
    NewMIs.push_back(MemMI);
  }

  // Transfer LiveVariables states, kill / dead info.
  if (LV) {
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
        unsigned Reg = MO.getReg();

        LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
        if (MO.isDef()) {
          MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
          if (MO.isDead())
            LV->addVirtualRegisterDead(Reg, NewMI);
        }
        if (MO.isUse() && MO.isKill()) {
          for (unsigned j = 0; j < 2; ++j) {
            // Look at the two new MI's in reverse order.
            MachineInstr *NewMI = NewMIs[j];
            if (!NewMI->readsRegister(Reg))
              continue;
            LV->addVirtualRegisterKilled(Reg, NewMI);
            if (VI.removeKill(MI))
              VI.Kills.push_back(NewMI);
            break;
          }
        }
      }
    }
  }

  MFI->insert(MBBI, NewMIs[1]);
  MFI->insert(MBBI, NewMIs[0]);
  return NewMIs[0];
}

bool
ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
                                MachineBasicBlock *&FBB,
                                SmallVectorImpl<MachineOperand> &Cond,
                                bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return false;
    --I;
  }
  if (!isUnpredicatedTerminator(I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      TBB = LastInst->getOperand(0).getMBB();
      Cond.push_back(LastInst->getOperand(1));
      Cond.push_back(LastInst->getOperand(2));
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
    while (isUncondBranchOpcode(SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
        // Return now; the only terminator is an unconditional branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      }
      SecondLastInst = I;
      SecondLastOpc = SecondLastInst->getOpcode();
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    Cond.push_back(SecondLastInst->getOperand(1));
    Cond.push_back(SecondLastInst->getOperand(2));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed, so remove it.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // ...likewise if it ends with a branch table followed by an unconditional
  // branch. The branch folder can create these, and we must get rid of them
  // for correctness of Thumb constant islands.
  if ((isJumpTableBranchOpcode(SecondLastOpc) ||
       isIndirectBranchOpcode(SecondLastOpc)) &&
      isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}

unsigned ARMBaseInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return 0;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return 0;
    --I;
  }
  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) return 1;
  --I;
  if (!isCondBranchOpcode(I->getOpcode()))
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}

unsigned
ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                               MachineBasicBlock *FBB,
                               const SmallVectorImpl<MachineOperand> &Cond,
                               DebugLoc DL) const {
  ARMFunctionInfo *AFI = MBB.getParent()->getInfo<ARMFunctionInfo>();
  int BOpc   = !AFI->isThumbFunction()
    ? ARM::B : (AFI->isThumb2Function() ? ARM::t2B : ARM::tB);
  int BccOpc = !AFI->isThumbFunction()
    ? ARM::Bcc : (AFI->isThumb2Function() ? ARM::t2Bcc : ARM::tBcc);
  bool isThumb = AFI->isThumbFunction() || AFI->isThumb2Function();

  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "ARM branch conditions have two components!");

  if (FBB == 0) {
    if (Cond.empty()) { // Unconditional branch?
      if (isThumb)
        BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB).addImm(ARMCC::AL).addReg(0);
      else
        BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB);
    } else
      BuildMI(&MBB, DL, get(BccOpc)).addMBB(TBB)
        .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
    return 1;
  }

  // Two-way conditional branch.
  BuildMI(&MBB, DL, get(BccOpc)).addMBB(TBB)
    .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
  if (isThumb)
    BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB).addImm(ARMCC::AL).addReg(0);
  else
    BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB);
  return 2;
}

bool ARMBaseInstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
  Cond[0].setImm(ARMCC::getOppositeCondition(CC));
  return false;
}

bool ARMBaseInstrInfo::isPredicated(const MachineInstr *MI) const {
  if (MI->isBundle()) {
    MachineBasicBlock::const_instr_iterator I = MI;
    MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();
    while (++I != E && I->isInsideBundle()) {
      int PIdx = I->findFirstPredOperandIdx();
      if (PIdx != -1 && I->getOperand(PIdx).getImm() != ARMCC::AL)
        return true;
    }
    return false;
  }

  int PIdx = MI->findFirstPredOperandIdx();
  return PIdx != -1 && MI->getOperand(PIdx).getImm() != ARMCC::AL;
}

bool ARMBaseInstrInfo::
PredicateInstruction(MachineInstr *MI,
                     const SmallVectorImpl<MachineOperand> &Pred) const {
  unsigned Opc = MI->getOpcode();
  if (isUncondBranchOpcode(Opc)) {
    MI->setDesc(get(getMatchingCondBranchOpcode(Opc)));
    MI->addOperand(MachineOperand::CreateImm(Pred[0].getImm()));
    MI->addOperand(MachineOperand::CreateReg(Pred[1].getReg(), false));
    return true;
  }

  int PIdx = MI->findFirstPredOperandIdx();
  if (PIdx != -1) {
    MachineOperand &PMO = MI->getOperand(PIdx);
    PMO.setImm(Pred[0].getImm());
    MI->getOperand(PIdx+1).setReg(Pred[1].getReg());
    return true;
  }
  return false;
}

bool ARMBaseInstrInfo::
SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                  const SmallVectorImpl<MachineOperand> &Pred2) const {
  if (Pred1.size() > 2 || Pred2.size() > 2)
    return false;

  ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm();
  ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm();
  if (CC1 == CC2)
    return true;

  switch (CC1) {
  default:
    return false;
  case ARMCC::AL:
    return true;
  case ARMCC::HS:
    return CC2 == ARMCC::HI;
  case ARMCC::LS:
    return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
  case ARMCC::GE:
    return CC2 == ARMCC::GT;
  case ARMCC::LE:
    return CC2 == ARMCC::LT;
  }
}

bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI,
                                    std::vector<MachineOperand> &Pred) const {
  // FIXME: This confuses implicit_def with optional CPSR def.
  const MCInstrDesc &MCID = MI->getDesc();
  if (!MCID.getImplicitDefs() && !MI->hasOptionalDef())
    return false;

  bool Found = false;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) {
      Pred.push_back(MO);
      Found = true;
    }
  }

  return Found;
}

/// isPredicable - Return true if the specified instruction can be predicated.
/// By default, this returns true for every instruction with a
/// PredicateOperand.
bool ARMBaseInstrInfo::isPredicable(MachineInstr *MI) const {
  if (!MI->isPredicable())
    return false;

  if ((MI->getDesc().TSFlags & ARMII::DomainMask) == ARMII::DomainNEON) {
    ARMFunctionInfo *AFI =
      MI->getParent()->getParent()->getInfo<ARMFunctionInfo>();
    return AFI->isThumb2Function();
  }
  return true;
}

/// FIXME: Works around a gcc miscompilation with -fstrict-aliasing.
LLVM_ATTRIBUTE_NOINLINE
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI);
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI) {
  assert(JTI < JT.size());
  return JT[JTI].MBBs.size();
}

/// GetInstSize - Return the size of the specified MachineInstr.
///
unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
  const MachineBasicBlock &MBB = *MI->getParent();
  const MachineFunction *MF = MBB.getParent();
  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();

  const MCInstrDesc &MCID = MI->getDesc();
  if (MCID.getSize())
    return MCID.getSize();

  // If this machine instr is an inline asm, measure it.
  if (MI->getOpcode() == ARM::INLINEASM)
    return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *MAI);
  if (MI->isLabel())
    return 0;
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
  case TargetOpcode::PROLOG_LABEL:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::DBG_VALUE:
    return 0;
  case TargetOpcode::BUNDLE:
    return getInstBundleLength(MI);
  case ARM::MOVi16_ga_pcrel:
  case ARM::MOVTi16_ga_pcrel:
  case ARM::t2MOVi16_ga_pcrel:
  case ARM::t2MOVTi16_ga_pcrel:
    return 4;
  case ARM::MOVi32imm:
  case ARM::t2MOVi32imm:
    return 8;
  case ARM::CONSTPOOL_ENTRY:
    // If this machine instr is a constant pool entry, its size is recorded as
    // operand #2.
    return MI->getOperand(2).getImm();
  case ARM::Int_eh_sjlj_longjmp:
    return 16;
  case ARM::tInt_eh_sjlj_longjmp:
    return 10;
  case ARM::Int_eh_sjlj_setjmp:
  case ARM::Int_eh_sjlj_setjmp_nofp:
    return 20;
  case ARM::tInt_eh_sjlj_setjmp:
  case ARM::t2Int_eh_sjlj_setjmp:
  case ARM::t2Int_eh_sjlj_setjmp_nofp:
    return 12;
  case ARM::BR_JTr:
  case ARM::BR_JTm:
  case ARM::BR_JTadd:
  case ARM::tBR_JTr:
  case ARM::t2BR_JT:
  case ARM::t2TBB_JT:
  case ARM::t2TBH_JT: {
    // These are jumptable branches, i.e. a branch followed by an inlined
    // jumptable. The size is 4 + 4 * number of entries. For TBB, each
    // entry is one byte; for TBH, two bytes each.
    unsigned EntrySize = (Opc == ARM::t2TBB_JT)
      ? 1 : ((Opc == ARM::t2TBH_JT) ? 2 : 4);
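    // For instance (illustrative): a t2TBB_JT with 8 entries occupies 4 bytes
    // for the branch itself plus 8 * 1 bytes of table, i.e. 12 bytes; the
    // same table as t2TBH_JT would be 4 + 8 * 2 = 20 bytes.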
    unsigned NumOps = MCID.getNumOperands();
    MachineOperand JTOP =
      MI->getOperand(NumOps - (MI->isPredicable() ? 3 : 2));
    unsigned JTI = JTOP.getIndex();
    const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
    assert(MJTI != 0);
    const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
    assert(JTI < JT.size());
    // Thumb instructions are 2 byte aligned, but JT entries are 4 byte
    // aligned. The assembler / linker may add 2 byte padding just before
    // the JT entries. The size does not include this padding; the
    // constant islands pass does separate bookkeeping for it.
    // FIXME: If we know the size of the function is less than (1 << 16) * 2
    // bytes, we can use 16-bit entries instead. Then there won't be an
    // alignment issue.
    unsigned InstSize = (Opc == ARM::tBR_JTr || Opc == ARM::t2BR_JT) ? 2 : 4;
    unsigned NumEntries = getNumJTEntries(JT, JTI);
    if (Opc == ARM::t2TBB_JT && (NumEntries & 1))
      // Make sure the instruction that follows TBB is 2-byte aligned.
      // FIXME: Constant island pass should insert an "ALIGN" instruction
      // instead.
      ++NumEntries;
    return NumEntries * EntrySize + InstSize;
  }
  default:
    // Otherwise, pseudo-instruction sizes are zero.
    return 0;
  }
}

unsigned ARMBaseInstrInfo::getInstBundleLength(const MachineInstr *MI) const {
  unsigned Size = 0;
  MachineBasicBlock::const_instr_iterator I = MI;
  MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();
  while (++I != E && I->isInsideBundle()) {
    assert(!I->isBundle() && "No nested bundle!");
    Size += GetInstSizeInBytes(&*I);
  }
  return Size;
}

void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I, DebugLoc DL,
                                   unsigned DestReg, unsigned SrcReg,
                                   bool KillSrc) const {
  bool GPRDest = ARM::GPRRegClass.contains(DestReg);
  bool GPRSrc  = ARM::GPRRegClass.contains(SrcReg);

  if (GPRDest && GPRSrc) {
    AddDefaultCC(AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::MOVr), DestReg)
                                  .addReg(SrcReg, getKillRegState(KillSrc))));
    return;
  }

  bool SPRDest = ARM::SPRRegClass.contains(DestReg);
  bool SPRSrc  = ARM::SPRRegClass.contains(SrcReg);

  unsigned Opc = 0;
  if (SPRDest && SPRSrc)
    Opc = ARM::VMOVS;
  else if (GPRDest && SPRSrc)
    Opc = ARM::VMOVRS;
  else if (SPRDest && GPRSrc)
    Opc = ARM::VMOVSR;
  else if (ARM::DPRRegClass.contains(DestReg, SrcReg))
    Opc = ARM::VMOVD;
  else if (ARM::QPRRegClass.contains(DestReg, SrcReg))
    Opc = ARM::VORRq;

  if (Opc) {
    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opc), DestReg);
    MIB.addReg(SrcReg, getKillRegState(KillSrc));
    if (Opc == ARM::VORRq)
      MIB.addReg(SrcReg, getKillRegState(KillSrc));
    AddDefaultPred(MIB);
    return;
  }

  // Generate instructions for VMOVQQ and VMOVQQQQ pseudos in place.
  if (ARM::QQPRRegClass.contains(DestReg, SrcReg) ||
      ARM::QQQQPRRegClass.contains(DestReg, SrcReg)) {
    const TargetRegisterInfo *TRI = &getRegisterInfo();
    assert(ARM::qsub_0 + 3 == ARM::qsub_3 && "Expected contiguous enum.");
    unsigned EndSubReg = ARM::QQPRRegClass.contains(DestReg, SrcReg) ?
      ARM::qsub_1 : ARM::qsub_3;
    for (unsigned i = ARM::qsub_0, e = EndSubReg + 1; i != e; ++i) {
      unsigned Dst = TRI->getSubReg(DestReg, i);
      unsigned Src = TRI->getSubReg(SrcReg, i);
      MachineInstrBuilder Mov =
        AddDefaultPred(BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VORRq))
                         .addReg(Dst, RegState::Define)
                         .addReg(Src, getKillRegState(KillSrc))
                         .addReg(Src, getKillRegState(KillSrc)));
      if (i == EndSubReg) {
        Mov->addRegisterDefined(DestReg, TRI);
        if (KillSrc)
          Mov->addRegisterKilled(SrcReg, TRI);
      }
    }
    return;
  }
  llvm_unreachable("Impossible reg-to-reg copy");
}

static const
MachineInstrBuilder &AddDReg(MachineInstrBuilder &MIB,
                             unsigned Reg, unsigned SubIdx, unsigned State,
                             const TargetRegisterInfo *TRI) {
  if (!SubIdx)
    return MIB.addReg(Reg, State);

  if (TargetRegisterInfo::isPhysicalRegister(Reg))
    return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
  return MIB.addReg(Reg, State, SubIdx);
}

void ARMBaseInstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    unsigned SrcReg, bool isKill, int FI,
                    const TargetRegisterClass *RC,
                    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);

  MachineMemOperand *MMO =
    MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
                            MachineMemOperand::MOStore,
                            MFI.getObjectSize(FI),
                            Align);

  switch (RC->getSize()) {
  case 4:
    if (ARM::GPRRegClass.hasSubClassEq(RC)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STRi12))
                     .addReg(SrcReg, getKillRegState(isKill))
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    } else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRS))
                     .addReg(SrcReg, getKillRegState(isKill))
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 8:
    if (ARM::DPRRegClass.hasSubClassEq(RC)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRD))
                     .addReg(SrcReg, getKillRegState(isKill))
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 16:
    if (ARM::QPRRegClass.hasSubClassEq(RC)) {
      // Use aligned spills if the stack can be realigned.
      if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1q64Pseudo))
                       .addFrameIndex(FI).addImm(16)
                       .addReg(SrcReg, getKillRegState(isKill))
                       .addMemOperand(MMO));
      } else {
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMQIA))
                       .addReg(SrcReg, getKillRegState(isKill))
                       .addFrameIndex(FI)
                       .addMemOperand(MMO));
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 32:
    if (ARM::QQPRRegClass.hasSubClassEq(RC)) {
      if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
        // FIXME: It's possible to only store part of the QQ register if the
        // spilled def has a sub-register index.
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1d64QPseudo))
                       .addFrameIndex(FI).addImm(16)
                       .addReg(SrcReg, getKillRegState(isKill))
                       .addMemOperand(MMO));
      } else {
        MachineInstrBuilder MIB =
          AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMDIA))
                         .addFrameIndex(FI))
                         .addMemOperand(MMO);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
              AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 64:
    if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
      MachineInstrBuilder MIB =
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMDIA))
                       .addFrameIndex(FI))
                       .addMemOperand(MMO);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_4, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_5, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_6, 0, TRI);
            AddDReg(MIB, SrcReg, ARM::dsub_7, 0, TRI);
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  default:
    llvm_unreachable("Unknown reg class!");
  }
}

unsigned
ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                     int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::STRrs:
  case ARM::t2STRs: // FIXME: don't use t2STRs to access frame.
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isReg() &&
        MI->getOperand(3).isImm() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::STRi12:
  case ARM::t2STRi12:
  case ARM::tSTRspi:
  case ARM::VSTRD:
  case ARM::VSTRS:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::VST1q64Pseudo:
    if (MI->getOperand(0).isFI() &&
        MI->getOperand(2).getSubReg() == 0) {
      FrameIndex = MI->getOperand(0).getIndex();
      return MI->getOperand(2).getReg();
    }
    break;
  case ARM::VSTMQIA:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(0).getSubReg() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

unsigned ARMBaseInstrInfo::isStoreToStackSlotPostFE(const MachineInstr *MI,
                                                    int &FrameIndex) const {
  const MachineMemOperand *Dummy;
  return MI->mayStore() && hasStoreToStackSlot(MI, Dummy, FrameIndex);
}

void ARMBaseInstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                     unsigned DestReg, int FI,
                     const TargetRegisterClass *RC,
                     const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
                            MachineMemOperand::MOLoad,
                            MFI.getObjectSize(FI),
                            Align);

  switch (RC->getSize()) {
  case 4:
    if (ARM::GPRRegClass.hasSubClassEq(RC)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::LDRi12), DestReg)
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    } else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRS), DestReg)
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 8:
    if (ARM::DPRRegClass.hasSubClassEq(RC)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRD), DestReg)
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 16:
    if (ARM::QPRRegClass.hasSubClassEq(RC)) {
      if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1q64Pseudo), DestReg)
                       .addFrameIndex(FI).addImm(16)
                       .addMemOperand(MMO));
      } else {
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMQIA), DestReg)
                       .addFrameIndex(FI)
                       .addMemOperand(MMO));
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 32:
    if (ARM::QQPRRegClass.hasSubClassEq(RC)) {
      if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1d64QPseudo), DestReg)
                       .addFrameIndex(FI).addImm(16)
                       .addMemOperand(MMO));
      } else {
        MachineInstrBuilder MIB =
          AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
                         .addFrameIndex(FI))
                         .addMemOperand(MMO);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::Define, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::Define, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::Define, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::Define, TRI);
        MIB.addReg(DestReg, RegState::Define | RegState::Implicit);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 64:
    if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
      MachineInstrBuilder MIB =
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
                       .addFrameIndex(FI))
                       .addMemOperand(MMO);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::Define, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::Define, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::Define, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::Define, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_4, RegState::Define, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_5, RegState::Define, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_6, RegState::Define, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_7, RegState::Define, TRI);
      MIB.addReg(DestReg, RegState::Define | RegState::Implicit);
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  default:
    llvm_unreachable("Unknown regclass!");
  }
}

unsigned
ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                      int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::LDRrs:
  case ARM::t2LDRs:  // FIXME: don't use t2LDRs to access frame.
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isReg() &&
        MI->getOperand(3).isImm() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::LDRi12:
  case ARM::t2LDRi12:
  case ARM::tLDRspi:
  case ARM::VLDRD:
  case ARM::VLDRS:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::VLD1q64Pseudo:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(0).getSubReg() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::VLDMQIA:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(0).getSubReg() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

unsigned ARMBaseInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                                     int &FrameIndex) const {
  const MachineMemOperand *Dummy;
  return MI->mayLoad() && hasLoadFromStackSlot(MI, Dummy, FrameIndex);
}

bool ARMBaseInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const{
  // This hook gets to expand COPY instructions before they become
  // copyPhysReg() calls. Look for VMOVS instructions that can legally be
  // widened to VMOVD. We prefer the VMOVD when possible because it may be
  // changed into a VORR that can go down the NEON pipeline.
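  // For example (register names illustrative): "%S0 = COPY %S2" can be
  // rewritten as "%D0 = VMOVD %D1", since S0 and S2 are the ssub_0
  // sub-registers of D0 and D1.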
  if (!WidenVMOVS || !MI->isCopy())
    return false;

  // Look for a copy between even S-registers. That is where we keep floats
  // when using NEON v2f32 instructions for f32 arithmetic.
  unsigned DstRegS = MI->getOperand(0).getReg();
  unsigned SrcRegS = MI->getOperand(1).getReg();
  if (!ARM::SPRRegClass.contains(DstRegS, SrcRegS))
    return false;

  const TargetRegisterInfo *TRI = &getRegisterInfo();
  unsigned DstRegD = TRI->getMatchingSuperReg(DstRegS, ARM::ssub_0,
                                              &ARM::DPRRegClass);
  unsigned SrcRegD = TRI->getMatchingSuperReg(SrcRegS, ARM::ssub_0,
                                              &ARM::DPRRegClass);
  if (!DstRegD || !SrcRegD)
    return false;

  // We want to widen this into a DstRegD = VMOVD SrcRegD copy. This is only
  // legal if the COPY already defines the full DstRegD, and it isn't a
  // sub-register insertion.
  if (!MI->definesRegister(DstRegD, TRI) || MI->readsRegister(DstRegD, TRI))
    return false;

  // A dead copy shouldn't show up here, but reject it just in case.
  if (MI->getOperand(0).isDead())
    return false;

  // All clear, widen the COPY.
  DEBUG(dbgs() << "widening:    " << *MI);

  // Get rid of the old <imp-def> of DstRegD. Leave it if it defines a Q-reg
  // or some other super-register.
  int ImpDefIdx = MI->findRegisterDefOperandIdx(DstRegD);
  if (ImpDefIdx != -1)
    MI->RemoveOperand(ImpDefIdx);

  // Change the opcode and operands.
  MI->setDesc(get(ARM::VMOVD));
  MI->getOperand(0).setReg(DstRegD);
  MI->getOperand(1).setReg(SrcRegD);
  AddDefaultPred(MachineInstrBuilder(MI));

  // We are now reading SrcRegD instead of SrcRegS. This may upset the
  // register scavenger and machine verifier, so we need to indicate that we
  // are reading an undefined value from SrcRegD, but a proper value from
  // SrcRegS.
  MI->getOperand(1).setIsUndef();
  MachineInstrBuilder(MI).addReg(SrcRegS, RegState::Implicit);

  // SrcRegD may actually contain an unrelated value in the ssub_1
  // sub-register. Don't kill it. Only kill the ssub_0 sub-register.
  if (MI->getOperand(1).isKill()) {
    MI->getOperand(1).setIsKill(false);
    MI->addRegisterKilled(SrcRegS, TRI, true);
  }

  DEBUG(dbgs() << "replaced by: " << *MI);
  return true;
}

MachineInstr*
ARMBaseInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
                                           int FrameIx, uint64_t Offset,
                                           const MDNode *MDPtr,
                                           DebugLoc DL) const {
  MachineInstrBuilder MIB = BuildMI(MF, DL, get(ARM::DBG_VALUE))
    .addFrameIndex(FrameIx).addImm(0).addImm(Offset).addMetadata(MDPtr);
  return &*MIB;
}

/// Create a copy of a const pool value. Update CPI to the new index and return
/// the label UID.
static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) {
  MachineConstantPool *MCP = MF.getConstantPool();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  const MachineConstantPoolEntry &MCPE = MCP->getConstants()[CPI];
  assert(MCPE.isMachineConstantPoolEntry() &&
         "Expecting a machine constantpool entry!");
  ARMConstantPoolValue *ACPV =
    static_cast<ARMConstantPoolValue*>(MCPE.Val.MachineCPVal);

  unsigned PCLabelId = AFI->createPICLabelUId();
  ARMConstantPoolValue *NewCPV = 0;
  // FIXME: The below assumes PIC relocation model and that the function
  // is Thumb mode (t1 or t2). PCAdjustment would be 8 for ARM mode PIC, and
  // zero for non-PIC in ARM or Thumb. The callers are all of thumb LDR
  // instructions, so that's probably OK, but is PIC always correct when
  // we get here?
  if (ACPV->isGlobalValue())
    NewCPV = ARMConstantPoolConstant::
      Create(cast<ARMConstantPoolConstant>(ACPV)->getGV(), PCLabelId,
             ARMCP::CPValue, 4);
  else if (ACPV->isExtSymbol())
    NewCPV = ARMConstantPoolSymbol::
      Create(MF.getFunction()->getContext(),
             cast<ARMConstantPoolSymbol>(ACPV)->getSymbol(), PCLabelId, 4);
  else if (ACPV->isBlockAddress())
    NewCPV = ARMConstantPoolConstant::
      Create(cast<ARMConstantPoolConstant>(ACPV)->getBlockAddress(), PCLabelId,
             ARMCP::CPBlockAddress, 4);
  else if (ACPV->isLSDA())
    NewCPV = ARMConstantPoolConstant::Create(MF.getFunction(), PCLabelId,
                                             ARMCP::CPLSDA, 4);
  else if (ACPV->isMachineBasicBlock())
    NewCPV = ARMConstantPoolMBB::
      Create(MF.getFunction()->getContext(),
             cast<ARMConstantPoolMBB>(ACPV)->getMBB(), PCLabelId, 4);
  else
    llvm_unreachable("Unexpected ARM constantpool value type!!");
  CPI = MCP->getConstantPoolIndex(NewCPV, MCPE.getAlignment());
  return PCLabelId;
}

void ARMBaseInstrInfo::
reMaterialize(MachineBasicBlock &MBB,
              MachineBasicBlock::iterator I,
              unsigned DestReg, unsigned SubIdx,
              const MachineInstr *Orig,
              const TargetRegisterInfo &TRI) const {
  unsigned Opcode = Orig->getOpcode();
  switch (Opcode) {
  default: {
    MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
    MI->substituteRegister(Orig->getOperand(0).getReg(), DestReg, SubIdx, TRI);
    MBB.insert(I, MI);
    break;
  }
  case ARM::tLDRpci_pic:
  case ARM::t2LDRpci_pic: {
    MachineFunction &MF = *MBB.getParent();
    unsigned CPI = Orig->getOperand(1).getIndex();
    unsigned PCLabelId = duplicateCPV(MF, CPI);
    MachineInstrBuilder MIB = BuildMI(MBB, I, Orig->getDebugLoc(), get(Opcode),
                                      DestReg)
      .addConstantPoolIndex(CPI).addImm(PCLabelId);
    MIB->setMemRefs(Orig->memoperands_begin(), Orig->memoperands_end());
    break;
  }
  }
}

MachineInstr *
ARMBaseInstrInfo::duplicate(MachineInstr *Orig, MachineFunction &MF) const {
  MachineInstr *MI = TargetInstrInfoImpl::duplicate(Orig, MF);
  switch(Orig->getOpcode()) {
  case ARM::tLDRpci_pic:
  case ARM::t2LDRpci_pic: {
    unsigned CPI = Orig->getOperand(1).getIndex();
    unsigned PCLabelId = duplicateCPV(MF, CPI);
    Orig->getOperand(1).setIndex(CPI);
    Orig->getOperand(2).setImm(PCLabelId);
    break;
  }
  }
  return MI;
}

bool ARMBaseInstrInfo::produceSameValue(const MachineInstr *MI0,
                                        const MachineInstr *MI1,
                                        const MachineRegisterInfo *MRI) const {
  int Opcode = MI0->getOpcode();
  if (Opcode == ARM::t2LDRpci ||
      Opcode == ARM::t2LDRpci_pic ||
      Opcode == ARM::tLDRpci ||
      Opcode == ARM::tLDRpci_pic ||
      Opcode == ARM::MOV_ga_dyn ||
      Opcode == ARM::MOV_ga_pcrel ||
      Opcode == ARM::MOV_ga_pcrel_ldr ||
      Opcode == ARM::t2MOV_ga_dyn ||
      Opcode == ARM::t2MOV_ga_pcrel) {
    if (MI1->getOpcode() != Opcode)
      return false;
    if (MI0->getNumOperands() != MI1->getNumOperands())
      return false;

    const MachineOperand &MO0 = MI0->getOperand(1);
    const MachineOperand &MO1 = MI1->getOperand(1);
    if (MO0.getOffset() != MO1.getOffset())
      return false;

    if (Opcode == ARM::MOV_ga_dyn ||
        Opcode == ARM::MOV_ga_pcrel ||
        Opcode == ARM::MOV_ga_pcrel_ldr ||
        Opcode == ARM::t2MOV_ga_dyn ||
        Opcode == ARM::t2MOV_ga_pcrel)
      // Ignore the PC labels.
      return MO0.getGlobal() == MO1.getGlobal();

    const MachineFunction *MF = MI0->getParent()->getParent();
    const MachineConstantPool *MCP = MF->getConstantPool();
    int CPI0 = MO0.getIndex();
    int CPI1 = MO1.getIndex();
    const MachineConstantPoolEntry &MCPE0 = MCP->getConstants()[CPI0];
    const MachineConstantPoolEntry &MCPE1 = MCP->getConstants()[CPI1];
    bool isARMCP0 = MCPE0.isMachineConstantPoolEntry();
    bool isARMCP1 = MCPE1.isMachineConstantPoolEntry();
    if (isARMCP0 && isARMCP1) {
      ARMConstantPoolValue *ACPV0 =
        static_cast<ARMConstantPoolValue*>(MCPE0.Val.MachineCPVal);
      ARMConstantPoolValue *ACPV1 =
        static_cast<ARMConstantPoolValue*>(MCPE1.Val.MachineCPVal);
      return ACPV0->hasSameValue(ACPV1);
    } else if (!isARMCP0 && !isARMCP1) {
      return MCPE0.Val.ConstVal == MCPE1.Val.ConstVal;
    }
    return false;
  } else if (Opcode == ARM::PICLDR) {
    if (MI1->getOpcode() != Opcode)
      return false;
    if (MI0->getNumOperands() != MI1->getNumOperands())
      return false;

    unsigned Addr0 = MI0->getOperand(1).getReg();
    unsigned Addr1 = MI1->getOperand(1).getReg();
    if (Addr0 != Addr1) {
      if (!MRI ||
          !TargetRegisterInfo::isVirtualRegister(Addr0) ||
          !TargetRegisterInfo::isVirtualRegister(Addr1))
        return false;

      // This assumes SSA form.
      MachineInstr *Def0 = MRI->getVRegDef(Addr0);
      MachineInstr *Def1 = MRI->getVRegDef(Addr1);
      // Check if the loaded values, e.g. a constantpool of a global address,
      // are the same.
      if (!produceSameValue(Def0, Def1, MRI))
        return false;
    }

    for (unsigned i = 3, e = MI0->getNumOperands(); i != e; ++i) {
      // %vreg12<def> = PICLDR %vreg11, 0, pred:14, pred:%noreg
      const MachineOperand &MO0 = MI0->getOperand(i);
      const MachineOperand &MO1 = MI1->getOperand(i);
      if (!MO0.isIdenticalTo(MO1))
        return false;
    }
    return true;
  }

  return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

/// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to
/// determine if two loads are loading from the same base address. It should
/// only return true if the base pointers are the same and the only difference
/// between the two addresses is the offset. It also returns the offsets by
/// reference.
bool ARMBaseInstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
                                               int64_t &Offset1,
                                               int64_t &Offset2) const {
  // Don't worry about Thumb: just ARM and Thumb2.
  if (Subtarget.isThumb1Only()) return false;

  if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode())
    return false;

  switch (Load1->getMachineOpcode()) {
  default:
    return false;
  case ARM::LDRi12:
  case ARM::LDRBi12:
  case ARM::LDRD:
  case ARM::LDRH:
  case ARM::LDRSB:
  case ARM::LDRSH:
  case ARM::VLDRD:
  case ARM::VLDRS:
  case ARM::t2LDRi8:
  case ARM::t2LDRDi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRi12:
  case ARM::t2LDRSHi12:
    break;
  }

  switch (Load2->getMachineOpcode()) {
  default:
    return false;
  case ARM::LDRi12:
  case ARM::LDRBi12:
  case ARM::LDRD:
  case ARM::LDRH:
  case ARM::LDRSB:
  case ARM::LDRSH:
  case ARM::VLDRD:
  case ARM::VLDRS:
  case ARM::t2LDRi8:
  case ARM::t2LDRDi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRi12:
  case ARM::t2LDRSHi12:
    break;
  }

  // Check if base addresses and chain operands match.
  if (Load1->getOperand(0) != Load2->getOperand(0) ||
      Load1->getOperand(4) != Load2->getOperand(4))
    return false;

  // Index should be Reg0.
  if (Load1->getOperand(3) != Load2->getOperand(3))
    return false;

  // Determine the offsets.
  if (isa<ConstantSDNode>(Load1->getOperand(1)) &&
      isa<ConstantSDNode>(Load2->getOperand(1))) {
    Offset1 = cast<ConstantSDNode>(Load1->getOperand(1))->getSExtValue();
    Offset2 = cast<ConstantSDNode>(Load2->getOperand(1))->getSExtValue();
    return true;
  }

  return false;
}

/// shouldScheduleLoadsNear - This is used by the pre-regalloc scheduler to
/// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should
/// be scheduled together. On some targets if two loads are loading from
/// addresses in the same cache line, it's better if they are scheduled
/// together. This function takes two integers that represent the load offsets
/// from the common base address. It returns true if it decides it's desirable
/// to schedule the two loads together. "NumLoads" is the number of loads that
/// have already been scheduled after Load1.
bool ARMBaseInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                               int64_t Offset1, int64_t Offset2,
                                               unsigned NumLoads) const {
  // Don't worry about Thumb: just ARM and Thumb2.
  if (Subtarget.isThumb1Only()) return false;

  assert(Offset2 > Offset1);

  if ((Offset2 - Offset1) / 8 > 64)
    return false;

  if (Load1->getMachineOpcode() != Load2->getMachineOpcode())
    return false;  // FIXME: overly conservative?

  // Four loads in a row should be sufficient.
  if (NumLoads >= 3)
    return false;

  return true;
}

bool ARMBaseInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
                                            const MachineBasicBlock *MBB,
                                            const MachineFunction &MF) const {
  // Debug info is never a scheduling boundary. It's necessary to be explicit
  // due to the special treatment of IT instructions below, otherwise a
  // dbg_value followed by an IT will result in the IT instruction being
  // considered a scheduling hazard, which is wrong. It should be the actual
  // instruction preceding the dbg_value instruction(s), just like it is
  // when debug info is not present.
  if (MI->isDebugValue())
    return false;

  // Terminators and labels can't be scheduled around.
  if (MI->isTerminator() || MI->isLabel())
    return true;

  // Treat the start of the IT block as a scheduling boundary, but schedule
  // t2IT along with all instructions following it.
  // FIXME: This is a big hammer. But the alternative is to add all potential
  // true and anti dependencies to IT block instructions as implicit operands
  // to the t2IT instruction. The added compile time and complexity does not
  // seem worth it.
  MachineBasicBlock::const_iterator I = MI;
  // Make sure to skip any dbg_value instructions.
  while (++I != MBB->end() && I->isDebugValue())
    ;
  if (I != MBB->end() && I->getOpcode() == ARM::t2IT)
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  if (MI->definesRegister(ARM::SP))
    return true;

  return false;
}

bool ARMBaseInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &MBB,
                    unsigned NumCycles, unsigned ExtraPredCycles,
                    const BranchProbability &Probability) const {
  if (!NumCycles)
    return false;

  // Attempt to estimate the relative costs of predication versus branching.
  unsigned UnpredCost = Probability.getNumerator() * NumCycles;
  UnpredCost /= Probability.getDenominator();
  UnpredCost += 1; // The branch itself
  UnpredCost += Subtarget.getMispredictionPenalty() / 10;
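  // E.g. (illustrative numbers): NumCycles = 4, taken probability 1/2, and a
  // 10-cycle misprediction penalty give UnpredCost = 4/2 + 1 + 1 = 4, so
  // predication is considered profitable only while NumCycles +
  // ExtraPredCycles does not exceed 4.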

  return (NumCycles + ExtraPredCycles) <= UnpredCost;
}

bool ARMBaseInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &TMBB,
                    unsigned TCycles, unsigned TExtra,
                    MachineBasicBlock &FMBB,
                    unsigned FCycles, unsigned FExtra,
                    const BranchProbability &Probability) const {
  if (!TCycles || !FCycles)
    return false;

  // Attempt to estimate the relative costs of predication versus branching.
  unsigned TUnpredCost = Probability.getNumerator() * TCycles;
  TUnpredCost /= Probability.getDenominator();

  uint32_t Comp = Probability.getDenominator() - Probability.getNumerator();
  unsigned FUnpredCost = Comp * FCycles;
  FUnpredCost /= Probability.getDenominator();

  unsigned UnpredCost = TUnpredCost + FUnpredCost;
  UnpredCost += 1; // The branch itself
  UnpredCost += Subtarget.getMispredictionPenalty() / 10;

  return (TCycles + FCycles + TExtra + FExtra) <= UnpredCost;
}

/// getInstrPredicate - If instruction is predicated, returns its predicate
/// condition, otherwise returns AL. It also returns the condition code
/// register by reference.
ARMCC::CondCodes
llvm::getInstrPredicate(const MachineInstr *MI, unsigned &PredReg) {
  int PIdx = MI->findFirstPredOperandIdx();
  if (PIdx == -1) {
    PredReg = 0;
    return ARMCC::AL;
  }

  PredReg = MI->getOperand(PIdx+1).getReg();
  return (ARMCC::CondCodes)MI->getOperand(PIdx).getImm();
}

int llvm::getMatchingCondBranchOpcode(int Opc) {
  if (Opc == ARM::B)
    return ARM::Bcc;
  if (Opc == ARM::tB)
    return ARM::tBcc;
  if (Opc == ARM::t2B)
    return ARM::t2Bcc;

  llvm_unreachable("Unknown unconditional branch opcode!");
  return 0;
}

/// Map pseudo instructions that imply an 'S' bit onto real opcodes. Whether
/// the instruction is encoded with an 'S' bit is determined by the optional
/// CPSR def operand.
///
/// This will go away once we can teach tblgen how to set the optional CPSR def
/// operand itself.
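///
/// For example, the first map entry below pairs the codegen-only pseudo
/// ARM::ADDSri with the real ARM::ADDri; the 'S' encoding is then expressed
/// through the optional CPSR-def operand rather than a distinct opcode.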
struct AddSubFlagsOpcodePair {
  unsigned PseudoOpc;
  unsigned MachineOpc;
};

static AddSubFlagsOpcodePair AddSubFlagsOpcodeMap[] = {
  {ARM::ADDSri, ARM::ADDri},
  {ARM::ADDSrr, ARM::ADDrr},
  {ARM::ADDSrsi, ARM::ADDrsi},
  {ARM::ADDSrsr, ARM::ADDrsr},

  {ARM::SUBSri, ARM::SUBri},
  {ARM::SUBSrr, ARM::SUBrr},
  {ARM::SUBSrsi, ARM::SUBrsi},
  {ARM::SUBSrsr, ARM::SUBrsr},

  {ARM::RSBSri, ARM::RSBri},
  {ARM::RSBSrsi, ARM::RSBrsi},
  {ARM::RSBSrsr, ARM::RSBrsr},

  {ARM::t2ADDSri, ARM::t2ADDri},
  {ARM::t2ADDSrr, ARM::t2ADDrr},
  {ARM::t2ADDSrs, ARM::t2ADDrs},

  {ARM::t2SUBSri, ARM::t2SUBri},
  {ARM::t2SUBSrr, ARM::t2SUBrr},
  {ARM::t2SUBSrs, ARM::t2SUBrs},

  {ARM::t2RSBSri, ARM::t2RSBri},
  {ARM::t2RSBSrs, ARM::t2RSBrs},
};

unsigned llvm::convertAddSubFlagsOpcode(unsigned OldOpc) {
  static const int NPairs =
    sizeof(AddSubFlagsOpcodeMap) / sizeof(AddSubFlagsOpcodePair);
  for (AddSubFlagsOpcodePair *OpcPair = &AddSubFlagsOpcodeMap[0],
         *End = &AddSubFlagsOpcodeMap[NPairs]; OpcPair != End; ++OpcPair) {
    if (OldOpc == OpcPair->PseudoOpc) {
      return OpcPair->MachineOpc;
    }
  }
  return 0;
}

void llvm::emitARMRegPlusImmediate(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator &MBBI, DebugLoc dl,
                               unsigned DestReg, unsigned BaseReg, int NumBytes,
                               ARMCC::CondCodes Pred, unsigned PredReg,
                               const ARMBaseInstrInfo &TII, unsigned MIFlags) {
  bool isSub = NumBytes < 0;
  if (isSub) NumBytes = -NumBytes;
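
  // One ADD / SUB is emitted per encodable so_imm chunk. E.g. (illustrative)
  // NumBytes = 0x10004 is split into 0x4 and then 0x10000, producing two
  // ADDri instructions.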
  while (NumBytes) {
    unsigned RotAmt = ARM_AM::getSOImmValRotate(NumBytes);
    unsigned ThisVal = NumBytes & ARM_AM::rotr32(0xFF, RotAmt);
    assert(ThisVal && "Didn't extract field correctly");

    // We will handle these bits from offset, clear them.
    NumBytes &= ~ThisVal;

    assert(ARM_AM::getSOImmVal(ThisVal) != -1 && "Bit extraction didn't work?");

    // Build the new ADD / SUB.
    unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri;
    BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
      .addReg(BaseReg, RegState::Kill).addImm(ThisVal)
      .addImm((unsigned)Pred).addReg(PredReg).addReg(0)
      .setMIFlags(MIFlags);
    BaseReg = DestReg;
  }
}

bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                                unsigned FrameReg, int &Offset,
                                const ARMBaseInstrInfo &TII) {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  bool isSub = false;

  // Memory operands in inline assembly always use AddrMode2.
  if (Opcode == ARM::INLINEASM)
    AddrMode = ARMII::AddrMode2;

  if (Opcode == ARM::ADDri) {
    Offset += MI.getOperand(FrameRegIdx+1).getImm();
    if (Offset == 0) {
      // Turn it into a move.
      MI.setDesc(TII.get(ARM::MOVr));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.RemoveOperand(FrameRegIdx+1);
      return true;
    } else if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
      MI.setDesc(TII.get(ARM::SUBri));
    }

    // Common case: small offset, fits into instruction.
    if (ARM_AM::getSOImmVal(Offset) != -1) {
      // Replace the FrameIndex with sp / fp.
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
      if (isSub)
        Offset = -Offset;
      return true;
    }

    // Otherwise, pull as much of the immediate into this ADDri/SUBri
    // as possible.
    unsigned RotAmt = ARM_AM::getSOImmValRotate(Offset);
    unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xFF, RotAmt);

    // We will handle these bits from offset, clear them.
    Offset &= ~ThisImmVal;

    // Get the properly encoded SOImmVal field.
    assert(ARM_AM::getSOImmVal(ThisImmVal) != -1 &&
           "Bit extraction didn't work?");
    MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
  } else {
    unsigned ImmIdx = 0;
    int InstrOffs = 0;
    unsigned NumBits = 0;
    unsigned Scale = 1;
    switch (AddrMode) {
    case ARMII::AddrMode_i12: {
      ImmIdx = FrameRegIdx + 1;
      InstrOffs = MI.getOperand(ImmIdx).getImm();
      NumBits = 12;
      break;
    }
    case ARMII::AddrMode2: {
      ImmIdx = FrameRegIdx+2;
      InstrOffs = ARM_AM::getAM2Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM2Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 12;
      break;
    }
    case ARMII::AddrMode3: {
      ImmIdx = FrameRegIdx+2;
      InstrOffs = ARM_AM::getAM3Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM3Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      break;
    }
    case ARMII::AddrMode4:
    case ARMII::AddrMode6:
      // Can't fold any offset even if it's zero.
      return false;
    case ARMII::AddrMode5: {
      ImmIdx = FrameRegIdx+1;
      InstrOffs = ARM_AM::getAM5Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM5Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      Scale = 4;
      break;
    }
    default:
      llvm_unreachable("Unsupported addressing mode!");
    }

    Offset += InstrOffs * Scale;
    assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
    if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
    }
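
    // E.g. AddrMode5 (used by VLDR / VSTR) has NumBits = 8 and Scale = 4, so
    // it can fold immediate offsets of up to 255 * 4 = 1020 bytes directly.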
    // Attempt to fold address computation if the opcode has offset bits.
    if (NumBits > 0) {
      // Common case: small offset, fits into instruction.
      MachineOperand &ImmOp = MI.getOperand(ImmIdx);
      int ImmedOffset = Offset / Scale;
      unsigned Mask = (1 << NumBits) - 1;
      if ((unsigned)Offset <= Mask * Scale) {
        // Replace the FrameIndex with sp.
        MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
        // FIXME: When addrmode2 goes away, this will simplify (like the
        // T2 version), as the LDR.i12 versions don't need the encoding
        // tricks for the offset value.
        if (isSub) {
          if (AddrMode == ARMII::AddrMode_i12)
            ImmedOffset = -ImmedOffset;
          else
            ImmedOffset |= 1 << NumBits;
        }
        ImmOp.ChangeToImmediate(ImmedOffset);
        Offset = 0;
        return true;
      }

      // Otherwise, it didn't fit. Pull in what we can to simplify the immed.
      ImmedOffset = ImmedOffset & Mask;
      if (isSub) {
        if (AddrMode == ARMII::AddrMode_i12)
          ImmedOffset = -ImmedOffset;
        else
          ImmedOffset |= 1 << NumBits;
      }
      ImmOp.ChangeToImmediate(ImmedOffset);
      Offset &= ~(Mask*Scale);
    }
  }

  Offset = (isSub) ? -Offset : Offset;
  return Offset == 0;
}

bool ARMBaseInstrInfo::
AnalyzeCompare(const MachineInstr *MI, unsigned &SrcReg, int &CmpMask,
               int &CmpValue) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::CMPri:
  case ARM::t2CMPri:
    SrcReg = MI->getOperand(0).getReg();
    CmpMask = ~0;
    CmpValue = MI->getOperand(1).getImm();
    return true;
  case ARM::TSTri:
  case ARM::t2TSTri:
    SrcReg = MI->getOperand(0).getReg();
    CmpMask = MI->getOperand(1).getImm();
    CmpValue = 0;
    return true;
  }

  return false;
}

/// isSuitableForMask - Identify a suitable 'and' instruction that
/// operates on the given source register and applies the same mask
/// as a 'tst' instruction. Provide a limited look-through for copies.
/// When successful, MI will hold the found instruction.
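///
/// E.g. (illustrative vregs): for "TST r0, #255" preceded by
/// "r1 = ANDri r0, #255", the ANDri applies the same mask as the TST, so the
/// compare can later be folded away once the ANDri is made to set CPSR.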
static bool isSuitableForMask(MachineInstr *&MI, unsigned SrcReg,
                              int CmpMask, bool CommonUse) {
  switch (MI->getOpcode()) {
  case ARM::ANDri:
  case ARM::t2ANDri:
    if (CmpMask != MI->getOperand(2).getImm())
      return false;
    if (SrcReg == MI->getOperand(CommonUse ? 1 : 0).getReg())
      return true;
    break;
  case ARM::COPY: {
    // Walk down one instruction which is potentially an 'and'.
    const MachineInstr &Copy = *MI;
    MachineBasicBlock::iterator AND(
      llvm::next(MachineBasicBlock::iterator(MI)));
    if (AND == MI->getParent()->end()) return false;
    MI = AND;
    return isSuitableForMask(MI, Copy.getOperand(0).getReg(),
                             CmpMask, true);
  }
  }

  return false;
}

/// OptimizeCompareInstr - Convert the instruction supplying the argument to
/// the comparison into one that sets the zero bit in the flags register.
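///
/// E.g. (illustrative): "r0 = SUBri r1, #5; CMPri r0, #0" can drop the CMPri
/// by enabling the SUBri's optional CPSR definition, effectively turning it
/// into a flag-setting SUBS.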
bool ARMBaseInstrInfo::
OptimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, int CmpMask,
                     int CmpValue, const MachineRegisterInfo *MRI) const {
  if (CmpValue != 0)
    return false;

  MachineRegisterInfo::def_iterator DI = MRI->def_begin(SrcReg);
  if (llvm::next(DI) != MRI->def_end())
    // Only support one definition.
    return false;

  MachineInstr *MI = &*DI;

  // Masked compares sometimes use the same register as the corresponding
  // 'and'.
  if (CmpMask != ~0) {
    if (!isSuitableForMask(MI, SrcReg, CmpMask, false)) {
      MI = 0;
      for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(SrcReg),
           UE = MRI->use_end(); UI != UE; ++UI) {
        if (UI->getParent() != CmpInstr->getParent()) continue;
        MachineInstr *PotentialAND = &*UI;
        if (!isSuitableForMask(PotentialAND, SrcReg, CmpMask, true))
          continue;
        MI = PotentialAND;
        break;
      }
      if (!MI) return false;
    }
  }

  // Conservatively refuse to convert an instruction which isn't in the same
  // BB as the comparison.
  if (MI->getParent() != CmpInstr->getParent())
    return false;

  // Check that CPSR isn't set between the comparison instruction and the one
  // we want to change.
  MachineBasicBlock::iterator I = CmpInstr,E = MI, B = MI->getParent()->begin();

  // Early exit if CmpInstr is at the beginning of the BB.
  if (I == B) return false;

  --I;
  for (; I != E; --I) {
    const MachineInstr &Instr = *I;

    for (unsigned IO = 0, EO = Instr.getNumOperands(); IO != EO; ++IO) {
      const MachineOperand &MO = Instr.getOperand(IO);
      if (!MO.isReg()) continue;

      // This instruction modifies or uses CPSR after the one we want to
      // change. We can't do this transformation.
      if (MO.getReg() == ARM::CPSR)
        return false;
    }

    if (I == B)
      // The 'and' is below the comparison instruction.
      return false;
  }

  // Set the "zero" bit in CPSR.
  switch (MI->getOpcode()) {
  default: break;
  case ARM::RSBrr:
  case ARM::RSBri:
  case ARM::RSCrr:
  case ARM::RSCri:
  case ARM::ADDrr:
  case ARM::ADDri:
  case ARM::ADCrr:
  case ARM::ADCri:
  case ARM::SUBrr:
  case ARM::SUBri:
  case ARM::SBCrr:
  case ARM::SBCri:
  case ARM::t2RSBri:
  case ARM::t2ADDrr:
  case ARM::t2ADDri:
  case ARM::t2ADCrr:
  case ARM::t2ADCri:
  case ARM::t2SUBrr:
  case ARM::t2SUBri:
  case ARM::t2SBCrr:
  case ARM::t2SBCri:
  case ARM::ANDrr:
  case ARM::ANDri:
  case ARM::t2ANDrr:
  case ARM::t2ANDri:
  case ARM::ORRrr:
  case ARM::ORRri:
  case ARM::t2ORRrr:
  case ARM::t2ORRri:
  case ARM::EORrr:
  case ARM::EORri:
  case ARM::t2EORrr:
  case ARM::t2EORri: {
    // Scan forward for the use of CPSR; if it's a condition code that
    // requires checking of the V bit, then this is not safe to do. If we
    // can't find the CPSR use (i.e. used in another block), then it's not
    // safe to perform the optimization.
    bool isSafe = false;
    I = CmpInstr;
    E = MI->getParent()->end();
    while (!isSafe && ++I != E) {
      const MachineInstr &Instr = *I;
      for (unsigned IO = 0, EO = Instr.getNumOperands();
           !isSafe && IO != EO; ++IO) {
        const MachineOperand &MO = Instr.getOperand(IO);
        if (!MO.isReg() || MO.getReg() != ARM::CPSR)
          continue;
        if (MO.isDef()) {
          isSafe = true;
          break;
        }
        // Condition code is after the operand before CPSR.
        ARMCC::CondCodes CC = (ARMCC::CondCodes)Instr.getOperand(IO-1).getImm();
        switch (CC) {
        default:
          isSafe = true;
          break;
        case ARMCC::VS:
        case ARMCC::VC:
        case ARMCC::GE:
        case ARMCC::LT:
        case ARMCC::GT:
        case ARMCC::LE:
          return false;
        }
      }
    }

    if (!isSafe)
      return false;

    // Toggle the optional operand to CPSR.
    MI->getOperand(5).setReg(ARM::CPSR);
    MI->getOperand(5).setIsDef(true);
    CmpInstr->eraseFromParent();
    return true;
  }
  }

  return false;
}

bool ARMBaseInstrInfo::FoldImmediate(MachineInstr *UseMI,
                                     MachineInstr *DefMI, unsigned Reg,
                                     MachineRegisterInfo *MRI) const {
  // Fold large immediates into add, sub, or, xor.
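  // E.g. (illustrative vregs): "r1 = MOVi32imm #0x00F000F0; r2 = ADDrr r0, r1"
  // can become two ADDri instructions whose immediates sum to the constant
  // (e.g. #0xF0 and #0xF00000), provided the constant splits into two so_imm
  // pieces (ARM_AM::isSOImmTwoPartVal).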
  unsigned DefOpc = DefMI->getOpcode();
  if (DefOpc != ARM::t2MOVi32imm && DefOpc != ARM::MOVi32imm)
    return false;
  if (!DefMI->getOperand(1).isImm())
    // Could be t2MOVi32imm <ga:xx>
    return false;

  if (!MRI->hasOneNonDBGUse(Reg))
    return false;

  unsigned UseOpc = UseMI->getOpcode();
  unsigned NewUseOpc = 0;
  uint32_t ImmVal = (uint32_t)DefMI->getOperand(1).getImm();
  uint32_t SOImmValV1 = 0, SOImmValV2 = 0;
  bool Commute = false;
  switch (UseOpc) {
  default: return false;
  case ARM::SUBrr:
  case ARM::ADDrr:
  case ARM::ORRrr:
  case ARM::EORrr:
  case ARM::t2SUBrr:
  case ARM::t2ADDrr:
  case ARM::t2ORRrr:
  case ARM::t2EORrr: {
    Commute = UseMI->getOperand(2).getReg() != Reg;
    switch (UseOpc) {
    default: break;
    case ARM::SUBrr: {
      if (Commute)
        return false;
      ImmVal = -ImmVal;
      NewUseOpc = ARM::SUBri;
      // Fallthrough
    }
    case ARM::ADDrr:
    case ARM::ORRrr:
    case ARM::EORrr: {
      if (!ARM_AM::isSOImmTwoPartVal(ImmVal))
        return false;
      SOImmValV1 = (uint32_t)ARM_AM::getSOImmTwoPartFirst(ImmVal);
      SOImmValV2 = (uint32_t)ARM_AM::getSOImmTwoPartSecond(ImmVal);
      switch (UseOpc) {
      default: break;
      case ARM::ADDrr: NewUseOpc = ARM::ADDri; break;
      case ARM::ORRrr: NewUseOpc = ARM::ORRri; break;
      case ARM::EORrr: NewUseOpc = ARM::EORri; break;
      }
      break;
    }
    case ARM::t2SUBrr: {
      if (Commute)
        return false;
      ImmVal = -ImmVal;
      NewUseOpc = ARM::t2SUBri;
      // Fallthrough
    }
    case ARM::t2ADDrr:
    case ARM::t2ORRrr:
    case ARM::t2EORrr: {
      if (!ARM_AM::isT2SOImmTwoPartVal(ImmVal))
        return false;
      SOImmValV1 = (uint32_t)ARM_AM::getT2SOImmTwoPartFirst(ImmVal);
      SOImmValV2 = (uint32_t)ARM_AM::getT2SOImmTwoPartSecond(ImmVal);
      switch (UseOpc) {
      default: break;
      case ARM::t2ADDrr: NewUseOpc = ARM::t2ADDri; break;
      case ARM::t2ORRrr: NewUseOpc = ARM::t2ORRri; break;
      case ARM::t2EORrr: NewUseOpc = ARM::t2EORri; break;
      }
      break;
    }
    }
  }
  }

  unsigned OpIdx = Commute ? 2 : 1;
  unsigned Reg1 = UseMI->getOperand(OpIdx).getReg();
  bool isKill = UseMI->getOperand(OpIdx).isKill();
  unsigned NewReg = MRI->createVirtualRegister(MRI->getRegClass(Reg));
  AddDefaultCC(AddDefaultPred(BuildMI(*UseMI->getParent(),
                                      UseMI, UseMI->getDebugLoc(),
                                      get(NewUseOpc), NewReg)
                              .addReg(Reg1, getKillRegState(isKill))
                              .addImm(SOImmValV1)));
  UseMI->setDesc(get(NewUseOpc));
  UseMI->getOperand(1).setReg(NewReg);
  UseMI->getOperand(1).setIsKill();
  UseMI->getOperand(2).ChangeToImmediate(SOImmValV2);
  DefMI->eraseFromParent();
  return true;
}
unsigned
ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                 const MachineInstr *MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  const MCInstrDesc &Desc = MI->getDesc();
  unsigned Class = Desc.getSchedClass();
  unsigned UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (UOps)
    return UOps;

  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  default:
    llvm_unreachable("Unexpected multi-uops instruction!");
    break;
  case ARM::VLDMQIA:
  case ARM::VSTMQIA:
    return 2;

  // The number of uOps for load / store multiple is determined by the number
  // of registers involved.
  //
  // On Cortex-A8, each pair of register loads / stores can be scheduled on the
  // same cycle. The scheduling for the first load / store must be done
  // separately by assuming the address is not 64-bit aligned.
  //
  // On Cortex-A9, the formula is simply (#reg / 2) + (#reg % 2). If the address
  // is not 64-bit aligned, the AGU takes an extra cycle. For VFP / NEON
  // load / store multiple, the formula is (#reg / 2) + (#reg % 2) + 1.
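  //
  // For example, under the Cortex-A9 VFP / NEON rule a VLDM of 5 D-registers
  // costs (5 / 2) + (5 % 2) + 1 = 2 + 1 + 1 = 4 uOps.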
  case ARM::VLDMDIA:
  case ARM::VLDMDIA_UPD:
  case ARM::VLDMDDB_UPD:
  case ARM::VLDMSIA:
  case ARM::VLDMSIA_UPD:
  case ARM::VLDMSDB_UPD:
  case ARM::VSTMDIA:
  case ARM::VSTMDIA_UPD:
  case ARM::VSTMDDB_UPD:
  case ARM::VSTMSIA:
  case ARM::VSTMSIA_UPD:
  case ARM::VSTMSDB_UPD: {
    unsigned NumRegs = MI->getNumOperands() - Desc.getNumOperands();
    return (NumRegs / 2) + (NumRegs % 2) + 1;
  }

  case ARM::LDMIA_RET:
  case ARM::LDMIA:
  case ARM::LDMDA:
  case ARM::LDMDB:
  case ARM::LDMIB:
  case ARM::LDMIA_UPD:
  case ARM::LDMDA_UPD:
  case ARM::LDMDB_UPD:
  case ARM::LDMIB_UPD:
  case ARM::STMIA:
  case ARM::STMDA:
  case ARM::STMDB:
  case ARM::STMIB:
  case ARM::STMIA_UPD:
  case ARM::STMDA_UPD:
  case ARM::STMDB_UPD:
  case ARM::STMIB_UPD:
  case ARM::tLDMIA:
  case ARM::tLDMIA_UPD:
  case ARM::tSTMIA_UPD:
  case ARM::tPOP_RET:
  case ARM::tPOP:
  case ARM::tPUSH:
  case ARM::t2LDMIA_RET:
  case ARM::t2LDMIA:
  case ARM::t2LDMDB:
  case ARM::t2LDMIA_UPD:
  case ARM::t2LDMDB_UPD:
  case ARM::t2STMIA:
  case ARM::t2STMDB:
  case ARM::t2STMIA_UPD:
  case ARM::t2STMDB_UPD: {
    unsigned NumRegs = MI->getNumOperands() - Desc.getNumOperands() + 1;
    if (Subtarget.isCortexA8()) {
      if (NumRegs < 4)
        return 2;
      // 4 registers would be issued: 2, 2.
      // 5 registers would be issued: 2, 2, 1.
      UOps = (NumRegs / 2);
      if (NumRegs % 2)
        ++UOps;
      return UOps;
    } else if (Subtarget.isCortexA9()) {
      UOps = (NumRegs / 2);
      // If there is an odd number of registers or if it's not 64-bit aligned,
      // then it takes an extra AGU (Address Generation Unit) cycle.
      if ((NumRegs % 2) ||
          !MI->hasOneMemOperand() ||
          (*MI->memoperands_begin())->getAlignment() < 8)
        ++UOps;
      return UOps;
    } else {
      // Assume the worst.
      return NumRegs;
    }
  }
  }
}
int
ARMBaseInstrInfo::getVLDMDefCycle(const InstrItineraryData *ItinData,
                                  const MCInstrDesc &DefMCID,
                                  unsigned DefClass,
                                  unsigned DefIdx, unsigned DefAlign) const {
  int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1;
  if (RegNo <= 0)
    // Def is the address writeback.
    return ItinData->getOperandCycle(DefClass, DefIdx);

  int DefCycle;
  if (Subtarget.isCortexA8()) {
    // (regno / 2) + (regno % 2) + 1
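    // e.g. the result for RegNo == 3 is ready after 3/2 + 3%2 + 1 = 3 cycles.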
    DefCycle = RegNo / 2 + 1;
    if (RegNo % 2)
      ++DefCycle;
  } else if (Subtarget.isCortexA9()) {
    DefCycle = RegNo;
    bool isSLoad = false;

    switch (DefMCID.getOpcode()) {
    default: break;
    case ARM::VLDMSIA:
    case ARM::VLDMSIA_UPD:
    case ARM::VLDMSDB_UPD:
      isSLoad = true;
      break;
    }

    // If there is an odd number of 'S' registers or if it's not 64-bit
    // aligned, then it takes an extra cycle.
    if ((isSLoad && (RegNo % 2)) || DefAlign < 8)
      ++DefCycle;
  } else {
    // Assume the worst.
    DefCycle = RegNo + 2;
  }

  return DefCycle;
}
int
ARMBaseInstrInfo::getLDMDefCycle(const InstrItineraryData *ItinData,
                                 const MCInstrDesc &DefMCID,
                                 unsigned DefClass,
                                 unsigned DefIdx, unsigned DefAlign) const {
  int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1;
  if (RegNo <= 0)
    // Def is the address writeback.
    return ItinData->getOperandCycle(DefClass, DefIdx);

  int DefCycle;
  if (Subtarget.isCortexA8()) {
    // 4 registers would be issued: 1, 2, 1.
    // 5 registers would be issued: 1, 2, 2.
    DefCycle = RegNo / 2;
    if (DefCycle < 1)
      DefCycle = 1;
    // Result latency is issue cycle + 2: E2.
    DefCycle += 2;
  } else if (Subtarget.isCortexA9()) {
    DefCycle = (RegNo / 2);
    // If there is an odd number of registers or if it's not 64-bit aligned,
    // then it takes an extra AGU (Address Generation Unit) cycle.
    if ((RegNo % 2) || DefAlign < 8)
      ++DefCycle;
    // Result latency is AGU cycles + 2.
    DefCycle += 2;
  } else {
    // Assume the worst.
    DefCycle = RegNo + 2;
  }

  return DefCycle;
}
int
ARMBaseInstrInfo::getVSTMUseCycle(const InstrItineraryData *ItinData,
                                  const MCInstrDesc &UseMCID,
                                  unsigned UseClass,
                                  unsigned UseIdx, unsigned UseAlign) const {
  int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1;
  if (RegNo <= 0)
    return ItinData->getOperandCycle(UseClass, UseIdx);

  int UseCycle;
  if (Subtarget.isCortexA8()) {
    // (regno / 2) + (regno % 2) + 1
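    // e.g. the 4th register of a VSTM is read at cycle 4/2 + 4%2 + 1 = 3.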
    UseCycle = RegNo / 2 + 1;
    if (RegNo % 2)
      ++UseCycle;
  } else if (Subtarget.isCortexA9()) {
    UseCycle = RegNo;
    bool isSStore = false;

    switch (UseMCID.getOpcode()) {
    default: break;
    case ARM::VSTMSIA:
    case ARM::VSTMSIA_UPD:
    case ARM::VSTMSDB_UPD:
      isSStore = true;
      break;
    }

    // If there is an odd number of 'S' registers or if it's not 64-bit
    // aligned, then it takes an extra cycle.
    if ((isSStore && (RegNo % 2)) || UseAlign < 8)
      ++UseCycle;
  } else {
    // Assume the worst.
    UseCycle = RegNo + 2;
  }

  return UseCycle;
}
int
ARMBaseInstrInfo::getSTMUseCycle(const InstrItineraryData *ItinData,
                                 const MCInstrDesc &UseMCID,
                                 unsigned UseClass,
                                 unsigned UseIdx, unsigned UseAlign) const {
  int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1;
  if (RegNo <= 0)
    return ItinData->getOperandCycle(UseClass, UseIdx);

  int UseCycle;
  if (Subtarget.isCortexA8()) {
    UseCycle = RegNo / 2;
    if (UseCycle < 2)
      UseCycle = 2;
    // Read in E3.
    UseCycle += 2;
  } else if (Subtarget.isCortexA9()) {
    UseCycle = (RegNo / 2);
    // If there is an odd number of registers or if it's not 64-bit aligned,
    // then it takes an extra AGU (Address Generation Unit) cycle.
    if ((RegNo % 2) || UseAlign < 8)
      ++UseCycle;
  } else {
    // Assume the worst.
    UseCycle = 1;
  }
  return UseCycle;
}
int
ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                    const MCInstrDesc &DefMCID,
                                    unsigned DefIdx, unsigned DefAlign,
                                    const MCInstrDesc &UseMCID,
                                    unsigned UseIdx, unsigned UseAlign) const {
  unsigned DefClass = DefMCID.getSchedClass();
  unsigned UseClass = UseMCID.getSchedClass();

  if (DefIdx < DefMCID.getNumDefs() && UseIdx < UseMCID.getNumOperands())
    return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);

  // This may be a def / use of a variable_ops instruction; the operand
  // latency might be determinable dynamically. Let the target try to
  // figure it out.
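  // For example, the trailing defs of an 'ldmia r0!, {r1, r2, r3}' have no
  // fixed itinerary slots; their cycles are derived from their position in
  // the register list below.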
  int DefCycle = -1;
  bool LdmBypass = false;
  switch (DefMCID.getOpcode()) {
  default:
    DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
    break;

  case ARM::VLDMDIA:
  case ARM::VLDMDIA_UPD:
  case ARM::VLDMDDB_UPD:
  case ARM::VLDMSIA:
  case ARM::VLDMSIA_UPD:
  case ARM::VLDMSDB_UPD:
    DefCycle = getVLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
    break;

  case ARM::LDMIA_RET:
  case ARM::LDMIA:
  case ARM::LDMDA:
  case ARM::LDMDB:
  case ARM::LDMIB:
  case ARM::LDMIA_UPD:
  case ARM::LDMDA_UPD:
  case ARM::LDMDB_UPD:
  case ARM::LDMIB_UPD:
  case ARM::tLDMIA:
  case ARM::tLDMIA_UPD:
  case ARM::tPUSH:
  case ARM::t2LDMIA_RET:
  case ARM::t2LDMIA:
  case ARM::t2LDMDB:
  case ARM::t2LDMIA_UPD:
  case ARM::t2LDMDB_UPD:
    LdmBypass = true;
    DefCycle = getLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
    break;
  }

  if (DefCycle == -1)
    // We can't seem to determine the result latency of the def, assume it's 2.
    DefCycle = 2;

  int UseCycle = -1;
  switch (UseMCID.getOpcode()) {
  default:
    UseCycle = ItinData->getOperandCycle(UseClass, UseIdx);
    break;

  case ARM::VSTMDIA:
  case ARM::VSTMDIA_UPD:
  case ARM::VSTMDDB_UPD:
  case ARM::VSTMSIA:
  case ARM::VSTMSIA_UPD:
  case ARM::VSTMSDB_UPD:
    UseCycle = getVSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
    break;

  case ARM::STMIA:
  case ARM::STMDA:
  case ARM::STMDB:
  case ARM::STMIB:
  case ARM::STMIA_UPD:
  case ARM::STMDA_UPD:
  case ARM::STMDB_UPD:
  case ARM::STMIB_UPD:
  case ARM::tSTMIA_UPD:
  case ARM::tPOP_RET:
  case ARM::tPOP:
  case ARM::t2STMIA:
  case ARM::t2STMDB:
  case ARM::t2STMIA_UPD:
  case ARM::t2STMDB_UPD:
    UseCycle = getSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
    break;
  }

  if (UseCycle == -1)
    // Assume it's read in the first stage.
    UseCycle = 1;

  UseCycle = DefCycle - UseCycle + 1;
  if (UseCycle > 0) {
    if (LdmBypass) {
      // It's a variable_ops instruction so we can't use DefIdx here. Just use
      // the first def operand.
      if (ItinData->hasPipelineForwarding(DefClass, DefMCID.getNumOperands()-1,
                                          UseClass, UseIdx))
        --UseCycle;
    } else if (ItinData->hasPipelineForwarding(DefClass, DefIdx,
                                               UseClass, UseIdx)) {
      --UseCycle;
    }
  }

  return UseCycle;
}
static const MachineInstr *getBundledDefMI(const TargetRegisterInfo *TRI,
                                           const MachineInstr *MI, unsigned Reg,
                                           unsigned &DefIdx, unsigned &Dist) {
  Dist = 0;

  MachineBasicBlock::const_iterator I = MI; ++I;
  MachineBasicBlock::const_instr_iterator II =
    llvm::prior(I.getInstrIterator());
  assert(II->isInsideBundle() && "Empty bundle?");

  int Idx = -1;
  while (II->isInsideBundle()) {
    Idx = II->findRegisterDefOperandIdx(Reg, false, true, TRI);
    if (Idx != -1)
      break;
    --II;
    ++Dist;
  }

  assert(Idx != -1 && "Cannot find bundled definition!");
  DefIdx = Idx;
  return II;
}
static const MachineInstr *getBundledUseMI(const TargetRegisterInfo *TRI,
                                           const MachineInstr *MI, unsigned Reg,
                                           unsigned &UseIdx, unsigned &Dist) {
  Dist = 0;

  MachineBasicBlock::const_instr_iterator II = MI; ++II;
  assert(II->isInsideBundle() && "Empty bundle?");
  MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();

  // FIXME: This doesn't properly handle multiple uses.
  int Idx = -1;
  while (II != E && II->isInsideBundle()) {
    Idx = II->findRegisterUseOperandIdx(Reg, false, TRI);
    if (Idx != -1)
      break;
    if (II->getOpcode() != ARM::t2IT)
      ++Dist;
    ++II;
  }

  if (Idx == -1) {
    Dist = 0;
    return 0;
  }

  UseIdx = Idx;
  return II;
}
int
ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                    const MachineInstr *DefMI, unsigned DefIdx,
                                    const MachineInstr *UseMI,
                                    unsigned UseIdx) const {
  if (DefMI->isCopyLike() || DefMI->isInsertSubreg() ||
      DefMI->isRegSequence() || DefMI->isImplicitDef())
    return 1;

  if (!ItinData || ItinData->isEmpty())
    return DefMI->mayLoad() ? 3 : 1;

  const MCInstrDesc *DefMCID = &DefMI->getDesc();
  const MCInstrDesc *UseMCID = &UseMI->getDesc();
  const MachineOperand &DefMO = DefMI->getOperand(DefIdx);
  unsigned Reg = DefMO.getReg();
  if (Reg == ARM::CPSR) {
    if (DefMI->getOpcode() == ARM::FMSTAT) {
      // fpscr -> cpsr stalls over 20 cycles on A8 (and earlier?)
      return Subtarget.isCortexA9() ? 1 : 20;
    }

    // CPSR set and branch can be paired in the same cycle.
    if (UseMI->isBranch())
      return 0;

    // Otherwise it takes the instruction latency (generally one).
    int Latency = getInstrLatency(ItinData, DefMI);

    // For Thumb2 and -Os, prefer scheduling a CPSR setting instruction close
    // to its uses. Instructions which are otherwise scheduled between them may
    // incur a code size penalty (not able to use the CPSR setting 16-bit
    // instructions).
    if (Latency > 0 && Subtarget.isThumb2()) {
      const MachineFunction *MF = DefMI->getParent()->getParent();
      if (MF->getFunction()->hasFnAttr(Attribute::OptimizeForSize))
        --Latency;
    }
    return Latency;
  }
  unsigned DefAlign = DefMI->hasOneMemOperand()
    ? (*DefMI->memoperands_begin())->getAlignment() : 0;
  unsigned UseAlign = UseMI->hasOneMemOperand()
    ? (*UseMI->memoperands_begin())->getAlignment() : 0;

  unsigned DefAdj = 0;
  if (DefMI->isBundle()) {
    DefMI = getBundledDefMI(&getRegisterInfo(), DefMI, Reg, DefIdx, DefAdj);
    if (DefMI->isCopyLike() || DefMI->isInsertSubreg() ||
        DefMI->isRegSequence() || DefMI->isImplicitDef())
      return 1;
    DefMCID = &DefMI->getDesc();
  }
  unsigned UseAdj = 0;
  if (UseMI->isBundle()) {
    unsigned NewUseIdx;
    const MachineInstr *NewUseMI = getBundledUseMI(&getRegisterInfo(), UseMI,
                                                   Reg, NewUseIdx, UseAdj);
    if (NewUseMI) {
      UseMI = NewUseMI;
      UseIdx = NewUseIdx;
      UseMCID = &UseMI->getDesc();
    }
  }

  int Latency = getOperandLatency(ItinData, *DefMCID, DefIdx, DefAlign,
                                  *UseMCID, UseIdx, UseAlign);
  int Adj = DefAdj + UseAdj;
  if (Adj) {
    Latency -= Adj;
    if (Latency < 0)
      return 0;
  }

  if (Latency > 1 &&
      (Subtarget.isCortexA8() || Subtarget.isCortexA9())) {
    // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
    // variants are one cycle cheaper.
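    // e.g. 'ldr r0, [r1, r2]' or 'ldr r0, [r1, r2, lsl #2]' is one cycle
    // cheaper than 'ldr r0, [r1, r2, lsl #3]' on these cores.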
    switch (DefMCID->getOpcode()) {
    default: break;
    case ARM::LDRrs:
    case ARM::LDRBrs: {
      unsigned ShOpVal = DefMI->getOperand(3).getImm();
      unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
      if (ShImm == 0 ||
          (ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))
        --Latency;
      break;
    }
    case ARM::t2LDRs:
    case ARM::t2LDRBs:
    case ARM::t2LDRHs:
    case ARM::t2LDRSHs: {
      // Thumb2 mode: lsl only.
      unsigned ShAmt = DefMI->getOperand(3).getImm();
      if (ShAmt == 0 || ShAmt == 2)
        --Latency;
      break;
    }
    }
  }
  if (DefAlign < 8 && Subtarget.isCortexA9())
    switch (DefMCID->getOpcode()) {
    default: break;
    case ARM::VLD1q8:
    case ARM::VLD1q16:
    case ARM::VLD1q32:
    case ARM::VLD1q64:
    case ARM::VLD1q8wb_fixed:
    case ARM::VLD1q16wb_fixed:
    case ARM::VLD1q32wb_fixed:
    case ARM::VLD1q64wb_fixed:
    case ARM::VLD1q8wb_register:
    case ARM::VLD1q16wb_register:
    case ARM::VLD1q32wb_register:
    case ARM::VLD1q64wb_register:
    case ARM::VLD2d8:
    case ARM::VLD2d16:
    case ARM::VLD2d32:
    case ARM::VLD2q8:
    case ARM::VLD2q16:
    case ARM::VLD2q32:
    case ARM::VLD2d8wb_fixed:
    case ARM::VLD2d16wb_fixed:
    case ARM::VLD2d32wb_fixed:
    case ARM::VLD2q8wb_fixed:
    case ARM::VLD2q16wb_fixed:
    case ARM::VLD2q32wb_fixed:
    case ARM::VLD2d8wb_register:
    case ARM::VLD2d16wb_register:
    case ARM::VLD2d32wb_register:
    case ARM::VLD2q8wb_register:
    case ARM::VLD2q16wb_register:
    case ARM::VLD2q32wb_register:
    case ARM::VLD3d8:
    case ARM::VLD3d16:
    case ARM::VLD3d32:
    case ARM::VLD1d64T:
    case ARM::VLD3d8_UPD:
    case ARM::VLD3d16_UPD:
    case ARM::VLD3d32_UPD:
    case ARM::VLD1d64Twb_fixed:
    case ARM::VLD1d64Twb_register:
    case ARM::VLD3q8_UPD:
    case ARM::VLD3q16_UPD:
    case ARM::VLD3q32_UPD:
    case ARM::VLD4d8:
    case ARM::VLD4d16:
    case ARM::VLD4d32:
    case ARM::VLD1d64Q:
    case ARM::VLD4d8_UPD:
    case ARM::VLD4d16_UPD:
    case ARM::VLD4d32_UPD:
    case ARM::VLD1d64Qwb_fixed:
    case ARM::VLD1d64Qwb_register:
    case ARM::VLD4q8_UPD:
    case ARM::VLD4q16_UPD:
    case ARM::VLD4q32_UPD:
    case ARM::VLD1DUPq8:
    case ARM::VLD1DUPq16:
    case ARM::VLD1DUPq32:
    case ARM::VLD1DUPq8wb_fixed:
    case ARM::VLD1DUPq16wb_fixed:
    case ARM::VLD1DUPq32wb_fixed:
    case ARM::VLD1DUPq8wb_register:
    case ARM::VLD1DUPq16wb_register:
    case ARM::VLD1DUPq32wb_register:
    case ARM::VLD2DUPd8:
    case ARM::VLD2DUPd16:
    case ARM::VLD2DUPd32:
    case ARM::VLD2DUPd8wb_fixed:
    case ARM::VLD2DUPd16wb_fixed:
    case ARM::VLD2DUPd32wb_fixed:
    case ARM::VLD2DUPd8wb_register:
    case ARM::VLD2DUPd16wb_register:
    case ARM::VLD2DUPd32wb_register:
    case ARM::VLD4DUPd8:
    case ARM::VLD4DUPd16:
    case ARM::VLD4DUPd32:
    case ARM::VLD4DUPd8_UPD:
    case ARM::VLD4DUPd16_UPD:
    case ARM::VLD4DUPd32_UPD:
    case ARM::VLD1LNd8:
    case ARM::VLD1LNd16:
    case ARM::VLD1LNd32:
    case ARM::VLD1LNd8_UPD:
    case ARM::VLD1LNd16_UPD:
    case ARM::VLD1LNd32_UPD:
    case ARM::VLD2LNd8:
    case ARM::VLD2LNd16:
    case ARM::VLD2LNd32:
    case ARM::VLD2LNq16:
    case ARM::VLD2LNq32:
    case ARM::VLD2LNd8_UPD:
    case ARM::VLD2LNd16_UPD:
    case ARM::VLD2LNd32_UPD:
    case ARM::VLD2LNq16_UPD:
    case ARM::VLD2LNq32_UPD:
    case ARM::VLD4LNd8:
    case ARM::VLD4LNd16:
    case ARM::VLD4LNd32:
    case ARM::VLD4LNq16:
    case ARM::VLD4LNq32:
    case ARM::VLD4LNd8_UPD:
    case ARM::VLD4LNd16_UPD:
    case ARM::VLD4LNd32_UPD:
    case ARM::VLD4LNq16_UPD:
    case ARM::VLD4LNq32_UPD:
      // If the address is not 64-bit aligned, the latencies of these
      // instructions increase by one.
      ++Latency;
      break;
    }

  return Latency;
}
int
ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                    SDNode *DefNode, unsigned DefIdx,
                                    SDNode *UseNode, unsigned UseIdx) const {
  if (!DefNode->isMachineOpcode())
    return 1;

  const MCInstrDesc &DefMCID = get(DefNode->getMachineOpcode());

  if (isZeroCost(DefMCID.Opcode))
    return 0;

  if (!ItinData || ItinData->isEmpty())
    return DefMCID.mayLoad() ? 3 : 1;

  if (!UseNode->isMachineOpcode()) {
    int Latency = ItinData->getOperandCycle(DefMCID.getSchedClass(), DefIdx);
    if (Subtarget.isCortexA9())
      return Latency <= 2 ? 1 : Latency - 1;
    else
      return Latency <= 3 ? 1 : Latency - 2;
  }

  const MCInstrDesc &UseMCID = get(UseNode->getMachineOpcode());
  const MachineSDNode *DefMN = dyn_cast<MachineSDNode>(DefNode);
  unsigned DefAlign = !DefMN->memoperands_empty()
    ? (*DefMN->memoperands_begin())->getAlignment() : 0;
  const MachineSDNode *UseMN = dyn_cast<MachineSDNode>(UseNode);
  unsigned UseAlign = !UseMN->memoperands_empty()
    ? (*UseMN->memoperands_begin())->getAlignment() : 0;
  int Latency = getOperandLatency(ItinData, DefMCID, DefIdx, DefAlign,
                                  UseMCID, UseIdx, UseAlign);

  if (Latency > 1 &&
      (Subtarget.isCortexA8() || Subtarget.isCortexA9())) {
    // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
    // variants are one cycle cheaper.
    switch (DefMCID.getOpcode()) {
    default: break;
    case ARM::LDRrs:
    case ARM::LDRBrs: {
      unsigned ShOpVal =
        cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue();
      unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
      if (ShImm == 0 ||
          (ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))
        --Latency;
      break;
    }
    case ARM::t2LDRs:
    case ARM::t2LDRBs:
    case ARM::t2LDRHs:
    case ARM::t2LDRSHs: {
      // Thumb2 mode: lsl only.
      unsigned ShAmt =
        cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue();
      if (ShAmt == 0 || ShAmt == 2)
        --Latency;
      break;
    }
    }
  }
  if (DefAlign < 8 && Subtarget.isCortexA9())
    switch (DefMCID.getOpcode()) {
    default: break;
    case ARM::VLD1q8Pseudo:
    case ARM::VLD1q16Pseudo:
    case ARM::VLD1q32Pseudo:
    case ARM::VLD1q64Pseudo:
    case ARM::VLD1q8PseudoWB_register:
    case ARM::VLD1q16PseudoWB_register:
    case ARM::VLD1q32PseudoWB_register:
    case ARM::VLD1q64PseudoWB_register:
    case ARM::VLD1q8PseudoWB_fixed:
    case ARM::VLD1q16PseudoWB_fixed:
    case ARM::VLD1q32PseudoWB_fixed:
    case ARM::VLD1q64PseudoWB_fixed:
    case ARM::VLD2d8Pseudo:
    case ARM::VLD2d16Pseudo:
    case ARM::VLD2d32Pseudo:
    case ARM::VLD2q8Pseudo:
    case ARM::VLD2q16Pseudo:
    case ARM::VLD2q32Pseudo:
    case ARM::VLD2d8PseudoWB_fixed:
    case ARM::VLD2d16PseudoWB_fixed:
    case ARM::VLD2d32PseudoWB_fixed:
    case ARM::VLD2q8PseudoWB_fixed:
    case ARM::VLD2q16PseudoWB_fixed:
    case ARM::VLD2q32PseudoWB_fixed:
    case ARM::VLD2d8PseudoWB_register:
    case ARM::VLD2d16PseudoWB_register:
    case ARM::VLD2d32PseudoWB_register:
    case ARM::VLD2q8PseudoWB_register:
    case ARM::VLD2q16PseudoWB_register:
    case ARM::VLD2q32PseudoWB_register:
    case ARM::VLD3d8Pseudo:
    case ARM::VLD3d16Pseudo:
    case ARM::VLD3d32Pseudo:
    case ARM::VLD1d64TPseudo:
    case ARM::VLD3d8Pseudo_UPD:
    case ARM::VLD3d16Pseudo_UPD:
    case ARM::VLD3d32Pseudo_UPD:
    case ARM::VLD3q8Pseudo_UPD:
    case ARM::VLD3q16Pseudo_UPD:
    case ARM::VLD3q32Pseudo_UPD:
    case ARM::VLD3q8oddPseudo:
    case ARM::VLD3q16oddPseudo:
    case ARM::VLD3q32oddPseudo:
    case ARM::VLD3q8oddPseudo_UPD:
    case ARM::VLD3q16oddPseudo_UPD:
    case ARM::VLD3q32oddPseudo_UPD:
    case ARM::VLD4d8Pseudo:
    case ARM::VLD4d16Pseudo:
    case ARM::VLD4d32Pseudo:
    case ARM::VLD1d64QPseudo:
    case ARM::VLD4d8Pseudo_UPD:
    case ARM::VLD4d16Pseudo_UPD:
    case ARM::VLD4d32Pseudo_UPD:
    case ARM::VLD4q8Pseudo_UPD:
    case ARM::VLD4q16Pseudo_UPD:
    case ARM::VLD4q32Pseudo_UPD:
    case ARM::VLD4q8oddPseudo:
    case ARM::VLD4q16oddPseudo:
    case ARM::VLD4q32oddPseudo:
    case ARM::VLD4q8oddPseudo_UPD:
    case ARM::VLD4q16oddPseudo_UPD:
    case ARM::VLD4q32oddPseudo_UPD:
    case ARM::VLD1DUPq8Pseudo:
    case ARM::VLD1DUPq16Pseudo:
    case ARM::VLD1DUPq32Pseudo:
    case ARM::VLD1DUPq8PseudoWB_fixed:
    case ARM::VLD1DUPq16PseudoWB_fixed:
    case ARM::VLD1DUPq32PseudoWB_fixed:
    case ARM::VLD1DUPq8PseudoWB_register:
    case ARM::VLD1DUPq16PseudoWB_register:
    case ARM::VLD1DUPq32PseudoWB_register:
    case ARM::VLD2DUPd8Pseudo:
    case ARM::VLD2DUPd16Pseudo:
    case ARM::VLD2DUPd32Pseudo:
    case ARM::VLD2DUPd8PseudoWB_fixed:
    case ARM::VLD2DUPd16PseudoWB_fixed:
    case ARM::VLD2DUPd32PseudoWB_fixed:
    case ARM::VLD2DUPd8PseudoWB_register:
    case ARM::VLD2DUPd16PseudoWB_register:
    case ARM::VLD2DUPd32PseudoWB_register:
    case ARM::VLD4DUPd8Pseudo:
    case ARM::VLD4DUPd16Pseudo:
    case ARM::VLD4DUPd32Pseudo:
    case ARM::VLD4DUPd8Pseudo_UPD:
    case ARM::VLD4DUPd16Pseudo_UPD:
    case ARM::VLD4DUPd32Pseudo_UPD:
    case ARM::VLD1LNq8Pseudo:
    case ARM::VLD1LNq16Pseudo:
    case ARM::VLD1LNq32Pseudo:
    case ARM::VLD1LNq8Pseudo_UPD:
    case ARM::VLD1LNq16Pseudo_UPD:
    case ARM::VLD1LNq32Pseudo_UPD:
    case ARM::VLD2LNd8Pseudo:
    case ARM::VLD2LNd16Pseudo:
    case ARM::VLD2LNd32Pseudo:
    case ARM::VLD2LNq16Pseudo:
    case ARM::VLD2LNq32Pseudo:
    case ARM::VLD2LNd8Pseudo_UPD:
    case ARM::VLD2LNd16Pseudo_UPD:
    case ARM::VLD2LNd32Pseudo_UPD:
    case ARM::VLD2LNq16Pseudo_UPD:
    case ARM::VLD2LNq32Pseudo_UPD:
    case ARM::VLD4LNd8Pseudo:
    case ARM::VLD4LNd16Pseudo:
    case ARM::VLD4LNd32Pseudo:
    case ARM::VLD4LNq16Pseudo:
    case ARM::VLD4LNq32Pseudo:
    case ARM::VLD4LNd8Pseudo_UPD:
    case ARM::VLD4LNd16Pseudo_UPD:
    case ARM::VLD4LNd32Pseudo_UPD:
    case ARM::VLD4LNq16Pseudo_UPD:
    case ARM::VLD4LNq32Pseudo_UPD:
      // If the address is not 64-bit aligned, the latencies of these
      // instructions increase by one.
      ++Latency;
      break;
    }

  return Latency;
}
unsigned
ARMBaseInstrInfo::getOutputLatency(const InstrItineraryData *ItinData,
                                   const MachineInstr *DefMI, unsigned DefIdx,
                                   const MachineInstr *DepMI) const {
  unsigned Reg = DefMI->getOperand(DefIdx).getReg();
  if (DepMI->readsRegister(Reg, &getRegisterInfo()) || !isPredicated(DepMI))
    return 1;

  // If the second MI is predicated, then there is an implicit use dependency.
  return getOperandLatency(ItinData, DefMI, DefIdx, DepMI,
                           DepMI->getNumOperands());
}
int ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                      const MachineInstr *MI,
                                      unsigned *PredCost) const {
  if (MI->isCopyLike() || MI->isInsertSubreg() ||
      MI->isRegSequence() || MI->isImplicitDef())
    return 1;

  if (!ItinData || ItinData->isEmpty())
    return 1;

  if (MI->isBundle()) {
    int Latency = 0;
    MachineBasicBlock::const_instr_iterator I = MI;
    MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();
    while (++I != E && I->isInsideBundle()) {
      if (I->getOpcode() != ARM::t2IT)
        Latency += getInstrLatency(ItinData, I, PredCost);
    }
    return Latency;
  }

  const MCInstrDesc &MCID = MI->getDesc();
  unsigned Class = MCID.getSchedClass();
  unsigned UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (PredCost && MCID.hasImplicitDefOfPhysReg(ARM::CPSR))
    // When predicated, CPSR is an additional source operand for CPSR updating
    // instructions; this apparently increases their latencies.
    *PredCost = 1;
  if (UOps)
    return ItinData->getStageLatency(Class);
  return getNumMicroOps(ItinData, MI);
}
int ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                      SDNode *Node) const {
  if (!Node->isMachineOpcode())
    return 1;

  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Opcode = Node->getMachineOpcode();
  switch (Opcode) {
  default:
    return ItinData->getStageLatency(get(Opcode).getSchedClass());
  case ARM::VLDMQIA:
  case ARM::VSTMQIA:
    return 2;
  }
}
bool ARMBaseInstrInfo::
hasHighOperandLatency(const InstrItineraryData *ItinData,
                      const MachineRegisterInfo *MRI,
                      const MachineInstr *DefMI, unsigned DefIdx,
                      const MachineInstr *UseMI, unsigned UseIdx) const {
  unsigned DDomain = DefMI->getDesc().TSFlags & ARMII::DomainMask;
  unsigned UDomain = UseMI->getDesc().TSFlags & ARMII::DomainMask;
  if (Subtarget.isCortexA8() &&
      (DDomain == ARMII::DomainVFP || UDomain == ARMII::DomainVFP))
    // Cortex-A8 VFP instructions are not pipelined.
    return true;

  // Hoist VFP / NEON instructions with 4 or higher latency.
  int Latency = getOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx);
  if (Latency <= 3)
    return false;
  return DDomain == ARMII::DomainVFP || DDomain == ARMII::DomainNEON ||
         UDomain == ARMII::DomainVFP || UDomain == ARMII::DomainNEON;
}
bool ARMBaseInstrInfo::
hasLowDefLatency(const InstrItineraryData *ItinData,
                 const MachineInstr *DefMI, unsigned DefIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DDomain = DefMI->getDesc().TSFlags & ARMII::DomainMask;
  if (DDomain == ARMII::DomainGeneral) {
    unsigned DefClass = DefMI->getDesc().getSchedClass();
    int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
    return (DefCycle != -1 && DefCycle <= 2);
  }
  return false;
}
bool ARMBaseInstrInfo::verifyInstruction(const MachineInstr *MI,
                                         StringRef &ErrInfo) const {
  if (convertAddSubFlagsOpcode(MI->getOpcode())) {
    ErrInfo = "Pseudo flag setting opcodes only exist in Selection DAG";
    return false;
  }
  return true;
}
bool
ARMBaseInstrInfo::isFpMLxInstruction(unsigned Opcode, unsigned &MulOpc,
                                     unsigned &AddSubOpc,
                                     bool &NegAcc, bool &HasLane) const {
  DenseMap<unsigned, unsigned>::const_iterator I = MLxEntryMap.find(Opcode);
  if (I == MLxEntryMap.end())
    return false;

  const ARM_MLxEntry &Entry = ARM_MLxTable[I->second];
  MulOpc = Entry.MulOpc;
  AddSubOpc = Entry.AddSubOpc;
  NegAcc = Entry.NegAcc;
  HasLane = Entry.HasLane;
  return true;
}
//===----------------------------------------------------------------------===//
// Execution domains.
//===----------------------------------------------------------------------===//
//
// Some instructions go down the NEON pipeline, some go down the VFP pipeline,
// and some can go down both. The vmov instructions go down the VFP pipeline,
// but they can be changed to vorr equivalents that are executed by the NEON
// pipeline.
//
// We use the following execution domain numbering:
//
enum ARMExeDomain {
  ExeGeneric = 0,
  ExeVFP = 1,
  ExeNEON = 2
};
//
// Also see ARMInstrFormats.td and Domain* enums in ARMBaseInfo.h
//
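// getExecutionDomain returns a pair (current domain, bitmask of domains this
// instruction may be switched to); a mask of 0 means it cannot be swizzled.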
std::pair<uint16_t, uint16_t>
ARMBaseInstrInfo::getExecutionDomain(const MachineInstr *MI) const {
  // VMOVD is a VFP instruction, but can be changed to NEON if it isn't
  // predicated.
  if (MI->getOpcode() == ARM::VMOVD && !isPredicated(MI))
    return std::make_pair(ExeVFP, (1<<ExeVFP) | (1<<ExeNEON));

  // No other instructions can be swizzled, so just determine their domain.
  unsigned Domain = MI->getDesc().TSFlags & ARMII::DomainMask;

  if (Domain & ARMII::DomainNEON)
    return std::make_pair(ExeNEON, 0);

  // Certain instructions can go either way on Cortex-A8.
  // Treat them as NEON instructions.
  if ((Domain & ARMII::DomainNEONA8) && Subtarget.isCortexA8())
    return std::make_pair(ExeNEON, 0);

  if (Domain & ARMII::DomainVFP)
    return std::make_pair(ExeVFP, 0);

  return std::make_pair(ExeGeneric, 0);
}
void
ARMBaseInstrInfo::setExecutionDomain(MachineInstr *MI, unsigned Domain) const {
  // We only know how to change VMOVD into VORR.
  assert(MI->getOpcode() == ARM::VMOVD && "Can only swizzle VMOVD");
  if (Domain != ExeNEON)
    return;

  // Zap the predicate operands.
  assert(!isPredicated(MI) && "Cannot predicate a VORRd");
  MI->RemoveOperand(3);
  MI->RemoveOperand(2);

  // Change to a VORRd which requires two identical use operands.
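  // e.g. 'vmov d0, d1' becomes 'vorr d0, d1, d1'.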
  MI->setDesc(get(ARM::VORRd));

  // Add the extra source operand and new predicates.
  // This will go before any implicit ops.
  AddDefaultPred(MachineInstrBuilder(MI).addOperand(MI->getOperand(1)));
}