1 //===- ARMInstrInfo.cpp - ARM Instruction Information -----------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains the ARM implementation of the TargetInstrInfo class.
12 //===----------------------------------------------------------------------===//
14 #include "ARMInstrInfo.h"
16 #include "ARMAddressingModes.h"
17 #include "ARMGenInstrInfo.inc"
18 #include "ARMMachineFunctionInfo.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/CodeGen/LiveVariables.h"
21 #include "llvm/CodeGen/MachineFrameInfo.h"
22 #include "llvm/CodeGen/MachineInstrBuilder.h"
23 #include "llvm/CodeGen/MachineJumpTableInfo.h"
24 #include "llvm/Target/TargetAsmInfo.h"
25 #include "llvm/Support/CommandLine.h"
// Hidden command-line flag gating the 2-address -> 3-address conversion in
// convertToThreeAddress() below.
// NOTE(review): the declaring line (presumably "static cl::opt<bool>") is
// elided from this listing — the embedded line numbers jump 25 -> 29.
29 EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
30 cl::desc("Enable ARM 2-addr to 3-addr conv"));
// Append the default "always" predicate to an instruction under construction:
// condition code ARMCC::AL plus a null (0) predicate register operand.
// NOTE(review): closing brace elided from this listing (line 35 missing).
33 const MachineInstrBuilder &AddDefaultPred(const MachineInstrBuilder &MIB) {
34 return MIB.addImm((int64_t)ARMCC::AL).addReg(0);
// Append the default optional-def CC operand (presumably a null reg, so the
// instruction does not set CPSR) — body elided from this listing; only the
// signature is visible. TODO(review): confirm against the full source.
38 const MachineInstrBuilder &AddDefaultCC(const MachineInstrBuilder &MIB) {
// Base-class ctor: hands the generated ARM instruction descriptor table
// (ARMInsts, from ARMGenInstrInfo.inc) to TargetInstrInfoImpl. The STI
// argument is unused here in the visible code.
42 ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget &STI)
43 : TargetInstrInfoImpl(ARMInsts, array_lengthof(ARMInsts)) {
// Derived ctor: forwards the subtarget to the base and constructs the
// register-info object RI with this instr-info and the subtarget.
46 ARMInstrInfo::ARMInstrInfo(const ARMSubtarget &STI)
47 : ARMBaseInstrInfo(STI), RI(*this, STI) {
50 /// Return true if the instruction is a register to register move and
51 /// leave the source and dest operands in the passed parameters.
// NOTE(review): this listing elides the opcode switch (lines 57-65 etc.);
// the visible fragments show two arms that both read operand 1 as the source
// register and operand 0 as the destination. Sub-register indices are always
// reported as 0 (ARM has no sub-registers relevant here).
53 bool ARMInstrInfo::isMoveInstr(const MachineInstr &MI,
54 unsigned &SrcReg, unsigned &DstReg,
55 unsigned& SrcSubIdx, unsigned& DstSubIdx) const {
56 SrcSubIdx = DstSubIdx = 0; // No sub-registers.
58 unsigned oc = MI.getOpcode();
// First visible arm: plain src/dst extraction (enclosing case label elided).
66 SrcReg = MI.getOperand(1).getReg();
67 DstReg = MI.getOperand(0).getReg();
// Second visible arm: MOV-style opcodes — sanity-check the operand layout
// before extracting the registers.
70 assert(MI.getDesc().getNumOperands() >= 2 &&
71 MI.getOperand(0).isReg() &&
72 MI.getOperand(1).isReg() &&
73 "Invalid ARM MOV instruction");
74 SrcReg = MI.getOperand(1).getReg();
75 DstReg = MI.getOperand(0).getReg();
// If MI is a load from a fixed stack slot, set FrameIndex and return the
// destination register; otherwise (elided default path) it returns 0.
// NOTE(review): the case labels are elided from this listing; by the operand
// patterns, the first arm matches an addrmode2 load (LDR: FI + reg + imm
// operands, both zero => plain slot load) and the second an addrmode5-style
// load (FLDD/FLDS: FI + imm). TODO: confirm opcodes against full source.
80 unsigned ARMInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
81 int &FrameIndex) const {
82 switch (MI->getOpcode()) {
// Arm 1: frame-index base, zero offset register and zero immediate only —
// anything else is a derived address, not a direct slot access.
85 if (MI->getOperand(1).isFI() &&
86 MI->getOperand(2).isReg() &&
87 MI->getOperand(3).isImm() &&
88 MI->getOperand(2).getReg() == 0 &&
89 MI->getOperand(3).getImm() == 0) {
90 FrameIndex = MI->getOperand(1).getIndex();
91 return MI->getOperand(0).getReg();
// Arm 2: frame-index base with zero immediate offset.
96 if (MI->getOperand(1).isFI() &&
97 MI->getOperand(2).isImm() &&
98 MI->getOperand(2).getImm() == 0) {
99 FrameIndex = MI->getOperand(1).getIndex();
100 return MI->getOperand(0).getReg();
// Mirror of isLoadFromStackSlot for stores: if MI stores a register to a
// fixed stack slot, set FrameIndex and return the stored (source) register.
// NOTE(review): case labels and the default return are elided from this
// listing; operand patterns match the load variant above.
107 unsigned ARMInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
108 int &FrameIndex) const {
109 switch (MI->getOpcode()) {
// Arm 1: reg+imm addressing — both must be zero for a direct slot store.
112 if (MI->getOperand(1).isFI() &&
113 MI->getOperand(2).isReg() &&
114 MI->getOperand(3).isImm() &&
115 MI->getOperand(2).getReg() == 0 &&
116 MI->getOperand(3).getImm() == 0) {
117 FrameIndex = MI->getOperand(1).getIndex();
118 return MI->getOperand(0).getReg();
// Arm 2: imm-only addressing with zero offset.
123 if (MI->getOperand(1).isFI() &&
124 MI->getOperand(2).isImm() &&
125 MI->getOperand(2).getImm() == 0) {
126 FrameIndex = MI->getOperand(1).getIndex();
127 return MI->getOperand(0).getReg();
// Re-materialize the value defined by Orig at insertion point I in MBB.
// MOVi2pieces (a constant split across two instructions) is special-cased:
// it is re-emitted via the constant pool instead of being cloned. All other
// instructions are cloned and simply retargeted to DestReg.
// NOTE(review): the DestReg parameter line and parts of the
// emitLoadConstPool argument list are elided from this listing.
135 void ARMInstrInfo::reMaterialize(MachineBasicBlock &MBB,
136 MachineBasicBlock::iterator I,
138 const MachineInstr *Orig) const {
139 DebugLoc dl = Orig->getDebugLoc();
140 if (Orig->getOpcode() == ARM::MOVi2pieces) {
// Rebuild the constant load through the constant pool, preserving the
// original immediate, predicate, and predicate register.
141 RI.emitLoadConstPool(MBB, I, this, dl,
143 Orig->getOperand(1).getImm(),
144 (ARMCC::CondCodes)Orig->getOperand(2).getImm(),
145 Orig->getOperand(3).getReg());
// Generic path: clone and redirect the def to DestReg.
149 MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
150 MI->getOperand(0).setReg(DestReg);
// Map a pre/post-indexed load/store opcode to its un-indexed equivalent
// (used by convertToThreeAddress to split an indexed access into a plain
// access plus an explicit add/sub). NOTE(review): almost the whole switch is
// elided from this listing — only two post-indexed case labels survive; the
// return values are not visible.
154 static unsigned getUnindexedOpcode(unsigned Opc) {
167 case ARM::LDRSH_POST:
170 case ARM::LDRSB_POST:
// Convert a pre/post-indexed load/store (a 2-address form: the base register
// is also written back) into two instructions: an un-indexed memory op plus
// an explicit ADD/SUB that produces the writeback value. Returns the new
// memory instruction, or NULL when no profitable conversion exists.
// NOTE(review): this listing elides many interior lines (guard on
// EnableARM3Addr, several case/else lines, loop closers); comments below
// describe only what the visible code shows.
186 ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
187 MachineBasicBlock::iterator &MBBI,
188 LiveVariables *LV) const {
192 MachineInstr *MI = MBBI;
193 MachineFunction &MF = *MI->getParent()->getParent();
194 unsigned TSFlags = MI->getDesc().TSFlags;
// Only pre- and post-indexed forms are candidates; everything else bails.
196 switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
197 default: return NULL;
198 case ARMII::IndexModePre:
201 case ARMII::IndexModePost:
205 // Try splitting an indexed load/store to an un-indexed one plus an add/sub
207 unsigned MemOpc = getUnindexedOpcode(MI->getOpcode());
211 MachineInstr *UpdateMI = NULL;
212 MachineInstr *MemMI = NULL;
213 unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
214 const TargetInstrDesc &TID = MI->getDesc();
215 unsigned NumOps = TID.getNumOperands();
216 bool isLoad = !TID.mayStore();
// Operand layout: for a load, operand 0 is the value def and operand 1 the
// writeback def; for a store, operand 0 is the writeback def. The offset,
// offset-immediate, and predicate sit at the tail of the operand list.
217 const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0);
218 const MachineOperand &Base = MI->getOperand(2);
219 const MachineOperand &Offset = MI->getOperand(NumOps-3);
220 unsigned WBReg = WB.getReg();
221 unsigned BaseReg = Base.getReg();
222 unsigned OffReg = Offset.getReg();
223 unsigned OffImm = MI->getOperand(NumOps-2).getImm();
224 ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI->getOperand(NumOps-1).getImm();
227 assert(false && "Unknown indexed op!");
// Build the base-update instruction (UpdateMI) per addressing mode.
229 case ARMII::AddrMode2: {
230 bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
231 unsigned Amt = ARM_AM::getAM2Offset(OffImm);
// Immediate offset: only fold if the amount encodes as a shifter imm.
233 int SOImmVal = ARM_AM::getSOImmVal(Amt);
235 // Can't encode it in a so_imm operand. This transformation will
236 // add more than 1 instruction. Abandon!
238 UpdateMI = BuildMI(MF, MI->getDebugLoc(),
239 get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
240 .addReg(BaseReg).addImm(SOImmVal)
241 .addImm(Pred).addReg(0).addReg(0);
242 } else if (Amt != 0) {
// Shifted-register offset.
243 ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
244 unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
245 UpdateMI = BuildMI(MF, MI->getDebugLoc(),
246 get(isSub ? ARM::SUBrs : ARM::ADDrs), WBReg)
247 .addReg(BaseReg).addReg(OffReg).addReg(0).addImm(SOOpc)
248 .addImm(Pred).addReg(0).addReg(0);
// Plain register offset (else-branch line elided).
250 UpdateMI = BuildMI(MF, MI->getDebugLoc(),
251 get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
252 .addReg(BaseReg).addReg(OffReg)
253 .addImm(Pred).addReg(0).addReg(0);
256 case ARMII::AddrMode3 : {
257 bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub;
258 unsigned Amt = ARM_AM::getAM3Offset(OffImm);
260 // Immediate is 8-bits. It's guaranteed to fit in a so_imm operand.
261 UpdateMI = BuildMI(MF, MI->getDebugLoc(),
262 get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
263 .addReg(BaseReg).addImm(Amt)
264 .addImm(Pred).addReg(0).addReg(0);
266 UpdateMI = BuildMI(MF, MI->getDebugLoc(),
267 get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
268 .addReg(BaseReg).addReg(OffReg)
269 .addImm(Pred).addReg(0).addReg(0);
// Build the un-indexed memory instruction (MemMI). For pre-index the new
// base is WBReg (update first); for post-index it is the original BaseReg
// (update second). Ordering in NewMIs reflects that.
274 std::vector<MachineInstr*> NewMIs;
277 MemMI = BuildMI(MF, MI->getDebugLoc(),
278 get(MemOpc), MI->getOperand(0).getReg())
279 .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
281 MemMI = BuildMI(MF, MI->getDebugLoc(),
282 get(MemOpc)).addReg(MI->getOperand(1).getReg())
283 .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
284 NewMIs.push_back(MemMI);
285 NewMIs.push_back(UpdateMI);
288 MemMI = BuildMI(MF, MI->getDebugLoc(),
289 get(MemOpc), MI->getOperand(0).getReg())
290 .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
292 MemMI = BuildMI(MF, MI->getDebugLoc(),
293 get(MemOpc)).addReg(MI->getOperand(1).getReg())
294 .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
296 UpdateMI->getOperand(0).setIsDead();
297 NewMIs.push_back(UpdateMI);
298 NewMIs.push_back(MemMI);
301 // Transfer LiveVariables states, kill / dead info.
303 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
304 MachineOperand &MO = MI->getOperand(i);
305 if (MO.isReg() && MO.getReg() &&
306 TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
307 unsigned Reg = MO.getReg();
309 LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
// A def of the writeback reg now lives on UpdateMI; other defs on MemMI.
311 MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
313 LV->addVirtualRegisterDead(Reg, NewMI);
315 if (MO.isUse() && MO.isKill()) {
316 for (unsigned j = 0; j < 2; ++j) {
317 // Look at the two new MI's in reverse order.
318 MachineInstr *NewMI = NewMIs[j];
319 if (!NewMI->readsRegister(Reg))
321 LV->addVirtualRegisterKilled(Reg, NewMI);
322 if (VI.removeKill(MI))
323 VI.Kills.push_back(NewMI);
// Insert the two new instructions before the old one (which the elided
// tail presumably erases). Note reverse order: NewMIs[1] first.
331 MFI->insert(MBBI, NewMIs[1]);
332 MFI->insert(MBBI, NewMIs[0]);
// Standard TargetInstrInfo branch analysis: inspect the terminators of MBB
// and classify them. On success it fills TBB/FBB/Cond; returning true means
// "cannot analyze". Handles ARM, Thumb1 and Thumb2 opcode variants
// uniformly. NOTE(review): several early returns and closing braces are
// elided from this listing.
338 ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
339 MachineBasicBlock *&FBB,
340 SmallVectorImpl<MachineOperand> &Cond,
341 bool AllowModify) const {
342 // If the block has no terminators, it just falls into the block after it.
343 MachineBasicBlock::iterator I = MBB.end();
344 if (I == MBB.begin() || !isUnpredicatedTerminator(--I))
347 // Get the last instruction in the block.
348 MachineInstr *LastInst = I;
350 // If there is only one terminator instruction, process it.
351 unsigned LastOpc = LastInst->getOpcode();
352 if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
// Single unconditional branch: destination only.
353 if (LastOpc == ARM::B || LastOpc == ARM::tB || LastOpc == ARM::t2B) {
354 TBB = LastInst->getOperand(0).getMBB();
// Single conditional branch: destination plus the two-operand ARM
// condition (cond code immediate + CPSR-ish predicate register).
357 if (LastOpc == ARM::Bcc || LastOpc == ARM::tBcc || LastOpc == ARM::t2Bcc) {
358 // Block ends with fall-through condbranch.
359 TBB = LastInst->getOperand(0).getMBB();
360 Cond.push_back(LastInst->getOperand(1));
361 Cond.push_back(LastInst->getOperand(2));
364 return true; // Can't handle indirect branch.
367 // Get the instruction before it if it is a terminator.
368 MachineInstr *SecondLastInst = I;
370 // If there are three terminators, we don't know what sort of block this is.
371 if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
374 // If the block ends with ARM::B/ARM::tB/ARM::t2B and a
375 // ARM::Bcc/ARM::tBcc/ARM::t2Bcc, handle it.
376 unsigned SecondLastOpc = SecondLastInst->getOpcode();
377 if ((SecondLastOpc == ARM::Bcc && LastOpc == ARM::B) ||
378 (SecondLastOpc == ARM::tBcc && LastOpc == ARM::tB) ||
379 (SecondLastOpc == ARM::t2Bcc && LastOpc == ARM::t2B)) {
380 TBB = SecondLastInst->getOperand(0).getMBB();
381 Cond.push_back(SecondLastInst->getOperand(1));
382 Cond.push_back(SecondLastInst->getOperand(2));
383 FBB = LastInst->getOperand(0).getMBB();
387 // If the block ends with two unconditional branches, handle it. The second
388 // one is not executed, so remove it.
389 if ((SecondLastOpc == ARM::B || SecondLastOpc==ARM::tB ||
390 SecondLastOpc==ARM::t2B) &&
391 (LastOpc == ARM::B || LastOpc == ARM::tB || LastOpc == ARM::t2B)) {
392 TBB = SecondLastInst->getOperand(0).getMBB();
// NOTE(review): the AllowModify guard around this erase is elided here.
395 I->eraseFromParent();
399 // ...likewise if it ends with a branch table followed by an unconditional
400 // branch. The branch folder can create these, and we must get rid of them for
401 // correctness of Thumb constant islands.
402 if ((SecondLastOpc == ARM::BR_JTr || SecondLastOpc==ARM::BR_JTm ||
403 SecondLastOpc == ARM::BR_JTadd || SecondLastOpc==ARM::tBR_JTr ||
404 SecondLastOpc == ARM::t2BR_JTr || SecondLastOpc==ARM::t2BR_JTm ||
405 SecondLastOpc == ARM::t2BR_JTadd) &&
406 (LastOpc == ARM::B || LastOpc == ARM::tB || LastOpc == ARM::t2B)) {
409 I->eraseFromParent();
413 // Otherwise, can't handle this.
// Remove up to two branch instructions (unconditional + conditional) from
// the end of MBB, returning the number removed (0, 1 or 2). Opcode variants
// are chosen from the function's Thumb/Thumb2 mode so the same code serves
// all three instruction sets. NOTE(review): the "--I" decrements and final
// "return 2" are elided from this listing.
418 unsigned ARMBaseInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
419 MachineFunction &MF = *MBB.getParent();
420 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
421 int BOpc = AFI->isThumbFunction() ?
422 (AFI->isThumb2Function() ? ARM::t2B : ARM::tB) : ARM::B;
423 int BccOpc = AFI->isThumbFunction() ?
424 (AFI->isThumb2Function() ? ARM::t2Bcc : ARM::tBcc) : ARM::Bcc;
426 MachineBasicBlock::iterator I = MBB.end();
427 if (I == MBB.begin()) return 0;
// Last instruction is not a branch we own — nothing to remove.
429 if (I->getOpcode() != BOpc && I->getOpcode() != BccOpc)
432 // Remove the branch.
433 I->eraseFromParent();
437 if (I == MBB.begin()) return 1;
// Second-to-last can only be the conditional form.
439 if (I->getOpcode() != BccOpc)
442 // Remove the branch.
443 I->eraseFromParent();
// Insert branch instruction(s) at the end of MBB: an unconditional branch,
// a lone conditional branch, or a conditional + unconditional pair for a
// two-way branch. Returns the number inserted (returns elided from this
// listing). Cond holds the two-operand ARM predicate produced by
// AnalyzeBranch (cond-code imm, predicate reg).
448 ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
449 MachineBasicBlock *FBB,
450 const SmallVectorImpl<MachineOperand> &Cond) const {
451 // FIXME this should probably have a DebugLoc argument
452 DebugLoc dl = DebugLoc::getUnknownLoc();
453 MachineFunction &MF = *MBB.getParent();
454 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
// Pick ARM/Thumb/Thumb2 opcodes per the current function's mode.
455 int BOpc = AFI->isThumbFunction() ?
456 (AFI->isThumb2Function() ? ARM::t2B : ARM::tB) : ARM::B;
457 int BccOpc = AFI->isThumbFunction() ?
458 (AFI->isThumb2Function() ? ARM::t2Bcc : ARM::tBcc) : ARM::Bcc;
460 // Shouldn't be a fall through.
461 assert(TBB && "InsertBranch must not be told to insert a fallthrough");
462 assert((Cond.size() == 2 || Cond.size() == 0) &&
463 "ARM branch conditions have two components!");
// One-branch case (enclosing "if (FBB == 0)" elided from this listing).
466 if (Cond.empty()) // Unconditional branch?
467 BuildMI(&MBB, dl, get(BOpc)).addMBB(TBB);
469 BuildMI(&MBB, dl, get(BccOpc)).addMBB(TBB)
470 .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
474 // Two-way conditional branch.
475 BuildMI(&MBB, dl, get(BccOpc)).addMBB(TBB)
476 .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
477 BuildMI(&MBB, dl, get(BOpc)).addMBB(FBB);
// Emit a register-to-register copy appropriate for the register class:
// MOVr for GPRs (with default predicate and CC), FCPYS/FCPYD for FP
// single/double, VMOVQ for NEON quad. Returns false for unsupported
// cross-class copies. NOTE(review): the .addReg(SrcReg) tails of the first
// three BuildMI chains and the returns are elided from this listing.
481 bool ARMInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
482 MachineBasicBlock::iterator I,
483 unsigned DestReg, unsigned SrcReg,
484 const TargetRegisterClass *DestRC,
485 const TargetRegisterClass *SrcRC) const {
// Use the insertion point's debug location when one exists.
486 DebugLoc DL = DebugLoc::getUnknownLoc();
487 if (I != MBB.end()) DL = I->getDebugLoc();
489 if (DestRC != SrcRC) {
490 // Not yet supported!
494 if (DestRC == ARM::GPRRegisterClass)
495 AddDefaultCC(AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::MOVr), DestReg)
497 else if (DestRC == ARM::SPRRegisterClass)
498 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FCPYS), DestReg)
500 else if (DestRC == ARM::DPRRegisterClass)
501 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FCPYD), DestReg)
503 else if (DestRC == ARM::QPRRegisterClass)
504 BuildMI(MBB, I, DL, get(ARM::VMOVQ), DestReg).addReg(SrcReg);
// Spill SrcReg to stack slot FI: STR for GPRs (reg-offset 0, imm 0),
// FSTD for double-precision, FSTS for single-precision (asserted to be the
// only remaining class). All stores carry the default "always" predicate.
512 storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
513 unsigned SrcReg, bool isKill, int FI,
514 const TargetRegisterClass *RC) const {
515 DebugLoc DL = DebugLoc::getUnknownLoc();
516 if (I != MBB.end()) DL = I->getDebugLoc();
518 if (RC == ARM::GPRRegisterClass) {
519 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STR))
520 .addReg(SrcReg, getKillRegState(isKill))
521 .addFrameIndex(FI).addReg(0).addImm(0));
522 } else if (RC == ARM::DPRRegisterClass) {
523 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FSTD))
524 .addReg(SrcReg, getKillRegState(isKill))
525 .addFrameIndex(FI).addImm(0));
// Fallback (else line elided in this listing): must be SPR.
527 assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
528 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FSTS))
529 .addReg(SrcReg, getKillRegState(isKill))
530 .addFrameIndex(FI).addImm(0));
// Build (but do not insert) a store of SrcReg to an arbitrary address given
// as a MachineOperand list; the chosen opcode per register class is elided
// from this listing (presumably STR / FSTD / FSTS as in
// storeRegToStackSlot — TODO confirm). The finished instruction is appended
// to NewMIs for the caller to place.
534 void ARMInstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
536 SmallVectorImpl<MachineOperand> &Addr,
537 const TargetRegisterClass *RC,
538 SmallVectorImpl<MachineInstr*> &NewMIs) const{
539 DebugLoc DL = DebugLoc::getUnknownLoc();
541 if (RC == ARM::GPRRegisterClass) {
543 } else if (RC == ARM::DPRRegisterClass) {
546 assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
// Copy every address operand onto the new instruction verbatim.
550 MachineInstrBuilder MIB =
551 BuildMI(MF, DL, get(Opc)).addReg(SrcReg, getKillRegState(isKill));
552 for (unsigned i = 0, e = Addr.size(); i != e; ++i)
553 MIB.addOperand(Addr[i]);
555 NewMIs.push_back(MIB);
// Reload DestReg from stack slot FI — exact mirror of storeRegToStackSlot:
// LDR for GPRs, FLDD for doubles, FLDS for singles, always-predicated.
560 loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
561 unsigned DestReg, int FI,
562 const TargetRegisterClass *RC) const {
563 DebugLoc DL = DebugLoc::getUnknownLoc();
564 if (I != MBB.end()) DL = I->getDebugLoc();
566 if (RC == ARM::GPRRegisterClass) {
567 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::LDR), DestReg)
568 .addFrameIndex(FI).addReg(0).addImm(0));
569 } else if (RC == ARM::DPRRegisterClass) {
570 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FLDD), DestReg)
571 .addFrameIndex(FI).addImm(0));
// Fallback (else line elided in this listing): must be SPR.
573 assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
574 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FLDS), DestReg)
575 .addFrameIndex(FI).addImm(0));
// Build (but do not insert) a load of DestReg from an arbitrary address
// operand list; the per-class opcode selection lines are elided from this
// listing (presumably LDR / FLDD / FLDS — TODO confirm). The instruction is
// appended to NewMIs for the caller to place.
580 loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
581 SmallVectorImpl<MachineOperand> &Addr,
582 const TargetRegisterClass *RC,
583 SmallVectorImpl<MachineInstr*> &NewMIs) const {
584 DebugLoc DL = DebugLoc::getUnknownLoc();
586 if (RC == ARM::GPRRegisterClass) {
588 } else if (RC == ARM::DPRRegisterClass) {
591 assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
// Copy every address operand onto the new instruction verbatim.
595 MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg);
596 for (unsigned i = 0, e = Addr.size(); i != e; ++i)
597 MIB.addOperand(Addr[i]);
599 NewMIs.push_back(MIB);
// Fold a stack-slot reference directly into a register move: a MOVr/FCPYS/
// FCPYD whose source (OpNum != 0) or destination (OpNum == 0) lives in slot
// FI becomes an LDR/FLDS/FLDD or STR/FSTS/FSTD respectively, preserving the
// move's predicate operands. Only single-operand folds are attempted.
// NOTE(review): the opcode switch/case lines and final return are elided
// from this listing; the three visible arms correspond (by the opcodes they
// emit) to MOVr, FCPYS, and FCPYD.
603 MachineInstr *ARMInstrInfo::
604 foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
605 const SmallVectorImpl<unsigned> &Ops, int FI) const {
606 if (Ops.size() != 1) return NULL;
608 unsigned OpNum = Ops[0];
609 unsigned Opc = MI->getOpcode();
610 MachineInstr *NewMI = NULL;
// Arm 1 (GPR move): a MOVr that updates CPSR cannot be replaced by a
// plain load/store, so refuse the fold.
614 if (MI->getOperand(4).getReg() == ARM::CPSR)
615 // If it is updating CPSR, then it cannot be folded.
617 unsigned Pred = MI->getOperand(2).getImm();
618 unsigned PredReg = MI->getOperand(3).getReg();
619 if (OpNum == 0) { // move -> store
620 unsigned SrcReg = MI->getOperand(1).getReg();
621 bool isKill = MI->getOperand(1).isKill();
622 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::STR))
623 .addReg(SrcReg, getKillRegState(isKill))
624 .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
625 } else { // move -> load
626 unsigned DstReg = MI->getOperand(0).getReg();
627 bool isDead = MI->getOperand(0).isDead();
628 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::LDR))
629 .addReg(DstReg, RegState::Define | getDeadRegState(isDead))
630 .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
// Arm 2 (single-precision FP move -> FSTS/FLDS).
635 unsigned Pred = MI->getOperand(2).getImm();
636 unsigned PredReg = MI->getOperand(3).getReg();
637 if (OpNum == 0) { // move -> store
638 unsigned SrcReg = MI->getOperand(1).getReg();
639 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::FSTS))
640 .addReg(SrcReg).addFrameIndex(FI)
641 .addImm(0).addImm(Pred).addReg(PredReg);
642 } else { // move -> load
643 unsigned DstReg = MI->getOperand(0).getReg();
644 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::FLDS), DstReg)
646 .addImm(0).addImm(Pred).addReg(PredReg);
// Arm 3 (double-precision FP move -> FSTD/FLDD).
651 unsigned Pred = MI->getOperand(2).getImm();
652 unsigned PredReg = MI->getOperand(3).getReg();
653 if (OpNum == 0) { // move -> store
654 unsigned SrcReg = MI->getOperand(1).getReg();
655 bool isKill = MI->getOperand(1).isKill();
656 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::FSTD))
657 .addReg(SrcReg, getKillRegState(isKill))
658 .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
659 } else { // move -> load
660 unsigned DstReg = MI->getOperand(0).getReg();
661 bool isDead = MI->getOperand(0).isDead();
662 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::FLDD))
663 .addReg(DstReg, RegState::Define | getDeadRegState(isDead))
664 .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
// Cheap predicate matching foldMemoryOperandImpl: report whether a single
// operand of MI could be folded into a stack access. The opcode switch is
// elided from this listing; visibly, a CPSR-updating move is rejected and
// at least one opcode class is unconditionally rejected (FIXME).
674 ARMInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
675 const SmallVectorImpl<unsigned> &Ops) const {
676 if (Ops.size() != 1) return false;
678 unsigned Opc = MI->getOpcode();
682 // If it is updating CPSR, then it cannot be folded.
683 return MI->getOperand(4).getReg() != ARM::CPSR;
690 return false; // FIXME
// True when the last instruction of MBB unconditionally leaves the block
// (return, unconditional branch, or a jump-table branch), so no fall-through
// edge exists. NOTE(review): several case labels (and the "return true"
// lines they share) are elided from this listing.
697 ARMBaseInstrInfo::BlockHasNoFallThrough(const MachineBasicBlock &MBB) const {
698 if (MBB.empty()) return false;
700 switch (MBB.back().getOpcode()) {
701 case ARM::BX_RET: // Return.
704 case ARM::tBX_RET_vararg:
708 case ARM::t2B: // Uncond branch.
711 case ARM::BR_JTr: // Jumptable branch.
713 case ARM::BR_JTm: // Jumptable branch through mem.
714 case ARM::t2BR_JTadd:
715 case ARM::BR_JTadd: // Jumptable branch add to pc.
717 default: return false;
// Invert the condition code in a two-operand ARM branch condition in place
// (EQ<->NE, LT<->GE, ...). The elided tail presumably returns false
// ("successfully reversed") per the TargetInstrInfo contract.
721 bool ARMBaseInstrInfo::
722 ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
723 ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
724 Cond[0].setImm(ARMCC::getOppositeCondition(CC));
// An instruction counts as predicated when it has a predicate operand whose
// condition code is anything other than "always" (ARMCC::AL).
728 bool ARMBaseInstrInfo::isPredicated(const MachineInstr *MI) const {
729 int PIdx = MI->findFirstPredOperandIdx();
730 return PIdx != -1 && MI->getOperand(PIdx).getImm() != ARMCC::AL;
// Apply the predicate Pred (cond-code imm + predicate reg) to MI.
// Unconditional branches are first rewritten to their conditional opcode
// and given fresh predicate operands; other instructions have their
// existing predicate operand pair overwritten in place. Return-true lines
// and the failure return are elided from this listing.
733 bool ARMBaseInstrInfo::
734 PredicateInstruction(MachineInstr *MI,
735 const SmallVectorImpl<MachineOperand> &Pred) const {
736 unsigned Opc = MI->getOpcode();
737 if (Opc == ARM::B || Opc == ARM::tB || Opc == ARM::t2B) {
738 MI->setDesc(get((Opc == ARM::B) ? ARM::Bcc :
739 ((Opc == ARM::tB) ? ARM::tBcc : ARM::t2Bcc)));
740 MI->addOperand(MachineOperand::CreateImm(Pred[0].getImm()));
741 MI->addOperand(MachineOperand::CreateReg(Pred[1].getReg(), false));
// Generic path: overwrite the existing predicate operands, if any.
745 int PIdx = MI->findFirstPredOperandIdx();
747 MachineOperand &PMO = MI->getOperand(PIdx);
748 PMO.setImm(Pred[0].getImm());
749 MI->getOperand(PIdx+1).setReg(Pred[1].getReg());
// True when predicate Pred1 logically implies Pred2 (e.g. unsigned-higher
// HI implies HS in the elided switch). The visible tail lists implication
// arms; their governing case labels (the CC1 values) are elided from this
// listing, so only the implied CC2 sets are visible.
755 bool ARMBaseInstrInfo::
756 SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
757 const SmallVectorImpl<MachineOperand> &Pred2) const {
// Malformed predicate lists can never be compared.
758 if (Pred1.size() > 2 || Pred2.size() > 2)
761 ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm();
762 ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm();
772 return CC2 == ARMCC::HI;
774 return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
776 return CC2 == ARMCC::GT;
778 return CC2 == ARMCC::LT;
// Report whether MI (possibly) defines the predicate register: scan its
// operands for CPSR, collecting matches into Pred. The early return and the
// body collecting the operand / final return are elided from this listing.
782 bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI,
783 std::vector<MachineOperand> &Pred) const {
784 const TargetInstrDesc &TID = MI->getDesc();
// No implicit defs and no optional def => cannot touch CPSR; bail early.
785 if (!TID.getImplicitDefs() && !TID.hasOptionalDef())
789 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
790 const MachineOperand &MO = MI->getOperand(i);
791 if (MO.isReg() && MO.getReg() == ARM::CPSR) {
801 /// FIXME: Works around a gcc miscompilation with -fstrict-aliasing
// Out-of-line (DISABLE_INLINE) accessor for the number of basic blocks in
// jump table JTI. Kept non-inline purely as the workaround described above.
// NOTE(review): the second declaration's parameter line is elided here.
802 static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
803 unsigned JTI) DISABLE_INLINE;
804 static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
806 return JT[JTI].MBBs.size();
809 /// GetInstSize - Return the size of the specified MachineInstr.
// Decodes the size class from the instruction descriptor's TSFlags, then
// special-cases pseudo instructions, inline asm, constant-pool entries,
// SJLJ setjmp, and jump-table branches (whose inline table contributes
// 4 bytes per entry). NOTE(review): several case labels and return lines
// are elided from this listing.
811 unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
812 const MachineBasicBlock &MBB = *MI->getParent();
813 const MachineFunction *MF = MBB.getParent();
814 const TargetAsmInfo *TAI = MF->getTarget().getTargetAsmInfo();
816 // Basic size info comes from the TSFlags field.
817 const TargetInstrDesc &TID = MI->getDesc();
818 unsigned TSFlags = TID.TSFlags;
820 switch ((TSFlags & ARMII::SizeMask) >> ARMII::SizeShift) {
// Size-class 0 (label elided): pseudos and asm with no fixed encoding.
822 // If this machine instr is an inline asm, measure it.
823 if (MI->getOpcode() == ARM::INLINEASM)
824 return TAI->getInlineAsmLength(MI->getOperand(0).getSymbolName());
827 switch (MI->getOpcode()) {
829 assert(0 && "Unknown or unset size field for instr!");
// Zero-size target-independent markers.
831 case TargetInstrInfo::IMPLICIT_DEF:
832 case TargetInstrInfo::DECLARE:
833 case TargetInstrInfo::DBG_LABEL:
834 case TargetInstrInfo::EH_LABEL:
839 case ARMII::Size8Bytes: return 8; // Arm instruction x 2.
840 case ARMII::Size4Bytes: return 4; // Arm instruction.
841 case ARMII::Size2Bytes: return 2; // Thumb instruction.
842 case ARMII::SizeSpecial: {
843 switch (MI->getOpcode()) {
844 case ARM::CONSTPOOL_ENTRY:
845 // If this machine instr is a constant pool entry, its size is recorded as
// ... operand 2 (continuation of the comment elided in this listing).
847 return MI->getOperand(2).getImm();
848 case ARM::Int_eh_sjlj_setjmp: return 12;
// Jump-table branch family (sibling case labels elided here).
854 case ARM::t2BR_JTadd:
856 // These are jumptable branches, i.e. a branch followed by an inlined
857 // jumptable. The size is 4 + 4 * number of entries.
// The jump-table index operand sits before the trailing predicate
// operands when the opcode is predicable.
858 unsigned NumOps = TID.getNumOperands();
859 MachineOperand JTOP =
860 MI->getOperand(NumOps - (TID.isPredicable() ? 3 : 2));
861 unsigned JTI = JTOP.getIndex();
862 const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
863 const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
864 assert(JTI < JT.size());
865 // Thumb instructions are 2 byte aligned, but JT entries are 4 byte
866 // 4 aligned. The assembler / linker may add 2 byte padding just before
867 // the JT entries. The size does not include this padding; the
868 // constant islands pass does separate bookkeeping for it.
869 // FIXME: If we know the size of the function is less than (1 << 16) *2
870 // bytes, we can use 16-bit entries instead. Then there won't be an
// ... (continuation of the FIXME comment elided in this listing).
872 return getNumJTEntries(JT, JTI) * 4 +
873 ((MI->getOpcode()==ARM::tBR_JTr) ? 2 : 4);
876 // Otherwise, pseudo-instruction sizes are zero.
881 return 0; // Not reached