1 //===- ARMInstrInfo.cpp - ARM Instruction Information -----------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains the ARM implementation of the TargetInstrInfo class.
12 //===----------------------------------------------------------------------===//
14 #include "ARMInstrInfo.h"
16 #include "ARMAddressingModes.h"
17 #include "ARMGenInstrInfo.inc"
18 #include "ARMMachineFunctionInfo.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/CodeGen/LiveVariables.h"
21 #include "llvm/CodeGen/MachineFrameInfo.h"
22 #include "llvm/CodeGen/MachineInstrBuilder.h"
23 #include "llvm/CodeGen/MachineJumpTableInfo.h"
24 #include "llvm/Target/TargetAsmInfo.h"
25 #include "llvm/Support/CommandLine.h"
// Developer-only (hidden) command-line flag gating the ARM two-address ->
// three-address conversion performed in convertToThreeAddress() below.
28 static cl::opt<bool> EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
29 cl::desc("Enable ARM 2-addr to 3-addr conv"));
32 const MachineInstrBuilder &AddDefaultPred(const MachineInstrBuilder &MIB) {
33 return MIB.addImm((int64_t)ARMCC::AL).addReg(0);
// AddDefaultCC - presumably appends the default (unused) optional CC-setting
// operand, a null register, to MIB — TODO confirm: the body is missing from
// this listing (source truncated here).
37 const MachineInstrBuilder &AddDefaultCC(const MachineInstrBuilder &MIB) {
// ARMInstrInfo constructor: hands the tablegen-generated ARM instruction
// descriptor table (ARMInsts) to the TargetInstrInfoImpl base class.
// NOTE(review): the initializer list is cut off mid-way in this listing;
// presumably it also initializes RI and subtarget state — confirm upstream.
41 ARMInstrInfo::ARMInstrInfo(const ARMSubtarget &STI)
42 : TargetInstrInfoImpl(ARMInsts, array_lengthof(ARMInsts)),
46 const TargetRegisterClass *ARMInstrInfo::getPointerRegClass() const {
47 return &ARM::GPRRegClass;
50 /// Return true if the instruction is a register to register move and
51 /// leave the source and dest operands in the passed parameters.
53 bool ARMInstrInfo::isMoveInstr(const MachineInstr &MI,
54 unsigned &SrcReg, unsigned &DstReg) const {
55 unsigned oc = MI.getOpcode();
// NOTE(review): the switch on 'oc' and its case labels are missing from this
// listing. The two surviving arms both extract operand 1 as the source and
// operand 0 as the destination register.
61 SrcReg = MI.getOperand(1).getReg();
62 DstReg = MI.getOperand(0).getReg();
// Sanity-check the MOV-like form before trusting its operands: at least two
// operands and both operand 0 and 1 must be registers.
66 assert(MI.getDesc().getNumOperands() >= 2 &&
67 MI.getOperand(0).isRegister() &&
68 MI.getOperand(1).isRegister() &&
69 "Invalid ARM MOV instruction");
70 SrcReg = MI.getOperand(1).getReg();
71 DstReg = MI.getOperand(0).getReg();
// isLoadFromStackSlot - If MI is a direct load from a stack slot, record the
// frame index in FrameIndex and return the destination (loaded-into)
// register; otherwise fall through (the default return is missing from this
// truncated listing, along with the case labels of the switch below).
76 unsigned ARMInstrInfo::isLoadFromStackSlot(MachineInstr *MI, int &FrameIndex) const{
77 switch (MI->getOpcode()) {
// First arm (opcode label missing): frame-index base, null offset register,
// and zero immediate — i.e. a plain load addressing the slot directly.
80 if (MI->getOperand(1).isFrameIndex() &&
81 MI->getOperand(2).isRegister() &&
82 MI->getOperand(3).isImmediate() &&
83 MI->getOperand(2).getReg() == 0 &&
84 MI->getOperand(3).getImm() == 0) {
85 FrameIndex = MI->getOperand(1).getIndex();
86 return MI->getOperand(0).getReg();
// Second arm (opcode label missing): frame-index base with zero immediate
// offset — the 3-operand (FP load style) form.
91 if (MI->getOperand(1).isFrameIndex() &&
92 MI->getOperand(2).isImmediate() &&
93 MI->getOperand(2).getImm() == 0) {
94 FrameIndex = MI->getOperand(1).getIndex();
95 return MI->getOperand(0).getReg();
// Third arm: same shape as the second, for another opcode (label missing).
99 if (MI->getOperand(1).isFrameIndex() &&
100 MI->getOperand(2).isImmediate() &&
101 MI->getOperand(2).getImm() == 0) {
102 FrameIndex = MI->getOperand(1).getIndex();
103 return MI->getOperand(0).getReg();
// isStoreToStackSlot - Mirror of isLoadFromStackSlot: if MI is a direct
// store to a stack slot, record the frame index and return the stored
// (source) register. Case labels and the default return are missing from
// this truncated listing.
110 unsigned ARMInstrInfo::isStoreToStackSlot(MachineInstr *MI, int &FrameIndex) const {
111 switch (MI->getOpcode()) {
// First arm: frame-index address with null offset register and zero
// immediate — a plain store addressing the slot directly.
114 if (MI->getOperand(1).isFrameIndex() &&
115 MI->getOperand(2).isRegister() &&
116 MI->getOperand(3).isImmediate() &&
117 MI->getOperand(2).getReg() == 0 &&
118 MI->getOperand(3).getImm() == 0) {
119 FrameIndex = MI->getOperand(1).getIndex();
120 return MI->getOperand(0).getReg();
// Second arm: frame-index base with zero immediate offset.
125 if (MI->getOperand(1).isFrameIndex() &&
126 MI->getOperand(2).isImmediate() &&
127 MI->getOperand(2).getImm() == 0) {
128 FrameIndex = MI->getOperand(1).getIndex();
129 return MI->getOperand(0).getReg();
// Third arm: same shape for another opcode (label missing).
133 if (MI->getOperand(1).isFrameIndex() &&
134 MI->getOperand(2).isImmediate() &&
135 MI->getOperand(2).getImm() == 0) {
136 FrameIndex = MI->getOperand(1).getIndex();
137 return MI->getOperand(0).getReg();
// reMaterialize - Re-emit the defining instruction Orig at insertion point I,
// redefining DestReg instead of Orig's original destination.
// NOTE(review): the DestReg parameter line (and at least one other line of
// the signature) is missing from this truncated listing.
144 void ARMInstrInfo::reMaterialize(MachineBasicBlock &MBB,
145 MachineBasicBlock::iterator I,
147 const MachineInstr *Orig) const {
// MOVi2pieces can't simply be cloned; rebuild it as a constant-pool load via
// the register-info helper instead.
148 if (Orig->getOpcode() == ARM::MOVi2pieces) {
149 RI.emitLoadConstPool(MBB, I, DestReg, Orig->getOperand(1).getImm(),
150 Orig->getOperand(2).getImm(),
151 Orig->getOperand(3).getReg(), this, false);
// Generic path: clone the instruction and retarget its def operand.
155 MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
156 MI->getOperand(0).setReg(DestReg);
// getUnindexedOpcode - Map a pre/post-indexed ARM load/store opcode to its
// plain (un-indexed) counterpart. NOTE(review): almost the entire switch body
// is missing from this truncated listing; only two post-indexed signed-load
// case labels survive, without their return values.
160 static unsigned getUnindexedOpcode(unsigned Opc) {
173 case ARM::LDRSH_POST:
176 case ARM::LDRSB_POST:
// convertToThreeAddress - Split a pre/post-indexed load/store into an
// un-indexed memory op plus an explicit ADD/SUB that performs the base
// write-back. Returns the replacement instruction, or NULL when the
// conversion isn't possible/profitable. The two new instructions are
// inserted into the block and live-variable info is transferred.
// NOTE(review): this listing is truncated in several places (the return-type
// line, early-exit checks, 'else'/'if (isLoad)' framing around the BuildMI
// pairs, and closing braces are missing); comments below describe only the
// surviving lines.
192 ARMInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
193 MachineBasicBlock::iterator &MBBI,
194 LiveVariables *LV) const {
198 MachineInstr *MI = MBBI;
199 MachineFunction &MF = *MI->getParent()->getParent();
200 unsigned TSFlags = MI->getDesc().TSFlags;
// Only pre/post-indexed instructions are candidates; anything else bails.
202 switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
203 default: return NULL;
204 case ARMII::IndexModePre:
207 case ARMII::IndexModePost:
211 // Try spliting an indexed load / store to a un-indexed one plus an add/sub
213 unsigned MemOpc = getUnindexedOpcode(MI->getOpcode());
217 MachineInstr *UpdateMI = NULL;
218 MachineInstr *MemMI = NULL;
219 unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
220 const TargetInstrDesc &TID = MI->getDesc();
221 unsigned NumOps = TID.getNumOperands();
222 bool isLoad = !TID.mayStore();
// Operand layout: for a load, operand 1 is the write-back def; for a store,
// operand 0 is. The base is operand 2; the offset reg/imm and predicate sit
// at the tail of the operand list.
223 const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0);
224 const MachineOperand &Base = MI->getOperand(2);
225 const MachineOperand &Offset = MI->getOperand(NumOps-3);
226 unsigned WBReg = WB.getReg();
227 unsigned BaseReg = Base.getReg();
228 unsigned OffReg = Offset.getReg();
229 unsigned OffImm = MI->getOperand(NumOps-2).getImm();
230 ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI->getOperand(NumOps-1).getImm();
233 assert(false && "Unknown indexed op!");
// AddrMode2 (word/byte): build the base-update ADD/SUB from the encoded
// AM2 offset — immediate form if it encodes as a so_imm, shifted-register
// form if there's a shift amount, plain register form otherwise.
235 case ARMII::AddrMode2: {
236 bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
237 unsigned Amt = ARM_AM::getAM2Offset(OffImm);
239 int SOImmVal = ARM_AM::getSOImmVal(Amt);
241 // Can't encode it in a so_imm operand. This transformation will
242 // add more than 1 instruction. Abandon!
244 UpdateMI = BuildMI(MF, get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
245 .addReg(BaseReg).addImm(SOImmVal)
246 .addImm(Pred).addReg(0).addReg(0);
247 } else if (Amt != 0) {
248 ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
249 unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
250 UpdateMI = BuildMI(MF, get(isSub ? ARM::SUBrs : ARM::ADDrs), WBReg)
251 .addReg(BaseReg).addReg(OffReg).addReg(0).addImm(SOOpc)
252 .addImm(Pred).addReg(0).addReg(0);
254 UpdateMI = BuildMI(MF, get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
255 .addReg(BaseReg).addReg(OffReg)
256 .addImm(Pred).addReg(0).addReg(0);
// AddrMode3 (halfword/signed): 8-bit immediate or register offset.
259 case ARMII::AddrMode3 : {
260 bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub;
261 unsigned Amt = ARM_AM::getAM3Offset(OffImm);
263 // Immediate is 8-bits. It's guaranteed to fit in a so_imm operand.
264 UpdateMI = BuildMI(MF, get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
265 .addReg(BaseReg).addImm(Amt)
266 .addImm(Pred).addReg(0).addReg(0);
268 UpdateMI = BuildMI(MF, get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
269 .addReg(BaseReg).addReg(OffReg)
270 .addImm(Pred).addReg(0).addReg(0);
275 std::vector<MachineInstr*> NewMIs;
// Pre-index: update the base first, then access memory through WBReg.
// NewMIs is filled in reverse order (memory op first) for the kill scan.
278 MemMI = BuildMI(MF, get(MemOpc), MI->getOperand(0).getReg())
279 .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
281 MemMI = BuildMI(MF, get(MemOpc)).addReg(MI->getOperand(1).getReg())
282 .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
283 NewMIs.push_back(MemMI);
284 NewMIs.push_back(UpdateMI);
// Post-index: access memory through the old base, then update it.
287 MemMI = BuildMI(MF, get(MemOpc), MI->getOperand(0).getReg())
288 .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
290 MemMI = BuildMI(MF, get(MemOpc)).addReg(MI->getOperand(1).getReg())
291 .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
293 UpdateMI->getOperand(0).setIsDead();
294 NewMIs.push_back(UpdateMI);
295 NewMIs.push_back(MemMI);
298 // Transfer LiveVariables states, kill / dead info.
299 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
300 MachineOperand &MO = MI->getOperand(i);
301 if (MO.isRegister() && MO.getReg() &&
302 TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
303 unsigned Reg = MO.getReg();
306 LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
// A def of the write-back register now lives on UpdateMI; every other def
// belongs to the memory instruction.
308 MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
310 LV->addVirtualRegisterDead(Reg, NewMI);
// Move kill flags onto whichever new instruction still reads the register.
312 if (MO.isUse() && MO.isKill()) {
313 for (unsigned j = 0; j < 2; ++j) {
314 // Look at the two new MI's in reverse order.
315 MachineInstr *NewMI = NewMIs[j];
316 if (!NewMI->readsRegister(Reg))
318 LV->addVirtualRegisterKilled(Reg, NewMI);
319 if (VI.removeKill(MI))
320 VI.Kills.push_back(NewMI);
// Insert the two replacements in program order (NewMIs was built reversed).
328 MFI->insert(MBBI, NewMIs[1]);
329 MFI->insert(MBBI, NewMIs[0]);
// AnalyzeBranch - Classify the terminators of MBB for the branch folder.
// On success fills TBB (taken block), FBB (fall-through/false block, may be
// null) and Cond (two operands: condition code + flag register); returns
// true when the block can't be analyzed.
// NOTE(review): several early returns and closing braces are missing from
// this truncated listing.
334 bool ARMInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
335 MachineBasicBlock *&FBB,
336 SmallVectorImpl<MachineOperand> &Cond) const {
337 // If the block has no terminators, it just falls into the block after it.
338 MachineBasicBlock::iterator I = MBB.end();
339 if (I == MBB.begin() || !isUnpredicatedTerminator(--I))
342 // Get the last instruction in the block.
343 MachineInstr *LastInst = I;
345 // If there is only one terminator instruction, process it.
346 unsigned LastOpc = LastInst->getOpcode();
347 if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
348 if (LastOpc == ARM::B || LastOpc == ARM::tB) {
349 TBB = LastInst->getOperand(0).getMBB();
352 if (LastOpc == ARM::Bcc || LastOpc == ARM::tBcc) {
353 // Block ends with fall-through condbranch.
354 TBB = LastInst->getOperand(0).getMBB();
355 Cond.push_back(LastInst->getOperand(1));
356 Cond.push_back(LastInst->getOperand(2));
359 return true; // Can't handle indirect branch.
362 // Get the instruction before it if it is a terminator.
363 MachineInstr *SecondLastInst = I;
365 // If there are three terminators, we don't know what sort of block this is.
366 if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
369 // If the block ends with ARM::B/ARM::tB and a ARM::Bcc/ARM::tBcc, handle it.
370 unsigned SecondLastOpc = SecondLastInst->getOpcode();
371 if ((SecondLastOpc == ARM::Bcc && LastOpc == ARM::B) ||
372 (SecondLastOpc == ARM::tBcc && LastOpc == ARM::tB)) {
373 TBB = SecondLastInst->getOperand(0).getMBB();
374 Cond.push_back(SecondLastInst->getOperand(1));
375 Cond.push_back(SecondLastInst->getOperand(2));
376 FBB = LastInst->getOperand(0).getMBB();
380 // If the block ends with two unconditional branches, handle it. The second
381 // one is not executed, so remove it.
382 if ((SecondLastOpc == ARM::B || SecondLastOpc==ARM::tB) &&
383 (LastOpc == ARM::B || LastOpc == ARM::tB)) {
384 TBB = SecondLastInst->getOperand(0).getMBB();
386 I->eraseFromParent();
390 // Likewise if it ends with a branch table followed by an unconditional branch.
391 // The branch folder can create these, and we must get rid of them for
392 // correctness of Thumb constant islands.
393 if ((SecondLastOpc == ARM::BR_JTr || SecondLastOpc==ARM::BR_JTm ||
394 SecondLastOpc == ARM::BR_JTadd || SecondLastOpc==ARM::tBR_JTr) &&
395 (LastOpc == ARM::B || LastOpc == ARM::tB)) {
397 I->eraseFromParent();
401 // Otherwise, can't handle this.
// RemoveBranch - Erase up to two branch terminators (an unconditional branch
// and/or a conditional branch) from the end of MBB, using the Thumb or ARM
// opcodes depending on the function. Returns how many were removed.
// NOTE(review): the --I steps and intermediate returns are missing from this
// truncated listing.
406 unsigned ARMInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
407 MachineFunction &MF = *MBB.getParent();
408 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
409 int BOpc = AFI->isThumbFunction() ? ARM::tB : ARM::B;
410 int BccOpc = AFI->isThumbFunction() ? ARM::tBcc : ARM::Bcc;
412 MachineBasicBlock::iterator I = MBB.end();
413 if (I == MBB.begin()) return 0;
415 if (I->getOpcode() != BOpc && I->getOpcode() != BccOpc)
418 // Remove the branch.
419 I->eraseFromParent();
// One branch removed; check for a preceding conditional branch.
423 if (I == MBB.begin()) return 1;
425 if (I->getOpcode() != BccOpc)
428 // Remove the branch.
429 I->eraseFromParent();
// InsertBranch - Append branch terminator(s) to MBB: an unconditional B/tB,
// a lone conditional Bcc/tBcc, or the conditional + unconditional pair for a
// two-way branch. Cond carries {condition-code imm, flag register}.
// NOTE(review): the single-branch return and closing braces are missing from
// this truncated listing.
433 unsigned ARMInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
434 MachineBasicBlock *FBB,
435 const SmallVectorImpl<MachineOperand> &Cond) const {
436 MachineFunction &MF = *MBB.getParent();
437 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
438 int BOpc = AFI->isThumbFunction() ? ARM::tB : ARM::B;
439 int BccOpc = AFI->isThumbFunction() ? ARM::tBcc : ARM::Bcc;
441 // Shouldn't be a fall through.
442 assert(TBB && "InsertBranch must not be told to insert a fallthrough");
443 assert((Cond.size() == 2 || Cond.size() == 0) &&
444 "ARM branch conditions have two components!");
// One-branch case (FBB is null in the surrounding context).
447 if (Cond.empty()) // Unconditional branch?
448 BuildMI(&MBB, get(BOpc)).addMBB(TBB);
450 BuildMI(&MBB, get(BccOpc)).addMBB(TBB)
451 .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
455 // Two-way conditional branch.
456 BuildMI(&MBB, get(BccOpc)).addMBB(TBB)
457 .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
458 BuildMI(&MBB, get(BOpc)).addMBB(FBB);
// copyRegToReg - Emit a register-to-register copy at I: tMOVr for Thumb GPRs,
// predicated MOVr for ARM GPRs, FCPYS/FCPYD for single/double FP registers.
// Cross-class copies are rejected.
// NOTE(review): the 'return false' for the unsupported case, the .addReg
// source operands, and the trailing return are missing from this truncated
// listing.
462 bool ARMInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
463 MachineBasicBlock::iterator I,
464 unsigned DestReg, unsigned SrcReg,
465 const TargetRegisterClass *DestRC,
466 const TargetRegisterClass *SrcRC) const {
467 if (DestRC != SrcRC) {
468 // Not yet supported!
472 if (DestRC == ARM::GPRRegisterClass) {
473 MachineFunction &MF = *MBB.getParent();
474 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
475 if (AFI->isThumbFunction())
476 BuildMI(MBB, I, get(ARM::tMOVr), DestReg).addReg(SrcReg);
478 AddDefaultCC(AddDefaultPred(BuildMI(MBB, I, get(ARM::MOVr), DestReg)
480 } else if (DestRC == ARM::SPRRegisterClass)
481 AddDefaultPred(BuildMI(MBB, I, get(ARM::FCPYS), DestReg)
483 else if (DestRC == ARM::DPRRegisterClass)
484 AddDefaultPred(BuildMI(MBB, I, get(ARM::FCPYD), DestReg)
// ARMInstrAddOperand - Append operand MO to MIB, dispatching on whether it is
// a register, immediate, or frame index; asserts on anything else.
// NOTE(review): the leading 'if (MO.isRegister())' line is missing from this
// truncated listing.
492 static const MachineInstrBuilder &ARMInstrAddOperand(MachineInstrBuilder &MIB,
493 MachineOperand &MO) {
495 MIB = MIB.addReg(MO.getReg(), MO.isDef(), MO.isImplicit());
496 else if (MO.isImmediate())
497 MIB = MIB.addImm(MO.getImm());
498 else if (MO.isFrameIndex())
499 MIB = MIB.addFrameIndex(MO.getIndex());
501 assert(0 && "Unknown operand for ARMInstrAddOperand!");
// storeRegToStackSlot - Spill SrcReg to frame index FI: tSpill for Thumb
// GPRs, predicated STR for ARM GPRs, FSTD/FSTS for double/single FP regs.
// NOTE(review): the 'void ARMInstrInfo::' return-type line and an 'else'
// before the STR path are missing from this truncated listing.
507 storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
508 unsigned SrcReg, bool isKill, int FI,
509 const TargetRegisterClass *RC) const {
510 if (RC == ARM::GPRRegisterClass) {
511 MachineFunction &MF = *MBB.getParent();
512 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
513 if (AFI->isThumbFunction())
514 BuildMI(MBB, I, get(ARM::tSpill)).addReg(SrcReg, false, false, isKill)
515 .addFrameIndex(FI).addImm(0);
517 AddDefaultPred(BuildMI(MBB, I, get(ARM::STR))
518 .addReg(SrcReg, false, false, isKill)
519 .addFrameIndex(FI).addReg(0).addImm(0));
520 } else if (RC == ARM::DPRRegisterClass) {
521 AddDefaultPred(BuildMI(MBB, I, get(ARM::FSTD))
522 .addReg(SrcReg, false, false, isKill)
523 .addFrameIndex(FI).addImm(0));
// Remaining case: single-precision FP register class.
525 assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
526 AddDefaultPred(BuildMI(MBB, I, get(ARM::FSTS))
527 .addReg(SrcReg, false, false, isKill)
528 .addFrameIndex(FI).addImm(0));
// storeRegToAddr - Build (without inserting) a store of SrcReg through the
// address operands in Addr, appending the new instruction(s) to NewMIs.
// Picks tSpill/tSTR for Thumb GPRs; the ARM GPR and FP opcodes are chosen in
// lines missing from this truncated listing (an 'unsigned Opc' declaration,
// the isKill parameter line, and the opcode assignments are absent).
532 void ARMInstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
534 SmallVectorImpl<MachineOperand> &Addr,
535 const TargetRegisterClass *RC,
536 SmallVectorImpl<MachineInstr*> &NewMIs) const {
538 if (RC == ARM::GPRRegisterClass) {
539 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
540 if (AFI->isThumbFunction()) {
// Thumb: spill form when the address is a frame index, otherwise tSTR.
541 Opc = Addr[0].isFrameIndex() ? ARM::tSpill : ARM::tSTR;
542 MachineInstrBuilder MIB =
543 BuildMI(MF, get(Opc)).addReg(SrcReg, false, false, isKill);
544 for (unsigned i = 0, e = Addr.size(); i != e; ++i)
545 MIB = ARMInstrAddOperand(MIB, Addr[i]);
546 NewMIs.push_back(MIB);
550 } else if (RC == ARM::DPRRegisterClass) {
553 assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
// Common tail: build the chosen opcode and append all address operands.
557 MachineInstrBuilder MIB =
558 BuildMI(MF, get(Opc)).addReg(SrcReg, false, false, isKill);
559 for (unsigned i = 0, e = Addr.size(); i != e; ++i)
560 MIB = ARMInstrAddOperand(MIB, Addr[i]);
562 NewMIs.push_back(MIB);
// loadRegFromStackSlot - Reload DestReg from frame index FI: tRestore for
// Thumb GPRs, predicated LDR for ARM GPRs, FLDD/FLDS for double/single FP.
// NOTE(review): the 'void ARMInstrInfo::' return-type line and an 'else'
// before the LDR path are missing from this truncated listing.
567 loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
568 unsigned DestReg, int FI,
569 const TargetRegisterClass *RC) const {
570 if (RC == ARM::GPRRegisterClass) {
571 MachineFunction &MF = *MBB.getParent();
572 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
573 if (AFI->isThumbFunction())
574 BuildMI(MBB, I, get(ARM::tRestore), DestReg)
575 .addFrameIndex(FI).addImm(0);
577 AddDefaultPred(BuildMI(MBB, I, get(ARM::LDR), DestReg)
578 .addFrameIndex(FI).addReg(0).addImm(0));
579 } else if (RC == ARM::DPRRegisterClass) {
580 AddDefaultPred(BuildMI(MBB, I, get(ARM::FLDD), DestReg)
581 .addFrameIndex(FI).addImm(0));
// Remaining case: single-precision FP register class.
583 assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
584 AddDefaultPred(BuildMI(MBB, I, get(ARM::FLDS), DestReg)
585 .addFrameIndex(FI).addImm(0));
// loadRegFromAddr - Build (without inserting) a load of DestReg through the
// address operands in Addr, appending the new instruction(s) to NewMIs.
// Mirror of storeRegToAddr; as there, the 'unsigned Opc' declaration and the
// ARM/FP opcode assignments are missing from this truncated listing.
589 void ARMInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
590 SmallVectorImpl<MachineOperand> &Addr,
591 const TargetRegisterClass *RC,
592 SmallVectorImpl<MachineInstr*> &NewMIs) const {
594 if (RC == ARM::GPRRegisterClass) {
595 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
596 if (AFI->isThumbFunction()) {
// Thumb: restore form when the address is a frame index, otherwise tLDR.
597 Opc = Addr[0].isFrameIndex() ? ARM::tRestore : ARM::tLDR;
598 MachineInstrBuilder MIB = BuildMI(MF, get(Opc), DestReg);
599 for (unsigned i = 0, e = Addr.size(); i != e; ++i)
600 MIB = ARMInstrAddOperand(MIB, Addr[i]);
601 NewMIs.push_back(MIB);
605 } else if (RC == ARM::DPRRegisterClass) {
608 assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
// Common tail: build the chosen opcode and append all address operands.
612 MachineInstrBuilder MIB = BuildMI(MF, get(Opc), DestReg);
613 for (unsigned i = 0, e = Addr.size(); i != e; ++i)
614 MIB = ARMInstrAddOperand(MIB, Addr[i]);
616 NewMIs.push_back(MIB);
// spillCalleeSavedRegisters - Thumb-only: emit a single tPUSH carrying every
// callee-saved register (in reverse CSI order) as a killed source operand.
// Returns false (declining custom handling) for ARM-mode functions or an
// empty CSI list; the trailing 'return true' is missing from this listing.
620 bool ARMInstrInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
621 MachineBasicBlock::iterator MI,
622 const std::vector<CalleeSavedInfo> &CSI) const {
623 MachineFunction &MF = *MBB.getParent();
624 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
625 if (!AFI->isThumbFunction() || CSI.empty())
628 MachineInstrBuilder MIB = BuildMI(MBB, MI, get(ARM::tPUSH));
629 for (unsigned i = CSI.size(); i != 0; --i) {
630 unsigned Reg = CSI[i-1].getReg();
631 // Add the callee-saved register as live-in. It's killed at the spill.
633 MIB.addReg(Reg, false/*isDef*/,false/*isImp*/,true/*isKill*/);
// restoreCalleeSavedRegisters - Thumb-only: emit a single tPOP restoring the
// callee-saved registers as defs. When LR is among them (and the function is
// not vararg — see the isVarArg flag) the instruction is retargeted to
// tPOP_RET so the pop doubles as the return. Returns false for ARM-mode
// functions or an empty CSI list.
// NOTE(review): the LR/PC special-case branches and the trailing return are
// partially missing from this truncated listing.
638 bool ARMInstrInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
639 MachineBasicBlock::iterator MI,
640 const std::vector<CalleeSavedInfo> &CSI) const {
641 MachineFunction &MF = *MBB.getParent();
642 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
643 if (!AFI->isThumbFunction() || CSI.empty())
646 bool isVarArg = AFI->getVarArgsRegSaveSize() > 0;
647 MachineInstr *PopMI = MF.CreateMachineInstr(get(ARM::tPOP));
648 MBB.insert(MI, PopMI);
649 for (unsigned i = CSI.size(); i != 0; --i) {
650 unsigned Reg = CSI[i-1].getReg();
651 if (Reg == ARM::LR) {
652 // Special epilogue for vararg functions. See emitEpilogue
656 PopMI->setDesc(get(ARM::tPOP_RET));
659 PopMI->addOperand(MachineOperand::CreateReg(Reg, true));
// foldMemoryOperand - Fold a stack-slot reference directly into a move-like
// instruction: a reg-reg move whose operand Ops[0] is being spilled/reloaded
// becomes a direct store (OpNum == 0, the def is replaced) or load
// (OpNum != 0, the use is replaced). Handles ARM MOVr, Thumb tMOVr, and the
// FP copies FCPYS/FCPYD. Returns the new instruction or NULL when folding
// isn't possible.
// NOTE(review): the opcode switch labels, the FI parameter line, NULL
// returns, and closing braces are missing from this truncated listing.
664 MachineInstr *ARMInstrInfo::foldMemoryOperand(MachineFunction &MF,
666 SmallVectorImpl<unsigned> &Ops,
668 if (Ops.size() != 1) return NULL;
670 unsigned OpNum = Ops[0];
671 unsigned Opc = MI->getOpcode();
672 MachineInstr *NewMI = NULL;
// ARM MOVr arm: refuse to fold a CPSR-updating move, then carry the
// predicate operands (2: cond code, 3: pred reg) over to the new LDR/STR.
676 if (MI->getOperand(4).getReg() == ARM::CPSR)
677 // If it is updating CPSR, then it cannot be folded.
679 unsigned Pred = MI->getOperand(2).getImm();
680 unsigned PredReg = MI->getOperand(3).getReg();
681 if (OpNum == 0) { // move -> store
682 unsigned SrcReg = MI->getOperand(1).getReg();
683 bool isKill = MI->getOperand(1).isKill();
684 NewMI = BuildMI(MF, get(ARM::STR)).addReg(SrcReg, false, false, isKill)
685 .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
686 } else { // move -> load
687 unsigned DstReg = MI->getOperand(0).getReg();
688 bool isDead = MI->getOperand(0).isDead();
689 NewMI = BuildMI(MF, get(ARM::LDR)).addReg(DstReg, true, false, false, isDead)
690 .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
// Thumb tMOVr arm: tSpill/tRestore only address low registers.
695 if (OpNum == 0) { // move -> store
696 unsigned SrcReg = MI->getOperand(1).getReg();
697 bool isKill = MI->getOperand(1).isKill();
698 if (RI.isPhysicalRegister(SrcReg) && !RI.isLowRegister(SrcReg))
699 // tSpill cannot take a high register operand.
701 NewMI = BuildMI(MF, get(ARM::tSpill)).addReg(SrcReg, false, false, isKill)
702 .addFrameIndex(FI).addImm(0);
703 } else { // move -> load
704 unsigned DstReg = MI->getOperand(0).getReg();
705 if (RI.isPhysicalRegister(DstReg) && !RI.isLowRegister(DstReg))
706 // tRestore cannot target a high register operand.
708 bool isDead = MI->getOperand(0).isDead();
709 NewMI = BuildMI(MF, get(ARM::tRestore))
710 .addReg(DstReg, true, false, false, isDead)
711 .addFrameIndex(FI).addImm(0);
// FCPYS arm: fold into FSTS / FLDS, preserving the predicate.
716 unsigned Pred = MI->getOperand(2).getImm();
717 unsigned PredReg = MI->getOperand(3).getReg();
718 if (OpNum == 0) { // move -> store
719 unsigned SrcReg = MI->getOperand(1).getReg();
720 NewMI = BuildMI(MF, get(ARM::FSTS)).addReg(SrcReg).addFrameIndex(FI)
721 .addImm(0).addImm(Pred).addReg(PredReg);
722 } else { // move -> load
723 unsigned DstReg = MI->getOperand(0).getReg();
724 NewMI = BuildMI(MF, get(ARM::FLDS), DstReg).addFrameIndex(FI)
725 .addImm(0).addImm(Pred).addReg(PredReg);
// FCPYD arm: fold into FSTD / FLDD, preserving kill/dead flags.
730 unsigned Pred = MI->getOperand(2).getImm();
731 unsigned PredReg = MI->getOperand(3).getReg();
732 if (OpNum == 0) { // move -> store
733 unsigned SrcReg = MI->getOperand(1).getReg();
734 bool isKill = MI->getOperand(1).isKill();
735 NewMI = BuildMI(MF, get(ARM::FSTD)).addReg(SrcReg, false, false, isKill)
736 .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
737 } else { // move -> load
738 unsigned DstReg = MI->getOperand(0).getReg();
739 bool isDead = MI->getOperand(0).isDead();
740 NewMI = BuildMI(MF, get(ARM::FLDD)).addReg(DstReg, true, false, false, isDead)
741 .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
// canFoldMemoryOperand - Predicate companion to foldMemoryOperand: true when
// a single-operand fold of MI into a stack access would succeed. Applies
// the same restrictions (no CPSR-updating MOVr; Thumb tSpill/tRestore need
// low registers).
// NOTE(review): the opcode switch labels, 'return false' lines, and closing
// braces are missing from this truncated listing.
750 bool ARMInstrInfo::canFoldMemoryOperand(MachineInstr *MI,
751 SmallVectorImpl<unsigned> &Ops) const {
752 if (Ops.size() != 1) return false;
754 unsigned OpNum = Ops[0];
755 unsigned Opc = MI->getOpcode();
759 // If it is updating CPSR, then it cannot be folded.
760 return MI->getOperand(4).getReg() != ARM::CPSR;
762 if (OpNum == 0) { // move -> store
763 unsigned SrcReg = MI->getOperand(1).getReg();
764 if (RI.isPhysicalRegister(SrcReg) && !RI.isLowRegister(SrcReg))
765 // tSpill cannot take a high register operand.
767 } else { // move -> load
768 unsigned DstReg = MI->getOperand(0).getReg();
769 if (RI.isPhysicalRegister(DstReg) && !RI.isLowRegister(DstReg))
770 // tRestore cannot target a high register operand.
// BlockHasNoFallThrough - True when MBB's final instruction never falls
// through to the next block: returns, unconditional branches, and the
// inlined jump-table branches. The 'return true' lines for the listed cases
// are missing from this truncated listing.
783 bool ARMInstrInfo::BlockHasNoFallThrough(MachineBasicBlock &MBB) const {
784 if (MBB.empty()) return false;
786 switch (MBB.back().getOpcode()) {
787 case ARM::BX_RET: // Return.
790 case ARM::tBX_RET_vararg:
793 case ARM::tB: // Uncond branch.
795 case ARM::BR_JTr: // Jumptable branch.
796 case ARM::BR_JTm: // Jumptable branch through mem.
797 case ARM::BR_JTadd: // Jumptable branch add to pc.
799 default: return false;
// ReverseBranchCondition - Invert the condition code stored in Cond[0]
// (e.g. EQ <-> NE) in place. NOTE(review): the 'bool ARMInstrInfo::'
// return-type line and the return statement are missing from this listing.
804 ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
805 ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
806 Cond[0].setImm(ARMCC::getOppositeCondition(CC));
810 bool ARMInstrInfo::isPredicated(const MachineInstr *MI) const {
811 int PIdx = MI->findFirstPredOperandIdx();
812 return PIdx != -1 && MI->getOperand(PIdx).getImm() != ARMCC::AL;
// PredicateInstruction - Make MI conditional on Pred ({cond-code imm, flag
// reg}). Unconditional branches are converted to Bcc/tBcc with the predicate
// appended; other instructions have their existing predicate operand pair
// overwritten in place.
// NOTE(review): the 'return true' / 'return false' lines and the PIdx != -1
// guard's framing are missing from this truncated listing.
815 bool ARMInstrInfo::PredicateInstruction(MachineInstr *MI,
816 const SmallVectorImpl<MachineOperand> &Pred) const {
817 unsigned Opc = MI->getOpcode();
818 if (Opc == ARM::B || Opc == ARM::tB) {
819 MI->setDesc(get(Opc == ARM::B ? ARM::Bcc : ARM::tBcc));
820 MI->addOperand(MachineOperand::CreateImm(Pred[0].getImm()));
821 MI->addOperand(MachineOperand::CreateReg(Pred[1].getReg(), false));
825 int PIdx = MI->findFirstPredOperandIdx();
827 MachineOperand &PMO = MI->getOperand(PIdx);
828 PMO.setImm(Pred[0].getImm());
829 MI->getOperand(PIdx+1).setReg(Pred[1].getReg());
// SubsumesPredicate - True when condition Pred1 is implied by Pred2, i.e.
// any instruction predicated on Pred2 would also execute under Pred1
// (e.g. HS subsumes HI; LS subsumes LO and EQ; GE subsumes GT).
// NOTE(review): the 'bool' return-type line, the CC1==CC2 fast path, the
// switch framing around the CC1 cases, and the default return are missing
// from this truncated listing.
836 ARMInstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
837 const SmallVectorImpl<MachineOperand> &Pred2) const{
838 if (Pred1.size() > 2 || Pred2.size() > 2)
841 ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm();
842 ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm();
852 return CC2 == ARMCC::HI;
854 return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
856 return CC2 == ARMCC::GT;
858 return CC2 == ARMCC::LT;
// DefinesPredicate - If MI can define CPSR (via implicit defs or an optional
// cc-out operand), collect the CPSR-defining operand(s) into Pred. Scans all
// operands for a register operand naming ARM::CPSR.
// NOTE(review): the early 'return false', the Pred.push_back inside the loop,
// and the final return are missing from this truncated listing.
862 bool ARMInstrInfo::DefinesPredicate(MachineInstr *MI,
863 std::vector<MachineOperand> &Pred) const {
864 const TargetInstrDesc &TID = MI->getDesc();
865 if (!TID.getImplicitDefs() && !TID.hasOptionalDef())
869 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
870 const MachineOperand &MO = MI->getOperand(i);
871 if (MO.isRegister() && MO.getReg() == ARM::CPSR) {
881 /// FIXME: Works around a gcc miscompilation with -fstrict-aliasing
882 static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
883 unsigned JTI) DISABLE_INLINE;
884 static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
886 return JT[JTI].MBBs.size();
889 /// GetInstSize - Return the size of the specified MachineInstr.
// Decodes the size field encoded in the instruction descriptor's TSFlags;
// "special" instructions (inline asm, constant-pool entries, jump-table
// branches, pseudos) are measured individually.
// NOTE(review): several case labels (SizeInvalid, the jump-table branch
// opcodes) and closing braces are missing from this truncated listing.
891 unsigned ARMInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
892 const MachineBasicBlock &MBB = *MI->getParent();
893 const MachineFunction *MF = MBB.getParent();
894 const TargetAsmInfo *TAI = MF->getTarget().getTargetAsmInfo();
896 // Basic size info comes from the TSFlags field.
897 const TargetInstrDesc &TID = MI->getDesc();
898 unsigned TSFlags = TID.TSFlags;
900 switch ((TSFlags & ARMII::SizeMask) >> ARMII::SizeShift) {
902 // If this machine instr is an inline asm, measure it.
903 if (MI->getOpcode() == ARM::INLINEASM)
904 return TAI->getInlineAsmLength(MI->getOperand(0).getSymbolName());
907 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
909 assert(0 && "Unknown or unset size field for instr!");
911 case ARMII::Size8Bytes: return 8; // Arm instruction x 2.
912 case ARMII::Size4Bytes: return 4; // Arm instruction.
913 case ARMII::Size2Bytes: return 2; // Thumb instruction.
914 case ARMII::SizeSpecial: {
915 switch (MI->getOpcode()) {
916 case ARM::CONSTPOOL_ENTRY:
917 // If this machine instr is a constant pool entry, its size is recorded as
918 // operand #2.
919 return MI->getOperand(2).getImm();
924 // These are jumptable branches, i.e. a branch followed by an inlined
925 // jumptable. The size is 4 + 4 * number of entries.
926 unsigned NumOps = TID.getNumOperands();
927 MachineOperand JTOP =
928 MI->getOperand(NumOps - (TID.isPredicable() ? 3 : 2));
929 unsigned JTI = JTOP.getIndex();
930 const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
931 const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
932 assert(JTI < JT.size());
933 // Thumb instructions are 2 byte aligned, but JT entries are 4 byte
934 // 4 aligned. The assembler / linker may add 2 byte padding just before
935 // the JT entries. The size does not include this padding; the
936 // constant islands pass does separate bookkeeping for it.
937 // FIXME: If we know the size of the function is less than (1 << 16) *2
938 // bytes, we can use 16-bit entries instead. Then there won't be an
939 // alignment issue.
940 return getNumJTEntries(JT, JTI) * 4 +
941 (MI->getOpcode()==ARM::tBR_JTr ? 2 : 4);
944 // Otherwise, pseudo-instruction sizes are zero.
949 return 0; // Not reached