//===- ARMInstrInfo.cpp - ARM Instruction Information -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the ARM implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARMInstrInfo.h"
#include "ARM.h"
#include "ARMAddressingModes.h"
#include "ARMGenInstrInfo.inc"
#include "ARMMachineFunctionInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;

static cl::opt<bool> EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
                                    cl::desc("Enable ARM 2-addr to 3-addr conv"));
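
// Helpers used throughout this file: AddDefaultPred appends the default
// "always" predicate operands (ARMCC::AL plus a null predicate register), and
// AddDefaultCC appends an empty optional-def operand (no CPSR update), to an
// instruction being built.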
static const MachineInstrBuilder &AddDefaultPred(const MachineInstrBuilder &MIB) {
  return MIB.addImm((int64_t)ARMCC::AL).addReg(0);
}

static const MachineInstrBuilder &AddDefaultCC(const MachineInstrBuilder &MIB) {
  return MIB.addReg(0);
}

ARMInstrInfo::ARMInstrInfo(const ARMSubtarget &STI)
  : TargetInstrInfoImpl(ARMInsts, array_lengthof(ARMInsts)),
    RI(*this, STI) {
}

const TargetRegisterClass *ARMInstrInfo::getPointerRegClass() const {
  return &ARM::GPRRegClass;
}

/// Return true if the instruction is a register to register move and
/// leave the source and dest operands in the passed parameters.
///
bool ARMInstrInfo::isMoveInstr(const MachineInstr &MI,
                               unsigned &SrcReg, unsigned &DstReg) const {
  unsigned oc = MI.getOpcode();
  switch (oc) {
  default:
    return false;
  case ARM::FCPYS:
  case ARM::FCPYD:
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    return true;
  case ARM::MOVr:
  case ARM::tMOVr:
    assert(MI.getDesc().getNumOperands() >= 2 &&
           MI.getOperand(0).isReg() &&
           MI.getOperand(1).isReg() &&
           "Invalid ARM MOV instruction");
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    return true;
  }
}

unsigned ARMInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                           int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::LDR:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isReg() &&
        MI->getOperand(3).isImm() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::FLDD:
  case ARM::FLDS:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::tRestore:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }
  return 0;
}

unsigned ARMInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                          int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::STR:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isReg() &&
        MI->getOperand(3).isImm() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::FSTD:
  case ARM::FSTS:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::tSpill:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }
  return 0;
}

void ARMInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator I,
                                 unsigned DestReg,
                                 const MachineInstr *Orig) const {
  if (Orig->getOpcode() == ARM::MOVi2pieces) {
    RI.emitLoadConstPool(MBB, I, DestReg, Orig->getOperand(1).getImm(),
                         Orig->getOperand(2).getImm(),
                         Orig->getOperand(3).getReg(), this, false);
    return;
  }

  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
  MI->getOperand(0).setReg(DestReg);
  MBB.insert(I, MI);
}
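
// Map a pre- or post-indexed load/store opcode to its un-indexed equivalent,
// or return 0 if there is none.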
static unsigned getUnindexedOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    break;
  case ARM::LDRSH_PRE:
  case ARM::LDRSH_POST:
    return ARM::LDRSH;
  case ARM::LDRSB_PRE:
  case ARM::LDRSB_POST:
    return ARM::LDRSB;
  }
  return 0;
}

MachineInstr *
ARMInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                    MachineBasicBlock::iterator &MBBI,
                                    LiveVariables *LV) const {
  if (!EnableARM3Addr)
    return NULL;

  MachineInstr *MI = MBBI;
  MachineFunction &MF = *MI->getParent()->getParent();
  unsigned TSFlags = MI->getDesc().TSFlags;
  bool isPre = false;
  switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
  default: return NULL;
  case ARMII::IndexModePre:
    isPre = true;
    break;
  case ARMII::IndexModePost:
    break;
  }

  // Try splitting an indexed load / store into an un-indexed one plus an
  // add/sub operation.
  unsigned MemOpc = getUnindexedOpcode(MI->getOpcode());
  if (MemOpc == 0)
    return NULL;

  MachineInstr *UpdateMI = NULL;
  MachineInstr *MemMI = NULL;
  unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
  const TargetInstrDesc &TID = MI->getDesc();
  unsigned NumOps = TID.getNumOperands();
  bool isLoad = !TID.mayStore();
  const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0);
  const MachineOperand &Base = MI->getOperand(2);
  const MachineOperand &Offset = MI->getOperand(NumOps-3);
  unsigned WBReg = WB.getReg();
  unsigned BaseReg = Base.getReg();
  unsigned OffReg = Offset.getReg();
  unsigned OffImm = MI->getOperand(NumOps-2).getImm();
  ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI->getOperand(NumOps-1).getImm();
  switch (AddrMode) {
  default:
    assert(false && "Unknown indexed op!");
    return NULL;
  case ARMII::AddrMode2: {
    bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM2Offset(OffImm);
    if (OffReg == 0) {
      int SOImmVal = ARM_AM::getSOImmVal(Amt);
      if (SOImmVal == -1)
        // Can't encode it in a so_imm operand. This transformation will
        // add more than 1 instruction. Abandon!
        return NULL;
      UpdateMI = BuildMI(MF, get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
        .addReg(BaseReg).addImm(SOImmVal)
        .addImm(Pred).addReg(0).addReg(0);
    } else if (Amt != 0) {
      ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
      unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
      UpdateMI = BuildMI(MF, get(isSub ? ARM::SUBrs : ARM::ADDrs), WBReg)
        .addReg(BaseReg).addReg(OffReg).addReg(0).addImm(SOOpc)
        .addImm(Pred).addReg(0).addReg(0);
    } else
      UpdateMI = BuildMI(MF, get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  case ARMII::AddrMode3: {
    bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM3Offset(OffImm);
    if (OffReg == 0)
      // Immediate is 8-bits. It's guaranteed to fit in a so_imm operand.
      UpdateMI = BuildMI(MF, get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    else
      UpdateMI = BuildMI(MF, get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  }

  std::vector<MachineInstr*> NewMIs;
  if (isPre) {
    if (isLoad)
      MemMI = BuildMI(MF, get(MemOpc), MI->getOperand(0).getReg())
        .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(MF, get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
    NewMIs.push_back(MemMI);
    NewMIs.push_back(UpdateMI);
  } else {
    if (isLoad)
      MemMI = BuildMI(MF, get(MemOpc), MI->getOperand(0).getReg())
        .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(MF, get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
    if (WB.isDead())
      UpdateMI->getOperand(0).setIsDead();
    NewMIs.push_back(UpdateMI);
    NewMIs.push_back(MemMI);
  }

  // Transfer LiveVariables states, kill / dead info.
  if (LV) {
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.getReg() &&
          TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
        unsigned Reg = MO.getReg();

        LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
        if (MO.isDef()) {
          MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
          if (MO.isDead())
            LV->addVirtualRegisterDead(Reg, NewMI);
        }
        if (MO.isUse() && MO.isKill()) {
          for (unsigned j = 0; j < 2; ++j) {
            // Look at the two new MI's in reverse order.
            MachineInstr *NewMI = NewMIs[j];
            if (!NewMI->readsRegister(Reg))
              continue;
            LV->addVirtualRegisterKilled(Reg, NewMI);
            if (VI.removeKill(MI))
              VI.Kills.push_back(NewMI);
            break;
          }
        }
      }
    }
  }

  MFI->insert(MBBI, NewMIs[1]);
  MFI->insert(MBBI, NewMIs[0]);
  return NewMIs[0];
}

// Branch analysis.
bool ARMInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                                 MachineBasicBlock *&FBB,
                                 SmallVectorImpl<MachineOperand> &Cond) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (LastOpc == ARM::B || LastOpc == ARM::tB) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (LastOpc == ARM::Bcc || LastOpc == ARM::tBcc) {
      // Block ends with fall-through condbranch.
      TBB = LastInst->getOperand(0).getMBB();
      Cond.push_back(LastInst->getOperand(1));
      Cond.push_back(LastInst->getOperand(2));
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with ARM::B/ARM::tB and an ARM::Bcc/ARM::tBcc, handle it.
  unsigned SecondLastOpc = SecondLastInst->getOpcode();
  if ((SecondLastOpc == ARM::Bcc && LastOpc == ARM::B) ||
      (SecondLastOpc == ARM::tBcc && LastOpc == ARM::tB)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    Cond.push_back(SecondLastInst->getOperand(1));
    Cond.push_back(SecondLastInst->getOperand(2));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed, so remove it.
  if ((SecondLastOpc == ARM::B || SecondLastOpc == ARM::tB) &&
      (LastOpc == ARM::B || LastOpc == ARM::tB)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    I->eraseFromParent();
    return false;
  }

  // Likewise if it ends with a branch table followed by an unconditional branch.
  // The branch folder can create these, and we must get rid of them for
  // correctness of Thumb constant islands.
  if ((SecondLastOpc == ARM::BR_JTr || SecondLastOpc == ARM::BR_JTm ||
       SecondLastOpc == ARM::BR_JTadd || SecondLastOpc == ARM::tBR_JTr) &&
      (LastOpc == ARM::B || LastOpc == ARM::tB)) {
    I = LastInst;
    I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}

unsigned ARMInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineFunction &MF = *MBB.getParent();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  int BOpc   = AFI->isThumbFunction() ? ARM::tB : ARM::B;
  int BccOpc = AFI->isThumbFunction() ? ARM::tBcc : ARM::Bcc;

  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return 0;
  --I;
  if (I->getOpcode() != BOpc && I->getOpcode() != BccOpc)
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) return 1;
  --I;
  if (I->getOpcode() != BccOpc)
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}

unsigned ARMInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                                    MachineBasicBlock *FBB,
                                    const SmallVectorImpl<MachineOperand> &Cond) const {
  MachineFunction &MF = *MBB.getParent();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  int BOpc   = AFI->isThumbFunction() ? ARM::tB : ARM::B;
  int BccOpc = AFI->isThumbFunction() ? ARM::tBcc : ARM::Bcc;

  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "ARM branch conditions have two components!");

  if (FBB == 0) {
    if (Cond.empty()) // Unconditional branch?
      BuildMI(&MBB, get(BOpc)).addMBB(TBB);
    else
      BuildMI(&MBB, get(BccOpc)).addMBB(TBB)
        .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
    return 1;
  }

  // Two-way conditional branch.
  BuildMI(&MBB, get(BccOpc)).addMBB(TBB)
    .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
  BuildMI(&MBB, get(BOpc)).addMBB(FBB);
  return 2;
}

bool ARMInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator I,
                                unsigned DestReg, unsigned SrcReg,
                                const TargetRegisterClass *DestRC,
                                const TargetRegisterClass *SrcRC) const {
  if (DestRC != SrcRC) {
    // Not yet supported!
    return false;
  }

  if (DestRC == ARM::GPRRegisterClass) {
    MachineFunction &MF = *MBB.getParent();
    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
    if (AFI->isThumbFunction())
      BuildMI(MBB, I, get(ARM::tMOVr), DestReg).addReg(SrcReg);
    else
      AddDefaultCC(AddDefaultPred(BuildMI(MBB, I, get(ARM::MOVr), DestReg)
                                  .addReg(SrcReg)));
  } else if (DestRC == ARM::SPRRegisterClass)
    AddDefaultPred(BuildMI(MBB, I, get(ARM::FCPYS), DestReg)
                   .addReg(SrcReg));
  else if (DestRC == ARM::DPRRegisterClass)
    AddDefaultPred(BuildMI(MBB, I, get(ARM::FCPYD), DestReg)
                   .addReg(SrcReg));
  else
    return false;

  return true;
}
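
// Append a register, immediate, or frame-index operand MO to the instruction
// under construction in MIB.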
static const MachineInstrBuilder &ARMInstrAddOperand(MachineInstrBuilder &MIB,
                                                     MachineOperand &MO) {
  if (MO.isReg())
    MIB = MIB.addReg(MO.getReg(), MO.isDef(), MO.isImplicit());
  else if (MO.isImm())
    MIB = MIB.addImm(MO.getImm());
  else if (MO.isFI())
    MIB = MIB.addFrameIndex(MO.getIndex());
  else
    assert(0 && "Unknown operand for ARMInstrAddOperand!");

  return MIB;
}

void ARMInstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    unsigned SrcReg, bool isKill, int FI,
                    const TargetRegisterClass *RC) const {
  if (RC == ARM::GPRRegisterClass) {
    MachineFunction &MF = *MBB.getParent();
    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
    if (AFI->isThumbFunction())
      BuildMI(MBB, I, get(ARM::tSpill)).addReg(SrcReg, false, false, isKill)
        .addFrameIndex(FI).addImm(0);
    else
      AddDefaultPred(BuildMI(MBB, I, get(ARM::STR))
                     .addReg(SrcReg, false, false, isKill)
                     .addFrameIndex(FI).addReg(0).addImm(0));
  } else if (RC == ARM::DPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, get(ARM::FSTD))
                   .addReg(SrcReg, false, false, isKill)
                   .addFrameIndex(FI).addImm(0));
  } else {
    assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
    AddDefaultPred(BuildMI(MBB, I, get(ARM::FSTS))
                   .addReg(SrcReg, false, false, isKill)
                   .addFrameIndex(FI).addImm(0));
  }
}

void ARMInstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
                                  bool isKill,
                                  SmallVectorImpl<MachineOperand> &Addr,
                                  const TargetRegisterClass *RC,
                                  SmallVectorImpl<MachineInstr*> &NewMIs) const {
  unsigned Opc = 0;
  if (RC == ARM::GPRRegisterClass) {
    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
    if (AFI->isThumbFunction()) {
      Opc = Addr[0].isFI() ? ARM::tSpill : ARM::tSTR;
      MachineInstrBuilder MIB =
        BuildMI(MF, get(Opc)).addReg(SrcReg, false, false, isKill);
      for (unsigned i = 0, e = Addr.size(); i != e; ++i)
        MIB = ARMInstrAddOperand(MIB, Addr[i]);
      NewMIs.push_back(MIB);
      return;
    }
    Opc = ARM::STR;
  } else if (RC == ARM::DPRRegisterClass) {
    Opc = ARM::FSTD;
  } else {
    assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
    Opc = ARM::FSTS;
  }

  MachineInstrBuilder MIB =
    BuildMI(MF, get(Opc)).addReg(SrcReg, false, false, isKill);
  for (unsigned i = 0, e = Addr.size(); i != e; ++i)
    MIB = ARMInstrAddOperand(MIB, Addr[i]);
  AddDefaultPred(MIB);
  NewMIs.push_back(MIB);
  return;
}

void ARMInstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                     unsigned DestReg, int FI,
                     const TargetRegisterClass *RC) const {
  if (RC == ARM::GPRRegisterClass) {
    MachineFunction &MF = *MBB.getParent();
    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
    if (AFI->isThumbFunction())
      BuildMI(MBB, I, get(ARM::tRestore), DestReg)
        .addFrameIndex(FI).addImm(0);
    else
      AddDefaultPred(BuildMI(MBB, I, get(ARM::LDR), DestReg)
                     .addFrameIndex(FI).addReg(0).addImm(0));
  } else if (RC == ARM::DPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, get(ARM::FLDD), DestReg)
                   .addFrameIndex(FI).addImm(0));
  } else {
    assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
    AddDefaultPred(BuildMI(MBB, I, get(ARM::FLDS), DestReg)
                   .addFrameIndex(FI).addImm(0));
  }
}

void ARMInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
                                   SmallVectorImpl<MachineOperand> &Addr,
                                   const TargetRegisterClass *RC,
                                   SmallVectorImpl<MachineInstr*> &NewMIs) const {
  unsigned Opc = 0;
  if (RC == ARM::GPRRegisterClass) {
    ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
    if (AFI->isThumbFunction()) {
      Opc = Addr[0].isFI() ? ARM::tRestore : ARM::tLDR;
      MachineInstrBuilder MIB = BuildMI(MF, get(Opc), DestReg);
      for (unsigned i = 0, e = Addr.size(); i != e; ++i)
        MIB = ARMInstrAddOperand(MIB, Addr[i]);
      NewMIs.push_back(MIB);
      return;
    }
    Opc = ARM::LDR;
  } else if (RC == ARM::DPRRegisterClass) {
    Opc = ARM::FLDD;
  } else {
    assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
    Opc = ARM::FLDS;
  }

  MachineInstrBuilder MIB = BuildMI(MF, get(Opc), DestReg);
  for (unsigned i = 0, e = Addr.size(); i != e; ++i)
    MIB = ARMInstrAddOperand(MIB, Addr[i]);
  AddDefaultPred(MIB);
  NewMIs.push_back(MIB);
  return;
}

bool ARMInstrInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
                                             MachineBasicBlock::iterator MI,
                                const std::vector<CalleeSavedInfo> &CSI) const {
  MachineFunction &MF = *MBB.getParent();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  if (!AFI->isThumbFunction() || CSI.empty())
    return false;

  MachineInstrBuilder MIB = BuildMI(MBB, MI, get(ARM::tPUSH));
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i-1].getReg();
    // Add the callee-saved register as live-in. It's killed at the spill.
    MBB.addLiveIn(Reg);
    MIB.addReg(Reg, false/*isDef*/, false/*isImp*/, true/*isKill*/);
  }
  return true;
}

bool ARMInstrInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
                                               MachineBasicBlock::iterator MI,
                                const std::vector<CalleeSavedInfo> &CSI) const {
  MachineFunction &MF = *MBB.getParent();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  if (!AFI->isThumbFunction() || CSI.empty())
    return false;

  bool isVarArg = AFI->getVarArgsRegSaveSize() > 0;
  MachineInstr *PopMI = MF.CreateMachineInstr(get(ARM::tPOP));
  MBB.insert(MI, PopMI);
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i-1].getReg();
    if (Reg == ARM::LR) {
      // Special epilogue for vararg functions. See emitEpilogue.
      if (isVarArg)
        continue;
      Reg = ARM::PC;
      PopMI->setDesc(get(ARM::tPOP_RET));
      MI = MBB.erase(MI);
    }
    PopMI->addOperand(MachineOperand::CreateReg(Reg, true));
  }
  return true;
}
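
// Fold a register-to-register move into a load from or store to the stack
// slot FI. Returns the new memory instruction, or null if the move cannot be
// folded.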
MachineInstr *ARMInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                                  MachineInstr *MI,
                                           const SmallVectorImpl<unsigned> &Ops,
                                                  int FI) const {
  if (Ops.size() != 1) return NULL;

  unsigned OpNum = Ops[0];
  unsigned Opc = MI->getOpcode();
  MachineInstr *NewMI = NULL;
  switch (Opc) {
  default: break;
  case ARM::MOVr: {
    if (MI->getOperand(4).getReg() == ARM::CPSR)
      // If it is updating CPSR, then it cannot be folded.
      break;
    unsigned Pred = MI->getOperand(2).getImm();
    unsigned PredReg = MI->getOperand(3).getReg();
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      bool isKill = MI->getOperand(1).isKill();
      NewMI = BuildMI(MF, get(ARM::STR)).addReg(SrcReg, false, false, isKill)
        .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
    } else {          // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      bool isDead = MI->getOperand(0).isDead();
      NewMI = BuildMI(MF, get(ARM::LDR)).addReg(DstReg, true, false, false, isDead)
        .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
    }
    break;
  }
  case ARM::tMOVr: {
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      bool isKill = MI->getOperand(1).isKill();
      if (RI.isPhysicalRegister(SrcReg) && !RI.isLowRegister(SrcReg))
        // tSpill cannot take a high register operand.
        break;
      NewMI = BuildMI(MF, get(ARM::tSpill)).addReg(SrcReg, false, false, isKill)
        .addFrameIndex(FI).addImm(0);
    } else {          // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      if (RI.isPhysicalRegister(DstReg) && !RI.isLowRegister(DstReg))
        // tRestore cannot target a high register operand.
        break;
      bool isDead = MI->getOperand(0).isDead();
      NewMI = BuildMI(MF, get(ARM::tRestore))
        .addReg(DstReg, true, false, false, isDead)
        .addFrameIndex(FI).addImm(0);
    }
    break;
  }
  case ARM::FCPYS: {
    unsigned Pred = MI->getOperand(2).getImm();
    unsigned PredReg = MI->getOperand(3).getReg();
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      NewMI = BuildMI(MF, get(ARM::FSTS)).addReg(SrcReg).addFrameIndex(FI)
        .addImm(0).addImm(Pred).addReg(PredReg);
    } else {          // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      NewMI = BuildMI(MF, get(ARM::FLDS), DstReg).addFrameIndex(FI)
        .addImm(0).addImm(Pred).addReg(PredReg);
    }
    break;
  }
  case ARM::FCPYD: {
    unsigned Pred = MI->getOperand(2).getImm();
    unsigned PredReg = MI->getOperand(3).getReg();
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      bool isKill = MI->getOperand(1).isKill();
      NewMI = BuildMI(MF, get(ARM::FSTD)).addReg(SrcReg, false, false, isKill)
        .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    } else {          // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      bool isDead = MI->getOperand(0).isDead();
      NewMI = BuildMI(MF, get(ARM::FLDD)).addReg(DstReg, true, false, false, isDead)
        .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    }
    break;
  }
  }

  return NewMI;
}

bool ARMInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
                                  const SmallVectorImpl<unsigned> &Ops) const {
  if (Ops.size() != 1) return false;

  unsigned OpNum = Ops[0];
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  default: break;
  case ARM::MOVr:
    // If it is updating CPSR, then it cannot be folded.
    return MI->getOperand(4).getReg() != ARM::CPSR;
  case ARM::tMOVr: {
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      if (RI.isPhysicalRegister(SrcReg) && !RI.isLowRegister(SrcReg))
        // tSpill cannot take a high register operand.
        return false;
    } else {          // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      if (RI.isPhysicalRegister(DstReg) && !RI.isLowRegister(DstReg))
        // tRestore cannot target a high register operand.
        return false;
    }
    return true;
  }
  case ARM::FCPYS:
  case ARM::FCPYD:
    return true;
  }

  return false;
}

bool ARMInstrInfo::BlockHasNoFallThrough(const MachineBasicBlock &MBB) const {
  if (MBB.empty()) return false;

  switch (MBB.back().getOpcode()) {
  case ARM::BX_RET:   // Return.
  case ARM::LDM_RET:
  case ARM::tBX_RET:
  case ARM::tBX_RET_vararg:
  case ARM::tPOP_RET:
  case ARM::B:
  case ARM::tB:       // Uncond branch.
  case ARM::tBR_JTr:
  case ARM::BR_JTr:   // Jumptable branch.
  case ARM::BR_JTm:   // Jumptable branch through mem.
  case ARM::BR_JTadd: // Jumptable branch add to pc.
    return true;
  default: return false;
  }
}

bool ARMInstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
  Cond[0].setImm(ARMCC::getOppositeCondition(CC));
  return false;
}

bool ARMInstrInfo::isPredicated(const MachineInstr *MI) const {
  int PIdx = MI->findFirstPredOperandIdx();
  return PIdx != -1 && MI->getOperand(PIdx).getImm() != ARMCC::AL;
}

bool ARMInstrInfo::PredicateInstruction(MachineInstr *MI,
                            const SmallVectorImpl<MachineOperand> &Pred) const {
  unsigned Opc = MI->getOpcode();
  if (Opc == ARM::B || Opc == ARM::tB) {
    MI->setDesc(get(Opc == ARM::B ? ARM::Bcc : ARM::tBcc));
    MI->addOperand(MachineOperand::CreateImm(Pred[0].getImm()));
    MI->addOperand(MachineOperand::CreateReg(Pred[1].getReg(), false));
    return true;
  }

  int PIdx = MI->findFirstPredOperandIdx();
  if (PIdx != -1) {
    MachineOperand &PMO = MI->getOperand(PIdx);
    PMO.setImm(Pred[0].getImm());
    MI->getOperand(PIdx+1).setReg(Pred[1].getReg());
    return true;
  }
  return false;
}

bool
ARMInstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                                const SmallVectorImpl<MachineOperand> &Pred2) const {
  if (Pred1.size() > 2 || Pred2.size() > 2)
    return false;

  ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm();
  ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm();
  if (CC1 == CC2)
    return true;

  switch (CC1) {
  default:
    return false;
  case ARMCC::AL:
    return true;
  case ARMCC::HS:
    return CC2 == ARMCC::HI;
  case ARMCC::LS:
    return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
  case ARMCC::GE:
    return CC2 == ARMCC::GT;
  case ARMCC::LE:
    return CC2 == ARMCC::LT;
  }
}

bool ARMInstrInfo::DefinesPredicate(MachineInstr *MI,
                                    std::vector<MachineOperand> &Pred) const {
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.getImplicitDefs() && !TID.hasOptionalDef())
    return false;

  bool Found = false;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.getReg() == ARM::CPSR) {
      Pred.push_back(MO);
      Found = true;
    }
  }

  return Found;
}

/// FIXME: Works around a gcc miscompilation with -fstrict-aliasing
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI) DISABLE_INLINE;
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI) {
  return JT[JTI].MBBs.size();
}

/// GetInstSize - Return the size of the specified MachineInstr.
///
unsigned ARMInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
  const MachineBasicBlock &MBB = *MI->getParent();
  const MachineFunction *MF = MBB.getParent();
  const TargetAsmInfo *TAI = MF->getTarget().getTargetAsmInfo();

  // Basic size info comes from the TSFlags field.
  const TargetInstrDesc &TID = MI->getDesc();
  unsigned TSFlags = TID.TSFlags;

  switch ((TSFlags & ARMII::SizeMask) >> ARMII::SizeShift) {
  default: {
    // If this machine instr is an inline asm, measure it.
    if (MI->getOpcode() == ARM::INLINEASM)
      return TAI->getInlineAsmLength(MI->getOperand(0).getSymbolName());
    if (MI->isLabel())
      return 0;
    switch (MI->getOpcode()) {
    default:
      assert(0 && "Unknown or unset size field for instr!");
      break;
    case TargetInstrInfo::IMPLICIT_DEF:
    case TargetInstrInfo::DECLARE:
    case TargetInstrInfo::DBG_LABEL:
    case TargetInstrInfo::EH_LABEL:
      return 0;
    }
    break;
  }
  case ARMII::Size8Bytes: return 8;          // Arm instruction x 2.
  case ARMII::Size4Bytes: return 4;          // Arm instruction.
  case ARMII::Size2Bytes: return 2;          // Thumb instruction.
  case ARMII::SizeSpecial: {
    switch (MI->getOpcode()) {
    case ARM::CONSTPOOL_ENTRY:
      // If this machine instr is a constant pool entry, its size is recorded
      // as operand #2.
      return MI->getOperand(2).getImm();
    case ARM::BR_JTr:
    case ARM::BR_JTm:
    case ARM::BR_JTadd:
    case ARM::tBR_JTr: {
      // These are jumptable branches, i.e. a branch followed by an inlined
      // jumptable. The size is 4 + 4 * number of entries.
      unsigned NumOps = TID.getNumOperands();
      MachineOperand JTOP =
        MI->getOperand(NumOps - (TID.isPredicable() ? 3 : 2));
      unsigned JTI = JTOP.getIndex();
      const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
      const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
      assert(JTI < JT.size());
      // Thumb instructions are 2 byte aligned, but JT entries are 4 byte
      // aligned. The assembler / linker may add 2 byte padding just before
      // the JT entries. The size does not include this padding; the
      // constant islands pass does separate bookkeeping for it.
      // FIXME: If we know the size of the function is less than (1 << 16) * 2
      // bytes, we can use 16-bit entries instead. Then there won't be an
      // alignment issue.
      return getNumJTEntries(JT, JTI) * 4 +
             (MI->getOpcode() == ARM::tBR_JTr ? 2 : 4);
    }
    default:
      // Otherwise, pseudo-instruction sizes are zero.
      return 0;
    }
  }
  }
  return 0; // Not reached
}