//===-- ARMBaseRegisterInfo.cpp - ARM Register Information ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the base ARM implementation of TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARMBaseRegisterInfo.h"
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMFrameLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"

#define DEBUG_TYPE "arm-register-info"

#define GET_REGINFO_TARGET_DESC
#include "ARMGenRegisterInfo.inc"

using namespace llvm;

ARMBaseRegisterInfo::ARMBaseRegisterInfo()
    : ARMGenRegisterInfo(ARM::LR, 0, 0, ARM::PC), BasePtr(ARM::R6) {}

static unsigned getFramePointerReg(const ARMSubtarget &STI) {
  if (STI.isTargetMachO()) {
    if (STI.isTargetDarwin() || STI.isThumb1Only())
      return ARM::R7;
    else
      return ARM::R11;
  } else if (STI.isTargetWindows())
    return ARM::R11;
  else // ARM EABI
    return STI.isThumb() ? ARM::R7 : ARM::R11;
}

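// Select the callee-saved register list: the iOS or AAPCS list by default,
// with overrides for GHC (no callee-saved registers) and for "interrupt"
// handlers.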
const MCPhysReg*
ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  const ARMSubtarget &STI = MF->getSubtarget<ARMSubtarget>();
  const MCPhysReg *RegList =
      STI.isTargetDarwin() ? CSR_iOS_SaveList : CSR_AAPCS_SaveList;

  const Function *F = MF->getFunction();
  if (F->getCallingConv() == CallingConv::GHC) {
    // The GHC set of callee-saved regs is empty as all those regs are
    // used for passing STG regs around.
    return CSR_NoRegs_SaveList;
  } else if (F->hasFnAttribute("interrupt")) {
    if (STI.isMClass()) {
      // M-class CPUs have hardware which saves the registers needed to allow a
      // function conforming to the AAPCS to function as a handler.
      return CSR_AAPCS_SaveList;
    } else if (F->getFnAttribute("interrupt").getValueAsString() == "FIQ") {
      // Fast interrupt mode gives the handler a private copy of R8-R14, so
      // fewer registers need to be saved to restore user-mode state.
      return CSR_FIQ_SaveList;
    } else {
      // Generally only R13-R14 (i.e. SP, LR) are automatically preserved by
      // exception handling.
      return CSR_GenericInt_SaveList;
    }
  }

  return RegList;
}

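// Return the mask of registers preserved across a call under the given
// calling convention, as used for regmask operands on call instructions.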
const uint32_t *
ARMBaseRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                          CallingConv::ID CC) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls.
    return CSR_NoRegs_RegMask;
  return STI.isTargetDarwin() ? CSR_iOS_RegMask : CSR_AAPCS_RegMask;
}

const uint32_t*
ARMBaseRegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getTLSCallPreservedMask(const MachineFunction &MF) const {
  assert(MF.getSubtarget<ARMSubtarget>().isTargetDarwin() &&
         "only know about special TLS call on Darwin");
  return CSR_iOS_TLSCall_RegMask;
}

const uint32_t *
ARMBaseRegisterInfo::getThisReturnPreservedMask(const MachineFunction &MF,
                                                CallingConv::ID CC) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  // This should return a register mask that is the same as that returned by
  // getCallPreservedMask but that additionally preserves the register used for
  // the first i32 argument (which must also be the register used to return a
  // single i32 return value).
  //
  // In case the calling convention does not use the same register for both,
  // or otherwise does not want to enable this optimization, the function
  // should return nullptr.
  if (CC == CallingConv::GHC)
    // This is academic because all GHC calls are (supposed to be) tail calls.
    return nullptr;
  return STI.isTargetDarwin() ? CSR_iOS_ThisReturn_RegMask
                              : CSR_AAPCS_ThisReturn_RegMask;
}

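// Registers in this set are never available to the allocator: SP, PC, the
// status registers, and (depending on the subtarget and frame shape) the
// frame pointer, base pointer, R9, and D16-D31.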
BitVector ARMBaseRegisterInfo::
getReservedRegs(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  // FIXME: avoid re-calculating this every time.
  BitVector Reserved(getNumRegs());
  Reserved.set(ARM::SP);
  Reserved.set(ARM::PC);
  Reserved.set(ARM::FPSCR);
  Reserved.set(ARM::APSR_NZCV);
  if (TFI->hasFP(MF))
    Reserved.set(getFramePointerReg(STI));
  if (hasBasePointer(MF))
    Reserved.set(BasePtr);
  // Some targets reserve R9.
  if (STI.isR9Reserved())
    Reserved.set(ARM::R9);
  // Reserve D16-D31 if the subtarget doesn't support them.
  if (!STI.hasVFP3() || STI.hasD16()) {
    assert(ARM::D31 == ARM::D16 + 15);
    for (unsigned i = 0; i != 16; ++i)
      Reserved.set(ARM::D16 + i);
  }
  // Mark a GPRPair as reserved if either of its sub-registers is reserved.
  const TargetRegisterClass *RC = &ARM::GPRPairRegClass;
  for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end(); I != E; ++I)
    for (MCSubRegIterator SI(*I, this); SI.isValid(); ++SI)
      if (Reserved.test(*SI)) Reserved.set(*I);

  return Reserved;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                               const MachineFunction &) const {
  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
  do {
    switch (Super->getID()) {
    case ARM::GPRRegClassID:
    case ARM::SPRRegClassID:
    case ARM::DPRRegClassID:
    case ARM::QPRRegClassID:
    case ARM::QQPRRegClassID:
    case ARM::QQQQPRRegClassID:
    case ARM::GPRPairRegClassID:
      return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getPointerRegClass(const MachineFunction &MF, unsigned Kind)
                                                                         const {
  return &ARM::GPRRegClass;
}

const TargetRegisterClass *
ARMBaseRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &ARM::CCRRegClass)
    return &ARM::rGPRRegClass;  // Can't copy CCR registers.
  return RC;
}

unsigned
ARMBaseRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                         MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  switch (RC->getID()) {
  default:
    return 0;
  case ARM::tGPRRegClassID:
    return TFI->hasFP(MF) ? 4 : 5;
  case ARM::GPRRegClassID: {
    unsigned FP = TFI->hasFP(MF) ? 1 : 0;
    return 10 - FP - (STI.isR9Reserved() ? 1 : 0);
  }
  case ARM::SPRRegClassID:  // Currently not used as 'rep' register class.
  case ARM::DPRRegClassID:
    return 32 - 10;
  }
}

// Get the other register in a GPRPair.
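// E.g. getPairedGPR(ARM::R0, /*Odd=*/true, RI) finds the R0_R1 GPRPair and
// returns its odd half (gsub_1), i.e. ARM::R1.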
static unsigned getPairedGPR(unsigned Reg, bool Odd, const MCRegisterInfo *RI) {
  for (MCSuperRegIterator Supers(Reg, RI); Supers.isValid(); ++Supers)
    if (ARM::GPRPairRegClass.contains(*Supers))
      return RI->getSubReg(*Supers, Odd ? ARM::gsub_1 : ARM::gsub_0);
  return 0;
}

// Resolve the RegPairEven / RegPairOdd register allocator hints.
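// E.g. for a RegPairEven-hinted vreg whose partner was already assigned R5,
// R4 (the even half of R4_R5) becomes the first hint, followed by the other
// even registers whose odd partners are not reserved.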
void
ARMBaseRegisterInfo::getRegAllocationHints(unsigned VirtReg,
                                           ArrayRef<MCPhysReg> Order,
                                           SmallVectorImpl<MCPhysReg> &Hints,
                                           const MachineFunction &MF,
                                           const VirtRegMap *VRM,
                                           const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  std::pair<unsigned, unsigned> Hint = MRI.getRegAllocationHint(VirtReg);

  unsigned Odd;
  switch (Hint.first) {
  case ARMRI::RegPairEven:
    Odd = 0;
    break;
  case ARMRI::RegPairOdd:
    Odd = 1;
    break;
  default:
    TargetRegisterInfo::getRegAllocationHints(VirtReg, Order, Hints, MF, VRM);
    return;
  }

  // This register should preferably be even (Odd == 0) or odd (Odd == 1).
  // Check if the other part of the pair has already been assigned, and provide
  // the paired register as the first hint.
  unsigned Paired = Hint.second;
  if (Paired == 0)
    return;

  unsigned PairedPhys = 0;
  if (TargetRegisterInfo::isPhysicalRegister(Paired)) {
    PairedPhys = Paired;
  } else if (VRM && VRM->hasPhys(Paired)) {
    PairedPhys = getPairedGPR(VRM->getPhys(Paired), Odd, this);
  }

  // First prefer the paired physreg.
  if (PairedPhys &&
      std::find(Order.begin(), Order.end(), PairedPhys) != Order.end())
    Hints.push_back(PairedPhys);

  // Then prefer even or odd registers.
  for (unsigned I = 0, E = Order.size(); I != E; ++I) {
    unsigned Reg = Order[I];
    if (Reg == PairedPhys || (getEncodingValue(Reg) & 1) != Odd)
      continue;
    // Don't provide hints that are paired to a reserved register.
    unsigned Paired = getPairedGPR(Reg, !Odd, this);
    if (!Paired || MRI.isReserved(Paired))
      continue;
    Hints.push_back(Reg);
  }
}

void
ARMBaseRegisterInfo::updateRegAllocHint(unsigned Reg, unsigned NewReg,
                                        MachineFunction &MF) const {
  MachineRegisterInfo *MRI = &MF.getRegInfo();
  std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(Reg);
  if ((Hint.first == (unsigned)ARMRI::RegPairOdd ||
       Hint.first == (unsigned)ARMRI::RegPairEven) &&
      TargetRegisterInfo::isVirtualRegister(Hint.second)) {
    // If 'Reg' is one of the even / odd register pair and it's now changed
    // (e.g. coalesced) into a different register, the other register of the
    // pair allocation hint must be updated to reflect the relationship
    // change.
    unsigned OtherReg = Hint.second;
    Hint = MRI->getRegAllocationHint(OtherReg);
    // Make sure the pair has not already divorced.
    if (Hint.second == Reg) {
      MRI->setRegAllocationHint(OtherReg, Hint.first, NewReg);
      if (TargetRegisterInfo::isVirtualRegister(NewReg))
        MRI->setRegAllocationHint(NewReg,
            Hint.first == (unsigned)ARMRI::RegPairOdd ? ARMRI::RegPairEven
                                                      : ARMRI::RegPairOdd,
            OtherReg);
    }
  }
}

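// Determine whether this function needs a dedicated base pointer (R6) in
// addition to SP/FP for addressing frame objects.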
bool ARMBaseRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  // When outgoing call frames are so large that we adjust the stack pointer
  // around the call, we can no longer use the stack pointer to reach the
  // emergency spill slot.
  if (needsStackRealignment(MF) && !TFI->hasReservedCallFrame(MF))
    return true;

  // Thumb has trouble with negative offsets from the FP. Thumb2 has a limited
  // negative range for ldr/str (255), and Thumb1 allows positive offsets only.
  // It's going to be better to use the SP or base pointer instead. When there
  // are variable sized objects, we can't reference off of the SP, so we
  // reserve a base pointer.
  if (AFI->isThumbFunction() && MFI->hasVarSizedObjects()) {
    // Conservatively estimate whether the negative offset from the frame
    // pointer will be sufficient to reach. If a function has a smallish
    // frame, it's less likely to have lots of spills and callee saved
    // space, so it's all more likely to be within range of the frame pointer.
    // If it's wrong, the scavenger will still enable access to work, it just
    // adds instructions.
    if (AFI->isThumb2Function() && MFI->getLocalFrameSize() < 128)
      return false;
    return true;
  }

  return false;
}

bool ARMBaseRegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  // We can't realign the stack if:
  // 1. Dynamic stack realignment is explicitly disabled,
  // 2. This is a Thumb1 function (it's not useful, so we don't bother), or
  // 3. There are VLAs in the function and the base pointer is disabled.
  if (!TargetRegisterInfo::canRealignStack(MF))
    return false;
  if (AFI->isThumb1OnlyFunction())
    return false;
  // Stack realignment requires a frame pointer. If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(getFramePointerReg(MF.getSubtarget<ARMSubtarget>())))
    return false;
  // We may also need a base pointer if there are dynamic allocas or stack
  // pointer adjustments around calls.
  if (TFI->hasReservedCallFrame(MF))
    return true;
  // A base pointer is required and allowed. Check that it isn't too late to
  // reserve it.
  return MRI->canReserveReg(BasePtr);
}

bool ARMBaseRegisterInfo::
cannotEliminateFrame(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  if (MF.getTarget().Options.DisableFramePointerElim(MF) && MFI->adjustsStack())
    return true;
  return MFI->hasVarSizedObjects() || MFI->isFrameAddressTaken()
    || needsStackRealignment(MF);
}

unsigned
ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const ARMFrameLowering *TFI = getFrameLowering(MF);

  if (TFI->hasFP(MF))
    return getFramePointerReg(STI);
  return ARM::SP;
}

/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
void ARMBaseRegisterInfo::
emitLoadConstPool(MachineBasicBlock &MBB,
                  MachineBasicBlock::iterator &MBBI,
                  DebugLoc dl,
                  unsigned DestReg, unsigned SubIdx, int Val,
                  ARMCC::CondCodes Pred,
                  unsigned PredReg, unsigned MIFlags) const {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  MachineConstantPool *ConstantPool = MF.getConstantPool();
  const Constant *C =
        ConstantInt::get(Type::getInt32Ty(MF.getFunction()->getContext()), Val);
  unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);

  BuildMI(MBB, MBBI, dl, TII.get(ARM::LDRcp))
    .addReg(DestReg, getDefRegState(true), SubIdx)
    .addConstantPoolIndex(Idx)
    .addImm(0).addImm(Pred).addReg(PredReg)
    .setMIFlags(MIFlags);
}

bool ARMBaseRegisterInfo::
requiresRegisterScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresFrameIndexScavenging(const MachineFunction &MF) const {
  return true;
}

bool ARMBaseRegisterInfo::
requiresVirtualBaseRegisters(const MachineFunction &MF) const {
  return true;
}

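// Decode the immediate offset already encoded in MI's addressing-mode
// operands, scaled to bytes. E.g. an AddrMode5 (VFP) access with an encoded
// offset of 3 and the 'sub' flag yields -3 * 4 = -12 bytes.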
int64_t ARMBaseRegisterInfo::
getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  int64_t InstrOffs = 0;
  int Scale = 1;
  unsigned ImmIdx = 0;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i12:
  case ARMII::AddrMode_i12:
    InstrOffs = MI->getOperand(Idx+1).getImm();
    Scale = 1;
    break;
  case ARMII::AddrMode5: {
    // VFP address mode.
    const MachineOperand &OffOp = MI->getOperand(Idx+1);
    InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
    if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    Scale = 4;
    break;
  }
  case ARMII::AddrMode2: {
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM2Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM2Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  }
  case ARMII::AddrMode3: {
    ImmIdx = Idx+2;
    InstrOffs = ARM_AM::getAM3Offset(MI->getOperand(ImmIdx).getImm());
    if (ARM_AM::getAM3Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
      InstrOffs = -InstrOffs;
    break;
  }
  case ARMII::AddrModeT1_s: {
    ImmIdx = Idx+1;
    InstrOffs = MI->getOperand(ImmIdx).getImm();
    Scale = 4;
    break;
  }
  default:
    llvm_unreachable("Unsupported addressing mode!");
  }

  return InstrOffs * Scale;
}

/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool ARMBaseRegisterInfo::
needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i) {
    assert(i < MI->getNumOperands() &&"Instr doesn't have FrameIndex operand!");
  }

  // It's the load/store FI references that cause issues, as it can be difficult
  // to materialize the offset if it won't fit in the literal field. Estimate
  // based on the size of the local frame and some conservative assumptions
  // about the rest of the stack frame (note, this is pre-regalloc, so
  // we don't know everything for certain yet) whether this offset is likely
  // to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case ARM::LDRi12: case ARM::LDRH: case ARM::LDRBi12:
  case ARM::STRi12: case ARM::STRH: case ARM::STRBi12:
  case ARM::t2LDRi12: case ARM::t2LDRi8:
  case ARM::t2STRi12: case ARM::t2STRi8:
  case ARM::VLDRS: case ARM::VLDRD:
  case ARM::VSTRS: case ARM::VSTRD:
  case ARM::tSTRspi: case ARM::tLDRspi:
    break;
  default:
    return false;
  }

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer.
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  MachineFrameInfo *MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all callee-saved registers get pushed. R4-R6
  // will be earlier than the FP, so we ignore those.
  // R7, LR
  int64_t FPOffset = Offset - 8;
  // ARM and Thumb2 functions also need to consider R8-R11 and D8-D15.
  if (!AFI->isThumbFunction() || !AFI->isThumb1OnlyFunction())
    FPOffset -= 80;
  // Estimate an offset from the stack pointer.
  // The incoming offset is relative to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset += MFI->getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  // and pick a real one.
  Offset += 128; // 128 bytes of spill slots

  // If there's a frame pointer and the addressing mode allows it, try using it.
  // The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  unsigned StackAlign = TFI->getStackAlignment();
  if (TFI->hasFP(MF) &&
      !((MFI->getLocalFrameMaxAlign() > StackAlign) && canRealignStack(MF))) {
    if (isFrameOffsetLegal(MI, getFrameRegister(MF), FPOffset))
      return false;
  }

  // If we can reference via the stack pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (!MFI->hasVarSizedObjects() && isFrameOffsetLegal(MI, ARM::SP, Offset))
    return false;

  // The offset likely isn't legal; we want to allocate a virtual base register.
  return true;
}

/// materializeFrameBaseRegister - Insert defining instruction(s) for BaseReg to
/// be a pointer to FrameIdx at the beginning of the basic block.
void ARMBaseRegisterInfo::
materializeFrameBaseRegister(MachineBasicBlock *MBB,
                             unsigned BaseReg, int FrameIdx,
                             int64_t Offset) const {
  ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
  unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri :
    (AFI->isThumb1OnlyFunction() ? ARM::tADDframe : ARM::t2ADDri);

  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL;                  // Defaults to "unknown"
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();

  const MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const MCInstrDesc &MCID = TII.get(ADDriOpc);
  MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this, MF));

  MachineInstrBuilder MIB = BuildMI(*MBB, Ins, DL, MCID, BaseReg)
    .addFrameIndex(FrameIdx).addImm(Offset);

  if (!AFI->isThumb1OnlyFunction())
    AddDefaultCC(AddDefaultPred(MIB));
}

void ARMBaseRegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
                                            int64_t Offset) const {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMBaseInstrInfo &TII =
      *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  int Off = Offset; // ARM doesn't need the general 64-bit offsets
  unsigned i = 0;

  assert(!AFI->isThumb1OnlyFunction() &&
         "This resolveFrameIndex does not support Thumb1!");

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, i, BaseReg, Off, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, i, BaseReg, Off, TII);
  }
  assert(Done && "Unable to resolve frame index!");
  (void)Done;
}

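// Return true if Offset is encodable in MI's addressing mode with BaseReg as
// the base. E.g. tLDRspi (AddrModeT1_s off SP) has 8 offset bits scaled by 4,
// so only multiples of 4 in [0, 1020] are legal.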
bool ARMBaseRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI, unsigned BaseReg,
                                             int64_t Offset) const {
  const MCInstrDesc &Desc = MI->getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  unsigned i = 0;

  while (!MI->getOperand(i).isFI()) {
    ++i;
    assert(i < MI->getNumOperands() &&"Instr doesn't have FrameIndex operand!");
  }

  // AddrMode4 and AddrMode6 cannot handle any offset.
  if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
    return Offset == 0;

  unsigned NumBits = 0;
  unsigned Scale = 1;
  bool isSigned = true;
  switch (AddrMode) {
  case ARMII::AddrModeT2_i8:
  case ARMII::AddrModeT2_i12:
    // i8 supports only negative, and i12 supports only positive, so
    // based on Offset sign, consider the appropriate instruction
    Scale = 1;
    if (Offset < 0) {
      NumBits = 8;
      Offset = -Offset;
    } else {
      NumBits = 12;
    }
    break;
  case ARMII::AddrMode5:
    // VFP address mode.
    NumBits = 8;
    Scale = 4;
    break;
  case ARMII::AddrMode_i12:
  case ARMII::AddrMode2:
    NumBits = 12;
    break;
  case ARMII::AddrMode3:
    NumBits = 8;
    break;
  case ARMII::AddrModeT1_s:
    NumBits = (BaseReg == ARM::SP ? 8 : 5);
    Scale = 4;
    isSigned = false;
    break;
  default:
    llvm_unreachable("Unsupported addressing mode!");
  }

  Offset += getFrameIndexInstrOffset(MI, i);
  // Make sure the offset is encodable for instructions that scale the
  // immediate.
  if ((Offset & (Scale - 1)) != 0)
    return false;

  if (isSigned && Offset < 0)
    Offset = -Offset;

  unsigned Mask = (1 << NumBits) - 1;
  if ((unsigned)Offset <= Mask * Scale)
    return true;

  return false;
}

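// Rewrite the frame-index operand of the instruction at II into FrameReg
// plus an immediate offset; if the offset cannot be encoded, materialize it
// into a scratch register first.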
void
ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMBaseInstrInfo &TII =
      *static_cast<const ARMBaseInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const ARMFrameLowering *TFI = getFrameLowering(MF);
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  assert(!AFI->isThumb1OnlyFunction() &&
         "This eliminateFrameIndex does not support Thumb1!");
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  unsigned FrameReg;

  int Offset = TFI->ResolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj);

  // PEI::scavengeFrameVirtualRegs() cannot accurately track SPAdj because the
  // call frame setup/destroy instructions have already been eliminated. That
  // means the stack pointer cannot be used to access the emergency spill slot
  // when !hasReservedCallFrame().
#ifndef NDEBUG
  if (RS && FrameReg == ARM::SP && RS->isScavengingFrameIndex(FrameIndex)){
    assert(TFI->hasReservedCallFrame(MF) &&
           "Cannot use SP to access the emergency spill slot in "
           "functions without a reserved call frame");
    assert(!MF.getFrameInfo()->hasVarSizedObjects() &&
           "Cannot use SP to access the emergency spill slot in "
           "functions with variable sized frame objects");
  }
#endif // NDEBUG

  assert(!MI.isDebugValue() &&
         "DBG_VALUEs should be handled in target-independent code");

  // Modify MI as necessary to handle as much of 'Offset' as possible.
  bool Done = false;
  if (!AFI->isThumbFunction())
    Done = rewriteARMFrameIndex(MI, FIOperandNum, FrameReg, Offset, TII);
  else {
    assert(AFI->isThumb2Function());
    Done = rewriteT2FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII);
  }
  if (Done)
    return;

  // If we get here, the immediate doesn't fit into the instruction. We folded
  // as much as possible above, handle the rest, providing a register that is
  // SP+LargeImm.
  assert((Offset ||
          (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode4 ||
          (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode6) &&
         "This code isn't needed if offset already handled!");

  unsigned ScratchReg = 0;
  int PIdx = MI.findFirstPredOperandIdx();
  ARMCC::CondCodes Pred = (PIdx == -1)
    ? ARMCC::AL : (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
  unsigned PredReg = (PIdx == -1) ? 0 : MI.getOperand(PIdx+1).getReg();
  if (Offset == 0)
    // Must be addrmode4/6.
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false, false, false);
  else {
    ScratchReg = MF.getRegInfo().createVirtualRegister(&ARM::GPRRegClass);
    if (!AFI->isThumbFunction())
      emitARMRegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                              Offset, Pred, PredReg, TII);
    else {
      assert(AFI->isThumb2Function());
      emitT2RegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
                             Offset, Pred, PredReg, TII);
    }
    // Update the original instruction to use the scratch register.
    MI.getOperand(FIOperandNum).ChangeToRegister(ScratchReg, false, false, true);
  }
}

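// Heuristically limit how much coalescing into wide register classes (e.g.
// QQPR) is allowed per basic block, so straight-line NEON-heavy code doesn't
// overconstrain the register allocator (see PR18825).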
bool ARMBaseRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                         const TargetRegisterClass *SrcRC,
                                         unsigned SubReg,
                                         const TargetRegisterClass *DstRC,
                                         unsigned DstSubReg,
                                         const TargetRegisterClass *NewRC) const {
  auto MBB = MI->getParent();
  auto MF = MBB->getParent();
  const MachineRegisterInfo &MRI = MF->getRegInfo();
  // If not copying into a sub-register this should be ok because we shouldn't
  // need to split the reg.
  if (!DstSubReg)
    return true;
  // Small registers don't frequently cause a problem, so we can coalesce them.
  if (NewRC->getSize() < 32 && DstRC->getSize() < 32 && SrcRC->getSize() < 32)
    return true;

  auto NewRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(NewRC);
  auto SrcRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(SrcRC);
  auto DstRCWeight =
      MRI.getTargetRegisterInfo()->getRegClassWeight(DstRC);
  // If the source register class is more expensive than the destination, the
  // coalescing is probably profitable.
  if (SrcRCWeight.RegWeight > NewRCWeight.RegWeight)
    return true;
  if (DstRCWeight.RegWeight > NewRCWeight.RegWeight)
    return true;

  // If the register allocator isn't constrained, we can always allow
  // coalescing; unfortunately we don't know yet if we will be constrained.
  // The goal of this heuristic is to restrict how many expensive registers
  // we allow to coalesce in a given basic block.
  auto AFI = MF->getInfo<ARMFunctionInfo>();
  auto It = AFI->getCoalescedWeight(MBB);

  DEBUG(dbgs() << "\tARM::shouldCoalesce - Coalesced Weight: "
               << It->second << "\n");
  DEBUG(dbgs() << "\tARM::shouldCoalesce - Reg Weight: "
               << NewRCWeight.RegWeight << "\n");

  // This number is the largest round number that meets the criteria:
  //  (1) addresses PR18825
  //  (2) generates better code in some test cases (like vldm-sched-a9.ll)
  //  (3) doesn't regress any test cases (in-tree, test-suite, and SPEC)
  // In practice the SizeMultiplier will only factor in for straight line code
  // that uses a lot of NEON vectors, which isn't terribly common.
  unsigned SizeMultiplier = MBB->size()/100;
  SizeMultiplier = SizeMultiplier ? SizeMultiplier : 1;
  if (It->second < NewRCWeight.WeightLimit * SizeMultiplier) {
    It->second += NewRCWeight.RegWeight;
    return true;
  }
  return false;
}