1 //===- ARMBaseRegisterInfo.cpp - ARM Register Information -------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains the base ARM implementation of TargetRegisterInfo class.
12 //===----------------------------------------------------------------------===//
15 #include "ARMAddressingModes.h"
16 #include "ARMBaseInstrInfo.h"
17 #include "ARMBaseRegisterInfo.h"
18 #include "ARMInstrInfo.h"
19 #include "ARMMachineFunctionInfo.h"
20 #include "ARMSubtarget.h"
21 #include "llvm/Constants.h"
22 #include "llvm/DerivedTypes.h"
23 #include "llvm/Function.h"
24 #include "llvm/LLVMContext.h"
25 #include "llvm/CodeGen/MachineConstantPool.h"
26 #include "llvm/CodeGen/MachineFrameInfo.h"
27 #include "llvm/CodeGen/MachineFunction.h"
28 #include "llvm/CodeGen/MachineInstrBuilder.h"
29 #include "llvm/CodeGen/MachineLocation.h"
30 #include "llvm/CodeGen/MachineRegisterInfo.h"
31 #include "llvm/CodeGen/RegisterScavenging.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/Support/ErrorHandling.h"
34 #include "llvm/Support/raw_ostream.h"
35 #include "llvm/Target/TargetFrameInfo.h"
36 #include "llvm/Target/TargetMachine.h"
37 #include "llvm/Target/TargetOptions.h"
38 #include "llvm/ADT/BitVector.h"
39 #include "llvm/ADT/SmallVector.h"
40 #include "llvm/Support/CommandLine.h"
44 ReuseFrameIndexVals("arm-reuse-frame-index-vals", cl::Hidden, cl::init(true),
45 cl::desc("Reuse repeated frame index values"));
// Map an ARM register enum value (R0-PC, D0-D15, Q0-Q15, S0-S31) to its
// hardware encoding number. GPR/D/Q registers with the same index share the
// same encoding.
// NOTE(review): the original line numbering is non-contiguous here — the
// switch header and the S-register numbering body (original lines 51-57,
// 75-92, 101-103) are elided from this listing; verify against the full file.
50 unsigned ARMBaseRegisterInfo::getRegisterNumbering(unsigned RegEnum,
58 llvm_unreachable("Unknown ARM register!");
59 case R0: case D0: case Q0: return 0;
60 case R1: case D1: case Q1: return 1;
61 case R2: case D2: case Q2: return 2;
62 case R3: case D3: case Q3: return 3;
63 case R4: case D4: case Q4: return 4;
64 case R5: case D5: case Q5: return 5;
65 case R6: case D6: case Q6: return 6;
66 case R7: case D7: case Q7: return 7;
67 case R8: case D8: case Q8: return 8;
68 case R9: case D9: case Q9: return 9;
69 case R10: case D10: case Q10: return 10;
70 case R11: case D11: case Q11: return 11;
71 case R12: case D12: case Q12: return 12;
// SP/LR/PC alias encodings 13/14/15 in the GPR space, as do D13-D15/Q13-Q15
// in their own spaces.
72 case SP: case D13: case Q13: return 13;
73 case LR: case D14: case Q14: return 14;
74 case PC: case D15: case Q15: return 15;
// Single-precision S registers — the computation of their numbering is in
// the elided lines following the opening brace below.
93 case S0: case S1: case S2: case S3:
94 case S4: case S5: case S6: case S7:
95 case S8: case S9: case S10: case S11:
96 case S12: case S13: case S14: case S15:
97 case S16: case S17: case S18: case S19:
98 case S20: case S21: case S22: case S23:
99 case S24: case S25: case S26: case S27:
100 case S28: case S29: case S30: case S31: {
104 default: return 0; // Avoid compile time warning.
// Constructor: wires up the generic register info with the ARM call-frame
// pseudo opcodes and picks the frame-pointer register. Darwin and Thumb
// targets use R7 as FP; other targets use R11.
// NOTE(review): original line 145 (presumably the TII/STI member
// initializers) is elided from this listing — confirm against the full file.
142 ARMBaseRegisterInfo::ARMBaseRegisterInfo(const ARMBaseInstrInfo &tii,
143 const ARMSubtarget &sti)
144 : ARMGenRegisterInfo(ARM::ADJCALLSTACKDOWN, ARM::ADJCALLSTACKUP),
146 FramePtr((STI.isTargetDarwin() || STI.isThumb()) ? ARM::R7 : ARM::R11) {
// Return the (null-terminated) list of callee-saved registers, selected per
// ABI: the Darwin list differs from the AAPCS one (see comment at original
// line 161 — R9 is not callee-saved on Darwin) and orders R7 next to LR.
// Both lists include D8-D15 for the VFP callee-saved area.
// NOTE(review): the return-type line and array terminators (e.g. original
// lines 148-149, 154, 157-159, 165, 168-169) are elided from this listing.
150 ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
151 static const unsigned CalleeSavedRegs[] = {
152 ARM::LR, ARM::R11, ARM::R10, ARM::R9, ARM::R8,
153 ARM::R7, ARM::R6, ARM::R5, ARM::R4,
155 ARM::D15, ARM::D14, ARM::D13, ARM::D12,
156 ARM::D11, ARM::D10, ARM::D9, ARM::D8,
160 static const unsigned DarwinCalleeSavedRegs[] = {
161 // Darwin ABI deviates from ARM standard ABI. R9 is not a callee-saved
163 ARM::LR, ARM::R7, ARM::R6, ARM::R5, ARM::R4,
164 ARM::R11, ARM::R10, ARM::R8,
166 ARM::D15, ARM::D14, ARM::D13, ARM::D12,
167 ARM::D11, ARM::D10, ARM::D9, ARM::D8,
170 return STI.isTargetDarwin() ? DarwinCalleeSavedRegs : CalleeSavedRegs;
// Compute the set of registers the allocator must never use: SP and PC
// always; the frame pointer when one is required (always on Darwin); and R9
// when the subtarget reserves it.
// NOTE(review): the trailing `return Reserved;` / closing brace (original
// lines 184-185) are elided from this listing.
173 BitVector ARMBaseRegisterInfo::
174 getReservedRegs(const MachineFunction &MF) const {
175 // FIXME: avoid re-calculating this everytime.
176 BitVector Reserved(getNumRegs());
177 Reserved.set(ARM::SP);
178 Reserved.set(ARM::PC);
// FP must stay reserved whenever a frame pointer is in use; Darwin always
// maintains one.
179 if (STI.isTargetDarwin() || hasFP(MF))
180 Reserved.set(FramePtr);
181 // Some targets reserve R9.
182 if (STI.isR9Reserved())
183 Reserved.set(ARM::R9);
// Query form of getReservedRegs for a single register. The visible checks
// mirror that function: the frame pointer is reserved on Darwin or when a
// frame pointer is required, and R9 per subtarget setting.
// NOTE(review): the SP/PC cases (original lines 189-195) are elided from
// this listing.
187 bool ARMBaseRegisterInfo::isReservedReg(const MachineFunction &MF,
188 unsigned Reg) const {
196 if (FramePtr == Reg && (STI.isTargetDarwin() || hasFP(MF)))
197 return true;
200 return STI.isR9Reserved();
// Given a super-register class A, a sub-register class B and a sub-register
// index, return the largest class of A whose SubIdx sub-registers are all in
// B — or null to forbid coalescing. The visible branches dispatch on
// A->getSize() (8/16/32/64 bytes => DPR/QPR/QQPR/QQQQ-sized classes) and
// restrict to the _VFP2/_8 constrained variants when B is constrained.
// NOTE(review): the enclosing switch on SubIdx and several branch bodies
// (non-contiguous original numbering throughout) are elided from this
// listing; the pairing of each size check with its sub-index case cannot be
// confirmed from here.
206 const TargetRegisterClass *
207 ARMBaseRegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
208 const TargetRegisterClass *B,
209 unsigned SubIdx) const {
// S sub-register index: an 8-byte (D-sized) A paired with a constrained SPR
// class narrows A accordingly.
217 if (A->getSize() == 8) {
218 if (B == &ARM::SPR_8RegClass)
219 return &ARM::DPR_8RegClass;
220 assert(B == &ARM::SPRRegClass && "Expecting SPR register class!");
221 if (A == &ARM::DPR_8RegClass)
223 return &ARM::DPR_VFP2RegClass;
226 if (A->getSize() == 16) {
227 if (B == &ARM::SPR_8RegClass)
228 return &ARM::QPR_8RegClass;
229 return &ARM::QPR_VFP2RegClass;
232 if (A->getSize() == 32) {
233 if (B == &ARM::SPR_8RegClass)
234 return 0; // Do not allow coalescing!
235 return &ARM::QQPR_VFP2RegClass;
238 assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
239 return 0; // Do not allow coalescing!
// D sub-register index cases.
246 if (A->getSize() == 16) {
247 if (B == &ARM::DPR_VFP2RegClass)
248 return &ARM::QPR_VFP2RegClass;
249 if (B == &ARM::DPR_8RegClass)
250 return 0; // Do not allow coalescing!
254 if (A->getSize() == 32) {
255 if (B == &ARM::DPR_VFP2RegClass)
256 return &ARM::QQPR_VFP2RegClass;
257 if (B == &ARM::DPR_8RegClass)
258 return 0; // Do not allow coalescing!
262 assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
263 if (B != &ARM::DPRRegClass)
264 return 0; // Do not allow coalescing!
271 // D sub-registers of QQQQ registers.
272 if (A->getSize() == 64 && B == &ARM::DPRRegClass)
274 return 0; // Do not allow coalescing!
// Q sub-register index cases.
280 if (A->getSize() == 32) {
281 if (B == &ARM::QPR_VFP2RegClass)
282 return &ARM::QQPR_VFP2RegClass;
283 if (B == &ARM::QPR_8RegClass)
284 return 0; // Do not allow coalescing!
288 assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
289 if (B == &ARM::QPRRegClass)
291 return 0; // Do not allow coalescing!
295 // Q sub-registers of QQQQ registers.
296 if (A->getSize() == 64 && B == &ARM::QPRRegClass)
298 return 0; // Do not allow coalescing!
// Decide whether a run of sub-register indices of RC covers a single larger
// register, and if so which sub-index of RC the combined value occupies
// (NewSubIdx == 0 means the whole register). Dispatches on the number of
// indices (8, 4, or 2) and on the first index, then checks the remaining
// indices are the consecutive ones and that RC is big enough (Size is in
// bits: 128 = Q, 256 = QQ, 512 = QQQQ).
// NOTE(review): several `if (Size >= ...)` headers and the `return true;` /
// `return false;` lines (non-contiguous original numbering) are elided from
// this listing.
305 ARMBaseRegisterInfo::canCombineSubRegIndices(const TargetRegisterClass *RC,
306 SmallVectorImpl<unsigned> &SubIndices,
307 unsigned &NewSubIdx) const {
309 unsigned Size = RC->getSize() * 8;
313 NewSubIdx = 0; // Whole register.
314 unsigned NumRegs = SubIndices.size();
316 // 8 D registers -> 1 QQQQ register.
317 return (Size == 512 &&
318 SubIndices[0] == ARM::dsub_0 &&
319 SubIndices[1] == ARM::dsub_1 &&
320 SubIndices[2] == ARM::dsub_2 &&
321 SubIndices[3] == ARM::dsub_3 &&
322 SubIndices[4] == ARM::dsub_4 &&
323 SubIndices[5] == ARM::dsub_5 &&
324 SubIndices[6] == ARM::dsub_6 &&
325 SubIndices[7] == ARM::dsub_7);
326 } else if (NumRegs == 4) {
327 if (SubIndices[0] == ARM::qsub_0) {
328 // 4 Q registers -> 1 QQQQ register.
329 return (Size == 512 &&
330 SubIndices[1] == ARM::qsub_1 &&
331 SubIndices[2] == ARM::qsub_2 &&
332 SubIndices[3] == ARM::qsub_3);
333 } else if (SubIndices[0] == ARM::dsub_0) {
334 // 4 D registers -> 1 QQ register.
336 SubIndices[1] == ARM::dsub_1 &&
337 SubIndices[2] == ARM::dsub_2 &&
338 SubIndices[3] == ARM::dsub_3) {
340 NewSubIdx = ARM::qqsub_0;
343 } else if (SubIndices[0] == ARM::dsub_4) {
344 // 4 D registers -> 1 QQ register (2nd).
346 SubIndices[1] == ARM::dsub_5 &&
347 SubIndices[2] == ARM::dsub_6 &&
348 SubIndices[3] == ARM::dsub_7) {
349 NewSubIdx = ARM::qqsub_1;
352 } else if (SubIndices[0] == ARM::ssub_0) {
353 // 4 S registers -> 1 Q register.
355 SubIndices[1] == ARM::ssub_1 &&
356 SubIndices[2] == ARM::ssub_2 &&
357 SubIndices[3] == ARM::ssub_3) {
359 NewSubIdx = ARM::qsub_0;
363 } else if (NumRegs == 2) {
364 if (SubIndices[0] == ARM::qsub_0) {
365 // 2 Q registers -> 1 QQ register.
366 if (Size >= 256 && SubIndices[1] == ARM::qsub_1) {
368 NewSubIdx = ARM::qqsub_0;
371 } else if (SubIndices[0] == ARM::qsub_2) {
372 // 2 Q registers -> 1 QQ register (2nd).
373 if (Size == 512 && SubIndices[1] == ARM::qsub_3) {
374 NewSubIdx = ARM::qqsub_1;
377 } else if (SubIndices[0] == ARM::dsub_0) {
378 // 2 D registers -> 1 Q register.
379 if (Size >= 128 && SubIndices[1] == ARM::dsub_1) {
381 NewSubIdx = ARM::qsub_0;
384 } else if (SubIndices[0] == ARM::dsub_2) {
385 // 2 D registers -> 1 Q register (2nd).
386 if (Size >= 256 && SubIndices[1] == ARM::dsub_3) {
387 NewSubIdx = ARM::qsub_1;
390 } else if (SubIndices[0] == ARM::dsub_4) {
391 // 2 D registers -> 1 Q register (3rd).
392 if (Size == 512 && SubIndices[1] == ARM::dsub_5) {
393 NewSubIdx = ARM::qsub_2;
// NOTE(review): comment below says "(3rd)" but this is the 4th Q sub-reg
// (qsub_3) — comment in the original appears stale.
396 } else if (SubIndices[0] == ARM::dsub_6) {
397 // 2 D registers -> 1 Q register (3rd).
398 if (Size == 512 && SubIndices[1] == ARM::dsub_7) {
399 NewSubIdx = ARM::qsub_3;
402 } else if (SubIndices[0] == ARM::ssub_0) {
403 // 2 S registers -> 1 D register.
404 if (SubIndices[1] == ARM::ssub_1) {
406 NewSubIdx = ARM::dsub_0;
409 } else if (SubIndices[0] == ARM::ssub_2) {
410 // 2 S registers -> 1 D register (2nd).
411 if (Size >= 128 && SubIndices[1] == ARM::ssub_3) {
412 NewSubIdx = ARM::dsub_1;
// Pointers on ARM live in general-purpose registers, regardless of Kind.
421 const TargetRegisterClass *
422 ARMBaseRegisterInfo::getPointerRegClass(unsigned Kind) const {
423 return ARM::GPRRegisterClass;
426 /// getAllocationOrder - Returns the register allocation order for a specified
427 /// register class in the form of a pair of TargetRegisterClass iterators.
///
/// When the hint asks for the even (or odd) register of a pair, one of six
/// static orderings is returned, chosen by which register (if any) is the
/// frame pointer and whether R9 is available:
///   1: no FP, R9 free   2: FP=R7, R9 free   3: FP=R11, R9 free
///   4: no FP, R9 taken  5: FP=R7, R9 taken  6: FP=R11, R9 taken
/// Otherwise (or when the pair hint can no longer be satisfied) the class's
/// default allocation order is returned.
/// NOTE(review): array terminators and a few statements (non-contiguous
/// original numbering) are elided from this listing.
428 std::pair<TargetRegisterClass::iterator,TargetRegisterClass::iterator>
429 ARMBaseRegisterInfo::getAllocationOrder(const TargetRegisterClass *RC,
430 unsigned HintType, unsigned HintReg,
431 const MachineFunction &MF) const {
432 // Alternative register allocation orders when favoring even / odd registers
433 // of register pairs.
435 // No FP, R9 is available.
436 static const unsigned GPREven1[] = {
437 ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R8, ARM::R10,
438 ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R7,
441 static const unsigned GPROdd1[] = {
442 ARM::R1, ARM::R3, ARM::R5, ARM::R7, ARM::R9, ARM::R11,
443 ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6,
447 // FP is R7, R9 is available.
448 static const unsigned GPREven2[] = {
449 ARM::R0, ARM::R2, ARM::R4, ARM::R8, ARM::R10,
450 ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R6,
453 static const unsigned GPROdd2[] = {
454 ARM::R1, ARM::R3, ARM::R5, ARM::R9, ARM::R11,
455 ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6,
459 // FP is R11, R9 is available.
460 static const unsigned GPREven3[] = {
461 ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R8,
462 ARM::R1, ARM::R3, ARM::R10,ARM::R12,ARM::LR, ARM::R5, ARM::R7,
465 static const unsigned GPROdd3[] = {
466 ARM::R1, ARM::R3, ARM::R5, ARM::R6, ARM::R9,
467 ARM::R0, ARM::R2, ARM::R10,ARM::R12,ARM::LR, ARM::R4, ARM::R7,
471 // No FP, R9 is not available.
472 static const unsigned GPREven4[] = {
473 ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R10,
474 ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R7, ARM::R8,
477 static const unsigned GPROdd4[] = {
478 ARM::R1, ARM::R3, ARM::R5, ARM::R7, ARM::R11,
479 ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8,
483 // FP is R7, R9 is not available.
484 static const unsigned GPREven5[] = {
485 ARM::R0, ARM::R2, ARM::R4, ARM::R10,
486 ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R6, ARM::R8,
489 static const unsigned GPROdd5[] = {
490 ARM::R1, ARM::R3, ARM::R5, ARM::R11,
491 ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8,
495 // FP is R11, R9 is not available.
496 static const unsigned GPREven6[] = {
497 ARM::R0, ARM::R2, ARM::R4, ARM::R6,
498 ARM::R1, ARM::R3, ARM::R10,ARM::R12,ARM::LR, ARM::R5, ARM::R7, ARM::R8
500 static const unsigned GPROdd6[] = {
501 ARM::R1, ARM::R3, ARM::R5, ARM::R7,
502 ARM::R0, ARM::R2, ARM::R10,ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8
// Even-half hint: if the hinted register can no longer be paired, fall back
// to the default order; otherwise pick the list matching FP / R9 state.
506 if (HintType == ARMRI::RegPairEven) {
507 if (isPhysicalRegister(HintReg) && getRegisterPairEven(HintReg, MF) == 0)
508 // It's no longer possible to fulfill this hint. Return the default
510 return std::make_pair(RC->allocation_order_begin(MF),
511 RC->allocation_order_end(MF));
513 if (!STI.isTargetDarwin() && !hasFP(MF)) {
514 if (!STI.isR9Reserved())
515 return std::make_pair(GPREven1,
516 GPREven1 + (sizeof(GPREven1)/sizeof(unsigned)));
518 return std::make_pair(GPREven4,
519 GPREven4 + (sizeof(GPREven4)/sizeof(unsigned)));
520 } else if (FramePtr == ARM::R7) {
521 if (!STI.isR9Reserved())
522 return std::make_pair(GPREven2,
523 GPREven2 + (sizeof(GPREven2)/sizeof(unsigned)));
525 return std::make_pair(GPREven5,
526 GPREven5 + (sizeof(GPREven5)/sizeof(unsigned)));
527 } else { // FramePtr == ARM::R11
528 if (!STI.isR9Reserved())
529 return std::make_pair(GPREven3,
530 GPREven3 + (sizeof(GPREven3)/sizeof(unsigned)));
532 return std::make_pair(GPREven6,
533 GPREven6 + (sizeof(GPREven6)/sizeof(unsigned)));
// Odd-half hint: symmetric to the even case above.
535 } else if (HintType == ARMRI::RegPairOdd) {
536 if (isPhysicalRegister(HintReg) && getRegisterPairOdd(HintReg, MF) == 0)
537 // It's no longer possible to fulfill this hint. Return the default
539 return std::make_pair(RC->allocation_order_begin(MF),
540 RC->allocation_order_end(MF));
542 if (!STI.isTargetDarwin() && !hasFP(MF)) {
543 if (!STI.isR9Reserved())
544 return std::make_pair(GPROdd1,
545 GPROdd1 + (sizeof(GPROdd1)/sizeof(unsigned)));
547 return std::make_pair(GPROdd4,
548 GPROdd4 + (sizeof(GPROdd4)/sizeof(unsigned)));
549 } else if (FramePtr == ARM::R7) {
550 if (!STI.isR9Reserved())
551 return std::make_pair(GPROdd2,
552 GPROdd2 + (sizeof(GPROdd2)/sizeof(unsigned)));
554 return std::make_pair(GPROdd5,
555 GPROdd5 + (sizeof(GPROdd5)/sizeof(unsigned)));
556 } else { // FramePtr == ARM::R11
557 if (!STI.isR9Reserved())
558 return std::make_pair(GPROdd3,
559 GPROdd3 + (sizeof(GPROdd3)/sizeof(unsigned)));
561 return std::make_pair(GPROdd6,
562 GPROdd6 + (sizeof(GPROdd6)/sizeof(unsigned)));
// No recognized hint: default allocation order for the class.
565 return std::make_pair(RC->allocation_order_begin(MF),
566 RC->allocation_order_end(MF));
569 /// ResolveRegAllocHint - Resolves the specified register allocation hint
570 /// to a physical register. Returns the physical register if it is successful.
/// For the pair hints, the partner register is computed via
/// getRegisterPairOdd / getRegisterPairEven; a null/virtual Reg cannot be
/// resolved here.
/// NOTE(review): the fall-through return for unrecognized hint types
/// (elided original lines) is not visible in this listing.
572 ARMBaseRegisterInfo::ResolveRegAllocHint(unsigned Type, unsigned Reg,
573 const MachineFunction &MF) const {
574 if (Reg == 0 || !isPhysicalRegister(Reg))
578 else if (Type == (unsigned)ARMRI::RegPairOdd)
580 return getRegisterPairOdd(Reg, MF);
581 else if (Type == (unsigned)ARMRI::RegPairEven)
583 return getRegisterPairEven(Reg, MF);
// Keep even/odd pair hints symmetric when the allocator/coalescer renames
// one half of a pair: if Reg was hinted as part of a pair and its partner
// (a virtual register) still hints back at Reg, repoint the partner's hint
// at NewReg.
588 ARMBaseRegisterInfo::UpdateRegAllocHint(unsigned Reg, unsigned NewReg,
589 MachineFunction &MF) const {
590 MachineRegisterInfo *MRI = &MF.getRegInfo();
591 std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(Reg);
592 if ((Hint.first == (unsigned)ARMRI::RegPairOdd ||
593 Hint.first == (unsigned)ARMRI::RegPairEven) &&
594 Hint.second && TargetRegisterInfo::isVirtualRegister(Hint.second)) {
595 // If 'Reg' is one of the even / odd register pair and it's now changed
596 // (e.g. coalesced) into a different register. The other register of the
597 // pair allocation hint must be updated to reflect the relationship
599 unsigned OtherReg = Hint.second;
600 Hint = MRI->getRegAllocationHint(OtherReg);
601 if (Hint.second == Reg)
602 // Make sure the pair has not already divorced.
603 MRI->setRegAllocationHint(OtherReg, Hint.first, NewReg);
607 /// hasFP - Return true if the specified function should have a dedicated frame
608 /// pointer register. This is true if the function has variable sized allocas
609 /// or if frame pointer elimination is disabled.
/// Also true when the stack needs realignment or the frame address is taken.
611 bool ARMBaseRegisterInfo::hasFP(const MachineFunction &MF) const {
612 const MachineFrameInfo *MFI = MF.getFrameInfo();
// Note: -disable-fp-elim alone is not enough — the frame must also adjust
// the stack for the first clause to fire.
613 return ((DisableFramePointerElim(MF) && MFI->adjustsStack())||
614 needsStackRealignment(MF) ||
615 MFI->hasVarSizedObjects() ||
616 MFI->isFrameAddressTaken());
// Stack realignment is possible only when requested (RealignStack), outside
// Thumb1, and when there are no variable-sized objects (which would make the
// realigned offsets unknowable).
619 bool ARMBaseRegisterInfo::canRealignStack(const MachineFunction &MF) const {
620 const MachineFrameInfo *MFI = MF.getFrameInfo();
621 const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
622 return (RealignStack &&
623 !AFI->isThumb1OnlyFunction() &&
624 !MFI->hasVarSizedObjects());
// Realignment is needed when it is possible (same conditions as
// canRealignStack) AND some frame object requires more alignment than the
// target's default stack alignment.
627 bool ARMBaseRegisterInfo::
628 needsStackRealignment(const MachineFunction &MF) const {
629 const MachineFrameInfo *MFI = MF.getFrameInfo();
630 const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
631 unsigned StackAlign = MF.getTarget().getFrameInfo()->getStackAlignment();
632 return (RealignStack &&
633 !AFI->isThumb1OnlyFunction() &&
634 (MFI->getMaxAlignment() > StackAlign) &&
635 !MFI->hasVarSizedObjects());
// True when the frame must be kept: fp-elim disabled with stack adjustment,
// variable-sized objects, frame address taken, or required realignment.
// NOTE(review): the `return true;` body of the first if (original line 642)
// is elided from this listing.
638 bool ARMBaseRegisterInfo::
639 cannotEliminateFrame(const MachineFunction &MF) const {
640 const MachineFrameInfo *MFI = MF.getFrameInfo();
641 if (DisableFramePointerElim(MF) && MFI->adjustsStack())
643 return MFI->hasVarSizedObjects() || MFI->isFrameAddressTaken()
644 || needsStackRealignment(MF);
647 /// estimateStackSize - Estimate and return the size of the frame.
/// Takes the largest fixed-object offset (fixed objects have negative
/// indices) as a baseline, then adds each live frame object's size, rounding
/// the running offset up to each object's alignment.
/// NOTE(review): the declaration of `Offset` (original line 650) and the
/// `continue;` of the dead-object check (line 657) are elided here.
648 static unsigned estimateStackSize(MachineFunction &MF) {
649 const MachineFrameInfo *FFI = MF.getFrameInfo();
651 for (int i = FFI->getObjectIndexBegin(); i != 0; ++i) {
652 int FixedOff = -FFI->getObjectOffset(i);
653 if (FixedOff > Offset) Offset = FixedOff;
655 for (unsigned i = 0, e = FFI->getObjectIndexEnd(); i != e; ++i) {
656 if (FFI->isDeadObjectIndex(i))
658 Offset += FFI->getObjectSize(i);
659 unsigned Align = FFI->getObjectAlignment(i);
660 // Adjust to alignment boundary
661 Offset = (Offset+Align-1)/Align*Align;
663 return (unsigned)Offset;
666 /// estimateRSStackSizeLimit - Look at each instruction that references stack
667 /// frames and return the stack size limit beyond which some of these
668 /// instructions will require a scratch register during their expansion later.
/// Starts from the ARM imm12 maximum and shrinks the limit according to the
/// tightest addressing-mode immediate range found among frame-index uses.
/// NOTE(review): several `break`/`return` lines and the AddrMode6 early
/// return (non-contiguous original numbering) are elided from this listing.
670 ARMBaseRegisterInfo::estimateRSStackSizeLimit(MachineFunction &MF) const {
671 unsigned Limit = (1 << 12) - 1;
672 for (MachineFunction::iterator BB = MF.begin(),E = MF.end(); BB != E; ++BB) {
673 for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end();
675 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
676 if (!I->getOperand(i).isFI()) continue;
678 // When using ADDri to get the address of a stack object, 255 is the
679 // largest offset guaranteed to fit in the immediate offset.
680 if (I->getOpcode() == ARM::ADDri) {
681 Limit = std::min(Limit, (1U << 8) - 1);
685 // Otherwise check the addressing mode.
686 switch (I->getDesc().TSFlags & ARMII::AddrModeMask) {
687 case ARMII::AddrMode3:
688 case ARMII::AddrModeT2_i8:
689 Limit = std::min(Limit, (1U << 8) - 1);
// AddrMode5 / T2_i8s4 immediates are scaled by 4.
691 case ARMII::AddrMode5:
692 case ARMII::AddrModeT2_i8s4:
693 Limit = std::min(Limit, ((1U << 8) - 1) * 4);
695 case ARMII::AddrModeT2_i12:
// With an FP, T2 i12 references may be negative, which only has i8 range.
696 if (hasFP(MF)) Limit = std::min(Limit, (1U << 8) - 1);
698 case ARMII::AddrMode6:
699 // Addressing mode 6 (load/store) instructions can't encode an
700 // immediate offset for stack references.
705 break; // At most one FI per instruction
// Pre-CSR-scan hook: decide which callee-saved registers to spill, whether a
// frame is needed, and whether the register scavenger needs an extra spilled
// register or an emergency spill slot.
// NOTE(review): this listing elides many statements of this long function
// (non-contiguous original numbering) — e.g. the CS1/CS2 classification
// bodies around original lines 761-807 and several closing braces. The
// comments below describe only what is visible.
714 ARMBaseRegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
715 RegScavenger *RS) const {
716 // This tells PEI to spill the FP as if it is any other callee-save register
717 // to take advantage the eliminateFrameIndex machinery. This also ensures it
718 // is spilled in the order specified by getCalleeSavedRegs() to make it easier
719 // to combine multiple loads / stores.
720 bool CanEliminateFrame = true;
721 bool CS1Spilled = false;
722 bool LRSpilled = false;
723 unsigned NumGPRSpills = 0;
724 SmallVector<unsigned, 4> UnspilledCS1GPRs;
725 SmallVector<unsigned, 4> UnspilledCS2GPRs;
726 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
727 MachineFrameInfo *MFI = MF.getFrameInfo();
729 // Spill R4 if Thumb2 function requires stack realignment - it will be used as
731 // FIXME: It will be better just to find spare register here.
732 if (needsStackRealignment(MF) &&
733 AFI->isThumb2Function())
734 MF.getRegInfo().setPhysRegUsed(ARM::R4);
736 // Spill LR if Thumb1 function uses variable length argument lists.
737 if (AFI->isThumb1OnlyFunction() && AFI->getVarArgsRegSaveSize() > 0)
738 MF.getRegInfo().setPhysRegUsed(ARM::LR);
740 // Don't spill FP if the frame can be eliminated. This is determined
741 // by scanning the callee-save registers to see if any is used.
742 const unsigned *CSRegs = getCalleeSavedRegs();
743 for (unsigned i = 0; CSRegs[i]; ++i) {
744 unsigned Reg = CSRegs[i];
745 bool Spilled = false;
746 if (MF.getRegInfo().isPhysRegUsed(Reg)) {
747 AFI->setCSRegisterIsSpilled(Reg);
749 CanEliminateFrame = false;
751 // Check alias registers too.
752 for (const unsigned *Aliases = getAliasSet(Reg); *Aliases; ++Aliases) {
753 if (MF.getRegInfo().isPhysRegUsed(*Aliases)) {
755 CanEliminateFrame = false;
// Non-GPR callee-saves (D registers) are not tracked in the CS1/CS2 lists.
760 if (!ARM::GPRRegisterClass->contains(Reg))
766 if (!STI.isTargetDarwin()) {
773 // Keep track if LR and any of R4, R5, R6, and R7 is spilled.
788 if (!STI.isTargetDarwin()) {
789 UnspilledCS1GPRs.push_back(Reg);
799 UnspilledCS1GPRs.push_back(Reg);
802 UnspilledCS2GPRs.push_back(Reg);
// Thumb1 far-jump support: force-spill LR for large functions so BL can be
// used as a far jump; undone later by the branch fixup pass if unneeded.
808 bool ForceLRSpill = false;
809 if (!LRSpilled && AFI->isThumb1OnlyFunction()) {
810 unsigned FnSize = TII.GetFunctionSizeInBytes(MF);
811 // Force LR to be spilled if the Thumb function size is > 2048. This enables
812 // use of BL to implement far jump. If it turns out that it's not needed
813 // then the branch fix up path will undo it.
814 if (FnSize >= (1 << 11)) {
815 CanEliminateFrame = false;
820 // If any of the stack slot references may be out of range of an immediate
821 // offset, make sure a register (or a spill slot) is available for the
822 // register scavenger. Note that if we're indexing off the frame pointer, the
823 // effective stack size is 4 bytes larger since the FP points to the stack
824 // slot of the previous FP. Also, if we have variable sized objects in the
825 // function, stack slot references will often be negative, and some of
826 // our instructions are positive-offset only, so conservatively consider
827 // that case to want a spill slot (or register) as well.
828 // FIXME: We could add logic to be more precise about negative offsets
829 // and which instructions will need a scratch register for them. Is it
830 // worth the effort and added fragility?
831 bool BigStack = RS &&
832 (estimateStackSize(MF) + (hasFP(MF) ? 4:0) >= estimateRSStackSizeLimit(MF))
833 || MFI->hasVarSizedObjects();
835 bool ExtraCSSpill = false;
836 if (BigStack || !CanEliminateFrame || cannotEliminateFrame(MF)) {
837 AFI->setHasStackFrame(true);
839 // If LR is not spilled, but at least one of R4, R5, R6, and R7 is spilled.
840 // Spill LR as well so we can fold BX_RET to the registers restore (LDM).
841 if (!LRSpilled && CS1Spilled) {
842 MF.getRegInfo().setPhysRegUsed(ARM::LR);
843 AFI->setCSRegisterIsSpilled(ARM::LR);
845 UnspilledCS1GPRs.erase(std::find(UnspilledCS1GPRs.begin(),
846 UnspilledCS1GPRs.end(), (unsigned)ARM::LR));
847 ForceLRSpill = false;
851 // Darwin ABI requires FP to point to the stack slot that contains the
853 if (STI.isTargetDarwin() || hasFP(MF)) {
854 MF.getRegInfo().setPhysRegUsed(FramePtr);
858 // If stack and double are 8-byte aligned and we are spilling an odd number
859 // of GPRs. Spill one extra callee save GPR so we won't have to pad between
860 // the integer and double callee save areas.
861 unsigned TargetAlign = MF.getTarget().getFrameInfo()->getStackAlignment();
862 if (TargetAlign == 8 && (NumGPRSpills & 1)) {
863 if (CS1Spilled && !UnspilledCS1GPRs.empty()) {
864 for (unsigned i = 0, e = UnspilledCS1GPRs.size(); i != e; ++i) {
865 unsigned Reg = UnspilledCS1GPRs[i];
866 // Don't spill high register if the function is thumb1
867 if (!AFI->isThumb1OnlyFunction() ||
868 isARMLowRegister(Reg) || Reg == ARM::LR) {
869 MF.getRegInfo().setPhysRegUsed(Reg);
870 AFI->setCSRegisterIsSpilled(Reg);
871 if (!isReservedReg(MF, Reg))
876 } else if (!UnspilledCS2GPRs.empty() &&
877 !AFI->isThumb1OnlyFunction()) {
878 unsigned Reg = UnspilledCS2GPRs.front();
879 MF.getRegInfo().setPhysRegUsed(Reg);
880 AFI->setCSRegisterIsSpilled(Reg);
881 if (!isReservedReg(MF, Reg))
886 // Estimate if we might need to scavenge a register at some point in order
887 // to materialize a stack offset. If so, either spill one additional
888 // callee-saved register or reserve a special spill slot to facilitate
889 // register scavenging. Thumb1 needs a spill slot for stack pointer
890 // adjustments also, even when the frame itself is small.
891 if (BigStack && !ExtraCSSpill) {
892 // If any non-reserved CS register isn't spilled, just spill one or two
893 // extra. That should take care of it!
894 unsigned NumExtras = TargetAlign / 4;
895 SmallVector<unsigned, 2> Extras;
896 while (NumExtras && !UnspilledCS1GPRs.empty()) {
897 unsigned Reg = UnspilledCS1GPRs.back();
898 UnspilledCS1GPRs.pop_back();
899 if (!isReservedReg(MF, Reg) &&
900 (!AFI->isThumb1OnlyFunction() || isARMLowRegister(Reg) ||
902 Extras.push_back(Reg);
906 // For non-Thumb1 functions, also check for hi-reg CS registers
907 if (!AFI->isThumb1OnlyFunction()) {
908 while (NumExtras && !UnspilledCS2GPRs.empty()) {
909 unsigned Reg = UnspilledCS2GPRs.back();
910 UnspilledCS2GPRs.pop_back();
911 if (!isReservedReg(MF, Reg)) {
912 Extras.push_back(Reg);
917 if (Extras.size() && NumExtras == 0) {
918 for (unsigned i = 0, e = Extras.size(); i != e; ++i) {
919 MF.getRegInfo().setPhysRegUsed(Extras[i]);
920 AFI->setCSRegisterIsSpilled(Extras[i]);
922 } else if (!AFI->isThumb1OnlyFunction()) {
923 // note: Thumb1 functions spill to R12, not the stack. Reserve a slot
924 // closest to SP or frame pointer.
925 const TargetRegisterClass *RC = ARM::GPRRegisterClass;
926 RS->setScavengingFrameIndex(MFI->CreateStackObject(RC->getSize(),
// Visible tail: commit the forced LR spill for the Thumb1 far-jump case.
934 MF.getRegInfo().setPhysRegUsed(ARM::LR);
935 AFI->setCSRegisterIsSpilled(ARM::LR);
936 AFI->setLRIsSpilledForFarJump(true);
// Return-address register accessor.
// NOTE(review): the body (original lines after 940, presumably returning
// ARM::LR) is elided from this listing — confirm against the full file.
940 unsigned ARMBaseRegisterInfo::getRARegister() const {
// Register used as the base for frame references: the frame pointer when one
// is maintained (always on Darwin), otherwise presumably SP (the else branch
// on the elided following lines is not visible in this listing).
945 ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
946 if (STI.isTargetDarwin() || hasFP(MF))
// Resolve a frame index to (FrameReg, Offset). Starts from the object's
// offset plus stack size, then adjusts by the relevant callee-saved area
// offset, or — for realigned / FP-based frames — rebases onto the frame
// pointer.
// NOTE(review): the default FrameReg initialization, several returns, and
// the function tail (non-contiguous original numbering, incl. lines after
// 984) are elided from this listing.
952 ARMBaseRegisterInfo::getFrameIndexReference(const MachineFunction &MF, int FI,
953 unsigned &FrameReg) const {
954 const MachineFrameInfo *MFI = MF.getFrameInfo();
955 const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
956 int Offset = MFI->getObjectOffset(FI) + MFI->getStackSize();
957 bool isFixed = MFI->isFixedObjectIndex(FI);
// Objects inside one of the callee-saved areas are addressed relative to
// that area's start.
960 if (AFI->isGPRCalleeSavedArea1Frame(FI))
961 Offset -= AFI->getGPRCalleeSavedArea1Offset();
962 else if (AFI->isGPRCalleeSavedArea2Frame(FI))
963 Offset -= AFI->getGPRCalleeSavedArea2Offset();
964 else if (AFI->isDPRCalleeSavedAreaFrame(FI))
965 Offset -= AFI->getDPRCalleeSavedAreaOffset();
966 else if (needsStackRealignment(MF)) {
967 // When dynamically realigning the stack, use the frame pointer for
968 // parameters, and the stack pointer for locals.
969 assert (hasFP(MF) && "dynamic stack realignment without a FP!");
971 FrameReg = getFrameRegister(MF);
972 Offset -= AFI->getFramePtrSpillOffset();
974 } else if (hasFP(MF) && AFI->hasStackFrame()) {
975 if (isFixed || MFI->hasVarSizedObjects()) {
976 // Use frame pointer to reference fixed objects unless this is a
977 // frameless function.
978 FrameReg = getFrameRegister(MF);
979 Offset -= AFI->getFramePtrSpillOffset();
980 } else if (AFI->isThumb2Function()) {
981 // In Thumb2 mode, the negative offset is very limited.
982 int FPOffset = Offset - AFI->getFramePtrSpillOffset();
983 if (FPOffset >= -255 && FPOffset < 0) {
984 FrameReg = getFrameRegister(MF);
// Offset-only convenience wrapper: delegates to getFrameIndexReference and
// discards the chosen base register.
994 ARMBaseRegisterInfo::getFrameIndexOffset(const MachineFunction &MF,
997 return getFrameIndexReference(MF, FI, FrameReg);
// Not implemented for ARM — always aborts.
1000 unsigned ARMBaseRegisterInfo::getEHExceptionRegister() const {
1001 llvm_unreachable("What is the exception register");
// Not implemented for ARM — always aborts.
1005 unsigned ARMBaseRegisterInfo::getEHHandlerRegister() const {
1006 llvm_unreachable("What is the exception handler register");
// DWARF register number lookup; the isEH flag is ignored (flavour 0 is
// always used).
1010 int ARMBaseRegisterInfo::getDwarfRegNum(unsigned RegNum, bool isEH) const {
1011 return ARMGenRegisterInfo::getDwarfRegNumFull(RegNum, 0);
// Return the even register of the even/odd pair containing Reg, or 0 when
// the pair's other register is reserved/special. Only a few cases of the
// dispatch are visible here.
// NOTE(review): most of the switch (original lines 1016-1101) is elided from
// this listing; the visible lines appear to be the returns for selected
// GPR cases.
1014 unsigned ARMBaseRegisterInfo::getRegisterPairEven(unsigned Reg,
1015 const MachineFunction &MF) const {
1018 // Return 0 if either register of the pair is a special register.
1027 return isReservedReg(MF, ARM::R7) ? 0 : ARM::R6;
1029 return isReservedReg(MF, ARM::R9) ? 0 :ARM::R8;
1031 return isReservedReg(MF, ARM::R11) ? 0 : ARM::R10;
// Return the odd register of the even/odd pair containing Reg, or 0 when the
// odd register is reserved/special. Mirror of getRegisterPairEven.
// NOTE(review): most of the switch (original lines 1105-1190) is elided from
// this listing.
1103 unsigned ARMBaseRegisterInfo::getRegisterPairOdd(unsigned Reg,
1104 const MachineFunction &MF) const {
1107 // Return 0 if either register of the pair is a special register.
1116 return isReservedReg(MF, ARM::R7) ? 0 : ARM::R7;
1118 return isReservedReg(MF, ARM::R9) ? 0 :ARM::R9;
1120 return isReservedReg(MF, ARM::R11) ? 0 : ARM::R11;
1192 /// emitLoadConstPool - Emits a load from constpool to materialize the
1193 /// specified immediate.
/// Creates an i32 ConstantInt entry in the function's constant pool and
/// emits a predicated LDRcp that loads it into DestReg (optionally into the
/// SubIdx lane).
/// NOTE(review): the DebugLoc parameter line (original line 1197) and the
/// `const Constant *C =` line (1203) are elided from this listing.
1194 void ARMBaseRegisterInfo::
1195 emitLoadConstPool(MachineBasicBlock &MBB,
1196 MachineBasicBlock::iterator &MBBI,
1198 unsigned DestReg, unsigned SubIdx, int Val,
1199 ARMCC::CondCodes Pred,
1200 unsigned PredReg) const {
1201 MachineFunction &MF = *MBB.getParent();
1202 MachineConstantPool *ConstantPool = MF.getConstantPool();
1204 ConstantInt::get(Type::getInt32Ty(MF.getFunction()->getContext()), Val);
// 4-byte alignment for the i32 constant-pool entry.
1205 unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);
1207 BuildMI(MBB, MBBI, dl, TII.get(ARM::LDRcp))
1208 .addReg(DestReg, getDefRegState(true), SubIdx)
1209 .addConstantPoolIndex(Idx)
1210 .addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
// Whether this target needs the register scavenger.
// NOTE(review): the return statement (original lines after 1214) is elided
// from this listing.
1213 bool ARMBaseRegisterInfo::
1214 requiresRegisterScavenging(const MachineFunction &MF) const {
// Whether frame-index elimination may need to scavenge registers.
// NOTE(review): the return statement (original lines after 1219) is elided
// from this listing.
1218 bool ARMBaseRegisterInfo::
1219 requiresFrameIndexScavenging(const MachineFunction &MF) const {
1223 // hasReservedCallFrame - Under normal circumstances, when a frame pointer is
1224 // not required, we reserve argument space for call sites in the function
1225 // immediately on entry to the current function. This eliminates the need for
1226 // add/sub sp brackets around call sites. Returns true if the call frame is
1227 // included as part of the stack frame.
// Declines for very large call frames (they would eat the small immediate
// offset range) and for frames with variable-sized objects.
// NOTE(review): the `return false;` body of the size check (original lines
// 1237-1238) is elided from this listing.
1228 bool ARMBaseRegisterInfo::
1229 hasReservedCallFrame(MachineFunction &MF) const {
1230 const MachineFrameInfo *FFI = MF.getFrameInfo();
1231 unsigned CFSize = FFI->getMaxCallFrameSize();
1232 // It's not always a good idea to include the call frame as part of the
1233 // stack frame. ARM (especially Thumb) has small immediate offset to
1234 // address the stack frame. So a large call frame can cause poor codegen
1235 // and may even makes it impossible to scavenge a register.
1236 if (CFSize >= ((1 << 12) - 1) / 2) // Half of imm12
1239 return !MF.getFrameInfo()->hasVarSizedObjects();
1242 // canSimplifyCallFramePseudos - If there is a reserved call frame, the
1243 // call frame pseudos can be simplified. Unlike most targets, having a FP
1244 // is not sufficient here since we still may reference some objects via SP
1245 // even when FP is available in Thumb2 mode.
1246 bool ARMBaseRegisterInfo::
1247 canSimplifyCallFramePseudos(MachineFunction &MF) const {
1248 return hasReservedCallFrame(MF) || MF.getFrameInfo()->hasVarSizedObjects();
// Emit SP = SP + NumBytes, using the ARM or Thumb2 pseudo-expansion helper
// depending on the current mode, with an optional predicate.
// NOTE(review): the `static void` line, the NumBytes parameter line and the
// if/else keywords (original lines 1250-1251, 1255, 1257, 1260) are elided
// from this listing.
1252 emitSPUpdate(bool isARM,
1253 MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
1254 DebugLoc dl, const ARMBaseInstrInfo &TII,
1256 ARMCC::CondCodes Pred = ARMCC::AL, unsigned PredReg = 0) {
1258 emitARMRegPlusImmediate(MBB, MBBI, dl, ARM::SP, ARM::SP, NumBytes,
1259 Pred, PredReg, TII);
1261 emitT2RegPlusImmediate(MBB, MBBI, dl, ARM::SP, ARM::SP, NumBytes,
1262 Pred, PredReg, TII);
// Replace ADJCALLSTACKDOWN/UP pseudos. When the call frame is not reserved,
// they become real SP adjustments (rounded up to stack alignment, preserving
// the pseudo's predicate); the pseudo is then erased (erase on the elided
// trailing lines).
1266 void ARMBaseRegisterInfo::
1267 eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
1268 MachineBasicBlock::iterator I) const {
1269 if (!hasReservedCallFrame(MF)) {
1270 // If we have alloca, convert as follows:
1271 // ADJCALLSTACKDOWN -> sub, sp, sp, amount
1272 // ADJCALLSTACKUP -> add, sp, sp, amount
1273 MachineInstr *Old = I;
1274 DebugLoc dl = Old->getDebugLoc();
1275 unsigned Amount = Old->getOperand(0).getImm();
1277 // We need to keep the stack aligned properly. To do this, we round the
1278 // amount of space needed for the outgoing arguments up to the next
1279 // alignment boundary.
1280 unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
1281 Amount = (Amount+Align-1)/Align*Align;
1283 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1284 assert(!AFI->isThumb1OnlyFunction() &&
1285 "This eliminateCallFramePseudoInstr does not support Thumb1!");
1286 bool isARM = !AFI->isThumbFunction();
1288 // Replace the pseudo instruction with a new instruction...
1289 unsigned Opc = Old->getOpcode();
1290 int PIdx = Old->findFirstPredOperandIdx();
1291 ARMCC::CondCodes Pred = (PIdx == -1)
1292 ? ARMCC::AL : (ARMCC::CondCodes)Old->getOperand(PIdx).getImm();
1293 if (Opc == ARM::ADJCALLSTACKDOWN || Opc == ARM::tADJCALLSTACKDOWN) {
1294 // Note: PredReg is operand 2 for ADJCALLSTACKDOWN.
1295 unsigned PredReg = Old->getOperand(2).getReg();
// DOWN grows the call frame, hence the negative adjustment.
1296 emitSPUpdate(isARM, MBB, I, dl, TII, -Amount, Pred, PredReg);
1298 // Note: PredReg is operand 3 for ADJCALLSTACKUP.
1299 unsigned PredReg = Old->getOperand(3).getReg();
1300 assert(Opc == ARM::ADJCALLSTACKUP || Opc == ARM::tADJCALLSTACKUP);
1301 emitSPUpdate(isARM, MBB, I, dl, TII, Amount, Pred, PredReg);
1309 ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
1310 int SPAdj, FrameIndexValue *Value,
1311 RegScavenger *RS) const {
1313 MachineInstr &MI = *II;
1314 MachineBasicBlock &MBB = *MI.getParent();
1315 MachineFunction &MF = *MBB.getParent();
1316 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1317 assert(!AFI->isThumb1OnlyFunction() &&
1318 "This eliminateFrameIndex does not support Thumb1!");
1320 while (!MI.getOperand(i).isFI()) {
1322 assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
1325 int FrameIndex = MI.getOperand(i).getIndex();
1328 int Offset = getFrameIndexReference(MF, FrameIndex, FrameReg);
1329 if (FrameReg != ARM::SP)
1333 // Special handling of dbg_value instructions.
1334 if (MI.isDebugValue()) {
1335 MI.getOperand(i). ChangeToRegister(FrameReg, false /*isDef*/);
1336 MI.getOperand(i+1).ChangeToImmediate(Offset);
1340 // Modify MI as necessary to handle as much of 'Offset' as possible
1342 if (!AFI->isThumbFunction())
1343 Done = rewriteARMFrameIndex(MI, i, FrameReg, Offset, TII);
1345 assert(AFI->isThumb2Function());
1346 Done = rewriteT2FrameIndex(MI, i, FrameReg, Offset, TII);
1351 // If we get here, the immediate doesn't fit into the instruction. We folded
1352 // as much as possible above, handle the rest, providing a register that is
1355 (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode4 ||
1356 (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode6) &&
1357 "This code isn't needed if offset already handled!");
1359 unsigned ScratchReg = 0;
1360 int PIdx = MI.findFirstPredOperandIdx();
1361 ARMCC::CondCodes Pred = (PIdx == -1)
1362 ? ARMCC::AL : (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
1363 unsigned PredReg = (PIdx == -1) ? 0 : MI.getOperand(PIdx+1).getReg();
1365 // Must be addrmode4/6.
1366 MI.getOperand(i).ChangeToRegister(FrameReg, false, false, false);
1368 ScratchReg = MF.getRegInfo().createVirtualRegister(ARM::GPRRegisterClass);
1370 Value->first = FrameReg; // use the frame register as a kind indicator
1371 Value->second = Offset;
1373 if (!AFI->isThumbFunction())
1374 emitARMRegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
1375 Offset, Pred, PredReg, TII);
1377 assert(AFI->isThumb2Function());
1378 emitT2RegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
1379 Offset, Pred, PredReg, TII);
1381 MI.getOperand(i).ChangeToRegister(ScratchReg, false, false, true);
1382 if (!ReuseFrameIndexVals)
1388 /// Move iterator past the next bunch of callee save load / store ops for
1389 /// the particular spill area (1: integer area 1, 2: integer area 2,
1390 /// 3: fp area, 0: don't care).
1391 static void movePastCSLoadStoreOps(MachineBasicBlock &MBB,
1392 MachineBasicBlock::iterator &MBBI,
1393 int Opc1, int Opc2, unsigned Area,
1394 const ARMSubtarget &STI) {
1395 while (MBBI != MBB.end() &&
1396 ((MBBI->getOpcode() == Opc1) || (MBBI->getOpcode() == Opc2)) &&
1397 MBBI->getOperand(1).isFI()) {
1400 unsigned Category = 0;
1401 switch (MBBI->getOperand(0).getReg()) {
1402 case ARM::R4: case ARM::R5: case ARM::R6: case ARM::R7:
1406 case ARM::R8: case ARM::R9: case ARM::R10: case ARM::R11:
1407 Category = STI.isTargetDarwin() ? 2 : 1;
1409 case ARM::D8: case ARM::D9: case ARM::D10: case ARM::D11:
1410 case ARM::D12: case ARM::D13: case ARM::D14: case ARM::D15:
1417 if (Done || Category != Area)
1425 void ARMBaseRegisterInfo::
1426 emitPrologue(MachineFunction &MF) const {
1427 MachineBasicBlock &MBB = MF.front();
1428 MachineBasicBlock::iterator MBBI = MBB.begin();
1429 MachineFrameInfo *MFI = MF.getFrameInfo();
1430 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1431 assert(!AFI->isThumb1OnlyFunction() &&
1432 "This emitPrologue does not support Thumb1!");
1433 bool isARM = !AFI->isThumbFunction();
1434 unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize();
1435 unsigned NumBytes = MFI->getStackSize();
1436 const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
1437 DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
1439 // Determine the sizes of each callee-save spill areas and record which frame
1440 // belongs to which callee-save spill areas.
1441 unsigned GPRCS1Size = 0, GPRCS2Size = 0, DPRCSSize = 0;
1442 int FramePtrSpillFI = 0;
1444 // Allocate the vararg register save area. This is not counted in NumBytes.
1446 emitSPUpdate(isARM, MBB, MBBI, dl, TII, -VARegSaveSize);
1448 if (!AFI->hasStackFrame()) {
1450 emitSPUpdate(isARM, MBB, MBBI, dl, TII, -NumBytes);
1454 for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
1455 unsigned Reg = CSI[i].getReg();
1456 int FI = CSI[i].getFrameIdx();
1463 if (Reg == FramePtr)
1464 FramePtrSpillFI = FI;
1465 AFI->addGPRCalleeSavedArea1Frame(FI);
1472 if (Reg == FramePtr)
1473 FramePtrSpillFI = FI;
1474 if (STI.isTargetDarwin()) {
1475 AFI->addGPRCalleeSavedArea2Frame(FI);
1478 AFI->addGPRCalleeSavedArea1Frame(FI);
1483 AFI->addDPRCalleeSavedAreaFrame(FI);
1488 // Build the new SUBri to adjust SP for integer callee-save spill area 1.
1489 emitSPUpdate(isARM, MBB, MBBI, dl, TII, -GPRCS1Size);
1490 movePastCSLoadStoreOps(MBB, MBBI, ARM::STR, ARM::t2STRi12, 1, STI);
1492 // Set FP to point to the stack slot that contains the previous FP.
1493 // For Darwin, FP is R7, which has now been stored in spill area 1.
1494 // Otherwise, if this is not Darwin, all the callee-saved registers go
1495 // into spill area 1, including the FP in R11. In either case, it is
1496 // now safe to emit this assignment.
1497 if (STI.isTargetDarwin() || hasFP(MF)) {
1498 unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri : ARM::t2ADDri;
1499 MachineInstrBuilder MIB =
1500 BuildMI(MBB, MBBI, dl, TII.get(ADDriOpc), FramePtr)
1501 .addFrameIndex(FramePtrSpillFI).addImm(0);
1502 AddDefaultCC(AddDefaultPred(MIB));
1505 // Build the new SUBri to adjust SP for integer callee-save spill area 2.
1506 emitSPUpdate(isARM, MBB, MBBI, dl, TII, -GPRCS2Size);
1508 // Build the new SUBri to adjust SP for FP callee-save spill area.
1509 movePastCSLoadStoreOps(MBB, MBBI, ARM::STR, ARM::t2STRi12, 2, STI);
1510 emitSPUpdate(isARM, MBB, MBBI, dl, TII, -DPRCSSize);
1512 // Determine starting offsets of spill areas.
1513 unsigned DPRCSOffset = NumBytes - (GPRCS1Size + GPRCS2Size + DPRCSSize);
1514 unsigned GPRCS2Offset = DPRCSOffset + DPRCSSize;
1515 unsigned GPRCS1Offset = GPRCS2Offset + GPRCS2Size;
1516 if (STI.isTargetDarwin() || hasFP(MF))
1517 AFI->setFramePtrSpillOffset(MFI->getObjectOffset(FramePtrSpillFI) +
1519 AFI->setGPRCalleeSavedArea1Offset(GPRCS1Offset);
1520 AFI->setGPRCalleeSavedArea2Offset(GPRCS2Offset);
1521 AFI->setDPRCalleeSavedAreaOffset(DPRCSOffset);
1523 movePastCSLoadStoreOps(MBB, MBBI, ARM::VSTRD, 0, 3, STI);
1524 NumBytes = DPRCSOffset;
1526 // Adjust SP after all the callee-save spills.
1527 emitSPUpdate(isARM, MBB, MBBI, dl, TII, -NumBytes);
1530 if (STI.isTargetELF() && hasFP(MF)) {
1531 MFI->setOffsetAdjustment(MFI->getOffsetAdjustment() -
1532 AFI->getFramePtrSpillOffset());
1535 AFI->setGPRCalleeSavedArea1Size(GPRCS1Size);
1536 AFI->setGPRCalleeSavedArea2Size(GPRCS2Size);
1537 AFI->setDPRCalleeSavedAreaSize(DPRCSSize);
1539 // If we need dynamic stack realignment, do it here.
1540 if (needsStackRealignment(MF)) {
1541 unsigned MaxAlign = MFI->getMaxAlignment();
1542 assert (!AFI->isThumb1OnlyFunction());
1543 if (!AFI->isThumbFunction()) {
1544 // Emit bic sp, sp, MaxAlign
1545 AddDefaultCC(AddDefaultPred(BuildMI(MBB, MBBI, dl,
1546 TII.get(ARM::BICri), ARM::SP)
1547 .addReg(ARM::SP, RegState::Kill)
1548 .addImm(MaxAlign-1)));
1550 // We cannot use sp as source/dest register here, thus we're emitting the
1551 // following sequence:
1553 // bic r4, r4, MaxAlign
1555 // FIXME: It will be better just to find spare register here.
1556 BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVgpr2tgpr), ARM::R4)
1557 .addReg(ARM::SP, RegState::Kill);
1558 AddDefaultCC(AddDefaultPred(BuildMI(MBB, MBBI, dl,
1559 TII.get(ARM::t2BICri), ARM::R4)
1560 .addReg(ARM::R4, RegState::Kill)
1561 .addImm(MaxAlign-1)));
1562 BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVtgpr2gpr), ARM::SP)
1563 .addReg(ARM::R4, RegState::Kill);
/// isCalleeSavedRegister - Return true if Reg appears in the zero-terminated
/// CSRegs array of callee-saved registers.
static bool isCalleeSavedRegister(unsigned Reg, const unsigned *CSRegs) {
  for (unsigned i = 0; CSRegs[i]; ++i)
    if (Reg == CSRegs[i])
      return true;
  return false;
}
1575 static bool isCSRestore(MachineInstr *MI,
1576 const ARMBaseInstrInfo &TII,
1577 const unsigned *CSRegs) {
1578 return ((MI->getOpcode() == (int)ARM::VLDRD ||
1579 MI->getOpcode() == (int)ARM::LDR ||
1580 MI->getOpcode() == (int)ARM::t2LDRi12) &&
1581 MI->getOperand(1).isFI() &&
1582 isCalleeSavedRegister(MI->getOperand(0).getReg(), CSRegs));
1585 void ARMBaseRegisterInfo::
1586 emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const {
1587 MachineBasicBlock::iterator MBBI = prior(MBB.end());
1588 assert(MBBI->getDesc().isReturn() &&
1589 "Can only insert epilog into returning blocks");
1590 unsigned RetOpcode = MBBI->getOpcode();
1591 DebugLoc dl = MBBI->getDebugLoc();
1592 MachineFrameInfo *MFI = MF.getFrameInfo();
1593 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1594 assert(!AFI->isThumb1OnlyFunction() &&
1595 "This emitEpilogue does not support Thumb1!");
1596 bool isARM = !AFI->isThumbFunction();
1598 unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize();
1599 int NumBytes = (int)MFI->getStackSize();
1601 if (!AFI->hasStackFrame()) {
1603 emitSPUpdate(isARM, MBB, MBBI, dl, TII, NumBytes);
1605 // Unwind MBBI to point to first LDR / VLDRD.
1606 const unsigned *CSRegs = getCalleeSavedRegs();
1607 if (MBBI != MBB.begin()) {
1610 while (MBBI != MBB.begin() && isCSRestore(MBBI, TII, CSRegs));
1611 if (!isCSRestore(MBBI, TII, CSRegs))
1615 // Move SP to start of FP callee save spill area.
1616 NumBytes -= (AFI->getGPRCalleeSavedArea1Size() +
1617 AFI->getGPRCalleeSavedArea2Size() +
1618 AFI->getDPRCalleeSavedAreaSize());
1620 // Darwin ABI requires FP to point to the stack slot that contains the
1622 bool HasFP = hasFP(MF);
1623 if ((STI.isTargetDarwin() && NumBytes) || HasFP) {
1624 NumBytes = AFI->getFramePtrSpillOffset() - NumBytes;
1625 // Reset SP based on frame pointer only if the stack frame extends beyond
1626 // frame pointer stack slot or target is ELF and the function has FP.
1628 AFI->getGPRCalleeSavedArea2Size() ||
1629 AFI->getDPRCalleeSavedAreaSize() ||
1630 AFI->getDPRCalleeSavedAreaOffset()) {
1633 emitARMRegPlusImmediate(MBB, MBBI, dl, ARM::SP, FramePtr, -NumBytes,
1636 emitT2RegPlusImmediate(MBB, MBBI, dl, ARM::SP, FramePtr, -NumBytes,
1641 BuildMI(MBB, MBBI, dl, TII.get(ARM::MOVr), ARM::SP)
1643 .addImm((unsigned)ARMCC::AL).addReg(0).addReg(0);
1645 BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVgpr2gpr), ARM::SP)
1649 } else if (NumBytes)
1650 emitSPUpdate(isARM, MBB, MBBI, dl, TII, NumBytes);
1652 // Move SP to start of integer callee save spill area 2.
1653 movePastCSLoadStoreOps(MBB, MBBI, ARM::VLDRD, 0, 3, STI);
1654 emitSPUpdate(isARM, MBB, MBBI, dl, TII, AFI->getDPRCalleeSavedAreaSize());
1656 // Move SP to start of integer callee save spill area 1.
1657 movePastCSLoadStoreOps(MBB, MBBI, ARM::LDR, ARM::t2LDRi12, 2, STI);
1658 emitSPUpdate(isARM, MBB, MBBI, dl, TII, AFI->getGPRCalleeSavedArea2Size());
1660 // Move SP to SP upon entry to the function.
1661 movePastCSLoadStoreOps(MBB, MBBI, ARM::LDR, ARM::t2LDRi12, 1, STI);
1662 emitSPUpdate(isARM, MBB, MBBI, dl, TII, AFI->getGPRCalleeSavedArea1Size());
1665 if (RetOpcode == ARM::TCRETURNdi || RetOpcode == ARM::TCRETURNdiND ||
1666 RetOpcode == ARM::TCRETURNri || RetOpcode == ARM::TCRETURNriND) {
1667 // Tail call return: adjust the stack pointer and jump to callee.
1668 MBBI = prior(MBB.end());
1669 MachineOperand &JumpTarget = MBBI->getOperand(0);
1671 // Jump to label or value in register.
1672 if (RetOpcode == ARM::TCRETURNdi) {
1673 BuildMI(MBB, MBBI, dl,
1674 TII.get(STI.isThumb() ? ARM::TAILJMPdt : ARM::TAILJMPd)).
1675 addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
1676 JumpTarget.getTargetFlags());
1677 } else if (RetOpcode == ARM::TCRETURNdiND) {
1678 BuildMI(MBB, MBBI, dl,
1679 TII.get(STI.isThumb() ? ARM::TAILJMPdNDt : ARM::TAILJMPdND)).
1680 addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
1681 JumpTarget.getTargetFlags());
1682 } else if (RetOpcode == ARM::TCRETURNri) {
1683 BuildMI(MBB, MBBI, dl, TII.get(ARM::TAILJMPr)).
1684 addReg(JumpTarget.getReg(), RegState::Kill);
1685 } else if (RetOpcode == ARM::TCRETURNriND) {
1686 BuildMI(MBB, MBBI, dl, TII.get(ARM::TAILJMPrND)).
1687 addReg(JumpTarget.getReg(), RegState::Kill);
1690 MachineInstr *NewMI = prior(MBBI);
1691 for (unsigned i = 1, e = MBBI->getNumOperands(); i != e; ++i)
1692 NewMI->addOperand(MBBI->getOperand(i));
1694 // Delete the pseudo instruction TCRETURN.
1699 emitSPUpdate(isARM, MBB, MBBI, dl, TII, VARegSaveSize);
1702 #include "ARMGenRegisterInfo.inc"