//===- X86RegisterInfo.cpp - X86 Register Information -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//
17 #include "X86RegisterInfo.h"
18 #include "X86InstrBuilder.h"
19 #include "X86MachineFunctionInfo.h"
20 #include "X86Subtarget.h"
21 #include "X86TargetMachine.h"
22 #include "llvm/Constants.h"
23 #include "llvm/Function.h"
24 #include "llvm/Type.h"
25 #include "llvm/CodeGen/ValueTypes.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineFunction.h"
28 #include "llvm/CodeGen/MachineFrameInfo.h"
29 #include "llvm/CodeGen/MachineLocation.h"
30 #include "llvm/CodeGen/MachineModuleInfo.h"
31 #include "llvm/CodeGen/MachineRegisterInfo.h"
32 #include "llvm/Target/TargetAsmInfo.h"
33 #include "llvm/Target/TargetFrameInfo.h"
34 #include "llvm/Target/TargetInstrInfo.h"
35 #include "llvm/Target/TargetMachine.h"
36 #include "llvm/Target/TargetOptions.h"
37 #include "llvm/ADT/BitVector.h"
38 #include "llvm/ADT/STLExtras.h"
39 #include "llvm/Support/Compiler.h"
using namespace llvm;

X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
                                 const TargetInstrInfo &tii)
  : X86GenRegisterInfo(X86::ADJCALLSTACKDOWN, X86::ADJCALLSTACKUP),
    TM(tm), TII(tii) {
  // Cache some information.
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  Is64Bit = Subtarget->is64Bit();
  IsWin64 = Subtarget->isTargetWin64();
  StackAlign = TM.getFrameInfo()->getStackAlignment();

  if (Is64Bit) {
    SlotSize = 8;
    StackPtr = X86::RSP;
    FramePtr = X86::RBP;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
  }
}
// getDwarfRegNum - This function maps LLVM register identifiers to the
// DWARF-specific numbering, used in debug info and exception tables.
int X86RegisterInfo::getDwarfRegNum(unsigned RegNo, bool isEH) const {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  unsigned Flavour = DWARFFlavour::X86_64;
  if (!Subtarget->is64Bit()) {
    if (Subtarget->isTargetDarwin()) {
      if (isEH)
        Flavour = DWARFFlavour::X86_32_DarwinEH;
      else
        Flavour = DWARFFlavour::X86_32_Generic;
    } else if (Subtarget->isTargetCygMing()) {
      // Unsupported by now, just quick fallback.
      Flavour = DWARFFlavour::X86_32_Generic;
    } else {
      Flavour = DWARFFlavour::X86_32_Generic;
    }
  }

  return X86GenRegisterInfo::getDwarfRegNumFull(RegNo, Flavour);
}
// getX86RegNum - This function maps LLVM register identifiers to their X86
// specific numbering, which is used in various places encoding instructions.
//
unsigned X86RegisterInfo::getX86RegNum(unsigned RegNo) {
  switch (RegNo) {
  case X86::RAX: case X86::EAX: case X86::AX: case X86::AL: return N86::EAX;
  case X86::RCX: case X86::ECX: case X86::CX: case X86::CL: return N86::ECX;
  case X86::RDX: case X86::EDX: case X86::DX: case X86::DL: return N86::EDX;
  case X86::RBX: case X86::EBX: case X86::BX: case X86::BL: return N86::EBX;
  case X86::RSP: case X86::ESP: case X86::SP: case X86::SPL: case X86::AH:
    return N86::ESP;
  case X86::RBP: case X86::EBP: case X86::BP: case X86::BPL: case X86::CH:
    return N86::EBP;
  case X86::RSI: case X86::ESI: case X86::SI: case X86::SIL: case X86::DH:
    return N86::ESI;
  case X86::RDI: case X86::EDI: case X86::DI: case X86::DIL: case X86::BH:
    return N86::EDI;

  case X86::R8:  case X86::R8D:  case X86::R8W:  case X86::R8B:
    return N86::EAX;
  case X86::R9:  case X86::R9D:  case X86::R9W:  case X86::R9B:
    return N86::ECX;
  case X86::R10: case X86::R10D: case X86::R10W: case X86::R10B:
    return N86::EDX;
  case X86::R11: case X86::R11D: case X86::R11W: case X86::R11B:
    return N86::EBX;
  case X86::R12: case X86::R12D: case X86::R12W: case X86::R12B:
    return N86::ESP;
  case X86::R13: case X86::R13D: case X86::R13W: case X86::R13B:
    return N86::EBP;
  case X86::R14: case X86::R14D: case X86::R14W: case X86::R14B:
    return N86::ESI;
  case X86::R15: case X86::R15D: case X86::R15W: case X86::R15B:
    return N86::EDI;

  case X86::ST0: case X86::ST1: case X86::ST2: case X86::ST3:
  case X86::ST4: case X86::ST5: case X86::ST6: case X86::ST7:
    return RegNo-X86::ST0;

  case X86::XMM0: case X86::XMM8: case X86::MM0:
    return 0;
  case X86::XMM1: case X86::XMM9: case X86::MM1:
    return 1;
  case X86::XMM2: case X86::XMM10: case X86::MM2:
    return 2;
  case X86::XMM3: case X86::XMM11: case X86::MM3:
    return 3;
  case X86::XMM4: case X86::XMM12: case X86::MM4:
    return 4;
  case X86::XMM5: case X86::XMM13: case X86::MM5:
    return 5;
  case X86::XMM6: case X86::XMM14: case X86::MM6:
    return 6;
  case X86::XMM7: case X86::XMM15: case X86::MM7:
    return 7;

  default:
    assert(isVirtualRegister(RegNo) && "Unknown physical register!");
    assert(0 && "Register allocator hasn't allocated reg correctly yet!");
    return 0;
  }
}
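
// getCrossCopyRegClass - Returns a register class that can be used to copy a
// value in RC to or from another register class. EFLAGS (the CCR class)
// cannot be copied directly on x86, so such cross-class copies are staged
// through a general-purpose register of the native width, which is what this
// hook reports.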
const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return NULL;
}
160 X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
161 static const unsigned CalleeSavedRegs32Bit[] = {
162 X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
165 static const unsigned CalleeSavedRegs32EHRet[] = {
166 X86::EAX, X86::EDX, X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
169 static const unsigned CalleeSavedRegs64Bit[] = {
170 X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
173 static const unsigned CalleeSavedRegsWin64[] = {
174 X86::RBX, X86::RBP, X86::RDI, X86::RSI,
175 X86::R12, X86::R13, X86::R14, X86::R15, 0
180 return CalleeSavedRegsWin64;
182 return CalleeSavedRegs64Bit;
185 MachineFrameInfo *MFI = MF->getFrameInfo();
186 MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
187 if (MMI && MMI->callsEHReturn())
188 return CalleeSavedRegs32EHRet;
190 return CalleeSavedRegs32Bit;
const TargetRegisterClass* const*
X86RegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
  static const TargetRegisterClass * const CalleeSavedRegClasses32Bit[] = {
    &X86::GR32RegClass, &X86::GR32RegClass,
    &X86::GR32RegClass, &X86::GR32RegClass, 0
  };
  static const TargetRegisterClass * const CalleeSavedRegClasses32EHRet[] = {
    &X86::GR32RegClass, &X86::GR32RegClass,
    &X86::GR32RegClass, &X86::GR32RegClass,
    &X86::GR32RegClass, &X86::GR32RegClass, 0
  };
  static const TargetRegisterClass * const CalleeSavedRegClasses64Bit[] = {
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass, 0
  };
  static const TargetRegisterClass * const CalleeSavedRegClassesWin64[] = {
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass, 0
  };

  if (Is64Bit) {
    if (IsWin64)
      return CalleeSavedRegClassesWin64;
    else
      return CalleeSavedRegClasses64Bit;
  } else {
    MachineFrameInfo *MFI = MF->getFrameInfo();
    MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
    if (MMI && MMI->callsEHReturn())
      return CalleeSavedRegClasses32EHRet;
    else
      return CalleeSavedRegClasses32Bit;
  }
}
BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  // The stack pointer and its aliases are always reserved.
  Reserved.set(X86::RSP);
  Reserved.set(X86::ESP);
  Reserved.set(X86::SP);
  Reserved.set(X86::SPL);
  // The frame pointer and its aliases are reserved when a frame pointer is
  // needed.
  if (hasFP(MF)) {
    Reserved.set(X86::RBP);
    Reserved.set(X86::EBP);
    Reserved.set(X86::BP);
    Reserved.set(X86::BPL);
  }
  return Reserved;
}
//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

static unsigned calculateMaxStackAlignment(const MachineFrameInfo *FFI) {
  unsigned MaxAlign = 0;

  for (int i = FFI->getObjectIndexBegin(),
         e = FFI->getObjectIndexEnd(); i != e; ++i) {
    if (FFI->isDeadObjectIndex(i))
      continue;

    unsigned Align = FFI->getObjectAlignment(i);
    MaxAlign = std::max(MaxAlign, Align);
  }

  return MaxAlign;
}
// hasFP - Return true if the specified function should have a dedicated frame
// pointer register.  This is true if the function has variable sized allocas,
// if it needs stack realignment, or if frame pointer elimination is disabled.
//
bool X86RegisterInfo::hasFP(const MachineFunction &MF) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineModuleInfo *MMI = MFI->getMachineModuleInfo();

  return (NoFramePointerElim ||
          needsStackRealignment(MF) ||
          MFI->hasVarSizedObjects() ||
          MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
          (MMI && MMI->callsUnwindInit()));
}
bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();

  // FIXME: Currently we don't support stack realignment for functions with
  //        variable-sized allocas.
  return (RealignStack &&
          (MFI->getMaxAlignment() > StackAlign &&
           !MFI->hasVarSizedObjects()));
}
bool X86RegisterInfo::hasReservedCallFrame(MachineFunction &MF) const {
  return !MF.getFrameInfo()->hasVarSizedObjects();
}
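
// getFrameIndexOffset - Compute the displacement, relative to the base
// register chosen by eliminateFrameIndex (EBP/RBP when a frame pointer is
// used, the stack pointer otherwise), at which the given frame index lives.
// A rough sketch of the layout these offsets assume when a frame pointer is
// present (addresses grow upward, allocation grows downward):
//
//     [return address]     pushed by the call
//     [saved EBP]          FramePtr points here after the prologue
//     [RETADDR move area]  tail-call optimized functions only
//     [callee-saved regs]
//     [local objects]      frame indexes resolve into this area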
int
X86RegisterInfo::getFrameIndexOffset(MachineFunction &MF, int FI) const {
  int Offset = MF.getFrameInfo()->getObjectOffset(FI) + SlotSize;
  uint64_t StackSize = MF.getFrameInfo()->getStackSize();

  if (needsStackRealignment(MF)) {
    if (FI < 0)
      // Skip the saved EBP.
      Offset += SlotSize;
    else {
      unsigned Align = MF.getFrameInfo()->getObjectAlignment(FI);
      assert((-(Offset + StackSize)) % Align == 0);
      return Offset + StackSize;
    }

    // FIXME: Support tail calls
  } else {
    if (!hasFP(MF))
      return Offset + StackSize;

    // Skip the saved EBP.
    Offset += SlotSize;

    // Skip the RETADDR move area.
    X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
    int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
    if (TailCallReturnAddrDelta < 0) Offset -= TailCallReturnAddrDelta;
  }

  return Offset;
}
void X86RegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  if (!hasReservedCallFrame(MF)) {
    // If the stack pointer can be changed after prologue, turn the
    // adjcallstackdown instruction into a 'sub ESP, <amt>' and the
    // adjcallstackup instruction into an 'add ESP, <amt>'.
    // TODO: consider using push / pop instead of sub + store / add.
    MachineInstr *Old = I;
    uint64_t Amount = Old->getOperand(0).getImm();
    if (Amount != 0) {
      // We need to keep the stack aligned properly.  To do this, we round the
      // amount of space needed for the outgoing arguments up to the next
      // alignment boundary.
      Amount = (Amount+StackAlign-1)/StackAlign*StackAlign;
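      // For example, with the default StackAlign of 16 a call that needs 20
      // bytes of outgoing argument space has Amount rounded up to 32 here.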
      MachineInstr *New = 0;
      if (Old->getOpcode() == X86::ADJCALLSTACKDOWN) {
        New=BuildMI(TII.get(Is64Bit ? X86::SUB64ri32 : X86::SUB32ri), StackPtr)
          .addReg(StackPtr).addImm(Amount);
      } else {
        assert(Old->getOpcode() == X86::ADJCALLSTACKUP);
        // Factor out the amount the callee already popped.
        uint64_t CalleeAmt = Old->getOperand(1).getImm();
        Amount -= CalleeAmt;
        if (Amount) {
          unsigned Opc = (Amount < 128) ?
            (Is64Bit ? X86::ADD64ri8 : X86::ADD32ri8) :
            (Is64Bit ? X86::ADD64ri32 : X86::ADD32ri);
          New = BuildMI(TII.get(Opc), StackPtr).addReg(StackPtr).addImm(Amount);
        }
      }

      // Replace the pseudo instruction with a new instruction...
      if (New) MBB.insert(I, New);
    }
  } else if (I->getOpcode() == X86::ADJCALLSTACKUP) {
    // If we are performing frame pointer elimination and if the callee pops
    // something off the stack pointer, add it back.  We do this until we have
    // more advanced stack pointer tracking ability.
    if (uint64_t CalleeAmt = I->getOperand(1).getImm()) {
      unsigned Opc = (CalleeAmt < 128) ?
        (Is64Bit ? X86::SUB64ri8 : X86::SUB32ri8) :
        (Is64Bit ? X86::SUB64ri32 : X86::SUB32ri);
      MachineInstr *New =
        BuildMI(TII.get(Opc), StackPtr).addReg(StackPtr).addImm(CalleeAmt);
      MBB.insert(I, New);
    }
  }

  MBB.erase(I);
}
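
// eliminateFrameIndex - Rewrite an abstract frame-index operand into a
// concrete [base + displacement] memory reference. As an illustrative example
// (register names and offsets are for exposition only): a 32-bit load
// "MOV32rm %eax, <fi#0>, 1, %noreg, 8" in a function with a frame pointer
// becomes "MOV32rm %eax, %ebp, 1, %noreg, getFrameIndexOffset(MF, 0) + 8".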
void X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                          int SPAdj, RegScavenger *RS) const{
  assert(SPAdj == 0 && "Unexpected");

  unsigned i = 0;
  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  while (!MI.getOperand(i).isFrameIndex()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  int FrameIndex = MI.getOperand(i).getIndex();

  unsigned BasePtr;
  if (needsStackRealignment(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
  else
    BasePtr = (hasFP(MF) ? FramePtr : StackPtr);

  // This must be part of a four operand memory reference.  Replace the
  // FrameIndex with the base register.  Add an offset to the offset.
  MI.getOperand(i).ChangeToRegister(BasePtr, false);

  // Now add the frame object offset to the offset from EBP.
  int64_t Offset = getFrameIndexOffset(MF, FrameIndex) +
                   MI.getOperand(i+3).getImm();

  MI.getOperand(i+3).ChangeToImmediate(Offset);
}
void
X86RegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
                                                      RegScavenger *RS) const {
  MachineFrameInfo *FFI = MF.getFrameInfo();

  // Calculate and set max stack object alignment early, so we can decide
  // whether we will need stack realignment (and thus FP).
  unsigned MaxAlign = std::max(FFI->getMaxAlignment(),
                               calculateMaxStackAlignment(FFI));

  FFI->setMaxAlignment(MaxAlign);
}
void
X86RegisterInfo::processFunctionBeforeFrameFinalized(MachineFunction &MF) const{
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  int32_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
  if (TailCallReturnAddrDelta < 0) {
    // Create the RETURNADDR area so the return address can be moved there
    // before a tail call.
    MF.getFrameInfo()->
      CreateFixedObject(-TailCallReturnAddrDelta,
                        (-1 * (int)SlotSize) + TailCallReturnAddrDelta);
  }
  if (hasFP(MF)) {
    assert((TailCallReturnAddrDelta <= 0) &&
           "The Delta should always be zero or negative");
    // Create a frame entry for the EBP register that must be saved.
    int FrameIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize,
                                                        (int)SlotSize * -2 +
                                                        TailCallReturnAddrDelta);
    assert(FrameIdx == MF.getFrameInfo()->getObjectIndexBegin() &&
           "Slot for EBP register must be last in order to be found!");
  }
}
/// emitSPUpdate - Emit a series of instructions to increment / decrement the
/// stack pointer by a constant value.
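/// For example, emitSPUpdate(MBB, MBBI, StackPtr, -24, false, TII) emits
/// "sub esp, 24", while a positive NumBytes emits the matching "add".
/// Adjustments larger than 2^31-1 bytes are split into multiple chunks.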
static
void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                  unsigned StackPtr, int64_t NumBytes, bool Is64Bit,
                  const TargetInstrInfo &TII) {
  bool isSub = NumBytes < 0;
  uint64_t Offset = isSub ? -NumBytes : NumBytes;
  unsigned Opc = isSub
    ? ((Offset < 128) ?
       (Is64Bit ? X86::SUB64ri8 : X86::SUB32ri8) :
       (Is64Bit ? X86::SUB64ri32 : X86::SUB32ri))
    : ((Offset < 128) ?
       (Is64Bit ? X86::ADD64ri8 : X86::ADD32ri8) :
       (Is64Bit ? X86::ADD64ri32 : X86::ADD32ri));
  uint64_t Chunk = (1LL << 31) - 1;

  while (Offset) {
    uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
    BuildMI(MBB, MBBI, TII.get(Opc), StackPtr).addReg(StackPtr).addImm(ThisVal);
    Offset -= ThisVal;
  }
}
// mergeSPUpdatesUp - Merge a stack adjustment (ADD/SUB of the stack pointer)
// found immediately above the given iterator into *NumBytes and erase it.
static
void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                      unsigned StackPtr, uint64_t *NumBytes = NULL) {
  if (MBBI == MBB.begin()) return;

  MachineBasicBlock::iterator PI = prior(MBBI);
  unsigned Opc = PI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += PI->getOperand(2).getImm();
    MBB.erase(PI);
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= PI->getOperand(2).getImm();
    MBB.erase(PI);
  }
}
// mergeSPUpdatesDown - Merge a stack adjustment (ADD/SUB of the stack pointer)
// found immediately below the given iterator into *NumBytes and erase it.
static
void mergeSPUpdatesDown(MachineBasicBlock &MBB,
                        MachineBasicBlock::iterator &MBBI,
                        unsigned StackPtr, uint64_t *NumBytes = NULL) {
  if (MBBI == MBB.end()) return;

  MachineBasicBlock::iterator NI = next(MBBI);
  if (NI == MBB.end()) return;

  unsigned Opc = NI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= NI->getOperand(2).getImm();
    MBB.erase(NI);
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += NI->getOperand(2).getImm();
    MBB.erase(NI);
  }
}
/// mergeSPUpdates - Check the instruction before/after the passed
/// instruction. If it is an ADD/SUB of the stack pointer, it is deleted and
/// the stack adjustment is returned as a positive value for ADD and a
/// negative one for SUB.
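/// For example, if the instruction before MBBI is "sub esp, 8", calling this
/// with doMergeWithPrevious == true erases that instruction and returns -8.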
static int mergeSPUpdates(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MBBI,
                          unsigned StackPtr,
                          bool doMergeWithPrevious) {
  if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
      (!doMergeWithPrevious && MBBI == MBB.end()))
    return 0;

  int Offset = 0;

  MachineBasicBlock::iterator PI = doMergeWithPrevious ? prior(MBBI) : MBBI;
  MachineBasicBlock::iterator NI = doMergeWithPrevious ? 0 : next(MBBI);
  unsigned Opc = PI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr){
    Offset += PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    Offset -= PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  }

  return Offset;
}
void X86RegisterInfo::emitFrameMoves(MachineFunction &MF,
                                     unsigned FrameLabelId,
                                     unsigned ReadyLabelId) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
  if (!MMI)
    return;

  uint64_t StackSize = MFI->getStackSize();
  std::vector<MachineMove> &Moves = MMI->getFrameMoves();
  const TargetData *TD = MF.getTarget().getTargetData();

  // Calculate the number of bytes used for storing the return address.
  int stackGrowth =
    (MF.getTarget().getFrameInfo()->getStackGrowthDirection() ==
     TargetFrameInfo::StackGrowsUp ?
     TD->getPointerSize() : -TD->getPointerSize());

  if (StackSize) {
    // Show update of SP.
    if (hasFP(MF)) {
      // Adjust SP.
      MachineLocation SPDst(MachineLocation::VirtualFP);
      MachineLocation SPSrc(MachineLocation::VirtualFP, 2*stackGrowth);
      Moves.push_back(MachineMove(FrameLabelId, SPDst, SPSrc));
    } else {
      MachineLocation SPDst(MachineLocation::VirtualFP);
      MachineLocation SPSrc(MachineLocation::VirtualFP,
                            -StackSize+stackGrowth);
      Moves.push_back(MachineMove(FrameLabelId, SPDst, SPSrc));
    }
  } else {
    // FIXME: Verify & implement for FP.
    MachineLocation SPDst(StackPtr);
    MachineLocation SPSrc(StackPtr, stackGrowth);
    Moves.push_back(MachineMove(FrameLabelId, SPDst, SPSrc));
  }

  // Add callee saved registers to move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();

  // FIXME: This is a dirty hack. The code itself is a mess right now.
  // It should be rewritten from scratch and generalized at some point.

  // Determine maximum offset (minimum due to stack growth).
  int64_t MaxOffset = 0;
  for (unsigned I = 0, E = CSI.size(); I!=E; ++I)
    MaxOffset = std::min(MaxOffset,
                         MFI->getObjectOffset(CSI[I].getFrameIdx()));

  // Calculate offsets.
  int64_t saveAreaOffset = (hasFP(MF) ? 3 : 2)*stackGrowth;
  for (unsigned I = 0, E = CSI.size(); I!=E; ++I) {
    int64_t Offset = MFI->getObjectOffset(CSI[I].getFrameIdx());
    unsigned Reg = CSI[I].getReg();
    Offset = (MaxOffset-Offset+saveAreaOffset);
    MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
    MachineLocation CSSrc(Reg);
    Moves.push_back(MachineMove(FrameLabelId, CSDst, CSSrc));
  }

  if (hasFP(MF)) {
    // Save FP.
    MachineLocation FPDst(MachineLocation::VirtualFP, 2*stackGrowth);
    MachineLocation FPSrc(FramePtr);
    Moves.push_back(MachineMove(ReadyLabelId, FPDst, FPSrc));
  }

  MachineLocation FPDst(hasFP(MF) ? FramePtr : StackPtr);
  MachineLocation FPSrc(MachineLocation::VirtualFP);
  Moves.push_back(MachineMove(ReadyLabelId, FPDst, FPSrc));
}
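
// emitPrologue - For a function that keeps a frame pointer, the code below
// emits, in outline (illustrative only; the exact sequence depends on stack
// size, realignment and tail-call adjustments):
//
//     push  ebp
//     mov   ebp, esp
//     and   esp, -MaxAlign        ; only if stack realignment is needed
//     sub   esp, NumBytes         ; or the CygMing _alloca probe for >= 4096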
void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
  MachineBasicBlock &MBB = MF.front();   // Prolog goes in entry BB.
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function* Fn = MF.getFunction();
  const X86Subtarget* Subtarget = &MF.getTarget().getSubtarget<X86Subtarget>();
  MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  MachineBasicBlock::iterator MBBI = MBB.begin();
  bool needsFrameMoves = (MMI && MMI->hasDebugInfo()) ||
                          !Fn->doesNotThrow() ||
                          UnwindTablesMandatory;
  // Prepare for frame info.
  unsigned FrameLabelId = 0;

  // Get the number of bytes to allocate from the FrameInfo.
  uint64_t StackSize = MFI->getStackSize();
  // Get desired stack alignment.
  uint64_t MaxAlign = MFI->getMaxAlignment();

  // Add RETADDR move area to callee saved frame size.
  int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
  if (TailCallReturnAddrDelta < 0)
    X86FI->setCalleeSavedFrameSize(
          X86FI->getCalleeSavedFrameSize() + (-TailCallReturnAddrDelta));

  // Insert stack pointer adjustment for later moving of return addr.  Only
  // applies to tail call optimized functions where the callee argument stack
  // size is bigger than the caller's.
  if (TailCallReturnAddrDelta < 0) {
    BuildMI(MBB, MBBI, TII.get(Is64Bit? X86::SUB64ri32 : X86::SUB32ri),
            StackPtr).addReg(StackPtr).addImm(-TailCallReturnAddrDelta);
  }

  uint64_t NumBytes = 0;
  if (hasFP(MF)) {
    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (needsStackRealignment(MF))
      FrameSize = (FrameSize + MaxAlign - 1)/MaxAlign*MaxAlign;

    NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();

    // Get the offset of the stack slot for the EBP register... which is
    // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
    // Update the frame offset adjustment.
    MFI->setOffsetAdjustment(-NumBytes);

    // Save EBP into the appropriate stack slot...
    BuildMI(MBB, MBBI, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
      .addReg(FramePtr);

    if (needsFrameMoves) {
      // Mark effective beginning of when frame pointer becomes valid.
      FrameLabelId = MMI->NextLabelID();
      BuildMI(MBB, MBBI, TII.get(X86::DBG_LABEL)).addImm(FrameLabelId);
    }

    // Update EBP with the new base value...
    BuildMI(MBB, MBBI, TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), FramePtr)
      .addReg(StackPtr);

    // Realign stack.
    if (needsStackRealignment(MF))
      BuildMI(MBB, MBBI,
              TII.get(Is64Bit ? X86::AND64ri32 : X86::AND32ri),
              StackPtr).addReg(StackPtr).addImm(-MaxAlign);
  } else
    NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();

  unsigned ReadyLabelId = 0;
  if (needsFrameMoves) {
    // Mark effective beginning of when frame pointer is ready.
    ReadyLabelId = MMI->NextLabelID();
    BuildMI(MBB, MBBI, TII.get(X86::DBG_LABEL)).addImm(ReadyLabelId);
  }

  // Skip the callee-saved push instructions.
  while (MBBI != MBB.end() &&
         (MBBI->getOpcode() == X86::PUSH32r ||
          MBBI->getOpcode() == X86::PUSH64r))
    ++MBBI;

  if (NumBytes) {   // Adjust stack pointer: ESP -= numbytes.
    if (NumBytes >= 4096 && Subtarget->isTargetCygMing()) {
      // Check whether EAX is live-in for this function.
      bool isEAXAlive = false;
      for (MachineRegisterInfo::livein_iterator
           II = MF.getRegInfo().livein_begin(),
           EE = MF.getRegInfo().livein_end(); (II != EE) && !isEAXAlive; ++II) {
        unsigned Reg = II->first;
        isEAXAlive = (Reg == X86::EAX || Reg == X86::AX ||
                      Reg == X86::AH || Reg == X86::AL);
      }

      // The function prologue calls _alloca to probe the stack when allocating
      // more than 4k bytes in one go. Touching the stack at 4K increments is
      // necessary to ensure that the guard pages used by the OS virtual memory
      // manager are allocated in the correct sequence.
      if (!isEAXAlive) {
        BuildMI(MBB, MBBI, TII.get(X86::MOV32ri), X86::EAX).addImm(NumBytes);
        BuildMI(MBB, MBBI, TII.get(X86::CALLpcrel32))
          .addExternalSymbol("_alloca");
      } else {
        // Save EAX.
        BuildMI(MBB, MBBI, TII.get(X86::PUSH32r), X86::EAX);
        // Allocate NumBytes-4 bytes on stack. We'll also use the 4 already
        // allocated bytes for EAX.
        BuildMI(MBB, MBBI, TII.get(X86::MOV32ri), X86::EAX).addImm(NumBytes-4);
        BuildMI(MBB, MBBI, TII.get(X86::CALLpcrel32))
          .addExternalSymbol("_alloca");

        // Restore EAX.
        MachineInstr *MI = addRegOffset(BuildMI(TII.get(X86::MOV32rm),X86::EAX),
                                        StackPtr, NumBytes-4);
        MBB.insert(MBBI, MI);
      }
    } else {
      // If there is an SUB32ri of ESP immediately before this instruction,
      // merge the two. This can be the case when tail call elimination is
      // enabled and the callee has more arguments than the caller.
      NumBytes -= mergeSPUpdates(MBB, MBBI, StackPtr, true);

      // If there is an ADD32ri or SUB32ri of ESP immediately after this
      // instruction, merge the two instructions.
      mergeSPUpdatesDown(MBB, MBBI, StackPtr, &NumBytes);

      if (NumBytes)
        emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit, TII);
    }
  }

  if (needsFrameMoves)
    emitFrameMoves(MF, FrameLabelId, ReadyLabelId);
}
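
// emitEpilogue - Roughly the inverse of the prologue (illustrative outline,
// ignoring the tail-call and eh_return special cases handled below):
//
//     add   esp, NumBytes         ; or: mov/lea esp from ebp when the stack
//                                 ;     was realigned or dynamic alloca used
//     pop   <callee-saved regs>
//     pop   ebp                   ; only when a frame pointer is used
//     ret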
void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  MachineBasicBlock::iterator MBBI = prior(MBB.end());
  unsigned RetOpcode = MBBI->getOpcode();

  switch (RetOpcode) {
  case X86::RET:
  case X86::RETI:
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNri64:
  case X86::TCRETURNdi64:
  case X86::EH_RETURN:
  case X86::TAILJMPd:
  case X86::TAILJMPr:
  case X86::TAILJMPm: break;  // These are ok.
  default:
    assert(0 && "Can only insert epilog into returning blocks");
  }

  // Get the number of bytes to allocate from the FrameInfo.
  uint64_t StackSize = MFI->getStackSize();
  uint64_t MaxAlign = MFI->getMaxAlignment();
  unsigned CSSize = X86FI->getCalleeSavedFrameSize();
  uint64_t NumBytes = 0;

  if (hasFP(MF)) {
    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (needsStackRealignment(MF))
      FrameSize = (FrameSize + MaxAlign - 1)/MaxAlign*MaxAlign;

    NumBytes = FrameSize - CSSize;

    // Pop EBP.
    BuildMI(MBB, MBBI, TII.get(Is64Bit ? X86::POP64r : X86::POP32r), FramePtr);
  } else
    NumBytes = StackSize - CSSize;

  // Skip the callee-saved pop instructions.
  MachineBasicBlock::iterator LastCSPop = MBBI;
  while (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PI = prior(MBBI);
    unsigned Opc = PI->getOpcode();
    if (Opc != X86::POP32r && Opc != X86::POP64r &&
        !PI->getDesc().isTerminator())
      break;
    --MBBI;
  }

  // If there is an ADD32ri or SUB32ri of ESP immediately before this
  // instruction, merge the two instructions.
  if (NumBytes || MFI->hasVarSizedObjects())
    mergeSPUpdatesUp(MBB, MBBI, StackPtr, &NumBytes);

  // If dynamic alloca is used, then reset ESP to point to the last
  // callee-saved slot before popping them off. The same applies when the
  // stack was realigned.
  if (needsStackRealignment(MF)) {
    // We cannot use LEA here, because the stack pointer was realigned. We
    // need to deallocate the local frame first.
    if (CSSize) {
      emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII);
      MBBI = prior(LastCSPop);
    }

    BuildMI(MBB, MBBI,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(FramePtr);
  } else if (MFI->hasVarSizedObjects()) {
    if (CSSize) {
      unsigned Opc = Is64Bit ? X86::LEA64r : X86::LEA32r;
      MachineInstr *MI = addRegOffset(BuildMI(TII.get(Opc), StackPtr),
                                      FramePtr, -CSSize);
      MBB.insert(MBBI, MI);
    } else
      BuildMI(MBB, MBBI, TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),StackPtr).
        addReg(FramePtr);
  } else {
    // Adjust stack pointer back: ESP += numbytes.
    if (NumBytes)
      emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII);
  }

  // We're returning from function via eh_return.
  if (RetOpcode == X86::EH_RETURN) {
    MBBI = prior(MBB.end());
    MachineOperand &DestAddr = MBBI->getOperand(0);
    assert(DestAddr.isRegister() && "Offset should be in register!");
    BuildMI(MBB, MBBI, TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),StackPtr).
      addReg(DestAddr.getReg());
  // Tail call return: adjust the stack pointer and jump to callee.
  } else if (RetOpcode == X86::TCRETURNri || RetOpcode == X86::TCRETURNdi ||
             RetOpcode== X86::TCRETURNri64 || RetOpcode == X86::TCRETURNdi64) {
    MBBI = prior(MBB.end());
    MachineOperand &JumpTarget = MBBI->getOperand(0);
    MachineOperand &StackAdjust = MBBI->getOperand(1);
    assert(StackAdjust.isImmediate() && "Expecting immediate value.");

    // Adjust stack pointer.
    int StackAdj = StackAdjust.getImm();
    int MaxTCDelta = X86FI->getTCReturnAddrDelta();
    int Offset = 0;
    assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive");
    // Incorporate the retaddr area.
    Offset = StackAdj-MaxTCDelta;
    assert(Offset >= 0 && "Offset should never be negative");

    if (Offset) {
      // Check for possible merge with preceding ADD instruction.
      Offset += mergeSPUpdates(MBB, MBBI, StackPtr, true);
      emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, TII);
    }

    // Jump to label or value in register.
    if (RetOpcode == X86::TCRETURNdi|| RetOpcode == X86::TCRETURNdi64)
      BuildMI(MBB, MBBI, TII.get(X86::TAILJMPd)).
        addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset());
    else if (RetOpcode== X86::TCRETURNri64)
      BuildMI(MBB, MBBI, TII.get(X86::TAILJMPr64), JumpTarget.getReg());
    else
      BuildMI(MBB, MBBI, TII.get(X86::TAILJMPr), JumpTarget.getReg());

    // Delete the pseudo instruction TCRETURN.
    MBB.erase(MBBI);
  } else if ((RetOpcode == X86::RET || RetOpcode == X86::RETI) &&
             (X86FI->getTCReturnAddrDelta() < 0)) {
    // Add the return addr area delta back since we are not tail calling.
    int delta = -1*X86FI->getTCReturnAddrDelta();
    MBBI = prior(MBB.end());

    // Check for possible merge with preceding ADD instruction.
    delta += mergeSPUpdates(MBB, MBBI, StackPtr, true);
    emitSPUpdate(MBB, MBBI, StackPtr, delta, Is64Bit, TII);
  }
}
unsigned X86RegisterInfo::getRARegister() const {
  if (Is64Bit)
    return X86::RIP;  // Should have dwarf #16.
  else
    return X86::EIP;  // Should have dwarf #8.
}
unsigned X86RegisterInfo::getFrameRegister(MachineFunction &MF) const {
  return hasFP(MF) ? FramePtr : StackPtr;
}
void X86RegisterInfo::getInitialFrameState(std::vector<MachineMove> &Moves)
                                                                       const {
  // Calculate the number of bytes used for storing the return address.
  int stackGrowth = (Is64Bit ? -8 : -4);

  // Initial state of the frame pointer is esp+4 (rsp+8 in 64-bit mode).
  MachineLocation Dst(MachineLocation::VirtualFP);
  MachineLocation Src(StackPtr, stackGrowth);
  Moves.push_back(MachineMove(0, Dst, Src));

  // Add return address to move list.
  MachineLocation CSDst(StackPtr, stackGrowth);
  MachineLocation CSSrc(getRARegister());
  Moves.push_back(MachineMove(0, CSDst, CSSrc));
}
unsigned X86RegisterInfo::getEHExceptionRegister() const {
  assert(0 && "What is the exception register");
  return 0;
}

unsigned X86RegisterInfo::getEHHandlerRegister() const {
  assert(0 && "What is the exception handler register");
  return 0;
}
unsigned getX86SubSuperRegister(unsigned Reg, MVT VT, bool High) {
  switch (VT.getSimpleVT()) {
  default: return Reg;
  case MVT::i8:
    if (High) {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case MVT::i16:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case MVT::i32:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case MVT::i64:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }

  return Reg;
}
1114 #include "X86GenRegisterInfo.inc"
namespace {
  struct VISIBILITY_HIDDEN MSAC : public MachineFunctionPass {
    static char ID;
    MSAC() : MachineFunctionPass((intptr_t)&ID) {}

    virtual bool runOnMachineFunction(MachineFunction &MF) {
      MachineFrameInfo *FFI = MF.getFrameInfo();
      MachineRegisterInfo &RI = MF.getRegInfo();

      // Calculate max stack alignment of all already allocated stack objects.
      unsigned MaxAlign = calculateMaxStackAlignment(FFI);

      // Be over-conservative: scan over all vreg defs and find whether vector
      // registers are used. If so, there is a probability that a vector
      // register will be spilled and thus the stack needs to be aligned
      // properly.
      for (unsigned RegNum = TargetRegisterInfo::FirstVirtualRegister;
           RegNum < RI.getLastVirtReg(); ++RegNum)
        MaxAlign = std::max(MaxAlign, RI.getRegClass(RegNum)->getAlignment());

      FFI->setMaxAlignment(MaxAlign);

      return false;
    }

    virtual const char *getPassName() const {
      return "X86 Maximal Stack Alignment Calculator";
    }
  };

  char MSAC::ID = 0;
}

FunctionPass*
llvm::createX86MaxStackAlignmentCalculatorPass() { return new MSAC(); }
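
// A sketch of how this pass is expected to be scheduled (the exact hook lives
// in X86TargetMachine and is not shown here): it should run before register
// allocation, e.g. PM.add(createX86MaxStackAlignmentCalculatorPass());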