//===-- X86FrameLowering.cpp - X86 Frame Information ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of TargetFrameLowering class.
//
//===----------------------------------------------------------------------===//

#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

// FIXME: completely move here.
extern cl::opt<bool> ForceStackAlign;
bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  return !MF.getFrameInfo()->hasVarSizedObjects();
}
/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register. This is true if the function has variable sized allocas
/// or if frame pointer elimination is disabled.
bool X86FrameLowering::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineModuleInfo &MMI = MF.getMMI();
  const TargetRegisterInfo *RegInfo = TM.getRegisterInfo();

  return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
          RegInfo->needsStackRealignment(MF) ||
          MFI->hasVarSizedObjects() ||
          MFI->isFrameAddressTaken() || MFI->hasInlineAsmWithSPAdjust() ||
          MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
          MMI.callsUnwindInit() || MMI.callsEHReturn());
}
static unsigned getSUBriOpcode(unsigned IsLP64, int64_t Imm) {
  if (IsLP64)
    return isInt<8>(Imm) ? X86::SUB64ri8 : X86::SUB64ri32;
  return isInt<8>(Imm) ? X86::SUB32ri8 : X86::SUB32ri;
}

static unsigned getADDriOpcode(unsigned IsLP64, int64_t Imm) {
  if (IsLP64)
    return isInt<8>(Imm) ? X86::ADD64ri8 : X86::ADD64ri32;
  return isInt<8>(Imm) ? X86::ADD32ri8 : X86::ADD32ri;
}

static unsigned getLEArOpcode(unsigned IsLP64) {
  return IsLP64 ? X86::LEA64r : X86::LEA32r;
}
/// findDeadCallerSavedReg - Return a caller-saved register that isn't live
/// when it reaches the "return" instruction. We can then pop a stack object
/// to this register without worrying about clobbering it.
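//
// For example (illustrative, not from the original source): when the epilogue
// needs to pop 8 bytes and RCX is dead at the return, emitSPUpdate below can
// emit "popq %rcx" instead of "addq $8, %rsp".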
89 static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,
90 MachineBasicBlock::iterator &MBBI,
91 const TargetRegisterInfo &TRI,
93 const MachineFunction *MF = MBB.getParent();
94 const Function *F = MF->getFunction();
95 if (!F || MF->getMMI().callsEHReturn())
98 static const uint16_t CallerSavedRegs32Bit[] = {
99 X86::EAX, X86::EDX, X86::ECX, 0
102 static const uint16_t CallerSavedRegs64Bit[] = {
103 X86::RAX, X86::RDX, X86::RCX, X86::RSI, X86::RDI,
104 X86::R8, X86::R9, X86::R10, X86::R11, 0
107 unsigned Opc = MBBI->getOpcode();
114 case X86::TCRETURNdi:
115 case X86::TCRETURNri:
116 case X86::TCRETURNmi:
117 case X86::TCRETURNdi64:
118 case X86::TCRETURNri64:
119 case X86::TCRETURNmi64:
121 case X86::EH_RETURN64: {
122 SmallSet<uint16_t, 8> Uses;
123 for (unsigned i = 0, e = MBBI->getNumOperands(); i != e; ++i) {
124 MachineOperand &MO = MBBI->getOperand(i);
125 if (!MO.isReg() || MO.isDef())
127 unsigned Reg = MO.getReg();
130 for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI)
134 const uint16_t *CS = Is64Bit ? CallerSavedRegs64Bit : CallerSavedRegs32Bit;
136 if (!Uses.count(*CS))
/// emitSPUpdate - Emit a series of instructions to increment / decrement the
/// stack pointer by a constant value.
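//
// Illustrative example (assumed, not taken from a test): an 8-byte allocation
// on x86-64 becomes a single push (the pushed value is irrelevant), an 8-byte
// deallocation becomes a pop into a dead caller-saved register, and larger
// adjustments are split into chunks of at most 2^31 - 1 bytes so every
// SUB/ADD immediate still fits in 32 bits.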
static
void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                  unsigned StackPtr, int64_t NumBytes,
                  bool Is64Bit, bool IsLP64, bool UseLEA,
                  const TargetInstrInfo &TII, const TargetRegisterInfo &TRI) {
  bool isSub = NumBytes < 0;
  uint64_t Offset = isSub ? -NumBytes : NumBytes;
  unsigned Opc;
  if (UseLEA)
    Opc = getLEArOpcode(IsLP64);
  else
    Opc = isSub
      ? getSUBriOpcode(IsLP64, Offset)
      : getADDriOpcode(IsLP64, Offset);

  uint64_t Chunk = (1LL << 31) - 1;
  DebugLoc DL = MBB.findDebugLoc(MBBI);

  while (Offset) {
    uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
    if (ThisVal == (Is64Bit ? 8 : 4)) {
      // Use push / pop instead.
      unsigned Reg = isSub
        ? (unsigned)(Is64Bit ? X86::RAX : X86::EAX)
        : findDeadCallerSavedReg(MBB, MBBI, TRI, Is64Bit);
      if (Reg) {
        unsigned Opc = isSub
          ? (Is64Bit ? X86::PUSH64r : X86::PUSH32r)
          : (Is64Bit ? X86::POP64r : X86::POP32r);
        MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc))
          .addReg(Reg, getDefRegState(!isSub) | getUndefRegState(isSub));
        if (isSub)
          MI->setFlag(MachineInstr::FrameSetup);
        Offset -= ThisVal;
        continue;
      }
    }

    MachineInstr *MI = nullptr;

    if (UseLEA) {
      MI = addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),
                        StackPtr, false, isSub ? -ThisVal : ThisVal);
    } else {
      MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
             .addReg(StackPtr)
             .addImm(ThisVal);
      MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
    }

    if (isSub)
      MI->setFlag(MachineInstr::FrameSetup);

    Offset -= ThisVal;
  }
}
/// mergeSPUpdatesUp - Merge a stack adjustment found immediately above the
/// given iterator into *NumBytes and erase it.
static
void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                      unsigned StackPtr, uint64_t *NumBytes = nullptr) {
  if (MBBI == MBB.begin()) return;

  MachineBasicBlock::iterator PI = std::prev(MBBI);
  unsigned Opc = PI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8 ||
       Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
      PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += PI->getOperand(2).getImm();
    MBB.erase(PI);
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= PI->getOperand(2).getImm();
    MBB.erase(PI);
  }
}
/// mergeSPUpdatesDown - Merge a stack adjustment found immediately below the
/// given iterator into *NumBytes and erase it.
static
void mergeSPUpdatesDown(MachineBasicBlock &MBB,
                        MachineBasicBlock::iterator &MBBI,
                        unsigned StackPtr, uint64_t *NumBytes = nullptr) {
  // FIXME: THIS ISN'T RUN!!!
  return;

  if (MBBI == MBB.end()) return;

  MachineBasicBlock::iterator NI = std::next(MBBI);
  if (NI == MBB.end()) return;

  unsigned Opc = NI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  }
}
/// mergeSPUpdates - Checks the instruction before/after the passed
/// instruction. If it is an ADD/SUB/LEA instruction it is deleted and the
/// stack adjustment is returned as a positive value for ADD/LEA and a
/// negative value for SUB.
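//
// For example (illustrative): if the prologue is about to allocate stack and
// the instruction just above MBBI is "subq $16, %rsp", this returns -16, the
// SUB is erased, and the caller folds those 16 bytes into its own adjustment.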
static int mergeSPUpdates(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MBBI, unsigned StackPtr,
                          bool doMergeWithPrevious) {
  if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
      (!doMergeWithPrevious && MBBI == MBB.end()))
    return 0;

  MachineBasicBlock::iterator PI = doMergeWithPrevious ? std::prev(MBBI) : MBBI;
  MachineBasicBlock::iterator NI = doMergeWithPrevious ? nullptr
                                                       : std::next(MBBI);
  unsigned Opc = PI->getOpcode();
  int Offset = 0;

  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8 ||
       Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
      PI->getOperand(0).getReg() == StackPtr) {
    Offset += PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    Offset -= PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  }

  return Offset;
}
static bool isEAXLiveIn(MachineFunction &MF) {
  for (MachineRegisterInfo::livein_iterator II = MF.getRegInfo().livein_begin(),
       EE = MF.getRegInfo().livein_end(); II != EE; ++II) {
    unsigned Reg = II->first;

    if (Reg == X86::EAX || Reg == X86::AX ||
        Reg == X86::AH || Reg == X86::AL)
      return true;
  }

  return false;
}
308 void X86FrameLowering::emitCalleeSavedFrameMoves(
309 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, DebugLoc DL,
310 unsigned FramePtr) const {
311 MachineFunction &MF = *MBB.getParent();
312 MachineFrameInfo *MFI = MF.getFrameInfo();
313 MachineModuleInfo &MMI = MF.getMMI();
314 const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
315 const X86InstrInfo &TII = *TM.getInstrInfo();
317 // Add callee saved registers to move list.
318 const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
319 if (CSI.empty()) return;
321 const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
322 bool HasFP = hasFP(MF);
  // Calculate the number of bytes used for storing the return address.
  int stackGrowth = -RegInfo->getSlotSize();

  // FIXME: This is a dirty hack. The code itself is pretty messy right now.
  // It should be rewritten from scratch and generalized at some point.
330 // Determine maximum offset (minimum due to stack growth).
331 int64_t MaxOffset = 0;
332 for (std::vector<CalleeSavedInfo>::const_iterator
333 I = CSI.begin(), E = CSI.end(); I != E; ++I)
334 MaxOffset = std::min(MaxOffset,
335 MFI->getObjectOffset(I->getFrameIdx()));
337 // Calculate offsets.
338 int64_t saveAreaOffset = (HasFP ? 3 : 2) * stackGrowth;
339 for (std::vector<CalleeSavedInfo>::const_iterator
340 I = CSI.begin(), E = CSI.end(); I != E; ++I) {
341 int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
342 unsigned Reg = I->getReg();
343 Offset = MaxOffset - Offset + saveAreaOffset;
    // Don't output a new machine move if we're re-saving the frame
    // pointer. This happens when the PrologEpilogInserter has inserted an extra
    // "PUSH" of the frame pointer -- the "emitPrologue" method automatically
    // generates one when frame pointers are used. If we generate a "machine
    // move" for this extra "PUSH", the linker will lose track of the fact that
    // the frame pointer should have the value of the first "PUSH" when it's
    // trying to unwind.
    //
    // FIXME: This looks inelegant. It's possibly correct, but it's covering up
    // another bug. I.e., one where we generate a prolog like this:
    //
    //     pushl  %ebp
    //     movl   %esp, %ebp
    //     pushl  %ebp
    //     pushl  %esi
    //     ...
    //
    // The immediate re-push of EBP is unnecessary. At the least, it's an
    // optimization bug. EBP can be used as a scratch register in certain
    // cases, but probably not when we have a frame pointer.
365 if (HasFP && FramePtr == Reg)
      continue;

    unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
    unsigned CFIIndex = MMI.addFrameInst(
        MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset));
    BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex);
  }
}
/// usesTheStack - This function checks if any of the users of EFLAGS
/// copies the EFLAGS. We know that the code that lowers COPY of EFLAGS has
/// to use the stack, and if we don't adjust the stack we clobber the first
/// frame index.
/// See X86InstrInfo::copyPhysReg.
static bool usesTheStack(const MachineFunction &MF) {
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  for (MachineRegisterInfo::reg_instr_iterator
       ri = MRI.reg_instr_begin(X86::EFLAGS), re = MRI.reg_instr_end();
       ri != re; ++ri)
    if (ri->isCopy())
      return true;

  return false;
}
/// emitPrologue - Push callee-saved registers onto the stack, which
/// automatically adjusts the stack pointer. Adjust the stack pointer to
/// allocate space for local variables. Also emit labels used by the exception
/// handler to generate the exception handling frames.
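///
/// A rough sketch of a frame-pointer-based x86-64 prologue (illustrative
/// only; the exact opcodes and ordering depend on the ABI and on the checks
/// performed below):
///
///     pushq %rbp            ; save the old frame pointer
///     movq  %rsp, %rbp      ; establish the new frame pointer
///     pushq <callee-saved>  ; spill callee-saved GPRs
///     subq  $NN, %rsp       ; allocate space for locals and spills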
398 void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
399 MachineBasicBlock &MBB = MF.front(); // Prologue goes in entry BB.
400 MachineBasicBlock::iterator MBBI = MBB.begin();
401 MachineFrameInfo *MFI = MF.getFrameInfo();
402 const Function *Fn = MF.getFunction();
403 const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
404 const X86InstrInfo &TII = *TM.getInstrInfo();
405 MachineModuleInfo &MMI = MF.getMMI();
406 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
407 bool needsFrameMoves = MMI.hasDebugInfo() ||
408 Fn->needsUnwindTableEntry();
409 uint64_t MaxAlign = MFI->getMaxAlignment(); // Desired stack alignment.
410 uint64_t StackSize = MFI->getStackSize(); // Number of bytes to allocate.
411 bool HasFP = hasFP(MF);
412 bool Is64Bit = STI.is64Bit();
413 bool IsLP64 = STI.isTarget64BitLP64();
414 bool IsWin64 = STI.isTargetWin64();
415 bool UseLEA = STI.useLeaForSP();
416 unsigned StackAlign = getStackAlignment();
417 unsigned SlotSize = RegInfo->getSlotSize();
418 unsigned FramePtr = RegInfo->getFrameRegister(MF);
419 unsigned StackPtr = RegInfo->getStackRegister();
420 unsigned BasePtr = RegInfo->getBaseRegister();
  // If we're forcing a stack realignment we can't rely on just the frame
  // info, we need to know the ABI stack alignment as well in case we
  // have a call out. Otherwise just make sure we have some alignment - we'll
  // go with the minimum SlotSize.
  if (ForceStackAlign) {
    if (MFI->hasCalls())
      MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
    else if (MaxAlign < SlotSize)
      MaxAlign = SlotSize;
  }
434 // Add RETADDR move area to callee saved frame size.
435 int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
436 if (TailCallReturnAddrDelta < 0)
437 X86FI->setCalleeSavedFrameSize(
438 X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta);
  // If this is x86-64, the Red Zone is not disabled, the function is a leaf
  // that uses no more than 128 bytes of stack space and has no frame pointer,
  // calls, or dynamic allocas, then we do not need to adjust the stack
  // pointer (we fit in the Red Zone). We also check that we don't push and
  // pop from the stack.
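  //
  // Worked example (illustrative numbers, ignoring callee-saved spills): a
  // leaf function needing 160 bytes of locals only has to drop RSP by
  // 160 - 128 = 32 bytes, and one needing 96 bytes needs no adjustment at
  // all, because the first 128 bytes below RSP are already usable.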
445 if (Is64Bit && !Fn->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
446 Attribute::NoRedZone) &&
447 !RegInfo->needsStackRealignment(MF) &&
448 !MFI->hasVarSizedObjects() && // No dynamic alloca.
449 !MFI->adjustsStack() && // No calls.
450 !IsWin64 && // Win64 has no Red Zone
451 !usesTheStack(MF) && // Don't push and pop.
452 !MF.shouldSplitStack()) { // Regular stack
453 uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
454 if (HasFP) MinSize += SlotSize;
455 StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
456 MFI->setStackSize(StackSize);
  // Insert stack pointer adjustment for later moving of return addr. Only
  // applies to tail call optimized functions where the callee argument stack
  // size is bigger than the callers.
462 if (TailCallReturnAddrDelta < 0) {
    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL,
              TII.get(getSUBriOpcode(IsLP64, -TailCallReturnAddrDelta)),
              StackPtr)
        .addReg(StackPtr)
        .addImm(-TailCallReturnAddrDelta)
        .setMIFlag(MachineInstr::FrameSetup);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
  }
  // Mapping for machine moves:
  //
  //   DST: VirtualFP AND
  //        SRC: VirtualFP              => DW_CFA_def_cfa_offset
  //        ELSE                        => DW_CFA_def_cfa
  //
  //   SRC: VirtualFP AND
  //        DST: Register               => DW_CFA_def_cfa_register
  //
  //   ELSE
  //        OFFSET < 0                  => DW_CFA_offset_extended_sf
  //        REG < 64                    => DW_CFA_offset + Reg
  //        ELSE                        => DW_CFA_offset_extended
  uint64_t NumBytes = 0;
  int stackGrowth = -SlotSize;

  if (HasFP) {
    // Calculate required stack adjustment.
492 uint64_t FrameSize = StackSize - SlotSize;
493 if (RegInfo->needsStackRealignment(MF)) {
      // Callee-saved registers are pushed on stack before the stack
      // is realigned.
      FrameSize -= X86FI->getCalleeSavedFrameSize();
      NumBytes = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;
    } else {
      NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();
    }
    // Get the offset of the stack slot for the EBP register, which is
    // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
    // Update the frame offset adjustment.
505 MFI->setOffsetAdjustment(-NumBytes);
507 // Save EBP/RBP into the appropriate stack slot.
508 BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
509 .addReg(FramePtr, RegState::Kill)
510 .setMIFlag(MachineInstr::FrameSetup);
512 if (needsFrameMoves) {
513 // Mark the place where EBP/RBP was saved.
514 // Define the current CFA rule to use the provided offset.
516 unsigned CFIIndex = MMI.addFrameInst(
517 MCCFIInstruction::createDefCfaOffset(nullptr, 2 * stackGrowth));
518 BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
519 .addCFIIndex(CFIIndex);
521 // Change the rule for the FramePtr to be an "offset" rule.
522 unsigned DwarfFramePtr = RegInfo->getDwarfRegNum(FramePtr, true);
523 CFIIndex = MMI.addFrameInst(
524 MCCFIInstruction::createOffset(nullptr,
525 DwarfFramePtr, 2 * stackGrowth));
526 BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
527 .addCFIIndex(CFIIndex);
530 // Update EBP with the new base value.
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), FramePtr)
        .addReg(StackPtr)
        .setMIFlag(MachineInstr::FrameSetup);
536 if (needsFrameMoves) {
537 // Mark effective beginning of when frame pointer becomes valid.
538 // Define the current CFA to use the EBP/RBP register.
539 unsigned DwarfFramePtr = RegInfo->getDwarfRegNum(FramePtr, true);
540 unsigned CFIIndex = MMI.addFrameInst(
541 MCCFIInstruction::createDefCfaRegister(nullptr, DwarfFramePtr));
542 BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
    }

    // Mark the FramePtr as live-in in every block except the entry.
    for (MachineFunction::iterator I = std::next(MF.begin()), E = MF.end();
         I != E; ++I)
      I->addLiveIn(FramePtr);
  } else {
    NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
  }
554 // Skip the callee-saved push instructions.
555 bool PushedRegs = false;
556 int StackOffset = 2 * stackGrowth;
558 while (MBBI != MBB.end() &&
559 (MBBI->getOpcode() == X86::PUSH32r ||
560 MBBI->getOpcode() == X86::PUSH64r)) {
    PushedRegs = true;
    MBBI->setFlag(MachineInstr::FrameSetup);
    ++MBBI;

    if (!HasFP && needsFrameMoves) {
566 // Mark callee-saved push instruction.
567 // Define the current CFA rule to use the provided offset.
569 unsigned CFIIndex = MMI.addFrameInst(
570 MCCFIInstruction::createDefCfaOffset(nullptr, StackOffset));
571 BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
572 .addCFIIndex(CFIIndex);
      StackOffset += stackGrowth;
    }
  }
  // Realign stack after we pushed callee-saved registers (so that we'll be
  // able to calculate their offsets from the frame pointer).
  //
  // NOTE: We push the registers before realigning the stack, so
  // vector callee-saved (xmm) registers may be saved w/o proper
  // alignment in this way. However, currently these regs are saved in
  // stack slots (see X86FrameLowering::spillCalleeSavedRegisters()), so
  // this shouldn't be a problem.
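  //
  // For example (illustrative), with MaxAlign == 32 on x86-64 the realignment
  // below amounts to "andq $-32, %rsp", clearing the low five bits of the
  // stack pointer.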
585 if (RegInfo->needsStackRealignment(MF)) {
586 assert(HasFP && "There should be a frame pointer if stack is realigned.");
588 BuildMI(MBB, MBBI, DL,
589 TII.get(Is64Bit ? X86::AND64ri32 : X86::AND32ri), StackPtr)
592 .setMIFlag(MachineInstr::FrameSetup);
594 // The EFLAGS implicit def is dead.
595 MI->getOperand(3).setIsDead();
  // If there is an SUB32ri of ESP immediately before this instruction, merge
  // the two. This can be the case when tail call elimination is enabled and
  // the callee has more arguments than the caller.
601 NumBytes -= mergeSPUpdates(MBB, MBBI, StackPtr, true);
603 // If there is an ADD32ri or SUB32ri of ESP immediately after this
604 // instruction, merge the two instructions.
605 mergeSPUpdatesDown(MBB, MBBI, StackPtr, &NumBytes);
607 // Adjust stack pointer: ESP -= numbytes.
  // Windows and cygwin/mingw require a prologue helper routine when allocating
  // more than 4K bytes on the stack. Windows uses __chkstk and cygwin/mingw
  // uses __alloca. __alloca and the 32-bit version of __chkstk will probe the
  // stack and adjust the stack pointer in one go. The 64-bit version of
  // __chkstk is only responsible for probing the stack. The 64-bit prologue is
  // responsible for adjusting the stack pointer. Touching the stack at 4K
  // increments is necessary to ensure that the guard pages used by the OS
  // virtual memory manager are allocated in correct sequence.
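  //
  // Rough shape of the 64-bit Windows sequence (illustrative; the exact
  // symbol and registers are selected below):
  //
  //     movabsq $NumBytes, %rax
  //     callq   __chkstk        ; probes the stack, leaves RSP untouched
  //     subq    %rax, %rsp      ; the prologue performs the actual adjustment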
617 if (NumBytes >= 4096 && STI.isOSWindows() && !STI.isTargetMacho()) {
618 const char *StackProbeSymbol;
    if (Is64Bit) {
      if (STI.isTargetCygMing()) {
        StackProbeSymbol = "___chkstk_ms";
      } else {
        StackProbeSymbol = "__chkstk";
      }
    } else if (STI.isTargetCygMing())
      StackProbeSymbol = "_alloca";
    else
      StackProbeSymbol = "_chkstk";
631 // Check whether EAX is livein for this function.
632 bool isEAXAlive = isEAXLiveIn(MF);
635 // Sanity check that EAX is not livein for this function.
636 // It should not be, so throw an assert.
637 assert(!Is64Bit && "EAX is livein in x64 case!");
640 BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
641 .addReg(X86::EAX, RegState::Kill)
642 .setMIFlag(MachineInstr::FrameSetup);
646 // Handle the 64-bit Windows ABI case where we need to call __chkstk.
647 // Function prologue is responsible for adjusting the stack pointer.
648 BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::RAX)
650 .setMIFlag(MachineInstr::FrameSetup);
652 // Allocate NumBytes-4 bytes on stack in case of isEAXAlive.
653 // We'll also use 4 already allocated bytes for EAX.
654 BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
655 .addImm(isEAXAlive ? NumBytes - 4 : NumBytes)
656 .setMIFlag(MachineInstr::FrameSetup);
659 BuildMI(MBB, MBBI, DL,
660 TII.get(Is64Bit ? X86::W64ALLOCA : X86::CALLpcrel32))
661 .addExternalSymbol(StackProbeSymbol)
662 .addReg(StackPtr, RegState::Define | RegState::Implicit)
663 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit)
664 .setMIFlag(MachineInstr::FrameSetup);
    // MSVC x64's __chkstk and cygwin/mingw's ___chkstk_ms do not adjust %rsp
    // themselves. They also do not clobber %rax, so we can reuse it when
    // adjusting %rsp.
    BuildMI(MBB, MBBI, DL, TII.get(X86::SUB64rr), StackPtr)
      .addReg(StackPtr)
      .addReg(X86::RAX)
      .setMIFlag(MachineInstr::FrameSetup);
677 MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm),
679 StackPtr, false, NumBytes - 4);
680 MI->setFlag(MachineInstr::FrameSetup);
681 MBB.insert(MBBI, MI);
684 emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit, IsLP64,
685 UseLEA, TII, *RegInfo);
  // If we need a base pointer, set it up here. It's whatever the value
  // of the stack pointer is at this point. Any variable size objects
  // will be allocated after this, so we can still use the base pointer
  // to reference locals.
691 if (RegInfo->hasBasePointer(MF)) {
692 // Update the frame pointer with the current stack pointer.
693 unsigned Opc = Is64Bit ? X86::MOV64rr : X86::MOV32rr;
694 BuildMI(MBB, MBBI, DL, TII.get(Opc), BasePtr)
696 .setMIFlag(MachineInstr::FrameSetup);
699 if (( (!HasFP && NumBytes) || PushedRegs) && needsFrameMoves) {
700 // Mark end of stack pointer adjustment.
701 if (!HasFP && NumBytes) {
702 // Define the current CFA rule to use the provided offset.
704 unsigned CFIIndex = MMI.addFrameInst(
705 MCCFIInstruction::createDefCfaOffset(nullptr,
706 -StackSize + stackGrowth));
708 BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
709 .addCFIIndex(CFIIndex);
712 // Emit DWARF info specifying the offsets of the callee-saved registers.
714 emitCalleeSavedFrameMoves(MBB, MBBI, DL, HasFP ? FramePtr : StackPtr);
718 void X86FrameLowering::emitEpilogue(MachineFunction &MF,
719 MachineBasicBlock &MBB) const {
720 const MachineFrameInfo *MFI = MF.getFrameInfo();
721 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
722 const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
723 const X86InstrInfo &TII = *TM.getInstrInfo();
724 MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
725 assert(MBBI != MBB.end() && "Returning block has no instructions");
726 unsigned RetOpcode = MBBI->getOpcode();
727 DebugLoc DL = MBBI->getDebugLoc();
728 bool Is64Bit = STI.is64Bit();
729 bool IsLP64 = STI.isTarget64BitLP64();
730 bool UseLEA = STI.useLeaForSP();
731 unsigned StackAlign = getStackAlignment();
732 unsigned SlotSize = RegInfo->getSlotSize();
733 unsigned FramePtr = RegInfo->getFrameRegister(MF);
734 unsigned StackPtr = RegInfo->getStackRegister();
738 llvm_unreachable("Can only insert epilog into returning blocks");
743 case X86::TCRETURNdi:
744 case X86::TCRETURNri:
745 case X86::TCRETURNmi:
746 case X86::TCRETURNdi64:
747 case X86::TCRETURNri64:
748 case X86::TCRETURNmi64:
750 case X86::EH_RETURN64:
751 break; // These are ok
754 // Get the number of bytes to allocate from the FrameInfo.
755 uint64_t StackSize = MFI->getStackSize();
756 uint64_t MaxAlign = MFI->getMaxAlignment();
757 unsigned CSSize = X86FI->getCalleeSavedFrameSize();
758 uint64_t NumBytes = 0;
  // If we're forcing a stack realignment we can't rely on just the frame
  // info, we need to know the ABI stack alignment as well in case we
  // have a call out. Otherwise just make sure we have some alignment - we'll
  // go with the minimum.
  if (ForceStackAlign) {
    if (MFI->hasCalls())
      MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
    else
      MaxAlign = MaxAlign ? MaxAlign : 4;
  }
  if (hasFP(MF)) {
    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (RegInfo->needsStackRealignment(MF)) {
      // Callee-saved registers were pushed on stack before the stack
      // was realigned.
      FrameSize -= CSSize;
      NumBytes = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;
    } else {
      NumBytes = FrameSize - CSSize;
    }

    // Pop EBP.
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::POP64r : X86::POP32r), FramePtr);
  } else {
    NumBytes = StackSize - CSSize;
  }
790 // Skip the callee-saved pop instructions.
791 while (MBBI != MBB.begin()) {
792 MachineBasicBlock::iterator PI = std::prev(MBBI);
793 unsigned Opc = PI->getOpcode();
    if (Opc != X86::POP32r && Opc != X86::POP64r && Opc != X86::DBG_VALUE &&
        !PI->isTerminator())
      break;

    --MBBI;
  }
  MachineBasicBlock::iterator FirstCSPop = MBBI;
803 DL = MBBI->getDebugLoc();
805 // If there is an ADD32ri or SUB32ri of ESP immediately before this
806 // instruction, merge the two instructions.
807 if (NumBytes || MFI->hasVarSizedObjects())
808 mergeSPUpdatesUp(MBB, MBBI, StackPtr, &NumBytes);
  // If dynamic alloca is used, then reset ESP to point to the last callee-saved
  // slot before popping them off! The same applies when the stack was
  // realigned.
  if (RegInfo->needsStackRealignment(MF) || MFI->hasVarSizedObjects()) {
    if (RegInfo->needsStackRealignment(MF))
      MBBI = FirstCSPop;
    if (CSSize != 0) {
      unsigned Opc = getLEArOpcode(IsLP64);
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),
                   FramePtr, false, -CSSize);
    } else {
      unsigned Opc = (Is64Bit ? X86::MOV64rr : X86::MOV32rr);
      BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
        .addReg(FramePtr);
    }
825 } else if (NumBytes) {
826 // Adjust stack pointer back: ESP += numbytes.
    emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, IsLP64, UseLEA,
                 TII, *RegInfo);
  }

  // We're returning from function via eh_return.
832 if (RetOpcode == X86::EH_RETURN || RetOpcode == X86::EH_RETURN64) {
833 MBBI = MBB.getLastNonDebugInstr();
834 MachineOperand &DestAddr = MBBI->getOperand(0);
835 assert(DestAddr.isReg() && "Offset should be in register!");
836 BuildMI(MBB, MBBI, DL,
837 TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
838 StackPtr).addReg(DestAddr.getReg());
839 } else if (RetOpcode == X86::TCRETURNri || RetOpcode == X86::TCRETURNdi ||
840 RetOpcode == X86::TCRETURNmi ||
841 RetOpcode == X86::TCRETURNri64 || RetOpcode == X86::TCRETURNdi64 ||
842 RetOpcode == X86::TCRETURNmi64) {
843 bool isMem = RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64;
844 // Tail call return: adjust the stack pointer and jump to callee.
845 MBBI = MBB.getLastNonDebugInstr();
846 MachineOperand &JumpTarget = MBBI->getOperand(0);
847 MachineOperand &StackAdjust = MBBI->getOperand(isMem ? 5 : 1);
848 assert(StackAdjust.isImm() && "Expecting immediate value.");
850 // Adjust stack pointer.
851 int StackAdj = StackAdjust.getImm();
852 int MaxTCDelta = X86FI->getTCReturnAddrDelta();
    int Offset = 0;
    assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive");

    // Incorporate the retaddr area.
    Offset = StackAdj - MaxTCDelta;
858 assert(Offset >= 0 && "Offset should never be negative");
861 // Check for possible merge with preceding ADD instruction.
862 Offset += mergeSPUpdates(MBB, MBBI, StackPtr, true);
863 emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, IsLP64,
864 UseLEA, TII, *RegInfo);
867 // Jump to label or value in register.
868 if (RetOpcode == X86::TCRETURNdi || RetOpcode == X86::TCRETURNdi64) {
869 MachineInstrBuilder MIB =
870 BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNdi)
871 ? X86::TAILJMPd : X86::TAILJMPd64));
872 if (JumpTarget.isGlobal())
873 MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
874 JumpTarget.getTargetFlags());
876 assert(JumpTarget.isSymbol());
877 MIB.addExternalSymbol(JumpTarget.getSymbolName(),
878 JumpTarget.getTargetFlags());
880 } else if (RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64) {
881 MachineInstrBuilder MIB =
882 BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNmi)
883 ? X86::TAILJMPm : X86::TAILJMPm64));
884 for (unsigned i = 0; i != 5; ++i)
885 MIB.addOperand(MBBI->getOperand(i));
886 } else if (RetOpcode == X86::TCRETURNri64) {
887 BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr64)).
888 addReg(JumpTarget.getReg(), RegState::Kill);
890 BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr)).
891 addReg(JumpTarget.getReg(), RegState::Kill);
894 MachineInstr *NewMI = std::prev(MBBI);
895 NewMI->copyImplicitOps(MF, MBBI);
    // Delete the pseudo instruction TCRETURN.
    MBB.erase(MBBI);
899 } else if ((RetOpcode == X86::RETQ || RetOpcode == X86::RETL ||
900 RetOpcode == X86::RETIQ || RetOpcode == X86::RETIL) &&
901 (X86FI->getTCReturnAddrDelta() < 0)) {
902 // Add the return addr area delta back since we are not tail calling.
903 int delta = -1*X86FI->getTCReturnAddrDelta();
904 MBBI = MBB.getLastNonDebugInstr();
906 // Check for possible merge with preceding ADD instruction.
907 delta += mergeSPUpdates(MBB, MBBI, StackPtr, true);
    emitSPUpdate(MBB, MBBI, StackPtr, delta, Is64Bit, IsLP64, UseLEA, TII,
                 *RegInfo);
  }
}
int X86FrameLowering::getFrameIndexOffset(const MachineFunction &MF,
                                           int FI) const {
915 const X86RegisterInfo *RegInfo =
916 static_cast<const X86RegisterInfo*>(MF.getTarget().getRegisterInfo());
917 const MachineFrameInfo *MFI = MF.getFrameInfo();
918 int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea();
919 uint64_t StackSize = MFI->getStackSize();
921 if (RegInfo->hasBasePointer(MF)) {
922 assert (hasFP(MF) && "VLAs and dynamic stack realign, but no FP?!");
924 // Skip the saved EBP.
925 return Offset + RegInfo->getSlotSize();
927 assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
928 return Offset + StackSize;
930 } else if (RegInfo->needsStackRealignment(MF)) {
932 // Skip the saved EBP.
933 return Offset + RegInfo->getSlotSize();
935 assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
936 return Offset + StackSize;
938 // FIXME: Support tail calls
941 return Offset + StackSize;
943 // Skip the saved EBP.
944 Offset += RegInfo->getSlotSize();
946 // Skip the RETADDR move area
947 const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
948 int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
949 if (TailCallReturnAddrDelta < 0)
950 Offset -= TailCallReturnAddrDelta;
956 int X86FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
957 unsigned &FrameReg) const {
958 const X86RegisterInfo *RegInfo =
959 static_cast<const X86RegisterInfo*>(MF.getTarget().getRegisterInfo());
  // We can't calculate offset from frame pointer if the stack is realigned,
  // so enforce usage of stack/base pointer. The base pointer is used when we
  // have dynamic allocas in addition to dynamic realignment.
963 if (RegInfo->hasBasePointer(MF))
964 FrameReg = RegInfo->getBaseRegister();
965 else if (RegInfo->needsStackRealignment(MF))
966 FrameReg = RegInfo->getStackRegister();
968 FrameReg = RegInfo->getFrameRegister(MF);
969 return getFrameIndexOffset(MF, FI);
972 bool X86FrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
973 MachineBasicBlock::iterator MI,
974 const std::vector<CalleeSavedInfo> &CSI,
975 const TargetRegisterInfo *TRI) const {
979 DebugLoc DL = MBB.findDebugLoc(MI);
981 MachineFunction &MF = *MBB.getParent();
983 unsigned SlotSize = STI.is64Bit() ? 8 : 4;
984 unsigned FPReg = TRI->getFrameRegister(MF);
985 unsigned CalleeFrameSize = 0;
987 const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
988 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
990 // Push GPRs. It increases frame size.
991 unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
992 for (unsigned i = CSI.size(); i != 0; --i) {
993 unsigned Reg = CSI[i-1].getReg();
    if (!X86::GR64RegClass.contains(Reg) &&
        !X86::GR32RegClass.contains(Reg))
      continue;
    // Add the callee-saved register as live-in. It's killed at the spill.
    MBB.addLiveIn(Reg);
    if (Reg == FPReg)
      // X86RegisterInfo::emitPrologue will handle spilling of frame register.
      continue;
    CalleeFrameSize += SlotSize;
1003 BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, RegState::Kill)
1004 .setMIFlag(MachineInstr::FrameSetup);
1007 X86FI->setCalleeSavedFrameSize(CalleeFrameSize);
1009 // Make XMM regs spilled. X86 does not have ability of push/pop XMM.
1010 // It can be done by spilling XMMs to stack frame.
1011 // Note that only Win64 ABI might spill XMMs.
1012 for (unsigned i = CSI.size(); i != 0; --i) {
1013 unsigned Reg = CSI[i-1].getReg();
    if (X86::GR64RegClass.contains(Reg) ||
        X86::GR32RegClass.contains(Reg))
      continue;
    // Add the callee-saved register as live-in. It's killed at the spill.
    MBB.addLiveIn(Reg);
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    TII.storeRegToStackSlot(MBB, MI, Reg, true, CSI[i-1].getFrameIdx(),
                            RC, TRI);
  }

  return true;
}
1027 bool X86FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
1028 MachineBasicBlock::iterator MI,
1029 const std::vector<CalleeSavedInfo> &CSI,
1030 const TargetRegisterInfo *TRI) const {
1034 DebugLoc DL = MBB.findDebugLoc(MI);
1036 MachineFunction &MF = *MBB.getParent();
1037 const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
1039 // Reload XMMs from stack frame.
1040 for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
1041 unsigned Reg = CSI[i].getReg();
1042 if (X86::GR64RegClass.contains(Reg) ||
1043 X86::GR32RegClass.contains(Reg))
1045 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
1046 TII.loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(),
1051 unsigned FPReg = TRI->getFrameRegister(MF);
1052 unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;
1053 for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
1054 unsigned Reg = CSI[i].getReg();
    if (!X86::GR64RegClass.contains(Reg) &&
        !X86::GR32RegClass.contains(Reg))
      continue;
    if (Reg == FPReg)
      // X86RegisterInfo::emitEpilogue will handle restoring of frame register.
      continue;
    BuildMI(MBB, MI, DL, TII.get(Opc), Reg);
  }

  return true;
}

void
1067 X86FrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
1068 RegScavenger *RS) const {
1069 MachineFrameInfo *MFI = MF.getFrameInfo();
1070 const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
1071 unsigned SlotSize = RegInfo->getSlotSize();
1073 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
1074 int64_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
1076 if (TailCallReturnAddrDelta < 0) {
1077 // create RETURNADDR area
1086 MFI->CreateFixedObject(-TailCallReturnAddrDelta,
1087 TailCallReturnAddrDelta - SlotSize, true);
1091 assert((TailCallReturnAddrDelta <= 0) &&
1092 "The Delta should always be zero or negative");
1093 const TargetFrameLowering &TFI = *MF.getTarget().getFrameLowering();
1095 // Create a frame entry for the EBP register that must be saved.
1096 int FrameIdx = MFI->CreateFixedObject(SlotSize,
1098 TFI.getOffsetOfLocalArea() +
1099 TailCallReturnAddrDelta,
1101 assert(FrameIdx == MFI->getObjectIndexBegin() &&
1102 "Slot for EBP register must be last in order to be found!");
1106 // Spill the BasePtr if it's used.
1107 if (RegInfo->hasBasePointer(MF))
1108 MF.getRegInfo().setPhysRegUsed(RegInfo->getBaseRegister());
}

static bool
HasNestArgument(const MachineFunction *MF) {
  const Function *F = MF->getFunction();
  for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
       I != E; I++) {
    if (I->hasNestAttr())
      return true;
  }
  return false;
}
/// GetScratchRegister - Get a temp register for performing work in the
/// segmented stack and the Erlang/HiPE stack prologue. Depending on platform
/// and the properties of the function either one or two registers will be
/// needed. Set primary to true for the first register, false for the second.
1127 GetScratchRegister(bool Is64Bit, const MachineFunction &MF, bool Primary) {
1128 CallingConv::ID CallingConvention = MF.getFunction()->getCallingConv();
1131 if (CallingConvention == CallingConv::HiPE) {
1133 return Primary ? X86::R14 : X86::R13;
1135 return Primary ? X86::EBX : X86::EDI;
1139 return Primary ? X86::R11 : X86::R12;
1141 bool IsNested = HasNestArgument(&MF);
1143 if (CallingConvention == CallingConv::X86_FastCall ||
1144 CallingConvention == CallingConv::Fast) {
1146 report_fatal_error("Segmented stacks does not support fastcall with "
1147 "nested function.");
1148 return Primary ? X86::EAX : X86::ECX;
1151 return Primary ? X86::EDX : X86::EAX;
1152 return Primary ? X86::ECX : X86::EAX;
// The stack limit in the TCB is set to this many bytes above the actual stack
// limit.
1157 static const uint64_t kSplitStackAvailable = 256;
1160 X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
1161 MachineBasicBlock &prologueMBB = MF.front();
1162 MachineFrameInfo *MFI = MF.getFrameInfo();
  const X86InstrInfo &TII = *TM.getInstrInfo();
  uint64_t StackSize;
  bool Is64Bit = STI.is64Bit();
1166 unsigned TlsReg, TlsOffset;
1169 unsigned ScratchReg = GetScratchRegister(Is64Bit, MF, true);
1170 assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
1171 "Scratch register is live-in");
1173 if (MF.getFunction()->isVarArg())
1174 report_fatal_error("Segmented stacks do not support vararg functions.");
1175 if (!STI.isTargetLinux() && !STI.isTargetDarwin() &&
1176 !STI.isTargetWin32() && !STI.isTargetWin64() && !STI.isTargetFreeBSD())
1177 report_fatal_error("Segmented stacks not supported on this platform.");
  // Eventually StackSize will be calculated by a link-time pass, which will
  // also decide whether checking code needs to be injected into this
  // particular prologue.
  StackSize = MFI->getStackSize();
  // Do not generate a prologue for functions with a stack of size zero.
  if (StackSize == 0 && !MFI->adjustsStack())
    return;
1188 MachineBasicBlock *allocMBB = MF.CreateMachineBasicBlock();
1189 MachineBasicBlock *checkMBB = MF.CreateMachineBasicBlock();
1190 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
1191 bool IsNested = false;
  // We need to know if the function has a nest argument only in 64 bit mode.
  if (Is64Bit)
    IsNested = HasNestArgument(&MF);
  // The MOV R10, RAX needs to be in a different block, since the RET we emit in
  // allocMBB needs to be the last (terminating) instruction.
1200 for (MachineBasicBlock::livein_iterator i = prologueMBB.livein_begin(),
1201 e = prologueMBB.livein_end(); i != e; i++) {
1202 allocMBB->addLiveIn(*i);
1203 checkMBB->addLiveIn(*i);
  }

  if (IsNested)
    allocMBB->addLiveIn(X86::R10);
1209 MF.push_front(allocMBB);
1210 MF.push_front(checkMBB);
1212 // When the frame size is less than 256 we just compare the stack
1213 // boundary directly to the value of the stack pointer, per gcc.
1214 bool CompareStackPointer = StackSize < kSplitStackAvailable;
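  //
  // For instance (illustrative): a 64-byte frame is already covered by the
  // 256-byte slack in kSplitStackAvailable, so the stack pointer itself is
  // compared against the stacklet limit; a 4096-byte frame instead compares
  // "SP - StackSize" computed into the scratch register below.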
  // Read the limit of the current stacklet off the stack_guard location.
  if (Is64Bit) {
    if (STI.isTargetLinux()) {
1221 } else if (STI.isTargetDarwin()) {
1223 TlsOffset = 0x60 + 90*8; // See pthread_machdep.h. Steal TLS slot 90.
1224 } else if (STI.isTargetWin64()) {
1226 TlsOffset = 0x28; // pvArbitrary, reserved for application use
1227 } else if (STI.isTargetFreeBSD()) {
1231 report_fatal_error("Segmented stacks not supported on this platform.");
1234 if (CompareStackPointer)
1235 ScratchReg = X86::RSP;
1237 BuildMI(checkMBB, DL, TII.get(X86::LEA64r), ScratchReg).addReg(X86::RSP)
1238 .addImm(1).addReg(0).addImm(-StackSize).addReg(0);
1240 BuildMI(checkMBB, DL, TII.get(X86::CMP64rm)).addReg(ScratchReg)
1241 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg);
  } else {
    if (STI.isTargetLinux()) {
1246 } else if (STI.isTargetDarwin()) {
1248 TlsOffset = 0x48 + 90*4;
1249 } else if (STI.isTargetWin32()) {
1251 TlsOffset = 0x14; // pvArbitrary, reserved for application use
1252 } else if (STI.isTargetFreeBSD()) {
1253 report_fatal_error("Segmented stacks not supported on FreeBSD i386.");
1255 report_fatal_error("Segmented stacks not supported on this platform.");
1258 if (CompareStackPointer)
1259 ScratchReg = X86::ESP;
1261 BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP)
1262 .addImm(1).addReg(0).addImm(-StackSize).addReg(0);
1264 if (STI.isTargetLinux() || STI.isTargetWin32() || STI.isTargetWin64()) {
1265 BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)).addReg(ScratchReg)
1266 .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);
1267 } else if (STI.isTargetDarwin()) {
1269 // TlsOffset doesn't fit into a mod r/m byte so we need an extra register.
1270 unsigned ScratchReg2;
1272 if (CompareStackPointer) {
1273 // The primary scratch register is available for holding the TLS offset.
1274 ScratchReg2 = GetScratchRegister(Is64Bit, MF, true);
1275 SaveScratch2 = false;
1277 // Need to use a second register to hold the TLS offset
1278 ScratchReg2 = GetScratchRegister(Is64Bit, MF, false);
      // Unfortunately, with fastcc the second scratch register may hold an
      // argument.
      SaveScratch2 = MF.getRegInfo().isLiveIn(ScratchReg2);
1285 // If Scratch2 is live-in then it needs to be saved.
1286 assert((!MF.getRegInfo().isLiveIn(ScratchReg2) || SaveScratch2) &&
1287 "Scratch register is live-in and not saved");
1290 BuildMI(checkMBB, DL, TII.get(X86::PUSH32r))
1291 .addReg(ScratchReg2, RegState::Kill);
1293 BuildMI(checkMBB, DL, TII.get(X86::MOV32ri), ScratchReg2)
1295 BuildMI(checkMBB, DL, TII.get(X86::CMP32rm))
1297 .addReg(ScratchReg2).addImm(1).addReg(0)
1302 BuildMI(checkMBB, DL, TII.get(X86::POP32r), ScratchReg2);
1306 // This jump is taken if SP >= (Stacklet Limit + Stack Space required).
1307 // It jumps to normal execution of the function body.
1308 BuildMI(checkMBB, DL, TII.get(X86::JA_4)).addMBB(&prologueMBB);
1310 // On 32 bit we first push the arguments size and then the frame size. On 64
1311 // bit, we pass the stack frame size in r10 and the argument size in r11.
1313 // Functions with nested arguments use R10, so it needs to be saved across
1314 // the call to _morestack
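  //
  // Illustrative 64-bit shape of what allocMBB ends up containing (the exact
  // instructions are built below):
  //
  //     movabsq $<frame size>, %r10
  //     movabsq $<argument size>, %r11
  //     callq   __morestack
  //     retq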
1317 BuildMI(allocMBB, DL, TII.get(X86::MOV64rr), X86::RAX).addReg(X86::R10);
1319 BuildMI(allocMBB, DL, TII.get(X86::MOV64ri), X86::R10)
1321 BuildMI(allocMBB, DL, TII.get(X86::MOV64ri), X86::R11)
1322 .addImm(X86FI->getArgumentStackSize());
1323 MF.getRegInfo().setPhysRegUsed(X86::R10);
1324 MF.getRegInfo().setPhysRegUsed(X86::R11);
1326 BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
1327 .addImm(X86FI->getArgumentStackSize());
1328 BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
1332 // __morestack is in libgcc
1334 BuildMI(allocMBB, DL, TII.get(X86::CALL64pcrel32))
1335 .addExternalSymbol("__morestack");
1337 BuildMI(allocMBB, DL, TII.get(X86::CALLpcrel32))
1338 .addExternalSymbol("__morestack");
1341 BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET_RESTORE_R10));
1343 BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET));
1345 allocMBB->addSuccessor(&prologueMBB);
1347 checkMBB->addSuccessor(allocMBB);
1348 checkMBB->addSuccessor(&prologueMBB);
/// Erlang programs may need a special prologue to handle the stack size they
/// might need at runtime. That is because Erlang/OTP does not implement a C
/// stack but uses a custom implementation of hybrid stack/heap architecture.
/// (for more information see Eric Stenman's Ph.D. thesis:
/// http://publications.uu.se/uu/fulltext/nbn_se_uu_diva-2688.pdf)
///
/// CheckStack:
///       temp0 = sp - MaxStack
///       if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
/// OldStart:
///       ...
/// IncStack:
///       call inc_stack   # doubles the stack space
///       temp0 = sp - MaxStack
///       if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
1370 void X86FrameLowering::adjustForHiPEPrologue(MachineFunction &MF) const {
1371 const X86InstrInfo &TII = *TM.getInstrInfo();
1372 MachineFrameInfo *MFI = MF.getFrameInfo();
1373 const unsigned SlotSize = TM.getRegisterInfo()->getSlotSize();
1374 const bool Is64Bit = STI.is64Bit();
1376 // HiPE-specific values
1377 const unsigned HipeLeafWords = 24;
1378 const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5;
1379 const unsigned Guaranteed = HipeLeafWords * SlotSize;
1380 unsigned CallerStkArity = MF.getFunction()->arg_size() > CCRegisteredArgs ?
1381 MF.getFunction()->arg_size() - CCRegisteredArgs : 0;
1382 unsigned MaxStack = MFI->getStackSize() + CallerStkArity*SlotSize + SlotSize;
1384 assert(STI.isTargetLinux() &&
1385 "HiPE prologue is only supported on Linux operating systems.");
1387 // Compute the largest caller's frame that is needed to fit the callees'
1388 // frames. This 'MaxStack' is computed from:
1390 // a) the fixed frame size, which is the space needed for all spilled temps,
1391 // b) outgoing on-stack parameter areas, and
1392 // c) the minimum stack space this function needs to make available for the
1393 // functions it calls (a tunable ABI property).
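  //
  // Rough example (illustrative numbers): on x86-64, SlotSize is 8 and
  // CCRegisteredArgs is 6, so a function with a 40-byte frame and 8 formal
  // arguments starts with MaxStack = 40 + 2*8 + 8 = 64 bytes before the
  // call-site scan below possibly raises it further.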
1394 if (MFI->hasCalls()) {
1395 unsigned MoreStackForCalls = 0;
1397 for (MachineFunction::iterator MBBI = MF.begin(), MBBE = MF.end();
1398 MBBI != MBBE; ++MBBI)
1399 for (MachineBasicBlock::iterator MI = MBBI->begin(), ME = MBBI->end();
1404 // Get callee operand.
1405 const MachineOperand &MO = MI->getOperand(0);
1407 // Only take account of global function calls (no closures etc.).
1411 const Function *F = dyn_cast<Function>(MO.getGlobal());
1415 // Do not update 'MaxStack' for primitive and built-in functions
1416 // (encoded with names either starting with "erlang."/"bif_" or not
1417 // having a ".", such as a simple <Module>.<Function>.<Arity>, or an
          // "_", such as the BIF "suspend_0") as they are executed on another
          // stack.
          if (F->getName().find("erlang.") != StringRef::npos ||
1421 F->getName().find("bif_") != StringRef::npos ||
1422 F->getName().find_first_of("._") == StringRef::npos)
1425 unsigned CalleeStkArity =
1426 F->arg_size() > CCRegisteredArgs ? F->arg_size()-CCRegisteredArgs : 0;
1427 if (HipeLeafWords - 1 > CalleeStkArity)
1428 MoreStackForCalls = std::max(MoreStackForCalls,
1429 (HipeLeafWords - 1 - CalleeStkArity) * SlotSize);
1431 MaxStack += MoreStackForCalls;
1434 // If the stack frame needed is larger than the guaranteed then runtime checks
1435 // and calls to "inc_stack_0" BIF should be inserted in the assembly prologue.
1436 if (MaxStack > Guaranteed) {
1437 MachineBasicBlock &prologueMBB = MF.front();
1438 MachineBasicBlock *stackCheckMBB = MF.CreateMachineBasicBlock();
1439 MachineBasicBlock *incStackMBB = MF.CreateMachineBasicBlock();
1441 for (MachineBasicBlock::livein_iterator I = prologueMBB.livein_begin(),
1442 E = prologueMBB.livein_end(); I != E; I++) {
1443 stackCheckMBB->addLiveIn(*I);
1444 incStackMBB->addLiveIn(*I);
1447 MF.push_front(incStackMBB);
1448 MF.push_front(stackCheckMBB);
1450 unsigned ScratchReg, SPReg, PReg, SPLimitOffset;
1451 unsigned LEAop, CMPop, CALLop;
1455 LEAop = X86::LEA64r;
1456 CMPop = X86::CMP64rm;
1457 CALLop = X86::CALL64pcrel32;
1458 SPLimitOffset = 0x90;
1462 LEAop = X86::LEA32r;
1463 CMPop = X86::CMP32rm;
1464 CALLop = X86::CALLpcrel32;
1465 SPLimitOffset = 0x4c;
1468 ScratchReg = GetScratchRegister(Is64Bit, MF, true);
1469 assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
1470 "HiPE prologue scratch register is live-in");
1472 // Create new MBB for StackCheck:
1473 addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(LEAop), ScratchReg),
1474 SPReg, false, -MaxStack);
1475 // SPLimitOffset is in a fixed heap location (pointed by BP).
1476 addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(CMPop))
1477 .addReg(ScratchReg), PReg, false, SPLimitOffset);
1478 BuildMI(stackCheckMBB, DL, TII.get(X86::JAE_4)).addMBB(&prologueMBB);
1480 // Create new MBB for IncStack:
1481 BuildMI(incStackMBB, DL, TII.get(CALLop)).
1482 addExternalSymbol("inc_stack_0");
1483 addRegOffset(BuildMI(incStackMBB, DL, TII.get(LEAop), ScratchReg),
1484 SPReg, false, -MaxStack);
1485 addRegOffset(BuildMI(incStackMBB, DL, TII.get(CMPop))
1486 .addReg(ScratchReg), PReg, false, SPLimitOffset);
1487 BuildMI(incStackMBB, DL, TII.get(X86::JLE_4)).addMBB(incStackMBB);
1489 stackCheckMBB->addSuccessor(&prologueMBB, 99);
1490 stackCheckMBB->addSuccessor(incStackMBB, 1);
1491 incStackMBB->addSuccessor(&prologueMBB, 99);
1492 incStackMBB->addSuccessor(incStackMBB, 1);
1499 void X86FrameLowering::
1500 eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
1501 MachineBasicBlock::iterator I) const {
1502 const X86InstrInfo &TII = *TM.getInstrInfo();
1503 const X86RegisterInfo &RegInfo = *TM.getRegisterInfo();
1504 unsigned StackPtr = RegInfo.getStackRegister();
  bool reserveCallFrame = hasReservedCallFrame(MF);
  int Opcode = I->getOpcode();
  bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();
  bool IsLP64 = STI.isTarget64BitLP64();
  DebugLoc DL = I->getDebugLoc();
  uint64_t Amount = !reserveCallFrame ? I->getOperand(0).getImm() : 0;
  uint64_t CalleeAmt = isDestroy ? I->getOperand(1).getImm() : 0;

  if (!reserveCallFrame) {
    // If the stack pointer can be changed after prologue, turn the
    // adjcallstackdown instruction into a 'sub ESP, <amt>' and the
    // adjcallstackup instruction into 'add ESP, <amt>'.
    // TODO: consider using push / pop instead of sub + store / add
1522 // We need to keep the stack aligned properly. To do this, we round the
1523 // amount of space needed for the outgoing arguments up to the next
1524 // alignment boundary.
1525 unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
1526 Amount = (Amount + StackAlign - 1) / StackAlign * StackAlign;
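      // For example (illustrative), 20 bytes of outgoing arguments with a
      // 16-byte stack alignment reserve (20 + 15) & ~15 = 32 bytes, keeping
      // the stack pointer aligned across the call sequence.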
1528 MachineInstr *New = nullptr;
1529 if (Opcode == TII.getCallFrameSetupOpcode()) {
1530 New = BuildMI(MF, DL, TII.get(getSUBriOpcode(IsLP64, Amount)),
1535 assert(Opcode == TII.getCallFrameDestroyOpcode());
1537 // Factor out the amount the callee already popped.
1538 Amount -= CalleeAmt;
1541 unsigned Opc = getADDriOpcode(IsLP64, Amount);
1542 New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
1543 .addReg(StackPtr).addImm(Amount);
1548 // The EFLAGS implicit def is dead.
1549 New->getOperand(3).setIsDead();
1551 // Replace the pseudo instruction with a new instruction.
1558 if (Opcode == TII.getCallFrameDestroyOpcode() && CalleeAmt) {
1559 // If we are performing frame pointer elimination and if the callee pops
1560 // something off the stack pointer, add it back. We do this until we have
1561 // more advanced stack pointer tracking ability.
1562 unsigned Opc = getSUBriOpcode(IsLP64, CalleeAmt);
1563 MachineInstr *New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
1564 .addReg(StackPtr).addImm(CalleeAmt);
1566 // The EFLAGS implicit def is dead.
1567 New->getOperand(3).setIsDead();
1569 // We are not tracking the stack pointer adjustment by the callee, so make
1570 // sure we restore the stack pointer immediately after the call, there may
1571 // be spill code inserted between the CALL and ADJCALLSTACKUP instructions.
1572 MachineBasicBlock::iterator B = MBB.begin();
    while (I != B && !std::prev(I)->isCall())
      --I;
    MBB.insert(I, New);
  }
}