//===-- X86FrameLowering.cpp - X86 Frame Information ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of TargetFrameLowering class.
//
//===----------------------------------------------------------------------===//
#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

// FIXME: completely move here.
extern cl::opt<bool> ForceStackAlign;
bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  return !MF.getFrameInfo()->hasVarSizedObjects();
}
/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register. This is true if the function has variable sized allocas
/// or if frame pointer elimination is disabled.
bool X86FrameLowering::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineModuleInfo &MMI = MF.getMMI();
  const TargetRegisterInfo *RegInfo = TM.getRegisterInfo();

  return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
          RegInfo->needsStackRealignment(MF) ||
          MFI->hasVarSizedObjects() ||
          MFI->isFrameAddressTaken() || MFI->hasInlineAsmWithSPAdjust() ||
          MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
          MMI.callsUnwindInit() || MMI.callsEHReturn());
}
static unsigned getSUBriOpcode(unsigned IsLP64, int64_t Imm) {
  if (IsLP64) {
    if (isInt<8>(Imm))
      return X86::SUB64ri8;
    return X86::SUB64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::SUB32ri8;
    return X86::SUB32ri;
  }
}

static unsigned getADDriOpcode(unsigned IsLP64, int64_t Imm) {
  if (IsLP64) {
    if (isInt<8>(Imm))
      return X86::ADD64ri8;
    return X86::ADD64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::ADD32ri8;
    return X86::ADD32ri;
  }
}

static unsigned getLEArOpcode(unsigned IsLP64) {
  return IsLP64 ? X86::LEA64r : X86::LEA32r;
}
/// findDeadCallerSavedReg - Return a caller-saved register that isn't live
/// when it reaches the "return" instruction. We can then pop a stack object
/// to this register without worrying about clobbering it.
static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator &MBBI,
                                       const TargetRegisterInfo &TRI,
                                       bool Is64Bit) {
  const MachineFunction *MF = MBB.getParent();
  const Function *F = MF->getFunction();
  if (!F || MF->getMMI().callsEHReturn())
    return 0;

  static const uint16_t CallerSavedRegs32Bit[] = {
    X86::EAX, X86::EDX, X86::ECX, 0
  };

  static const uint16_t CallerSavedRegs64Bit[] = {
    X86::RAX, X86::RDX, X86::RCX, X86::RSI, X86::RDI,
    X86::R8,  X86::R9,  X86::R10, X86::R11, 0
  };

  unsigned Opc = MBBI->getOpcode();
  switch (Opc) {
  default: return 0;
  case X86::RETL:
  case X86::RETQ:
  case X86::RETIL:
  case X86::RETIQ:
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNmi:
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNmi64:
  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    SmallSet<uint16_t, 8> Uses;
    for (unsigned i = 0, e = MBBI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MBBI->getOperand(i);
      if (!MO.isReg() || MO.isDef())
        continue;
      unsigned Reg = MO.getReg();
      if (!Reg)
        continue;
      for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI)
        Uses.insert(*AI);
    }

    const uint16_t *CS = Is64Bit ? CallerSavedRegs64Bit : CallerSavedRegs32Bit;
    for (; *CS; ++CS)
      if (!Uses.count(*CS))
        return *CS;
  }
  }

  return 0;
}
/// emitSPUpdate - Emit a series of instructions to increment / decrement the
/// stack pointer by a constant value.
static
void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                  unsigned StackPtr, int64_t NumBytes,
                  bool Is64Bit, bool IsLP64, bool UseLEA,
                  const TargetInstrInfo &TII, const TargetRegisterInfo &TRI) {
  bool isSub = NumBytes < 0;
  uint64_t Offset = isSub ? -NumBytes : NumBytes;
  unsigned Opc;
  if (UseLEA)
    Opc = getLEArOpcode(IsLP64);
  else
    Opc = isSub
      ? getSUBriOpcode(IsLP64, Offset)
      : getADDriOpcode(IsLP64, Offset);

  // Immediates for ADD/SUB are limited to 32 bits, so very large adjustments
  // are emitted as a sequence of chunk-sized updates.
  uint64_t Chunk = (1LL << 31) - 1;
  DebugLoc DL = MBB.findDebugLoc(MBBI);

  while (Offset) {
    uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
    if (ThisVal == (Is64Bit ? 8 : 4)) {
      // Use push / pop instead.
      unsigned Reg = isSub
        ? (unsigned)(Is64Bit ? X86::RAX : X86::EAX)
        : findDeadCallerSavedReg(MBB, MBBI, TRI, Is64Bit);
      if (Reg) {
        Opc = isSub
          ? (Is64Bit ? X86::PUSH64r : X86::PUSH32r)
          : (Is64Bit ? X86::POP64r : X86::POP32r);
        MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc))
          .addReg(Reg, getDefRegState(!isSub) | getUndefRegState(isSub));
        if (isSub)
          MI->setFlag(MachineInstr::FrameSetup);
        Offset -= ThisVal;
        continue;
      }
    }

    MachineInstr *MI = nullptr;

    if (UseLEA) {
      MI = addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),
                        StackPtr, false, isSub ? -ThisVal : ThisVal);
    } else {
      MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
        .addReg(StackPtr)
        .addImm(ThisVal);
      MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
    }

    if (isSub)
      MI->setFlag(MachineInstr::FrameSetup);

    Offset -= ThisVal;
  }
}
/// mergeSPUpdatesUp - If the instruction before the given iterator is an
/// ADD/SUB/LEA adjustment of the stack pointer, fold its immediate into
/// *NumBytes and erase that instruction.
static
void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                      unsigned StackPtr, uint64_t *NumBytes = nullptr) {
  if (MBBI == MBB.begin()) return;

  MachineBasicBlock::iterator PI = std::prev(MBBI);
  unsigned Opc = PI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8 ||
       Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
      PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += PI->getOperand(2).getImm();
    MBB.erase(PI);
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= PI->getOperand(2).getImm();
    MBB.erase(PI);
  }
}
/// mergeSPUpdatesDown - If the instruction after the given iterator is an
/// ADD/SUB adjustment of the stack pointer, fold its immediate into *NumBytes
/// and erase that instruction.
static
void mergeSPUpdatesDown(MachineBasicBlock &MBB,
                        MachineBasicBlock::iterator &MBBI,
                        unsigned StackPtr, uint64_t *NumBytes = nullptr) {
  // FIXME:  THIS ISN'T RUN!!!
  return;

  if (MBBI == MBB.end()) return;

  MachineBasicBlock::iterator NI = std::next(MBBI);
  if (NI == MBB.end()) return;

  unsigned Opc = NI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  }
}
/// mergeSPUpdates - Check the instruction before/after the passed
/// instruction. If it is an ADD/SUB/LEA instruction it is deleted, and the
/// stack adjustment is returned as a positive value for ADD/LEA and a
/// negative value for SUB.
static int mergeSPUpdates(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MBBI,
                          unsigned StackPtr,
                          bool doMergeWithPrevious) {
  if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
      (!doMergeWithPrevious && MBBI == MBB.end()))
    return 0;

  MachineBasicBlock::iterator PI = doMergeWithPrevious ? std::prev(MBBI) : MBBI;
  MachineBasicBlock::iterator NI = doMergeWithPrevious ? nullptr
                                                       : std::next(MBBI);
  unsigned Opc = PI->getOpcode();
  int Offset = 0;

  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8 ||
       Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
      PI->getOperand(0).getReg() == StackPtr) {
    Offset += PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    Offset -= PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  }

  return Offset;
}
static bool isEAXLiveIn(MachineFunction &MF) {
  for (MachineRegisterInfo::livein_iterator II = MF.getRegInfo().livein_begin(),
       EE = MF.getRegInfo().livein_end(); II != EE; ++II) {
    unsigned Reg = II->first;

    if (Reg == X86::EAX || Reg == X86::AX ||
        Reg == X86::AH || Reg == X86::AL)
      return true;
  }

  return false;
}
void X86FrameLowering::emitCalleeSavedFrameMoves(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, DebugLoc DL,
    unsigned FramePtr) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
  const X86InstrInfo &TII = *TM.getInstrInfo();

  // Add callee saved registers to move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
  if (CSI.empty()) return;

  const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
  bool HasFP = hasFP(MF);

  // Calculate the number of bytes used for storing the return address.
  int stackGrowth = -RegInfo->getSlotSize();

  // FIXME: This is a dirty hack. The code itself is pretty messy right now.
  // It should be rewritten from scratch and generalized sometime.

  // Determine maximum offset (minimum due to stack growth).
  int64_t MaxOffset = 0;
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I)
    MaxOffset = std::min(MaxOffset,
                         MFI->getObjectOffset(I->getFrameIdx()));

  // Calculate offsets.
  int64_t saveAreaOffset = (HasFP ? 3 : 2) * stackGrowth;
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I) {
    int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
    unsigned Reg = I->getReg();
    Offset = MaxOffset - Offset + saveAreaOffset;

    // Don't output a new machine move if we're re-saving the frame
    // pointer. This happens when the PrologEpilogInserter has inserted an extra
    // "PUSH" of the frame pointer -- the "emitPrologue" method automatically
    // generates one when frame pointers are used. If we generate a "machine
    // move" for this extra "PUSH", the linker will lose track of the fact that
    // the frame pointer should have the value of the first "PUSH" when it's
    // used.
    //
    // FIXME: This looks inelegant. It's possibly correct, but it's covering up
    // another bug. I.e., one where we generate a prolog like this:
    //
    //     pushl  %ebp
    //     movl   %esp, %ebp
    //     pushl  %ebp
    //     pushl  %esi
    //      ...
    //
    // The immediate re-push of EBP is unnecessary. At the least, it's an
    // optimization bug. EBP can be used as a scratch register in certain
    // cases, but probably not when we have a frame pointer.
    if (HasFP && FramePtr == Reg)
      continue;

    unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
    unsigned CFIIndex =
        MMI.addFrameInst(MCCFIInstruction::createOffset(nullptr, DwarfReg,
                                                        Offset));
    BuildMI(MBB, MBBI, DL, TII.get(X86::CFI_INSTRUCTION)).addCFIIndex(CFIIndex);
  }
}
/// usesTheStack - This function checks if any of the users of EFLAGS
/// copies the EFLAGS. We know that the code that lowers COPY of EFLAGS has
/// to use the stack, and if we don't adjust the stack we clobber the first
/// frame index.
/// See X86InstrInfo::copyPhysReg.
static bool usesTheStack(const MachineFunction &MF) {
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  for (MachineRegisterInfo::reg_instr_iterator
       ri = MRI.reg_instr_begin(X86::EFLAGS), re = MRI.reg_instr_end();
       ri != re; ++ri)
    if (ri->isCopy())
      return true;

  return false;
}
/// emitPrologue - Push callee-saved registers onto the stack, which
/// automatically adjusts the stack pointer. Adjust the stack pointer to
/// allocate space for local variables. Also emit labels used by the exception
/// handler to generate the exception handling frames.
void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
  MachineBasicBlock &MBB = MF.front(); // Prologue goes in entry BB.
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *Fn = MF.getFunction();
  const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
  const X86InstrInfo &TII = *TM.getInstrInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  bool needsFrameMoves = MMI.hasDebugInfo() ||
    Fn->needsUnwindTableEntry();
  uint64_t MaxAlign  = MFI->getMaxAlignment(); // Desired stack alignment.
  uint64_t StackSize = MFI->getStackSize();    // Number of bytes to allocate.
  bool HasFP = hasFP(MF);
  bool Is64Bit = STI.is64Bit();
  bool IsLP64 = STI.isTarget64BitLP64();
  bool IsWin64 = STI.isTargetWin64();
  bool UseLEA = STI.useLeaForSP();
  unsigned StackAlign = getStackAlignment();
  unsigned SlotSize = RegInfo->getSlotSize();
  unsigned FramePtr = RegInfo->getFrameRegister(MF);
  unsigned StackPtr = RegInfo->getStackRegister();
  unsigned BasePtr = RegInfo->getBaseRegister();
  DebugLoc DL;

  // If we're forcing a stack realignment we can't rely on just the frame
  // info, we need to know the ABI stack alignment as well in case we
  // have a call out. Otherwise just make sure we have some alignment - we'll
  // go with the minimum SlotSize.
  if (ForceStackAlign) {
    if (MFI->hasCalls())
      MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
    else if (MaxAlign < SlotSize)
      MaxAlign = SlotSize;
  }

  // Add RETADDR move area to callee saved frame size.
  int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
  if (TailCallReturnAddrDelta < 0)
    X86FI->setCalleeSavedFrameSize(
      X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta);

  // If this is x86-64 and the Red Zone is not disabled, if we are a leaf
  // function, and use up to 128 bytes of stack space, don't have a frame
  // pointer, calls, or dynamic alloca then we do not need to adjust the
  // stack pointer (we fit in the Red Zone). We also check that we don't
  // push and pop from the stack.
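  // For example (illustrative only): a 64-byte frame in such a leaf function
  // is simply addressed below %rsp inside the 128-byte Red Zone, and no
  // "sub %rsp" is emitted at all.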
  if (Is64Bit && !Fn->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                                   Attribute::NoRedZone) &&
      !RegInfo->needsStackRealignment(MF) &&
      !MFI->hasVarSizedObjects() &&                     // No dynamic alloca.
      !MFI->adjustsStack() &&                           // No calls.
      !IsWin64 &&                                       // Win64 has no Red Zone
      !usesTheStack(MF) &&                              // Don't push and pop.
      !MF.shouldSplitStack()) {                         // Regular stack
    uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
    if (HasFP) MinSize += SlotSize;
    StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
    MFI->setStackSize(StackSize);
  }

  // Insert stack pointer adjustment for later moving of return addr. Only
  // applies to tail call optimized functions where the callee argument stack
  // size is bigger than the caller's.
  if (TailCallReturnAddrDelta < 0) {
    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL,
              TII.get(getSUBriOpcode(IsLP64, -TailCallReturnAddrDelta)),
              StackPtr)
        .addReg(StackPtr)
        .addImm(-TailCallReturnAddrDelta)
        .setMIFlag(MachineInstr::FrameSetup);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
  }

  // Mapping for machine moves:
  //
  //   DST: VirtualFP AND
  //        SRC: VirtualFP              => DW_CFA_def_cfa_offset
  //        ELSE                        => DW_CFA_def_cfa
  //
  //   SRC: VirtualFP AND
  //        DST: Register               => DW_CFA_def_cfa_register
  //
  //   ELSE
  //        OFFSET < 0                  => DW_CFA_offset_extended_sf
  //        REG < 64                    => DW_CFA_offset + Reg
  //        ELSE                        => DW_CFA_offset_extended
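  //
  // For example (illustrative only), a typical frame-pointer prologue
  //
  //     pushq  %rbp
  //     movq   %rsp, %rbp
  //     subq   $NN, %rsp
  //
  // is described by ".cfi_def_cfa_offset 16", ".cfi_offset %rbp, -16" and
  // ".cfi_def_cfa_register %rbp" emitted at the corresponding points below.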

  uint64_t NumBytes = 0;
  int stackGrowth = -SlotSize;

  if (HasFP) {
    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (RegInfo->needsStackRealignment(MF)) {
      // Callee-saved registers are pushed on stack before the stack
      // is realigned.
      FrameSize -= X86FI->getCalleeSavedFrameSize();
      NumBytes = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;
    } else {
      NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();
    }

    // Get the offset of the stack slot for the EBP register, which is
    // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
    // Update the frame offset adjustment.
    MFI->setOffsetAdjustment(-NumBytes);

    // Save EBP/RBP into the appropriate stack slot.
    BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
      .addReg(FramePtr, RegState::Kill)
      .setMIFlag(MachineInstr::FrameSetup);

    if (needsFrameMoves) {
      // Mark the place where EBP/RBP was saved.
      // Define the current CFA rule to use the provided offset.
      assert(StackSize);
      unsigned CFIIndex = MMI.addFrameInst(
          MCCFIInstruction::createDefCfaOffset(nullptr, 2 * stackGrowth));
      BuildMI(MBB, MBBI, DL, TII.get(X86::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);

      // Change the rule for the FramePtr to be an "offset" rule.
      unsigned DwarfFramePtr = RegInfo->getDwarfRegNum(FramePtr, true);
      CFIIndex = MMI.addFrameInst(
          MCCFIInstruction::createOffset(nullptr,
                                         DwarfFramePtr, 2 * stackGrowth));
      BuildMI(MBB, MBBI, DL, TII.get(X86::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
    }

    // Update EBP with the new base value.
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), FramePtr)
        .addReg(StackPtr)
        .setMIFlag(MachineInstr::FrameSetup);

    if (needsFrameMoves) {
      // Mark effective beginning of when frame pointer becomes valid.
      // Define the current CFA to use the EBP/RBP register.
      unsigned DwarfFramePtr = RegInfo->getDwarfRegNum(FramePtr, true);
      unsigned CFIIndex = MMI.addFrameInst(
          MCCFIInstruction::createDefCfaRegister(nullptr, DwarfFramePtr));
      BuildMI(MBB, MBBI, DL, TII.get(X86::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
    }

    // Mark the FramePtr as live-in in every block except the entry.
    for (MachineFunction::iterator I = std::next(MF.begin()), E = MF.end();
         I != E; ++I)
      I->addLiveIn(FramePtr);
  } else {
    NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
  }

  // Skip the callee-saved push instructions.
  bool PushedRegs = false;
  int StackOffset = 2 * stackGrowth;

  while (MBBI != MBB.end() &&
         (MBBI->getOpcode() == X86::PUSH32r ||
          MBBI->getOpcode() == X86::PUSH64r)) {
    PushedRegs = true;
    MBBI->setFlag(MachineInstr::FrameSetup);
    ++MBBI;

    if (!HasFP && needsFrameMoves) {
      // Mark callee-saved push instruction.
      // Define the current CFA rule to use the provided offset.
      assert(StackSize);
      unsigned CFIIndex = MMI.addFrameInst(
          MCCFIInstruction::createDefCfaOffset(nullptr, StackOffset));
      BuildMI(MBB, MBBI, DL, TII.get(X86::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
      StackOffset += stackGrowth;
    }
  }

  // Realign stack after we pushed callee-saved registers (so that we'll be
  // able to calculate their offsets from the frame pointer).
  //
  // NOTE: We push the registers before realigning the stack, so
  // vector callee-saved (xmm) registers may be saved w/o proper
  // alignment in this way. However, currently these regs are saved in
  // stack slots (see X86FrameLowering::spillCalleeSavedRegisters()), so
  // this shouldn't be a problem.
  if (RegInfo->needsStackRealignment(MF)) {
    assert(HasFP && "There should be a frame pointer if stack is realigned.");
    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL,
              TII.get(Is64Bit ? X86::AND64ri32 : X86::AND32ri), StackPtr)
      .addReg(StackPtr)
      .addImm(-MaxAlign)
      .setMIFlag(MachineInstr::FrameSetup);

    // The EFLAGS implicit def is dead.
    MI->getOperand(3).setIsDead();
  }

  // If there is a SUB32ri of ESP immediately before this instruction, merge
  // the two. This can be the case when tail call elimination is enabled and
  // the callee has more arguments than the caller.
  NumBytes -= mergeSPUpdates(MBB, MBBI, StackPtr, true);

  // If there is an ADD32ri or SUB32ri of ESP immediately after this
  // instruction, merge the two instructions.
  mergeSPUpdatesDown(MBB, MBBI, StackPtr, &NumBytes);
  // Adjust stack pointer: ESP -= numbytes.

  // Windows and cygwin/mingw require a prologue helper routine when allocating
  // more than 4K bytes on the stack. Windows uses __chkstk and cygwin/mingw
  // uses __alloca. __alloca and the 32-bit version of __chkstk will probe the
  // stack and adjust the stack pointer in one go. The 64-bit version of
  // __chkstk is only responsible for probing the stack. The 64-bit prologue is
  // responsible for adjusting the stack pointer. Touching the stack at 4K
  // increments is necessary to ensure that the guard pages used by the OS
  // virtual memory manager are allocated in correct sequence.
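  //
  // For example (illustrative only), allocating 0x5000 bytes on Win64 becomes
  //
  //     movq  $0x5000, %rax
  //     callq __chkstk
  //     subq  %rax, %rsp
  //
  // while on 32-bit Windows the probe routine itself performs the final
  // stack-pointer adjustment.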
  if (NumBytes >= 4096 && STI.isOSWindows() && !STI.isTargetMacho()) {
    const char *StackProbeSymbol;

    if (Is64Bit) {
      if (STI.isTargetCygMing()) {
        StackProbeSymbol = "___chkstk_ms";
      } else {
        StackProbeSymbol = "__chkstk";
      }
    } else if (STI.isTargetCygMing())
      StackProbeSymbol = "_alloca";
    else
      StackProbeSymbol = "_chkstk";

    // Check whether EAX is livein for this function.
    bool isEAXAlive = isEAXLiveIn(MF);

    if (isEAXAlive) {
      // Sanity check that EAX is not livein for this function.
      // It should not be, so throw an assert.
      assert(!Is64Bit && "EAX is livein in x64 case!");

      // Save EAX
      BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
        .addReg(X86::EAX, RegState::Kill)
        .setMIFlag(MachineInstr::FrameSetup);
    }

    if (Is64Bit) {
      // Handle the 64-bit Windows ABI case where we need to call __chkstk.
      // Function prologue is responsible for adjusting the stack pointer.
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::RAX)
        .addImm(NumBytes)
        .setMIFlag(MachineInstr::FrameSetup);
    } else {
      // Allocate NumBytes-4 bytes on stack in case of isEAXAlive.
      // We'll also use 4 already allocated bytes for EAX.
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
        .addImm(isEAXAlive ? NumBytes - 4 : NumBytes)
        .setMIFlag(MachineInstr::FrameSetup);
    }

    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::W64ALLOCA : X86::CALLpcrel32))
      .addExternalSymbol(StackProbeSymbol)
      .addReg(StackPtr,    RegState::Define | RegState::Implicit)
      .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit)
      .setMIFlag(MachineInstr::FrameSetup);

    if (Is64Bit) {
      // MSVC x64's __chkstk and cygwin/mingw's ___chkstk_ms do not adjust %rsp
      // themselves. They also do not clobber %rax, so we can reuse it when
      // adjusting %rsp.
      BuildMI(MBB, MBBI, DL, TII.get(X86::SUB64rr), StackPtr)
        .addReg(StackPtr)
        .addReg(X86::RAX)
        .setMIFlag(MachineInstr::FrameSetup);
    }

    if (isEAXAlive) {
      // Restore EAX
      MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm),
                                              X86::EAX),
                                      StackPtr, false, NumBytes - 4);
      MI->setFlag(MachineInstr::FrameSetup);
      MBB.insert(MBBI, MI);
    }
  } else if (NumBytes)
    emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit, IsLP64,
                 UseLEA, TII, *RegInfo);

  // If we need a base pointer, set it up here. It's whatever the value
  // of the stack pointer is at this point. Any variable size objects
  // will be allocated after this, so we can still use the base pointer
  // to reference locals.
  if (RegInfo->hasBasePointer(MF)) {
    // Update the base pointer with the current stack pointer.
    unsigned Opc = Is64Bit ? X86::MOV64rr : X86::MOV32rr;
    BuildMI(MBB, MBBI, DL, TII.get(Opc), BasePtr)
      .addReg(StackPtr)
      .setMIFlag(MachineInstr::FrameSetup);
  }

  if (((!HasFP && NumBytes) || PushedRegs) && needsFrameMoves) {
    // Mark end of stack pointer adjustment.
    if (!HasFP && NumBytes) {
      // Define the current CFA rule to use the provided offset.
      assert(StackSize);
      unsigned CFIIndex = MMI.addFrameInst(
          MCCFIInstruction::createDefCfaOffset(nullptr,
                                               -StackSize + stackGrowth));

      BuildMI(MBB, MBBI, DL, TII.get(X86::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
    }

    // Emit DWARF info specifying the offsets of the callee-saved registers.
    if (PushedRegs)
      emitCalleeSavedFrameMoves(MBB, MBBI, DL, HasFP ? FramePtr : StackPtr);
  }
}
void X86FrameLowering::emitEpilogue(MachineFunction &MF,
                                    MachineBasicBlock &MBB) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
  const X86InstrInfo &TII = *TM.getInstrInfo();
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  assert(MBBI != MBB.end() && "Returning block has no instructions");
  unsigned RetOpcode = MBBI->getOpcode();
  DebugLoc DL = MBBI->getDebugLoc();
  bool Is64Bit = STI.is64Bit();
  bool IsLP64 = STI.isTarget64BitLP64();
  bool UseLEA = STI.useLeaForSP();
  unsigned StackAlign = getStackAlignment();
  unsigned SlotSize = RegInfo->getSlotSize();
  unsigned FramePtr = RegInfo->getFrameRegister(MF);
  unsigned StackPtr = RegInfo->getStackRegister();

  switch (RetOpcode) {
  default:
    llvm_unreachable("Can only insert epilog into returning blocks");
  case X86::RETQ:
  case X86::RETL:
  case X86::RETIL:
  case X86::RETIQ:
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNmi:
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNmi64:
  case X86::EH_RETURN:
  case X86::EH_RETURN64:
    break;  // These are ok
  }
  // Get the number of bytes to allocate from the FrameInfo.
  uint64_t StackSize = MFI->getStackSize();
  uint64_t MaxAlign  = MFI->getMaxAlignment();
  unsigned CSSize = X86FI->getCalleeSavedFrameSize();
  uint64_t NumBytes = 0;

  // If we're forcing a stack realignment we can't rely on just the frame
  // info, we need to know the ABI stack alignment as well in case we
  // have a call out. Otherwise just make sure we have some alignment - we'll
  // go with the minimum.
  if (ForceStackAlign) {
    if (MFI->hasCalls())
      MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
    else
      MaxAlign = MaxAlign ? MaxAlign : 4;
  }

  if (hasFP(MF)) {
    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (RegInfo->needsStackRealignment(MF)) {
      // Callee-saved registers were pushed on stack before the stack
      // was realigned.
      FrameSize -= CSSize;
      NumBytes = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;
    } else {
      NumBytes = FrameSize - CSSize;
    }

    // Pop EBP.
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::POP64r : X86::POP32r), FramePtr);
  } else {
    NumBytes = StackSize - CSSize;
  }

  // Skip the callee-saved pop instructions.
  while (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PI = std::prev(MBBI);
    unsigned Opc = PI->getOpcode();

    if (Opc != X86::POP32r && Opc != X86::POP64r && Opc != X86::DBG_VALUE &&
        !PI->isTerminator())
      break;

    --MBBI;
  }
  MachineBasicBlock::iterator FirstCSPop = MBBI;

  DL = MBBI->getDebugLoc();

  // If there is an ADD32ri or SUB32ri of ESP immediately before this
  // instruction, merge the two instructions.
  if (NumBytes || MFI->hasVarSizedObjects())
    mergeSPUpdatesUp(MBB, MBBI, StackPtr, &NumBytes);

  // If dynamic alloca is used, then reset esp to point to the last callee-saved
  // slot before popping them off! The same applies when the stack was
  // realigned.
  if (RegInfo->needsStackRealignment(MF) || MFI->hasVarSizedObjects()) {
    if (RegInfo->needsStackRealignment(MF))
      MBBI = FirstCSPop;
    if (CSSize != 0) {
      unsigned Opc = getLEArOpcode(IsLP64);
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),
                   FramePtr, false, -CSSize);
    } else {
      unsigned Opc = (Is64Bit ? X86::MOV64rr : X86::MOV32rr);
      BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
        .addReg(FramePtr);
    }
  } else if (NumBytes) {
    // Adjust stack pointer back: ESP += numbytes.
    emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, IsLP64, UseLEA,
                 TII, *RegInfo);
  }

  // We're returning from function via eh_return.
  if (RetOpcode == X86::EH_RETURN || RetOpcode == X86::EH_RETURN64) {
    MBBI = MBB.getLastNonDebugInstr();
    MachineOperand &DestAddr = MBBI->getOperand(0);
    assert(DestAddr.isReg() && "Offset should be in register!");
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(DestAddr.getReg());
  } else if (RetOpcode == X86::TCRETURNri || RetOpcode == X86::TCRETURNdi ||
             RetOpcode == X86::TCRETURNmi ||
             RetOpcode == X86::TCRETURNri64 || RetOpcode == X86::TCRETURNdi64 ||
             RetOpcode == X86::TCRETURNmi64) {
    bool isMem = RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64;
    // Tail call return: adjust the stack pointer and jump to callee.
    MBBI = MBB.getLastNonDebugInstr();
    MachineOperand &JumpTarget = MBBI->getOperand(0);
    MachineOperand &StackAdjust = MBBI->getOperand(isMem ? 5 : 1);
    assert(StackAdjust.isImm() && "Expecting immediate value.");

    // Adjust stack pointer.
    int StackAdj = StackAdjust.getImm();
    int MaxTCDelta = X86FI->getTCReturnAddrDelta();
    int Offset = 0;
    assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive");

    // Incorporate the retaddr area.
    Offset = StackAdj - MaxTCDelta;
    assert(Offset >= 0 && "Offset should never be negative");

    if (Offset) {
      // Check for possible merge with preceding ADD instruction.
      Offset += mergeSPUpdates(MBB, MBBI, StackPtr, true);
      emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, IsLP64,
                   UseLEA, TII, *RegInfo);
    }

    // Jump to label or value in register.
    if (RetOpcode == X86::TCRETURNdi || RetOpcode == X86::TCRETURNdi64) {
      MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNdi)
                                       ? X86::TAILJMPd : X86::TAILJMPd64));
      if (JumpTarget.isGlobal())
        MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
                             JumpTarget.getTargetFlags());
      else {
        assert(JumpTarget.isSymbol());
        MIB.addExternalSymbol(JumpTarget.getSymbolName(),
                              JumpTarget.getTargetFlags());
      }
    } else if (RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64) {
      MachineInstrBuilder MIB =
        BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNmi)
                                       ? X86::TAILJMPm : X86::TAILJMPm64));
      for (unsigned i = 0; i != 5; ++i)
        MIB.addOperand(MBBI->getOperand(i));
    } else if (RetOpcode == X86::TCRETURNri64) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr64)).
        addReg(JumpTarget.getReg(), RegState::Kill);
    } else {
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr)).
        addReg(JumpTarget.getReg(), RegState::Kill);
    }

    MachineInstr *NewMI = std::prev(MBBI);
    NewMI->copyImplicitOps(MF, MBBI);

    // Delete the pseudo instruction TCRETURN.
    MBB.erase(MBBI);
  } else if ((RetOpcode == X86::RETQ || RetOpcode == X86::RETL ||
              RetOpcode == X86::RETIQ || RetOpcode == X86::RETIL) &&
             (X86FI->getTCReturnAddrDelta() < 0)) {
    // Add the return addr area delta back since we are not tail calling.
    int delta = -1*X86FI->getTCReturnAddrDelta();
    MBBI = MBB.getLastNonDebugInstr();

    // Check for possible merge with preceding ADD instruction.
    delta += mergeSPUpdates(MBB, MBBI, StackPtr, true);
    emitSPUpdate(MBB, MBBI, StackPtr, delta, Is64Bit, IsLP64, UseLEA, TII,
                 *RegInfo);
  }
}
int X86FrameLowering::getFrameIndexOffset(const MachineFunction &MF,
                                          int FI) const {
  const X86RegisterInfo *RegInfo =
    static_cast<const X86RegisterInfo*>(MF.getTarget().getRegisterInfo());
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea();
  uint64_t StackSize = MFI->getStackSize();

  if (RegInfo->hasBasePointer(MF)) {
    assert(hasFP(MF) && "VLAs and dynamic stack realign, but no FP?!");
    if (FI < 0) {
      // Skip the saved EBP.
      return Offset + RegInfo->getSlotSize();
    } else {
      assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
      return Offset + StackSize;
    }
  } else if (RegInfo->needsStackRealignment(MF)) {
    if (FI < 0) {
      // Skip the saved EBP.
      return Offset + RegInfo->getSlotSize();
    } else {
      assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
      return Offset + StackSize;
    }
    // FIXME: Support tail calls
  } else {
    if (!hasFP(MF))
      return Offset + StackSize;

    // Skip the saved EBP.
    Offset += RegInfo->getSlotSize();

    // Skip the RETADDR move area
    const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
    int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
    if (TailCallReturnAddrDelta < 0)
      Offset -= TailCallReturnAddrDelta;
  }

  return Offset;
}
int X86FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
                                             unsigned &FrameReg) const {
  const X86RegisterInfo *RegInfo =
    static_cast<const X86RegisterInfo*>(MF.getTarget().getRegisterInfo());
  // We can't calculate offset from frame pointer if the stack is realigned,
  // so enforce usage of stack/base pointer. The base pointer is used when we
  // have dynamic allocas in addition to dynamic realignment.
  if (RegInfo->hasBasePointer(MF))
    FrameReg = RegInfo->getBaseRegister();
  else if (RegInfo->needsStackRealignment(MF))
    FrameReg = RegInfo->getStackRegister();
  else
    FrameReg = RegInfo->getFrameRegister(MF);
  return getFrameIndexOffset(MF, FI);
}
bool X86FrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MI,
                                        const std::vector<CalleeSavedInfo> &CSI,
                                        const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return false;

  DebugLoc DL = MBB.findDebugLoc(MI);

  MachineFunction &MF = *MBB.getParent();

  unsigned SlotSize = STI.is64Bit() ? 8 : 4;
  unsigned FPReg = TRI->getFrameRegister(MF);
  unsigned CalleeFrameSize = 0;

  const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();

  // Push GPRs. It increases frame size.
  unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i-1].getReg();
    if (!X86::GR64RegClass.contains(Reg) &&
        !X86::GR32RegClass.contains(Reg))
      continue;
    // Add the callee-saved register as live-in. It's killed at the spill.
    MBB.addLiveIn(Reg);
    if (Reg == FPReg)
      // X86RegisterInfo::emitPrologue will handle spilling of frame register.
      continue;
    CalleeFrameSize += SlotSize;
    BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, RegState::Kill)
      .setMIFlag(MachineInstr::FrameSetup);
  }

  X86FI->setCalleeSavedFrameSize(CalleeFrameSize);

  // Spill XMM regs. X86 has no push/pop instructions for XMM registers,
  // so spill them to stack slots instead.
  // Note that only the Win64 ABI might spill XMMs.
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i-1].getReg();
    if (X86::GR64RegClass.contains(Reg) ||
        X86::GR32RegClass.contains(Reg))
      continue;
    // Add the callee-saved register as live-in. It's killed at the spill.
    MBB.addLiveIn(Reg);
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    TII.storeRegToStackSlot(MBB, MI, Reg, true, CSI[i-1].getFrameIdx(),
                            RC, TRI);
  }

  return true;
}
bool X86FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MI,
                                        const std::vector<CalleeSavedInfo> &CSI,
                                        const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return false;

  DebugLoc DL = MBB.findDebugLoc(MI);

  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();

  // Reload XMMs from stack frame.
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    if (X86::GR64RegClass.contains(Reg) ||
        X86::GR32RegClass.contains(Reg))
      continue;
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    TII.loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(),
                             RC, TRI);
  }

  // POP GPRs.
  unsigned FPReg = TRI->getFrameRegister(MF);
  unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    if (!X86::GR64RegClass.contains(Reg) &&
        !X86::GR32RegClass.contains(Reg))
      continue;
    if (Reg == FPReg)
      // X86RegisterInfo::emitEpilogue will handle restoring of frame register.
      continue;
    BuildMI(MBB, MI, DL, TII.get(Opc), Reg);
  }

  return true;
}
void
X86FrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
                                                       RegScavenger *RS) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
  unsigned SlotSize = RegInfo->getSlotSize();

  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  int64_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();

  if (TailCallReturnAddrDelta < 0) {
    // Create the RETURNADDR area:
    //   arg
    //   arg
    //   RETADDR
    //   { ...
    //     RETADDR area
    //     ...
    //   }
    //   [EBP]
    MFI->CreateFixedObject(-TailCallReturnAddrDelta,
                           TailCallReturnAddrDelta - SlotSize, true);
  }

  if (hasFP(MF)) {
    assert((TailCallReturnAddrDelta <= 0) &&
           "The Delta should always be zero or negative");
    const TargetFrameLowering &TFI = *MF.getTarget().getFrameLowering();

    // Create a frame entry for the EBP register that must be saved.
    int FrameIdx = MFI->CreateFixedObject(SlotSize,
                                          -(int)SlotSize +
                                          TFI.getOffsetOfLocalArea() +
                                          TailCallReturnAddrDelta,
                                          true);
    assert(FrameIdx == MFI->getObjectIndexBegin() &&
           "Slot for EBP register must be last in order to be found!");
    (void)FrameIdx;
  }

  // Spill the BasePtr if it's used.
  if (RegInfo->hasBasePointer(MF))
    MF.getRegInfo().setPhysRegUsed(RegInfo->getBaseRegister());
}
static bool
HasNestArgument(const MachineFunction *MF) {
  const Function *F = MF->getFunction();
  for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
       I != E; I++) {
    if (I->hasNestAttr())
      return true;
  }
  return false;
}
/// GetScratchRegister - Get a temp register for performing work in the
/// segmented stack and the Erlang/HiPE stack prologue. Depending on platform
/// and the properties of the function either one or two registers will be
/// needed. Set primary to true for the first register, false for the second.
static unsigned
GetScratchRegister(bool Is64Bit, const MachineFunction &MF, bool Primary) {
  CallingConv::ID CallingConvention = MF.getFunction()->getCallingConv();

  // Erlang stuff.
  if (CallingConvention == CallingConv::HiPE) {
    if (Is64Bit)
      return Primary ? X86::R14 : X86::R13;
    else
      return Primary ? X86::EBX : X86::EDI;
  }

  if (Is64Bit)
    return Primary ? X86::R11 : X86::R12;

  bool IsNested = HasNestArgument(&MF);

  if (CallingConvention == CallingConv::X86_FastCall ||
      CallingConvention == CallingConv::Fast) {
    if (IsNested)
      report_fatal_error("Segmented stacks do not support fastcall with "
                         "nested functions.");
    return Primary ? X86::EAX : X86::ECX;
  }
  if (IsNested)
    return Primary ? X86::EDX : X86::EAX;
  return Primary ? X86::ECX : X86::EAX;
}
// The stack limit in the TCB is set to this many bytes above the actual stack
// limit.
static const uint64_t kSplitStackAvailable = 256;

void
X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
  MachineBasicBlock &prologueMBB = MF.front();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const X86InstrInfo &TII = *TM.getInstrInfo();
  uint64_t StackSize;
  bool Is64Bit = STI.is64Bit();
  unsigned TlsReg, TlsOffset;
  DebugLoc DL;

  unsigned ScratchReg = GetScratchRegister(Is64Bit, MF, true);
  assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
         "Scratch register is live-in");

  if (MF.getFunction()->isVarArg())
    report_fatal_error("Segmented stacks do not support vararg functions.");
  if (!STI.isTargetLinux() && !STI.isTargetDarwin() &&
      !STI.isTargetWin32() && !STI.isTargetWin64() && !STI.isTargetFreeBSD())
    report_fatal_error("Segmented stacks not supported on this platform.");

  MachineBasicBlock *allocMBB = MF.CreateMachineBasicBlock();
  MachineBasicBlock *checkMBB = MF.CreateMachineBasicBlock();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  bool IsNested = false;

  // We need to know if the function has a nest argument only in 64 bit mode.
  if (Is64Bit)
    IsNested = HasNestArgument(&MF);

  // The MOV R10, RAX needs to be in a different block, since the RET we emit
  // in allocMBB needs to be the last (terminating) instruction.

  for (MachineBasicBlock::livein_iterator i = prologueMBB.livein_begin(),
       e = prologueMBB.livein_end(); i != e; i++) {
    allocMBB->addLiveIn(*i);
    checkMBB->addLiveIn(*i);
  }

  if (IsNested)
    allocMBB->addLiveIn(X86::R10);

  MF.push_front(allocMBB);
  MF.push_front(checkMBB);

  // Eventually StackSize will be calculated by a link-time pass, which will
  // also decide whether checking code needs to be injected into this
  // particular prologue.
  StackSize = MFI->getStackSize();

  // When the frame size is less than 256 we just compare the stack
  // boundary directly to the value of the stack pointer, per gcc.
  bool CompareStackPointer = StackSize < kSplitStackAvailable;
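
  // In effect, the check emitted below is (illustrative pseudo-assembly):
  //
  //     lea  -StackSize(%sp), %scratch   ; omitted when CompareStackPointer
  //     cmp  <TLS stacklet limit>, %scratch
  //     ja   <function body>             ; enough room, skip __morestack
  //
  // with the segment register and TLS offset chosen per target below.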

  // Read the limit of the current stacklet from the stack_guard location.
  if (Is64Bit) {
    if (STI.isTargetLinux()) {
      TlsReg = X86::FS;
      TlsOffset = 0x70;
    } else if (STI.isTargetDarwin()) {
      TlsReg = X86::GS;
      TlsOffset = 0x60 + 90*8; // See pthread_machdep.h. Steal TLS slot 90.
    } else if (STI.isTargetWin64()) {
      TlsReg = X86::GS;
      TlsOffset = 0x28; // pvArbitrary, reserved for application use
    } else if (STI.isTargetFreeBSD()) {
      TlsReg = X86::FS;
      TlsOffset = 0x18;
    } else {
      report_fatal_error("Segmented stacks not supported on this platform.");
    }

    if (CompareStackPointer)
      ScratchReg = X86::RSP;
    else
      BuildMI(checkMBB, DL, TII.get(X86::LEA64r), ScratchReg).addReg(X86::RSP)
        .addImm(1).addReg(0).addImm(-StackSize).addReg(0);

    BuildMI(checkMBB, DL, TII.get(X86::CMP64rm)).addReg(ScratchReg)
      .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg);
  } else {
    if (STI.isTargetLinux()) {
      TlsReg = X86::GS;
      TlsOffset = 0x30;
    } else if (STI.isTargetDarwin()) {
      TlsReg = X86::GS;
      TlsOffset = 0x48 + 90*4;
    } else if (STI.isTargetWin32()) {
      TlsReg = X86::FS;
      TlsOffset = 0x14; // pvArbitrary, reserved for application use
    } else if (STI.isTargetFreeBSD()) {
      report_fatal_error("Segmented stacks not supported on FreeBSD i386.");
    } else {
      report_fatal_error("Segmented stacks not supported on this platform.");
    }

    if (CompareStackPointer)
      ScratchReg = X86::ESP;
    else
      BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP)
        .addImm(1).addReg(0).addImm(-StackSize).addReg(0);

    if (STI.isTargetLinux() || STI.isTargetWin32() || STI.isTargetWin64()) {
      BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)).addReg(ScratchReg)
        .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);
    } else if (STI.isTargetDarwin()) {

      // TlsOffset doesn't fit into a mod r/m byte so we need an extra register
      unsigned ScratchReg2;
      bool SaveScratch2;
      if (CompareStackPointer) {
        // The primary scratch register is available for holding the TLS offset
        ScratchReg2 = GetScratchRegister(Is64Bit, MF, true);
        SaveScratch2 = false;
      } else {
        // Need to use a second register to hold the TLS offset
        ScratchReg2 = GetScratchRegister(Is64Bit, MF, false);

        // Unfortunately, with fastcc the second scratch register may hold an
        // argument.
        SaveScratch2 = MF.getRegInfo().isLiveIn(ScratchReg2);
      }

      // If Scratch2 is live-in then it needs to be saved.
      assert((!MF.getRegInfo().isLiveIn(ScratchReg2) || SaveScratch2) &&
             "Scratch register is live-in and not saved");

      if (SaveScratch2)
        BuildMI(checkMBB, DL, TII.get(X86::PUSH32r))
          .addReg(ScratchReg2, RegState::Kill);

      BuildMI(checkMBB, DL, TII.get(X86::MOV32ri), ScratchReg2)
        .addImm(TlsOffset);
      BuildMI(checkMBB, DL, TII.get(X86::CMP32rm))
        .addReg(ScratchReg)
        .addReg(ScratchReg2).addImm(1).addReg(0)
        .addImm(0)
        .addReg(TlsReg);

      if (SaveScratch2)
        BuildMI(checkMBB, DL, TII.get(X86::POP32r), ScratchReg2);
    }
  }

  // This jump is taken if SP >= (Stacklet Limit + Stack Space required).
  // It jumps to normal execution of the function body.
  BuildMI(checkMBB, DL, TII.get(X86::JA_4)).addMBB(&prologueMBB);

  // On 32 bit we first push the arguments size and then the frame size. On 64
  // bit, we pass the stack frame size in r10 and the argument size in r11.
  if (Is64Bit) {
    // Functions with nested arguments use R10, so it needs to be saved across
    // the call to _morestack.

    if (IsNested)
      BuildMI(allocMBB, DL, TII.get(X86::MOV64rr), X86::RAX).addReg(X86::R10);

    BuildMI(allocMBB, DL, TII.get(X86::MOV64ri), X86::R10)
      .addImm(StackSize);
    BuildMI(allocMBB, DL, TII.get(X86::MOV64ri), X86::R11)
      .addImm(X86FI->getArgumentStackSize());
    MF.getRegInfo().setPhysRegUsed(X86::R10);
    MF.getRegInfo().setPhysRegUsed(X86::R11);
  } else {
    BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
      .addImm(X86FI->getArgumentStackSize());
    BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
      .addImm(StackSize);
  }

  // __morestack is in libgcc
  if (Is64Bit)
    BuildMI(allocMBB, DL, TII.get(X86::CALL64pcrel32))
      .addExternalSymbol("__morestack");
  else
    BuildMI(allocMBB, DL, TII.get(X86::CALLpcrel32))
      .addExternalSymbol("__morestack");

  if (IsNested)
    BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET_RESTORE_R10));
  else
    BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET));

  allocMBB->addSuccessor(&prologueMBB);

  checkMBB->addSuccessor(allocMBB);
  checkMBB->addSuccessor(&prologueMBB);

#ifdef XDEBUG
  MF.verify();
#endif
}
/// Erlang programs may need a special prologue to handle the stack size they
/// might need at runtime. That is because Erlang/OTP does not implement a C
/// stack but uses a custom implementation of hybrid stack/heap architecture.
/// (for more information see Eric Stenman's Ph.D. thesis:
/// http://publications.uu.se/uu/fulltext/nbn_se_uu_diva-2688.pdf)
///
/// CheckStack:
///       temp0 = sp - MaxStack
///       if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
/// OldStart:
///       ...
/// IncStack:
///       call inc_stack   # doubles the stack space
///       temp0 = sp - MaxStack
///       if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
void X86FrameLowering::adjustForHiPEPrologue(MachineFunction &MF) const {
  const X86InstrInfo &TII = *TM.getInstrInfo();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const unsigned SlotSize = TM.getRegisterInfo()->getSlotSize();
  const bool Is64Bit = STI.is64Bit();
  DebugLoc DL;
  // HiPE-specific values
  const unsigned HipeLeafWords = 24;
  const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5;
  const unsigned Guaranteed = HipeLeafWords * SlotSize;
  unsigned CallerStkArity = MF.getFunction()->arg_size() > CCRegisteredArgs ?
                            MF.getFunction()->arg_size() - CCRegisteredArgs : 0;
  unsigned MaxStack = MFI->getStackSize() + CallerStkArity*SlotSize + SlotSize;

  assert(STI.isTargetLinux() &&
         "HiPE prologue is only supported on Linux operating systems.");

  // Compute the largest caller's frame that is needed to fit the callees'
  // frames. This 'MaxStack' is computed from:
  //
  // a) the fixed frame size, which is the space needed for all spilled temps,
  // b) outgoing on-stack parameter areas, and
  // c) the minimum stack space this function needs to make available for the
  //    functions it calls (a tunable ABI property).
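  //
  // For example (illustrative, 64-bit): with HipeLeafWords == 24 and 8-byte
  // slots, a call to a callee that takes no stack arguments contributes
  // (24 - 1) * 8 = 184 bytes to MoreStackForCalls below.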
  if (MFI->hasCalls()) {
    unsigned MoreStackForCalls = 0;

    for (MachineFunction::iterator MBBI = MF.begin(), MBBE = MF.end();
         MBBI != MBBE; ++MBBI)
      for (MachineBasicBlock::iterator MI = MBBI->begin(), ME = MBBI->end();
           MI != ME; ++MI) {
        if (!MI->isCall())
          continue;

        // Get callee operand.
        const MachineOperand &MO = MI->getOperand(0);

        // Only take account of global function calls (no closures etc.).
        if (!MO.isGlobal())
          continue;

        const Function *F = dyn_cast<Function>(MO.getGlobal());
        if (!F)
          continue;

        // Do not update 'MaxStack' for primitive and built-in functions
        // (encoded with names either starting with "erlang."/"bif_" or not
        // having a ".", such as a simple <Module>.<Function>.<Arity>, or an
        // "_", such as the BIF "suspend_0") as they are executed on another
        // stack.
        if (F->getName().find("erlang.") != StringRef::npos ||
            F->getName().find("bif_") != StringRef::npos ||
            F->getName().find_first_of("._") == StringRef::npos)
          continue;

        unsigned CalleeStkArity =
          F->arg_size() > CCRegisteredArgs ? F->arg_size()-CCRegisteredArgs : 0;
        if (HipeLeafWords - 1 > CalleeStkArity)
          MoreStackForCalls = std::max(MoreStackForCalls,
                               (HipeLeafWords - 1 - CalleeStkArity) * SlotSize);
      }
    MaxStack += MoreStackForCalls;
  }

  // If the stack frame needed is larger than the guaranteed amount, then
  // runtime checks and calls to the "inc_stack_0" BIF should be inserted in
  // the assembly prologue.
  if (MaxStack > Guaranteed) {
    MachineBasicBlock &prologueMBB = MF.front();
    MachineBasicBlock *stackCheckMBB = MF.CreateMachineBasicBlock();
    MachineBasicBlock *incStackMBB = MF.CreateMachineBasicBlock();

    for (MachineBasicBlock::livein_iterator I = prologueMBB.livein_begin(),
         E = prologueMBB.livein_end(); I != E; I++) {
      stackCheckMBB->addLiveIn(*I);
      incStackMBB->addLiveIn(*I);
    }

    MF.push_front(incStackMBB);
    MF.push_front(stackCheckMBB);

    unsigned ScratchReg, SPReg, PReg, SPLimitOffset;
    unsigned LEAop, CMPop, CALLop;
    if (Is64Bit) {
      SPReg = X86::RSP;
      PReg  = X86::RBP;
      LEAop = X86::LEA64r;
      CMPop = X86::CMP64rm;
      CALLop = X86::CALL64pcrel32;
      SPLimitOffset = 0x90;
    } else {
      SPReg = X86::ESP;
      PReg  = X86::EBP;
      LEAop = X86::LEA32r;
      CMPop = X86::CMP32rm;
      CALLop = X86::CALLpcrel32;
      SPLimitOffset = 0x4c;
    }

    ScratchReg = GetScratchRegister(Is64Bit, MF, true);
    assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
           "HiPE prologue scratch register is live-in");

    // Create new MBB for StackCheck:
    addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(LEAop), ScratchReg),
                 SPReg, false, -MaxStack);
    // SPLimitOffset is in a fixed heap location (pointed by BP).
    addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(CMPop))
                 .addReg(ScratchReg), PReg, false, SPLimitOffset);
    BuildMI(stackCheckMBB, DL, TII.get(X86::JAE_4)).addMBB(&prologueMBB);

    // Create new MBB for IncStack:
    BuildMI(incStackMBB, DL, TII.get(CALLop)).
      addExternalSymbol("inc_stack_0");
    addRegOffset(BuildMI(incStackMBB, DL, TII.get(LEAop), ScratchReg),
                 SPReg, false, -MaxStack);
    addRegOffset(BuildMI(incStackMBB, DL, TII.get(CMPop))
                 .addReg(ScratchReg), PReg, false, SPLimitOffset);
    BuildMI(incStackMBB, DL, TII.get(X86::JLE_4)).addMBB(incStackMBB);

    stackCheckMBB->addSuccessor(&prologueMBB, 99);
    stackCheckMBB->addSuccessor(incStackMBB, 1);
    incStackMBB->addSuccessor(&prologueMBB, 99);
    incStackMBB->addSuccessor(incStackMBB, 1);
  }
#ifdef XDEBUG
  MF.verify();
#endif
}
void X86FrameLowering::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  const X86InstrInfo &TII = *TM.getInstrInfo();
  const X86RegisterInfo &RegInfo = *TM.getRegisterInfo();
  unsigned StackPtr = RegInfo.getStackRegister();
  bool reserveCallFrame = hasReservedCallFrame(MF);
  int Opcode = I->getOpcode();
  bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();
  bool IsLP64 = STI.isTarget64BitLP64();
  DebugLoc DL = I->getDebugLoc();
  uint64_t Amount = !reserveCallFrame ? I->getOperand(0).getImm() : 0;
  uint64_t CalleeAmt = isDestroy ? I->getOperand(1).getImm() : 0;
  I = MBB.erase(I);

  if (!reserveCallFrame) {
    // If the stack pointer can be changed after prologue, turn the
    // adjcallstackup instruction into a 'sub ESP, <amt>' and the
    // adjcallstackdown instruction into 'add ESP, <amt>'.
    // TODO: consider using push / pop instead of sub + store / add + load.
    if (Amount == 0)
      return;

    // We need to keep the stack aligned properly. To do this, we round the
    // amount of space needed for the outgoing arguments up to the next
    // alignment boundary.
    unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
    Amount = (Amount + StackAlign - 1) / StackAlign * StackAlign;
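    // For example (illustrative): 20 bytes of outgoing arguments with a
    // 16-byte stack alignment are rounded up to 32 bytes here.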

    MachineInstr *New = nullptr;
    if (Opcode == TII.getCallFrameSetupOpcode()) {
      New = BuildMI(MF, DL, TII.get(getSUBriOpcode(IsLP64, Amount)),
                    StackPtr)
        .addReg(StackPtr)
        .addImm(Amount);
    } else {
      assert(Opcode == TII.getCallFrameDestroyOpcode());

      // Factor out the amount the callee already popped.
      Amount -= CalleeAmt;

      if (Amount) {
        unsigned Opc = getADDriOpcode(IsLP64, Amount);
        New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
          .addReg(StackPtr).addImm(Amount);
      }
    }

    if (New) {
      // The EFLAGS implicit def is dead.
      New->getOperand(3).setIsDead();

      // Replace the pseudo instruction with a new instruction.
      MBB.insert(I, New);
    }

    return;
  }

  if (Opcode == TII.getCallFrameDestroyOpcode() && CalleeAmt) {
    // If we are performing frame pointer elimination and if the callee pops
    // something off the stack pointer, add it back. We do this until we have
    // more advanced stack pointer tracking ability.
    unsigned Opc = getSUBriOpcode(IsLP64, CalleeAmt);
    MachineInstr *New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
      .addReg(StackPtr).addImm(CalleeAmt);

    // The EFLAGS implicit def is dead.
    New->getOperand(3).setIsDead();

    // We are not tracking the stack pointer adjustment by the callee, so make
    // sure we restore the stack pointer immediately after the call, there may
    // be spill code inserted between the CALL and ADJCALLSTACKUP instructions.
    MachineBasicBlock::iterator B = MBB.begin();
    while (I != B && !std::prev(I)->isCall())