//===-- X86FrameLowering.cpp - X86 Frame Information ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of TargetFrameLowering class.
//
//===----------------------------------------------------------------------===//

#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetOptions.h"
#include <cstdlib>

using namespace llvm;

// FIXME: completely move here.
extern cl::opt<bool> ForceStackAlign;

bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  return !MF.getFrameInfo()->hasVarSizedObjects() &&
         !MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences();
}

/// canSimplifyCallFramePseudos - If there is a reserved call frame, the
/// call frame pseudos can be simplified. Having a FP, as in the default
/// implementation, is not sufficient here since we can't always use it.
/// Use a more nuanced condition.
bool
X86FrameLowering::canSimplifyCallFramePseudos(const MachineFunction &MF) const {
  const X86RegisterInfo *TRI = static_cast<const X86RegisterInfo *>(
      MF.getSubtarget().getRegisterInfo());
  return hasReservedCallFrame(MF) ||
         (hasFP(MF) && !TRI->needsStackRealignment(MF)) ||
         TRI->hasBasePointer(MF);
}

// needsFrameIndexResolution - Do we need to perform FI resolution for
// this function? Normally, this is required only when the function
// has any stack objects. However, FI resolution actually has another job,
// not apparent from the title - it resolves call frame setup/destroy
// pseudos that were not simplified earlier.
// So, this is required for x86 functions that have push sequences even
// when there are no stack objects.
bool
X86FrameLowering::needsFrameIndexResolution(const MachineFunction &MF) const {
  return MF.getFrameInfo()->hasStackObjects() ||
         MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences();
}

/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register. This is true if the function has variable sized allocas
/// or if frame pointer elimination is disabled.
bool X86FrameLowering::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineModuleInfo &MMI = MF.getMMI();
  const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();

  return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
          RegInfo->needsStackRealignment(MF) ||
          MFI->hasVarSizedObjects() ||
          MFI->isFrameAddressTaken() || MFI->hasInlineAsmWithSPAdjust() ||
          MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
          MMI.callsUnwindInit() || MMI.callsEHReturn() ||
          MFI->hasStackMap() || MFI->hasPatchPoint());
}

static unsigned getSUBriOpcode(unsigned IsLP64, int64_t Imm) {
  if (IsLP64) {
    if (isInt<8>(Imm))
      return X86::SUB64ri8;
    return X86::SUB64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::SUB32ri8;
    return X86::SUB32ri;
  }
}

static unsigned getADDriOpcode(unsigned IsLP64, int64_t Imm) {
  if (IsLP64) {
    if (isInt<8>(Imm))
      return X86::ADD64ri8;
    return X86::ADD64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::ADD32ri8;
    return X86::ADD32ri;
  }
}

static unsigned getSUBrrOpcode(unsigned isLP64) {
  return isLP64 ? X86::SUB64rr : X86::SUB32rr;
}

static unsigned getADDrrOpcode(unsigned isLP64) {
  return isLP64 ? X86::ADD64rr : X86::ADD32rr;
}

static unsigned getANDriOpcode(bool IsLP64, int64_t Imm) {
  if (IsLP64) {
    if (isInt<8>(Imm))
      return X86::AND64ri8;
    return X86::AND64ri32;
  }
  if (isInt<8>(Imm))
    return X86::AND32ri8;
  return X86::AND32ri;
}

static unsigned getLEArOpcode(unsigned IsLP64) {
  return IsLP64 ? X86::LEA64r : X86::LEA32r;
}
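
// For illustration: getSUBriOpcode(/*IsLP64=*/true, 8) selects SUB64ri8,
// whose sign-extended 8-bit immediate encodes in 4 bytes, while
// getSUBriOpcode(true, 4096) has to fall back to the 7-byte SUB64ri32 form,
// so picking the short form saves 3 bytes per stack adjustment.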

/// findDeadCallerSavedReg - Return a caller-saved register that isn't live
/// when it reaches the "return" instruction. We can then pop a stack object
/// to this register without worrying about clobbering it.
static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator &MBBI,
                                       const TargetRegisterInfo &TRI,
                                       bool Is64Bit) {
  const MachineFunction *MF = MBB.getParent();
  const Function *F = MF->getFunction();
  if (!F || MF->getMMI().callsEHReturn())
    return 0;

  static const uint16_t CallerSavedRegs32Bit[] = {
    X86::EAX, X86::EDX, X86::ECX, 0
  };

  static const uint16_t CallerSavedRegs64Bit[] = {
    X86::RAX, X86::RDX, X86::RCX, X86::RSI, X86::RDI,
    X86::R8,  X86::R9,  X86::R10, X86::R11, 0
  };

  unsigned Opc = MBBI->getOpcode();
  switch (Opc) {
  default: return 0;
  case X86::RETL:
  case X86::RETQ:
  case X86::RETIL:
  case X86::RETIQ:
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNmi:
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNmi64:
  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    SmallSet<uint16_t, 8> Uses;
    for (unsigned i = 0, e = MBBI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MBBI->getOperand(i);
      if (!MO.isReg() || MO.isDef())
        continue;
      unsigned Reg = MO.getReg();
      if (!Reg)
        continue;
      for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI)
        Uses.insert(*AI);
    }

    const uint16_t *CS = Is64Bit ? CallerSavedRegs64Bit : CallerSavedRegs32Bit;
    for (; *CS; ++CS)
      if (!Uses.count(*CS))
        return *CS;
  }
  }

  return 0;
}

static bool isEAXLiveIn(MachineFunction &MF) {
  for (MachineRegisterInfo::livein_iterator II = MF.getRegInfo().livein_begin(),
       EE = MF.getRegInfo().livein_end(); II != EE; ++II) {
    unsigned Reg = II->first;

    if (Reg == X86::RAX || Reg == X86::EAX || Reg == X86::AX ||
        Reg == X86::AH || Reg == X86::AL)
      return true;
  }

  return false;
}

/// emitSPUpdate - Emit a series of instructions to increment / decrement the
/// stack pointer by a constant value.
static
void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                  unsigned StackPtr, int64_t NumBytes,
                  bool Is64BitTarget, bool Is64BitStackPtr, bool UseLEA,
                  const TargetInstrInfo &TII, const TargetRegisterInfo &TRI) {
  bool isSub = NumBytes < 0;
  uint64_t Offset = isSub ? -NumBytes : NumBytes;
  unsigned Opc;
  if (UseLEA)
    Opc = getLEArOpcode(Is64BitStackPtr);
  else
    Opc = isSub
      ? getSUBriOpcode(Is64BitStackPtr, Offset)
      : getADDriOpcode(Is64BitStackPtr, Offset);

  uint64_t Chunk = (1LL << 31) - 1;
  DebugLoc DL = MBB.findDebugLoc(MBBI);

  while (Offset) {
    if (Offset > Chunk) {
      // Rather than emit a long series of instructions for large offsets,
      // load the offset into a register and do one sub/add.
      unsigned Reg = 0;

      if (isSub && !isEAXLiveIn(*MBB.getParent()))
        Reg = (unsigned)(Is64BitTarget ? X86::RAX : X86::EAX);
      else
        Reg = findDeadCallerSavedReg(MBB, MBBI, TRI, Is64BitTarget);

      if (Reg) {
        Opc = Is64BitTarget ? X86::MOV64ri : X86::MOV32ri;
        BuildMI(MBB, MBBI, DL, TII.get(Opc), Reg)
          .addImm(Offset);
        Opc = isSub
          ? getSUBrrOpcode(Is64BitTarget)
          : getADDrrOpcode(Is64BitTarget);
        MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
          .addReg(StackPtr)
          .addReg(Reg);
        MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
        Offset = 0;
        continue;
      }
    }

    uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
    if (ThisVal == (Is64BitTarget ? 8 : 4)) {
      // Use push / pop instead.
      unsigned Reg = isSub
        ? (unsigned)(Is64BitTarget ? X86::RAX : X86::EAX)
        : findDeadCallerSavedReg(MBB, MBBI, TRI, Is64BitTarget);
      if (Reg) {
        Opc = isSub
          ? (Is64BitTarget ? X86::PUSH64r : X86::PUSH32r)
          : (Is64BitTarget ? X86::POP64r : X86::POP32r);
        MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc))
          .addReg(Reg, getDefRegState(!isSub) | getUndefRegState(isSub));
        if (isSub)
          MI->setFlag(MachineInstr::FrameSetup);
        Offset -= ThisVal;
        continue;
      }
    }

    MachineInstr *MI = nullptr;

    if (UseLEA) {
      MI = addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),
                        StackPtr, false, isSub ? -ThisVal : ThisVal);
    } else {
      MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
        .addReg(StackPtr)
        .addImm(ThisVal);
      MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
    }

    if (isSub)
      MI->setFlag(MachineInstr::FrameSetup);

    Offset -= ThisVal;
  }
}
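
// For illustration, on x86-64 a call emitSPUpdate(..., NumBytes = -40, ...)
// collapses to a single instruction:
//   subq $40, %rsp
// or, when UseLEA is set (e.g. to avoid clobbering EFLAGS):
//   leaq -40(%rsp), %rsp
// An (unusual) adjustment larger than 2GiB would instead materialize the
// offset in a dead register first:
//   movabsq $Offset, %rax
//   subq    %rax, %rsp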

/// mergeSPUpdatesUp - If the instruction immediately above \p MBBI is a stack
/// adjustment (ADD/SUB/LEA of the stack pointer), fold its offset into
/// *NumBytes and erase it.
static
void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                      unsigned StackPtr, uint64_t *NumBytes = nullptr) {
  if (MBBI == MBB.begin()) return;

  MachineBasicBlock::iterator PI = std::prev(MBBI);
  unsigned Opc = PI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8 ||
       Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
      PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += PI->getOperand(2).getImm();
    MBB.erase(PI);
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= PI->getOperand(2).getImm();
    MBB.erase(PI);
  }
}

/// mergeSPUpdatesDown - Like mergeSPUpdatesUp, but merges the stack adjustment
/// performed by the instruction immediately below \p MBBI.
static
void mergeSPUpdatesDown(MachineBasicBlock &MBB,
                        MachineBasicBlock::iterator &MBBI,
                        unsigned StackPtr, uint64_t *NumBytes = nullptr) {
  // FIXME: THIS ISN'T RUN!!!
  return;

  if (MBBI == MBB.end()) return;

  MachineBasicBlock::iterator NI = std::next(MBBI);
  if (NI == MBB.end()) return;

  unsigned Opc = NI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  }
}

/// mergeSPUpdates - Checks the instruction before/after the passed
/// instruction. If it is an ADD/SUB/LEA instruction it is deleted, and the
/// stack adjustment is returned as a positive value for ADD/LEA and a
/// negative one for SUB.
static int mergeSPUpdates(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MBBI, unsigned StackPtr,
                          bool doMergeWithPrevious) {
  if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
      (!doMergeWithPrevious && MBBI == MBB.end()))
    return 0;

  MachineBasicBlock::iterator PI = doMergeWithPrevious ? std::prev(MBBI) : MBBI;
  MachineBasicBlock::iterator NI = doMergeWithPrevious ? nullptr
                                                       : std::next(MBBI);
  unsigned Opc = PI->getOpcode();
  int Offset = 0;

  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8 ||
       Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
      PI->getOperand(0).getReg() == StackPtr) {
    Offset += PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    Offset -= PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  }

  return Offset;
}
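
// For illustration, with doMergeWithPrevious = true and MBBI pointing just
// after an existing "subq $16, %rsp", mergeSPUpdates erases that SUB and
// returns -16; the prologue then folds the value into its own allocation,
// emitting one "subq $(16 + NumBytes), %rsp" instead of two adjustments.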

void
X86FrameLowering::emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
                                            MachineBasicBlock::iterator MBBI,
                                            DebugLoc DL) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();

  // Add callee saved registers to move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
  if (CSI.empty()) return;

  // Calculate offsets.
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I) {
    int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
    unsigned Reg = I->getReg();

    unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
    unsigned CFIIndex =
        MMI.addFrameInst(MCCFIInstruction::createOffset(nullptr, DwarfReg,
                                                        Offset));
    BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex);
  }
}

/// usesTheStack - This function checks if any of the users of EFLAGS
/// copy the EFLAGS. We know that the code that lowers COPY of EFLAGS has
/// to use the stack, and if we don't adjust the stack we clobber the first
/// frame index.
/// See X86InstrInfo::copyPhysReg.
static bool usesTheStack(const MachineFunction &MF) {
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  for (MachineRegisterInfo::reg_instr_iterator
       ri = MRI.reg_instr_begin(X86::EFLAGS), re = MRI.reg_instr_end();
       ri != re; ++ri)
    if (ri->isCopy())
      return true;

  return false;
}

void X86FrameLowering::emitStackProbeCall(MachineFunction &MF,
                                          MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator MBBI,
                                          DebugLoc DL) {
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
  bool Is64Bit = STI.is64Bit();
  bool IsLargeCodeModel = MF.getTarget().getCodeModel() == CodeModel::Large;

  unsigned CallOp;
  if (Is64Bit)
    CallOp = IsLargeCodeModel ? X86::CALL64r : X86::CALL64pcrel32;
  else
    CallOp = X86::CALLpcrel32;

  const char *Symbol;
  if (Is64Bit) {
    if (STI.isTargetCygMing()) {
      Symbol = "___chkstk_ms";
    } else {
      Symbol = "__chkstk";
    }
  } else if (STI.isTargetCygMing())
    Symbol = "_alloca";
  else
    Symbol = "_chkstk";

  MachineInstrBuilder CI;

  // All current stack probes take AX and SP as input, clobber flags, and
  // preserve all registers. x86_64 probes leave RSP unmodified.
  if (Is64Bit && IsLargeCodeModel) {
    // For the large code model, we have to call through a register. Use R11,
    // as it is scratch in all supported calling conventions.
    BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::R11)
        .addExternalSymbol(Symbol);
    CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp)).addReg(X86::R11);
  } else {
    CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp)).addExternalSymbol(Symbol);
  }

  unsigned AX = Is64Bit ? X86::RAX : X86::EAX;
  unsigned SP = Is64Bit ? X86::RSP : X86::ESP;
  CI.addReg(AX, RegState::Implicit)
      .addReg(SP, RegState::Implicit)
      .addReg(AX, RegState::Define | RegState::Implicit)
      .addReg(SP, RegState::Define | RegState::Implicit)
      .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);

  if (Is64Bit) {
    // MSVC x64's __chkstk and cygwin/mingw's ___chkstk_ms do not adjust %rsp
    // themselves. They also do not clobber %rax, so we can reuse it when
    // adjusting %rsp.
    BuildMI(MBB, MBBI, DL, TII.get(X86::SUB64rr), X86::RSP)
        .addReg(X86::RSP)
        .addReg(X86::RAX);
  }
}
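
// For illustration, for a Win64 function allocating NNN bytes this expands to:
//   movq $NNN, %rax
//   callq __chkstk          ; probes each page, leaves %rsp untouched
//   subq %rax, %rsp         ; the SUB64rr added above
// and under the large code model the call goes through a scratch register:
//   movabsq $__chkstk, %r11
//   callq *%r11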

/// emitPrologue - Push callee-saved registers onto the stack, which
/// automatically adjusts the stack pointer. Adjust the stack pointer to
/// allocate space for local variables. Also emit labels used by the exception
/// handler to generate the exception handling frames.

/*
  Here's a gist of what gets emitted:

  ; Establish frame pointer, if needed
  [if needs FP]
      push  %rbp
      .cfi_def_cfa_offset 16
      .cfi_offset %rbp, -16
      .seh_pushreg %rbp
      mov  %rsp, %rbp
      .cfi_def_cfa_register %rbp

  ; Spill general-purpose registers
  [for all callee-saved GPRs]
      pushq %<reg>
      [if not needs FP]
          .cfi_def_cfa_offset (offset from RETADDR)
      .seh_pushreg %<reg>

  ; If the required stack alignment > default stack alignment
  ; rsp needs to be re-aligned. This creates a "re-alignment gap"
  ; of unknown size in the stack frame.
  [if stack needs re-alignment]
      and  $MASK, %rsp

  ; Allocate space for locals
  [if target is Windows and allocated space > 4096 bytes]
      ; Windows needs special care for allocations larger
      ; than one page.
      mov $NNN, %rax
      call ___chkstk_ms/___chkstk
      sub  %rax, %rsp
  [else]
      sub  $NNN, %rsp

  [if needs FP]
      .seh_stackalloc (size of XMM spill slots)
      .seh_setframe %rbp, SEHFrameOffset ; = size of all spill slots
  [else]
      .seh_stackalloc NNN

  ; Spill XMMs
  ; Note that while only the Windows 64 ABI specifies XMMs as callee-preserved,
  ; they may get spilled on any platform, if the current function
  ; calls @llvm.eh.unwind.init
  [if needs FP]
      [for all callee-saved XMM registers]
          movaps  %<xmm reg>, -MMM(%rbp)
      [for all callee-saved XMM registers]
          .seh_savexmm %<xmm reg>, (-MMM + SEHFrameOffset)
          ; i.e. the offset relative to (%rbp - SEHFrameOffset)
  [else]
      [for all callee-saved XMM registers]
          movaps  %<xmm reg>, KKK(%rsp)
      [for all callee-saved XMM registers]
          .seh_savexmm %<xmm reg>, KKK

  .seh_endprologue

  [if needs base pointer]
      mov  %rsp, %rbx
      [if needs to restore base pointer]
          mov %rsp, -MMM(%rbp)

  ; Emit CFI info
  [if needs FP]
      [for all callee-saved registers]
          .cfi_offset %<reg>, (offset from %rbp)
  [else]
      .cfi_def_cfa_offset (offset from RETADDR)
      [for all callee-saved registers]
          .cfi_offset %<reg>, (offset from %rsp)

  Notes:
  - .seh directives are emitted only for Windows 64 ABI
  - .cfi directives are emitted for all other ABIs
  - for 32-bit code, substitute %e?? registers for %r??
*/

void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
  MachineBasicBlock &MBB = MF.front(); // Prologue goes in entry BB.
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *Fn = MF.getFunction();
  const X86RegisterInfo *RegInfo =
      static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  uint64_t MaxAlign  = MFI->getMaxAlignment(); // Desired stack alignment.
  uint64_t StackSize = MFI->getStackSize();    // Number of bytes to allocate.
  bool HasFP = hasFP(MF);
  const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
  bool Is64Bit = STI.is64Bit();
  // Standard x86_64 and NaCl use 64-bit frame/stack pointers; x32 uses 32-bit.
  const bool Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();
  bool IsWin64 = STI.isTargetWin64();
  // Not necessarily synonymous with IsWin64.
  bool IsWinEH = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
  bool NeedsWinEH = IsWinEH && Fn->needsUnwindTableEntry();
  bool NeedsDwarfCFI =
      !IsWinEH && (MMI.hasDebugInfo() || Fn->needsUnwindTableEntry());
  bool UseLEA = STI.useLeaForSP();
  unsigned StackAlign = getStackAlignment();
  unsigned SlotSize = RegInfo->getSlotSize();
  unsigned FramePtr = RegInfo->getFrameRegister(MF);
  const unsigned MachineFramePtr = STI.isTarget64BitILP32() ?
      getX86SubSuperRegister(FramePtr, MVT::i64, false) : FramePtr;
  unsigned StackPtr = RegInfo->getStackRegister();
  unsigned BasePtr = RegInfo->getBaseRegister();
  DebugLoc DL;

  // If we're forcing a stack realignment we can't rely on just the frame
  // info, we need to know the ABI stack alignment as well in case we
  // have a call out. Otherwise just make sure we have some alignment - we'll
  // go with the minimum SlotSize.
  if (ForceStackAlign) {
    if (MFI->hasCalls())
      MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
    else if (MaxAlign < SlotSize)
      MaxAlign = SlotSize;
  }

  // Add RETADDR move area to callee saved frame size.
  int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
  if (TailCallReturnAddrDelta < 0)
    X86FI->setCalleeSavedFrameSize(
      X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta);

  bool UseStackProbe = (STI.isOSWindows() && !STI.isTargetMachO());

  // The default stack probe size is 4096 if the function has no
  // "stack-probe-size" attribute.
  unsigned StackProbeSize = 4096;
  if (Fn->hasFnAttribute("stack-probe-size"))
    Fn->getFnAttribute("stack-probe-size")
        .getValueAsString()
        .getAsInteger(0, StackProbeSize);
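
  // For illustration, the attribute arrives as an IR string attribute,
  // e.g. on a hypothetical function @f:
  //   define void @f() "stack-probe-size"="8192" { ... }
  // which raises the probing threshold for @f from 4096 to 8192 bytes.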

  // If this is x86-64 and the Red Zone is not disabled, if we are a leaf
  // function, use up to 128 bytes of stack space, and don't have a frame
  // pointer, calls, or dynamic alloca, then we do not need to adjust the
  // stack pointer (we fit in the Red Zone). We also check that we don't
  // push and pop from the stack.
  if (Is64Bit && !Fn->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                                   Attribute::NoRedZone) &&
      !RegInfo->needsStackRealignment(MF) &&
      !MFI->hasVarSizedObjects() &&                // No dynamic alloca.
      !MFI->adjustsStack() &&                      // No calls.
      !IsWin64 &&                                  // Win64 has no Red Zone
      !usesTheStack(MF) &&                         // Don't push and pop.
      !MF.shouldSplitStack()) {                    // Regular stack
    uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
    if (HasFP) MinSize += SlotSize;
    StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
    MFI->setStackSize(StackSize);
  }
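
  // For illustration: a leaf function with StackSize = 200 and no saved CSRs
  // shrinks its allocation to 200 - 128 = 72 bytes here, while one needing
  // only 96 bytes fits entirely in the Red Zone and allocates nothing.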

  // Insert stack pointer adjustment for later moving of return addr. Only
  // applies to tail call optimized functions where the callee argument stack
  // size is bigger than the caller's.
  if (TailCallReturnAddrDelta < 0) {
    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL,
              TII.get(getSUBriOpcode(Uses64BitFramePtr, -TailCallReturnAddrDelta)),
              StackPtr)
        .addReg(StackPtr)
        .addImm(-TailCallReturnAddrDelta)
        .setMIFlag(MachineInstr::FrameSetup);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
  }

  // Mapping for machine moves:
  //
  //   DST: VirtualFP AND
  //        SRC: VirtualFP              => DW_CFA_def_cfa_offset
  //        ELSE                        => DW_CFA_def_cfa
  //
  //   SRC: VirtualFP AND
  //        DST: Register               => DW_CFA_def_cfa_register
  //
  //   ELSE
  //        OFFSET < 0                  => DW_CFA_offset_extended_sf
  //        REG < 64                    => DW_CFA_offset + Reg
  //        ELSE                        => DW_CFA_offset_extended

  uint64_t NumBytes = 0;
  int stackGrowth = -SlotSize;

  if (HasFP) {
    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    // If required, include space for extra hidden slot for stashing base
    // pointer.
    if (X86FI->getRestoreBasePointer())
      FrameSize += SlotSize;
    if (RegInfo->needsStackRealignment(MF)) {
      // Callee-saved registers are pushed on stack before the stack
      // is realigned.
      FrameSize -= X86FI->getCalleeSavedFrameSize();
      NumBytes = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;
    } else {
      NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();
    }

    // Get the offset of the stack slot for the EBP register, which is
    // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
    // Update the frame offset adjustment.
    MFI->setOffsetAdjustment(-NumBytes);

    // Save EBP/RBP into the appropriate stack slot.
    BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
      .addReg(MachineFramePtr, RegState::Kill)
      .setMIFlag(MachineInstr::FrameSetup);

    if (NeedsDwarfCFI) {
      // Mark the place where EBP/RBP was saved.
      // Define the current CFA rule to use the provided offset.
      assert(StackSize);
      unsigned CFIIndex = MMI.addFrameInst(
          MCCFIInstruction::createDefCfaOffset(nullptr, 2 * stackGrowth));
      BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);

      // Change the rule for the FramePtr to be an "offset" rule.
      unsigned DwarfFramePtr = RegInfo->getDwarfRegNum(MachineFramePtr, true);
      CFIIndex = MMI.addFrameInst(
          MCCFIInstruction::createOffset(nullptr,
                                         DwarfFramePtr, 2 * stackGrowth));
      BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
    }

    if (NeedsWinEH) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
          .addImm(FramePtr)
          .setMIFlag(MachineInstr::FrameSetup);
    }

    // Update EBP with the new base value.
    BuildMI(MBB, MBBI, DL,
            TII.get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr), FramePtr)
        .addReg(StackPtr)
        .setMIFlag(MachineInstr::FrameSetup);

    if (NeedsDwarfCFI) {
      // Mark the effective beginning of the frame pointer's validity.
      // Define the current CFA to use the EBP/RBP register.
      unsigned DwarfFramePtr = RegInfo->getDwarfRegNum(MachineFramePtr, true);
      unsigned CFIIndex = MMI.addFrameInst(
          MCCFIInstruction::createDefCfaRegister(nullptr, DwarfFramePtr));
      BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
    }

    // Mark the FramePtr as live-in in every block.
    for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I)
      I->addLiveIn(MachineFramePtr);
  } else {
    NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
  }

  // Skip the callee-saved push instructions.
  bool PushedRegs = false;
  int StackOffset = 2 * stackGrowth;

  while (MBBI != MBB.end() &&
         (MBBI->getOpcode() == X86::PUSH32r ||
          MBBI->getOpcode() == X86::PUSH64r)) {
    PushedRegs = true;
    unsigned Reg = MBBI->getOperand(0).getReg();
    ++MBBI;

    if (!HasFP && NeedsDwarfCFI) {
      // Mark callee-saved push instruction.
      // Define the current CFA rule to use the provided offset.
      assert(StackSize);
      unsigned CFIIndex = MMI.addFrameInst(
          MCCFIInstruction::createDefCfaOffset(nullptr, StackOffset));
      BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
      StackOffset += stackGrowth;
    }

    if (NeedsWinEH) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg)).addImm(Reg).setMIFlag(
          MachineInstr::FrameSetup);
    }
  }

  // Realign stack after we pushed callee-saved registers (so that we'll be
  // able to calculate their offsets from the frame pointer).
  if (RegInfo->needsStackRealignment(MF)) {
    assert(HasFP && "There should be a frame pointer if stack is realigned.");
    uint64_t Val = -MaxAlign;
    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL,
              TII.get(getANDriOpcode(Uses64BitFramePtr, Val)), StackPtr)
        .addReg(StackPtr)
        .addImm(Val)
        .setMIFlag(MachineInstr::FrameSetup);

    // The EFLAGS implicit def is dead.
    MI->getOperand(3).setIsDead();
  }
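
  // For illustration, with MaxAlign = 32 the instruction above is
  // "andq $-32, %rsp", which clears the low five bits of the stack pointer.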

  // If there is a SUB32ri of ESP immediately before this instruction, merge
  // the two. This can be the case when tail call elimination is enabled and
  // the callee has more arguments than the caller.
  NumBytes -= mergeSPUpdates(MBB, MBBI, StackPtr, true);

  // If there is an ADD32ri or SUB32ri of ESP immediately after this
  // instruction, merge the two instructions.
  mergeSPUpdatesDown(MBB, MBBI, StackPtr, &NumBytes);

  // Adjust stack pointer: ESP -= numbytes.

  // Windows and cygwin/mingw require a prologue helper routine when allocating
  // more than 4K bytes on the stack. Windows uses __chkstk and cygwin/mingw
  // uses __alloca. __alloca and the 32-bit version of __chkstk will probe the
  // stack and adjust the stack pointer in one go. The 64-bit version of
  // __chkstk is only responsible for probing the stack. The 64-bit prologue is
  // responsible for adjusting the stack pointer. Touching the stack at 4K
  // increments is necessary to ensure that the guard pages used by the OS
  // virtual memory manager are allocated in correct sequence.
  if (NumBytes >= StackProbeSize && UseStackProbe) {
    // Check whether EAX is livein for this function.
    bool isEAXAlive = isEAXLiveIn(MF);

    if (isEAXAlive) {
      // Sanity check: EAX may only be live-in here in 32-bit mode, so assert
      // on the 64-bit case.
      assert(!Is64Bit && "EAX is livein in x64 case!");

      // Save EAX
      BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
        .addReg(X86::EAX, RegState::Kill)
        .setMIFlag(MachineInstr::FrameSetup);
    }

    if (Is64Bit) {
      // Handle the 64-bit Windows ABI case where we need to call __chkstk.
      // Function prologue is responsible for adjusting the stack pointer.
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::RAX)
        .addImm(NumBytes)
        .setMIFlag(MachineInstr::FrameSetup);
    } else {
      // Allocate NumBytes-4 bytes on stack in case of isEAXAlive.
      // We'll also use 4 already allocated bytes for EAX.
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
        .addImm(isEAXAlive ? NumBytes - 4 : NumBytes)
        .setMIFlag(MachineInstr::FrameSetup);
    }

    // Save a pointer to the MI where we set AX.
    MachineBasicBlock::iterator SetRAX = MBBI;
    --SetRAX;

    // Call the stack probe function (__chkstk, ___chkstk_ms, or _alloca).
    emitStackProbeCall(MF, MBB, MBBI, DL);

    // Apply the frame setup flag to all inserted instrs.
    for (; SetRAX != MBBI; ++SetRAX)
      SetRAX->setFlag(MachineInstr::FrameSetup);

    if (isEAXAlive) {
      // Restore EAX
      MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm),
                                              X86::EAX),
                                      StackPtr, false, NumBytes - 4);
      MI->setFlag(MachineInstr::FrameSetup);
      MBB.insert(MBBI, MI);
    }
  } else if (NumBytes) {
    emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit,
                 Uses64BitFramePtr, UseLEA, TII, *RegInfo);
  }

  int SEHFrameOffset = 0;
  if (NeedsWinEH) {
    if (HasFP) {
      // We need to set the frame base offset low enough that all saved
      // register offsets are positive relative to it, but we can't
      // just use NumBytes, because the .seh_setframe offset must be <= 240.
      // So we pretend to have only allocated enough space to spill the
      // non-volatile registers.
      // We don't care about the rest of stack allocation, because the unwinder
      // will restore SP to (BP - SEHFrameOffset).
      for (const CalleeSavedInfo &Info : MFI->getCalleeSavedInfo()) {
        int offset = MFI->getObjectOffset(Info.getFrameIdx());
        SEHFrameOffset = std::max(SEHFrameOffset, std::abs(offset));
      }
      SEHFrameOffset += SEHFrameOffset % 16; // ensure alignment

      // This only needs to account for XMM spill slots; GPR slots
      // are covered by the .seh_pushreg's emitted above.
      unsigned Size = SEHFrameOffset - X86FI->getCalleeSavedFrameSize();
      if (Size) {
        BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlloc))
            .addImm(Size)
            .setMIFlag(MachineInstr::FrameSetup);
      }

      BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame))
          .addImm(FramePtr)
          .addImm(SEHFrameOffset)
          .setMIFlag(MachineInstr::FrameSetup);
    } else {
      // SP will be the base register for restoring XMMs.
      if (NumBytes) {
        BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlloc))
            .addImm(NumBytes)
            .setMIFlag(MachineInstr::FrameSetup);
      }
    }
  }

  // Skip the rest of the register spilling code.
  while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup))
    ++MBBI;

  // Emit SEH info for non-GPRs.
  if (NeedsWinEH) {
    for (const CalleeSavedInfo &Info : MFI->getCalleeSavedInfo()) {
      unsigned Reg = Info.getReg();
      if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
        continue;
      assert(X86::FR64RegClass.contains(Reg) && "Unexpected register class");

      int Offset = getFrameIndexOffset(MF, Info.getFrameIdx());
      Offset += SEHFrameOffset;

      BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SaveXMM))
          .addImm(Reg)
          .addImm(Offset)
          .setMIFlag(MachineInstr::FrameSetup);
    }

    BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_EndPrologue))
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // If we need a base pointer, set it up here. It's whatever the value
  // of the stack pointer is at this point. Any variable size objects
  // will be allocated after this, so we can still use the base pointer
  // to reference locals.
  if (RegInfo->hasBasePointer(MF)) {
    // Update the base pointer with the current stack pointer.
    unsigned Opc = Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr;
    BuildMI(MBB, MBBI, DL, TII.get(Opc), BasePtr)
      .addReg(StackPtr)
      .setMIFlag(MachineInstr::FrameSetup);
    if (X86FI->getRestoreBasePointer()) {
      // Stash the value of the base pointer. Saving RSP instead of EBP
      // shortens the dependence chain.
      unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)),
                   FramePtr, true, X86FI->getRestoreBasePointerOffset())
        .addReg(StackPtr)
        .setMIFlag(MachineInstr::FrameSetup);
    }
  }

  if (((!HasFP && NumBytes) || PushedRegs) && NeedsDwarfCFI) {
    // Mark end of stack pointer adjustment.
    if (!HasFP && NumBytes) {
      // Define the current CFA rule to use the provided offset.
      assert(StackSize);
      unsigned CFIIndex = MMI.addFrameInst(
          MCCFIInstruction::createDefCfaOffset(nullptr,
                                               -StackSize + stackGrowth));

      BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
          .addCFIIndex(CFIIndex);
    }

    // Emit DWARF info specifying the offsets of the callee-saved registers.
    if (PushedRegs)
      emitCalleeSavedFrameMoves(MBB, MBBI, DL);
  }
}

void X86FrameLowering::emitEpilogue(MachineFunction &MF,
                                    MachineBasicBlock &MBB) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  const X86RegisterInfo *RegInfo =
      static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  assert(MBBI != MBB.end() && "Returning block has no instructions");
  unsigned RetOpcode = MBBI->getOpcode();
  DebugLoc DL = MBBI->getDebugLoc();
  const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
  bool Is64Bit = STI.is64Bit();
  // Standard x86_64 and NaCl use 64-bit frame/stack pointers; x32 uses 32-bit.
  const bool Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();
  const bool Is64BitILP32 = STI.isTarget64BitILP32();
  bool UseLEA = STI.useLeaForSP();
  unsigned StackAlign = getStackAlignment();
  unsigned SlotSize = RegInfo->getSlotSize();
  unsigned FramePtr = RegInfo->getFrameRegister(MF);
  unsigned MachineFramePtr = Is64BitILP32 ?
      getX86SubSuperRegister(FramePtr, MVT::i64, false) : FramePtr;
  unsigned StackPtr = RegInfo->getStackRegister();

  bool IsWinEH = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
  bool NeedsWinEH = IsWinEH && MF.getFunction()->needsUnwindTableEntry();

  switch (RetOpcode) {
  default:
    llvm_unreachable("Can only insert epilog into returning blocks");
  case X86::RETQ:
  case X86::RETL:
  case X86::RETIL:
  case X86::RETIQ:
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNmi:
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNmi64:
  case X86::EH_RETURN:
  case X86::EH_RETURN64:
    break; // These are ok
  }

  // Get the number of bytes to allocate from the FrameInfo.
  uint64_t StackSize = MFI->getStackSize();
  uint64_t MaxAlign = MFI->getMaxAlignment();
  unsigned CSSize = X86FI->getCalleeSavedFrameSize();
  uint64_t NumBytes = 0;

  // If we're forcing a stack realignment we can't rely on just the frame
  // info, we need to know the ABI stack alignment as well in case we
  // have a call out. Otherwise just make sure we have some alignment - we'll
  // go with the minimum.
  if (ForceStackAlign) {
    if (MFI->hasCalls())
      MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
    else
      MaxAlign = MaxAlign ? MaxAlign : 4;
  }

  if (hasFP(MF)) {
    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (RegInfo->needsStackRealignment(MF)) {
      // Callee-saved registers were pushed on stack before the stack
      // was realigned.
      FrameSize -= CSSize;
      NumBytes = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;
    } else {
      NumBytes = FrameSize - CSSize;
    }

    // Pop EBP.
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::POP64r : X86::POP32r), MachineFramePtr);
  } else {
    NumBytes = StackSize - CSSize;
  }

  // Skip the callee-saved pop instructions.
  while (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PI = std::prev(MBBI);
    unsigned Opc = PI->getOpcode();

    if (Opc != X86::POP32r && Opc != X86::POP64r && Opc != X86::DBG_VALUE &&
        !PI->isTerminator())
      break;

    --MBBI;
  }
  MachineBasicBlock::iterator FirstCSPop = MBBI;

  DL = MBBI->getDebugLoc();

  // If there is an ADD32ri or SUB32ri of ESP immediately before this
  // instruction, merge the two instructions.
  if (NumBytes || MFI->hasVarSizedObjects())
    mergeSPUpdatesUp(MBB, MBBI, StackPtr, &NumBytes);

  // If dynamic alloca is used, then reset ESP to point to the last
  // callee-saved slot before popping them off! The same applies when the
  // stack was realigned.
  if (RegInfo->needsStackRealignment(MF) || MFI->hasVarSizedObjects()) {
    if (RegInfo->needsStackRealignment(MF))
      MBBI = FirstCSPop;
    if (CSSize != 0) {
      unsigned Opc = getLEArOpcode(Uses64BitFramePtr);
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),
                   FramePtr, false, -CSSize);
      --MBBI;
    } else {
      unsigned Opc = (Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr);
      BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
        .addReg(FramePtr);
      --MBBI;
    }
  } else if (NumBytes) {
    // Adjust stack pointer back: ESP += numbytes.
    emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, Uses64BitFramePtr,
                 UseLEA, TII, *RegInfo);
    --MBBI;
  }

  // The Windows unwinder will not invoke a function's exception handler if IP
  // is either in the prologue or in the epilogue. This behavior causes a
  // problem when a call immediately precedes an epilogue, because the return
  // address points into the epilogue. To cope with that, we insert an epilogue
  // marker here, then replace it with a 'nop' if it ends up immediately after
  // a CALL in the final emitted code.
  if (NeedsWinEH)
    BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_Epilogue));

  // We're returning from function via eh_return.
  if (RetOpcode == X86::EH_RETURN || RetOpcode == X86::EH_RETURN64) {
    MBBI = MBB.getLastNonDebugInstr();
    MachineOperand &DestAddr = MBBI->getOperand(0);
    assert(DestAddr.isReg() && "Offset should be in register!");
    BuildMI(MBB, MBBI, DL,
            TII.get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(DestAddr.getReg());
  } else if (RetOpcode == X86::TCRETURNri || RetOpcode == X86::TCRETURNdi ||
             RetOpcode == X86::TCRETURNmi ||
             RetOpcode == X86::TCRETURNri64 || RetOpcode == X86::TCRETURNdi64 ||
             RetOpcode == X86::TCRETURNmi64) {
    bool isMem = RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64;
    // Tail call return: adjust the stack pointer and jump to callee.
    MBBI = MBB.getLastNonDebugInstr();
    MachineOperand &JumpTarget = MBBI->getOperand(0);
    MachineOperand &StackAdjust = MBBI->getOperand(isMem ? 5 : 1);
    assert(StackAdjust.isImm() && "Expecting immediate value.");

    // Adjust stack pointer.
    int StackAdj = StackAdjust.getImm();
    int MaxTCDelta = X86FI->getTCReturnAddrDelta();
    int Offset = 0;
    assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive");

    // Incorporate the retaddr area.
    Offset = StackAdj - MaxTCDelta;
    assert(Offset >= 0 && "Offset should never be negative");

    if (Offset) {
      // Check for possible merge with preceding ADD instruction.
      Offset += mergeSPUpdates(MBB, MBBI, StackPtr, true);
      emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, Uses64BitFramePtr,
                   UseLEA, TII, *RegInfo);
    }

    // Jump to label or value in register.
    bool IsWin64 = STI.isTargetWin64();
    if (RetOpcode == X86::TCRETURNdi || RetOpcode == X86::TCRETURNdi64) {
      unsigned Op = (RetOpcode == X86::TCRETURNdi)
                        ? X86::TAILJMPd
                        : (IsWin64 ? X86::TAILJMPd64_REX : X86::TAILJMPd64);
      MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII.get(Op));
      if (JumpTarget.isGlobal())
        MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
                             JumpTarget.getTargetFlags());
      else {
        assert(JumpTarget.isSymbol());
        MIB.addExternalSymbol(JumpTarget.getSymbolName(),
                              JumpTarget.getTargetFlags());
      }
    } else if (RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64) {
      unsigned Op = (RetOpcode == X86::TCRETURNmi)
                        ? X86::TAILJMPm
                        : (IsWin64 ? X86::TAILJMPm64_REX : X86::TAILJMPm64);
      MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII.get(Op));
      for (unsigned i = 0; i != 5; ++i)
        MIB.addOperand(MBBI->getOperand(i));
    } else if (RetOpcode == X86::TCRETURNri64) {
      BuildMI(MBB, MBBI, DL,
              TII.get(IsWin64 ? X86::TAILJMPr64_REX : X86::TAILJMPr64))
          .addReg(JumpTarget.getReg(), RegState::Kill);
    } else {
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr))
          .addReg(JumpTarget.getReg(), RegState::Kill);
    }

    MachineInstr *NewMI = std::prev(MBBI);
    NewMI->copyImplicitOps(MF, MBBI);

    // Delete the pseudo instruction TCRETURN.
    MBB.erase(MBBI);
  } else if ((RetOpcode == X86::RETQ || RetOpcode == X86::RETL ||
              RetOpcode == X86::RETIQ || RetOpcode == X86::RETIL) &&
             (X86FI->getTCReturnAddrDelta() < 0)) {
    // Add the return addr area delta back since we are not tail calling.
    int delta = -1 * X86FI->getTCReturnAddrDelta();
    MBBI = MBB.getLastNonDebugInstr();

    // Check for possible merge with preceding ADD instruction.
    delta += mergeSPUpdates(MBB, MBBI, StackPtr, true);
    emitSPUpdate(MBB, MBBI, StackPtr, delta, Is64Bit, Uses64BitFramePtr,
                 UseLEA, TII, *RegInfo);
  }
}

int X86FrameLowering::getFrameIndexOffset(const MachineFunction &MF,
                                          int FI) const {
  const X86RegisterInfo *RegInfo =
      static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea();
  uint64_t StackSize = MFI->getStackSize();

  if (RegInfo->hasBasePointer(MF)) {
    assert(hasFP(MF) && "VLAs and dynamic stack realign, but no FP?!");
    if (FI < 0) {
      // Skip the saved EBP.
      return Offset + RegInfo->getSlotSize();
    } else {
      assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
      return Offset + StackSize;
    }
  } else if (RegInfo->needsStackRealignment(MF)) {
    if (FI < 0) {
      // Skip the saved EBP.
      return Offset + RegInfo->getSlotSize();
    } else {
      assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
      return Offset + StackSize;
    }
    // FIXME: Support tail calls
  } else {
    if (!hasFP(MF))
      return Offset + StackSize;

    // Skip the saved EBP.
    Offset += RegInfo->getSlotSize();

    // Skip the RETADDR move area
    const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
    int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
    if (TailCallReturnAddrDelta < 0)
      Offset -= TailCallReturnAddrDelta;
  }

  return Offset;
}

int X86FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
                                             unsigned &FrameReg) const {
  const X86RegisterInfo *RegInfo =
      static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());
  // We can't calculate offset from frame pointer if the stack is realigned,
  // so enforce usage of stack/base pointer. The base pointer is used when we
  // have dynamic allocas in addition to dynamic realignment.
  if (RegInfo->hasBasePointer(MF))
    FrameReg = RegInfo->getBaseRegister();
  else if (RegInfo->needsStackRealignment(MF))
    FrameReg = RegInfo->getStackRegister();
  else
    FrameReg = RegInfo->getFrameRegister(MF);
  return getFrameIndexOffset(MF, FI);
}

// Simplified from getFrameIndexOffset keeping only StackPointer cases
int X86FrameLowering::getFrameIndexOffsetFromSP(const MachineFunction &MF,
                                                int FI) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  // Does not include any dynamic realign.
  const uint64_t StackSize = MFI->getStackSize();
  {
#ifndef NDEBUG
    const X86RegisterInfo *RegInfo =
        static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());
    // Note: LLVM arranges the stack as:
    // Args > Saved RetPC (<--FP) > CSRs > dynamic alignment (<--BP)
    //      > "Stack Slots" (<--SP)
    // We can always address StackSlots from RSP. We can usually (unless
    // needsStackRealignment) address CSRs from RSP, but sometimes need to
    // address them from RBP. FixedObjects can be placed anywhere in the stack
    // frame depending on their specific requirements (i.e. we can actually
    // refer to arguments to the function which are stored in the *caller's*
    // frame). As a result, THE RESULT OF THIS CALL IS MEANINGLESS FOR CSRs
    // AND FixedObjects IFF needsStackRealignment or hasVarSizedObjects.

    assert(!RegInfo->hasBasePointer(MF) && "we don't handle this case");

    // We don't handle tail calls, and shouldn't be seeing them
    // either.
    int TailCallReturnAddrDelta =
        MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta();
    assert(!(TailCallReturnAddrDelta < 0) && "we don't handle this case!");
#endif
  }

  // This is how the math works out:
  //
  //  %rsp grows (i.e. gets lower) left to right. Each box below is
  //  one word (eight bytes). Obj0 is the stack slot we're trying to
  //  get to.
  //
  //    ----------------------------------
  //    | BP | Obj0 | Obj1 | ... | ObjN |
  //    ----------------------------------
  //    ^    ^      ^                   ^
  //    A    B      C                   E
  //
  //  A is the incoming stack pointer.
  //  (B - A) is the local area offset (-8 for x86-64) [1]
  //  (C - A) is the Offset returned by MFI->getObjectOffset for Obj0 [2]
  //
  //  |(E - B)| is the StackSize (absolute value, positive). For a
  //  stack that grows down, this works out to be (B - E). [3]
  //
  //  E is also the value of %rsp after the stack has been set up, and we
  //  want (C - E) -- the value we can add to %rsp to get to Obj0. Now
  //  (C - E) == (C - A) - (B - A) + (B - E)
  //             { Using [1], [2] and [3] above }
  //          == getObjectOffset - LocalAreaOffset + StackSize
  //
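  // Worked example (hypothetical numbers): with getObjectOffset(Obj0) == -16,
  // a local area offset of -8, and StackSize == 40, the result is
  // -16 - (-8) + 40 == 32, i.e. Obj0 lives at 32(%rsp).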

  // Get the Offset from the StackPointer
  int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea();

  return Offset + StackSize;
}

// Simplified from getFrameIndexReference keeping only StackPointer cases
int X86FrameLowering::getFrameIndexReferenceFromSP(const MachineFunction &MF,
                                                   int FI,
                                                   unsigned &FrameReg) const {
  const X86RegisterInfo *RegInfo =
      static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());

  assert(!RegInfo->hasBasePointer(MF) && "we don't handle this case");

  FrameReg = RegInfo->getStackRegister();
  return getFrameIndexOffsetFromSP(MF, FI);
}

bool X86FrameLowering::assignCalleeSavedSpillSlots(
    MachineFunction &MF, const TargetRegisterInfo *TRI,
    std::vector<CalleeSavedInfo> &CSI) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const X86RegisterInfo *RegInfo =
      static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());
  unsigned SlotSize = RegInfo->getSlotSize();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();

  unsigned CalleeSavedFrameSize = 0;
  int SpillSlotOffset = getOffsetOfLocalArea() + X86FI->getTCReturnAddrDelta();

  if (hasFP(MF)) {
    // emitPrologue always spills the frame register first.
    SpillSlotOffset -= SlotSize;
    MFI->CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);

    // Since emitPrologue and emitEpilogue will handle spilling and restoring of
    // the frame register, we can delete it from the CSI list and not have to
    // worry about avoiding it later.
    unsigned FPReg = RegInfo->getFrameRegister(MF);
    for (unsigned i = 0; i < CSI.size(); ++i) {
      if (TRI->regsOverlap(CSI[i].getReg(), FPReg)) {
        CSI.erase(CSI.begin() + i);
        break;
      }
    }
  }

  // Assign slots for GPRs. It increases frame size.
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i - 1].getReg();

    if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
      continue;

    SpillSlotOffset -= SlotSize;
    CalleeSavedFrameSize += SlotSize;

    int SlotIndex = MFI->CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
    CSI[i - 1].setFrameIdx(SlotIndex);
  }

  X86FI->setCalleeSavedFrameSize(CalleeSavedFrameSize);

  // Assign slots for XMMs.
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i - 1].getReg();
    if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
      continue;

    const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);
    // Ensure alignment.
    SpillSlotOffset -= std::abs(SpillSlotOffset) % RC->getAlignment();
    // Spill into slot.
    SpillSlotOffset -= RC->getSize();
    int SlotIndex =
        MFI->CreateFixedSpillStackObject(RC->getSize(), SpillSlotOffset);
    CSI[i - 1].setFrameIdx(SlotIndex);
    MFI->ensureMaxAlignment(RC->getAlignment());
  }

  return true;
}
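
// For illustration, on x86-64 with a frame pointer and a hypothetical CSR set
// of {RBP, RBX, XMM15}, the loops above assign (SlotSize = 8, local area
// offset = -8):
//   RBP   -> fixed slot at -16 (created up front, then dropped from CSI)
//   RBX   -> slot at -24, CalleeSavedFrameSize becomes 8
//   XMM15 -> 16-byte slot at -48, after first aligning -24 down to -32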

bool X86FrameLowering::spillCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    const std::vector<CalleeSavedInfo> &CSI,
    const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBB.findDebugLoc(MI);

  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();

  // Push GPRs. It increases frame size.
  unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i - 1].getReg();

    if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
      continue;
    // Add the callee-saved register as live-in. It's killed at the spill.
    MBB.addLiveIn(Reg);

    BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, RegState::Kill)
      .setMIFlag(MachineInstr::FrameSetup);
  }

  // Spill the XMM registers. X86 has no push/pop for XMMs, so store them
  // to their stack frame slots instead.
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i - 1].getReg();
    if (X86::GR64RegClass.contains(Reg) ||
        X86::GR32RegClass.contains(Reg))
      continue;
    // Add the callee-saved register as live-in. It's killed at the spill.
    MBB.addLiveIn(Reg);
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);

    TII.storeRegToStackSlot(MBB, MI, Reg, true, CSI[i - 1].getFrameIdx(), RC,
                            TRI);
    --MI;
    MI->setFlag(MachineInstr::FrameSetup);
    ++MI;
  }

  return true;
}

bool X86FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       const std::vector<CalleeSavedInfo> &CSI,
                                       const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return false;

  DebugLoc DL = MBB.findDebugLoc(MI);

  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();

  // Reload XMMs from stack frame.
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    if (X86::GR64RegClass.contains(Reg) ||
        X86::GR32RegClass.contains(Reg))
      continue;

    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
    TII.loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(), RC, TRI);
  }

  // POP GPRs.
  unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    if (!X86::GR64RegClass.contains(Reg) &&
        !X86::GR32RegClass.contains(Reg))
      continue;

    BuildMI(MBB, MI, DL, TII.get(Opc), Reg);
  }
  return true;
}

void
X86FrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
                                                       RegScavenger *RS) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const X86RegisterInfo *RegInfo =
      static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());
  unsigned SlotSize = RegInfo->getSlotSize();

  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  int64_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();

  if (TailCallReturnAddrDelta < 0) {
    // Create the RETURNADDR area:
    //   arg
    //   arg
    //   RETADDR
    //   { ...
    //     RETADDR area
    //     ...
    //   }
    //   [EBP]
    MFI->CreateFixedObject(-TailCallReturnAddrDelta,
                           TailCallReturnAddrDelta - SlotSize, true);
  }

  // Spill the BasePtr if it's used.
  if (RegInfo->hasBasePointer(MF))
    MF.getRegInfo().setPhysRegUsed(RegInfo->getBaseRegister());
}

static bool
HasNestArgument(const MachineFunction *MF) {
  const Function *F = MF->getFunction();
  for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
       I != E; I++) {
    if (I->hasNestAttr())
      return true;
  }
  return false;
}

/// GetScratchRegister - Get a temp register for performing work in the
/// segmented stack and the Erlang/HiPE stack prologue. Depending on platform
/// and the properties of the function, either one or two registers will be
/// needed. Set primary to true for the first register, false for the second.
static unsigned
GetScratchRegister(bool Is64Bit, bool IsLP64, const MachineFunction &MF,
                   bool Primary) {
  CallingConv::ID CallingConvention = MF.getFunction()->getCallingConv();

  // Erlang stuff.
  if (CallingConvention == CallingConv::HiPE) {
    if (Is64Bit)
      return Primary ? X86::R14 : X86::R13;
    else
      return Primary ? X86::EBX : X86::EDI;
  }

  if (Is64Bit) {
    if (IsLP64)
      return Primary ? X86::R11 : X86::R12;
    else
      return Primary ? X86::R11D : X86::R12D;
  }

  bool IsNested = HasNestArgument(&MF);

  if (CallingConvention == CallingConv::X86_FastCall ||
      CallingConvention == CallingConv::Fast) {
    if (IsNested)
      report_fatal_error("Segmented stacks do not support fastcall with "
                         "nested functions.");
    return Primary ? X86::EAX : X86::ECX;
  }
  if (IsNested)
    return Primary ? X86::EDX : X86::EAX;
  return Primary ? X86::ECX : X86::EAX;
}

// The stack limit in the TCB is set to this many bytes above the actual stack
// limit.
static const uint64_t kSplitStackAvailable = 256;

void
X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
  MachineBasicBlock &prologueMBB = MF.front();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  uint64_t StackSize;
  const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
  bool Is64Bit = STI.is64Bit();
  const bool IsLP64 = STI.isTarget64BitLP64();
  unsigned TlsReg, TlsOffset;
  DebugLoc DL;

  unsigned ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
  assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
         "Scratch register is live-in");

  if (MF.getFunction()->isVarArg())
    report_fatal_error("Segmented stacks do not support vararg functions.");
  if (!STI.isTargetLinux() && !STI.isTargetDarwin() && !STI.isTargetWin32() &&
      !STI.isTargetWin64() && !STI.isTargetFreeBSD() &&
      !STI.isTargetDragonFly())
    report_fatal_error("Segmented stacks not supported on this platform.");

  // Eventually StackSize will be calculated by a link-time pass, which will
  // also decide whether checking code needs to be injected into this particular
  // prologue.
  StackSize = MFI->getStackSize();

  // Do not generate a prologue for functions with a stack of size zero.
  if (StackSize == 0)
    return;

  MachineBasicBlock *allocMBB = MF.CreateMachineBasicBlock();
  MachineBasicBlock *checkMBB = MF.CreateMachineBasicBlock();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  bool IsNested = false;

  // We need to know if the function has a nest argument only in 64 bit mode.
  if (Is64Bit)
    IsNested = HasNestArgument(&MF);

  // The MOV R10, RAX needs to be in a different block, since the RET we emit in
  // allocMBB needs to be the last (terminating) instruction.

  for (MachineBasicBlock::livein_iterator i = prologueMBB.livein_begin(),
         e = prologueMBB.livein_end(); i != e; i++) {
    allocMBB->addLiveIn(*i);
    checkMBB->addLiveIn(*i);
  }

  if (IsNested)
    allocMBB->addLiveIn(IsLP64 ? X86::R10 : X86::R10D);

  MF.push_front(allocMBB);
  MF.push_front(checkMBB);

  // When the frame size is less than 256 we just compare the stack
  // boundary directly to the value of the stack pointer, per gcc.
  bool CompareStackPointer = StackSize < kSplitStackAvailable;

  // Read the limit of the current stacklet from the stack_guard location.
  if (Is64Bit) {
    if (STI.isTargetLinux()) {
      TlsReg = X86::FS;
      TlsOffset = IsLP64 ? 0x70 : 0x40;
    } else if (STI.isTargetDarwin()) {
      TlsReg = X86::GS;
      TlsOffset = 0x60 + 90*8; // See pthread_machdep.h. Steal TLS slot 90.
    } else if (STI.isTargetWin64()) {
      TlsReg = X86::GS;
      TlsOffset = 0x28; // pvArbitrary, reserved for application use
    } else if (STI.isTargetFreeBSD()) {
      TlsReg = X86::FS;
      TlsOffset = 0x18;
    } else if (STI.isTargetDragonFly()) {
      TlsReg = X86::FS;
      TlsOffset = 0x20; // use tls_tcb.tcb_segstack
    } else {
      report_fatal_error("Segmented stacks not supported on this platform.");
    }

    if (CompareStackPointer)
      ScratchReg = IsLP64 ? X86::RSP : X86::ESP;
    else
      BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::LEA64r : X86::LEA64_32r),
              ScratchReg).addReg(X86::RSP)
        .addImm(1).addReg(0).addImm(-StackSize).addReg(0);

    BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::CMP64rm : X86::CMP32rm)).addReg(ScratchReg)
- .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg);\r
- } else {\r
- if (STI.isTargetLinux()) {\r
- TlsReg = X86::GS;\r
- TlsOffset = 0x30;\r
- } else if (STI.isTargetDarwin()) {\r
- TlsReg = X86::GS;\r
- TlsOffset = 0x48 + 90*4;\r
- } else if (STI.isTargetWin32()) {\r
- TlsReg = X86::FS;\r
- TlsOffset = 0x14; // pvArbitrary, reserved for application use\r
- } else if (STI.isTargetDragonFly()) {\r
- TlsReg = X86::FS;\r
- TlsOffset = 0x10; // use tls_tcb.tcb_segstack\r
- } else if (STI.isTargetFreeBSD()) {\r
- report_fatal_error("Segmented stacks not supported on FreeBSD i386.");\r
- } else {\r
- report_fatal_error("Segmented stacks not supported on this platform.");\r
- }\r
-\r
- if (CompareStackPointer)\r
- ScratchReg = X86::ESP;\r
- else\r
- BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP)\r
- .addImm(1).addReg(0).addImm(-StackSize).addReg(0);\r
-\r
- if (STI.isTargetLinux() || STI.isTargetWin32() || STI.isTargetWin64() ||\r
- STI.isTargetDragonFly()) {\r
- BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)).addReg(ScratchReg)\r
- .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);\r
- } else if (STI.isTargetDarwin()) {\r
-\r
- // TlsOffset doesn't fit into a mod r/m byte so we need an extra register.\r
- unsigned ScratchReg2;\r
- bool SaveScratch2;\r
- if (CompareStackPointer) {\r
- // The primary scratch register is available for holding the TLS offset.\r
- ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, true);\r
- SaveScratch2 = false;\r
- } else {\r
- // Need to use a second register to hold the TLS offset\r
- ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, false);\r
-\r
- // Unfortunately, with fastcc the second scratch register may hold an\r
- // argument.\r
- SaveScratch2 = MF.getRegInfo().isLiveIn(ScratchReg2);\r
- }\r
-\r
- // If Scratch2 is live-in then it needs to be saved.\r
- assert((!MF.getRegInfo().isLiveIn(ScratchReg2) || SaveScratch2) &&\r
- "Scratch register is live-in and not saved");\r
-\r
- if (SaveScratch2)\r
- BuildMI(checkMBB, DL, TII.get(X86::PUSH32r))\r
- .addReg(ScratchReg2, RegState::Kill);\r
-\r
- BuildMI(checkMBB, DL, TII.get(X86::MOV32ri), ScratchReg2)\r
- .addImm(TlsOffset);\r
- BuildMI(checkMBB, DL, TII.get(X86::CMP32rm))\r
- .addReg(ScratchReg)\r
- .addReg(ScratchReg2).addImm(1).addReg(0)\r
- .addImm(0)\r
- .addReg(TlsReg);\r
-\r
- if (SaveScratch2)\r
- BuildMI(checkMBB, DL, TII.get(X86::POP32r), ScratchReg2);\r
- }\r
- }\r
-\r
- // This jump is taken if SP >= (Stacklet Limit + Stack Space required).\r
- // It jumps to normal execution of the function body.\r
- BuildMI(checkMBB, DL, TII.get(X86::JA_1)).addMBB(&prologueMBB);\r
-\r
- // On 32 bit we first push the arguments size and then the frame size. On 64\r
- // bit, we pass the stack frame size in r10 and the argument size in r11.\r
- if (Is64Bit) {\r
- // Functions with nested arguments use R10, so it needs to be saved across\r
- // the call to _morestack\r
-\r
- const unsigned RegAX = IsLP64 ? X86::RAX : X86::EAX;\r
- const unsigned Reg10 = IsLP64 ? X86::R10 : X86::R10D;\r
- const unsigned Reg11 = IsLP64 ? X86::R11 : X86::R11D;\r
- const unsigned MOVrr = IsLP64 ? X86::MOV64rr : X86::MOV32rr;\r
- const unsigned MOVri = IsLP64 ? X86::MOV64ri : X86::MOV32ri;\r
-\r
- if (IsNested)\r
- BuildMI(allocMBB, DL, TII.get(MOVrr), RegAX).addReg(Reg10);\r
-\r
- BuildMI(allocMBB, DL, TII.get(MOVri), Reg10)\r
- .addImm(StackSize);\r
- BuildMI(allocMBB, DL, TII.get(MOVri), Reg11)\r
- .addImm(X86FI->getArgumentStackSize());\r
- MF.getRegInfo().setPhysRegUsed(Reg10);\r
- MF.getRegInfo().setPhysRegUsed(Reg11);\r
- } else {\r
- BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))\r
- .addImm(X86FI->getArgumentStackSize());\r
- BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))\r
- .addImm(StackSize);\r
- }\r
-\r
- // __morestack is in libgcc\r
- if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {\r
- // Under the large code model, we cannot assume that __morestack lives\r
- // within 2^31 bytes of the call site, so we cannot use pc-relative\r
- // addressing. We cannot perform the call via a temporary register,\r
- // as the rax register may be used to store the static chain, and all\r
- // other suitable registers may be either callee-save or used for\r
- // parameter passing. We cannot use the stack at this point either\r
- // because __morestack manipulates the stack directly.\r
- //\r
- // To avoid these issues, perform an indirect call via a read-only memory\r
- // location containing the address.\r
- //\r
- // This solution is not perfect, as it assumes that the .rodata section\r
- // is laid out within 2^31 bytes of each function body, but this seems\r
- // to be sufficient for JIT.\r
- BuildMI(allocMBB, DL, TII.get(X86::CALL64m))\r
- .addReg(X86::RIP)\r
- .addImm(0)\r
- .addReg(0)\r
- .addExternalSymbol("__morestack_addr")\r
- .addReg(0);\r
- MF.getMMI().setUsesMorestackAddr(true);\r
- } else {\r
- if (Is64Bit)\r
- BuildMI(allocMBB, DL, TII.get(X86::CALL64pcrel32))\r
- .addExternalSymbol("__morestack");\r
- else\r
- BuildMI(allocMBB, DL, TII.get(X86::CALLpcrel32))\r
- .addExternalSymbol("__morestack");\r
- }\r
-\r
- if (IsNested)\r
- BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET_RESTORE_R10));\r
- else\r
- BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET));\r
-\r
- allocMBB->addSuccessor(&prologueMBB);\r
-\r
- checkMBB->addSuccessor(allocMBB);\r
- checkMBB->addSuccessor(&prologueMBB);\r
-\r
-#ifdef XDEBUG\r
- MF.verify();\r
-#endif\r
-}\r
-\r
-/// Erlang programs may need a special prologue to handle the stack size they\r
-/// might need at runtime. That is because Erlang/OTP does not implement a C\r
-/// stack but uses a custom implementation of hybrid stack/heap architecture.\r
-/// (for more information see Eric Stenman's Ph.D. thesis:\r
-/// http://publications.uu.se/uu/fulltext/nbn_se_uu_diva-2688.pdf)\r
-///\r
-/// CheckStack:\r
-/// temp0 = sp - MaxStack\r
-/// if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart\r
-/// OldStart:\r
-/// ...\r
-/// IncStack:\r
-/// call inc_stack # doubles the stack space\r
-/// temp0 = sp - MaxStack\r
-/// if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart\r
-void X86FrameLowering::adjustForHiPEPrologue(MachineFunction &MF) const {\r
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();\r
- MachineFrameInfo *MFI = MF.getFrameInfo();\r
- const unsigned SlotSize =\r
- static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo())\r
- ->getSlotSize();\r
- const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();\r
- const bool Is64Bit = STI.is64Bit();\r
- const bool IsLP64 = STI.isTarget64BitLP64();\r
- DebugLoc DL;\r
- // HiPE-specific values\r
- const unsigned HipeLeafWords = 24;\r
- const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5;\r
- const unsigned Guaranteed = HipeLeafWords * SlotSize;\r
- unsigned CallerStkArity = MF.getFunction()->arg_size() > CCRegisteredArgs ?\r
- MF.getFunction()->arg_size() - CCRegisteredArgs : 0;\r
- unsigned MaxStack = MFI->getStackSize() + CallerStkArity*SlotSize + SlotSize;\r
-\r
- assert(STI.isTargetLinux() &&\r
- "HiPE prologue is only supported on Linux operating systems.");\r
-\r
- // Compute the largest caller's frame that is needed to fit the callees'\r
- // frames. This 'MaxStack' is computed from:\r
- //\r
- // a) the fixed frame size, which is the space needed for all spilled temps,\r
- // b) outgoing on-stack parameter areas, and\r
- // c) the minimum stack space this function needs to make available for the\r
- // functions it calls (a tunable ABI property).\r
- if (MFI->hasCalls()) {\r
- unsigned MoreStackForCalls = 0;\r
-\r
- for (MachineFunction::iterator MBBI = MF.begin(), MBBE = MF.end();\r
- MBBI != MBBE; ++MBBI)\r
- for (MachineBasicBlock::iterator MI = MBBI->begin(), ME = MBBI->end();\r
- MI != ME; ++MI) {\r
- if (!MI->isCall())\r
- continue;\r
-\r
- // Get callee operand.\r
- const MachineOperand &MO = MI->getOperand(0);\r
-\r
- // Only take account of global function calls (no closures etc.).\r
- if (!MO.isGlobal())\r
- continue;\r
-\r
- const Function *F = dyn_cast<Function>(MO.getGlobal());\r
- if (!F)\r
- continue;\r
-\r
- // Do not update 'MaxStack' for primitive and built-in functions\r
- // (encoded with names either starting with "erlang."/"bif_" or not\r
- // having a ".", such as a simple <Module>.<Function>.<Arity>, or an\r
- // "_", such as the BIF "suspend_0") as they are executed on another\r
- // stack.\r
- if (F->getName().find("erlang.") != StringRef::npos ||\r
- F->getName().find("bif_") != StringRef::npos ||\r
- F->getName().find_first_of("._") == StringRef::npos)\r
- continue;\r
-\r
- unsigned CalleeStkArity =\r
- F->arg_size() > CCRegisteredArgs ? F->arg_size()-CCRegisteredArgs : 0;\r
- if (HipeLeafWords - 1 > CalleeStkArity)\r
- MoreStackForCalls = std::max(MoreStackForCalls,\r
- (HipeLeafWords - 1 - CalleeStkArity) * SlotSize);\r
- }\r
- MaxStack += MoreStackForCalls;\r
- }\r
-\r
- // If the stack frame needed is larger than the guaranteed then runtime checks\r
- // and calls to "inc_stack_0" BIF should be inserted in the assembly prologue.\r
- if (MaxStack > Guaranteed) {\r
- MachineBasicBlock &prologueMBB = MF.front();\r
- MachineBasicBlock *stackCheckMBB = MF.CreateMachineBasicBlock();\r
- MachineBasicBlock *incStackMBB = MF.CreateMachineBasicBlock();\r
-\r
- for (MachineBasicBlock::livein_iterator I = prologueMBB.livein_begin(),\r
- E = prologueMBB.livein_end(); I != E; I++) {\r
- stackCheckMBB->addLiveIn(*I);\r
- incStackMBB->addLiveIn(*I);\r
- }\r
-\r
- MF.push_front(incStackMBB);\r
- MF.push_front(stackCheckMBB);\r
-\r
- unsigned ScratchReg, SPReg, PReg, SPLimitOffset;\r
- unsigned LEAop, CMPop, CALLop;\r
- if (Is64Bit) {\r
- SPReg = X86::RSP;\r
- PReg = X86::RBP;\r
- LEAop = X86::LEA64r;\r
- CMPop = X86::CMP64rm;\r
- CALLop = X86::CALL64pcrel32;\r
- SPLimitOffset = 0x90;\r
- } else {\r
- SPReg = X86::ESP;\r
- PReg = X86::EBP;\r
- LEAop = X86::LEA32r;\r
- CMPop = X86::CMP32rm;\r
- CALLop = X86::CALLpcrel32;\r
- SPLimitOffset = 0x4c;\r
- }\r
-\r
- ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);\r
- assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&\r
- "HiPE prologue scratch register is live-in");\r
-\r
- // Create new MBB for StackCheck:\r
- addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(LEAop), ScratchReg),\r
- SPReg, false, -MaxStack);\r
- // SPLimitOffset is in a fixed heap location (pointed by BP).\r
- addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(CMPop))\r
- .addReg(ScratchReg), PReg, false, SPLimitOffset);\r
- BuildMI(stackCheckMBB, DL, TII.get(X86::JAE_1)).addMBB(&prologueMBB);\r
-\r
- // Create new MBB for IncStack:\r
- BuildMI(incStackMBB, DL, TII.get(CALLop)).\r
- addExternalSymbol("inc_stack_0");\r
- addRegOffset(BuildMI(incStackMBB, DL, TII.get(LEAop), ScratchReg),\r
- SPReg, false, -MaxStack);\r
- addRegOffset(BuildMI(incStackMBB, DL, TII.get(CMPop))\r
- .addReg(ScratchReg), PReg, false, SPLimitOffset);\r
- BuildMI(incStackMBB, DL, TII.get(X86::JLE_1)).addMBB(incStackMBB);\r
-\r
- stackCheckMBB->addSuccessor(&prologueMBB, 99);\r
- stackCheckMBB->addSuccessor(incStackMBB, 1);\r
- incStackMBB->addSuccessor(&prologueMBB, 99);\r
- incStackMBB->addSuccessor(incStackMBB, 1);\r
- }\r
-#ifdef XDEBUG\r
- MF.verify();\r
-#endif\r
-}\r
-\r
-void X86FrameLowering::\r
-eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,\r
- MachineBasicBlock::iterator I) const {\r
- const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();\r
- const X86RegisterInfo &RegInfo = *static_cast<const X86RegisterInfo *>(\r
- MF.getSubtarget().getRegisterInfo());\r
- unsigned StackPtr = RegInfo.getStackRegister();\r
- bool reserveCallFrame = hasReservedCallFrame(MF);\r
- int Opcode = I->getOpcode();\r
- bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();\r
- const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();\r
- bool IsLP64 = STI.isTarget64BitLP64();\r
- DebugLoc DL = I->getDebugLoc();\r
- uint64_t Amount = !reserveCallFrame ? I->getOperand(0).getImm() : 0;\r
- uint64_t InternalAmt = (isDestroy || Amount) ? I->getOperand(1).getImm() : 0;\r
- I = MBB.erase(I);\r
-\r
- if (!reserveCallFrame) {\r
- // If the stack pointer can be changed after prologue, turn the\r
- // adjcallstackup instruction into a 'sub ESP, <amt>' and the\r
- // adjcallstackdown instruction into 'add ESP, <amt>'\r
- if (Amount == 0)\r
- return;\r
-\r
- // We need to keep the stack aligned properly. To do this, we round the\r
- // amount of space needed for the outgoing arguments up to the next\r
- // alignment boundary.\r
- unsigned StackAlign = MF.getTarget()\r
- .getSubtargetImpl()\r
- ->getFrameLowering()\r
- ->getStackAlignment();\r
- Amount = (Amount + StackAlign - 1) / StackAlign * StackAlign;\r
-\r
- MachineInstr *New = nullptr;\r
-\r
- // Factor out the amount that gets handled inside the sequence\r
- // (Pushes of argument for frame setup, callee pops for frame destroy)\r
- Amount -= InternalAmt;\r
-\r
- if (Amount) {\r
- if (Opcode == TII.getCallFrameSetupOpcode()) {\r
- New = BuildMI(MF, DL, TII.get(getSUBriOpcode(IsLP64, Amount)), StackPtr)\r
- .addReg(StackPtr).addImm(Amount);\r
- } else {\r
- assert(Opcode == TII.getCallFrameDestroyOpcode());\r
-\r
- unsigned Opc = getADDriOpcode(IsLP64, Amount);\r
- New = BuildMI(MF, DL, TII.get(Opc), StackPtr)\r
- .addReg(StackPtr).addImm(Amount);\r
- }\r
- }\r
-\r
- if (New) {\r
- // The EFLAGS implicit def is dead.\r
- New->getOperand(3).setIsDead();\r
-\r
- // Replace the pseudo instruction with a new instruction.\r
- MBB.insert(I, New);\r
- }\r
-\r
- return;\r
- }\r
-\r
- if (Opcode == TII.getCallFrameDestroyOpcode() && InternalAmt) {\r
- // If we are performing frame pointer elimination and if the callee pops\r
- // something off the stack pointer, add it back. We do this until we have\r
- // more advanced stack pointer tracking ability.\r
- unsigned Opc = getSUBriOpcode(IsLP64, InternalAmt);\r
- MachineInstr *New = BuildMI(MF, DL, TII.get(Opc), StackPtr)\r
- .addReg(StackPtr).addImm(InternalAmt);\r
-\r
- // The EFLAGS implicit def is dead.\r
- New->getOperand(3).setIsDead();\r
-\r
- // We are not tracking the stack pointer adjustment by the callee, so make\r
- // sure we restore the stack pointer immediately after the call, there may\r
- // be spill code inserted between the CALL and ADJCALLSTACKUP instructions.\r
- MachineBasicBlock::iterator B = MBB.begin();\r
- while (I != B && !std::prev(I)->isCall())\r
- --I;\r
- MBB.insert(I, New);\r
- }\r
-}\r
-\r
+//===-- X86FrameLowering.cpp - X86 Frame Information ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the X86 implementation of TargetFrameLowering class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "X86FrameLowering.h"
+#include "X86InstrBuilder.h"
+#include "X86InstrInfo.h"
+#include "X86MachineFunctionInfo.h"
+#include "X86Subtarget.h"
+#include "X86TargetMachine.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Function.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Support/Debug.h"
+#include <cstdlib>
+
+using namespace llvm;
+
+// FIXME: completely move here.
+extern cl::opt<bool> ForceStackAlign;
+
+bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
+ return !MF.getFrameInfo()->hasVarSizedObjects();
+}
+
+/// hasFP - Return true if the specified function should have a dedicated frame
+/// pointer register. This is true if the function has variable sized allocas
+/// or if frame pointer elimination is disabled.
+bool X86FrameLowering::hasFP(const MachineFunction &MF) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ const MachineModuleInfo &MMI = MF.getMMI();
+ const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
+
+ return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
+ RegInfo->needsStackRealignment(MF) ||
+ MFI->hasVarSizedObjects() ||
+ MFI->isFrameAddressTaken() || MFI->hasInlineAsmWithSPAdjust() ||
+ MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
+ MMI.callsUnwindInit() || MMI.callsEHReturn() ||
+ MFI->hasStackMap() || MFI->hasPatchPoint());
+}
+
+static unsigned getSUBriOpcode(unsigned IsLP64, int64_t Imm) {
+ if (IsLP64) {
+ if (isInt<8>(Imm))
+ return X86::SUB64ri8;
+ return X86::SUB64ri32;
+ } else {
+ if (isInt<8>(Imm))
+ return X86::SUB32ri8;
+ return X86::SUB32ri;
+ }
+}
+
+static unsigned getADDriOpcode(unsigned IsLP64, int64_t Imm) {
+ if (IsLP64) {
+ if (isInt<8>(Imm))
+ return X86::ADD64ri8;
+ return X86::ADD64ri32;
+ } else {
+ if (isInt<8>(Imm))
+ return X86::ADD32ri8;
+ return X86::ADD32ri;
+ }
+}
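+
+// Illustrative only (values assumed, not exercised in this file): the width
+// of the immediate picks the encoding, e.g.
+//   getSUBriOpcode(/*IsLP64=*/true, 8);    // X86::SUB64ri8
+//   getSUBriOpcode(/*IsLP64=*/true, 4096); // X86::SUB64ri32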
+
+static unsigned getSUBrrOpcode(unsigned isLP64) {
+ return isLP64 ? X86::SUB64rr : X86::SUB32rr;
+}
+
+static unsigned getADDrrOpcode(unsigned isLP64) {
+ return isLP64 ? X86::ADD64rr : X86::ADD32rr;
+}
+
+static unsigned getANDriOpcode(bool IsLP64, int64_t Imm) {
+ if (IsLP64) {
+ if (isInt<8>(Imm))
+ return X86::AND64ri8;
+ return X86::AND64ri32;
+ }
+ if (isInt<8>(Imm))
+ return X86::AND32ri8;
+ return X86::AND32ri;
+}
+
+static unsigned getPUSHiOpcode(bool IsLP64, MachineOperand MO) {
+ // We don't support LP64 for now.
+ assert(!IsLP64);
+
+ if (MO.isImm() && isInt<8>(MO.getImm()))
+ return X86::PUSH32i8;
+
+ return X86::PUSHi32;
+}
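+
+// Illustrative only: an immediate that fits in int8, e.g.
+// MachineOperand::CreateImm(42), selects X86::PUSH32i8; a wider one such as
+// 100000 selects X86::PUSHi32.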
+
+static unsigned getLEArOpcode(unsigned IsLP64) {
+ return IsLP64 ? X86::LEA64r : X86::LEA32r;
+}
+
+/// findDeadCallerSavedReg - Return a caller-saved register that isn't live
+/// when it reaches the "return" instruction. We can then pop a stack object
+/// to this register without worrying about clobbering it.
+static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator &MBBI,
+ const TargetRegisterInfo &TRI,
+ bool Is64Bit) {
+ const MachineFunction *MF = MBB.getParent();
+ const Function *F = MF->getFunction();
+ if (!F || MF->getMMI().callsEHReturn())
+ return 0;
+
+ static const uint16_t CallerSavedRegs32Bit[] = {
+ X86::EAX, X86::EDX, X86::ECX, 0
+ };
+
+ static const uint16_t CallerSavedRegs64Bit[] = {
+ X86::RAX, X86::RDX, X86::RCX, X86::RSI, X86::RDI,
+ X86::R8, X86::R9, X86::R10, X86::R11, 0
+ };
+
+ unsigned Opc = MBBI->getOpcode();
+ switch (Opc) {
+ default: return 0;
+ case X86::RETL:
+ case X86::RETQ:
+ case X86::RETIL:
+ case X86::RETIQ:
+ case X86::TCRETURNdi:
+ case X86::TCRETURNri:
+ case X86::TCRETURNmi:
+ case X86::TCRETURNdi64:
+ case X86::TCRETURNri64:
+ case X86::TCRETURNmi64:
+ case X86::EH_RETURN:
+ case X86::EH_RETURN64: {
+ SmallSet<uint16_t, 8> Uses;
+ for (unsigned i = 0, e = MBBI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MBBI->getOperand(i);
+ if (!MO.isReg() || MO.isDef())
+ continue;
+ unsigned Reg = MO.getReg();
+ if (!Reg)
+ continue;
+ for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI)
+ Uses.insert(*AI);
+ }
+
+ const uint16_t *CS = Is64Bit ? CallerSavedRegs64Bit : CallerSavedRegs32Bit;
+ for (; *CS; ++CS)
+ if (!Uses.count(*CS))
+ return *CS;
+ }
+ }
+
+ return 0;
+}
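+
+// Example (assumed scenario): a RETQ that returns a value carries an implicit
+// use of RAX, so the scan above skips RAX and returns RDX, the next
+// caller-saved candidate.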
+
+static bool isEAXLiveIn(MachineFunction &MF) {
+ for (MachineRegisterInfo::livein_iterator II = MF.getRegInfo().livein_begin(),
+ EE = MF.getRegInfo().livein_end(); II != EE; ++II) {
+ unsigned Reg = II->first;
+
+ if (Reg == X86::RAX || Reg == X86::EAX || Reg == X86::AX ||
+ Reg == X86::AH || Reg == X86::AL)
+ return true;
+ }
+
+ return false;
+}
+
+/// emitSPUpdate - Emit a series of instructions to increment / decrement the
+/// stack pointer by a constant value.
+static
+void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
+ unsigned StackPtr, int64_t NumBytes,
+ bool Is64BitTarget, bool Is64BitStackPtr, bool UseLEA,
+ const TargetInstrInfo &TII, const TargetRegisterInfo &TRI) {
+ bool isSub = NumBytes < 0;
+ uint64_t Offset = isSub ? -NumBytes : NumBytes;
+ unsigned Opc;
+ if (UseLEA)
+ Opc = getLEArOpcode(Is64BitStackPtr);
+ else
+ Opc = isSub
+ ? getSUBriOpcode(Is64BitStackPtr, Offset)
+ : getADDriOpcode(Is64BitStackPtr, Offset);
+
+ uint64_t Chunk = (1LL << 31) - 1;
+ DebugLoc DL = MBB.findDebugLoc(MBBI);
+
+ while (Offset) {
+ if (Offset > Chunk) {
+ // Rather than emit a long series of instructions for large offsets,
+ // load the offset into a register and do one sub/add
+ unsigned Reg = 0;
+
+ if (isSub && !isEAXLiveIn(*MBB.getParent()))
+ Reg = (unsigned)(Is64BitTarget ? X86::RAX : X86::EAX);
+ else
+ Reg = findDeadCallerSavedReg(MBB, MBBI, TRI, Is64BitTarget);
+
+ if (Reg) {
+ Opc = Is64BitTarget ? X86::MOV64ri : X86::MOV32ri;
+ BuildMI(MBB, MBBI, DL, TII.get(Opc), Reg)
+ .addImm(Offset);
+ Opc = isSub
+ ? getSUBrrOpcode(Is64BitTarget)
+ : getADDrrOpcode(Is64BitTarget);
+ MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
+ .addReg(StackPtr)
+ .addReg(Reg);
+ MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
+ Offset = 0;
+ continue;
+ }
+ }
+
+ uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
+ if (ThisVal == (Is64BitTarget ? 8 : 4)) {
+ // Use push / pop instead.
+ unsigned Reg = isSub
+ ? (unsigned)(Is64BitTarget ? X86::RAX : X86::EAX)
+ : findDeadCallerSavedReg(MBB, MBBI, TRI, Is64BitTarget);
+ if (Reg) {
+ Opc = isSub
+ ? (Is64BitTarget ? X86::PUSH64r : X86::PUSH32r)
+ : (Is64BitTarget ? X86::POP64r : X86::POP32r);
+ MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc))
+ .addReg(Reg, getDefRegState(!isSub) | getUndefRegState(isSub));
+ if (isSub)
+ MI->setFlag(MachineInstr::FrameSetup);
+ Offset -= ThisVal;
+ continue;
+ }
+ }
+
+ MachineInstr *MI = nullptr;
+
+ if (UseLEA) {
+ MI = addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),
+ StackPtr, false, isSub ? -ThisVal : ThisVal);
+ } else {
+ MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
+ .addReg(StackPtr)
+ .addImm(ThisVal);
+ MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
+ }
+
+ if (isSub)
+ MI->setFlag(MachineInstr::FrameSetup);
+
+ Offset -= ThisVal;
+ }
+}
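+
+// For illustration, typical x86-64 expansions (assuming RAX is free):
+//   emitSPUpdate(..., /*NumBytes=*/-8, ...)   ->  pushq %rax
+//   emitSPUpdate(..., /*NumBytes=*/-24, ...)  ->  subq $24, %rsp
+//   emitSPUpdate(..., -(1LL << 32), ...)      ->  movabsq $4294967296, %rax
+//                                                 subq %rax, %rsp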
+
+/// mergeSPUpdatesUp - Merge the stack-pointer adjustment immediately above
+/// the given iterator into *NumBytes and erase it.
+static
+void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
+ unsigned StackPtr, uint64_t *NumBytes = nullptr) {
+ if (MBBI == MBB.begin()) return;
+
+ MachineBasicBlock::iterator PI = std::prev(MBBI);
+ unsigned Opc = PI->getOpcode();
+ if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
+ Opc == X86::ADD32ri || Opc == X86::ADD32ri8 ||
+ Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
+ PI->getOperand(0).getReg() == StackPtr) {
+ if (NumBytes)
+ *NumBytes += PI->getOperand(2).getImm();
+ MBB.erase(PI);
+ } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
+ Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
+ PI->getOperand(0).getReg() == StackPtr) {
+ if (NumBytes)
+ *NumBytes -= PI->getOperand(2).getImm();
+ MBB.erase(PI);
+ }
+}
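+
+// Example (assumed): with MBBI just past `subq $16, %rsp` and StackPtr ==
+// X86::RSP, the SUB is erased and *NumBytes is decreased by 16.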
+
+/// mergeSPUpdatesDown - Merge the stack-pointer adjustment immediately below
+/// the given iterator into *NumBytes and erase it.
+static
+void mergeSPUpdatesDown(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator &MBBI,
+ unsigned StackPtr, uint64_t *NumBytes = nullptr) {
+ // FIXME: THIS ISN'T RUN!!!
+ return;
+
+ if (MBBI == MBB.end()) return;
+
+ MachineBasicBlock::iterator NI = std::next(MBBI);
+ if (NI == MBB.end()) return;
+
+ unsigned Opc = NI->getOpcode();
+ if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
+ Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
+ NI->getOperand(0).getReg() == StackPtr) {
+ if (NumBytes)
+ *NumBytes -= NI->getOperand(2).getImm();
+ MBB.erase(NI);
+ MBBI = NI;
+ } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
+ Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
+ NI->getOperand(0).getReg() == StackPtr) {
+ if (NumBytes)
+ *NumBytes += NI->getOperand(2).getImm();
+ MBB.erase(NI);
+ MBBI = NI;
+ }
+}
+
+/// mergeSPUpdates - Check the instruction before/after the passed
+/// instruction. If it is an ADD/SUB/LEA instruction it is deleted, and the
+/// stack adjustment is returned as a positive value for ADD/LEA and a
+/// negative one for SUB.
+static int mergeSPUpdates(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator &MBBI, unsigned StackPtr,
+ bool doMergeWithPrevious) {
+ if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
+ (!doMergeWithPrevious && MBBI == MBB.end()))
+ return 0;
+
+ MachineBasicBlock::iterator PI = doMergeWithPrevious ? std::prev(MBBI) : MBBI;
+ MachineBasicBlock::iterator NI = doMergeWithPrevious ? nullptr
+ : std::next(MBBI);
+ unsigned Opc = PI->getOpcode();
+ int Offset = 0;
+
+ if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
+ Opc == X86::ADD32ri || Opc == X86::ADD32ri8 ||
+ Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
+ PI->getOperand(0).getReg() == StackPtr) {
+ Offset += PI->getOperand(2).getImm();
+ MBB.erase(PI);
+ if (!doMergeWithPrevious) MBBI = NI;
+ } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
+ Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
+ PI->getOperand(0).getReg() == StackPtr) {
+ Offset -= PI->getOperand(2).getImm();
+ MBB.erase(PI);
+ if (!doMergeWithPrevious) MBBI = NI;
+ }
+
+ return Offset;
+}
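+
+// Typical use, as in emitPrologue below:
+//   NumBytes -= mergeSPUpdates(MBB, MBBI, StackPtr, true);
+// A preceding `sub $N, %esp` returns -N, so the callee-argument adjustment
+// is folded into the main frame allocation.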
+
+void
+X86FrameLowering::emitCalleeSavedFrameMoves(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ DebugLoc DL) const {
+ MachineFunction &MF = *MBB.getParent();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ MachineModuleInfo &MMI = MF.getMMI();
+ const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
+ const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+
+ // Add callee saved registers to move list.
+ const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
+ if (CSI.empty()) return;
+
+ // Calculate offsets.
+ for (std::vector<CalleeSavedInfo>::const_iterator
+ I = CSI.begin(), E = CSI.end(); I != E; ++I) {
+ int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
+ unsigned Reg = I->getReg();
+
+ unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
+ unsigned CFIIndex =
+ MMI.addFrameInst(MCCFIInstruction::createOffset(nullptr, DwarfReg,
+ Offset));
+ BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ }
+}
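+
+// Each CFI_INSTRUCTION pseudo built above prints in the final assembly as,
+// e.g., `.cfi_offset %rbx, -24` (register and offset here are illustrative).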
+
+/// usesTheStack - Check whether any user of EFLAGS copies it. We know that
+/// the code that lowers a COPY of EFLAGS has
+/// to use the stack, and if we don't adjust the stack we clobber the first
+/// frame index.
+/// See X86InstrInfo::copyPhysReg.
+static bool usesTheStack(const MachineFunction &MF) {
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
+
+ for (MachineRegisterInfo::reg_instr_iterator
+ ri = MRI.reg_instr_begin(X86::EFLAGS), re = MRI.reg_instr_end();
+ ri != re; ++ri)
+ if (ri->isCopy())
+ return true;
+
+ return false;
+}
+
+void X86FrameLowering::emitStackProbeCall(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ DebugLoc DL) {
+ const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
+ bool Is64Bit = STI.is64Bit();
+ bool IsLargeCodeModel = MF.getTarget().getCodeModel() == CodeModel::Large;
+
+ unsigned CallOp;
+ if (Is64Bit)
+ CallOp = IsLargeCodeModel ? X86::CALL64r : X86::CALL64pcrel32;
+ else
+ CallOp = X86::CALLpcrel32;
+
+ const char *Symbol;
+ if (Is64Bit) {
+ if (STI.isTargetCygMing()) {
+ Symbol = "___chkstk_ms";
+ } else {
+ Symbol = "__chkstk";
+ }
+ } else if (STI.isTargetCygMing())
+ Symbol = "_alloca";
+ else
+ Symbol = "_chkstk";
+
+ MachineInstrBuilder CI;
+
+ // All current stack probes take AX and SP as input, clobber flags, and
+ // preserve all registers. x86_64 probes leave RSP unmodified.
+ if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {
+ // For the large code model, we have to call through a register. Use R11,
+ // as it is scratch in all supported calling conventions.
+ BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::R11)
+ .addExternalSymbol(Symbol);
+ CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp)).addReg(X86::R11);
+ } else {
+ CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp)).addExternalSymbol(Symbol);
+ }
+
+ unsigned AX = Is64Bit ? X86::RAX : X86::EAX;
+ unsigned SP = Is64Bit ? X86::RSP : X86::ESP;
+ CI.addReg(AX, RegState::Implicit)
+ .addReg(SP, RegState::Implicit)
+ .addReg(AX, RegState::Define | RegState::Implicit)
+ .addReg(SP, RegState::Define | RegState::Implicit)
+ .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);
+
+ if (Is64Bit) {
+ // MSVC x64's __chkstk and cygwin/mingw's ___chkstk_ms do not adjust %rsp
+ // themselves, and they do not clobber %rax, so we can reuse it when
+ // adjusting %rsp.
+ BuildMI(MBB, MBBI, DL, TII.get(X86::SUB64rr), X86::RSP)
+ .addReg(X86::RSP)
+ .addReg(X86::RAX);
+ }
+}
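+
+// For illustration, on mingw-w64 (small code model) this emits roughly:
+//   call ___chkstk_ms   # probes pages below RSP; RSP itself is unchanged
+//   subq %rax, %rsp     # the actual allocation, reusing the size kept in RAX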
+
+/// emitPrologue - Push callee-saved registers onto the stack, which
+/// automatically adjusts the stack pointer. Adjust the stack pointer to allocate
+/// space for local variables. Also emit labels used by the exception handler to
+/// generate the exception handling frames.
+
+/*
+ Here's a gist of what gets emitted:
+
+ ; Establish frame pointer, if needed
+ [if needs FP]
+ push %rbp
+ .cfi_def_cfa_offset 16
+ .cfi_offset %rbp, -16
+ .seh_pushreg %rbp
+ mov %rsp, %rbp
+ .cfi_def_cfa_register %rbp
+
+ ; Spill general-purpose registers
+ [for all callee-saved GPRs]
+ pushq %<reg>
+ [if not needs FP]
+ .cfi_def_cfa_offset (offset from RETADDR)
+ .seh_pushreg %<reg>
+
+ ; If the required stack alignment > default stack alignment
+ ; rsp needs to be re-aligned. This creates a "re-alignment gap"
+ ; of unknown size in the stack frame.
+ [if stack needs re-alignment]
+ and $MASK, %rsp
+
+ ; Allocate space for locals
+ [if target is Windows and allocated space > 4096 bytes]
+ ; Windows needs special care for allocations larger
+ ; than one page.
+ mov $NNN, %rax
+ call ___chkstk_ms/___chkstk
+ sub %rax, %rsp
+ [else]
+ sub $NNN, %rsp
+
+ [if needs FP]
+ .seh_stackalloc (size of XMM spill slots)
+ .seh_setframe %rbp, SEHFrameOffset ; = size of all spill slots
+ [else]
+ .seh_stackalloc NNN
+
+ ; Spill XMMs
+ ; Note that while only the Windows 64 ABI specifies XMMs as callee-preserved,
+ ; they may get spilled on any platform, if the current function
+ ; calls @llvm.eh.unwind.init
+ [if needs FP]
+ [for all callee-saved XMM registers]
+ movaps %<xmm reg>, -MMM(%rbp)
+ [for all callee-saved XMM registers]
+ .seh_savexmm %<xmm reg>, (-MMM + SEHFrameOffset)
+ ; i.e. the offset relative to (%rbp - SEHFrameOffset)
+ [else]
+ [for all callee-saved XMM registers]
+ movaps %<xmm reg>, KKK(%rsp)
+ [for all callee-saved XMM registers]
+ .seh_savexmm %<xmm reg>, KKK
+
+ .seh_endprologue
+
+ [if needs base pointer]
+ mov %rsp, %rbx
+ [if needs to restore base pointer]
+ mov %rsp, -MMM(%rbp)
+
+ ; Emit CFI info
+ [if needs FP]
+ [for all callee-saved registers]
+ .cfi_offset %<reg>, (offset from %rbp)
+ [else]
+ .cfi_def_cfa_offset (offset from RETADDR)
+ [for all callee-saved registers]
+ .cfi_offset %<reg>, (offset from %rsp)
+
+ Notes:
+ - .seh directives are emitted only for Windows 64 ABI
+ - .cfi directives are emitted for all other ABIs
+ - for 32-bit code, substitute %e?? registers for %r??
+*/
+
+void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
+ MachineBasicBlock &MBB = MF.front(); // Prologue goes in entry BB.
+ MachineBasicBlock::iterator MBBI = MBB.begin();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ const Function *Fn = MF.getFunction();
+ const X86RegisterInfo *RegInfo =
+ static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());
+ const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ MachineModuleInfo &MMI = MF.getMMI();
+ X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
+ uint64_t MaxAlign = MFI->getMaxAlignment(); // Desired stack alignment.
+ uint64_t StackSize = MFI->getStackSize(); // Number of bytes to allocate.
+ bool HasFP = hasFP(MF);
+ const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
+ bool Is64Bit = STI.is64Bit();
+ // Standard x86-64 and NaCl use 64-bit frame/stack pointers; x32 uses 32-bit.
+ const bool Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();
+ bool IsWin64 = STI.isTargetWin64();
+ // Not necessarily synonymous with IsWin64.
+ bool IsWinEH = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
+ bool NeedsWinEH = IsWinEH && Fn->needsUnwindTableEntry();
+ bool NeedsDwarfCFI =
+ !IsWinEH && (MMI.hasDebugInfo() || Fn->needsUnwindTableEntry());
+ bool UseLEA = STI.useLeaForSP();
+ unsigned StackAlign = getStackAlignment();
+ unsigned SlotSize = RegInfo->getSlotSize();
+ unsigned FramePtr = RegInfo->getFrameRegister(MF);
+ const unsigned MachineFramePtr = STI.isTarget64BitILP32() ?
+ getX86SubSuperRegister(FramePtr, MVT::i64, false) : FramePtr;
+ unsigned StackPtr = RegInfo->getStackRegister();
+ unsigned BasePtr = RegInfo->getBaseRegister();
+ DebugLoc DL;
+
+ // If we're forcing a stack realignment we can't rely on just the frame
+ // info, we need to know the ABI stack alignment as well in case we
+ // have a call out. Otherwise just make sure we have some alignment - we'll
+ // go with the minimum SlotSize.
+ if (ForceStackAlign) {
+ if (MFI->hasCalls())
+ MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
+ else if (MaxAlign < SlotSize)
+ MaxAlign = SlotSize;
+ }
+
+ // Add RETADDR move area to callee saved frame size.
+ int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
+ if (TailCallReturnAddrDelta < 0)
+ X86FI->setCalleeSavedFrameSize(
+ X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta);
+
+ bool UseStackProbe = (STI.isOSWindows() && !STI.isTargetMachO());
+
+ // The default stack probe size is 4096 if the function has no stackprobesize
+ // attribute.
+ unsigned StackProbeSize = 4096;
+ if (Fn->hasFnAttribute("stack-probe-size"))
+ Fn->getFnAttribute("stack-probe-size")
+ .getValueAsString()
+ .getAsInteger(0, StackProbeSize);
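+ // For reference, the attribute is attached in IR as, e.g. (assumed
+ // example): define void @f() "stack-probe-size"="8192" { ... }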
+
+ // On x86-64, if the Red Zone is not disabled and this is a leaf function
+ // that uses at most 128 bytes of stack space, has no frame pointer, no
+ // calls, and no dynamic allocas, then we do not need to adjust the stack
+ // pointer (we fit in the Red Zone). We also check that we don't push and
+ // pop from the stack.
+ if (Is64Bit && !Fn->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
+ Attribute::NoRedZone) &&
+ !RegInfo->needsStackRealignment(MF) &&
+ !MFI->hasVarSizedObjects() && // No dynamic alloca.
+ !MFI->adjustsStack() && // No calls.
+ !IsWin64 && // Win64 has no Red Zone
+ !usesTheStack(MF) && // Don't push and pop.
+ !MF.shouldSplitStack()) { // Regular stack
+ uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
+ if (HasFP) MinSize += SlotSize;
+ StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
+ MFI->setStackSize(StackSize);
+ }
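+ // Worked example (assumed numbers): StackSize == 96 with no callee-saved
+ // spills and no FP gives MinSize == 0 and, since 96 <= 128, StackSize
+ // becomes max(0, 0) == 0: no SP adjustment is emitted at all.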
+
+ // Insert stack pointer adjustment for later moving of return addr. Only
+ // applies to tail call optimized functions where the callee argument stack
+ // size is bigger than the caller's.
+ if (TailCallReturnAddrDelta < 0) {
+ MachineInstr *MI =
+ BuildMI(MBB, MBBI, DL,
+ TII.get(getSUBriOpcode(Uses64BitFramePtr, -TailCallReturnAddrDelta)),
+ StackPtr)
+ .addReg(StackPtr)
+ .addImm(-TailCallReturnAddrDelta)
+ .setMIFlag(MachineInstr::FrameSetup);
+ MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
+ }
+
+ // Mapping for machine moves:
+ //
+ // DST: VirtualFP AND
+ // SRC: VirtualFP => DW_CFA_def_cfa_offset
+ // ELSE => DW_CFA_def_cfa
+ //
+ // SRC: VirtualFP AND
+ // DST: Register => DW_CFA_def_cfa_register
+ //
+ // ELSE
+ // OFFSET < 0 => DW_CFA_offset_extended_sf
+ // REG < 64 => DW_CFA_offset + Reg
+ // ELSE => DW_CFA_offset_extended
+
+ uint64_t NumBytes = 0;
+ int stackGrowth = -SlotSize;
+
+ if (HasFP) {
+ // Calculate required stack adjustment.
+ uint64_t FrameSize = StackSize - SlotSize;
+ // If required, include space for an extra hidden slot for stashing the base pointer.
+ if (X86FI->getRestoreBasePointer())
+ FrameSize += SlotSize;
+ if (RegInfo->needsStackRealignment(MF)) {
+ // Callee-saved registers are pushed on stack before the stack
+ // is realigned.
+ FrameSize -= X86FI->getCalleeSavedFrameSize();
+ NumBytes = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;
+ } else {
+ NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();
+ }
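+ // Worked example (assumed numbers): StackSize == 136, SlotSize == 8,
+ // MaxAlign == 32 and 8 bytes of CSR pushes give FrameSize == 136 - 8 - 8
+ // == 120 on the realignment path, rounded up to NumBytes == 128.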
+
+ // Get the offset of the stack slot for the EBP register, which is
+ // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
+ // Update the frame offset adjustment.
+ MFI->setOffsetAdjustment(-NumBytes);
+
+ // Save EBP/RBP into the appropriate stack slot.
+ BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
+ .addReg(MachineFramePtr, RegState::Kill)
+ .setMIFlag(MachineInstr::FrameSetup);
+
+ if (NeedsDwarfCFI) {
+ // Mark the place where EBP/RBP was saved.
+ // Define the current CFA rule to use the provided offset.
+ assert(StackSize);
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaOffset(nullptr, 2 * stackGrowth));
+ BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+
+ // Change the rule for the FramePtr to be an "offset" rule.
+ unsigned DwarfFramePtr = RegInfo->getDwarfRegNum(MachineFramePtr, true);
+ CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createOffset(nullptr,
+ DwarfFramePtr, 2 * stackGrowth));
+ BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ }
+
+ if (NeedsWinEH) {
+ BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
+ .addImm(FramePtr)
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
+
+ // Update EBP with the new base value.
+ BuildMI(MBB, MBBI, DL,
+ TII.get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr), FramePtr)
+ .addReg(StackPtr)
+ .setMIFlag(MachineInstr::FrameSetup);
+
+ if (NeedsDwarfCFI) {
+ // Mark effective beginning of when frame pointer becomes valid.
+ // Define the current CFA to use the EBP/RBP register.
+ unsigned DwarfFramePtr = RegInfo->getDwarfRegNum(MachineFramePtr, true);
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaRegister(nullptr, DwarfFramePtr));
+ BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ }
+
+ // Mark the FramePtr as live-in in every block.
+ for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I)
+ I->addLiveIn(MachineFramePtr);
+ } else {
+ NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
+ }
+
+ // Skip the callee-saved push instructions.
+ bool PushedRegs = false;
+ int StackOffset = 2 * stackGrowth;
+
+ while (MBBI != MBB.end() &&
+ (MBBI->getOpcode() == X86::PUSH32r ||
+ MBBI->getOpcode() == X86::PUSH64r)) {
+ PushedRegs = true;
+ unsigned Reg = MBBI->getOperand(0).getReg();
+ ++MBBI;
+
+ if (!HasFP && NeedsDwarfCFI) {
+ // Mark callee-saved push instruction.
+ // Define the current CFA rule to use the provided offset.
+ assert(StackSize);
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaOffset(nullptr, StackOffset));
+ BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ StackOffset += stackGrowth;
+ }
+
+ if (NeedsWinEH) {
+ BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg)).addImm(Reg).setMIFlag(
+ MachineInstr::FrameSetup);
+ }
+ }
+
+ // Realign stack after we pushed callee-saved registers (so that we'll be
+ // able to calculate their offsets from the frame pointer).
+ if (RegInfo->needsStackRealignment(MF)) {
+ assert(HasFP && "There should be a frame pointer if stack is realigned.");
+ uint64_t Val = -MaxAlign;
+ MachineInstr *MI =
+ BuildMI(MBB, MBBI, DL,
+ TII.get(getANDriOpcode(Uses64BitFramePtr, Val)), StackPtr)
+ .addReg(StackPtr)
+ .addImm(Val)
+ .setMIFlag(MachineInstr::FrameSetup);
+
+ // The EFLAGS implicit def is dead.
+ MI->getOperand(3).setIsDead();
+ }
+
+ // If there is a SUB32ri of ESP immediately before this instruction, merge
+ // the two. This can be the case when tail call elimination is enabled and
+ // the callee has more arguments than the caller.
+ NumBytes -= mergeSPUpdates(MBB, MBBI, StackPtr, true);
+
+ // If there is an ADD32ri or SUB32ri of ESP immediately after this
+ // instruction, merge the two instructions.
+ mergeSPUpdatesDown(MBB, MBBI, StackPtr, &NumBytes);
+
+ // Adjust stack pointer: ESP -= numbytes.
+
+ // Windows and cygwin/mingw require a prologue helper routine when allocating
+ // more than 4K bytes on the stack. Windows uses __chkstk and cygwin/mingw
+ // uses __alloca. __alloca and the 32-bit version of __chkstk will probe the
+ // stack and adjust the stack pointer in one go. The 64-bit version of
+ // __chkstk is only responsible for probing the stack. The 64-bit prologue is
+ // responsible for adjusting the stack pointer. Touching the stack at 4K
+ // increments is necessary to ensure that the guard pages used by the OS
+ // virtual memory manager are allocated in correct sequence.
+ if (NumBytes >= StackProbeSize && UseStackProbe) {
+ // Check whether EAX is livein for this function.
+ bool isEAXAlive = isEAXLiveIn(MF);
+
+ if (isEAXAlive) {
+ // EAX may only be live-in on 32-bit targets, where it is saved and
+ // restored around the probe; the 64-bit path below clobbers RAX.
+ assert(!Is64Bit && "EAX is livein in x64 case!");
+
+ // Save EAX
+ BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
+ .addReg(X86::EAX, RegState::Kill)
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
+
+ if (Is64Bit) {
+ // Handle the 64-bit Windows ABI case where we need to call __chkstk.
+ // Function prologue is responsible for adjusting the stack pointer.
+ BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::RAX)
+ .addImm(NumBytes)
+ .setMIFlag(MachineInstr::FrameSetup);
+ } else {
+ // Allocate NumBytes-4 bytes on stack in case of isEAXAlive.
+ // We'll also use 4 already allocated bytes for EAX.
+ BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
+ .addImm(isEAXAlive ? NumBytes - 4 : NumBytes)
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
+
+ // Save a pointer to the MI where we set AX.
+ MachineBasicBlock::iterator SetRAX = MBBI;
+ --SetRAX;
+
+ // Call __chkstk, __chkstk_ms, or __alloca.
+ emitStackProbeCall(MF, MBB, MBBI, DL);
+
+ // Apply the frame setup flag to all inserted instrs.
+ for (; SetRAX != MBBI; ++SetRAX)
+ SetRAX->setFlag(MachineInstr::FrameSetup);
+
+ if (isEAXAlive) {
+ // Restore EAX
+ MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm),
+ X86::EAX),
+ StackPtr, false, NumBytes - 4);
+ MI->setFlag(MachineInstr::FrameSetup);
+ MBB.insert(MBBI, MI);
+ }
+ } else if (NumBytes) {
+ emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit, Uses64BitFramePtr,
+ UseLEA, TII, *RegInfo);
+ }
+
+ int SEHFrameOffset = 0;
+ if (NeedsWinEH) {
+ if (HasFP) {
+ // We need to set frame base offset low enough such that all saved
+ // register offsets would be positive relative to it, but we can't
+ // just use NumBytes, because .seh_setframe offset must be <=240.
+ // So we pretend to have only allocated enough space to spill the
+ // non-volatile registers.
+ // We don't care about the rest of the stack allocation, because the
+ // unwinder will restore SP to (BP - SEHFrameOffset).
+ for (const CalleeSavedInfo &Info : MFI->getCalleeSavedInfo()) {
+ int offset = MFI->getObjectOffset(Info.getFrameIdx());
+ SEHFrameOffset = std::max(SEHFrameOffset, std::abs(offset));
+ }
+ SEHFrameOffset += SEHFrameOffset % 16; // ensure 16-byte alignment
+
+ // This only needs to account for XMM spill slots, GPR slots
+ // are covered by the .seh_pushreg's emitted above.
+ unsigned Size = SEHFrameOffset - X86FI->getCalleeSavedFrameSize();
+ if (Size) {
+ BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlloc))
+ .addImm(Size)
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
+
+ BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame))
+ .addImm(FramePtr)
+ .addImm(SEHFrameOffset)
+ .setMIFlag(MachineInstr::FrameSetup);
+ } else {
+ // SP will be the base register for restoring XMMs
+ if (NumBytes) {
+ BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlloc))
+ .addImm(NumBytes)
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
+ }
+ }
+
+ // Skip the rest of the register spilling code.
+ while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup))
+ ++MBBI;
+
+ // Emit SEH info for non-GPRs
+ if (NeedsWinEH) {
+ for (const CalleeSavedInfo &Info : MFI->getCalleeSavedInfo()) {
+ unsigned Reg = Info.getReg();
+ if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
+ continue;
+ assert(X86::FR64RegClass.contains(Reg) && "Unexpected register class");
+
+ int Offset = getFrameIndexOffset(MF, Info.getFrameIdx());
+ Offset += SEHFrameOffset;
+
+ BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SaveXMM))
+ .addImm(Reg)
+ .addImm(Offset)
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
+
+ BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_EndPrologue))
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
+
+ // If we need a base pointer, set it up here. It's whatever the value
+ // of the stack pointer is at this point. Any variable size objects
+ // will be allocated after this, so we can still use the base pointer
+ // to reference locals.
+ if (RegInfo->hasBasePointer(MF)) {
+ // Update the base pointer with the current stack pointer.
+ unsigned Opc = Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr;
+ BuildMI(MBB, MBBI, DL, TII.get(Opc), BasePtr)
+ .addReg(StackPtr)
+ .setMIFlag(MachineInstr::FrameSetup);
+ if (X86FI->getRestoreBasePointer()) {
+ // Stash the value of the base pointer. Saving RSP instead of EBP shortens
+ // the dependence chain.
+ unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
+ addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)),
+ FramePtr, true, X86FI->getRestoreBasePointerOffset())
+ .addReg(StackPtr)
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
+ }
+
+ if (((!HasFP && NumBytes) || PushedRegs) && NeedsDwarfCFI) {
+ // Mark end of stack pointer adjustment.
+ if (!HasFP && NumBytes) {
+ // Define the current CFA rule to use the provided offset.
+ assert(StackSize);
+ unsigned CFIIndex = MMI.addFrameInst(
+ MCCFIInstruction::createDefCfaOffset(nullptr,
+ -StackSize + stackGrowth));
+
+ BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+ }
+
+ // Emit DWARF info specifying the offsets of the callee-saved registers.
+ if (PushedRegs)
+ emitCalleeSavedFrameMoves(MBB, MBBI, DL);
+ }
+}
+
+void X86FrameLowering::emitEpilogue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
+ const X86RegisterInfo *RegInfo =
+ static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());
+ const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
+ assert(MBBI != MBB.end() && "Returning block has no instructions");
+ unsigned RetOpcode = MBBI->getOpcode();
+ DebugLoc DL = MBBI->getDebugLoc();
+ const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
+ bool Is64Bit = STI.is64Bit();
+ // Standard x86-64 and NaCl use 64-bit frame/stack pointers; x32 uses 32-bit.
+ const bool Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();
+ const bool Is64BitILP32 = STI.isTarget64BitILP32();
+ bool UseLEA = STI.useLeaForSP();
+ unsigned StackAlign = getStackAlignment();
+ unsigned SlotSize = RegInfo->getSlotSize();
+ unsigned FramePtr = RegInfo->getFrameRegister(MF);
+ unsigned MachineFramePtr = Is64BitILP32 ?
+ getX86SubSuperRegister(FramePtr, MVT::i64, false) : FramePtr;
+ unsigned StackPtr = RegInfo->getStackRegister();
+
+ bool IsWinEH = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
+ bool NeedsWinEH = IsWinEH && MF.getFunction()->needsUnwindTableEntry();
+
+ switch (RetOpcode) {
+ default:
+ llvm_unreachable("Can only insert epilog into returning blocks");
+ case X86::RETQ:
+ case X86::RETL:
+ case X86::RETIL:
+ case X86::RETIQ:
+ case X86::TCRETURNdi:
+ case X86::TCRETURNri:
+ case X86::TCRETURNmi:
+ case X86::TCRETURNdi64:
+ case X86::TCRETURNri64:
+ case X86::TCRETURNmi64:
+ case X86::EH_RETURN:
+ case X86::EH_RETURN64:
+ break; // These are ok
+ }
+
+ // Get the number of bytes to allocate from the FrameInfo.
+ uint64_t StackSize = MFI->getStackSize();
+ uint64_t MaxAlign = MFI->getMaxAlignment();
+ unsigned CSSize = X86FI->getCalleeSavedFrameSize();
+ uint64_t NumBytes = 0;
+
+ // If we're forcing a stack realignment we can't rely on just the frame
+ // info, we need to know the ABI stack alignment as well in case we
+ // have a call out. Otherwise just make sure we have some alignment - we'll
+ // go with the minimum.
+ if (ForceStackAlign) {
+ if (MFI->hasCalls())
+ MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
+ else
+ MaxAlign = MaxAlign ? MaxAlign : 4;
+ }
+
+ if (hasFP(MF)) {
+ // Calculate required stack adjustment.
+ uint64_t FrameSize = StackSize - SlotSize;
+ if (RegInfo->needsStackRealignment(MF)) {
+ // Callee-saved registers were pushed on stack before the stack
+ // was realigned.
+ FrameSize -= CSSize;
+ NumBytes = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;
+ } else {
+ NumBytes = FrameSize - CSSize;
+ }
+
+ // Pop EBP.
+ BuildMI(MBB, MBBI, DL,
+ TII.get(Is64Bit ? X86::POP64r : X86::POP32r), MachineFramePtr);
+ } else {
+ NumBytes = StackSize - CSSize;
+ }
+
+ // Skip the callee-saved pop instructions.
+ while (MBBI != MBB.begin()) {
+ MachineBasicBlock::iterator PI = std::prev(MBBI);
+ unsigned Opc = PI->getOpcode();
+
+ if (Opc != X86::POP32r && Opc != X86::POP64r && Opc != X86::DBG_VALUE &&
+ !PI->isTerminator())
+ break;
+
+ --MBBI;
+ }
+ MachineBasicBlock::iterator FirstCSPop = MBBI;
+
+ DL = MBBI->getDebugLoc();
+
+ // If there is an ADD32ri or SUB32ri of ESP immediately before this
+ // instruction, merge the two instructions.
+ if (NumBytes || MFI->hasVarSizedObjects())
+ mergeSPUpdatesUp(MBB, MBBI, StackPtr, &NumBytes);
+
+ // If dynamic alloca is used, then reset ESP to point to the last
+ // callee-saved slot before popping them off. The same applies when the
+ // stack was realigned.
+ if (RegInfo->needsStackRealignment(MF) || MFI->hasVarSizedObjects()) {
+ if (RegInfo->needsStackRealignment(MF))
+ MBBI = FirstCSPop;
+ if (CSSize != 0) {
+ unsigned Opc = getLEArOpcode(Uses64BitFramePtr);
+ addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),
+ FramePtr, false, -CSSize);
+ --MBBI;
+ } else {
+ unsigned Opc = (Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr);
+ BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
+ .addReg(FramePtr);
+ --MBBI;
+ }
+ } else if (NumBytes) {
+ // Adjust stack pointer back: ESP += numbytes.
+ emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, Uses64BitFramePtr, UseLEA,
+ TII, *RegInfo);
+ --MBBI;
+ }
+
+ // The Windows unwinder will not invoke a function's exception handler if IP
+ // is either in the prologue or in the epilogue. This causes a problem when
+ // a call immediately precedes an epilogue, because the return address points
+ // into the epilogue. To cope with that, we insert an epilogue marker here,
+ // then replace it with a 'nop' if it ends up immediately after a CALL in the
+ // final emitted code.
+ if (NeedsWinEH)
+ BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_Epilogue));
+
+ // We're returning from function via eh_return.
+ if (RetOpcode == X86::EH_RETURN || RetOpcode == X86::EH_RETURN64) {
+ MBBI = MBB.getLastNonDebugInstr();
+ MachineOperand &DestAddr = MBBI->getOperand(0);
+ assert(DestAddr.isReg() && "Offset should be in register!");
+ BuildMI(MBB, MBBI, DL,
+ TII.get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr),
+ StackPtr).addReg(DestAddr.getReg());
+ } else if (RetOpcode == X86::TCRETURNri || RetOpcode == X86::TCRETURNdi ||
+ RetOpcode == X86::TCRETURNmi ||
+ RetOpcode == X86::TCRETURNri64 || RetOpcode == X86::TCRETURNdi64 ||
+ RetOpcode == X86::TCRETURNmi64) {
+ bool isMem = RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64;
+ // Tail call return: adjust the stack pointer and jump to callee.
+ MBBI = MBB.getLastNonDebugInstr();
+ MachineOperand &JumpTarget = MBBI->getOperand(0);
+ MachineOperand &StackAdjust = MBBI->getOperand(isMem ? 5 : 1);
+ assert(StackAdjust.isImm() && "Expecting immediate value.");
+
+ // Adjust stack pointer.
+ int StackAdj = StackAdjust.getImm();
+ int MaxTCDelta = X86FI->getTCReturnAddrDelta();
+ int Offset = 0;
+ assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive");
+
+ // Incorporate the retaddr area.
+ Offset = StackAdj-MaxTCDelta;
+ assert(Offset >= 0 && "Offset should never be negative");
+
+ if (Offset) {
+ // Check for possible merge with preceding ADD instruction.
+ Offset += mergeSPUpdates(MBB, MBBI, StackPtr, true);
+ emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, Uses64BitFramePtr,
+ UseLEA, TII, *RegInfo);
+ }
+
+ // Jump to label or value in register.
+ bool IsWin64 = STI.isTargetWin64();
+ if (RetOpcode == X86::TCRETURNdi || RetOpcode == X86::TCRETURNdi64) {
+ unsigned Op = (RetOpcode == X86::TCRETURNdi)
+ ? X86::TAILJMPd
+ : (IsWin64 ? X86::TAILJMPd64_REX : X86::TAILJMPd64);
+ MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII.get(Op));
+ if (JumpTarget.isGlobal())
+ MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
+ JumpTarget.getTargetFlags());
+ else {
+ assert(JumpTarget.isSymbol());
+ MIB.addExternalSymbol(JumpTarget.getSymbolName(),
+ JumpTarget.getTargetFlags());
+ }
+ } else if (RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64) {
+ unsigned Op = (RetOpcode == X86::TCRETURNmi)
+ ? X86::TAILJMPm
+ : (IsWin64 ? X86::TAILJMPm64_REX : X86::TAILJMPm64);
+ MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII.get(Op));
+ for (unsigned i = 0; i != 5; ++i)
+ MIB.addOperand(MBBI->getOperand(i));
+ } else if (RetOpcode == X86::TCRETURNri64) {
+ BuildMI(MBB, MBBI, DL,
+ TII.get(IsWin64 ? X86::TAILJMPr64_REX : X86::TAILJMPr64))
+ .addReg(JumpTarget.getReg(), RegState::Kill);
+ } else {
+ BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr))
+ .addReg(JumpTarget.getReg(), RegState::Kill);
+ }
+
+ MachineInstr *NewMI = std::prev(MBBI);
+ NewMI->copyImplicitOps(MF, MBBI);
+
+ // Delete the pseudo instruction TCRETURN.
+ MBB.erase(MBBI);
+ } else if ((RetOpcode == X86::RETQ || RetOpcode == X86::RETL ||
+ RetOpcode == X86::RETIQ || RetOpcode == X86::RETIL) &&
+ (X86FI->getTCReturnAddrDelta() < 0)) {
+ // Add the return addr area delta back since we are not tail calling.
+ int delta = -1 * X86FI->getTCReturnAddrDelta();
+ MBBI = MBB.getLastNonDebugInstr();
+
+ // Check for possible merge with preceding ADD instruction.
+ delta += mergeSPUpdates(MBB, MBBI, StackPtr, true);
+ emitSPUpdate(MBB, MBBI, StackPtr, delta, Is64Bit, Uses64BitFramePtr, UseLEA, TII,
+ *RegInfo);
+ }
+}
+
+int X86FrameLowering::getFrameIndexOffset(const MachineFunction &MF,
+ int FI) const {
+ const X86RegisterInfo *RegInfo =
+ static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea();
+ uint64_t StackSize = MFI->getStackSize();
+
+ if (RegInfo->hasBasePointer(MF)) {
+ assert(hasFP(MF) && "VLAs and dynamic stack realign, but no FP?!");
+ if (FI < 0) {
+ // Skip the saved EBP.
+ return Offset + RegInfo->getSlotSize();
+ } else {
+ assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
+ return Offset + StackSize;
+ }
+ } else if (RegInfo->needsStackRealignment(MF)) {
+ if (FI < 0) {
+ // Skip the saved EBP.
+ return Offset + RegInfo->getSlotSize();
+ } else {
+ assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
+ return Offset + StackSize;
+ }
+ // FIXME: Support tail calls
+ } else {
+ if (!hasFP(MF))
+ return Offset + StackSize;
+
+ // Skip the saved EBP.
+ Offset += RegInfo->getSlotSize();
+
+ // Skip the RETADDR move area
+ const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
+ int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
+ if (TailCallReturnAddrDelta < 0)
+ Offset -= TailCallReturnAddrDelta;
+ }
+
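+ // At this point hasFP holds: the offset is frame-pointer relative,
+ // i.e. ObjectOffset - LocalAreaOffset + SlotSize, grown by the retaddr
+ // move area for functions with a negative TCReturnAddrDelta.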
+ return Offset;
+}
+
+int X86FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
+ unsigned &FrameReg) const {
+ const X86RegisterInfo *RegInfo =
+ static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());
+ // We can't calculate offset from frame pointer if the stack is realigned,
+ // so enforce usage of stack/base pointer. The base pointer is used when we
+ // have dynamic allocas in addition to dynamic realignment.
+ if (RegInfo->hasBasePointer(MF))
+ FrameReg = RegInfo->getBaseRegister();
+ else if (RegInfo->needsStackRealignment(MF))
+ FrameReg = RegInfo->getStackRegister();
+ else
+ FrameReg = RegInfo->getFrameRegister(MF);
+ return getFrameIndexOffset(MF, FI);
+}
+
+// Simplified from getFrameIndexOffset, keeping only the StackPointer cases.
+int X86FrameLowering::getFrameIndexOffsetFromSP(const MachineFunction &MF,
+ int FI) const {
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ // Does not include any dynamic realign.
+ const uint64_t StackSize = MFI->getStackSize();
+ {
+#ifndef NDEBUG
+ const X86RegisterInfo *RegInfo =
+ static_cast<const X86RegisterInfo*>(MF.getSubtarget().getRegisterInfo());
+ // Note: LLVM arranges the stack as:
+ // Args > Saved RetPC (<--FP) > CSRs > dynamic alignment (<--BP)
+ // > "Stack Slots" (<--SP)
+ // We can always address StackSlots from RSP. We can usually (unless
+ // needsStackRealignment) address CSRs from RSP, but sometimes need to
+ // address them from RBP. FixedObjects can be placed anywhere in the stack
+ // frame depending on their specific requirements (i.e. we can actually
+ // refer to arguments to the function which are stored in the *caller's*
+ // frame). As a result, THE RESULT OF THIS CALL IS MEANINGLESS FOR CSRs
+ // AND FixedObjects IF needsStackRealignment or hasVarSizedObjects is true.
+
+ assert(!RegInfo->hasBasePointer(MF) && "we don't handle this case");
+
+ // We don't handle tail calls, and shouldn't be seeing them
+ // either.
+ int TailCallReturnAddrDelta =
+ MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta();
+ assert(!(TailCallReturnAddrDelta < 0) && "we don't handle this case!");
+#endif
+ }
+
+ // This is how the math works out:
+ //
+ // %rsp grows (i.e. gets lower) left to right. Each box below is
+ // one word (eight bytes). Obj0 is the stack slot we're trying to
+ // get to.
+ //
+ // ----------------------------------
+ // | BP | Obj0 | Obj1 | ... | ObjN |
+ // ----------------------------------
+ // ^ ^ ^ ^
+ // A B C E
+ //
+ // A is the incoming stack pointer.
+ // (B - A) is the local area offset (-8 for x86-64) [1]
+ // (C - A) is the Offset returned by MFI->getObjectOffset for Obj0 [2]
+ //
+ // |(E - B)| is the StackSize (absolute value, positive). For a
+ // stack that grows down, this works out to be (B - E). [3]
+ //
+ // E is also the value of %rsp after the stack has been set up, and we
+ // want (C - E) -- the value we can add to %rsp to get to Obj0. Now
+ // (C - E) == (C - A) - (B - A) + (B - E)
+ // { Using [1], [2] and [3] above }
+ // == getObjectOffset - LocalAreaOffset + StackSize
+ //
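+ // Illustrative numbers (x86-64): with LocalAreaOffset == -8,
+ // getObjectOffset(Obj0) == -24 and StackSize == 32, the result is
+ // -24 - (-8) + 32 == 16, i.e. Obj0 lives 16 bytes above %rsp.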
+
+ // Get the Offset from the StackPointer
+ int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea();
+
+ return Offset + StackSize;
+}
+
+// Simplified from getFrameIndexReference, keeping only the StackPointer cases.
+int X86FrameLowering::getFrameIndexReferenceFromSP(const MachineFunction &MF,
+ int FI, unsigned &FrameReg) const {
+ const X86RegisterInfo *RegInfo =
+ static_cast<const X86RegisterInfo*>(MF.getSubtarget().getRegisterInfo());
+
+ assert(!RegInfo->hasBasePointer(MF) && "we don't handle this case");
+
+ FrameReg = RegInfo->getStackRegister();
+ return getFrameIndexOffsetFromSP(MF, FI);
+}
+
+bool X86FrameLowering::assignCalleeSavedSpillSlots(
+ MachineFunction &MF, const TargetRegisterInfo *TRI,
+ std::vector<CalleeSavedInfo> &CSI) const {
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ const X86RegisterInfo *RegInfo =
+ static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());
+ unsigned SlotSize = RegInfo->getSlotSize();
+ X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
+
+ unsigned CalleeSavedFrameSize = 0;
+ int SpillSlotOffset = getOffsetOfLocalArea() + X86FI->getTCReturnAddrDelta();
+
+ if (hasFP(MF)) {
+ // emitPrologue always spills the frame register first.
+ SpillSlotOffset -= SlotSize;
+ MFI->CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
+
+ // Since emitPrologue and emitEpilogue will handle spilling and restoring of
+ // the frame register, we can delete it from CSI list and not have to worry
+ // about avoiding it later.
+ unsigned FPReg = RegInfo->getFrameRegister(MF);
+ for (unsigned i = 0; i < CSI.size(); ++i) {
+ if (TRI->regsOverlap(CSI[i].getReg(),FPReg)) {
+ CSI.erase(CSI.begin() + i);
+ break;
+ }
+ }
+ }
+
+ // Assign slots for GPRs. It increases the frame size.
+ for (unsigned i = CSI.size(); i != 0; --i) {
+ unsigned Reg = CSI[i - 1].getReg();
+
+ if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
+ continue;
+
+ SpillSlotOffset -= SlotSize;
+ CalleeSavedFrameSize += SlotSize;
+
+ int SlotIndex = MFI->CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
+ CSI[i - 1].setFrameIdx(SlotIndex);
+ }
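+ // Note: CSI is walked in reverse both here and in spillCalleeSavedRegisters,
+ // so each push in the prologue lands in the fixed slot assigned above.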
+
+ X86FI->setCalleeSavedFrameSize(CalleeSavedFrameSize);
+
+ // Assign slots for XMMs.
+ for (unsigned i = CSI.size(); i != 0; --i) {
+ unsigned Reg = CSI[i - 1].getReg();
+ if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
+ continue;
+
+ const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg);
+ // Ensure alignment of the spill slot.
+ SpillSlotOffset -= std::abs(SpillSlotOffset) % RC->getAlignment();
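+ // e.g. with SpillSlotOffset == -40 and a 16-byte aligned XMM class, the
+ // offset is bumped down to -48, so the 16-byte slot carved out below
+ // spans [-64, -48) and stays 16-byte aligned.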
+ // Carve out the spill slot.
+ SpillSlotOffset -= RC->getSize();
+ int SlotIndex =
+ MFI->CreateFixedSpillStackObject(RC->getSize(), SpillSlotOffset);
+ CSI[i - 1].setFrameIdx(SlotIndex);
+ MFI->ensureMaxAlignment(RC->getAlignment());
+ }
+
+ return true;
+}
+
+bool X86FrameLowering::spillCalleeSavedRegisters(
+ MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
+ DebugLoc DL = MBB.findDebugLoc(MI);
+
+ MachineFunction &MF = *MBB.getParent();
+ const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
+
+ // Push GPRs. It increases the frame size.
+ unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
+ for (unsigned i = CSI.size(); i != 0; --i) {
+ unsigned Reg = CSI[i - 1].getReg();
+
+ if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
+ continue;
+ // Add the callee-saved register as live-in. It's killed at the spill.
+ MBB.addLiveIn(Reg);
+
+ BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, RegState::Kill)
+ .setMIFlag(MachineInstr::FrameSetup);
+ }
+
+ // Spill XMM regs. X86 has no push/pop instructions for XMM registers,
+ // so they are stored to the stack frame instead.
+ for (unsigned i = CSI.size(); i != 0; --i) {
+ unsigned Reg = CSI[i-1].getReg();
+ if (X86::GR64RegClass.contains(Reg) ||
+ X86::GR32RegClass.contains(Reg))
+ continue;
+ // Add the callee-saved register as live-in. It's killed at the spill.
+ MBB.addLiveIn(Reg);
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+
+ TII.storeRegToStackSlot(MBB, MI, Reg, true, CSI[i - 1].getFrameIdx(), RC,
+ TRI);
+ --MI;
+ MI->setFlag(MachineInstr::FrameSetup);
+ ++MI;
+ }
+
+ return true;
+}
+
+bool X86FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI,
+ const TargetRegisterInfo *TRI) const {
+ if (CSI.empty())
+ return false;
+
+ DebugLoc DL = MBB.findDebugLoc(MI);
+
+ MachineFunction &MF = *MBB.getParent();
+ const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
+
+ // Reload XMMs from stack frame.
+ for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
+ unsigned Reg = CSI[i].getReg();
+ if (X86::GR64RegClass.contains(Reg) ||
+ X86::GR32RegClass.contains(Reg))
+ continue;
+
+ const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+ TII.loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(), RC, TRI);
+ }
+
+ // POP GPRs.
+ unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;
+ for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
+ unsigned Reg = CSI[i].getReg();
+ if (!X86::GR64RegClass.contains(Reg) &&
+ !X86::GR32RegClass.contains(Reg))
+ continue;
+
+ BuildMI(MBB, MI, DL, TII.get(Opc), Reg);
+ }
+ return true;
+}
+
+void
+X86FrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
+ RegScavenger *RS) const {
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ const X86RegisterInfo *RegInfo =
+ static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo());
+ unsigned SlotSize = RegInfo->getSlotSize();
+
+ X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
+ int64_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
+
+ if (TailCallReturnAddrDelta < 0) {
+ // Create the RETURNADDR area:
+ // arg
+ // arg
+ // RETADDR
+ // { ...
+ // RETADDR area
+ // ...
+ // }
+ // [EBP]
+ MFI->CreateFixedObject(-TailCallReturnAddrDelta,
+ TailCallReturnAddrDelta - SlotSize, true);
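+ // e.g. on x86-64, a TailCallReturnAddrDelta of -8 creates an 8-byte fixed
+ // object at offset -16.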
+ }
+
+ // Spill the BasePtr if it's used.
+ if (RegInfo->hasBasePointer(MF))
+ MF.getRegInfo().setPhysRegUsed(RegInfo->getBaseRegister());
+}
+
+static bool
+HasNestArgument(const MachineFunction *MF) {
+ const Function *F = MF->getFunction();
+ for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
+ I != E; ++I) {
+ if (I->hasNestAttr())
+ return true;
+ }
+ return false;
+}
+
+/// GetScratchRegister - Get a temp register for performing work in the
+/// segmented stack and the Erlang/HiPE stack prologue. Depending on the
+/// platform and the properties of the function, either one or two registers
+/// will be needed. Set primary to true for the first register, false for the
+/// second.
+static unsigned GetScratchRegister(bool Is64Bit, bool IsLP64,
+ const MachineFunction &MF, bool Primary) {
+ CallingConv::ID CallingConvention = MF.getFunction()->getCallingConv();
+
+ // Erlang/HiPE calling convention.
+ if (CallingConvention == CallingConv::HiPE) {
+ if (Is64Bit)
+ return Primary ? X86::R14 : X86::R13;
+ else
+ return Primary ? X86::EBX : X86::EDI;
+ }
+
+ if (Is64Bit) {
+ if (IsLP64)
+ return Primary ? X86::R11 : X86::R12;
+ else
+ return Primary ? X86::R11D : X86::R12D;
+ }
+
+ bool IsNested = HasNestArgument(&MF);
+
+ if (CallingConvention == CallingConv::X86_FastCall ||
+ CallingConvention == CallingConv::Fast) {
+ if (IsNested)
+ report_fatal_error("Segmented stacks does not support fastcall with "
+ "nested function.");
+ return Primary ? X86::EAX : X86::ECX;
+ }
+ if (IsNested)
+ return Primary ? X86::EDX : X86::EAX;
+ return Primary ? X86::ECX : X86::EAX;
+}
+
+// The stack limit in the TCB is set to this many bytes above the actual stack
+// limit.
+static const uint64_t kSplitStackAvailable = 256;
+
+void
+X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
+ MachineBasicBlock &prologueMBB = MF.front();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ uint64_t StackSize;
+ const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
+ bool Is64Bit = STI.is64Bit();
+ const bool IsLP64 = STI.isTarget64BitLP64();
+ unsigned TlsReg, TlsOffset;
+ DebugLoc DL;
+
+ unsigned ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
+ assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
+ "Scratch register is live-in");
+
+ if (MF.getFunction()->isVarArg())
+ report_fatal_error("Segmented stacks do not support vararg functions.");
+ if (!STI.isTargetLinux() && !STI.isTargetDarwin() && !STI.isTargetWin32() &&
+ !STI.isTargetWin64() && !STI.isTargetFreeBSD() &&
+ !STI.isTargetDragonFly())
+ report_fatal_error("Segmented stacks not supported on this platform.");
+
+ // Eventually StackSize will be calculated by a link-time pass, which will
+ // also decide whether checking code needs to be injected into this
+ // particular prologue.
+ StackSize = MFI->getStackSize();
+
+ // Do not generate a prologue for functions with a stack frame of size zero.
+ if (StackSize == 0)
+ return;
+
+ MachineBasicBlock *allocMBB = MF.CreateMachineBasicBlock();
+ MachineBasicBlock *checkMBB = MF.CreateMachineBasicBlock();
+ X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
+ bool IsNested = false;
+
+ // We need to know if the function has a nest argument only in 64 bit mode.
+ if (Is64Bit)
+ IsNested = HasNestArgument(&MF);
+
+ // The MOV R10, RAX needs to be in a different block, since the RET we emit
+ // in allocMBB needs to be the last (terminating) instruction.
+
+ for (MachineBasicBlock::livein_iterator i = prologueMBB.livein_begin(),
+ e = prologueMBB.livein_end(); i != e; ++i) {
+ allocMBB->addLiveIn(*i);
+ checkMBB->addLiveIn(*i);
+ }
+
+ if (IsNested)
+ allocMBB->addLiveIn(IsLP64 ? X86::R10 : X86::R10D);
+
+ MF.push_front(allocMBB);
+ MF.push_front(checkMBB);
+
+ // When the frame size is less than 256 bytes, we compare the stack
+ // boundary directly to the value of the stack pointer, per gcc.
+ bool CompareStackPointer = StackSize < kSplitStackAvailable;
+
+ // Read the limit of the current stacklet from the stack_guard location.
+ if (Is64Bit) {
+ if (STI.isTargetLinux()) {
+ TlsReg = X86::FS;
+ TlsOffset = IsLP64 ? 0x70 : 0x40;
+ } else if (STI.isTargetDarwin()) {
+ TlsReg = X86::GS;
+ TlsOffset = 0x60 + 90*8; // See pthread_machdep.h. Steal TLS slot 90.
+ } else if (STI.isTargetWin64()) {
+ TlsReg = X86::GS;
+ TlsOffset = 0x28; // pvArbitrary, reserved for application use
+ } else if (STI.isTargetFreeBSD()) {
+ TlsReg = X86::FS;
+ TlsOffset = 0x18;
+ } else if (STI.isTargetDragonFly()) {
+ TlsReg = X86::FS;
+ TlsOffset = 0x20; // use tls_tcb.tcb_segstack
+ } else {
+ report_fatal_error("Segmented stacks not supported on this platform.");
+ }
+
+ if (CompareStackPointer)
+ ScratchReg = IsLP64 ? X86::RSP : X86::ESP;
+ else
+ BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::LEA64r : X86::LEA64_32r), ScratchReg).addReg(X86::RSP)
+ .addImm(1).addReg(0).addImm(-StackSize).addReg(0);
+
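+ // Compare ScratchReg (either %rsp itself or %rsp - StackSize) against the
+ // stacklet limit stored at TlsOffset(TlsReg).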
+ BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::CMP64rm : X86::CMP32rm)).addReg(ScratchReg)
+ .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg);
+ } else {
+ if (STI.isTargetLinux()) {
+ TlsReg = X86::GS;
+ TlsOffset = 0x30;
+ } else if (STI.isTargetDarwin()) {
+ TlsReg = X86::GS;
+ TlsOffset = 0x48 + 90*4;
+ } else if (STI.isTargetWin32()) {
+ TlsReg = X86::FS;
+ TlsOffset = 0x14; // pvArbitrary, reserved for application use
+ } else if (STI.isTargetDragonFly()) {
+ TlsReg = X86::FS;
+ TlsOffset = 0x10; // use tls_tcb.tcb_segstack
+ } else if (STI.isTargetFreeBSD()) {
+ report_fatal_error("Segmented stacks not supported on FreeBSD i386.");
+ } else {
+ report_fatal_error("Segmented stacks not supported on this platform.");
+ }
+
+ if (CompareStackPointer)
+ ScratchReg = X86::ESP;
+ else
+ BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP)
+ .addImm(1).addReg(0).addImm(-StackSize).addReg(0);
+
+ if (STI.isTargetLinux() || STI.isTargetWin32() || STI.isTargetWin64() ||
+ STI.isTargetDragonFly()) {
+ BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)).addReg(ScratchReg)
+ .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);
+ } else if (STI.isTargetDarwin()) {
+ // TlsOffset doesn't fit into a mod r/m byte, so we need an extra register.
+ unsigned ScratchReg2;
+ bool SaveScratch2;
+ if (CompareStackPointer) {
+ // The primary scratch register is available for holding the TLS offset.
+ ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, true);
+ SaveScratch2 = false;
+ } else {
+ // Need to use a second register to hold the TLS offset.
+ ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, false);
+
+ // Unfortunately, with fastcc the second scratch register may hold an
+ // argument.
+ SaveScratch2 = MF.getRegInfo().isLiveIn(ScratchReg2);
+ }
+
+ // If Scratch2 is live-in then it needs to be saved.
+ assert((!MF.getRegInfo().isLiveIn(ScratchReg2) || SaveScratch2) &&
+ "Scratch register is live-in and not saved");
+
+ if (SaveScratch2)
+ BuildMI(checkMBB, DL, TII.get(X86::PUSH32r))
+ .addReg(ScratchReg2, RegState::Kill);
+
+ BuildMI(checkMBB, DL, TII.get(X86::MOV32ri), ScratchReg2)
+ .addImm(TlsOffset);
+ BuildMI(checkMBB, DL, TII.get(X86::CMP32rm))
+ .addReg(ScratchReg)
+ .addReg(ScratchReg2).addImm(1).addReg(0)
+ .addImm(0)
+ .addReg(TlsReg);
+
+ if (SaveScratch2)
+ BuildMI(checkMBB, DL, TII.get(X86::POP32r), ScratchReg2);
+ }
+ }
+
+ // This jump is taken if SP >= (Stacklet Limit + Stack Space required).
+ // It jumps to normal execution of the function body.
+ BuildMI(checkMBB, DL, TII.get(X86::JA_1)).addMBB(&prologueMBB);
+
+ // On 32 bit we first push the arguments size and then the frame size. On 64
+ // bit, we pass the stack frame size in r10 and the argument size in r11.
+ if (Is64Bit) {
+ // Functions with nested arguments use R10, so it needs to be saved across
+ // the call to __morestack.
+
+ const unsigned RegAX = IsLP64 ? X86::RAX : X86::EAX;
+ const unsigned Reg10 = IsLP64 ? X86::R10 : X86::R10D;
+ const unsigned Reg11 = IsLP64 ? X86::R11 : X86::R11D;
+ const unsigned MOVrr = IsLP64 ? X86::MOV64rr : X86::MOV32rr;
+ const unsigned MOVri = IsLP64 ? X86::MOV64ri : X86::MOV32ri;
+
+ if (IsNested)
+ BuildMI(allocMBB, DL, TII.get(MOVrr), RegAX).addReg(Reg10);
+
+ BuildMI(allocMBB, DL, TII.get(MOVri), Reg10)
+ .addImm(StackSize);
+ BuildMI(allocMBB, DL, TII.get(MOVri), Reg11)
+ .addImm(X86FI->getArgumentStackSize());
+ MF.getRegInfo().setPhysRegUsed(Reg10);
+ MF.getRegInfo().setPhysRegUsed(Reg11);
+ } else {
+ BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
+ .addImm(X86FI->getArgumentStackSize());
+ BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
+ .addImm(StackSize);
+ }
+
+ // __morestack is in libgcc.
+ if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {
+ // Under the large code model, we cannot assume that __morestack lives
+ // within 2^31 bytes of the call site, so we cannot use pc-relative
+ // addressing. We cannot perform the call via a temporary register,
+ // as the rax register may be used to store the static chain, and all
+ // other suitable registers may be either callee-save or used for
+ // parameter passing. We cannot use the stack at this point either
+ // because __morestack manipulates the stack directly.
+ //
+ // To avoid these issues, perform an indirect call via a read-only memory
+ // location containing the address.
+ //
+ // This solution is not perfect, as it assumes that the .rodata section
+ // is laid out within 2^31 bytes of each function body, but this seems
+ // to be sufficient for JIT.
+ BuildMI(allocMBB, DL, TII.get(X86::CALL64m))
+ .addReg(X86::RIP)
+ .addImm(0)
+ .addReg(0)
+ .addExternalSymbol("__morestack_addr")
+ .addReg(0);
+ MF.getMMI().setUsesMorestackAddr(true);
+ } else {
+ if (Is64Bit)
+ BuildMI(allocMBB, DL, TII.get(X86::CALL64pcrel32))
+ .addExternalSymbol("__morestack");
+ else
+ BuildMI(allocMBB, DL, TII.get(X86::CALLpcrel32))
+ .addExternalSymbol("__morestack");
+ }
+
+ if (IsNested)
+ BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET_RESTORE_R10));
+ else
+ BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET));
+
+ allocMBB->addSuccessor(&prologueMBB);
+
+ checkMBB->addSuccessor(allocMBB);
+ checkMBB->addSuccessor(&prologueMBB);
+
+#ifdef XDEBUG
+ MF.verify();
+#endif
+}
+
+/// Erlang programs may need a special prologue to handle the stack size they
+/// might need at runtime. That is because Erlang/OTP does not implement a C
+/// stack but uses a custom hybrid stack/heap architecture.
+/// (for more information see Eric Stenman's Ph.D. thesis:
+/// http://publications.uu.se/uu/fulltext/nbn_se_uu_diva-2688.pdf)
+///
+/// CheckStack:
+/// temp0 = sp - MaxStack
+/// if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
+/// OldStart:
+/// ...
+/// IncStack:
+/// call inc_stack # doubles the stack space
+/// temp0 = sp - MaxStack
+/// if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
+void X86FrameLowering::adjustForHiPEPrologue(MachineFunction &MF) const {
+ const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ const unsigned SlotSize =
+ static_cast<const X86RegisterInfo *>(MF.getSubtarget().getRegisterInfo())
+ ->getSlotSize();
+ const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
+ const bool Is64Bit = STI.is64Bit();
+ const bool IsLP64 = STI.isTarget64BitLP64();
+ DebugLoc DL;
+ // HiPE-specific values
+ const unsigned HipeLeafWords = 24;
+ const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5;
+ const unsigned Guaranteed = HipeLeafWords * SlotSize;
+ unsigned CallerStkArity = MF.getFunction()->arg_size() > CCRegisteredArgs ?
+ MF.getFunction()->arg_size() - CCRegisteredArgs : 0;
+ unsigned MaxStack = MFI->getStackSize() + CallerStkArity*SlotSize + SlotSize;
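+ // e.g. on x86-64 (SlotSize 8, six register args), a function with a 40-byte
+ // frame and 8 arguments gets CallerStkArity == 2 and an initial
+ // MaxStack == 40 + 2*8 + 8 == 64 bytes, before the call-driven growth below.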
+
+ assert(STI.isTargetLinux() &&
+ "HiPE prologue is only supported on Linux operating systems.");
+
+ // Compute the largest caller's frame that is needed to fit the callees'
+ // frames. This 'MaxStack' is computed from:
+ //
+ // a) the fixed frame size, which is the space needed for all spilled temps,
+ // b) outgoing on-stack parameter areas, and
+ // c) the minimum stack space this function needs to make available for the
+ // functions it calls (a tunable ABI property).
+ if (MFI->hasCalls()) {
+ unsigned MoreStackForCalls = 0;
+
+ for (MachineFunction::iterator MBBI = MF.begin(), MBBE = MF.end();
+ MBBI != MBBE; ++MBBI)
+ for (MachineBasicBlock::iterator MI = MBBI->begin(), ME = MBBI->end();
+ MI != ME; ++MI) {
+ if (!MI->isCall())
+ continue;
+
+ // Get callee operand.
+ const MachineOperand &MO = MI->getOperand(0);
+
+ // Only take account of global function calls (no closures etc.).
+ if (!MO.isGlobal())
+ continue;
+
+ const Function *F = dyn_cast<Function>(MO.getGlobal());
+ if (!F)
+ continue;
+
+ // Do not update 'MaxStack' for primitive and built-in functions
+ // (encoded with names containing "erlang." or "bif_", or containing
+ // neither a "." as in <Module>.<Function>.<Arity> nor an "_" as in the
+ // BIF "suspend_0") as they are executed on another stack.
+ if (F->getName().find("erlang.") != StringRef::npos ||
+ F->getName().find("bif_") != StringRef::npos ||
+ F->getName().find_first_of("._") == StringRef::npos)
+ continue;
+
+ unsigned CalleeStkArity =
+ F->arg_size() > CCRegisteredArgs ? F->arg_size()-CCRegisteredArgs : 0;
+ if (HipeLeafWords - 1 > CalleeStkArity)
+ MoreStackForCalls = std::max(MoreStackForCalls,
+ (HipeLeafWords - 1 - CalleeStkArity) * SlotSize);
+ }
+ MaxStack += MoreStackForCalls;
+ }
+
+ // If the stack frame needed is larger than the guaranteed amount, then
+ // runtime checks and calls to the "inc_stack_0" BIF are inserted in the
+ // assembly prologue.
+ if (MaxStack > Guaranteed) {
+ MachineBasicBlock &prologueMBB = MF.front();
+ MachineBasicBlock *stackCheckMBB = MF.CreateMachineBasicBlock();
+ MachineBasicBlock *incStackMBB = MF.CreateMachineBasicBlock();
+
+ for (MachineBasicBlock::livein_iterator I = prologueMBB.livein_begin(),
+ E = prologueMBB.livein_end(); I != E; ++I) {
+ stackCheckMBB->addLiveIn(*I);
+ incStackMBB->addLiveIn(*I);
+ }
+
+ MF.push_front(incStackMBB);
+ MF.push_front(stackCheckMBB);
+
+ unsigned ScratchReg, SPReg, PReg, SPLimitOffset;
+ unsigned LEAop, CMPop, CALLop;
+ if (Is64Bit) {
+ SPReg = X86::RSP;
+ PReg = X86::RBP;
+ LEAop = X86::LEA64r;
+ CMPop = X86::CMP64rm;
+ CALLop = X86::CALL64pcrel32;
+ SPLimitOffset = 0x90;
+ } else {
+ SPReg = X86::ESP;
+ PReg = X86::EBP;
+ LEAop = X86::LEA32r;
+ CMPop = X86::CMP32rm;
+ CALLop = X86::CALLpcrel32;
+ SPLimitOffset = 0x4c;
+ }
+
+ ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
+ assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
+ "HiPE prologue scratch register is live-in");
+
+ // Create new MBB for StackCheck:
+ addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(LEAop), ScratchReg),
+ SPReg, false, -MaxStack);
+ // SPLimitOffset is in a fixed heap location (pointed by BP).
+ addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(CMPop))
+ .addReg(ScratchReg), PReg, false, SPLimitOffset);
+ BuildMI(stackCheckMBB, DL, TII.get(X86::JAE_1)).addMBB(&prologueMBB);
+
+ // Create new MBB for IncStack:
+ BuildMI(incStackMBB, DL, TII.get(CALLop))
+ .addExternalSymbol("inc_stack_0");
+ addRegOffset(BuildMI(incStackMBB, DL, TII.get(LEAop), ScratchReg),
+ SPReg, false, -MaxStack);
+ addRegOffset(BuildMI(incStackMBB, DL, TII.get(CMPop))
+ .addReg(ScratchReg), PReg, false, SPLimitOffset);
+ BuildMI(incStackMBB, DL, TII.get(X86::JLE_1)).addMBB(incStackMBB);
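+ // Note the self-loop: inc_stack_0 is called repeatedly until the recheck
+ // against SPLimitOffset(PReg) finally passes, then we fall through to the
+ // original prologue.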
+
+ stackCheckMBB->addSuccessor(&prologueMBB, 99);
+ stackCheckMBB->addSuccessor(incStackMBB, 1);
+ incStackMBB->addSuccessor(&prologueMBB, 99);
+ incStackMBB->addSuccessor(incStackMBB, 1);
+ }
+#ifdef XDEBUG
+ MF.verify();
+#endif
+}
+
+bool X86FrameLowering::
+convertArgMovsToPushes(MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, uint64_t Amount) const {
+ const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ const X86RegisterInfo &RegInfo = *static_cast<const X86RegisterInfo *>(
+ MF.getSubtarget().getRegisterInfo());
+ unsigned StackPtr = RegInfo.getStackRegister();
+
+ // Scan the call setup sequence for the pattern we're looking for.
+ // We only handle a simple case now - a sequence of MOV32mi or MOV32mr
+ // instructions that store a sequence of 32-bit values onto the stack, with
+ // no gaps.
+ std::map<int64_t, MachineBasicBlock::iterator> MovMap;
+ do {
+ int Opcode = I->getOpcode();
+ if (Opcode != X86::MOV32mi && Opcode != X86::MOV32mr)
+ break;
+
+ // We only want movs of the form:
+ // movl imm/r32, k(%esp)
+ // If we run into something else, bail out.
+ // Note that AddrBaseReg may, counterintuitively, not be a register...
+ if (!I->getOperand(X86::AddrBaseReg).isReg() ||
+ (I->getOperand(X86::AddrBaseReg).getReg() != StackPtr) ||
+ !I->getOperand(X86::AddrScaleAmt).isImm() ||
+ (I->getOperand(X86::AddrScaleAmt).getImm() != 1) ||
+ (I->getOperand(X86::AddrIndexReg).getReg() != X86::NoRegister) ||
+ (I->getOperand(X86::AddrSegmentReg).getReg() != X86::NoRegister) ||
+ !I->getOperand(X86::AddrDisp).isImm())
+ return false;
+
+ int64_t StackDisp = I->getOperand(X86::AddrDisp).getImm();
+
+ // We don't want to consider the unaligned case.
+ if (StackDisp % 4)
+ return false;
+
+ // If the same stack slot is being filled twice, something's fishy.
+ if (!MovMap.insert(std::make_pair(StackDisp, I)).second)
+ return false;
+
+ ++I;
+ } while (I != MBB.end());
+
+ // We now expect the end of the sequence - a call and a stack adjust.
+ if (I == MBB.end())
+ return false;
+ if (!I->isCall())
+ return false;
+ MachineBasicBlock::iterator Call = I;
+ if ((++I)->getOpcode() != TII.getCallFrameDestroyOpcode())
+ return false;
+
+ // Now, go through the map, and see that we don't have any gaps,
+ // but only a series of 32-bit MOVs.
+ // Since std::map provides ordered iteration, the original order
+ // of the MOVs doesn't matter.
+ int64_t ExpectedDist = 0;
+ for (auto MMI = MovMap.begin(), MME = MovMap.end(); MMI != MME;
+ ++MMI, ExpectedDist += 4)
+ if (MMI->first != ExpectedDist)
+ return false;
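+ // e.g. displacements {0, 4, 8} form a valid gap-free sequence, while
+ // {0, 8} leaves a 4-byte hole and the transformation is rejected.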
+
+ // Ok, everything looks fine. Do the transformation.
+ DebugLoc DL = I->getDebugLoc();
+
+ // It's possible the original stack adjustment amount was larger than
+ // that done by the pushes. If so, we still need a SUB.
+ Amount -= ExpectedDist;
+ if (Amount) {
+ MachineInstr* Sub = BuildMI(MBB, Call, DL,
+ TII.get(getSUBriOpcode(false, Amount)), StackPtr)
+ .addReg(StackPtr).addImm(Amount);
+ Sub->getOperand(3).setIsDead();
+ }
+
+ // Now, iterate through the map in reverse order, and replace the movs
+ // with pushes. MOVmi/MOVmr doesn't have any defs, so there is no need to
+ // replace uses.
+ for (auto MMI = MovMap.rbegin(), MME = MovMap.rend(); MMI != MME; ++MMI) {
+ MachineBasicBlock::iterator MOV = MMI->second;
+ MachineOperand PushOp = MOV->getOperand(X86::AddrNumOperands);
+
+ // Replace MOVmr with PUSH32r, and MOVmi with PUSHi of appropriate size
+ int PushOpcode = X86::PUSH32r;
+ if (MOV->getOpcode() == X86::MOV32mi)
+ PushOpcode = getPUSHiOpcode(false, PushOp);
+
+ BuildMI(MBB, Call, DL, TII.get(PushOpcode)).addOperand(PushOp);
+ MBB.erase(MOV);
+ }
+
+ return true;
+}
+
+void X86FrameLowering::
+eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const {
+ const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+ const X86RegisterInfo &RegInfo = *static_cast<const X86RegisterInfo *>(
+ MF.getSubtarget().getRegisterInfo());
+ unsigned StackPtr = RegInfo.getStackRegister();
+ bool reserveCallFrame = hasReservedCallFrame(MF);
+ int Opcode = I->getOpcode();
+ bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();
+ const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();
+ bool IsLP64 = STI.isTarget64BitLP64();
+ DebugLoc DL = I->getDebugLoc();
+ uint64_t Amount = !reserveCallFrame ? I->getOperand(0).getImm() : 0;
+ uint64_t CalleeAmt = isDestroy ? I->getOperand(1).getImm() : 0;
+ I = MBB.erase(I);
+
+ if (!reserveCallFrame) {
+ // If the stack pointer can be changed after the prologue, turn the
+ // adjcallstackdown instruction into a 'sub ESP, <amt>' and the
+ // adjcallstackup instruction into an 'add ESP, <amt>'.
+ if (Amount == 0)
+ return;
+
+ // We need to keep the stack aligned properly. To do this, we round the
+ // amount of space needed for the outgoing arguments up to the next
+ // alignment boundary.
+ unsigned StackAlign = MF.getTarget()
+ .getSubtargetImpl()
+ ->getFrameLowering()
+ ->getStackAlignment();
+ Amount = (Amount + StackAlign - 1) / StackAlign * StackAlign;
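+ // e.g. an Amount of 20 with a 16-byte StackAlign is rounded up to 32
+ // by the usual (A + N - 1) / N * N round-to-multiple idiom.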
+
+ MachineInstr *New = nullptr;
+ if (Opcode == TII.getCallFrameSetupOpcode()) {
+ // Try to convert movs to the stack into pushes.
+ // We currently only look for a pattern that appears in 32-bit
+ // calling conventions.
+ if (!IsLP64 && convertArgMovsToPushes(MF, MBB, I, Amount))
+ return;
+
+ New = BuildMI(MF, DL, TII.get(getSUBriOpcode(IsLP64, Amount)),
+ StackPtr)
+ .addReg(StackPtr)
+ .addImm(Amount);
+ } else {
+ assert(Opcode == TII.getCallFrameDestroyOpcode());
+
+ // Factor out the amount the callee already popped.
+ Amount -= CalleeAmt;
+
+ if (Amount) {
+ unsigned Opc = getADDriOpcode(IsLP64, Amount);
+ New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
+ .addReg(StackPtr).addImm(Amount);
+ }
+ }
+
+ if (New) {
+ // The EFLAGS implicit def is dead.
+ New->getOperand(3).setIsDead();
+
+ // Replace the pseudo instruction with a new instruction.
+ MBB.insert(I, New);
+ }
+
+ return;
+ }
+
+ if (Opcode == TII.getCallFrameDestroyOpcode() && CalleeAmt) {
+ // If we are performing frame pointer elimination and if the callee pops
+ // something off the stack pointer, add it back. We do this until we have
+ // more advanced stack pointer tracking ability.
+ unsigned Opc = getSUBriOpcode(IsLP64, CalleeAmt);
+ MachineInstr *New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
+ .addReg(StackPtr).addImm(CalleeAmt);
+
+ // The EFLAGS implicit def is dead.
+ New->getOperand(3).setIsDead();
+
+ // We are not tracking the stack pointer adjustment by the callee, so make
+ // sure we restore the stack pointer immediately after the call; there may
+ // be spill code inserted between the CALL and ADJCALLSTACKUP instructions.
+ MachineBasicBlock::iterator B = MBB.begin();
+ while (I != B && !std::prev(I)->isCall())
+ --I;
+ MBB.insert(I, New);
+ }
+}
+