-//===----- X86CallFrameOptimization.cpp - Optimize x86 call sequences -----===//\r
-//\r
-// The LLVM Compiler Infrastructure\r
-//\r
-// This file is distributed under the University of Illinois Open Source\r
-// License. See LICENSE.TXT for details.\r
-//\r
-//===----------------------------------------------------------------------===//\r
-//\r
-// This file defines a pass that optimizes call sequences on x86.\r
-// Currently, it converts movs of function parameters onto the stack into \r
-// pushes. This is beneficial for two main reasons:\r
-// 1) The push instruction encoding is much smaller than an esp-relative mov\r
-// 2) It is possible to push memory arguments directly. So, if the\r
-// the transformation is preformed pre-reg-alloc, it can help relieve\r
-// register pressure.\r
-//\r
-//===----------------------------------------------------------------------===//\r
-\r
-#include <algorithm>\r
-\r
-#include "X86.h"\r
-#include "X86InstrInfo.h"\r
-#include "X86Subtarget.h"\r
-#include "X86MachineFunctionInfo.h"\r
-#include "llvm/ADT/Statistic.h"\r
-#include "llvm/CodeGen/MachineFunctionPass.h"\r
-#include "llvm/CodeGen/MachineInstrBuilder.h"\r
-#include "llvm/CodeGen/MachineRegisterInfo.h"\r
-#include "llvm/CodeGen/Passes.h"\r
-#include "llvm/IR/Function.h"\r
-#include "llvm/Support/Debug.h"\r
-#include "llvm/Support/raw_ostream.h"\r
-#include "llvm/Target/TargetInstrInfo.h"\r
-\r
-using namespace llvm;\r
-\r
-#define DEBUG_TYPE "x86-cf-opt"\r
-\r
-cl::opt<bool> NoX86CFOpt("no-x86-call-frame-opt",\r
- cl::desc("Avoid optimizing x86 call frames for size"),\r
- cl::init(false), cl::Hidden);\r
-\r
-namespace {\r
-class X86CallFrameOptimization : public MachineFunctionPass {\r
-public:\r
- X86CallFrameOptimization() : MachineFunctionPass(ID) {}\r
-\r
- bool runOnMachineFunction(MachineFunction &MF) override;\r
-\r
-private:\r
- bool shouldPerformTransformation(MachineFunction &MF);\r
-\r
- bool adjustCallSequence(MachineFunction &MF, MachineBasicBlock &MBB,\r
- MachineBasicBlock::iterator I);\r
-\r
- MachineInstr *canFoldIntoRegPush(MachineBasicBlock::iterator FrameSetup,\r
- unsigned Reg);\r
-\r
- const char *getPassName() const override {\r
- return "X86 Optimize Call Frame";\r
- }\r
-\r
- const TargetInstrInfo *TII;\r
- const TargetFrameLowering *TFL;\r
- const MachineRegisterInfo *MRI;\r
- static char ID;\r
-};\r
-\r
-char X86CallFrameOptimization::ID = 0;\r
-}\r
-\r
-FunctionPass *llvm::createX86CallFrameOptimization() {\r
- return new X86CallFrameOptimization();\r
-}\r
-\r
-// This checks whether the transformation is legal and profitable\r
-bool X86CallFrameOptimization::shouldPerformTransformation(MachineFunction &MF) {\r
- if (NoX86CFOpt.getValue())\r
- return false;\r
-\r
- // We currently only support call sequences where *all* parameters.\r
- // are passed on the stack.\r
- // No point in running this in 64-bit mode, since some arguments are\r
- // passed in-register in all common calling conventions, so the pattern\r
- // we're looking for will never match.\r
- const X86Subtarget &STI = MF.getTarget().getSubtarget<X86Subtarget>();\r
- if (STI.is64Bit())\r
- return false;\r
-\r
- // You would expect straight-line code between call-frame setup and\r
- // call-frame destroy. You would be wrong. There are circumstances (e.g.\r
- // CMOV_GR8 expansion of a select that feeds a function call!) where we can\r
- // end up with the setup and the destroy in different basic blocks.\r
- // This is bad, and breaks SP adjustment.\r
- // So, check that all of the frames in the function are closed inside\r
- // the same block, and, for good measure, that there are no nested frames.\r
- int FrameSetupOpcode = TII->getCallFrameSetupOpcode();\r
- int FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();\r
- for (MachineBasicBlock &BB : MF) {\r
- bool InsideFrameSequence = false;\r
- for (MachineInstr &MI : BB) {\r
- if (MI.getOpcode() == FrameSetupOpcode) {\r
- if (InsideFrameSequence)\r
- return false;\r
- InsideFrameSequence = true;\r
- }\r
- else if (MI.getOpcode() == FrameDestroyOpcode) {\r
- if (!InsideFrameSequence)\r
- return false;\r
- InsideFrameSequence = false;\r
- }\r
- }\r
-\r
- if (InsideFrameSequence)\r
- return false;\r
- }\r
-\r
- // Now that we know the transformation is legal, check if it is\r
- // profitable.\r
- // TODO: Add a heuristic that actually looks at the function,\r
- // and enable this for more cases.\r
-\r
- // This transformation is always a win when we expected to have\r
- // a reserved call frame. Under other circumstances, it may be either \r
- // a win or a loss, and requires a heuristic.\r
- // For now, enable it only for the relatively clear win cases.\r
- bool CannotReserveFrame = MF.getFrameInfo()->hasVarSizedObjects();\r
- if (CannotReserveFrame)\r
- return true;\r
-\r
- // For now, don't even try to evaluate the profitability when\r
- // not optimizing for size.\r
- AttributeSet FnAttrs = MF.getFunction()->getAttributes();\r
- bool OptForSize =\r
- FnAttrs.hasAttribute(AttributeSet::FunctionIndex,\r
- Attribute::OptimizeForSize) ||\r
- FnAttrs.hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize);\r
-\r
- if (!OptForSize)\r
- return false;\r
-\r
- // Stack re-alignment can make this unprofitable even in terms of size.\r
- // As mentioned above, a better heuristic is needed. For now, don't do this\r
- // when the required alignment is above 8. (4 would be the safe choice, but\r
- // some experimentation showed 8 is generally good).\r
- if (TFL->getStackAlignment() > 8)\r
- return false;\r
-\r
- return true;\r
-}\r
-\r
-bool X86CallFrameOptimization::runOnMachineFunction(MachineFunction &MF) {\r
- TII = MF.getSubtarget().getInstrInfo();\r
- TFL = MF.getSubtarget().getFrameLowering();\r
- MRI = &MF.getRegInfo();\r
-\r
- if (!shouldPerformTransformation(MF))\r
- return false;\r
-\r
- int FrameSetupOpcode = TII->getCallFrameSetupOpcode();\r
-\r
- bool Changed = false;\r
-\r
- for (MachineFunction::iterator BB = MF.begin(), E = MF.end(); BB != E; ++BB)\r
- for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ++I)\r
- if (I->getOpcode() == FrameSetupOpcode)\r
- Changed |= adjustCallSequence(MF, *BB, I);\r
-\r
- return Changed;\r
-}\r
-\r
-bool X86CallFrameOptimization::adjustCallSequence(MachineFunction &MF,\r
- MachineBasicBlock &MBB,\r
- MachineBasicBlock::iterator I) {\r
-\r
- // Check that this particular call sequence is amenable to the\r
- // transformation.\r
- const X86RegisterInfo &RegInfo = *static_cast<const X86RegisterInfo *>(\r
- MF.getSubtarget().getRegisterInfo());\r
- unsigned StackPtr = RegInfo.getStackRegister();\r
- int FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();\r
-\r
- // We expect to enter this at the beginning of a call sequence\r
- assert(I->getOpcode() == TII->getCallFrameSetupOpcode());\r
- MachineBasicBlock::iterator FrameSetup = I++;\r
-\r
- \r
- // For globals in PIC mode, we can have some LEAs here.\r
- // Ignore them, they don't bother us.\r
- // TODO: Extend this to something that covers more cases.\r
- while (I->getOpcode() == X86::LEA32r)\r
- ++I;\r
- \r
- // We expect a copy instruction here.\r
- // TODO: The copy instruction is a lowering artifact.\r
- // We should also support a copy-less version, where the stack\r
- // pointer is used directly.\r
- if (!I->isCopy() || !I->getOperand(0).isReg())\r
- return false;\r
- MachineBasicBlock::iterator SPCopy = I++;\r
- StackPtr = SPCopy->getOperand(0).getReg();\r
-\r
- // Scan the call setup sequence for the pattern we're looking for.\r
- // We only handle a simple case - a sequence of MOV32mi or MOV32mr\r
- // instructions, that push a sequence of 32-bit values onto the stack, with\r
- // no gaps between them.\r
- SmallVector<MachineInstr*, 4> MovVector(4, nullptr);\r
- unsigned int MaxAdjust = FrameSetup->getOperand(0).getImm() / 4;\r
- if (MaxAdjust > 4)\r
- MovVector.resize(MaxAdjust, nullptr);\r
-\r
- do {\r
- int Opcode = I->getOpcode();\r
- if (Opcode != X86::MOV32mi && Opcode != X86::MOV32mr)\r
- break;\r
-\r
- // We only want movs of the form:\r
- // movl imm/r32, k(%esp)\r
- // If we run into something else, bail.\r
- // Note that AddrBaseReg may, counter to its name, not be a register,\r
- // but rather a frame index.\r
- // TODO: Support the fi case. This should probably work now that we\r
- // have the infrastructure to track the stack pointer within a call\r
- // sequence.\r
- if (!I->getOperand(X86::AddrBaseReg).isReg() ||\r
- (I->getOperand(X86::AddrBaseReg).getReg() != StackPtr) ||\r
- !I->getOperand(X86::AddrScaleAmt).isImm() ||\r
- (I->getOperand(X86::AddrScaleAmt).getImm() != 1) ||\r
- (I->getOperand(X86::AddrIndexReg).getReg() != X86::NoRegister) ||\r
- (I->getOperand(X86::AddrSegmentReg).getReg() != X86::NoRegister) ||\r
- !I->getOperand(X86::AddrDisp).isImm())\r
- return false;\r
-\r
- int64_t StackDisp = I->getOperand(X86::AddrDisp).getImm();\r
- assert(StackDisp >= 0 && "Negative stack displacement when passing parameters");\r
-\r
- // We really don't want to consider the unaligned case.\r
- if (StackDisp % 4)\r
- return false;\r
- StackDisp /= 4;\r
-\r
- assert((size_t)StackDisp < MovVector.size() &&\r
- "Function call has more parameters than the stack is adjusted for.");\r
-\r
- // If the same stack slot is being filled twice, something's fishy.\r
- if (MovVector[StackDisp] != nullptr)\r
- return false;\r
- MovVector[StackDisp] = I;\r
-\r
- ++I;\r
- } while (I != MBB.end());\r
-\r
- // We now expect the end of the sequence - a call and a stack adjust.\r
- if (I == MBB.end())\r
- return false;\r
-\r
- // For PCrel calls, we expect an additional COPY of the basereg.\r
- // If we find one, skip it.\r
- if (I->isCopy()) {\r
- if (I->getOperand(1).getReg() ==\r
- MF.getInfo<X86MachineFunctionInfo>()->getGlobalBaseReg())\r
- ++I;\r
- else\r
- return false;\r
- }\r
-\r
- if (!I->isCall())\r
- return false;\r
- MachineBasicBlock::iterator Call = I;\r
- if ((++I)->getOpcode() != FrameDestroyOpcode)\r
- return false;\r
-\r
- // Now, go through the vector, and see that we don't have any gaps,\r
- // but only a series of 32-bit MOVs.\r
- \r
- int64_t ExpectedDist = 0;\r
- auto MMI = MovVector.begin(), MME = MovVector.end();\r
- for (; MMI != MME; ++MMI, ExpectedDist += 4)\r
- if (*MMI == nullptr)\r
- break;\r
- \r
- // If the call had no parameters, do nothing\r
- if (!ExpectedDist)\r
- return false;\r
-\r
- // We are either at the last parameter, or a gap. \r
- // Make sure it's not a gap\r
- for (; MMI != MME; ++MMI)\r
- if (*MMI != nullptr)\r
- return false;\r
-\r
- // Ok, we can in fact do the transformation for this call.\r
- // Do not remove the FrameSetup instruction, but adjust the parameters.\r
- // PEI will end up finalizing the handling of this.\r
- FrameSetup->getOperand(1).setImm(ExpectedDist);\r
-\r
- DebugLoc DL = I->getDebugLoc();\r
- // Now, iterate through the vector in reverse order, and replace the movs\r
- // with pushes. MOVmi/MOVmr doesn't have any defs, so no need to \r
- // replace uses.\r
- for (int Idx = (ExpectedDist / 4) - 1; Idx >= 0; --Idx) {\r
- MachineBasicBlock::iterator MOV = *MovVector[Idx];\r
- MachineOperand PushOp = MOV->getOperand(X86::AddrNumOperands);\r
- if (MOV->getOpcode() == X86::MOV32mi) {\r
- unsigned PushOpcode = X86::PUSHi32;\r
- // If the operand is a small (8-bit) immediate, we can use a\r
- // PUSH instruction with a shorter encoding.\r
- // Note that isImm() may fail even though this is a MOVmi, because\r
- // the operand can also be a symbol.\r
- if (PushOp.isImm()) {\r
- int64_t Val = PushOp.getImm();\r
- if (isInt<8>(Val))\r
- PushOpcode = X86::PUSH32i8;\r
- }\r
- BuildMI(MBB, Call, DL, TII->get(PushOpcode)).addOperand(PushOp);\r
- } else {\r
- unsigned int Reg = PushOp.getReg();\r
-\r
- // If PUSHrmm is not slow on this target, try to fold the source of the\r
- // push into the instruction.\r
- const X86Subtarget &ST = MF.getTarget().getSubtarget<X86Subtarget>();\r
- bool SlowPUSHrmm = ST.isAtom() || ST.isSLM();\r
-\r
- // Check that this is legal to fold. Right now, we're extremely\r
- // conservative about that.\r
- MachineInstr *DefMov = nullptr;\r
- if (!SlowPUSHrmm && (DefMov = canFoldIntoRegPush(FrameSetup, Reg))) {\r
- MachineInstr *Push = BuildMI(MBB, Call, DL, TII->get(X86::PUSH32rmm));\r
-\r
- unsigned NumOps = DefMov->getDesc().getNumOperands();\r
- for (unsigned i = NumOps - X86::AddrNumOperands; i != NumOps; ++i)\r
- Push->addOperand(DefMov->getOperand(i));\r
-\r
- DefMov->eraseFromParent();\r
- } else {\r
- BuildMI(MBB, Call, DL, TII->get(X86::PUSH32r)).addReg(Reg).getInstr();\r
- }\r
- }\r
-\r
- MBB.erase(MOV);\r
- }\r
-\r
- // The stack-pointer copy is no longer used in the call sequences.\r
- // There should not be any other users, but we can't commit to that, so:\r
- if (MRI->use_empty(SPCopy->getOperand(0).getReg()))\r
- SPCopy->eraseFromParent();\r
-\r
- // Once we've done this, we need to make sure PEI doesn't assume a reserved\r
- // frame.\r
- X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();\r
- FuncInfo->setHasPushSequences(true);\r
-\r
- return true;\r
-}\r
-\r
-MachineInstr *X86CallFrameOptimization::canFoldIntoRegPush(\r
- MachineBasicBlock::iterator FrameSetup, unsigned Reg) {\r
- // Do an extremely restricted form of load folding.\r
- // ISel will often create patterns like:\r
- // movl 4(%edi), %eax\r
- // movl 8(%edi), %ecx\r
- // movl 12(%edi), %edx\r
- // movl %edx, 8(%esp)\r
- // movl %ecx, 4(%esp)\r
- // movl %eax, (%esp)\r
- // call\r
- // Get rid of those with prejudice.\r
- if (!TargetRegisterInfo::isVirtualRegister(Reg))\r
- return nullptr;\r
-\r
- // Make sure this is the only use of Reg.\r
- if (!MRI->hasOneNonDBGUse(Reg))\r
- return nullptr;\r
-\r
- MachineBasicBlock::iterator DefMI = MRI->getVRegDef(Reg);\r
-\r
- // Make sure the def is a MOV from memory.\r
- // If the def is an another block, give up.\r
- if (DefMI->getOpcode() != X86::MOV32rm ||\r
- DefMI->getParent() != FrameSetup->getParent())\r
- return nullptr;\r
-\r
- // Be careful with movs that load from a stack slot, since it may get\r
- // resolved incorrectly.\r
- // TODO: Again, we already have the infrastructure, so this should work.\r
- if (!DefMI->getOperand(1).isReg())\r
- return nullptr;\r
-\r
- // Now, make sure everything else up until the ADJCALLSTACK is a sequence\r
- // of MOVs. To be less conservative would require duplicating a lot of the\r
- // logic from PeepholeOptimizer.\r
- // FIXME: A possibly better approach would be to teach the PeepholeOptimizer\r
- // to be smarter about folding into pushes. \r
- for (auto I = DefMI; I != FrameSetup; ++I)\r
- if (I->getOpcode() != X86::MOV32rm)\r
- return nullptr;\r
-\r
- return DefMI;\r
-}\r
+//===----- X86CallFrameOptimization.cpp - Optimize x86 call sequences -----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a pass that optimizes call sequences on x86.
+// Currently, it converts movs of function parameters onto the stack into
+// pushes. This is beneficial for two main reasons:
+// 1) The push instruction encoding is much smaller than an esp-relative mov
+// 2) It is possible to push memory arguments directly. So, if the
+// transformation is performed pre-reg-alloc, it can help relieve
+// register pressure.
+//
+//===----------------------------------------------------------------------===//
+
+#include <algorithm>
+
+#include "X86.h"
+#include "X86InstrInfo.h"
+#include "X86Subtarget.h"
+#include "X86MachineFunctionInfo.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetInstrInfo.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "x86-cf-opt"
+
+static cl::opt<bool>
+ NoX86CFOpt("no-x86-call-frame-opt",
+ cl::desc("Avoid optimizing x86 call frames for size"),
+ cl::init(false), cl::Hidden);
+
+namespace {
+class X86CallFrameOptimization : public MachineFunctionPass {
+public:
+ X86CallFrameOptimization() : MachineFunctionPass(ID) {}
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+private:
+ // Information we know about a particular call site
+ struct CallContext {
+ CallContext()
+ : Call(nullptr), SPCopy(nullptr), ExpectedDist(0),
+ MovVector(4, nullptr), NoStackParams(false), UsePush(false) {}
+
+ // Actual call instruction
+ MachineInstr *Call;
+
+ // A copy of the stack pointer
+ MachineInstr *SPCopy;
+
+ // The total displacement of all passed parameters
+ int64_t ExpectedDist;
+
+ // The sequence of movs used to pass the parameters
+ SmallVector<MachineInstr *, 4> MovVector;
+
+ // True if this call site has no stack parameters
+ bool NoStackParams;
+
+ // True if this call site can use push instructions
+ bool UsePush;
+ };
+
+ typedef DenseMap<MachineInstr *, CallContext> ContextMap;
+
+ bool isLegal(MachineFunction &MF);
+
+ bool isProfitable(MachineFunction &MF, ContextMap &CallSeqMap);
+
+ void collectCallInfo(MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I, CallContext &Context);
+
+ bool adjustCallSequence(MachineFunction &MF, MachineBasicBlock::iterator I,
+ const CallContext &Context);
+
+ MachineInstr *canFoldIntoRegPush(MachineBasicBlock::iterator FrameSetup,
+ unsigned Reg);
+
+ const char *getPassName() const override { return "X86 Optimize Call Frame"; }
+
+ const TargetInstrInfo *TII;
+ const TargetFrameLowering *TFL;
+ const MachineRegisterInfo *MRI;
+ static char ID;
+};
+
+char X86CallFrameOptimization::ID = 0;
+}
+
+FunctionPass *llvm::createX86CallFrameOptimization() {
+ return new X86CallFrameOptimization();
+}
+
+// This checks whether the transformation is legal.
+// Also returns false in cases where it's potentially legal, but
+// we don't even want to try.
+bool X86CallFrameOptimization::isLegal(MachineFunction &MF) {
+ if (NoX86CFOpt.getValue())
+ return false;
+
+ // We currently only support call sequences where *all* parameters
+ // are passed on the stack.
+ // No point in running this in 64-bit mode, since some arguments are
+ // passed in-register in all common calling conventions, so the pattern
+ // we're looking for will never match.
+ const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
+ if (STI.is64Bit())
+ return false;
+
+ // You would expect straight-line code between call-frame setup and
+ // call-frame destroy. You would be wrong. There are circumstances (e.g.
+ // CMOV_GR8 expansion of a select that feeds a function call!) where we can
+ // end up with the setup and the destroy in different basic blocks.
+ // This is bad, and breaks SP adjustment.
+ // So, check that all of the frames in the function are closed inside
+ // the same block, and, for good measure, that there are no nested frames.
+ unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();
+ unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
+ for (MachineBasicBlock &BB : MF) {
+ bool InsideFrameSequence = false;
+ for (MachineInstr &MI : BB) {
+ if (MI.getOpcode() == FrameSetupOpcode) {
+ if (InsideFrameSequence)
+ return false;
+ InsideFrameSequence = true;
+ } else if (MI.getOpcode() == FrameDestroyOpcode) {
+ if (!InsideFrameSequence)
+ return false;
+ InsideFrameSequence = false;
+ }
+ }
+
+ if (InsideFrameSequence)
+ return false;
+ }
+
+ return true;
+}
+
+// Check whether this transformation is profitable for a particular
+// function - in terms of code size.
+bool X86CallFrameOptimization::isProfitable(MachineFunction &MF,
+ ContextMap &CallSeqMap) {
+ // This transformation is always a win when we do not expect to have
+ // a reserved call frame. Under other circumstances, it may be either
+ // a win or a loss, and requires a heuristic.
+ bool CannotReserveFrame = MF.getFrameInfo()->hasVarSizedObjects();
+ if (CannotReserveFrame)
+ return true;
+
+ // Don't do this when not optimizing for size.
+ bool OptForSize =
+ MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize) ||
+ MF.getFunction()->hasFnAttribute(Attribute::MinSize);
+
+ if (!OptForSize)
+ return false;
+
+ unsigned StackAlign = TFL->getStackAlignment();
+
+ int64_t Advantage = 0;
+ for (const auto &CC : CallSeqMap) {
+ // Call sites where no parameters are passed on the stack
+ // do not affect the cost, since no stack adjustment is needed.
+ if (CC.second.NoStackParams)
+ continue;
+
+ if (!CC.second.UsePush) {
+ // If we don't use pushes for a particular call site,
+ // we pay for not having a reserved call frame with an
+ // additional sub/add esp pair. The cost is ~3 bytes per instruction,
+ // depending on the size of the constant.
+ // TODO: Callee-pop functions should have a smaller penalty, because
+ // an add is needed even with a reserved call frame.
+ Advantage -= 6;
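+ // (E.g., "subl $8, %esp" and "addl $8, %esp" each encode in 3 bytes.)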
+ } else {
+ // We can use pushes. First, account for the fixed costs.
+ // We'll need an add after the call.
+ Advantage -= 3;
+ // If we have to realign the stack, we'll also need a sub before the call.
+ if (CC.second.ExpectedDist % StackAlign)
+ Advantage -= 3;
+ // Now, for each push, we save ~3 bytes. For small constants, we actually
+ // save more (up to 5 bytes), but 3 should be a good approximation.
+ Advantage += (CC.second.ExpectedDist / 4) * 3;
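+ // As a concrete illustration (hypothetical numbers): a call passing
+ // three 32-bit stack arguments (ExpectedDist = 12) with StackAlign = 4
+ // pays 3 bytes for the add after the call, nothing for realignment
+ // (12 % 4 == 0), and saves 3 * 3 = 9 bytes on the pushes, for a net
+ // Advantage of +6.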
+ }
+ }
+
+ return (Advantage >= 0);
+}
+
+bool X86CallFrameOptimization::runOnMachineFunction(MachineFunction &MF) {
+ TII = MF.getSubtarget().getInstrInfo();
+ TFL = MF.getSubtarget().getFrameLowering();
+ MRI = &MF.getRegInfo();
+
+ if (!isLegal(MF))
+ return false;
+
+ unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();
+
+ bool Changed = false;
+
+ ContextMap CallSeqMap;
+
+ for (MachineFunction::iterator BB = MF.begin(), E = MF.end(); BB != E; ++BB)
+ for (MachineBasicBlock::iterator I = BB->begin(); I != BB->end(); ++I)
+ if (I->getOpcode() == FrameSetupOpcode) {
+ CallContext &Context = CallSeqMap[I];
+ collectCallInfo(MF, *BB, I, Context);
+ }
+
+ if (!isProfitable(MF, CallSeqMap))
+ return false;
+
+ for (const auto &CC : CallSeqMap)
+ if (CC.second.UsePush)
+ Changed |= adjustCallSequence(MF, CC.first, CC.second);
+
+ return Changed;
+}
+
+void X86CallFrameOptimization::collectCallInfo(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ CallContext &Context) {
+ // Check that this particular call sequence is amenable to the
+ // transformation.
+ const X86RegisterInfo &RegInfo = *static_cast<const X86RegisterInfo *>(
+ MF.getSubtarget().getRegisterInfo());
+ unsigned StackPtr = RegInfo.getStackRegister();
+ unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
+
+ // We expect to enter this at the beginning of a call sequence
+ assert(I->getOpcode() == TII->getCallFrameSetupOpcode());
+ MachineBasicBlock::iterator FrameSetup = I++;
+
+ // How much do we adjust the stack? This puts an upper bound on
+ // the number of parameters actually passed on it.
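+ // (E.g., an adjustment of 16 bytes leaves room for at most four
+ // 32-bit stack slots.)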
+ unsigned int MaxAdjust = FrameSetup->getOperand(0).getImm() / 4;
+
+ // A zero adjustment means no stack parameters
+ if (!MaxAdjust) {
+ Context.NoStackParams = true;
+ return;
+ }
+
+ // For globals in PIC mode, we can have some LEAs here.
+ // Ignore them, they don't bother us.
+ // TODO: Extend this to something that covers more cases.
+ while (I->getOpcode() == X86::LEA32r)
+ ++I;
+
+ // We expect a copy instruction here.
+ // TODO: The copy instruction is a lowering artifact.
+ // We should also support a copy-less version, where the stack
+ // pointer is used directly.
+ if (!I->isCopy() || !I->getOperand(0).isReg())
+ return;
+ Context.SPCopy = I++;
+ StackPtr = Context.SPCopy->getOperand(0).getReg();
+
+ // Scan the call setup sequence for the pattern we're looking for.
+ // We only handle a simple case - a sequence of MOV32mi or MOV32mr
+ // instructions, that push a sequence of 32-bit values onto the stack, with
+ // no gaps between them.
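+ // As a rough sketch (operand details illustrative only), the shape of
+ // the sequence we expect to match is:
+ //   ADJCALLSTACKDOWN32 8
+ //   %vreg1 = COPY %ESP
+ //   MOV32mi %vreg1, 1, %noreg, 0, %noreg, <imm>
+ //   MOV32mr %vreg1, 1, %noreg, 4, %noreg, %vreg2
+ //   CALLpcrel32 ...
+ //   ADJCALLSTACKUP32 8, 0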
+ if (MaxAdjust > 4)
+ Context.MovVector.resize(MaxAdjust, nullptr);
+
+ do {
+ int Opcode = I->getOpcode();
+ if (Opcode != X86::MOV32mi && Opcode != X86::MOV32mr)
+ break;
+
+ // We only want movs of the form:
+ // movl imm/r32, k(%esp)
+ // If we run into something else, bail.
+ // Note that AddrBaseReg may, counter to its name, not be a register,
+ // but rather a frame index.
+ // TODO: Support the fi case. This should probably work now that we
+ // have the infrastructure to track the stack pointer within a call
+ // sequence.
+ if (!I->getOperand(X86::AddrBaseReg).isReg() ||
+ (I->getOperand(X86::AddrBaseReg).getReg() != StackPtr) ||
+ !I->getOperand(X86::AddrScaleAmt).isImm() ||
+ (I->getOperand(X86::AddrScaleAmt).getImm() != 1) ||
+ (I->getOperand(X86::AddrIndexReg).getReg() != X86::NoRegister) ||
+ (I->getOperand(X86::AddrSegmentReg).getReg() != X86::NoRegister) ||
+ !I->getOperand(X86::AddrDisp).isImm())
+ return;
+
+ int64_t StackDisp = I->getOperand(X86::AddrDisp).getImm();
+ assert(StackDisp >= 0 &&
+ "Negative stack displacement when passing parameters");
+
+ // We really don't want to consider the unaligned case.
+ if (StackDisp % 4)
+ return;
+ StackDisp /= 4;
+
+ assert((size_t)StackDisp < Context.MovVector.size() &&
+ "Function call has more parameters than the stack is adjusted for.");
+
+ // If the same stack slot is being filled twice, something's fishy.
+ if (Context.MovVector[StackDisp] != nullptr)
+ return;
+ Context.MovVector[StackDisp] = I;
+
+ ++I;
+ } while (I != MBB.end());
+
+ // We now expect the end of the sequence - a call and a stack adjust.
+ if (I == MBB.end())
+ return;
+
+ // For PCrel calls, we expect an additional COPY of the basereg.
+ // If we find one, skip it.
+ if (I->isCopy()) {
+ if (I->getOperand(1).getReg() ==
+ MF.getInfo<X86MachineFunctionInfo>()->getGlobalBaseReg())
+ ++I;
+ else
+ return;
+ }
+
+ if (!I->isCall())
+ return;
+
+ Context.Call = I;
+ if ((++I)->getOpcode() != FrameDestroyOpcode)
+ return;
+
+ // Now, go through the vector, and see that we don't have any gaps,
+ // but only a series of 32-bit MOVs.
+ auto MMI = Context.MovVector.begin(), MME = Context.MovVector.end();
+ for (; MMI != MME; ++MMI, Context.ExpectedDist += 4)
+ if (*MMI == nullptr)
+ break;
+
+ // If the call had no parameters, do nothing
+ if (MMI == Context.MovVector.begin())
+ return;
+
+ // We are either at the last parameter, or a gap.
+ // Make sure it's not a gap
+ for (; MMI != MME; ++MMI)
+ if (*MMI != nullptr)
+ return;
+
+ Context.UsePush = true;
+}
+
+bool X86CallFrameOptimization::adjustCallSequence(MachineFunction &MF,
+ MachineBasicBlock::iterator I,
+ const CallContext &Context) {
+ // Ok, we can in fact do the transformation for this call.
+ // Do not remove the FrameSetup instruction, but adjust the parameters.
+ // PEI will end up finalizing the handling of this.
+ MachineBasicBlock::iterator FrameSetup = I;
+ MachineBasicBlock &MBB = *(I->getParent());
+ FrameSetup->getOperand(1).setImm(Context.ExpectedDist);
+
+ DebugLoc DL = I->getDebugLoc();
+ // Now, iterate through the vector in reverse order, and replace the movs
+ // with pushes. MOVmi/MOVmr don't have any defs, so there is no need to
+ // replace uses.
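+ // For example (illustrative), a two-argument sequence such as:
+ //   movl %eax, (%esp)
+ //   movl $42, 4(%esp)
+ // becomes:
+ //   pushl $42
+ //   pushl %eax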
+ for (int Idx = (Context.ExpectedDist / 4) - 1; Idx >= 0; --Idx) {
+ MachineBasicBlock::iterator MOV = *Context.MovVector[Idx];
+ MachineOperand PushOp = MOV->getOperand(X86::AddrNumOperands);
+ if (MOV->getOpcode() == X86::MOV32mi) {
+ unsigned PushOpcode = X86::PUSHi32;
+ // If the operand is a small (8-bit) immediate, we can use a
+ // PUSH instruction with a shorter encoding.
+ // Note that isImm() may fail even though this is a MOVmi, because
+ // the operand can also be a symbol.
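+ // (PUSH imm8 is a 2-byte encoding, e.g. 6A 05, versus 5 bytes for
+ // PUSH imm32, e.g. 68 05 00 00 00.)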
+ if (PushOp.isImm()) {
+ int64_t Val = PushOp.getImm();
+ if (isInt<8>(Val))
+ PushOpcode = X86::PUSH32i8;
+ }
+ BuildMI(MBB, Context.Call, DL, TII->get(PushOpcode)).addOperand(PushOp);
+ } else {
+ unsigned int Reg = PushOp.getReg();
+
+ // If PUSHrmm is not slow on this target, try to fold the source of the
+ // push into the instruction.
+ const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
+ bool SlowPUSHrmm = ST.isAtom() || ST.isSLM();
+
+ // Check that this is legal to fold. Right now, we're extremely
+ // conservative about that.
+ MachineInstr *DefMov = nullptr;
+ if (!SlowPUSHrmm && (DefMov = canFoldIntoRegPush(FrameSetup, Reg))) {
+ MachineInstr *Push =
+ BuildMI(MBB, Context.Call, DL, TII->get(X86::PUSH32rmm));
+
+ unsigned NumOps = DefMov->getDesc().getNumOperands();
+ for (unsigned i = NumOps - X86::AddrNumOperands; i != NumOps; ++i)
+ Push->addOperand(DefMov->getOperand(i));
+
+ DefMov->eraseFromParent();
+ } else {
+ BuildMI(MBB, Context.Call, DL, TII->get(X86::PUSH32r)).addReg(Reg);
+ }
+ }
+
+ MBB.erase(MOV);
+ }
+
+ // The stack-pointer copy is no longer used in the call sequence.
+ // There should not be any other users, but we can't commit to that, so:
+ if (MRI->use_empty(Context.SPCopy->getOperand(0).getReg()))
+ Context.SPCopy->eraseFromParent();
+
+ // Once we've done this, we need to make sure PEI doesn't assume a reserved
+ // frame.
+ X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
+ FuncInfo->setHasPushSequences(true);
+
+ return true;
+}
+
+MachineInstr *X86CallFrameOptimization::canFoldIntoRegPush(
+ MachineBasicBlock::iterator FrameSetup, unsigned Reg) {
+ // Do an extremely restricted form of load folding.
+ // ISel will often create patterns like:
+ // movl 4(%edi), %eax
+ // movl 8(%edi), %ecx
+ // movl 12(%edi), %edx
+ // movl %edx, 8(%esp)
+ // movl %ecx, 4(%esp)
+ // movl %eax, (%esp)
+ // call
+ // Get rid of those with prejudice.
+ if (!TargetRegisterInfo::isVirtualRegister(Reg))
+ return nullptr;
+
+ // Make sure this is the only use of Reg.
+ if (!MRI->hasOneNonDBGUse(Reg))
+ return nullptr;
+
+ MachineBasicBlock::iterator DefMI = MRI->getVRegDef(Reg);
+
+ // Make sure the def is a MOV from memory.
+ // If the def is in another block, give up.
+ if (DefMI->getOpcode() != X86::MOV32rm ||
+ DefMI->getParent() != FrameSetup->getParent())
+ return nullptr;
+
+ // Now, make sure everything else up until the ADJCALLSTACK is a sequence
+ // of MOVs. To be less conservative would require duplicating a lot of the
+ // logic from PeepholeOptimizer.
+ // FIXME: A possibly better approach would be to teach the PeepholeOptimizer
+ // to be smarter about folding into pushes.
+ for (auto I = DefMI; I != FrameSetup; ++I)
+ if (I->getOpcode() != X86::MOV32rm)
+ return nullptr;
+
+ return DefMI;
+}