}
}
+// getX86RegNum - This function maps LLVM register identifiers to their X86
+// specific numbering, which is used in various places when encoding
+// instructions.
+//
+unsigned X86RegisterInfo::getX86RegNum(unsigned RegNo) {
+ switch(RegNo) {
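+ // The high-byte registers AH/CH/DH/BH share encodings 4-7 with
+ // SP/BP/SI/DI, and R8-R15 reuse encodings 0-7; the REX prefix bit that
+ // selects the extended registers is emitted elsewhere.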
+ case X86::RAX: case X86::EAX: case X86::AX: case X86::AL: return N86::EAX;
+ case X86::RCX: case X86::ECX: case X86::CX: case X86::CL: return N86::ECX;
+ case X86::RDX: case X86::EDX: case X86::DX: case X86::DL: return N86::EDX;
+ case X86::RBX: case X86::EBX: case X86::BX: case X86::BL: return N86::EBX;
+ case X86::RSP: case X86::ESP: case X86::SP: case X86::SPL: case X86::AH:
+ return N86::ESP;
+ case X86::RBP: case X86::EBP: case X86::BP: case X86::BPL: case X86::CH:
+ return N86::EBP;
+ case X86::RSI: case X86::ESI: case X86::SI: case X86::SIL: case X86::DH:
+ return N86::ESI;
+ case X86::RDI: case X86::EDI: case X86::DI: case X86::DIL: case X86::BH:
+ return N86::EDI;
+
+ case X86::R8: case X86::R8D: case X86::R8W: case X86::R8B:
+ return N86::EAX;
+ case X86::R9: case X86::R9D: case X86::R9W: case X86::R9B:
+ return N86::ECX;
+ case X86::R10: case X86::R10D: case X86::R10W: case X86::R10B:
+ return N86::EDX;
+ case X86::R11: case X86::R11D: case X86::R11W: case X86::R11B:
+ return N86::EBX;
+ case X86::R12: case X86::R12D: case X86::R12W: case X86::R12B:
+ return N86::ESP;
+ case X86::R13: case X86::R13D: case X86::R13W: case X86::R13B:
+ return N86::EBP;
+ case X86::R14: case X86::R14D: case X86::R14W: case X86::R14B:
+ return N86::ESI;
+ case X86::R15: case X86::R15D: case X86::R15W: case X86::R15B:
+ return N86::EDI;
+
+ case X86::ST0: case X86::ST1: case X86::ST2: case X86::ST3:
+ case X86::ST4: case X86::ST5: case X86::ST6: case X86::ST7:
+ return RegNo-X86::ST0;
+
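+ // The DWARF numbers within each XMM block are contiguous, so the distance
+ // from the block's first register gives the 3-bit hardware encoding.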
+ case X86::XMM0: case X86::XMM1: case X86::XMM2: case X86::XMM3:
+ case X86::XMM4: case X86::XMM5: case X86::XMM6: case X86::XMM7:
+ return getDwarfRegNum(RegNo) - getDwarfRegNum(X86::XMM0);
+ case X86::XMM8: case X86::XMM9: case X86::XMM10: case X86::XMM11:
+ case X86::XMM12: case X86::XMM13: case X86::XMM14: case X86::XMM15:
+ return getDwarfRegNum(RegNo) - getDwarfRegNum(X86::XMM8);
+
+ default:
+ assert(isVirtualRegister(RegNo) && "Unknown physical register!");
+ assert(0 && "Register allocator hasn't allocated reg correctly yet!");
+ return 0;
+ }
+}
+
+bool X86RegisterInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI) const {
+ if (CSI.empty())
+ return false;
+
+ MachineFunction &MF = *MBB.getParent();
+ X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
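+ // Each callee-saved register costs one push of SlotSize bytes; record the
+ // total so the prologue and epilogue can account for it.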
+ X86FI->setCalleeSavedFrameSize(CSI.size() * SlotSize);
+ unsigned Opc = Is64Bit ? X86::PUSH64r : X86::PUSH32r;
+ for (unsigned i = CSI.size(); i != 0; --i) {
+ unsigned Reg = CSI[i-1].getReg();
+ // Add the callee-saved register as live-in. It's killed at the spill.
+ MBB.addLiveIn(Reg);
+ BuildMI(MBB, MI, TII.get(Opc)).addReg(Reg);
+ }
+ return true;
+}
+
+bool X86RegisterInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MI,
+ const std::vector<CalleeSavedInfo> &CSI) const {
+ if (CSI.empty())
+ return false;
+
+ unsigned Opc = Is64Bit ? X86::POP64r : X86::POP32r;
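+ // The registers were pushed in reverse order of CSI, so popping in forward
+ // order restores each one from the correct slot.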
+ for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
+ unsigned Reg = CSI[i].getReg();
+ BuildMI(MBB, MI, TII.get(Opc), Reg);
+ }
+ return true;
+}
+
void X86RegisterInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned SrcReg, int FrameIdx,
Opc = X86::MOV32_mr;
} else if (RC == &X86::GR16_RegClass) {
Opc = X86::MOV16_mr;
- } else if (RC == &X86::RFP64RegClass || RC == &X86::RSTRegClass) {
+ } else if (RC == &X86::RFP80RegClass) {
+ Opc = X86::ST_FpP80m; // pops
+ } else if (RC == &X86::RFP64RegClass) {
Opc = X86::ST_Fp64m;
} else if (RC == &X86::RFP32RegClass) {
Opc = X86::ST_Fp32m;
Opc = X86::MOV32_rm;
} else if (RC == &X86::GR16_RegClass) {
Opc = X86::MOV16_rm;
- } else if (RC == &X86::RFP64RegClass || RC == &X86::RSTRegClass) {
+ } else if (RC == &X86::RFP80RegClass) {
+ Opc = X86::LD_Fp80m;
+ } else if (RC == &X86::RFP64RegClass) {
Opc = X86::LD_Fp64m;
} else if (RC == &X86::RFP32RegClass) {
Opc = X86::LD_Fp32m;
Opc = X86::MOV_Fp3232;
} else if (RC == &X86::RFP64RegClass || RC == &X86::RSTRegClass) {
Opc = X86::MOV_Fp6464;
+ } else if (RC == &X86::RFP80RegClass) {
+ Opc = X86::MOV_Fp8080;
} else if (RC == &X86::FR32RegClass) {
Opc = X86::FsMOVAPSrr;
} else if (RC == &X86::FR64RegClass) {
MachineBasicBlock::iterator I,
unsigned DestReg,
const MachineInstr *Orig) const {
- MachineInstr *MI = Orig->clone();
- MI->getOperand(0).setReg(DestReg);
- MBB.insert(I, MI);
+ // MOV32r0 and friends are implemented with xor, which clobbers the
+ // condition codes. Re-materialize them as MOVri instructions with a zero
+ // immediate to avoid that side effect.
+ switch (Orig->getOpcode()) {
+ case X86::MOV8r0:
+ BuildMI(MBB, I, TII.get(X86::MOV8ri), DestReg).addImm(0);
+ break;
+ case X86::MOV16r0:
+ BuildMI(MBB, I, TII.get(X86::MOV16ri), DestReg).addImm(0);
+ break;
+ case X86::MOV32r0:
+ BuildMI(MBB, I, TII.get(X86::MOV32ri), DestReg).addImm(0);
+ break;
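+ // MOV64ri32 sign-extends its 32-bit immediate; for zero this matches a
+ // full 64-bit move and avoids the longer 10-byte encoding.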
+ case X86::MOV64r0:
+ BuildMI(MBB, I, TII.get(X86::MOV64ri32), DestReg).addImm(0);
+ break;
+ default: {
+ MachineInstr *MI = Orig->clone();
+ MI->getOperand(0).setReg(DestReg);
+ MBB.insert(I, MI);
+ break;
+ }
+ }
+}
+
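+// FuseInstrAddOperand - Append the machine operand MO to the instruction
+// being built by MIB, dispatching on the operand's kind.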
+static const MachineInstrBuilder &FuseInstrAddOperand(MachineInstrBuilder &MIB,
+ MachineOperand &MO) {
+ if (MO.isRegister())
+ MIB = MIB.addReg(MO.getReg(), MO.isDef(), MO.isImplicit());
+ else if (MO.isImmediate())
+ MIB = MIB.addImm(MO.getImm());
+ else if (MO.isFrameIndex())
+ MIB = MIB.addFrameIndex(MO.getFrameIndex());
+ else if (MO.isGlobalAddress())
+ MIB = MIB.addGlobalAddress(MO.getGlobal(), MO.getOffset());
+ else if (MO.isConstantPoolIndex())
+ MIB = MIB.addConstantPoolIndex(MO.getConstantPoolIndex(), MO.getOffset());
+ else if (MO.isJumpTableIndex())
+ MIB = MIB.addJumpTableIndex(MO.getJumpTableIndex());
+ else if (MO.isExternalSymbol())
+ MIB = MIB.addExternalSymbol(MO.getSymbolName());
+ else
+ assert(0 && "Unknown operand for FuseInst!");
+
+ return MIB;
}
-static MachineInstr *FuseTwoAddrInst(unsigned Opcode, unsigned FrameIndex,
- MachineInstr *MI,
- const TargetInstrInfo &TII) {
+static MachineInstr *FuseTwoAddrInst(unsigned Opcode,
+ SmallVector<MachineOperand,4> &MOs,
+ MachineInstr *MI, const TargetInstrInfo &TII) {
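+ // Skip the two tied register operands (the def and its matching use);
+ // both are replaced by the memory reference being folded in.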
unsigned NumOps = TII.getNumOperands(MI->getOpcode())-2;
+
// Create the base instruction with the memory operand as the first part.
- MachineInstrBuilder MIB = addFrameReference(BuildMI(TII.get(Opcode)),
- FrameIndex);
+ MachineInstrBuilder MIB = BuildMI(TII.get(Opcode));
+ unsigned NumAddrOps = MOs.size();
+ for (unsigned i = 0; i != NumAddrOps; ++i)
+ MIB = FuseInstrAddOperand(MIB, MOs[i]);
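+ // A memory reference is {Base, Scale, Index, Disp}; when only a frame
+ // index was supplied as the base, append the default scale of 1, no index
+ // register, and a zero displacement.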
+ if (NumAddrOps < 4) // FrameIndex only
+ MIB.addImm(1).addReg(0).addImm(0);
// Loop over the rest of the ri operands, converting them over.
for (unsigned i = 0; i != NumOps; ++i) {
MachineOperand &MO = MI->getOperand(i+2);
- if (MO.isReg())
- MIB = MIB.addReg(MO.getReg(), false, MO.isImplicit());
- else if (MO.isImm())
- MIB = MIB.addImm(MO.getImm());
- else if (MO.isGlobalAddress())
- MIB = MIB.addGlobalAddress(MO.getGlobal(), MO.getOffset());
- else if (MO.isJumpTableIndex())
- MIB = MIB.addJumpTableIndex(MO.getJumpTableIndex());
- else if (MO.isExternalSymbol())
- MIB = MIB.addExternalSymbol(MO.getSymbolName());
- else
- assert(0 && "Unknown operand type!");
+ MIB = FuseInstrAddOperand(MIB, MO);
}
return MIB;
}
static MachineInstr *FuseInst(unsigned Opcode, unsigned OpNo,
- unsigned FrameIndex, MachineInstr *MI,
- const TargetInstrInfo &TII) {
+ SmallVector<MachineOperand,4> &MOs,
+ MachineInstr *MI, const TargetInstrInfo &TII) {
MachineInstrBuilder MIB = BuildMI(TII.get(Opcode));
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (i == OpNo) {
- assert(MO.isReg() && "Expected to fold into reg operand!");
- MIB = addFrameReference(MIB, FrameIndex);
- } else if (MO.isReg())
- MIB = MIB.addReg(MO.getReg(), MO.isDef(), MO.isImplicit());
- else if (MO.isImm())
- MIB = MIB.addImm(MO.getImm());
- else if (MO.isGlobalAddress())
- MIB = MIB.addGlobalAddress(MO.getGlobal(), MO.getOffset());
- else if (MO.isJumpTableIndex())
- MIB = MIB.addJumpTableIndex(MO.getJumpTableIndex());
- else if (MO.isExternalSymbol())
- MIB = MIB.addExternalSymbol(MO.getSymbolName());
- else
- assert(0 && "Unknown operand for FuseInst!");
+ assert(MO.isRegister() && "Expected to fold into reg operand!");
+ unsigned NumAddrOps = MOs.size();
+ for (unsigned j = 0; j != NumAddrOps; ++j)
+ MIB = FuseInstrAddOperand(MIB, MOs[j]);
+ if (NumAddrOps < 4) // FrameIndex only
+ MIB.addImm(1).addReg(0).addImm(0);
+ } else {
+ MIB = FuseInstrAddOperand(MIB, MO);
+ }
}
return MIB;
}
-static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII,
- unsigned Opcode, unsigned FrameIndex,
+static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
+ SmallVector<MachineOperand,4> &MOs,
MachineInstr *MI) {
- return addFrameReference(BuildMI(TII.get(Opcode)), FrameIndex).addImm(0);
+ MachineInstrBuilder MIB = BuildMI(TII.get(Opcode));
+
+ unsigned NumAddrOps = MOs.size();
+ for (unsigned i = 0; i != NumAddrOps; ++i)
+ MIB = FuseInstrAddOperand(MIB, MOs[i]);
+ if (NumAddrOps < 4) // FrameIndex only
+ MIB.addImm(1).addReg(0).addImm(0);
+ return MIB.addImm(0);
}
return NULL;
}
-#define ARRAY_SIZE(TABLE) \
- (sizeof(TABLE)/sizeof(TABLE[0]))
-
#ifdef NDEBUG
#define ASSERT_SORTED(TABLE)
#else
#define ASSERT_SORTED(TABLE) \
{ static bool TABLE##Checked = false; \
if (!TABLE##Checked) { \
- assert(TableIsSorted(TABLE, ARRAY_SIZE(TABLE)) && \
+ assert(TableIsSorted(TABLE, array_lengthof(TABLE)) && \
"All lookup tables must be sorted for efficient access!"); \
TABLE##Checked = true; \
} \
}
#endif
-
-MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI,
- unsigned i,
- int FrameIndex) const {
- // Check switch flag
- if (NoFusing) return NULL;
-
+MachineInstr*
+X86RegisterInfo::foldMemoryOperand(MachineInstr *MI, unsigned i,
+ SmallVector<MachineOperand,4> &MOs) const {
// Table (and size) to search
const TableEntry *OpcodeTablePtr = NULL;
unsigned OpcodeTableSize = 0;
// instruction is different than folding it other places. It requires
// replacing the *two* registers with the memory location.
if (isTwoAddr && NumOps >= 2 && i < 2 &&
- MI->getOperand(0).isReg() &&
- MI->getOperand(1).isReg() &&
- MI->getOperand(0).getReg() == MI->getOperand(1).getReg()) {
+ MI->getOperand(0).isRegister() &&
+ MI->getOperand(1).isRegister() &&
+ MI->getOperand(0).getReg() == MI->getOperand(1).getReg()) {
static const TableEntry OpcodeTable[] = {
{ X86::ADC32ri, X86::ADC32mi },
{ X86::ADC32ri8, X86::ADC32mi8 },
};
ASSERT_SORTED(OpcodeTable);
OpcodeTablePtr = OpcodeTable;
- OpcodeTableSize = ARRAY_SIZE(OpcodeTable);
+ OpcodeTableSize = array_lengthof(OpcodeTable);
isTwoAddrFold = true;
} else if (i == 0) { // If operand 0
if (MI->getOpcode() == X86::MOV16r0)
- NewMI = MakeM0Inst(TII, X86::MOV16mi, FrameIndex, MI);
+ NewMI = MakeM0Inst(TII, X86::MOV16mi, MOs, MI);
else if (MI->getOpcode() == X86::MOV32r0)
- NewMI = MakeM0Inst(TII, X86::MOV32mi, FrameIndex, MI);
+ NewMI = MakeM0Inst(TII, X86::MOV32mi, MOs, MI);
else if (MI->getOpcode() == X86::MOV64r0)
- NewMI = MakeM0Inst(TII, X86::MOV64mi32, FrameIndex, MI);
+ NewMI = MakeM0Inst(TII, X86::MOV64mi32, MOs, MI);
else if (MI->getOpcode() == X86::MOV8r0)
- NewMI = MakeM0Inst(TII, X86::MOV8mi, FrameIndex, MI);
+ NewMI = MakeM0Inst(TII, X86::MOV8mi, MOs, MI);
if (NewMI) {
NewMI->copyKillDeadInfo(MI);
return NewMI;
}
static const TableEntry OpcodeTable[] = {
+ { X86::CALL32r, X86::CALL32m },
+ { X86::CALL64r, X86::CALL64m },
{ X86::CMP16ri, X86::CMP16mi },
{ X86::CMP16ri8, X86::CMP16mi8 },
{ X86::CMP32ri, X86::CMP32mi },
{ X86::CMP32ri8, X86::CMP32mi8 },
+ { X86::CMP64ri32, X86::CMP64mi32 },
+ { X86::CMP64ri8, X86::CMP64mi8 },
{ X86::CMP8ri, X86::CMP8mi },
{ X86::DIV16r, X86::DIV16m },
{ X86::DIV32r, X86::DIV32m },
{ X86::IMUL32r, X86::IMUL32m },
{ X86::IMUL64r, X86::IMUL64m },
{ X86::IMUL8r, X86::IMUL8m },
+ { X86::JMP32r, X86::JMP32m },
+ { X86::JMP64r, X86::JMP64m },
{ X86::MOV16ri, X86::MOV16mi },
{ X86::MOV16rr, X86::MOV16mr },
{ X86::MOV32ri, X86::MOV32mi },
{ X86::MUL32r, X86::MUL32m },
{ X86::MUL64r, X86::MUL64m },
{ X86::MUL8r, X86::MUL8m },
+
+ // TEMPORARY
+ { X86::NEW_CMP16ri, X86::NEW_CMP16mi },
+ { X86::NEW_CMP16ri8,X86::NEW_CMP16mi8 },
+ { X86::NEW_CMP32ri, X86::NEW_CMP32mi },
+ { X86::NEW_CMP32ri8,X86::NEW_CMP32mi8 },
+ { X86::NEW_CMP64ri32,X86::NEW_CMP64mi32 },
+ { X86::NEW_CMP64ri8,X86::NEW_CMP64mi8 },
+ { X86::NEW_CMP8ri, X86::NEW_CMP8mi },
+ { X86::NEW_SETAEr, X86::NEW_SETAEm },
+ { X86::NEW_SETAr, X86::NEW_SETAm },
+ { X86::NEW_SETBEr, X86::NEW_SETBEm },
+ { X86::NEW_SETBr, X86::NEW_SETBm },
+ { X86::NEW_SETEr, X86::NEW_SETEm },
+ { X86::NEW_SETGEr, X86::NEW_SETGEm },
+ { X86::NEW_SETGr, X86::NEW_SETGm },
+ { X86::NEW_SETLEr, X86::NEW_SETLEm },
+ { X86::NEW_SETLr, X86::NEW_SETLm },
+ { X86::NEW_SETNEr, X86::NEW_SETNEm },
+ { X86::NEW_SETNPr, X86::NEW_SETNPm },
+ { X86::NEW_SETNSr, X86::NEW_SETNSm },
+ { X86::NEW_SETPr, X86::NEW_SETPm },
+ { X86::NEW_SETSr, X86::NEW_SETSm },
+
{ X86::SETAEr, X86::SETAEm },
{ X86::SETAr, X86::SETAm },
{ X86::SETBEr, X86::SETBEm },
{ X86::SETNSr, X86::SETNSm },
{ X86::SETPr, X86::SETPm },
{ X86::SETSr, X86::SETSm },
+ { X86::TAILJMPr, X86::TAILJMPm },
{ X86::TEST16ri, X86::TEST16mi },
{ X86::TEST32ri, X86::TEST32mi },
{ X86::TEST64ri32, X86::TEST64mi32 },
{ X86::XCHG64rr, X86::XCHG64mr },
{ X86::XCHG8rr, X86::XCHG8mr }
};
+
ASSERT_SORTED(OpcodeTable);
OpcodeTablePtr = OpcodeTable;
- OpcodeTableSize = ARRAY_SIZE(OpcodeTable);
+ OpcodeTableSize = array_lengthof(OpcodeTable);
} else if (i == 1) {
static const TableEntry OpcodeTable[] = {
{ X86::CMP16rr, X86::CMP16rm },
{ X86::CMP32rr, X86::CMP32rm },
- { X86::CMP64ri32, X86::CMP64mi32 },
- { X86::CMP64ri8, X86::CMP64mi8 },
{ X86::CMP64rr, X86::CMP64rm },
{ X86::CMP8rr, X86::CMP8rm },
- { X86::CMPPDrri, X86::CMPPDrmi },
- { X86::CMPPSrri, X86::CMPPSrmi },
- { X86::CMPSDrr, X86::CMPSDrm },
- { X86::CMPSSrr, X86::CMPSSrm },
{ X86::CVTSD2SSrr, X86::CVTSD2SSrm },
{ X86::CVTSI2SD64rr, X86::CVTSI2SD64rm },
{ X86::CVTSI2SDrr, X86::CVTSI2SDrm },
{ X86::IMUL16rri8, X86::IMUL16rmi8 },
{ X86::IMUL32rri, X86::IMUL32rmi },
{ X86::IMUL32rri8, X86::IMUL32rmi8 },
- { X86::IMUL64rr, X86::IMUL64rm },
{ X86::IMUL64rri32, X86::IMUL64rmi32 },
{ X86::IMUL64rri8, X86::IMUL64rmi8 },
{ X86::Int_CMPSDrr, X86::Int_CMPSDrm },
{ X86::MOVZX32rr8, X86::MOVZX32rm8 },
{ X86::MOVZX64rr16, X86::MOVZX64rm16 },
{ X86::MOVZX64rr8, X86::MOVZX64rm8 },
+
+ // TEMPORARY
+ { X86::NEW_Int_COMISDrr, X86::NEW_Int_COMISDrm },
+ { X86::NEW_Int_COMISSrr, X86::NEW_Int_COMISSrm },
+ { X86::NEW_Int_UCOMISDrr, X86::NEW_Int_UCOMISDrm },
+ { X86::NEW_Int_UCOMISSrr, X86::NEW_Int_UCOMISSrm },
+ { X86::NEW_TEST16rr, X86::NEW_TEST16rm },
+ { X86::NEW_TEST32rr, X86::NEW_TEST32rm },
+ { X86::NEW_TEST64rr, X86::NEW_TEST64rm },
+ { X86::NEW_TEST8rr, X86::NEW_TEST8rm },
+ { X86::NEW_UCOMISDrr, X86::NEW_UCOMISDrm },
+ { X86::NEW_UCOMISSrr, X86::NEW_UCOMISSrm },
+
{ X86::PSHUFDri, X86::PSHUFDmi },
{ X86::PSHUFHWri, X86::PSHUFHWmi },
{ X86::PSHUFLWri, X86::PSHUFLWmi },
{ X86::PsMOVZX64rr32, X86::PsMOVZX64rm32 },
+ { X86::RCPPSr, X86::RCPPSm },
+ { X86::RCPPSr_Int, X86::RCPPSm_Int },
+ { X86::RSQRTPSr, X86::RSQRTPSm },
+ { X86::RSQRTPSr_Int, X86::RSQRTPSm_Int },
+ { X86::RSQRTSSr, X86::RSQRTSSm },
+ { X86::RSQRTSSr_Int, X86::RSQRTSSm_Int },
+ { X86::SQRTPDr, X86::SQRTPDm },
+ { X86::SQRTPDr_Int, X86::SQRTPDm_Int },
+ { X86::SQRTPSr, X86::SQRTPSm },
+ { X86::SQRTPSr_Int, X86::SQRTPSm_Int },
+ { X86::SQRTSDr, X86::SQRTSDm },
+ { X86::SQRTSDr_Int, X86::SQRTSDm_Int },
+ { X86::SQRTSSr, X86::SQRTSSm },
+ { X86::SQRTSSr_Int, X86::SQRTSSm_Int },
{ X86::TEST16rr, X86::TEST16rm },
{ X86::TEST32rr, X86::TEST32rm },
{ X86::TEST64rr, X86::TEST64rm },
{ X86::XCHG64rr, X86::XCHG64rm },
{ X86::XCHG8rr, X86::XCHG8rm }
};
+
ASSERT_SORTED(OpcodeTable);
OpcodeTablePtr = OpcodeTable;
- OpcodeTableSize = ARRAY_SIZE(OpcodeTable);
+ OpcodeTableSize = array_lengthof(OpcodeTable);
} else if (i == 2) {
static const TableEntry OpcodeTable[] = {
{ X86::ADC32rr, X86::ADC32rm },
{ X86::CMOVS16rr, X86::CMOVS16rm },
{ X86::CMOVS32rr, X86::CMOVS32rm },
{ X86::CMOVS64rr, X86::CMOVS64rm },
+ { X86::CMPPDrri, X86::CMPPDrmi },
+ { X86::CMPPSrri, X86::CMPPSrmi },
+ { X86::CMPSDrr, X86::CMPSDrm },
+ { X86::CMPSSrr, X86::CMPSSrm },
{ X86::DIVPDrr, X86::DIVPDrm },
{ X86::DIVPSrr, X86::DIVPSrm },
{ X86::DIVSDrr, X86::DIVSDrm },
{ X86::HSUBPSrr, X86::HSUBPSrm },
{ X86::IMUL16rr, X86::IMUL16rm },
{ X86::IMUL32rr, X86::IMUL32rm },
+ { X86::IMUL64rr, X86::IMUL64rm },
{ X86::MAXPDrr, X86::MAXPDrm },
{ X86::MAXPDrr_Int, X86::MAXPDrm_Int },
{ X86::MAXPSrr, X86::MAXPSrm },
{ X86::MULPSrr, X86::MULPSrm },
{ X86::MULSDrr, X86::MULSDrm },
{ X86::MULSSrr, X86::MULSSrm },
+
+ // TEMPORARY
+ { X86::NEW_CMOVA16rr, X86::NEW_CMOVA16rm },
+ { X86::NEW_CMOVA32rr, X86::NEW_CMOVA32rm },
+ { X86::NEW_CMOVA64rr, X86::NEW_CMOVA64rm },
+ { X86::NEW_CMOVAE16rr, X86::NEW_CMOVAE16rm },
+ { X86::NEW_CMOVAE32rr, X86::NEW_CMOVAE32rm },
+ { X86::NEW_CMOVAE64rr, X86::NEW_CMOVAE64rm },
+ { X86::NEW_CMOVB16rr, X86::NEW_CMOVB16rm },
+ { X86::NEW_CMOVB32rr, X86::NEW_CMOVB32rm },
+ { X86::NEW_CMOVB64rr, X86::NEW_CMOVB64rm },
+ { X86::NEW_CMOVBE16rr, X86::NEW_CMOVBE16rm },
+ { X86::NEW_CMOVBE32rr, X86::NEW_CMOVBE32rm },
+ { X86::NEW_CMOVBE64rr, X86::NEW_CMOVBE64rm },
+ { X86::NEW_CMOVE16rr, X86::NEW_CMOVE16rm },
+ { X86::NEW_CMOVE32rr, X86::NEW_CMOVE32rm },
+ { X86::NEW_CMOVE64rr, X86::NEW_CMOVE64rm },
+ { X86::NEW_CMOVG16rr, X86::NEW_CMOVG16rm },
+ { X86::NEW_CMOVG32rr, X86::NEW_CMOVG32rm },
+ { X86::NEW_CMOVG64rr, X86::NEW_CMOVG64rm },
+ { X86::NEW_CMOVGE16rr, X86::NEW_CMOVGE16rm },
+ { X86::NEW_CMOVGE32rr, X86::NEW_CMOVGE32rm },
+ { X86::NEW_CMOVGE64rr, X86::NEW_CMOVGE64rm },
+ { X86::NEW_CMOVL16rr, X86::NEW_CMOVL16rm },
+ { X86::NEW_CMOVL32rr, X86::NEW_CMOVL32rm },
+ { X86::NEW_CMOVL64rr, X86::NEW_CMOVL64rm },
+ { X86::NEW_CMOVLE16rr, X86::NEW_CMOVLE16rm },
+ { X86::NEW_CMOVLE32rr, X86::NEW_CMOVLE32rm },
+ { X86::NEW_CMOVLE64rr, X86::NEW_CMOVLE64rm },
+ { X86::NEW_CMOVNE16rr, X86::NEW_CMOVNE16rm },
+ { X86::NEW_CMOVNE32rr, X86::NEW_CMOVNE32rm },
+ { X86::NEW_CMOVNE64rr, X86::NEW_CMOVNE64rm },
+ { X86::NEW_CMOVNP16rr, X86::NEW_CMOVNP16rm },
+ { X86::NEW_CMOVNP32rr, X86::NEW_CMOVNP32rm },
+ { X86::NEW_CMOVNP64rr, X86::NEW_CMOVNP64rm },
+ { X86::NEW_CMOVNS16rr, X86::NEW_CMOVNS16rm },
+ { X86::NEW_CMOVNS32rr, X86::NEW_CMOVNS32rm },
+ { X86::NEW_CMOVNS64rr, X86::NEW_CMOVNS64rm },
+ { X86::NEW_CMOVP16rr, X86::NEW_CMOVP16rm },
+ { X86::NEW_CMOVP32rr, X86::NEW_CMOVP32rm },
+ { X86::NEW_CMOVP64rr, X86::NEW_CMOVP64rm },
+ { X86::NEW_CMOVS16rr, X86::NEW_CMOVS16rm },
+ { X86::NEW_CMOVS32rr, X86::NEW_CMOVS32rm },
+ { X86::NEW_CMOVS64rr, X86::NEW_CMOVS64rm },
+
{ X86::OR16rr, X86::OR16rm },
{ X86::OR32rr, X86::OR32rm },
{ X86::OR64rr, X86::OR64rm },
{ X86::PUNPCKLQDQrr, X86::PUNPCKLQDQrm },
{ X86::PUNPCKLWDrr, X86::PUNPCKLWDrm },
{ X86::PXORrr, X86::PXORrm },
- { X86::RCPPSr, X86::RCPPSm },
- { X86::RCPPSr_Int, X86::RCPPSm_Int },
- { X86::RSQRTPSr, X86::RSQRTPSm },
- { X86::RSQRTPSr_Int, X86::RSQRTPSm_Int },
- { X86::RSQRTSSr, X86::RSQRTSSm },
- { X86::RSQRTSSr_Int, X86::RSQRTSSm_Int },
{ X86::SBB32rr, X86::SBB32rm },
{ X86::SBB64rr, X86::SBB64rm },
{ X86::SHUFPDrri, X86::SHUFPDrmi },
{ X86::SHUFPSrri, X86::SHUFPSrmi },
- { X86::SQRTPDr, X86::SQRTPDm },
- { X86::SQRTPDr_Int, X86::SQRTPDm_Int },
- { X86::SQRTPSr, X86::SQRTPSm },
- { X86::SQRTPSr_Int, X86::SQRTPSm_Int },
- { X86::SQRTSDr, X86::SQRTSDm },
- { X86::SQRTSDr_Int, X86::SQRTSDm_Int },
- { X86::SQRTSSr, X86::SQRTSSm },
- { X86::SQRTSSr_Int, X86::SQRTSSm_Int },
{ X86::SUB16rr, X86::SUB16rm },
{ X86::SUB32rr, X86::SUB32rm },
{ X86::SUB64rr, X86::SUB64rm },
{ X86::XORPDrr, X86::XORPDrm },
{ X86::XORPSrr, X86::XORPSrm }
};
+
ASSERT_SORTED(OpcodeTable);
OpcodeTablePtr = OpcodeTable;
- OpcodeTableSize = ARRAY_SIZE(OpcodeTable);
+ OpcodeTableSize = array_lengthof(OpcodeTable);
}
// If table selected...
if (const TableEntry *Entry = TableLookup(OpcodeTablePtr, OpcodeTableSize,
fromOpcode)) {
if (isTwoAddrFold)
- NewMI = FuseTwoAddrInst(Entry->to, FrameIndex, MI, TII);
+ NewMI = FuseTwoAddrInst(Entry->to, MOs, MI, TII);
else
- NewMI = FuseInst(Entry->to, i, FrameIndex, MI, TII);
+ NewMI = FuseInst(Entry->to, i, MOs, MI, TII);
NewMI->copyKillDeadInfo(MI);
return NewMI;
}
}
-const unsigned *X86RegisterInfo::getCalleeSavedRegs() const {
+MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI, unsigned OpNum,
+ int FrameIndex) const {
+ // Check switch flag
+ if (NoFusing) return NULL;
+ SmallVector<MachineOperand,4> MOs;
+ MOs.push_back(MachineOperand::CreateFrameIndex(FrameIndex));
+ return foldMemoryOperand(MI, OpNum, MOs);
+}
+
+MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI, unsigned OpNum,
+ MachineInstr *LoadMI) const {
+ // Check switch flag
+ if (NoFusing) return NULL;
+ SmallVector<MachineOperand,4> MOs;
+ unsigned NumOps = TII.getNumOperands(LoadMI->getOpcode());
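+ // Copy the address of the load, i.e. its trailing four operands, so it can
+ // be folded into MI in place of the register operand.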
+ for (unsigned i = NumOps - 4; i != NumOps; ++i)
+ MOs.push_back(LoadMI->getOperand(i));
+ return foldMemoryOperand(MI, OpNum, MOs);
+}
+
+const unsigned *
+X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
static const unsigned CalleeSavedRegs32Bit[] = {
X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
};
+
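+ // When the function calls eh_return, EAX and EDX (the i386
+ // exception-handling data registers) must be preserved as well.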
+ static const unsigned CalleeSavedRegs32EHRet[] = {
+ X86::EAX, X86::EDX, X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
+ };
+
static const unsigned CalleeSavedRegs64Bit[] = {
X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
};
- return Is64Bit ? CalleeSavedRegs64Bit : CalleeSavedRegs32Bit;
+ if (Is64Bit)
+ return CalleeSavedRegs64Bit;
+ else {
+ if (MF) {
+ MachineFrameInfo *MFI = MF->getFrameInfo();
+ MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
+ if (MMI && MMI->callsEHReturn())
+ return CalleeSavedRegs32EHRet;
+ }
+ return CalleeSavedRegs32Bit;
+ }
}
const TargetRegisterClass* const*
-X86RegisterInfo::getCalleeSavedRegClasses() const {
+X86RegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
static const TargetRegisterClass * const CalleeSavedRegClasses32Bit[] = {
&X86::GR32RegClass, &X86::GR32RegClass,
&X86::GR32RegClass, &X86::GR32RegClass, 0
};
+ static const TargetRegisterClass * const CalleeSavedRegClasses32EHRet[] = {
+ &X86::GR32RegClass, &X86::GR32RegClass,
+ &X86::GR32RegClass, &X86::GR32RegClass,
+ &X86::GR32RegClass, &X86::GR32RegClass, 0
+ };
static const TargetRegisterClass * const CalleeSavedRegClasses64Bit[] = {
&X86::GR64RegClass, &X86::GR64RegClass,
&X86::GR64RegClass, &X86::GR64RegClass,
&X86::GR64RegClass, &X86::GR64RegClass, 0
};
- return Is64Bit ? CalleeSavedRegClasses64Bit : CalleeSavedRegClasses32Bit;
+ if (Is64Bit)
+ return CalleeSavedRegClasses64Bit;
+ else {
+ if (MF) {
+ MachineFrameInfo *MFI = MF->getFrameInfo();
+ MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
+ if (MMI && MMI->callsEHReturn())
+ return CalleeSavedRegClasses32EHRet;
+ }
+ return CalleeSavedRegClasses32Bit;
+ }
}
BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
// if frame pointer elimination is disabled.
//
bool X86RegisterInfo::hasFP(const MachineFunction &MF) const {
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
+
return (NoFramePointerElim ||
- MF.getFrameInfo()->hasVarSizedObjects() ||
- MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer());
+ MFI->hasVarSizedObjects() ||
+ MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
+ (MMI && MMI->callsUnwindInit()));
+}
+
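+// hasReservedCallFrame - When the function has no variable-sized objects,
+// the maximum call frame size can be allocated once in the prologue, so the
+// adjcallstack pseudo instructions do not need to modify ESP.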
+bool X86RegisterInfo::hasReservedCallFrame(MachineFunction &MF) const {
+ return !MF.getFrameInfo()->hasVarSizedObjects();
}
void X86RegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
- if (hasFP(MF)) {
- // If we have a frame pointer, turn the adjcallstackup instruction into a
- // 'sub ESP, <amt>' and the adjcallstackdown instruction into 'add ESP,
- // <amt>'
+ if (!hasReservedCallFrame(MF)) {
+ // If the stack pointer can be changed after the prologue, turn the
+ // adjcallstackup instruction into a 'sub ESP, <amt>' and the
+ // adjcallstackdown instruction into an 'add ESP, <amt>'.
+ // TODO: consider using push / pop instead of sub + store / add
MachineInstr *Old = I;
uint64_t Amount = Old->getOperand(0).getImm();
if (Amount != 0) {
void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
MachineBasicBlock &MBB = MF.front(); // Prolog goes in entry BB
- MachineBasicBlock::iterator MBBI = MBB.begin();
MachineFrameInfo *MFI = MF.getFrameInfo();
unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
const Function* Fn = MF.getFunction();
const X86Subtarget* Subtarget = &MF.getTarget().getSubtarget<X86Subtarget>();
- MachineInstr *MI;
MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
+ X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
+ MachineBasicBlock::iterator MBBI = MBB.begin();
// Prepare for frame info.
- unsigned FrameLabelId = 0, StartLabelId = 0;
+ unsigned FrameLabelId = 0;
// Get the number of bytes to allocate from the FrameInfo
- uint64_t NumBytes = MFI->getStackSize();
+ uint64_t StackSize = MFI->getStackSize();
+ uint64_t NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
+
+ if (hasFP(MF)) {
+ // Get the offset of the stack slot for the EBP register... which is
+ // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
+ // Update the frame offset adjustment.
+ MFI->setOffsetAdjustment(SlotSize-NumBytes);
+
+ // Save EBP into the appropriate stack slot...
+ BuildMI(MBB, MBBI, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
+ .addReg(FramePtr);
+ NumBytes -= SlotSize;
+
+ if (MMI && MMI->needsFrameInfo()) {
+ // Mark effective beginning of when frame pointer becomes valid.
+ FrameLabelId = MMI->NextLabelID();
+ BuildMI(MBB, MBBI, TII.get(X86::LABEL)).addImm(FrameLabelId);
+ }
+ // Update EBP with the new base value...
+ BuildMI(MBB, MBBI, TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), FramePtr)
+ .addReg(StackPtr);
+ }
+
+ unsigned ReadyLabelId = 0;
if (MMI && MMI->needsFrameInfo()) {
- // Mark function start
- StartLabelId = MMI->NextLabelID();
- BuildMI(MBB, MBBI, TII.get(X86::LABEL)).addImm(StartLabelId);
+ // Mark effective beginning of when frame pointer is ready.
+ ReadyLabelId = MMI->NextLabelID();
+ BuildMI(MBB, MBBI, TII.get(X86::LABEL)).addImm(ReadyLabelId);
}
+ // Skip the callee-saved push instructions.
+ while (MBBI != MBB.end() &&
+ (MBBI->getOpcode() == X86::PUSH32r ||
+ MBBI->getOpcode() == X86::PUSH64r))
+ ++MBBI;
+
if (NumBytes) { // adjust stack pointer: ESP -= numbytes
if (NumBytes >= 4096 && Subtarget->isTargetCygMing()) {
// Check whether EAX is live-in for this function
// necessary to ensure that the guard pages used by the OS virtual memory
// manager are allocated in correct sequence.
if (!isEAXAlive) {
- MI = BuildMI(TII.get(X86::MOV32ri), X86::EAX).addImm(NumBytes);
- MBB.insert(MBBI, MI);
- MI = BuildMI(TII.get(X86::CALLpcrel32)).addExternalSymbol("_alloca");
- MBB.insert(MBBI, MI);
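+ // The CygMing _alloca probe expects the allocation size in EAX and
+ // adjusts ESP itself.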
+ BuildMI(MBB, MBBI, TII.get(X86::MOV32ri), X86::EAX).addImm(NumBytes);
+ BuildMI(MBB, MBBI, TII.get(X86::CALLpcrel32))
+ .addExternalSymbol("_alloca");
} else {
// Save EAX
- MI = BuildMI(TII.get(X86::PUSH32r), X86::EAX);
- MBB.insert(MBBI, MI);
+ BuildMI(MBB, MBBI, TII.get(X86::PUSH32r), X86::EAX);
// Allocate NumBytes-4 bytes on stack. We'll also use 4 already
// allocated bytes for EAX.
- MI = BuildMI(TII.get(X86::MOV32ri), X86::EAX).addImm(NumBytes-4);
- MBB.insert(MBBI, MI);
- MI = BuildMI(TII.get(X86::CALLpcrel32)).addExternalSymbol("_alloca");
- MBB.insert(MBBI, MI);
+ BuildMI(MBB, MBBI, TII.get(X86::MOV32ri), X86::EAX).addImm(NumBytes-4);
+ BuildMI(MBB, MBBI, TII.get(X86::CALLpcrel32))
+ .addExternalSymbol("_alloca");
// Restore EAX
- MI = addRegOffset(BuildMI(TII.get(X86::MOV32rm), X86::EAX),
- StackPtr, NumBytes-4);
+ MachineInstr *MI = addRegOffset(BuildMI(TII.get(X86::MOV32rm),X86::EAX),
+ StackPtr, NumBytes-4);
MBB.insert(MBBI, MI);
}
} else {
- emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit, TII);
- }
- }
-
- if (MMI && MMI->needsFrameInfo()) {
- // Mark effective beginning of when frame pointer becomes valid.
- FrameLabelId = MMI->NextLabelID();
- BuildMI(MBB, MBBI, TII.get(X86::LABEL)).addImm(FrameLabelId);
- }
-
- if (hasFP(MF)) {
- // Get the offset of the stack slot for the EBP register... which is
- // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
- int64_t EBPOffset =
- MFI->getObjectOffset(MFI->getObjectIndexBegin())+SlotSize;
- // Update the frame offset adjustment.
- MFI->setOffsetAdjustment(SlotSize-NumBytes);
-
- // Save EBP into the appropriate stack slot...
- // mov [ESP-<offset>], EBP
- MI = addRegOffset(BuildMI(TII.get(Is64Bit ? X86::MOV64mr : X86::MOV32mr)),
- StackPtr, EBPOffset+NumBytes).addReg(FramePtr);
- MBB.insert(MBBI, MI);
+ // If there is an ADD32ri or SUB32ri of ESP immediately after this
+ // instruction, merge the two instructions.
+ if (MBBI != MBB.end()) {
+ MachineBasicBlock::iterator NI = next(MBBI);
+ unsigned Opc = MBBI->getOpcode();
+ if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
+ Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
+ MBBI->getOperand(0).getReg() == StackPtr) {
+ NumBytes -= MBBI->getOperand(2).getImm();
+ MBB.erase(MBBI);
+ MBBI = NI;
+ } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
+ Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
+ MBBI->getOperand(0).getReg() == StackPtr) {
+ NumBytes += MBBI->getOperand(2).getImm();
+ MBB.erase(MBBI);
+ MBBI = NI;
+ }
+ }
- // Update EBP with the new base value...
- if (NumBytes == SlotSize) // mov EBP, ESP
- MI = BuildMI(TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), FramePtr).
- addReg(StackPtr);
- else // lea EBP, [ESP+StackSize]
- MI = addRegOffset(BuildMI(TII.get(Is64Bit ? X86::LEA64r : X86::LEA32r),
- FramePtr), StackPtr, NumBytes-SlotSize);
-
- MBB.insert(MBBI, MI);
+ if (NumBytes)
+ emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit, TII);
+ }
}
if (MMI && MMI->needsFrameInfo()) {
TargetFrameInfo::StackGrowsUp ?
TAI->getAddressSize() : -TAI->getAddressSize());
- if (NumBytes) {
+ if (StackSize) {
// Show update of SP.
if (hasFP(MF)) {
// Adjust SP
Moves.push_back(MachineMove(FrameLabelId, SPDst, SPSrc));
} else {
MachineLocation SPDst(MachineLocation::VirtualFP);
- MachineLocation SPSrc(MachineLocation::VirtualFP, -NumBytes+stackGrowth);
+ MachineLocation SPSrc(MachineLocation::VirtualFP, -StackSize+stackGrowth);
Moves.push_back(MachineMove(FrameLabelId, SPDst, SPSrc));
}
} else {
// Add callee saved registers to move list.
const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
- for (unsigned I = 0, E = CSI.size(); I != E; ++I) {
+
+ // FIXME: This is a dirty hack, and the code itself is a mess right now.
+ // It should be rewritten from scratch and generalized at some point.
+
+ // Determine the maximum offset (the minimum, since the stack grows down)
+ int64_t MaxOffset = 0;
+ for (unsigned I = 0, E = CSI.size(); I!=E; ++I)
+ MaxOffset = std::min(MaxOffset,
+ MFI->getObjectOffset(CSI[I].getFrameIdx()));
+
+ // Calculate offsets
+ for (unsigned I = 0, E = CSI.size(); I!=E; ++I) {
int64_t Offset = MFI->getObjectOffset(CSI[I].getFrameIdx());
unsigned Reg = CSI[I].getReg();
+ Offset = (MaxOffset-Offset+3*stackGrowth);
MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
MachineLocation CSSrc(Reg);
Moves.push_back(MachineMove(FrameLabelId, CSDst, CSSrc));
}
- // Mark effective beginning of when frame pointer is ready.
- unsigned ReadyLabelId = MMI->NextLabelID();
- BuildMI(MBB, MBBI, TII.get(X86::LABEL)).addImm(ReadyLabelId);
-
if (hasFP(MF)) {
// Save FP
MachineLocation FPDst(MachineLocation::VirtualFP, 2*stackGrowth);
// If it's main() on Cygwin/Mingw32 we should align the stack as well
if (Fn->hasExternalLinkage() && Fn->getName() == "main" &&
Subtarget->isTargetCygMing()) {
- MI= BuildMI(TII.get(X86::AND32ri), X86::ESP)
+ BuildMI(MBB, MBBI, TII.get(X86::AND32ri), X86::ESP)
.addReg(X86::ESP).addImm(-Align);
- MBB.insert(MBBI, MI);
// Probe the stack
- MI = BuildMI(TII.get(X86::MOV32ri), X86::EAX).addImm(Align);
- MBB.insert(MBBI, MI);
- MI = BuildMI(TII.get(X86::CALLpcrel32)).addExternalSymbol("_alloca");
- MBB.insert(MBBI, MI);
+ BuildMI(MBB, MBBI, TII.get(X86::MOV32ri), X86::EAX).addImm(Align);
+ BuildMI(MBB, MBBI, TII.get(X86::CALLpcrel32)).addExternalSymbol("_alloca");
}
}
void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
+ X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
MachineBasicBlock::iterator MBBI = prior(MBB.end());
+ unsigned RetOpcode = MBBI->getOpcode();
- switch (MBBI->getOpcode()) {
+ switch (RetOpcode) {
case X86::RET:
case X86::RETI:
+ case X86::EH_RETURN:
case X86::TAILJMPd:
case X86::TAILJMPr:
case X86::TAILJMPm: break; // These are ok
assert(0 && "Can only insert epilog into returning blocks");
}
- if (hasFP(MF)) {
- // mov ESP, EBP
- BuildMI(MBB, MBBI, TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),StackPtr).
- addReg(FramePtr);
+ // Get the number of bytes to allocate from the FrameInfo
+ uint64_t StackSize = MFI->getStackSize();
+ unsigned CSSize = X86FI->getCalleeSavedFrameSize();
+ uint64_t NumBytes = StackSize - CSSize;
- // pop EBP
+ if (hasFP(MF)) {
+ // pop EBP.
BuildMI(MBB, MBBI, TII.get(Is64Bit ? X86::POP64r : X86::POP32r), FramePtr);
- } else {
- // Get the number of bytes allocated from the FrameInfo.
- uint64_t NumBytes = MFI->getStackSize();
+ NumBytes -= SlotSize;
+ }
- if (NumBytes) { // adjust stack pointer back: ESP += numbytes
- // If there is an ADD32ri or SUB32ri of ESP immediately before this
- // instruction, merge the two instructions.
- if (MBBI != MBB.begin()) {
- MachineBasicBlock::iterator PI = prior(MBBI);
- unsigned Opc = PI->getOpcode();
- if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
- Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
- PI->getOperand(0).getReg() == StackPtr) {
- NumBytes += PI->getOperand(2).getImm();
- MBB.erase(PI);
- } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
- Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
- PI->getOperand(0).getReg() == StackPtr) {
- NumBytes -= PI->getOperand(2).getImm();
- MBB.erase(PI);
- }
- }
+ // Skip the callee-saved pop instructions.
+ while (MBBI != MBB.begin()) {
+ MachineBasicBlock::iterator PI = prior(MBBI);
+ unsigned Opc = PI->getOpcode();
+ if (Opc != X86::POP32r && Opc != X86::POP64r && !TII.isTerminatorInstr(Opc))
+ break;
+ --MBBI;
+ }
- if (NumBytes)
- emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII);
+ if (NumBytes || MFI->hasVarSizedObjects()) {
+ // If there is an ADD32ri or SUB32ri of ESP immediately before this
+ // instruction, merge the two instructions.
+ if (MBBI != MBB.begin()) {
+ MachineBasicBlock::iterator PI = prior(MBBI);
+ unsigned Opc = PI->getOpcode();
+ if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
+ Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
+ PI->getOperand(0).getReg() == StackPtr) {
+ NumBytes += PI->getOperand(2).getImm();
+ MBB.erase(PI);
+ } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
+ Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
+ PI->getOperand(0).getReg() == StackPtr) {
+ NumBytes -= PI->getOperand(2).getImm();
+ MBB.erase(PI);
+ }
}
}
+
+ // If dynamic alloca is used, then reset esp to point to the last
+ // callee-saved slot before popping them off!
+ if (MFI->hasVarSizedObjects()) {
+ unsigned Opc = Is64Bit ? X86::LEA64r : X86::LEA32r;
+ if (CSSize) {
+ MachineInstr *MI = addRegOffset(BuildMI(TII.get(Opc), StackPtr),
+ FramePtr, -CSSize);
+ MBB.insert(MBBI, MI);
+ } else
+ BuildMI(MBB, MBBI, TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),StackPtr).
+ addReg(FramePtr);
+
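+ // The stack pointer has just been repositioned from the frame pointer,
+ // so no further immediate adjustment is needed.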
+ NumBytes = 0;
+ }
+
+ // adjust stack pointer back: ESP += numbytes
+ if (NumBytes)
+ emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII);
+
+ // We're returning from the function via eh_return.
+ if (RetOpcode == X86::EH_RETURN) {
+ MBBI = prior(MBB.end());
+ MachineOperand &DestAddr = MBBI->getOperand(0);
+ assert(DestAddr.isRegister() && "Offset should be in register!");
+ BuildMI(MBB, MBBI, TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),StackPtr).
+ addReg(DestAddr.getReg());
+ }
}
unsigned X86RegisterInfo::getRARegister() const {