// Cache some information.
const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
Is64Bit = Subtarget->is64Bit();
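+ // The cached stack alignment drives the choice between aligned and
+ // unaligned SSE spill opcodes and the call-frame rounding further below.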
+ StackAlign = TM.getFrameInfo()->getStackAlignment();
if (Is64Bit) {
SlotSize = 8;
StackPtr = X86::RSP;
assert(AmbEntries.empty() && "Duplicated entries in unfolding maps?");
}
+// getDwarfRegNum - This function maps LLVM register identifiers to the
+// DWARF-specific numbering, which is used in debug info and exception tables.
+
+int X86RegisterInfo::getDwarfRegNum(unsigned RegNo, bool isEH) const {
+ const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
+ unsigned Flavour = DWARFFlavour::X86_64;
+ if (!Subtarget->is64Bit()) {
+ if (Subtarget->isTargetDarwin()) {
+ Flavour = DWARFFlavour::X86_32_Darwin;
+ } else if (Subtarget->isTargetCygMing()) {
+ // Not yet supported; fall back to the ELF numbering for now.
+ Flavour = DWARFFlavour::X86_32_ELF;
+ } else {
+ Flavour = DWARFFlavour::X86_32_ELF;
+ }
+ }
+
+ return X86GenRegisterInfo::getDwarfRegNumFull(RegNo, Flavour);
+}
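// Illustrative sketch only, not part of this patch: a debug-info or EH
// emitter obtains the flavour-aware number through this interface. The
// wrapper name exampleDwarfNumberFor is hypothetical; getDwarfRegNum above
// is the real entry point.
static inline int exampleDwarfNumberFor(const X86RegisterInfo &RI,
                                        unsigned Reg, bool isEH) {
  // isEH distinguishes the exception-table numbering from the debug-info
  // numbering; the implementation above currently selects the same flavour
  // for both on x86.
  return RI.getDwarfRegNum(Reg, isEH);
}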
+
// getX86RegNum - This function maps LLVM register identifiers to their
// X86-specific numbering, which is used in various places when encoding
// instructions.
//
case X86::ST4: case X86::ST5: case X86::ST6: case X86::ST7:
return RegNo-X86::ST0;
- case X86::XMM0: case X86::XMM1: case X86::XMM2: case X86::XMM3:
- case X86::XMM4: case X86::XMM5: case X86::XMM6: case X86::XMM7:
- return getDwarfRegNum(RegNo) - getDwarfRegNum(X86::XMM0);
- case X86::XMM8: case X86::XMM9: case X86::XMM10: case X86::XMM11:
- case X86::XMM12: case X86::XMM13: case X86::XMM14: case X86::XMM15:
- return getDwarfRegNum(RegNo) - getDwarfRegNum(X86::XMM8);
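+ // XMM8-XMM15 share the 3-bit ModRM encodings of XMM0-XMM7; the extra
+ // register bit is carried by the REX prefix, so both banks fold onto 0-7.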
+ case X86::XMM0: case X86::XMM8:
+ return 0;
+ case X86::XMM1: case X86::XMM9:
+ return 1;
+ case X86::XMM2: case X86::XMM10:
+ return 2;
+ case X86::XMM3: case X86::XMM11:
+ return 3;
+ case X86::XMM4: case X86::XMM12:
+ return 4;
+ case X86::XMM5: case X86::XMM13:
+ return 5;
+ case X86::XMM6: case X86::XMM14:
+ return 6;
+ case X86::XMM7: case X86::XMM15:
+ return 7;
default:
assert(isVirtualRegister(RegNo) && "Unknown physical register!");
static const MachineInstrBuilder &X86InstrAddOperand(MachineInstrBuilder &MIB,
MachineOperand &MO) {
if (MO.isRegister())
- MIB = MIB.addReg(MO.getReg(), MO.isDef(), MO.isImplicit());
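+    // Forward the operand's sub-register index as well, so sub-register
+    // operands are preserved on the rebuilt instruction.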
+ MIB = MIB.addReg(MO.getReg(), MO.isDef(), MO.isImplicit(),
+ false, false, MO.getSubReg());
else if (MO.isImmediate())
MIB = MIB.addImm(MO.getImm());
else if (MO.isFrameIndex())
return MIB;
}
-static unsigned getStoreRegOpcode(const TargetRegisterClass *RC) {
+static unsigned getStoreRegOpcode(const TargetRegisterClass *RC,
+ unsigned StackAlign) {
unsigned Opc = 0;
if (RC == &X86::GR64RegClass) {
Opc = X86::MOV64mr;
} else if (RC == &X86::FR64RegClass) {
Opc = X86::MOVSDmr;
} else if (RC == &X86::VR128RegClass) {
- Opc = X86::MOVAPSmr;
+ // FIXME: Use movaps once we are capable of selectively
+ // aligning functions that spill SSE registers on 16-byte boundaries.
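+    // MOVAPS requires a 16-byte aligned address and faults otherwise, so it
+    // is only safe when the stack is known to be at least 16-byte aligned.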
+ Opc = StackAlign >= 16 ? X86::MOVAPSmr : X86::MOVUPSmr;
} else if (RC == &X86::VR64RegClass) {
Opc = X86::MMX_MOVQ64mr;
} else {
MachineBasicBlock::iterator MI,
unsigned SrcReg, int FrameIdx,
const TargetRegisterClass *RC) const {
- unsigned Opc = getStoreRegOpcode(RC);
+ unsigned Opc = getStoreRegOpcode(RC, StackAlign);
addFrameReference(BuildMI(MBB, MI, TII.get(Opc)), FrameIdx)
.addReg(SrcReg, false, false, true);
}
SmallVectorImpl<MachineOperand> &Addr,
const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs) const {
- unsigned Opc = getStoreRegOpcode(RC);
+ unsigned Opc = getStoreRegOpcode(RC, StackAlign);
MachineInstrBuilder MIB = BuildMI(TII.get(Opc));
for (unsigned i = 0, e = Addr.size(); i != e; ++i)
MIB = X86InstrAddOperand(MIB, Addr[i]);
NewMIs.push_back(MIB);
}
-static unsigned getLoadRegOpcode(const TargetRegisterClass *RC) {
+static unsigned getLoadRegOpcode(const TargetRegisterClass *RC,
+ unsigned StackAlign) {
unsigned Opc = 0;
if (RC == &X86::GR64RegClass) {
Opc = X86::MOV64rm;
} else if (RC == &X86::FR64RegClass) {
Opc = X86::MOVSDrm;
} else if (RC == &X86::VR128RegClass) {
- Opc = X86::MOVAPSrm;
+ // FIXME: Use movaps once we are capable of selectively
+ // aligning functions that spill SSE registers on 16-byte boundaries.
+ Opc = StackAlign >= 16 ? X86::MOVAPSrm : X86::MOVUPSrm;
} else if (RC == &X86::VR64RegClass) {
Opc = X86::MMX_MOVQ64rm;
} else {
MachineBasicBlock::iterator MI,
unsigned DestReg, int FrameIdx,
                                           const TargetRegisterClass *RC) const {
- unsigned Opc = getLoadRegOpcode(RC);
+ unsigned Opc = getLoadRegOpcode(RC, StackAlign);
addFrameReference(BuildMI(MBB, MI, TII.get(Opc), DestReg), FrameIdx);
}
SmallVectorImpl<MachineOperand> &Addr,
const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs) const {
- unsigned Opc = getLoadRegOpcode(RC);
+ unsigned Opc = getLoadRegOpcode(RC, StackAlign);
MachineInstrBuilder MIB = BuildMI(TII.get(Opc), DestReg);
for (unsigned i = 0, e = Addr.size(); i != e; ++i)
MIB = X86InstrAddOperand(MIB, Addr[i]);
SDNode *Load = 0;
if (FoldedLoad) {
MVT::ValueType VT = *RC->vt_begin();
- Load = DAG.getTargetNode(getLoadRegOpcode(RC), VT, MVT::Other,
+ Load = DAG.getTargetNode(getLoadRegOpcode(RC, StackAlign), VT, MVT::Other,
&AddrOps[0], AddrOps.size());
NewNodes.push_back(Load);
}
AddrOps.pop_back();
AddrOps.push_back(SDOperand(NewNode, 0));
AddrOps.push_back(Chain);
- SDNode *Store = DAG.getTargetNode(getStoreRegOpcode(DstRC),
+ SDNode *Store = DAG.getTargetNode(getStoreRegOpcode(DstRC, StackAlign),
MVT::Other, &AddrOps[0], AddrOps.size());
NewNodes.push_back(Store);
}
// We need to keep the stack aligned properly. To do this, we round the
// amount of space needed for the outgoing arguments up to the next
// alignment boundary.
- unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
- Amount = (Amount+Align-1)/Align*Align;
+ Amount = (Amount+StackAlign-1)/StackAlign*StackAlign;
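+  // For example, with a 16-byte StackAlign an Amount of 20 becomes
+  // (20+15)/16*16 = 32.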
MachineInstr *New = 0;
if (Old->getOpcode() == X86::ADJCALLSTACKDOWN) {
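      // Use the sign-extended 8-bit immediate form when the adjustment fits.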
unsigned Opc = (Amount < 128) ?
(Is64Bit ? X86::ADD64ri8 : X86::ADD32ri8) :
(Is64Bit ? X86::ADD64ri32 : X86::ADD32ri);
- New = BuildMI(TII.get(Opc), StackPtr)
- .addReg(StackPtr).addImm(Amount);
+ New = BuildMI(TII.get(Opc), StackPtr).addReg(StackPtr).addImm(Amount);
}
}
void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
MachineBasicBlock &MBB = MF.front(); // Prolog goes in entry BB
MachineFrameInfo *MFI = MF.getFrameInfo();
- unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
const Function* Fn = MF.getFunction();
const X86Subtarget* Subtarget = &MF.getTarget().getSubtarget<X86Subtarget>();
MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
MFI->getObjectOffset(CSI[I].getFrameIdx()));
// Calculate offsets
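+  // One extra slot of stackGrowth is counted when a frame pointer is used,
+  // since the prologue also pushes the old frame pointer ahead of the
+  // callee-saved registers.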
+ int64_t saveAreaOffset = (hasFP(MF) ? 3 : 2)*stackGrowth;
for (unsigned I = 0, E = CSI.size(); I!=E; ++I) {
int64_t Offset = MFI->getObjectOffset(CSI[I].getFrameIdx());
unsigned Reg = CSI[I].getReg();
- Offset = (MaxOffset-Offset+3*stackGrowth);
+ Offset = (MaxOffset-Offset+saveAreaOffset);
MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
MachineLocation CSSrc(Reg);
Moves.push_back(MachineMove(FrameLabelId, CSDst, CSSrc));
if (Fn->hasExternalLinkage() && Fn->getName() == "main" &&
Subtarget->isTargetCygMing()) {
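    // The Cygwin/MinGW runtime does not guarantee a 16-byte aligned stack on
    // entry to main, so realign it here.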
BuildMI(MBB, MBBI, TII.get(X86::AND32ri), X86::ESP)
- .addReg(X86::ESP).addImm(-Align);
+ .addReg(X86::ESP).addImm(-StackAlign);
// Probe the stack
- BuildMI(MBB, MBBI, TII.get(X86::MOV32ri), X86::EAX).addImm(Align);
+ BuildMI(MBB, MBBI, TII.get(X86::MOV32ri), X86::EAX).addImm(StackAlign);
BuildMI(MBB, MBBI, TII.get(X86::CALLpcrel32)).addExternalSymbol("_alloca");
}
}