#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
+#include "llvm/DerivedTypes.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
for (unsigned i = 0, e = array_lengthof(OpTbl2Addr); i != e; ++i) {
unsigned RegOp = OpTbl2Addr[i][0];
unsigned MemOp = OpTbl2Addr[i][1];
- if (!RegOp2MemOpTable2Addr.insert(std::make_pair((unsigned*)RegOp, MemOp)))
+ if (!RegOp2MemOpTable2Addr.insert(std::make_pair((unsigned*)RegOp,
+ MemOp)).second)
assert(false && "Duplicated entries?");
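// AuxInfo packs the memory-operand index in its low 4 bits; bit 4 marks
// a folded load and bit 5 a folded store (decoded in unfoldMemoryOperand).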
unsigned AuxInfo = 0 | (1 << 4) | (1 << 5); // Index 0, folded load and store
if (!MemOp2RegOpTable.insert(std::make_pair((unsigned*)MemOp,
- std::make_pair(RegOp, AuxInfo))))
+ std::make_pair(RegOp,
+ AuxInfo))).second)
AmbEntries.push_back(MemOp);
}
// If the third value is 1, the memory form folds a load; if it's 0, it
// folds a store.
static const unsigned OpTbl0[][3] = {
+ { X86::BT16ri8, X86::BT16mi8, 1 },
+ { X86::BT32ri8, X86::BT32mi8, 1 },
+ { X86::BT64ri8, X86::BT64mi8, 1 },
{ X86::CALL32r, X86::CALL32m, 1 },
{ X86::CALL64r, X86::CALL64m, 1 },
{ X86::CMP16ri, X86::CMP16mi, 1 },
{ X86::DIV32r, X86::DIV32m, 1 },
{ X86::DIV64r, X86::DIV64m, 1 },
{ X86::DIV8r, X86::DIV8m, 1 },
+ { X86::EXTRACTPSrr, X86::EXTRACTPSmr, 0 },
{ X86::FsMOVAPDrr, X86::MOVSDmr, 0 },
{ X86::FsMOVAPSrr, X86::MOVSSmr, 0 },
{ X86::IDIV16r, X86::IDIV16m, 1 },
{ X86::JMP64r, X86::JMP64m, 1 },
{ X86::MOV16ri, X86::MOV16mi, 0 },
{ X86::MOV16rr, X86::MOV16mr, 0 },
- { X86::MOV16to16_, X86::MOV16_mr, 0 },
{ X86::MOV32ri, X86::MOV32mi, 0 },
{ X86::MOV32rr, X86::MOV32mr, 0 },
- { X86::MOV32to32_, X86::MOV32_mr, 0 },
{ X86::MOV64ri32, X86::MOV64mi32, 0 },
{ X86::MOV64rr, X86::MOV64mr, 0 },
{ X86::MOV8ri, X86::MOV8mi, 0 },
{ X86::MOV8rr, X86::MOV8mr, 0 },
+ { X86::MOV8rr_NOREX, X86::MOV8mr_NOREX, 0 },
{ X86::MOVAPDrr, X86::MOVAPDmr, 0 },
{ X86::MOVAPSrr, X86::MOVAPSmr, 0 },
+ { X86::MOVDQArr, X86::MOVDQAmr, 0 },
{ X86::MOVPDI2DIrr, X86::MOVPDI2DImr, 0 },
{ X86::MOVPQIto64rr,X86::MOVPQI2QImr, 0 },
{ X86::MOVPS2SSrr, X86::MOVPS2SSmr, 0 },
{ X86::SETLEr, X86::SETLEm, 0 },
{ X86::SETLr, X86::SETLm, 0 },
{ X86::SETNEr, X86::SETNEm, 0 },
+ { X86::SETNOr, X86::SETNOm, 0 },
{ X86::SETNPr, X86::SETNPm, 0 },
{ X86::SETNSr, X86::SETNSm, 0 },
+ { X86::SETOr, X86::SETOm, 0 },
{ X86::SETPr, X86::SETPm, 0 },
{ X86::SETSr, X86::SETSm, 0 },
{ X86::TAILJMPr, X86::TAILJMPm, 1 },
for (unsigned i = 0, e = array_lengthof(OpTbl0); i != e; ++i) {
unsigned RegOp = OpTbl0[i][0];
unsigned MemOp = OpTbl0[i][1];
- if (!RegOp2MemOpTable0.insert(std::make_pair((unsigned*)RegOp, MemOp)))
+ if (!RegOp2MemOpTable0.insert(std::make_pair((unsigned*)RegOp,
+ MemOp)).second)
assert(false && "Duplicated entries?");
unsigned FoldedLoad = OpTbl0[i][2];
// Index 0, folded load or store.
unsigned AuxInfo = 0 | (FoldedLoad << 4) | ((FoldedLoad^1) << 5);
if (RegOp != X86::FsMOVAPDrr && RegOp != X86::FsMOVAPSrr)
if (!MemOp2RegOpTable.insert(std::make_pair((unsigned*)MemOp,
- std::make_pair(RegOp, AuxInfo))))
+ std::make_pair(RegOp, AuxInfo))).second)
AmbEntries.push_back(MemOp);
}
{ X86::Int_UCOMISDrr, X86::Int_UCOMISDrm },
{ X86::Int_UCOMISSrr, X86::Int_UCOMISSrm },
{ X86::MOV16rr, X86::MOV16rm },
- { X86::MOV16to16_, X86::MOV16_rm },
{ X86::MOV32rr, X86::MOV32rm },
- { X86::MOV32to32_, X86::MOV32_rm },
{ X86::MOV64rr, X86::MOV64rm },
{ X86::MOV64toPQIrr, X86::MOVQI2PQIrm },
{ X86::MOV64toSDrr, X86::MOV64toSDrm },
{ X86::MOVDDUPrr, X86::MOVDDUPrm },
{ X86::MOVDI2PDIrr, X86::MOVDI2PDIrm },
{ X86::MOVDI2SSrr, X86::MOVDI2SSrm },
+ { X86::MOVDQArr, X86::MOVDQArm },
{ X86::MOVSD2PDrr, X86::MOVSD2PDrm },
{ X86::MOVSDrr, X86::MOVSDrm },
{ X86::MOVSHDUPrr, X86::MOVSHDUPrm },
{ X86::MOVZPQILo2PQIrr, X86::MOVZPQILo2PQIrm },
{ X86::MOVZX16rr8, X86::MOVZX16rm8 },
{ X86::MOVZX32rr16, X86::MOVZX32rm16 },
+ { X86::MOVZX32_NOREXrr8, X86::MOVZX32_NOREXrm8 },
{ X86::MOVZX32rr8, X86::MOVZX32rm8 },
{ X86::MOVZX64rr16, X86::MOVZX64rm16 },
+ { X86::MOVZX64rr32, X86::MOVZX64rm32 },
{ X86::MOVZX64rr8, X86::MOVZX64rm8 },
{ X86::PSHUFDri, X86::PSHUFDmi },
{ X86::PSHUFHWri, X86::PSHUFHWmi },
for (unsigned i = 0, e = array_lengthof(OpTbl1); i != e; ++i) {
unsigned RegOp = OpTbl1[i][0];
unsigned MemOp = OpTbl1[i][1];
- if (!RegOp2MemOpTable1.insert(std::make_pair((unsigned*)RegOp, MemOp)))
+ if (!RegOp2MemOpTable1.insert(std::make_pair((unsigned*)RegOp,
+ MemOp)).second)
assert(false && "Duplicated entries?");
unsigned AuxInfo = 1 | (1 << 4); // Index 1, folded load
if (RegOp != X86::FsMOVAPDrr && RegOp != X86::FsMOVAPSrr)
if (!MemOp2RegOpTable.insert(std::make_pair((unsigned*)MemOp,
- std::make_pair(RegOp, AuxInfo))))
+ std::make_pair(RegOp, AuxInfo))).second)
AmbEntries.push_back(MemOp);
}
{ X86::CMOVNE16rr, X86::CMOVNE16rm },
{ X86::CMOVNE32rr, X86::CMOVNE32rm },
{ X86::CMOVNE64rr, X86::CMOVNE64rm },
+ { X86::CMOVNO16rr, X86::CMOVNO16rm },
+ { X86::CMOVNO32rr, X86::CMOVNO32rm },
+ { X86::CMOVNO64rr, X86::CMOVNO64rm },
{ X86::CMOVNP16rr, X86::CMOVNP16rm },
{ X86::CMOVNP32rr, X86::CMOVNP32rm },
{ X86::CMOVNP64rr, X86::CMOVNP64rm },
{ X86::CMOVNS16rr, X86::CMOVNS16rm },
{ X86::CMOVNS32rr, X86::CMOVNS32rm },
{ X86::CMOVNS64rr, X86::CMOVNS64rm },
+ { X86::CMOVO16rr, X86::CMOVO16rm },
+ { X86::CMOVO32rr, X86::CMOVO32rm },
+ { X86::CMOVO64rr, X86::CMOVO64rm },
{ X86::CMOVP16rr, X86::CMOVP16rm },
{ X86::CMOVP32rr, X86::CMOVP32rm },
{ X86::CMOVP64rr, X86::CMOVP64rm },
{ X86::PMINSWrr, X86::PMINSWrm },
{ X86::PMINUBrr, X86::PMINUBrm },
{ X86::PMULDQrr, X86::PMULDQrm },
- { X86::PMULDQrr_int, X86::PMULDQrm_int },
{ X86::PMULHUWrr, X86::PMULHUWrm },
{ X86::PMULHWrr, X86::PMULHWrm },
{ X86::PMULLDrr, X86::PMULLDrm },
for (unsigned i = 0, e = array_lengthof(OpTbl2); i != e; ++i) {
unsigned RegOp = OpTbl2[i][0];
unsigned MemOp = OpTbl2[i][1];
- if (!RegOp2MemOpTable2.insert(std::make_pair((unsigned*)RegOp, MemOp)))
+ if (!RegOp2MemOpTable2.insert(std::make_pair((unsigned*)RegOp,
+ MemOp)).second)
assert(false && "Duplicated entries?");
- unsigned AuxInfo = 2 | (1 << 4); // Index 1, folded load
+ unsigned AuxInfo = 2 | (1 << 4); // Index 2, folded load
if (!MemOp2RegOpTable.insert(std::make_pair((unsigned*)MemOp,
- std::make_pair(RegOp, AuxInfo))))
+ std::make_pair(RegOp, AuxInfo))).second)
AmbEntries.push_back(MemOp);
}
}
bool X86InstrInfo::isMoveInstr(const MachineInstr& MI,
- unsigned& sourceReg,
- unsigned& destReg) const {
+ unsigned &SrcReg, unsigned &DstReg,
+ unsigned &SrcSubIdx, unsigned &DstSubIdx) const {
switch (MI.getOpcode()) {
default:
return false;
case X86::MOV8rr:
+ case X86::MOV8rr_NOREX:
case X86::MOV16rr:
case X86::MOV32rr:
case X86::MOV64rr:
- case X86::MOV16to16_:
- case X86::MOV32to32_:
case X86::MOVSSrr:
case X86::MOVSDrr:
case X86::FsMOVAPDrr:
case X86::MOVAPSrr:
case X86::MOVAPDrr:
+ case X86::MOVDQArr:
case X86::MOVSS2PSrr:
case X86::MOVSD2PDrr:
case X86::MOVPS2SSrr:
case X86::MOVPD2SDrr:
- case X86::MMX_MOVD64rr:
case X86::MMX_MOVQ64rr:
assert(MI.getNumOperands() >= 2 &&
- MI.getOperand(0).isRegister() &&
- MI.getOperand(1).isRegister() &&
+ MI.getOperand(0).isReg() &&
+ MI.getOperand(1).isReg() &&
"invalid register-register move instruction");
- sourceReg = MI.getOperand(1).getReg();
- destReg = MI.getOperand(0).getReg();
+ SrcReg = MI.getOperand(1).getReg();
+ DstReg = MI.getOperand(0).getReg();
+ SrcSubIdx = MI.getOperand(1).getSubReg();
+ DstSubIdx = MI.getOperand(0).getSubReg();
return true;
}
}
-unsigned X86InstrInfo::isLoadFromStackSlot(MachineInstr *MI,
+unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
int &FrameIndex) const {
switch (MI->getOpcode()) {
default: break;
case X86::MOV8rm:
case X86::MOV16rm:
- case X86::MOV16_rm:
case X86::MOV32rm:
- case X86::MOV32_rm:
case X86::MOV64rm:
case X86::LD_Fp64m:
case X86::MOVSSrm:
case X86::MOVSDrm:
case X86::MOVAPSrm:
case X86::MOVAPDrm:
+ case X86::MOVDQArm:
case X86::MMX_MOVD64rm:
case X86::MMX_MOVQ64rm:
if (MI->getOperand(1).isFI() && MI->getOperand(2).isImm() &&
return 0;
}
-unsigned X86InstrInfo::isStoreToStackSlot(MachineInstr *MI,
+unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
int &FrameIndex) const {
switch (MI->getOpcode()) {
default: break;
case X86::MOV8mr:
case X86::MOV16mr:
- case X86::MOV16_mr:
case X86::MOV32mr:
- case X86::MOV32_mr:
case X86::MOV64mr:
case X86::ST_FpP64m:
case X86::MOVSSmr:
case X86::MOVSDmr:
case X86::MOVAPSmr:
case X86::MOVAPDmr:
+ case X86::MOVDQAmr:
case X86::MMX_MOVD64mr:
case X86::MMX_MOVQ64mr:
case X86::MMX_MOVNTQmr:
MI->getOperand(2).getReg() == 0 &&
MI->getOperand(3).getImm() == 0) {
FrameIndex = MI->getOperand(0).getIndex();
- return MI->getOperand(4).getReg();
+ return MI->getOperand(X86AddrNumOperands).getReg();
}
break;
}
/// regIsPICBase - Return true if register is PIC base (i.e. defined by
/// X86::MOVPC32r).
-static bool regIsPICBase(unsigned BaseReg, MachineRegisterInfo &MRI) {
+static bool regIsPICBase(unsigned BaseReg, const MachineRegisterInfo &MRI) {
bool isPICBase = false;
for (MachineRegisterInfo::def_iterator I = MRI.def_begin(BaseReg),
E = MRI.def_end(); I != E; ++I) {
default: break;
case X86::MOV8rm:
case X86::MOV16rm:
- case X86::MOV16_rm:
case X86::MOV32rm:
- case X86::MOV32_rm:
case X86::MOV64rm:
case X86::LD_Fp64m:
case X86::MOVSSrm:
case X86::MOVSDrm:
case X86::MOVAPSrm:
case X86::MOVAPDrm:
+ case X86::MOVDQArm:
case X86::MMX_MOVD64rm:
case X86::MMX_MOVQ64rm: {
// Loads from constant pools are trivially rematerializable.
// Allow re-materialization of PIC load.
if (!ReMatPICStubLoad && MI->getOperand(4).isGlobal())
return false;
- MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
+ const MachineFunction &MF = *MI->getParent()->getParent();
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
bool isPICBase = false;
for (MachineRegisterInfo::def_iterator I = MRI.def_begin(BaseReg),
E = MRI.def_end(); I != E; ++I) {
case X86::LEA32r:
case X86::LEA64r: {
- if (MI->getOperand(1).isReg() &&
- MI->getOperand(2).isImm() &&
+ if (MI->getOperand(2).isImm() &&
MI->getOperand(3).isReg() && MI->getOperand(3).getReg() == 0 &&
!MI->getOperand(4).isReg()) {
// lea fi#, lea GV, etc. are all rematerializable.
+ if (!MI->getOperand(1).isReg())
+ return true;
unsigned BaseReg = MI->getOperand(1).getReg();
if (BaseReg == 0)
return true;
// Allow re-materialization of lea PICBase + x.
- MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
+ const MachineFunction &MF = *MI->getParent()->getParent();
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
return regIsPICBase(BaseReg, MRI);
}
return false;
/// two instructions it assumes it's not safe.
static bool isSafeToClobberEFLAGS(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) {
+ // It's always safe to clobber EFLAGS at the end of a block.
+ if (I == MBB.end())
+ return true;
+
// For compile time consideration, if we are not able to determine the
// safety after visiting 2 instructions, we will assume it's not safe.
for (unsigned i = 0; i < 2; ++i) {
- if (I == MBB.end())
- // Reached end of block, it's safe.
- return true;
bool SeenDef = false;
for (unsigned j = 0, e = I->getNumOperands(); j != e; ++j) {
MachineOperand &MO = I->getOperand(j);
- if (!MO.isRegister())
+ if (!MO.isReg())
continue;
if (MO.getReg() == X86::EFLAGS) {
if (MO.isUse())
// This instruction defines EFLAGS, no need to look any further.
return true;
++I;
+
+ // If we make it to the end of the block, it's safe to clobber EFLAGS.
+ if (I == MBB.end())
+ return true;
}
// Conservative answer.
MachineBasicBlock::iterator I,
unsigned DestReg,
const MachineInstr *Orig) const {
+ DebugLoc DL = DebugLoc::getUnknownLoc();
+ if (I != MBB.end()) DL = I->getDebugLoc();
+
unsigned SubIdx = Orig->getOperand(0).isReg()
? Orig->getOperand(0).getSubReg() : 0;
bool ChangeSubIdx = SubIdx != 0;
case X86::MOV32r0: Opc = X86::MOV32ri; break;
case X86::MOV64r0: Opc = X86::MOV64ri32; break;
}
- BuildMI(MBB, I, get(Opc), DestReg).addImm(0);
+ BuildMI(MBB, I, DL, get(Opc), DestReg).addImm(0);
Emitted = true;
}
break;
}
if (!Emitted) {
- MachineInstr *MI = Orig->clone();
+ MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
MI->getOperand(0).setReg(DestReg);
MBB.insert(I, MI);
}
/// from the argument area of a function if it does not change. This should
/// only return true if *all* loads the instruction does are invariant (if it
/// does multiple loads).
-bool X86InstrInfo::isInvariantLoad(MachineInstr *MI) const {
+bool X86InstrInfo::isInvariantLoad(const MachineInstr *MI) const {
// This code cares about loads from three cases: constant pool entries,
// invariant argument slots, and global stubs. In order to handle these cases
// for all of the myriad of X86 instructions, we just scan for a CP/FI/GV
static bool hasLiveCondCodeDef(MachineInstr *MI) {
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
- if (MO.isRegister() && MO.isDef() &&
+ if (MO.isReg() && MO.isDef() &&
MO.getReg() == X86::EFLAGS && !MO.isDead()) {
return true;
}
MachineBasicBlock::iterator &MBBI,
LiveVariables *LV) const {
MachineInstr *MI = MBBI;
+ MachineFunction &MF = *MI->getParent()->getParent();
// All instructions input are two-addr instructions. Get the known operands.
unsigned Dest = MI->getOperand(0).getReg();
unsigned Src = MI->getOperand(1).getReg();
+ bool isDead = MI->getOperand(0).isDead();
+ bool isKill = MI->getOperand(1).isKill();
MachineInstr *NewMI = NULL;
// FIXME: 16-bit LEA's are really slow on Athlons, but not bad on P4's. When
assert(MI->getNumOperands() == 4 && "Unknown shufps instruction!");
if (!TM.getSubtarget<X86Subtarget>().hasSSE2()) return 0;
- unsigned A = MI->getOperand(0).getReg();
unsigned B = MI->getOperand(1).getReg();
unsigned C = MI->getOperand(2).getReg();
- unsigned M = MI->getOperand(3).getImm();
if (B != C) return 0;
- NewMI = BuildMI(get(X86::PSHUFDri), A).addReg(B).addImm(M);
+ unsigned A = MI->getOperand(0).getReg();
+ unsigned M = MI->getOperand(3).getImm();
+ NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::PSHUFDri))
+ .addReg(A, true, false, false, isDead)
+ .addReg(B, false, false, isKill).addImm(M);
break;
}
case X86::SHL64ri: {
assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
// NOTE: LEA doesn't produce flags like shift does, but LLVM doesn't yet
// use the flags produced by a shift, so this is safe.
- unsigned Dest = MI->getOperand(0).getReg();
- unsigned Src = MI->getOperand(1).getReg();
unsigned ShAmt = MI->getOperand(2).getImm();
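// LEA's scale field can only encode 1, 2, 4, or 8, so only shift
// amounts 1 through 3 (scales 2, 4, and 8) are profitable to convert.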
if (ShAmt == 0 || ShAmt >= 4) return 0;
-
- NewMI = BuildMI(get(X86::LEA64r), Dest)
- .addReg(0).addImm(1 << ShAmt).addReg(Src).addImm(0);
+
+ NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r))
+ .addReg(Dest, true, false, false, isDead)
+ .addReg(0).addImm(1 << ShAmt).addReg(Src, false, false, isKill).addImm(0);
break;
}
case X86::SHL32ri: {
assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
// NOTE: LEA doesn't produce flags like shift does, but LLVM doesn't yet
// use the flags produced by a shift, so this is safe.
- unsigned Dest = MI->getOperand(0).getReg();
- unsigned Src = MI->getOperand(1).getReg();
unsigned ShAmt = MI->getOperand(2).getImm();
if (ShAmt == 0 || ShAmt >= 4) return 0;
-
+
unsigned Opc = TM.getSubtarget<X86Subtarget>().is64Bit() ?
X86::LEA64_32r : X86::LEA32r;
- NewMI = BuildMI(get(Opc), Dest)
- .addReg(0).addImm(1 << ShAmt).addReg(Src).addImm(0);
+ NewMI = BuildMI(MF, MI->getDebugLoc(), get(Opc))
+ .addReg(Dest, true, false, false, isDead)
+ .addReg(0).addImm(1 << ShAmt)
+ .addReg(Src, false, false, isKill).addImm(0);
break;
}
case X86::SHL16ri: {
assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
// NOTE: LEA doesn't produce flags like shift does, but LLVM doesn't yet
// use the flags produced by a shift, so this is safe.
- unsigned Dest = MI->getOperand(0).getReg();
- unsigned Src = MI->getOperand(1).getReg();
unsigned ShAmt = MI->getOperand(2).getImm();
if (ShAmt == 0 || ShAmt >= 4) return 0;
-
+
if (DisableLEA16) {
// If 16-bit LEA is disabled, use 32-bit LEA via subregisters.
MachineRegisterInfo &RegInfo = MFI->getParent()->getRegInfo();
// Build and insert into an implicit UNDEF value. This is OK because
// we'll be shifting and then extracting the lower 16 bits.
- MachineInstr *Undef = BuildMI(get(X86::IMPLICIT_DEF), leaInReg);
-
- MachineInstr *Ins =
- BuildMI(get(X86::INSERT_SUBREG),leaInReg)
- .addReg(leaInReg).addReg(Src).addImm(X86::SUBREG_16BIT);
-
- NewMI = BuildMI(get(Opc), leaOutReg)
- .addReg(0).addImm(1 << ShAmt).addReg(leaInReg).addImm(0);
+ BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(X86::IMPLICIT_DEF), leaInReg);
+ MachineInstr *InsMI =
+ BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(X86::INSERT_SUBREG),leaInReg)
+ .addReg(leaInReg).addReg(Src, false, false, isKill)
+ .addImm(X86::SUBREG_16BIT);
- MachineInstr *Ext =
- BuildMI(get(X86::EXTRACT_SUBREG), Dest)
- .addReg(leaOutReg).addImm(X86::SUBREG_16BIT);
- Ext->copyKillDeadInfo(MI);
+ NewMI = BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(Opc), leaOutReg)
+ .addReg(0).addImm(1 << ShAmt)
+ .addReg(leaInReg, false, false, true).addImm(0);
- MFI->insert(MBBI, Undef);
- MFI->insert(MBBI, Ins); // Insert the insert_subreg
+ MachineInstr *ExtMI =
+ BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(X86::EXTRACT_SUBREG))
+ .addReg(Dest, true, false, false, isDead)
+ .addReg(leaOutReg, false, false, true).addImm(X86::SUBREG_16BIT);
+
if (LV) {
- LV->instructionChanged(MI, NewMI); // Update live variables
- LV->addVirtualRegisterKilled(leaInReg, NewMI);
+ // Update live variables
+ LV->getVarInfo(leaInReg).Kills.push_back(NewMI);
+ LV->getVarInfo(leaOutReg).Kills.push_back(ExtMI);
+ if (isKill)
+ LV->replaceKillInstruction(Src, MI, InsMI);
+ if (isDead)
+ LV->replaceKillInstruction(Dest, MI, ExtMI);
}
- MFI->insert(MBBI, NewMI); // Insert the new inst
- if (LV) LV->addVirtualRegisterKilled(leaOutReg, Ext);
- MFI->insert(MBBI, Ext); // Insert the extract_subreg
- return Ext;
+ return ExtMI;
} else {
- NewMI = BuildMI(get(X86::LEA16r), Dest)
- .addReg(0).addImm(1 << ShAmt).addReg(Src).addImm(0);
+ NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
+ .addReg(Dest, true, false, false, isDead)
+ .addReg(0).addImm(1 << ShAmt)
+ .addReg(Src, false, false, isKill).addImm(0);
}
break;
}
switch (MIOpc) {
default: return 0;
case X86::INC64r:
- case X86::INC32r: {
+ case X86::INC32r:
+ case X86::INC64_32r: {
assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r
: (is64Bit ? X86::LEA64_32r : X86::LEA32r);
- NewMI = addRegOffset(BuildMI(get(Opc), Dest), Src, 1);
+ NewMI = addLeaRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
+ .addReg(Dest, true, false, false, isDead),
+ Src, isKill, 1);
break;
}
case X86::INC16r:
case X86::INC64_16r:
if (DisableLEA16) return 0;
assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
- NewMI = addRegOffset(BuildMI(get(X86::LEA16r), Dest), Src, 1);
+ NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
+ .addReg(Dest, true, false, false, isDead),
+ Src, isKill, 1);
break;
case X86::DEC64r:
- case X86::DEC32r: {
+ case X86::DEC32r:
+ case X86::DEC64_32r: {
assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r
: (is64Bit ? X86::LEA64_32r : X86::LEA32r);
- NewMI = addRegOffset(BuildMI(get(Opc), Dest), Src, -1);
+ NewMI = addLeaRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
+ .addReg(Dest, true, false, false, isDead),
+ Src, isKill, -1);
break;
}
case X86::DEC16r:
case X86::DEC64_16r:
if (DisableLEA16) return 0;
assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
- NewMI = addRegOffset(BuildMI(get(X86::LEA16r), Dest), Src, -1);
+ NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
+ .addReg(Dest, true, false, false, isDead),
+ Src, isKill, -1);
break;
case X86::ADD64rr:
case X86::ADD32rr: {
assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
unsigned Opc = MIOpc == X86::ADD64rr ? X86::LEA64r
: (is64Bit ? X86::LEA64_32r : X86::LEA32r);
- NewMI = addRegReg(BuildMI(get(Opc), Dest), Src,
- MI->getOperand(2).getReg());
+ unsigned Src2 = MI->getOperand(2).getReg();
+ bool isKill2 = MI->getOperand(2).isKill();
+ NewMI = addRegReg(BuildMI(MF, MI->getDebugLoc(), get(Opc))
+ .addReg(Dest, true, false, false, isDead),
+ Src, isKill, Src2, isKill2);
+ if (LV && isKill2)
+ LV->replaceKillInstruction(Src2, MI, NewMI);
break;
}
- case X86::ADD16rr:
+ case X86::ADD16rr: {
if (DisableLEA16) return 0;
assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
- NewMI = addRegReg(BuildMI(get(X86::LEA16r), Dest), Src,
- MI->getOperand(2).getReg());
+ unsigned Src2 = MI->getOperand(2).getReg();
+ bool isKill2 = MI->getOperand(2).isKill();
+ NewMI = addRegReg(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
+ .addReg(Dest, true, false, false, isDead),
+ Src, isKill, Src2, isKill2);
+ if (LV && isKill2)
+ LV->replaceKillInstruction(Src2, MI, NewMI);
break;
+ }
case X86::ADD64ri32:
case X86::ADD64ri8:
assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
- if (MI->getOperand(2).isImmediate())
- NewMI = addRegOffset(BuildMI(get(X86::LEA64r), Dest), Src,
- MI->getOperand(2).getImm());
+ if (MI->getOperand(2).isImm())
+ NewMI = addLeaRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r))
+ .addReg(Dest, true, false, false, isDead),
+ Src, isKill, MI->getOperand(2).getImm());
break;
case X86::ADD32ri:
case X86::ADD32ri8:
assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
- if (MI->getOperand(2).isImmediate()) {
+ if (MI->getOperand(2).isImm()) {
unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
- NewMI = addRegOffset(BuildMI(get(Opc), Dest), Src,
- MI->getOperand(2).getImm());
+ NewMI = addLeaRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
+ .addReg(Dest, true, false, false, isDead),
+ Src, isKill, MI->getOperand(2).getImm());
}
break;
case X86::ADD16ri:
case X86::ADD16ri8:
if (DisableLEA16) return 0;
assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
- if (MI->getOperand(2).isImmediate())
- NewMI = addRegOffset(BuildMI(get(X86::LEA16r), Dest), Src,
- MI->getOperand(2).getImm());
+ if (MI->getOperand(2).isImm())
+ NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
+ .addReg(Dest, true, false, false, isDead),
+ Src, isKill, MI->getOperand(2).getImm());
break;
case X86::SHL16ri:
if (DisableLEA16) return 0;
case X86::SHL32ri:
case X86::SHL64ri: {
- assert(MI->getNumOperands() >= 3 && MI->getOperand(2).isImmediate() &&
+ assert(MI->getNumOperands() >= 3 && MI->getOperand(2).isImm() &&
"Unknown shl instruction!");
unsigned ShAmt = MI->getOperand(2).getImm();
if (ShAmt == 1 || ShAmt == 2 || ShAmt == 3) {
unsigned Opc = MIOpc == X86::SHL64ri ? X86::LEA64r
: (MIOpc == X86::SHL32ri
? (is64Bit ? X86::LEA64_32r : X86::LEA32r) : X86::LEA16r);
- NewMI = addFullAddress(BuildMI(get(Opc), Dest), AM);
+ NewMI = addFullAddress(BuildMI(MF, MI->getDebugLoc(), get(Opc))
+ .addReg(Dest, true, false, false, isDead), AM);
+ if (isKill)
+ NewMI->getOperand(3).setIsKill(true);
}
break;
}
if (!NewMI) return 0;
- NewMI->copyKillDeadInfo(MI);
- if (LV) LV->instructionChanged(MI, NewMI); // Update live variables
+ if (LV) { // Update live variables
+ if (isKill)
+ LV->replaceKillInstruction(Src, MI, NewMI);
+ if (isDead)
+ LV->replaceKillInstruction(Dest, MI, NewMI);
+ }
+
MFI->insert(MBBI, NewMI); // Insert the new inst
return NewMI;
}
case X86::SHLD64rri8: Size = 64; Opc = X86::SHRD64rri8; break;
}
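// shld(B, C, Amt) computes the same value as shrd(C, B, Size-Amt), so
// the commute switches the opcode and inverts the shift amount.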
unsigned Amt = MI->getOperand(3).getImm();
- unsigned A = MI->getOperand(0).getReg();
- unsigned B = MI->getOperand(1).getReg();
- unsigned C = MI->getOperand(2).getReg();
- bool AisDead = MI->getOperand(0).isDead();
- bool BisKill = MI->getOperand(1).isKill();
- bool CisKill = MI->getOperand(2).isKill();
- // If machine instrs are no longer in two-address forms, update
- // destination register as well.
- if (A == B) {
- // Must be two address instruction!
- assert(MI->getDesc().getOperandConstraint(0, TOI::TIED_TO) &&
- "Expecting a two-address instruction!");
- A = C;
- CisKill = false;
+ if (NewMI) {
+ MachineFunction &MF = *MI->getParent()->getParent();
+ MI = MF.CloneMachineInstr(MI);
+ NewMI = false;
}
- return BuildMI(get(Opc)).addReg(A, true, false, false, AisDead)
- .addReg(C, false, false, CisKill)
- .addReg(B, false, false, BisKill).addImm(Size-Amt);
+ MI->setDesc(get(Opc));
+ MI->getOperand(3).setImm(Size-Amt);
+ return TargetInstrInfoImpl::commuteInstruction(MI, NewMI);
}
case X86::CMOVB16rr:
case X86::CMOVB32rr:
case X86::CMOVP64rr:
case X86::CMOVNP16rr:
case X86::CMOVNP32rr:
- case X86::CMOVNP64rr: {
+ case X86::CMOVNP64rr:
+ case X86::CMOVO16rr:
+ case X86::CMOVO32rr:
+ case X86::CMOVO64rr:
+ case X86::CMOVNO16rr:
+ case X86::CMOVNO32rr:
+ case X86::CMOVNO64rr: {
unsigned Opc = 0;
switch (MI->getOpcode()) {
default: break;
case X86::CMOVG64rr: Opc = X86::CMOVLE64rr; break;
case X86::CMOVS16rr: Opc = X86::CMOVNS16rr; break;
case X86::CMOVS32rr: Opc = X86::CMOVNS32rr; break;
- case X86::CMOVS64rr: Opc = X86::CMOVNS32rr; break;
+ case X86::CMOVS64rr: Opc = X86::CMOVNS64rr; break;
case X86::CMOVNS16rr: Opc = X86::CMOVS16rr; break;
case X86::CMOVNS32rr: Opc = X86::CMOVS32rr; break;
case X86::CMOVNS64rr: Opc = X86::CMOVS64rr; break;
case X86::CMOVP16rr: Opc = X86::CMOVNP16rr; break;
case X86::CMOVP32rr: Opc = X86::CMOVNP32rr; break;
- case X86::CMOVP64rr: Opc = X86::CMOVNP32rr; break;
+ case X86::CMOVP64rr: Opc = X86::CMOVNP64rr; break;
case X86::CMOVNP16rr: Opc = X86::CMOVP16rr; break;
case X86::CMOVNP32rr: Opc = X86::CMOVP32rr; break;
case X86::CMOVNP64rr: Opc = X86::CMOVP64rr; break;
+ case X86::CMOVO16rr: Opc = X86::CMOVNO16rr; break;
+ case X86::CMOVO32rr: Opc = X86::CMOVNO32rr; break;
+ case X86::CMOVO64rr: Opc = X86::CMOVNO64rr; break;
+ case X86::CMOVNO16rr: Opc = X86::CMOVO16rr; break;
+ case X86::CMOVNO32rr: Opc = X86::CMOVO32rr; break;
+ case X86::CMOVNO64rr: Opc = X86::CMOVO64rr; break;
+ }
+ if (NewMI) {
+ MachineFunction &MF = *MI->getParent()->getParent();
+ MI = MF.CloneMachineInstr(MI);
+ NewMI = false;
}
-
MI->setDesc(get(Opc));
// Fallthrough intended.
}
bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
- std::vector<MachineOperand> &Cond) const {
- // If the block has no terminators, it just falls into the block after it.
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const {
+ // Start from the bottom of the block and work up, examining the
+ // terminator instructions.
MachineBasicBlock::iterator I = MBB.end();
- if (I == MBB.begin() || !isBrAnalysisUnpredicatedTerminator(--I, *this))
- return false;
-
- // Get the last instruction in the block.
- MachineInstr *LastInst = I;
-
- // If there is only one terminator instruction, process it.
- if (I == MBB.begin() || !isBrAnalysisUnpredicatedTerminator(--I, *this)) {
- if (!LastInst->getDesc().isBranch())
+ while (I != MBB.begin()) {
+ --I;
+ // Working from the bottom, when we see a non-terminator
+ // instruction, we're done.
+ if (!isBrAnalysisUnpredicatedTerminator(I, *this))
+ break;
+ // A terminator that isn't a branch can't easily be handled
+ // by this analysis.
+ if (!I->getDesc().isBranch())
return true;
-
- // If the block ends with a branch there are 3 possibilities:
- // it's an unconditional, conditional, or indirect branch.
-
- if (LastInst->getOpcode() == X86::JMP) {
- TBB = LastInst->getOperand(0).getMBB();
- return false;
+ // Handle unconditional branches.
+ if (I->getOpcode() == X86::JMP) {
+ if (!AllowModify) {
+ TBB = I->getOperand(0).getMBB();
+ return false;
+ }
+
+ // If the block has any instructions after a JMP, delete them.
+ while (next(I) != MBB.end())
+ next(I)->eraseFromParent();
+ Cond.clear();
+ FBB = 0;
+ // Delete the JMP if it's equivalent to a fall-through.
+ if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
+ TBB = 0;
+ I->eraseFromParent();
+ I = MBB.end();
+ continue;
+ }
+      // TBB is used to indicate the unconditional destination.
+ TBB = I->getOperand(0).getMBB();
+ continue;
}
- X86::CondCode BranchCode = GetCondFromBranchOpc(LastInst->getOpcode());
+ // Handle conditional branches.
+ X86::CondCode BranchCode = GetCondFromBranchOpc(I->getOpcode());
if (BranchCode == X86::COND_INVALID)
return true; // Can't handle indirect branch.
-
- // Otherwise, block ends with fall-through condbranch.
- TBB = LastInst->getOperand(0).getMBB();
- Cond.push_back(MachineOperand::CreateImm(BranchCode));
- return false;
- }
-
- // Get the instruction before it if it's a terminator.
- MachineInstr *SecondLastInst = I;
-
- // If there are three terminators, we don't know what sort of block this is.
- if (SecondLastInst && I != MBB.begin() &&
- isBrAnalysisUnpredicatedTerminator(--I, *this))
- return true;
-
- // If the block ends with X86::JMP and a conditional branch, handle it.
- X86::CondCode BranchCode = GetCondFromBranchOpc(SecondLastInst->getOpcode());
- if (BranchCode != X86::COND_INVALID && LastInst->getOpcode() == X86::JMP) {
- TBB = SecondLastInst->getOperand(0).getMBB();
- Cond.push_back(MachineOperand::CreateImm(BranchCode));
- FBB = LastInst->getOperand(0).getMBB();
- return false;
- }
-
- // If the block ends with two X86::JMPs, handle it. The second one is not
- // executed, so remove it.
- if (SecondLastInst->getOpcode() == X86::JMP &&
- LastInst->getOpcode() == X86::JMP) {
- TBB = SecondLastInst->getOperand(0).getMBB();
- I = LastInst;
- I->eraseFromParent();
- return false;
+ // Working from the bottom, handle the first conditional branch.
+ if (Cond.empty()) {
+ FBB = TBB;
+ TBB = I->getOperand(0).getMBB();
+ Cond.push_back(MachineOperand::CreateImm(BranchCode));
+ continue;
+ }
+ // Handle subsequent conditional branches. Only handle the case
+ // where all conditional branches branch to the same destination
+ // and their condition opcodes fit one of the special
+ // multi-branch idioms.
+ assert(Cond.size() == 1);
+ assert(TBB);
+ // Only handle the case where all conditional branches branch to
+ // the same destination.
+ if (TBB != I->getOperand(0).getMBB())
+ return true;
+ X86::CondCode OldBranchCode = (X86::CondCode)Cond[0].getImm();
+ // If the conditions are the same, we can leave them alone.
+ if (OldBranchCode == BranchCode)
+ continue;
+ // If they differ, see if they fit one of the known patterns.
+ // Theoretically we could handle more patterns here, but
+ // we shouldn't expect to see them if instruction selection
+ // has done a reasonable job.
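+      // These combinations are typically produced by floating-point
+      // equality tests, where oeq lowers to JE+JNP and une to JNE+JP.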
+ if ((OldBranchCode == X86::COND_NP &&
+ BranchCode == X86::COND_E) ||
+ (OldBranchCode == X86::COND_E &&
+ BranchCode == X86::COND_NP))
+ BranchCode = X86::COND_NP_OR_E;
+ else if ((OldBranchCode == X86::COND_P &&
+ BranchCode == X86::COND_NE) ||
+ (OldBranchCode == X86::COND_NE &&
+ BranchCode == X86::COND_P))
+ BranchCode = X86::COND_NE_OR_P;
+ else
+ return true;
+ // Update the MachineOperand.
+ Cond[0].setImm(BranchCode);
}
- // Otherwise, can't handle this.
- return true;
+ return false;
}
unsigned X86InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
MachineBasicBlock::iterator I = MBB.end();
- if (I == MBB.begin()) return 0;
- --I;
- if (I->getOpcode() != X86::JMP &&
- GetCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID)
- return 0;
-
- // Remove the branch.
- I->eraseFromParent();
-
- I = MBB.end();
-
- if (I == MBB.begin()) return 1;
- --I;
- if (GetCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID)
- return 1;
-
- // Remove the branch.
- I->eraseFromParent();
- return 2;
-}
+ unsigned Count = 0;
-static const MachineInstrBuilder &X86InstrAddOperand(MachineInstrBuilder &MIB,
- MachineOperand &MO) {
- if (MO.isRegister())
- MIB = MIB.addReg(MO.getReg(), MO.isDef(), MO.isImplicit(),
- false, false, MO.getSubReg());
- else if (MO.isImmediate())
- MIB = MIB.addImm(MO.getImm());
- else if (MO.isFrameIndex())
- MIB = MIB.addFrameIndex(MO.getIndex());
- else if (MO.isGlobalAddress())
- MIB = MIB.addGlobalAddress(MO.getGlobal(), MO.getOffset());
- else if (MO.isConstantPoolIndex())
- MIB = MIB.addConstantPoolIndex(MO.getIndex(), MO.getOffset());
- else if (MO.isJumpTableIndex())
- MIB = MIB.addJumpTableIndex(MO.getIndex());
- else if (MO.isExternalSymbol())
- MIB = MIB.addExternalSymbol(MO.getSymbolName());
- else
- assert(0 && "Unknown operand for X86InstrAddOperand!");
-
- return MIB;
+ while (I != MBB.begin()) {
+ --I;
+ if (I->getOpcode() != X86::JMP &&
+ GetCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID)
+ break;
+ // Remove the branch.
+ I->eraseFromParent();
+ I = MBB.end();
+ ++Count;
+ }
+
+ return Count;
}
unsigned
X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
- const std::vector<MachineOperand> &Cond) const {
+ const SmallVectorImpl<MachineOperand> &Cond) const {
+  // FIXME: this should probably have a DebugLoc operand
+ DebugLoc dl = DebugLoc::getUnknownLoc();
// Shouldn't be a fall through.
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 1 || Cond.size() == 0) &&
"X86 branch conditions have one component!");
- if (FBB == 0) { // One way branch.
- if (Cond.empty()) {
- // Unconditional branch?
- BuildMI(&MBB, get(X86::JMP)).addMBB(TBB);
- } else {
- // Conditional branch.
- unsigned Opc = GetCondBranchFromCond((X86::CondCode)Cond[0].getImm());
- BuildMI(&MBB, get(Opc)).addMBB(TBB);
- }
+ if (Cond.empty()) {
+ // Unconditional branch?
+ assert(!FBB && "Unconditional branch with multiple successors!");
+ BuildMI(&MBB, dl, get(X86::JMP)).addMBB(TBB);
return 1;
}
-
- // Two-way Conditional branch.
- unsigned Opc = GetCondBranchFromCond((X86::CondCode)Cond[0].getImm());
- BuildMI(&MBB, get(Opc)).addMBB(TBB);
- BuildMI(&MBB, get(X86::JMP)).addMBB(FBB);
- return 2;
+
+ // Conditional branch.
+ unsigned Count = 0;
+ X86::CondCode CC = (X86::CondCode)Cond[0].getImm();
+ switch (CC) {
+ case X86::COND_NP_OR_E:
+ // Synthesize NP_OR_E with two branches.
+ BuildMI(&MBB, dl, get(X86::JNP)).addMBB(TBB);
+ ++Count;
+ BuildMI(&MBB, dl, get(X86::JE)).addMBB(TBB);
+ ++Count;
+ break;
+ case X86::COND_NE_OR_P:
+ // Synthesize NE_OR_P with two branches.
+ BuildMI(&MBB, dl, get(X86::JNE)).addMBB(TBB);
+ ++Count;
+ BuildMI(&MBB, dl, get(X86::JP)).addMBB(TBB);
+ ++Count;
+ break;
+ default: {
+ unsigned Opc = GetCondBranchFromCond(CC);
+ BuildMI(&MBB, dl, get(Opc)).addMBB(TBB);
+ ++Count;
+ }
+ }
+ if (FBB) {
+ // Two-way Conditional branch. Insert the second branch.
+ BuildMI(&MBB, dl, get(X86::JMP)).addMBB(FBB);
+ ++Count;
+ }
+ return Count;
}
-void X86InstrInfo::copyRegToReg(MachineBasicBlock &MBB,
+/// isHReg - Test if the given register is a physical h register
+/// (AH, BH, CH, or DH).
+static bool isHReg(unsigned Reg) {
+ return X86::GR8_ABCD_HRegClass.contains(Reg);
+}
+
+bool X86InstrInfo::copyRegToReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg, unsigned SrcReg,
const TargetRegisterClass *DestRC,
const TargetRegisterClass *SrcRC) const {
- if (DestRC == SrcRC) {
+ DebugLoc DL = DebugLoc::getUnknownLoc();
+ if (MI != MBB.end()) DL = MI->getDebugLoc();
+
+  // Determine if DestRC and SrcRC have a common superclass.
+ const TargetRegisterClass *CommonRC = DestRC;
+ if (DestRC == SrcRC)
+    /* Source and destination have the same register class. */;
+ else if (CommonRC->hasSuperClass(SrcRC))
+ CommonRC = SrcRC;
+ else if (!DestRC->hasSubClass(SrcRC))
+ CommonRC = 0;
+
+ if (CommonRC) {
unsigned Opc;
- if (DestRC == &X86::GR64RegClass) {
+ if (CommonRC == &X86::GR64RegClass) {
Opc = X86::MOV64rr;
- } else if (DestRC == &X86::GR32RegClass) {
+ } else if (CommonRC == &X86::GR32RegClass) {
Opc = X86::MOV32rr;
- } else if (DestRC == &X86::GR16RegClass) {
+ } else if (CommonRC == &X86::GR16RegClass) {
Opc = X86::MOV16rr;
- } else if (DestRC == &X86::GR8RegClass) {
+ } else if (CommonRC == &X86::GR8RegClass) {
+ // Copying to or from a physical H register on x86-64 requires a NOREX
+ // move. Otherwise use a normal move.
+ if ((isHReg(DestReg) || isHReg(SrcReg)) &&
+ TM.getSubtarget<X86Subtarget>().is64Bit())
+ Opc = X86::MOV8rr_NOREX;
+ else
+ Opc = X86::MOV8rr;
+ } else if (CommonRC == &X86::GR64_ABCDRegClass) {
+ Opc = X86::MOV64rr;
+ } else if (CommonRC == &X86::GR32_ABCDRegClass) {
+ Opc = X86::MOV32rr;
+ } else if (CommonRC == &X86::GR16_ABCDRegClass) {
+ Opc = X86::MOV16rr;
+ } else if (CommonRC == &X86::GR8_ABCD_LRegClass) {
+ Opc = X86::MOV8rr;
+ } else if (CommonRC == &X86::GR8_ABCD_HRegClass) {
+ if (TM.getSubtarget<X86Subtarget>().is64Bit())
+ Opc = X86::MOV8rr_NOREX;
+ else
+ Opc = X86::MOV8rr;
+ } else if (CommonRC == &X86::GR64_NOREXRegClass) {
+ Opc = X86::MOV64rr;
+ } else if (CommonRC == &X86::GR32_NOREXRegClass) {
+ Opc = X86::MOV32rr;
+ } else if (CommonRC == &X86::GR16_NOREXRegClass) {
+ Opc = X86::MOV16rr;
+ } else if (CommonRC == &X86::GR8_NOREXRegClass) {
Opc = X86::MOV8rr;
- } else if (DestRC == &X86::GR32_RegClass) {
- Opc = X86::MOV32_rr;
- } else if (DestRC == &X86::GR16_RegClass) {
- Opc = X86::MOV16_rr;
- } else if (DestRC == &X86::RFP32RegClass) {
+ } else if (CommonRC == &X86::RFP32RegClass) {
Opc = X86::MOV_Fp3232;
- } else if (DestRC == &X86::RFP64RegClass || DestRC == &X86::RSTRegClass) {
+ } else if (CommonRC == &X86::RFP64RegClass || CommonRC == &X86::RSTRegClass) {
Opc = X86::MOV_Fp6464;
- } else if (DestRC == &X86::RFP80RegClass) {
+ } else if (CommonRC == &X86::RFP80RegClass) {
Opc = X86::MOV_Fp8080;
- } else if (DestRC == &X86::FR32RegClass) {
+ } else if (CommonRC == &X86::FR32RegClass) {
Opc = X86::FsMOVAPSrr;
- } else if (DestRC == &X86::FR64RegClass) {
+ } else if (CommonRC == &X86::FR64RegClass) {
Opc = X86::FsMOVAPDrr;
- } else if (DestRC == &X86::VR128RegClass) {
+ } else if (CommonRC == &X86::VR128RegClass) {
Opc = X86::MOVAPSrr;
- } else if (DestRC == &X86::VR64RegClass) {
+ } else if (CommonRC == &X86::VR64RegClass) {
Opc = X86::MMX_MOVQ64rr;
} else {
- assert(0 && "Unknown regclass");
- abort();
+ return false;
}
- BuildMI(MBB, MI, get(Opc), DestReg).addReg(SrcReg);
- return;
+ BuildMI(MBB, MI, DL, get(Opc), DestReg).addReg(SrcReg);
+ return true;
}
// Moving EFLAGS to / from another register requires a push and a pop.
if (SrcRC == &X86::CCRRegClass) {
- assert(SrcReg == X86::EFLAGS);
+ if (SrcReg != X86::EFLAGS)
+ return false;
if (DestRC == &X86::GR64RegClass) {
- BuildMI(MBB, MI, get(X86::PUSHFQ));
- BuildMI(MBB, MI, get(X86::POP64r), DestReg);
- return;
+ BuildMI(MBB, MI, DL, get(X86::PUSHFQ));
+ BuildMI(MBB, MI, DL, get(X86::POP64r), DestReg);
+ return true;
} else if (DestRC == &X86::GR32RegClass) {
- BuildMI(MBB, MI, get(X86::PUSHFD));
- BuildMI(MBB, MI, get(X86::POP32r), DestReg);
- return;
+ BuildMI(MBB, MI, DL, get(X86::PUSHFD));
+ BuildMI(MBB, MI, DL, get(X86::POP32r), DestReg);
+ return true;
}
} else if (DestRC == &X86::CCRRegClass) {
- assert(DestReg == X86::EFLAGS);
+ if (DestReg != X86::EFLAGS)
+ return false;
if (SrcRC == &X86::GR64RegClass) {
- BuildMI(MBB, MI, get(X86::PUSH64r)).addReg(SrcReg);
- BuildMI(MBB, MI, get(X86::POPFQ));
- return;
+ BuildMI(MBB, MI, DL, get(X86::PUSH64r)).addReg(SrcReg);
+ BuildMI(MBB, MI, DL, get(X86::POPFQ));
+ return true;
} else if (SrcRC == &X86::GR32RegClass) {
- BuildMI(MBB, MI, get(X86::PUSH32r)).addReg(SrcReg);
- BuildMI(MBB, MI, get(X86::POPFD));
- return;
+ BuildMI(MBB, MI, DL, get(X86::PUSH32r)).addReg(SrcReg);
+ BuildMI(MBB, MI, DL, get(X86::POPFD));
+ return true;
}
}
-
+
// Moving from ST(0) turns into FpGET_ST0_32 etc.
if (SrcRC == &X86::RSTRegClass) {
// Copying from ST(0)/ST(1).
- assert((SrcReg == X86::ST0 || SrcReg == X86::ST1) &&
- "Can only copy from ST(0)/ST(1) right now");
+ if (SrcReg != X86::ST0 && SrcReg != X86::ST1)
+ // Can only copy from ST(0)/ST(1) right now
+ return false;
bool isST0 = SrcReg == X86::ST0;
unsigned Opc;
if (DestRC == &X86::RFP32RegClass)
else if (DestRC == &X86::RFP64RegClass)
Opc = isST0 ? X86::FpGET_ST0_64 : X86::FpGET_ST1_64;
else {
- assert(DestRC == &X86::RFP80RegClass);
+ if (DestRC != &X86::RFP80RegClass)
+ return false;
Opc = isST0 ? X86::FpGET_ST0_80 : X86::FpGET_ST1_80;
}
- BuildMI(MBB, MI, get(Opc), DestReg);
- return;
+ BuildMI(MBB, MI, DL, get(Opc), DestReg);
+ return true;
}
// Moving to ST(0) turns into FpSET_ST0_32 etc.
if (DestRC == &X86::RSTRegClass) {
- // Copying to ST(0). FIXME: handle ST(1) also
- assert(DestReg == X86::ST0 && "Can only copy to TOS right now");
+ // Copying to ST(0) / ST(1).
+ if (DestReg != X86::ST0 && DestReg != X86::ST1)
+      // Can only copy to ST(0)/ST(1) right now
+ return false;
+ bool isST0 = DestReg == X86::ST0;
unsigned Opc;
if (SrcRC == &X86::RFP32RegClass)
- Opc = X86::FpSET_ST0_32;
+ Opc = isST0 ? X86::FpSET_ST0_32 : X86::FpSET_ST1_32;
else if (SrcRC == &X86::RFP64RegClass)
- Opc = X86::FpSET_ST0_64;
+ Opc = isST0 ? X86::FpSET_ST0_64 : X86::FpSET_ST1_64;
else {
- assert(SrcRC == &X86::RFP80RegClass);
- Opc = X86::FpSET_ST0_80;
+ if (SrcRC != &X86::RFP80RegClass)
+ return false;
+ Opc = isST0 ? X86::FpSET_ST0_80 : X86::FpSET_ST1_80;
}
- BuildMI(MBB, MI, get(Opc)).addReg(SrcReg);
- return;
+ BuildMI(MBB, MI, DL, get(Opc)).addReg(SrcReg);
+ return true;
}
- assert(0 && "Not yet supported!");
- abort();
+ // Not yet supported!
+ return false;
}
-static unsigned getStoreRegOpcode(const TargetRegisterClass *RC,
- unsigned StackAlign) {
+static unsigned getStoreRegOpcode(unsigned SrcReg,
+ const TargetRegisterClass *RC,
+ bool isStackAligned,
+ TargetMachine &TM) {
unsigned Opc = 0;
if (RC == &X86::GR64RegClass) {
Opc = X86::MOV64mr;
} else if (RC == &X86::GR16RegClass) {
Opc = X86::MOV16mr;
} else if (RC == &X86::GR8RegClass) {
+ // Copying to or from a physical H register on x86-64 requires a NOREX
+ // move. Otherwise use a normal move.
+ if (isHReg(SrcReg) &&
+ TM.getSubtarget<X86Subtarget>().is64Bit())
+ Opc = X86::MOV8mr_NOREX;
+ else
+ Opc = X86::MOV8mr;
+ } else if (RC == &X86::GR64_ABCDRegClass) {
+ Opc = X86::MOV64mr;
+ } else if (RC == &X86::GR32_ABCDRegClass) {
+ Opc = X86::MOV32mr;
+ } else if (RC == &X86::GR16_ABCDRegClass) {
+ Opc = X86::MOV16mr;
+ } else if (RC == &X86::GR8_ABCD_LRegClass) {
+ Opc = X86::MOV8mr;
+ } else if (RC == &X86::GR8_ABCD_HRegClass) {
+ if (TM.getSubtarget<X86Subtarget>().is64Bit())
+ Opc = X86::MOV8mr_NOREX;
+ else
+ Opc = X86::MOV8mr;
+ } else if (RC == &X86::GR64_NOREXRegClass) {
+ Opc = X86::MOV64mr;
+ } else if (RC == &X86::GR32_NOREXRegClass) {
+ Opc = X86::MOV32mr;
+ } else if (RC == &X86::GR16_NOREXRegClass) {
+ Opc = X86::MOV16mr;
+ } else if (RC == &X86::GR8_NOREXRegClass) {
Opc = X86::MOV8mr;
- } else if (RC == &X86::GR32_RegClass) {
- Opc = X86::MOV32_mr;
- } else if (RC == &X86::GR16_RegClass) {
- Opc = X86::MOV16_mr;
} else if (RC == &X86::RFP80RegClass) {
Opc = X86::ST_FpP80m; // pops
} else if (RC == &X86::RFP64RegClass) {
} else if (RC == &X86::FR64RegClass) {
Opc = X86::MOVSDmr;
} else if (RC == &X86::VR128RegClass) {
- // FIXME: Use movaps once we are capable of selectively
- // aligning functions that spill SSE registers on 16-byte boundaries.
- Opc = StackAlign >= 16 ? X86::MOVAPSmr : X86::MOVUPSmr;
+ // If stack is realigned we can use aligned stores.
+ Opc = isStackAligned ? X86::MOVAPSmr : X86::MOVUPSmr;
} else if (RC == &X86::VR64RegClass) {
Opc = X86::MMX_MOVQ64mr;
} else {
MachineBasicBlock::iterator MI,
unsigned SrcReg, bool isKill, int FrameIdx,
const TargetRegisterClass *RC) const {
- unsigned Opc = getStoreRegOpcode(RC, RI.getStackAlignment());
- addFrameReference(BuildMI(MBB, MI, get(Opc)), FrameIdx)
- .addReg(SrcReg, false, false, isKill);
+ const MachineFunction &MF = *MBB.getParent();
+ bool isAligned = (RI.getStackAlignment() >= 16) ||
+ RI.needsStackRealignment(MF);
+ unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, TM);
+ DebugLoc DL = DebugLoc::getUnknownLoc();
+ if (MI != MBB.end()) DL = MI->getDebugLoc();
+ addFrameReference(BuildMI(MBB, MI, DL, get(Opc)), FrameIdx)
+ .addReg(SrcReg, false, false, isKill);
}
void X86InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
SmallVectorImpl<MachineOperand> &Addr,
const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs) const {
- unsigned Opc = getStoreRegOpcode(RC, RI.getStackAlignment());
- MachineInstrBuilder MIB = BuildMI(get(Opc));
+ bool isAligned = (RI.getStackAlignment() >= 16) ||
+ RI.needsStackRealignment(MF);
+ unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, TM);
+ DebugLoc DL = DebugLoc::getUnknownLoc();
+ MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc));
for (unsigned i = 0, e = Addr.size(); i != e; ++i)
- MIB = X86InstrAddOperand(MIB, Addr[i]);
+ MIB.addOperand(Addr[i]);
MIB.addReg(SrcReg, false, false, isKill);
NewMIs.push_back(MIB);
}
-static unsigned getLoadRegOpcode(const TargetRegisterClass *RC,
- unsigned StackAlign) {
+static unsigned getLoadRegOpcode(unsigned DestReg,
+ const TargetRegisterClass *RC,
+ bool isStackAligned,
+ const TargetMachine &TM) {
unsigned Opc = 0;
if (RC == &X86::GR64RegClass) {
Opc = X86::MOV64rm;
} else if (RC == &X86::GR16RegClass) {
Opc = X86::MOV16rm;
} else if (RC == &X86::GR8RegClass) {
+ // Copying to or from a physical H register on x86-64 requires a NOREX
+ // move. Otherwise use a normal move.
+ if (isHReg(DestReg) &&
+ TM.getSubtarget<X86Subtarget>().is64Bit())
+ Opc = X86::MOV8rm_NOREX;
+ else
+ Opc = X86::MOV8rm;
+ } else if (RC == &X86::GR64_ABCDRegClass) {
+ Opc = X86::MOV64rm;
+ } else if (RC == &X86::GR32_ABCDRegClass) {
+ Opc = X86::MOV32rm;
+ } else if (RC == &X86::GR16_ABCDRegClass) {
+ Opc = X86::MOV16rm;
+ } else if (RC == &X86::GR8_ABCD_LRegClass) {
+ Opc = X86::MOV8rm;
+ } else if (RC == &X86::GR8_ABCD_HRegClass) {
+ if (TM.getSubtarget<X86Subtarget>().is64Bit())
+ Opc = X86::MOV8rm_NOREX;
+ else
+ Opc = X86::MOV8rm;
+ } else if (RC == &X86::GR64_NOREXRegClass) {
+ Opc = X86::MOV64rm;
+ } else if (RC == &X86::GR32_NOREXRegClass) {
+ Opc = X86::MOV32rm;
+ } else if (RC == &X86::GR16_NOREXRegClass) {
+ Opc = X86::MOV16rm;
+ } else if (RC == &X86::GR8_NOREXRegClass) {
Opc = X86::MOV8rm;
- } else if (RC == &X86::GR32_RegClass) {
- Opc = X86::MOV32_rm;
- } else if (RC == &X86::GR16_RegClass) {
- Opc = X86::MOV16_rm;
} else if (RC == &X86::RFP80RegClass) {
Opc = X86::LD_Fp80m;
} else if (RC == &X86::RFP64RegClass) {
} else if (RC == &X86::FR64RegClass) {
Opc = X86::MOVSDrm;
} else if (RC == &X86::VR128RegClass) {
- // FIXME: Use movaps once we are capable of selectively
- // aligning functions that spill SSE registers on 16-byte boundaries.
- Opc = StackAlign >= 16 ? X86::MOVAPSrm : X86::MOVUPSrm;
+ // If stack is realigned we can use aligned loads.
+ Opc = isStackAligned ? X86::MOVAPSrm : X86::MOVUPSrm;
} else if (RC == &X86::VR64RegClass) {
Opc = X86::MMX_MOVQ64rm;
} else {
}
void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
- unsigned DestReg, int FrameIdx,
- const TargetRegisterClass *RC) const{
- unsigned Opc = getLoadRegOpcode(RC, RI.getStackAlignment());
- addFrameReference(BuildMI(MBB, MI, get(Opc), DestReg), FrameIdx);
+ MachineBasicBlock::iterator MI,
+ unsigned DestReg, int FrameIdx,
+ const TargetRegisterClass *RC) const{
+ const MachineFunction &MF = *MBB.getParent();
+ bool isAligned = (RI.getStackAlignment() >= 16) ||
+ RI.needsStackRealignment(MF);
+ unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, TM);
+ DebugLoc DL = DebugLoc::getUnknownLoc();
+ if (MI != MBB.end()) DL = MI->getDebugLoc();
+ addFrameReference(BuildMI(MBB, MI, DL, get(Opc), DestReg), FrameIdx);
}
void X86InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
- SmallVectorImpl<MachineOperand> &Addr,
- const TargetRegisterClass *RC,
+ SmallVectorImpl<MachineOperand> &Addr,
+ const TargetRegisterClass *RC,
SmallVectorImpl<MachineInstr*> &NewMIs) const {
- unsigned Opc = getLoadRegOpcode(RC, RI.getStackAlignment());
- MachineInstrBuilder MIB = BuildMI(get(Opc), DestReg);
+ bool isAligned = (RI.getStackAlignment() >= 16) ||
+ RI.needsStackRealignment(MF);
+ unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, TM);
+ DebugLoc DL = DebugLoc::getUnknownLoc();
+ MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg);
for (unsigned i = 0, e = Addr.size(); i != e; ++i)
- MIB = X86InstrAddOperand(MIB, Addr[i]);
+ MIB.addOperand(Addr[i]);
NewMIs.push_back(MIB);
}
bool X86InstrInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
+ MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI) const {
if (CSI.empty())
return false;
+ DebugLoc DL = DebugLoc::getUnknownLoc();
+ if (MI != MBB.end()) DL = MI->getDebugLoc();
+
bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();
unsigned SlotSize = is64Bit ? 8 : 4;
unsigned Reg = CSI[i-1].getReg();
// Add the callee-saved register as live-in. It's killed at the spill.
MBB.addLiveIn(Reg);
- BuildMI(MBB, MI, get(Opc)).addReg(Reg);
+ BuildMI(MBB, MI, DL, get(Opc))
+ .addReg(Reg, /*isDef=*/false, /*isImp=*/false, /*isKill=*/true);
}
return true;
}
bool X86InstrInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator MI,
+ MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI) const {
if (CSI.empty())
return false;
-
+
+ DebugLoc DL = DebugLoc::getUnknownLoc();
+ if (MI != MBB.end()) DL = MI->getDebugLoc();
+
bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();
unsigned Opc = is64Bit ? X86::POP64r : X86::POP32r;
for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
unsigned Reg = CSI[i].getReg();
- BuildMI(MBB, MI, get(Opc), Reg);
+ BuildMI(MBB, MI, DL, get(Opc), Reg);
}
return true;
}
-static MachineInstr *FuseTwoAddrInst(unsigned Opcode,
- SmallVector<MachineOperand,4> &MOs,
- MachineInstr *MI, const TargetInstrInfo &TII) {
+static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
+ const SmallVectorImpl<MachineOperand> &MOs,
+ MachineInstr *MI,
+ const TargetInstrInfo &TII) {
// Create the base instruction with the memory operand as the first part.
- MachineInstr *NewMI = new MachineInstr(TII.get(Opcode), true);
+ MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode),
+ MI->getDebugLoc(), true);
MachineInstrBuilder MIB(NewMI);
unsigned NumAddrOps = MOs.size();
for (unsigned i = 0; i != NumAddrOps; ++i)
- MIB = X86InstrAddOperand(MIB, MOs[i]);
+ MIB.addOperand(MOs[i]);
if (NumAddrOps < 4) // FrameIndex only
- MIB.addImm(1).addReg(0).addImm(0);
+ addOffset(MIB, 0);
// Loop over the rest of the ri operands, converting them over.
unsigned NumOps = MI->getDesc().getNumOperands()-2;
for (unsigned i = 0; i != NumOps; ++i) {
MachineOperand &MO = MI->getOperand(i+2);
- MIB = X86InstrAddOperand(MIB, MO);
+ MIB.addOperand(MO);
}
for (unsigned i = NumOps+2, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
- MIB = X86InstrAddOperand(MIB, MO);
+ MIB.addOperand(MO);
}
return MIB;
}
-static MachineInstr *FuseInst(unsigned Opcode, unsigned OpNo,
- SmallVector<MachineOperand,4> &MOs,
+static MachineInstr *FuseInst(MachineFunction &MF,
+ unsigned Opcode, unsigned OpNo,
+ const SmallVectorImpl<MachineOperand> &MOs,
MachineInstr *MI, const TargetInstrInfo &TII) {
- MachineInstr *NewMI = new MachineInstr(TII.get(Opcode), true);
+ MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode),
+ MI->getDebugLoc(), true);
MachineInstrBuilder MIB(NewMI);
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (i == OpNo) {
- assert(MO.isRegister() && "Expected to fold into reg operand!");
+ assert(MO.isReg() && "Expected to fold into reg operand!");
unsigned NumAddrOps = MOs.size();
for (unsigned i = 0; i != NumAddrOps; ++i)
- MIB = X86InstrAddOperand(MIB, MOs[i]);
+ MIB.addOperand(MOs[i]);
if (NumAddrOps < 4) // FrameIndex only
- MIB.addImm(1).addReg(0).addImm(0);
+ addOffset(MIB, 0);
} else {
- MIB = X86InstrAddOperand(MIB, MO);
+ MIB.addOperand(MO);
}
}
return MIB;
}
static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
- SmallVector<MachineOperand,4> &MOs,
+ const SmallVectorImpl<MachineOperand> &MOs,
MachineInstr *MI) {
- MachineInstrBuilder MIB = BuildMI(TII.get(Opcode));
+ MachineFunction &MF = *MI->getParent()->getParent();
+ MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), TII.get(Opcode));
unsigned NumAddrOps = MOs.size();
for (unsigned i = 0; i != NumAddrOps; ++i)
- MIB = X86InstrAddOperand(MIB, MOs[i]);
+ MIB.addOperand(MOs[i]);
if (NumAddrOps < 4) // FrameIndex only
- MIB.addImm(1).addReg(0).addImm(0);
+ addOffset(MIB, 0);
return MIB.addImm(0);
}
MachineInstr*
-X86InstrInfo::foldMemoryOperand(MachineInstr *MI, unsigned i,
- SmallVector<MachineOperand,4> &MOs) const {
+X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+ MachineInstr *MI, unsigned i,
+ const SmallVectorImpl<MachineOperand> &MOs) const{
const DenseMap<unsigned*, unsigned> *OpcodeTablePtr = NULL;
bool isTwoAddrFold = false;
unsigned NumOps = MI->getDesc().getNumOperands();
// instruction is different than folding it other places. It requires
// replacing the *two* registers with the memory location.
if (isTwoAddr && NumOps >= 2 && i < 2 &&
- MI->getOperand(0).isRegister() &&
- MI->getOperand(1).isRegister() &&
+ MI->getOperand(0).isReg() &&
+ MI->getOperand(1).isReg() &&
MI->getOperand(0).getReg() == MI->getOperand(1).getReg()) {
OpcodeTablePtr = &RegOp2MemOpTable2Addr;
isTwoAddrFold = true;
NewMI = MakeM0Inst(*this, X86::MOV64mi32, MOs, MI);
else if (MI->getOpcode() == X86::MOV8r0)
NewMI = MakeM0Inst(*this, X86::MOV8mi, MOs, MI);
- if (NewMI) {
- NewMI->copyKillDeadInfo(MI);
+ if (NewMI)
return NewMI;
- }
OpcodeTablePtr = &RegOp2MemOpTable0;
} else if (i == 1) {
OpcodeTablePtr->find((unsigned*)MI->getOpcode());
if (I != OpcodeTablePtr->end()) {
if (isTwoAddrFold)
- NewMI = FuseTwoAddrInst(I->second, MOs, MI, *this);
+ NewMI = FuseTwoAddrInst(MF, I->second, MOs, MI, *this);
else
- NewMI = FuseInst(I->second, i, MOs, MI, *this);
- NewMI->copyKillDeadInfo(MI);
+ NewMI = FuseInst(MF, I->second, i, MOs, MI, *this);
return NewMI;
}
}
// No fusion
if (PrintFailedFusing)
- cerr << "We failed to fuse operand " << i << *MI;
+ cerr << "We failed to fuse operand " << i << " in " << *MI;
return NULL;
}
-MachineInstr* X86InstrInfo::foldMemoryOperand(MachineFunction &MF,
- MachineInstr *MI,
- SmallVectorImpl<unsigned> &Ops,
- int FrameIndex) const {
+MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+ MachineInstr *MI,
+ const SmallVectorImpl<unsigned> &Ops,
+ int FrameIndex) const {
// Check switch flag
if (NoFusing) return NULL;
SmallVector<MachineOperand,4> MOs;
MOs.push_back(MachineOperand::CreateFI(FrameIndex));
- return foldMemoryOperand(MI, Ops[0], MOs);
+ return foldMemoryOperandImpl(MF, MI, Ops[0], MOs);
}
-MachineInstr* X86InstrInfo::foldMemoryOperand(MachineFunction &MF,
- MachineInstr *MI,
- SmallVectorImpl<unsigned> &Ops,
- MachineInstr *LoadMI) const {
+MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+ MachineInstr *MI,
+ const SmallVectorImpl<unsigned> &Ops,
+ MachineInstr *LoadMI) const {
// Check switch flag
if (NoFusing) return NULL;
+ // Determine the alignment of the load.
unsigned Alignment = 0;
- for (unsigned i = 0, e = LoadMI->getNumMemOperands(); i != e; ++i) {
- const MachineMemOperand &MRO = LoadMI->getMemOperand(i);
- unsigned Align = MRO.getAlignment();
- if (Align > Alignment)
- Alignment = Align;
- }
+ if (LoadMI->hasOneMemOperand())
+ Alignment = LoadMI->memoperands_begin()->getAlignment();
// FIXME: Move alignment requirement into tables?
if (Alignment < 16) {
} else if (Ops.size() != 1)
return NULL;
- SmallVector<MachineOperand,4> MOs;
- unsigned NumOps = LoadMI->getDesc().getNumOperands();
- for (unsigned i = NumOps - 4; i != NumOps; ++i)
- MOs.push_back(LoadMI->getOperand(i));
- return foldMemoryOperand(MI, Ops[0], MOs);
+ SmallVector<MachineOperand,X86AddrNumOperands> MOs;
+ if (LoadMI->getOpcode() == X86::V_SET0 ||
+ LoadMI->getOpcode() == X86::V_SETALLONES) {
+ // Folding a V_SET0 or V_SETALLONES as a load, to ease register pressure.
+ // Create a constant-pool entry and operands to load from it.
+
+ // x86-32 PIC requires a PIC base register for constant pools.
+ unsigned PICBase = 0;
+ if (TM.getRelocationModel() == Reloc::PIC_ &&
+ !TM.getSubtarget<X86Subtarget>().is64Bit())
+ // FIXME: PICBase = TM.getInstrInfo()->getGlobalBaseReg(&MF);
+ // This doesn't work for several reasons.
+ // 1. GlobalBaseReg may have been spilled.
+ // 2. It may not be live at MI.
+      return NULL;
+
+ // Create a v4i32 constant-pool entry.
+ MachineConstantPool &MCP = *MF.getConstantPool();
+ const VectorType *Ty = VectorType::get(Type::Int32Ty, 4);
+ Constant *C = LoadMI->getOpcode() == X86::V_SET0 ?
+ ConstantVector::getNullValue(Ty) :
+ ConstantVector::getAllOnesValue(Ty);
+ unsigned CPI = MCP.getConstantPoolIndex(C, 16);
+
+ // Create operands to load from the constant pool entry.
+ MOs.push_back(MachineOperand::CreateReg(PICBase, false));
+ MOs.push_back(MachineOperand::CreateImm(1));
+ MOs.push_back(MachineOperand::CreateReg(0, false));
+ MOs.push_back(MachineOperand::CreateCPI(CPI, 0));
+ MOs.push_back(MachineOperand::CreateReg(0, false));
+ } else {
+ // Folding a normal load. Just copy the load's address operands.
+ unsigned NumOps = LoadMI->getDesc().getNumOperands();
+ for (unsigned i = NumOps - X86AddrNumOperands; i != NumOps; ++i)
+ MOs.push_back(LoadMI->getOperand(i));
+ }
+ return foldMemoryOperandImpl(MF, MI, Ops[0], MOs);
}
-bool X86InstrInfo::canFoldMemoryOperand(MachineInstr *MI,
- SmallVectorImpl<unsigned> &Ops) const {
+bool X86InstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
+ const SmallVectorImpl<unsigned> &Ops) const {
// Check switch flag
if (NoFusing) return false;
bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
- SmallVectorImpl<MachineInstr*> &NewMIs) const {
+ SmallVectorImpl<MachineInstr*> &NewMIs) const {
DenseMap<unsigned*, std::pair<unsigned,unsigned> >::iterator I =
MemOp2RegOpTable.find((unsigned*)MI->getOpcode());
if (I == MemOp2RegOpTable.end())
return false;
+ DebugLoc dl = MI->getDebugLoc();
unsigned Opc = I->second.first;
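+ // The table's AuxInfo packs the folded memory-operand index in its low
+ // four bits; bit 4 marks a folded load and bit 5 a folded store.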
unsigned Index = I->second.second & 0xf;
bool FoldedLoad = I->second.second & (1 << 4);
bool FoldedStore = I->second.second & (1 << 5);
const TargetInstrDesc &TID = get(Opc);
const TargetOperandInfo &TOI = TID.OpInfo[Index];
const TargetRegisterClass *RC = TOI.isLookupPtrRegClass()
- ? getPointerRegClass() : RI.getRegClass(TOI.RegClass);
- SmallVector<MachineOperand,4> AddrOps;
+ ? RI.getPointerRegClass() : RI.getRegClass(TOI.RegClass);
+ SmallVector<MachineOperand, X86AddrNumOperands> AddrOps;
SmallVector<MachineOperand,2> BeforeOps;
SmallVector<MachineOperand,2> AfterOps;
SmallVector<MachineOperand,4> ImpOps;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &Op = MI->getOperand(i);
- if (i >= Index && i < Index+4)
+ if (i >= Index && i < Index + X86AddrNumOperands)
AddrOps.push_back(Op);
- else if (Op.isRegister() && Op.isImplicit())
+ else if (Op.isReg() && Op.isImplicit())
ImpOps.push_back(Op);
else if (i < Index)
BeforeOps.push_back(Op);
loadRegFromAddr(MF, Reg, AddrOps, RC, NewMIs);
if (UnfoldStore) {
// Address operands cannot be marked isKill.
- for (unsigned i = 1; i != 5; ++i) {
+ for (unsigned i = 1; i != 1 + X86AddrNumOperands; ++i) {
MachineOperand &MO = NewMIs[0]->getOperand(i);
- if (MO.isRegister())
+ if (MO.isReg())
MO.setIsKill(false);
}
}
}
// Emit the data processing instruction.
- MachineInstr *DataMI = new MachineInstr(TID, true);
+ MachineInstr *DataMI = MF.CreateMachineInstr(TID, dl, true);
MachineInstrBuilder MIB(DataMI);
if (FoldedStore)
MIB.addReg(Reg, true);
for (unsigned i = 0, e = BeforeOps.size(); i != e; ++i)
- MIB = X86InstrAddOperand(MIB, BeforeOps[i]);
+ MIB.addOperand(BeforeOps[i]);
if (FoldedLoad)
MIB.addReg(Reg);
for (unsigned i = 0, e = AfterOps.size(); i != e; ++i)
- MIB = X86InstrAddOperand(MIB, AfterOps[i]);
+ MIB.addOperand(AfterOps[i]);
for (unsigned i = 0, e = ImpOps.size(); i != e; ++i) {
MachineOperand &MO = ImpOps[i];
MIB.addReg(MO.getReg(), MO.isDef(), true, MO.isKill(), MO.isDead());
if (UnfoldStore) {
const TargetOperandInfo &DstTOI = TID.OpInfo[0];
const TargetRegisterClass *DstRC = DstTOI.isLookupPtrRegClass()
- ? getPointerRegClass() : RI.getRegClass(DstTOI.RegClass);
+ ? RI.getPointerRegClass() : RI.getRegClass(DstTOI.RegClass);
storeRegToAddr(MF, Reg, true, AddrOps, DstRC, NewMIs);
}
bool
X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
- SmallVectorImpl<SDNode*> &NewNodes) const {
- if (!N->isTargetOpcode())
+ SmallVectorImpl<SDNode*> &NewNodes) const {
+ if (!N->isMachineOpcode())
return false;
DenseMap<unsigned*, std::pair<unsigned,unsigned> >::iterator I =
- MemOp2RegOpTable.find((unsigned*)N->getTargetOpcode());
+ MemOp2RegOpTable.find((unsigned*)N->getMachineOpcode());
if (I == MemOp2RegOpTable.end())
return false;
unsigned Opc = I->second.first;
const TargetInstrDesc &TID = get(Opc);
unsigned Index = I->second.second & 0xf;
bool FoldedLoad = I->second.second & (1 << 4);
bool FoldedStore = I->second.second & (1 << 5);
const TargetOperandInfo &TOI = TID.OpInfo[Index];
const TargetRegisterClass *RC = TOI.isLookupPtrRegClass()
- ? getPointerRegClass() : RI.getRegClass(TOI.RegClass);
- std::vector<SDOperand> AddrOps;
- std::vector<SDOperand> BeforeOps;
- std::vector<SDOperand> AfterOps;
+ ? RI.getPointerRegClass() : RI.getRegClass(TOI.RegClass);
+ unsigned NumDefs = TID.NumDefs;
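+ // Index counts MachineInstr operands, which include the defs; SDNode
+ // operands do not, so it is offset by NumDefs below.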
+ std::vector<SDValue> AddrOps;
+ std::vector<SDValue> BeforeOps;
+ std::vector<SDValue> AfterOps;
+ DebugLoc dl = N->getDebugLoc();
unsigned NumOps = N->getNumOperands();
for (unsigned i = 0; i != NumOps-1; ++i) {
- SDOperand Op = N->getOperand(i);
- if (i >= Index && i < Index+4)
+ SDValue Op = N->getOperand(i);
+ if (i >= Index-NumDefs && i < Index-NumDefs + X86AddrNumOperands)
AddrOps.push_back(Op);
- else if (i < Index)
+ else if (i < Index-NumDefs)
BeforeOps.push_back(Op);
- else if (i > Index)
+ else if (i > Index-NumDefs)
AfterOps.push_back(Op);
}
- SDOperand Chain = N->getOperand(NumOps-1);
+ SDValue Chain = N->getOperand(NumOps-1);
AddrOps.push_back(Chain);
// Emit the load instruction.
SDNode *Load = 0;
+ const MachineFunction &MF = DAG.getMachineFunction();
if (FoldedLoad) {
MVT VT = *RC->vt_begin();
- Load = DAG.getTargetNode(getLoadRegOpcode(RC, RI.getStackAlignment()), VT,
- MVT::Other, &AddrOps[0], AddrOps.size());
+ bool isAligned = (RI.getStackAlignment() >= 16) ||
+ RI.needsStackRealignment(MF);
+ Load = DAG.getTargetNode(getLoadRegOpcode(0, RC, isAligned, TM), dl,
+ VT, MVT::Other, &AddrOps[0], AddrOps.size());
NewNodes.push_back(Load);
}
// Emit the data processing instruction.
std::vector<MVT> VTs;
const TargetRegisterClass *DstRC = 0;
if (TID.getNumDefs() > 0) {
const TargetOperandInfo &DstTOI = TID.OpInfo[0];
DstRC = DstTOI.isLookupPtrRegClass()
- ? getPointerRegClass() : RI.getRegClass(DstTOI.RegClass);
+ ? RI.getPointerRegClass() : RI.getRegClass(DstTOI.RegClass);
VTs.push_back(*DstRC->vt_begin());
}
for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
MVT VT = N->getValueType(i);
if (VT != MVT::Other && i >= (unsigned)TID.getNumDefs())
VTs.push_back(VT);
}
if (Load)
- BeforeOps.push_back(SDOperand(Load, 0));
+ BeforeOps.push_back(SDValue(Load, 0));
std::copy(AfterOps.begin(), AfterOps.end(), std::back_inserter(BeforeOps));
- SDNode *NewNode= DAG.getTargetNode(Opc, VTs, &BeforeOps[0], BeforeOps.size());
+ SDNode *NewNode= DAG.getTargetNode(Opc, dl, VTs, &BeforeOps[0],
+ BeforeOps.size());
NewNodes.push_back(NewNode);
// Emit the store instruction.
if (FoldedStore) {
AddrOps.pop_back();
- AddrOps.push_back(SDOperand(NewNode, 0));
+ AddrOps.push_back(SDValue(NewNode, 0));
AddrOps.push_back(Chain);
- SDNode *Store = DAG.getTargetNode(getStoreRegOpcode(DstRC, RI.getStackAlignment()),
- MVT::Other, &AddrOps[0], AddrOps.size());
+ bool isAligned = (RI.getStackAlignment() >= 16) ||
+ RI.needsStackRealignment(MF);
+ SDNode *Store = DAG.getTargetNode(getStoreRegOpcode(0, DstRC,
+ isAligned, TM),
+ dl, MVT::Other,
+ &AddrOps[0], AddrOps.size());
NewNodes.push_back(Store);
}
return true;
}
-bool X86InstrInfo::BlockHasNoFallThrough(MachineBasicBlock &MBB) const {
+bool X86InstrInfo::BlockHasNoFallThrough(const MachineBasicBlock &MBB) const {
if (MBB.empty()) return false;
switch (MBB.back().getOpcode()) {
}
bool X86InstrInfo::
-ReverseBranchCondition(std::vector<MachineOperand> &Cond) const {
+ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
assert(Cond.size() == 1 && "Invalid X86 branch condition!");
- Cond[0].setImm(GetOppositeBranchCondition((X86::CondCode)Cond[0].getImm()));
+ X86::CondCode CC = static_cast<X86::CondCode>(Cond[0].getImm());
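+ // The composite FP conditions lower to two branches; no single condition
+ // code is their opposite, so report that they cannot be reversed.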
+ if (CC == X86::COND_NE_OR_P || CC == X86::COND_NP_OR_E)
+ return true;
+ Cond[0].setImm(GetOppositeBranchCondition(CC));
return false;
}
-const TargetRegisterClass *X86InstrInfo::getPointerRegClass() const {
- const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
- if (Subtarget->is64Bit())
- return &X86::GR64RegClass;
- else
- return &X86::GR32RegClass;
+bool X86InstrInfo::
+isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
+ // FIXME: Return false for x87 stack register classes for now. We can't
+ // allow any loads of these registers before FpGet_ST0_80.
+ return !(RC == &X86::CCRRegClass || RC == &X86::RFP32RegClass ||
+ RC == &X86::RFP64RegClass || RC == &X86::RFP80RegClass);
}
unsigned X86InstrInfo::sizeOfImm(const TargetInstrDesc *Desc) {
/// isX86_64ExtendedReg - Is the MachineOperand a x86-64 extended register?
/// e.g. r8, xmm8, etc.
bool X86InstrInfo::isX86_64ExtendedReg(const MachineOperand &MO) {
- if (!MO.isRegister()) return false;
+ if (!MO.isReg()) return false;
switch (MO.getReg()) {
default: break;
case X86::R8: case X86::R9: case X86::R10: case X86::R11:
unsigned i = isTwoAddr ? 1 : 0;
for (unsigned e = NumOps; i != e; ++i) {
const MachineOperand& MO = MI.getOperand(i);
- if (MO.isRegister()) {
+ if (MO.isReg()) {
unsigned Reg = MO.getReg();
if (isX86_64NonExtLowByteReg(Reg))
REX |= 0x40;
i = isTwoAddr ? 2 : 1;
for (; i != NumOps; ++i) {
const MachineOperand& MO = MI.getOperand(i);
- if (MO.isRegister()) {
+ if (MO.isReg()) {
if (isX86_64ExtendedReg(MO))
REX |= 1 << Bit;
Bit++;
case X86II::MRM4m: case X86II::MRM5m:
case X86II::MRM6m: case X86II::MRM7m:
case X86II::MRMDestMem: {
- unsigned e = isTwoAddr ? 5 : 4;
+ unsigned e = (isTwoAddr ? X86AddrNumOperands+1 : X86AddrNumOperands);
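+ // When the instruction is two-address, the memory reference begins after
+ // the tied destination register.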
i = isTwoAddr ? 1 : 0;
if (NumOps > e && isX86_64ExtendedReg(MI.getOperand(e)))
REX |= 1 << 2;
unsigned Bit = 0;
for (; i != e; ++i) {
const MachineOperand& MO = MI.getOperand(i);
- if (MO.isRegister()) {
+ if (MO.isReg()) {
if (isX86_64ExtendedReg(MO))
REX |= 1 << Bit;
Bit++;
}
// Otherwise, this is something that requires a relocation.
- if (RelocOp->isGlobalAddress()) {
+ if (RelocOp->isGlobal()) {
FinalSize += sizeGlobalAddress(false);
- } else if (RelocOp->isConstantPoolIndex()) {
+ } else if (RelocOp->isCPI()) {
FinalSize += sizeConstPoolAddress(false);
- } else if (RelocOp->isJumpTableIndex()) {
+ } else if (RelocOp->isJTI()) {
FinalSize += sizeJumpTableAddress(false);
} else {
assert(0 && "Unknown value to relocate!");
unsigned FinalSize = 0;
// Figure out what sort of displacement we have to handle here.
- if (Op3.isGlobalAddress()) {
+ if (Op3.isGlobal()) {
DispForReloc = &Op3;
- } else if (Op3.isConstantPoolIndex()) {
+ } else if (Op3.isCPI()) {
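+ // Constant-pool references need a relocation when they are RIP- or
+ // PIC-base-relative (64-bit or PIC code); otherwise they are sized as
+ // plain displacements.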
if (Is64BitMode || IsPIC) {
DispForReloc = &Op3;
} else {
DispVal = 1;
}
- } else if (Op3.isJumpTableIndex()) {
+ } else if (Op3.isJTI()) {
if (Is64BitMode || IsPIC) {
DispForReloc = &Op3;
} else {
unsigned BaseReg = Base.getReg();
// Is a SIB byte needed?
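+ // In 64-bit mode, [disp32] with no base or index still needs a SIB byte,
+ // because ModRM mod=00/rm=101 there encodes a RIP-relative address.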
- if (IndexReg.getReg() == 0 &&
- (BaseReg == 0 || X86RegisterInfo::getX86RegNum(BaseReg) != N86::ESP)) {
+ if (IndexReg.getReg() == 0 && (!Is64BitMode || BaseReg != 0) &&
+ (BaseReg == 0 || X86RegisterInfo::getX86RegNum(BaseReg) != N86::ESP)) {
if (BaseReg == 0) { // Just a displacement?
// Emit special case [disp32] encoding
++FinalSize;
// Emit the lock opcode prefix as needed.
if (Desc->TSFlags & X86II::LOCK) ++FinalSize;
+ // Emit the segment override opcode prefix as needed.
+ switch (Desc->TSFlags & X86II::SegOvrMask) {
+ case X86II::FS:
+ case X86II::GS:
+ ++FinalSize;
+ break;
+ default: assert(0 && "Invalid segment!");
+ case 0: break; // No segment override!
+ }
+
// Emit the repeat opcode prefix as needed.
if ((Desc->TSFlags & X86II::Op0Mask) == X86II::REP) ++FinalSize;
unsigned CurOp = 0;
if (NumOps > 1 && Desc->getOperandConstraint(1, TOI::TIED_TO) != -1)
CurOp++;
+ else if (NumOps > 2 && Desc->getOperandConstraint(NumOps-1, TOI::TIED_TO) == 0)
+ // Skip the last source operand that is tied to the dest reg, e.g. LXADD32.
+ --NumOps;
switch (Desc->TSFlags & X86II::FormMask) {
default: assert(0 && "Unknown FormMask value in X86 MachineCodeEmitter!");
if (CurOp != NumOps) {
const MachineOperand &MO = MI.getOperand(CurOp++);
- if (MO.isMachineBasicBlock()) {
+ if (MO.isMBB()) {
FinalSize += sizePCRelativeBlockAddress();
- } else if (MO.isGlobalAddress()) {
+ } else if (MO.isGlobal()) {
FinalSize += sizeGlobalAddress(false);
- } else if (MO.isExternalSymbol()) {
+ } else if (MO.isSymbol()) {
FinalSize += sizeExternalSymbolAddress(false);
- } else if (MO.isImmediate()) {
+ } else if (MO.isImm()) {
FinalSize += sizeConstant(X86InstrInfo::sizeOfImm(Desc));
} else {
assert(0 && "Unknown RawFrm operand!");
if (CurOp != NumOps) {
const MachineOperand &MO1 = MI.getOperand(CurOp++);
unsigned Size = X86InstrInfo::sizeOfImm(Desc);
- if (MO1.isImmediate())
+ if (MO1.isImm())
FinalSize += sizeConstant(Size);
else {
bool dword = false;
if (Opcode == X86::MOV64ri)
dword = true;
- if (MO1.isGlobalAddress()) {
+ if (MO1.isGlobal()) {
FinalSize += sizeGlobalAddress(dword);
- } else if (MO1.isExternalSymbol())
+ } else if (MO1.isSymbol())
FinalSize += sizeExternalSymbolAddress(dword);
- else if (MO1.isConstantPoolIndex())
+ else if (MO1.isCPI())
FinalSize += sizeConstPoolAddress(dword);
- else if (MO1.isJumpTableIndex())
+ else if (MO1.isJTI())
FinalSize += sizeJumpTableAddress(dword);
}
}
case X86II::MRMDestMem: {
++FinalSize;
FinalSize += getMemModRMByteSize(MI, CurOp, IsPIC, Is64BitMode);
- CurOp += 5;
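+ // The stored register follows the address operands; skip both.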
+ CurOp += X86AddrNumOperands + 1;
if (CurOp != NumOps) {
++CurOp;
FinalSize += sizeConstant(X86InstrInfo::sizeOfImm(Desc));
break;
case X86II::MRMSrcMem: {
+ int AddrOperands;
+ if (Opcode == X86::LEA64r || Opcode == X86::LEA64_32r ||
+ Opcode == X86::LEA16r || Opcode == X86::LEA32r)
+ AddrOperands = X86AddrNumOperands - 1; // No segment register
+ else
+ AddrOperands = X86AddrNumOperands;
++FinalSize;
FinalSize += getMemModRMByteSize(MI, CurOp+1, IsPIC, Is64BitMode);
- CurOp += 5;
+ CurOp += AddrOperands + 1;
if (CurOp != NumOps) {
++CurOp;
FinalSize += sizeConstant(X86InstrInfo::sizeOfImm(Desc));
case X86II::MRM4r: case X86II::MRM5r:
case X86II::MRM6r: case X86II::MRM7r:
++FinalSize;
- ++CurOp;
- FinalSize += sizeRegModRMByte();
+ // Special handling of lfence and mfence.
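+ // They take no register operand, so CurOp is not advanced for them.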
+ if (Desc->getOpcode() == X86::LFENCE ||
+ Desc->getOpcode() == X86::MFENCE)
+ FinalSize += sizeRegModRMByte();
+ else {
+ ++CurOp;
+ FinalSize += sizeRegModRMByte();
+ }
if (CurOp != NumOps) {
const MachineOperand &MO1 = MI.getOperand(CurOp++);
unsigned Size = X86InstrInfo::sizeOfImm(Desc);
- if (MO1.isImmediate())
+ if (MO1.isImm())
FinalSize += sizeConstant(Size);
else {
bool dword = false;
if (Opcode == X86::MOV64ri32)
dword = true;
- if (MO1.isGlobalAddress()) {
+ if (MO1.isGlobal()) {
FinalSize += sizeGlobalAddress(dword);
- } else if (MO1.isExternalSymbol())
+ } else if (MO1.isSymbol())
FinalSize += sizeExternalSymbolAddress(dword);
- else if (MO1.isConstantPoolIndex())
+ else if (MO1.isCPI())
FinalSize += sizeConstPoolAddress(dword);
- else if (MO1.isJumpTableIndex())
+ else if (MO1.isJTI())
FinalSize += sizeJumpTableAddress(dword);
}
}
++FinalSize;
FinalSize += getMemModRMByteSize(MI, CurOp, IsPIC, Is64BitMode);
- CurOp += 4;
+ CurOp += X86AddrNumOperands;
if (CurOp != NumOps) {
const MachineOperand &MO = MI.getOperand(CurOp++);
unsigned Size = X86InstrInfo::sizeOfImm(Desc);
- if (MO.isImmediate())
+ if (MO.isImm())
FinalSize += sizeConstant(Size);
else {
bool dword = false;
if (Opcode == X86::MOV64mi32)
dword = true;
- if (MO.isGlobalAddress()) {
+ if (MO.isGlobal()) {
FinalSize += sizeGlobalAddress(dword);
- } else if (MO.isExternalSymbol())
+ } else if (MO.isSymbol())
FinalSize += sizeExternalSymbolAddress(dword);
- else if (MO.isConstantPoolIndex())
+ else if (MO.isCPI())
FinalSize += sizeConstPoolAddress(dword);
- else if (MO.isJumpTableIndex())
+ else if (MO.isJTI())
FinalSize += sizeJumpTableAddress(dword);
}
}
}
return Size;
}
+
+/// getGlobalBaseReg - Return a virtual register initialized with the
+/// global base register value. Output instructions required to
+/// initialize the register in the function entry block, if necessary.
+///
+unsigned X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
+ assert(!TM.getSubtarget<X86Subtarget>().is64Bit() &&
+ "X86-64 PIC uses RIP relative addressing");
+
+ X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
+ unsigned GlobalBaseReg = X86FI->getGlobalBaseReg();
+ if (GlobalBaseReg != 0)
+ return GlobalBaseReg;
+
+ // Insert the instruction that sets GlobalBaseReg into the first MBB of the function.
+ MachineBasicBlock &FirstMBB = MF->front();
+ MachineBasicBlock::iterator MBBI = FirstMBB.begin();
+ DebugLoc DL = DebugLoc::getUnknownLoc();
+ if (MBBI != FirstMBB.end()) DL = MBBI->getDebugLoc();
+ MachineRegisterInfo &RegInfo = MF->getRegInfo();
+ unsigned PC = RegInfo.createVirtualRegister(X86::GR32RegisterClass);
+
+ const TargetInstrInfo *TII = TM.getInstrInfo();
+ // The operand of MovePCtoStack is completely ignored by the asm printer;
+ // it's only used in JIT code emission as a displacement to the pc.
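+ // MOVPC32r expands to a call to the next instruction followed by a pop,
+ // which leaves the pc in the destination register.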
+ BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVPC32r), PC)
+ .addImm(0);
+
+ // If we're using vanilla 'GOT' PIC style, we should use relative addressing
+ // not to pc, but to the _GLOBAL_OFFSET_TABLE_ external symbol.
+ if (TM.getRelocationModel() == Reloc::PIC_ &&
+ TM.getSubtarget<X86Subtarget>().isPICStyleGOT()) {
+ GlobalBaseReg =
+ RegInfo.createVirtualRegister(X86::GR32RegisterClass);
+ BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD32ri), GlobalBaseReg)
+ .addReg(PC).addExternalSymbol("_GLOBAL_OFFSET_TABLE_");
+ } else {
+ GlobalBaseReg = PC;
+ }
+
+ X86FI->setGlobalBaseReg(GlobalBaseReg);
+ return GlobalBaseReg;
+}