/// Target-dependent implementation for foldMemoryOperand.
/// Target-independent code in foldMemoryOperand will
/// take care of adding a MachineMemOperand to the newly created instruction.
- virtual MachineInstr *foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr *MI,
- ArrayRef<unsigned> Ops,
- int FrameIndex) const {
+ /// The instruction and any auxiliary instructions necessary will be inserted
+ /// at InsertPt.
+ virtual MachineInstr *foldMemoryOperandImpl(
+ MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
return nullptr;
}
/// Target-dependent implementation for foldMemoryOperand.
/// Target-independent code in foldMemoryOperand will
/// take care of adding a MachineMemOperand to the newly created instruction.
- virtual MachineInstr *foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr *MI,
- ArrayRef<unsigned> Ops,
- MachineInstr *LoadMI) const {
+ /// The instruction and any auxiliary instructions necessary will be inserted
+ /// at InsertPt.
+ virtual MachineInstr *foldMemoryOperandImpl(
+ MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI) const {
return nullptr;
}
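For illustration, a minimal target override under the new contract could look like the sketch below. MyTarget and its LDri opcode are hypothetical names; the pattern of building the replacement with the BuildMI overload that inserts at InsertPt mirrors the SystemZ changes later in this patch.

  MachineInstr *MyTargetInstrInfo::foldMemoryOperandImpl(
      MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
      MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
    // Handle only the simple case of refilling a single defined register.
    if (Ops.size() != 1 || Ops[0] != 0)
      return nullptr;
    unsigned Dest = MI->getOperand(0).getReg();
    // This BuildMI overload inserts the new instruction into InsertPt's
    // block, so the caller no longer has to insert it.
    return BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
                   get(MyTarget::LDri), Dest) // hypothetical load opcode
        .addFrameIndex(FrameIndex)
        .addImm(0);
  }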
MI->getOpcode() == TargetOpcode::PATCHPOINT) {
// Fold stackmap/patchpoint.
NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
+ if (NewMI)
+ MBB->insert(MI, NewMI);
} else {
// Ask the target to do the actual folding.
- NewMI = foldMemoryOperandImpl(MF, MI, Ops, FI);
+ NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI);
}
-
+
if (NewMI) {
NewMI->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
// Add a memory operand, foldMemoryOperandImpl doesn't do that.
MFI.getObjectAlignment(FI));
NewMI->addMemOperand(MF, MMO);
- // FIXME: change foldMemoryOperandImpl semantics to also insert NewMI.
- return MBB->insert(MI, NewMI);
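+    // NewMI is already in the block at this point: inserted explicitly above
+    // for stackmaps/patchpoints, or by the target's foldMemoryOperandImpl.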
+ return NewMI;
}
// Straight COPY may fold as load/store.
isLoadFromStackSlot(LoadMI, FrameIndex)) {
// Fold stackmap/patchpoint.
NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
+ if (NewMI)
+ NewMI = MBB.insert(MI, NewMI);
} else {
// Ask the target to do the actual folding.
- NewMI = foldMemoryOperandImpl(MF, MI, Ops, LoadMI);
+ NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI);
}
if (!NewMI) return nullptr;
- NewMI = MBB.insert(MI, NewMI);
-
// Copy the memoperands from the load to the folded instruction.
if (MI->memoperands_empty()) {
NewMI->setMemRefs(LoadMI->memoperands_begin(),
.setMIFlag(Flag);
}
-MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr *MI,
- ArrayRef<unsigned> Ops,
- int FrameIndex) const {
+MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
+ MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
// This is a bit of a hack. Consider this instruction:
//
// %vreg0<def> = COPY %SP; GPR64all:%vreg0
using TargetInstrInfo::foldMemoryOperandImpl;
MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt,
int FrameIndex) const override;
bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
return false;
}
-MachineInstr *HexagonInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr *MI,
- ArrayRef<unsigned> Ops,
- int FI) const {
+MachineInstr *HexagonInstrInfo::foldMemoryOperandImpl(
+ MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt, int FI) const {
// Hexagon_TODO: Implement.
return nullptr;
}
MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt,
int FrameIndex) const override;
MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt,
MachineInstr *LoadMI) const override {
return nullptr;
}
return true;
}
-MachineInstr *AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr *MI,
- ArrayRef<unsigned> Ops,
- int FrameIndex) const {
+MachineInstr *AMDGPUInstrInfo::foldMemoryOperandImpl(
+ MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
// TODO: Implement this function
return nullptr;
}
-MachineInstr *
-AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
- ArrayRef<unsigned> Ops,
- MachineInstr *LoadMI) const {
+MachineInstr *AMDGPUInstrInfo::foldMemoryOperandImpl(
+ MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI) const {
// TODO: Implement this function
return nullptr;
}
protected:
MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt,
int FrameIndex) const override;
MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt,
MachineInstr *LoadMI) const override;
public:
return nullptr;
}
-MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr *MI,
- ArrayRef<unsigned> Ops,
- int FrameIndex) const {
+MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
+ MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
const MachineFrameInfo *MFI = MF.getFrameInfo();
unsigned Size = MFI->getObjectSize(FrameIndex);
unsigned Opcode = MI->getOpcode();
isInt<8>(MI->getOperand(2).getImm()) &&
!MI->getOperand(3).getReg()) {
// LA(Y) %reg, CONST(%reg) -> AGSI %mem, CONST
- return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::AGSI))
- .addFrameIndex(FrameIndex).addImm(0)
- .addImm(MI->getOperand(2).getImm());
+ return BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
+ get(SystemZ::AGSI))
+ .addFrameIndex(FrameIndex)
+ .addImm(0)
+ .addImm(MI->getOperand(2).getImm());
}
return nullptr;
}
isInt<8>(MI->getOperand(2).getImm())) {
// A(G)HI %reg, CONST -> A(G)SI %mem, CONST
Opcode = (Opcode == SystemZ::AHI ? SystemZ::ASI : SystemZ::AGSI);
- return BuildMI(MF, MI->getDebugLoc(), get(Opcode))
- .addFrameIndex(FrameIndex).addImm(0)
- .addImm(MI->getOperand(2).getImm());
+ return BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
+ get(Opcode))
+ .addFrameIndex(FrameIndex)
+ .addImm(0)
+ .addImm(MI->getOperand(2).getImm());
}
if (Opcode == SystemZ::LGDR || Opcode == SystemZ::LDGR) {
// source register instead.
if (OpNum == 0) {
unsigned StoreOpcode = Op1IsGPR ? SystemZ::STG : SystemZ::STD;
- return BuildMI(MF, MI->getDebugLoc(), get(StoreOpcode))
- .addOperand(MI->getOperand(1)).addFrameIndex(FrameIndex)
- .addImm(0).addReg(0);
+ return BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
+ get(StoreOpcode))
+ .addOperand(MI->getOperand(1))
+ .addFrameIndex(FrameIndex)
+ .addImm(0)
+ .addReg(0);
}
// If we're spilling the source of an LDGR or LGDR, load the
// destination register instead.
if (OpNum == 1) {
unsigned LoadOpcode = Op0IsGPR ? SystemZ::LG : SystemZ::LD;
unsigned Dest = MI->getOperand(0).getReg();
- return BuildMI(MF, MI->getDebugLoc(), get(LoadOpcode), Dest)
- .addFrameIndex(FrameIndex).addImm(0).addReg(0);
+ return BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
+ get(LoadOpcode), Dest)
+ .addFrameIndex(FrameIndex)
+ .addImm(0)
+ .addReg(0);
}
}
if (MMO->getSize() == Size && !MMO->isVolatile()) {
// Handle conversion of loads.
if (isSimpleBD12Move(MI, SystemZII::SimpleBDXLoad)) {
- return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::MVC))
- .addFrameIndex(FrameIndex).addImm(0).addImm(Size)
- .addOperand(MI->getOperand(1)).addImm(MI->getOperand(2).getImm())
- .addMemOperand(MMO);
+ return BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
+ get(SystemZ::MVC))
+ .addFrameIndex(FrameIndex)
+ .addImm(0)
+ .addImm(Size)
+ .addOperand(MI->getOperand(1))
+ .addImm(MI->getOperand(2).getImm())
+ .addMemOperand(MMO);
}
// Handle conversion of stores.
if (isSimpleBD12Move(MI, SystemZII::SimpleBDXStore)) {
- return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::MVC))
- .addOperand(MI->getOperand(1)).addImm(MI->getOperand(2).getImm())
- .addImm(Size).addFrameIndex(FrameIndex).addImm(0)
- .addMemOperand(MMO);
+ return BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
+ get(SystemZ::MVC))
+ .addOperand(MI->getOperand(1))
+ .addImm(MI->getOperand(2).getImm())
+ .addImm(Size)
+ .addFrameIndex(FrameIndex)
+ .addImm(0)
+ .addMemOperand(MMO);
}
}
}
assert(AccessBytes != 0 && "Size of access should be known");
assert(AccessBytes <= Size && "Access outside the frame index");
uint64_t Offset = Size - AccessBytes;
- MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(MemOpcode));
+ MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt,
+ MI->getDebugLoc(), get(MemOpcode));
for (unsigned I = 0; I < OpNum; ++I)
MIB.addOperand(MI->getOperand(I));
MIB.addFrameIndex(FrameIndex).addImm(Offset);
return nullptr;
}
-MachineInstr *
-SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
- ArrayRef<unsigned> Ops,
- MachineInstr *LoadMI) const {
+MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
+ MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI) const {
return nullptr;
}
LiveVariables *LV) const override;
MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt,
int FrameIndex) const override;
MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt,
MachineInstr *LoadMI) const override;
bool expandPostRAPseudo(MachineBasicBlock::iterator MBBI) const override;
bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const
SmallVector<MachineOperand, 8> AddrOps;
AM.getFullAddress(AddrOps);
- MachineInstr *Result =
- XII.foldMemoryOperandImpl(*FuncInfo.MF, MI, OpNo, AddrOps,
- Size, Alignment, /*AllowCommute=*/true);
+ MachineInstr *Result = XII.foldMemoryOperandImpl(
+ *FuncInfo.MF, MI, OpNo, AddrOps, FuncInfo.InsertPt, Size, Alignment,
+ /*AllowCommute=*/true);
if (!Result)
return false;
}
Result->addMemOperand(*FuncInfo.MF, createMachineMemOperandFor(LI));
- FuncInfo.MBB->insert(FuncInfo.InsertPt, Result);
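+  // Result was already inserted at FuncInfo.InsertPt by foldMemoryOperandImpl.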
MI->eraseFromParent();
return true;
}
return false;
}
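+// Add the address operands in MOs to MIB. MOs holds either a full X86
+// address or just a FrameIndex; in the FrameIndex-only case, addOffset
+// pads out the remaining address components with a zero displacement.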
+static void addOperands(MachineInstrBuilder &MIB,
+                        ArrayRef<MachineOperand> MOs) {
+ unsigned NumAddrOps = MOs.size();
+ for (unsigned i = 0; i != NumAddrOps; ++i)
+ MIB.addOperand(MOs[i]);
+ if (NumAddrOps < 4) // FrameIndex only
+ addOffset(MIB, 0);
+}
+
static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
ArrayRef<MachineOperand> MOs,
+ MachineBasicBlock::iterator InsertPt,
MachineInstr *MI,
const TargetInstrInfo &TII) {
// Create the base instruction with the memory operand as the first part.
MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode),
MI->getDebugLoc(), true);
MachineInstrBuilder MIB(MF, NewMI);
- unsigned NumAddrOps = MOs.size();
- for (unsigned i = 0; i != NumAddrOps; ++i)
- MIB.addOperand(MOs[i]);
- if (NumAddrOps < 4) // FrameIndex only
- addOffset(MIB, 0);
+ addOperands(MIB, MOs);
// Loop over the rest of the ri operands, converting them over.
unsigned NumOps = MI->getDesc().getNumOperands()-2;
MachineOperand &MO = MI->getOperand(i);
MIB.addOperand(MO);
}
+
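+  // Insert the fused instruction ourselves; under the new contract the
+  // folding implementation, not the caller, is responsible for insertion.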
+ MachineBasicBlock *MBB = InsertPt->getParent();
+ MBB->insert(InsertPt, NewMI);
+
return MIB;
}
static MachineInstr *FuseInst(MachineFunction &MF, unsigned Opcode,
unsigned OpNo, ArrayRef<MachineOperand> MOs,
+ MachineBasicBlock::iterator InsertPt,
MachineInstr *MI, const TargetInstrInfo &TII) {
// Omit the implicit operands, something BuildMI can't do.
MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode),
MachineOperand &MO = MI->getOperand(i);
if (i == OpNo) {
assert(MO.isReg() && "Expected to fold into reg operand!");
- unsigned NumAddrOps = MOs.size();
- for (unsigned i = 0; i != NumAddrOps; ++i)
- MIB.addOperand(MOs[i]);
- if (NumAddrOps < 4) // FrameIndex only
- addOffset(MIB, 0);
+ addOperands(MIB, MOs);
} else {
MIB.addOperand(MO);
}
}
+
+ MachineBasicBlock *MBB = InsertPt->getParent();
+ MBB->insert(InsertPt, NewMI);
+
return MIB;
}
static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
ArrayRef<MachineOperand> MOs,
+ MachineBasicBlock::iterator InsertPt,
MachineInstr *MI) {
- MachineFunction &MF = *MI->getParent()->getParent();
- MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), TII.get(Opcode));
-
- unsigned NumAddrOps = MOs.size();
- for (unsigned i = 0; i != NumAddrOps; ++i)
- MIB.addOperand(MOs[i]);
- if (NumAddrOps < 4) // FrameIndex only
- addOffset(MIB, 0);
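+  // This BuildMI overload inserts the new instruction at InsertPt itself,
+  // so no separate MBB->insert call is needed here.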
+ MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt,
+ MI->getDebugLoc(), TII.get(Opcode));
+ addOperands(MIB, MOs);
return MIB.addImm(0);
}
-MachineInstr *X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr *MI,
- unsigned OpNum,
- ArrayRef<MachineOperand> MOs,
- unsigned Size, unsigned Align,
- bool AllowCommute) const {
+MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
+ MachineFunction &MF, MachineInstr *MI, unsigned OpNum,
+ ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt,
+ unsigned Size, unsigned Align, bool AllowCommute) const {
const DenseMap<unsigned,
std::pair<unsigned,unsigned> > *OpcodeTablePtr = nullptr;
bool isCallRegIndirect = Subtarget.callRegIndirect();
isTwoAddrFold = true;
} else if (OpNum == 0) {
if (MI->getOpcode() == X86::MOV32r0) {
- NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, MI);
+ NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, InsertPt, MI);
if (NewMI)
return NewMI;
}
}
if (isTwoAddrFold)
- NewMI = FuseTwoAddrInst(MF, Opcode, MOs, MI, *this);
+ NewMI = FuseTwoAddrInst(MF, Opcode, MOs, InsertPt, MI, *this);
else
- NewMI = FuseInst(MF, Opcode, OpNum, MOs, MI, *this);
+ NewMI = FuseInst(MF, Opcode, OpNum, MOs, InsertPt, MI, *this);
if (NarrowToMOV32rm) {
// If this is the special case where we use a MOV32rm to load a 32-bit
// Attempt to fold with the commuted version of the instruction.
unsigned CommuteOp =
(CommuteOpIdx1 == OriginalOpIdx ? CommuteOpIdx2 : CommuteOpIdx1);
- NewMI = foldMemoryOperandImpl(MF, MI, CommuteOp, MOs, Size, Align,
- /*AllowCommute=*/false);
+ NewMI =
+ foldMemoryOperandImpl(MF, MI, CommuteOp, MOs, InsertPt, Size, Align,
+ /*AllowCommute=*/false);
if (NewMI)
return NewMI;
MI->addRegisterKilled(Reg, TRI, true);
}
-MachineInstr *X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr *MI,
- ArrayRef<unsigned> Ops,
- int FrameIndex) const {
+MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
+ MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
// Check switch flag
if (NoFusing) return nullptr;
return nullptr;
return foldMemoryOperandImpl(MF, MI, Ops[0],
- MachineOperand::CreateFI(FrameIndex), Size,
- Alignment, /*AllowCommute=*/true);
+ MachineOperand::CreateFI(FrameIndex), InsertPt,
+ Size, Alignment, /*AllowCommute=*/true);
}
static bool isPartialRegisterLoad(const MachineInstr &LoadMI,
return false;
}
-MachineInstr *X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
- MachineInstr *MI,
- ArrayRef<unsigned> Ops,
- MachineInstr *LoadMI) const {
+MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
+ MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI) const {
// If loading from a FrameIndex, fold directly from the FrameIndex.
unsigned NumOps = LoadMI->getDesc().getNumOperands();
int FrameIndex;
if (isLoadFromStackSlot(LoadMI, FrameIndex)) {
if (isPartialRegisterLoad(*LoadMI, MF))
return nullptr;
- return foldMemoryOperandImpl(MF, MI, Ops, FrameIndex);
+ return foldMemoryOperandImpl(MF, MI, Ops, InsertPt, FrameIndex);
}
// Check switch flag
break;
}
}
- return foldMemoryOperandImpl(MF, MI, Ops[0], MOs,
+ return foldMemoryOperandImpl(MF, MI, Ops[0], MOs, InsertPt,
/*Size=*/0, Alignment, /*AllowCommute=*/true);
}
/// references has been changed.
MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt,
int FrameIndex) const override;
/// foldMemoryOperand - Same as the previous version except it allows folding
/// stack slot.
MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
ArrayRef<unsigned> Ops,
+ MachineBasicBlock::iterator InsertPt,
MachineInstr *LoadMI) const override;
/// canFoldMemoryOperand - Returns true if the specified load / store is
MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
unsigned OpNum,
ArrayRef<MachineOperand> MOs,
+ MachineBasicBlock::iterator InsertPt,
unsigned Size, unsigned Alignment,
bool AllowCommute) const;