From: Dan Gohman
Date: Wed, 3 Dec 2008 18:43:12 +0000 (+0000)
Subject: Split foldMemoryOperand into public non-virtual and protected virtual
X-Git-Url: http://plrg.eecs.uci.edu/git/?a=commitdiff_plain;h=c54baa2d43730f1804acfb4f4e738fba72f966bd;hp=15511cf1660cfd6bb8b8e8fca2db9450f50430ee;p=oota-llvm.git

Split foldMemoryOperand into public non-virtual and protected virtual
parts, and add target-independent code to add/preserve MachineMemOperands.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@60488 91177308-0d34-0410-b5e6-96231b3b80d8
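The split is the classic non-virtual interface (NVI) idiom: the public entry
points become non-virtual wrappers that do the target-independent bookkeeping,
and targets override a protected hook instead. A minimal sketch of the shape of
the change (types stubbed out and the second, LoadMI overload omitted; this is
an illustration, not the patched header):

    struct MachineInstr; // stand-in for the real LLVM class

    class TargetInstrInfo {
    public:
      // Public non-virtual entry point: callers keep using this name.
      // In the real patch it verifies mayLoad/mayStore on the result and
      // attaches a MachineMemOperand describing the stack slot.
      MachineInstr *foldMemoryOperand(MachineInstr *MI, int FrameIndex) const {
        MachineInstr *NewMI = foldMemoryOperandImpl(MI, FrameIndex);
        if (!NewMI) return 0;
        // ... target-independent post-processing goes here ...
        return NewMI;
      }

    protected:
      // Protected virtual hook: each target overrides only the folding
      // logic itself; the default refuses to fold.
      virtual MachineInstr *foldMemoryOperandImpl(MachineInstr *MI,
                                                  int FrameIndex) const {
        return 0;
      }
    };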
---

diff --git a/include/llvm/Target/TargetInstrInfo.h b/include/llvm/Target/TargetInstrInfo.h
index 17acacec41d..34a7d48e836 100644
--- a/include/llvm/Target/TargetInstrInfo.h
+++ b/include/llvm/Target/TargetInstrInfo.h
@@ -305,23 +305,41 @@ public:
   /// operand folded, otherwise NULL is returned. The client is responsible for
   /// removing the old instruction and adding the new one in the instruction
   /// stream.
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                          MachineInstr* MI,
-                                          const SmallVectorImpl<unsigned> &Ops,
-                                          int FrameIndex) const {
-    return 0;
-  }
+  MachineInstr* foldMemoryOperand(MachineFunction &MF,
+                                  MachineInstr* MI,
+                                  const SmallVectorImpl<unsigned> &Ops,
+                                  int FrameIndex) const;
 
   /// foldMemoryOperand - Same as the previous version except it allows folding
   /// of any load and store from / to any address, not just from a specific
   /// stack slot.
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
+  MachineInstr* foldMemoryOperand(MachineFunction &MF,
+                                  MachineInstr* MI,
+                                  const SmallVectorImpl<unsigned> &Ops,
+                                  MachineInstr* LoadMI) const;
+
+protected:
+  /// foldMemoryOperandImpl - Target-dependent implementation for
+  /// foldMemoryOperand. Target-independent code in foldMemoryOperand will
+  /// take care of adding a MachineMemOperand to the newly created instruction.
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
                                           MachineInstr* MI,
                                           const SmallVectorImpl<unsigned> &Ops,
-                                          MachineInstr* LoadMI) const {
+                                              int FrameIndex) const {
+    return 0;
+  }
+
+  /// foldMemoryOperandImpl - Target-dependent implementation for
+  /// foldMemoryOperand. Target-independent code in foldMemoryOperand will
+  /// take care of adding a MachineMemOperand to the newly created instruction.
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                              MachineInstr* MI,
+                                              const SmallVectorImpl<unsigned> &Ops,
+                                              MachineInstr* LoadMI) const {
     return 0;
   }
 
+public:
   /// canFoldMemoryOperand - Returns true if folding of the specified
   /// load / store is possible.
   virtual
diff --git a/lib/CodeGen/TargetInstrInfoImpl.cpp b/lib/CodeGen/TargetInstrInfoImpl.cpp
index e4142423cec..c28b045b2ec 100644
--- a/lib/CodeGen/TargetInstrInfoImpl.cpp
+++ b/lib/CodeGen/TargetInstrInfoImpl.cpp
@@ -14,8 +14,10 @@
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineInstr.h"
 #include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/PseudoSourceValue.h"
 using namespace llvm;
 
 // commuteInstruction - The default implementation of this method just exchanges
@@ -124,3 +126,69 @@ TargetInstrInfoImpl::GetFunctionSizeInBytes(const MachineFunction &MF) const {
   }
   return FnSize;
 }
+
+/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
+/// slot into the specified machine instruction for the specified operand(s).
+/// If this is possible, a new instruction is returned with the specified
+/// operand folded, otherwise NULL is returned. The client is responsible for
+/// removing the old instruction and adding the new one in the instruction
+/// stream.
+MachineInstr*
+TargetInstrInfo::foldMemoryOperand(MachineFunction &MF,
+                                   MachineInstr* MI,
+                                   const SmallVectorImpl<unsigned> &Ops,
+                                   int FrameIndex) const {
+  unsigned Flags = 0;
+  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
+    if (MI->getOperand(Ops[i]).isDef())
+      Flags |= MachineMemOperand::MOStore;
+    else
+      Flags |= MachineMemOperand::MOLoad;
+
+  // Ask the target to do the actual folding.
+  MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, FrameIndex);
+  if (!NewMI) return 0;
+
+  assert((!(Flags & MachineMemOperand::MOStore) ||
+          NewMI->getDesc().mayStore()) &&
+         "Folded a def to a non-store!");
+  assert((!(Flags & MachineMemOperand::MOLoad) ||
+          NewMI->getDesc().mayLoad()) &&
+         "Folded a use to a non-load!");
+  const MachineFrameInfo &MFI = *MF.getFrameInfo();
+  assert(MFI.getObjectOffset(FrameIndex) != -1);
+  MachineMemOperand MMO(PseudoSourceValue::getFixedStack(FrameIndex),
+                        Flags,
+                        MFI.getObjectOffset(FrameIndex),
+                        MFI.getObjectSize(FrameIndex),
+                        MFI.getObjectAlignment(FrameIndex));
+  NewMI->addMemOperand(MF, MMO);
+
+  return NewMI;
+}
+
+/// foldMemoryOperand - Same as the previous version except it allows folding
+/// of any load and store from / to any address, not just from a specific
+/// stack slot.
+MachineInstr*
+TargetInstrInfo::foldMemoryOperand(MachineFunction &MF,
+                                   MachineInstr* MI,
+                                   const SmallVectorImpl<unsigned> &Ops,
+                                   MachineInstr* LoadMI) const {
+  assert(LoadMI->getDesc().canFoldAsLoad() && "LoadMI isn't foldable!");
+#ifndef NDEBUG
+  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
+    assert(MI->getOperand(Ops[i]).isUse() && "Folding load into def!");
+#endif
+
+  // Ask the target to do the actual folding.
+  MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, LoadMI);
+  if (!NewMI) return 0;
+
+  // Copy the memoperands from the load to the folded instruction.
+  for (std::list<MachineMemOperand>::iterator I = LoadMI->memoperands_begin(),
+       E = LoadMI->memoperands_end(); I != E; ++I)
+    NewMI->addMemOperand(MF, *I);
+
+  return NewMI;
+}
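A note on how the wrapper above is meant to be used: callers are unchanged by
this patch and keep calling the public foldMemoryOperand; only the override
point moves. A hedged caller-side sketch (tryFoldSpill is a hypothetical helper
written for illustration, not code from this patch):

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/Target/TargetInstrInfo.h"
    using namespace llvm;

    // Fold operand OpNum of MI into stack slot FrameIndex, honoring the
    // documented contract: insert the new instruction, remove the old one.
    static bool tryFoldSpill(MachineFunction &MF, MachineBasicBlock &MBB,
                             MachineInstr *MI, unsigned OpNum, int FrameIndex,
                             const TargetInstrInfo *TII) {
      SmallVector<unsigned, 2> Ops;
      Ops.push_back(OpNum);
      MachineInstr *NewMI = TII->foldMemoryOperand(MF, MI, Ops, FrameIndex);
      if (!NewMI) return false;
      // After this patch, NewMI already carries a MachineMemOperand for the
      // spill slot; the caller no longer has to attach one by hand.
      MBB.insert(MI, NewMI);
      MBB.erase(MI);
      return true;
    }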
diff --git a/lib/Target/ARM/ARMInstrInfo.cpp b/lib/Target/ARM/ARMInstrInfo.cpp
index e78f2d31f17..a809a93bb77 100644
--- a/lib/Target/ARM/ARMInstrInfo.cpp
+++ b/lib/Target/ARM/ARMInstrInfo.cpp
@@ -663,10 +663,10 @@ bool ARMInstrInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
   return true;
 }
 
-MachineInstr *ARMInstrInfo::foldMemoryOperand(MachineFunction &MF,
-                                              MachineInstr *MI,
+MachineInstr *ARMInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+                                                  MachineInstr *MI,
                                         const SmallVectorImpl<unsigned> &Ops,
-                                              int FI) const {
+                                                  int FI) const {
   if (Ops.size() != 1) return NULL;
 
   unsigned OpNum = Ops[0];
diff --git a/lib/Target/ARM/ARMInstrInfo.h b/lib/Target/ARM/ARMInstrInfo.h
index cdf068f9d05..7afeb840820 100644
--- a/lib/Target/ARM/ARMInstrInfo.h
+++ b/lib/Target/ARM/ARMInstrInfo.h
@@ -211,15 +211,15 @@ public:
                                          MachineBasicBlock::iterator MI,
                                 const std::vector<CalleeSavedInfo> &CSI) const;
 
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                          MachineInstr* MI,
-                                          const SmallVectorImpl<unsigned> &Ops,
-                                          int FrameIndex) const;
-
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                          MachineInstr* MI,
-                                          const SmallVectorImpl<unsigned> &Ops,
-                                          MachineInstr* LoadMI) const {
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                              MachineInstr* MI,
+                                              const SmallVectorImpl<unsigned> &Ops,
+                                              int FrameIndex) const;
+
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                              MachineInstr* MI,
+                                              const SmallVectorImpl<unsigned> &Ops,
+                                              MachineInstr* LoadMI) const {
     return 0;
   }
 
diff --git a/lib/Target/Alpha/AlphaInstrInfo.cpp b/lib/Target/Alpha/AlphaInstrInfo.cpp
index 7b3053e5238..bd6f2cf3e37 100644
--- a/lib/Target/Alpha/AlphaInstrInfo.cpp
+++ b/lib/Target/Alpha/AlphaInstrInfo.cpp
@@ -255,10 +255,10 @@ void AlphaInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
     NewMIs.push_back(MIB);
 }
 
-MachineInstr *AlphaInstrInfo::foldMemoryOperand(MachineFunction &MF,
-                                                MachineInstr *MI,
+MachineInstr *AlphaInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+                                                    MachineInstr *MI,
                                         const SmallVectorImpl<unsigned> &Ops,
-                                                int FrameIndex) const {
+                                                    int FrameIndex) const {
   if (Ops.size() != 1) return NULL;
 
   // Make sure this is a reg-reg copy.
diff --git a/lib/Target/Alpha/AlphaInstrInfo.h b/lib/Target/Alpha/AlphaInstrInfo.h
index b218873c734..f04c4881f25 100644
--- a/lib/Target/Alpha/AlphaInstrInfo.h
+++ b/lib/Target/Alpha/AlphaInstrInfo.h
@@ -69,15 +69,15 @@ public:
                                  const TargetRegisterClass *RC,
                                  SmallVectorImpl<MachineInstr*> &NewMIs) const;
 
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                          MachineInstr* MI,
-                                          const SmallVectorImpl<unsigned> &Ops,
-                                          int FrameIndex) const;
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                              MachineInstr* MI,
+                                              const SmallVectorImpl<unsigned> &Ops,
+                                              int FrameIndex) const;
 
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                          MachineInstr* MI,
-                                          const SmallVectorImpl<unsigned> &Ops,
-                                          MachineInstr* LoadMI) const {
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                              MachineInstr* MI,
+                                              const SmallVectorImpl<unsigned> &Ops,
+                                              MachineInstr* LoadMI) const {
     return 0;
   }
 
diff --git a/lib/Target/CellSPU/SPUInstrInfo.cpp b/lib/Target/CellSPU/SPUInstrInfo.cpp
index 510b05091f7..bb89306bd94 100644
--- a/lib/Target/CellSPU/SPUInstrInfo.cpp
+++ b/lib/Target/CellSPU/SPUInstrInfo.cpp
@@ -399,10 +399,10 @@ void SPUInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
 /// foldMemoryOperand - SPU, like PPC, can only fold spills into
 /// copy instructions, turning them into load/store instructions.
 MachineInstr *
-SPUInstrInfo::foldMemoryOperand(MachineFunction &MF,
-                                MachineInstr *MI,
-                                const SmallVectorImpl<unsigned> &Ops,
-                                int FrameIndex) const
+SPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+                                    MachineInstr *MI,
+                                    const SmallVectorImpl<unsigned> &Ops,
+                                    int FrameIndex) const
 {
 #if SOMEDAY_SCOTT_LOOKS_AT_ME_AGAIN
   if (Ops.size() != 1) return NULL;
diff --git a/lib/Target/CellSPU/SPUInstrInfo.h b/lib/Target/CellSPU/SPUInstrInfo.h
index 722e1a10e83..5c59b68cbb1 100644
--- a/lib/Target/CellSPU/SPUInstrInfo.h
+++ b/lib/Target/CellSPU/SPUInstrInfo.h
@@ -79,16 +79,16 @@ namespace llvm {
                               SmallVectorImpl<MachineInstr*> &NewMIs) const;
 
     //! Fold spills into load/store instructions
-    virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                            MachineInstr* MI,
-                                            const SmallVectorImpl<unsigned> &Ops,
-                                            int FrameIndex) const;
+    virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                                MachineInstr* MI,
+                                                const SmallVectorImpl<unsigned> &Ops,
+                                                int FrameIndex) const;
 
     //! Fold any load/store to an operand
-    virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                            MachineInstr* MI,
-                                            const SmallVectorImpl<unsigned> &Ops,
-                                            MachineInstr* LoadMI) const {
+    virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                                MachineInstr* MI,
+                                                const SmallVectorImpl<unsigned> &Ops,
+                                                MachineInstr* LoadMI) const {
       return 0;
     }
   };
diff --git a/lib/Target/Mips/MipsInstrInfo.cpp b/lib/Target/Mips/MipsInstrInfo.cpp
index 8d3c5a0055e..e71d26d2f4d 100644
--- a/lib/Target/Mips/MipsInstrInfo.cpp
+++ b/lib/Target/Mips/MipsInstrInfo.cpp
@@ -279,9 +279,9 @@ void MipsInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
 }
 
 MachineInstr *MipsInstrInfo::
-foldMemoryOperand(MachineFunction &MF,
-                  MachineInstr* MI,
-                  const SmallVectorImpl<unsigned> &Ops, int FI) const
+foldMemoryOperandImpl(MachineFunction &MF,
+                      MachineInstr* MI,
+                      const SmallVectorImpl<unsigned> &Ops, int FI) const
 {
   if (Ops.size() != 1) return NULL;
 
@@ -323,7 +323,7 @@ foldMemoryOperand(MachineFunction &MF,
     } else if (RC == Mips::AFGR64RegisterClass) {
       LoadOpc = Mips::LDC1; StoreOpc = Mips::SDC1;
     } else
-      assert(0 && "foldMemoryOperand register unknown");
+      assert(0 && "foldMemoryOperandImpl register unknown");
 
     if (Ops[0] == 0) {    // COPY -> STORE
       unsigned SrcReg = MI->getOperand(1).getReg();
diff --git a/lib/Target/Mips/MipsInstrInfo.h b/lib/Target/Mips/MipsInstrInfo.h
index b1c34bc4359..4302bfdeee8 100644
--- a/lib/Target/Mips/MipsInstrInfo.h
+++ b/lib/Target/Mips/MipsInstrInfo.h
@@ -196,15 +196,15 @@ public:
                                    const TargetRegisterClass *RC,
                                    SmallVectorImpl<MachineInstr*> &NewMIs) const;
 
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                          MachineInstr* MI,
-                                          const SmallVectorImpl<unsigned> &Ops,
-                                          int FrameIndex) const;
-
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                          MachineInstr* MI,
-                                          const SmallVectorImpl<unsigned> &Ops,
-                                          MachineInstr* LoadMI) const {
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                              MachineInstr* MI,
+                                              const SmallVectorImpl<unsigned> &Ops,
+                                              int FrameIndex) const;
+
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                              MachineInstr* MI,
+                                              const SmallVectorImpl<unsigned> &Ops,
+                                              MachineInstr* LoadMI) const {
     return 0;
   }
 
diff --git a/lib/Target/PowerPC/PPCInstrInfo.cpp b/lib/Target/PowerPC/PPCInstrInfo.cpp
index 87f7b8a3840..e2faf400764 100644
--- a/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -655,10 +655,10 @@ void PPCInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
 
 /// foldMemoryOperand - PowerPC (like most RISCs) can only fold spills into
 /// copy instructions, turning them into load/store instructions.
-MachineInstr *PPCInstrInfo::foldMemoryOperand(MachineFunction &MF,
-                                              MachineInstr *MI,
-                                              const SmallVectorImpl<unsigned> &Ops,
-                                              int FrameIndex) const {
+MachineInstr *PPCInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+                                                  MachineInstr *MI,
+                                                  const SmallVectorImpl<unsigned> &Ops,
+                                                  int FrameIndex) const {
   if (Ops.size() != 1) return NULL;
 
   // Make sure this is a reg-reg copy. Note that we can't handle MCRF, because
diff --git a/lib/Target/PowerPC/PPCInstrInfo.h b/lib/Target/PowerPC/PPCInstrInfo.h
index 909b0c501b1..f45b5ef6772 100644
--- a/lib/Target/PowerPC/PPCInstrInfo.h
+++ b/lib/Target/PowerPC/PPCInstrInfo.h
@@ -142,15 +142,15 @@ public:
 
   /// foldMemoryOperand - PowerPC (like most RISCs) can only fold spills into
   /// copy instructions, turning them into load/store instructions.
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                          MachineInstr* MI,
-                                          const SmallVectorImpl<unsigned> &Ops,
-                                          int FrameIndex) const;
-
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                          MachineInstr* MI,
-                                          const SmallVectorImpl<unsigned> &Ops,
-                                          MachineInstr* LoadMI) const {
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                              MachineInstr* MI,
+                                              const SmallVectorImpl<unsigned> &Ops,
+                                              int FrameIndex) const;
+
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                              MachineInstr* MI,
+                                              const SmallVectorImpl<unsigned> &Ops,
+                                              MachineInstr* LoadMI) const {
     return 0;
   }
 
diff --git a/lib/Target/Sparc/SparcInstrInfo.cpp b/lib/Target/Sparc/SparcInstrInfo.cpp
index 167012e9967..8b07bcbabc5 100644
--- a/lib/Target/Sparc/SparcInstrInfo.cpp
+++ b/lib/Target/Sparc/SparcInstrInfo.cpp
@@ -225,10 +225,10 @@ void SparcInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
   return;
 }
 
-MachineInstr *SparcInstrInfo::foldMemoryOperand(MachineFunction &MF,
-                                                MachineInstr* MI,
+MachineInstr *SparcInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+                                                    MachineInstr* MI,
                                         const SmallVectorImpl<unsigned> &Ops,
-                                                int FI) const {
+                                                    int FI) const {
   if (Ops.size() != 1) return NULL;
 
   unsigned OpNum = Ops[0];
diff --git a/lib/Target/Sparc/SparcInstrInfo.h b/lib/Target/Sparc/SparcInstrInfo.h
index 346be9580b1..68a6de3b470 100644
--- a/lib/Target/Sparc/SparcInstrInfo.h
+++ b/lib/Target/Sparc/SparcInstrInfo.h
@@ -96,15 +96,15 @@ public:
                                  const TargetRegisterClass *RC,
                                  SmallVectorImpl<MachineInstr*> &NewMIs) const;
 
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                          MachineInstr* MI,
-                                          const SmallVectorImpl<unsigned> &Ops,
-                                          int FrameIndex) const;
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                              MachineInstr* MI,
+                                              const SmallVectorImpl<unsigned> &Ops,
+                                              int FrameIndex) const;
 
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                          MachineInstr* MI,
-                                          const SmallVectorImpl<unsigned> &Ops,
-                                          MachineInstr* LoadMI) const {
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                              MachineInstr* MI,
+                                              const SmallVectorImpl<unsigned> &Ops,
+                                              MachineInstr* LoadMI) const {
     return 0;
   }
 };
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index a43c5ddad3f..8420445bf9f 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -1976,9 +1976,9 @@ static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
 }
 
 MachineInstr*
-X86InstrInfo::foldMemoryOperand(MachineFunction &MF,
-                                MachineInstr *MI, unsigned i,
-                                const SmallVector<MachineOperand,4> &MOs) const{
+X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+                                    MachineInstr *MI, unsigned i,
+                                    const SmallVector<MachineOperand,4> &MOs) const{
   const DenseMap<unsigned*, unsigned> *OpcodeTablePtr = NULL;
   bool isTwoAddrFold = false;
   unsigned NumOps = MI->getDesc().getNumOperands();
@@ -2035,10 +2035,10 @@ X86InstrInfo::foldMemoryOperand(MachineFunction &MF,
 }
 
 
-MachineInstr* X86InstrInfo::foldMemoryOperand(MachineFunction &MF,
-                                              MachineInstr *MI,
-                                              const SmallVectorImpl<unsigned> &Ops,
-                                              int FrameIndex) const {
+MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+                                                  MachineInstr *MI,
+                                                  const SmallVectorImpl<unsigned> &Ops,
+                                                  int FrameIndex) const {
   // Check switch flag
   if (NoFusing) return NULL;
 
@@ -2079,13 +2079,13 @@ MachineInstr* X86InstrInfo::foldMemoryOperand(MachineFunction &MF,
 
   SmallVector<MachineOperand,4> MOs;
   MOs.push_back(MachineOperand::CreateFI(FrameIndex));
-  return foldMemoryOperand(MF, MI, Ops[0], MOs);
+  return foldMemoryOperandImpl(MF, MI, Ops[0], MOs);
 }
 
-MachineInstr* X86InstrInfo::foldMemoryOperand(MachineFunction &MF,
-                                              MachineInstr *MI,
-                                              const SmallVectorImpl<unsigned> &Ops,
-                                              MachineInstr *LoadMI) const {
+MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
+                                                  MachineInstr *MI,
+                                                  const SmallVectorImpl<unsigned> &Ops,
+                                                  MachineInstr *LoadMI) const {
   // Check switch flag
   if (NoFusing) return NULL;
 
@@ -2158,7 +2158,7 @@ MachineInstr* X86InstrInfo::foldMemoryOperand(MachineFunction &MF,
     for (unsigned i = NumOps - 4; i != NumOps; ++i)
       MOs.push_back(LoadMI->getOperand(i));
   }
-  return foldMemoryOperand(MF, MI, Ops[0], MOs);
+  return foldMemoryOperandImpl(MF, MI, Ops[0], MOs);
 }
 
diff --git a/lib/Target/X86/X86InstrInfo.h b/lib/Target/X86/X86InstrInfo.h
index a0e0124676a..21c9a1f0179 100644
--- a/lib/Target/X86/X86InstrInfo.h
+++ b/lib/Target/X86/X86InstrInfo.h
@@ -368,18 +368,18 @@ public:
   /// folding and return true, otherwise it should return false. If it folds
   /// the instruction, it is likely that the MachineInstruction the iterator
   /// references has been changed.
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                          MachineInstr* MI,
-                                          const SmallVectorImpl<unsigned> &Ops,
-                                          int FrameIndex) const;
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                              MachineInstr* MI,
+                                              const SmallVectorImpl<unsigned> &Ops,
+                                              int FrameIndex) const;
 
   /// foldMemoryOperand - Same as the previous version except it allows folding
   /// of any load and store from / to any address, not just from a specific
   /// stack slot.
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                          MachineInstr* MI,
-                                          const SmallVectorImpl<unsigned> &Ops,
-                                          MachineInstr* LoadMI) const;
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                              MachineInstr* MI,
+                                              const SmallVectorImpl<unsigned> &Ops,
+                                              MachineInstr* LoadMI) const;
 
   /// canFoldMemoryOperand - Returns true if folding of the specified
   /// load / store is possible.
@@ -444,10 +444,10 @@ public:
   unsigned getGlobalBaseReg(MachineFunction *MF) const;
 
 private:
-  MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                  MachineInstr* MI,
-                                  unsigned OpNum,
-                                  const SmallVector<MachineOperand,4> &MOs) const;
+  MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                      MachineInstr* MI,
+                                      unsigned OpNum,
+                                      const SmallVector<MachineOperand,4> &MOs) const;
 };
 
 } // End llvm namespace
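For out-of-tree targets, the migration is mechanical: rename the override from
foldMemoryOperand to foldMemoryOperandImpl and drop any manual memory-operand
bookkeeping. A hedged sketch for a hypothetical target — FooInstrInfo and the
Foo::* opcodes are invented for illustration and do not exist in LLVM:

    MachineInstr *FooInstrInfo::foldMemoryOperandImpl(
        MachineFunction &MF, MachineInstr *MI,
        const SmallVectorImpl<unsigned> &Ops, int FrameIndex) const {
      // Like the in-tree RISC targets above, only fold a reg-reg copy,
      // turning it into a load from or a store to the stack slot.
      if (Ops.size() != 1) return NULL;
      if (MI->getOpcode() != Foo::MOVrr) return NULL; // hypothetical copy

      MachineInstr *NewMI = NULL;
      if (Ops[0] == 0) { // Destination folded: the copy becomes a store.
        unsigned SrcReg = MI->getOperand(1).getReg();
        NewMI = BuildMI(MF, get(Foo::STrf))            // hypothetical store
                    .addReg(SrcReg).addFrameIndex(FrameIndex);
      } else {           // Source folded: the copy becomes a load.
        unsigned DstReg = MI->getOperand(0).getReg();
        NewMI = BuildMI(MF, get(Foo::LDrf), DstReg)    // hypothetical load
                    .addFrameIndex(FrameIndex);
      }
      // Deliberately no addMemOperand here: the public, non-virtual
      // TargetInstrInfo::foldMemoryOperand wrapper now attaches the
      // MachineMemOperand for the stack slot.
      return NewMI;
    }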