X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTarget%2FPowerPC%2FPPCInstrInfo.h;h=c3c3a480a6aade2fd2debf4530d646207b06951a;hb=ca9fa31c8cddf8adbe019dbc12871a7b5703d0a4;hp=02d8bba369a8b6bf40fe874c401c79b4d984a224;hpb=43dbe05279b753aabda571d9c83eaeb36987001a;p=oota-llvm.git
diff --git a/lib/Target/PowerPC/PPCInstrInfo.h b/lib/Target/PowerPC/PPCInstrInfo.h
index 02d8bba369a..c3c3a480a6a 100644
--- a/lib/Target/PowerPC/PPCInstrInfo.h
+++ b/lib/Target/PowerPC/PPCInstrInfo.h
@@ -1,4 +1,4 @@
-//===- PPCInstrInfo.h - PowerPC Instruction Information ---------*- C++ -*-===//
+//===-- PPCInstrInfo.h - PowerPC Instruction Information --------*- C++ -*-===//
 //
 //                     The LLVM Compiler Infrastructure
 //
@@ -11,12 +11,15 @@
 //
 //===----------------------------------------------------------------------===//
 
-#ifndef POWERPC32_INSTRUCTIONINFO_H
-#define POWERPC32_INSTRUCTIONINFO_H
+#ifndef LLVM_LIB_TARGET_POWERPC_PPCINSTRINFO_H
+#define LLVM_LIB_TARGET_POWERPC_PPCINSTRINFO_H
 
 #include "PPC.h"
-#include "llvm/Target/TargetInstrInfo.h"
 #include "PPCRegisterInfo.h"
+#include "llvm/Target/TargetInstrInfo.h"
+
+#define GET_INSTRINFO_HEADER
+#include "PPCGenInstrInfo.inc"
 
 namespace llvm {
 
@@ -32,7 +35,7 @@ enum {
   /// PPC970_First - This instruction starts a new dispatch group, so it will
   /// always be the first one in the group.
   PPC970_First = 0x1,
-  
+
   /// PPC970_Single - This instruction starts a new dispatch group and
   /// terminates it, so it will be the sole instruction in the group.
   PPC970_Single = 0x2,
@@ -40,7 +43,7 @@ enum {
   /// PPC970_Cracked - This instruction is cracked into two pieces, requiring
   /// two dispatch pipes to be available to issue.
   PPC970_Cracked = 0x4,
-  
+
   /// PPC970_Mask/Shift - This is a bitmask that selects the pipeline type that
   /// an instruction is issued to.
   PPC970_Shift = 3,
@@ -58,94 +61,217 @@ enum PPC970_Unit {
   PPC970_VPERM = 6 << PPC970_Shift,   // Vector Permute Unit
   PPC970_BRU   = 7 << PPC970_Shift    // Branch Unit
 };
-}
-
-
-class PPCInstrInfo : public TargetInstrInfoImpl {
-  PPCTargetMachine &TM;
+} // end namespace PPCII
+
+class PPCSubtarget;
+class PPCInstrInfo : public PPCGenInstrInfo {
+  PPCSubtarget &Subtarget;
   const PPCRegisterInfo RI;
+
+  bool StoreRegToStackSlot(MachineFunction &MF,
+                           unsigned SrcReg, bool isKill, int FrameIdx,
+                           const TargetRegisterClass *RC,
+                           SmallVectorImpl<MachineInstr *> &NewMIs,
+                           bool &NonRI, bool &SpillsVRS) const;
+  bool LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
+                            unsigned DestReg, int FrameIdx,
+                            const TargetRegisterClass *RC,
+                            SmallVectorImpl<MachineInstr *> &NewMIs,
+                            bool &NonRI, bool &SpillsVRS) const;
+  virtual void anchor();
+
+protected:
+  /// Commutes the operands in the given instruction.
+  /// The commutable operands are specified by their indices OpIdx1 and OpIdx2.
+  ///
+  /// Do not call this method for a non-commutable instruction or for a
+  /// non-commutable pair of operand indices OpIdx1 and OpIdx2.
+  /// Even though the instruction is commutable, the method may still
+  /// fail to commute the operands; a null pointer is returned in such cases.
+  ///
+  /// For example, we can commute rlwimi instructions, but only if the
+  /// rotate amt is zero.  We also have to munge the immediates a bit.
+  MachineInstr *commuteInstructionImpl(MachineInstr *MI,
+                                       bool NewMI,
+                                       unsigned OpIdx1,
+                                       unsigned OpIdx2) const override;
+
 public:
-  PPCInstrInfo(PPCTargetMachine &TM);
+  explicit PPCInstrInfo(PPCSubtarget &STI);
 
   /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info.  As
   /// such, whenever a client has an instance of instruction info, it should
   /// always be able to get register info as well (through this method).
   ///
-  virtual const MRegisterInfo &getRegisterInfo() const { return RI; }
+  const PPCRegisterInfo &getRegisterInfo() const { return RI; }
 
-  /// getPointerRegClass - Return the register class to use to hold pointers.
-  /// This is used for addressing modes.
-  virtual const TargetRegisterClass *getPointerRegClass() const;
+  ScheduleHazardRecognizer *
+  CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
+                               const ScheduleDAG *DAG) const override;
+  ScheduleHazardRecognizer *
+  CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
+                                     const ScheduleDAG *DAG) const override;
 
-  // Return true if the instruction is a register to register move and
-  // leave the source and dest operands in the passed parameters.
-  //
-  virtual bool isMoveInstr(const MachineInstr& MI,
-                           unsigned& sourceReg,
-                           unsigned& destReg) const;
+  unsigned getInstrLatency(const InstrItineraryData *ItinData,
+                           const MachineInstr *MI,
+                           unsigned *PredCost = nullptr) const override;
 
-  unsigned isLoadFromStackSlot(MachineInstr *MI, int &FrameIndex) const;
-  unsigned isStoreToStackSlot(MachineInstr *MI, int &FrameIndex) const;
+  int getOperandLatency(const InstrItineraryData *ItinData,
+                        const MachineInstr *DefMI, unsigned DefIdx,
+                        const MachineInstr *UseMI,
+                        unsigned UseIdx) const override;
+  int getOperandLatency(const InstrItineraryData *ItinData,
+                        SDNode *DefNode, unsigned DefIdx,
+                        SDNode *UseNode, unsigned UseIdx) const override {
+    return PPCGenInstrInfo::getOperandLatency(ItinData, DefNode, DefIdx,
+                                              UseNode, UseIdx);
+  }
+
+  bool hasLowDefLatency(const TargetSchedModel &SchedModel,
+                        const MachineInstr *DefMI,
+                        unsigned DefIdx) const override {
+    // Machine LICM should hoist all instructions in low-register-pressure
+    // situations; none are sufficiently free to justify leaving in a loop
+    // body.
+    return false;
+  }
+
+  bool useMachineCombiner() const override {
+    return true;
+  }
+
+  /// Return true when there is potentially a faster code sequence
+  /// for an instruction chain ending in <Root>.  All potential patterns are
+  /// output in the <Pattern> array.
+  bool getMachineCombinerPatterns(
+      MachineInstr &Root,
+      SmallVectorImpl<MachineCombinerPattern> &P) const override;
 
-  // commuteInstruction - We can commute rlwimi instructions, but only if the
-  // rotate amt is zero.  We also have to munge the immediates a bit.
-  virtual MachineInstr *commuteInstruction(MachineInstr *MI) const;
-
-  virtual void insertNoop(MachineBasicBlock &MBB,
-                          MachineBasicBlock::iterator MI) const;
+  bool isAssociativeAndCommutative(const MachineInstr &Inst) const override;
+
+  bool isCoalescableExtInstr(const MachineInstr &MI,
+                             unsigned &SrcReg, unsigned &DstReg,
+                             unsigned &SubIdx) const override;
+  unsigned isLoadFromStackSlot(const MachineInstr *MI,
+                               int &FrameIndex) const override;
+  unsigned isStoreToStackSlot(const MachineInstr *MI,
+                              int &FrameIndex) const override;
+
+  bool findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
+                             unsigned &SrcOpIdx2) const override;
+
+  void insertNoop(MachineBasicBlock &MBB,
+                  MachineBasicBlock::iterator MI) const override;
 
   // Branch analysis.
-  virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
-                             MachineBasicBlock *&FBB,
-                             std::vector<MachineOperand> &Cond) const;
-  virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
-  virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
-                                MachineBasicBlock *FBB,
-                                const std::vector<MachineOperand> &Cond) const;
-  virtual void copyRegToReg(MachineBasicBlock &MBB,
-                            MachineBasicBlock::iterator MI,
-                            unsigned DestReg, unsigned SrcReg,
-                            const TargetRegisterClass *DestRC,
-                            const TargetRegisterClass *SrcRC) const;
-
-  virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
-                                   MachineBasicBlock::iterator MBBI,
-                                   unsigned SrcReg, bool isKill, int FrameIndex,
-                                   const TargetRegisterClass *RC) const;
-
-  virtual void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
-                              SmallVectorImpl<MachineOperand> &Addr,
-                              const TargetRegisterClass *RC,
-                              SmallVectorImpl<MachineInstr*> &NewMIs) const;
-
-  virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
-                                    MachineBasicBlock::iterator MBBI,
-                                    unsigned DestReg, int FrameIndex,
-                                    const TargetRegisterClass *RC) const;
-
-  virtual void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
-                               SmallVectorImpl<MachineOperand> &Addr,
-                               const TargetRegisterClass *RC,
-                               SmallVectorImpl<MachineInstr*> &NewMIs) const;
-
-  /// foldMemoryOperand - PowerPC (like most RISC's) can only fold spills into
-  /// copy instructions, turning them into load/store instructions.
-  virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                          SmallVectorImpl<unsigned> &Ops,
-                                          int FrameIndex) const;
-
-  virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                          SmallVectorImpl<unsigned> &Ops,
-                                          MachineInstr* LoadMI) const {
-    return 0;
+  bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+                     MachineBasicBlock *&FBB,
+                     SmallVectorImpl<MachineOperand> &Cond,
+                     bool AllowModify) const override;
+  unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
+  unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+                        MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
+                        DebugLoc DL) const override;
+
+  // Select analysis.
+  bool canInsertSelect(const MachineBasicBlock &, ArrayRef<MachineOperand> Cond,
+                       unsigned, unsigned, int &, int &, int &) const override;
+  void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
+                    DebugLoc DL, unsigned DstReg, ArrayRef<MachineOperand> Cond,
+                    unsigned TrueReg, unsigned FalseReg) const override;
+
+  void copyPhysReg(MachineBasicBlock &MBB,
+                   MachineBasicBlock::iterator I, DebugLoc DL,
+                   unsigned DestReg, unsigned SrcReg,
+                   bool KillSrc) const override;
+
+  void storeRegToStackSlot(MachineBasicBlock &MBB,
+                           MachineBasicBlock::iterator MBBI,
+                           unsigned SrcReg, bool isKill, int FrameIndex,
+                           const TargetRegisterClass *RC,
+                           const TargetRegisterInfo *TRI) const override;
+
+  void loadRegFromStackSlot(MachineBasicBlock &MBB,
+                            MachineBasicBlock::iterator MBBI,
+                            unsigned DestReg, int FrameIndex,
+                            const TargetRegisterClass *RC,
+                            const TargetRegisterInfo *TRI) const override;
+
+  bool
+  ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
+
+  bool FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
+                     unsigned Reg, MachineRegisterInfo *MRI) const override;
+
+  // If conversion by predication (only supported by some branch instructions).
+  // All of the profitability checks always return true; it is always
+  // profitable to use the predicated branches.
+  bool isProfitableToIfCvt(MachineBasicBlock &MBB,
+                           unsigned NumCycles, unsigned ExtraPredCycles,
+                           BranchProbability Probability) const override {
+    return true;
+  }
+
+  bool isProfitableToIfCvt(MachineBasicBlock &TMBB,
+                           unsigned NumT, unsigned ExtraT,
+                           MachineBasicBlock &FMBB,
+                           unsigned NumF, unsigned ExtraF,
+                           BranchProbability Probability) const override;
+
+  bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
+                                 BranchProbability Probability) const override {
+    return true;
+  }
+
+  bool isProfitableToUnpredicate(MachineBasicBlock &TMBB,
+                                 MachineBasicBlock &FMBB) const override {
+    return false;
   }
 
-  virtual bool canFoldMemoryOperand(MachineInstr *MI,
-                                    SmallVectorImpl<unsigned> &Ops) const;
-
-  virtual bool BlockHasNoFallThrough(MachineBasicBlock &MBB) const;
-  virtual bool ReverseBranchCondition(std::vector<MachineOperand> &Cond) const;
+  // Predication support.
+  bool isPredicated(const MachineInstr *MI) const override;
+
+  bool isUnpredicatedTerminator(const MachineInstr *MI) const override;
+
+  bool PredicateInstruction(MachineInstr *MI,
+                            ArrayRef<MachineOperand> Pred) const override;
+
+  bool SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
+                         ArrayRef<MachineOperand> Pred2) const override;
+
+  bool DefinesPredicate(MachineInstr *MI,
+                        std::vector<MachineOperand> &Pred) const override;
+
+  bool isPredicable(MachineInstr *MI) const override;
+
+  // Comparison optimization.
+
+
+  bool analyzeCompare(const MachineInstr *MI,
+                      unsigned &SrcReg, unsigned &SrcReg2,
+                      int &Mask, int &Value) const override;
+
+  bool optimizeCompareInstr(MachineInstr *CmpInstr,
+                            unsigned SrcReg, unsigned SrcReg2,
+                            int Mask, int Value,
+                            const MachineRegisterInfo *MRI) const override;
+
+  /// GetInstSize - Return the number of bytes of code the specified
+  /// instruction may be.  This returns the maximum number of bytes.
+  ///
+  unsigned GetInstSizeInBytes(const MachineInstr *MI) const;
+
+  void getNoopForMachoTarget(MCInst &NopInst) const override;
+
+  std::pair<unsigned, unsigned>
+  decomposeMachineOperandsTargetFlags(unsigned TF) const override;
+
+  ArrayRef<std::pair<unsigned, const char *>>
+  getSerializableDirectMachineOperandTargetFlags() const override;
+
+  ArrayRef<std::pair<unsigned, const char *>>
+  getSerializableBitmaskMachineOperandTargetFlags() const override;
 };
 
 }
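The hunks above treat the PPC970 dispatch-group properties as a small bitfield packed into an instruction's TSFlags word: three single-bit attributes (PPC970_First, PPC970_Single, PPC970_Cracked) followed by a 3-bit unit number selected with PPC970_Shift and PPC970_Mask. A minimal standalone sketch of how that layout decodes is given below; it is not part of the diff, it reproduces only the constants visible in the hunks (the remaining PPC970_Unit values follow the same "n << PPC970_Shift" pattern and are omitted), and the names decodeUnit and main are illustrative rather than anything declared by PPCInstrInfo.

// Illustrative sketch only -- mirrors the PPCII constants shown in the diff so
// the TSFlags bitfield layout can be demonstrated outside of LLVM.
#include <cstdint>
#include <cstdio>

namespace PPCII {
enum {
  PPC970_First   = 0x1,                  // starts a new dispatch group
  PPC970_Single  = 0x2,                  // sole instruction in its group
  PPC970_Cracked = 0x4,                  // cracked into two pieces
  PPC970_Shift   = 3,
  PPC970_Mask    = 0x07 << PPC970_Shift  // 3-bit pipeline/unit field
};
enum PPC970_Unit {
  PPC970_VPERM = 6 << PPC970_Shift,      // Vector Permute Unit
  PPC970_BRU   = 7 << PPC970_Shift       // Branch Unit
};
} // end namespace PPCII

// Hypothetical helper: extract the unit number by masking with PPC970_Mask and
// shifting it back down by PPC970_Shift.
static unsigned decodeUnit(uint64_t TSFlags) {
  return static_cast<unsigned>(TSFlags & PPCII::PPC970_Mask) >> PPCII::PPC970_Shift;
}

int main() {
  // Example: an instruction that must start a dispatch group and issues to the
  // Branch Unit.
  uint64_t TSFlags = PPCII::PPC970_First | PPCII::PPC970_BRU;
  std::printf("first=%d single=%d cracked=%d unit=%u\n",
              (TSFlags & PPCII::PPC970_First)   ? 1 : 0,
              (TSFlags & PPCII::PPC970_Single)  ? 1 : 0,
              (TSFlags & PPCII::PPC970_Cracked) ? 1 : 0,
              decodeUnit(TSFlags));
  return 0;
}

With these constants the sketch prints first=1 single=0 cracked=0 unit=7, i.e. the Branch Unit encoding from the enum above.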