X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTarget%2FX86%2FX86InstrInfo.h;h=57b19589545fd3646cf3317e5fdf335d8cf28e19;hb=87a2f3751c5ff7fff35a028105b0a33f993ccf77;hp=f03c34525af7e36dec4162ea4caf3355bb856309;hpb=2df3f58a0b3937f2cbd76d3417d2905ca86cf8fa;p=oota-llvm.git

diff --git a/lib/Target/X86/X86InstrInfo.h b/lib/Target/X86/X86InstrInfo.h
index f03c34525af..57b19589545 100644
--- a/lib/Target/X86/X86InstrInfo.h
+++ b/lib/Target/X86/X86InstrInfo.h
@@ -1,4 +1,4 @@
-//===- X86InstrInfo.h - X86 Instruction Information ------------*- C++ -*- ===//
+//===-- X86InstrInfo.h - X86 Instruction Information ------------*- C++ -*-===//
 //
 //                     The LLVM Compiler Infrastructure
 //
@@ -11,20 +11,20 @@
 //
 //===----------------------------------------------------------------------===//
 
-#ifndef X86INSTRUCTIONINFO_H
-#define X86INSTRUCTIONINFO_H
+#ifndef LLVM_LIB_TARGET_X86_X86INSTRINFO_H
+#define LLVM_LIB_TARGET_X86_X86INSTRINFO_H
 
-#include "llvm/Target/TargetInstrInfo.h"
-#include "X86.h"
+#include "MCTargetDesc/X86BaseInfo.h"
 #include "X86RegisterInfo.h"
 #include "llvm/ADT/DenseMap.h"
+#include "llvm/Target/TargetInstrInfo.h"
 
 #define GET_INSTRINFO_HEADER
 #include "X86GenInstrInfo.inc"
 
 namespace llvm {
   class X86RegisterInfo;
-  class X86TargetMachine;
+  class X86Subtarget;
 
 namespace X86 {
   // X86 specific condition code. These correspond to X86_*_COND in
@@ -46,6 +46,7 @@ namespace X86 {
     COND_O  = 13,
     COND_P  = 14,
     COND_S  = 15,
+    LAST_VALID_COND = COND_S,
 
     // Artificial condition codes. These are used by AnalyzeBranch
     // to indicate a block terminated with two conditional branches to
@@ -61,9 +62,21 @@ namespace X86 {
   // Turn condition code into conditional branch opcode.
   unsigned GetCondBranchFromCond(CondCode CC);
 
+  /// \brief Return a set opcode for the given condition and whether it has
+  /// a memory operand.
+  unsigned getSETFromCond(CondCode CC, bool HasMemoryOperand = false);
+
+  /// \brief Return a cmov opcode for the given condition, register size in
+  /// bytes, and operand type.
+  unsigned getCMovFromCond(CondCode CC, unsigned RegBytes,
+                           bool HasMemoryOperand = false);
+
+  // Turn CMov opcode into condition code.
+  CondCode getCondFromCMovOpc(unsigned Opc);
+
   /// GetOppositeBranchCondition - Return the inverse of the specified cond,
   /// e.g. turning COND_E to COND_NE.
-  CondCode GetOppositeBranchCondition(X86::CondCode CC);
+  CondCode GetOppositeBranchCondition(CondCode CC);
 
 } // end namespace X86;
 
@@ -108,46 +121,58 @@ inline static bool isScale(const MachineOperand &MO) {
 inline static bool isLeaMem(const MachineInstr *MI, unsigned Op) {
   if (MI->getOperand(Op).isFI()) return true;
-  return Op+4 <= MI->getNumOperands() &&
-    MI->getOperand(Op  ).isReg() && isScale(MI->getOperand(Op+1)) &&
-    MI->getOperand(Op+2).isReg() &&
-    (MI->getOperand(Op+3).isImm() ||
-     MI->getOperand(Op+3).isGlobal() ||
-     MI->getOperand(Op+3).isCPI() ||
-     MI->getOperand(Op+3).isJTI());
+  return Op+X86::AddrSegmentReg <= MI->getNumOperands() &&
+    MI->getOperand(Op+X86::AddrBaseReg).isReg() &&
+    isScale(MI->getOperand(Op+X86::AddrScaleAmt)) &&
+    MI->getOperand(Op+X86::AddrIndexReg).isReg() &&
+    (MI->getOperand(Op+X86::AddrDisp).isImm() ||
+     MI->getOperand(Op+X86::AddrDisp).isGlobal() ||
+     MI->getOperand(Op+X86::AddrDisp).isCPI() ||
+     MI->getOperand(Op+X86::AddrDisp).isJTI());
 }
 
 inline static bool isMem(const MachineInstr *MI, unsigned Op) {
   if (MI->getOperand(Op).isFI()) return true;
-  return Op+5 <= MI->getNumOperands() &&
-    MI->getOperand(Op+4).isReg() &&
+  return Op+X86::AddrNumOperands <= MI->getNumOperands() &&
+    MI->getOperand(Op+X86::AddrSegmentReg).isReg() &&
     isLeaMem(MI, Op);
 }
 
-class X86InstrInfo : public X86GenInstrInfo {
-  X86TargetMachine &TM;
+class X86InstrInfo final : public X86GenInstrInfo {
+  X86Subtarget &Subtarget;
   const X86RegisterInfo RI;
 
-  /// RegOp2MemOpTable2Addr, RegOp2MemOpTable0, RegOp2MemOpTable1,
-  /// RegOp2MemOpTable2 - Load / store folding opcode maps.
+  /// RegOp2MemOpTable3Addr, RegOp2MemOpTable0, RegOp2MemOpTable1,
+  /// RegOp2MemOpTable2, RegOp2MemOpTable3 - Load / store folding opcode maps.
   ///
-  DenseMap<unsigned, std::pair<unsigned,unsigned> > RegOp2MemOpTable2Addr;
-  DenseMap<unsigned, std::pair<unsigned,unsigned> > RegOp2MemOpTable0;
-  DenseMap<unsigned, std::pair<unsigned,unsigned> > RegOp2MemOpTable1;
-  DenseMap<unsigned, std::pair<unsigned,unsigned> > RegOp2MemOpTable2;
+  typedef DenseMap<unsigned, std::pair<unsigned,unsigned> > RegOp2MemOpTableType;
+  RegOp2MemOpTableType RegOp2MemOpTable2Addr;
+  RegOp2MemOpTableType RegOp2MemOpTable0;
+  RegOp2MemOpTableType RegOp2MemOpTable1;
+  RegOp2MemOpTableType RegOp2MemOpTable2;
+  RegOp2MemOpTableType RegOp2MemOpTable3;
 
   /// MemOp2RegOpTable - Load / store unfolding opcode map.
   ///
-  DenseMap<unsigned, std::pair<unsigned,unsigned> > MemOp2RegOpTable;
+  typedef DenseMap<unsigned, std::pair<unsigned,unsigned> > MemOp2RegOpTableType;
+  MemOp2RegOpTableType MemOp2RegOpTable;
+
+  static void AddTableEntry(RegOp2MemOpTableType &R2MTable,
+                            MemOp2RegOpTableType &M2RTable,
+                            unsigned RegOp, unsigned MemOp, unsigned Flags);
+
+  virtual void anchor();
 
 public:
-  explicit X86InstrInfo(X86TargetMachine &tm);
+  explicit X86InstrInfo(X86Subtarget &STI);
 
   /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
   /// such, whenever a client has an instance of instruction info, it should
   /// always be able to get register info as well (through this method).
   ///
-  virtual const X86RegisterInfo &getRegisterInfo() const { return RI; }
+  const X86RegisterInfo &getRegisterInfo() const { return RI; }
 
   /// isCoalescableExtInstr - Return true if the instruction is a "coalescable"
   /// extension instruction. That is, it's like a copy where it's legal for the
@@ -155,30 +180,45 @@ public:
   /// true, then it's expected the pre-extension value is available as a subreg
   /// of the result register. This also returns the sub-register index in
   /// SubIdx.
-  virtual bool isCoalescableExtInstr(const MachineInstr &MI,
-                                     unsigned &SrcReg, unsigned &DstReg,
-                                     unsigned &SubIdx) const;
+  bool isCoalescableExtInstr(const MachineInstr &MI,
+                             unsigned &SrcReg, unsigned &DstReg,
+                             unsigned &SubIdx) const override;
 
-  unsigned isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const;
+  unsigned isLoadFromStackSlot(const MachineInstr *MI,
+                               int &FrameIndex) const override;
   /// isLoadFromStackSlotPostFE - Check for post-frame ptr elimination
   /// stack locations as well. This uses a heuristic so it isn't
   /// reliable for correctness.
   unsigned isLoadFromStackSlotPostFE(const MachineInstr *MI,
-                                     int &FrameIndex) const;
+                                     int &FrameIndex) const override;
 
-  unsigned isStoreToStackSlot(const MachineInstr *MI, int &FrameIndex) const;
+  unsigned isStoreToStackSlot(const MachineInstr *MI,
+                              int &FrameIndex) const override;
   /// isStoreToStackSlotPostFE - Check for post-frame ptr elimination
   /// stack locations as well. This uses a heuristic so it isn't
   /// reliable for correctness.
   unsigned isStoreToStackSlotPostFE(const MachineInstr *MI,
-                                    int &FrameIndex) const;
+                                    int &FrameIndex) const override;
 
   bool isReallyTriviallyReMaterializable(const MachineInstr *MI,
-                                         AliasAnalysis *AA) const;
+                                         AliasAnalysis *AA) const override;
   void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                      unsigned DestReg, unsigned SubIdx,
                      const MachineInstr *Orig,
-                     const TargetRegisterInfo &TRI) const;
+                     const TargetRegisterInfo &TRI) const override;
+
+  /// Given an operand within a MachineInstr, insert preceding code to put it
+  /// into the right format for a particular kind of LEA instruction. This may
+  /// involve using an appropriate super-register instead (with an implicit use
+  /// of the original) or creating a new virtual register and inserting COPY
+  /// instructions to get the data into the right class.
+  ///
+  /// Reference parameters are set to indicate how caller should add this
+  /// operand to the LEA instruction.
+  bool classifyLEAReg(MachineInstr *MI, const MachineOperand &Src,
+                      unsigned LEAOpcode, bool AllowSP,
+                      unsigned &NewSrc, bool &isKill,
+                      bool &isUndef, MachineOperand &ImplicitOp) const;
 
   /// convertToThreeAddress - This method must be implemented by targets that
   /// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
@@ -190,60 +230,68 @@ public:
   /// This method returns a null pointer if the transformation cannot be
   /// performed, otherwise it returns the new instruction.
   ///
-  virtual MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
-                                              MachineBasicBlock::iterator &MBBI,
-                                              LiveVariables *LV) const;
+  MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
+                                      MachineBasicBlock::iterator &MBBI,
+                                      LiveVariables *LV) const override;
 
   /// commuteInstruction - We have a few instructions that must be hacked on to
   /// commute them.
   ///
-  virtual MachineInstr *commuteInstruction(MachineInstr *MI, bool NewMI) const;
+  MachineInstr *commuteInstruction(MachineInstr *MI, bool NewMI) const override;
+
+  bool findCommutedOpIndices(MachineInstr *MI, unsigned &SrcOpIdx1,
+                             unsigned &SrcOpIdx2) const override;
 
   // Branch analysis.
-  virtual bool isUnpredicatedTerminator(const MachineInstr* MI) const;
-  virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
-                             MachineBasicBlock *&FBB,
-                             SmallVectorImpl<MachineOperand> &Cond,
-                             bool AllowModify) const;
-  virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
-  virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
-                                MachineBasicBlock *FBB,
-                                const SmallVectorImpl<MachineOperand> &Cond,
-                                DebugLoc DL) const;
-  virtual void copyPhysReg(MachineBasicBlock &MBB,
-                           MachineBasicBlock::iterator MI, DebugLoc DL,
-                           unsigned DestReg, unsigned SrcReg,
-                           bool KillSrc) const;
-  virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
-                                   MachineBasicBlock::iterator MI,
-                                   unsigned SrcReg, bool isKill, int FrameIndex,
-                                   const TargetRegisterClass *RC,
-                                   const TargetRegisterInfo *TRI) const;
-
-  virtual void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
-                              SmallVectorImpl<MachineOperand> &Addr,
-                              const TargetRegisterClass *RC,
-                              MachineInstr::mmo_iterator MMOBegin,
-                              MachineInstr::mmo_iterator MMOEnd,
-                              SmallVectorImpl<MachineInstr*> &NewMIs) const;
-
-  virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
-                                    MachineBasicBlock::iterator MI,
-                                    unsigned DestReg, int FrameIndex,
-                                    const TargetRegisterClass *RC,
-                                    const TargetRegisterInfo *TRI) const;
-
-  virtual void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
-                               SmallVectorImpl<MachineOperand> &Addr,
-                               const TargetRegisterClass *RC,
-                               MachineInstr::mmo_iterator MMOBegin,
-                               MachineInstr::mmo_iterator MMOEnd,
-                               SmallVectorImpl<MachineInstr*> &NewMIs) const;
-  virtual
-  MachineInstr *emitFrameIndexDebugValue(MachineFunction &MF,
-                                         int FrameIx, uint64_t Offset,
-                                         const MDNode *MDPtr,
-                                         DebugLoc DL) const;
+  bool isUnpredicatedTerminator(const MachineInstr* MI) const override;
+  bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+                     MachineBasicBlock *&FBB,
+                     SmallVectorImpl<MachineOperand> &Cond,
+                     bool AllowModify) const override;
+  unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
+  unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+                        MachineBasicBlock *FBB,
+                        const SmallVectorImpl<MachineOperand> &Cond,
+                        DebugLoc DL) const override;
+  bool canInsertSelect(const MachineBasicBlock&,
+                       const SmallVectorImpl<MachineOperand> &Cond,
+                       unsigned, unsigned, int&, int&, int&) const override;
+  void insertSelect(MachineBasicBlock &MBB,
+                    MachineBasicBlock::iterator MI, DebugLoc DL,
+                    unsigned DstReg,
+                    const SmallVectorImpl<MachineOperand> &Cond,
+                    unsigned TrueReg, unsigned FalseReg) const override;
+  void copyPhysReg(MachineBasicBlock &MBB,
+                   MachineBasicBlock::iterator MI, DebugLoc DL,
+                   unsigned DestReg, unsigned SrcReg,
+                   bool KillSrc) const override;
+  void storeRegToStackSlot(MachineBasicBlock &MBB,
+                           MachineBasicBlock::iterator MI,
+                           unsigned SrcReg, bool isKill, int FrameIndex,
+                           const TargetRegisterClass *RC,
+                           const TargetRegisterInfo *TRI) const override;
+
+  void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
+                      SmallVectorImpl<MachineOperand> &Addr,
+                      const TargetRegisterClass *RC,
+                      MachineInstr::mmo_iterator MMOBegin,
+                      MachineInstr::mmo_iterator MMOEnd,
+                      SmallVectorImpl<MachineInstr*> &NewMIs) const;
+
+  void loadRegFromStackSlot(MachineBasicBlock &MBB,
+                            MachineBasicBlock::iterator MI,
+                            unsigned DestReg, int FrameIndex,
+                            const TargetRegisterClass *RC,
+                            const TargetRegisterInfo *TRI) const override;
+
+  void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
+                       SmallVectorImpl<MachineOperand> &Addr,
+                       const TargetRegisterClass *RC,
+                       MachineInstr::mmo_iterator MMOBegin,
+                       MachineInstr::mmo_iterator MMOEnd,
+                       SmallVectorImpl<MachineInstr*> &NewMIs) const;
+
+  bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;
 
   /// foldMemoryOperand - If this target supports it, fold a load or store of
   /// the specified stack slot into the specified machine instruction for the
@@ -251,33 +299,33 @@ public:
   /// folding and return true, otherwise it should return false. If it folds
   /// the instruction, it is likely that the MachineInstruction the iterator
   /// references has been changed.
-  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
-                                              MachineInstr* MI,
-                                              const SmallVectorImpl<unsigned> &Ops,
-                                              int FrameIndex) const;
+  MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                      MachineInstr* MI,
+                                      const SmallVectorImpl<unsigned> &Ops,
+                                      int FrameIndex) const override;
 
   /// foldMemoryOperand - Same as the previous version except it allows folding
   /// of any load and store from / to any address, not just from a specific
   /// stack slot.
-  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
-                                              MachineInstr* MI,
-                                              const SmallVectorImpl<unsigned> &Ops,
-                                              MachineInstr* LoadMI) const;
+  MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                      MachineInstr* MI,
+                                      const SmallVectorImpl<unsigned> &Ops,
+                                      MachineInstr* LoadMI) const override;
 
   /// canFoldMemoryOperand - Returns true for the specified load / store if
   /// folding is possible.
-  virtual bool canFoldMemoryOperand(const MachineInstr*,
-                                    const SmallVectorImpl<unsigned> &) const;
+  bool canFoldMemoryOperand(const MachineInstr*,
+                            const SmallVectorImpl<unsigned> &) const override;
 
   /// unfoldMemoryOperand - Separate a single instruction which folded a load or
   /// a store or a load and a store into two or more instructions. If this is
   /// possible, returns true as well as the new instructions by reference.
-  virtual bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
-                                   unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
-                                   SmallVectorImpl<MachineInstr*> &NewMIs) const;
+  bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
+                           unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
+                           SmallVectorImpl<MachineInstr*> &NewMIs) const override;
 
-  virtual bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
-                                   SmallVectorImpl<SDNode*> &NewNodes) const;
+  bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
+                           SmallVectorImpl<SDNode*> &NewNodes) const override;
 
   /// getOpcodeAfterMemoryUnfold - Returns the opcode of the would be new
   /// instruction after load / store are unfolded from an instruction of the
@@ -285,17 +333,17 @@ public:
   /// possible. If LoadRegIndex is non-null, it is filled in with the operand
   /// index of the operand which will hold the register holding the loaded
   /// value.
-  virtual unsigned getOpcodeAfterMemoryUnfold(unsigned Opc,
-                                              bool UnfoldLoad, bool UnfoldStore,
-                                              unsigned *LoadRegIndex = 0) const;
+  unsigned getOpcodeAfterMemoryUnfold(unsigned Opc,
+                                      bool UnfoldLoad, bool UnfoldStore,
+                                      unsigned *LoadRegIndex = nullptr) const override;
 
   /// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler
   /// to determine if two loads are loading from the same base address. It
   /// should only return true if the base pointers are the same and the
   /// only differences between the two addresses are the offset. It also returns
   /// the offsets by reference.
-  virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
-                                       int64_t &Offset1, int64_t &Offset2) const;
+  bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, int64_t &Offset1,
+                               int64_t &Offset2) const override;
 
   /// shouldScheduleLoadsNear - This is used by the pre-regalloc scheduler to
   /// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should
@@ -305,18 +353,28 @@ public:
   /// from the common base address. It returns true if it decides it's desirable
   /// to schedule the two loads together. "NumLoads" is the number of loads that
   /// have already been scheduled after Load1.
-  virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
-                                       int64_t Offset1, int64_t Offset2,
-                                       unsigned NumLoads) const;
+  bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
+                               int64_t Offset1, int64_t Offset2,
+                               unsigned NumLoads) const override;
+
+  bool shouldScheduleAdjacent(MachineInstr* First,
+                              MachineInstr *Second) const override;
 
-  virtual void getNoopForMachoTarget(MCInst &NopInst) const;
+  void getNoopForMachoTarget(MCInst &NopInst) const override;
 
-  virtual
-  bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
+  bool
+  ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
 
   /// isSafeToMoveRegClassDefs - Return true if it's safe to move a machine
   /// instruction that defines the specified register class.
-  bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const;
+  bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const override;
+
+  /// isSafeToClobberEFLAGS - Return true if it's safe to insert an instruction
+  /// that would clobber the EFLAGS condition register. Note the result may be
+  /// conservative. If it cannot definitely determine the safety after visiting
+  /// a few instructions in each direction it assumes it's not safe.
+  bool isSafeToClobberEFLAGS(MachineBasicBlock &MBB,
+                             MachineBasicBlock::iterator I) const;
 
   static bool isX86_64ExtendedReg(const MachineOperand &MO) {
     if (!MO.isReg()) return false;
@@ -329,25 +387,68 @@ public:
   ///
   unsigned getGlobalBaseReg(MachineFunction *MF) const;
 
-  /// GetSSEDomain - Return the SSE execution domain of MI as the first element,
-  /// and a bitmask of possible arguments to SetSSEDomain as the second.
-  std::pair<unsigned, unsigned> GetSSEDomain(const MachineInstr *MI) const;
+  std::pair<uint16_t, uint16_t>
+  getExecutionDomain(const MachineInstr *MI) const override;
+
+  void setExecutionDomain(MachineInstr *MI, unsigned Domain) const override;
 
-  /// SetSSEDomain - Set the SSEDomain of MI.
-  void SetSSEDomain(MachineInstr *MI, unsigned Domain) const;
+  unsigned
+  getPartialRegUpdateClearance(const MachineInstr *MI, unsigned OpNum,
+                               const TargetRegisterInfo *TRI) const override;
+  unsigned getUndefRegClearance(const MachineInstr *MI, unsigned &OpNum,
+                                const TargetRegisterInfo *TRI) const override;
+  void breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
+                                 const TargetRegisterInfo *TRI) const override;
 
   MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
                                       MachineInstr* MI,
                                       unsigned OpNum,
                                       const SmallVectorImpl<MachineOperand> &MOs,
-                                      unsigned Size, unsigned Alignment) const;
+                                      unsigned Size, unsigned Alignment,
+                                      bool AllowCommute) const;
+
+  void
+  getUnconditionalBranch(MCInst &Branch,
+                         const MCSymbolRefExpr *BranchTarget) const override;
+
+  void getTrap(MCInst &MI) const override;
+
+  unsigned getJumpInstrTableEntryBound() const override;
 
-  bool isHighLatencyDef(int opc) const;
+  bool isHighLatencyDef(int opc) const override;
 
   bool hasHighOperandLatency(const InstrItineraryData *ItinData,
                              const MachineRegisterInfo *MRI,
                              const MachineInstr *DefMI, unsigned DefIdx,
-                             const MachineInstr *UseMI, unsigned UseIdx) const;
+                             const MachineInstr *UseMI,
+                             unsigned UseIdx) const override;
+
+  /// analyzeCompare - For a comparison instruction, return the source registers
+  /// in SrcReg and SrcReg2 if having two register operands, and the value it
+  /// compares against in CmpValue. Return true if the comparison instruction
+  /// can be analyzed.
+  bool analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
+                      unsigned &SrcReg2, int &CmpMask,
+                      int &CmpValue) const override;
+
+  /// optimizeCompareInstr - Check if there exists an earlier instruction that
+  /// operates on the same source operands and sets flags in the same way as
+  /// Compare; remove Compare if possible.
+  bool optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg,
+                            unsigned SrcReg2, int CmpMask, int CmpValue,
+                            const MachineRegisterInfo *MRI) const override;
+
+  /// optimizeLoadInstr - Try to remove the load by folding it to a register
+  /// operand at the use. We fold the load instructions if and only if the
+  /// def and use are in the same BB. We only look at one load and see
+  /// whether it can be folded into MI. FoldAsLoadDefReg is the virtual register
+  /// defined by the load we are trying to fold. DefMI returns the machine
+  /// instruction that defines FoldAsLoadDefReg, and the function returns
+  /// the machine instruction generated due to folding.
+  MachineInstr* optimizeLoadInstr(MachineInstr *MI,
+                                  const MachineRegisterInfo *MRI,
+                                  unsigned &FoldAsLoadDefReg,
+                                  MachineInstr *&DefMI) const override;
 
 private:
   MachineInstr * convertToThreeAddressWithLEA(unsigned MIOpc,