X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTarget%2FX86%2FX86InstrInfo.h;h=2c20232df2950e7078d99d21857706f6fd9a5c76;hb=9edf7deb37f0f97664f279040fa15d89f32e23d9;hp=1413310b3c0c7848e513b8ca3e0bdb20823962c1;hpb=ef93cecd80ebdecb0ea2b2c316370998151308e2;p=oota-llvm.git

diff --git a/lib/Target/X86/X86InstrInfo.h b/lib/Target/X86/X86InstrInfo.h
index 1413310b3c0..2c20232df29 100644
--- a/lib/Target/X86/X86InstrInfo.h
+++ b/lib/Target/X86/X86InstrInfo.h
@@ -17,8 +17,7 @@
 #include "llvm/Target/TargetInstrInfo.h"
 #include "X86.h"
 #include "X86RegisterInfo.h"
-#include "llvm/ADT/IndexedMap.h"
-#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/ADT/DenseMap.h"

 namespace llvm {
   class X86RegisterInfo;
@@ -44,6 +43,15 @@ namespace X86 {
     COND_O  = 13,
     COND_P  = 14,
     COND_S  = 15,
+
+    // Artificial condition codes.  These are used by AnalyzeBranch
+    // to indicate a block terminated with two conditional branches to
+    // the same location.  This occurs in code using FCMP_OEQ or FCMP_UNE,
+    // which can't be represented on x86 with a single condition.  These
+    // are never used in MachineInstrs.
+    COND_NE_OR_P,
+    COND_NP_OR_E,
+
     COND_INVALID
   };
@@ -56,13 +64,158 @@ namespace X86 {
 }

+/// X86II - This namespace holds all of the target specific flags that
+/// instruction info tracks.
+///
+namespace X86II {
+  /// Target Operand Flag enum.
+  enum TOF {
+    //===------------------------------------------------------------------===//
+    // X86 Specific MachineOperand flags.
+
+    MO_NO_FLAG,
+
+    /// MO_GOT_ABSOLUTE_ADDRESS - On a symbol operand, this represents a
+    /// relocation of:
+    ///    SYMBOL_LABEL + [. - PICBASELABEL]
+    MO_GOT_ABSOLUTE_ADDRESS,
+
+    /// MO_PIC_BASE_OFFSET - On a symbol operand this indicates that the
+    /// immediate should get the value of the symbol minus the PIC base label:
+    ///    SYMBOL_LABEL - PICBASELABEL
+    MO_PIC_BASE_OFFSET,
+
+    /// MO_GOT - On a symbol operand this indicates that the immediate is the
+    /// offset to the GOT entry for the symbol name from the base of the GOT.
+    ///
+    /// See the X86-64 ELF ABI supplement for more details.
+    ///    SYMBOL_LABEL @GOT
+    MO_GOT,
+
+    /// MO_GOTOFF - On a symbol operand this indicates that the immediate is
+    /// the offset to the location of the symbol name from the base of the GOT.
+    ///
+    /// See the X86-64 ELF ABI supplement for more details.
+    ///    SYMBOL_LABEL @GOTOFF
+    MO_GOTOFF,
+
+    /// MO_GOTPCREL - On a symbol operand this indicates that the immediate is
+    /// the offset to the GOT entry for the symbol name from the current code
+    /// location.
+    ///
+    /// See the X86-64 ELF ABI supplement for more details.
+    ///    SYMBOL_LABEL @GOTPCREL
+    MO_GOTPCREL,
+
+    /// MO_PLT - On a symbol operand this indicates that the immediate is the
+    /// offset to the PLT entry for the symbol name from the current code
+    /// location.
+    ///
+    /// See the X86-64 ELF ABI supplement for more details.
+    ///    SYMBOL_LABEL @PLT
+    MO_PLT,
+
+    /// MO_TLSGD - On a symbol operand this indicates that the immediate is
+    /// some TLS offset.
+    ///
+    /// See 'ELF Handling for Thread-Local Storage' for more details.
+    ///    SYMBOL_LABEL @TLSGD
+    MO_TLSGD,
+
+    /// MO_GOTTPOFF - On a symbol operand this indicates that the immediate is
+    /// some TLS offset.
+    ///
+    /// See 'ELF Handling for Thread-Local Storage' for more details.
+    ///    SYMBOL_LABEL @GOTTPOFF
+    MO_GOTTPOFF,
+
+    /// MO_INDNTPOFF - On a symbol operand this indicates that the immediate is
+    /// some TLS offset.
+    ///
+    /// See 'ELF Handling for Thread-Local Storage' for more details.
+    ///    SYMBOL_LABEL @INDNTPOFF
+    MO_INDNTPOFF,
+
+    /// MO_TPOFF - On a symbol operand this indicates that the immediate is
+    /// some TLS offset.
+    ///
+    /// See 'ELF Handling for Thread-Local Storage' for more details.
+    ///    SYMBOL_LABEL @TPOFF
+    MO_TPOFF,
+
+    /// MO_NTPOFF - On a symbol operand this indicates that the immediate is
+    /// some TLS offset.
+    ///
+    /// See 'ELF Handling for Thread-Local Storage' for more details.
+    ///    SYMBOL_LABEL @NTPOFF
+    MO_NTPOFF,
+
+    /// MO_DLLIMPORT - On a symbol operand "FOO", this indicates that the
+    /// reference is actually to the "__imp_FOO" symbol.  This is used for
+    /// dllimport linkage on Windows.
+    MO_DLLIMPORT,
+
+    /// MO_DARWIN_STUB - On a symbol operand "FOO", this indicates that the
+    /// reference is actually to the "FOO$stub" symbol.  This is used for calls
+    /// and jumps to external functions on Tiger and earlier.
+    MO_DARWIN_STUB,
+
+    /// MO_DARWIN_NONLAZY - On a symbol operand "FOO", this indicates that the
+    /// reference is actually to the "FOO$non_lazy_ptr" symbol, which is a
+    /// non-PIC-base-relative reference to a non-hidden dyld lazy pointer stub.
+    MO_DARWIN_NONLAZY,
+
+    /// MO_DARWIN_NONLAZY_PIC_BASE - On a symbol operand "FOO", this indicates
+    /// that the reference is actually to "FOO$non_lazy_ptr - PICBASE", which is
+    /// a PIC-base-relative reference to a non-hidden dyld lazy pointer stub.
+    MO_DARWIN_NONLAZY_PIC_BASE,
+
+    /// MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE - On a symbol operand "FOO", this
+    /// indicates that the reference is actually to "FOO$non_lazy_ptr - PICBASE",
+    /// which is a PIC-base-relative reference to a hidden dyld lazy pointer
+    /// stub.
+    MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE
+  };
+}
+
+/// isGlobalStubReference - Return true if the specified TargetFlag operand is
+/// a reference to a stub for a global, not the global itself.
+inline static bool isGlobalStubReference(unsigned char TargetFlag) {
+  switch (TargetFlag) {
+  case X86II::MO_DLLIMPORT:  // dllimport stub.
+  case X86II::MO_GOTPCREL:   // rip-relative GOT reference.
+  case X86II::MO_GOT:        // normal GOT reference.
+  case X86II::MO_DARWIN_NONLAZY_PIC_BASE:        // Normal $non_lazy_ptr ref.
+  case X86II::MO_DARWIN_NONLAZY:                 // Normal $non_lazy_ptr ref.
+  case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE: // Hidden $non_lazy_ptr ref.
+    return true;
+  default:
+    return false;
+  }
+}
+
+/// isGlobalRelativeToPICBase - Return true if the specified global value
+/// reference is relative to a 32-bit PIC base (X86ISD::GlobalBaseReg).  If this
+/// is true, the addressing mode has the PIC base register added in (e.g. EBX).
+inline static bool isGlobalRelativeToPICBase(unsigned char TargetFlag) {
+  switch (TargetFlag) {
+  case X86II::MO_GOTOFF:                         // isPICStyleGOT: local global.
+  case X86II::MO_GOT:                            // isPICStyleGOT: other global.
+  case X86II::MO_PIC_BASE_OFFSET:                // Darwin local global.
+  case X86II::MO_DARWIN_NONLAZY_PIC_BASE:        // Darwin/32 external global.
+  case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE: // Darwin/32 hidden global.
+    return true;
+  default:
+    return false;
+  }
+}
+
 /// X86II - This namespace holds all of the target specific flags that
 /// instruction info tracks.
 ///
 namespace X86II {
   enum {
     //===------------------------------------------------------------------===//
-    // Instruction types.  These are the standard/most common forms for X86
+    // Instruction encodings.  These are the standard/most common forms for X86
     // instructions.
// @@ -115,6 +268,18 @@ namespace X86II { // MRMInitReg - This form is used for instructions whose source and // destinations are the same register. MRMInitReg = 32, + + //// MRM_C1 - A mod/rm byte of exactly 0xC1. + MRM_C1 = 33, + MRM_C2 = 34, + MRM_C3 = 35, + MRM_C4 = 36, + MRM_C8 = 37, + MRM_C9 = 38, + MRM_E8 = 39, + MRM_F0 = 40, + MRM_F8 = 41, + MRM_F9 = 42, FormMask = 63, @@ -161,6 +326,9 @@ namespace X86II { // T8, TA - Prefix after the 0x0F prefix. T8 = 13 << Op0Shift, TA = 14 << Op0Shift, + + // TF - Prefix before and after 0x0F + TF = 15 << Op0Shift, //===------------------------------------------------------------------===// // REX_W - REX prefixes are instruction prefixes used in 64-bit mode. @@ -175,11 +343,13 @@ namespace X86II { // This three-bit field describes the size of an immediate operand. Zero is // unused so that we can tell if we forgot to set a value. ImmShift = 13, - ImmMask = 7 << ImmShift, - Imm8 = 1 << ImmShift, - Imm16 = 2 << ImmShift, - Imm32 = 3 << ImmShift, - Imm64 = 4 << ImmShift, + ImmMask = 7 << ImmShift, + Imm8 = 1 << ImmShift, + Imm8PCRel = 2 << ImmShift, + Imm16 = 3 << ImmShift, + Imm32 = 4 << ImmShift, + Imm32PCRel = 5 << ImmShift, + Imm64 = 6 << ImmShift, //===------------------------------------------------------------------===// // FP Instruction Classification... Zero is non-fp instruction. @@ -228,19 +398,65 @@ namespace X86II { FS = 1 << SegOvrShift, GS = 2 << SegOvrShift, - // Bits 22 -> 23 are unused + // Execution domain for SSE instructions in bits 22, 23. + // 0 in bits 22-23 means normal, non-SSE instruction. + SSEDomainShift = 22, + OpcodeShift = 24, OpcodeMask = 0xFF << OpcodeShift }; + + // getBaseOpcodeFor - This function returns the "base" X86 opcode for the + // specified machine instruction. + // + static inline unsigned char getBaseOpcodeFor(unsigned TSFlags) { + return TSFlags >> X86II::OpcodeShift; + } + + static inline bool hasImm(unsigned TSFlags) { + return (TSFlags & X86II::ImmMask) != 0; + } + + /// getSizeOfImm - Decode the "size of immediate" field from the TSFlags field + /// of the specified instruction. + static inline unsigned getSizeOfImm(unsigned TSFlags) { + switch (TSFlags & X86II::ImmMask) { + default: assert(0 && "Unknown immediate size"); + case X86II::Imm8: + case X86II::Imm8PCRel: return 1; + case X86II::Imm16: return 2; + case X86II::Imm32: + case X86II::Imm32PCRel: return 4; + case X86II::Imm64: return 8; + } + } + + /// isImmPCRel - Return true if the immediate of the specified instruction's + /// TSFlags indicates that it is pc relative. 
+  static inline unsigned isImmPCRel(unsigned TSFlags) {
+    switch (TSFlags & X86II::ImmMask) {
+    default: assert(0 && "Unknown immediate size");
+    case X86II::Imm8PCRel:
+    case X86II::Imm32PCRel:
+      return true;
+    case X86II::Imm8:
+    case X86II::Imm16:
+    case X86II::Imm32:
+    case X86II::Imm64:
+      return false;
+    }
+  }
 }

+const int X86AddrNumOperands = 5;
+
 inline static bool isScale(const MachineOperand &MO) {
   return MO.isImm() &&
     (MO.getImm() == 1 || MO.getImm() == 2 ||
      MO.getImm() == 4 || MO.getImm() == 8);
 }

-inline static bool isMem(const MachineInstr *MI, unsigned Op) {
+inline static bool isLeaMem(const MachineInstr *MI, unsigned Op) {
   if (MI->getOperand(Op).isFI()) return true;
   return Op+4 <= MI->getNumOperands() &&
     MI->getOperand(Op  ).isReg() && isScale(MI->getOperand(Op+1)) &&
@@ -251,6 +467,13 @@ inline static bool isMem(const MachineInstr *MI, unsigned Op) {
     MI->getOperand(Op+3).isJTI());
 }

+inline static bool isMem(const MachineInstr *MI, unsigned Op) {
+  if (MI->getOperand(Op).isFI()) return true;
+  return Op+5 <= MI->getNumOperands() &&
+    MI->getOperand(Op+4).isReg() &&
+    isLeaMem(MI, Op);
+}
+
 class X86InstrInfo : public TargetInstrInfoImpl {
   X86TargetMachine &TM;
   const X86RegisterInfo RI;
@@ -258,15 +481,15 @@ class X86InstrInfo : public TargetInstrInfoImpl {
   /// RegOp2MemOpTable2Addr, RegOp2MemOpTable0, RegOp2MemOpTable1,
   /// RegOp2MemOpTable2 - Load / store folding opcode maps.
   ///
-  DenseMap<unsigned, unsigned> RegOp2MemOpTable2Addr;
-  DenseMap<unsigned, unsigned> RegOp2MemOpTable0;
-  DenseMap<unsigned, unsigned> RegOp2MemOpTable1;
-  DenseMap<unsigned, unsigned> RegOp2MemOpTable2;
+  DenseMap<unsigned, std::pair<unsigned,unsigned> > RegOp2MemOpTable2Addr;
+  DenseMap<unsigned, std::pair<unsigned,unsigned> > RegOp2MemOpTable0;
+  DenseMap<unsigned, std::pair<unsigned,unsigned> > RegOp2MemOpTable1;
+  DenseMap<unsigned, std::pair<unsigned,unsigned> > RegOp2MemOpTable2;

   /// MemOp2RegOpTable - Load / store unfolding opcode map.
   ///
   DenseMap<unsigned, std::pair<unsigned, unsigned> > MemOp2RegOpTable;
-  
+
 public:
   explicit X86InstrInfo(X86TargetMachine &tm);

@@ -276,19 +499,63 @@ public:
   ///
   virtual const X86RegisterInfo &getRegisterInfo() const { return RI; }

-  // Return true if the instruction is a register to register move and
-  // leave the source and dest operands in the passed parameters.
-  //
-  bool isMoveInstr(const MachineInstr& MI, unsigned& sourceReg,
-                   unsigned& destReg) const;
-  unsigned isLoadFromStackSlot(MachineInstr *MI, int &FrameIndex) const;
-  unsigned isStoreToStackSlot(MachineInstr *MI, int &FrameIndex) const;
-
-  bool isReallyTriviallyReMaterializable(const MachineInstr *MI) const;
+  /// Return true if the instruction is a register to register move and return
+  /// the source and dest operands and their sub-register indices by reference.
+  virtual bool isMoveInstr(const MachineInstr &MI,
+                           unsigned &SrcReg, unsigned &DstReg,
+                           unsigned &SrcSubIdx, unsigned &DstSubIdx) const;
+
+  /// isCoalescableExtInstr - Return true if the instruction is a "coalescable"
+  /// extension instruction.  That is, it's like a copy where it's legal for the
+  /// source to overlap the destination, e.g. X86::MOVSX64rr32.  If this returns
+  /// true, then it's expected the pre-extension value is available as a subreg
+  /// of the result register.  This also returns the sub-register index in
+  /// SubIdx.
+  virtual bool isCoalescableExtInstr(const MachineInstr &MI,
+                                     unsigned &SrcReg, unsigned &DstReg,
+                                     unsigned &SubIdx) const;
+
+  unsigned isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const;
+  /// isLoadFromStackSlotPostFE - Check for post-frame ptr elimination
+  /// stack locations as well.  This uses a heuristic, so it isn't
+  /// reliable for correctness.
+  unsigned isLoadFromStackSlotPostFE(const MachineInstr *MI,
+                                     int &FrameIndex) const;
+
+  /// hasLoadFromStackSlot - If the specified machine instruction has
+  /// a load from a stack slot, return true along with the FrameIndex
+  /// of the loaded stack slot and the machine mem operand containing
+  /// the reference.  If not, return false.  Unlike
+  /// isLoadFromStackSlot, this returns true for any instruction that
+  /// loads from the stack.  This is a hint only and may not catch all
+  /// cases.
+  bool hasLoadFromStackSlot(const MachineInstr *MI,
+                            const MachineMemOperand *&MMO,
+                            int &FrameIndex) const;
+
+  unsigned isStoreToStackSlot(const MachineInstr *MI, int &FrameIndex) const;
+  /// isStoreToStackSlotPostFE - Check for post-frame ptr elimination
+  /// stack locations as well.  This uses a heuristic, so it isn't
+  /// reliable for correctness.
+  unsigned isStoreToStackSlotPostFE(const MachineInstr *MI,
+                                    int &FrameIndex) const;
+
+  /// hasStoreToStackSlot - If the specified machine instruction has a
+  /// store to a stack slot, return true along with the FrameIndex of
+  /// the stored stack slot and the machine mem operand containing the
+  /// reference.  If not, return false.  Unlike isStoreToStackSlot,
+  /// this returns true for any instruction that stores to the stack.
+  /// This is a hint only and may not catch all cases.
+  bool hasStoreToStackSlot(const MachineInstr *MI,
+                           const MachineMemOperand *&MMO,
+                           int &FrameIndex) const;
+
+  bool isReallyTriviallyReMaterializable(const MachineInstr *MI,
+                                         AliasAnalysis *AA) const;
   void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
-                     unsigned DestReg, const MachineInstr *Orig) const;
-
-  bool isInvariantLoad(MachineInstr *MI) const;
+                     unsigned DestReg, unsigned SubIdx,
+                     const MachineInstr *Orig,
+                     const TargetRegisterInfo &TRI) const;

   /// convertToThreeAddress - This method must be implemented by targets that
   /// set the M_CONVERTIBLE_TO_3_ADDR flag.  When this flag is set, the target
@@ -313,7 +580,8 @@ public:
   virtual bool isUnpredicatedTerminator(const MachineInstr* MI) const;
   virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                              MachineBasicBlock *&FBB,
-                             SmallVectorImpl<MachineOperand> &Cond) const;
+                             SmallVectorImpl<MachineOperand> &Cond,
+                             bool AllowModify) const;
   virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
   virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                                 MachineBasicBlock *FBB,
@@ -322,57 +590,73 @@ public:
                                 MachineBasicBlock::iterator MI,
                                 unsigned DestReg, unsigned SrcReg,
                                 const TargetRegisterClass *DestRC,
-                                const TargetRegisterClass *SrcRC) const;
+                                const TargetRegisterClass *SrcRC,
+                                DebugLoc DL) const;
   virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator MI,
                                    unsigned SrcReg, bool isKill, int FrameIndex,
-                                   const TargetRegisterClass *RC) const;
+                                   const TargetRegisterClass *RC,
+                                   const TargetRegisterInfo *TRI) const;

   virtual void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
                               SmallVectorImpl<MachineOperand> &Addr,
                               const TargetRegisterClass *RC,
+                              MachineInstr::mmo_iterator MMOBegin,
+                              MachineInstr::mmo_iterator MMOEnd,
                               SmallVectorImpl<MachineInstr*> &NewMIs) const;

   virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MI,
                                     unsigned DestReg, int FrameIndex,
-                                    const TargetRegisterClass *RC) const;
+                                    const TargetRegisterClass *RC,
+                                    const TargetRegisterInfo *TRI) const;

   virtual void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
                                SmallVectorImpl<MachineOperand> &Addr,
                                const TargetRegisterClass *RC,
+                               MachineInstr::mmo_iterator MMOBegin,
+                               MachineInstr::mmo_iterator MMOEnd,
                                SmallVectorImpl<MachineInstr*> &NewMIs) const;

   virtual bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator MI,
-                                         const std::vector<CalleeSavedInfo> &CSI) const;
+                                         const std::vector<CalleeSavedInfo> &CSI,
+                                         const TargetRegisterInfo *TRI) const;

   virtual bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
                                            MachineBasicBlock::iterator MI,
-                                           const std::vector<CalleeSavedInfo> &CSI) const;
+                                           const std::vector<CalleeSavedInfo> &CSI,
+                                           const TargetRegisterInfo *TRI) const;
+
+  virtual
+  MachineInstr *emitFrameIndexDebugValue(MachineFunction &MF,
+                                         int FrameIx, uint64_t Offset,
+                                         const MDNode *MDPtr,
+                                         DebugLoc DL) const;
+
   /// foldMemoryOperand - If this target supports it, fold a load or store of
   /// the specified stack slot into the specified machine instruction for the
   /// specified operand(s).  If this is possible, the target should perform the
   /// folding and return true, otherwise it should return false.  If it folds
   /// the instruction, it is likely that the MachineInstruction the iterator
   /// references has been changed.
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                          MachineInstr* MI,
-                                          SmallVectorImpl<unsigned> &Ops,
-                                          int FrameIndex) const;
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                              MachineInstr* MI,
+                                              const SmallVectorImpl<unsigned> &Ops,
+                                              int FrameIndex) const;

   /// foldMemoryOperand - Same as the previous version except it allows folding
   /// of any load and store from / to any address, not just from a specific
   /// stack slot.
-  virtual MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                          MachineInstr* MI,
-                                          SmallVectorImpl<unsigned> &Ops,
-                                          MachineInstr* LoadMI) const;
+  virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                              MachineInstr* MI,
+                                              const SmallVectorImpl<unsigned> &Ops,
+                                              MachineInstr* LoadMI) const;

   /// canFoldMemoryOperand - Returns true for the specified load / store if
   /// folding is possible.
-  virtual bool canFoldMemoryOperand(MachineInstr*, SmallVectorImpl<unsigned> &) const;
+  virtual bool canFoldMemoryOperand(const MachineInstr*,
+                                    const SmallVectorImpl<unsigned> &) const;

   /// unfoldMemoryOperand - Separate a single instruction which folded a load or
   /// a store or a load and a store into two or more instructions.  If this is
@@ -387,36 +671,57 @@ public:
   /// getOpcodeAfterMemoryUnfold - Returns the opcode of the would be new
   /// instruction after load / store are unfolded from an instruction of the
   /// specified opcode.  It returns zero if the specified unfolding is not
-  /// possible.
+  /// possible.  If LoadRegIndex is non-null, it is filled in with the operand
+  /// index of the operand which will hold the register holding the loaded
+  /// value.
   virtual unsigned getOpcodeAfterMemoryUnfold(unsigned Opc,
-                                      bool UnfoldLoad, bool UnfoldStore) const;
+                                      bool UnfoldLoad, bool UnfoldStore,
+                                      unsigned *LoadRegIndex = 0) const;

-  virtual bool BlockHasNoFallThrough(MachineBasicBlock &MBB) const;
+  /// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler
+  /// to determine if two loads are loading from the same base address.  It
+  /// should only return true if the base pointers are the same and the
+  /// only difference between the two addresses is the offset.  It also returns
+  /// the offsets by reference.
+  virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
+                                       int64_t &Offset1, int64_t &Offset2) const;
+
+  /// shouldScheduleLoadsNear - This is used by the pre-regalloc scheduler to
+  /// determine (in conjunction with areLoadsFromSameBasePtr) if two loads
+  /// should be scheduled together.  On some targets, if two loads are loading
+  /// from addresses in the same cache line, it's better if they are scheduled
+  /// together.  This function takes two integers that represent the load
+  /// offsets from the common base address.  It returns true if it decides it's
+  /// desirable to schedule the two loads together.  "NumLoads" is the number
+  /// of loads that have already been scheduled after Load1.
+  virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
+                                       int64_t Offset1, int64_t Offset2,
+                                       unsigned NumLoads) const;
+
+  virtual void getNoopForMachoTarget(MCInst &NopInst) const;
+
   virtual
   bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;

-  const TargetRegisterClass *getPointerRegClass() const;
+  /// isSafeToMoveRegClassDefs - Return true if it's safe to move a machine
+  /// instruction that defines the specified register class.
+  bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const;

-  // getBaseOpcodeFor - This function returns the "base" X86 opcode for the
-  // specified machine instruction.
-  //
-  unsigned char getBaseOpcodeFor(const TargetInstrDesc *TID) const {
-    return TID->TSFlags >> X86II::OpcodeShift;
-  }
-  unsigned char getBaseOpcodeFor(unsigned Opcode) const {
-    return getBaseOpcodeFor(&get(Opcode));
-  }
-
   static bool isX86_64NonExtLowByteReg(unsigned reg) {
     return (reg == X86::SPL || reg == X86::BPL ||
             reg == X86::SIL || reg == X86::DIL);
   }

-  static unsigned sizeOfImm(const TargetInstrDesc *Desc);
-  static unsigned getX86RegNum(unsigned RegNo);
-  static bool isX86_64ExtendedReg(const MachineOperand &MO);
+  static bool isX86_64ExtendedReg(const MachineOperand &MO) {
+    if (!MO.isReg()) return false;
+    return isX86_64ExtendedReg(MO.getReg());
+  }
   static unsigned determineREX(const MachineInstr &MI);

+  /// isX86_64ExtendedReg - Is the MachineOperand an x86-64 extended (r8 or
+  /// higher) register?  e.g. r8, xmm8, xmm13, etc.
+  static bool isX86_64ExtendedReg(unsigned RegNo);
+
   /// GetInstSize - Returns the size of the specified MachineInstr.
   ///
   virtual unsigned GetInstSizeInBytes(const MachineInstr *MI) const;
@@ -427,11 +732,29 @@ public:
   ///
   unsigned getGlobalBaseReg(MachineFunction *MF) const;

+  /// GetSSEDomain - Return the SSE execution domain of MI as the first
+  /// element, and a bitmask of possible arguments to SetSSEDomain as the second.
+  std::pair<uint16_t, uint16_t> GetSSEDomain(const MachineInstr *MI) const;
+
+  /// SetSSEDomain - Set the SSEDomain of MI.
+  void SetSSEDomain(MachineInstr *MI, unsigned Domain) const;
+
 private:
-  MachineInstr* foldMemoryOperand(MachineFunction &MF,
-                                  MachineInstr* MI,
-                                  unsigned OpNum,
-                                  SmallVector<MachineOperand,4> &MOs) const;
+  MachineInstr *convertToThreeAddressWithLEA(unsigned MIOpc,
+                                             MachineFunction::iterator &MFI,
+                                             MachineBasicBlock::iterator &MBBI,
+                                             LiveVariables *LV) const;
+
+  MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
+                                      MachineInstr* MI,
+                                      unsigned OpNum,
+                                      const SmallVectorImpl<MachineOperand> &MOs,
+                                      unsigned Size, unsigned Alignment) const;
+
+  /// isFrameOperand - Return true and the FrameIndex if the specified
+  /// operand and following operands form a reference to the stack frame.
+  bool isFrameOperand(const MachineInstr *MI, unsigned int Op,
+                      int &FrameIndex) const;
 };

 } // End llvm namespace
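Editor's note: to make the TSFlags immediate encoding added by this patch easier
to follow, here is a small self-contained C++ sketch of how the Imm* bits
decode.  It copies the ImmShift/Imm*/OpcodeShift enumerator values and the
getSizeOfImm logic from the patch; isImmPCRel is condensed into a plain
comparison, and the TSFlags word built in main() (base opcode 0xE8 with a
32-bit pc-relative immediate) is a hypothetical example, not a value taken
from the X86 tablegen files.

// Standalone sketch -- not part of the patch.  The enumerator values and
// getSizeOfImm mirror the X86II definitions above; the TSFlags word in
// main() is a made-up example.
#include <cassert>
#include <cstdio>

namespace X86II {
  enum {
    ImmShift    = 13,             // Bits 13-15 hold the immediate size field.
    ImmMask     = 7 << ImmShift,
    Imm8        = 1 << ImmShift,
    Imm8PCRel   = 2 << ImmShift,
    Imm16       = 3 << ImmShift,
    Imm32       = 4 << ImmShift,
    Imm32PCRel  = 5 << ImmShift,
    Imm64       = 6 << ImmShift,
    OpcodeShift = 24              // Bits 24-31 hold the base opcode byte.
  };

  // Decode the "size of immediate" field, exactly as in the patch.
  static inline unsigned getSizeOfImm(unsigned TSFlags) {
    switch (TSFlags & ImmMask) {
    default: assert(0 && "Unknown immediate size");
    case Imm8:
    case Imm8PCRel:  return 1;
    case Imm16:      return 2;
    case Imm32:
    case Imm32PCRel: return 4;
    case Imm64:      return 8;
    }
  }

  // Condensed form of the patch's isImmPCRel: only the two *PCRel
  // encodings are pc-relative.
  static inline bool isImmPCRel(unsigned TSFlags) {
    unsigned Imm = TSFlags & ImmMask;
    return Imm == Imm8PCRel || Imm == Imm32PCRel;
  }
}

int main() {
  // Hypothetical TSFlags word: base opcode 0xE8 (CALL rel32) with a
  // 4-byte pc-relative immediate.
  unsigned TSFlags = (0xE8u << X86II::OpcodeShift) | X86II::Imm32PCRel;
  std::printf("base opcode: 0x%02X\n", TSFlags >> X86II::OpcodeShift);
  std::printf("imm size:    %u byte(s)\n", X86II::getSizeOfImm(TSFlags));
  std::printf("pc-relative: %s\n", X86II::isImmPCRel(TSFlags) ? "yes" : "no");
  return 0;
}

Compiled as plain C++, this prints a base opcode of 0xE8, an immediate size of
4 bytes, and pc-relative = yes, which is the decoding the emitter-side helpers
in the patch perform on real instruction descriptors.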