X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTarget%2FX86%2FX86Subtarget.h;h=f723c66b729cc9a1e482684c26b37a53be7a3039;hb=7bbd6e366b39157445cc921024a987e61ea68c00;hp=10ef868968b720db492476640f7659fe750e29bf;hpb=16de4632bb263d7e0def43ebc13af5077726f067;p=oota-llvm.git

diff --git a/lib/Target/X86/X86Subtarget.h b/lib/Target/X86/X86Subtarget.h
index 10ef868968b..f723c66b729 100644
--- a/lib/Target/X86/X86Subtarget.h
+++ b/lib/Target/X86/X86Subtarget.h
@@ -1,4 +1,4 @@
-//=====---- X86Subtarget.h - Define Subtarget for the X86 -----*- C++ -*--====//
+//===-- X86Subtarget.h - Define Subtarget for the X86 ----------*- C++ -*--===//
 //
 //                     The LLVM Compiler Infrastructure
 //
@@ -14,9 +14,9 @@
 #ifndef X86SUBTARGET_H
 #define X86SUBTARGET_H

+#include "llvm/CallingConv.h"
 #include "llvm/ADT/Triple.h"
 #include "llvm/Target/TargetSubtargetInfo.h"
-#include "llvm/CallingConv.h"
 #include <string>

 #define GET_SUBTARGETINFO_HEADER
@@ -49,6 +49,13 @@ protected:
     NoThreeDNow, ThreeDNow, ThreeDNowA
   };

+  enum X86ProcFamilyEnum {
+    Others, IntelAtom
+  };
+
+  /// X86ProcFamily - X86 processor family: Intel Atom, and others
+  X86ProcFamilyEnum X86ProcFamily;
+
   /// PICStyle - Which PIC style to use
   ///
   PICStyles::Style PICStyle;
@@ -78,11 +85,11 @@ protected:
   /// HasAES - Target has AES instructions
   bool HasAES;

-  /// HasCLMUL - Target has carry-less multiplication
-  bool HasCLMUL;
+  /// HasPCLMUL - Target has carry-less multiplication
+  bool HasPCLMUL;

-  /// HasFMA3 - Target has 3-operand fused multiply-add
-  bool HasFMA3;
+  /// HasFMA - Target has 3-operand fused multiply-add
+  bool HasFMA;

   /// HasFMA4 - Target has 4-operand fused multiply-add
   bool HasFMA4;
@@ -125,6 +132,17 @@ protected:
   /// this is true for most x86-64 chips, but not the first AMD chips.
   bool HasCmpxchg16b;

+  /// UseLeaForSP - True if the LEA instruction should be used for adjusting
+  /// the stack pointer. This is an optimization for Intel Atom processors.
+  bool UseLeaForSP;
+
+  /// HasSlowDivide - True if smaller divides are significantly faster than
+  /// full divides and should be used when possible.
+  bool HasSlowDivide;
+
+  /// PostRAScheduler - True if using post-register-allocation scheduler.
+  bool PostRAScheduler;
+
   /// stackAlignment - The minimum alignment known to hold of the stack frame on
   /// entry to the function and which must be maintained by every function.
   unsigned stackAlignment;
@@ -136,6 +154,9 @@ protected:
   /// TargetTriple - What processor and OS we're targeting.
   Triple TargetTriple;

+  /// Instruction itineraries for scheduling
+  InstrItineraryData InstrItins;
+
 private:
   /// In64BitMode - True if compiling for 64-bit, false for 32-bit.
   bool In64BitMode;
@@ -173,28 +194,23 @@ public:

   bool hasCMov() const { return HasCMov; }
   bool hasMMX() const { return X86SSELevel >= MMX; }
-  bool hasSSE1() const { return X86SSELevel >= SSE1 && !hasAVX(); }
-  bool hasSSE2() const { return X86SSELevel >= SSE2 && !hasAVX(); }
-  bool hasSSE3() const { return X86SSELevel >= SSE3 && !hasAVX(); }
-  bool hasSSSE3() const { return X86SSELevel >= SSSE3 && !hasAVX(); }
-  bool hasSSE41() const { return X86SSELevel >= SSE41 && !hasAVX(); }
-  bool hasSSE42() const { return X86SSELevel >= SSE42 && !hasAVX(); }
+  bool hasSSE1() const { return X86SSELevel >= SSE1; }
+  bool hasSSE2() const { return X86SSELevel >= SSE2; }
+  bool hasSSE3() const { return X86SSELevel >= SSE3; }
+  bool hasSSSE3() const { return X86SSELevel >= SSSE3; }
+  bool hasSSE41() const { return X86SSELevel >= SSE41; }
+  bool hasSSE42() const { return X86SSELevel >= SSE42; }
+  bool hasAVX() const { return X86SSELevel >= AVX; }
+  bool hasAVX2() const { return X86SSELevel >= AVX2; }
   bool hasSSE4A() const { return HasSSE4A; }
   bool has3DNow() const { return X863DNowLevel >= ThreeDNow; }
   bool has3DNowA() const { return X863DNowLevel >= ThreeDNowA; }
   bool hasPOPCNT() const { return HasPOPCNT; }
-  bool hasAVX() const { return X86SSELevel >= AVX; }
-  bool hasAVX2() const { return X86SSELevel >= AVX2; }
-  bool hasXMM() const { return X86SSELevel >= SSE1; }
-  bool hasXMMInt() const { return X86SSELevel >= SSE2; }
-  bool hasSSE3orAVX() const { return X86SSELevel >= SSE3; }
-  bool hasSSSE3orAVX() const { return X86SSELevel >= SSSE3; }
-  bool hasSSE41orAVX() const { return X86SSELevel >= SSE41; }
-  bool hasSSE42orAVX() const { return X86SSELevel >= SSE42; }
   bool hasAES() const { return HasAES; }
-  bool hasCLMUL() const { return HasCLMUL; }
-  bool hasFMA3() const { return HasFMA3; }
-  bool hasFMA4() const { return HasFMA4; }
+  bool hasPCLMUL() const { return HasPCLMUL; }
+  bool hasFMA() const { return HasFMA; }
+  // FIXME: Favor FMA when both are enabled. Is this the right thing to do?
+  bool hasFMA4() const { return HasFMA4 && !HasFMA; }
   bool hasXOP() const { return HasXOP; }
   bool hasMOVBE() const { return HasMOVBE; }
   bool hasRDRAND() const { return HasRDRAND; }
@@ -207,6 +223,10 @@ public:
   bool isUnalignedMemAccessFast() const { return IsUAMemFast; }
   bool hasVectorUAMem() const { return HasVectorUAMem; }
   bool hasCmpxchg16b() const { return HasCmpxchg16b; }
+  bool useLeaForSP() const { return UseLeaForSP; }
+  bool hasSlowDivide() const { return HasSlowDivide; }
+
+  bool isAtom() const { return X86ProcFamily == IntelAtom; }

   const Triple &getTargetTriple() const { return TargetTriple; }

@@ -217,11 +237,9 @@ public:
   bool isTargetSolaris() const {
     return TargetTriple.getOS() == Triple::Solaris;
   }
-
-  // ELF is a reasonably sane default and the only other X86 targets we
-  // support are Darwin and Windows. Just use "not those".
   bool isTargetELF() const {
-    return !isTargetDarwin() && !isTargetWindows() && !isTargetCygMing();
+    return (TargetTriple.getEnvironment() == Triple::ELF ||
+            TargetTriple.isOSBinFormatELF());
   }
   bool isTargetLinux() const { return TargetTriple.getOS() == Triple::Linux; }
   bool isTargetNaCl() const {
@@ -229,29 +247,24 @@ public:
   }
   bool isTargetNaCl32() const { return isTargetNaCl() && !is64Bit(); }
   bool isTargetNaCl64() const { return isTargetNaCl() && is64Bit(); }
-
   bool isTargetWindows() const { return TargetTriple.getOS() == Triple::Win32; }
   bool isTargetMingw() const { return TargetTriple.getOS() == Triple::MinGW32; }
   bool isTargetCygwin() const { return TargetTriple.getOS() == Triple::Cygwin; }
-  bool isTargetCygMing() const {
-    return isTargetMingw() || isTargetCygwin();
-  }
-
-  /// isTargetCOFF - Return true if this is any COFF/Windows target variant.
+  bool isTargetCygMing() const { return TargetTriple.isOSCygMing(); }
   bool isTargetCOFF() const {
-    return isTargetMingw() || isTargetCygwin() || isTargetWindows();
+    return (TargetTriple.getEnvironment() != Triple::ELF &&
+            TargetTriple.isOSBinFormatCOFF());
   }
+  bool isTargetEnvMacho() const { return TargetTriple.isEnvironmentMachO(); }

   bool isTargetWin64() const {
     // FIXME: x86_64-cygwin has not been released yet.
-    return In64BitMode && (isTargetCygMing() || isTargetWindows());
-  }
-
-  bool isTargetEnvMacho() const {
-    return isTargetDarwin() || (TargetTriple.getEnvironment() == Triple::MachO);
+    return In64BitMode && TargetTriple.isOSWindows();
   }

   bool isTargetWin32() const {
+    // FIXME: Cygwin is included for isTargetWin64 -- should it be included
+    // here too?
     return !In64BitMode && (isTargetMingw() || isTargetWindows());
   }

@@ -297,6 +310,17 @@ public:
   /// indicating the number of scheduling cycles of backscheduling that
   /// should be attempted.
   unsigned getSpecialAddressLatency() const;
+
+  /// enablePostRAScheduler - run for Atom optimization.
+  bool enablePostRAScheduler(CodeGenOpt::Level OptLevel,
+                             TargetSubtargetInfo::AntiDepBreakMode& Mode,
+                             RegClassVector& CriticalPathRCs) const;
+
+  bool postRAScheduler() const { return PostRAScheduler; }
+
+  /// getInstrItins = Return the instruction itineraries based on the
+  /// subtarget selection.
+  const InstrItineraryData &getInstrItineraryData() const { return InstrItins; }
 };

 } // End llvm namespace
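
Context for readers (not part of the patch): a minimal sketch of how backend code might consult the accessors touched above, assuming only the X86Subtarget methods declared in this header; the free functions below and their names are hypothetical.

// Illustrative sketch only -- not from this patch. Uses accessors declared
// in X86Subtarget.h above; the helper functions themselves are hypothetical.
#include "X86Subtarget.h"

using namespace llvm;

// Prefer LEA over ADD/SUB when adjusting the stack pointer on Intel Atom,
// mirroring the isAtom()/useLeaForSP() hooks introduced by this patch.
static bool shouldUseLeaForSPAdjustment(const X86Subtarget &ST) {
  return ST.isAtom() && ST.useLeaForSP();
}

// After the rename, hasFMA() reports 3-operand FMA, and hasFMA4() now
// returns false whenever FMA3 is also available (see the FIXME above).
static bool hasAnyFusedMultiplyAdd(const X86Subtarget &ST) {
  return ST.hasFMA() || ST.hasFMA4();
}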