X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTarget%2FX86%2FX86Subtarget.cpp;h=58e0d06dfb474fa5d1c4a4ec820c7d63eb18606b;hb=789cb5df9ca61f8a3794a4fbde7cc020fd00a02a;hp=45a8c2f1e81c3d3557ade642ab8179e73527adce;hpb=4b2dc74d3f17b40a57eba3068ce498a7f30769fe;p=oota-llvm.git

diff --git a/lib/Target/X86/X86Subtarget.cpp b/lib/Target/X86/X86Subtarget.cpp
index 45a8c2f1e81..58e0d06dfb4 100644
--- a/lib/Target/X86/X86Subtarget.cpp
+++ b/lib/Target/X86/X86Subtarget.cpp
@@ -14,14 +14,15 @@
 #define DEBUG_TYPE "subtarget"
 #include "X86Subtarget.h"
 #include "X86InstrInfo.h"
-#include "llvm/GlobalValue.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalValue.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
 #include "llvm/Support/Host.h"
+#include "llvm/Support/raw_ostream.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetOptions.h"
-#include "llvm/ADT/SmallVector.h"
 
 #define GET_SUBTARGETINFO_TARGET_DESC
 #define GET_SUBTARGETINFO_CTOR
@@ -40,10 +41,10 @@ unsigned char X86Subtarget::
 ClassifyBlockAddressReference() const {
   if (isPICStyleGOT())    // 32-bit ELF targets.
     return X86II::MO_GOTOFF;
-  
+
   if (isPICStyleStubPIC())   // Darwin/32 in PIC mode.
     return X86II::MO_PIC_BASE_OFFSET;
-  
+
   // Direct static reference to label.
   return X86II::MO_NO_FLAG;
 }
@@ -70,7 +71,7 @@ ClassifyGlobalReference(const GlobalValue *GV, const TargetMachine &TM) const {
   // Large model never uses stubs.
   if (TM.getCodeModel() == CodeModel::Large)
     return X86II::MO_NO_FLAG;
-  
+
   if (isTargetDarwin()) {
     // If symbol visibility is hidden, the extra load is not needed if
     // target is x86-64 or the symbol is definitely defined in the current
@@ -88,18 +89,18 @@ ClassifyGlobalReference(const GlobalValue *GV, const TargetMachine &TM) const {
 
     return X86II::MO_NO_FLAG;
   }
-  
+
   if (isPICStyleGOT()) {   // 32-bit ELF targets.
     // Extra load is needed for all externally visible.
     if (GV->hasLocalLinkage() || GV->hasHiddenVisibility())
       return X86II::MO_GOTOFF;
     return X86II::MO_GOT;
   }
-  
+
   if (isPICStyleStubPIC()) {  // Darwin/32 in PIC mode.
     // Determine whether we have a stub reference and/or whether the reference
     // is relative to the PIC base or not.
-    
+
     // If this is a strong reference to a definition, it is definitely not
     // through a stub.
     if (!isDecl && !GV->isWeakForLinker())
@@ -109,26 +110,26 @@ ClassifyGlobalReference(const GlobalValue *GV, const TargetMachine &TM) const {
     // normal $non_lazy_ptr stub because this symbol might be resolved late.
     if (!GV->hasHiddenVisibility())  // Non-hidden $non_lazy_ptr reference.
       return X86II::MO_DARWIN_NONLAZY_PIC_BASE;
-    
+
     // If symbol visibility is hidden, we have a stub for common symbol
     // references and external declarations.
     if (isDecl || GV->hasCommonLinkage()) {
       // Hidden $non_lazy_ptr reference.
       return X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE;
     }
-    
+
     // Otherwise, no stub.
     return X86II::MO_PIC_BASE_OFFSET;
   }
-  
+
   if (isPICStyleStubNoDynamic()) {  // Darwin/32 in -mdynamic-no-pic mode.
     // Determine whether we have a stub reference.
-    
+
     // If this is a strong reference to a definition, it is definitely not
     // through a stub.
     if (!isDecl && !GV->isWeakForLinker())
       return X86II::MO_NO_FLAG;
-    
+
     // Unless we have a symbol with hidden visibility, we have to go through a
     // normal $non_lazy_ptr stub because this symbol might be resolved late.
     if (!GV->hasHiddenVisibility())  // Non-hidden $non_lazy_ptr reference.
@@ -137,7 +138,7 @@ ClassifyGlobalReference(const GlobalValue *GV, const TargetMachine &TM) const {
     // Otherwise, no stub.
     return X86II::MO_NO_FLAG;
   }
-  
+
   // Direct static reference to global.
   return X86II::MO_NO_FLAG;
 }
@@ -156,6 +157,12 @@ const char *X86Subtarget::getBZeroEntry() const {
   return 0;
 }
 
+bool X86Subtarget::hasSinCos() const {
+  return getTargetTriple().isMacOSX() &&
+    !getTargetTriple().isMacOSXVersionLT(10, 9) &&
+    is64Bit();
+}
+
 /// IsLegalToCallImmediateAddr - Return true if the subtarget allows calls
 /// to immediate address.
 bool X86Subtarget::IsLegalToCallImmediateAddr(const TargetMachine &TM) const {
@@ -164,17 +171,6 @@ bool X86Subtarget::IsLegalToCallImmediateAddr(const TargetMachine &TM) const {
   return isTargetELF() || TM.getRelocationModel() == Reloc::Static;
 }
 
-/// getSpecialAddressLatency - For targets where it is beneficial to
-/// backschedule instructions that compute addresses, return a value
-/// indicating the number of scheduling cycles of backscheduling that
-/// should be attempted.
-unsigned X86Subtarget::getSpecialAddressLatency() const {
-  // For x86 out-of-order targets, back-schedule address computations so
-  // that loads and stores aren't blocked.
-  // This value was chosen arbitrarily.
-  return 200;
-}
-
 void X86Subtarget::AutoDetectSubtargetFeatures() {
   unsigned EAX = 0, EBX = 0, ECX = 0, EDX = 0;
   unsigned MaxLevel;
@@ -197,33 +193,32 @@ void X86Subtarget::AutoDetectSubtargetFeatures() {
   if ((ECX >> 9)  & 1) { X86SSELevel = SSSE3; ToggleFeature(X86::FeatureSSSE3);}
   if ((ECX >> 19) & 1) { X86SSELevel = SSE41; ToggleFeature(X86::FeatureSSE41);}
   if ((ECX >> 20) & 1) { X86SSELevel = SSE42; ToggleFeature(X86::FeatureSSE42);}
-  // FIXME: AVX codegen support is not ready.
-  //if ((ECX >> 28) & 1) { HasAVX = true; ToggleFeature(X86::FeatureAVX); }
+  if ((ECX >> 28) & 1) { X86SSELevel = AVX; ToggleFeature(X86::FeatureAVX); }
 
   bool IsIntel = memcmp(text.c, "GenuineIntel", 12) == 0;
   bool IsAMD   = !IsIntel && memcmp(text.c, "AuthenticAMD", 12) == 0;
 
-  if (IsIntel && ((ECX >> 1) & 0x1)) {
-    HasCLMUL = true;
-    ToggleFeature(X86::FeatureCLMUL);
+  if ((ECX >> 1) & 0x1) {
+    HasPCLMUL = true;
+    ToggleFeature(X86::FeaturePCLMUL);
   }
-  if (IsIntel && ((ECX >> 12) & 0x1)) {
-    HasFMA3 = true;
-    ToggleFeature(X86::FeatureFMA3);
+  if ((ECX >> 12) & 0x1) {
+    HasFMA = true;
+    ToggleFeature(X86::FeatureFMA);
   }
   if (IsIntel && ((ECX >> 22) & 0x1)) {
     HasMOVBE = true;
     ToggleFeature(X86::FeatureMOVBE);
   }
-  if (IsIntel && ((ECX >> 23) & 0x1)) {
+  if ((ECX >> 23) & 0x1) {
     HasPOPCNT = true;
     ToggleFeature(X86::FeaturePOPCNT);
   }
-  if (IsIntel && ((ECX >> 25) & 0x1)) {
+  if ((ECX >> 25) & 0x1) {
     HasAES = true;
     ToggleFeature(X86::FeatureAES);
   }
-  if (IsIntel && ((ECX >> 29) & 0x1)) {
+  if ((ECX >> 29) & 0x1) {
     HasF16C = true;
     ToggleFeature(X86::FeatureF16C);
   }
@@ -246,13 +241,35 @@ void X86Subtarget::AutoDetectSubtargetFeatures() {
     IsBTMemSlow = true;
     ToggleFeature(X86::FeatureSlowBTMem);
   }
-  // If it's Nehalem, unaligned memory access is fast.
-  // FIXME: Nehalem is family 6. Also include Westmere and later processors?
-  if (Family == 15 && Model == 26) {
+
+  // If it's an Intel chip since Nehalem and not an Atom chip, unaligned
+  // memory access is fast. We hard code model numbers here because they
+  // aren't strictly increasing for Intel chips it seems.
+  if (IsIntel &&
+      ((Family == 6 && Model == 0x1E) || // Nehalem: Clarksfield, Lynnfield,
+                                         // Jasper Froest
+       (Family == 6 && Model == 0x1A) || // Nehalem: Bloomfield, Nehalem-EP
+       (Family == 6 && Model == 0x2E) || // Nehalem: Nehalem-EX
+       (Family == 6 && Model == 0x25) || // Westmere: Arrandale, Clarksdale
+       (Family == 6 && Model == 0x2C) || // Westmere: Gulftown, Westmere-EP
+       (Family == 6 && Model == 0x2F) || // Westmere: Westmere-EX
+       (Family == 6 && Model == 0x2A) || // SandyBridge
+       (Family == 6 && Model == 0x2D) || // SandyBridge: SandyBridge-E*
+       (Family == 6 && Model == 0x3A))) {// IvyBridge
     IsUAMemFast = true;
     ToggleFeature(X86::FeatureFastUAMem);
   }
 
+  // Set processor type. Currently only Atom is detected.
+  if (Family == 6 &&
+      (Model == 28 || Model == 38 || Model == 39
+       || Model == 53 || Model == 54)) {
+    X86ProcFamily = IntelAtom;
+
+    UseLeaForSP = true;
+    ToggleFeature(X86::FeatureLeaForSP);
+  }
+
   unsigned MaxExtLevel;
   X86_MC::GetCpuIDAndInfo(0x80000000, &MaxExtLevel, &EBX, &ECX, &EDX);
 
@@ -266,68 +283,69 @@ void X86Subtarget::AutoDetectSubtargetFeatures() {
         HasLZCNT = true;
         ToggleFeature(X86::FeatureLZCNT);
       }
-      if (IsAMD && ((ECX >> 6) & 0x1)) {
-        HasSSE4A = true;
-        ToggleFeature(X86::FeatureSSE4A);
-      }
-      if (IsAMD && ((ECX >> 16) & 0x1)) {
-        HasFMA4 = true;
-        ToggleFeature(X86::FeatureFMA4);
+      if (IsAMD) {
+        if ((ECX >> 6) & 0x1) {
+          HasSSE4A = true;
+          ToggleFeature(X86::FeatureSSE4A);
+        }
+        if ((ECX >> 11) & 0x1) {
+          HasXOP = true;
+          ToggleFeature(X86::FeatureXOP);
+        }
+        if ((ECX >> 16) & 0x1) {
+          HasFMA4 = true;
+          ToggleFeature(X86::FeatureFMA4);
+        }
       }
     }
   }
 
-  if (IsIntel && MaxLevel >= 7) {
+  if (MaxLevel >= 7) {
     if (!X86_MC::GetCpuIDAndInfoEx(0x7, 0x0, &EAX, &EBX, &ECX, &EDX)) {
+      if (IsIntel && (EBX & 0x1)) {
+        HasFSGSBase = true;
+        ToggleFeature(X86::FeatureFSGSBase);
+      }
       if ((EBX >> 3) & 0x1) {
        HasBMI = true;
        ToggleFeature(X86::FeatureBMI);
      }
-      if ((EBX >> 8) & 0x1) {
+      if (IsIntel && ((EBX >> 5) & 0x1)) {
+        X86SSELevel = AVX2;
+        ToggleFeature(X86::FeatureAVX2);
+      }
+      if (IsIntel && ((EBX >> 8) & 0x1)) {
        HasBMI2 = true;
        ToggleFeature(X86::FeatureBMI2);
      }
+      if (IsIntel && ((EBX >> 11) & 0x1)) {
+        HasRTM = true;
+        ToggleFeature(X86::FeatureRTM);
+      }
     }
   }
 }
 
-X86Subtarget::X86Subtarget(const std::string &TT, const std::string &CPU,
-                           const std::string &FS,
-                           unsigned StackAlignOverride, bool is64Bit)
-  : X86GenSubtargetInfo(TT, CPU, FS)
-  , PICStyle(PICStyles::None)
-  , X86SSELevel(NoMMXSSE)
-  , X863DNowLevel(NoThreeDNow)
-  , HasCMov(false)
-  , HasX86_64(false)
-  , HasPOPCNT(false)
-  , HasSSE4A(false)
-  , HasAVX(false)
-  , HasAES(false)
-  , HasCLMUL(false)
-  , HasFMA3(false)
-  , HasFMA4(false)
-  , HasMOVBE(false)
-  , HasRDRAND(false)
-  , HasF16C(false)
-  , HasLZCNT(false)
-  , HasBMI(false)
-  , HasBMI2(false)
-  , IsBTMemSlow(false)
-  , IsUAMemFast(false)
-  , HasVectorUAMem(false)
-  , HasCmpxchg16b(false)
-  , stackAlignment(8)
-  // FIXME: this is a known good value for Yonah. How about others?
-  , MaxInlineSizeThreshold(128)
-  , TargetTriple(TT)
-  , In64BitMode(is64Bit)
-  , InNaClMode(false) {
-  // Determine default and user specified characteristics
+void X86Subtarget::resetSubtargetFeatures(const MachineFunction *MF) {
+  AttributeSet FnAttrs = MF->getFunction()->getAttributes();
+  Attribute CPUAttr = FnAttrs.getAttribute(AttributeSet::FunctionIndex,
+                                           "target-cpu");
+  Attribute FSAttr = FnAttrs.getAttribute(AttributeSet::FunctionIndex,
+                                          "target-features");
+  std::string CPU =
+    !CPUAttr.hasAttribute(Attribute::None) ?CPUAttr.getValueAsString() : "";
+  std::string FS =
+    !FSAttr.hasAttribute(Attribute::None) ? FSAttr.getValueAsString() : "";
+  if (!FS.empty())
+    resetSubtargetFeatures(CPU, FS);
+}
+
+void X86Subtarget::resetSubtargetFeatures(StringRef CPU, StringRef FS) {
+  std::string CPUName = CPU;
   if (!FS.empty() || !CPU.empty()) {
-    std::string CPUName = CPU;
     if (CPUName.empty()) {
-#if defined (__x86_64__) || defined(__i386__)
+#if defined(i386) || defined(__i386__) || defined(__x86__) || defined(_M_IX86)\
+ || defined(__x86_64__) || defined(_M_AMD64) || defined (_M_X64)
       CPUName = sys::getHostCPUName();
 #else
       CPUName = "generic";
@@ -347,6 +365,13 @@ X86Subtarget::X86Subtarget(const std::string &TT, const std::string &CPU,
     // If feature string is not empty, parse features string.
     ParseSubtargetFeatures(CPUName, FullFS);
   } else {
+    if (CPUName.empty()) {
+#if defined (__x86_64__) || defined(__i386__)
+      CPUName = sys::getHostCPUName();
+#else
+      CPUName = "generic";
+#endif
+    }
     // Otherwise, use CPUID to auto-detect feature set.
     AutoDetectSubtargetFeatures();
 
@@ -355,7 +380,7 @@ X86Subtarget::X86Subtarget(const std::string &TT, const std::string &CPU,
       HasX86_64 = true; ToggleFeature(X86::Feature64Bit);
       HasCMov = true;   ToggleFeature(X86::FeatureCMOV);
 
-      if (!HasAVX && X86SSELevel < SSE2) {
+      if (X86SSELevel < SSE2) {
         X86SSELevel = SSE2;
         ToggleFeature(X86::FeatureSSE1);
         ToggleFeature(X86::FeatureSSE2);
@@ -363,33 +388,83 @@ X86Subtarget::X86Subtarget(const std::string &TT, const std::string &CPU,
     }
   }
 
+  // CPUName may have been set by the CPU detection code. Make sure the
+  // new MCSchedModel is used.
+  InitMCProcessorInfo(CPUName, FS);
+
+  if (X86ProcFamily == IntelAtom)
+    PostRAScheduler = true;
+
+  InstrItins = getInstrItineraryForCPU(CPUName);
+
   // It's important to keep the MCSubtargetInfo feature bits in sync with
   // target data structure which is shared with MC code emitter, etc.
   if (In64BitMode)
     ToggleFeature(X86::Mode64Bit);
 
-  if (isTargetNaCl()) {
-    InNaClMode = true;
-    ToggleFeature(X86::ModeNaCl);
-  }
-
-  if (HasAVX)
-    X86SSELevel = NoMMXSSE;
-
   DEBUG(dbgs() << "Subtarget features: SSELevel " << X86SSELevel
                << ", 3DNowLevel " << X863DNowLevel
                << ", 64bit " << HasX86_64 << "\n");
   assert((!In64BitMode || HasX86_64) &&
          "64-bit code requested on a subtarget that doesn't support it!");
 
-  if(EnableSegmentedStacks && !isTargetELF())
-    report_fatal_error("Segmented stacks are only implemented on ELF.");
-
-  // Stack alignment is 16 bytes on Darwin, FreeBSD, Linux and Solaris (both
+  // Stack alignment is 16 bytes on Darwin, Linux and Solaris (both
   // 32 and 64 bit) and for all 64-bit targets.
   if (StackAlignOverride)
     stackAlignment = StackAlignOverride;
-  else if (isTargetDarwin() || isTargetFreeBSD() || isTargetLinux() ||
-           isTargetSolaris() || In64BitMode)
+  else if (isTargetDarwin() || isTargetLinux() || isTargetSolaris() ||
+           In64BitMode)
     stackAlignment = 16;
 }
+
+X86Subtarget::X86Subtarget(const std::string &TT, const std::string &CPU,
+                           const std::string &FS,
+                           unsigned StackAlignOverride, bool is64Bit)
+  : X86GenSubtargetInfo(TT, CPU, FS)
+  , X86ProcFamily(Others)
+  , PICStyle(PICStyles::None)
+  , X86SSELevel(NoMMXSSE)
+  , X863DNowLevel(NoThreeDNow)
+  , HasCMov(false)
+  , HasX86_64(false)
+  , HasPOPCNT(false)
+  , HasSSE4A(false)
+  , HasAES(false)
+  , HasPCLMUL(false)
+  , HasFMA(false)
+  , HasFMA4(false)
+  , HasXOP(false)
+  , HasMOVBE(false)
+  , HasRDRAND(false)
+  , HasF16C(false)
+  , HasFSGSBase(false)
+  , HasLZCNT(false)
+  , HasBMI(false)
+  , HasBMI2(false)
+  , HasRTM(false)
+  , HasADX(false)
+  , IsBTMemSlow(false)
+  , IsUAMemFast(false)
+  , HasVectorUAMem(false)
+  , HasCmpxchg16b(false)
+  , UseLeaForSP(false)
+  , HasSlowDivide(false)
+  , PostRAScheduler(false)
+  , PadShortFunctions(false)
+  , stackAlignment(4)
+  // FIXME: this is a known good value for Yonah. How about others?
+  , MaxInlineSizeThreshold(128)
+  , TargetTriple(TT)
+  , StackAlignOverride(StackAlignOverride)
+  , In64BitMode(is64Bit) {
+  resetSubtargetFeatures(CPU, FS);
+}
+
+bool X86Subtarget::enablePostRAScheduler(
+           CodeGenOpt::Level OptLevel,
+           TargetSubtargetInfo::AntiDepBreakMode& Mode,
+           RegClassVector& CriticalPathRCs) const {
+  Mode = TargetSubtargetInfo::ANTIDEP_CRITICAL;
+  CriticalPathRCs.clear();
+  return PostRAScheduler && OptLevel >= CodeGenOpt::Default;
+}
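
Note on the hard-coded CPU model table in the patch: the Family/Model numbers compared above (Nehalem through IvyBridge, plus the Atom models that set X86ProcFamily) are the display family/model values that AutoDetectSubtargetFeatures() obtains from CPUID leaf 1 through the X86_MC helpers. The standalone sketch below illustrates that decoding and a few of the ECX feature bits the patch tests; it deliberately uses the GCC/Clang <cpuid.h> wrapper instead of LLVM's X86_MC::GetCpuIDAndInfo, so treat it as a reference for the convention, not as code taken from the patch.

// Reference sketch (not from the patch): decode the display Family/Model and
// a few CPUID.1:ECX feature bits that the detection code above keys off.
// Assumes the GCC/Clang <cpuid.h> wrapper instead of LLVM's X86_MC helpers.
#include <cpuid.h>
#include <cstdio>

int main() {
  unsigned EAX, EBX, ECX, EDX;
  if (!__get_cpuid(1, &EAX, &EBX, &ECX, &EDX))
    return 1;                                   // CPUID leaf 1 unavailable

  // Intel's documented convention: the extended family field only counts for
  // base family 0xF, the extended model field for base families 0x6 and 0xF.
  unsigned BaseFamily = (EAX >> 8) & 0xF;       // bits 11:8
  unsigned BaseModel  = (EAX >> 4) & 0xF;       // bits 7:4
  unsigned Family = BaseFamily;
  unsigned Model  = BaseModel;
  if (BaseFamily == 0xF)
    Family += (EAX >> 20) & 0xFF;               // extended family, bits 27:20
  if (BaseFamily == 0x6 || BaseFamily == 0xF)
    Model += ((EAX >> 16) & 0xF) << 4;          // extended model, bits 19:16

  // Feature bits tested above: SSE4.2 (ECX[20]), POPCNT (ECX[23]),
  // AES (ECX[25]), AVX (ECX[28]).
  std::printf("Family 0x%x Model 0x%x  SSE4.2=%u POPCNT=%u AES=%u AVX=%u\n",
              Family, Model,
              (ECX >> 20) & 1, (ECX >> 23) & 1, (ECX >> 25) & 1,
              (ECX >> 28) & 1);
  return 0;
}

On an Ivy Bridge part this prints Family 0x6 Model 0x3a, the pair the patch matches to enable FeatureFastUAMem; note that the Atom check in the patch writes its model numbers in decimal (28 is 0x1C, and so on) while the Nehalem table uses hex.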