X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTarget%2FX86%2FX86Subtarget.cpp;h=3eb9441b9725f582333c143b03dd99822f1b30ff;hb=90f20044ade3712c8b0c3f4ebe47d57ad15ae6ce;hp=0d02e5ee472bbfd51479efc715e45008c01b89d6;hpb=699647cabcd98efd10b3bfb7cedc4d4b54f9b93d;p=oota-llvm.git diff --git a/lib/Target/X86/X86Subtarget.cpp b/lib/Target/X86/X86Subtarget.cpp index 0d02e5ee472..3eb9441b972 100644 --- a/lib/Target/X86/X86Subtarget.cpp +++ b/lib/Target/X86/X86Subtarget.cpp @@ -1,4 +1,4 @@ -//===-- X86Subtarget.cpp - X86 Subtarget Information ------------*- C++ -*-===// +//===-- X86Subtarget.cpp - X86 Subtarget Information ----------------------===// // // The LLVM Compiler Infrastructure // @@ -7,21 +7,26 @@ // //===----------------------------------------------------------------------===// // -// This file implements the X86 specific subclass of TargetSubtarget. +// This file implements the X86 specific subclass of TargetSubtargetInfo. // //===----------------------------------------------------------------------===// #define DEBUG_TYPE "subtarget" #include "X86Subtarget.h" #include "X86InstrInfo.h" -#include "X86GenSubtarget.inc" #include "llvm/GlobalValue.h" #include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" #include "llvm/Support/raw_ostream.h" -#include "llvm/System/Host.h" +#include "llvm/Support/Host.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetOptions.h" #include "llvm/ADT/SmallVector.h" + +#define GET_SUBTARGETINFO_TARGET_DESC +#define GET_SUBTARGETINFO_CTOR +#include "X86GenSubtargetInfo.inc" + using namespace llvm; #if defined(_MSC_VER) @@ -144,7 +149,8 @@ ClassifyGlobalReference(const GlobalValue *GV, const TargetMachine &TM) const { /// passed as the second argument. Otherwise it returns null. const char *X86Subtarget::getBZeroEntry() const { // Darwin 10 has a __bzero entry point for this purpose. - if (getDarwinVers() >= 10) + if (getTargetTriple().isMacOSX() && + !getTargetTriple().isMacOSXVersionLT(10, 6)) return "__bzero"; return 0; @@ -153,7 +159,7 @@ const char *X86Subtarget::getBZeroEntry() const { /// IsLegalToCallImmediateAddr - Return true if the subtarget allows calls /// to immediate address. bool X86Subtarget::IsLegalToCallImmediateAddr(const TargetMachine &TM) const { - if (Is64Bit) + if (In64BitMode) return false; return isTargetELF() || TM.getRelocationModel() == Reloc::Static; } @@ -169,203 +175,257 @@ unsigned X86Subtarget::getSpecialAddressLatency() const { return 200; } -/// GetCpuIDAndInfo - Execute the specified cpuid and return the 4 values in the -/// specified arguments. If we can't run cpuid on the host, return true. -static bool GetCpuIDAndInfo(unsigned value, unsigned *rEAX, - unsigned *rEBX, unsigned *rECX, unsigned *rEDX) { -#if defined(__x86_64__) || defined(_M_AMD64) || defined (_M_X64) - #if defined(__GNUC__) - // gcc doesn't know cpuid would clobber ebx/rbx. Preseve it manually. 
- asm ("movq\t%%rbx, %%rsi\n\t" - "cpuid\n\t" - "xchgq\t%%rbx, %%rsi\n\t" - : "=a" (*rEAX), - "=S" (*rEBX), - "=c" (*rECX), - "=d" (*rEDX) - : "a" (value)); - return false; - #elif defined(_MSC_VER) - int registers[4]; - __cpuid(registers, value); - *rEAX = registers[0]; - *rEBX = registers[1]; - *rECX = registers[2]; - *rEDX = registers[3]; - return false; - #endif -#elif defined(i386) || defined(__i386__) || defined(__x86__) || defined(_M_IX86) - #if defined(__GNUC__) - asm ("movl\t%%ebx, %%esi\n\t" - "cpuid\n\t" - "xchgl\t%%ebx, %%esi\n\t" - : "=a" (*rEAX), - "=S" (*rEBX), - "=c" (*rECX), - "=d" (*rEDX) - : "a" (value)); - return false; - #elif defined(_MSC_VER) - __asm { - mov eax,value - cpuid - mov esi,rEAX - mov dword ptr [esi],eax - mov esi,rEBX - mov dword ptr [esi],ebx - mov esi,rECX - mov dword ptr [esi],ecx - mov esi,rEDX - mov dword ptr [esi],edx - } - return false; - #endif -#endif - return true; -} - -static void DetectFamilyModel(unsigned EAX, unsigned &Family, unsigned &Model) { - Family = (EAX >> 8) & 0xf; // Bits 8 - 11 - Model = (EAX >> 4) & 0xf; // Bits 4 - 7 - if (Family == 6 || Family == 0xf) { - if (Family == 0xf) - // Examine extended family ID if family ID is F. - Family += (EAX >> 20) & 0xff; // Bits 20 - 27 - // Examine extended model ID if family ID is 6 or F. - Model += ((EAX >> 16) & 0xf) << 4; // Bits 16 - 19 - } -} - void X86Subtarget::AutoDetectSubtargetFeatures() { unsigned EAX = 0, EBX = 0, ECX = 0, EDX = 0; + unsigned MaxLevel; union { unsigned u[3]; char c[12]; } text; - - if (GetCpuIDAndInfo(0, &EAX, text.u+0, text.u+2, text.u+1)) + + if (X86_MC::GetCpuIDAndInfo(0, &MaxLevel, text.u+0, text.u+2, text.u+1) || + MaxLevel < 1) return; - GetCpuIDAndInfo(0x1, &EAX, &EBX, &ECX, &EDX); - - if ((EDX >> 15) & 1) HasCMov = true; - if ((EDX >> 23) & 1) X86SSELevel = MMX; - if ((EDX >> 25) & 1) X86SSELevel = SSE1; - if ((EDX >> 26) & 1) X86SSELevel = SSE2; - if (ECX & 0x1) X86SSELevel = SSE3; - if ((ECX >> 9) & 1) X86SSELevel = SSSE3; - if ((ECX >> 19) & 1) X86SSELevel = SSE41; - if ((ECX >> 20) & 1) X86SSELevel = SSE42; + X86_MC::GetCpuIDAndInfo(0x1, &EAX, &EBX, &ECX, &EDX); + + if ((EDX >> 15) & 1) { HasCMov = true; ToggleFeature(X86::FeatureCMOV); } + if ((EDX >> 23) & 1) { X86SSELevel = MMX; ToggleFeature(X86::FeatureMMX); } + if ((EDX >> 25) & 1) { X86SSELevel = SSE1; ToggleFeature(X86::FeatureSSE1); } + if ((EDX >> 26) & 1) { X86SSELevel = SSE2; ToggleFeature(X86::FeatureSSE2); } + if (ECX & 0x1) { X86SSELevel = SSE3; ToggleFeature(X86::FeatureSSE3); } + if ((ECX >> 9) & 1) { X86SSELevel = SSSE3; ToggleFeature(X86::FeatureSSSE3);} + if ((ECX >> 19) & 1) { X86SSELevel = SSE41; ToggleFeature(X86::FeatureSSE41);} + if ((ECX >> 20) & 1) { X86SSELevel = SSE42; ToggleFeature(X86::FeatureSSE42);} + // FIXME: AVX codegen support is not ready. 
+ //if ((ECX >> 28) & 1) { X86SSELevel = AVX; ToggleFeature(X86::FeatureAVX); } bool IsIntel = memcmp(text.c, "GenuineIntel", 12) == 0; bool IsAMD = !IsIntel && memcmp(text.c, "AuthenticAMD", 12) == 0; - HasCLMUL = IsIntel && ((ECX >> 1) & 0x1); - HasFMA3 = IsIntel && ((ECX >> 12) & 0x1); - HasAVX = ((ECX >> 28) & 0x1); - HasAES = IsIntel && ((ECX >> 25) & 0x1); + if (IsIntel && ((ECX >> 1) & 0x1)) { + HasCLMUL = true; + ToggleFeature(X86::FeatureCLMUL); + } + if (IsIntel && ((ECX >> 12) & 0x1)) { + HasFMA3 = true; + ToggleFeature(X86::FeatureFMA3); + } + if (IsIntel && ((ECX >> 22) & 0x1)) { + HasMOVBE = true; + ToggleFeature(X86::FeatureMOVBE); + } + if (IsIntel && ((ECX >> 23) & 0x1)) { + HasPOPCNT = true; + ToggleFeature(X86::FeaturePOPCNT); + } + if (IsIntel && ((ECX >> 25) & 0x1)) { + HasAES = true; + ToggleFeature(X86::FeatureAES); + } + if (IsIntel && ((ECX >> 29) & 0x1)) { + HasF16C = true; + ToggleFeature(X86::FeatureF16C); + } + if (IsIntel && ((ECX >> 30) & 0x1)) { + HasRDRAND = true; + ToggleFeature(X86::FeatureRDRAND); + } + + if ((ECX >> 13) & 0x1) { + HasCmpxchg16b = true; + ToggleFeature(X86::FeatureCMPXCHG16B); + } if (IsIntel || IsAMD) { // Determine if bit test memory instructions are slow. unsigned Family = 0; unsigned Model = 0; - DetectFamilyModel(EAX, Family, Model); - IsBTMemSlow = IsAMD || (Family == 6 && Model >= 13); + X86_MC::DetectFamilyModel(EAX, Family, Model); + if (IsAMD || (Family == 6 && Model >= 13)) { + IsBTMemSlow = true; + ToggleFeature(X86::FeatureSlowBTMem); + } + // If it's Nehalem, unaligned memory access is fast. - if (Family == 15 && Model == 26) + // FIXME: Nehalem is family 6. Also include Westmere and later processors? + if (Family == 15 && Model == 26) { IsUAMemFast = true; + ToggleFeature(X86::FeatureFastUAMem); + } - GetCpuIDAndInfo(0x80000001, &EAX, &EBX, &ECX, &EDX); - HasX86_64 = (EDX >> 29) & 0x1; - HasSSE4A = IsAMD && ((ECX >> 6) & 0x1); - HasFMA4 = IsAMD && ((ECX >> 16) & 0x1); + // Set processor type. Currently only Atom is detected. + if (Family == 6 && Model == 28) { + X86ProcFamily = IntelAtom; + ToggleFeature(X86::FeatureLeaForSP); + } + + unsigned MaxExtLevel; + X86_MC::GetCpuIDAndInfo(0x80000000, &MaxExtLevel, &EBX, &ECX, &EDX); + + if (MaxExtLevel >= 0x80000001) { + X86_MC::GetCpuIDAndInfo(0x80000001, &EAX, &EBX, &ECX, &EDX); + if ((EDX >> 29) & 0x1) { + HasX86_64 = true; + ToggleFeature(X86::Feature64Bit); + } + if ((ECX >> 5) & 0x1) { + HasLZCNT = true; + ToggleFeature(X86::FeatureLZCNT); + } + if (IsAMD) { + if ((ECX >> 6) & 0x1) { + HasSSE4A = true; + ToggleFeature(X86::FeatureSSE4A); + } + if ((ECX >> 11) & 0x1) { + HasXOP = true; + ToggleFeature(X86::FeatureXOP); + } + if ((ECX >> 16) & 0x1) { + HasFMA4 = true; + ToggleFeature(X86::FeatureFMA4); + } + } + } + } + + if (IsIntel && MaxLevel >= 7) { + if (!X86_MC::GetCpuIDAndInfoEx(0x7, 0x0, &EAX, &EBX, &ECX, &EDX)) { + if (EBX & 0x1) { + HasFSGSBase = true; + ToggleFeature(X86::FeatureFSGSBase); + } + if ((EBX >> 3) & 0x1) { + HasBMI = true; + ToggleFeature(X86::FeatureBMI); + } + // FIXME: AVX2 codegen support is not ready. 
+ //if ((EBX >> 5) & 0x1) { + // X86SSELevel = AVX2; + // ToggleFeature(X86::FeatureAVX2); + //} + if ((EBX >> 8) & 0x1) { + HasBMI2 = true; + ToggleFeature(X86::FeatureBMI2); + } + } } } -X86Subtarget::X86Subtarget(const std::string &TT, const std::string &FS, - bool is64Bit) - : PICStyle(PICStyles::None) +X86Subtarget::X86Subtarget(const std::string &TT, const std::string &CPU, + const std::string &FS, + unsigned StackAlignOverride, bool is64Bit) + : X86GenSubtargetInfo(TT, CPU, FS) + , X86ProcFamily(Others) + , PICStyle(PICStyles::None) , X86SSELevel(NoMMXSSE) , X863DNowLevel(NoThreeDNow) , HasCMov(false) , HasX86_64(false) + , HasPOPCNT(false) , HasSSE4A(false) - , HasAVX(false) , HasAES(false) , HasCLMUL(false) , HasFMA3(false) , HasFMA4(false) + , HasXOP(false) + , HasMOVBE(false) + , HasRDRAND(false) + , HasF16C(false) + , HasFSGSBase(false) + , HasLZCNT(false) + , HasBMI(false) + , HasBMI2(false) , IsBTMemSlow(false) , IsUAMemFast(false) , HasVectorUAMem(false) - , stackAlignment(8) + , HasCmpxchg16b(false) + , UseLeaForSP(false) + , PostRAScheduler(false) + , stackAlignment(4) // FIXME: this is a known good value for Yonah. How about others? , MaxInlineSizeThreshold(128) , TargetTriple(TT) - , Is64Bit(is64Bit) { - - // default to hard float ABI - if (FloatABIType == FloatABI::Default) - FloatABIType = FloatABI::Hard; - + , In64BitMode(is64Bit) { // Determine default and user specified characteristics - if (!FS.empty()) { + std::string CPUName = CPU; + if (!FS.empty() || !CPU.empty()) { + if (CPUName.empty()) { +#if defined(i386) || defined(__i386__) || defined(__x86__) || defined(_M_IX86)\ + || defined(__x86_64__) || defined(_M_AMD64) || defined (_M_X64) + CPUName = sys::getHostCPUName(); +#else + CPUName = "generic"; +#endif + } + + // Make sure 64-bit features are available in 64-bit mode. (But make sure + // SSE2 can be turned off explicitly.) + std::string FullFS = FS; + if (In64BitMode) { + if (!FullFS.empty()) + FullFS = "+64bit,+sse2," + FullFS; + else + FullFS = "+64bit,+sse2"; + } + // If feature string is not empty, parse features string. - std::string CPU = sys::getHostCPUName(); - ParseSubtargetFeatures(FS, CPU); - // All X86-64 CPUs also have SSE2, however user might request no SSE via - // -mattr, so don't force SSELevel here. + ParseSubtargetFeatures(CPUName, FullFS); } else { + if (CPUName.empty()) { +#if defined (__x86_64__) || defined(__i386__) + CPUName = sys::getHostCPUName(); +#else + CPUName = "generic"; +#endif + } // Otherwise, use CPUID to auto-detect feature set. AutoDetectSubtargetFeatures(); - // Make sure SSE2 is enabled; it is available on all X86-64 CPUs. - if (Is64Bit && X86SSELevel < SSE2) - X86SSELevel = SSE2; - } - // If requesting codegen for X86-64, make sure that 64-bit features - // are enabled. - if (Is64Bit) { - HasX86_64 = true; + // Make sure 64-bit features are available in 64-bit mode. + if (In64BitMode) { + HasX86_64 = true; ToggleFeature(X86::Feature64Bit); + HasCMov = true; ToggleFeature(X86::FeatureCMOV); - // All 64-bit cpus have cmov support. - HasCMov = true; + if (X86SSELevel < SSE2) { + X86SSELevel = SSE2; + ToggleFeature(X86::FeatureSSE1); + ToggleFeature(X86::FeatureSSE2); + } + } } - + + if (X86ProcFamily == IntelAtom) { + PostRAScheduler = true; + InstrItins = getInstrItineraryForCPU(CPUName); + } + + // It's important to keep the MCSubtargetInfo feature bits in sync with + // target data structure which is shared with MC code emitter, etc. 
+ if (In64BitMode) + ToggleFeature(X86::Mode64Bit); + DEBUG(dbgs() << "Subtarget features: SSELevel " << X86SSELevel << ", 3DNowLevel " << X863DNowLevel << ", 64bit " << HasX86_64 << "\n"); - assert((!Is64Bit || HasX86_64) && + assert((!In64BitMode || HasX86_64) && "64-bit code requested on a subtarget that doesn't support it!"); - // Stack alignment is 16 bytes on Darwin (both 32 and 64 bit) and for all 64 - // bit targets. - if (isTargetDarwin() || Is64Bit) + // Stack alignment is 16 bytes on Darwin, FreeBSD, Linux and Solaris (both + // 32 and 64 bit) and for all 64-bit targets. + if (StackAlignOverride) + stackAlignment = StackAlignOverride; + else if (isTargetDarwin() || isTargetFreeBSD() || isTargetLinux() || + isTargetSolaris() || In64BitMode) stackAlignment = 16; - - if (StackAlignment) - stackAlignment = StackAlignment; } -/// IsCalleePop - Determines whether the callee is required to pop its -/// own arguments. Callee pop is necessary to support tail calls. -bool X86Subtarget::IsCalleePop(bool IsVarArg, - CallingConv::ID CallingConv) const { - if (IsVarArg) - return false; - - switch (CallingConv) { - default: - return false; - case CallingConv::X86_StdCall: - return !is64Bit(); - case CallingConv::X86_FastCall: - return !is64Bit(); - case CallingConv::X86_ThisCall: - return !is64Bit(); - case CallingConv::Fast: - return GuaranteedTailCallOpt; - case CallingConv::GHC: - return GuaranteedTailCallOpt; - } +bool X86Subtarget::enablePostRAScheduler( + CodeGenOpt::Level OptLevel, + TargetSubtargetInfo::AntiDepBreakMode& Mode, + RegClassVector& CriticalPathRCs) const { + Mode = TargetSubtargetInfo::ANTIDEP_CRITICAL; + CriticalPathRCs.clear(); + return PostRAScheduler && OptLevel >= CodeGenOpt::Default; }
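
The new AutoDetectSubtargetFeatures() above probes CPUID leaf 0 for the vendor string and the maximum basic leaf, then leaf 1 for the feature bits it pairs with ToggleFeature(). Below is a minimal standalone sketch of the same probing pattern, using the <cpuid.h> helper that ships with GCC/Clang in place of LLVM's X86_MC::GetCpuIDAndInfo; the program itself is illustrative and not part of the patch.

// Standalone illustration (not part of the patch): the same CPUID bit tests
// AutoDetectSubtargetFeatures() performs, via GCC/Clang's __get_cpuid().
#include <cpuid.h>
#include <cstdio>
#include <cstring>

int main() {
  unsigned EAX, EBX, ECX, EDX;

  // Leaf 0: EAX holds the maximum basic leaf; EBX/EDX/ECX spell the vendor
  // ("GenuineIntel" or "AuthenticAMD"), matching the text union in the patch.
  if (!__get_cpuid(0, &EAX, &EBX, &ECX, &EDX))
    return 1;                                  // CPUID unavailable
  char Vendor[13];
  std::memcpy(Vendor + 0, &EBX, 4);
  std::memcpy(Vendor + 4, &EDX, 4);
  std::memcpy(Vendor + 8, &ECX, 4);
  Vendor[12] = '\0';

  // Leaf 1: the feature bits the patch feeds into ToggleFeature(...).
  if (!__get_cpuid(1, &EAX, &EBX, &ECX, &EDX))
    return 1;

  std::printf("vendor     : %s\n", Vendor);
  std::printf("cmov       : %u\n", (EDX >> 15) & 1);
  std::printf("sse2       : %u\n", (EDX >> 26) & 1);
  std::printf("sse4.2     : %u\n", (ECX >> 20) & 1);
  std::printf("popcnt     : %u\n", (ECX >> 23) & 1);
  std::printf("cmpxchg16b : %u\n", (ECX >> 13) & 1);
  std::printf("aes        : %u\n", (ECX >> 25) & 1);
  return 0;
}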
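
The DetectFamilyModel() helper removed by this patch (the logic now comes from X86_MC::DetectFamilyModel) is what the new "Set processor type" check relies on: Intel Atom decodes to Family 6, Model 28. A short sketch with a worked example follows; the 0x106C0 value is an assumed Atom (Bonnell) leaf-1 EAX signature with the stepping bits zeroed.

#include <cassert>

// Same decode as the removed helper: extended family/model bits only apply
// when the base family is 6 or 0xF.
static void DetectFamilyModel(unsigned EAX, unsigned &Family, unsigned &Model) {
  Family = (EAX >> 8) & 0xf;            // bits 8-11
  Model  = (EAX >> 4) & 0xf;            // bits 4-7
  if (Family == 6 || Family == 0xf) {
    if (Family == 0xf)
      Family += (EAX >> 20) & 0xff;     // extended family, bits 20-27
    Model += ((EAX >> 16) & 0xf) << 4;  // extended model, bits 16-19
  }
}

int main() {
  unsigned Family = 0, Model = 0;
  DetectFamilyModel(0x106C0, Family, Model);  // assumed Atom signature
  assert(Family == 6 && Model == 28);         // values the new IntelAtom check tests
  return 0;
}

As the patch's own FIXME notes, the fast-unaligned-memory check still tests Family 15, Model 26, while Nehalem actually reports family 6 under this decode.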
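
In the rewritten constructor, 64-bit targets get "+64bit,+sse2" prepended to the user feature string before ParseSubtargetFeatures(CPUName, FullFS) runs, so a user-supplied "-sse2" still appears later in the string and, per the comment in the patch, can turn SSE2 back off. A hedged sketch of just that composition; the composeFeatureString helper and the test harness are made up for illustration.

#include <cassert>
#include <string>

// Mirrors the FullFS logic in the new constructor: baseline 64-bit features
// first, user-specified features appended after so they can override.
static std::string composeFeatureString(bool In64BitMode, const std::string &FS) {
  std::string FullFS = FS;
  if (In64BitMode)
    FullFS = FullFS.empty() ? "+64bit,+sse2" : "+64bit,+sse2," + FullFS;
  return FullFS;
}

int main() {
  assert(composeFeatureString(true,  "")      == "+64bit,+sse2");
  assert(composeFeatureString(true,  "+avx")  == "+64bit,+sse2,+avx");
  assert(composeFeatureString(true,  "-sse2") == "+64bit,+sse2,-sse2");  // user override kept last
  assert(composeFeatureString(false, "+sse3") == "+sse3");
  return 0;
}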