if (GV->hasDLLImportLinkage())
return X86II::MO_DLLIMPORT;
- // GV with ghost linkage (in JIT lazy compilation mode) do not require an
+ // Materializable GVs (in JIT lazy compilation mode) do not require an
// extra load from stub.
- bool isDecl = GV->isDeclaration() && !GV->hasNotBeenReadFromBitcode();
+ bool isDecl = GV->isDeclaration() && !GV->isMaterializable();
// X86-64 in PIC mode.
if (isPICStyleRIPRel()) {
HasFMA3 = IsIntel && ((ECX >> 12) & 0x1);
HasAVX = ((ECX >> 28) & 0x1);
+ HasAES = IsIntel && ((ECX >> 25) & 0x1);
if (IsIntel || IsAMD) {
// Determine if bit test memory instructions are slow.
unsigned Model = 0;
DetectFamilyModel(EAX, Family, Model);
IsBTMemSlow = IsAMD || (Family == 6 && Model >= 13);
+ // If it's Nehalem, unaligned memory access is fast.
+ if (Family == 15 && Model == 26)
+ IsUAMemFast = true;
GetCpuIDAndInfo(0x80000001, &EAX, &EBX, &ECX, &EDX);
HasX86_64 = (EDX >> 29) & 0x1;
, HasX86_64(false)
, HasSSE4A(false)
, HasAVX(false)
+ , HasAES(false)
, HasFMA3(false)
, HasFMA4(false)
, IsBTMemSlow(false)
+ , IsUAMemFast(false)
+ , HasVectorUAMem(false)
, DarwinVers(0)
, stackAlignment(8)
// FIXME: this is a known good value for Yonah. How about others?
// If requesting codegen for X86-64, make sure that 64-bit features
// are enabled.
- if (Is64Bit)
+ if (Is64Bit) {
HasX86_64 = true;
- DEBUG(errs() << "Subtarget features: SSELevel " << X86SSELevel
+ // All 64-bit cpus have cmov support.
+ HasCMov = true;
+ }
+
+
+ DEBUG(dbgs() << "Subtarget features: SSELevel " << X86SSELevel
<< ", 3DNowLevel " << X863DNowLevel
<< ", 64bit " << HasX86_64 << "\n");
assert((!Is64Bit || HasX86_64) &&
stackAlignment = StackAlignment;
}
-bool X86Subtarget::enablePostRAScheduler(
- CodeGenOpt::Level OptLevel,
- TargetSubtarget::AntiDepBreakMode& Mode,
- RegClassVector& CriticalPathRCs) const {
- Mode = TargetSubtarget::ANTIDEP_CRITICAL;
- CriticalPathRCs.clear();
- return OptLevel >= CodeGenOpt::Aggressive;
+/// IsCalleePop - Determines whether the callee is required to pop its
+/// own arguments. Callee pop is necessary to support tail calls.
+bool X86Subtarget::IsCalleePop(bool IsVarArg,
+                               CallingConv::ID CallingConv) const {
+  // Variadic calls can never use callee-pop: only the caller knows how
+  // many arguments were actually pushed.
+  if (IsVarArg)
+    return false;
+
+  switch (CallingConv) {
+  default:
+    // C and most other conventions are caller-pop.
+    return false;
+  // stdcall/fastcall/thiscall are callee-pop conventions, but they only
+  // exist as such on x86-32; in 64-bit mode the normal ABI applies.
+  case CallingConv::X86_StdCall:
+    return !is64Bit();
+  case CallingConv::X86_FastCall:
+    return !is64Bit();
+  case CallingConv::X86_ThisCall:
+    return !is64Bit();
+  // fastcc and GHC pop their own arguments only when guaranteed tail-call
+  // optimization is requested, since callee-pop is what makes the tail
+  // calls possible (see the function comment above).
+  case CallingConv::Fast:
+    return GuaranteedTailCallOpt;
+  case CallingConv::GHC:
+    return GuaranteedTailCallOpt;
+  }
}