X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTarget%2FARM%2FARMTargetMachine.cpp;h=0778ec372a8ee1bd0d1f43897df162b895887996;hb=d44dd1789ad34be8e9537f544fd3e815d7a4b721;hp=e3e6228206e6799269a26e7255c76e8918b2a469;hpb=061096482f16e6cd40e3254ba2ce390e9b816e1f;p=oota-llvm.git

diff --git a/lib/Target/ARM/ARMTargetMachine.cpp b/lib/Target/ARM/ARMTargetMachine.cpp
index e3e6228206e..0778ec372a8 100644
--- a/lib/Target/ARM/ARMTargetMachine.cpp
+++ b/lib/Target/ARM/ARMTargetMachine.cpp
@@ -37,6 +37,16 @@ EnableAtomicTidy("arm-atomic-cfg-tidy", cl::Hidden,
                           " to make use of cmpxchg flow-based information"),
                  cl::init(true));
 
+static cl::opt<bool>
+EnableARMLoadStoreOpt("arm-load-store-opt", cl::Hidden,
+                      cl::desc("Enable ARM load/store optimization pass"),
+                      cl::init(true));
+
+// FIXME: Unify control over GlobalMerge.
+static cl::opt<cl::boolOrDefault>
+EnableGlobalMerge("arm-global-merge", cl::Hidden,
+                  cl::desc("Enable the global merge pass"));
+
 extern "C" void LLVMInitializeARMTarget() {
   // Register the target.
   RegisterTargetMachine<ARMLETargetMachine> X(TheARMLETarget);
@@ -70,8 +80,7 @@ computeTargetABI(const Triple &TT, StringRef CPU,
   // FIXME: This is duplicated code from the front end and should be unified.
   if (TT.isOSBinFormatMachO()) {
     if (TT.getEnvironment() == llvm::Triple::EABI ||
-        (TT.getOS() == llvm::Triple::UnknownOS &&
-         TT.getObjectFormat() == llvm::Triple::MachO) ||
+        (TT.getOS() == llvm::Triple::UnknownOS && TT.isOSBinFormatMachO()) ||
         CPU.startswith("cortex-m")) {
       TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
     } else {
@@ -94,8 +103,8 @@ computeTargetABI(const Triple &TT, StringRef CPU,
       TargetABI = ARMBaseTargetMachine::ARM_ABI_APCS;
       break;
     default:
-      if (TT.getOS() == llvm::Triple::NetBSD)
-        TargetABI = ARMBaseTargetMachine::ARM_ABI_APCS;
+      if (TT.isOSNetBSD())
+        TargetABI = ARMBaseTargetMachine::ARM_ABI_APCS;
       else
         TargetABI = ARMBaseTargetMachine::ARM_ABI_AAPCS;
       break;
@@ -105,9 +114,10 @@ computeTargetABI(const Triple &TT, StringRef CPU,
   return TargetABI;
 }
 
-static std::string computeDataLayout(const Triple &TT,
-                                     ARMBaseTargetMachine::ARMABI ABI,
+static std::string computeDataLayout(const Triple &TT, StringRef CPU,
+                                     const TargetOptions &Options,
                                      bool isLittle) {
+  auto ABI = computeTargetABI(TT, CPU, Options);
   std::string Ret = "";
 
   if (isLittle)
@@ -159,15 +169,15 @@ static std::string computeDataLayout(const Triple &TT,
 
 /// TargetMachine ctor - Create an ARM architecture model.
 ///
-ARMBaseTargetMachine::ARMBaseTargetMachine(const Target &T, StringRef TT,
+ARMBaseTargetMachine::ARMBaseTargetMachine(const Target &T, const Triple &TT,
                                            StringRef CPU, StringRef FS,
                                            const TargetOptions &Options,
                                            Reloc::Model RM, CodeModel::Model CM,
                                            CodeGenOpt::Level OL, bool isLittle)
-    : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
-      TargetABI(computeTargetABI(Triple(TT), CPU, Options)),
-      DL(computeDataLayout(Triple(TT), TargetABI, isLittle)),
-      TLOF(createTLOF(Triple(getTargetTriple()))),
+    : LLVMTargetMachine(T, computeDataLayout(TT, CPU, Options, isLittle), TT,
+                        CPU, FS, Options, RM, CM, OL),
+      TargetABI(computeTargetABI(TT, CPU, Options)),
+      TLOF(createTLOF(getTargetTriple())),
       Subtarget(TT, CPU, FS, *this, isLittle), isLittle(isLittle) {
 
   // Default to triple-appropriate float ABI
@@ -195,13 +205,15 @@ ARMBaseTargetMachine::getSubtargetImpl(const Function &F) const {
   // function before we can generate a subtarget. We also need to use
   // it as a key for the subtarget since that can be the only difference
   // between two functions.
-  Attribute SFAttr = F.getFnAttribute("use-soft-float");
-  bool SoftFloat = !SFAttr.hasAttribute(Attribute::None)
-                       ? SFAttr.getValueAsString() == "true"
-                       : Options.UseSoftFloat;
-
-  auto &I = SubtargetMap[CPU + FS + (SoftFloat ? "use-soft-float=true"
-                                               : "use-soft-float=false")];
+  bool SoftFloat =
+      F.hasFnAttribute("use-soft-float") &&
+      F.getFnAttribute("use-soft-float").getValueAsString() == "true";
+  // If the soft float attribute is set on the function turn on the soft float
+  // subtarget feature.
+  if (SoftFloat)
+    FS += FS.empty() ? "+soft-float" : ",+soft-float";
+
+  auto &I = SubtargetMap[CPU + FS];
   if (!I) {
     // This needs to be done before we create a new subtarget since any
     // creation will depend on the TM and the code generation flags on the
@@ -220,8 +232,9 @@ TargetIRAnalysis ARMBaseTargetMachine::getTargetIRAnalysis() {
 
 void ARMTargetMachine::anchor() { }
 
-ARMTargetMachine::ARMTargetMachine(const Target &T, StringRef TT, StringRef CPU,
-                                   StringRef FS, const TargetOptions &Options,
+ARMTargetMachine::ARMTargetMachine(const Target &T, const Triple &TT,
+                                   StringRef CPU, StringRef FS,
+                                   const TargetOptions &Options,
                                    Reloc::Model RM, CodeModel::Model CM,
                                    CodeGenOpt::Level OL, bool isLittle)
     : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, isLittle) {
@@ -233,7 +246,7 @@ ARMTargetMachine::ARMTargetMachine(const Target &T, StringRef TT, StringRef CPU,
 
 void ARMLETargetMachine::anchor() { }
 
-ARMLETargetMachine::ARMLETargetMachine(const Target &T, StringRef TT,
+ARMLETargetMachine::ARMLETargetMachine(const Target &T, const Triple &TT,
                                        StringRef CPU, StringRef FS,
                                        const TargetOptions &Options,
                                        Reloc::Model RM, CodeModel::Model CM,
@@ -242,7 +255,7 @@ ARMLETargetMachine::ARMLETargetMachine(const Target &T, StringRef TT,
 
 void ARMBETargetMachine::anchor() { }
 
-ARMBETargetMachine::ARMBETargetMachine(const Target &T, StringRef TT,
+ARMBETargetMachine::ARMBETargetMachine(const Target &T, const Triple &TT,
                                        StringRef CPU, StringRef FS,
                                        const TargetOptions &Options,
                                        Reloc::Model RM, CodeModel::Model CM,
@@ -251,19 +264,18 @@ ARMBETargetMachine::ARMBETargetMachine(const Target &T, StringRef TT,
 
 void ThumbTargetMachine::anchor() { }
 
-ThumbTargetMachine::ThumbTargetMachine(const Target &T, StringRef TT,
+ThumbTargetMachine::ThumbTargetMachine(const Target &T, const Triple &TT,
                                        StringRef CPU, StringRef FS,
                                        const TargetOptions &Options,
                                        Reloc::Model RM, CodeModel::Model CM,
                                        CodeGenOpt::Level OL, bool isLittle)
-    : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL,
-                           isLittle) {
+    : ARMBaseTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, isLittle) {
   initAsmInfo();
 }
 
 void ThumbLETargetMachine::anchor() { }
 
-ThumbLETargetMachine::ThumbLETargetMachine(const Target &T, StringRef TT,
+ThumbLETargetMachine::ThumbLETargetMachine(const Target &T, const Triple &TT,
                                            StringRef CPU, StringRef FS,
                                            const TargetOptions &Options,
                                            Reloc::Model RM, CodeModel::Model CM,
@@ -272,7 +284,7 @@ ThumbLETargetMachine::ThumbLETargetMachine(const Target &T, StringRef TT,
 
 void ThumbBETargetMachine::anchor() { }
 
-ThumbBETargetMachine::ThumbBETargetMachine(const Target &T, StringRef TT,
+ThumbBETargetMachine::ThumbBETargetMachine(const Target &T, const Triple &TT,
                                            StringRef CPU, StringRef FS,
                                            const TargetOptions &Options,
                                            Reloc::Model RM, CodeModel::Model CM,
@@ -290,10 +302,6 @@ public:
     return getTM<ARMBaseTargetMachine>();
   }
 
-  const ARMSubtarget &getARMSubtarget() const {
-    return *getARMTargetMachine().getSubtargetImpl();
-  }
-
   void addIRPasses() override;
   bool addPreISel() override;
   bool addInstSelector() override;
@@ -316,22 +324,38 @@ void ARMPassConfig::addIRPasses() {
   // Cmpxchg instructions are often used with a subsequent comparison to
   // determine whether it succeeded. We can exploit existing control-flow in
   // ldrex/strex loops to simplify this, but it needs tidying up.
-  const ARMSubtarget *Subtarget = &getARMSubtarget();
-  if (Subtarget->hasAnyDataBarrier() && !Subtarget->isThumb1Only())
-    if (TM->getOptLevel() != CodeGenOpt::None && EnableAtomicTidy)
-      addPass(createCFGSimplificationPass());
+  if (TM->getOptLevel() != CodeGenOpt::None && EnableAtomicTidy)
+    addPass(createCFGSimplificationPass(-1, [this](const Function &F) {
+      const auto &ST = this->TM->getSubtarget<ARMSubtarget>(F);
+      return ST.hasAnyDataBarrier() && !ST.isThumb1Only();
+    }));
 
   TargetPassConfig::addIRPasses();
+
+  // Match interleaved memory accesses to ldN/stN intrinsics.
+  if (TM->getOptLevel() != CodeGenOpt::None)
+    addPass(createInterleavedAccessPass(TM));
 }
 
 bool ARMPassConfig::addPreISel() {
-  if (TM->getOptLevel() != CodeGenOpt::None)
+  if ((TM->getOptLevel() != CodeGenOpt::None &&
+       EnableGlobalMerge == cl::BOU_UNSET) ||
+      EnableGlobalMerge == cl::BOU_TRUE) {
     // FIXME: This is using the thumb1 only constant value for
     // maximal global offset for merging globals. We may want
     // to look into using the old value for non-thumb1 code of
     // 4095 based on the TargetMachine, but this starts to become
     // tricky when doing code gen per function.
-    addPass(createGlobalMergePass(TM, 127));
+    bool OnlyOptimizeForSize = (TM->getOptLevel() < CodeGenOpt::Aggressive) &&
+                               (EnableGlobalMerge == cl::BOU_UNSET);
+    // Merging of extern globals is enabled by default on non-Mach-O as we
+    // expect it to be generally either beneficial or harmless. On Mach-O it
+    // is disabled as we emit the .subsections_via_symbols directive which
+    // means that merging extern globals is not safe.
+    bool MergeExternalByDefault = !TM->getTargetTriple().isOSBinFormatMachO();
+    addPass(createGlobalMergePass(TM, 127, OnlyOptimizeForSize,
+                                  MergeExternalByDefault));
+  }
 
   return false;
 }
 
@@ -339,25 +363,28 @@ bool ARMPassConfig::addPreISel() {
 bool ARMPassConfig::addInstSelector() {
   addPass(createARMISelDag(getARMTargetMachine(), getOptLevel()));
 
-  if (Triple(TM->getTargetTriple()).isOSBinFormatELF() &&
-      TM->Options.EnableFastISel)
+  if (TM->getTargetTriple().isOSBinFormatELF() && TM->Options.EnableFastISel)
     addPass(createARMGlobalBaseRegPass());
   return false;
 }
 
 void ARMPassConfig::addPreRegAlloc() {
-  if (getOptLevel() != CodeGenOpt::None)
-    addPass(createARMLoadStoreOptimizationPass(true));
-  if (getOptLevel() != CodeGenOpt::None)
+  if (getOptLevel() != CodeGenOpt::None) {
     addPass(createMLxExpansionPass());
-  if (getOptLevel() != CodeGenOpt::None && !DisableA15SDOptimization) {
-    addPass(createA15SDOptimizerPass());
+
+    if (EnableARMLoadStoreOpt)
+      addPass(createARMLoadStoreOptimizationPass(/* pre-register alloc */ true));
+
+    if (!DisableA15SDOptimization)
+      addPass(createA15SDOptimizerPass());
   }
 }
 
 void ARMPassConfig::addPreSched2() {
   if (getOptLevel() != CodeGenOpt::None) {
-    addPass(createARMLoadStoreOptimizationPass());
+    if (EnableARMLoadStoreOpt)
+      addPass(createARMLoadStoreOptimizationPass());
+
     addPass(createExecutionDependencyFixPass(&ARM::DPRRegClass));
   }
 
@@ -367,11 +394,14 @@ void ARMPassConfig::addPreSched2() {
 
   if (getOptLevel() != CodeGenOpt::None) {
     // in v8, IfConversion depends on Thumb instruction widths
-    if (getARMSubtarget().restrictIT())
-      addPass(createThumb2SizeReductionPass());
-    if (!getARMSubtarget().isThumb1Only())
-      addPass(&IfConverterID);
-  }
+    addPass(createThumb2SizeReductionPass([this](const Function &F) {
+      return this->TM->getSubtarget<ARMSubtarget>(F).restrictIT();
+    }));
+
+    addPass(createIfConverter([this](const Function &F) {
+      return !this->TM->getSubtarget<ARMSubtarget>(F).isThumb1Only();
+    }));
+  }
 
   addPass(createThumb2ITBlockPass());
 }
 
@@ -379,9 +409,13 @@ void ARMPassConfig::addPreEmitPass() {
   addPass(createThumb2SizeReductionPass());
 
   // Constant island pass work on unbundled instructions.
-  if (getARMSubtarget().isThumb2())
-    addPass(&UnpackMachineBundlesID);
+  addPass(createUnpackMachineBundles([this](const Function &F) {
+    return this->TM->getSubtarget<ARMSubtarget>(F).isThumb2();
+  }));
+
+  // Don't optimize barriers at -O0.
+  if (getOptLevel() != CodeGenOpt::None)
+    addPass(createARMOptimizeBarriersPass());
 
-  addPass(createARMOptimizeBarriersPass());
   addPass(createARMConstantIslandPass());
 }
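
Most hunks in this diff follow one pattern: instead of querying a single subtarget while the pass pipeline is being built (the old getARMSubtarget() calls), each pass now carries a predicate that is evaluated against the subtarget of each function, so one TargetMachine can serve functions compiled with different feature sets. A minimal standalone sketch of that pattern under stand-in types (Function and IfConverterPass here are illustrative, not the LLVM classes):

#include <functional>
#include <iostream>
#include <string>
#include <vector>

struct Function { std::string Name; bool IsThumb1Only; };

struct IfConverterPass {
  // The predicate is captured at pipeline-construction time but evaluated
  // per function, mirroring createIfConverter([this](const Function &F) ...).
  std::function<bool(const Function &)> Pred;

  void run(const Function &F) const {
    if (Pred && !Pred(F))
      return; // the pass is a no-op for functions the predicate rejects
    std::cout << "if-converting " << F.Name << '\n';
  }
};

int main() {
  IfConverterPass P{[](const Function &F) { return !F.IsThumb1Only; }};
  std::vector<Function> Fns = {{"fast", false}, {"tiny", true}};
  for (const Function &F : Fns)
    P.run(F); // only "fast" is if-converted
}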
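
The -arm-global-merge gating in addPreISel() is three-state: the pass runs when the user forces it on, or when optimizing and the user said nothing. A minimal standalone sketch of that condition, using plain C++ stand-ins (BoolOrDefault and shouldRunGlobalMerge are illustrative names, not the llvm::cl machinery):

#include <iostream>

enum class BoolOrDefault { Unset, True, False }; // mirrors cl::boolOrDefault

bool shouldRunGlobalMerge(int OptLevel, BoolOrDefault Flag) {
  // Same shape as the condition in addPreISel(): explicit True always wins,
  // explicit False always loses, and Unset defers to the optimization level.
  return (OptLevel != 0 && Flag == BoolOrDefault::Unset) ||
         Flag == BoolOrDefault::True;
}

int main() {
  std::cout << shouldRunGlobalMerge(2, BoolOrDefault::Unset)  // 1: -O2, unset
            << shouldRunGlobalMerge(0, BoolOrDefault::Unset)  // 0: -O0, unset
            << shouldRunGlobalMerge(0, BoolOrDefault::True)   // 1: forced on
            << shouldRunGlobalMerge(2, BoolOrDefault::False)  // 0: forced off
            << '\n';
}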
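
The getSubtargetImpl() hunk folds the "use-soft-float" function attribute into the feature string that keys the subtarget cache, so soft-float and hard-float functions get distinct subtargets from the same TargetMachine. A standalone sketch of that keying under stand-in types (Subtarget and the map shape are illustrative, not the LLVM internals):

#include <iostream>
#include <map>
#include <memory>
#include <string>

struct Subtarget { std::string Features; };

std::map<std::string, std::unique_ptr<Subtarget>> SubtargetMap;

Subtarget &getSubtargetFor(std::string CPU, std::string FS, bool SoftFloat) {
  // Same append logic as the patch: the attribute becomes a feature-string
  // component, which in turn changes the cache key below.
  if (SoftFloat)
    FS += FS.empty() ? "+soft-float" : ",+soft-float";
  auto &I = SubtargetMap[CPU + FS];
  if (!I) // lazily create one subtarget per distinct CPU+features key
    I.reset(new Subtarget{FS});
  return *I;
}

int main() {
  getSubtargetFor("cortex-a9", "+neon", false);
  getSubtargetFor("cortex-a9", "+neon", true); // keyed separately
  std::cout << SubtargetMap.size() << '\n';    // prints 2
}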