//===-- AArch64TargetMachine.cpp - Define TargetMachine for AArch64 ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//
#include "AArch64.h"
#include "AArch64TargetMachine.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/IR/Function.h"
#include "llvm/PassManager.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Scalar.h"
using namespace llvm;

static cl::opt<bool>
EnableCCMP("aarch64-ccmp", cl::desc("Enable the CCMP formation pass"),
           cl::init(true), cl::Hidden);

static cl::opt<bool> EnableMCR("aarch64-mcr",
                               cl::desc("Enable the machine combiner pass"),
                               cl::init(true), cl::Hidden);

static cl::opt<bool>
EnableStPairSuppress("aarch64-stp-suppress", cl::desc("Suppress STP for AArch64"),
                     cl::init(true), cl::Hidden);

static cl::opt<bool>
EnableAdvSIMDScalar("aarch64-simd-scalar", cl::desc("Enable use of AdvSIMD scalar"
                    " integer instructions"), cl::init(false), cl::Hidden);

static cl::opt<bool>
EnablePromoteConstant("aarch64-promote-const", cl::desc("Enable the promote "
                      "constant pass"), cl::init(true), cl::Hidden);

static cl::opt<bool>
EnableCollectLOH("aarch64-collect-loh", cl::desc("Enable the pass that emits the"
                 " linker optimization hints (LOH)"), cl::init(true),
                 cl::Hidden);

static cl::opt<bool>
EnableDeadRegisterElimination("aarch64-dead-def-elimination", cl::Hidden,
                              cl::desc("Enable the pass that removes dead"
                                       " definitions and replaces stores to"
                                       " them with stores to the zero"
                                       " register"),
                              cl::init(true));

static cl::opt<bool>
EnableLoadStoreOpt("aarch64-load-store-opt", cl::desc("Enable the load/store pair"
                   " optimization pass"), cl::init(true), cl::Hidden);

static cl::opt<bool>
EnableAtomicTidy("aarch64-atomic-cfg-tidy", cl::Hidden,
                 cl::desc("Run SimplifyCFG after expanding atomic operations"
                          " to make use of cmpxchg flow-based information"),
                 cl::init(true));

static cl::opt<bool>
EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden,
                        cl::desc("Run early if-conversion"),
                        cl::init(true));

static cl::opt<bool>
EnableCondOpt("aarch64-condopt",
              cl::desc("Enable the condition optimizer pass"),
              cl::init(true), cl::Hidden);

static cl::opt<bool>
EnablePBQP("aarch64-pbqp", cl::Hidden,
           cl::desc("Use PBQP register allocator (experimental)"),
           cl::init(false));

static cl::opt<bool>
EnableA53Fix835769("aarch64-fix-cortex-a53-835769", cl::Hidden,
                   cl::desc("Work around Cortex-A53 erratum 835769"),
                   cl::init(false));
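
// Note: all of the flags above are cl::Hidden, so they are only listed by
// -help-hidden. They can still be toggled on the llc command line for
// experiments; the triple and input file below are purely illustrative:
//   llc -mtriple=aarch64-none-linux-gnu -aarch64-ccmp=false input.ll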

extern "C" void LLVMInitializeAArch64Target() {
  // Register the target.
  RegisterTargetMachine<AArch64leTargetMachine> X(TheAArch64leTarget);
  RegisterTargetMachine<AArch64beTargetMachine> Y(TheAArch64beTarget);
  RegisterTargetMachine<AArch64leTargetMachine> Z(TheARM64Target);
}

/// TargetMachine ctor - Create an AArch64 architecture model.
///
AArch64TargetMachine::AArch64TargetMachine(const Target &T, StringRef TT,
                                           StringRef CPU, StringRef FS,
                                           const TargetOptions &Options,
                                           Reloc::Model RM, CodeModel::Model CM,
                                           CodeGenOpt::Level OL,
                                           bool LittleEndian)
    : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
      Subtarget(TT, CPU, FS, *this, LittleEndian), isLittle(LittleEndian),
      usingPBQP(false) {
  initAsmInfo();

  if (EnablePBQP && Subtarget.isCortexA57() && OL != CodeGenOpt::None) {
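    // Making PBQP the default affects the whole compilation; an explicit
    // -regalloc= choice on the command line still takes precedence.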
    usingPBQP = true;
    RegisterRegAlloc::setDefault(createDefaultPBQPRegisterAllocator);
  }
}

const AArch64Subtarget *
AArch64TargetMachine::getSubtargetImpl(const Function &F) const {
  AttributeSet FnAttrs = F.getAttributes();
  Attribute CPUAttr =
      FnAttrs.getAttribute(AttributeSet::FunctionIndex, "target-cpu");
  Attribute FSAttr =
      FnAttrs.getAttribute(AttributeSet::FunctionIndex, "target-features");

  std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
                        ? CPUAttr.getValueAsString().str()
                        : TargetCPU;
  std::string FS = !FSAttr.hasAttribute(Attribute::None)
                       ? FSAttr.getValueAsString().str()
                       : TargetFS;
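
  // Subtargets are cached per CPU + feature-string key, so functions with
  // identical "target-cpu" / "target-features" attributes share one
  // AArch64Subtarget instance.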
  auto &I = SubtargetMap[CPU + FS];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<AArch64Subtarget>(TargetTriple, CPU, FS, *this,
                                            isLittle);
  }
  return I.get();
}

void AArch64leTargetMachine::anchor() { }

AArch64leTargetMachine::
AArch64leTargetMachine(const Target &T, StringRef TT,
                       StringRef CPU, StringRef FS, const TargetOptions &Options,
                       Reloc::Model RM, CodeModel::Model CM,
                       CodeGenOpt::Level OL)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {}

void AArch64beTargetMachine::anchor() { }

AArch64beTargetMachine::
AArch64beTargetMachine(const Target &T, StringRef TT,
                       StringRef CPU, StringRef FS, const TargetOptions &Options,
                       Reloc::Model RM, CodeModel::Model CM,
                       CodeGenOpt::Level OL)
    : AArch64TargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {}

namespace {
/// AArch64 Code Generator Pass Configuration Options.
class AArch64PassConfig : public TargetPassConfig {
public:
  AArch64PassConfig(AArch64TargetMachine *TM, PassManagerBase &PM)
      : TargetPassConfig(TM, PM) {
    if (TM->getOptLevel() != CodeGenOpt::None)
      substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
  }

  AArch64TargetMachine &getAArch64TargetMachine() const {
    return getTM<AArch64TargetMachine>();
  }

  void addIRPasses() override;
  bool addPreISel() override;
  bool addInstSelector() override;
  bool addILPOpts() override;
  bool addPreRegAlloc() override;
  bool addPostRegAlloc() override;
  bool addPreSched2() override;
  bool addPreEmitPass() override;
};
} // namespace

void AArch64TargetMachine::addAnalysisPasses(PassManagerBase &PM) {
  // Add the target-independent BasicTTI pass first, then our AArch64 pass.
  // This allows the AArch64 pass to delegate to the target-independent layer
  // when appropriate.
  PM.add(createBasicTargetTransformInfoPass(this));
  PM.add(createAArch64TargetTransformInfoPass(this));
}

TargetPassConfig *AArch64TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new AArch64PassConfig(this, PM);
}

void AArch64PassConfig::addIRPasses() {
  // Always expand atomic operations; we don't deal with atomicrmw or cmpxchg
  // ourselves.
  addPass(createAtomicExpandPass(TM));

  // Cmpxchg instructions are often used with a subsequent comparison to
  // determine whether it succeeded. We can exploit existing control flow in
  // ldrex/strex loops to simplify this, but it needs tidying up.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAtomicTidy)
    addPass(createCFGSimplificationPass());

  TargetPassConfig::addIRPasses();
}

// Pass Pipeline Configuration
bool AArch64PassConfig::addPreISel() {
  // Run promote constant before global merge, so that the promoted constants
  // get a chance to be merged.
  if (TM->getOptLevel() != CodeGenOpt::None && EnablePromoteConstant)
    addPass(createAArch64PromoteConstantPass());
  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createGlobalMergePass(TM));
  if (TM->getOptLevel() != CodeGenOpt::None)
    addPass(createAArch64AddressTypePromotionPass());

  return false;
}

bool AArch64PassConfig::addInstSelector() {
  addPass(createAArch64ISelDag(getAArch64TargetMachine(), getOptLevel()));

  // For ELF, clean up any local-dynamic TLS accesses (i.e. combine as many
  // references to _TLS_MODULE_BASE_ as possible).
  if (TM->getSubtarget<AArch64Subtarget>().isTargetELF() &&
      getOptLevel() != CodeGenOpt::None)
    addPass(createAArch64CleanupLocalDynamicTLSPass());

  return false;
}
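
// The ILP optimizations below run on machine IR that is still in SSA form,
// before register allocation.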
bool AArch64PassConfig::addILPOpts() {
  if (EnableCondOpt)
    addPass(createAArch64ConditionOptimizerPass());
  if (EnableCCMP)
    addPass(createAArch64ConditionalCompares());
  if (EnableMCR)
    addPass(&MachineCombinerID);
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterID);
  if (EnableStPairSuppress)
    addPass(createAArch64StorePairSuppressPass());
  return true;
}

bool AArch64PassConfig::addPreRegAlloc() {
  // Use AdvSIMD scalar instructions whenever profitable.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableAdvSIMDScalar) {
    addPass(createAArch64AdvSIMDScalar());
    // The AdvSIMD pass may produce copies that can be rewritten to
    // be register coalescer friendly.
    addPass(&PeepholeOptimizerID);
  }
  return true;
}

bool AArch64PassConfig::addPostRegAlloc() {
  // Change dead register definitions to refer to the zero register.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableDeadRegisterElimination)
    addPass(createAArch64DeadRegisterDefinitions());
  if (TM->getOptLevel() != CodeGenOpt::None &&
      TM->getSubtarget<AArch64Subtarget>().isCortexA57() &&
      !static_cast<const AArch64TargetMachine *>(TM)->isPBQPUsed())
    // Improve performance of some FP/SIMD code on Cortex-A57.
    addPass(createAArch64A57FPLoadBalancing());
  return true;
}

bool AArch64PassConfig::addPreSched2() {
  // Expand some pseudo instructions to allow proper scheduling.
  addPass(createAArch64ExpandPseudoPass());
  // Use load/store pair instructions when possible.
  if (TM->getOptLevel() != CodeGenOpt::None && EnableLoadStoreOpt)
    addPass(createAArch64LoadStoreOptimizationPass());
  return true;
}

bool AArch64PassConfig::addPreEmitPass() {
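  // Work around Cortex-A53 erratum 835769 by inserting a NOP between a
  // load/store and a following multiply-accumulate that could otherwise
  // trigger the erratum.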
  if (EnableA53Fix835769)
    addPass(createAArch64A53Fix835769());
  // Relax conditional branch instructions if they're otherwise out of
  // range of their destination.
  addPass(createAArch64BranchRelaxation());
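  // Linker Optimization Hints (LOHs) are emitted as MachO-specific
  // annotations for the linker, hence the isTargetMachO() guard.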
  if (TM->getOptLevel() != CodeGenOpt::None && EnableCollectLOH &&
      TM->getSubtarget<AArch64Subtarget>().isTargetMachO())
    addPass(createAArch64CollectLOHPass());
  return true;
}