//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief The AMDGPU target machine contains all of the hardware specific
/// information needed to emit code for R600 and SI GPUs.
//
//===----------------------------------------------------------------------===//
#include "AMDGPUTargetMachine.h"
#include "AMDGPU.h"
#include "R600ISelLowering.h"
#include "R600InstrInfo.h"
#include "R600MachineScheduler.h"
#include "SIISelLowering.h"
#include "SIInstrInfo.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/CodeGen/MachineFunctionAnalysis.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/Verifier.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/PassManager.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_os_ostream.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Scalar.h"

using namespace llvm;

extern "C" void LLVMInitializeR600Target() {
  // Register the target
  RegisterTargetMachine<AMDGPUTargetMachine> X(TheAMDGPUTarget);
}

static ScheduleDAGInstrs *createR600MachineScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, make_unique<R600SchedStrategy>());
}
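
// Registering the R600 strategy with the machine scheduler registry makes it
// selectable by name on the command line (e.g. llc -misched=r600).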
static MachineSchedRegistry
SchedCustomRegistry("r600", "Run R600's custom scheduler",
                    createR600MachineScheduler);

AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, StringRef TT,
                                         StringRef CPU, StringRef FS,
                                         TargetOptions Options, Reloc::Model RM,
                                         CodeModel::Model CM,
                                         CodeGenOpt::Level OptLevel)
    : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OptLevel),
      Subtarget(TT, CPU, FS, *this), IntrinsicInfo() {
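  // Both the R600 and SI backends lower control flow with passes that assume
  // a structured CFG, so tell the generic codegen passes not to break it.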
  setRequiresStructuredCFG(true);
}

AMDGPUTargetMachine::~AMDGPUTargetMachine() {
}

namespace {
class AMDGPUPassConfig : public TargetPassConfig {
public:
  AMDGPUPassConfig(AMDGPUTargetMachine *TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {}

  AMDGPUTargetMachine &getAMDGPUTargetMachine() const {
    return getTM<AMDGPUTargetMachine>();
  }
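
  // R600-family subtargets (up to Northern Islands) use the custom scheduler
  // registered above; returning null lets SI use the default machine scheduler.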
  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
      return createR600MachineScheduler(C);
    return nullptr;
  }

  void addIRPasses() override;
  void addCodeGenPrepare() override;
  bool addPreISel() override;
  bool addInstSelector() override;
  bool addPreRegAlloc() override;
  bool addPostRegAlloc() override;
  bool addPreSched2() override;
  bool addPreEmitPass() override;
};
} // End of anonymous namespace

TargetPassConfig *AMDGPUTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new AMDGPUPassConfig(this, PM);
}

//===----------------------------------------------------------------------===//
// AMDGPU Analysis Pass Setup
//===----------------------------------------------------------------------===//

void AMDGPUTargetMachine::addAnalysisPasses(PassManagerBase &PM) {
  // Add first the target-independent BasicTTI pass, then our AMDGPU pass. This
  // allows the AMDGPU pass to delegate to the target independent layer when
  // appropriate.
  PM.add(createBasicTargetTransformInfoPass(this));
  PM.add(createAMDGPUTargetTransformInfoPass(this));
}

void AMDGPUPassConfig::addIRPasses() {
  // Function calls are not supported, so make sure we inline everything.
  addPass(createAMDGPUAlwaysInlinePass());
  addPass(createAlwaysInlinerPass());
  // We need to add the barrier noop pass, otherwise adding the function
  // inlining pass will cause all of the PassConfig's passes to be run
  // one function at a time, which means if we have a module with two
  // functions, then we will generate code for the first function
  // without ever running any passes on the second.
  addPass(createBarrierNoopPass());
  TargetPassConfig::addIRPasses();
}

void AMDGPUPassConfig::addCodeGenPrepare() {
  const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();
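  // When alloca promotion is enabled, rewrite allocas into vectors or LDS
  // before instruction selection, then run SROA to clean up the result.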
  if (ST.isPromoteAllocaEnabled()) {
    addPass(createAMDGPUPromoteAlloca(ST));
    addPass(createSROAPass());
  }

  TargetPassConfig::addCodeGenPrepare();
}

bool AMDGPUPassConfig::addPreISel() {
  const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();
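  // Prepare the IR-level CFG for instruction selection: flatten it, optionally
  // structurize it, and add the target-specific control-flow handling below.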
  addPass(createFlattenCFGPass());
  if (ST.IsIRStructurizerEnabled())
    addPass(createStructurizeCFGPass());
  if (ST.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
    addPass(createSinkingPass());
    addPass(createSITypeRewriter());
    addPass(createSIAnnotateControlFlowPass());
  } else {
    addPass(createR600TextureIntrinsicsReplacer());
  }
  return false;
}

bool AMDGPUPassConfig::addInstSelector() {
  addPass(createAMDGPUISelDag(getAMDGPUTargetMachine()));
  addPass(createSILowerI1CopiesPass());
  return false;
}

bool AMDGPUPassConfig::addPreRegAlloc() {
  const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();

  if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
    addPass(createR600VectorRegMerger(*TM));
  } else {
    addPass(createSIFixSGPRCopiesPass(*TM));
    // SIFixSGPRCopies can generate a lot of duplicate instructions,
    // so we need to run MachineCSE afterwards.
    addPass(&MachineCSEID);

    if (getOptLevel() > CodeGenOpt::None && ST.loadStoreOptEnabled()) {
      // Don't do this with no optimizations since it throws away debug info by
      // merging nonadjacent loads.

      // This should be run after scheduling, but before register allocation. It
      // also needs extra copies to the address operand to be eliminated.
      initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
      insertPass(&MachineSchedulerID, &SILoadStoreOptimizerID);
    }

    addPass(createSIShrinkInstructionsPass());
    addPass(createSIFixSGPRLiveRangesPass());
  }
  return false;
}

bool AMDGPUPassConfig::addPostRegAlloc() {
  const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();

  addPass(createSIShrinkInstructionsPass());
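  // GCN targets need explicit s_waitcnt instructions inserted once register
  // assignment is final; R600-family targets do not use them.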
  if (ST.getGeneration() > AMDGPUSubtarget::NORTHERN_ISLANDS) {
    addPass(createSIInsertWaits(*TM));
  }
  return false;
}

bool AMDGPUPassConfig::addPreSched2() {
  const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();

  if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
    addPass(createR600EmitClauseMarkers());
  if (ST.isIfCvtEnabled())
    addPass(&IfConverterID);
  if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
    addPass(createR600ClauseMergePass(*TM));
  return false;
}

bool AMDGPUPassConfig::addPreEmitPass() {
  const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>();
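  // R600-family targets finish their VLIW bundles and clause structure here,
  // while SI only needs its control-flow pseudo instructions lowered.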
  if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
    addPass(createAMDGPUCFGStructurizerPass());
    addPass(createR600ExpandSpecialInstrsPass(*TM));
    addPass(&FinalizeMachineBundlesID);
    addPass(createR600Packetizer(*TM));
    addPass(createR600ControlFlowFinalizer(*TM));
  } else {
    addPass(createSILowerControlFlowPass(*TM));
  }

  return false;
}