//===-- AMDILISelDAGToDAG.cpp - A dag to dag inst selector for AMDIL ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//==-----------------------------------------------------------------------===//
//
/// \file
/// \brief Defines an instruction selector for the AMDGPU target.
//
//===----------------------------------------------------------------------===//
#include "AMDGPUInstrInfo.h"
#include "AMDGPUISelLowering.h" // For AMDGPUISD
#include "AMDGPURegisterInfo.h"
#include "R600InstrInfo.h"
#include "SIISelLowering.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/Function.h"

using namespace llvm;

//===----------------------------------------------------------------------===//
// Instruction Selector Implementation
//===----------------------------------------------------------------------===//

namespace {
/// AMDGPU specific code to select AMDGPU machine instructions for
/// SelectionDAG operations.
class AMDGPUDAGToDAGISel : public SelectionDAGISel {
  // Subtarget - Keep a pointer to the AMDGPU Subtarget around so that we can
  // make the right decision when generating code for different targets.
  const AMDGPUSubtarget &Subtarget;

public:
  AMDGPUDAGToDAGISel(TargetMachine &TM);
  virtual ~AMDGPUDAGToDAGISel();

  SDNode *Select(SDNode *N) override;
  const char *getPassName() const override;
  void PostprocessISelDAG() override;

private:
  bool isInlineImmediate(SDNode *N) const;
  inline SDValue getSmallIPtrImm(unsigned Imm);
  bool FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg, SDValue &Abs,
                   const R600InstrInfo *TII);
  bool FoldOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
  bool FoldDotOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);

  // Complex pattern selectors
  bool SelectADDRParam(SDValue Addr, SDValue &R1, SDValue &R2);
  bool SelectADDR(SDValue N, SDValue &R1, SDValue &R2);
  bool SelectADDR64(SDValue N, SDValue &R1, SDValue &R2);

  static bool checkType(const Value *Ptr, unsigned AS);
  static bool checkPrivateAddress(const MachineMemOperand *Op);

  static bool isGlobalStore(const StoreSDNode *N);
  static bool isPrivateStore(const StoreSDNode *N);
  static bool isLocalStore(const StoreSDNode *N);
  static bool isRegionStore(const StoreSDNode *N);

  bool isCPLoad(const LoadSDNode *N) const;
  bool isConstantLoad(const LoadSDNode *N, int CbId) const;
  bool isGlobalLoad(const LoadSDNode *N) const;
  bool isParamLoad(const LoadSDNode *N) const;
  bool isPrivateLoad(const LoadSDNode *N) const;
  bool isLocalLoad(const LoadSDNode *N) const;
  bool isRegionLoad(const LoadSDNode *N) const;

  /// \returns True if the current basic block being selected is at control
  /// flow depth 0. Meaning that the current block dominates the exit block.
  bool isCFDepth0() const;

  const TargetRegisterClass *getOperandRegClass(SDNode *N, unsigned OpNo) const;
  bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue &IntPtr);
  bool SelectGlobalValueVariableOffset(SDValue Addr, SDValue &BaseReg,
                                       SDValue &Offset);
  bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);

  // Include the pieces autogenerated from the target description.
#include "AMDGPUGenDAGISel.inc"
};
} // end anonymous namespace

/// \brief This pass converts a legalized DAG into an AMDGPU-specific
/// DAG, ready for instruction scheduling.
FunctionPass *llvm::createAMDGPUISelDag(TargetMachine &TM) {
  return new AMDGPUDAGToDAGISel(TM);
}

AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(TargetMachine &TM)
    : SelectionDAGISel(TM), Subtarget(TM.getSubtarget<AMDGPUSubtarget>()) {
}

AMDGPUDAGToDAGISel::~AMDGPUDAGToDAGISel() {
}

bool AMDGPUDAGToDAGISel::isInlineImmediate(SDNode *N) const {
  const SITargetLowering *TL
      = static_cast<const SITargetLowering *>(getTargetLowering());
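  // A result of 0 from analyzeImmediate() means the constant can be encoded
  // as an inline immediate operand.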
  return TL->analyzeImmediate(N) == 0;
}

/// \brief Determine the register class for \p OpNo
/// \returns The register class of the virtual register that will be used for
/// the given operand number \p OpNo or NULL if the register class cannot be
/// determined.
const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
                                                          unsigned OpNo) const {
  if (!N->isMachineOpcode())
    return nullptr;

  switch (N->getMachineOpcode()) {
  default: {
    const MCInstrDesc &Desc = TM.getInstrInfo()->get(N->getMachineOpcode());
    unsigned OpIdx = Desc.getNumDefs() + OpNo;
    if (OpIdx >= Desc.getNumOperands())
      return nullptr;
    int RegClass = Desc.OpInfo[OpIdx].RegClass;
    if (RegClass == -1)
      return nullptr;

    return TM.getRegisterInfo()->getRegClass(RegClass);
  }
  case AMDGPU::REG_SEQUENCE: {
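    // REG_SEQUENCE has no fixed MCInstrDesc operand list: operand 0 is the
    // register class ID, followed by (value, subreg-index) pairs.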
    unsigned RCID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    const TargetRegisterClass *SuperRC = TM.getRegisterInfo()->getRegClass(RCID);

    SDValue SubRegOp = N->getOperand(OpNo + 1);
    unsigned SubRegIdx = cast<ConstantSDNode>(SubRegOp)->getZExtValue();
    return TM.getRegisterInfo()->getSubClassWithSubReg(SuperRC, SubRegIdx);
  }
  }
}

SDValue AMDGPUDAGToDAGISel::getSmallIPtrImm(unsigned Imm) {
  return CurDAG->getTargetConstant(Imm, MVT::i32);
}

bool AMDGPUDAGToDAGISel::SelectADDRParam(
    SDValue Addr, SDValue &R1, SDValue &R2) {

  if (Addr.getOpcode() == ISD::FrameIndex) {
    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
      R2 = CurDAG->getTargetConstant(0, MVT::i32);
    } else {
      R1 = Addr;
      R2 = CurDAG->getTargetConstant(0, MVT::i32);
    }
  } else if (Addr.getOpcode() == ISD::ADD) {
    R1 = Addr.getOperand(0);
    R2 = Addr.getOperand(1);
  } else {
    R1 = Addr;
    R2 = CurDAG->getTargetConstant(0, MVT::i32);
  }
  return true;
}

bool AMDGPUDAGToDAGISel::SelectADDR(SDValue Addr, SDValue &R1, SDValue &R2) {
  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
      Addr.getOpcode() == ISD::TargetGlobalAddress) {
    return false;
  }
  return SelectADDRParam(Addr, R1, R2);
}

bool AMDGPUDAGToDAGISel::SelectADDR64(SDValue Addr, SDValue &R1, SDValue &R2) {
  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
      Addr.getOpcode() == ISD::TargetGlobalAddress) {
    return false;
  }

  if (Addr.getOpcode() == ISD::FrameIndex) {
    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
      R2 = CurDAG->getTargetConstant(0, MVT::i64);
    } else {
      R1 = Addr;
      R2 = CurDAG->getTargetConstant(0, MVT::i64);
    }
  } else if (Addr.getOpcode() == ISD::ADD) {
    R1 = Addr.getOperand(0);
    R2 = Addr.getOperand(1);
  } else {
    R1 = Addr;
    R2 = CurDAG->getTargetConstant(0, MVT::i64);
  }
  return true;
}

SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
  unsigned int Opc = N->getOpcode();
  if (N->isMachineOpcode()) {
    N->setNodeId(-1);
    return nullptr; // Already selected.
  }

  const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
  switch (Opc) {
  default: break;
  // We are selecting i64 ADD here instead of custom lowering it during
  // DAG legalization, so we can fold some i64 ADDs used for address
  // calculation into the LOAD and STORE instructions.
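  //
  // Roughly, an i64 (add x, y) is rebuilt out of 32-bit halves (a sketch of
  // the nodes built below, not verbatim DAG output):
  //   lo = S_ADD_I32  (EXTRACT_SUBREG x, sub0), (EXTRACT_SUBREG y, sub0)
  //   hi = S_ADDC_U32 (EXTRACT_SUBREG x, sub1), (EXTRACT_SUBREG y, sub1)
  //   result = REG_SEQUENCE SReg_64, lo, sub0, hi, sub1
  // with the carry from the low add glued into the high add.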
  case ISD::ADD: {
    if (N->getValueType(0) != MVT::i64 ||
        ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
      break;

    SDLoc DL(N);
    SDValue LHS = N->getOperand(0);
    SDValue RHS = N->getOperand(1);

    SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
    SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);

    SDNode *Lo0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                         DL, MVT::i32, LHS, Sub0);
    SDNode *Hi0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                         DL, MVT::i32, LHS, Sub1);

    SDNode *Lo1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                         DL, MVT::i32, RHS, Sub0);
    SDNode *Hi1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                         DL, MVT::i32, RHS, Sub1);

    SDVTList VTList = CurDAG->getVTList(MVT::i32, MVT::Glue);

    SmallVector<SDValue, 8> AddLoArgs;
    AddLoArgs.push_back(SDValue(Lo0, 0));
    AddLoArgs.push_back(SDValue(Lo1, 0));

    SDNode *AddLo = CurDAG->getMachineNode(
        isCFDepth0() ? AMDGPU::S_ADD_I32 : AMDGPU::V_ADD_I32_e32,
        DL, VTList, AddLoArgs);
    SDValue Carry = SDValue(AddLo, 1);
    SDNode *AddHi = CurDAG->getMachineNode(
        isCFDepth0() ? AMDGPU::S_ADDC_U32 : AMDGPU::V_ADDC_U32_e32,
        DL, MVT::i32, SDValue(Hi0, 0), SDValue(Hi1, 0), Carry);

    SDValue Args[5] = {
      CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32),
      SDValue(AddLo, 0),
      Sub0,
      SDValue(AddHi, 0),
      Sub1,
    };
    return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, MVT::i64, Args);
  }
  case ISD::SCALAR_TO_VECTOR:
  case ISD::BUILD_VECTOR: {
    unsigned RegClassID;
    const AMDGPURegisterInfo *TRI =
        static_cast<const AMDGPURegisterInfo *>(TM.getRegisterInfo());
    const SIRegisterInfo *SIRI =
        static_cast<const SIRegisterInfo *>(TM.getRegisterInfo());
    EVT VT = N->getValueType(0);
    unsigned NumVectorElts = VT.getVectorNumElements();
    EVT EltVT = VT.getVectorElementType();
    assert(EltVT.bitsEq(MVT::i32));
    if (ST.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
      bool UseVReg = true;
      // Scan the users: if any use requires an SGPR class operand, build the
      // vector in scalar registers.
      for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
           U != E; ++U) {
        if (!U->isMachineOpcode()) {
          continue;
        }
        const TargetRegisterClass *RC = getOperandRegClass(*U, U.getOperandNo());
        if (!RC) {
          continue;
        }
        if (SIRI->isSGPRClass(RC)) {
          UseVReg = false;
        }
      }
      switch(NumVectorElts) {
      case 1: RegClassID = UseVReg ? AMDGPU::VReg_32RegClassID :
                                     AMDGPU::SReg_32RegClassID;
        break;
      case 2: RegClassID = UseVReg ? AMDGPU::VReg_64RegClassID :
                                     AMDGPU::SReg_64RegClassID;
        break;
      case 4: RegClassID = UseVReg ? AMDGPU::VReg_128RegClassID :
                                     AMDGPU::SReg_128RegClassID;
        break;
      case 8: RegClassID = UseVReg ? AMDGPU::VReg_256RegClassID :
                                     AMDGPU::SReg_256RegClassID;
        break;
      case 16: RegClassID = UseVReg ? AMDGPU::VReg_512RegClassID :
                                      AMDGPU::SReg_512RegClassID;
        break;
      default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
      }
    } else {
      // BUILD_VECTOR was lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG
      // sequence that adds a 128-bit register copy when going through the
      // TwoAddressInstructions pass. We want to avoid 128-bit copies as much
      // as possible because they cannot be bundled by our scheduler.
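      //
      // Using a single REG_SEQUENCE avoids that; e.g. a v4i32
      // (build_vector a, b, c, d) becomes roughly (illustrative only):
      //   REG_SEQUENCE R600_Reg128, a, sub0, b, sub1, c, sub2, d, sub3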
      switch(NumVectorElts) {
      case 2: RegClassID = AMDGPU::R600_Reg64RegClassID; break;
      case 4: RegClassID = AMDGPU::R600_Reg128RegClassID; break;
      default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
      }
    }

    SDValue RegClass = CurDAG->getTargetConstant(RegClassID, MVT::i32);

    if (NumVectorElts == 1) {
      return CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS, EltVT,
                                  N->getOperand(0), RegClass);
    }

    assert(NumVectorElts <= 16 && "Vectors with more than 16 elements not "
                                  "supported yet");
    // 16 = Max Num Vector Elements
    // 2 = 2 REG_SEQUENCE operands per element (value, subreg index)
    // 1 = Vector Register Class
    SmallVector<SDValue, 16 * 2 + 1> RegSeqArgs(NumVectorElts * 2 + 1);

    RegSeqArgs[0] = CurDAG->getTargetConstant(RegClassID, MVT::i32);
    bool IsRegSeq = true;
    unsigned NOps = N->getNumOperands();
    for (unsigned i = 0; i < NOps; i++) {
      // XXX: Why is this here?
      if (dyn_cast<RegisterSDNode>(N->getOperand(i))) {
        IsRegSeq = false;
        break;
      }
      RegSeqArgs[1 + (2 * i)] = N->getOperand(i);
      RegSeqArgs[1 + (2 * i) + 1] =
          CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), MVT::i32);
    }

    if (NOps != NumVectorElts) {
      // Fill in the missing undef elements if this was a scalar_to_vector.
      assert(Opc == ISD::SCALAR_TO_VECTOR && NOps < NumVectorElts);

      MachineSDNode *ImpDef = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                     SDLoc(N), EltVT);
      for (unsigned i = NOps; i < NumVectorElts; ++i) {
        RegSeqArgs[1 + (2 * i)] = SDValue(ImpDef, 0);
        RegSeqArgs[1 + (2 * i) + 1] =
            CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), MVT::i32);
      }
    }

    if (!IsRegSeq)
      break;
    return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(),
                                RegSeqArgs);
  }
  case ISD::BUILD_PAIR: {
    SDValue RC, SubReg0, SubReg1;
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
      break;
    }
    if (N->getValueType(0) == MVT::i128) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_128RegClassID, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, MVT::i32);
    } else if (N->getValueType(0) == MVT::i64) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);
    } else {
      llvm_unreachable("Unhandled value type for BUILD_PAIR");
    }
    const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
                            N->getOperand(1), SubReg1 };
    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
                                  SDLoc(N), N->getValueType(0), Ops);
  }

  case ISD::Constant:
  case ISD::ConstantFP: {
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
        N->getValueType(0).getSizeInBits() != 64 || isInlineImmediate(N))
      break;
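
    // A 64-bit immediate that cannot be encoded inline is materialized as
    // two 32-bit S_MOV_B32 halves joined back together with a REG_SEQUENCE.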
    uint64_t Imm;
    if (ConstantFPSDNode *FP = dyn_cast<ConstantFPSDNode>(N))
      Imm = FP->getValueAPF().bitcastToAPInt().getZExtValue();
    else {
      ConstantSDNode *C = cast<ConstantSDNode>(N);
      Imm = C->getZExtValue();
    }

    SDNode *Lo = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SDLoc(N), MVT::i32,
                                CurDAG->getConstant(Imm & 0xFFFFFFFF, MVT::i32));
    SDNode *Hi = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SDLoc(N), MVT::i32,
                                        CurDAG->getConstant(Imm >> 32, MVT::i32));
    const SDValue Ops[] = {
      CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32),
      SDValue(Lo, 0), CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32),
      SDValue(Hi, 0), CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32)
    };

    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, SDLoc(N),
                                  N->getValueType(0), Ops);
  }

  case AMDGPUISD::REGISTER_LOAD: {
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
      break;
    SDValue Addr, Offset;

    SelectADDRIndirect(N->getOperand(1), Addr, Offset);
    const SDValue Ops[] = {
      Addr,
      Offset,
      CurDAG->getTargetConstant(0, MVT::i32),
      N->getOperand(0),
    };
    return CurDAG->getMachineNode(AMDGPU::SI_RegisterLoad, SDLoc(N),
                                  CurDAG->getVTList(MVT::i32, MVT::i64,
                                                    MVT::Other),
                                  Ops);
  }
  case AMDGPUISD::REGISTER_STORE: {
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
      break;
    SDValue Addr, Offset;
    SelectADDRIndirect(N->getOperand(2), Addr, Offset);
    const SDValue Ops[] = {
      N->getOperand(1),
      Addr,
      Offset,
      CurDAG->getTargetConstant(0, MVT::i32),
      N->getOperand(0),
    };
    return CurDAG->getMachineNode(AMDGPU::SI_RegisterStorePseudo, SDLoc(N),
                                  CurDAG->getVTList(MVT::Other),
                                  Ops);
  }

  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
      break;

    // There is a scalar version available, but unlike the vector version,
    // which has separate operands for the offset and width, the scalar
    // version packs the width and offset into a single operand. Try to move
    // to the scalar version if the offsets are constant, so that we can try
    // to keep extended loads of kernel arguments in SGPRs.

    // TODO: Technically we could try to pattern match scalar bitshifts of
    // dynamic values, but it's probably not useful.
    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (!Offset)
      break;

    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
    if (!Width)
      break;

    bool Signed = Opc == AMDGPUISD::BFE_I32;

    // Pack the offset and width of the BFE into the format expected by
    // S_BFE_I32 / S_BFE_U32: in the second source operand, bits [5:0]
    // contain the offset and bits [22:16] the width.
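    //
    // For example, extracting 8 bits starting at bit 4 packs to
    //   4 | (8 << 16) == 0x00080004.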
    uint32_t OffsetVal = Offset->getZExtValue();
    uint32_t WidthVal = Width->getZExtValue();

    uint32_t PackedVal = OffsetVal | WidthVal << 16;

    SDValue PackedOffsetWidth = CurDAG->getTargetConstant(PackedVal, MVT::i32);
    return CurDAG->getMachineNode(Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32,
                                  SDLoc(N),
                                  MVT::i32,
                                  N->getOperand(0),
                                  PackedOffsetWidth);
  }
  }
  return SelectCode(N);
}

bool AMDGPUDAGToDAGISel::checkType(const Value *Ptr, unsigned AS) {
  assert(AS != 0 && "Use checkPrivateAddress instead.");
  if (!Ptr)
    return false;

  return Ptr->getType()->getPointerAddressSpace() == AS;
}

bool AMDGPUDAGToDAGISel::checkPrivateAddress(const MachineMemOperand *Op) {
  if (Op->getPseudoValue())
    return true;

  if (PointerType *PT = dyn_cast<PointerType>(Op->getValue()->getType()))
    return PT->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS;

  return false;
}

bool AMDGPUDAGToDAGISel::isGlobalStore(const StoreSDNode *N) {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::GLOBAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isPrivateStore(const StoreSDNode *N) {
  // A store is private if it is not local, global, or region.
  const Value *MemVal = N->getMemOperand()->getValue();
  return (!checkType(MemVal, AMDGPUAS::LOCAL_ADDRESS) &&
          !checkType(MemVal, AMDGPUAS::GLOBAL_ADDRESS) &&
          !checkType(MemVal, AMDGPUAS::REGION_ADDRESS));
}

bool AMDGPUDAGToDAGISel::isLocalStore(const StoreSDNode *N) {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::LOCAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isRegionStore(const StoreSDNode *N) {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::REGION_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isConstantLoad(const LoadSDNode *N, int CbId) const {
  const Value *MemVal = N->getMemOperand()->getValue();
  if (CbId == -1)
    return checkType(MemVal, AMDGPUAS::CONSTANT_ADDRESS);

  return checkType(MemVal, AMDGPUAS::CONSTANT_BUFFER_0 + CbId);
}

bool AMDGPUDAGToDAGISel::isGlobalLoad(const LoadSDNode *N) const {
  if (N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS) {
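    // Constant-address loads that the scalar (SMRD) path cannot handle
    // (targets before Southern Islands, or types narrower than 32 bits)
    // are treated as global loads here instead.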
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
        N->getMemoryVT().bitsLT(MVT::i32)) {
      return true;
    }
  }
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::GLOBAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isParamLoad(const LoadSDNode *N) const {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::PARAM_I_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isLocalLoad(const LoadSDNode *N) const {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::LOCAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isRegionLoad(const LoadSDNode *N) const {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::REGION_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isCPLoad(const LoadSDNode *N) const {
  MachineMemOperand *MMO = N->getMemOperand();
  if (checkPrivateAddress(N->getMemOperand())) {
    if (MMO) {
      const PseudoSourceValue *PSV = MMO->getPseudoValue();
      if (PSV && PSV == PseudoSourceValue::getConstantPool()) {
        return true;
      }
    }
  }
  return false;
}

bool AMDGPUDAGToDAGISel::isPrivateLoad(const LoadSDNode *N) const {
  if (checkPrivateAddress(N->getMemOperand())) {
    // Check to make sure we are not a constant pool load or a constant load
    // that is marked as a private load.
    if (isCPLoad(N) || isConstantLoad(N, -1)) {
      return false;
    }
  }

  const Value *MemVal = N->getMemOperand()->getValue();
  if (!checkType(MemVal, AMDGPUAS::LOCAL_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::GLOBAL_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::REGION_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::CONSTANT_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::PARAM_D_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::PARAM_I_ADDRESS)) {
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::isCFDepth0() const {
  // FIXME: Figure out a way to use DominatorTree analysis here.
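  // As an approximation, only the function's entry block and final block are
  // treated as being at control flow depth 0; the entry block trivially
  // dominates everything else.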
  const BasicBlock *CurBlock = FuncInfo->MBB->getBasicBlock();
  const Function *Fn = FuncInfo->Fn;
  return &Fn->front() == CurBlock || &Fn->back() == CurBlock;
}

const char *AMDGPUDAGToDAGISel::getPassName() const {
  return "AMDGPU DAG->DAG Pattern Instruction Selection";
}

//===----------------------------------------------------------------------===//
// Complex Patterns
//===----------------------------------------------------------------------===//

bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
                                                         SDValue &IntPtr) {
  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
    IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, true);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
                                                         SDValue &BaseReg,
                                                         SDValue &Offset) {
  if (!isa<ConstantSDNode>(Addr)) {
    BaseReg = Addr;
    Offset = CurDAG->getIntPtrConstant(0, true);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *IMMOffset;

  if (Addr.getOpcode() == ISD::ADD
      && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
      && isInt<16>(IMMOffset->getZExtValue())) {

    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
    return true;
  // If the pointer address is constant, we can move it to the offset field.
  } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr))
             && isInt<16>(IMMOffset->getZExtValue())) {
    Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                  SDLoc(CurDAG->getEntryNode()),
                                  AMDGPU::ZERO, MVT::i32);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
    return true;
  }

  // Default case, no offset.
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}

bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *C;

  if ((C = dyn_cast<ConstantSDNode>(Addr))) {
    Base = CurDAG->getRegister(AMDGPU::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
  } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
  } else {
    Base = Addr;
    Offset = CurDAG->getTargetConstant(0, MVT::i32);
  }

  return true;
}

void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
  const AMDGPUTargetLowering &Lowering =
      *static_cast<const AMDGPUTargetLowering *>(getTargetLowering());
  bool IsModified = false;
  do {
    IsModified = false;
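    // Iterate to a fixed point: a successful fold may expose further
    // folding opportunities on the nodes it creates.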
    // Go over all selected nodes and try to fold them a bit more.
    for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
         E = CurDAG->allnodes_end(); I != E; ++I) {

      SDNode *Node = I;

      MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(I);
      if (!MachineNode)
        continue;

      SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
      if (ResNode != Node) {
        ReplaceUses(Node, ResNode);
        IsModified = true;
      }
    }
    CurDAG->RemoveDeadNodes();
  } while (IsModified);
}