//===-- AMDILISelDAGToDAG.cpp - A dag to dag inst selector for AMDIL ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//==-----------------------------------------------------------------------===//
//
/// \file
/// \brief Defines an instruction selector for the AMDGPU target.
//
//===----------------------------------------------------------------------===//
#include "AMDGPUInstrInfo.h"
#include "AMDGPUISelLowering.h" // For AMDGPUISD
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "R600InstrInfo.h"
#include "SIDefines.h"
#include "SIISelLowering.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/Function.h"

using namespace llvm;

//===----------------------------------------------------------------------===//
// Instruction Selector Implementation
//===----------------------------------------------------------------------===//

namespace {
/// AMDGPU specific code to select AMDGPU machine instructions for
/// SelectionDAG operations.
class AMDGPUDAGToDAGISel : public SelectionDAGISel {
  // Subtarget - Keep a pointer to the AMDGPU Subtarget around so that we can
  // make the right decision when generating code for different targets.
  const AMDGPUSubtarget &Subtarget;
public:
  AMDGPUDAGToDAGISel(TargetMachine &TM);
  virtual ~AMDGPUDAGToDAGISel();

  SDNode *Select(SDNode *N) override;
  const char *getPassName() const override;
  void PostprocessISelDAG() override;

private:
  bool isInlineImmediate(SDNode *N) const;
  inline SDValue getSmallIPtrImm(unsigned Imm);
  bool FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg, SDValue &Abs,
                   const R600InstrInfo *TII);
  bool FoldOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
  bool FoldDotOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);

  // Complex pattern selectors
  bool SelectADDRParam(SDValue Addr, SDValue &R1, SDValue &R2);
  bool SelectADDR(SDValue N, SDValue &R1, SDValue &R2);
  bool SelectADDR64(SDValue N, SDValue &R1, SDValue &R2);

  static bool checkType(const Value *Ptr, unsigned AS);
  static bool checkPrivateAddress(const MachineMemOperand *Op);

  static bool isGlobalStore(const StoreSDNode *N);
  static bool isPrivateStore(const StoreSDNode *N);
  static bool isLocalStore(const StoreSDNode *N);
  static bool isRegionStore(const StoreSDNode *N);

  bool isCPLoad(const LoadSDNode *N) const;
  bool isConstantLoad(const LoadSDNode *N, int CbId) const;
  bool isGlobalLoad(const LoadSDNode *N) const;
  bool isParamLoad(const LoadSDNode *N) const;
  bool isPrivateLoad(const LoadSDNode *N) const;
  bool isLocalLoad(const LoadSDNode *N) const;
  bool isRegionLoad(const LoadSDNode *N) const;

  /// \returns True if the current basic block being selected is at control
  /// flow depth 0.  Meaning that the current block dominates the exit block
  /// of the function.
  bool isCFDepth0() const;

  const TargetRegisterClass *getOperandRegClass(SDNode *N, unsigned OpNo) const;
  bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue &IntPtr);
  bool SelectGlobalValueVariableOffset(SDValue Addr, SDValue &BaseReg,
                                       SDValue &Offset);
  bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool SelectMUBUFAddr64(SDValue Addr, SDValue &Ptr, SDValue &Offset,
                         SDValue &ImmOffset) const;
  bool SelectMUBUFScratch(SDValue Addr, SDValue &RSrc, SDValue &VAddr,
                          SDValue &SOffset, SDValue &ImmOffset) const;
  bool SelectMUBUFAddr32(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
                         SDValue &SOffset, SDValue &Offset, SDValue &Offen,
                         SDValue &Idxen, SDValue &GLC, SDValue &SLC,
                         SDValue &TFE) const;

  SDNode *SelectADD_SUB_I64(SDNode *N);
  SDNode *SelectDIV_SCALE(SDNode *N);

  // Include the pieces autogenerated from the target description.
#include "AMDGPUGenDAGISel.inc"
};
} // end anonymous namespace

/// \brief This pass converts a legalized DAG into an AMDGPU-specific
/// DAG, ready for instruction scheduling.
FunctionPass *llvm::createAMDGPUISelDag(TargetMachine &TM) {
  return new AMDGPUDAGToDAGISel(TM);
}

AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(TargetMachine &TM)
  : SelectionDAGISel(TM), Subtarget(TM.getSubtarget<AMDGPUSubtarget>()) {
}

AMDGPUDAGToDAGISel::~AMDGPUDAGToDAGISel() {
}
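
// A node is an inline immediate if SITargetLowering::analyzeImmediate
// reports 0, i.e. the value can be encoded directly in the instruction
// without a separate literal or a materializing move.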
bool AMDGPUDAGToDAGISel::isInlineImmediate(SDNode *N) const {
  const SITargetLowering *TL
      = static_cast<const SITargetLowering *>(getTargetLowering());
  return TL->analyzeImmediate(N) == 0;
}

/// \brief Determine the register class for \p OpNo
/// \returns The register class of the virtual register that will be used for
/// the given operand number \p OpNo or NULL if the register class cannot be
/// determined.
const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
                                                          unsigned OpNo) const {
  if (!N->isMachineOpcode())
    return nullptr;

  switch (N->getMachineOpcode()) {
  default: {
    const MCInstrDesc &Desc = TM.getInstrInfo()->get(N->getMachineOpcode());
    unsigned OpIdx = Desc.getNumDefs() + OpNo;
    if (OpIdx >= Desc.getNumOperands())
      return nullptr;
    int RegClass = Desc.OpInfo[OpIdx].RegClass;
    if (RegClass == -1)
      return nullptr;

    return TM.getRegisterInfo()->getRegClass(RegClass);
  }
  case AMDGPU::REG_SEQUENCE: {
    unsigned RCID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    const TargetRegisterClass *SuperRC =
      TM.getRegisterInfo()->getRegClass(RCID);

    // Operand 0 is the register class id, so skip it when mapping the
    // operand number to the subregister index operand.
    SDValue SubRegOp = N->getOperand(OpNo + 1);
    unsigned SubRegIdx = cast<ConstantSDNode>(SubRegOp)->getZExtValue();
    return TM.getRegisterInfo()->getSubClassWithSubReg(SuperRC, SubRegIdx);
  }
  }
}

SDValue AMDGPUDAGToDAGISel::getSmallIPtrImm(unsigned Imm) {
  return CurDAG->getTargetConstant(Imm, MVT::i32);
}

bool AMDGPUDAGToDAGISel::SelectADDRParam(
  SDValue Addr, SDValue &R1, SDValue &R2) {

  if (Addr.getOpcode() == ISD::FrameIndex) {
    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
      R2 = CurDAG->getTargetConstant(0, MVT::i32);
    } else {
      R1 = Addr;
      R2 = CurDAG->getTargetConstant(0, MVT::i32);
    }
  } else if (Addr.getOpcode() == ISD::ADD) {
    R1 = Addr.getOperand(0);
    R2 = Addr.getOperand(1);
  } else {
    R1 = Addr;
    R2 = CurDAG->getTargetConstant(0, MVT::i32);
  }
  return true;
}

bool AMDGPUDAGToDAGISel::SelectADDR(SDValue Addr, SDValue &R1, SDValue &R2) {
  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
      Addr.getOpcode() == ISD::TargetGlobalAddress) {
    return false;
  }
  return SelectADDRParam(Addr, R1, R2);
}

bool AMDGPUDAGToDAGISel::SelectADDR64(SDValue Addr, SDValue &R1, SDValue &R2) {
  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
      Addr.getOpcode() == ISD::TargetGlobalAddress) {
    return false;
  }

  if (Addr.getOpcode() == ISD::FrameIndex) {
    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
      R2 = CurDAG->getTargetConstant(0, MVT::i64);
    } else {
      R1 = Addr;
      R2 = CurDAG->getTargetConstant(0, MVT::i64);
    }
  } else if (Addr.getOpcode() == ISD::ADD) {
    R1 = Addr.getOperand(0);
    R2 = Addr.getOperand(1);
  } else {
    R1 = Addr;
    R2 = CurDAG->getTargetConstant(0, MVT::i64);
  }
  return true;
}
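
// Select is the main entry point for instruction selection.  The cases below
// are handled manually; everything else falls through to the
// TableGen-generated matcher via SelectCode at the end.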
SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
  unsigned int Opc = N->getOpcode();
  if (N->isMachineOpcode()) {
    N->setNodeId(-1);
    return nullptr; // Already selected.
  }

  const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
  switch (Opc) {
  default: break;
  // We are selecting i64 ADD here instead of custom lowering it during
  // DAG legalization, so we can fold some i64 ADDs used for address
  // calculation into the LOAD and STORE instructions.
  case ISD::ADD:
  case ISD::SUB: {
    if (N->getValueType(0) != MVT::i64 ||
        ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
      break;

    return SelectADD_SUB_I64(N);
  }
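
  // For the vector-building cases below, SI chooses between SGPR and VGPR
  // register classes by scanning the already-selected uses: if any use
  // requires an SGPR operand, the whole vector is built in scalar registers.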
  case ISD::SCALAR_TO_VECTOR:
  case AMDGPUISD::BUILD_VERTICAL_VECTOR:
  case ISD::BUILD_VECTOR: {
    unsigned RegClassID;
    const AMDGPURegisterInfo *TRI =
      static_cast<const AMDGPURegisterInfo*>(TM.getRegisterInfo());
    const SIRegisterInfo *SIRI =
      static_cast<const SIRegisterInfo*>(TM.getRegisterInfo());
    EVT VT = N->getValueType(0);
    unsigned NumVectorElts = VT.getVectorNumElements();
    EVT EltVT = VT.getVectorElementType();
    assert(EltVT.bitsEq(MVT::i32));
    if (ST.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
      bool UseVReg = true;
      for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
           U != E; ++U) {
        if (!U->isMachineOpcode()) {
          continue;
        }
        const TargetRegisterClass *RC =
          getOperandRegClass(*U, U.getOperandNo());
        if (!RC) {
          continue;
        }
        if (SIRI->isSGPRClass(RC)) {
          UseVReg = false;
        }
      }
      switch(NumVectorElts) {
      case 1: RegClassID = UseVReg ? AMDGPU::VReg_32RegClassID :
                                     AMDGPU::SReg_32RegClassID;
        break;
      case 2: RegClassID = UseVReg ? AMDGPU::VReg_64RegClassID :
                                     AMDGPU::SReg_64RegClassID;
        break;
      case 4: RegClassID = UseVReg ? AMDGPU::VReg_128RegClassID :
                                     AMDGPU::SReg_128RegClassID;
        break;
      case 8: RegClassID = UseVReg ? AMDGPU::VReg_256RegClassID :
                                     AMDGPU::SReg_256RegClassID;
        break;
      case 16: RegClassID = UseVReg ? AMDGPU::VReg_512RegClassID :
                                      AMDGPU::SReg_512RegClassID;
        break;
      default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
      }
    } else {
      // BUILD_VECTOR was lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG
      // that adds a 128-bit register copy when going through the
      // TwoAddressInstructions pass. We want to avoid 128-bit copies as much
      // as possible because they cannot be bundled by our scheduler.
      switch(NumVectorElts) {
      case 2: RegClassID = AMDGPU::R600_Reg64RegClassID; break;
      case 4:
        if (Opc == AMDGPUISD::BUILD_VERTICAL_VECTOR)
          RegClassID = AMDGPU::R600_Reg128VerticalRegClassID;
        else
          RegClassID = AMDGPU::R600_Reg128RegClassID;
        break;
      default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
      }
    }

    SDValue RegClass = CurDAG->getTargetConstant(RegClassID, MVT::i32);

    if (NumVectorElts == 1) {
      return CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS, EltVT,
                                  N->getOperand(0), RegClass);
    }

    assert(NumVectorElts <= 16 && "Vectors with more than 16 elements not "
                                  "supported yet");
    // 16 = Max Num Vector Elements
    // 2 = 2 REG_SEQUENCE operands per element (value, subreg index)
    // 1 = Vector Register Class
    SmallVector<SDValue, 16 * 2 + 1> RegSeqArgs(NumVectorElts * 2 + 1);

    RegSeqArgs[0] = CurDAG->getTargetConstant(RegClassID, MVT::i32);
    bool IsRegSeq = true;
    unsigned NOps = N->getNumOperands();
    for (unsigned i = 0; i < NOps; i++) {
      // XXX: Why is this here?
      if (isa<RegisterSDNode>(N->getOperand(i))) {
        IsRegSeq = false;
        break;
      }
      RegSeqArgs[1 + (2 * i)] = N->getOperand(i);
      RegSeqArgs[1 + (2 * i) + 1] =
        CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), MVT::i32);
    }

    if (NOps != NumVectorElts) {
      // Fill in the missing undef elements if this was a scalar_to_vector.
      assert(Opc == ISD::SCALAR_TO_VECTOR && NOps < NumVectorElts);
      MachineSDNode *ImpDef = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                     SDLoc(N), EltVT);
      for (unsigned i = NOps; i < NumVectorElts; ++i) {
        RegSeqArgs[1 + (2 * i)] = SDValue(ImpDef, 0);
        RegSeqArgs[1 + (2 * i) + 1] =
          CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), MVT::i32);
      }
    }

    if (!IsRegSeq)
      break;
    return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(),
                                RegSeqArgs);
  }
  case ISD::BUILD_PAIR: {
    SDValue RC, SubReg0, SubReg1;
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
      break;
    }
    if (N->getValueType(0) == MVT::i128) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_128RegClassID, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, MVT::i32);
    } else if (N->getValueType(0) == MVT::i64) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);
    } else {
      llvm_unreachable("Unhandled value type for BUILD_PAIR");
    }
    const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
                            N->getOperand(1), SubReg1 };
    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
                                  SDLoc(N), N->getValueType(0), Ops);
  }

  case ISD::Constant:
  case ISD::ConstantFP: {
    if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
        N->getValueType(0).getSizeInBits() != 64 || isInlineImmediate(N))
      break;

    uint64_t Imm;
    if (ConstantFPSDNode *FP = dyn_cast<ConstantFPSDNode>(N)) {
      Imm = FP->getValueAPF().bitcastToAPInt().getZExtValue();
    } else {
      ConstantSDNode *C = cast<ConstantSDNode>(N);
      Imm = C->getZExtValue();
    }

    // Materialize the 64-bit immediate as two 32-bit scalar moves joined
    // with a REG_SEQUENCE.
    SDNode *Lo = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SDLoc(N), MVT::i32,
                              CurDAG->getConstant(Imm & 0xFFFFFFFF, MVT::i32));
    SDNode *Hi = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SDLoc(N), MVT::i32,
                                     CurDAG->getConstant(Imm >> 32, MVT::i32));
    const SDValue Ops[] = {
      CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32),
      SDValue(Lo, 0), CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32),
      SDValue(Hi, 0), CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32)
    };

    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, SDLoc(N),
                                  N->getValueType(0), Ops);
  }

  case AMDGPUISD::REGISTER_LOAD: {
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
      break;
    SDValue Addr, Offset;

    SelectADDRIndirect(N->getOperand(1), Addr, Offset);
    const SDValue Ops[] = {
      Addr,
      Offset,
      CurDAG->getTargetConstant(0, MVT::i32),
      N->getOperand(0),
    };
    return CurDAG->getMachineNode(AMDGPU::SI_RegisterLoad, SDLoc(N),
                                  CurDAG->getVTList(MVT::i32, MVT::i64,
                                                    MVT::Other),
                                  Ops);
  }
  case AMDGPUISD::REGISTER_STORE: {
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
      break;
    SDValue Addr, Offset;
    SelectADDRIndirect(N->getOperand(2), Addr, Offset);
    const SDValue Ops[] = {
      N->getOperand(1),
      Addr,
      Offset,
      CurDAG->getTargetConstant(0, MVT::i32),
      N->getOperand(0),
    };
    return CurDAG->getMachineNode(AMDGPU::SI_RegisterStorePseudo, SDLoc(N),
                                  CurDAG->getVTList(MVT::Other),
                                  Ops);
  }

  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
      break;

    // There is a scalar version available, but unlike the vector version,
    // which has a separate operand for the offset and width, the scalar
    // version packs the width and offset into a single operand. Try to move
    // to the scalar version if the offsets are constant, so that we can try
    // to keep extended loads of kernel arguments in SGPRs.

    // TODO: Technically we could try to pattern match scalar bitshifts of
    // dynamic values, but it's probably not useful.
    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (!Offset)
      break;

    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
    if (!Width)
      break;

    bool Signed = Opc == AMDGPUISD::BFE_I32;

    // Pack the offset and width of a BFE into the format expected by
    // S_BFE_I32 / S_BFE_U32: in the second source operand, bits [5:0]
    // contain the offset and bits [22:16] the width.
    uint32_t OffsetVal = Offset->getZExtValue();
    uint32_t WidthVal = Width->getZExtValue();

    // For example, offset = 8 and width = 8 pack to 0x00080008.
    uint32_t PackedVal = OffsetVal | WidthVal << 16;

    SDValue PackedOffsetWidth = CurDAG->getTargetConstant(PackedVal, MVT::i32);
    return CurDAG->getMachineNode(Signed ? AMDGPU::S_BFE_I32
                                         : AMDGPU::S_BFE_U32,
                                  SDLoc(N),
                                  MVT::i32,
                                  N->getOperand(0),
                                  PackedOffsetWidth);
  }
  case AMDGPUISD::DIV_SCALE: {
    return SelectDIV_SCALE(N);
  }
  }
  return SelectCode(N);
}

bool AMDGPUDAGToDAGISel::checkType(const Value *Ptr, unsigned AS) {
  assert(AS != 0 && "Use checkPrivateAddress instead.");
  if (!Ptr)
    return false;

  return Ptr->getType()->getPointerAddressSpace() == AS;
}

bool AMDGPUDAGToDAGISel::checkPrivateAddress(const MachineMemOperand *Op) {
  if (Op->getPseudoValue())
    return true;

  if (PointerType *PT = dyn_cast<PointerType>(Op->getValue()->getType()))
    return PT->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS;

  return false;
}

bool AMDGPUDAGToDAGISel::isGlobalStore(const StoreSDNode *N) {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::GLOBAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isPrivateStore(const StoreSDNode *N) {
  const Value *MemVal = N->getMemOperand()->getValue();
  return (!checkType(MemVal, AMDGPUAS::LOCAL_ADDRESS) &&
          !checkType(MemVal, AMDGPUAS::GLOBAL_ADDRESS) &&
          !checkType(MemVal, AMDGPUAS::REGION_ADDRESS));
}

bool AMDGPUDAGToDAGISel::isLocalStore(const StoreSDNode *N) {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::LOCAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isRegionStore(const StoreSDNode *N) {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::REGION_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isConstantLoad(const LoadSDNode *N, int CbId) const {
  const Value *MemVal = N->getMemOperand()->getValue();
  if (CbId == -1)
    return checkType(MemVal, AMDGPUAS::CONSTANT_ADDRESS);

  return checkType(MemVal, AMDGPUAS::CONSTANT_BUFFER_0 + CbId);
}

bool AMDGPUDAGToDAGISel::isGlobalLoad(const LoadSDNode *N) const {
  if (N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS) {
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
        N->getMemoryVT().bitsLT(MVT::i32)) {
      return true;
    }
  }
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::GLOBAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isParamLoad(const LoadSDNode *N) const {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::PARAM_I_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isLocalLoad(const LoadSDNode *N) const {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::LOCAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isRegionLoad(const LoadSDNode *N) const {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::REGION_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isCPLoad(const LoadSDNode *N) const {
  MachineMemOperand *MMO = N->getMemOperand();
  if (checkPrivateAddress(N->getMemOperand())) {
    if (MMO) {
      const PseudoSourceValue *PSV = MMO->getPseudoValue();
      if (PSV && PSV == PseudoSourceValue::getConstantPool()) {
        return true;
      }
    }
  }
  return false;
}

bool AMDGPUDAGToDAGISel::isPrivateLoad(const LoadSDNode *N) const {
  if (checkPrivateAddress(N->getMemOperand())) {
    // Check to make sure we are not a constant pool load or a constant load
    // that is marked as a private load.
    if (isCPLoad(N) || isConstantLoad(N, -1)) {
      return false;
    }
  }

  const Value *MemVal = N->getMemOperand()->getValue();
  if (!checkType(MemVal, AMDGPUAS::LOCAL_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::GLOBAL_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::REGION_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::CONSTANT_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::PARAM_D_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::PARAM_I_ADDRESS)) {
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::isCFDepth0() const {
  // FIXME: Figure out a way to use DominatorTree analysis here.
  // Approximation: only the entry block and the final block of the function
  // are treated as being at control flow depth 0.
  const BasicBlock *CurBlock = FuncInfo->MBB->getBasicBlock();
  const Function *Fn = FuncInfo->Fn;
  return &Fn->front() == CurBlock || &Fn->back() == CurBlock;
}

const char *AMDGPUDAGToDAGISel::getPassName() const {
  return "AMDGPU DAG->DAG Pattern Instruction Selection";
}

//===----------------------------------------------------------------------===//
// Complex Patterns
//===----------------------------------------------------------------------===//

bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
                                                         SDValue &IntPtr) {
  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
    IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, true);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
                                                         SDValue &BaseReg,
                                                         SDValue &Offset) {
  if (!isa<ConstantSDNode>(Addr)) {
    BaseReg = Addr;
    Offset = CurDAG->getIntPtrConstant(0, true);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *IMMOffset;

  if (Addr.getOpcode() == ISD::ADD
      && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
      && isInt<16>(IMMOffset->getZExtValue())) {

    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
    return true;
  // If the pointer address is constant, we can move it to the offset field.
  } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr))
             && isInt<16>(IMMOffset->getZExtValue())) {
    Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                  SDLoc(CurDAG->getEntryNode()),
                                  AMDGPU::ZERO, MVT::i32);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
    return true;
  }

  // Default case, no offset.
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}

bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *C;

  if ((C = dyn_cast<ConstantSDNode>(Addr))) {
    Base = CurDAG->getRegister(AMDGPU::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
  } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
  } else {
    Base = Addr;
    Offset = CurDAG->getTargetConstant(0, MVT::i32);
  }

  return true;
}
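
// Expand a 64-bit add/sub into a 32-bit low half and a carry-consuming high
// half, then recombine the two results with REG_SEQUENCE.  For example, an
// i64 add becomes:
//   (REG_SEQUENCE SReg_64,
//      (S_ADD_I32  lo(LHS), lo(RHS)), sub0,
//      (S_ADDC_U32 hi(LHS), hi(RHS)), sub1)
// with the carry threaded through the glue result of the low-half node.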
SDNode *AMDGPUDAGToDAGISel::SelectADD_SUB_I64(SDNode *N) {
  SDLoc DL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  bool IsAdd = (N->getOpcode() == ISD::ADD);

  SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
  SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);

  SDNode *Lo0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub0);
  SDNode *Hi0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub1);

  SDNode *Lo1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub0);
  SDNode *Hi1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub1);

  SDVTList VTList = CurDAG->getVTList(MVT::i32, MVT::Glue);
  SDValue AddLoArgs[] = { SDValue(Lo0, 0), SDValue(Lo1, 0) };

  unsigned Opc = IsAdd ? AMDGPU::S_ADD_I32 : AMDGPU::S_SUB_I32;
  unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;

  // The SALU forms are only used at control flow depth 0; otherwise fall
  // back to the VALU forms.
  if (!isCFDepth0()) {
    Opc = IsAdd ? AMDGPU::V_ADD_I32_e32 : AMDGPU::V_SUB_I32_e32;
    CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e32 : AMDGPU::V_SUBB_U32_e32;
  }

  SDNode *AddLo = CurDAG->getMachineNode(Opc, DL, VTList, AddLoArgs);
  SDValue Carry(AddLo, 1);
  SDNode *AddHi
    = CurDAG->getMachineNode(CarryOpc, DL, MVT::i32,
                             SDValue(Hi0, 0), SDValue(Hi1, 0), Carry);

  SDValue Args[5] = {
    CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32),
    SDValue(AddLo, 0),
    Sub0,
    SDValue(AddHi, 0),
    Sub1,
  };
  return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, MVT::i64, Args);
}
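
// DIV_SCALE maps directly onto V_DIV_SCALE_F32/F64.  The three source
// operands are followed by zeroed modifier operands, and the extra MVT::i1
// result is the condition output consumed later in the FP division expansion.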
SDNode *AMDGPUDAGToDAGISel::SelectDIV_SCALE(SDNode *N) {
  SDLoc SL(N);
  EVT VT = N->getValueType(0);

  assert(VT == MVT::f32 || VT == MVT::f64);

  unsigned Opc
    = (VT == MVT::f64) ? AMDGPU::V_DIV_SCALE_F64 : AMDGPU::V_DIV_SCALE_F32;

  const SDValue Zero = CurDAG->getTargetConstant(0, MVT::i32);

  SDValue Ops[] = {
    N->getOperand(0),
    N->getOperand(1),
    N->getOperand(2),
    Zero,
    Zero,
    Zero,
    Zero
  };

  return CurDAG->SelectNodeTo(N, Opc, VT, MVT::i1, Ops);
}
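
// Wrap a 64-bit pointer into the v4i32 buffer resource descriptor expected
// by MUBUF addr64 addressing.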
static SDValue wrapAddr64Rsrc(SelectionDAG *DAG, SDLoc DL, SDValue Ptr) {
  return SDValue(DAG->getMachineNode(AMDGPU::SI_ADDR64_RSRC, DL, MVT::v4i32,
                                     Ptr), 0);
}
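
// MUBUF instructions encode the immediate offset in 12 bits, so only
// unsigned values below 4096 can be folded.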
static bool isLegalMUBUFImmOffset(const ConstantSDNode *Imm) {
  return isUInt<12>(Imm->getZExtValue());
}
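
// Match the MUBUF addr64 addressing modes.  The shapes handled below are:
//   (add (add N2, N3), C1) -> Ptr = N2, Offset = N3, ImmOffset = C1
//   (add N0, C1)           -> Ptr = 0,  Offset = N0, ImmOffset = C1
//   (add N0, N1)           -> Ptr = N0, Offset = N1, ImmOffset = 0
//   N                      -> Ptr = 0,  Offset = N,  ImmOffset = 0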
bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &Ptr,
                                           SDValue &Offset,
                                           SDValue &ImmOffset) const {
  SDLoc DL(Addr);

  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);

    if (isLegalMUBUFImmOffset(C1)) {

      if (N0.getOpcode() == ISD::ADD) {
        // (add (add N2, N3), C1)
        SDValue N2 = N0.getOperand(0);
        SDValue N3 = N0.getOperand(1);
        Ptr = wrapAddr64Rsrc(CurDAG, DL, N2);
        Offset = N3;
        ImmOffset = CurDAG->getTargetConstant(C1->getZExtValue(), MVT::i16);
        return true;
      }

      // (add N0, C1)
      Ptr = wrapAddr64Rsrc(CurDAG, DL, CurDAG->getTargetConstant(0, MVT::i64));
      Offset = N0;
      ImmOffset = CurDAG->getTargetConstant(C1->getZExtValue(), MVT::i16);
      return true;
    }
  }
  if (Addr.getOpcode() == ISD::ADD) {
    // (add N0, N1)
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    Ptr = wrapAddr64Rsrc(CurDAG, DL, N0);
    Offset = N1;
    ImmOffset = CurDAG->getTargetConstant(0, MVT::i16);
    return true;
  }

  // Default case: the whole address goes in the variable offset.
  Ptr = wrapAddr64Rsrc(CurDAG, DL, CurDAG->getConstant(0, MVT::i64));
  Offset = Addr;
  ImmOffset = CurDAG->getTargetConstant(0, MVT::i16);
  return true;
}

/// \brief Return a resource descriptor with the 'Add TID' bit enabled.
///        The TID (Thread ID) is multiplied by the stride value (bits [61:48]
///        of the resource descriptor) to create an offset, which is added to
///        the resource pointer.
static SDValue buildScratchRSRC(SelectionDAG *DAG, SDLoc DL, SDValue Ptr) {
  uint64_t Rsrc = AMDGPU::RSRC_DATA_FORMAT | AMDGPU::RSRC_TID_ENABLE |
                  0xffffffff; // All-ones buffer size (NumRecords).

  SDValue PtrLo = DAG->getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr);
  SDValue PtrHi = DAG->getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
  SDValue DataLo = DAG->getTargetConstant(
      Rsrc & APInt::getAllOnesValue(32).getZExtValue(), MVT::i32);
  SDValue DataHi = DAG->getTargetConstant(Rsrc >> 32, MVT::i32);

  const SDValue Ops[] = { PtrLo, PtrHi, DataLo, DataHi };
  return SDValue(DAG->getMachineNode(AMDGPU::SI_BUFFER_RSRC, DL,
                                     MVT::v4i32, Ops), 0);
}
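
// Scratch (private) accesses use a fixed resource descriptor built from the
// preloaded scratch pointer, with the per-wave scratch offset as SOffset;
// only VAddr and the immediate offset are matched from the address
// expression.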
bool AMDGPUDAGToDAGISel::SelectMUBUFScratch(SDValue Addr, SDValue &Rsrc,
                                            SDValue &VAddr, SDValue &SOffset,
                                            SDValue &ImmOffset) const {
  SDLoc DL(Addr);
  MachineFunction &MF = CurDAG->getMachineFunction();
  const SIRegisterInfo *TRI =
    static_cast<const SIRegisterInfo*>(MF.getTarget().getRegisterInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned ScratchPtrReg =
    TRI->getPreloadedValue(MF, SIRegisterInfo::SCRATCH_PTR);
  unsigned ScratchOffsetReg =
    TRI->getPreloadedValue(MF, SIRegisterInfo::SCRATCH_WAVE_OFFSET);

  Rsrc = buildScratchRSRC(CurDAG, DL,
                          CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL,
                            MRI.getLiveInVirtReg(ScratchPtrReg), MVT::i64));
  SOffset = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL,
      MRI.getLiveInVirtReg(ScratchOffsetReg), MVT::i32);

  // (add n0, c1)
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);

    if (isLegalMUBUFImmOffset(C1)) {
      VAddr = Addr.getOperand(0);
      ImmOffset = CurDAG->getTargetConstant(C1->getZExtValue(), MVT::i16);
      return true;
    }
  }

  // (add FI, n0)
  if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
      isa<FrameIndexSDNode>(Addr.getOperand(0))) {
    VAddr = Addr.getOperand(1);
    ImmOffset = Addr.getOperand(0);
    return true;
  }

  // (FI)
  if (isa<FrameIndexSDNode>(Addr)) {
    VAddr = SDValue(CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32, DL, MVT::i32,
                                           CurDAG->getConstant(0, MVT::i32)),
                    0);
    ImmOffset = Addr;
    return true;
  }

  // (node)
  VAddr = Addr;
  ImmOffset = CurDAG->getTargetConstant(0, MVT::i16);
  return true;
}

bool AMDGPUDAGToDAGISel::SelectMUBUFAddr32(SDValue Addr, SDValue &SRsrc,
                                           SDValue &VAddr, SDValue &SOffset,
                                           SDValue &Offset, SDValue &Offen,
                                           SDValue &Idxen, SDValue &GLC,
                                           SDValue &SLC, SDValue &TFE) const {
  GLC = CurDAG->getTargetConstant(0, MVT::i1);
  SLC = CurDAG->getTargetConstant(0, MVT::i1);
  TFE = CurDAG->getTargetConstant(0, MVT::i1);

  Idxen = CurDAG->getTargetConstant(0, MVT::i1);
  Offen = CurDAG->getTargetConstant(1, MVT::i1);

  return SelectMUBUFScratch(Addr, SRsrc, VAddr, SOffset, Offset);
}
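
// After selection, repeatedly run the target's PostISelFolding peephole over
// all machine nodes until no more changes are made.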
void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
  const AMDGPUTargetLowering& Lowering =
    *static_cast<const AMDGPUTargetLowering*>(getTargetLowering());
  bool IsModified = false;
  do {
    IsModified = false;
    // Go over all selected nodes and try to fold them a bit more.
    for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
         E = CurDAG->allnodes_end(); I != E; ++I) {

      SDNode *Node = I;

      MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(I);
      if (!MachineNode)
        continue;

      SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
      if (ResNode != Node) {
        ReplaceUses(Node, ResNode);
        IsModified = true;
      }
    }
    CurDAG->RemoveDeadNodes();
  } while (IsModified);
}