//===-- AMDILISelDAGToDAG.cpp - A dag to dag inst selector for AMDIL ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//==-----------------------------------------------------------------------===//
//
/// \file
/// \brief Defines an instruction selector for the AMDGPU target.
//
//===----------------------------------------------------------------------===//
#include "AMDGPUInstrInfo.h"
#include "AMDGPUISelLowering.h" // For AMDGPUISD
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "R600InstrInfo.h"
#include "SIDefines.h"
#include "SIISelLowering.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/Function.h"

using namespace llvm;

//===----------------------------------------------------------------------===//
// Instruction Selector Implementation
//===----------------------------------------------------------------------===//

namespace {
/// AMDGPU specific code to select AMDGPU machine instructions for
/// SelectionDAG operations.
class AMDGPUDAGToDAGISel : public SelectionDAGISel {
  // Subtarget - Keep a pointer to the AMDGPU Subtarget around so that we can
  // make the right decision when generating code for different targets.
  const AMDGPUSubtarget &Subtarget;
public:
  AMDGPUDAGToDAGISel(TargetMachine &TM);
  virtual ~AMDGPUDAGToDAGISel();

  SDNode *Select(SDNode *N) override;
  const char *getPassName() const override;
  void PostprocessISelDAG() override;

private:
  bool isInlineImmediate(SDNode *N) const;
  inline SDValue getSmallIPtrImm(unsigned Imm);
  bool FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg, SDValue &Abs,
                   const R600InstrInfo *TII);
  bool FoldOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
  bool FoldDotOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
  // Complex pattern selectors
  bool SelectADDRParam(SDValue Addr, SDValue &R1, SDValue &R2);
  bool SelectADDR(SDValue N, SDValue &R1, SDValue &R2);
  bool SelectADDR64(SDValue N, SDValue &R1, SDValue &R2);

  static bool checkType(const Value *Ptr, unsigned int AddrSpace);
  static bool checkPrivateAddress(const MachineMemOperand *Op);

  static bool isGlobalStore(const StoreSDNode *N);
  static bool isFlatStore(const StoreSDNode *N);
  static bool isPrivateStore(const StoreSDNode *N);
  static bool isLocalStore(const StoreSDNode *N);
  static bool isRegionStore(const StoreSDNode *N);

  bool isCPLoad(const LoadSDNode *N) const;
  bool isConstantLoad(const LoadSDNode *N, int CbId) const;
  bool isGlobalLoad(const LoadSDNode *N) const;
  bool isFlatLoad(const LoadSDNode *N) const;
  bool isParamLoad(const LoadSDNode *N) const;
  bool isPrivateLoad(const LoadSDNode *N) const;
  bool isLocalLoad(const LoadSDNode *N) const;
  bool isRegionLoad(const LoadSDNode *N) const;

  const TargetRegisterClass *getOperandRegClass(SDNode *N, unsigned OpNo) const;
  bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue &IntPtr);
  bool SelectGlobalValueVariableOffset(SDValue Addr, SDValue &BaseReg,
                                       SDValue &Offset);
  bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool isDSOffsetLegal(const SDValue &Base, unsigned Offset,
                       unsigned OffsetBits) const;
  bool SelectDS1Addr1Offset(SDValue Ptr, SDValue &Base, SDValue &Offset) const;
  bool SelectDS64Bit4ByteAligned(SDValue Ptr, SDValue &Base, SDValue &Offset0,
                                 SDValue &Offset1) const;
  void SelectMUBUF(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
                   SDValue &SOffset, SDValue &Offset, SDValue &Offen,
                   SDValue &Idxen, SDValue &Addr64, SDValue &GLC, SDValue &SLC,
                   SDValue &TFE) const;
  bool SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
                         SDValue &Offset) const;
  bool SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
                         SDValue &VAddr, SDValue &Offset,
                         SDValue &SLC) const;
  bool SelectMUBUFScratch(SDValue Addr, SDValue &RSrc, SDValue &VAddr,
                          SDValue &SOffset, SDValue &ImmOffset) const;
  bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &SOffset,
                         SDValue &Offset, SDValue &GLC, SDValue &SLC,
                         SDValue &TFE) const;
  bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &Soffset,
                         SDValue &Offset, SDValue &GLC) const;
  SDNode *SelectAddrSpaceCast(SDNode *N);
  bool SelectVOP3Mods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3Mods0(SDValue In, SDValue &Src, SDValue &SrcMods,
                       SDValue &Clamp, SDValue &Omod) const;
  bool SelectVOP3Mods0Clamp(SDValue In, SDValue &Src, SDValue &SrcMods,
                            SDValue &Omod) const;

  SDNode *SelectADD_SUB_I64(SDNode *N);
  SDNode *SelectDIV_SCALE(SDNode *N);

  // Include the pieces autogenerated from the target description.
#include "AMDGPUGenDAGISel.inc"
};
} // end anonymous namespace

/// \brief This pass converts a legalized DAG into an AMDGPU-specific
/// DAG, ready for instruction scheduling.
FunctionPass *llvm::createAMDGPUISelDag(TargetMachine &TM) {
  return new AMDGPUDAGToDAGISel(TM);
}

AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(TargetMachine &TM)
    : SelectionDAGISel(TM), Subtarget(TM.getSubtarget<AMDGPUSubtarget>()) {
}

AMDGPUDAGToDAGISel::~AMDGPUDAGToDAGISel() {
}

bool AMDGPUDAGToDAGISel::isInlineImmediate(SDNode *N) const {
  const SITargetLowering *TL
      = static_cast<const SITargetLowering *>(getTargetLowering());
  return TL->analyzeImmediate(N) == 0;
}

/// \brief Determine the register class for \p OpNo
/// \returns The register class of the virtual register that will be used for
/// the given operand number \p OpNo or NULL if the register class cannot be
/// determined.
const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
                                                          unsigned OpNo) const {
  if (!N->isMachineOpcode())
    return nullptr;

  switch (N->getMachineOpcode()) {
  default: {
    const MCInstrDesc &Desc =
        TM.getSubtargetImpl()->getInstrInfo()->get(N->getMachineOpcode());
    unsigned OpIdx = Desc.getNumDefs() + OpNo;
    if (OpIdx >= Desc.getNumOperands())
      return nullptr;
    int RegClass = Desc.OpInfo[OpIdx].RegClass;
    if (RegClass == -1)
      return nullptr;

    return TM.getSubtargetImpl()->getRegisterInfo()->getRegClass(RegClass);
  }
  case AMDGPU::REG_SEQUENCE: {
    unsigned RCID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    const TargetRegisterClass *SuperRC =
        TM.getSubtargetImpl()->getRegisterInfo()->getRegClass(RCID);

    SDValue SubRegOp = N->getOperand(OpNo + 1);
    unsigned SubRegIdx = cast<ConstantSDNode>(SubRegOp)->getZExtValue();
    return TM.getSubtargetImpl()->getRegisterInfo()->getSubClassWithSubReg(
        SuperRC, SubRegIdx);
  }
  }
}

SDValue AMDGPUDAGToDAGISel::getSmallIPtrImm(unsigned int Imm) {
  return CurDAG->getTargetConstant(Imm, MVT::i32);
}

bool AMDGPUDAGToDAGISel::SelectADDRParam(
    SDValue Addr, SDValue &R1, SDValue &R2) {

  if (Addr.getOpcode() == ISD::FrameIndex) {
    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
      R2 = CurDAG->getTargetConstant(0, MVT::i32);
    } else {
      R1 = Addr;
      R2 = CurDAG->getTargetConstant(0, MVT::i32);
    }
  } else if (Addr.getOpcode() == ISD::ADD) {
    R1 = Addr.getOperand(0);
    R2 = Addr.getOperand(1);
  } else {
    R1 = Addr;
    R2 = CurDAG->getTargetConstant(0, MVT::i32);
  }
  return true;
}

bool AMDGPUDAGToDAGISel::SelectADDR(SDValue Addr, SDValue &R1, SDValue &R2) {
  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
      Addr.getOpcode() == ISD::TargetGlobalAddress) {
    return false;
  }
  return SelectADDRParam(Addr, R1, R2);
}

bool AMDGPUDAGToDAGISel::SelectADDR64(SDValue Addr, SDValue &R1, SDValue &R2) {
  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
      Addr.getOpcode() == ISD::TargetGlobalAddress) {
    return false;
  }

  if (Addr.getOpcode() == ISD::FrameIndex) {
    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
      R2 = CurDAG->getTargetConstant(0, MVT::i64);
    } else {
      R1 = Addr;
      R2 = CurDAG->getTargetConstant(0, MVT::i64);
    }
  } else if (Addr.getOpcode() == ISD::ADD) {
    R1 = Addr.getOperand(0);
    R2 = Addr.getOperand(1);
  } else {
    R1 = Addr;
    R2 = CurDAG->getTargetConstant(0, MVT::i64);
  }
  return true;
}

SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
  unsigned int Opc = N->getOpcode();
  if (N->isMachineOpcode()) {
    N->setNodeId(-1);
    return nullptr;   // Already selected.
  }

  const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
  switch (Opc) {
  default: break;
  // We are selecting i64 ADD here instead of custom lowering it during
  // DAG legalization, so we can fold some i64 ADDs used for address
  // calculation into the LOAD and STORE instructions.
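  // For example, a pattern like (load (add i64:%ptr, 16)) can then be
  // selected to a single memory instruction with an immediate offset,
  // instead of materializing the add separately.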
  case ISD::ADD:
  case ISD::SUB: {
    if (N->getValueType(0) != MVT::i64 ||
        ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
      break;

    return SelectADD_SUB_I64(N);
  }
  case ISD::SCALAR_TO_VECTOR:
  case AMDGPUISD::BUILD_VERTICAL_VECTOR:
  case ISD::BUILD_VECTOR: {
    unsigned RegClassID;
    const AMDGPURegisterInfo *TRI = static_cast<const AMDGPURegisterInfo *>(
        TM.getSubtargetImpl()->getRegisterInfo());
    const SIRegisterInfo *SIRI = static_cast<const SIRegisterInfo *>(
        TM.getSubtargetImpl()->getRegisterInfo());
    EVT VT = N->getValueType(0);
    unsigned NumVectorElts = VT.getVectorNumElements();
    EVT EltVT = VT.getVectorElementType();
    assert(EltVT.bitsEq(MVT::i32));
    if (ST.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
      // Default to VGPRs, but build the vector in SGPRs if any user of it
      // requires a scalar register class.
      bool UseVReg = true;
      for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
                                                    U != E; ++U) {
        if (!U->isMachineOpcode()) {
          continue;
        }
        const TargetRegisterClass *RC = getOperandRegClass(*U, U.getOperandNo());
        if (!RC) {
          continue;
        }
        if (SIRI->isSGPRClass(RC)) {
          UseVReg = false;
        }
      }
      switch(NumVectorElts) {
      case 1: RegClassID = UseVReg ? AMDGPU::VReg_32RegClassID :
                                     AMDGPU::SReg_32RegClassID;
        break;
      case 2: RegClassID = UseVReg ? AMDGPU::VReg_64RegClassID :
                                     AMDGPU::SReg_64RegClassID;
        break;
      case 4: RegClassID = UseVReg ? AMDGPU::VReg_128RegClassID :
                                     AMDGPU::SReg_128RegClassID;
        break;
      case 8: RegClassID = UseVReg ? AMDGPU::VReg_256RegClassID :
                                     AMDGPU::SReg_256RegClassID;
        break;
      case 16: RegClassID = UseVReg ? AMDGPU::VReg_512RegClassID :
                                      AMDGPU::SReg_512RegClassID;
        break;
      default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
      }
    } else {
      // BUILD_VECTOR was lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG
      // that adds a 128-bit register copy when going through the
      // TwoAddressInstructions pass. We want to avoid 128-bit copies as much
      // as possible because they can't be bundled by our scheduler.
      switch(NumVectorElts) {
      case 2: RegClassID = AMDGPU::R600_Reg64RegClassID; break;
      case 4:
        if (Opc == AMDGPUISD::BUILD_VERTICAL_VECTOR)
          RegClassID = AMDGPU::R600_Reg128VerticalRegClassID;
        else
          RegClassID = AMDGPU::R600_Reg128RegClassID;
        break;
      default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
      }
    }
    SDValue RegClass = CurDAG->getTargetConstant(RegClassID, MVT::i32);

    if (NumVectorElts == 1) {
      return CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS, EltVT,
                                  N->getOperand(0), RegClass);
    }

    assert(NumVectorElts <= 16 && "Vectors with more than 16 elements not "
                                  "supported yet");
    // 16 = Max Num Vector Elements
    // 2 = 2 REG_SEQUENCE operands per element (value, subreg index)
    // 1 = Vector Register Class
    SmallVector<SDValue, 16 * 2 + 1> RegSeqArgs(NumVectorElts * 2 + 1);

    RegSeqArgs[0] = CurDAG->getTargetConstant(RegClassID, MVT::i32);
    bool IsRegSeq = true;
    unsigned NOps = N->getNumOperands();
    for (unsigned i = 0; i < NOps; i++) {
      // XXX: Why is this here?
      if (dyn_cast<RegisterSDNode>(N->getOperand(i))) {
        IsRegSeq = false;
        break;
      }
      RegSeqArgs[1 + (2 * i)] = N->getOperand(i);
      RegSeqArgs[1 + (2 * i) + 1] =
          CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), MVT::i32);
    }

    if (NOps != NumVectorElts) {
      // Fill in the missing undef elements if this was a scalar_to_vector.
      assert(Opc == ISD::SCALAR_TO_VECTOR && NOps < NumVectorElts);
      MachineSDNode *ImpDef = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                     SDLoc(N), EltVT);
      for (unsigned i = NOps; i < NumVectorElts; ++i) {
        RegSeqArgs[1 + (2 * i)] = SDValue(ImpDef, 0);
        RegSeqArgs[1 + (2 * i) + 1] =
            CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), MVT::i32);
      }
    }

    if (!IsRegSeq)
      break;
    return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(),
                                RegSeqArgs);
  }
  case ISD::BUILD_PAIR: {
    SDValue RC, SubReg0, SubReg1;
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
      break;
    }
    if (N->getValueType(0) == MVT::i128) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_128RegClassID, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, MVT::i32);
    } else if (N->getValueType(0) == MVT::i64) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);
    } else {
      llvm_unreachable("Unhandled value type for BUILD_PAIR");
    }
    const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
                            N->getOperand(1), SubReg1 };
    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
                                  SDLoc(N), N->getValueType(0), Ops);
  }
  case ISD::Constant:
  case ISD::ConstantFP: {
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
        N->getValueType(0).getSizeInBits() != 64 || isInlineImmediate(N))
      break;

    uint64_t Imm;
    if (ConstantFPSDNode *FP = dyn_cast<ConstantFPSDNode>(N))
      Imm = FP->getValueAPF().bitcastToAPInt().getZExtValue();
    else {
      ConstantSDNode *C = cast<ConstantSDNode>(N);
      Imm = C->getZExtValue();
    }

    SDNode *Lo = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SDLoc(N), MVT::i32,
                               CurDAG->getConstant(Imm & 0xFFFFFFFF, MVT::i32));
    SDNode *Hi = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SDLoc(N), MVT::i32,
                                       CurDAG->getConstant(Imm >> 32, MVT::i32));
    const SDValue Ops[] = {
        CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32),
        SDValue(Lo, 0), CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32),
        SDValue(Hi, 0), CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32)
    };

    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, SDLoc(N),
                                  N->getValueType(0), Ops);
  }
  case AMDGPUISD::REGISTER_LOAD: {
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
      break;
    SDValue Addr, Offset;

    SelectADDRIndirect(N->getOperand(1), Addr, Offset);
    const SDValue Ops[] = {
      Addr,
      Offset,
      CurDAG->getTargetConstant(0, MVT::i32),
      N->getOperand(0),
    };
    return CurDAG->getMachineNode(AMDGPU::SI_RegisterLoad, SDLoc(N),
                                  CurDAG->getVTList(MVT::i32, MVT::i64,
                                                    MVT::Other),
                                  Ops);
  }
  case AMDGPUISD::REGISTER_STORE: {
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
      break;
    SDValue Addr, Offset;
    SelectADDRIndirect(N->getOperand(2), Addr, Offset);
    const SDValue Ops[] = {
      N->getOperand(1),
      Addr,
      Offset,
      CurDAG->getTargetConstant(0, MVT::i32),
      N->getOperand(0),
    };
    return CurDAG->getMachineNode(AMDGPU::SI_RegisterStorePseudo, SDLoc(N),
                                  CurDAG->getVTList(MVT::Other),
                                  Ops);
  }
  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
      break;

    // There is a scalar version available, but unlike the vector version,
    // which has a separate operand for the offset and width, the scalar
    // version packs the width and offset into a single operand. Try to move
    // to the scalar version if the offsets are constant, so that we can try
    // to keep extended loads of kernel arguments in SGPRs.

    // TODO: Technically we could try to pattern match scalar bitshifts of
    // dynamic values, but it's probably not useful.
    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (!Offset)
      break;

    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
    if (!Width)
      break;

    bool Signed = Opc == AMDGPUISD::BFE_I32;
    // Transformation function: pack the offset and width of a BFE into
    // the format expected by S_BFE_I32 / S_BFE_U32. In the second
    // source, bits [5:0] contain the offset and bits [22:16] the width.
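    // For example, an offset of 8 and a width of 5 pack to
    // (8 | (5 << 16)) == 0x00050008.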
    uint32_t OffsetVal = Offset->getZExtValue();
    uint32_t WidthVal = Width->getZExtValue();

    uint32_t PackedVal = OffsetVal | WidthVal << 16;

    SDValue PackedOffsetWidth = CurDAG->getTargetConstant(PackedVal, MVT::i32);
    return CurDAG->getMachineNode(Signed ? AMDGPU::S_BFE_I32
                                         : AMDGPU::S_BFE_U32,
                                  SDLoc(N), MVT::i32, N->getOperand(0),
                                  PackedOffsetWidth);
  }
  case AMDGPUISD::DIV_SCALE: {
    return SelectDIV_SCALE(N);
  }
  case ISD::CopyToReg: {
    const SITargetLowering& Lowering =
        *static_cast<const SITargetLowering*>(getTargetLowering());
    Lowering.legalizeTargetIndependentNode(N, *CurDAG);
    break;
  }
  case ISD::ADDRSPACECAST:
    return SelectAddrSpaceCast(N);
  }

  return SelectCode(N);
}

bool AMDGPUDAGToDAGISel::checkType(const Value *Ptr, unsigned AS) {
  assert(AS != 0 && "Use checkPrivateAddress instead.");
  if (!Ptr)
    return false;

  return Ptr->getType()->getPointerAddressSpace() == AS;
}

bool AMDGPUDAGToDAGISel::checkPrivateAddress(const MachineMemOperand *Op) {
  if (Op->getPseudoValue())
    return true;

  if (PointerType *PT = dyn_cast<PointerType>(Op->getValue()->getType()))
    return PT->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS;

  return false;
}

bool AMDGPUDAGToDAGISel::isGlobalStore(const StoreSDNode *N) {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::GLOBAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isPrivateStore(const StoreSDNode *N) {
  const Value *MemVal = N->getMemOperand()->getValue();
  return (!checkType(MemVal, AMDGPUAS::LOCAL_ADDRESS) &&
          !checkType(MemVal, AMDGPUAS::GLOBAL_ADDRESS) &&
          !checkType(MemVal, AMDGPUAS::REGION_ADDRESS));
}

bool AMDGPUDAGToDAGISel::isLocalStore(const StoreSDNode *N) {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::LOCAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isFlatStore(const StoreSDNode *N) {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::FLAT_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isRegionStore(const StoreSDNode *N) {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::REGION_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isConstantLoad(const LoadSDNode *N, int CbId) const {
  const Value *MemVal = N->getMemOperand()->getValue();
  if (CbId == -1)
    return checkType(MemVal, AMDGPUAS::CONSTANT_ADDRESS);

  return checkType(MemVal, AMDGPUAS::CONSTANT_BUFFER_0 + CbId);
}

bool AMDGPUDAGToDAGISel::isGlobalLoad(const LoadSDNode *N) const {
  if (N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS) {
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
        N->getMemoryVT().bitsLT(MVT::i32)) {
      return true;
    }
  }

  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::GLOBAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isParamLoad(const LoadSDNode *N) const {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::PARAM_I_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isLocalLoad(const LoadSDNode *N) const {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::LOCAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isFlatLoad(const LoadSDNode *N) const {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::FLAT_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isRegionLoad(const LoadSDNode *N) const {
  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::REGION_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isCPLoad(const LoadSDNode *N) const {
  MachineMemOperand *MMO = N->getMemOperand();
  if (checkPrivateAddress(N->getMemOperand())) {
    if (MMO) {
      const PseudoSourceValue *PSV = MMO->getPseudoValue();
      if (PSV && PSV == PseudoSourceValue::getConstantPool()) {
        return true;
      }
    }
  }

  return false;
}

bool AMDGPUDAGToDAGISel::isPrivateLoad(const LoadSDNode *N) const {
  if (checkPrivateAddress(N->getMemOperand())) {
    // Check to make sure we are not a constant pool load or a constant load
    // that is marked as a private load.
    if (isCPLoad(N) || isConstantLoad(N, -1)) {
      return false;
    }
  }

  const Value *MemVal = N->getMemOperand()->getValue();
  if (!checkType(MemVal, AMDGPUAS::LOCAL_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::GLOBAL_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::FLAT_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::REGION_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::CONSTANT_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::PARAM_D_ADDRESS) &&
      !checkType(MemVal, AMDGPUAS::PARAM_I_ADDRESS)) {
    return true;
  }
  return false;
}

const char *AMDGPUDAGToDAGISel::getPassName() const {
  return "AMDGPU DAG->DAG Pattern Instruction Selection";
}

//===----------------------------------------------------------------------===//
// Complex Patterns
//===----------------------------------------------------------------------===//

bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
                                                         SDValue &IntPtr) {
  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
    IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, true);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
                                                         SDValue &BaseReg,
                                                         SDValue &Offset) {
  if (!isa<ConstantSDNode>(Addr)) {
    BaseReg = Addr;
    Offset = CurDAG->getIntPtrConstant(0, true);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *IMMOffset;

  if (Addr.getOpcode() == ISD::ADD
      && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
      && isInt<16>(IMMOffset->getZExtValue())) {

    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
    return true;
  // If the pointer address is constant, we can move it to the offset field.
  } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr))
             && isInt<16>(IMMOffset->getZExtValue())) {
    Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                  SDLoc(CurDAG->getEntryNode()),
                                  AMDGPU::ZERO, MVT::i32);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
    return true;
  }

  // Default case, no offset.
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}

bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *C;

  if ((C = dyn_cast<ConstantSDNode>(Addr))) {
    Base = CurDAG->getRegister(AMDGPU::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
  } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
  } else {
    Base = Addr;
    Offset = CurDAG->getTargetConstant(0, MVT::i32);
  }

  return true;
}

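// Lower a 64-bit add/sub by splitting it into 32-bit halves: the low halves
// go through S_ADD_U32 / S_SUB_U32 and the high halves through
// S_ADDC_U32 / S_SUBB_U32, glued together by the carry bit, and the two
// results are recombined with a REG_SEQUENCE.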
SDNode *AMDGPUDAGToDAGISel::SelectADD_SUB_I64(SDNode *N) {
  SDLoc DL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  bool IsAdd = (N->getOpcode() == ISD::ADD);

  SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
  SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);

  SDNode *Lo0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub0);
  SDNode *Hi0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub1);

  SDNode *Lo1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub0);
  SDNode *Hi1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub1);

  SDVTList VTList = CurDAG->getVTList(MVT::i32, MVT::Glue);
  SDValue AddLoArgs[] = { SDValue(Lo0, 0), SDValue(Lo1, 0) };

  unsigned Opc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
  unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;

  SDNode *AddLo = CurDAG->getMachineNode(Opc, DL, VTList, AddLoArgs);
  SDValue Carry(AddLo, 1);
  SDNode *AddHi
      = CurDAG->getMachineNode(CarryOpc, DL, MVT::i32,
                               SDValue(Hi0, 0), SDValue(Hi1, 0), Carry);

  SDValue Args[5] = {
      CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32),
      SDValue(AddLo, 0),
      Sub0,
      SDValue(AddHi, 0),
      Sub1,
  };
  return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, MVT::i64, Args);
}

SDNode *AMDGPUDAGToDAGISel::SelectDIV_SCALE(SDNode *N) {
  EVT VT = N->getValueType(0);

  assert(VT == MVT::f32 || VT == MVT::f64);

  unsigned Opc
      = (VT == MVT::f64) ? AMDGPU::V_DIV_SCALE_F64 : AMDGPU::V_DIV_SCALE_F32;

  const SDValue Zero = CurDAG->getTargetConstant(0, MVT::i32);
  const SDValue False = CurDAG->getTargetConstant(0, MVT::i1);
  SDValue Ops[] = {
    Zero,             // src0_modifiers
    N->getOperand(0), // src0
    Zero,             // src1_modifiers
    N->getOperand(1), // src1
    Zero,             // src2_modifiers
    N->getOperand(2), // src2
    False,            // clamp
    Zero              // omod
  };

  return CurDAG->SelectNodeTo(N, Opc, VT, MVT::i1, Ops);
}

bool AMDGPUDAGToDAGISel::isDSOffsetLegal(const SDValue &Base, unsigned Offset,
                                         unsigned OffsetBits) const {
  const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
  if ((OffsetBits == 16 && !isUInt<16>(Offset)) ||
      (OffsetBits == 8 && !isUInt<8>(Offset)))
    return false;

  if (ST.getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS)
    return true;

  // On Southern Islands, instructions with a negative base value and an
  // offset don't seem to work.
  return CurDAG->SignBitIsZero(Base);
}

bool AMDGPUDAGToDAGISel::SelectDS1Addr1Offset(SDValue Addr, SDValue &Base,
                                              SDValue &Offset) const {
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
    if (isDSOffsetLegal(N0, C1->getSExtValue(), 16)) {
      // (add n0, c0)
      Base = N0;
      Offset = N1;
      return true;
    }
  }

  // If we have a constant address, prefer to put the constant into the
  // offset. This can save moves to load the constant address since multiple
  // operations can share the zero base address register, and enables merging
  // into read2 / write2 instructions.
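  // For example, two loads from constant addresses 40 and 44 can share a
  // single v_mov_b32 of 0 as the base and may later be combined into one
  // ds_read2_b32.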
  if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
    if (isUInt<16>(CAddr->getZExtValue())) {
      SDValue Zero = CurDAG->getTargetConstant(0, MVT::i32);
      MachineSDNode *MovZero = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
                                                      SDLoc(Addr), MVT::i32,
                                                      Zero);
      Base = SDValue(MovZero, 0);
      Offset = Addr;
      return true;
    }
  }

  // Default case: keep the address as the base with a zero offset.
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, MVT::i16);
  return true;
}

bool AMDGPUDAGToDAGISel::SelectDS64Bit4ByteAligned(SDValue Addr, SDValue &Base,
                                                   SDValue &Offset0,
                                                   SDValue &Offset1) const {
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
    unsigned DWordOffset0 = C1->getZExtValue() / 4;
    unsigned DWordOffset1 = DWordOffset0 + 1;
    // (add n0, c0)
    if (isDSOffsetLegal(N0, DWordOffset1, 8)) {
      Base = N0;
      Offset0 = CurDAG->getTargetConstant(DWordOffset0, MVT::i8);
      Offset1 = CurDAG->getTargetConstant(DWordOffset1, MVT::i8);
      return true;
    }
  }

  if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
    unsigned DWordOffset0 = CAddr->getZExtValue() / 4;
    unsigned DWordOffset1 = DWordOffset0 + 1;
    assert(4 * DWordOffset0 == CAddr->getZExtValue());

    if (isUInt<8>(DWordOffset0) && isUInt<8>(DWordOffset1)) {
      SDValue Zero = CurDAG->getTargetConstant(0, MVT::i32);
      MachineSDNode *MovZero
          = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
                                   SDLoc(Addr), MVT::i32, Zero);
      Base = SDValue(MovZero, 0);
      Offset0 = CurDAG->getTargetConstant(DWordOffset0, MVT::i8);
      Offset1 = CurDAG->getTargetConstant(DWordOffset1, MVT::i8);
      return true;
    }
  }

  // Default case: keep the address as the base with offsets 0 and 1.
  Base = Addr;
  Offset0 = CurDAG->getTargetConstant(0, MVT::i8);
  Offset1 = CurDAG->getTargetConstant(1, MVT::i8);
  return true;
}

static bool isLegalMUBUFImmOffset(const ConstantSDNode *Imm) {
  return isUInt<12>(Imm->getZExtValue());
}

void AMDGPUDAGToDAGISel::SelectMUBUF(SDValue Addr, SDValue &Ptr,
                                     SDValue &VAddr, SDValue &SOffset,
                                     SDValue &Offset, SDValue &Offen,
                                     SDValue &Idxen, SDValue &Addr64,
                                     SDValue &GLC, SDValue &SLC,
                                     SDValue &TFE) const {
  GLC = CurDAG->getTargetConstant(0, MVT::i1);
  SLC = CurDAG->getTargetConstant(0, MVT::i1);
  TFE = CurDAG->getTargetConstant(0, MVT::i1);

  Idxen = CurDAG->getTargetConstant(0, MVT::i1);
  Offen = CurDAG->getTargetConstant(0, MVT::i1);
  Addr64 = CurDAG->getTargetConstant(0, MVT::i1);
  SOffset = CurDAG->getTargetConstant(0, MVT::i32);

  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);

    if (isLegalMUBUFImmOffset(C1)) {

      if (N0.getOpcode() == ISD::ADD) {
        // (add (add N2, N3), C1) -> addr64
        SDValue N2 = N0.getOperand(0);
        SDValue N3 = N0.getOperand(1);
        Addr64 = CurDAG->getTargetConstant(1, MVT::i1);
        Ptr = N2;
        VAddr = N3;
        Offset = CurDAG->getTargetConstant(C1->getZExtValue(), MVT::i16);
        return;
      }

      // (add N0, C1) -> offset
      VAddr = CurDAG->getTargetConstant(0, MVT::i32);
      Ptr = N0;
      Offset = CurDAG->getTargetConstant(C1->getZExtValue(), MVT::i16);
      return;
    }
  }
  if (Addr.getOpcode() == ISD::ADD) {
    // (add N0, N1) -> addr64
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    Addr64 = CurDAG->getTargetConstant(1, MVT::i1);
    Ptr = N0;
    VAddr = N1;
    Offset = CurDAG->getTargetConstant(0, MVT::i16);
    return;
  }

  // default case -> offset
  VAddr = CurDAG->getTargetConstant(0, MVT::i32);
  Ptr = Addr;
  Offset = CurDAG->getTargetConstant(0, MVT::i16);
}

bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
                                           SDValue &VAddr,
                                           SDValue &Offset) const {
  SDValue Ptr, SOffset, Offen, Idxen, Addr64, GLC, SLC, TFE;

  SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64,
              GLC, SLC, TFE);

  ConstantSDNode *C = cast<ConstantSDNode>(Addr64);
  if (C->getSExtValue()) {
    SDLoc DL(Addr);

    const SITargetLowering& Lowering =
        *static_cast<const SITargetLowering*>(getTargetLowering());

    SRsrc = SDValue(Lowering.wrapAddr64Rsrc(*CurDAG, DL, Ptr), 0);
    return true;
  }

  return false;
}

bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
                                           SDValue &VAddr, SDValue &Offset,
                                           SDValue &SLC) const {
  SLC = CurDAG->getTargetConstant(0, MVT::i1);

  return SelectMUBUFAddr64(Addr, SRsrc, VAddr, Offset);
}

bool AMDGPUDAGToDAGISel::SelectMUBUFScratch(SDValue Addr, SDValue &Rsrc,
                                            SDValue &VAddr, SDValue &SOffset,
                                            SDValue &ImmOffset) const {
  SDLoc DL(Addr);

  MachineFunction &MF = CurDAG->getMachineFunction();
  const SIRegisterInfo *TRI =
      static_cast<const SIRegisterInfo *>(MF.getSubtarget().getRegisterInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SITargetLowering& Lowering =
      *static_cast<const SITargetLowering*>(getTargetLowering());

  unsigned ScratchPtrReg =
      TRI->getPreloadedValue(MF, SIRegisterInfo::SCRATCH_PTR);
  unsigned ScratchOffsetReg =
      TRI->getPreloadedValue(MF, SIRegisterInfo::SCRATCH_WAVE_OFFSET);
  Lowering.CreateLiveInRegister(*CurDAG, &AMDGPU::SReg_32RegClass,
                                ScratchOffsetReg, MVT::i32);

  SDValue ScratchPtr =
      CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL,
                             MRI.getLiveInVirtReg(ScratchPtrReg), MVT::i64);
  Rsrc = SDValue(Lowering.buildScratchRSRC(*CurDAG, DL, ScratchPtr), 0);
  SOffset = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL,
      MRI.getLiveInVirtReg(ScratchOffsetReg), MVT::i32);

  // (add n0, c1)
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);

    if (isLegalMUBUFImmOffset(C1)) {
      VAddr = Addr.getOperand(0);
      ImmOffset = CurDAG->getTargetConstant(C1->getZExtValue(), MVT::i16);
      return true;
    }
  }

  // (add FI, n0)
  if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
      isa<FrameIndexSDNode>(Addr.getOperand(0))) {
    VAddr = Addr.getOperand(1);
    ImmOffset = Addr.getOperand(0);
    return true;
  }

  // (FI)
  if (isa<FrameIndexSDNode>(Addr)) {
    VAddr = SDValue(CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32, DL, MVT::i32,
                                           CurDAG->getConstant(0, MVT::i32)),
                    0);
    ImmOffset = Addr;
    return true;
  }

  // (node)
  VAddr = Addr;
  ImmOffset = CurDAG->getTargetConstant(0, MVT::i16);
  return true;
}

bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
                                           SDValue &SOffset, SDValue &Offset,
                                           SDValue &GLC, SDValue &SLC,
                                           SDValue &TFE) const {
  SDValue Ptr, VAddr, Offen, Idxen, Addr64;

  SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64,
              GLC, SLC, TFE);

  if (!cast<ConstantSDNode>(Offen)->getSExtValue() &&
      !cast<ConstantSDNode>(Idxen)->getSExtValue() &&
      !cast<ConstantSDNode>(Addr64)->getSExtValue()) {
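    // Build a default buffer resource descriptor from the base pointer:
    // RSRC_DATA_FORMAT supplies the format bits, and the all-ones low
    // 32 bits set the size field to its maximum.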
    uint64_t Rsrc = AMDGPU::RSRC_DATA_FORMAT |
                    APInt::getAllOnesValue(32).getZExtValue(); // Size
    SDLoc DL(Addr);

    const SITargetLowering& Lowering =
        *static_cast<const SITargetLowering*>(getTargetLowering());

    SRsrc = SDValue(Lowering.buildRSRC(*CurDAG, DL, Ptr, 0, Rsrc), 0);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
                                           SDValue &Soffset, SDValue &Offset,
                                           SDValue &GLC) const {
  SDValue SLC, TFE;

  return SelectMUBUFOffset(Addr, SRsrc, Soffset, Offset, GLC, SLC, TFE);
}

// FIXME: This is incorrect and only enough to be able to compile.
SDNode *AMDGPUDAGToDAGISel::SelectAddrSpaceCast(SDNode *N) {
  AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(N);
  SDLoc DL(N);

  assert(Subtarget.hasFlatAddressSpace() &&
         "addrspacecast only supported with flat address space!");

  assert((ASC->getSrcAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS &&
          ASC->getDestAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS) &&
         "Cannot cast address space to / from constant address!");

  assert((ASC->getSrcAddressSpace() == AMDGPUAS::FLAT_ADDRESS ||
          ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) &&
         "Can only cast to / from flat address space!");

  // The flat instructions read the address as the index of the VGPR holding
  // the address, so casting should just be reinterpreting the base VGPR, so
  // just insert trunc / bitcast / zext.
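  // For example, casting a 32-bit local pointer to a 64-bit flat pointer
  // only needs the source VGPR extended with a zero high half, which the
  // REG_SEQUENCE below builds.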
  SDValue Src = ASC->getOperand(0);
  EVT DestVT = ASC->getValueType(0);
  EVT SrcVT = Src.getValueType();

  unsigned SrcSize = SrcVT.getSizeInBits();
  unsigned DestSize = DestVT.getSizeInBits();

  if (SrcSize > DestSize) {
    assert(SrcSize == 64 && DestSize == 32);
    return CurDAG->getMachineNode(
        TargetOpcode::EXTRACT_SUBREG,
        DL,
        DestVT.getSimpleVT(),
        Src,
        CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32));
  }

  if (DestSize > SrcSize) {
    assert(SrcSize == 32 && DestSize == 64);

    SDValue RC = CurDAG->getTargetConstant(AMDGPU::VSrc_64RegClassID, MVT::i32);

    const SDValue Ops[] = {
        RC,
        Src,
        CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32),
        SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SDLoc(N), MVT::i32,
                                       CurDAG->getConstant(0, MVT::i32)), 0),
        CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32)
    };

    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
                                  SDLoc(N), N->getValueType(0), Ops);
  }

  assert(SrcSize == 64 && DestSize == 64);
  return CurDAG->getNode(ISD::BITCAST, DL, DestVT, Src).getNode();
}

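// Fold fneg / fabs on the source operand into the VOP3 source-modifier bits,
// returning the stripped source and the accumulated modifier mask.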
bool AMDGPUDAGToDAGISel::SelectVOP3Mods(SDValue In, SDValue &Src,
                                        SDValue &SrcMods) const {
  unsigned Mods = 0;

  Src = In;

  if (Src.getOpcode() == ISD::FNEG) {
    Mods |= SISrcMods::NEG;
    Src = Src.getOperand(0);
  }

  if (Src.getOpcode() == ISD::FABS) {
    Mods |= SISrcMods::ABS;
    Src = Src.getOperand(0);
  }

  SrcMods = CurDAG->getTargetConstant(Mods, MVT::i32);

  return true;
}

bool AMDGPUDAGToDAGISel::SelectVOP3Mods0(SDValue In, SDValue &Src,
                                         SDValue &SrcMods, SDValue &Clamp,
                                         SDValue &Omod) const {
  // FIXME: Handle Clamp and Omod
  Clamp = CurDAG->getTargetConstant(0, MVT::i32);
  Omod = CurDAG->getTargetConstant(0, MVT::i32);

  return SelectVOP3Mods(In, Src, SrcMods);
}

bool AMDGPUDAGToDAGISel::SelectVOP3Mods0Clamp(SDValue In, SDValue &Src,
                                              SDValue &SrcMods,
                                              SDValue &Omod) const {
  // FIXME: Handle Omod
  Omod = CurDAG->getTargetConstant(0, MVT::i32);

  return SelectVOP3Mods(In, Src, SrcMods);
}

void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
  const AMDGPUTargetLowering& Lowering =
      *static_cast<const AMDGPUTargetLowering*>(getTargetLowering());
  bool IsModified = false;
  do {
    IsModified = false;
    // Go over all selected nodes and try to fold them a bit more.
    for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
         E = CurDAG->allnodes_end(); I != E; ++I) {

      SDNode *Node = I;

      MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(I);
      if (!MachineNode)
        continue;

      SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
      if (ResNode != Node) {
        ReplaceUses(Node, ResNode);
        IsModified = true;
      }
    }
    CurDAG->RemoveDeadNodes();
  } while (IsModified);
}