//===-- AMDILISelDAGToDAG.cpp - A dag to dag inst selector for AMDIL ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//==-----------------------------------------------------------------------===//
//
/// \file
/// \brief Defines an instruction selector for the AMDGPU target.
//
//===----------------------------------------------------------------------===//
#include "AMDGPUInstrInfo.h"
#include "AMDGPUISelLowering.h" // For AMDGPUISD
#include "AMDGPURegisterInfo.h"
#include "R600InstrInfo.h"
#include "SIISelLowering.h"
#include "llvm/ADT/ValueMap.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Support/Compiler.h"
#include <vector>

using namespace llvm;

//===----------------------------------------------------------------------===//
// Instruction Selector Implementation
//===----------------------------------------------------------------------===//

namespace {

/// AMDGPU-specific code to select AMDGPU machine instructions for
/// SelectionDAG operations.
class AMDGPUDAGToDAGISel : public SelectionDAGISel {
  // Subtarget - Keep a reference to the AMDGPU Subtarget around so that we
  // can make the right decision when generating code for different targets.
  const AMDGPUSubtarget &Subtarget;

public:
  AMDGPUDAGToDAGISel(TargetMachine &TM);
  virtual ~AMDGPUDAGToDAGISel();

  SDNode *Select(SDNode *N);
  virtual const char *getPassName() const;
  virtual void PostprocessISelDAG();

private:
  inline SDValue getSmallIPtrImm(unsigned Imm);
  bool FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg, SDValue &Abs,
                   const R600InstrInfo *TII);
  bool FoldOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
  bool FoldDotOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);

  // Complex pattern selectors
  bool SelectADDRParam(SDValue Addr, SDValue &R1, SDValue &R2);
  bool SelectADDR(SDValue N, SDValue &R1, SDValue &R2);
  bool SelectADDR64(SDValue N, SDValue &R1, SDValue &R2);
  SDValue SimplifyI24(SDValue &Op);
  bool SelectI24(SDValue Addr, SDValue &Op);
  bool SelectU24(SDValue Addr, SDValue &Op);

  static bool checkType(const Value *ptr, unsigned int addrspace);

  static bool isGlobalStore(const StoreSDNode *N);
  static bool isPrivateStore(const StoreSDNode *N);
  static bool isLocalStore(const StoreSDNode *N);
  static bool isRegionStore(const StoreSDNode *N);

  bool isCPLoad(const LoadSDNode *N) const;
  bool isConstantLoad(const LoadSDNode *N, int cbID) const;
  bool isGlobalLoad(const LoadSDNode *N) const;
  bool isParamLoad(const LoadSDNode *N) const;
  bool isPrivateLoad(const LoadSDNode *N) const;
  bool isLocalLoad(const LoadSDNode *N) const;
  bool isRegionLoad(const LoadSDNode *N) const;

  bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue &IntPtr);
  bool SelectGlobalValueVariableOffset(SDValue Addr,
                                       SDValue &BaseReg, SDValue &Offset);
  bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);

  // Include the pieces autogenerated from the target description.
#include "AMDGPUGenDAGISel.inc"
};

} // end anonymous namespace

/// \brief This pass converts a legalized DAG into an AMDGPU-specific
/// DAG, ready for instruction scheduling.
FunctionPass *llvm::createAMDGPUISelDag(TargetMachine &TM) {
  return new AMDGPUDAGToDAGISel(TM);
}

AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(TargetMachine &TM)
  : SelectionDAGISel(TM), Subtarget(TM.getSubtarget<AMDGPUSubtarget>()) {
}

AMDGPUDAGToDAGISel::~AMDGPUDAGToDAGISel() {
}

SDValue AMDGPUDAGToDAGISel::getSmallIPtrImm(unsigned int Imm) {
  return CurDAG->getTargetConstant(Imm, MVT::i32);
}

bool AMDGPUDAGToDAGISel::SelectADDRParam(
    SDValue Addr, SDValue &R1, SDValue &R2) {

  if (Addr.getOpcode() == ISD::FrameIndex) {
    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
      R2 = CurDAG->getTargetConstant(0, MVT::i32);
    } else {
      R1 = Addr;
      R2 = CurDAG->getTargetConstant(0, MVT::i32);
    }
  } else if (Addr.getOpcode() == ISD::ADD) {
    R1 = Addr.getOperand(0);
    R2 = Addr.getOperand(1);
  } else {
    R1 = Addr;
    R2 = CurDAG->getTargetConstant(0, MVT::i32);
  }
  return true;
}

bool AMDGPUDAGToDAGISel::SelectADDR(SDValue Addr, SDValue &R1, SDValue &R2) {
  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
      Addr.getOpcode() == ISD::TargetGlobalAddress) {
    return false;
  }
  return SelectADDRParam(Addr, R1, R2);
}

bool AMDGPUDAGToDAGISel::SelectADDR64(SDValue Addr, SDValue &R1, SDValue &R2) {
  if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
      Addr.getOpcode() == ISD::TargetGlobalAddress) {
    return false;
  }

  if (Addr.getOpcode() == ISD::FrameIndex) {
    if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
      R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64);
      R2 = CurDAG->getTargetConstant(0, MVT::i64);
    } else {
      R1 = Addr;
      R2 = CurDAG->getTargetConstant(0, MVT::i64);
    }
  } else if (Addr.getOpcode() == ISD::ADD) {
    R1 = Addr.getOperand(0);
    R2 = Addr.getOperand(1);
  } else {
    R1 = Addr;
    R2 = CurDAG->getTargetConstant(0, MVT::i64);
  }
  return true;
}

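// Handle a few AMDGPU-specific opcodes by hand; everything else is handed to
// the TableGen-generated matcher via SelectCode(), and on R600-family
// targets the selected node then has its source modifiers folded in below.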
SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
  const R600InstrInfo *TII =
      static_cast<const R600InstrInfo*>(TM.getInstrInfo());
  unsigned int Opc = N->getOpcode();
  if (N->isMachineOpcode()) {
    return NULL; // Already selected.
  }
  switch (Opc) {
  default: break;
  case AMDGPUISD::CONST_ADDRESS: {
    for (SDNode::use_iterator I = N->use_begin(), Next = llvm::next(I);
         I != SDNode::use_end(); I = Next) {
      Next = llvm::next(I);
      if (!I->isMachineOpcode()) {
        continue;
      }
      unsigned Opcode = I->getMachineOpcode();
      bool HasDst = TII->getOperandIdx(Opcode, AMDGPU::OpName::dst) > -1;
      int SrcIdx = I.getOperandNo();
      int SelIdx;
      // Unlike MachineInstrs, SDNodes do not have results in their operand
      // list, so we need to increment the SrcIdx, since
      // R600InstrInfo::getOperandIdx is based on the MachineInstr indices.
      if (HasDst) {
        SrcIdx++;
      }

      SelIdx = TII->getSelIdx(I->getMachineOpcode(), SrcIdx);
      if (SelIdx < 0) {
        continue;
      }

      SDValue CstOffset;
      if (N->getValueType(0).isVector() ||
          !SelectGlobalValueConstantOffset(N->getOperand(0), CstOffset))
        continue;

      // Gather constant values
      int SrcIndices[] = {
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src2),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_X),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Y),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Z),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_W),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_X),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Y),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Z),
        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_W)
      };
      std::vector<unsigned> Consts;
      for (unsigned i = 0; i < sizeof(SrcIndices) / sizeof(int); i++) {
        int OtherSrcIdx = SrcIndices[i];
        int OtherSelIdx = TII->getSelIdx(Opcode, OtherSrcIdx);
        if (OtherSrcIdx < 0 || OtherSelIdx < 0) {
          continue;
        }
        if (HasDst) {
          OtherSrcIdx--;
          OtherSelIdx--;
        }
        if (RegisterSDNode *Reg =
                dyn_cast<RegisterSDNode>(I->getOperand(OtherSrcIdx))) {
          if (Reg->getReg() == AMDGPU::ALU_CONST) {
            ConstantSDNode *Cst =
                cast<ConstantSDNode>(I->getOperand(OtherSelIdx));
            Consts.push_back(Cst->getZExtValue());
          }
        }
      }

      ConstantSDNode *Cst = cast<ConstantSDNode>(CstOffset);
      Consts.push_back(Cst->getZExtValue());
      if (!TII->fitsConstReadLimitations(Consts))
        continue;

      // Convert back to SDNode indices.
      if (HasDst) {
        SrcIdx--;
        SelIdx--;
      }
      std::vector<SDValue> Ops;
      for (int i = 0, e = I->getNumOperands(); i != e; ++i) {
        if (i == SrcIdx) {
          Ops.push_back(CurDAG->getRegister(AMDGPU::ALU_CONST, MVT::f32));
        } else if (i == SelIdx) {
          Ops.push_back(CstOffset);
        } else {
          Ops.push_back(I->getOperand(i));
        }
      }
      CurDAG->UpdateNodeOperands(*I, Ops.data(), Ops.size());
    }
    break;
  }
  case ISD::BUILD_VECTOR: {
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    if (ST.getGeneration() > AMDGPUSubtarget::NORTHERN_ISLANDS) {
      break;
    }

    unsigned RegClassID;
    switch (N->getValueType(0).getVectorNumElements()) {
    case 2: RegClassID = AMDGPU::R600_Reg64RegClassID; break;
    case 4: RegClassID = AMDGPU::R600_Reg128RegClassID; break;
    default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
    }
    // BUILD_VECTOR is usually lowered into an IMPLICIT_DEF plus 4
    // INSERT_SUBREGs, which leaves a 128-bit register copy behind after the
    // TwoAddressInstructions pass. We want to avoid 128-bit copies as much
    // as possible because they cannot be bundled by our scheduler.
    SDValue RegSeqArgs[9] = {
      CurDAG->getTargetConstant(RegClassID, MVT::i32),
      SDValue(), CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32),
      SDValue(), CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32),
      SDValue(), CurDAG->getTargetConstant(AMDGPU::sub2, MVT::i32),
      SDValue(), CurDAG->getTargetConstant(AMDGPU::sub3, MVT::i32)
    };
    bool IsRegSeq = true;
    for (unsigned i = 0; i < N->getNumOperands(); i++) {
      if (isa<RegisterSDNode>(N->getOperand(i))) {
        IsRegSeq = false;
        break;
      }
      RegSeqArgs[2 * i + 1] = N->getOperand(i);
    }
    if (!IsRegSeq)
      break;
    return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(),
                                RegSeqArgs, 2 * N->getNumOperands() + 1);
  }
  case ISD::BUILD_PAIR: {
    SDValue RC, SubReg0, SubReg1;
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
      break;
    }
    if (N->getValueType(0) == MVT::i128) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_128RegClassID, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, MVT::i32);
    } else if (N->getValueType(0) == MVT::i64) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);
    } else {
      llvm_unreachable("Unhandled value type for BUILD_PAIR");
    }
    const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
                            N->getOperand(1), SubReg1 };
    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
                                  SDLoc(N), N->getValueType(0), Ops);
  }
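  // Note on the REG_SEQUENCE built above: its operands are (RegClassID,
  // Val0, SubRegIdx0, Val1, SubRegIdx1, ...), and each value lands in the
  // named subregister of a single register of the given class. An i64 pair,
  // for example, becomes one SReg_64 with the low half in sub0 and the high
  // half in sub1.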
  case ISD::ConstantFP:
  case ISD::Constant: {
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    // XXX: Custom immediate lowering not implemented yet. Instead we use
    // pseudo instructions defined in SIInstructions.td
    if (ST.getGeneration() > AMDGPUSubtarget::NORTHERN_ISLANDS) {
      break;
    }

    uint64_t ImmValue = 0;
    unsigned ImmReg = AMDGPU::ALU_LITERAL_X;

    if (N->getOpcode() == ISD::ConstantFP) {
      // XXX: 64-bit Immediates not supported yet
      assert(N->getValueType(0) != MVT::f64);

      ConstantFPSDNode *C = cast<ConstantFPSDNode>(N);
      APFloat Value = C->getValueAPF();
      float FloatValue = Value.convertToFloat();
      if (FloatValue == 0.0) {
        ImmReg = AMDGPU::ZERO;
      } else if (FloatValue == 0.5) {
        ImmReg = AMDGPU::HALF;
      } else if (FloatValue == 1.0) {
        ImmReg = AMDGPU::ONE;
      } else {
        ImmValue = Value.bitcastToAPInt().getZExtValue();
      }
    } else {
      // XXX: 64-bit Immediates not supported yet
      assert(N->getValueType(0) != MVT::i64);

      ConstantSDNode *C = cast<ConstantSDNode>(N);
      if (C->getZExtValue() == 0) {
        ImmReg = AMDGPU::ZERO;
      } else if (C->getZExtValue() == 1) {
        ImmReg = AMDGPU::ONE_INT;
      } else {
        ImmValue = C->getZExtValue();
      }
    }

    for (SDNode::use_iterator Use = N->use_begin(), Next = llvm::next(Use);
         Use != SDNode::use_end(); Use = Next) {
      Next = llvm::next(Use);
      std::vector<SDValue> Ops;
      for (unsigned i = 0; i < Use->getNumOperands(); ++i) {
        Ops.push_back(Use->getOperand(i));
      }

      if (!Use->isMachineOpcode()) {
        if (ImmReg == AMDGPU::ALU_LITERAL_X) {
          // We can only use literal constants (e.g. AMDGPU::ZERO,
          // AMDGPU::ONE, etc) in machine opcodes.
          continue;
        }
      } else {
        if (!TII->isALUInstr(Use->getMachineOpcode()) ||
            (TII->get(Use->getMachineOpcode()).TSFlags &
             R600_InstFlag::VECTOR)) {
          continue;
        }

        int ImmIdx = TII->getOperandIdx(Use->getMachineOpcode(),
                                        AMDGPU::OpName::literal);
        if (ImmIdx == -1) {
          continue;
        }

        if (TII->getOperandIdx(Use->getMachineOpcode(),
                               AMDGPU::OpName::dst) != -1) {
          // Subtract one from ImmIdx, because the DST operand is usually
          // index 0 for MachineInstrs, but we have no DST in the Ops vector.
          ImmIdx--;
        }

        // Check that we aren't already using an immediate.
        // XXX: It's possible for an instruction to have more than one
        // immediate operand, but this is not supported yet.
        if (ImmReg == AMDGPU::ALU_LITERAL_X) {
          ConstantSDNode *C = cast<ConstantSDNode>(Use->getOperand(ImmIdx));
          if (C->getZExtValue() != 0) {
            // This instruction is already using an immediate.
            continue;
          }

          // Set the immediate value.
          Ops[ImmIdx] = CurDAG->getTargetConstant(ImmValue, MVT::i32);
        }
      }
      // Set the immediate register.
      Ops[Use.getOperandNo()] = CurDAG->getRegister(ImmReg, MVT::i32);

      CurDAG->UpdateNodeOperands(*Use, Ops.data(), Use->getNumOperands());
    }
    break;
  }
  }

  SDNode *Result = SelectCode(N);

  // Fold operands of selected node.
  const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
  if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
    const R600InstrInfo *TII =
        static_cast<const R600InstrInfo*>(TM.getInstrInfo());
    if (Result && Result->isMachineOpcode() &&
        Result->getMachineOpcode() == AMDGPU::DOT_4) {
      bool IsModified = false;
      do {
        std::vector<SDValue> Ops;
        for (SDNode::op_iterator I = Result->op_begin(), E = Result->op_end();
             I != E; ++I)
          Ops.push_back(*I);
        IsModified = FoldDotOperands(Result->getMachineOpcode(), TII, Ops);
        if (IsModified) {
          Result = CurDAG->UpdateNodeOperands(Result, Ops.data(), Ops.size());
        }
      } while (IsModified);
    }

    if (Result && Result->isMachineOpcode() &&
        !(TII->get(Result->getMachineOpcode()).TSFlags & R600_InstFlag::VECTOR)
        && TII->hasInstrModifiers(Result->getMachineOpcode())) {
      // Fold FNEG/FABS.
      // TODO: Isel can generate multiple MachineInsts; we need to
      // recursively parse Result.
      bool IsModified = false;
      do {
        std::vector<SDValue> Ops;
        for (SDNode::op_iterator I = Result->op_begin(), E = Result->op_end();
             I != E; ++I)
          Ops.push_back(*I);
        IsModified = FoldOperands(Result->getMachineOpcode(), TII, Ops);
        if (IsModified) {
          Result = CurDAG->UpdateNodeOperands(Result, Ops.data(), Ops.size());
        }
      } while (IsModified);

      // If the node has a single use which is CLAMP_R600, fold it.
      if (Result->hasOneUse() && Result->isMachineOpcode()) {
        SDNode *PotentialClamp = *Result->use_begin();
        if (PotentialClamp->isMachineOpcode() &&
            PotentialClamp->getMachineOpcode() == AMDGPU::CLAMP_R600) {
          unsigned ClampIdx =
              TII->getOperandIdx(Result->getMachineOpcode(),
                                 AMDGPU::OpName::clamp);
          std::vector<SDValue> Ops;
          unsigned NumOp = Result->getNumOperands();
          for (unsigned i = 0; i < NumOp; ++i) {
            Ops.push_back(Result->getOperand(i));
          }
          Ops[ClampIdx - 1] = CurDAG->getTargetConstant(1, MVT::i32);
          Result = CurDAG->SelectNodeTo(PotentialClamp,
                                        Result->getMachineOpcode(),
                                        PotentialClamp->getVTList(),
                                        Ops.data(), NumOp);
        }
      }
    }
  }

  return Result;
}

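// Fold an FNEG or FABS source modifier (or a no-op BITCAST) feeding Src into
// the explicit Neg/Abs immediate operands of an R600 ALU instruction; Src is
// rewritten to the modifier's input. Returns true if something was folded.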
bool AMDGPUDAGToDAGISel::FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg,
                                     SDValue &Abs, const R600InstrInfo *TII) {
  switch (Src.getOpcode()) {
  case ISD::FNEG:
    Src = Src.getOperand(0);
    Neg = CurDAG->getTargetConstant(1, MVT::i32);
    return true;
  case ISD::FABS:
    if (!Abs.getNode())
      return false;
    Src = Src.getOperand(0);
    Abs = CurDAG->getTargetConstant(1, MVT::i32);
    return true;
  case ISD::BITCAST:
    Src = Src.getOperand(0);
    return true;
  default:
    return false;
  }
}

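// Try to fold a modifier into each of the (up to three) scalar sources of an
// R600 ALU instruction. Ops mirrors the operand list of the selected SDNode,
// which has no DST operand, hence the "- 1" when indexing Ops with
// MachineInstr-based operand indices.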
bool AMDGPUDAGToDAGISel::FoldOperands(unsigned Opcode,
    const R600InstrInfo *TII, std::vector<SDValue> &Ops) {
  int OperandIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src2)
  };
  int SelIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src2_sel)
  };
  int NegIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src2_neg)
  };
  int AbsIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs),
    -1 // src2 has no abs modifier.
  };

  for (unsigned i = 0; i < 3; i++) {
    if (OperandIdx[i] < 0)
      return false;
    SDValue &Src = Ops[OperandIdx[i] - 1];
    SDValue &Sel = Ops[SelIdx[i] - 1];
    SDValue &Neg = Ops[NegIdx[i] - 1];
    SDValue FakeAbs;
    SDValue &Abs = (AbsIdx[i] > -1) ? Ops[AbsIdx[i] - 1] : FakeAbs;
    if (FoldOperand(Src, Sel, Neg, Abs, TII))
      return true;
  }
  return false;
}

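// Same idea as FoldOperands, but for the eight per-channel sources
// (src0_X .. src1_W) of a DOT_4 instruction.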
bool AMDGPUDAGToDAGISel::FoldDotOperands(unsigned Opcode,
    const R600InstrInfo *TII, std::vector<SDValue> &Ops) {
  int OperandIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_W),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_W)
  };
  int SelIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_W),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_W)
  };
  int NegIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_W),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_W)
  };
  int AbsIdx[] = {
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_W),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_X),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_Y),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_Z),
    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_W)
  };

  for (unsigned i = 0; i < 8; i++) {
    if (OperandIdx[i] < 0)
      return false;
    SDValue &Src = Ops[OperandIdx[i] - 1];
    SDValue &Sel = Ops[SelIdx[i] - 1];
    SDValue &Neg = Ops[NegIdx[i] - 1];
    SDValue &Abs = Ops[AbsIdx[i] - 1];
    if (FoldOperand(Src, Sel, Neg, Abs, TII))
      return true;
  }
  return false;
}

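// Return true if ptr is a pointer into the given address space. A null
// pointer does not match any address space.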
bool AMDGPUDAGToDAGISel::checkType(const Value *ptr, unsigned int addrspace) {
  if (!ptr) {
    return false;
  }
  Type *ptrType = ptr->getType();
  return cast<PointerType>(ptrType)->getAddressSpace() == addrspace;
}

bool AMDGPUDAGToDAGISel::isGlobalStore(const StoreSDNode *N) {
  return checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isPrivateStore(const StoreSDNode *N) {
  return (!checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS)
          && !checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS)
          && !checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS));
}

bool AMDGPUDAGToDAGISel::isLocalStore(const StoreSDNode *N) {
  return checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isRegionStore(const StoreSDNode *N) {
  return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isConstantLoad(const LoadSDNode *N, int CbId) const {
  if (CbId == -1) {
    return checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS);
  }
  return checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_BUFFER_0 + CbId);
}

bool AMDGPUDAGToDAGISel::isGlobalLoad(const LoadSDNode *N) const {
  if (N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS) {
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
        N->getMemoryVT().bitsLT(MVT::i32)) {
      return false;
    }
  }
  return checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isParamLoad(const LoadSDNode *N) const {
  return checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isLocalLoad(const LoadSDNode *N) const {
  return checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isRegionLoad(const LoadSDNode *N) const {
  return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS);
}

bool AMDGPUDAGToDAGISel::isCPLoad(const LoadSDNode *N) const {
  MachineMemOperand *MMO = N->getMemOperand();
  if (checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS)) {
    if (MMO) {
      const Value *V = MMO->getValue();
      const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V);
      if (PSV && PSV == PseudoSourceValue::getConstantPool()) {
        return true;
      }
    }
  }
  return false;
}

bool AMDGPUDAGToDAGISel::isPrivateLoad(const LoadSDNode *N) const {
  if (checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS)) {
    // Check to make sure we are not a constant pool load or a constant load
    // that is marked as a private load.
    if (isCPLoad(N) || isConstantLoad(N, -1)) {
      return false;
    }
  }
  if (!checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_D_ADDRESS)
      && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS)) {
    return true;
  }
  return false;
}

const char *AMDGPUDAGToDAGISel::getPassName() const {
  return "AMDGPU DAG->DAG Pattern Instruction Selection";
}

//===----------------------------------------------------------------------===//
// Complex Patterns
//===----------------------------------------------------------------------===//

bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
                                                         SDValue &IntPtr) {
  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
    IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, true);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
                                                         SDValue &BaseReg,
                                                         SDValue &Offset) {
  if (!isa<ConstantSDNode>(Addr)) {
    BaseReg = Addr;
    Offset = CurDAG->getIntPtrConstant(0, true);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *IMMOffset;

  if (Addr.getOpcode() == ISD::ADD
      && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
      && isInt<16>(IMMOffset->getZExtValue())) {

    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
    return true;
  // If the pointer address is constant, we can move it to the offset field.
  } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr))
             && isInt<16>(IMMOffset->getZExtValue())) {
    Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                  SDLoc(CurDAG->getEntryNode()),
                                  AMDGPU::ZERO, MVT::i32);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32);
    return true;
  }

  // Default case, no offset.
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}

bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *C;

  if ((C = dyn_cast<ConstantSDNode>(Addr))) {
    Base = CurDAG->getRegister(AMDGPU::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
  } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
  } else {
    Base = Addr;
    Offset = CurDAG->getTargetConstant(0, MVT::i32);
  }

  return true;
}

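// Tell the DAG combiner that only the low 24 bits of Op are demanded. This
// lets wrappers like (and x, 0xffffff) be stripped before the value feeds a
// 24-bit instruction, and repeats until nothing simplifies further.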
SDValue AMDGPUDAGToDAGISel::SimplifyI24(SDValue &Op) {
  APInt Demanded = APInt(32, 0x00FFFFFF);
  APInt KnownZero, KnownOne;
  TargetLowering::TargetLoweringOpt TLO(*CurDAG, true, true);
  const TargetLowering *TLI = getTargetLowering();
  if (TLI->SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO)) {
    CurDAG->ReplaceAllUsesWith(Op, TLO.New);
    CurDAG->RepositionNode(Op.getNode(), TLO.New.getNode());
    return SimplifyI24(TLO.New);
  }
  return Op;
}

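// Nine known sign bits mean the top nine bits of a 32-bit value are all
// copies of bit 23, i.e. the value is exactly representable as a signed
// 24-bit integer. (Values with even more sign bits would also fit, but are
// not matched by the test below.)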
bool AMDGPUDAGToDAGISel::SelectI24(SDValue Op, SDValue &I24) {
  assert(Op.getValueType() == MVT::i32);

  if (CurDAG->ComputeNumSignBits(Op) == 9) {
    I24 = SimplifyI24(Op);
    return true;
  }
  return false;
}

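// Match a 32-bit value whose top eight bits are known to be zero, or an
// ANY_EXTEND/EXTLOAD from a sub-i32 type, either of which is safe to feed
// the unsigned 24-bit instructions.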
bool AMDGPUDAGToDAGISel::SelectU24(SDValue Op, SDValue &U24) {
  APInt KnownZero;
  APInt KnownOne;
  CurDAG->ComputeMaskedBits(Op, KnownZero, KnownOne);

  assert(Op.getValueType() == MVT::i32);

  // ANY_EXTEND and EXTLOAD operations can only be done on types smaller than
  // i32. These smaller types are legal to use with the i24 instructions.
  if ((KnownZero & APInt(KnownZero.getBitWidth(), 0xFF000000)) == 0xFF000000 ||
      Op.getOpcode() == ISD::ANY_EXTEND ||
      ISD::isEXTLoad(Op.getNode())) {
    U24 = SimplifyI24(Op);
    return true;
  }
  return false;
}

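// Run one more round of target-specific folding over the selected machine
// nodes. This is currently only implemented for Southern Islands and newer
// subtargets, hence the early-out below.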
void AMDGPUDAGToDAGISel::PostprocessISelDAG() {

  if (Subtarget.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS) {
    return;
  }

  // Go over all selected nodes and try to fold them a bit more.
  const AMDGPUTargetLowering &Lowering =
      *static_cast<const AMDGPUTargetLowering*>(getTargetLowering());
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ++I) {

    SDNode *Node = I;
    switch (Node->getOpcode()) {
    // Fix the register class in CopyToReg nodes - ISel will always use SReg
    // classes for 64-bit copies, but this is not always what we want.
    case ISD::CopyToReg: {
      unsigned Reg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
      SDValue Val = Node->getOperand(2);
      const TargetRegisterClass *RC = RegInfo->getRegClass(Reg);
      if (RC != &AMDGPU::SReg_64RegClass) {
        continue;
      }

      if (!Val.getNode()->isMachineOpcode() ||
          Val.getNode()->getMachineOpcode() == AMDGPU::IMPLICIT_DEF) {
        continue;
      }

      const MCInstrDesc Desc =
          TM.getInstrInfo()->get(Val.getNode()->getMachineOpcode());
      const TargetRegisterInfo *TRI = TM.getRegisterInfo();
      RegInfo->setRegClass(Reg, TRI->getRegClass(Desc.OpInfo[0].RegClass));
      continue;
    }
    }

    MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(I);
    if (!MachineNode)
      continue;

    SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
    if (ResNode != Node) {
      ReplaceUses(Node, ResNode);
    }
  }
}