//===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This is the parent TargetLowering class for hardware code gen
/// targets.
//
//===----------------------------------------------------------------------===//
#include "AMDGPUISelLowering.h"
#include "AMDGPUFrameLowering.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDILIntrinsicInfo.h"
#include "R600MachineFunctionInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"

using namespace llvm;

namespace {
/// Diagnostic information for unimplemented or unsupported feature reporting.
class DiagnosticInfoUnsupported : public DiagnosticInfo {
private:
  const Twine &Description;
  const Function &Fn;

  static int KindID;

  static int getKindID() {
    if (KindID == 0)
      KindID = llvm::getNextAvailablePluginDiagnosticKind();
    return KindID;
  }

public:
  DiagnosticInfoUnsupported(const Function &Fn, const Twine &Desc,
                            DiagnosticSeverity Severity = DS_Error)
    : DiagnosticInfo(getKindID(), Severity),
      Description(Desc), Fn(Fn) { }

  const Function &getFunction() const { return Fn; }
  const Twine &getDescription() const { return Description; }

  void print(DiagnosticPrinter &DP) const override {
    DP << "unsupported " << getDescription() << " in " << Fn.getName();
  }

  static bool classof(const DiagnosticInfo *DI) {
    return DI->getKind() == getKindID();
  }
};

int DiagnosticInfoUnsupported::KindID = 0;
} // end anonymous namespace
static bool allocateStack(unsigned ValNo, MVT ValVT, MVT LocVT,
                          CCValAssign::LocInfo LocInfo,
                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
  unsigned Offset = State.AllocateStack(ValVT.getStoreSize(),
                                        ArgFlags.getOrigAlign());
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));

  return true;
}

#include "AMDGPUGenCallingConv.inc"
AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
  TargetLowering(TM, new TargetLoweringObjectFileELF()) {

  Subtarget = &TM.getSubtarget<AMDGPUSubtarget>();

  // Initialize target lowering borrowed from AMDIL
  InitAMDILLowering();

  // We need to custom lower some of the intrinsics
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // Library functions. These default to Expand, but we have instructions
  // for them.
  setOperationAction(ISD::FCEIL, MVT::f32, Legal);
  setOperationAction(ISD::FEXP2, MVT::f32, Legal);
  setOperationAction(ISD::FPOW, MVT::f32, Legal);
  setOperationAction(ISD::FLOG2, MVT::f32, Legal);
  setOperationAction(ISD::FABS, MVT::f32, Legal);
  setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
  setOperationAction(ISD::FRINT, MVT::f32, Legal);
  setOperationAction(ISD::FROUND, MVT::f32, Legal);
  setOperationAction(ISD::FTRUNC, MVT::f32, Legal);

  // The hardware supports ROTR, but not ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  // Lower floating point store/load to integer store/load to reduce the number
  // of patterns in tablegen.
  setOperationAction(ISD::STORE, MVT::f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);

  setOperationAction(ISD::STORE, MVT::v2f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v4f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::v8f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::STORE, MVT::v16f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::STORE, MVT::f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::f64, MVT::i64);

  setOperationAction(ISD::STORE, MVT::v2f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f64, MVT::v2i64);

  // Custom lowering of vector stores is required for local address space
  // stores.
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
  // XXX: Native v2i32 local address space stores are possible, but not
  // currently implemented.
  setOperationAction(ISD::STORE, MVT::v2i32, Custom);

  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Custom);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Custom);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Custom);

  // XXX: This can be changed to Custom, once ExpandVectorStores can
  // handle 64-bit stores.
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);

  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i64, MVT::i1, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
  setTruncStoreAction(MVT::v4i64, MVT::v4i1, Expand);
  setOperationAction(ISD::LOAD, MVT::f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);

  setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::LOAD, MVT::f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f64, MVT::i64);

  setOperationAction(ISD::LOAD, MVT::v2f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f64, MVT::v2i64);

  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i32, Custom);

  setLoadExtAction(ISD::EXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4i16, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v4i16, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i16, Expand);
  setOperationAction(ISD::BR_CC, MVT::i1, Expand);

  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);

  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);

  setOperationAction(ISD::MUL, MVT::i64, Expand);
  setOperationAction(ISD::SUB, MVT::i64, Expand);

  setOperationAction(ISD::UDIV, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
  setOperationAction(ISD::UDIVREM, MVT::i64, Custom);
  setOperationAction(ISD::UREM, MVT::i32, Expand);

  if (!Subtarget->hasBFI()) {
    // fcopysign can be done in a single instruction with BFI.
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  }

  if (!Subtarget->hasBCNT(32))
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);

  if (!Subtarget->hasBCNT(64))
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  for (MVT VT : { MVT::i32, MVT::i64 }) {
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
  }

  static const MVT::SimpleValueType IntTypes[] = {
    MVT::v2i32, MVT::v4i32
  };

  for (MVT VT : IntTypes) {
    // Expand the following operations for the current type by default.
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::AND, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::OR, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::XOR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
  }
  static const MVT::SimpleValueType FloatTypes[] = {
    MVT::v2f32, MVT::v4f32
  };

  for (MVT VT : FloatTypes) {
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::FNEG, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
  }

  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::SELECT_CC);
}
//===----------------------------------------------------------------------===//
// Target Information
//===----------------------------------------------------------------------===//
MVT AMDGPUTargetLowering::getVectorIdxTy() const {
  return MVT::i32;
}
bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy,
                                                   EVT CastTy) const {
  if (LoadTy.getSizeInBits() != CastTy.getSizeInBits())
    return true;

  unsigned LScalarSize = LoadTy.getScalarType().getSizeInBits();
  unsigned CastScalarSize = CastTy.getScalarType().getSizeInBits();

  return ((LScalarSize <= CastScalarSize) ||
          (CastScalarSize >= 32) ||
          (LScalarSize < 32));
}
//===---------------------------------------------------------------------===//
// Target Properties
//===---------------------------------------------------------------------===//
bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32;
}

bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32;
}

bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
  // Truncate is just accessing a subregister.
  return Dest.bitsLT(Source) && (Dest.getSizeInBits() % 32 == 0);
}

bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
  // Truncate is just accessing a subregister.
  return Dest->getPrimitiveSizeInBits() < Source->getPrimitiveSizeInBits() &&
         (Dest->getPrimitiveSizeInBits() % 32 == 0);
}

bool AMDGPUTargetLowering::isZExtFree(Type *Src, Type *Dest) const {
  const DataLayout *DL = getDataLayout();
  unsigned SrcSize = DL->getTypeSizeInBits(Src->getScalarType());
  unsigned DestSize = DL->getTypeSizeInBits(Dest->getScalarType());

  return SrcSize == 32 && DestSize == 64;
}

bool AMDGPUTargetLowering::isZExtFree(EVT Src, EVT Dest) const {
  // Any register load of a 64-bit value really requires 2 32-bit moves. For all
  // practical purposes, the extra mov 0 to load a 64-bit is free. As used,
  // this will enable reducing 64-bit operations to 32-bit ones, which is
  // always good.
  return Src == MVT::i32 && Dest == MVT::i64;
}
bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
  // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
  // limited number of native 64-bit operations. Shrinking an operation to fit
  // in a single 32-bit register should always be helpful. As currently used,
  // this is much less general than the name suggests, and is only used in
  // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits is
  // not profitable, and may actually be harmful.
  return SrcVT.getSizeInBits() > 32 && DestVT.getSizeInBits() == 32;
}
//===---------------------------------------------------------------------===//
// TargetLowering Callbacks
//===---------------------------------------------------------------------===//
void AMDGPUTargetLowering::AnalyzeFormalArguments(CCState &State,
                             const SmallVectorImpl<ISD::InputArg> &Ins) const {

  State.AnalyzeFormalArguments(Ins, CC_AMDGPU);
}
SDValue AMDGPUTargetLowering::LowerReturn(
                                     SDValue Chain,
                                     CallingConv::ID CallConv,
                                     bool isVarArg,
                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
                                     const SmallVectorImpl<SDValue> &OutVals,
                                     SDLoc DL, SelectionDAG &DAG) const {
  return DAG.getNode(AMDGPUISD::RET_FLAG, DL, MVT::Other, Chain);
}
//===---------------------------------------------------------------------===//
// Target specific lowering
//===---------------------------------------------------------------------===//
SDValue AMDGPUTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                        SmallVectorImpl<SDValue> &InVals) const {
  SDValue Callee = CLI.Callee;
  SelectionDAG &DAG = CLI.DAG;

  const Function &Fn = *DAG.getMachineFunction().getFunction();

  StringRef FuncName("<unknown>");

  if (const ExternalSymbolSDNode *G = dyn_cast<ExternalSymbolSDNode>(Callee))
    FuncName = G->getSymbol();
  else if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    FuncName = G->getGlobal()->getName();

  DiagnosticInfoUnsupported NoCalls(Fn, "call to function " + FuncName);
  DAG.getContext()->diagnose(NoCalls);
  return SDValue();
}
SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG)
    const {
  switch (Op.getOpcode()) {
  default:
    Op.getNode()->dump();
    llvm_unreachable("Custom lowering code for this "
                     "instruction is not implemented yet!");
    break;
  // AMDIL DAG lowering
  case ISD::SDIV: return LowerSDIV(Op, DAG);
  case ISD::SREM: return LowerSREM(Op, DAG);
  case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
  // AMDGPU DAG lowering
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
  case ISD::FrameIndex: return LowerFrameIndex(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
  }
  return Op;
}
void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N,
                                              SmallVectorImpl<SDValue> &Results,
                                              SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::SIGN_EXTEND_INREG:
    // Different parts of legalization seem to interpret which type of
    // sign_extend_inreg is the one to check for custom lowering. The extended
    // from type is what really matters, but some places check for custom
    // lowering of the result type. This results in trying to use
    // ReplaceNodeResults to sext_in_reg to an illegal type, so we'll just do
    // nothing here and let the illegal result integer be handled normally.
    return;
  case ISD::UDIV: {
    SDValue Op = SDValue(N, 0);
    SDLoc DL(Op);
    EVT VT = Op.getValueType();
    SDValue UDIVREM = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(VT, VT),
                                  N->getOperand(0), N->getOperand(1));
    Results.push_back(UDIVREM);
    break;
  }
  case ISD::UREM: {
    SDValue Op = SDValue(N, 0);
    SDLoc DL(Op);
    EVT VT = Op.getValueType();
    SDValue UDIVREM = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(VT, VT),
                                  N->getOperand(0), N->getOperand(1));
    Results.push_back(UDIVREM.getValue(1));
    break;
  }
  case ISD::UDIVREM: {
    SDValue Op = SDValue(N, 0);
    SDLoc DL(Op);
    EVT VT = Op.getValueType();
    EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());

    SDValue one = DAG.getConstant(1, HalfVT);
    SDValue zero = DAG.getConstant(0, HalfVT);

    // HiLo split
    SDValue LHS = N->getOperand(0);
    SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, zero);
    SDValue LHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, one);

    SDValue RHS = N->getOperand(1);
    SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, zero);
    SDValue RHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, one);

    // Get Speculative values
    SDValue DIV_Part = DAG.getNode(ISD::UDIV, DL, HalfVT, LHS_Hi, RHS_Lo);
    SDValue REM_Part = DAG.getNode(ISD::UREM, DL, HalfVT, LHS_Hi, RHS_Lo);

    SDValue REM_Hi = zero;
    SDValue REM_Lo = DAG.getSelectCC(DL, RHS_Hi, zero, REM_Part, LHS_Hi, ISD::SETEQ);

    SDValue DIV_Hi = DAG.getSelectCC(DL, RHS_Hi, zero, DIV_Part, zero, ISD::SETEQ);
    SDValue DIV_Lo = zero;

    const unsigned halfBitWidth = HalfVT.getSizeInBits();

    for (unsigned i = 0; i < halfBitWidth; ++i) {
      SDValue POS = DAG.getConstant(halfBitWidth - i - 1, HalfVT);
      // Get value of high bit
      SDValue HBit;
      if (halfBitWidth == 32 && Subtarget->hasBFE()) {
        HBit = DAG.getNode(AMDGPUISD::BFE_U32, DL, HalfVT, LHS_Lo, POS, one);
      } else {
        HBit = DAG.getNode(ISD::SRL, DL, HalfVT, LHS_Lo, POS);
        HBit = DAG.getNode(ISD::AND, DL, HalfVT, HBit, one);
      }

      SDValue Carry = DAG.getNode(ISD::SRL, DL, HalfVT, REM_Lo,
                                  DAG.getConstant(halfBitWidth - 1, HalfVT));
      REM_Hi = DAG.getNode(ISD::SHL, DL, HalfVT, REM_Hi, one);
      REM_Hi = DAG.getNode(ISD::OR, DL, HalfVT, REM_Hi, Carry);

      REM_Lo = DAG.getNode(ISD::SHL, DL, HalfVT, REM_Lo, one);
      REM_Lo = DAG.getNode(ISD::OR, DL, HalfVT, REM_Lo, HBit);

      SDValue REM = DAG.getNode(ISD::BUILD_PAIR, DL, VT, REM_Lo, REM_Hi);

      SDValue BIT = DAG.getConstant(1 << (halfBitWidth - i - 1), HalfVT);
      SDValue realBIT = DAG.getSelectCC(DL, REM, RHS, BIT, zero, ISD::SETGE);

      DIV_Lo = DAG.getNode(ISD::OR, DL, HalfVT, DIV_Lo, realBIT);

      // Update REM
      SDValue REM_sub = DAG.getNode(ISD::SUB, DL, VT, REM, RHS);

      REM = DAG.getSelectCC(DL, REM, RHS, REM_sub, REM, ISD::SETGE);
      REM_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, REM, zero);
      REM_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, REM, one);
    }

    SDValue REM = DAG.getNode(ISD::BUILD_PAIR, DL, VT, REM_Lo, REM_Hi);
    SDValue DIV = DAG.getNode(ISD::BUILD_PAIR, DL, VT, DIV_Lo, DIV_Hi);
    Results.push_back(DIV);
    Results.push_back(REM);
    break;
  }
  default:
    return;
  }
}
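// Note on the ISD::UDIVREM expansion above: the loop is plain restoring long
// division over the low half, one bit per iteration. A small illustrative
// trace with assumed 4-bit halves: dividing 0b1011 by 0b0010 shifts one
// dividend bit into REM each step; whenever REM >= RHS, RHS is subtracted and
// the matching quotient bit is set in DIV_Lo.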
// FIXME: This implements accesses to initialized globals in the constant
// address space by copying them to private and accessing that. It does not
// properly handle illegal types or vectors. The private vector loads are not
// scalarized, and the illegal scalars hit an assertion. This technique will not
// work well with large initializers, and this should eventually be
// removed. Initialized globals should be placed into a data section that the
// runtime will load into a buffer before the kernel is executed. Uses of the
// global need to be replaced with a pointer loaded from an implicit kernel
// argument into this buffer holding the copy of the data, which will remove the
// need for any of this.
SDValue AMDGPUTargetLowering::LowerConstantInitializer(const Constant* Init,
                                                       const GlobalValue *GV,
                                                       const SDValue &InitPtr,
                                                       SDValue Chain,
                                                       SelectionDAG &DAG) const {
  const DataLayout *TD = getTargetMachine().getDataLayout();
  SDLoc DL(InitPtr);

  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Init)) {
    EVT VT = EVT::getEVT(CI->getType());
    PointerType *PtrTy = PointerType::get(CI->getType(), 0);
    return DAG.getStore(Chain, DL, DAG.getConstant(*CI, VT), InitPtr,
                        MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
                        TD->getPrefTypeAlignment(CI->getType()));
  }

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(Init)) {
    EVT VT = EVT::getEVT(CFP->getType());
    PointerType *PtrTy = PointerType::get(CFP->getType(), 0);
    return DAG.getStore(Chain, DL, DAG.getConstantFP(*CFP, VT), InitPtr,
                        MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
                        TD->getPrefTypeAlignment(CFP->getType()));
  }

  Type *InitTy = Init->getType();
  if (StructType *ST = dyn_cast<StructType>(InitTy)) {
    const StructLayout *SL = TD->getStructLayout(ST);

    EVT PtrVT = InitPtr.getValueType();
    SmallVector<SDValue, 8> Chains;

    for (unsigned I = 0, N = ST->getNumElements(); I != N; ++I) {
      SDValue Offset = DAG.getConstant(SL->getElementOffset(I), PtrVT);
      SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, InitPtr, Offset);

      Constant *Elt = Init->getAggregateElement(I);
      Chains.push_back(LowerConstantInitializer(Elt, GV, Ptr, Chain, DAG));
    }

    return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  }

  if (SequentialType *SeqTy = dyn_cast<SequentialType>(InitTy)) {
    EVT PtrVT = InitPtr.getValueType();

    unsigned NumElements;
    if (ArrayType *AT = dyn_cast<ArrayType>(SeqTy))
      NumElements = AT->getNumElements();
    else if (VectorType *VT = dyn_cast<VectorType>(SeqTy))
      NumElements = VT->getNumElements();
    else
      llvm_unreachable("Unexpected type");

    unsigned EltSize = TD->getTypeAllocSize(SeqTy->getElementType());
    SmallVector<SDValue, 8> Chains;
    for (unsigned i = 0; i < NumElements; ++i) {
      SDValue Offset = DAG.getConstant(i * EltSize, PtrVT);
      SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, InitPtr, Offset);

      Constant *Elt = Init->getAggregateElement(i);
      Chains.push_back(LowerConstantInitializer(Elt, GV, Ptr, Chain, DAG));
    }

    return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  }

  llvm_unreachable("Unhandled constant initializer");
}
SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
                                                 SDValue Op,
                                                 SelectionDAG &DAG) const {

  const DataLayout *TD = getTargetMachine().getDataLayout();
  GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = G->getGlobal();

  switch (G->getAddressSpace()) {
  default: llvm_unreachable("Global Address lowering not implemented for this "
                            "address space");
  case AMDGPUAS::LOCAL_ADDRESS: {
    // XXX: What does the value of G->getOffset() mean?
    assert(G->getOffset() == 0 &&
           "Do not know what to do with a non-zero offset");

    unsigned Offset;
    if (MFI->LocalMemoryObjects.count(GV) == 0) {
      uint64_t Size = TD->getTypeAllocSize(GV->getType()->getElementType());
      Offset = MFI->LDSSize;
      MFI->LocalMemoryObjects[GV] = Offset;
      // XXX: Account for alignment?
      MFI->LDSSize += Size;
    } else {
      Offset = MFI->LocalMemoryObjects[GV];
    }

    return DAG.getConstant(Offset, getPointerTy(G->getAddressSpace()));
  }
  case AMDGPUAS::CONSTANT_ADDRESS: {
    MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
    Type *EltType = GV->getType()->getElementType();
    unsigned Size = TD->getTypeAllocSize(EltType);
    unsigned Alignment = TD->getPrefTypeAlignment(EltType);

    const GlobalVariable *Var = cast<GlobalVariable>(GV);
    const Constant *Init = Var->getInitializer();
    int FI = FrameInfo->CreateStackObject(Size, Alignment, false);
    SDValue InitPtr = DAG.getFrameIndex(FI,
        getPointerTy(AMDGPUAS::PRIVATE_ADDRESS));
    SmallVector<SDNode*, 8> WorkList;

    for (SDNode::use_iterator I = DAG.getEntryNode()->use_begin(),
                              E = DAG.getEntryNode()->use_end(); I != E; ++I) {
      if (I->getOpcode() != AMDGPUISD::REGISTER_LOAD && I->getOpcode() != ISD::LOAD)
        continue;
      WorkList.push_back(*I);
    }
    SDValue Chain = LowerConstantInitializer(Init, GV, InitPtr, DAG.getEntryNode(), DAG);
    for (SmallVector<SDNode*, 8>::iterator I = WorkList.begin(),
                                           E = WorkList.end(); I != E; ++I) {
      SmallVector<SDValue, 8> Ops;
      Ops.push_back(Chain);
      for (unsigned i = 1; i < (*I)->getNumOperands(); ++i) {
        Ops.push_back((*I)->getOperand(i));
      }
      DAG.UpdateNodeOperands(*I, Ops);
    }
    return DAG.getZExtOrTrunc(InitPtr, SDLoc(Op),
        getPointerTy(AMDGPUAS::CONSTANT_ADDRESS));
  }
  }
}
SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SmallVector<SDValue, 8> Args;
  SDValue A = Op.getOperand(0);
  SDValue B = Op.getOperand(1);

  DAG.ExtractVectorElements(A, Args);
  DAG.ExtractVectorElements(B, Args);

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(), Args);
}
SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {

  SmallVector<SDValue, 8> Args;
  unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  EVT VT = Op.getValueType();
  DAG.ExtractVectorElements(Op.getOperand(0), Args, Start,
                            VT.getVectorNumElements());

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(), Args);
}
SDValue AMDGPUTargetLowering::LowerFrameIndex(SDValue Op,
                                              SelectionDAG &DAG) const {

  MachineFunction &MF = DAG.getMachineFunction();
  const AMDGPUFrameLowering *TFL =
   static_cast<const AMDGPUFrameLowering*>(getTargetMachine().getFrameLowering());

  FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Op);
  assert(FIN);

  unsigned FrameIndex = FIN->getIndex();
  unsigned Offset = TFL->getFrameIndexOffset(MF, FrameIndex);
  return DAG.getConstant(Offset * 4 * TFL->getStackWidth(MF),
                         Op.getValueType());
}
SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
    SelectionDAG &DAG) const {
  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  switch (IntrinsicID) {
    default: return Op;
    case AMDGPUIntrinsic::AMDIL_abs:
      return LowerIntrinsicIABS(Op, DAG);
    case AMDGPUIntrinsic::AMDIL_exp:
      return DAG.getNode(ISD::FEXP2, DL, VT, Op.getOperand(1));
    case AMDGPUIntrinsic::AMDGPU_lrp:
      return LowerIntrinsicLRP(Op, DAG);
    case AMDGPUIntrinsic::AMDIL_fraction:
      return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
    case AMDGPUIntrinsic::AMDIL_max:
      return DAG.getNode(AMDGPUISD::FMAX, DL, VT, Op.getOperand(1),
                                                  Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_imax:
      return DAG.getNode(AMDGPUISD::SMAX, DL, VT, Op.getOperand(1),
                                                  Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_umax:
      return DAG.getNode(AMDGPUISD::UMAX, DL, VT, Op.getOperand(1),
                                                  Op.getOperand(2));
    case AMDGPUIntrinsic::AMDIL_min:
      return DAG.getNode(AMDGPUISD::FMIN, DL, VT, Op.getOperand(1),
                                                  Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_imin:
      return DAG.getNode(AMDGPUISD::SMIN, DL, VT, Op.getOperand(1),
                                                  Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_umin:
      return DAG.getNode(AMDGPUISD::UMIN, DL, VT, Op.getOperand(1),
                                                  Op.getOperand(2));

    case AMDGPUIntrinsic::AMDGPU_umul24:
      return DAG.getNode(AMDGPUISD::MUL_U24, DL, VT,
                         Op.getOperand(1), Op.getOperand(2));

    case AMDGPUIntrinsic::AMDGPU_imul24:
      return DAG.getNode(AMDGPUISD::MUL_I24, DL, VT,
                         Op.getOperand(1), Op.getOperand(2));

    case AMDGPUIntrinsic::AMDGPU_umad24:
      return DAG.getNode(AMDGPUISD::MAD_U24, DL, VT,
                         Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_imad24:
      return DAG.getNode(AMDGPUISD::MAD_I24, DL, VT,
                         Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_bfe_i32:
      return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
                         Op.getOperand(1),
                         Op.getOperand(2),
                         Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_bfe_u32:
      return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
                         Op.getOperand(1),
                         Op.getOperand(2),
                         Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_bfi:
      return DAG.getNode(AMDGPUISD::BFI, DL, VT,
                         Op.getOperand(1),
                         Op.getOperand(2),
                         Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_bfm:
      return DAG.getNode(AMDGPUISD::BFM, DL, VT,
                         Op.getOperand(1),
                         Op.getOperand(2));

    case AMDGPUIntrinsic::AMDIL_round_nearest:
      return DAG.getNode(ISD::FRINT, DL, VT, Op.getOperand(1));
  }
}
/// IABS(a) = SMAX(sub(0, a), a)
SDValue AMDGPUTargetLowering::LowerIntrinsicIABS(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
                            Op.getOperand(1));

  return DAG.getNode(AMDGPUISD::SMAX, DL, VT, Neg, Op.getOperand(1));
}
/// Linear Interpolation
/// LRP(a, b, c) = muladd(a, b, (1 - a) * c)
SDValue AMDGPUTargetLowering::LowerIntrinsicLRP(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue OneSubA = DAG.getNode(ISD::FSUB, DL, VT,
                                DAG.getConstantFP(1.0f, MVT::f32),
                                Op.getOperand(1));
  SDValue OneSubAC = DAG.getNode(ISD::FMUL, DL, VT, OneSubA,
                                 Op.getOperand(3));
  return DAG.getNode(ISD::FADD, DL, VT,
      DAG.getNode(ISD::FMUL, DL, VT, Op.getOperand(1), Op.getOperand(2)),
      OneSubAC);
}
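// Worked example for the lowering above (assumed inputs, for illustration):
// LRP(0.25, 8.0, 4.0) = 0.25 * 8.0 + (1.0 - 0.25) * 4.0 = 2.0 + 3.0 = 5.0,
// i.e. the result blends from c toward b as a goes from 0 to 1.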
/// \brief Generate Min/Max node
SDValue AMDGPUTargetLowering::CombineMinMax(SDNode *N,
                                            SelectionDAG &DAG) const {
  SDLoc DL(N);
  EVT VT = N->getValueType(0);

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  SDValue True = N->getOperand(2);
  SDValue False = N->getOperand(3);
  SDValue CC = N->getOperand(4);

  if (VT != MVT::f32 ||
      !((LHS == True && RHS == False) || (LHS == False && RHS == True))) {
    return SDValue();
  }

  ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
  switch (CCOpcode) {
  case ISD::SETOEQ: case ISD::SETONE: case ISD::SETUNE: case ISD::SETNE:
  case ISD::SETUEQ: case ISD::SETEQ: case ISD::SETFALSE: case ISD::SETFALSE2:
  case ISD::SETTRUE: case ISD::SETTRUE2: case ISD::SETUO: case ISD::SETO:
    llvm_unreachable("Operation should already be optimised!");
  case ISD::SETULE: case ISD::SETULT: case ISD::SETOLE: case ISD::SETOLT:
  case ISD::SETLE: case ISD::SETLT: {
    unsigned Opc = (LHS == True) ? AMDGPUISD::FMIN : AMDGPUISD::FMAX;
    return DAG.getNode(Opc, DL, VT, LHS, RHS);
  }
  case ISD::SETGT: case ISD::SETGE: case ISD::SETUGE: case ISD::SETOGE:
  case ISD::SETUGT: case ISD::SETOGT: {
    unsigned Opc = (LHS == True) ? AMDGPUISD::FMAX : AMDGPUISD::FMIN;
    return DAG.getNode(Opc, DL, VT, LHS, RHS);
  }
  case ISD::SETCC_INVALID:
    llvm_unreachable("Invalid setcc condcode!");
  }
  return SDValue();
}
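// E.g. (illustrative): select_cc a, b, a, b, setolt becomes FMIN(a, b). The
// fold only fires when the compared values are exactly the selected values,
// so the whole select collapses to a single min/max node.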
SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue &Op,
                                              SelectionDAG &DAG) const {
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  EVT MemEltVT = Load->getMemoryVT().getVectorElementType();
  EVT EltVT = Op.getValueType().getVectorElementType();
  EVT PtrVT = Load->getBasePtr().getValueType();
  unsigned NumElts = Load->getMemoryVT().getVectorNumElements();
  SmallVector<SDValue, 8> Loads;

  SDLoc SL(Op);
  for (unsigned i = 0, e = NumElts; i != e; ++i) {
    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, Load->getBasePtr(),
                    DAG.getConstant(i * (MemEltVT.getSizeInBits() / 8), PtrVT));
    Loads.push_back(DAG.getExtLoad(Load->getExtensionType(), SL, EltVT,
                    Load->getChain(), Ptr,
                    MachinePointerInfo(Load->getMemOperand()->getValue()),
                    MemEltVT, Load->isVolatile(), Load->isNonTemporal(),
                    Load->getAlignment()));
  }
  return DAG.getNode(ISD::BUILD_VECTOR, SL, Op.getValueType(), Loads);
}
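// E.g. (illustrative): a <4 x i16> load at Ptr becomes four scalar i16
// extloads at Ptr+0, Ptr+2, Ptr+4 and Ptr+6, reassembled with BUILD_VECTOR.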
SDValue AMDGPUTargetLowering::MergeVectorStore(const SDValue &Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  EVT MemVT = Store->getMemoryVT();
  unsigned MemBits = MemVT.getSizeInBits();

  // Byte stores are really expensive, so if possible, try to pack 32-bit vector
  // truncating stores into an i32 store.
  // XXX: We could also handle other vector bitwidths.
  if (!MemVT.isVector() || MemBits > 32) {
    return SDValue();
  }

  SDLoc DL(Op);
  SDValue Value = Store->getValue();
  EVT VT = Value.getValueType();
  EVT ElemVT = VT.getVectorElementType();
  SDValue Ptr = Store->getBasePtr();
  EVT MemEltVT = MemVT.getVectorElementType();
  unsigned MemEltBits = MemEltVT.getSizeInBits();
  unsigned MemNumElements = MemVT.getVectorNumElements();
  unsigned PackedSize = MemVT.getStoreSizeInBits();
  SDValue Mask = DAG.getConstant((1 << MemEltBits) - 1, MVT::i32);

  assert(Value.getValueType().getScalarSizeInBits() >= 32);

  SDValue PackedValue;
  for (unsigned i = 0; i < MemNumElements; ++i) {
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ElemVT, Value,
                              DAG.getConstant(i, MVT::i32));
    Elt = DAG.getZExtOrTrunc(Elt, DL, MVT::i32);
    Elt = DAG.getNode(ISD::AND, DL, MVT::i32, Elt, Mask); // getZeroExtendInReg

    SDValue Shift = DAG.getConstant(MemEltBits * i, MVT::i32);
    Elt = DAG.getNode(ISD::SHL, DL, MVT::i32, Elt, Shift);

    if (i == 0) {
      PackedValue = Elt;
    } else {
      PackedValue = DAG.getNode(ISD::OR, DL, MVT::i32, PackedValue, Elt);
    }
  }

  if (PackedSize < 32) {
    EVT PackedVT = EVT::getIntegerVT(*DAG.getContext(), PackedSize);
    return DAG.getTruncStore(Store->getChain(), DL, PackedValue, Ptr,
                             Store->getMemOperand()->getPointerInfo(),
                             PackedVT,
                             Store->isNonTemporal(), Store->isVolatile(),
                             Store->getAlignment());
  }

  return DAG.getStore(Store->getChain(), DL, PackedValue, Ptr,
                      Store->getMemOperand()->getPointerInfo(),
                      Store->isVolatile(), Store->isNonTemporal(),
                      Store->getAlignment());
}
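// E.g. (illustrative): a <4 x i8> truncating store of <1, 2, 3, 4> packs into
// the single i32 0x04030201 (element i shifted left by 8*i), turning four
// byte stores into one dword store.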
SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  EVT MemEltVT = Store->getMemoryVT().getVectorElementType();
  EVT EltVT = Store->getValue().getValueType().getVectorElementType();
  EVT PtrVT = Store->getBasePtr().getValueType();
  unsigned NumElts = Store->getMemoryVT().getVectorNumElements();
  SDLoc SL(Op);

  SmallVector<SDValue, 8> Chains;

  for (unsigned i = 0, e = NumElts; i != e; ++i) {
    SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                              Store->getValue(), DAG.getConstant(i, MVT::i32));
    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT,
                              Store->getBasePtr(),
                              DAG.getConstant(i * (MemEltVT.getSizeInBits() / 8),
                                              PtrVT));
    Chains.push_back(DAG.getTruncStore(Store->getChain(), SL, Val, Ptr,
                     MachinePointerInfo(Store->getMemOperand()->getValue()),
                     MemEltVT, Store->isVolatile(), Store->isNonTemporal(),
                     Store->getAlignment()));
  }
  return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Chains);
}
SDValue AMDGPUTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  ISD::LoadExtType ExtType = Load->getExtensionType();
  EVT VT = Op.getValueType();
  EVT MemVT = Load->getMemoryVT();

  if (ExtType != ISD::NON_EXTLOAD && !VT.isVector() && VT.getSizeInBits() > 32) {
    // We can do the extload to 32-bits, and then need to separately extend to
    // 64-bits.
    SDValue ExtLoad32 = DAG.getExtLoad(ExtType, DL, MVT::i32,
                                       Load->getChain(),
                                       Load->getBasePtr(),
                                       MemVT,
                                       Load->getMemOperand());
    return DAG.getNode(ISD::getExtForLoadExtType(ExtType), DL, VT, ExtLoad32);
  }

  if (ExtType == ISD::NON_EXTLOAD && VT.getSizeInBits() < 32) {
    assert(VT == MVT::i1 && "Only i1 non-extloads expected");
    // FIXME: Copied from PPC
    // First, load into 32 bits, then truncate to 1 bit.

    SDValue Chain = Load->getChain();
    SDValue BasePtr = Load->getBasePtr();
    MachineMemOperand *MMO = Load->getMemOperand();

    SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
                                   BasePtr, MVT::i8, MMO);
    return DAG.getNode(ISD::TRUNCATE, DL, VT, NewLD);
  }

  // Lower loads of constant address space global variables.
  if (Load->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS &&
      isa<GlobalVariable>(
          GetUnderlyingObject(Load->getMemOperand()->getValue()))) {

    SDValue Ptr = DAG.getZExtOrTrunc(Load->getBasePtr(), DL,
        getPointerTy(AMDGPUAS::PRIVATE_ADDRESS));
    Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr,
        DAG.getConstant(2, MVT::i32));
    return DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
                       Load->getChain(), Ptr,
                       DAG.getTargetConstant(0, MVT::i32), Op.getOperand(2));
  }

  if (Load->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS ||
      ExtType == ISD::NON_EXTLOAD || Load->getMemoryVT().bitsGE(MVT::i32))
    return SDValue();

  SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Load->getBasePtr(),
                            DAG.getConstant(2, MVT::i32));
  SDValue Ret = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
                            Load->getChain(), Ptr,
                            DAG.getTargetConstant(0, MVT::i32),
                            Op.getOperand(2));
  SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32,
                                Load->getBasePtr(),
                                DAG.getConstant(0x3, MVT::i32));
  SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                 DAG.getConstant(3, MVT::i32));

  Ret = DAG.getNode(ISD::SRL, DL, MVT::i32, Ret, ShiftAmt);

  EVT MemEltVT = MemVT.getScalarType();
  if (ExtType == ISD::SEXTLOAD) {
    SDValue MemEltVTNode = DAG.getValueType(MemEltVT);
    return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, Ret, MemEltVTNode);
  }

  return DAG.getZeroExtendInReg(Ret, DL, MemEltVT);
}
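// Note on the private-address path above (illustrative): an i8 extload at
// byte offset 1 within a dword reads the whole register dword, shifts it
// right by 1 * 8 bits, and then sign- or zero-extends in-register from i8
// depending on the extension type.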
SDValue AMDGPUTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Result = AMDGPUTargetLowering::MergeVectorStore(Op, DAG);
  if (Result.getNode()) {
    return Result;
  }

  StoreSDNode *Store = cast<StoreSDNode>(Op);
  SDValue Chain = Store->getChain();
  if ((Store->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
       Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) &&
      Store->getValue().getValueType().isVector()) {
    return SplitVectorStore(Op, DAG);
  }

  EVT MemVT = Store->getMemoryVT();
  if (Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS &&
      MemVT.bitsLT(MVT::i32)) {
    unsigned Mask = 0;
    if (Store->getMemoryVT() == MVT::i8) {
      Mask = 0xff;
    } else if (Store->getMemoryVT() == MVT::i16) {
      Mask = 0xffff;
    }
    SDValue BasePtr = Store->getBasePtr();
    SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, BasePtr,
                              DAG.getConstant(2, MVT::i32));
    SDValue Dst = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, MVT::i32,
                              Chain, Ptr, DAG.getTargetConstant(0, MVT::i32));

    SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32, BasePtr,
                                  DAG.getConstant(0x3, MVT::i32));

    SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                   DAG.getConstant(3, MVT::i32));

    SDValue SExtValue = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i32,
                                    Store->getValue());

    SDValue MaskedValue = DAG.getZeroExtendInReg(SExtValue, DL, MemVT);

    SDValue ShiftedValue = DAG.getNode(ISD::SHL, DL, MVT::i32,
                                       MaskedValue, ShiftAmt);

    SDValue DstMask = DAG.getNode(ISD::SHL, DL, MVT::i32, DAG.getConstant(Mask, MVT::i32),
                                  ShiftAmt);
    DstMask = DAG.getNode(ISD::XOR, DL, MVT::i32, DstMask,
                          DAG.getConstant(0xffffffff, MVT::i32));
    Dst = DAG.getNode(ISD::AND, DL, MVT::i32, Dst, DstMask);

    SDValue Value = DAG.getNode(ISD::OR, DL, MVT::i32, Dst, ShiftedValue);
    return DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other,
                       Chain, Value, Ptr, DAG.getTargetConstant(0, MVT::i32));
  }
  return SDValue();
}
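// Note on the sub-dword private store above (illustrative): storing an i8
// value V at byte offset 2 of a dword reads the dword, clears bits [23:16]
// with DstMask, ORs in (zext(V) & 0xff) << 16, and writes the dword back.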
SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue Num = Op.getOperand(0);
  SDValue Den = Op.getOperand(1);

  // RCP = URECIP(Den) = 2^32 / Den + e
  // e is rounding error.
  SDValue RCP = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Den);

  // RCP_LO = umulo(RCP, Den)
  SDValue RCP_LO = DAG.getNode(ISD::UMULO, DL, VT, RCP, Den);

  // RCP_HI = mulhu(RCP, Den)
  SDValue RCP_HI = DAG.getNode(ISD::MULHU, DL, VT, RCP, Den);

  // NEG_RCP_LO = -RCP_LO
  SDValue NEG_RCP_LO = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
                                   RCP_LO);

  // ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO)
  SDValue ABS_RCP_LO = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, VT),
                                       NEG_RCP_LO, RCP_LO,
                                       ISD::SETEQ);
  // Calculate the rounding error from the URECIP instruction
  // E = mulhu(ABS_RCP_LO, RCP)
  SDValue E = DAG.getNode(ISD::MULHU, DL, VT, ABS_RCP_LO, RCP);

  // RCP_A_E = RCP + E
  SDValue RCP_A_E = DAG.getNode(ISD::ADD, DL, VT, RCP, E);

  // RCP_S_E = RCP - E
  SDValue RCP_S_E = DAG.getNode(ISD::SUB, DL, VT, RCP, E);

  // Tmp0 = (RCP_HI == 0 ? RCP_A_E : RCP_S_E)
  SDValue Tmp0 = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, VT),
                                 RCP_A_E, RCP_S_E,
                                 ISD::SETEQ);
  // Quotient = mulhu(Tmp0, Num)
  SDValue Quotient = DAG.getNode(ISD::MULHU, DL, VT, Tmp0, Num);

  // Num_S_Remainder = Quotient * Den
  SDValue Num_S_Remainder = DAG.getNode(ISD::UMULO, DL, VT, Quotient, Den);

  // Remainder = Num - Num_S_Remainder
  SDValue Remainder = DAG.getNode(ISD::SUB, DL, VT, Num, Num_S_Remainder);

  // Remainder_GE_Den = (Remainder >= Den ? -1 : 0)
  SDValue Remainder_GE_Den = DAG.getSelectCC(DL, Remainder, Den,
                                             DAG.getConstant(-1, VT),
                                             DAG.getConstant(0, VT),
                                             ISD::SETUGE);
  // Remainder_GE_Zero = (Num >= Num_S_Remainder ? -1 : 0)
  SDValue Remainder_GE_Zero = DAG.getSelectCC(DL, Num,
                                              Num_S_Remainder,
                                              DAG.getConstant(-1, VT),
                                              DAG.getConstant(0, VT),
                                              ISD::SETUGE);
  // Tmp1 = Remainder_GE_Den & Remainder_GE_Zero
  SDValue Tmp1 = DAG.getNode(ISD::AND, DL, VT, Remainder_GE_Den,
                             Remainder_GE_Zero);

  // Calculate Division result:

  // Quotient_A_One = Quotient + 1
  SDValue Quotient_A_One = DAG.getNode(ISD::ADD, DL, VT, Quotient,
                                       DAG.getConstant(1, VT));

  // Quotient_S_One = Quotient - 1
  SDValue Quotient_S_One = DAG.getNode(ISD::SUB, DL, VT, Quotient,
                                       DAG.getConstant(1, VT));

  // Div = (Tmp1 == 0 ? Quotient : Quotient_A_One)
  SDValue Div = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, VT),
                                Quotient, Quotient_A_One, ISD::SETEQ);

  // Div = (Remainder_GE_Zero == 0 ? Quotient_S_One : Div)
  Div = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, VT),
                        Quotient_S_One, Div, ISD::SETEQ);

  // Calculate Rem result:

  // Remainder_S_Den = Remainder - Den
  SDValue Remainder_S_Den = DAG.getNode(ISD::SUB, DL, VT, Remainder, Den);

  // Remainder_A_Den = Remainder + Den
  SDValue Remainder_A_Den = DAG.getNode(ISD::ADD, DL, VT, Remainder, Den);

  // Rem = (Tmp1 == 0 ? Remainder : Remainder_S_Den)
  SDValue Rem = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, VT),
                                Remainder, Remainder_S_Den, ISD::SETEQ);

  // Rem = (Remainder_GE_Zero == 0 ? Remainder_A_Den : Rem)
  Rem = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, VT),
                        Remainder_A_Den, Rem, ISD::SETEQ);

  SDValue Ops[2] = {
    Div,
    Rem
  };
  return DAG.getMergeValues(Ops, DL);
}
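// Sanity check of the sequence above with small assumed values: for Num = 100
// and Den = 7, the reciprocal path gives a Quotient near 14; the final
// selects then adjust it by +/-1 using Remainder_GE_Den and
// Remainder_GE_Zero, ending with Div = 14 and Rem = 100 - 14 * 7 = 2.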
SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue S0 = Op.getOperand(0);
  SDLoc DL(Op);
  if (Op.getValueType() != MVT::f32 || S0.getValueType() != MVT::i64)
    return SDValue();

  // f32 uint_to_fp i64
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
                           DAG.getConstant(0, MVT::i32));
  SDValue FloatLo = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Lo);
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
                           DAG.getConstant(1, MVT::i32));
  SDValue FloatHi = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Hi);
  FloatHi = DAG.getNode(ISD::FMUL, DL, MVT::f32, FloatHi,
                        DAG.getConstantFP(4294967296.0f, MVT::f32)); // 2^32
  return DAG.getNode(ISD::FADD, DL, MVT::f32, FloatLo, FloatHi);
}
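// E.g. (illustrative): S0 = 2^32 + 3 splits into Lo = 3 and Hi = 1, giving
// 3.0f + 1.0f * 4294967296.0f, i.e. the i64 value reconstructed in f32
// (up to f32 rounding).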
SDValue AMDGPUTargetLowering::ExpandSIGN_EXTEND_INREG(SDValue Op,
                                                      unsigned BitsDiff,
                                                      SelectionDAG &DAG) const {
  MVT VT = Op.getSimpleValueType();
  SDLoc DL(Op);
  SDValue Shift = DAG.getConstant(BitsDiff, VT);
  // Shift left by 'Shift' bits.
  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, Op.getOperand(0), Shift);
  // Signed shift right by 'Shift' bits.
  return DAG.getNode(ISD::SRA, DL, VT, Shl, Shift);
}
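// E.g. sign-extending from i8 within an i32 (BitsDiff = 24, illustrative):
// 0x000000FF << 24 = 0xFF000000, then an arithmetic >> 24 yields 0xFFFFFFFF,
// i.e. -1, as expected for the byte 0xFF.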
SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                     SelectionDAG &DAG) const {
  EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
  MVT VT = Op.getSimpleValueType();
  MVT ScalarVT = VT.getScalarType();

  if (!VT.isVector())
    return SDValue();

  SDValue Src = Op.getOperand(0);
  SDLoc DL(Op);

  // TODO: Don't scalarize on Evergreen?
  unsigned NElts = VT.getVectorNumElements();
  SmallVector<SDValue, 8> Args;
  DAG.ExtractVectorElements(Src, Args, 0, NElts);

  SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType());
  for (unsigned I = 0; I < NElts; ++I)
    Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp);

  return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Args);
}
//===----------------------------------------------------------------------===//
// Custom DAG optimizations
//===----------------------------------------------------------------------===//
static bool isU24(SDValue Op, SelectionDAG &DAG) {
  APInt KnownZero, KnownOne;
  EVT VT = Op.getValueType();
  DAG.computeKnownBits(Op, KnownZero, KnownOne);

  return (VT.getSizeInBits() - KnownZero.countLeadingOnes()) <= 24;
}
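// E.g. (illustrative): if computeKnownBits proves the top 8 bits of an i32
// are zero, then 32 - 8 = 24 <= 24 and the value fits the unsigned 24-bit
// multiplier.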
static bool isI24(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();

  // In order for this to be a signed 24-bit value, bit 23 must be a sign bit.
  return VT.getSizeInBits() >= 24 && // Types less than 24-bit should be treated
                                     // as unsigned 24-bit values.
         (VT.getSizeInBits() - DAG.ComputeNumSignBits(Op)) < 24;
}
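// E.g. (illustrative): an i32 known to be sign-extended from i16 has at
// least 17 sign bits, so 32 - 17 = 15 < 24 and it fits the signed 24-bit
// multiplier.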
static void simplifyI24(SDValue Op, TargetLowering::DAGCombinerInfo &DCI) {

  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT = Op.getValueType();

  APInt Demanded = APInt::getLowBitsSet(VT.getSizeInBits(), 24);
  APInt KnownZero, KnownOne;
  TargetLowering::TargetLoweringOpt TLO(DAG, true, true);
  if (TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO))
    DCI.CommitTargetLoweringOpt(TLO);
}
template <typename IntTy>
static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0,
                               uint32_t Offset, uint32_t Width) {
  if (Width + Offset < 32) {
    IntTy Result = (Src0 << (32 - Offset - Width)) >> (32 - Width);
    return DAG.getConstant(Result, MVT::i32);
  }

  return DAG.getConstant(Src0 >> Offset, MVT::i32);
}
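// Worked example (assumed values): for Src0 = 0x0000CAFE, Offset = 8 and
// Width = 8, the first branch computes (0x0000CAFE << 16) >> 24 = 0xCA, the
// 8-bit field starting at bit 8. The int32_t instantiation uses an arithmetic
// right shift, so a field with its top bit set comes back sign-extended.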
SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  switch(N->getOpcode()) {
  default: break;
  case ISD::MUL: {
    EVT VT = N->getValueType(0);
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue Mul;

    // FIXME: Add support for 24-bit multiply with 64-bit output on SI.
    if (VT.isVector() || VT.getSizeInBits() > 32)
      break;

    if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
      N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
      N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
      Mul = DAG.getNode(AMDGPUISD::MUL_U24, DL, MVT::i32, N0, N1);
    } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
      N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
      N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
      Mul = DAG.getNode(AMDGPUISD::MUL_I24, DL, MVT::i32, N0, N1);
    } else {
      break;
    }

    // We need to use sext even for MUL_U24, because MUL_U24 is used
    // for signed multiply of 8 and 16-bit types.
    SDValue Reg = DAG.getSExtOrTrunc(Mul, DL, VT);
    return Reg;
  }
  case AMDGPUISD::MUL_I24:
  case AMDGPUISD::MUL_U24: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    simplifyI24(N0, DCI);
    simplifyI24(N1, DCI);
    return SDValue();
  }
  case ISD::SELECT_CC: {
    return CombineMinMax(N, DAG);
  }
  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    assert(!N->getValueType(0).isVector() &&
           "Vector handling of BFE not implemented");
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
    if (!Width)
      break;

    uint32_t WidthVal = Width->getZExtValue() & 0x1f;
    if (WidthVal == 0)
      return DAG.getConstant(0, MVT::i32);

    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (!Offset)
      break;

    SDValue BitsFrom = N->getOperand(0);
    uint32_t OffsetVal = Offset->getZExtValue() & 0x1f;

    bool Signed = N->getOpcode() == AMDGPUISD::BFE_I32;

    if (OffsetVal == 0) {
      // This is already sign / zero extended, so try to fold away extra BFEs.
      unsigned SignBits = Signed ? (32 - WidthVal + 1) : (32 - WidthVal);

      unsigned OpSignBits = DAG.ComputeNumSignBits(BitsFrom);
      if (OpSignBits >= SignBits)
        return BitsFrom;

      EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), WidthVal);
      if (Signed) {
        // This is a sign_extend_inreg. Replace it to take advantage of existing
        // DAG Combines. If not eliminated, we will match back to BFE during
        // selection.

        // TODO: The sext_inreg of extended types ends up here, although we
        // could handle them in a single BFE.
        return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, BitsFrom,
                           DAG.getValueType(SmallVT));
      }

      return DAG.getZeroExtendInReg(BitsFrom, DL, SmallVT);
    }

    if (ConstantSDNode *Val = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      if (Signed) {
        return constantFoldBFE<int32_t>(DAG,
                                        Val->getSExtValue(),
                                        OffsetVal,
                                        WidthVal);
      }

      return constantFoldBFE<uint32_t>(DAG,
                                       Val->getZExtValue(),
                                       OffsetVal,
                                       WidthVal);
    }

    APInt Demanded = APInt::getBitsSet(32,
                                       OffsetVal,
                                       OffsetVal + WidthVal);

    if ((OffsetVal + WidthVal) >= 32) {
      SDValue ShiftVal = DAG.getConstant(OffsetVal, MVT::i32);
      return DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, MVT::i32,
                         BitsFrom, ShiftVal);
    }

    APInt KnownZero, KnownOne;
    TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                          !DCI.isBeforeLegalizeOps());
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    if (TLO.ShrinkDemandedConstant(BitsFrom, Demanded) ||
        TLI.SimplifyDemandedBits(BitsFrom, Demanded, KnownZero, KnownOne, TLO)) {
      DCI.CommitTargetLoweringOpt(TLO);
    }

    break;
  }
  }

  return SDValue();
}
//===----------------------------------------------------------------------===//
// Helper Functions
//===----------------------------------------------------------------------===//
void AMDGPUTargetLowering::getOriginalFunctionArgs(
                               SelectionDAG &DAG,
                               const Function *F,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               SmallVectorImpl<ISD::InputArg> &OrigIns) const {

  for (unsigned i = 0, e = Ins.size(); i < e; ++i) {
    if (Ins[i].ArgVT == Ins[i].VT) {
      OrigIns.push_back(Ins[i]);
      continue;
    }

    EVT VT;
    if (Ins[i].ArgVT.isVector() && !Ins[i].VT.isVector()) {
      // Vector has been split into scalars.
      VT = Ins[i].ArgVT.getVectorElementType();
    } else if (Ins[i].VT.isVector() && Ins[i].ArgVT.isVector() &&
               Ins[i].ArgVT.getVectorElementType() !=
               Ins[i].VT.getVectorElementType()) {
      // Vector elements have been promoted.
      VT = Ins[i].ArgVT;
    } else {
      // Vector has been split into smaller vectors.
      VT = Ins[i].VT;
    }

    ISD::InputArg Arg(Ins[i].Flags, VT, VT, Ins[i].Used,
                      Ins[i].OrigArgIndex, Ins[i].PartOffset);
    OrigIns.push_back(Arg);
  }
}
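// E.g. (illustrative): an original <4 x i32> argument that legalization
// split into four i32 pieces is rebuilt here as InputArgs of the element
// type i32, so callers can reason about the pre-legalization signature.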
bool AMDGPUTargetLowering::isHWTrueValue(SDValue Op) const {
  if (ConstantFPSDNode * CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->isExactlyValue(1.0);
  }
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
    return C->isAllOnesValue();
  }
  return false;
}

bool AMDGPUTargetLowering::isHWFalseValue(SDValue Op) const {
  if (ConstantFPSDNode * CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->getValueAPF().isZero();
  }
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
    return C->isNullValue();
  }
  return false;
}
SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
                                                  const TargetRegisterClass *RC,
                                                  unsigned Reg, EVT VT) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned VirtualRegister;
  if (!MRI.isLiveIn(Reg)) {
    VirtualRegister = MRI.createVirtualRegister(RC);
    MRI.addLiveIn(Reg, VirtualRegister);
  } else {
    VirtualRegister = MRI.getLiveInVirtReg(Reg);
  }
  return DAG.getRegister(VirtualRegister, VT);
}
#define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;

const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return nullptr;
  // AMDIL DAG nodes
  NODE_NAME_CASE(CALL);
  NODE_NAME_CASE(UMUL);
  NODE_NAME_CASE(DIV_INF);
  NODE_NAME_CASE(RET_FLAG);
  NODE_NAME_CASE(BRANCH_COND);

  // AMDGPU DAG nodes
  NODE_NAME_CASE(DWORDADDR)
  NODE_NAME_CASE(FRACT)
  NODE_NAME_CASE(FMAX)
  NODE_NAME_CASE(SMAX)
  NODE_NAME_CASE(UMAX)
  NODE_NAME_CASE(FMIN)
  NODE_NAME_CASE(SMIN)
  NODE_NAME_CASE(UMIN)
  NODE_NAME_CASE(BFE_U32)
  NODE_NAME_CASE(BFE_I32)
  NODE_NAME_CASE(BFI)
  NODE_NAME_CASE(BFM)
  NODE_NAME_CASE(MUL_U24)
  NODE_NAME_CASE(MUL_I24)
  NODE_NAME_CASE(MAD_U24)
  NODE_NAME_CASE(MAD_I24)
  NODE_NAME_CASE(URECIP)
  NODE_NAME_CASE(DOT4)
  NODE_NAME_CASE(EXPORT)
  NODE_NAME_CASE(CONST_ADDRESS)
  NODE_NAME_CASE(REGISTER_LOAD)
  NODE_NAME_CASE(REGISTER_STORE)
  NODE_NAME_CASE(LOAD_CONSTANT)
  NODE_NAME_CASE(LOAD_INPUT)
  NODE_NAME_CASE(SAMPLE)
  NODE_NAME_CASE(SAMPLEB)
  NODE_NAME_CASE(SAMPLED)
  NODE_NAME_CASE(SAMPLEL)
  NODE_NAME_CASE(STORE_MSKOR)
  NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
  }
}
static void computeKnownBitsForMinMax(const SDValue Op0,
                                      const SDValue Op1,
                                      APInt &KnownZero,
                                      APInt &KnownOne,
                                      const SelectionDAG &DAG,
                                      unsigned Depth) {
  APInt Op0Zero, Op0One;
  APInt Op1Zero, Op1One;
  DAG.computeKnownBits(Op0, Op0Zero, Op0One, Depth);
  DAG.computeKnownBits(Op1, Op1Zero, Op1One, Depth);

  KnownZero = Op0Zero & Op1Zero;
  KnownOne = Op0One & Op1One;
}
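// E.g. (illustrative): if both min/max operands are known to have their top
// 16 bits clear, the result does too, since a bit is known in the result
// only when it is known the same way in both inputs.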
void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
  const SDValue Op,
  APInt &KnownZero,
  APInt &KnownOne,
  const SelectionDAG &DAG,
  unsigned Depth) const {

  KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0); // Don't know anything.

  unsigned Opc = Op.getOpcode();

  switch (Opc) {
  default:
    break;
  case ISD::INTRINSIC_WO_CHAIN: {
    // FIXME: The intrinsic should just use the node.
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
    case AMDGPUIntrinsic::AMDGPU_imax:
    case AMDGPUIntrinsic::AMDGPU_umax:
    case AMDGPUIntrinsic::AMDGPU_imin:
    case AMDGPUIntrinsic::AMDGPU_umin:
      computeKnownBitsForMinMax(Op.getOperand(1), Op.getOperand(2),
                                KnownZero, KnownOne, DAG, Depth);
      break;
    default:
      break;
    }

    break;
  }
  case AMDGPUISD::SMAX:
  case AMDGPUISD::UMAX:
  case AMDGPUISD::SMIN:
  case AMDGPUISD::UMIN:
    computeKnownBitsForMinMax(Op.getOperand(0), Op.getOperand(1),
                              KnownZero, KnownOne, DAG, Depth);
    break;

  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    ConstantSDNode *CWidth = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    if (!CWidth)
      return;

    unsigned BitWidth = 32;
    uint32_t Width = CWidth->getZExtValue() & 0x1f;
    if (Width == 0) {
      KnownZero = APInt::getAllOnesValue(BitWidth);
      KnownOne = APInt::getNullValue(BitWidth);
      return;
    }

    // FIXME: This could do a lot more. If offset is 0, should be the same as
    // sign_extend_inreg implementation, but that involves duplicating it.
    if (Opc == AMDGPUISD::BFE_I32)
      KnownOne = APInt::getHighBitsSet(BitWidth, BitWidth - Width);
    else
      KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - Width);

    break;
  }
  }
}
unsigned AMDGPUTargetLowering::ComputeNumSignBitsForTargetNode(
  SDValue Op,
  const SelectionDAG &DAG,
  unsigned Depth) const {
  switch (Op.getOpcode()) {
  case AMDGPUISD::BFE_I32: {
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    if (!Width)
      return 1;

    unsigned SignBits = 32 - Width->getZExtValue() + 1;
    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    if (!Offset || !Offset->isNullValue())
      return SignBits;

    // TODO: Could probably figure something out with non-0 offsets.
    unsigned Op0SignBits = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
    return std::max(SignBits, Op0SignBits);
  }

  case AMDGPUISD::BFE_U32: {
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    return Width ? 32 - (Width->getZExtValue() & 0x1f) : 1;
  }

  default:
    return 1;
  }
}