//===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This is the parent TargetLowering class for hardware code gen
/// targets.
//
//===----------------------------------------------------------------------===//
#include "AMDGPUISelLowering.h"
#include "AMDGPUDiagnosticInfoUnsupported.h"
#include "AMDGPUFrameLowering.h"
#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "R600MachineFunctionInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DataLayout.h"

using namespace llvm;
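
// Helper used by the generated calling convention code: assign the argument a
// stack slot at its original alignment and record the location.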
static bool allocateStack(unsigned ValNo, MVT ValVT, MVT LocVT,
                          CCValAssign::LocInfo LocInfo,
                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
  unsigned Offset = State.AllocateStack(ValVT.getStoreSize(),
                                        ArgFlags.getOrigAlign());
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));

  return true;
}

#include "AMDGPUGenCallingConv.inc"

// Find a larger type to do a load / store of a vector with.
EVT AMDGPUTargetLowering::getEquivalentMemType(LLVMContext &Ctx, EVT VT) {
  unsigned StoreSize = VT.getStoreSizeInBits();
  if (StoreSize <= 32)
    return EVT::getIntegerVT(Ctx, StoreSize);

  assert(StoreSize % 32 == 0 && "Store size not a multiple of 32");
  return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
}

// Type for a vector that will be loaded to.
EVT AMDGPUTargetLowering::getEquivalentLoadRegType(LLVMContext &Ctx, EVT VT) {
  unsigned StoreSize = VT.getStoreSizeInBits();
  if (StoreSize <= 32)
    return EVT::getIntegerVT(Ctx, 32);

  return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
}

AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM,
                                           const AMDGPUSubtarget &STI)
  : TargetLowering(TM), Subtarget(&STI) {
  setOperationAction(ISD::Constant, MVT::i32, Legal);
  setOperationAction(ISD::Constant, MVT::i64, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f64, Legal);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRIND, MVT::Other, Expand);

  // This is totally unsupported, just custom lower to produce an error.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);

  // We need to custom lower some of the intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // Library functions. These default to Expand, but we have instructions
  // for them.
  setOperationAction(ISD::FCEIL, MVT::f32, Legal);
  setOperationAction(ISD::FEXP2, MVT::f32, Legal);
  setOperationAction(ISD::FPOW, MVT::f32, Legal);
  setOperationAction(ISD::FLOG2, MVT::f32, Legal);
  setOperationAction(ISD::FABS, MVT::f32, Legal);
  setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
  setOperationAction(ISD::FRINT, MVT::f32, Legal);
  setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
  setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
  setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);

  setOperationAction(ISD::FROUND, MVT::f32, Custom);
  setOperationAction(ISD::FROUND, MVT::f64, Custom);

  setOperationAction(ISD::FREM, MVT::f32, Custom);
  setOperationAction(ISD::FREM, MVT::f64, Custom);

  // v_mad_f32 does not support denormals according to some sources.
  if (!Subtarget->hasFP32Denormals())
    setOperationAction(ISD::FMAD, MVT::f32, Legal);

  // Expand to fneg + fadd.
  setOperationAction(ISD::FSUB, MVT::f64, Expand);

  // Lower floating point store/load to integer store/load to reduce the number
  // of patterns in tablegen.
  setOperationAction(ISD::STORE, MVT::f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);

  setOperationAction(ISD::STORE, MVT::v2f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v4f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::v8f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::STORE, MVT::v16f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::STORE, MVT::f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::f64, MVT::i64);

  setOperationAction(ISD::STORE, MVT::v2f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f64, MVT::v2i64);

  // Custom lowering of vector stores is required for local address space
  // stores.
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);

  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Custom);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Custom);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Custom);

  // XXX: This can be changed to Custom, once ExpandVectorStores can
  // handle 64-bit stores.
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);

  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i64, MVT::i1, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
  setTruncStoreAction(MVT::v4i64, MVT::v4i1, Expand);

  setOperationAction(ISD::LOAD, MVT::f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);

  setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::LOAD, MVT::f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f64, MVT::i64);

  setOperationAction(ISD::LOAD, MVT::v2f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f64, MVT::v2i64);

  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i32, Custom);

  // There are no 64-bit extloads. These should be done as a 32-bit extload and
  // an extension to 64-bit.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, MVT::i64, VT, Expand);
    setLoadExtAction(ISD::SEXTLOAD, MVT::i64, VT, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, VT, Expand);
  }

  for (MVT VT : MVT::integer_vector_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i8, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i8, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i16, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i16, Expand);
  }

  setOperationAction(ISD::BR_CC, MVT::i1, Expand);

  if (Subtarget->getGeneration() < AMDGPUSubtarget::SEA_ISLANDS) {
    setOperationAction(ISD::FCEIL, MVT::f64, Custom);
    setOperationAction(ISD::FTRUNC, MVT::f64, Custom);
    setOperationAction(ISD::FRINT, MVT::f64, Custom);
    setOperationAction(ISD::FFLOOR, MVT::f64, Custom);
  }

  if (!Subtarget->hasBFI()) {
    // fcopysign can be done in a single instruction with BFI.
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  }

  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8f16, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f16, Expand);

  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::v2f32, MVT::v2f16, Expand);
  setTruncStoreAction(MVT::v4f32, MVT::v4f16, Expand);
  setTruncStoreAction(MVT::v8f32, MVT::v8f16, Expand);

  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);

    // GPU does not have divrem function for signed or unsigned.
    setOperationAction(ISD::SDIVREM, VT, Custom);
    setOperationAction(ISD::UDIVREM, VT, Custom);

    // GPU does not have [S|U]MUL_LOHI functions as a single instruction.
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);

    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
  }

  if (!Subtarget->hasBCNT(32))
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);

  if (!Subtarget->hasBCNT(64))
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);

  // The hardware supports 32-bit ROTR, but not ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i64, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  setOperationAction(ISD::MUL, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i64, Expand);
  setOperationAction(ISD::MULHS, MVT::i64, Expand);
  setOperationAction(ISD::UDIV, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);

  setOperationAction(ISD::SMIN, MVT::i32, Legal);
  setOperationAction(ISD::UMIN, MVT::i32, Legal);
  setOperationAction(ISD::SMAX, MVT::i32, Legal);
  setOperationAction(ISD::UMAX, MVT::i32, Legal);

  if (Subtarget->hasFFBH())
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
  else
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);

  if (!Subtarget->hasFFBL())
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);

  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);

  setOperationAction(ISD::CTLZ, MVT::i64, Custom);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);

  static const MVT::SimpleValueType VectorIntTypes[] = {
    MVT::v2i32, MVT::v4i32
  };

  for (MVT VT : VectorIntTypes) {
    // Expand the following operations for the current type by default.
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::AND, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::OR, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Custom);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::ADDC, VT, Expand);
    setOperationAction(ISD::SUBC, VT, Expand);
    setOperationAction(ISD::ADDE, VT, Expand);
    setOperationAction(ISD::SUBE, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    setOperationAction(ISD::XOR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
  }

  static const MVT::SimpleValueType FloatVectorTypes[] = {
    MVT::v2f32, MVT::v4f32
  };

  for (MVT VT : FloatVectorTypes) {
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FMINNUM, VT, Expand);
    setOperationAction(ISD::FMAXNUM, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FREM, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::FMA, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::FNEG, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
  }

  setOperationAction(ISD::FNEARBYINT, MVT::f32, Custom);
  setOperationAction(ISD::FNEARBYINT, MVT::f64, Custom);

  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::SELECT);
  setTargetDAGCombine(ISD::SELECT_CC);
  setTargetDAGCombine(ISD::STORE);

  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);

  setBooleanContents(ZeroOrNegativeOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  setSchedulingPreference(Sched::RegPressure);
  setJumpIsExpensive(true);

  // SI at least has hardware support for floating point exceptions, but no way
  // of using or handling them is implemented. They are also optional in OpenCL
  // (Section 7.3).
  setHasFloatingPointExceptions(false);

  setSelectIsExpensive(false);
  PredictableSelectIsExpensive = false;

  setFsqrtIsCheap(true);

  // We want to find all load dependencies for long chains of stores to enable
  // merging into very wide vectors. The problem is with vectors with > 4
  // elements. MergeConsecutiveStores will attempt to merge these because x8/x16
  // vectors are a legal type, even though we have to split the loads
  // usually. When we can more precisely specify load legality per address
  // space, we should be able to make FindBetterChain/MergeConsecutiveStores
  // smarter so that they can figure out what to do in 2 iterations without all
  // N > 4 stores on the same chain.
  GatherAllAliasesMaxDepth = 16;

  // FIXME: Need to really handle these.
  MaxStoresPerMemcpy = 4096;
  MaxStoresPerMemmove = 4096;
  MaxStoresPerMemset = 4096;
}

//===----------------------------------------------------------------------===//
// Target Information
//===----------------------------------------------------------------------===//

MVT AMDGPUTargetLowering::getVectorIdxTy(const DataLayout &) const {
  return MVT::i32;
}

bool AMDGPUTargetLowering::isSelectSupported(SelectSupportKind SelType) const {
  return true;
}

// The backend supports 32 and 64 bit floating point immediates.
// FIXME: Why are we reporting vectors of FP immediates as legal?
bool AMDGPUTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  EVT ScalarVT = VT.getScalarType();
  return (ScalarVT == MVT::f32 || ScalarVT == MVT::f64);
}

// We don't want to shrink f64 / f32 constants.
bool AMDGPUTargetLowering::ShouldShrinkFPConstant(EVT VT) const {
  EVT ScalarVT = VT.getScalarType();
  return (ScalarVT != MVT::f32 && ScalarVT != MVT::f64);
}

bool AMDGPUTargetLowering::shouldReduceLoadWidth(SDNode *N,
                                                 ISD::LoadExtType,
                                                 EVT NewVT) const {
  unsigned NewSize = NewVT.getStoreSizeInBits();

  // If we are reducing to a 32-bit load, this is always better.
  if (NewSize == 32)
    return true;

  EVT OldVT = N->getValueType(0);
  unsigned OldSize = OldVT.getStoreSizeInBits();

  // Don't produce extloads from sub 32-bit types. SI doesn't have scalar
  // extloads, so doing one requires using a buffer_load. In cases where we
  // still couldn't use a scalar load, using the wider load shouldn't really
  // make a difference.

  // If the old size already had to be an extload, there's no harm in continuing
  // to reduce the width.
  return (OldSize < 32);
}

bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy,
                                                   EVT CastTy) const {
  if (LoadTy.getSizeInBits() != CastTy.getSizeInBits())
    return true;

  unsigned LScalarSize = LoadTy.getScalarType().getSizeInBits();
  unsigned CastScalarSize = CastTy.getScalarType().getSizeInBits();

  return ((LScalarSize <= CastScalarSize) ||
          (CastScalarSize >= 32) ||
          (LScalarSize < 32));
}

// SI+ has instructions for cttz / ctlz for 32-bit values. This is probably also
// profitable with the expansion for 64-bit since it's generally good to
// speculate things.
// FIXME: These should really have the size as a parameter.
bool AMDGPUTargetLowering::isCheapToSpeculateCttz() const {
  return true;
}

bool AMDGPUTargetLowering::isCheapToSpeculateCtlz() const {
  return true;
}

//===---------------------------------------------------------------------===//
// Target Properties
//===---------------------------------------------------------------------===//

bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32 || VT == MVT::f64;
}

bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32 || VT == MVT::f64;
}

bool AMDGPUTargetLowering::storeOfVectorConstantIsCheap(EVT MemVT,
                                                        unsigned NumElem,
                                                        unsigned AS) const {
  return true;
}

bool AMDGPUTargetLowering::aggressivelyPreferBuildVectorSources(EVT VecVT) const {
  // There are few operations which truly have vector input operands. Any vector
  // operation is going to involve operations on each component, and a
  // build_vector will be a copy per element, so it always makes sense to use a
  // build_vector input in place of the extracted element to avoid a copy into a
  // local register.

  // We should probably only do this if all users are extracts only, but this
  // should be the common case.
  return true;
}

bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
  // Truncate is just accessing a subregister.
  return Dest.bitsLT(Source) && (Dest.getSizeInBits() % 32 == 0);
}

bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
  // Truncate is just accessing a subregister.
  return Dest->getPrimitiveSizeInBits() < Source->getPrimitiveSizeInBits() &&
         (Dest->getPrimitiveSizeInBits() % 32 == 0);
}

bool AMDGPUTargetLowering::isZExtFree(Type *Src, Type *Dest) const {
  unsigned SrcSize = Src->getScalarSizeInBits();
  unsigned DestSize = Dest->getScalarSizeInBits();

  return SrcSize == 32 && DestSize == 64;
}

bool AMDGPUTargetLowering::isZExtFree(EVT Src, EVT Dest) const {
  // Any register load of a 64-bit value really requires 2 32-bit moves. For all
  // practical purposes, the extra mov 0 to load a 64-bit is free. As used,
  // this will enable reducing 64-bit operations to 32-bit, which is always
  // good.
  return Src == MVT::i32 && Dest == MVT::i64;
}

bool AMDGPUTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  return isZExtFree(Val.getValueType(), VT2);
}

bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
  // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
  // limited number of native 64-bit operations. Shrinking an operation to fit
  // in a single 32-bit register should always be helpful. As currently used,
  // this is much less general than the name suggests, and is only used in
  // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits is
  // not profitable, and may actually be harmful.
  return SrcVT.getSizeInBits() > 32 && DestVT.getSizeInBits() == 32;
}

//===---------------------------------------------------------------------===//
// TargetLowering Callbacks
//===---------------------------------------------------------------------===//

void AMDGPUTargetLowering::AnalyzeFormalArguments(CCState &State,
                             const SmallVectorImpl<ISD::InputArg> &Ins) const {
  State.AnalyzeFormalArguments(Ins, CC_AMDGPU);
}

void AMDGPUTargetLowering::AnalyzeReturn(CCState &State,
                           const SmallVectorImpl<ISD::OutputArg> &Outs) const {
  State.AnalyzeReturn(Outs, RetCC_SI);
}

SDValue AMDGPUTargetLowering::LowerReturn(
                                     SDValue Chain,
                                     CallingConv::ID CallConv,
                                     bool isVarArg,
                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
                                     const SmallVectorImpl<SDValue> &OutVals,
                                     SDLoc DL, SelectionDAG &DAG) const {
  return DAG.getNode(AMDGPUISD::RET_FLAG, DL, MVT::Other, Chain);
}

//===---------------------------------------------------------------------===//
// Target specific lowering
//===---------------------------------------------------------------------===//

SDValue AMDGPUTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                        SmallVectorImpl<SDValue> &InVals) const {
  SDValue Callee = CLI.Callee;
  SelectionDAG &DAG = CLI.DAG;

  const Function &Fn = *DAG.getMachineFunction().getFunction();

  StringRef FuncName("<unknown>");

  if (const ExternalSymbolSDNode *G = dyn_cast<ExternalSymbolSDNode>(Callee))
    FuncName = G->getSymbol();
  else if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    FuncName = G->getGlobal()->getName();

  DiagnosticInfoUnsupported NoCalls(Fn, "call to function " + FuncName);
  DAG.getContext()->diagnose(NoCalls);
  return SDValue();
}

SDValue AMDGPUTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                                      SelectionDAG &DAG) const {
  const Function &Fn = *DAG.getMachineFunction().getFunction();

  DiagnosticInfoUnsupported NoDynamicAlloca(Fn, "dynamic alloca");
  DAG.getContext()->diagnose(NoDynamicAlloca);
  return SDValue();
}

SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op,
                                             SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    Op.getNode()->dump();
    llvm_unreachable("Custom lowering code for this "
                     "instruction is not implemented yet!");
    break;
  case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
  case ISD::FrameIndex: return LowerFrameIndex(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
  case ISD::SDIVREM: return LowerSDIVREM(Op, DAG);
  case ISD::FREM: return LowerFREM(Op, DAG);
  case ISD::FCEIL: return LowerFCEIL(Op, DAG);
  case ISD::FTRUNC: return LowerFTRUNC(Op, DAG);
  case ISD::FRINT: return LowerFRINT(Op, DAG);
  case ISD::FNEARBYINT: return LowerFNEARBYINT(Op, DAG);
  case ISD::FROUND: return LowerFROUND(Op, DAG);
  case ISD::FFLOOR: return LowerFFLOOR(Op, DAG);
  case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
  case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
  case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF:
    return LowerCTLZ(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
  }
  return Op;
}

void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N,
                                              SmallVectorImpl<SDValue> &Results,
                                              SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::SIGN_EXTEND_INREG:
    // Different parts of legalization seem to interpret which type of
    // sign_extend_inreg is the one to check for custom lowering. The extended
    // from type is what really matters, but some places check for custom
    // lowering of the result type. This results in trying to use
    // ReplaceNodeResults to sext_in_reg to an illegal type, so we'll just do
    // nothing here and let the illegal result integer be handled normally.
    return;
  case ISD::LOAD: {
    SDNode *Node = LowerLOAD(SDValue(N, 0), DAG).getNode();
    if (!Node)
      return;

    Results.push_back(SDValue(Node, 0));
    Results.push_back(SDValue(Node, 1));
    // XXX: LLVM seems not to replace Chain Value inside CustomWidenLowerNode
    // function
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), SDValue(Node, 1));
    return;
  }
  case ISD::STORE: {
    SDValue Lowered = LowerSTORE(SDValue(N, 0), DAG);
    if (Lowered.getNode())
      Results.push_back(Lowered);
    return;
  }
  default:
    return;
  }
}

// FIXME: This implements accesses to initialized globals in the constant
// address space by copying them to private and accessing that. It does not
// properly handle illegal types or vectors. The private vector loads are not
// scalarized, and the illegal scalars hit an assertion. This technique will not
// work well with large initializers, and this should eventually be
// removed. Initialized globals should be placed into a data section that the
// runtime will load into a buffer before the kernel is executed. Uses of the
// global need to be replaced with a pointer loaded from an implicit kernel
// argument into this buffer holding the copy of the data, which will remove the
// need for any of this.
SDValue AMDGPUTargetLowering::LowerConstantInitializer(const Constant* Init,
                                                       const GlobalValue *GV,
                                                       const SDValue &InitPtr,
                                                       SDValue Chain,
                                                       SelectionDAG &DAG) const {
  const DataLayout &TD = DAG.getDataLayout();
  SDLoc DL(InitPtr);
  Type *InitTy = Init->getType();

  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Init)) {
    EVT VT = EVT::getEVT(InitTy);
    PointerType *PtrTy = PointerType::get(InitTy, AMDGPUAS::PRIVATE_ADDRESS);
    return DAG.getStore(Chain, DL, DAG.getConstant(*CI, DL, VT), InitPtr,
                        MachinePointerInfo(UndefValue::get(PtrTy)), false,
                        false, TD.getPrefTypeAlignment(InitTy));
  }

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(Init)) {
    EVT VT = EVT::getEVT(CFP->getType());
    PointerType *PtrTy = PointerType::get(CFP->getType(), 0);
    return DAG.getStore(Chain, DL, DAG.getConstantFP(*CFP, DL, VT), InitPtr,
                        MachinePointerInfo(UndefValue::get(PtrTy)), false,
                        false, TD.getPrefTypeAlignment(CFP->getType()));
  }

  if (StructType *ST = dyn_cast<StructType>(InitTy)) {
    const StructLayout *SL = TD.getStructLayout(ST);

    EVT PtrVT = InitPtr.getValueType();
    SmallVector<SDValue, 8> Chains;

    for (unsigned I = 0, N = ST->getNumElements(); I != N; ++I) {
      SDValue Offset = DAG.getConstant(SL->getElementOffset(I), DL, PtrVT);
      SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, InitPtr, Offset);

      Constant *Elt = Init->getAggregateElement(I);
      Chains.push_back(LowerConstantInitializer(Elt, GV, Ptr, Chain, DAG));
    }

    return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  }

  if (SequentialType *SeqTy = dyn_cast<SequentialType>(InitTy)) {
    EVT PtrVT = InitPtr.getValueType();

    unsigned NumElements;
    if (ArrayType *AT = dyn_cast<ArrayType>(SeqTy))
      NumElements = AT->getNumElements();
    else if (VectorType *VT = dyn_cast<VectorType>(SeqTy))
      NumElements = VT->getNumElements();
    else
      llvm_unreachable("Unexpected type");

    unsigned EltSize = TD.getTypeAllocSize(SeqTy->getElementType());
    SmallVector<SDValue, 8> Chains;
    for (unsigned i = 0; i < NumElements; ++i) {
      SDValue Offset = DAG.getConstant(i * EltSize, DL, PtrVT);
      SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, InitPtr, Offset);

      Constant *Elt = Init->getAggregateElement(i);
      Chains.push_back(LowerConstantInitializer(Elt, GV, Ptr, Chain, DAG));
    }

    return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  }

  if (isa<UndefValue>(Init)) {
    EVT VT = EVT::getEVT(InitTy);
    PointerType *PtrTy = PointerType::get(InitTy, AMDGPUAS::PRIVATE_ADDRESS);
    return DAG.getStore(Chain, DL, DAG.getUNDEF(VT), InitPtr,
                        MachinePointerInfo(UndefValue::get(PtrTy)), false,
                        false, TD.getPrefTypeAlignment(InitTy));
  }

  llvm_unreachable("Unhandled constant initializer");
}
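
// Returns true only when the global has a real (non-undef) initializer; an
// undef initializer is treated the same as having no initializer at all.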
static bool hasDefinedInitializer(const GlobalValue *GV) {
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar || !GVar->hasInitializer())
    return false;

  if (isa<UndefValue>(GVar->getInitializer()))
    return false;

  return true;
}

SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
                                                 SDValue Op,
                                                 SelectionDAG &DAG) const {

  const DataLayout &DL = DAG.getDataLayout();
  GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = G->getGlobal();

  switch (G->getAddressSpace()) {
  case AMDGPUAS::LOCAL_ADDRESS: {
    // XXX: What does the value of G->getOffset() mean?
    assert(G->getOffset() == 0 &&
           "Do not know what to do with an non-zero offset");

    // TODO: We could emit code to handle the initialization somewhere.
    if (hasDefinedInitializer(GV))
      break;
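
    // Lay the object out at the current top of LDS and bump LDSSize; each
    // global gets a stable offset the first time it is seen.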
    unsigned Offset;
    if (MFI->LocalMemoryObjects.count(GV) == 0) {
      uint64_t Size = DL.getTypeAllocSize(GV->getType()->getElementType());
      Offset = MFI->LDSSize;
      MFI->LocalMemoryObjects[GV] = Offset;
      // XXX: Account for alignment?
      MFI->LDSSize += Size;
    } else {
      Offset = MFI->LocalMemoryObjects[GV];
    }

    return DAG.getConstant(Offset, SDLoc(Op),
                           getPointerTy(DL, AMDGPUAS::LOCAL_ADDRESS));
  }
  case AMDGPUAS::CONSTANT_ADDRESS: {
    MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
    Type *EltType = GV->getType()->getElementType();
    unsigned Size = DL.getTypeAllocSize(EltType);
    unsigned Alignment = DL.getPrefTypeAlignment(EltType);

    MVT PrivPtrVT = getPointerTy(DL, AMDGPUAS::PRIVATE_ADDRESS);
    MVT ConstPtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS);

    int FI = FrameInfo->CreateStackObject(Size, Alignment, false);
    SDValue InitPtr = DAG.getFrameIndex(FI, PrivPtrVT);

    const GlobalVariable *Var = cast<GlobalVariable>(GV);
    if (!Var->hasInitializer()) {
      // This has no use, but bugpoint will hit it.
      return DAG.getZExtOrTrunc(InitPtr, SDLoc(Op), ConstPtrVT);
    }

    const Constant *Init = Var->getInitializer();
    SmallVector<SDNode*, 8> WorkList;

    for (SDNode::use_iterator I = DAG.getEntryNode()->use_begin(),
                              E = DAG.getEntryNode()->use_end(); I != E; ++I) {
      if (I->getOpcode() != AMDGPUISD::REGISTER_LOAD && I->getOpcode() != ISD::LOAD)
        continue;

      WorkList.push_back(*I);
    }
    SDValue Chain = LowerConstantInitializer(Init, GV, InitPtr, DAG.getEntryNode(), DAG);
    for (SmallVector<SDNode*, 8>::iterator I = WorkList.begin(),
                                           E = WorkList.end(); I != E; ++I) {
      SmallVector<SDValue, 8> Ops;
      Ops.push_back(Chain);
      for (unsigned i = 1; i < (*I)->getNumOperands(); ++i) {
        Ops.push_back((*I)->getOperand(i));
      }
      DAG.UpdateNodeOperands(*I, Ops);
    }
    return DAG.getZExtOrTrunc(InitPtr, SDLoc(Op), ConstPtrVT);
  }
  }

  const Function &Fn = *DAG.getMachineFunction().getFunction();
  DiagnosticInfoUnsupported BadInit(Fn,
                                    "initializer for address space");
  DAG.getContext()->diagnose(BadInit);
  return SDValue();
}

SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SmallVector<SDValue, 8> Args;

  for (const SDUse &U : Op->ops())
    DAG.ExtractVectorElements(U.get(), Args);

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(), Args);
}

SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {

  SmallVector<SDValue, 8> Args;
  unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  EVT VT = Op.getValueType();
  DAG.ExtractVectorElements(Op.getOperand(0), Args, Start,
                            VT.getVectorNumElements());

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(), Args);
}

SDValue AMDGPUTargetLowering::LowerFrameIndex(SDValue Op,
                                              SelectionDAG &DAG) const {

  MachineFunction &MF = DAG.getMachineFunction();
  const AMDGPUFrameLowering *TFL = Subtarget->getFrameLowering();

  FrameIndexSDNode *FIN = cast<FrameIndexSDNode>(Op);

  unsigned FrameIndex = FIN->getIndex();
  unsigned IgnoredFrameReg;
  unsigned Offset =
      TFL->getFrameIndexReference(MF, FrameIndex, IgnoredFrameReg);
  return DAG.getConstant(Offset * 4 * TFL->getStackWidth(MF), SDLoc(Op),
                         Op.getValueType());
}

SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                      SelectionDAG &DAG) const {
  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  switch (IntrinsicID) {
    default: return Op;
    case AMDGPUIntrinsic::AMDGPU_abs:
    case AMDGPUIntrinsic::AMDIL_abs: // Legacy name.
      return LowerIntrinsicIABS(Op, DAG);
    case AMDGPUIntrinsic::AMDGPU_lrp:
      return LowerIntrinsicLRP(Op, DAG);

    case AMDGPUIntrinsic::AMDGPU_clamp:
    case AMDGPUIntrinsic::AMDIL_clamp: // Legacy name.
      return DAG.getNode(AMDGPUISD::CLAMP, DL, VT,
                         Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

    case Intrinsic::AMDGPU_div_scale: {
      // 3rd parameter required to be a constant.
      const ConstantSDNode *Param = dyn_cast<ConstantSDNode>(Op.getOperand(3));
      if (!Param)
        return DAG.getUNDEF(VT);

      // Translate to the operands expected by the machine instruction. The
      // first parameter must be the same as the first instruction.
      SDValue Numerator = Op.getOperand(1);
      SDValue Denominator = Op.getOperand(2);

      // Note this order is opposite of the machine instruction's operations,
      // which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The
      // intrinsic has the numerator as the first operand to match a normal
      // division operation.

      SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator;

      return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0,
                         Denominator, Numerator);
    }

    case Intrinsic::AMDGPU_div_fmas:
      return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
                         Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
                         Op.getOperand(4));

    case Intrinsic::AMDGPU_div_fixup:
      return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
                         Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

    case Intrinsic::AMDGPU_trig_preop:
      return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT,
                         Op.getOperand(1), Op.getOperand(2));

    case Intrinsic::AMDGPU_rcp:
      return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1));

    case Intrinsic::AMDGPU_rsq:
      return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));

    case AMDGPUIntrinsic::AMDGPU_legacy_rsq:
      return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));

    case Intrinsic::AMDGPU_rsq_clamped:
      if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
        Type *Type = VT.getTypeForEVT(*DAG.getContext());
        APFloat Max = APFloat::getLargest(Type->getFltSemantics());
        APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true);

        SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
        SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq,
                                  DAG.getConstantFP(Max, DL, VT));
        return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp,
                           DAG.getConstantFP(Min, DL, VT));
      }

      return DAG.getNode(AMDGPUISD::RSQ_CLAMPED, DL, VT, Op.getOperand(1));

    case Intrinsic::AMDGPU_ldexp:
      return DAG.getNode(AMDGPUISD::LDEXP, DL, VT, Op.getOperand(1),
                         Op.getOperand(2));

    case AMDGPUIntrinsic::AMDGPU_imax:
      return DAG.getNode(ISD::SMAX, DL, VT, Op.getOperand(1),
                         Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_umax:
      return DAG.getNode(ISD::UMAX, DL, VT, Op.getOperand(1),
                         Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_imin:
      return DAG.getNode(ISD::SMIN, DL, VT, Op.getOperand(1),
                         Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_umin:
      return DAG.getNode(ISD::UMIN, DL, VT, Op.getOperand(1),
                         Op.getOperand(2));

    case AMDGPUIntrinsic::AMDGPU_umul24:
      return DAG.getNode(AMDGPUISD::MUL_U24, DL, VT,
                         Op.getOperand(1), Op.getOperand(2));

    case AMDGPUIntrinsic::AMDGPU_imul24:
      return DAG.getNode(AMDGPUISD::MUL_I24, DL, VT,
                         Op.getOperand(1), Op.getOperand(2));

    case AMDGPUIntrinsic::AMDGPU_umad24:
      return DAG.getNode(AMDGPUISD::MAD_U24, DL, VT,
                         Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_imad24:
      return DAG.getNode(AMDGPUISD::MAD_I24, DL, VT,
                         Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte0:
      return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Op.getOperand(1));

    case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte1:
      return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE1, DL, VT, Op.getOperand(1));

    case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte2:
      return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE2, DL, VT, Op.getOperand(1));

    case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte3:
      return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE3, DL, VT, Op.getOperand(1));

    case AMDGPUIntrinsic::AMDGPU_bfe_i32:
      return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
                         Op.getOperand(1),
                         Op.getOperand(2),
                         Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_bfe_u32:
      return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
                         Op.getOperand(1),
                         Op.getOperand(2),
                         Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_bfi:
      return DAG.getNode(AMDGPUISD::BFI, DL, VT,
                         Op.getOperand(1),
                         Op.getOperand(2),
                         Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_bfm:
      return DAG.getNode(AMDGPUISD::BFM, DL, VT,
                         Op.getOperand(1),
                         Op.getOperand(2));

    case Intrinsic::AMDGPU_class:
      return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT,
                         Op.getOperand(1), Op.getOperand(2));

    case AMDGPUIntrinsic::AMDIL_exp: // Legacy name.
      return DAG.getNode(ISD::FEXP2, DL, VT, Op.getOperand(1));

    case AMDGPUIntrinsic::AMDIL_round_nearest: // Legacy name.
      return DAG.getNode(ISD::FRINT, DL, VT, Op.getOperand(1));
    case AMDGPUIntrinsic::AMDGPU_trunc: // Legacy name.
      return DAG.getNode(ISD::FTRUNC, DL, VT, Op.getOperand(1));
    case AMDGPUIntrinsic::AMDGPU_brev: // Legacy name.
      return DAG.getNode(ISD::BITREVERSE, DL, VT, Op.getOperand(1));
  }
}

/// IABS(a) = SMAX(sub(0, a), a)
SDValue AMDGPUTargetLowering::LowerIntrinsicIABS(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
                            Op.getOperand(1));

  return DAG.getNode(ISD::SMAX, DL, VT, Neg, Op.getOperand(1));
}

/// Linear Interpolation
/// LRP(a, b, c) = muladd(a, b, (1 - a) * c)
SDValue AMDGPUTargetLowering::LowerIntrinsicLRP(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  // TODO: Should this propagate fast-math-flags?
  SDValue OneSubA = DAG.getNode(ISD::FSUB, DL, VT,
                                DAG.getConstantFP(1.0f, DL, MVT::f32),
                                Op.getOperand(1));
  SDValue OneSubAC = DAG.getNode(ISD::FMUL, DL, VT, OneSubA,
                                 Op.getOperand(3));
  return DAG.getNode(ISD::FADD, DL, VT,
      DAG.getNode(ISD::FMUL, DL, VT, Op.getOperand(1), Op.getOperand(2)),
      OneSubAC);
}

/// \brief Generate Min/Max node
SDValue AMDGPUTargetLowering::CombineFMinMaxLegacy(SDLoc DL,
                                                   EVT VT,
                                                   SDValue LHS,
                                                   SDValue RHS,
                                                   SDValue True,
                                                   SDValue False,
                                                   SDValue CC,
                                                   DAGCombinerInfo &DCI) const {
  if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
    return SDValue();

  if (!(LHS == True && RHS == False) && !(LHS == False && RHS == True))
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
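
  // Map the condition code onto the legacy min/max node, choosing the operand
  // order that preserves the select's behavior when one input is NaN.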
  ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
  switch (CCOpcode) {
  case ISD::SETOEQ:
  case ISD::SETONE:
  case ISD::SETUNE:
  case ISD::SETNE:
  case ISD::SETUEQ:
  case ISD::SETEQ:
  case ISD::SETFALSE:
  case ISD::SETFALSE2:
  case ISD::SETTRUE:
  case ISD::SETTRUE2:
  case ISD::SETUO:
  case ISD::SETO:
    break;
  case ISD::SETULE:
  case ISD::SETULT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
    return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
  }
  case ISD::SETOLE:
  case ISD::SETOLT: {
    // Ordered. Assume ordered for undefined.

    // Only do this after legalization to avoid interfering with other combines
    // which might occur.
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
        !DCI.isCalledByLegalizer())
      return SDValue();

    // We need to permute the operands to get the correct NaN behavior. The
    // selected operand is the second one based on the failing compare with NaN,
    // so permute it based on the compare type the hardware uses.
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
    return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
  }
  case ISD::SETUGE:
  case ISD::SETUGT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
    return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
  }
  case ISD::SETGT:
  case ISD::SETGE:
  case ISD::SETOGE:
  case ISD::SETOGT: {
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
        !DCI.isCalledByLegalizer())
      return SDValue();

    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
    return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
  }
  case ISD::SETCC_INVALID:
    llvm_unreachable("Invalid setcc condcode!");
  }
  return SDValue();
}

SDValue AMDGPUTargetLowering::ScalarizeVectorLoad(const SDValue Op,
                                                  SelectionDAG &DAG) const {
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  EVT MemVT = Load->getMemoryVT();
  EVT MemEltVT = MemVT.getVectorElementType();

  EVT LoadVT = Op.getValueType();
  EVT EltVT = LoadVT.getVectorElementType();
  EVT PtrVT = Load->getBasePtr().getValueType();

  unsigned NumElts = Load->getMemoryVT().getVectorNumElements();
  SmallVector<SDValue, 8> Loads;
  SmallVector<SDValue, 8> Chains;

  SDLoc SL(Op);
  unsigned MemEltSize = MemEltVT.getStoreSize();
  MachinePointerInfo SrcValue(Load->getMemOperand()->getValue());
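
  // Emit one extending scalar load per element, each offset by the element's
  // store size, then rebuild the vector and merge the chains.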
  for (unsigned i = 0; i < NumElts; ++i) {
    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, Load->getBasePtr(),
                              DAG.getConstant(i * MemEltSize, SL, PtrVT));

    SDValue NewLoad
      = DAG.getExtLoad(Load->getExtensionType(), SL, EltVT,
                       Load->getChain(), Ptr,
                       SrcValue.getWithOffset(i * MemEltSize),
                       MemEltVT, Load->isVolatile(), Load->isNonTemporal(),
                       Load->isInvariant(), Load->getAlignment());
    Loads.push_back(NewLoad.getValue(0));
    Chains.push_back(NewLoad.getValue(1));
  }

  SDValue Ops[] = {
    DAG.getNode(ISD::BUILD_VECTOR, SL, LoadVT, Loads),
    DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Chains)
  };

  return DAG.getMergeValues(Ops, SL);
}

SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue Op,
                                              SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();

  // If this is a 2 element vector, we really want to scalarize and not create
  // weird 1 element vectors.
  if (VT.getVectorNumElements() == 2)
    return ScalarizeVectorLoad(Op, DAG);

  LoadSDNode *Load = cast<LoadSDNode>(Op);
  SDValue BasePtr = Load->getBasePtr();
  EVT PtrVT = BasePtr.getValueType();
  EVT MemVT = Load->getMemoryVT();
  SDLoc SL(Op);

  const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();
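
  // Split into two half-width loads; the high half starts at the low half's
  // store size, and its alignment is reduced to MinAlign of the base alignment.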
  EVT LoVT, HiVT;
  EVT LoMemVT, HiMemVT;
  SDValue Lo, Hi;

  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
  std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemVT);
  std::tie(Lo, Hi) = DAG.SplitVector(Op, SL, LoVT, HiVT);

  unsigned Size = LoMemVT.getStoreSize();
  unsigned BaseAlign = Load->getAlignment();
  unsigned HiAlign = MinAlign(BaseAlign, Size);

  SDValue LoLoad
    = DAG.getExtLoad(Load->getExtensionType(), SL, LoVT,
                     Load->getChain(), BasePtr,
                     SrcValue,
                     LoMemVT, Load->isVolatile(), Load->isNonTemporal(),
                     Load->isInvariant(), BaseAlign);

  SDValue HiPtr = DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
                              DAG.getConstant(Size, SL, PtrVT));

  SDValue HiLoad
    = DAG.getExtLoad(Load->getExtensionType(), SL, HiVT,
                     Load->getChain(), HiPtr,
                     SrcValue.getWithOffset(LoMemVT.getStoreSize()),
                     HiMemVT, Load->isVolatile(), Load->isNonTemporal(),
                     Load->isInvariant(), HiAlign);

  SDValue Ops[] = {
    DAG.getNode(ISD::CONCAT_VECTORS, SL, VT, LoLoad, HiLoad),
    DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
                LoLoad.getValue(1), HiLoad.getValue(1))
  };

  return DAG.getMergeValues(Ops, SL);
}

SDValue AMDGPUTargetLowering::MergeVectorStore(const SDValue &Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  EVT MemVT = Store->getMemoryVT();
  unsigned MemBits = MemVT.getSizeInBits();

  // Byte stores are really expensive, so if possible, try to pack 32-bit vector
  // truncating store into an i32 store.
  // XXX: We could also handle other vector bitwidths.
  if (!MemVT.isVector() || MemBits > 32) {
    return SDValue();
  }

  SDLoc DL(Op);
  SDValue Value = Store->getValue();
  EVT VT = Value.getValueType();
  EVT ElemVT = VT.getVectorElementType();
  SDValue Ptr = Store->getBasePtr();
  EVT MemEltVT = MemVT.getVectorElementType();
  unsigned MemEltBits = MemEltVT.getSizeInBits();
  unsigned MemNumElements = MemVT.getVectorNumElements();
  unsigned PackedSize = MemVT.getStoreSizeInBits();
  SDValue Mask = DAG.getConstant((1 << MemEltBits) - 1, DL, MVT::i32);

  assert(Value.getValueType().getScalarSizeInBits() >= 32);

  SDValue PackedValue;
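
  // Pack the elements into a single i32: zero-extend each element to i32,
  // mask it to its memory width, shift it into position, and OR it in.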
  for (unsigned i = 0; i < MemNumElements; ++i) {
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ElemVT, Value,
                              DAG.getConstant(i, DL, MVT::i32));
    Elt = DAG.getZExtOrTrunc(Elt, DL, MVT::i32);
    Elt = DAG.getNode(ISD::AND, DL, MVT::i32, Elt, Mask); // getZeroExtendInReg

    SDValue Shift = DAG.getConstant(MemEltBits * i, DL, MVT::i32);
    Elt = DAG.getNode(ISD::SHL, DL, MVT::i32, Elt, Shift);

    if (i == 0) {
      PackedValue = Elt;
    } else {
      PackedValue = DAG.getNode(ISD::OR, DL, MVT::i32, PackedValue, Elt);
    }
  }

  if (PackedSize < 32) {
    EVT PackedVT = EVT::getIntegerVT(*DAG.getContext(), PackedSize);
    return DAG.getTruncStore(Store->getChain(), DL, PackedValue, Ptr,
                             Store->getMemOperand()->getPointerInfo(),
                             PackedVT,
                             Store->isNonTemporal(), Store->isVolatile(),
                             Store->getAlignment());
  }

  return DAG.getStore(Store->getChain(), DL, PackedValue, Ptr,
                      Store->getMemOperand()->getPointerInfo(),
                      Store->isVolatile(), Store->isNonTemporal(),
                      Store->getAlignment());
}

SDValue AMDGPUTargetLowering::ScalarizeVectorStore(SDValue Op,
                                                   SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  EVT MemEltVT = Store->getMemoryVT().getVectorElementType();
  EVT EltVT = Store->getValue().getValueType().getVectorElementType();
  EVT PtrVT = Store->getBasePtr().getValueType();
  unsigned NumElts = Store->getMemoryVT().getVectorNumElements();

  SDLoc SL(Op);
  SmallVector<SDValue, 8> Chains;

  unsigned EltSize = MemEltVT.getStoreSize();
  MachinePointerInfo SrcValue(Store->getMemOperand()->getValue());
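
  // Extract each element and emit an individual truncating store at the
  // element's byte offset; the chains are merged with a TokenFactor below.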
  for (unsigned i = 0, e = NumElts; i != e; ++i) {
    SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                              Store->getValue(),
                              DAG.getConstant(i, SL, MVT::i32));

    SDValue Offset = DAG.getConstant(i * MemEltVT.getStoreSize(), SL, PtrVT);
    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, Store->getBasePtr(), Offset);
    SDValue NewStore =
      DAG.getTruncStore(Store->getChain(), SL, Val, Ptr,
                        SrcValue.getWithOffset(i * EltSize),
                        MemEltVT, Store->isNonTemporal(), Store->isVolatile(),
                        Store->getAlignment());
    Chains.push_back(NewStore);
  }

  return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Chains);
}

SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  SDValue Val = Store->getValue();
  EVT VT = Val.getValueType();

  // If this is a 2 element vector, we really want to scalarize and not create
  // weird 1 element vectors.
  if (VT.getVectorNumElements() == 2)
    return ScalarizeVectorStore(Op, DAG);

  EVT MemVT = Store->getMemoryVT();
  SDValue Chain = Store->getChain();
  SDValue BasePtr = Store->getBasePtr();
  SDLoc SL(Op);

  EVT LoVT, HiVT;
  EVT LoMemVT, HiMemVT;
  SDValue Lo, Hi;

  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
  std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemVT);
  std::tie(Lo, Hi) = DAG.SplitVector(Val, SL, LoVT, HiVT);

  EVT PtrVT = BasePtr.getValueType();
  SDValue HiPtr = DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
                              DAG.getConstant(LoMemVT.getStoreSize(), SL,
                                              PtrVT));

  const MachinePointerInfo &SrcValue = Store->getMemOperand()->getPointerInfo();
  unsigned BaseAlign = Store->getAlignment();
  unsigned Size = LoMemVT.getStoreSize();
  unsigned HiAlign = MinAlign(BaseAlign, Size);
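
  // Emit the two halves as truncating stores; the high half uses the reduced
  // HiAlign computed above.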
  SDValue LoStore
    = DAG.getTruncStore(Chain, SL, Lo,
                        BasePtr,
                        SrcValue,
                        LoMemVT,
                        Store->isNonTemporal(),
                        Store->isVolatile(),
                        BaseAlign);
  SDValue HiStore
    = DAG.getTruncStore(Chain, SL, Hi,
                        HiPtr,
                        SrcValue.getWithOffset(Size),
                        HiMemVT,
                        Store->isNonTemporal(),
                        Store->isVolatile(),
                        HiAlign);

  return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoStore, HiStore);
}

SDValue AMDGPUTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  ISD::LoadExtType ExtType = Load->getExtensionType();
  EVT VT = Op.getValueType();
  EVT MemVT = Load->getMemoryVT();

  if (ExtType == ISD::NON_EXTLOAD && VT.getSizeInBits() < 32) {
    assert(VT == MVT::i1 && "Only i1 non-extloads expected");
    // FIXME: Copied from PPC
    // First, load into 32 bits, then truncate to 1 bit.

    SDValue Chain = Load->getChain();
    SDValue BasePtr = Load->getBasePtr();
    MachineMemOperand *MMO = Load->getMemOperand();

    SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
                                   BasePtr, MVT::i8, MMO);

    SDValue Ops[] = {
      DAG.getNode(ISD::TRUNCATE, DL, VT, NewLD),
      NewLD.getValue(1)
    };

    return DAG.getMergeValues(Ops, DL);
  }
  if (Subtarget->getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS ||
      Load->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS ||
      ExtType == ISD::NON_EXTLOAD || Load->getMemoryVT().bitsGE(MVT::i32))
    return SDValue();

  // <SI && AS=PRIVATE && EXTLOAD && size < 32bit: lower to a REGISTER_LOAD
  // followed by a register (2-)byte extract.

  // Get Register holding the target.
  SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Load->getBasePtr(),
                            DAG.getConstant(2, DL, MVT::i32));
  // Load the Register.
  SDValue Ret = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
                            Load->getChain(), Ptr,
                            DAG.getTargetConstant(0, DL, MVT::i32),
                            Op.getOperand(2));

  // Get offset within the register.
  SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32,
                                Load->getBasePtr(),
                                DAG.getConstant(0x3, DL, MVT::i32));

  // Bit offset of target byte (byteIdx * 8).
  SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                 DAG.getConstant(3, DL, MVT::i32));

  // Shift to the right.
  Ret = DAG.getNode(ISD::SRL, DL, MVT::i32, Ret, ShiftAmt);

  // Eliminate the upper bits by setting them to ...
  EVT MemEltVT = MemVT.getScalarType();

  // ... ones.
  if (ExtType == ISD::SEXTLOAD) {
    SDValue MemEltVTNode = DAG.getValueType(MemEltVT);

    SDValue Ops[] = {
      DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, Ret, MemEltVTNode),
      Load->getChain()
    };

    return DAG.getMergeValues(Ops, DL);
  }

  // ... or zeros.
  SDValue Ops[] = {
    DAG.getZeroExtendInReg(Ret, DL, MemEltVT),
    Load->getChain()
  };

  return DAG.getMergeValues(Ops, DL);
}

SDValue AMDGPUTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Result = AMDGPUTargetLowering::MergeVectorStore(Op, DAG);
  if (Result.getNode()) {
    return Result;
  }

  StoreSDNode *Store = cast<StoreSDNode>(Op);
  SDValue Chain = Store->getChain();
  if ((Store->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
       Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) &&
      Store->getValue().getValueType().isVector()) {
    return SplitVectorStore(Op, DAG);
  }

  EVT MemVT = Store->getMemoryVT();
  if (Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS &&
      MemVT.bitsLT(MVT::i32)) {
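    // Sub-dword private stores become a read-modify-write of the containing
    // 32-bit register: load the dword, clear the destination field, OR in the
    // shifted value, and store the dword back.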
    unsigned Mask = 0;
    if (Store->getMemoryVT() == MVT::i8) {
      Mask = 0xff;
    } else if (Store->getMemoryVT() == MVT::i16) {
      Mask = 0xffff;
    }
    SDValue BasePtr = Store->getBasePtr();
    SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, BasePtr,
                              DAG.getConstant(2, DL, MVT::i32));
    SDValue Dst = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, MVT::i32,
                              Chain, Ptr,
                              DAG.getTargetConstant(0, DL, MVT::i32));

    SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32, BasePtr,
                                  DAG.getConstant(0x3, DL, MVT::i32));

    SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                   DAG.getConstant(3, DL, MVT::i32));

    SDValue SExtValue = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i32,
                                    Store->getValue());

    SDValue MaskedValue = DAG.getZeroExtendInReg(SExtValue, DL, MemVT);

    SDValue ShiftedValue = DAG.getNode(ISD::SHL, DL, MVT::i32,
                                       MaskedValue, ShiftAmt);

    SDValue DstMask = DAG.getNode(ISD::SHL, DL, MVT::i32,
                                  DAG.getConstant(Mask, DL, MVT::i32),
                                  ShiftAmt);
    DstMask = DAG.getNode(ISD::XOR, DL, MVT::i32, DstMask,
                          DAG.getConstant(0xffffffff, DL, MVT::i32));
    Dst = DAG.getNode(ISD::AND, DL, MVT::i32, Dst, DstMask);

    SDValue Value = DAG.getNode(ISD::OR, DL, MVT::i32, Dst, ShiftedValue);
    return DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other,
                       Chain, Value, Ptr,
                       DAG.getTargetConstant(0, DL, MVT::i32));
  }
  return SDValue();
}

// This is a shortcut for integer division because we have fast i32<->f32
// conversions, and fast f32 reciprocal instructions. The fractional part of a
// float is enough to accurately represent up to a 24-bit integer.
SDValue AMDGPUTargetLowering::LowerDIVREM24(SDValue Op, SelectionDAG &DAG,
                                            bool sign) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  MVT IntVT = MVT::i32;
  MVT FltVT = MVT::f32;

  ISD::NodeType ToFp = sign ? ISD::SINT_TO_FP : ISD::UINT_TO_FP;
  ISD::NodeType ToInt = sign ? ISD::FP_TO_SINT : ISD::FP_TO_UINT;

  if (VT.isVector()) {
    unsigned NElts = VT.getVectorNumElements();
    IntVT = MVT::getVectorVT(MVT::i32, NElts);
    FltVT = MVT::getVectorVT(MVT::f32, NElts);
  }

  unsigned BitSize = VT.getScalarType().getSizeInBits();

  SDValue jq = DAG.getConstant(1, DL, IntVT);

  if (sign) {
    // char|short jq = ia ^ ib;
    jq = DAG.getNode(ISD::XOR, DL, VT, LHS, RHS);

    // jq = jq >> (bitsize - 2)
    jq = DAG.getNode(ISD::SRA, DL, VT, jq,
                     DAG.getConstant(BitSize - 2, DL, VT));

    // jq = jq | 0x1
    jq = DAG.getNode(ISD::OR, DL, VT, jq, DAG.getConstant(1, DL, VT));

    // jq = (int)jq
    jq = DAG.getSExtOrTrunc(jq, DL, IntVT);
  }
  // int ia = (int)LHS;
  SDValue ia = sign ?
    DAG.getSExtOrTrunc(LHS, DL, IntVT) : DAG.getZExtOrTrunc(LHS, DL, IntVT);

  // int ib = (int)RHS;
  SDValue ib = sign ?
    DAG.getSExtOrTrunc(RHS, DL, IntVT) : DAG.getZExtOrTrunc(RHS, DL, IntVT);

  // float fa = (float)ia;
  SDValue fa = DAG.getNode(ToFp, DL, FltVT, ia);

  // float fb = (float)ib;
  SDValue fb = DAG.getNode(ToFp, DL, FltVT, ib);

  // TODO: Should this propagate fast-math-flags?
  // float fq = native_divide(fa, fb);
  SDValue fq = DAG.getNode(ISD::FMUL, DL, FltVT,
                           fa, DAG.getNode(AMDGPUISD::RCP, DL, FltVT, fb));

  // fq = trunc(fq);
  fq = DAG.getNode(ISD::FTRUNC, DL, FltVT, fq);

  // float fqneg = -fq;
  SDValue fqneg = DAG.getNode(ISD::FNEG, DL, FltVT, fq);

  // float fr = mad(fqneg, fb, fa);
  SDValue fr = DAG.getNode(ISD::FADD, DL, FltVT,
                           DAG.getNode(ISD::FMUL, DL, FltVT, fqneg, fb), fa);

  // int iq = (int)fq;
  SDValue iq = DAG.getNode(ToInt, DL, IntVT, fq);

  // fr = fabs(fr);
  fr = DAG.getNode(ISD::FABS, DL, FltVT, fr);

  // fb = fabs(fb);
  fb = DAG.getNode(ISD::FABS, DL, FltVT, fb);

  EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);

  // int cv = fr >= fb;
  SDValue cv = DAG.getSetCC(DL, SetCCVT, fr, fb, ISD::SETOGE);

  // jq = (cv ? jq : 0);
  jq = DAG.getNode(ISD::SELECT, DL, VT, cv, jq, DAG.getConstant(0, DL, VT));

  // dst = trunc/extend to legal type
  iq = sign ? DAG.getSExtOrTrunc(iq, DL, VT) : DAG.getZExtOrTrunc(iq, DL, VT);

  // int dst = iq + jq;
  SDValue Div = DAG.getNode(ISD::ADD, DL, VT, iq, jq);

  // Rem needs compensation, it's easier to recompute it
  SDValue Rem = DAG.getNode(ISD::MUL, DL, VT, Div, RHS);
  Rem = DAG.getNode(ISD::SUB, DL, VT, LHS, Rem);

  SDValue Res[2] = {
    Div,
    Rem
  };
  return DAG.getMergeValues(Res, DL);
}

void AMDGPUTargetLowering::LowerUDIVREM64(SDValue Op,
                                          SelectionDAG &DAG,
                                          SmallVectorImpl<SDValue> &Results) const {
  assert(Op.getValueType() == MVT::i64);

  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());

  SDValue one = DAG.getConstant(1, DL, HalfVT);
  SDValue zero = DAG.getConstant(0, DL, HalfVT);

  // HiLo split
  SDValue LHS = Op.getOperand(0);
  SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, zero);
  SDValue LHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, one);

  SDValue RHS = Op.getOperand(1);
  SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, zero);
  SDValue RHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, one);

  if (VT == MVT::i64 &&
      DAG.MaskedValueIsZero(RHS, APInt::getHighBitsSet(64, 32)) &&
      DAG.MaskedValueIsZero(LHS, APInt::getHighBitsSet(64, 32))) {

    SDValue Res = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
                              LHS_Lo, RHS_Lo);

    SDValue DIV = DAG.getNode(ISD::BUILD_PAIR, DL, VT, Res.getValue(0), zero);
    SDValue REM = DAG.getNode(ISD::BUILD_PAIR, DL, VT, Res.getValue(1), zero);
    Results.push_back(DIV);
    Results.push_back(REM);
    return;
  }

  // Get Speculative values
  SDValue DIV_Part = DAG.getNode(ISD::UDIV, DL, HalfVT, LHS_Hi, RHS_Lo);
  SDValue REM_Part = DAG.getNode(ISD::UREM, DL, HalfVT, LHS_Hi, RHS_Lo);

  SDValue REM_Lo = DAG.getSelectCC(DL, RHS_Hi, zero, REM_Part, LHS_Hi, ISD::SETEQ);
  SDValue REM = DAG.getNode(ISD::BUILD_PAIR, DL, VT, REM_Lo, zero);

  SDValue DIV_Hi = DAG.getSelectCC(DL, RHS_Hi, zero, DIV_Part, zero, ISD::SETEQ);
  SDValue DIV_Lo = zero;

  const unsigned halfBitWidth = HalfVT.getSizeInBits();
1708 for (unsigned i = 0; i < halfBitWidth; ++i) {
1709 const unsigned bitPos = halfBitWidth - i - 1;
1710 SDValue POS = DAG.getConstant(bitPos, DL, HalfVT);
1711 // Get value of high bit
1712 SDValue HBit = DAG.getNode(ISD::SRL, DL, HalfVT, LHS_Lo, POS);
1713 HBit = DAG.getNode(ISD::AND, DL, HalfVT, HBit, one);
1714 HBit = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, HBit);
1717 REM = DAG.getNode(ISD::SHL, DL, VT, REM, DAG.getConstant(1, DL, VT));
1719 REM = DAG.getNode(ISD::OR, DL, VT, REM, HBit);
1721 SDValue BIT = DAG.getConstant(1 << bitPos, DL, HalfVT);
1722 SDValue realBIT = DAG.getSelectCC(DL, REM, RHS, BIT, zero, ISD::SETUGE);
1724 DIV_Lo = DAG.getNode(ISD::OR, DL, HalfVT, DIV_Lo, realBIT);
1727 SDValue REM_sub = DAG.getNode(ISD::SUB, DL, VT, REM, RHS);
1728 REM = DAG.getSelectCC(DL, REM, RHS, REM_sub, REM, ISD::SETUGE);
1731 SDValue DIV = DAG.getNode(ISD::BUILD_PAIR, DL, VT, DIV_Lo, DIV_Hi);
1732 Results.push_back(DIV);
1733 Results.push_back(REM);
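
// The loop above is ordinary restoring long division over the low half,
// seeded with the speculative high-half results. In scalar C form, roughly:
//
//   uint64_t rem = (rhs_hi == 0) ? lhs_hi % rhs_lo : lhs_hi;
//   uint32_t div_lo = 0;
//   for (int bit = 31; bit >= 0; --bit) {
//     rem = (rem << 1) | ((lhs_lo >> bit) & 1);
//     if (rem >= rhs) {
//       rem -= rhs;
//       div_lo |= 1u << bit;
//     }
//   }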
SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  if (VT == MVT::i64) {
    SmallVector<SDValue, 2> Results;
    LowerUDIVREM64(Op, DAG, Results);
    return DAG.getMergeValues(Results, DL);
  }

  SDValue Num = Op.getOperand(0);
  SDValue Den = Op.getOperand(1);

  if (VT == MVT::i32) {
    if (DAG.MaskedValueIsZero(Num, APInt::getHighBitsSet(32, 8)) &&
        DAG.MaskedValueIsZero(Den, APInt::getHighBitsSet(32, 8))) {
      // TODO: We technically could do this for i64, but shouldn't that just be
      // handled by something generally reducing 64-bit division on 32-bit
      // values to 32-bit?
      return LowerDIVREM24(Op, DAG, false);
    }
  }

  // RCP = URECIP(Den) = 2^32 / Den + e
  // e is rounding error.
  SDValue RCP = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Den);

  // RCP_LO = mul(RCP, Den)
  SDValue RCP_LO = DAG.getNode(ISD::MUL, DL, VT, RCP, Den);

  // RCP_HI = mulhu(RCP, Den)
  SDValue RCP_HI = DAG.getNode(ISD::MULHU, DL, VT, RCP, Den);

  // NEG_RCP_LO = -RCP_LO
  SDValue NEG_RCP_LO = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
                                   RCP_LO);

  // ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO)
  SDValue ABS_RCP_LO = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, DL, VT),
                                       NEG_RCP_LO, RCP_LO, ISD::SETEQ);

  // Calculate the rounding error from the URECIP instruction
  // E = mulhu(ABS_RCP_LO, RCP)
  SDValue E = DAG.getNode(ISD::MULHU, DL, VT, ABS_RCP_LO, RCP);

  // RCP_A_E = RCP + E
  SDValue RCP_A_E = DAG.getNode(ISD::ADD, DL, VT, RCP, E);

  // RCP_S_E = RCP - E
  SDValue RCP_S_E = DAG.getNode(ISD::SUB, DL, VT, RCP, E);

  // Tmp0 = (RCP_HI == 0 ? RCP_A_E : RCP_S_E)
  SDValue Tmp0 = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, DL, VT),
                                 RCP_A_E, RCP_S_E, ISD::SETEQ);

  // Quotient = mulhu(Tmp0, Num)
  SDValue Quotient = DAG.getNode(ISD::MULHU, DL, VT, Tmp0, Num);

  // Num_S_Remainder = Quotient * Den
  SDValue Num_S_Remainder = DAG.getNode(ISD::MUL, DL, VT, Quotient, Den);

  // Remainder = Num - Num_S_Remainder
  SDValue Remainder = DAG.getNode(ISD::SUB, DL, VT, Num, Num_S_Remainder);

  // Remainder_GE_Den = (Remainder >= Den ? -1 : 0)
  SDValue Remainder_GE_Den = DAG.getSelectCC(DL, Remainder, Den,
                                             DAG.getConstant(-1, DL, VT),
                                             DAG.getConstant(0, DL, VT),
                                             ISD::SETUGE);

  // Remainder_GE_Zero = (Num >= Num_S_Remainder ? -1 : 0)
  SDValue Remainder_GE_Zero = DAG.getSelectCC(DL, Num,
                                              Num_S_Remainder,
                                              DAG.getConstant(-1, DL, VT),
                                              DAG.getConstant(0, DL, VT),
                                              ISD::SETUGE);

  // Tmp1 = Remainder_GE_Den & Remainder_GE_Zero
  SDValue Tmp1 = DAG.getNode(ISD::AND, DL, VT, Remainder_GE_Den,
                             Remainder_GE_Zero);

  // Calculate Division result:

  // Quotient_A_One = Quotient + 1
  SDValue Quotient_A_One = DAG.getNode(ISD::ADD, DL, VT, Quotient,
                                       DAG.getConstant(1, DL, VT));

  // Quotient_S_One = Quotient - 1
  SDValue Quotient_S_One = DAG.getNode(ISD::SUB, DL, VT, Quotient,
                                       DAG.getConstant(1, DL, VT));

  // Div = (Tmp1 == 0 ? Quotient : Quotient_A_One)
  SDValue Div = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, DL, VT),
                                Quotient, Quotient_A_One, ISD::SETEQ);

  // Div = (Remainder_GE_Zero == 0 ? Quotient_S_One : Div)
  Div = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, DL, VT),
                        Quotient_S_One, Div, ISD::SETEQ);

  // Calculate Rem result:

  // Remainder_S_Den = Remainder - Den
  SDValue Remainder_S_Den = DAG.getNode(ISD::SUB, DL, VT, Remainder, Den);

  // Remainder_A_Den = Remainder + Den
  SDValue Remainder_A_Den = DAG.getNode(ISD::ADD, DL, VT, Remainder, Den);

  // Rem = (Tmp1 == 0 ? Remainder : Remainder_S_Den)
  SDValue Rem = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, DL, VT),
                                Remainder, Remainder_S_Den, ISD::SETEQ);

  // Rem = (Remainder_GE_Zero == 0 ? Remainder_A_Den : Rem)
  Rem = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, DL, VT),
                        Remainder_A_Den, Rem, ISD::SETEQ);

  SDValue Ops[2] = {
    Div,
    Rem
  };
  return DAG.getMergeValues(Ops, DL);
}
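
// A rough summary of the sequence above: URECIP only approximates 2^32 / Den,
// so Quotient = mulhu(Tmp0, Num) can be off by one in either direction.
// Remainder_GE_Den flags an estimate that is too low and Remainder_GE_Zero
// (cleared on unsigned underflow of Num - Quotient * Den) flags one that is
// too high; the paired selects then add or subtract one and re-derive the
// matching remainder.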
SDValue AMDGPUTargetLowering::LowerSDIVREM(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);

  SDValue Zero = DAG.getConstant(0, DL, VT);
  SDValue NegOne = DAG.getConstant(-1, DL, VT);

  if (VT == MVT::i32 &&
      DAG.ComputeNumSignBits(LHS) > 8 &&
      DAG.ComputeNumSignBits(RHS) > 8) {
    return LowerDIVREM24(Op, DAG, true);
  }

  if (VT == MVT::i64 &&
      DAG.ComputeNumSignBits(LHS) > 32 &&
      DAG.ComputeNumSignBits(RHS) > 32) {
    EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());

    // HiLo split
    SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, Zero);
    SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, Zero);
    SDValue DIVREM = DAG.getNode(ISD::SDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
                                 LHS_Lo, RHS_Lo);
    SDValue Res[2] = {
      DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(0)),
      DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(1))
    };
    return DAG.getMergeValues(Res, DL);
  }

  SDValue LHSign = DAG.getSelectCC(DL, LHS, Zero, NegOne, Zero, ISD::SETLT);
  SDValue RHSign = DAG.getSelectCC(DL, RHS, Zero, NegOne, Zero, ISD::SETLT);
  SDValue DSign = DAG.getNode(ISD::XOR, DL, VT, LHSign, RHSign);
  SDValue RSign = LHSign; // Remainder sign is the same as LHS

  LHS = DAG.getNode(ISD::ADD, DL, VT, LHS, LHSign);
  RHS = DAG.getNode(ISD::ADD, DL, VT, RHS, RHSign);

  LHS = DAG.getNode(ISD::XOR, DL, VT, LHS, LHSign);
  RHS = DAG.getNode(ISD::XOR, DL, VT, RHS, RHSign);

  SDValue Div = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(VT, VT), LHS, RHS);
  SDValue Rem = Div.getValue(1);

  Div = DAG.getNode(ISD::XOR, DL, VT, Div, DSign);
  Rem = DAG.getNode(ISD::XOR, DL, VT, Rem, RSign);

  Div = DAG.getNode(ISD::SUB, DL, VT, Div, DSign);
  Rem = DAG.getNode(ISD::SUB, DL, VT, Rem, RSign);

  SDValue Res[2] = {
    Div,
    Rem
  };
  return DAG.getMergeValues(Res, DL);
}
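
// The sign handling above uses the standard two's complement identities:
// with s = x >> (bits - 1) (all ones when x is negative, zero otherwise),
//   (x + s) ^ s == |x|     (conditional negate into the unsigned domain)
//   (u ^ s) - s == +/-u    (conditional negate back; applied with DSign for
//                           the quotient and RSign for the remainder)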
// (frem x, y) -> (fsub x, (fmul (ftrunc (fdiv x, y)), y))
SDValue AMDGPUTargetLowering::LowerFREM(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  EVT VT = Op.getValueType();
  SDValue X = Op.getOperand(0);
  SDValue Y = Op.getOperand(1);

  // TODO: Should this propagate fast-math-flags?

  SDValue Div = DAG.getNode(ISD::FDIV, SL, VT, X, Y);
  SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, VT, Div);
  SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, Trunc, Y);

  return DAG.getNode(ISD::FSUB, SL, VT, X, Mul);
}
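
// Because the quotient is truncated toward zero rather than floored, this
// matches fmod semantics and the result takes the sign of x. For example,
// frem(5.5, 2.0): trunc(5.5 / 2.0) = 2.0, so the result is
// 5.5 - 2.0 * 2.0 = 1.5, and frem(-5.5, 2.0) gives -1.5.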
SDValue AMDGPUTargetLowering::LowerFCEIL(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);

  // result = trunc(src)
  // if (src > 0.0 && src != result)
  //   result += 1.0

  SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);

  const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
  const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);

  EVT SetCCVT =
      getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);

  SDValue Gt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOGT);
  SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
  SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Gt0, NeTrunc);

  SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, One, Zero);
  // TODO: Should this propagate fast-math-flags?
  return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
}
static SDValue extractF64Exponent(SDValue Hi, SDLoc SL, SelectionDAG &DAG) {
  const unsigned FractBits = 52;
  const unsigned ExpBits = 11;

  SDValue ExpPart = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
                                Hi,
                                DAG.getConstant(FractBits - 32, SL, MVT::i32),
                                DAG.getConstant(ExpBits, SL, MVT::i32));
  SDValue Exp = DAG.getNode(ISD::SUB, SL, MVT::i32, ExpPart,
                            DAG.getConstant(1023, SL, MVT::i32));

  return Exp;
}
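
// An IEEE-754 double is laid out as [63] sign, [62:52] biased exponent,
// [51:0] fraction. Given only the high 32-bit word, the exponent field
// starts at bit 52 - 32 = 20 and is 11 bits wide, which is exactly the
// BFE_U32 above; subtracting the 1023 bias yields the unbiased exponent.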
SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);

  assert(Op.getValueType() == MVT::f64);

  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
  const SDValue One = DAG.getConstant(1, SL, MVT::i32);

  SDValue VecSrc = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);

  // Extract the upper half, since this is where we will find the sign and
  // exponent.
  SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, VecSrc, One);

  SDValue Exp = extractF64Exponent(Hi, SL, DAG);

  const unsigned FractBits = 52;

  // Extract the sign bit.
  const SDValue SignBitMask = DAG.getConstant(UINT32_C(1) << 31, SL, MVT::i32);
  SDValue SignBit = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, SignBitMask);

  // Extend back to 64 bits.
  SDValue SignBit64 = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
                                  Zero, SignBit);
  SignBit64 = DAG.getNode(ISD::BITCAST, SL, MVT::i64, SignBit64);

  SDValue BcInt = DAG.getNode(ISD::BITCAST, SL, MVT::i64, Src);
  const SDValue FractMask
    = DAG.getConstant((UINT64_C(1) << FractBits) - 1, SL, MVT::i64);

  SDValue Shr = DAG.getNode(ISD::SRA, SL, MVT::i64, FractMask, Exp);
  SDValue Not = DAG.getNOT(SL, Shr, MVT::i64);
  SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, BcInt, Not);

  EVT SetCCVT =
      getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32);

  const SDValue FiftyOne = DAG.getConstant(FractBits - 1, SL, MVT::i32);

  SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT);
  SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT);

  SDValue Tmp1 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpLt0, SignBit64, Tmp0);
  SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpGt51, BcInt, Tmp1);

  return DAG.getNode(ISD::BITCAST, SL, MVT::f64, Tmp2);
}
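
// The mask trick above: for 0 <= exp <= 51, (FractMask >> exp) covers
// exactly the fraction bits representing the sub-integer part, so clearing
// them truncates toward zero. For example, 3.5 = 1.75 * 2^1 has exp = 1;
// clearing the low 51 fraction bits leaves 1.5 * 2^1 = 3.0. The selects
// handle the extremes: exp < 0 means |x| < 1.0 (result is +/-0.0, preserved
// via the sign word), and exp > 51 means x is already an integer.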
SDValue AMDGPUTargetLowering::LowerFRINT(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);

  assert(Op.getValueType() == MVT::f64);

  APFloat C1Val(APFloat::IEEEdouble, "0x1.0p+52");
  SDValue C1 = DAG.getConstantFP(C1Val, SL, MVT::f64);
  SDValue CopySign = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, C1, Src);

  // TODO: Should this propagate fast-math-flags?

  SDValue Tmp1 = DAG.getNode(ISD::FADD, SL, MVT::f64, Src, CopySign);
  SDValue Tmp2 = DAG.getNode(ISD::FSUB, SL, MVT::f64, Tmp1, CopySign);

  SDValue Fabs = DAG.getNode(ISD::FABS, SL, MVT::f64, Src);

  APFloat C2Val(APFloat::IEEEdouble, "0x1.fffffffffffffp+51");
  SDValue C2 = DAG.getConstantFP(C2Val, SL, MVT::f64);

  EVT SetCCVT =
      getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
  SDValue Cond = DAG.getSetCC(SL, SetCCVT, Fabs, C2, ISD::SETOGT);

  return DAG.getSelect(SL, MVT::f64, Cond, Src, Tmp2);
}
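
// This is the classic add-magic-constant rounding trick: once a value is
// added to copysign(2^52, x), the spacing between adjacent doubles is
// exactly 1.0, so the FADD itself rounds to integer in the current
// (nearest-even) mode and the FSUB recovers the rounded value. For example,
// 2.5 + 2^52 rounds to 4503599627370498.0 (ties to even) and subtracting
// 2^52 gives 2.0. Inputs with |x| > 0x1.fffffffffffffp+51 are already
// integral and pass through unchanged.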
SDValue AMDGPUTargetLowering::LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const {
  // FNEARBYINT and FRINT are the same, except in their handling of FP
  // exceptions. Those aren't really meaningful for us, and OpenCL only has
  // rint, so just treat them as equivalent.
  return DAG.getNode(ISD::FRINT, SDLoc(Op), Op.getValueType(), Op.getOperand(0));
}
// XXX - May require not supporting f32 denormals?
SDValue AMDGPUTargetLowering::LowerFROUND32(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue X = Op.getOperand(0);

  SDValue T = DAG.getNode(ISD::FTRUNC, SL, MVT::f32, X);

  // TODO: Should this propagate fast-math-flags?

  SDValue Diff = DAG.getNode(ISD::FSUB, SL, MVT::f32, X, T);

  SDValue AbsDiff = DAG.getNode(ISD::FABS, SL, MVT::f32, Diff);

  const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f32);
  const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
  const SDValue Half = DAG.getConstantFP(0.5, SL, MVT::f32);

  SDValue SignOne = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f32, One, X);

  EVT SetCCVT =
      getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32);

  SDValue Cmp = DAG.getSetCC(SL, SetCCVT, AbsDiff, Half, ISD::SETOGE);

  SDValue Sel = DAG.getNode(ISD::SELECT, SL, MVT::f32, Cmp, SignOne, Zero);

  return DAG.getNode(ISD::FADD, SL, MVT::f32, T, Sel);
}
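
// This implements round-half-away-from-zero: when the fractional part is at
// least 0.5, copysign(1.0, x) is added to the truncated value. So
// round(2.5) = 2.0 + 1.0 = 3.0 and round(-2.5) = -2.0 + -1.0 = -3.0.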
SDValue AMDGPUTargetLowering::LowerFROUND64(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue X = Op.getOperand(0);

  SDValue L = DAG.getNode(ISD::BITCAST, SL, MVT::i64, X);

  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
  const SDValue One = DAG.getConstant(1, SL, MVT::i32);
  const SDValue NegOne = DAG.getConstant(-1, SL, MVT::i32);
  const SDValue FiftyOne = DAG.getConstant(51, SL, MVT::i32);
  EVT SetCCVT =
      getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32);

  SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);

  SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC, One);

  SDValue Exp = extractF64Exponent(Hi, SL, DAG);

  const SDValue Mask = DAG.getConstant(INT64_C(0x000fffffffffffff), SL,
                                       MVT::i64);

  SDValue M = DAG.getNode(ISD::SRA, SL, MVT::i64, Mask, Exp);
  SDValue D = DAG.getNode(ISD::SRA, SL, MVT::i64,
                          DAG.getConstant(INT64_C(0x0008000000000000), SL,
                                          MVT::i64),
                          Exp);

  SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, L, M);
  SDValue Tmp1 = DAG.getSetCC(SL, SetCCVT,
                              DAG.getConstant(0, SL, MVT::i64), Tmp0,
                              ISD::SETNE);

  SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, Tmp1,
                             D, DAG.getConstant(0, SL, MVT::i64));
  SDValue K = DAG.getNode(ISD::ADD, SL, MVT::i64, L, Tmp2);

  K = DAG.getNode(ISD::AND, SL, MVT::i64, K, DAG.getNOT(SL, M, MVT::i64));
  K = DAG.getNode(ISD::BITCAST, SL, MVT::f64, K);

  SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT);
  SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT);
  SDValue ExpEqNegOne = DAG.getSetCC(SL, SetCCVT, NegOne, Exp, ISD::SETEQ);

  SDValue Mag = DAG.getNode(ISD::SELECT, SL, MVT::f64,
                            ExpEqNegOne,
                            DAG.getConstantFP(1.0, SL, MVT::f64),
                            DAG.getConstantFP(0.0, SL, MVT::f64));

  SDValue S = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, Mag, X);

  K = DAG.getNode(ISD::SELECT, SL, MVT::f64, ExpLt0, S, K);
  K = DAG.getNode(ISD::SELECT, SL, MVT::f64, ExpGt51, X, K);

  return K;
}
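
// The f64 variant rounds in the integer domain instead: M masks the fraction
// bits below the integer part, D is the bit worth exactly 0.5 at this
// exponent (0x0008000000000000 >> exp), and adding D when any fraction bit
// is set rounds the magnitude half away from zero before the fraction is
// cleared. The trailing selects patch up the extremes: for exp < 0 the
// magnitude is below 1.0, so the result is copysign(1.0, x) when exp == -1
// (i.e. 0.5 <= |x| < 1.0) and copysign(0.0, x) otherwise; for exp > 51, x is
// already an integer.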
SDValue AMDGPUTargetLowering::LowerFROUND(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();

  if (VT == MVT::f32)
    return LowerFROUND32(Op, DAG);

  if (VT == MVT::f64)
    return LowerFROUND64(Op, DAG);

  llvm_unreachable("unhandled type");
}
SDValue AMDGPUTargetLowering::LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);

  // result = trunc(src);
  // if (src < 0.0 && src != result)
  //   result += -1.0

  SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);

  const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
  const SDValue NegOne = DAG.getConstantFP(-1.0, SL, MVT::f64);

  EVT SetCCVT =
      getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);

  SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOLT);
  SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
  SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);

  SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, NegOne, Zero);
  // TODO: Should this propagate fast-math-flags?
  return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
}
SDValue AMDGPUTargetLowering::LowerCTLZ(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);
  bool ZeroUndef = Op.getOpcode() == ISD::CTLZ_ZERO_UNDEF;

  if (ZeroUndef && Src.getValueType() == MVT::i32)
    return DAG.getNode(AMDGPUISD::FFBH_U32, SL, MVT::i32, Src);

  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);

  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
  const SDValue One = DAG.getConstant(1, SL, MVT::i32);

  SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
  SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);

  EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(),
                                   *DAG.getContext(), MVT::i32);

  SDValue Hi0 = DAG.getSetCC(SL, SetCCVT, Hi, Zero, ISD::SETEQ);

  SDValue CtlzLo = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SL, MVT::i32, Lo);
  SDValue CtlzHi = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SL, MVT::i32, Hi);

  const SDValue Bits32 = DAG.getConstant(32, SL, MVT::i32);
  SDValue Add = DAG.getNode(ISD::ADD, SL, MVT::i32, CtlzLo, Bits32);

  // ctlz(x) = hi_32(x) == 0 ? ctlz(lo_32(x)) + 32 : ctlz(hi_32(x))
  SDValue NewCtlz = DAG.getNode(ISD::SELECT, SL, MVT::i32, Hi0, Add, CtlzHi);

  if (!ZeroUndef) {
    // Test if the full 64-bit input is zero.

    // FIXME: DAG combines turn what should be an s_and_b64 into a v_or_b32,
    // which we probably don't want.
    SDValue Lo0 = DAG.getSetCC(SL, SetCCVT, Lo, Zero, ISD::SETEQ);
    SDValue SrcIsZero = DAG.getNode(ISD::AND, SL, SetCCVT, Lo0, Hi0);

    // TODO: If i64 setcc is half rate, it can result in 1 fewer instruction
    // with the same cycles, otherwise it is slower.
    // SDValue SrcIsZero = DAG.getSetCC(SL, SetCCVT, Src,
    //                                  DAG.getConstant(0, SL, MVT::i64),
    //                                  ISD::SETEQ);

    const SDValue Bits64 = DAG.getConstant(64, SL, MVT::i32);

    // The instruction returns -1 for 0 input, but the defined intrinsic
    // behavior is to return the number of bits.
    NewCtlz = DAG.getNode(ISD::SELECT, SL, MVT::i32,
                          SrcIsZero, Bits64, NewCtlz);
  }

  return DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i64, NewCtlz);
}
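
// For example, ctlz(0x0000000100000000): Hi = 1, so the select takes
// ctlz(Hi) = 31. For ctlz(1): Hi = 0, so it takes ctlz(Lo) + 32 = 63. A
// fully zero input is caught by the !ZeroUndef path above, which produces
// the defined answer of 64 instead of the hardware's -1.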
SDValue AMDGPUTargetLowering::LowerINT_TO_FP32(SDValue Op, SelectionDAG &DAG,
                                               bool Signed) const {
  // Unsigned
  // cul2f(ulong u)
  // {
  //   uint lz = clz(u);
  //   uint e = (u != 0) ? 127U + 63U - lz : 0;
  //   u = (u << lz) & 0x7fffffffffffffffUL;
  //   ulong t = u & 0xffffffffffUL;
  //   uint v = (e << 23) | (uint)(u >> 40);
  //   uint r = t > 0x8000000000UL ? 1U : (t == 0x8000000000UL ? v & 1U : 0U);
  //   return as_float(v + r);
  // }
  //
  // Signed
  // cl2f(long l)
  // {
  //   long s = l >> 63;
  //   float r = cul2f((l + s) ^ s);
  //   return s ? -r : r;
  // }

  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);
  SDValue L = Src;

  SDValue S;
  if (Signed) {
    const SDValue SignBit = DAG.getConstant(63, SL, MVT::i64);
    S = DAG.getNode(ISD::SRA, SL, MVT::i64, L, SignBit);

    SDValue LPlusS = DAG.getNode(ISD::ADD, SL, MVT::i64, L, S);
    L = DAG.getNode(ISD::XOR, SL, MVT::i64, LPlusS, S);
  }

  EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(),
                                   *DAG.getContext(), MVT::f32);

  SDValue ZeroI32 = DAG.getConstant(0, SL, MVT::i32);
  SDValue ZeroI64 = DAG.getConstant(0, SL, MVT::i64);
  SDValue LZ = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SL, MVT::i64, L);
  LZ = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LZ);

  SDValue K = DAG.getConstant(127U + 63U, SL, MVT::i32);
  SDValue E = DAG.getSelect(SL, MVT::i32,
                            DAG.getSetCC(SL, SetCCVT, L, ZeroI64, ISD::SETNE),
                            DAG.getNode(ISD::SUB, SL, MVT::i32, K, LZ),
                            ZeroI32);

  SDValue U = DAG.getNode(ISD::AND, SL, MVT::i64,
                          DAG.getNode(ISD::SHL, SL, MVT::i64, L, LZ),
                          DAG.getConstant((-1ULL) >> 1, SL, MVT::i64));

  SDValue T = DAG.getNode(ISD::AND, SL, MVT::i64, U,
                          DAG.getConstant(0xffffffffffULL, SL, MVT::i64));

  SDValue UShl = DAG.getNode(ISD::SRL, SL, MVT::i64,
                             U, DAG.getConstant(40, SL, MVT::i64));

  SDValue V = DAG.getNode(ISD::OR, SL, MVT::i32,
      DAG.getNode(ISD::SHL, SL, MVT::i32, E, DAG.getConstant(23, SL, MVT::i32)),
      DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, UShl));

  SDValue C = DAG.getConstant(0x8000000000ULL, SL, MVT::i64);
  SDValue RCmp = DAG.getSetCC(SL, SetCCVT, T, C, ISD::SETUGT);
  SDValue TCmp = DAG.getSetCC(SL, SetCCVT, T, C, ISD::SETEQ);

  SDValue One = DAG.getConstant(1, SL, MVT::i32);

  SDValue VTrunc1 = DAG.getNode(ISD::AND, SL, MVT::i32, V, One);

  SDValue R = DAG.getSelect(SL, MVT::i32,
                            RCmp,
                            One,
                            DAG.getSelect(SL, MVT::i32, TCmp, VTrunc1, ZeroI32));
  R = DAG.getNode(ISD::ADD, SL, MVT::i32, V, R);
  R = DAG.getNode(ISD::BITCAST, SL, MVT::f32, R);

  if (!Signed)
    return R;

  SDValue RNeg = DAG.getNode(ISD::FNEG, SL, MVT::f32, R);
  return DAG.getSelect(SL, MVT::f32, DAG.getSExtOrTrunc(S, SL, SetCCVT),
                       RNeg, R);
}
SDValue AMDGPUTargetLowering::LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG,
                                               bool Signed) const {
  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);

  SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);

  SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC,
                           DAG.getConstant(0, SL, MVT::i32));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC,
                           DAG.getConstant(1, SL, MVT::i32));

  SDValue CvtHi = DAG.getNode(Signed ? ISD::SINT_TO_FP : ISD::UINT_TO_FP,
                              SL, MVT::f64, Hi);

  SDValue CvtLo = DAG.getNode(ISD::UINT_TO_FP, SL, MVT::f64, Lo);

  SDValue LdExp = DAG.getNode(AMDGPUISD::LDEXP, SL, MVT::f64, CvtHi,
                              DAG.getConstant(32, SL, MVT::i32));
  // TODO: Should this propagate fast-math-flags?
  return DAG.getNode(ISD::FADD, SL, MVT::f64, LdExp, CvtLo);
}
SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
                                              SelectionDAG &DAG) const {
  assert(Op.getOperand(0).getValueType() == MVT::i64 &&
         "operation should be legal");

  EVT DestVT = Op.getValueType();
  if (DestVT == MVT::f64)
    return LowerINT_TO_FP64(Op, DAG, false);

  if (DestVT == MVT::f32)
    return LowerINT_TO_FP32(Op, DAG, false);

  return SDValue();
}

SDValue AMDGPUTargetLowering::LowerSINT_TO_FP(SDValue Op,
                                              SelectionDAG &DAG) const {
  assert(Op.getOperand(0).getValueType() == MVT::i64 &&
         "operation should be legal");

  EVT DestVT = Op.getValueType();
  if (DestVT == MVT::f32)
    return LowerINT_TO_FP32(Op, DAG, true);

  if (DestVT == MVT::f64)
    return LowerINT_TO_FP64(Op, DAG, true);

  return SDValue();
}
SDValue AMDGPUTargetLowering::LowerFP64_TO_INT(SDValue Op, SelectionDAG &DAG,
                                               bool Signed) const {
  SDLoc SL(Op);

  SDValue Src = Op.getOperand(0);

  SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);

  SDValue K0 = DAG.getConstantFP(BitsToDouble(UINT64_C(0x3df0000000000000)), SL,
                                 MVT::f64);
  SDValue K1 = DAG.getConstantFP(BitsToDouble(UINT64_C(0xc1f0000000000000)), SL,
                                 MVT::f64);
  // TODO: Should this propagate fast-math-flags?
  SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, Trunc, K0);

  SDValue FloorMul = DAG.getNode(ISD::FFLOOR, SL, MVT::f64, Mul);

  SDValue Fma = DAG.getNode(ISD::FMA, SL, MVT::f64, FloorMul, K1, Trunc);

  SDValue Hi = DAG.getNode(Signed ? ISD::FP_TO_SINT : ISD::FP_TO_UINT, SL,
                           MVT::i32, FloorMul);
  SDValue Lo = DAG.getNode(ISD::FP_TO_UINT, SL, MVT::i32, Fma);

  SDValue Result = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Lo, Hi);

  return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Result);
}
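
// K0 is 2^-32 and K1 is -2^32 (written as raw IEEE-754 bit patterns), so
// FloorMul = floor(trunc(Src) * 2^-32) is the high 32 bits of the result,
// and fma(FloorMul, -2^32, trunc(Src)) = trunc(Src) - Hi * 2^32 is the exact
// remainder, which fits in 32 bits and becomes the low word.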
SDValue AMDGPUTargetLowering::LowerFP_TO_SINT(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue Src = Op.getOperand(0);

  if (Op.getValueType() == MVT::i64 && Src.getValueType() == MVT::f64)
    return LowerFP64_TO_INT(Op, DAG, true);

  return SDValue();
}

SDValue AMDGPUTargetLowering::LowerFP_TO_UINT(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue Src = Op.getOperand(0);

  if (Op.getValueType() == MVT::i64 && Src.getValueType() == MVT::f64)
    return LowerFP64_TO_INT(Op, DAG, false);

  return SDValue();
}
SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                     SelectionDAG &DAG) const {
  EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
  MVT VT = Op.getSimpleValueType();
  MVT ScalarVT = VT.getScalarType();

  if (!VT.isVector())
    return SDValue();

  SDValue Src = Op.getOperand(0);
  SDLoc DL(Op);

  // TODO: Don't scalarize on Evergreen?
  unsigned NElts = VT.getVectorNumElements();
  SmallVector<SDValue, 8> Args;
  DAG.ExtractVectorElements(Src, Args, 0, NElts);

  SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType());
  for (unsigned I = 0; I < NElts; ++I)
    Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp);

  return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Args);
}
//===----------------------------------------------------------------------===//
// Custom DAG optimizations
//===----------------------------------------------------------------------===//
static bool isU24(SDValue Op, SelectionDAG &DAG) {
  APInt KnownZero, KnownOne;
  EVT VT = Op.getValueType();
  DAG.computeKnownBits(Op, KnownZero, KnownOne);

  return (VT.getSizeInBits() - KnownZero.countLeadingOnes()) <= 24;
}

static bool isI24(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();

  // In order for this to be a signed 24-bit value, bit 23 must be a sign bit.
  return VT.getSizeInBits() >= 24 && // Types less than 24-bit should be treated
                                     // as unsigned 24-bit values.
         (VT.getSizeInBits() - DAG.ComputeNumSignBits(Op)) < 24;
}
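
// For an i32 value this means ComputeNumSignBits must return at least 9:
// 32 - 9 = 23 < 24, i.e. the top nine bits are all copies of the sign bit
// and the value is representable in 24 signed bits. Likewise isU24 requires
// at least 8 known-zero leading bits.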
static void simplifyI24(SDValue Op, TargetLowering::DAGCombinerInfo &DCI) {
  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT = Op.getValueType();

  APInt Demanded = APInt::getLowBitsSet(VT.getSizeInBits(), 24);
  APInt KnownZero, KnownOne;
  TargetLowering::TargetLoweringOpt TLO(DAG, true, true);
  if (TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO))
    DCI.CommitTargetLoweringOpt(TLO);
}
template <typename IntTy>
static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0,
                               uint32_t Offset, uint32_t Width, SDLoc DL) {
  if (Width + Offset < 32) {
    uint32_t Shl = static_cast<uint32_t>(Src0) << (32 - Offset - Width);
    IntTy Result = static_cast<IntTy>(Shl) >> (32 - Width);
    return DAG.getConstant(Result, DL, MVT::i32);
  }

  return DAG.getConstant(Src0 >> Offset, DL, MVT::i32);
}
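
// The shift pair extracts the field and extends it according to IntTy
// (arithmetic shift for int32_t, logical for uint32_t). For example, folding
// BFE_I32 of 0x0000ff00 with Offset = 8, Width = 8:
//   Shl = 0x0000ff00 << 16 = 0xff000000
//   Result = (int32_t)0xff000000 >> 24 = -1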
static bool usesAllNormalStores(SDNode *LoadVal) {
  for (SDNode::use_iterator I = LoadVal->use_begin(); !I.atEnd(); ++I) {
    if (!ISD::isNormalStore(*I))
      return false;
  }

  return true;
}
// If we have a copy of an illegal type, replace it with a load / store of an
// equivalently sized legal type. This avoids intermediate bit pack / unpack
// instructions emitted when handling extloads and truncstores. Ideally we could
// recognize the pack / unpack pattern to eliminate it.
SDValue AMDGPUTargetLowering::performStoreCombine(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  if (!DCI.isBeforeLegalize())
    return SDValue();

  StoreSDNode *SN = cast<StoreSDNode>(N);
  SDValue Value = SN->getValue();
  EVT VT = Value.getValueType();

  if (isTypeLegal(VT) || SN->isVolatile() ||
      !ISD::isNormalLoad(Value.getNode()) || VT.getSizeInBits() < 8)
    return SDValue();

  LoadSDNode *LoadVal = cast<LoadSDNode>(Value);
  if (LoadVal->isVolatile() || !usesAllNormalStores(LoadVal))
    return SDValue();

  EVT MemVT = LoadVal->getMemoryVT();

  SDLoc SL(N);
  SelectionDAG &DAG = DCI.DAG;
  EVT LoadVT = getEquivalentMemType(*DAG.getContext(), MemVT);

  SDValue NewLoad = DAG.getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD,
                                LoadVT, SL,
                                LoadVal->getChain(),
                                LoadVal->getBasePtr(),
                                LoadVal->getOffset(),
                                LoadVT,
                                LoadVal->getMemOperand());

  SDValue CastLoad = DAG.getNode(ISD::BITCAST, SL, VT, NewLoad.getValue(0));
  DCI.CombineTo(LoadVal, CastLoad, NewLoad.getValue(1), false);

  return DAG.getStore(SN->getChain(), SL, NewLoad,
                      SN->getBasePtr(), SN->getMemOperand());
}
SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  if (N->getValueType(0) != MVT::i64)
    return SDValue();

  // i64 (shl x, 32) -> (build_pair 0, x)

  // Doing this with moves theoretically helps MI optimizations that understand
  // copies. 2 v_mov_b32_e32 will have the same code size / cycle count as
  // v_lshl_b64. In the SALU case, I think this is slightly worse since it
  // doubles the code size and I'm unsure about cycle count.
  const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!RHS || RHS->getZExtValue() != 32)
    return SDValue();

  SDValue LHS = N->getOperand(0);

  SDLoc SL(N);
  SelectionDAG &DAG = DCI.DAG;

  // Extract low 32-bits.
  SDValue Lo = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LHS);

  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
  return DAG.getNode(ISD::BUILD_PAIR, SL, MVT::i64, Zero, Lo);
}
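
// BUILD_PAIR takes (lo, hi), so the pair (0, lo32(x)) places the original
// low word of x in the high word of the result and zeros in the low word,
// which is exactly x << 32 without materializing a 64-bit shift.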
SDValue AMDGPUTargetLowering::performMulCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);

  if (VT.isVector() || VT.getSizeInBits() > 32)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue Mul;

  if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
    N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
    N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
    Mul = DAG.getNode(AMDGPUISD::MUL_U24, DL, MVT::i32, N0, N1);
  } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
    N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
    N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
    Mul = DAG.getNode(AMDGPUISD::MUL_I24, DL, MVT::i32, N0, N1);
  } else {
    return SDValue();
  }

  // We need to use sext even for MUL_U24, because MUL_U24 is used
  // for signed multiply of 8 and 16-bit types.
  return DAG.getSExtOrTrunc(Mul, DL, VT);
}
static bool isNegativeOne(SDValue Val) {
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val))
    return C->isAllOnesValue();
  return false;
}

static bool isCtlzOpc(unsigned Opc) {
  return Opc == ISD::CTLZ || Opc == ISD::CTLZ_ZERO_UNDEF;
}
// Get FFBH node if the incoming op may have been type legalized from a smaller
// type VT.
// Need to match pre-legalized type because the generic legalization inserts the
// add/sub between the select and compare.
static SDValue getFFBH_U32(const TargetLowering &TLI,
                           SelectionDAG &DAG, SDLoc SL, SDValue Op) {
  EVT VT = Op.getValueType();
  EVT LegalVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
  if (LegalVT != MVT::i32)
    return SDValue();

  if (VT != MVT::i32)
    Op = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Op);

  SDValue FFBH = DAG.getNode(AMDGPUISD::FFBH_U32, SL, MVT::i32, Op);
  if (VT != MVT::i32)
    FFBH = DAG.getNode(ISD::TRUNCATE, SL, VT, FFBH);

  return FFBH;
}
// The native instructions return -1 on 0 input. Optimize out a select that
// produces -1 on 0.
//
// TODO: If zero is not undef, we could also do this if the output is compared
// against the bitwidth.
//
// TODO: Should probably combine against FFBH_U32 instead of ctlz directly.
SDValue AMDGPUTargetLowering::performCtlzCombine(SDLoc SL,
                                                 SDValue Cond,
                                                 SDValue LHS,
                                                 SDValue RHS,
                                                 DAGCombinerInfo &DCI) const {
  ConstantSDNode *CmpRhs = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
  if (!CmpRhs || !CmpRhs->isNullValue())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  ISD::CondCode CCOpcode = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
  SDValue CmpLHS = Cond.getOperand(0);

  // select (setcc x, 0, eq), -1, (ctlz_zero_undef x) -> ffbh_u32 x
  if (CCOpcode == ISD::SETEQ &&
      isCtlzOpc(RHS.getOpcode()) &&
      RHS.getOperand(0) == CmpLHS &&
      isNegativeOne(LHS)) {
    return getFFBH_U32(*this, DAG, SL, CmpLHS);
  }

  // select (setcc x, 0, ne), (ctlz_zero_undef x), -1 -> ffbh_u32 x
  if (CCOpcode == ISD::SETNE &&
      isCtlzOpc(LHS.getOpcode()) &&
      LHS.getOperand(0) == CmpLHS &&
      isNegativeOne(RHS)) {
    return getFFBH_U32(*this, DAG, SL, CmpLHS);
  }

  return SDValue();
}
SDValue AMDGPUTargetLowering::performSelectCombine(SDNode *N,
                                                   DAGCombinerInfo &DCI) const {
  SDValue Cond = N->getOperand(0);
  if (Cond.getOpcode() != ISD::SETCC)
    return SDValue();

  EVT VT = N->getValueType(0);
  SDValue LHS = Cond.getOperand(0);
  SDValue RHS = Cond.getOperand(1);
  SDValue CC = Cond.getOperand(2);

  SDValue True = N->getOperand(1);
  SDValue False = N->getOperand(2);

  if (VT == MVT::f32 && Cond.hasOneUse())
    return CombineFMinMaxLegacy(SDLoc(N), VT, LHS, RHS, True, False, CC, DCI);

  // There's no reason to not do this if the condition has other uses.
  return performCtlzCombine(SDLoc(N), Cond, True, False, DCI);
}
SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  switch(N->getOpcode()) {
  default:
    break;
  case ISD::SHL: {
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
      break;

    return performShlCombine(N, DCI);
  }
  case ISD::MUL:
    return performMulCombine(N, DCI);
  case AMDGPUISD::MUL_I24:
  case AMDGPUISD::MUL_U24: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    simplifyI24(N0, DCI);
    simplifyI24(N1, DCI);
    return SDValue();
  }
  case ISD::SELECT:
    return performSelectCombine(N, DCI);
  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    assert(!N->getValueType(0).isVector() &&
           "Vector handling of BFE not implemented");
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
    if (!Width)
      break;

    uint32_t WidthVal = Width->getZExtValue() & 0x1f;
    if (WidthVal == 0)
      return DAG.getConstant(0, DL, MVT::i32);

    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (!Offset)
      break;

    SDValue BitsFrom = N->getOperand(0);
    uint32_t OffsetVal = Offset->getZExtValue() & 0x1f;

    bool Signed = N->getOpcode() == AMDGPUISD::BFE_I32;

    if (OffsetVal == 0) {
      // This is already sign / zero extended, so try to fold away extra BFEs.
      unsigned SignBits = Signed ? (32 - WidthVal + 1) : (32 - WidthVal);

      unsigned OpSignBits = DAG.ComputeNumSignBits(BitsFrom);
      if (OpSignBits >= SignBits)
        return BitsFrom;

      EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), WidthVal);
      if (Signed) {
        // This is a sign_extend_inreg. Replace it to take advantage of existing
        // DAG Combines. If not eliminated, we will match back to BFE during
        // selection.

        // TODO: The sext_inreg of extended types ends up as multiple
        // operations, although we could handle them in a single BFE.
        return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, BitsFrom,
                           DAG.getValueType(SmallVT));
      }

      return DAG.getZeroExtendInReg(BitsFrom, DL, SmallVT);
    }

    if (ConstantSDNode *CVal = dyn_cast<ConstantSDNode>(BitsFrom)) {
      if (Signed) {
        return constantFoldBFE<int32_t>(DAG,
                                        CVal->getSExtValue(),
                                        OffsetVal,
                                        WidthVal,
                                        DL);
      }

      return constantFoldBFE<uint32_t>(DAG,
                                       CVal->getZExtValue(),
                                       OffsetVal,
                                       WidthVal,
                                       DL);
    }

    if ((OffsetVal + WidthVal) >= 32) {
      SDValue ShiftVal = DAG.getConstant(OffsetVal, DL, MVT::i32);
      return DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, MVT::i32,
                         BitsFrom, ShiftVal);
    }

    if (BitsFrom.hasOneUse()) {
      APInt Demanded = APInt::getBitsSet(32,
                                         OffsetVal,
                                         OffsetVal + WidthVal);

      APInt KnownZero, KnownOne;
      TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                            !DCI.isBeforeLegalizeOps());
      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
      if (TLO.ShrinkDemandedConstant(BitsFrom, Demanded) ||
          TLI.SimplifyDemandedBits(BitsFrom, Demanded,
                                   KnownZero, KnownOne, TLO)) {
        DCI.CommitTargetLoweringOpt(TLO);
      }
    }

    break;
  }

  case ISD::STORE:
    return performStoreCombine(N, DCI);
  }
  return SDValue();
}
//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//

void AMDGPUTargetLowering::getOriginalFunctionArgs(
                               SelectionDAG &DAG,
                               const Function *F,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               SmallVectorImpl<ISD::InputArg> &OrigIns) const {

  for (unsigned i = 0, e = Ins.size(); i < e; ++i) {
    if (Ins[i].ArgVT == Ins[i].VT) {
      OrigIns.push_back(Ins[i]);
      continue;
    }

    EVT VT;
    if (Ins[i].ArgVT.isVector() && !Ins[i].VT.isVector()) {
      // Vector has been split into scalars.
      VT = Ins[i].ArgVT.getVectorElementType();
    } else if (Ins[i].VT.isVector() && Ins[i].ArgVT.isVector() &&
               Ins[i].ArgVT.getVectorElementType() !=
               Ins[i].VT.getVectorElementType()) {
      // Vector elements have been promoted.
      VT = Ins[i].ArgVT;
    } else {
      // Vector has been split into smaller vectors.
      VT = Ins[i].VT;
    }

    ISD::InputArg Arg(Ins[i].Flags, VT, VT, Ins[i].Used,
                      Ins[i].OrigArgIndex, Ins[i].PartOffset);
    OrigIns.push_back(Arg);
  }
}
bool AMDGPUTargetLowering::isHWTrueValue(SDValue Op) const {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->isExactlyValue(1.0);
  }
  return isAllOnesConstant(Op);
}

bool AMDGPUTargetLowering::isHWFalseValue(SDValue Op) const {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->getValueAPF().isZero();
  }
  return isNullConstant(Op);
}
SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
                                                   const TargetRegisterClass *RC,
                                                   unsigned Reg, EVT VT) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned VirtualRegister;
  if (!MRI.isLiveIn(Reg)) {
    VirtualRegister = MRI.createVirtualRegister(RC);
    MRI.addLiveIn(Reg, VirtualRegister);
  } else {
    VirtualRegister = MRI.getLiveInVirtReg(Reg);
  }
  return DAG.getRegister(VirtualRegister, VT);
}
uint32_t AMDGPUTargetLowering::getImplicitParameterOffset(
    const AMDGPUMachineFunction *MFI, const ImplicitParameter Param) const {
  uint64_t ArgOffset = MFI->ABIArgOffset;
  switch (Param) {
  case GRID_DIM:
    return ArgOffset;
  case GRID_OFFSET:
    return ArgOffset + 4;
  }
  llvm_unreachable("unexpected implicit parameter type");
}
#define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;

const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((AMDGPUISD::NodeType)Opcode) {
  case AMDGPUISD::FIRST_NUMBER: break;
  // AMDIL DAG nodes
  NODE_NAME_CASE(CALL);
  NODE_NAME_CASE(UMUL);
  NODE_NAME_CASE(RET_FLAG);
  NODE_NAME_CASE(BRANCH_COND);

  // AMDGPU DAG nodes
  NODE_NAME_CASE(DWORDADDR)
  NODE_NAME_CASE(FRACT)
  NODE_NAME_CASE(CLAMP)
  NODE_NAME_CASE(COS_HW)
  NODE_NAME_CASE(SIN_HW)
  NODE_NAME_CASE(FMAX_LEGACY)
  NODE_NAME_CASE(FMIN_LEGACY)
  NODE_NAME_CASE(FMAX3)
  NODE_NAME_CASE(SMAX3)
  NODE_NAME_CASE(UMAX3)
  NODE_NAME_CASE(FMIN3)
  NODE_NAME_CASE(SMIN3)
  NODE_NAME_CASE(UMIN3)
  NODE_NAME_CASE(URECIP)
  NODE_NAME_CASE(DIV_SCALE)
  NODE_NAME_CASE(DIV_FMAS)
  NODE_NAME_CASE(DIV_FIXUP)
  NODE_NAME_CASE(TRIG_PREOP)
  NODE_NAME_CASE(RCP)
  NODE_NAME_CASE(RSQ)
  NODE_NAME_CASE(RSQ_LEGACY)
  NODE_NAME_CASE(RSQ_CLAMPED)
  NODE_NAME_CASE(LDEXP)
  NODE_NAME_CASE(FP_CLASS)
  NODE_NAME_CASE(DOT4)
  NODE_NAME_CASE(CARRY)
  NODE_NAME_CASE(BORROW)
  NODE_NAME_CASE(BFE_U32)
  NODE_NAME_CASE(BFE_I32)
  NODE_NAME_CASE(BFI)
  NODE_NAME_CASE(BFM)
  NODE_NAME_CASE(FFBH_U32)
  NODE_NAME_CASE(MUL_U24)
  NODE_NAME_CASE(MUL_I24)
  NODE_NAME_CASE(MAD_U24)
  NODE_NAME_CASE(MAD_I24)
  NODE_NAME_CASE(TEXTURE_FETCH)
  NODE_NAME_CASE(EXPORT)
  NODE_NAME_CASE(CONST_ADDRESS)
  NODE_NAME_CASE(REGISTER_LOAD)
  NODE_NAME_CASE(REGISTER_STORE)
  NODE_NAME_CASE(LOAD_CONSTANT)
  NODE_NAME_CASE(LOAD_INPUT)
  NODE_NAME_CASE(SAMPLE)
  NODE_NAME_CASE(SAMPLEB)
  NODE_NAME_CASE(SAMPLED)
  NODE_NAME_CASE(SAMPLEL)
  NODE_NAME_CASE(CVT_F32_UBYTE0)
  NODE_NAME_CASE(CVT_F32_UBYTE1)
  NODE_NAME_CASE(CVT_F32_UBYTE2)
  NODE_NAME_CASE(CVT_F32_UBYTE3)
  NODE_NAME_CASE(BUILD_VERTICAL_VECTOR)
  NODE_NAME_CASE(CONST_DATA_PTR)
  case AMDGPUISD::FIRST_MEM_OPCODE_NUMBER: break;
  NODE_NAME_CASE(SENDMSG)
  NODE_NAME_CASE(INTERP_MOV)
  NODE_NAME_CASE(INTERP_P1)
  NODE_NAME_CASE(INTERP_P2)
  NODE_NAME_CASE(STORE_MSKOR)
  NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
  case AMDGPUISD::LAST_AMDGPU_ISD_NUMBER: break;
  }
  return nullptr;
}
SDValue AMDGPUTargetLowering::getRsqrtEstimate(SDValue Operand,
                                               DAGCombinerInfo &DCI,
                                               unsigned &RefinementSteps,
                                               bool &UseOneConstNR) const {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = Operand.getValueType();

  if (VT == MVT::f32) {
    RefinementSteps = 0;
    return DAG.getNode(AMDGPUISD::RSQ, SDLoc(Operand), VT, Operand);
  }

  // TODO: There is also f64 rsq instruction, but the documentation is less
  // clear on its precision.

  return SDValue();
}
SDValue AMDGPUTargetLowering::getRecipEstimate(SDValue Operand,
                                               DAGCombinerInfo &DCI,
                                               unsigned &RefinementSteps) const {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = Operand.getValueType();

  if (VT == MVT::f32) {
    // Reciprocal, < 1 ulp error.
    //
    // This reciprocal approximation converges to < 0.5 ulp error with one
    // Newton-Raphson step performed with two fused multiply-adds (FMAs).

    RefinementSteps = 0;
    return DAG.getNode(AMDGPUISD::RCP, SDLoc(Operand), VT, Operand);
  }

  // TODO: There is also f64 rcp instruction, but the documentation is less
  // clear on its precision.

  return SDValue();
}
static void computeKnownBitsForMinMax(const SDValue Op0,
                                      const SDValue Op1,
                                      APInt &KnownZero,
                                      APInt &KnownOne,
                                      const SelectionDAG &DAG,
                                      unsigned Depth) {
  APInt Op0Zero, Op0One;
  APInt Op1Zero, Op1One;
  DAG.computeKnownBits(Op0, Op0Zero, Op0One, Depth);
  DAG.computeKnownBits(Op1, Op1Zero, Op1One, Depth);

  // Only bits known in both operands are known in the result.
  KnownZero = Op0Zero & Op1Zero;
  KnownOne = Op0One & Op1One;
}
void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
  const SDValue Op,
  APInt &KnownZero,
  APInt &KnownOne,
  const SelectionDAG &DAG,
  unsigned Depth) const {
  KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0); // Don't know anything.

  unsigned Opc = Op.getOpcode();

  switch (Opc) {
  default:
    break;
  case ISD::INTRINSIC_WO_CHAIN: {
    // FIXME: The intrinsic should just use the node.
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
    case AMDGPUIntrinsic::AMDGPU_imax:
    case AMDGPUIntrinsic::AMDGPU_umax:
    case AMDGPUIntrinsic::AMDGPU_imin:
    case AMDGPUIntrinsic::AMDGPU_umin:
      computeKnownBitsForMinMax(Op.getOperand(1), Op.getOperand(2),
                                KnownZero, KnownOne, DAG, Depth);
      break;
    default:
      break;
    }
    break;
  }
  case AMDGPUISD::CARRY:
  case AMDGPUISD::BORROW: {
    // These produce either 0 or 1, so only the low bit may be set.
    KnownZero = APInt::getHighBitsSet(32, 31);
    break;
  }
  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    ConstantSDNode *CWidth = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    if (!CWidth)
      return;

    unsigned BitWidth = 32;
    uint32_t Width = CWidth->getZExtValue() & 0x1f;

    if (Opc == AMDGPUISD::BFE_U32)
      KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - Width);
    break;
  }
  }
}
unsigned AMDGPUTargetLowering::ComputeNumSignBitsForTargetNode(
  SDValue Op,
  const SelectionDAG &DAG,
  unsigned Depth) const {
  switch (Op.getOpcode()) {
  case AMDGPUISD::BFE_I32: {
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    if (!Width)
      return 1;

    unsigned SignBits = 32 - Width->getZExtValue() + 1;
    if (!isNullConstant(Op.getOperand(1)))
      return SignBits;

    // TODO: Could probably figure something out with non-0 offsets.
    unsigned Op0SignBits = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
    return std::max(SignBits, Op0SignBits);
  }

  case AMDGPUISD::BFE_U32: {
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    return Width ? 32 - (Width->getZExtValue() & 0x1f) : 1;
  }

  case AMDGPUISD::CARRY:
  case AMDGPUISD::BORROW: