//===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This is the parent TargetLowering class for hardware code gen
/// targets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUISelLowering.h"
#include "AMDGPUDiagnosticInfoUnsupported.h"
#include "AMDGPUFrameLowering.h"
#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "R600MachineFunctionInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DataLayout.h"

using namespace llvm;

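// Helper with the CCAssignFn signature: allocates a stack slot for the
// argument and records its location. It is defined before the generated
// include below, which presumably references it via a CCCustom entry in the
// calling convention TableGen definitions.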
static bool allocateStack(unsigned ValNo, MVT ValVT, MVT LocVT,
                          CCValAssign::LocInfo LocInfo,
                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
  unsigned Offset = State.AllocateStack(ValVT.getStoreSize(),
                                        ArgFlags.getOrigAlign());
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));

  return true;
}

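// TableGen-generated calling convention implementations; among other things
// this provides CC_AMDGPU, used by AnalyzeFormalArguments below.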
#include "AMDGPUGenCallingConv.inc"

// Find a larger type to do a load / store of a vector with.
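// For example, a <4 x i16> value (64 bits) is handled as v2i32, while a
// sub-32-bit type such as <2 x i8> becomes a single i16.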
EVT AMDGPUTargetLowering::getEquivalentMemType(LLVMContext &Ctx, EVT VT) {
  unsigned StoreSize = VT.getStoreSizeInBits();
  if (StoreSize <= 32)
    return EVT::getIntegerVT(Ctx, StoreSize);

  assert(StoreSize % 32 == 0 && "Store size not a multiple of 32");
  return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
}

// Type for the register a vector will be loaded into.
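// For example, a <2 x i8> load (16 bits) is done in a full i32 register.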
EVT AMDGPUTargetLowering::getEquivalentLoadRegType(LLVMContext &Ctx, EVT VT) {
  unsigned StoreSize = VT.getStoreSizeInBits();
  if (StoreSize <= 32)
    return EVT::getIntegerVT(Ctx, 32);

  return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
}

AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM,
                                           const AMDGPUSubtarget &STI)
  : TargetLowering(TM), Subtarget(&STI) {
  setOperationAction(ISD::Constant, MVT::i32, Legal);
  setOperationAction(ISD::Constant, MVT::i64, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f64, Legal);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRIND, MVT::Other, Expand);

  // This is totally unsupported, just custom lower to produce an error.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);

  // We need to custom lower some of the intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // Library functions. These default to Expand, but we have instructions
  // for them.
  setOperationAction(ISD::FCEIL, MVT::f32, Legal);
  setOperationAction(ISD::FEXP2, MVT::f32, Legal);
  setOperationAction(ISD::FPOW, MVT::f32, Legal);
  setOperationAction(ISD::FLOG2, MVT::f32, Legal);
  setOperationAction(ISD::FABS, MVT::f32, Legal);
  setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
  setOperationAction(ISD::FRINT, MVT::f32, Legal);
  setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
  setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
  setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);

  setOperationAction(ISD::FROUND, MVT::f32, Custom);
  setOperationAction(ISD::FROUND, MVT::f64, Custom);

  setOperationAction(ISD::FREM, MVT::f32, Custom);
  setOperationAction(ISD::FREM, MVT::f64, Custom);

  // v_mad_f32 does not support denormals according to some sources.
  if (!Subtarget->hasFP32Denormals())
    setOperationAction(ISD::FMAD, MVT::f32, Legal);

  // Expand to fneg + fadd.
  setOperationAction(ISD::FSUB, MVT::f64, Expand);

  // Lower floating point store/load to integer store/load to reduce the number
  // of patterns in tablegen.
  setOperationAction(ISD::STORE, MVT::f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);

  setOperationAction(ISD::STORE, MVT::v2f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v4f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::v8f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::STORE, MVT::v16f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::STORE, MVT::f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::f64, MVT::i64);

  setOperationAction(ISD::STORE, MVT::v2f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f64, MVT::v2i64);

  // Custom lowering of vector stores is required for local address space
  // stores.
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);

  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Custom);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Custom);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Custom);

  // XXX: This can be changed to Custom, once ExpandVectorStores can
  // handle 64-bit stores.
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);

  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i64, MVT::i1, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
  setTruncStoreAction(MVT::v4i64, MVT::v4i1, Expand);

  setOperationAction(ISD::LOAD, MVT::f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);

  setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::LOAD, MVT::f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f64, MVT::i64);

  setOperationAction(ISD::LOAD, MVT::v2f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f64, MVT::v2i64);

  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i32, Custom);

  // There are no 64-bit extloads. These should be done as a 32-bit extload and
  // an extension to 64-bit.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, MVT::i64, VT, Expand);
    setLoadExtAction(ISD::SEXTLOAD, MVT::i64, VT, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, VT, Expand);
  }

  for (MVT VT : MVT::integer_vector_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i8, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i8, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i16, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i16, Expand);
  }

  setOperationAction(ISD::BR_CC, MVT::i1, Expand);

  if (Subtarget->getGeneration() < AMDGPUSubtarget::SEA_ISLANDS) {
    setOperationAction(ISD::FCEIL, MVT::f64, Custom);
    setOperationAction(ISD::FTRUNC, MVT::f64, Custom);
    setOperationAction(ISD::FRINT, MVT::f64, Custom);
    setOperationAction(ISD::FFLOOR, MVT::f64, Custom);
  }

  if (!Subtarget->hasBFI()) {
    // fcopysign can be done in a single instruction with BFI.
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  }

  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8f16, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f16, Expand);

  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::v2f32, MVT::v2f16, Expand);
  setTruncStoreAction(MVT::v4f32, MVT::v4f16, Expand);
  setTruncStoreAction(MVT::v8f32, MVT::v8f16, Expand);

  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);

    // GPU does not have divrem function for signed or unsigned.
    setOperationAction(ISD::SDIVREM, VT, Custom);
    setOperationAction(ISD::UDIVREM, VT, Custom);

    // GPU does not have [S|U]MUL_LOHI functions as a single instruction.
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);

    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
  }

  if (!Subtarget->hasBCNT(32))
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);

  if (!Subtarget->hasBCNT(64))
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);

  // The hardware supports 32-bit ROTR, but not ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i64, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  setOperationAction(ISD::MUL, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i64, Expand);
  setOperationAction(ISD::MULHS, MVT::i64, Expand);
  setOperationAction(ISD::UDIV, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);

  setOperationAction(ISD::SMIN, MVT::i32, Legal);
  setOperationAction(ISD::UMIN, MVT::i32, Legal);
  setOperationAction(ISD::SMAX, MVT::i32, Legal);
  setOperationAction(ISD::UMAX, MVT::i32, Legal);

  if (!Subtarget->hasFFBH())
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);

  if (!Subtarget->hasFFBL())
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);

  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);

  setOperationAction(ISD::CTLZ, MVT::i64, Custom);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);

  static const MVT::SimpleValueType VectorIntTypes[] = {
    MVT::v2i32, MVT::v4i32
  };

  for (MVT VT : VectorIntTypes) {
    // Expand the following operations for the current type by default.
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::AND, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::OR, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Custom);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::ADDC, VT, Expand);
    setOperationAction(ISD::SUBC, VT, Expand);
    setOperationAction(ISD::ADDE, VT, Expand);
    setOperationAction(ISD::SUBE, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    setOperationAction(ISD::XOR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
  }

  static const MVT::SimpleValueType FloatVectorTypes[] = {
    MVT::v2f32, MVT::v4f32
  };

  for (MVT VT : FloatVectorTypes) {
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FMINNUM, VT, Expand);
    setOperationAction(ISD::FMAXNUM, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FREM, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::FMA, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::FNEG, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
  }

  setOperationAction(ISD::FNEARBYINT, MVT::f32, Custom);
  setOperationAction(ISD::FNEARBYINT, MVT::f64, Custom);

  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::SELECT);
  setTargetDAGCombine(ISD::SELECT_CC);
  setTargetDAGCombine(ISD::STORE);

  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);

  setBooleanContents(ZeroOrNegativeOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  setSchedulingPreference(Sched::RegPressure);
  setJumpIsExpensive(true);

  // SI at least has hardware support for floating point exceptions, but no way
  // of using or handling them is implemented. They are also optional in OpenCL
  // (Section 7.3).
  setHasFloatingPointExceptions(false);

  setSelectIsExpensive(false);
  PredictableSelectIsExpensive = false;

  setFsqrtIsCheap(true);

  // We want to find all load dependencies for long chains of stores to enable
  // merging into very wide vectors. The problem is with vectors with > 4
  // elements. MergeConsecutiveStores will attempt to merge these because x8/x16
  // vectors are a legal type, even though we have to split the loads
  // usually. When we can more precisely specify load legality per address
  // space, we should be able to make FindBetterChain/MergeConsecutiveStores
  // smarter so that they can figure out what to do in 2 iterations without all
  // N > 4 stores on the same chain.
  GatherAllAliasesMaxDepth = 16;

  // FIXME: Need to really handle these.
  MaxStoresPerMemcpy = 4096;
  MaxStoresPerMemmove = 4096;
  MaxStoresPerMemset = 4096;
}

//===----------------------------------------------------------------------===//
// Target Information
//===----------------------------------------------------------------------===//

MVT AMDGPUTargetLowering::getVectorIdxTy(const DataLayout &) const {
  return MVT::i32;
}

bool AMDGPUTargetLowering::isSelectSupported(SelectSupportKind SelType) const {
  return true;
}

// The backend supports 32 and 64 bit floating point immediates.
// FIXME: Why are we reporting vectors of FP immediates as legal?
bool AMDGPUTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  EVT ScalarVT = VT.getScalarType();
  return (ScalarVT == MVT::f32 || ScalarVT == MVT::f64);
}

// We don't want to shrink f64 / f32 constants.
bool AMDGPUTargetLowering::ShouldShrinkFPConstant(EVT VT) const {
  EVT ScalarVT = VT.getScalarType();
  return (ScalarVT != MVT::f32 && ScalarVT != MVT::f64);
}

bool AMDGPUTargetLowering::shouldReduceLoadWidth(SDNode *N,
                                                 ISD::LoadExtType,
                                                 EVT NewVT) const {

  unsigned NewSize = NewVT.getStoreSizeInBits();

  // If we are reducing to a 32-bit load, this is always better.
  if (NewSize == 32)
    return true;

  EVT OldVT = N->getValueType(0);
  unsigned OldSize = OldVT.getStoreSizeInBits();

  // Don't produce extloads from sub 32-bit types. SI doesn't have scalar
  // extloads, so doing one requires using a buffer_load. In cases where we
  // still couldn't use a scalar load, using the wider load shouldn't really
  // matter.

  // If the old size already had to be an extload, there's no harm in continuing
  // to reduce the width.
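  // For example, narrowing an i64 load to an i16 extload is rejected
  // (OldSize == 64), while reducing any load to 32 bits is always accepted.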
  return (OldSize < 32);
}

bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy,
                                                   EVT CastTy) const {
  if (LoadTy.getSizeInBits() != CastTy.getSizeInBits())
    return true;

  unsigned LScalarSize = LoadTy.getScalarType().getSizeInBits();
  unsigned CastScalarSize = CastTy.getScalarType().getSizeInBits();

  return ((LScalarSize <= CastScalarSize) ||
          (CastScalarSize >= 32) ||
          (LScalarSize < 32));
}

// SI+ has instructions for cttz / ctlz for 32-bit values. This is probably also
// profitable with the expansion for 64-bit since it's generally good to
// speculate things.
// FIXME: These should really have the size as a parameter.
bool AMDGPUTargetLowering::isCheapToSpeculateCttz() const {
  return true;
}

bool AMDGPUTargetLowering::isCheapToSpeculateCtlz() const {
  return true;
}

//===---------------------------------------------------------------------===//
// Target Properties
//===---------------------------------------------------------------------===//

bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32 || VT == MVT::f64;
}

bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32 || VT == MVT::f64;
}

bool AMDGPUTargetLowering::storeOfVectorConstantIsCheap(EVT MemVT,
                                                        unsigned NumElem,
                                                        unsigned AS) const {
  return true;
}

bool AMDGPUTargetLowering::aggressivelyPreferBuildVectorSources(EVT VecVT) const {
  // There are few operations which truly have vector input operands. Any vector
  // operation is going to involve operations on each component, and a
  // build_vector will be a copy per element, so it always makes sense to use a
  // build_vector input in place of the extracted element to avoid a copy into a
  // local variable.

  // We should probably only do this if all users are extracts only, but this
  // should be the common case.
  return true;
}

bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
  // Truncate is just accessing a subregister.
  return Dest.bitsLT(Source) && (Dest.getSizeInBits() % 32 == 0);
}

bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
  // Truncate is just accessing a subregister.
  return Dest->getPrimitiveSizeInBits() < Source->getPrimitiveSizeInBits() &&
         (Dest->getPrimitiveSizeInBits() % 32 == 0);
}

bool AMDGPUTargetLowering::isZExtFree(Type *Src, Type *Dest) const {
  unsigned SrcSize = Src->getScalarSizeInBits();
  unsigned DestSize = Dest->getScalarSizeInBits();

  return SrcSize == 32 && DestSize == 64;
}

bool AMDGPUTargetLowering::isZExtFree(EVT Src, EVT Dest) const {
  // Any register load of a 64-bit value really requires 2 32-bit moves. For all
  // practical purposes, the extra mov 0 to load a 64-bit is free. As used,
  // this will enable reducing 64-bit operations to 32-bit, which is always
  // good.
  return Src == MVT::i32 && Dest == MVT::i64;
}

bool AMDGPUTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  return isZExtFree(Val.getValueType(), VT2);
}

bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
  // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
  // limited number of native 64-bit operations. Shrinking an operation to fit
  // in a single 32-bit register should always be helpful. As currently used,
  // this is much less general than the name suggests, and is only used in
  // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits is
  // not profitable, and may actually be harmful.
  return SrcVT.getSizeInBits() > 32 && DestVT.getSizeInBits() == 32;
}

//===---------------------------------------------------------------------===//
// TargetLowering Callbacks
//===---------------------------------------------------------------------===//

void AMDGPUTargetLowering::AnalyzeFormalArguments(CCState &State,
                             const SmallVectorImpl<ISD::InputArg> &Ins) const {

  State.AnalyzeFormalArguments(Ins, CC_AMDGPU);
}

SDValue AMDGPUTargetLowering::LowerReturn(
                                     SDValue Chain,
                                     CallingConv::ID CallConv,
                                     bool isVarArg,
                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
                                     const SmallVectorImpl<SDValue> &OutVals,
                                     SDLoc DL, SelectionDAG &DAG) const {
  return DAG.getNode(AMDGPUISD::RET_FLAG, DL, MVT::Other, Chain);
}

//===---------------------------------------------------------------------===//
// Target specific lowering
//===---------------------------------------------------------------------===//

SDValue AMDGPUTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                        SmallVectorImpl<SDValue> &InVals) const {
  SDValue Callee = CLI.Callee;
  SelectionDAG &DAG = CLI.DAG;

  const Function &Fn = *DAG.getMachineFunction().getFunction();

  StringRef FuncName("<unknown>");

  if (const ExternalSymbolSDNode *G = dyn_cast<ExternalSymbolSDNode>(Callee))
    FuncName = G->getSymbol();
  else if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    FuncName = G->getGlobal()->getName();

  DiagnosticInfoUnsupported NoCalls(Fn, "call to function " + FuncName);
  DAG.getContext()->diagnose(NoCalls);
  return SDValue();
}

SDValue AMDGPUTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                                      SelectionDAG &DAG) const {
  const Function &Fn = *DAG.getMachineFunction().getFunction();

  DiagnosticInfoUnsupported NoDynamicAlloca(Fn, "dynamic alloca");
  DAG.getContext()->diagnose(NoDynamicAlloca);
  return SDValue();
}

SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op,
                                             SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    Op.getNode()->dump();
    llvm_unreachable("Custom lowering code for this "
                     "instruction is not implemented yet!");
    break;
  case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
  case ISD::FrameIndex: return LowerFrameIndex(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
  case ISD::SDIVREM: return LowerSDIVREM(Op, DAG);
  case ISD::FREM: return LowerFREM(Op, DAG);
  case ISD::FCEIL: return LowerFCEIL(Op, DAG);
  case ISD::FTRUNC: return LowerFTRUNC(Op, DAG);
  case ISD::FRINT: return LowerFRINT(Op, DAG);
  case ISD::FNEARBYINT: return LowerFNEARBYINT(Op, DAG);
  case ISD::FROUND: return LowerFROUND(Op, DAG);
  case ISD::FFLOOR: return LowerFFLOOR(Op, DAG);
  case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
  case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
  case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF:
    return LowerCTLZ(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
  }
  return Op;
}

void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N,
                                              SmallVectorImpl<SDValue> &Results,
                                              SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::SIGN_EXTEND_INREG:
    // Different parts of legalization seem to interpret which type of
    // sign_extend_inreg is the one to check for custom lowering. The extended
    // from type is what really matters, but some places check for custom
    // lowering of the result type. This results in trying to use
    // ReplaceNodeResults to sext_in_reg to an illegal type, so we'll just do
    // nothing here and let the illegal result integer be handled normally.
    return;
  case ISD::LOAD: {
    SDNode *Node = LowerLOAD(SDValue(N, 0), DAG).getNode();
    if (!Node)
      return;

    Results.push_back(SDValue(Node, 0));
    Results.push_back(SDValue(Node, 1));
    // XXX: LLVM seems not to replace Chain Value inside CustomWidenLowerNode
    // function
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), SDValue(Node, 1));
    return;
  }
  case ISD::STORE: {
    SDValue Lowered = LowerSTORE(SDValue(N, 0), DAG);
    if (Lowered.getNode())
      Results.push_back(Lowered);
    return;
  }
  default:
    return;
  }
}

// FIXME: This implements accesses to initialized globals in the constant
// address space by copying them to private and accessing that. It does not
// properly handle illegal types or vectors. The private vector loads are not
// scalarized, and the illegal scalars hit an assertion. This technique will not
// work well with large initializers, and this should eventually be
// removed. Initialized globals should be placed into a data section that the
// runtime will load into a buffer before the kernel is executed. Uses of the
// global need to be replaced with a pointer loaded from an implicit kernel
// argument into this buffer holding the copy of the data, which will remove the
// need for any of this.
SDValue AMDGPUTargetLowering::LowerConstantInitializer(const Constant* Init,
                                                       const GlobalValue *GV,
                                                       const SDValue &InitPtr,
                                                       SDValue Chain,
                                                       SelectionDAG &DAG) const {
  const DataLayout &TD = DAG.getDataLayout();
  SDLoc DL(InitPtr);
  Type *InitTy = Init->getType();

  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Init)) {
    EVT VT = EVT::getEVT(InitTy);
    PointerType *PtrTy = PointerType::get(InitTy, AMDGPUAS::PRIVATE_ADDRESS);
    return DAG.getStore(Chain, DL, DAG.getConstant(*CI, DL, VT), InitPtr,
                        MachinePointerInfo(UndefValue::get(PtrTy)), false,
                        false, TD.getPrefTypeAlignment(InitTy));
  }

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(Init)) {
    EVT VT = EVT::getEVT(CFP->getType());
    PointerType *PtrTy = PointerType::get(CFP->getType(), 0);
    return DAG.getStore(Chain, DL, DAG.getConstantFP(*CFP, DL, VT), InitPtr,
                        MachinePointerInfo(UndefValue::get(PtrTy)), false,
                        false, TD.getPrefTypeAlignment(CFP->getType()));
  }

  if (StructType *ST = dyn_cast<StructType>(InitTy)) {
    const StructLayout *SL = TD.getStructLayout(ST);

    EVT PtrVT = InitPtr.getValueType();
    SmallVector<SDValue, 8> Chains;

    for (unsigned I = 0, N = ST->getNumElements(); I != N; ++I) {
      SDValue Offset = DAG.getConstant(SL->getElementOffset(I), DL, PtrVT);
      SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, InitPtr, Offset);

      Constant *Elt = Init->getAggregateElement(I);
      Chains.push_back(LowerConstantInitializer(Elt, GV, Ptr, Chain, DAG));
    }

    return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  }

  if (SequentialType *SeqTy = dyn_cast<SequentialType>(InitTy)) {
    EVT PtrVT = InitPtr.getValueType();

    unsigned NumElements;
    if (ArrayType *AT = dyn_cast<ArrayType>(SeqTy))
      NumElements = AT->getNumElements();
    else if (VectorType *VT = dyn_cast<VectorType>(SeqTy))
      NumElements = VT->getNumElements();
    else
      llvm_unreachable("Unexpected type");

    unsigned EltSize = TD.getTypeAllocSize(SeqTy->getElementType());
    SmallVector<SDValue, 8> Chains;
    for (unsigned i = 0; i < NumElements; ++i) {
      SDValue Offset = DAG.getConstant(i * EltSize, DL, PtrVT);
      SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, InitPtr, Offset);

      Constant *Elt = Init->getAggregateElement(i);
      Chains.push_back(LowerConstantInitializer(Elt, GV, Ptr, Chain, DAG));
    }

    return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  }

  if (isa<UndefValue>(Init)) {
    EVT VT = EVT::getEVT(InitTy);
    PointerType *PtrTy = PointerType::get(InitTy, AMDGPUAS::PRIVATE_ADDRESS);
    return DAG.getStore(Chain, DL, DAG.getUNDEF(VT), InitPtr,
                        MachinePointerInfo(UndefValue::get(PtrTy)), false,
                        false, TD.getPrefTypeAlignment(InitTy));
  }

  llvm_unreachable("Unhandled constant initializer");
}

static bool hasDefinedInitializer(const GlobalValue *GV) {
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar || !GVar->hasInitializer())
    return false;

  if (isa<UndefValue>(GVar->getInitializer()))
    return false;

  return true;
}

SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
                                                 SDValue Op,
                                                 SelectionDAG &DAG) const {

  const DataLayout &DL = DAG.getDataLayout();
  GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = G->getGlobal();

  switch (G->getAddressSpace()) {
  case AMDGPUAS::LOCAL_ADDRESS: {
    // XXX: What does the value of G->getOffset() mean?
    assert(G->getOffset() == 0 &&
           "Do not know what to do with a non-zero offset");

    // TODO: We could emit code to handle the initialization somewhere.
    if (hasDefinedInitializer(GV))
      break;

    unsigned Offset;
    if (MFI->LocalMemoryObjects.count(GV) == 0) {
      uint64_t Size = DL.getTypeAllocSize(GV->getType()->getElementType());
      Offset = MFI->LDSSize;
      MFI->LocalMemoryObjects[GV] = Offset;
      // XXX: Account for alignment?
      MFI->LDSSize += Size;
    } else {
      Offset = MFI->LocalMemoryObjects[GV];
    }

    return DAG.getConstant(Offset, SDLoc(Op),
                           getPointerTy(DL, AMDGPUAS::LOCAL_ADDRESS));
  }
  case AMDGPUAS::CONSTANT_ADDRESS: {
    MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
    Type *EltType = GV->getType()->getElementType();
    unsigned Size = DL.getTypeAllocSize(EltType);
    unsigned Alignment = DL.getPrefTypeAlignment(EltType);

    MVT PrivPtrVT = getPointerTy(DL, AMDGPUAS::PRIVATE_ADDRESS);
    MVT ConstPtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS);

    int FI = FrameInfo->CreateStackObject(Size, Alignment, false);
    SDValue InitPtr = DAG.getFrameIndex(FI, PrivPtrVT);

    const GlobalVariable *Var = cast<GlobalVariable>(GV);
    if (!Var->hasInitializer()) {
      // This has no use, but bugpoint will hit it.
      return DAG.getZExtOrTrunc(InitPtr, SDLoc(Op), ConstPtrVT);
    }

    const Constant *Init = Var->getInitializer();
    SmallVector<SDNode*, 8> WorkList;

    for (SDNode::use_iterator I = DAG.getEntryNode()->use_begin(),
                              E = DAG.getEntryNode()->use_end(); I != E; ++I) {
      if (I->getOpcode() != AMDGPUISD::REGISTER_LOAD && I->getOpcode() != ISD::LOAD)
        continue;
      WorkList.push_back(*I);
    }
    SDValue Chain = LowerConstantInitializer(Init, GV, InitPtr, DAG.getEntryNode(), DAG);
    for (SmallVector<SDNode*, 8>::iterator I = WorkList.begin(),
                                           E = WorkList.end(); I != E; ++I) {
      SmallVector<SDValue, 8> Ops;
      Ops.push_back(Chain);
      for (unsigned i = 1; i < (*I)->getNumOperands(); ++i) {
        Ops.push_back((*I)->getOperand(i));
      }
      DAG.UpdateNodeOperands(*I, Ops);
    }
    return DAG.getZExtOrTrunc(InitPtr, SDLoc(Op), ConstPtrVT);
  }
  }

  const Function &Fn = *DAG.getMachineFunction().getFunction();
  DiagnosticInfoUnsupported BadInit(Fn,
                                    "initializer for address space");
  DAG.getContext()->diagnose(BadInit);
  return SDValue();
}

SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SmallVector<SDValue, 8> Args;

  for (const SDUse &U : Op->ops())
    DAG.ExtractVectorElements(U.get(), Args);

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(), Args);
}

SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {

  SmallVector<SDValue, 8> Args;
  unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  EVT VT = Op.getValueType();
  DAG.ExtractVectorElements(Op.getOperand(0), Args, Start,
                            VT.getVectorNumElements());

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(), Args);
}

SDValue AMDGPUTargetLowering::LowerFrameIndex(SDValue Op,
                                              SelectionDAG &DAG) const {

  MachineFunction &MF = DAG.getMachineFunction();
  const AMDGPUFrameLowering *TFL = Subtarget->getFrameLowering();

  FrameIndexSDNode *FIN = cast<FrameIndexSDNode>(Op);

  unsigned FrameIndex = FIN->getIndex();
  unsigned IgnoredFrameReg;
  unsigned Offset =
      TFL->getFrameIndexReference(MF, FrameIndex, IgnoredFrameReg);
  return DAG.getConstant(Offset * 4 * TFL->getStackWidth(MF), SDLoc(Op),
                         Op.getValueType());
}

SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
    SelectionDAG &DAG) const {
  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  switch (IntrinsicID) {
    default: return Op;
    case AMDGPUIntrinsic::AMDGPU_abs:
    case AMDGPUIntrinsic::AMDIL_abs: // Legacy name.
      return LowerIntrinsicIABS(Op, DAG);
    case AMDGPUIntrinsic::AMDGPU_lrp:
      return LowerIntrinsicLRP(Op, DAG);

    case AMDGPUIntrinsic::AMDGPU_clamp:
    case AMDGPUIntrinsic::AMDIL_clamp: // Legacy name.
      return DAG.getNode(AMDGPUISD::CLAMP, DL, VT,
                         Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

    case Intrinsic::AMDGPU_div_scale: {
      // 3rd parameter required to be a constant.
      const ConstantSDNode *Param = dyn_cast<ConstantSDNode>(Op.getOperand(3));
      if (!Param)
        return DAG.getUNDEF(VT);

      // Translate to the operands expected by the machine instruction. The
      // first parameter must be the same as the first instruction.
      SDValue Numerator = Op.getOperand(1);
      SDValue Denominator = Op.getOperand(2);

      // Note this order is opposite of the machine instruction's operations,
      // which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The
      // intrinsic has the numerator as the first operand to match a normal
      // division operation.

      SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator;

      return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0,
                         Denominator, Numerator);
    }

    case Intrinsic::AMDGPU_div_fmas:
      return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
                         Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
                         Op.getOperand(4));

    case Intrinsic::AMDGPU_div_fixup:
      return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
                         Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

    case Intrinsic::AMDGPU_trig_preop:
      return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT,
                         Op.getOperand(1), Op.getOperand(2));

    case Intrinsic::AMDGPU_rcp:
      return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1));

    case Intrinsic::AMDGPU_rsq:
      return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));

    case AMDGPUIntrinsic::AMDGPU_legacy_rsq:
      return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));

    case Intrinsic::AMDGPU_rsq_clamped:
      if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
        Type *Type = VT.getTypeForEVT(*DAG.getContext());
        APFloat Max = APFloat::getLargest(Type->getFltSemantics());
        APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true);

        SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
        SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq,
                                  DAG.getConstantFP(Max, DL, VT));
        return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp,
                           DAG.getConstantFP(Min, DL, VT));
      }

      return DAG.getNode(AMDGPUISD::RSQ_CLAMPED, DL, VT, Op.getOperand(1));

    case Intrinsic::AMDGPU_ldexp:
      return DAG.getNode(AMDGPUISD::LDEXP, DL, VT, Op.getOperand(1),
                         Op.getOperand(2));

    case AMDGPUIntrinsic::AMDGPU_imax:
      return DAG.getNode(ISD::SMAX, DL, VT, Op.getOperand(1),
                         Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_umax:
      return DAG.getNode(ISD::UMAX, DL, VT, Op.getOperand(1),
                         Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_imin:
      return DAG.getNode(ISD::SMIN, DL, VT, Op.getOperand(1),
                         Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_umin:
      return DAG.getNode(ISD::UMIN, DL, VT, Op.getOperand(1),
                         Op.getOperand(2));

    case AMDGPUIntrinsic::AMDGPU_umul24:
      return DAG.getNode(AMDGPUISD::MUL_U24, DL, VT,
                         Op.getOperand(1), Op.getOperand(2));

    case AMDGPUIntrinsic::AMDGPU_imul24:
      return DAG.getNode(AMDGPUISD::MUL_I24, DL, VT,
                         Op.getOperand(1), Op.getOperand(2));

    case AMDGPUIntrinsic::AMDGPU_umad24:
      return DAG.getNode(AMDGPUISD::MAD_U24, DL, VT,
                         Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_imad24:
      return DAG.getNode(AMDGPUISD::MAD_I24, DL, VT,
                         Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte0:
      return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Op.getOperand(1));

    case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte1:
      return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE1, DL, VT, Op.getOperand(1));

    case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte2:
      return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE2, DL, VT, Op.getOperand(1));

    case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte3:
      return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE3, DL, VT, Op.getOperand(1));

    case AMDGPUIntrinsic::AMDGPU_bfe_i32:
      return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
                         Op.getOperand(1),
                         Op.getOperand(2),
                         Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_bfe_u32:
      return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
                         Op.getOperand(1),
                         Op.getOperand(2),
                         Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_bfi:
      return DAG.getNode(AMDGPUISD::BFI, DL, VT,
                         Op.getOperand(1),
                         Op.getOperand(2),
                         Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_bfm:
      return DAG.getNode(AMDGPUISD::BFM, DL, VT,
                         Op.getOperand(1),
                         Op.getOperand(2));

    case Intrinsic::AMDGPU_class:
      return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT,
                         Op.getOperand(1), Op.getOperand(2));

    case AMDGPUIntrinsic::AMDIL_exp: // Legacy name.
      return DAG.getNode(ISD::FEXP2, DL, VT, Op.getOperand(1));

    case AMDGPUIntrinsic::AMDIL_round_nearest: // Legacy name.
      return DAG.getNode(ISD::FRINT, DL, VT, Op.getOperand(1));
    case AMDGPUIntrinsic::AMDGPU_trunc: // Legacy name.
      return DAG.getNode(ISD::FTRUNC, DL, VT, Op.getOperand(1));
    case AMDGPUIntrinsic::AMDGPU_brev: // Legacy name
      return DAG.getNode(ISD::BITREVERSE, DL, VT, Op.getOperand(1));
  }
}

/// IABS(a) = SMAX(sub(0, a), a)
SDValue AMDGPUTargetLowering::LowerIntrinsicIABS(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
                            Op.getOperand(1));

  return DAG.getNode(ISD::SMAX, DL, VT, Neg, Op.getOperand(1));
}

/// Linear Interpolation
/// LRP(a, b, c) = muladd(a, b, (1 - a) * c)
SDValue AMDGPUTargetLowering::LowerIntrinsicLRP(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  // TODO: Should this propagate fast-math-flags?
  SDValue OneSubA = DAG.getNode(ISD::FSUB, DL, VT,
                                DAG.getConstantFP(1.0f, DL, MVT::f32),
                                Op.getOperand(1));
  SDValue OneSubAC = DAG.getNode(ISD::FMUL, DL, VT, OneSubA,
                                 Op.getOperand(3));
  return DAG.getNode(ISD::FADD, DL, VT,
      DAG.getNode(ISD::FMUL, DL, VT, Op.getOperand(1), Op.getOperand(2)),
      OneSubAC);
}

/// \brief Generate Min/Max node
SDValue AMDGPUTargetLowering::CombineFMinMaxLegacy(SDLoc DL,
                                                   EVT VT,
                                                   SDValue LHS,
                                                   SDValue RHS,
                                                   SDValue True,
                                                   SDValue False,
                                                   SDValue CC,
                                                   DAGCombinerInfo &DCI) const {
  if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
    return SDValue();

  if (!(LHS == True && RHS == False) && !(LHS == False && RHS == True))
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
  switch (CCOpcode) {
  case ISD::SETOEQ:
  case ISD::SETONE:
  case ISD::SETUNE:
  case ISD::SETNE:
  case ISD::SETUEQ:
  case ISD::SETEQ:
  case ISD::SETFALSE:
  case ISD::SETFALSE2:
  case ISD::SETTRUE:
  case ISD::SETTRUE2:
  case ISD::SETUO:
  case ISD::SETO:
    break;
  case ISD::SETULE:
  case ISD::SETULT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
    return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
  }
  case ISD::SETOLE:
  case ISD::SETOLT:
  case ISD::SETLE:
  case ISD::SETLT: {
    // Ordered. Assume ordered for undefined.

    // Only do this after legalization to avoid interfering with other combines
    // which might occur.
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
        !DCI.isCalledByLegalizer())
      return SDValue();

    // We need to permute the operands to get the correct NaN behavior. The
    // selected operand is the second one based on the failing compare with NaN,
    // so permute it based on the compare type the hardware uses.
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
    return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
  }
  case ISD::SETUGE:
  case ISD::SETUGT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
    return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
  }
  case ISD::SETGT:
  case ISD::SETGE:
  case ISD::SETOGE:
  case ISD::SETOGT: {
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
        !DCI.isCalledByLegalizer())
      return SDValue();

    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
    return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
  }
  case ISD::SETCC_INVALID:
    llvm_unreachable("Invalid setcc condcode!");
  }
  return SDValue();
}

SDValue AMDGPUTargetLowering::ScalarizeVectorLoad(const SDValue Op,
                                                  SelectionDAG &DAG) const {
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  EVT MemVT = Load->getMemoryVT();
  EVT MemEltVT = MemVT.getVectorElementType();

  EVT LoadVT = Op.getValueType();
  EVT EltVT = LoadVT.getVectorElementType();
  EVT PtrVT = Load->getBasePtr().getValueType();

  unsigned NumElts = Load->getMemoryVT().getVectorNumElements();
  SmallVector<SDValue, 8> Loads;
  SmallVector<SDValue, 8> Chains;

  SDLoc SL(Op);
  unsigned MemEltSize = MemEltVT.getStoreSize();
  MachinePointerInfo SrcValue(Load->getMemOperand()->getValue());

  for (unsigned i = 0; i < NumElts; ++i) {
    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, Load->getBasePtr(),
                              DAG.getConstant(i * MemEltSize, SL, PtrVT));

    SDValue NewLoad
      = DAG.getExtLoad(Load->getExtensionType(), SL, EltVT,
                       Load->getChain(), Ptr,
                       SrcValue.getWithOffset(i * MemEltSize),
                       MemEltVT, Load->isVolatile(), Load->isNonTemporal(),
                       Load->isInvariant(), Load->getAlignment());
    Loads.push_back(NewLoad.getValue(0));
    Chains.push_back(NewLoad.getValue(1));
  }

  SDValue Ops[] = {
    DAG.getNode(ISD::BUILD_VECTOR, SL, LoadVT, Loads),
    DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Chains)
  };

  return DAG.getMergeValues(Ops, SL);
}

SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue Op,
                                              SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();

  // If this is a 2 element vector, we really want to scalarize and not create
  // weird 1 element vectors.
  if (VT.getVectorNumElements() == 2)
    return ScalarizeVectorLoad(Op, DAG);

  LoadSDNode *Load = cast<LoadSDNode>(Op);
  SDValue BasePtr = Load->getBasePtr();
  EVT PtrVT = BasePtr.getValueType();
  EVT MemVT = Load->getMemoryVT();
  SDLoc SL(Op);

  const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();

  EVT LoVT, HiVT;
  EVT LoMemVT, HiMemVT;
  SDValue Lo, Hi;

  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
  std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemVT);
  std::tie(Lo, Hi) = DAG.SplitVector(Op, SL, LoVT, HiVT);

  unsigned Size = LoMemVT.getStoreSize();
  unsigned BaseAlign = Load->getAlignment();
  unsigned HiAlign = MinAlign(BaseAlign, Size);

  SDValue LoLoad
    = DAG.getExtLoad(Load->getExtensionType(), SL, LoVT,
                     Load->getChain(), BasePtr,
                     SrcValue,
                     LoMemVT, Load->isVolatile(), Load->isNonTemporal(),
                     Load->isInvariant(), BaseAlign);

  SDValue HiPtr = DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
                              DAG.getConstant(Size, SL, PtrVT));

  SDValue HiLoad
    = DAG.getExtLoad(Load->getExtensionType(), SL, HiVT,
                     Load->getChain(), HiPtr,
                     SrcValue.getWithOffset(LoMemVT.getStoreSize()),
                     HiMemVT, Load->isVolatile(), Load->isNonTemporal(),
                     Load->isInvariant(), HiAlign);

  SDValue Ops[] = {
    DAG.getNode(ISD::CONCAT_VECTORS, SL, VT, LoLoad, HiLoad),
    DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
                LoLoad.getValue(1), HiLoad.getValue(1))
  };

  return DAG.getMergeValues(Ops, SL);
}

SDValue AMDGPUTargetLowering::MergeVectorStore(const SDValue &Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  EVT MemVT = Store->getMemoryVT();
  unsigned MemBits = MemVT.getSizeInBits();

  // Byte stores are really expensive, so if possible, try to pack 32-bit vector
  // truncating store into an i32 store.
  // XXX: We could also optimize other vector bitwidths.
  if (!MemVT.isVector() || MemBits > 32) {
    return SDValue();
  }

  SDLoc DL(Op);
  SDValue Value = Store->getValue();
  EVT VT = Value.getValueType();
  EVT ElemVT = VT.getVectorElementType();
  SDValue Ptr = Store->getBasePtr();
  EVT MemEltVT = MemVT.getVectorElementType();
  unsigned MemEltBits = MemEltVT.getSizeInBits();
  unsigned MemNumElements = MemVT.getVectorNumElements();
  unsigned PackedSize = MemVT.getStoreSizeInBits();
  SDValue Mask = DAG.getConstant((1 << MemEltBits) - 1, DL, MVT::i32);

  assert(Value.getValueType().getScalarSizeInBits() >= 32);
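  // For example, a <2 x i16> truncating store is packed as
  // (Elt0 & 0xffff) | ((Elt1 & 0xffff) << 16) and emitted as a single i32
  // store.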
  SDValue PackedValue;
  for (unsigned i = 0; i < MemNumElements; ++i) {
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ElemVT, Value,
                              DAG.getConstant(i, DL, MVT::i32));
    Elt = DAG.getZExtOrTrunc(Elt, DL, MVT::i32);
    Elt = DAG.getNode(ISD::AND, DL, MVT::i32, Elt, Mask); // getZeroExtendInReg

    SDValue Shift = DAG.getConstant(MemEltBits * i, DL, MVT::i32);
    Elt = DAG.getNode(ISD::SHL, DL, MVT::i32, Elt, Shift);

    if (i == 0) {
      PackedValue = Elt;
    } else {
      PackedValue = DAG.getNode(ISD::OR, DL, MVT::i32, PackedValue, Elt);
    }
  }

  if (PackedSize < 32) {
    EVT PackedVT = EVT::getIntegerVT(*DAG.getContext(), PackedSize);
    return DAG.getTruncStore(Store->getChain(), DL, PackedValue, Ptr,
                             Store->getMemOperand()->getPointerInfo(),
                             PackedVT,
                             Store->isNonTemporal(), Store->isVolatile(),
                             Store->getAlignment());
  }

  return DAG.getStore(Store->getChain(), DL, PackedValue, Ptr,
                      Store->getMemOperand()->getPointerInfo(),
                      Store->isVolatile(), Store->isNonTemporal(),
                      Store->getAlignment());
}

SDValue AMDGPUTargetLowering::ScalarizeVectorStore(SDValue Op,
                                                   SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  EVT MemEltVT = Store->getMemoryVT().getVectorElementType();
  EVT EltVT = Store->getValue().getValueType().getVectorElementType();
  EVT PtrVT = Store->getBasePtr().getValueType();
  unsigned NumElts = Store->getMemoryVT().getVectorNumElements();
  SDLoc SL(Op);

  SmallVector<SDValue, 8> Chains;

  unsigned EltSize = MemEltVT.getStoreSize();
  MachinePointerInfo SrcValue(Store->getMemOperand()->getValue());

  for (unsigned i = 0, e = NumElts; i != e; ++i) {
    SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                              Store->getValue(),
                              DAG.getConstant(i, SL, MVT::i32));

    SDValue Offset = DAG.getConstant(i * MemEltVT.getStoreSize(), SL, PtrVT);
    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, Store->getBasePtr(), Offset);
    SDValue NewStore =
      DAG.getTruncStore(Store->getChain(), SL, Val, Ptr,
                        SrcValue.getWithOffset(i * EltSize),
                        MemEltVT, Store->isNonTemporal(), Store->isVolatile(),
                        Store->getAlignment());
    Chains.push_back(NewStore);
  }

  return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Chains);
}

SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  SDValue Val = Store->getValue();
  EVT VT = Val.getValueType();

  // If this is a 2 element vector, we really want to scalarize and not create
  // weird 1 element vectors.
  if (VT.getVectorNumElements() == 2)
    return ScalarizeVectorStore(Op, DAG);

  EVT MemVT = Store->getMemoryVT();
  SDValue Chain = Store->getChain();
  SDValue BasePtr = Store->getBasePtr();
  SDLoc SL(Op);

  EVT LoVT, HiVT;
  EVT LoMemVT, HiMemVT;
  SDValue Lo, Hi;

  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
  std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemVT);
  std::tie(Lo, Hi) = DAG.SplitVector(Val, SL, LoVT, HiVT);

  EVT PtrVT = BasePtr.getValueType();
  SDValue HiPtr = DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
                              DAG.getConstant(LoMemVT.getStoreSize(), SL,
                                              PtrVT));

  const MachinePointerInfo &SrcValue = Store->getMemOperand()->getPointerInfo();
  unsigned BaseAlign = Store->getAlignment();
  unsigned Size = LoMemVT.getStoreSize();
  unsigned HiAlign = MinAlign(BaseAlign, Size);

  SDValue LoStore
    = DAG.getTruncStore(Chain, SL, Lo,
                        BasePtr,
                        SrcValue,
                        LoMemVT,
                        Store->isNonTemporal(),
                        Store->isVolatile(),
                        BaseAlign);
  SDValue HiStore
    = DAG.getTruncStore(Chain, SL, Hi,
                        HiPtr,
                        SrcValue.getWithOffset(Size),
                        HiMemVT,
                        Store->isNonTemporal(),
                        Store->isVolatile(),
                        HiAlign);

  return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoStore, HiStore);
}

SDValue AMDGPUTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  ISD::LoadExtType ExtType = Load->getExtensionType();
  EVT VT = Op.getValueType();
  EVT MemVT = Load->getMemoryVT();

  if (ExtType == ISD::NON_EXTLOAD && VT.getSizeInBits() < 32) {
    assert(VT == MVT::i1 && "Only i1 non-extloads expected");
    // FIXME: Copied from PPC
    // First, load into 32 bits, then truncate to 1 bit.

    SDValue Chain = Load->getChain();
    SDValue BasePtr = Load->getBasePtr();
    MachineMemOperand *MMO = Load->getMemOperand();

    SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
                                   BasePtr, MVT::i8, MMO);

    SDValue Ops[] = {
      DAG.getNode(ISD::TRUNCATE, DL, VT, NewLD),
      NewLD.getValue(1)
    };

    return DAG.getMergeValues(Ops, DL);
  }

  if (Subtarget->getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS ||
      Load->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS ||
      ExtType == ISD::NON_EXTLOAD || Load->getMemoryVT().bitsGE(MVT::i32))
    return SDValue();

  // <SI && AS=PRIVATE && EXTLOAD && size < 32bit,
  // register (2-)byte extract.
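  // For a byte load from private address A, this reads the 32-bit register at
  // word index A >> 2 and extracts byte A & 3 by shifting right by
  // (A & 3) * 8 bits.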
  // Get Register holding the target.
  SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Load->getBasePtr(),
                            DAG.getConstant(2, DL, MVT::i32));
  // Load the Register.
  SDValue Ret = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
                            Load->getChain(), Ptr,
                            DAG.getTargetConstant(0, DL, MVT::i32),
                            Op.getOperand(2));

  // Get offset within the register.
  SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32,
                                Load->getBasePtr(),
                                DAG.getConstant(0x3, DL, MVT::i32));

  // Bit offset of target byte (byteIdx * 8).
  SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                 DAG.getConstant(3, DL, MVT::i32));

  // Shift to the right.
  Ret = DAG.getNode(ISD::SRL, DL, MVT::i32, Ret, ShiftAmt);

  // Eliminate the upper bits by setting them to ...
  EVT MemEltVT = MemVT.getScalarType();

  // ... ones.
  if (ExtType == ISD::SEXTLOAD) {
    SDValue MemEltVTNode = DAG.getValueType(MemEltVT);

    SDValue Ops[] = {
      DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, Ret, MemEltVTNode),
      Load->getChain()
    };

    return DAG.getMergeValues(Ops, DL);
  }

  // ... or zeros.
  SDValue Ops[] = {
    DAG.getZeroExtendInReg(Ret, DL, MemEltVT),
    Load->getChain()
  };

  return DAG.getMergeValues(Ops, DL);
}

SDValue AMDGPUTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Result = AMDGPUTargetLowering::MergeVectorStore(Op, DAG);
  if (Result.getNode()) {
    return Result;
  }

  StoreSDNode *Store = cast<StoreSDNode>(Op);
  SDValue Chain = Store->getChain();
  if ((Store->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
       Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) &&
      Store->getValue().getValueType().isVector()) {
    return SplitVectorStore(Op, DAG);
  }

  EVT MemVT = Store->getMemoryVT();
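  // Private-address stores narrower than 32 bits are emulated with a
  // read-modify-write of the containing dword register: load the dword, clear
  // the destination lane with a mask, OR in the shifted value, and store the
  // dword back.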
  if (Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS &&
      MemVT.bitsLT(MVT::i32)) {
    unsigned Mask = 0;
    if (Store->getMemoryVT() == MVT::i8) {
      Mask = 0xff;
    } else if (Store->getMemoryVT() == MVT::i16) {
      Mask = 0xffff;
    }
    SDValue BasePtr = Store->getBasePtr();
    SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, BasePtr,
                              DAG.getConstant(2, DL, MVT::i32));
    SDValue Dst = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, MVT::i32,
                              Chain, Ptr,
                              DAG.getTargetConstant(0, DL, MVT::i32));

    SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32, BasePtr,
                                  DAG.getConstant(0x3, DL, MVT::i32));

    SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                   DAG.getConstant(3, DL, MVT::i32));

    SDValue SExtValue = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i32,
                                    Store->getValue());

    SDValue MaskedValue = DAG.getZeroExtendInReg(SExtValue, DL, MemVT);

    SDValue ShiftedValue = DAG.getNode(ISD::SHL, DL, MVT::i32,
                                       MaskedValue, ShiftAmt);

    SDValue DstMask = DAG.getNode(ISD::SHL, DL, MVT::i32,
                                  DAG.getConstant(Mask, DL, MVT::i32),
                                  ShiftAmt);
    DstMask = DAG.getNode(ISD::XOR, DL, MVT::i32, DstMask,
                          DAG.getConstant(0xffffffff, DL, MVT::i32));
    Dst = DAG.getNode(ISD::AND, DL, MVT::i32, Dst, DstMask);

    SDValue Value = DAG.getNode(ISD::OR, DL, MVT::i32, Dst, ShiftedValue);
    return DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other,
                       Chain, Value, Ptr,
                       DAG.getTargetConstant(0, DL, MVT::i32));
  }
  return SDValue();
}

// This is a shortcut for integer division because we have fast i32<->f32
// conversions, and fast f32 reciprocal instructions. The fractional part of a
// float is enough to accurately represent up to a 24-bit integer.
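// Rounding error in the reciprocal can leave the truncated quotient off by
// one, so the code below recomputes the remainder in floating point and
// conditionally adds a +/-1 correction term (jq) to the quotient.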
SDValue AMDGPUTargetLowering::LowerDIVREM24(SDValue Op, SelectionDAG &DAG, bool sign) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  MVT IntVT = MVT::i32;
  MVT FltVT = MVT::f32;

  ISD::NodeType ToFp = sign ? ISD::SINT_TO_FP : ISD::UINT_TO_FP;
  ISD::NodeType ToInt = sign ? ISD::FP_TO_SINT : ISD::FP_TO_UINT;

  if (VT.isVector()) {
    unsigned NElts = VT.getVectorNumElements();
    IntVT = MVT::getVectorVT(MVT::i32, NElts);
    FltVT = MVT::getVectorVT(MVT::f32, NElts);
  }

  unsigned BitSize = VT.getScalarType().getSizeInBits();

  SDValue jq = DAG.getConstant(1, DL, IntVT);

  if (sign) {
    // char|short jq = ia ^ ib;
    jq = DAG.getNode(ISD::XOR, DL, VT, LHS, RHS);

    // jq = jq >> (bitsize - 2)
    jq = DAG.getNode(ISD::SRA, DL, VT, jq,
                     DAG.getConstant(BitSize - 2, DL, VT));

    // jq = jq | 0x1
    jq = DAG.getNode(ISD::OR, DL, VT, jq, DAG.getConstant(1, DL, VT));

    // jq = (int)jq
    jq = DAG.getSExtOrTrunc(jq, DL, IntVT);
  }

  // int ia = (int)LHS;
  SDValue ia = sign ?
    DAG.getSExtOrTrunc(LHS, DL, IntVT) : DAG.getZExtOrTrunc(LHS, DL, IntVT);

  // int ib = (int)RHS;
  SDValue ib = sign ?
    DAG.getSExtOrTrunc(RHS, DL, IntVT) : DAG.getZExtOrTrunc(RHS, DL, IntVT);

  // float fa = (float)ia;
  SDValue fa = DAG.getNode(ToFp, DL, FltVT, ia);

  // float fb = (float)ib;
  SDValue fb = DAG.getNode(ToFp, DL, FltVT, ib);

  // TODO: Should this propagate fast-math-flags?
  // float fq = native_divide(fa, fb);
  SDValue fq = DAG.getNode(ISD::FMUL, DL, FltVT,
                           fa, DAG.getNode(AMDGPUISD::RCP, DL, FltVT, fb));

  // fq = trunc(fq);
  fq = DAG.getNode(ISD::FTRUNC, DL, FltVT, fq);

  // float fqneg = -fq;
  SDValue fqneg = DAG.getNode(ISD::FNEG, DL, FltVT, fq);

  // float fr = mad(fqneg, fb, fa);
  SDValue fr = DAG.getNode(ISD::FADD, DL, FltVT,
                           DAG.getNode(ISD::FMUL, DL, FltVT, fqneg, fb), fa);

  // int iq = (int)fq;
  SDValue iq = DAG.getNode(ToInt, DL, IntVT, fq);

  // fr = fabs(fr);
  fr = DAG.getNode(ISD::FABS, DL, FltVT, fr);

  // fb = fabs(fb);
  fb = DAG.getNode(ISD::FABS, DL, FltVT, fb);

  EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);

  // int cv = fr >= fb;
  SDValue cv = DAG.getSetCC(DL, SetCCVT, fr, fb, ISD::SETOGE);

  // jq = (cv ? jq : 0);
  jq = DAG.getNode(ISD::SELECT, DL, VT, cv, jq, DAG.getConstant(0, DL, VT));

  // dst = trunc/extend to legal type
  iq = sign ? DAG.getSExtOrTrunc(iq, DL, VT) : DAG.getZExtOrTrunc(iq, DL, VT);

  // dst = iq + jq;
  SDValue Div = DAG.getNode(ISD::ADD, DL, VT, iq, jq);

  // Rem needs compensation, it's easier to recompute it
  SDValue Rem = DAG.getNode(ISD::MUL, DL, VT, Div, RHS);
  Rem = DAG.getNode(ISD::SUB, DL, VT, LHS, Rem);
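  // i.e. Rem = LHS - Div * RHS.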

  SDValue Res[2] = {
    Div,
    Rem
  };
  return DAG.getMergeValues(Res, DL);
}

void AMDGPUTargetLowering::LowerUDIVREM64(SDValue Op,
                                          SelectionDAG &DAG,
                                          SmallVectorImpl<SDValue> &Results) const {
  assert(Op.getValueType() == MVT::i64);

  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());

  SDValue one = DAG.getConstant(1, DL, HalfVT);
  SDValue zero = DAG.getConstant(0, DL, HalfVT);

  //HiLo split
  SDValue LHS = Op.getOperand(0);
  SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, zero);
  SDValue LHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, one);

  SDValue RHS = Op.getOperand(1);
  SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, zero);
  SDValue RHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, one);

  if (VT == MVT::i64 &&
      DAG.MaskedValueIsZero(RHS, APInt::getHighBitsSet(64, 32)) &&
      DAG.MaskedValueIsZero(LHS, APInt::getHighBitsSet(64, 32))) {

    SDValue Res = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
                              LHS_Lo, RHS_Lo);

    SDValue DIV = DAG.getNode(ISD::BUILD_PAIR, DL, VT, Res.getValue(0), zero);
    SDValue REM = DAG.getNode(ISD::BUILD_PAIR, DL, VT, Res.getValue(1), zero);
    Results.push_back(DIV);
    Results.push_back(REM);
    return;
  }

  // Get Speculative values
  SDValue DIV_Part = DAG.getNode(ISD::UDIV, DL, HalfVT, LHS_Hi, RHS_Lo);
  SDValue REM_Part = DAG.getNode(ISD::UREM, DL, HalfVT, LHS_Hi, RHS_Lo);

  SDValue REM_Lo = DAG.getSelectCC(DL, RHS_Hi, zero, REM_Part, LHS_Hi, ISD::SETEQ);
  SDValue REM = DAG.getNode(ISD::BUILD_PAIR, DL, VT, REM_Lo, zero);

  SDValue DIV_Hi = DAG.getSelectCC(DL, RHS_Hi, zero, DIV_Part, zero, ISD::SETEQ);
  SDValue DIV_Lo = zero;

  const unsigned halfBitWidth = HalfVT.getSizeInBits();
1700 for (unsigned i = 0; i < halfBitWidth; ++i) {
1701 const unsigned bitPos = halfBitWidth - i - 1;
1702 SDValue POS = DAG.getConstant(bitPos, DL, HalfVT);
1703 // Get value of high bit
1704 SDValue HBit = DAG.getNode(ISD::SRL, DL, HalfVT, LHS_Lo, POS);
1705 HBit = DAG.getNode(ISD::AND, DL, HalfVT, HBit, one);
1706 HBit = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, HBit);
1709 REM = DAG.getNode(ISD::SHL, DL, VT, REM, DAG.getConstant(1, DL, VT));
1711 REM = DAG.getNode(ISD::OR, DL, VT, REM, HBit);
1713 SDValue BIT = DAG.getConstant(1 << bitPos, DL, HalfVT);
1714 SDValue realBIT = DAG.getSelectCC(DL, REM, RHS, BIT, zero, ISD::SETUGE);
1716 DIV_Lo = DAG.getNode(ISD::OR, DL, HalfVT, DIV_Lo, realBIT);
1719 SDValue REM_sub = DAG.getNode(ISD::SUB, DL, VT, REM, RHS);
1720 REM = DAG.getSelectCC(DL, REM, RHS, REM_sub, REM, ISD::SETUGE);
1723 SDValue DIV = DAG.getNode(ISD::BUILD_PAIR, DL, VT, DIV_Lo, DIV_Hi);
1724 Results.push_back(DIV);
1725 Results.push_back(REM);
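// 32-bit unsigned division built on the hardware reciprocal: URECIP yields an
// approximation of 2^32 / Den with rounding error e. The sequence below
// estimates that error, nudges the reciprocal by it, and finally corrects the
// quotient and remainder by at most one step in either direction; the
// step-by-step comments inside mirror the algebra.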
SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  if (VT == MVT::i64) {
    SmallVector<SDValue, 2> Results;
    LowerUDIVREM64(Op, DAG, Results);
    return DAG.getMergeValues(Results, DL);
  }

  SDValue Num = Op.getOperand(0);
  SDValue Den = Op.getOperand(1);

  if (VT == MVT::i32) {
    if (DAG.MaskedValueIsZero(Num, APInt::getHighBitsSet(32, 8)) &&
        DAG.MaskedValueIsZero(Den, APInt::getHighBitsSet(32, 8))) {
      // TODO: We technically could do this for i64, but shouldn't that just be
      // handled by something generally reducing 64-bit division on 32-bit
      // values to 32-bit?
      return LowerDIVREM24(Op, DAG, false);
    }
  }

  // RCP = URECIP(Den) = 2^32 / Den + e
  // e is rounding error.
  SDValue RCP = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Den);

  // RCP_LO = mul(RCP, Den)
  SDValue RCP_LO = DAG.getNode(ISD::MUL, DL, VT, RCP, Den);

  // RCP_HI = mulhu(RCP, Den)
  SDValue RCP_HI = DAG.getNode(ISD::MULHU, DL, VT, RCP, Den);

  // NEG_RCP_LO = -RCP_LO
  SDValue NEG_RCP_LO = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
                                                     RCP_LO);

  // ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO)
  SDValue ABS_RCP_LO = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, DL, VT),
                                           NEG_RCP_LO, RCP_LO,
                                           ISD::SETEQ);
  // Calculate the rounding error from the URECIP instruction
  // E = mulhu(ABS_RCP_LO, RCP)
  SDValue E = DAG.getNode(ISD::MULHU, DL, VT, ABS_RCP_LO, RCP);

  // RCP_A_E = RCP + E
  SDValue RCP_A_E = DAG.getNode(ISD::ADD, DL, VT, RCP, E);

  // RCP_S_E = RCP - E
  SDValue RCP_S_E = DAG.getNode(ISD::SUB, DL, VT, RCP, E);

  // Tmp0 = (RCP_HI == 0 ? RCP_A_E : RCP_S_E)
  SDValue Tmp0 = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, DL, VT),
                                     RCP_A_E, RCP_S_E,
                                     ISD::SETEQ);
  // Quotient = mulhu(Tmp0, Num)
  SDValue Quotient = DAG.getNode(ISD::MULHU, DL, VT, Tmp0, Num);

  // Num_S_Remainder = Quotient * Den
  SDValue Num_S_Remainder = DAG.getNode(ISD::MUL, DL, VT, Quotient, Den);

  // Remainder = Num - Num_S_Remainder
  SDValue Remainder = DAG.getNode(ISD::SUB, DL, VT, Num, Num_S_Remainder);

  // Remainder_GE_Den = (Remainder >= Den ? -1 : 0)
  SDValue Remainder_GE_Den = DAG.getSelectCC(DL, Remainder, Den,
                                                 DAG.getConstant(-1, DL, VT),
                                                 DAG.getConstant(0, DL, VT),
                                                 ISD::SETUGE);
  // Remainder_GE_Zero = (Num >= Num_S_Remainder ? -1 : 0)
  SDValue Remainder_GE_Zero = DAG.getSelectCC(DL, Num,
                                                  Num_S_Remainder,
                                                  DAG.getConstant(-1, DL, VT),
                                                  DAG.getConstant(0, DL, VT),
                                                  ISD::SETUGE);
  // Tmp1 = Remainder_GE_Den & Remainder_GE_Zero
  SDValue Tmp1 = DAG.getNode(ISD::AND, DL, VT, Remainder_GE_Den,
                                               Remainder_GE_Zero);

  // Calculate Division result:

  // Quotient_A_One = Quotient + 1
  SDValue Quotient_A_One = DAG.getNode(ISD::ADD, DL, VT, Quotient,
                                       DAG.getConstant(1, DL, VT));

  // Quotient_S_One = Quotient - 1
  SDValue Quotient_S_One = DAG.getNode(ISD::SUB, DL, VT, Quotient,
                                       DAG.getConstant(1, DL, VT));

  // Div = (Tmp1 == 0 ? Quotient : Quotient_A_One)
  SDValue Div = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, DL, VT),
                                    Quotient, Quotient_A_One, ISD::SETEQ);

  // Div = (Remainder_GE_Zero == 0 ? Quotient_S_One : Div)
  Div = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, DL, VT),
                            Quotient_S_One, Div, ISD::SETEQ);

  // Calculate Rem result:

  // Remainder_S_Den = Remainder - Den
  SDValue Remainder_S_Den = DAG.getNode(ISD::SUB, DL, VT, Remainder, Den);

  // Remainder_A_Den = Remainder + Den
  SDValue Remainder_A_Den = DAG.getNode(ISD::ADD, DL, VT, Remainder, Den);

  // Rem = (Tmp1 == 0 ? Remainder : Remainder_S_Den)
  SDValue Rem = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, DL, VT),
                                    Remainder, Remainder_S_Den, ISD::SETEQ);

  // Rem = (Remainder_GE_Zero == 0 ? Remainder_A_Den : Rem)
  Rem = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, DL, VT),
                            Remainder_A_Den, Rem, ISD::SETEQ);

  SDValue Ops[2] = {
    Div,
    Rem
  };
  return DAG.getMergeValues(Ops, DL);
}

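// Signed division is reduced to unsigned division on magnitudes. With
// s = x >> 31 (all ones if x is negative, else zero), (x + s) ^ s yields |x|
// in two's complement, and the inverse transform (u ^ s) - s reapplies the
// sign to the unsigned quotient and remainder.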
SDValue AMDGPUTargetLowering::LowerSDIVREM(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);

  SDValue Zero = DAG.getConstant(0, DL, VT);
  SDValue NegOne = DAG.getConstant(-1, DL, VT);

  if (VT == MVT::i32 &&
      DAG.ComputeNumSignBits(LHS) > 8 &&
      DAG.ComputeNumSignBits(RHS) > 8) {
    return LowerDIVREM24(Op, DAG, true);
  }
  if (VT == MVT::i64 &&
      DAG.ComputeNumSignBits(LHS) > 32 &&
      DAG.ComputeNumSignBits(RHS) > 32) {
    EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());

    // HiLo split
    SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, Zero);
    SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, Zero);
    SDValue DIVREM = DAG.getNode(ISD::SDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
                                 LHS_Lo, RHS_Lo);
    SDValue Res[2] = {
      DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(0)),
      DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(1))
    };
    return DAG.getMergeValues(Res, DL);
  }

  SDValue LHSign = DAG.getSelectCC(DL, LHS, Zero, NegOne, Zero, ISD::SETLT);
  SDValue RHSign = DAG.getSelectCC(DL, RHS, Zero, NegOne, Zero, ISD::SETLT);
  SDValue DSign = DAG.getNode(ISD::XOR, DL, VT, LHSign, RHSign);
  SDValue RSign = LHSign; // Remainder sign is the same as LHS

  LHS = DAG.getNode(ISD::ADD, DL, VT, LHS, LHSign);
  RHS = DAG.getNode(ISD::ADD, DL, VT, RHS, RHSign);

  LHS = DAG.getNode(ISD::XOR, DL, VT, LHS, LHSign);
  RHS = DAG.getNode(ISD::XOR, DL, VT, RHS, RHSign);

  SDValue Div = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(VT, VT), LHS, RHS);
  SDValue Rem = Div.getValue(1);

  Div = DAG.getNode(ISD::XOR, DL, VT, Div, DSign);
  Rem = DAG.getNode(ISD::XOR, DL, VT, Rem, RSign);

  Div = DAG.getNode(ISD::SUB, DL, VT, Div, DSign);
  Rem = DAG.getNode(ISD::SUB, DL, VT, Rem, RSign);

  SDValue Res[2] = {
    Div,
    Rem
  };
  return DAG.getMergeValues(Res, DL);
}

// (frem x, y) -> (fsub x, (fmul (ftrunc (fdiv x, y)), y))
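// For example, frem(5.5, 2.0): trunc(5.5 / 2.0) = 2.0 and
// 5.5 - 2.0 * 2.0 = 1.5.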
SDValue AMDGPUTargetLowering::LowerFREM(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  EVT VT = Op.getValueType();
  SDValue X = Op.getOperand(0);
  SDValue Y = Op.getOperand(1);

  // TODO: Should this propagate fast-math-flags?

  SDValue Div = DAG.getNode(ISD::FDIV, SL, VT, X, Y);
  SDValue Floor = DAG.getNode(ISD::FTRUNC, SL, VT, Div);
  SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, Floor, Y);

  return DAG.getNode(ISD::FSUB, SL, VT, X, Mul);
}

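// f64 ceil in terms of trunc. For example, ceil(2.3): trunc gives 2.0, and
// since 2.3 > 0.0 and differs from its truncation, 1.0 is added to give 3.0.
// ceil(-2.3): trunc gives -2.0, which is already the answer.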
SDValue AMDGPUTargetLowering::LowerFCEIL(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);

  // result = trunc(src)
  // if (src > 0.0 && src != result)
  //   result += 1.0

  SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);

  const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
  const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);

  EVT SetCCVT =
      getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);

  SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOGT);
  SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
  SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);

  SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, One, Zero);
  // TODO: Should this propagate fast-math-flags?
  return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
}

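// In an IEEE-754 double the biased exponent occupies bits [62:52], i.e. bits
// [30:20] of the high 32-bit word. The helper below extracts those 11 bits
// with BFE and subtracts the bias (1023) to recover the unbiased exponent.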
static SDValue extractF64Exponent(SDValue Hi, SDLoc SL, SelectionDAG &DAG) {
  const unsigned FractBits = 52;
  const unsigned ExpBits = 11;

  SDValue ExpPart = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
                                Hi,
                                DAG.getConstant(FractBits - 32, SL, MVT::i32),
                                DAG.getConstant(ExpBits, SL, MVT::i32));
  SDValue Exp = DAG.getNode(ISD::SUB, SL, MVT::i32, ExpPart,
                            DAG.getConstant(1023, SL, MVT::i32));

  return Exp;
}

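// f64 trunc via integer masking: all mantissa bits that lie below the binary
// point for the given exponent are cleared. An exponent below 0 means
// |x| < 1.0, so only the sign survives (+/-0.0); an exponent above 51 means x
// is already an integer (or inf/nan) and is returned unchanged.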
SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);

  assert(Op.getValueType() == MVT::f64);

  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
  const SDValue One = DAG.getConstant(1, SL, MVT::i32);

  SDValue VecSrc = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);

  // Extract the upper half, since this is where we will find the sign and
  // exponent.
  SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, VecSrc, One);

  SDValue Exp = extractF64Exponent(Hi, SL, DAG);

  const unsigned FractBits = 52;

  // Extract the sign bit.
  const SDValue SignBitMask = DAG.getConstant(UINT32_C(1) << 31, SL, MVT::i32);
  SDValue SignBit = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, SignBitMask);

  // Extend back to 64 bits.
  SDValue SignBit64 = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
                                  Zero, SignBit);
  SignBit64 = DAG.getNode(ISD::BITCAST, SL, MVT::i64, SignBit64);

  SDValue BcInt = DAG.getNode(ISD::BITCAST, SL, MVT::i64, Src);
  const SDValue FractMask
    = DAG.getConstant((UINT64_C(1) << FractBits) - 1, SL, MVT::i64);

  SDValue Shr = DAG.getNode(ISD::SRA, SL, MVT::i64, FractMask, Exp);
  SDValue Not = DAG.getNOT(SL, Shr, MVT::i64);
  SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, BcInt, Not);

  EVT SetCCVT =
      getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32);

  const SDValue FiftyOne = DAG.getConstant(FractBits - 1, SL, MVT::i32);

  SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT);
  SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT);

  SDValue Tmp1 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpLt0, SignBit64, Tmp0);
  SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpGt51, BcInt, Tmp1);

  return DAG.getNode(ISD::BITCAST, SL, MVT::f64, Tmp2);
}

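// f64 rint via the 2^52 trick: adding and then subtracting copysign(2^52, x)
// pushes the fractional bits out of the significand, so the addition rounds
// to integral in the current rounding mode. Magnitudes too large for the
// trick (|x| above roughly 2^52) are already integral and pass through.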
SDValue AMDGPUTargetLowering::LowerFRINT(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);

  assert(Op.getValueType() == MVT::f64);

  APFloat C1Val(APFloat::IEEEdouble, "0x1.0p+52");
  SDValue C1 = DAG.getConstantFP(C1Val, SL, MVT::f64);
  SDValue CopySign = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, C1, Src);

  // TODO: Should this propagate fast-math-flags?

  SDValue Tmp1 = DAG.getNode(ISD::FADD, SL, MVT::f64, Src, CopySign);
  SDValue Tmp2 = DAG.getNode(ISD::FSUB, SL, MVT::f64, Tmp1, CopySign);

  SDValue Fabs = DAG.getNode(ISD::FABS, SL, MVT::f64, Src);

  APFloat C2Val(APFloat::IEEEdouble, "0x1.fffffffffffffp+51");
  SDValue C2 = DAG.getConstantFP(C2Val, SL, MVT::f64);

  EVT SetCCVT =
      getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
  SDValue Cond = DAG.getSetCC(SL, SetCCVT, Fabs, C2, ISD::SETOGT);

  return DAG.getSelect(SL, MVT::f64, Cond, Src, Tmp2);
}

SDValue AMDGPUTargetLowering::LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const {
  // FNEARBYINT and FRINT are the same, except in their handling of FP
  // exceptions. Those aren't really meaningful for us, and OpenCL only has
  // rint, so just treat them as equivalent.
  return DAG.getNode(ISD::FRINT, SDLoc(Op), Op.getValueType(), Op.getOperand(0));
}

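// f32 round as trunc plus a correction: when the discarded fraction is at
// least a half, copysign(1.0, x) is added to the truncated value. For example
// round(2.5) = 2.0 + 1.0 = 3.0 and round(-2.5) = -2.0 + -1.0 = -3.0, i.e.
// round-half-away-from-zero, matching C's roundf.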
// XXX - May require not supporting f32 denormals?
SDValue AMDGPUTargetLowering::LowerFROUND32(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue X = Op.getOperand(0);

  SDValue T = DAG.getNode(ISD::FTRUNC, SL, MVT::f32, X);

  // TODO: Should this propagate fast-math-flags?

  SDValue Diff = DAG.getNode(ISD::FSUB, SL, MVT::f32, X, T);

  SDValue AbsDiff = DAG.getNode(ISD::FABS, SL, MVT::f32, Diff);

  const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f32);
  const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
  const SDValue Half = DAG.getConstantFP(0.5, SL, MVT::f32);

  SDValue SignOne = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f32, One, X);

  EVT SetCCVT =
      getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32);

  SDValue Cmp = DAG.getSetCC(SL, SetCCVT, AbsDiff, Half, ISD::SETOGE);

  SDValue Sel = DAG.getNode(ISD::SELECT, SL, MVT::f32, Cmp, SignOne, Zero);

  return DAG.getNode(ISD::FADD, SL, MVT::f32, T, Sel);
}

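// The f64 version rounds on the raw bits instead: when the discarded fraction
// is nonzero, a constant representing 0.5 at the rounding position (D) is
// added before the low fraction bits are masked off, rounding the magnitude
// half away from zero. Exponents below 0 are handled with copysign (+/-1.0
// exactly when Exp == -1, i.e. 0.5 <= |x| < 1.0, else +/-0.0), and exponents
// above 51 return x unchanged.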
SDValue AMDGPUTargetLowering::LowerFROUND64(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue X = Op.getOperand(0);

  SDValue L = DAG.getNode(ISD::BITCAST, SL, MVT::i64, X);

  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
  const SDValue One = DAG.getConstant(1, SL, MVT::i32);
  const SDValue NegOne = DAG.getConstant(-1, SL, MVT::i32);
  const SDValue FiftyOne = DAG.getConstant(51, SL, MVT::i32);
  EVT SetCCVT =
      getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32);

  SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);

  SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC, One);

  SDValue Exp = extractF64Exponent(Hi, SL, DAG);

  const SDValue Mask = DAG.getConstant(INT64_C(0x000fffffffffffff), SL,
                                       MVT::i64);

  SDValue M = DAG.getNode(ISD::SRA, SL, MVT::i64, Mask, Exp);
  SDValue D = DAG.getNode(ISD::SRA, SL, MVT::i64,
                          DAG.getConstant(INT64_C(0x0008000000000000), SL,
                                          MVT::i64),
                          Exp);

  SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, L, M);
  SDValue Tmp1 = DAG.getSetCC(SL, SetCCVT,
                              DAG.getConstant(0, SL, MVT::i64), Tmp0,
                              ISD::SETNE);

  SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, Tmp1,
                             D, DAG.getConstant(0, SL, MVT::i64));
  SDValue K = DAG.getNode(ISD::ADD, SL, MVT::i64, L, Tmp2);

  K = DAG.getNode(ISD::AND, SL, MVT::i64, K, DAG.getNOT(SL, M, MVT::i64));
  K = DAG.getNode(ISD::BITCAST, SL, MVT::f64, K);

  SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT);
  SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT);
  SDValue ExpEqNegOne = DAG.getSetCC(SL, SetCCVT, NegOne, Exp, ISD::SETEQ);

  SDValue Mag = DAG.getNode(ISD::SELECT, SL, MVT::f64,
                            ExpEqNegOne,
                            DAG.getConstantFP(1.0, SL, MVT::f64),
                            DAG.getConstantFP(0.0, SL, MVT::f64));

  SDValue S = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, Mag, X);

  K = DAG.getNode(ISD::SELECT, SL, MVT::f64, ExpLt0, S, K);
  K = DAG.getNode(ISD::SELECT, SL, MVT::f64, ExpGt51, X, K);

  return K;
}

SDValue AMDGPUTargetLowering::LowerFROUND(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();

  if (VT == MVT::f32)
    return LowerFROUND32(Op, DAG);

  if (VT == MVT::f64)
    return LowerFROUND64(Op, DAG);

  llvm_unreachable("unhandled type");
}

SDValue AMDGPUTargetLowering::LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);

  // result = trunc(src);
  // if (src < 0.0 && src != result)
  //   result += -1.0

  SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);

  const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
  const SDValue NegOne = DAG.getConstantFP(-1.0, SL, MVT::f64);

  EVT SetCCVT =
      getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);

  SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOLT);
  SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
  SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);

  SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, NegOne, Zero);
  // TODO: Should this propagate fast-math-flags?
  return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
}

SDValue AMDGPUTargetLowering::LowerCTLZ(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);
  assert(Src.getValueType() == MVT::i64);

  bool ZeroUndef = Op.getOpcode() == ISD::CTLZ_ZERO_UNDEF;
  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);

  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
  const SDValue One = DAG.getConstant(1, SL, MVT::i32);

  SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
  SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);

  EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(),
                                   *DAG.getContext(), MVT::i32);

  SDValue Hi0 = DAG.getSetCC(SL, SetCCVT, Hi, Zero, ISD::SETEQ);

  SDValue CtlzLo = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SL, MVT::i32, Lo);
  SDValue CtlzHi = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SL, MVT::i32, Hi);

  const SDValue Bits32 = DAG.getConstant(32, SL, MVT::i32);
  SDValue Add = DAG.getNode(ISD::ADD, SL, MVT::i32, CtlzLo, Bits32);

  // ctlz(x) = hi_32(x) == 0 ? ctlz(lo_32(x)) + 32 : ctlz(hi_32(x))
  SDValue NewCtlz = DAG.getNode(ISD::SELECT, SL, MVT::i32, Hi0, Add, CtlzHi);

  if (!ZeroUndef) {
    // Test if the full 64-bit input is zero.

    // FIXME: DAG combines turn what should be an s_and_b64 into a v_or_b32,
    // which we probably don't want.
    SDValue Lo0 = DAG.getSetCC(SL, SetCCVT, Lo, Zero, ISD::SETEQ);
    SDValue SrcIsZero = DAG.getNode(ISD::AND, SL, SetCCVT, Lo0, Hi0);

    // TODO: If i64 setcc is half rate, it can result in 1 fewer instruction
    // with the same cycles, otherwise it is slower.
    // SDValue SrcIsZero = DAG.getSetCC(SL, SetCCVT, Src,
    //                                  DAG.getConstant(0, SL, MVT::i64), ISD::SETEQ);

    const SDValue Bits64 = DAG.getConstant(64, SL, MVT::i32);

    // The instruction returns -1 for 0 input, but the defined intrinsic
    // behavior is to return the number of bits.
    NewCtlz = DAG.getNode(ISD::SELECT, SL, MVT::i32,
                          SrcIsZero, Bits64, NewCtlz);
  }

  return DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i64, NewCtlz);
}

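// i64 -> f64 conversion split into 32-bit halves:
//   (double)x == ldexp((double)hi, 32) + (double)(uint32_t)lo
// The high half carries the sign in the signed case; the low half is always
// converted as unsigned.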
SDValue AMDGPUTargetLowering::LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG,
                                               bool Signed) const {
  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);

  SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);

  SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC,
                           DAG.getConstant(0, SL, MVT::i32));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC,
                           DAG.getConstant(1, SL, MVT::i32));

  SDValue CvtHi = DAG.getNode(Signed ? ISD::SINT_TO_FP : ISD::UINT_TO_FP,
                              SL, MVT::f64, Hi);

  SDValue CvtLo = DAG.getNode(ISD::UINT_TO_FP, SL, MVT::f64, Lo);

  SDValue LdExp = DAG.getNode(AMDGPUISD::LDEXP, SL, MVT::f64, CvtHi,
                              DAG.getConstant(32, SL, MVT::i32));
  // TODO: Should this propagate fast-math-flags?
  return DAG.getNode(ISD::FADD, SL, MVT::f64, LdExp, CvtLo);
}

SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue S0 = Op.getOperand(0);
  if (S0.getValueType() != MVT::i64)
    return SDValue();

  EVT DestVT = Op.getValueType();
  if (DestVT == MVT::f64)
    return LowerINT_TO_FP64(Op, DAG, false);

  assert(DestVT == MVT::f32);

  SDLoc DL(Op);

  // f32 uint_to_fp i64
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
                           DAG.getConstant(0, DL, MVT::i32));
  SDValue FloatLo = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Lo);
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
                           DAG.getConstant(1, DL, MVT::i32));
  SDValue FloatHi = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Hi);
  // TODO: Should this propagate fast-math-flags?
  FloatHi = DAG.getNode(ISD::FMUL, DL, MVT::f32, FloatHi,
                        DAG.getConstantFP(4294967296.0f, DL, MVT::f32)); // 2^32
  return DAG.getNode(ISD::FADD, DL, MVT::f32, FloatLo, FloatHi);
}

SDValue AMDGPUTargetLowering::LowerSINT_TO_FP(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue Src = Op.getOperand(0);
  if (Src.getValueType() == MVT::i64 && Op.getValueType() == MVT::f64)
    return LowerINT_TO_FP64(Op, DAG, true);

  return SDValue();
}

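// f64 -> i64 conversion using only f64 -> i32 conversions. The bit patterns
// below are 2^-32 (0x3df0000000000000) and -2^32 (0xc1f0000000000000): the
// high word is floor(trunc(x) * 2^-32) and the low word is the remainder
// fma(hi, -2^32, trunc(x)), each of which fits in 32 bits.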
SDValue AMDGPUTargetLowering::LowerFP64_TO_INT(SDValue Op, SelectionDAG &DAG,
                                               bool Signed) const {
  SDLoc SL(Op);

  SDValue Src = Op.getOperand(0);

  SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);

  SDValue K0 = DAG.getConstantFP(BitsToDouble(UINT64_C(0x3df0000000000000)), SL,
                                 MVT::f64);
  SDValue K1 = DAG.getConstantFP(BitsToDouble(UINT64_C(0xc1f0000000000000)), SL,
                                 MVT::f64);
  // TODO: Should this propagate fast-math-flags?
  SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, Trunc, K0);

  SDValue FloorMul = DAG.getNode(ISD::FFLOOR, SL, MVT::f64, Mul);

  SDValue Fma = DAG.getNode(ISD::FMA, SL, MVT::f64, FloorMul, K1, Trunc);

  SDValue Hi = DAG.getNode(Signed ? ISD::FP_TO_SINT : ISD::FP_TO_UINT, SL,
                           MVT::i32, FloorMul);
  SDValue Lo = DAG.getNode(ISD::FP_TO_UINT, SL, MVT::i32, Fma);

  SDValue Result = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Lo, Hi);

  return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Result);
}

SDValue AMDGPUTargetLowering::LowerFP_TO_SINT(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue Src = Op.getOperand(0);

  if (Op.getValueType() == MVT::i64 && Src.getValueType() == MVT::f64)
    return LowerFP64_TO_INT(Op, DAG, true);

  return SDValue();
}

SDValue AMDGPUTargetLowering::LowerFP_TO_UINT(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue Src = Op.getOperand(0);

  if (Op.getValueType() == MVT::i64 && Src.getValueType() == MVT::f64)
    return LowerFP64_TO_INT(Op, DAG, false);

  return SDValue();
}

SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                     SelectionDAG &DAG) const {
  EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
  MVT VT = Op.getSimpleValueType();
  MVT ScalarVT = VT.getScalarType();

  if (!VT.isVector())
    return SDValue();

  SDValue Src = Op.getOperand(0);
  SDLoc DL(Op);

  // TODO: Don't scalarize on Evergreen?
  unsigned NElts = VT.getVectorNumElements();
  SmallVector<SDValue, 8> Args;
  DAG.ExtractVectorElements(Src, Args, 0, NElts);

  SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType());
  for (unsigned I = 0; I < NElts; ++I)
    Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp);

  return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Args);
}

//===----------------------------------------------------------------------===//
// Custom DAG optimizations
//===----------------------------------------------------------------------===//

static bool isU24(SDValue Op, SelectionDAG &DAG) {
  APInt KnownZero, KnownOne;
  EVT VT = Op.getValueType();
  DAG.computeKnownBits(Op, KnownZero, KnownOne);

  return (VT.getSizeInBits() - KnownZero.countLeadingOnes()) <= 24;
}

static bool isI24(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();

  // In order for this to be a signed 24-bit value, bit 23 must be a sign bit.
  return VT.getSizeInBits() >= 24 && // Types less than 24-bit should be treated
                                     // as unsigned 24-bit values.
         (VT.getSizeInBits() - DAG.ComputeNumSignBits(Op)) < 24;
}

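// Let the generic demanded-bits machinery simplify an operand of a 24-bit
// multiply: only the low 24 bits of the operand matter, so masks and
// extensions above bit 23 can be stripped.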
static void simplifyI24(SDValue Op, TargetLowering::DAGCombinerInfo &DCI) {
  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT = Op.getValueType();

  APInt Demanded = APInt::getLowBitsSet(VT.getSizeInBits(), 24);
  APInt KnownZero, KnownOne;
  TargetLowering::TargetLoweringOpt TLO(DAG, true, true);
  if (TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO))
    DCI.CommitTargetLoweringOpt(TLO);
}

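// Constant fold a bitfield extract by shifting the field up against bit 31
// and back down, so that IntTy (int32_t or uint32_t) selects an arithmetic or
// logical right shift, i.e. sign- or zero-extension of the extracted field.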
template <typename IntTy>
static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0,
                               uint32_t Offset, uint32_t Width, SDLoc DL) {
  if (Width + Offset < 32) {
    uint32_t Shl = static_cast<uint32_t>(Src0) << (32 - Offset - Width);
    IntTy Result = static_cast<IntTy>(Shl) >> (32 - Width);
    return DAG.getConstant(Result, DL, MVT::i32);
  }

  return DAG.getConstant(Src0 >> Offset, DL, MVT::i32);
}

static bool usesAllNormalStores(SDNode *LoadVal) {
  for (SDNode::use_iterator I = LoadVal->use_begin(); !I.atEnd(); ++I) {
    if (!ISD::isNormalStore(*I))
      return false;
  }

  return true;
}

// If we have a copy of an illegal type, replace it with a load / store of an
// equivalently sized legal type. This avoids intermediate bit pack / unpack
// instructions emitted when handling extloads and truncstores. Ideally we could
// recognize the pack / unpack pattern to eliminate it.
SDValue AMDGPUTargetLowering::performStoreCombine(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  if (!DCI.isBeforeLegalize())
    return SDValue();

  StoreSDNode *SN = cast<StoreSDNode>(N);
  SDValue Value = SN->getValue();
  EVT VT = Value.getValueType();

  if (isTypeLegal(VT) || SN->isVolatile() ||
      !ISD::isNormalLoad(Value.getNode()) || VT.getSizeInBits() < 8)
    return SDValue();

  LoadSDNode *LoadVal = cast<LoadSDNode>(Value);
  if (LoadVal->isVolatile() || !usesAllNormalStores(LoadVal))
    return SDValue();

  EVT MemVT = LoadVal->getMemoryVT();

  SDLoc SL(N);
  SelectionDAG &DAG = DCI.DAG;
  EVT LoadVT = getEquivalentMemType(*DAG.getContext(), MemVT);

  SDValue NewLoad = DAG.getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD,
                                LoadVT, SL,
                                LoadVal->getChain(),
                                LoadVal->getBasePtr(),
                                LoadVal->getOffset(),
                                LoadVT,
                                LoadVal->getMemOperand());

  SDValue CastLoad = DAG.getNode(ISD::BITCAST, SL, VT, NewLoad.getValue(0));
  DCI.CombineTo(LoadVal, CastLoad, NewLoad.getValue(1), false);

  return DAG.getStore(SN->getChain(), SL, NewLoad,
                      SN->getBasePtr(), SN->getMemOperand());
}

SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  if (N->getValueType(0) != MVT::i64)
    return SDValue();

  // i64 (shl x, 32) -> (build_pair 0, x)

  // Doing this with moves theoretically helps MI optimizations that understand
  // copies. 2 v_mov_b32_e32 will have the same code size / cycle count as
  // v_lshl_b64. In the SALU case, I think this is slightly worse since it
  // doubles the code size and I'm unsure about cycle count.
  const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!RHS || RHS->getZExtValue() != 32)
    return SDValue();

  SDValue LHS = N->getOperand(0);

  SDLoc SL(N);
  SelectionDAG &DAG = DCI.DAG;

  // Extract the low 32 bits.
  SDValue Lo = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LHS);

  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
  return DAG.getNode(ISD::BUILD_PAIR, SL, MVT::i64, Zero, Lo);
}

SDValue AMDGPUTargetLowering::performMulCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);

  if (VT.isVector() || VT.getSizeInBits() > 32)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue Mul;

  if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
    N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
    N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
    Mul = DAG.getNode(AMDGPUISD::MUL_U24, DL, MVT::i32, N0, N1);
  } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
    N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
    N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
    Mul = DAG.getNode(AMDGPUISD::MUL_I24, DL, MVT::i32, N0, N1);
  } else {
    return SDValue();
  }

  // We need to use sext even for MUL_U24, because MUL_U24 is used
  // for signed multiply of 8 and 16-bit types.
  return DAG.getSExtOrTrunc(Mul, DL, VT);
}

SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  switch (N->getOpcode()) {
  default:
    break;
  case ISD::SHL: {
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
      break;

    return performShlCombine(N, DCI);
  }
  case ISD::MUL:
    return performMulCombine(N, DCI);
  case AMDGPUISD::MUL_I24:
  case AMDGPUISD::MUL_U24: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    simplifyI24(N0, DCI);
    simplifyI24(N1, DCI);
    return SDValue();
  }
  case ISD::SELECT: {
    SDValue Cond = N->getOperand(0);
    if (Cond.getOpcode() == ISD::SETCC && Cond.hasOneUse()) {
      EVT VT = N->getValueType(0);
      SDValue LHS = Cond.getOperand(0);
      SDValue RHS = Cond.getOperand(1);
      SDValue CC = Cond.getOperand(2);

      SDValue True = N->getOperand(1);
      SDValue False = N->getOperand(2);

      if (VT == MVT::f32)
        return CombineFMinMaxLegacy(DL, VT, LHS, RHS, True, False, CC, DCI);
    }

    break;
  }
  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    assert(!N->getValueType(0).isVector() &&
           "Vector handling of BFE not implemented");
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
    if (!Width)
      break;

    uint32_t WidthVal = Width->getZExtValue() & 0x1f;
    if (WidthVal == 0)
      return DAG.getConstant(0, DL, MVT::i32);

    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (!Offset)
      break;

    SDValue BitsFrom = N->getOperand(0);
    uint32_t OffsetVal = Offset->getZExtValue() & 0x1f;

    bool Signed = N->getOpcode() == AMDGPUISD::BFE_I32;

    if (OffsetVal == 0) {
      // This is already sign / zero extended, so try to fold away extra BFEs.
      unsigned SignBits = Signed ? (32 - WidthVal + 1) : (32 - WidthVal);

      unsigned OpSignBits = DAG.ComputeNumSignBits(BitsFrom);
      if (OpSignBits >= SignBits)
        return BitsFrom;

      EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), WidthVal);
      if (Signed) {
        // This is a sign_extend_inreg. Replace it to take advantage of existing
        // DAG Combines. If not eliminated, we will match back to BFE during
        // selection.

        // TODO: The sext_inreg of extended types is not combined here, although
        // we could handle them in a single BFE.
        return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, BitsFrom,
                           DAG.getValueType(SmallVT));
      }

      return DAG.getZeroExtendInReg(BitsFrom, DL, SmallVT);
    }

    if (ConstantSDNode *CVal = dyn_cast<ConstantSDNode>(BitsFrom)) {
      if (Signed) {
        return constantFoldBFE<int32_t>(DAG,
                                        CVal->getSExtValue(),
                                        OffsetVal,
                                        WidthVal,
                                        DL);
      }

      return constantFoldBFE<uint32_t>(DAG,
                                       CVal->getZExtValue(),
                                       OffsetVal,
                                       WidthVal,
                                       DL);
    }

    if ((OffsetVal + WidthVal) >= 32) {
      SDValue ShiftVal = DAG.getConstant(OffsetVal, DL, MVT::i32);
      return DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, MVT::i32,
                         BitsFrom, ShiftVal);
    }

    if (BitsFrom.hasOneUse()) {
      APInt Demanded = APInt::getBitsSet(32,
                                         OffsetVal,
                                         OffsetVal + WidthVal);

      APInt KnownZero, KnownOne;
      TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                            !DCI.isBeforeLegalizeOps());
      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
      if (TLO.ShrinkDemandedConstant(BitsFrom, Demanded) ||
          TLI.SimplifyDemandedBits(BitsFrom, Demanded,
                                   KnownZero, KnownOne, TLO)) {
        DCI.CommitTargetLoweringOpt(TLO);
      }
    }

    break;
  }

  case ISD::STORE:
    return performStoreCombine(N, DCI);
  }
  return SDValue();
}

//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//

void AMDGPUTargetLowering::getOriginalFunctionArgs(
                               SelectionDAG &DAG,
                               const Function *F,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               SmallVectorImpl<ISD::InputArg> &OrigIns) const {

  for (unsigned i = 0, e = Ins.size(); i < e; ++i) {
    if (Ins[i].ArgVT == Ins[i].VT) {
      OrigIns.push_back(Ins[i]);
      continue;
    }

    EVT VT;
    if (Ins[i].ArgVT.isVector() && !Ins[i].VT.isVector()) {
      // Vector has been split into scalars.
      VT = Ins[i].ArgVT.getVectorElementType();
    } else if (Ins[i].VT.isVector() && Ins[i].ArgVT.isVector() &&
               Ins[i].ArgVT.getVectorElementType() !=
               Ins[i].VT.getVectorElementType()) {
      // Vector elements have been promoted.
      VT = Ins[i].ArgVT;
    } else {
      // Vector has been split into smaller vectors.
      VT = Ins[i].VT;
    }

    ISD::InputArg Arg(Ins[i].Flags, VT, VT, Ins[i].Used,
                      Ins[i].OrigArgIndex, Ins[i].PartOffset);
    OrigIns.push_back(Arg);
  }
}

bool AMDGPUTargetLowering::isHWTrueValue(SDValue Op) const {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->isExactlyValue(1.0);
  }
  return isAllOnesConstant(Op);
}

bool AMDGPUTargetLowering::isHWFalseValue(SDValue Op) const {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->getValueAPF().isZero();
  }
  return isNullConstant(Op);
}

SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
                                                   const TargetRegisterClass *RC,
                                                   unsigned Reg, EVT VT) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned VirtualRegister;
  if (!MRI.isLiveIn(Reg)) {
    VirtualRegister = MRI.createVirtualRegister(RC);
    MRI.addLiveIn(Reg, VirtualRegister);
  } else {
    VirtualRegister = MRI.getLiveInVirtReg(Reg);
  }
  return DAG.getRegister(VirtualRegister, VT);
}

uint32_t AMDGPUTargetLowering::getImplicitParameterOffset(
    const AMDGPUMachineFunction *MFI, const ImplicitParameter Param) const {
  uint64_t ArgOffset = MFI->ABIArgOffset;
  switch (Param) {
  case GRID_DIM:
    return ArgOffset;
  case GRID_OFFSET:
    return ArgOffset + 4;
  }
  llvm_unreachable("unexpected implicit parameter type");
}

#define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;

const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((AMDGPUISD::NodeType)Opcode) {
  case AMDGPUISD::FIRST_NUMBER: break;
  // AMDIL DAG nodes
  NODE_NAME_CASE(CALL);
  NODE_NAME_CASE(UMUL);
  NODE_NAME_CASE(RET_FLAG);
  NODE_NAME_CASE(BRANCH_COND);

  // AMDGPU DAG nodes
  NODE_NAME_CASE(DWORDADDR)
  NODE_NAME_CASE(FRACT)
  NODE_NAME_CASE(CLAMP)
  NODE_NAME_CASE(COS_HW)
  NODE_NAME_CASE(SIN_HW)
  NODE_NAME_CASE(FMAX_LEGACY)
  NODE_NAME_CASE(FMIN_LEGACY)
  NODE_NAME_CASE(FMAX3)
  NODE_NAME_CASE(SMAX3)
  NODE_NAME_CASE(UMAX3)
  NODE_NAME_CASE(FMIN3)
  NODE_NAME_CASE(SMIN3)
  NODE_NAME_CASE(UMIN3)
  NODE_NAME_CASE(URECIP)
  NODE_NAME_CASE(DIV_SCALE)
  NODE_NAME_CASE(DIV_FMAS)
  NODE_NAME_CASE(DIV_FIXUP)
  NODE_NAME_CASE(TRIG_PREOP)
  NODE_NAME_CASE(RCP)
  NODE_NAME_CASE(RSQ)
  NODE_NAME_CASE(RSQ_LEGACY)
  NODE_NAME_CASE(RSQ_CLAMPED)
  NODE_NAME_CASE(LDEXP)
  NODE_NAME_CASE(FP_CLASS)
  NODE_NAME_CASE(DOT4)
  NODE_NAME_CASE(CARRY)
  NODE_NAME_CASE(BORROW)
  NODE_NAME_CASE(BFE_U32)
  NODE_NAME_CASE(BFE_I32)
  NODE_NAME_CASE(BFI)
  NODE_NAME_CASE(BFM)
  NODE_NAME_CASE(MUL_U24)
  NODE_NAME_CASE(MUL_I24)
  NODE_NAME_CASE(MAD_U24)
  NODE_NAME_CASE(MAD_I24)
  NODE_NAME_CASE(TEXTURE_FETCH)
  NODE_NAME_CASE(EXPORT)
  NODE_NAME_CASE(CONST_ADDRESS)
  NODE_NAME_CASE(REGISTER_LOAD)
  NODE_NAME_CASE(REGISTER_STORE)
  NODE_NAME_CASE(LOAD_CONSTANT)
  NODE_NAME_CASE(LOAD_INPUT)
  NODE_NAME_CASE(SAMPLE)
  NODE_NAME_CASE(SAMPLEB)
  NODE_NAME_CASE(SAMPLED)
  NODE_NAME_CASE(SAMPLEL)
  NODE_NAME_CASE(CVT_F32_UBYTE0)
  NODE_NAME_CASE(CVT_F32_UBYTE1)
  NODE_NAME_CASE(CVT_F32_UBYTE2)
  NODE_NAME_CASE(CVT_F32_UBYTE3)
  NODE_NAME_CASE(BUILD_VERTICAL_VECTOR)
  NODE_NAME_CASE(CONST_DATA_PTR)
  case AMDGPUISD::FIRST_MEM_OPCODE_NUMBER: break;
  NODE_NAME_CASE(SENDMSG)
  NODE_NAME_CASE(INTERP_MOV)
  NODE_NAME_CASE(INTERP_P1)
  NODE_NAME_CASE(INTERP_P2)
  NODE_NAME_CASE(STORE_MSKOR)
  NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
  case AMDGPUISD::LAST_AMDGPU_ISD_NUMBER: break;
  }
  return nullptr;
}

SDValue AMDGPUTargetLowering::getRsqrtEstimate(SDValue Operand,
                                               DAGCombinerInfo &DCI,
                                               unsigned &RefinementSteps,
                                               bool &UseOneConstNR) const {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = Operand.getValueType();

  if (VT == MVT::f32) {
    RefinementSteps = 0;
    return DAG.getNode(AMDGPUISD::RSQ, SDLoc(Operand), VT, Operand);
  }

  // TODO: There is also an f64 rsq instruction, but the documentation is less
  // clear on its precision.

  return SDValue();
}

SDValue AMDGPUTargetLowering::getRecipEstimate(SDValue Operand,
                                               DAGCombinerInfo &DCI,
                                               unsigned &RefinementSteps) const {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = Operand.getValueType();

  if (VT == MVT::f32) {
    // Reciprocal, < 1 ulp error.
    //
    // This reciprocal approximation converges to < 0.5 ulp error with one
    // Newton-Raphson iteration performed with two fused multiply-adds (FMAs).

    RefinementSteps = 0;
    return DAG.getNode(AMDGPUISD::RCP, SDLoc(Operand), VT, Operand);
  }

  // TODO: There is also an f64 rcp instruction, but the documentation is less
  // clear on its precision.

  return SDValue();
}

static void computeKnownBitsForMinMax(const SDValue Op0,
                                      const SDValue Op1,
                                      APInt &KnownZero,
                                      APInt &KnownOne,
                                      const SelectionDAG &DAG,
                                      unsigned Depth) {
  APInt Op0Zero, Op0One;
  APInt Op1Zero, Op1One;
  DAG.computeKnownBits(Op0, Op0Zero, Op0One, Depth);
  DAG.computeKnownBits(Op1, Op1Zero, Op1One, Depth);

  KnownZero = Op0Zero & Op1Zero;
  KnownOne = Op0One & Op1One;
}

void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
  const SDValue Op,
  APInt &KnownZero,
  APInt &KnownOne,
  const SelectionDAG &DAG,
  unsigned Depth) const {

  KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0); // Don't know anything.

  unsigned Opc = Op.getOpcode();

  switch (Opc) {
  default:
    break;
  case ISD::INTRINSIC_WO_CHAIN: {
    // FIXME: The intrinsic should just use the node.
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
    case AMDGPUIntrinsic::AMDGPU_imax:
    case AMDGPUIntrinsic::AMDGPU_umax:
    case AMDGPUIntrinsic::AMDGPU_imin:
    case AMDGPUIntrinsic::AMDGPU_umin:
      computeKnownBitsForMinMax(Op.getOperand(1), Op.getOperand(2),
                                KnownZero, KnownOne, DAG, Depth);
      break;
    default:
      break;
    }

    break;
  }
  case AMDGPUISD::CARRY:
  case AMDGPUISD::BORROW: {
    KnownZero = APInt::getHighBitsSet(32, 31);
    break;
  }

  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    ConstantSDNode *CWidth = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    if (!CWidth)
      return;

    unsigned BitWidth = 32;
    uint32_t Width = CWidth->getZExtValue() & 0x1f;

    if (Opc == AMDGPUISD::BFE_U32)
      KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - Width);

    break;
  }
  }
}

unsigned AMDGPUTargetLowering::ComputeNumSignBitsForTargetNode(
  SDValue Op,
  const SelectionDAG &DAG,
  unsigned Depth) const {
  switch (Op.getOpcode()) {
  case AMDGPUISD::BFE_I32: {
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    if (!Width)
      return 1;

    unsigned SignBits = 32 - Width->getZExtValue() + 1;
    if (!isNullConstant(Op.getOperand(1)))
      return SignBits;

    // TODO: Could probably figure something out with non-0 offsets.
    unsigned Op0SignBits = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
    return std::max(SignBits, Op0SignBits);
  }

  case AMDGPUISD::BFE_U32: {
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    return Width ? 32 - (Width->getZExtValue() & 0x1f) : 1;
  }

  case AMDGPUISD::CARRY:
  case AMDGPUISD::BORROW: