//===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This is the parent TargetLowering class for hardware code gen
/// targets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUISelLowering.h"
#include "AMDGPUFrameLowering.h"
#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "R600MachineFunctionInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"

using namespace llvm;

namespace {

/// Diagnostic information for unimplemented or unsupported feature reporting.
class DiagnosticInfoUnsupported : public DiagnosticInfo {
private:
  const Twine &Description;
  const Function &Fn;

  static int KindID;

  static int getKindID() {
    if (KindID == 0)
      KindID = llvm::getNextAvailablePluginDiagnosticKind();
    return KindID;
  }

public:
  DiagnosticInfoUnsupported(const Function &Fn, const Twine &Desc,
                            DiagnosticSeverity Severity = DS_Error)
    : DiagnosticInfo(getKindID(), Severity),
      Description(Desc),
      Fn(Fn) { }

  const Function &getFunction() const { return Fn; }
  const Twine &getDescription() const { return Description; }

  void print(DiagnosticPrinter &DP) const override {
    DP << "unsupported " << getDescription() << " in " << Fn.getName();
  }

  static bool classof(const DiagnosticInfo *DI) {
    return DI->getKind() == getKindID();
  }
};

int DiagnosticInfoUnsupported::KindID = 0;
}

static bool allocateStack(unsigned ValNo, MVT ValVT, MVT LocVT,
                          CCValAssign::LocInfo LocInfo,
                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
  unsigned Offset = State.AllocateStack(ValVT.getStoreSize(),
                                        ArgFlags.getOrigAlign());
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));

  return true;
}

#include "AMDGPUGenCallingConv.inc"

// Find a larger type to do a load / store of a vector with.
EVT AMDGPUTargetLowering::getEquivalentMemType(LLVMContext &Ctx, EVT VT) {
  unsigned StoreSize = VT.getStoreSizeInBits();
  if (StoreSize <= 32)
    return EVT::getIntegerVT(Ctx, StoreSize);

  assert(StoreSize % 32 == 0 && "Store size not a multiple of 32");
  return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
}

// Type for a vector that will be loaded to.
EVT AMDGPUTargetLowering::getEquivalentLoadRegType(LLVMContext &Ctx, EVT VT) {
  unsigned StoreSize = VT.getStoreSizeInBits();
  if (StoreSize <= 32)
    return EVT::getIntegerVT(Ctx, 32);

  return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
}
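
// Illustrative mappings implied by the two helpers above (examples assumed
// from the stated rules, not taken from the original source):
//   v2i8  (16-bit store size)  -> i16 in memory, i32 in a register.
//   v4i8  (32-bit store size)  -> i32 in memory, i32 in a register.
//   v8i16 (128-bit store size) -> v4i32 for both memory and register.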
AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
  TargetLowering(TM, new TargetLoweringObjectFileELF()) {

  Subtarget = &TM.getSubtarget<AMDGPUSubtarget>();

  setOperationAction(ISD::Constant, MVT::i32, Legal);
  setOperationAction(ISD::Constant, MVT::i64, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f64, Legal);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRIND, MVT::Other, Expand);

  // We need to custom lower some of the intrinsics
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // Library functions. These default to Expand, but we have instructions
  // for them.
  setOperationAction(ISD::FCEIL, MVT::f32, Legal);
  setOperationAction(ISD::FEXP2, MVT::f32, Legal);
  setOperationAction(ISD::FPOW, MVT::f32, Legal);
  setOperationAction(ISD::FLOG2, MVT::f32, Legal);
  setOperationAction(ISD::FABS, MVT::f32, Legal);
  setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
  setOperationAction(ISD::FRINT, MVT::f32, Legal);
  setOperationAction(ISD::FROUND, MVT::f32, Legal);
  setOperationAction(ISD::FTRUNC, MVT::f32, Legal);

  setOperationAction(ISD::FREM, MVT::f32, Custom);
  setOperationAction(ISD::FREM, MVT::f64, Custom);

  // Lower floating point store/load to integer store/load to reduce the number
  // of patterns in tablegen.
  setOperationAction(ISD::STORE, MVT::f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);

  setOperationAction(ISD::STORE, MVT::v2f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::i64, Promote);
  AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v4f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::v8f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::STORE, MVT::v16f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::STORE, MVT::f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::f64, MVT::i64);

  setOperationAction(ISD::STORE, MVT::v2f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f64, MVT::v2i64);
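
  // For example (a sketch of what the Promote pairs above mean, not code from
  // the original source): a `store f32 %x` is rewritten by legalization as
  // roughly `store i32 (bitcast %x)`, so only the integer store patterns need
  // to exist in tablegen.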
  // Custom lowering of vector stores is required for local address space
  // stores.
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
  // XXX: Native v2i32 local address space stores are possible, but not
  // currently implemented.
  setOperationAction(ISD::STORE, MVT::v2i32, Custom);

  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Custom);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Custom);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Custom);

  // XXX: This can be changed to Custom, once ExpandVectorStores can
  // handle 64-bit stores.
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);

  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i64, MVT::i1, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
  setTruncStoreAction(MVT::v4i64, MVT::v4i1, Expand);

  setOperationAction(ISD::LOAD, MVT::f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);

  setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::i64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::LOAD, MVT::f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f64, MVT::i64);

  setOperationAction(ISD::LOAD, MVT::v2f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f64, MVT::v2i64);

  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i32, Custom);

  setLoadExtAction(ISD::EXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4i16, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v4i16, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i16, Expand);

  setOperationAction(ISD::BR_CC, MVT::i1, Expand);

  if (Subtarget->getGeneration() < AMDGPUSubtarget::SEA_ISLANDS) {
    setOperationAction(ISD::FCEIL, MVT::f64, Custom);
    setOperationAction(ISD::FTRUNC, MVT::f64, Custom);
    setOperationAction(ISD::FRINT, MVT::f64, Custom);
    setOperationAction(ISD::FFLOOR, MVT::f64, Custom);
  }

  if (!Subtarget->hasBFI()) {
    // fcopysign can be done in a single instruction with BFI.
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  }

  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f16, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);

  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);

    // GPU does not have divrem function for signed or unsigned.
    setOperationAction(ISD::SDIVREM, VT, Custom);
    setOperationAction(ISD::UDIVREM, VT, Custom);

    // GPU does not have [S|U]MUL_LOHI functions as a single instruction.
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);

    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
  }

  if (!Subtarget->hasBCNT(32))
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);

  if (!Subtarget->hasBCNT(64))
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);

  // The hardware supports 32-bit ROTR, but not ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i64, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  setOperationAction(ISD::MUL, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i64, Expand);
  setOperationAction(ISD::MULHS, MVT::i64, Expand);
  setOperationAction(ISD::UDIV, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);

  if (!Subtarget->hasFFBH())
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);

  if (!Subtarget->hasFFBL())
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);

  static const MVT::SimpleValueType VectorIntTypes[] = {
    MVT::v2i32, MVT::v4i32
  };

  for (MVT VT : VectorIntTypes) {
    // Expand the following operations for the current type by default.
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::AND, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::OR, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Custom);
    setOperationAction(ISD::UDIVREM, VT, Custom);
    setOperationAction(ISD::ADDC, VT, Expand);
    setOperationAction(ISD::SUBC, VT, Expand);
    setOperationAction(ISD::ADDE, VT, Expand);
    setOperationAction(ISD::SUBE, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    setOperationAction(ISD::XOR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
  }

  static const MVT::SimpleValueType FloatVectorTypes[] = {
    MVT::v2f32, MVT::v4f32
  };

  for (MVT VT : FloatVectorTypes) {
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FREM, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::FMA, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::FNEG, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
  }

  setOperationAction(ISD::FNEARBYINT, MVT::f32, Custom);
  setOperationAction(ISD::FNEARBYINT, MVT::f64, Custom);

  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::SELECT_CC);
  setTargetDAGCombine(ISD::STORE);

  setSchedulingPreference(Sched::RegPressure);
  setJumpIsExpensive(true);

  // SI at least has hardware support for floating point exceptions, but no way
  // of using or handling them is implemented. They are also optional in OpenCL
  // (Section 7.3).
  setHasFloatingPointExceptions(false);

  setSelectIsExpensive(false);
  PredictableSelectIsExpensive = false;

  // There are no integer divide instructions, and these expand to a pretty
  // large sequence of instructions.
  setIntDivIsCheap(false);
  setPow2SDivIsCheap(false);

  // FIXME: Need to really handle these.
  MaxStoresPerMemcpy = 4096;
  MaxStoresPerMemmove = 4096;
  MaxStoresPerMemset = 4096;
}
//===----------------------------------------------------------------------===//
// Target Information
//===----------------------------------------------------------------------===//

MVT AMDGPUTargetLowering::getVectorIdxTy() const {
  return MVT::i32;
}

bool AMDGPUTargetLowering::isSelectSupported(SelectSupportKind SelType) const {
  return true;
}

// The backend supports 32 and 64 bit floating point immediates.
// FIXME: Why are we reporting vectors of FP immediates as legal?
bool AMDGPUTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  EVT ScalarVT = VT.getScalarType();
  return (ScalarVT == MVT::f32 || ScalarVT == MVT::f64);
}

// We don't want to shrink f64 / f32 constants.
bool AMDGPUTargetLowering::ShouldShrinkFPConstant(EVT VT) const {
  EVT ScalarVT = VT.getScalarType();
  return (ScalarVT != MVT::f32 && ScalarVT != MVT::f64);
}

bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy,
                                                   EVT CastTy) const {
  if (LoadTy.getSizeInBits() != CastTy.getSizeInBits())
    return true;

  unsigned LScalarSize = LoadTy.getScalarType().getSizeInBits();
  unsigned CastScalarSize = CastTy.getScalarType().getSizeInBits();

  return ((LScalarSize <= CastScalarSize) ||
          (CastScalarSize >= 32) ||
          (LScalarSize < 32));
}

//===---------------------------------------------------------------------===//
// Target Properties
//===---------------------------------------------------------------------===//

bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32 || VT == MVT::f64;
}

bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32 || VT == MVT::f64;
}

bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
  // Truncate is just accessing a subregister.
  return Dest.bitsLT(Source) && (Dest.getSizeInBits() % 32 == 0);
}

bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
  // Truncate is just accessing a subregister.
  return Dest->getPrimitiveSizeInBits() < Source->getPrimitiveSizeInBits() &&
         (Dest->getPrimitiveSizeInBits() % 32 == 0);
}

bool AMDGPUTargetLowering::isZExtFree(Type *Src, Type *Dest) const {
  const DataLayout *DL = getDataLayout();
  unsigned SrcSize = DL->getTypeSizeInBits(Src->getScalarType());
  unsigned DestSize = DL->getTypeSizeInBits(Dest->getScalarType());

  return SrcSize == 32 && DestSize == 64;
}

bool AMDGPUTargetLowering::isZExtFree(EVT Src, EVT Dest) const {
  // Any register load of a 64-bit value really requires 2 32-bit moves. For all
  // practical purposes, the extra mov 0 to load a 64-bit is free. As used,
  // this will enable reducing 64-bit operations to 32-bit, which is always
  // good.
  return Src == MVT::i32 && Dest == MVT::i64;
}

bool AMDGPUTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  return isZExtFree(Val.getValueType(), VT2);
}

bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
  // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
  // limited number of native 64-bit operations. Shrinking an operation to fit
  // in a single 32-bit register should always be helpful. As currently used,
  // this is much less general than the name suggests, and is only used in
  // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits is
  // not profitable, and may actually be harmful.
  return SrcVT.getSizeInBits() > 32 && DestVT.getSizeInBits() == 32;
}
//===---------------------------------------------------------------------===//
// TargetLowering Callbacks
//===---------------------------------------------------------------------===//

void AMDGPUTargetLowering::AnalyzeFormalArguments(CCState &State,
                             const SmallVectorImpl<ISD::InputArg> &Ins) const {

  State.AnalyzeFormalArguments(Ins, CC_AMDGPU);
}

SDValue AMDGPUTargetLowering::LowerReturn(
                                     SDValue Chain,
                                     CallingConv::ID CallConv,
                                     bool isVarArg,
                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
                                     const SmallVectorImpl<SDValue> &OutVals,
                                     SDLoc DL, SelectionDAG &DAG) const {
  return DAG.getNode(AMDGPUISD::RET_FLAG, DL, MVT::Other, Chain);
}
//===---------------------------------------------------------------------===//
// Target specific lowering
//===---------------------------------------------------------------------===//

SDValue AMDGPUTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                        SmallVectorImpl<SDValue> &InVals) const {
  SDValue Callee = CLI.Callee;
  SelectionDAG &DAG = CLI.DAG;

  const Function &Fn = *DAG.getMachineFunction().getFunction();

  StringRef FuncName("<unknown>");

  if (const ExternalSymbolSDNode *G = dyn_cast<ExternalSymbolSDNode>(Callee))
    FuncName = G->getSymbol();
  else if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    FuncName = G->getGlobal()->getName();

  DiagnosticInfoUnsupported NoCalls(Fn, "call to function " + FuncName);
  DAG.getContext()->diagnose(NoCalls);
  return SDValue();
}

SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op,
                                             SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    Op.getNode()->dump();
    llvm_unreachable("Custom lowering code for this "
                     "instruction is not implemented yet!");
    break;
  case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
  case ISD::FrameIndex: return LowerFrameIndex(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
  case ISD::SDIVREM: return LowerSDIVREM(Op, DAG);
  case ISD::FREM: return LowerFREM(Op, DAG);
  case ISD::FCEIL: return LowerFCEIL(Op, DAG);
  case ISD::FTRUNC: return LowerFTRUNC(Op, DAG);
  case ISD::FRINT: return LowerFRINT(Op, DAG);
  case ISD::FNEARBYINT: return LowerFNEARBYINT(Op, DAG);
  case ISD::FFLOOR: return LowerFFLOOR(Op, DAG);
  case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
  }
  return Op;
}

void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N,
                                              SmallVectorImpl<SDValue> &Results,
                                              SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::SIGN_EXTEND_INREG:
    // Different parts of legalization seem to interpret which type of
    // sign_extend_inreg is the one to check for custom lowering. The extended
    // from type is what really matters, but some places check for custom
    // lowering of the result type. This results in trying to use
    // ReplaceNodeResults to sext_in_reg to an illegal type, so we'll just do
    // nothing here and let the illegal result integer be handled normally.
    return;
  case ISD::LOAD: {
    SDNode *Node = LowerLOAD(SDValue(N, 0), DAG).getNode();
    if (!Node)
      return;

    Results.push_back(SDValue(Node, 0));
    Results.push_back(SDValue(Node, 1));
    // XXX: LLVM seems not to replace Chain Value inside CustomWidenLowerNode
    // function
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), SDValue(Node, 1));
    return;
  }
  case ISD::STORE: {
    SDValue Lowered = LowerSTORE(SDValue(N, 0), DAG);
    if (Lowered.getNode())
      Results.push_back(Lowered);
    return;
  }
  default:
    return;
  }
}
// FIXME: This implements accesses to initialized globals in the constant
// address space by copying them to private and accessing that. It does not
// properly handle illegal types or vectors. The private vector loads are not
// scalarized, and the illegal scalars hit an assertion. This technique will not
// work well with large initializers, and this should eventually be
// removed. Initialized globals should be placed into a data section that the
// runtime will load into a buffer before the kernel is executed. Uses of the
// global need to be replaced with a pointer loaded from an implicit kernel
// argument into this buffer holding the copy of the data, which will remove the
// need for any of this.
SDValue AMDGPUTargetLowering::LowerConstantInitializer(const Constant* Init,
                                                       const GlobalValue *GV,
                                                       const SDValue &InitPtr,
                                                       SDValue Chain,
                                                       SelectionDAG &DAG) const {
  const DataLayout *TD = getTargetMachine().getSubtargetImpl()->getDataLayout();
  SDLoc DL(InitPtr);
  Type *InitTy = Init->getType();

  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Init)) {
    EVT VT = EVT::getEVT(InitTy);
    PointerType *PtrTy = PointerType::get(InitTy, AMDGPUAS::PRIVATE_ADDRESS);
    return DAG.getStore(Chain, DL, DAG.getConstant(*CI, VT), InitPtr,
                        MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
                        TD->getPrefTypeAlignment(InitTy));
  }

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(Init)) {
    EVT VT = EVT::getEVT(CFP->getType());
    PointerType *PtrTy = PointerType::get(CFP->getType(), 0);
    return DAG.getStore(Chain, DL, DAG.getConstantFP(*CFP, VT), InitPtr,
                        MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
                        TD->getPrefTypeAlignment(CFP->getType()));
  }

  if (StructType *ST = dyn_cast<StructType>(InitTy)) {
    const StructLayout *SL = TD->getStructLayout(ST);

    EVT PtrVT = InitPtr.getValueType();
    SmallVector<SDValue, 8> Chains;

    for (unsigned I = 0, N = ST->getNumElements(); I != N; ++I) {
      SDValue Offset = DAG.getConstant(SL->getElementOffset(I), PtrVT);
      SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, InitPtr, Offset);

      Constant *Elt = Init->getAggregateElement(I);
      Chains.push_back(LowerConstantInitializer(Elt, GV, Ptr, Chain, DAG));
    }

    return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  }

  if (SequentialType *SeqTy = dyn_cast<SequentialType>(InitTy)) {
    EVT PtrVT = InitPtr.getValueType();

    unsigned NumElements;
    if (ArrayType *AT = dyn_cast<ArrayType>(SeqTy))
      NumElements = AT->getNumElements();
    else if (VectorType *VT = dyn_cast<VectorType>(SeqTy))
      NumElements = VT->getNumElements();
    else
      llvm_unreachable("Unexpected type");

    unsigned EltSize = TD->getTypeAllocSize(SeqTy->getElementType());
    SmallVector<SDValue, 8> Chains;
    for (unsigned i = 0; i < NumElements; ++i) {
      SDValue Offset = DAG.getConstant(i * EltSize, PtrVT);
      SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, InitPtr, Offset);

      Constant *Elt = Init->getAggregateElement(i);
      Chains.push_back(LowerConstantInitializer(Elt, GV, Ptr, Chain, DAG));
    }

    return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  }

  if (isa<UndefValue>(Init)) {
    EVT VT = EVT::getEVT(InitTy);
    PointerType *PtrTy = PointerType::get(InitTy, AMDGPUAS::PRIVATE_ADDRESS);
    return DAG.getStore(Chain, DL, DAG.getUNDEF(VT), InitPtr,
                        MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
                        TD->getPrefTypeAlignment(InitTy));
  }

  Init->dump();
  llvm_unreachable("Unhandled constant initializer");
}
SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
                                                 SDValue Op,
                                                 SelectionDAG &DAG) const {

  const DataLayout *TD = getTargetMachine().getSubtargetImpl()->getDataLayout();
  GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = G->getGlobal();

  switch (G->getAddressSpace()) {
  default: llvm_unreachable("Global Address lowering not implemented for this "
                            "address space");
  case AMDGPUAS::LOCAL_ADDRESS: {
    // XXX: What does the value of G->getOffset() mean?
    assert(G->getOffset() == 0 &&
           "Do not know what to do with a non-zero offset");

    unsigned Offset;
    if (MFI->LocalMemoryObjects.count(GV) == 0) {
      uint64_t Size = TD->getTypeAllocSize(GV->getType()->getElementType());
      Offset = MFI->LDSSize;
      MFI->LocalMemoryObjects[GV] = Offset;
      // XXX: Account for alignment?
      MFI->LDSSize += Size;
    } else {
      Offset = MFI->LocalMemoryObjects[GV];
    }

    return DAG.getConstant(Offset, getPointerTy(AMDGPUAS::LOCAL_ADDRESS));
  }
  case AMDGPUAS::CONSTANT_ADDRESS: {
    MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
    Type *EltType = GV->getType()->getElementType();
    unsigned Size = TD->getTypeAllocSize(EltType);
    unsigned Alignment = TD->getPrefTypeAlignment(EltType);

    MVT PrivPtrVT = getPointerTy(AMDGPUAS::PRIVATE_ADDRESS);
    MVT ConstPtrVT = getPointerTy(AMDGPUAS::CONSTANT_ADDRESS);

    int FI = FrameInfo->CreateStackObject(Size, Alignment, false);
    SDValue InitPtr = DAG.getFrameIndex(FI, PrivPtrVT);

    const GlobalVariable *Var = cast<GlobalVariable>(GV);
    if (!Var->hasInitializer()) {
      // This has no use, but bugpoint will hit it.
      return DAG.getZExtOrTrunc(InitPtr, SDLoc(Op), ConstPtrVT);
    }

    const Constant *Init = Var->getInitializer();
    SmallVector<SDNode*, 8> WorkList;

    for (SDNode::use_iterator I = DAG.getEntryNode()->use_begin(),
                              E = DAG.getEntryNode()->use_end(); I != E; ++I) {
      if (I->getOpcode() != AMDGPUISD::REGISTER_LOAD && I->getOpcode() != ISD::LOAD)
        continue;
      WorkList.push_back(*I);
    }
    SDValue Chain = LowerConstantInitializer(Init, GV, InitPtr, DAG.getEntryNode(), DAG);
    for (SmallVector<SDNode*, 8>::iterator I = WorkList.begin(),
                                           E = WorkList.end(); I != E; ++I) {
      SmallVector<SDValue, 8> Ops;
      Ops.push_back(Chain);
      for (unsigned i = 1; i < (*I)->getNumOperands(); ++i) {
        Ops.push_back((*I)->getOperand(i));
      }
      DAG.UpdateNodeOperands(*I, Ops);
    }
    return DAG.getZExtOrTrunc(InitPtr, SDLoc(Op), ConstPtrVT);
  }
  }
}
SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SmallVector<SDValue, 8> Args;
  SDValue A = Op.getOperand(0);
  SDValue B = Op.getOperand(1);

  DAG.ExtractVectorElements(A, Args);
  DAG.ExtractVectorElements(B, Args);

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(), Args);
}
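
// For example (illustrative, not from the original source): concatenating two
// v2i32 values <a0, a1> and <b0, b1> yields BUILD_VECTOR v4i32 <a0, a1, b0, b1>.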
SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {

  SmallVector<SDValue, 8> Args;
  unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  EVT VT = Op.getValueType();
  DAG.ExtractVectorElements(Op.getOperand(0), Args, Start,
                            VT.getVectorNumElements());

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(), Args);
}
SDValue AMDGPUTargetLowering::LowerFrameIndex(SDValue Op,
                                              SelectionDAG &DAG) const {

  MachineFunction &MF = DAG.getMachineFunction();
  const AMDGPUFrameLowering *TFL = static_cast<const AMDGPUFrameLowering *>(
      getTargetMachine().getSubtargetImpl()->getFrameLowering());

  FrameIndexSDNode *FIN = cast<FrameIndexSDNode>(Op);

  unsigned FrameIndex = FIN->getIndex();
  unsigned Offset = TFL->getFrameIndexOffset(MF, FrameIndex);
  return DAG.getConstant(Offset * 4 * TFL->getStackWidth(MF),
                         Op.getValueType());
}
SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                      SelectionDAG &DAG) const {
  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  switch (IntrinsicID) {
  default: return Op;
  case AMDGPUIntrinsic::AMDGPU_abs:
  case AMDGPUIntrinsic::AMDIL_abs: // Legacy name.
    return LowerIntrinsicIABS(Op, DAG);
  case AMDGPUIntrinsic::AMDGPU_lrp:
    return LowerIntrinsicLRP(Op, DAG);
  case AMDGPUIntrinsic::AMDGPU_fract:
  case AMDGPUIntrinsic::AMDIL_fraction: // Legacy name.
    return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));

  case AMDGPUIntrinsic::AMDGPU_clamp:
  case AMDGPUIntrinsic::AMDIL_clamp: // Legacy name.
    return DAG.getNode(AMDGPUISD::CLAMP, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case Intrinsic::AMDGPU_div_scale: {
    // 3rd parameter required to be a constant.
    const ConstantSDNode *Param = dyn_cast<ConstantSDNode>(Op.getOperand(3));
    if (!Param)
      return DAG.getUNDEF(VT);

    // Translate to the operands expected by the machine instruction. The
    // first parameter must be the same as the first instruction.
    SDValue Numerator = Op.getOperand(1);
    SDValue Denominator = Op.getOperand(2);

    // Note this order is opposite of the machine instruction's operations,
    // which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The
    // intrinsic has the numerator as the first operand to match a normal
    // division operation.

    SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator;

    return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0,
                       Denominator, Numerator);
  }

  case Intrinsic::AMDGPU_div_fmas:
    return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case Intrinsic::AMDGPU_div_fixup:
    return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case Intrinsic::AMDGPU_trig_preop:
    return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));

  case Intrinsic::AMDGPU_rcp:
    return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1));

  case Intrinsic::AMDGPU_rsq:
    return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));

  case AMDGPUIntrinsic::AMDGPU_legacy_rsq:
    return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));

  case Intrinsic::AMDGPU_rsq_clamped:
    return DAG.getNode(AMDGPUISD::RSQ_CLAMPED, DL, VT, Op.getOperand(1));

  case Intrinsic::AMDGPU_ldexp:
    return DAG.getNode(AMDGPUISD::LDEXP, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));

  case AMDGPUIntrinsic::AMDGPU_imax:
    return DAG.getNode(AMDGPUISD::SMAX, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));
  case AMDGPUIntrinsic::AMDGPU_umax:
    return DAG.getNode(AMDGPUISD::UMAX, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));
  case AMDGPUIntrinsic::AMDGPU_imin:
    return DAG.getNode(AMDGPUISD::SMIN, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));
  case AMDGPUIntrinsic::AMDGPU_umin:
    return DAG.getNode(AMDGPUISD::UMIN, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));

  case AMDGPUIntrinsic::AMDGPU_umul24:
    return DAG.getNode(AMDGPUISD::MUL_U24, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));

  case AMDGPUIntrinsic::AMDGPU_imul24:
    return DAG.getNode(AMDGPUISD::MUL_I24, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));

  case AMDGPUIntrinsic::AMDGPU_umad24:
    return DAG.getNode(AMDGPUISD::MAD_U24, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case AMDGPUIntrinsic::AMDGPU_imad24:
    return DAG.getNode(AMDGPUISD::MAD_I24, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte0:
    return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Op.getOperand(1));

  case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte1:
    return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE1, DL, VT, Op.getOperand(1));

  case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte2:
    return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE2, DL, VT, Op.getOperand(1));

  case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte3:
    return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE3, DL, VT, Op.getOperand(1));

  case AMDGPUIntrinsic::AMDGPU_bfe_i32:
    return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
                       Op.getOperand(1),
                       Op.getOperand(2),
                       Op.getOperand(3));

  case AMDGPUIntrinsic::AMDGPU_bfe_u32:
    return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
                       Op.getOperand(1),
                       Op.getOperand(2),
                       Op.getOperand(3));

  case AMDGPUIntrinsic::AMDGPU_bfi:
    return DAG.getNode(AMDGPUISD::BFI, DL, VT,
                       Op.getOperand(1),
                       Op.getOperand(2),
                       Op.getOperand(3));

  case AMDGPUIntrinsic::AMDGPU_bfm:
    return DAG.getNode(AMDGPUISD::BFM, DL, VT,
                       Op.getOperand(1),
                       Op.getOperand(2));

  case AMDGPUIntrinsic::AMDGPU_brev:
    return DAG.getNode(AMDGPUISD::BREV, DL, VT, Op.getOperand(1));

  case AMDGPUIntrinsic::AMDIL_exp: // Legacy name.
    return DAG.getNode(ISD::FEXP2, DL, VT, Op.getOperand(1));

  case AMDGPUIntrinsic::AMDIL_round_nearest: // Legacy name.
    return DAG.getNode(ISD::FRINT, DL, VT, Op.getOperand(1));
  case AMDGPUIntrinsic::AMDGPU_trunc: // Legacy name.
    return DAG.getNode(ISD::FTRUNC, DL, VT, Op.getOperand(1));
  }
}
/// IABS(a) = SMAX(sub(0, a), a)
SDValue AMDGPUTargetLowering::LowerIntrinsicIABS(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
                            Op.getOperand(1));

  return DAG.getNode(AMDGPUISD::SMAX, DL, VT, Neg, Op.getOperand(1));
}
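
// Worked example (illustrative): IABS(-5) = SMAX(0 - (-5), -5)
//                                         = SMAX(5, -5) = 5.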
/// Linear Interpolation
/// LRP(a, b, c) = muladd(a, b, (1 - a) * c)
SDValue AMDGPUTargetLowering::LowerIntrinsicLRP(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue OneSubA = DAG.getNode(ISD::FSUB, DL, VT,
                                DAG.getConstantFP(1.0f, MVT::f32),
                                Op.getOperand(1));
  SDValue OneSubAC = DAG.getNode(ISD::FMUL, DL, VT, OneSubA,
                                 Op.getOperand(3));
  return DAG.getNode(ISD::FADD, DL, VT,
      DAG.getNode(ISD::FMUL, DL, VT, Op.getOperand(1), Op.getOperand(2)),
      OneSubAC);
}
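
// Worked example (illustrative): LRP(0.25, 8.0, 4.0)
//   = 0.25 * 8.0 + (1 - 0.25) * 4.0 = 2.0 + 3.0 = 5.0.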
/// \brief Generate Min/Max node
SDValue AMDGPUTargetLowering::CombineMinMax(SDNode *N,
                                            SelectionDAG &DAG) const {
  SDLoc DL(N);
  EVT VT = N->getValueType(0);

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  SDValue True = N->getOperand(2);
  SDValue False = N->getOperand(3);
  SDValue CC = N->getOperand(4);

  if (VT != MVT::f32 ||
      !((LHS == True && RHS == False) || (LHS == False && RHS == True))) {
    return SDValue();
  }

  ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
  switch (CCOpcode) {
  case ISD::SETOEQ:
  case ISD::SETONE:
  case ISD::SETUNE:
  case ISD::SETNE:
  case ISD::SETUEQ:
  case ISD::SETEQ:
  case ISD::SETFALSE:
  case ISD::SETFALSE2:
  case ISD::SETTRUE:
  case ISD::SETTRUE2:
  case ISD::SETUO:
  case ISD::SETO:
    llvm_unreachable("Operation should already be optimised!");
  case ISD::SETULE:
  case ISD::SETULT:
  case ISD::SETOLE:
  case ISD::SETOLT:
  case ISD::SETLE:
  case ISD::SETLT: {
    unsigned Opc = (LHS == True) ? AMDGPUISD::FMIN : AMDGPUISD::FMAX;
    return DAG.getNode(Opc, DL, VT, LHS, RHS);
  }
  case ISD::SETGT:
  case ISD::SETGE:
  case ISD::SETUGE:
  case ISD::SETOGE:
  case ISD::SETUGT:
  case ISD::SETOGT: {
    unsigned Opc = (LHS == True) ? AMDGPUISD::FMAX : AMDGPUISD::FMIN;
    return DAG.getNode(Opc, DL, VT, LHS, RHS);
  }
  case ISD::SETCC_INVALID:
    llvm_unreachable("Invalid setcc condcode!");
  }
  return SDValue();
}
SDValue AMDGPUTargetLowering::ScalarizeVectorLoad(const SDValue Op,
                                                  SelectionDAG &DAG) const {
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  EVT MemVT = Load->getMemoryVT();
  EVT MemEltVT = MemVT.getVectorElementType();

  EVT LoadVT = Op.getValueType();
  EVT EltVT = LoadVT.getVectorElementType();
  EVT PtrVT = Load->getBasePtr().getValueType();

  unsigned NumElts = Load->getMemoryVT().getVectorNumElements();
  SmallVector<SDValue, 8> Loads;
  SmallVector<SDValue, 8> Chains;

  SDLoc SL(Op);
  unsigned MemEltSize = MemEltVT.getStoreSize();
  MachinePointerInfo SrcValue(Load->getMemOperand()->getValue());

  for (unsigned i = 0; i < NumElts; ++i) {
    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, Load->getBasePtr(),
                              DAG.getConstant(i * MemEltSize, PtrVT));

    SDValue NewLoad
      = DAG.getExtLoad(Load->getExtensionType(), SL, EltVT,
                       Load->getChain(), Ptr,
                       SrcValue.getWithOffset(i * MemEltSize),
                       MemEltVT, Load->isVolatile(), Load->isNonTemporal(),
                       Load->isInvariant(), Load->getAlignment());
    Loads.push_back(NewLoad.getValue(0));
    Chains.push_back(NewLoad.getValue(1));
  }

  SDValue Ops[] = {
    DAG.getNode(ISD::BUILD_VECTOR, SL, LoadVT, Loads),
    DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Chains)
  };

  return DAG.getMergeValues(Ops, SL);
}
SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue Op,
                                              SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();

  // If this is a 2 element vector, we really want to scalarize and not create
  // weird 1 element vectors.
  if (VT.getVectorNumElements() == 2)
    return ScalarizeVectorLoad(Op, DAG);

  LoadSDNode *Load = cast<LoadSDNode>(Op);
  SDValue BasePtr = Load->getBasePtr();
  EVT PtrVT = BasePtr.getValueType();
  EVT MemVT = Load->getMemoryVT();
  SDLoc SL(Op);
  MachinePointerInfo SrcValue(Load->getMemOperand()->getValue());

  EVT LoVT, HiVT;
  EVT LoMemVT, HiMemVT;
  SDValue Lo, Hi;

  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
  std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemVT);
  std::tie(Lo, Hi) = DAG.SplitVector(Op, SL, LoVT, HiVT);

  SDValue LoLoad
    = DAG.getExtLoad(Load->getExtensionType(), SL, LoVT,
                     Load->getChain(), BasePtr,
                     SrcValue,
                     LoMemVT, Load->isVolatile(), Load->isNonTemporal(),
                     Load->isInvariant(), Load->getAlignment());

  SDValue HiPtr = DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
                              DAG.getConstant(LoMemVT.getStoreSize(), PtrVT));

  SDValue HiLoad
    = DAG.getExtLoad(Load->getExtensionType(), SL, HiVT,
                     Load->getChain(), HiPtr,
                     SrcValue.getWithOffset(LoMemVT.getStoreSize()),
                     HiMemVT, Load->isVolatile(), Load->isNonTemporal(),
                     Load->isInvariant(), Load->getAlignment());

  SDValue Ops[] = {
    DAG.getNode(ISD::CONCAT_VECTORS, SL, VT, LoLoad, HiLoad),
    DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
                LoLoad.getValue(1), HiLoad.getValue(1))
  };

  return DAG.getMergeValues(Ops, SL);
}
SDValue AMDGPUTargetLowering::MergeVectorStore(const SDValue &Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  EVT MemVT = Store->getMemoryVT();
  unsigned MemBits = MemVT.getSizeInBits();

  // Byte stores are really expensive, so if possible, try to pack 32-bit vector
  // truncating store into an i32 store.
  // XXX: We could also optimize other vector bitwidths.
  if (!MemVT.isVector() || MemBits > 32) {
    return SDValue();
  }

  SDLoc DL(Op);
  SDValue Value = Store->getValue();
  EVT VT = Value.getValueType();
  EVT ElemVT = VT.getVectorElementType();
  SDValue Ptr = Store->getBasePtr();
  EVT MemEltVT = MemVT.getVectorElementType();
  unsigned MemEltBits = MemEltVT.getSizeInBits();
  unsigned MemNumElements = MemVT.getVectorNumElements();
  unsigned PackedSize = MemVT.getStoreSizeInBits();
  SDValue Mask = DAG.getConstant((1 << MemEltBits) - 1, MVT::i32);

  assert(Value.getValueType().getScalarSizeInBits() >= 32);

  SDValue PackedValue;
  for (unsigned i = 0; i < MemNumElements; ++i) {
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ElemVT, Value,
                              DAG.getConstant(i, MVT::i32));
    Elt = DAG.getZExtOrTrunc(Elt, DL, MVT::i32);
    Elt = DAG.getNode(ISD::AND, DL, MVT::i32, Elt, Mask); // getZeroExtendInReg

    SDValue Shift = DAG.getConstant(MemEltBits * i, MVT::i32);
    Elt = DAG.getNode(ISD::SHL, DL, MVT::i32, Elt, Shift);

    if (i == 0) {
      PackedValue = Elt;
    } else {
      PackedValue = DAG.getNode(ISD::OR, DL, MVT::i32, PackedValue, Elt);
    }
  }

  if (PackedSize < 32) {
    EVT PackedVT = EVT::getIntegerVT(*DAG.getContext(), PackedSize);
    return DAG.getTruncStore(Store->getChain(), DL, PackedValue, Ptr,
                             Store->getMemOperand()->getPointerInfo(),
                             PackedVT,
                             Store->isNonTemporal(), Store->isVolatile(),
                             Store->getAlignment());
  }

  return DAG.getStore(Store->getChain(), DL, PackedValue, Ptr,
                      Store->getMemOperand()->getPointerInfo(),
                      Store->isVolatile(), Store->isNonTemporal(),
                      Store->getAlignment());
}
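
// For example (illustrative, not from the original source): a truncating
// store of <4 x i32> to <4 x i8> packs element i as
// ((elt & 0xff) << (8 * i)) into a single i32, which is then stored with one
// 32-bit store instead of four byte stores.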
SDValue AMDGPUTargetLowering::ScalarizeVectorStore(SDValue Op,
                                                   SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  EVT MemEltVT = Store->getMemoryVT().getVectorElementType();
  EVT EltVT = Store->getValue().getValueType().getVectorElementType();
  EVT PtrVT = Store->getBasePtr().getValueType();
  unsigned NumElts = Store->getMemoryVT().getVectorNumElements();

  SDLoc SL(Op);
  SmallVector<SDValue, 8> Chains;

  unsigned EltSize = MemEltVT.getStoreSize();
  MachinePointerInfo SrcValue(Store->getMemOperand()->getValue());

  for (unsigned i = 0, e = NumElts; i != e; ++i) {
    SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                              Store->getValue(),
                              DAG.getConstant(i, MVT::i32));

    SDValue Offset = DAG.getConstant(i * MemEltVT.getStoreSize(), PtrVT);
    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, Store->getBasePtr(), Offset);
    SDValue NewStore =
      DAG.getTruncStore(Store->getChain(), SL, Val, Ptr,
                        SrcValue.getWithOffset(i * EltSize),
                        MemEltVT, Store->isNonTemporal(), Store->isVolatile(),
                        Store->getAlignment());
    Chains.push_back(NewStore);
  }

  return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Chains);
}
SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  SDValue Val = Store->getValue();
  EVT VT = Val.getValueType();

  // If this is a 2 element vector, we really want to scalarize and not create
  // weird 1 element vectors.
  if (VT.getVectorNumElements() == 2)
    return ScalarizeVectorStore(Op, DAG);

  EVT MemVT = Store->getMemoryVT();
  SDValue Chain = Store->getChain();
  SDValue BasePtr = Store->getBasePtr();
  SDLoc SL(Op);

  EVT LoVT, HiVT;
  EVT LoMemVT, HiMemVT;
  SDValue Lo, Hi;

  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
  std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemVT);
  std::tie(Lo, Hi) = DAG.SplitVector(Val, SL, LoVT, HiVT);

  EVT PtrVT = BasePtr.getValueType();
  SDValue HiPtr = DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
                              DAG.getConstant(LoMemVT.getStoreSize(), PtrVT));

  MachinePointerInfo SrcValue(Store->getMemOperand()->getValue());
  SDValue LoStore
    = DAG.getTruncStore(Chain, SL, Lo,
                        BasePtr,
                        SrcValue,
                        LoMemVT,
                        Store->isNonTemporal(),
                        Store->isVolatile(),
                        Store->getAlignment());
  SDValue HiStore
    = DAG.getTruncStore(Chain, SL, Hi,
                        HiPtr,
                        SrcValue.getWithOffset(LoMemVT.getStoreSize()),
                        HiMemVT,
                        Store->isNonTemporal(),
                        Store->isVolatile(),
                        Store->getAlignment());

  return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoStore, HiStore);
}
SDValue AMDGPUTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  ISD::LoadExtType ExtType = Load->getExtensionType();
  EVT VT = Op.getValueType();
  EVT MemVT = Load->getMemoryVT();

  if (ExtType != ISD::NON_EXTLOAD && !VT.isVector() && VT.getSizeInBits() > 32) {
    // We can do the extload to 32-bits, and then need to separately extend to
    // 64-bits.

    SDValue ExtLoad32 = DAG.getExtLoad(ExtType, DL, MVT::i32,
                                       Load->getChain(),
                                       Load->getBasePtr(),
                                       MemVT,
                                       Load->getMemOperand());

    SDValue Ops[] = {
      DAG.getNode(ISD::getExtForLoadExtType(ExtType), DL, VT, ExtLoad32),
      ExtLoad32.getValue(1)
    };

    return DAG.getMergeValues(Ops, DL);
  }

  if (ExtType == ISD::NON_EXTLOAD && VT.getSizeInBits() < 32) {
    assert(VT == MVT::i1 && "Only i1 non-extloads expected");
    // FIXME: Copied from PPC
    // First, load into 32 bits, then truncate to 1 bit.

    SDValue Chain = Load->getChain();
    SDValue BasePtr = Load->getBasePtr();
    MachineMemOperand *MMO = Load->getMemOperand();

    SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
                                   BasePtr, MVT::i8, MMO);

    SDValue Ops[] = {
      DAG.getNode(ISD::TRUNCATE, DL, VT, NewLD),
      NewLD.getValue(1)
    };

    return DAG.getMergeValues(Ops, DL);
  }

  if (Subtarget->getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS ||
      Load->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS ||
      ExtType == ISD::NON_EXTLOAD || Load->getMemoryVT().bitsGE(MVT::i32))
    return SDValue();

  // Lower the sub-dword private extload as a 32-bit register load followed by
  // shifting and masking out the wanted byte/halfword.
  SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Load->getBasePtr(),
                            DAG.getConstant(2, MVT::i32));
  SDValue Ret = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
                            Load->getChain(), Ptr,
                            DAG.getTargetConstant(0, MVT::i32),
                            Op.getOperand(2));
  SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32,
                                Load->getBasePtr(),
                                DAG.getConstant(0x3, MVT::i32));
  SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                 DAG.getConstant(3, MVT::i32));

  Ret = DAG.getNode(ISD::SRL, DL, MVT::i32, Ret, ShiftAmt);

  EVT MemEltVT = MemVT.getScalarType();
  if (ExtType == ISD::SEXTLOAD) {
    SDValue MemEltVTNode = DAG.getValueType(MemEltVT);

    SDValue Ops[] = {
      DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, Ret, MemEltVTNode),
      Ret.getValue(1)
    };

    return DAG.getMergeValues(Ops, DL);
  }

  SDValue Ops[] = {
    DAG.getZeroExtendInReg(Ret, DL, MemEltVT),
    Ret.getValue(1)
  };

  return DAG.getMergeValues(Ops, DL);
}
SDValue AMDGPUTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Result = AMDGPUTargetLowering::MergeVectorStore(Op, DAG);
  if (Result.getNode()) {
    return Result;
  }

  StoreSDNode *Store = cast<StoreSDNode>(Op);
  SDValue Chain = Store->getChain();
  if ((Store->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
       Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) &&
      Store->getValue().getValueType().isVector()) {
    return ScalarizeVectorStore(Op, DAG);
  }

  EVT MemVT = Store->getMemoryVT();
  if (Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS &&
      MemVT.bitsLT(MVT::i32)) {
    unsigned Mask = 0;
    if (Store->getMemoryVT() == MVT::i8) {
      Mask = 0xff;
    } else if (Store->getMemoryVT() == MVT::i16) {
      Mask = 0xffff;
    }
    SDValue BasePtr = Store->getBasePtr();
    SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, BasePtr,
                              DAG.getConstant(2, MVT::i32));
    SDValue Dst = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, MVT::i32,
                              Chain, Ptr, DAG.getTargetConstant(0, MVT::i32));

    SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32, BasePtr,
                                  DAG.getConstant(0x3, MVT::i32));

    SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                   DAG.getConstant(3, MVT::i32));

    SDValue SExtValue = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i32,
                                    Store->getValue());

    SDValue MaskedValue = DAG.getZeroExtendInReg(SExtValue, DL, MemVT);

    SDValue ShiftedValue = DAG.getNode(ISD::SHL, DL, MVT::i32,
                                       MaskedValue, ShiftAmt);

    SDValue DstMask = DAG.getNode(ISD::SHL, DL, MVT::i32, DAG.getConstant(Mask, MVT::i32),
                                  ShiftAmt);
    DstMask = DAG.getNode(ISD::XOR, DL, MVT::i32, DstMask,
                          DAG.getConstant(0xffffffff, MVT::i32));
    Dst = DAG.getNode(ISD::AND, DL, MVT::i32, Dst, DstMask);

    SDValue Value = DAG.getNode(ISD::OR, DL, MVT::i32, Dst, ShiftedValue);
    return DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other,
                       Chain, Value, Ptr, DAG.getTargetConstant(0, MVT::i32));
  }
  return SDValue();
}
// This is a shortcut for integer division because we have fast i32<->f32
// conversions, and fast f32 reciprocal instructions. The fractional part of a
// float is enough to accurately represent up to a 24-bit integer.
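//
// In other words (an explanatory note, not from the original source): an f32
// has a 24-bit significand, so any integer with |x| < 2^24 converts to f32
// exactly, and trunc(fa * rcp(fb)) is at most one off from the true quotient;
// the jq correction below fixes that last unit.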
SDValue AMDGPUTargetLowering::LowerDIVREM24(SDValue Op, SelectionDAG &DAG, bool sign) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  MVT IntVT = MVT::i32;
  MVT FltVT = MVT::f32;

  ISD::NodeType ToFp = sign ? ISD::SINT_TO_FP : ISD::UINT_TO_FP;
  ISD::NodeType ToInt = sign ? ISD::FP_TO_SINT : ISD::FP_TO_UINT;

  if (VT.isVector()) {
    unsigned NElts = VT.getVectorNumElements();
    IntVT = MVT::getVectorVT(MVT::i32, NElts);
    FltVT = MVT::getVectorVT(MVT::f32, NElts);
  }

  unsigned BitSize = VT.getScalarType().getSizeInBits();

  SDValue jq = DAG.getConstant(1, IntVT);

  if (sign) {
    // char|short jq = ia ^ ib;
    jq = DAG.getNode(ISD::XOR, DL, VT, LHS, RHS);

    // jq = jq >> (bitsize - 2)
    jq = DAG.getNode(ISD::SRA, DL, VT, jq, DAG.getConstant(BitSize - 2, VT));

    // jq = jq | 0x1
    jq = DAG.getNode(ISD::OR, DL, VT, jq, DAG.getConstant(1, VT));

    // jq = (int)jq
    jq = DAG.getSExtOrTrunc(jq, DL, IntVT);
  }

  // int ia = (int)LHS;
  SDValue ia = sign ?
    DAG.getSExtOrTrunc(LHS, DL, IntVT) : DAG.getZExtOrTrunc(LHS, DL, IntVT);

  // int ib = (int)RHS;
  SDValue ib = sign ?
    DAG.getSExtOrTrunc(RHS, DL, IntVT) : DAG.getZExtOrTrunc(RHS, DL, IntVT);

  // float fa = (float)ia;
  SDValue fa = DAG.getNode(ToFp, DL, FltVT, ia);

  // float fb = (float)ib;
  SDValue fb = DAG.getNode(ToFp, DL, FltVT, ib);

  // float fq = native_divide(fa, fb);
  SDValue fq = DAG.getNode(ISD::FMUL, DL, FltVT,
                           fa, DAG.getNode(AMDGPUISD::RCP, DL, FltVT, fb));

  // fq = trunc(fq);
  fq = DAG.getNode(ISD::FTRUNC, DL, FltVT, fq);

  // float fqneg = -fq;
  SDValue fqneg = DAG.getNode(ISD::FNEG, DL, FltVT, fq);

  // float fr = mad(fqneg, fb, fa);
  SDValue fr = DAG.getNode(ISD::FADD, DL, FltVT,
                           DAG.getNode(ISD::FMUL, DL, FltVT, fqneg, fb), fa);

  // int iq = (int)fq;
  SDValue iq = DAG.getNode(ToInt, DL, IntVT, fq);

  // fr = fabs(fr);
  fr = DAG.getNode(ISD::FABS, DL, FltVT, fr);

  // fb = fabs(fb);
  fb = DAG.getNode(ISD::FABS, DL, FltVT, fb);

  EVT SetCCVT = getSetCCResultType(*DAG.getContext(), VT);

  // int cv = fr >= fb;
  SDValue cv = DAG.getSetCC(DL, SetCCVT, fr, fb, ISD::SETOGE);

  // jq = (cv ? jq : 0);
  jq = DAG.getNode(ISD::SELECT, DL, VT, cv, jq, DAG.getConstant(0, VT));

  // dst = trunc/extend to legal type
  iq = sign ? DAG.getSExtOrTrunc(iq, DL, VT) : DAG.getZExtOrTrunc(iq, DL, VT);

  // Div = iq + jq;
  SDValue Div = DAG.getNode(ISD::ADD, DL, VT, iq, jq);

  // Rem needs compensation, it's easier to recompute it
  SDValue Rem = DAG.getNode(ISD::MUL, DL, VT, Div, RHS);
  Rem = DAG.getNode(ISD::SUB, DL, VT, LHS, Rem);

  SDValue Res[2] = {
    Div,
    Rem
  };
  return DAG.getMergeValues(Res, DL);
}
SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue Num = Op.getOperand(0);
  SDValue Den = Op.getOperand(1);

  if (VT == MVT::i32) {
    if (DAG.MaskedValueIsZero(Op.getOperand(0), APInt(32, 0xff << 24)) &&
        DAG.MaskedValueIsZero(Op.getOperand(1), APInt(32, 0xff << 24))) {
      // TODO: We technically could do this for i64, but shouldn't that just be
      // handled by something generally reducing 64-bit division on 32-bit
      // values to 32-bit?
      return LowerDIVREM24(Op, DAG, false);
    }
  }

  // RCP = URECIP(Den) = 2^32 / Den + e
  // e is rounding error.
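  //
  // Overall shape of the expansion below (a summary sketch, not from the
  // original source): refine the reciprocal estimate using its rounding
  // error, form a quotient estimate with mulhu, recompute the remainder, and
  // then correct both results by at most one Den in either direction.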
  SDValue RCP = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Den);

  // RCP_LO = mul(RCP, Den)
  SDValue RCP_LO = DAG.getNode(ISD::MUL, DL, VT, RCP, Den);

  // RCP_HI = mulhu(RCP, Den)
  SDValue RCP_HI = DAG.getNode(ISD::MULHU, DL, VT, RCP, Den);

  // NEG_RCP_LO = -RCP_LO
  SDValue NEG_RCP_LO = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
                                   RCP_LO);

  // ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO)
  SDValue ABS_RCP_LO = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, VT),
                                       NEG_RCP_LO, RCP_LO,
                                       ISD::SETEQ);
  // Calculate the rounding error from the URECIP instruction
  // E = mulhu(ABS_RCP_LO, RCP)
  SDValue E = DAG.getNode(ISD::MULHU, DL, VT, ABS_RCP_LO, RCP);

  // RCP_A_E = RCP + E
  SDValue RCP_A_E = DAG.getNode(ISD::ADD, DL, VT, RCP, E);

  // RCP_S_E = RCP - E
  SDValue RCP_S_E = DAG.getNode(ISD::SUB, DL, VT, RCP, E);

  // Tmp0 = (RCP_HI == 0 ? RCP_A_E : RCP_S_E)
  SDValue Tmp0 = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, VT),
                                 RCP_A_E, RCP_S_E,
                                 ISD::SETEQ);
  // Quotient = mulhu(Tmp0, Num)
  SDValue Quotient = DAG.getNode(ISD::MULHU, DL, VT, Tmp0, Num);

  // Num_S_Remainder = Quotient * Den
  SDValue Num_S_Remainder = DAG.getNode(ISD::MUL, DL, VT, Quotient, Den);

  // Remainder = Num - Num_S_Remainder
  SDValue Remainder = DAG.getNode(ISD::SUB, DL, VT, Num, Num_S_Remainder);

  // Remainder_GE_Den = (Remainder >= Den ? -1 : 0)
  SDValue Remainder_GE_Den = DAG.getSelectCC(DL, Remainder, Den,
                                             DAG.getConstant(-1, VT),
                                             DAG.getConstant(0, VT),
                                             ISD::SETUGE);
  // Remainder_GE_Zero = (Num >= Num_S_Remainder ? -1 : 0)
  SDValue Remainder_GE_Zero = DAG.getSelectCC(DL, Num,
                                              Num_S_Remainder,
                                              DAG.getConstant(-1, VT),
                                              DAG.getConstant(0, VT),
                                              ISD::SETUGE);
  // Tmp1 = Remainder_GE_Den & Remainder_GE_Zero
  SDValue Tmp1 = DAG.getNode(ISD::AND, DL, VT, Remainder_GE_Den,
                             Remainder_GE_Zero);

  // Calculate Division result:

  // Quotient_A_One = Quotient + 1
  SDValue Quotient_A_One = DAG.getNode(ISD::ADD, DL, VT, Quotient,
                                       DAG.getConstant(1, VT));

  // Quotient_S_One = Quotient - 1
  SDValue Quotient_S_One = DAG.getNode(ISD::SUB, DL, VT, Quotient,
                                       DAG.getConstant(1, VT));

  // Div = (Tmp1 == 0 ? Quotient : Quotient_A_One)
  SDValue Div = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, VT),
                                Quotient, Quotient_A_One, ISD::SETEQ);

  // Div = (Remainder_GE_Zero == 0 ? Quotient_S_One : Div)
  Div = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, VT),
                        Quotient_S_One, Div, ISD::SETEQ);

  // Calculate Rem result:

  // Remainder_S_Den = Remainder - Den
  SDValue Remainder_S_Den = DAG.getNode(ISD::SUB, DL, VT, Remainder, Den);

  // Remainder_A_Den = Remainder + Den
  SDValue Remainder_A_Den = DAG.getNode(ISD::ADD, DL, VT, Remainder, Den);

  // Rem = (Tmp1 == 0 ? Remainder : Remainder_S_Den)
  SDValue Rem = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, VT),
                                Remainder, Remainder_S_Den, ISD::SETEQ);

  // Rem = (Remainder_GE_Zero == 0 ? Remainder_A_Den : Rem)
  Rem = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, VT),
                        Remainder_A_Den, Rem, ISD::SETEQ);

  SDValue Ops[] = {
    Div,
    Rem
  };
  return DAG.getMergeValues(Ops, DL);
}
SDValue AMDGPUTargetLowering::LowerSDIVREM(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);

  if (VT == MVT::i32) {
    if (DAG.ComputeNumSignBits(Op.getOperand(0)) > 8 &&
        DAG.ComputeNumSignBits(Op.getOperand(1)) > 8) {
      // TODO: We technically could do this for i64, but shouldn't that just be
      // handled by something generally reducing 64-bit division on 32-bit
      // values to 32-bit?
      return LowerDIVREM24(Op, DAG, true);
    }
  }

  SDValue Zero = DAG.getConstant(0, VT);
  SDValue NegOne = DAG.getConstant(-1, VT);

  SDValue LHSign = DAG.getSelectCC(DL, LHS, Zero, NegOne, Zero, ISD::SETLT);
  SDValue RHSign = DAG.getSelectCC(DL, RHS, Zero, NegOne, Zero, ISD::SETLT);
  SDValue DSign = DAG.getNode(ISD::XOR, DL, VT, LHSign, RHSign);
  SDValue RSign = LHSign; // Remainder sign is the same as LHS

  LHS = DAG.getNode(ISD::ADD, DL, VT, LHS, LHSign);
  RHS = DAG.getNode(ISD::ADD, DL, VT, RHS, RHSign);

  LHS = DAG.getNode(ISD::XOR, DL, VT, LHS, LHSign);
  RHS = DAG.getNode(ISD::XOR, DL, VT, RHS, RHSign);

  SDValue Div = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(VT, VT), LHS, RHS);
  SDValue Rem = Div.getValue(1);

  Div = DAG.getNode(ISD::XOR, DL, VT, Div, DSign);
  Rem = DAG.getNode(ISD::XOR, DL, VT, Rem, RSign);

  Div = DAG.getNode(ISD::SUB, DL, VT, Div, DSign);
  Rem = DAG.getNode(ISD::SUB, DL, VT, Rem, RSign);

  SDValue Res[2] = {
    Div,
    Rem
  };
  return DAG.getMergeValues(Res, DL);
}
// (frem x, y) -> (fsub x, (fmul (ftrunc (fdiv x, y)), y))
SDValue AMDGPUTargetLowering::LowerFREM(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  EVT VT = Op.getValueType();
  SDValue X = Op.getOperand(0);
  SDValue Y = Op.getOperand(1);

  SDValue Div = DAG.getNode(ISD::FDIV, SL, VT, X, Y);
  SDValue Floor = DAG.getNode(ISD::FTRUNC, SL, VT, Div);
  SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, Floor, Y);

  return DAG.getNode(ISD::FSUB, SL, VT, X, Mul);
}
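
// Worked example (illustrative): frem(5.5, 2.0):
//   5.5 / 2.0 = 2.75; trunc(2.75) = 2.0; 2.0 * 2.0 = 4.0; 5.5 - 4.0 = 1.5.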
SDValue AMDGPUTargetLowering::LowerFCEIL(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);

  // result = trunc(src)
  // if (src > 0.0 && src != result)
  //   result += 1.0

  SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);

  const SDValue Zero = DAG.getConstantFP(0.0, MVT::f64);
  const SDValue One = DAG.getConstantFP(1.0, MVT::f64);

  EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::f64);

  SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOGT);
  SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
  SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);

  SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, One, Zero);
  return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
}
SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);

  assert(Op.getValueType() == MVT::f64);

  const SDValue Zero = DAG.getConstant(0, MVT::i32);
  const SDValue One = DAG.getConstant(1, MVT::i32);

  SDValue VecSrc = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);

  // Extract the upper half, since this is where we will find the sign and
  // exponent.
  SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, VecSrc, One);

  const unsigned FractBits = 52;
  const unsigned ExpBits = 11;

  // Extract the exponent.
  SDValue ExpPart = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
                                Hi,
                                DAG.getConstant(FractBits - 32, MVT::i32),
                                DAG.getConstant(ExpBits, MVT::i32));
  SDValue Exp = DAG.getNode(ISD::SUB, SL, MVT::i32, ExpPart,
                            DAG.getConstant(1023, MVT::i32));

  // Extract the sign bit.
  const SDValue SignBitMask = DAG.getConstant(UINT32_C(1) << 31, MVT::i32);
  SDValue SignBit = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, SignBitMask);

  // Extend back to 64 bits.
  SDValue SignBit64 = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
                                  Zero, SignBit);
  SignBit64 = DAG.getNode(ISD::BITCAST, SL, MVT::i64, SignBit64);

  SDValue BcInt = DAG.getNode(ISD::BITCAST, SL, MVT::i64, Src);
  const SDValue FractMask
    = DAG.getConstant((UINT64_C(1) << FractBits) - 1, MVT::i64);

  SDValue Shr = DAG.getNode(ISD::SRA, SL, MVT::i64, FractMask, Exp);
  SDValue Not = DAG.getNOT(SL, Shr, MVT::i64);
  SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, BcInt, Not);
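
  // For unbiased exponent E in [0, 51], the low (52 - E) significand bits
  // have weight below 1.0. FractMask >> E is exactly that mask, so clearing
  // those bits (AND with its complement) rounds the value toward zero.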
  EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::i32);

  const SDValue FiftyOne = DAG.getConstant(FractBits - 1, MVT::i32);

  SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT);
  SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT);

  SDValue Tmp1 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpLt0, SignBit64, Tmp0);
  SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpGt51, BcInt, Tmp1);

  return DAG.getNode(ISD::BITCAST, SL, MVT::f64, Tmp2);
}
SDValue AMDGPUTargetLowering::LowerFRINT(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);

  assert(Op.getValueType() == MVT::f64);

  APFloat C1Val(APFloat::IEEEdouble, "0x1.0p+52");
  SDValue C1 = DAG.getConstantFP(C1Val, MVT::f64);
  SDValue CopySign = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, C1, Src);

  SDValue Tmp1 = DAG.getNode(ISD::FADD, SL, MVT::f64, Src, CopySign);
  SDValue Tmp2 = DAG.getNode(ISD::FSUB, SL, MVT::f64, Tmp1, CopySign);
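
  // Adding 2^52 (with src's sign) forces the FPU to round src at integer
  // granularity, since f64 has 52 fraction bits and |x| >= 2^52 leaves no
  // bits below the binary point; subtracting it back yields rint(src). For
  // example, 2.5 + 2^52 rounds to 4503599627370498.0 (ties-to-even), and
  // subtracting 2^52 gives 2.0.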
  SDValue Fabs = DAG.getNode(ISD::FABS, SL, MVT::f64, Src);

  APFloat C2Val(APFloat::IEEEdouble, "0x1.fffffffffffffp+51");
  SDValue C2 = DAG.getConstantFP(C2Val, MVT::f64);

  EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::f64);
  SDValue Cond = DAG.getSetCC(SL, SetCCVT, Fabs, C2, ISD::SETOGT);

  return DAG.getSelect(SL, MVT::f64, Cond, Src, Tmp2);
}
SDValue AMDGPUTargetLowering::LowerFNEARBYINT(SDValue Op,
                                              SelectionDAG &DAG) const {
  // FNEARBYINT and FRINT are the same, except in their handling of FP
  // exceptions. Those aren't really meaningful for us, and OpenCL only has
  // rint, so just treat them as equivalent.
  return DAG.getNode(ISD::FRINT, SDLoc(Op), Op.getValueType(), Op.getOperand(0));
}
SDValue AMDGPUTargetLowering::LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);

  // result = trunc(src);
  // if (src < 0.0 && src != result)
  //   result += -1.0
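  //
  // For example, floor(-2.3): trunc gives -2.0, and since -2.3 < 0.0 and
  // -2.3 != -2.0, adding -1.0 yields the correct -3.0.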
  SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);

  const SDValue Zero = DAG.getConstantFP(0.0, MVT::f64);
  const SDValue NegOne = DAG.getConstantFP(-1.0, MVT::f64);

  EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::f64);

  SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOLT);
  SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
  SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);

  SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, NegOne, Zero);
  return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
}
SDValue AMDGPUTargetLowering::LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG,
                                               bool Signed) const {
  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);

  SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);

  SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC,
                           DAG.getConstant(0, MVT::i32));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC,
                           DAG.getConstant(1, MVT::i32));

  SDValue CvtHi = DAG.getNode(Signed ? ISD::SINT_TO_FP : ISD::UINT_TO_FP,
                              SL, MVT::f64, Hi);

  SDValue CvtLo = DAG.getNode(ISD::UINT_TO_FP, SL, MVT::f64, Lo);
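
  // The 64-bit value is Hi * 2^32 + Lo, so convert the halves separately and
  // recombine as ldexp(f64(Hi), 32) + f64(Lo). Only the high half carries the
  // sign; the low half is always converted unsigned. Each half converts to
  // f64 exactly, since 32 significant bits fit in a 53-bit significand.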
  SDValue LdExp = DAG.getNode(AMDGPUISD::LDEXP, SL, MVT::f64, CvtHi,
                              DAG.getConstant(32, MVT::i32));

  return DAG.getNode(ISD::FADD, SL, MVT::f64, LdExp, CvtLo);
}
SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue S0 = Op.getOperand(0);
  if (S0.getValueType() != MVT::i64)
    return SDValue();

  EVT DestVT = Op.getValueType();
  if (DestVT == MVT::f64)
    return LowerINT_TO_FP64(Op, DAG, false);

  assert(DestVT == MVT::f32);

  SDLoc DL(Op);

  // f32 uint_to_fp i64
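  //
  // Convert each 32-bit half separately and recombine as
  // f32(Hi) * 2^32 + f32(Lo). For example, 0x100000001 (2^32 + 1) gives
  // Hi = 1, Lo = 1, i.e. 1.0f * 4294967296.0f + 1.0f.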
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
                           DAG.getConstant(0, MVT::i32));
  SDValue FloatLo = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Lo);
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
                           DAG.getConstant(1, MVT::i32));
  SDValue FloatHi = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Hi);
  FloatHi = DAG.getNode(ISD::FMUL, DL, MVT::f32, FloatHi,
                        DAG.getConstantFP(4294967296.0f, MVT::f32)); // 2^32
  return DAG.getNode(ISD::FADD, DL, MVT::f32, FloatLo, FloatHi);
}
SDValue AMDGPUTargetLowering::LowerSINT_TO_FP(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue Src = Op.getOperand(0);
  if (Src.getValueType() == MVT::i64 && Op.getValueType() == MVT::f64)
    return LowerINT_TO_FP64(Op, DAG, true);

  return SDValue();
}
SDValue AMDGPUTargetLowering::ExpandSIGN_EXTEND_INREG(SDValue Op,
                                                      unsigned BitsDiff,
                                                      SelectionDAG &DAG) const {
  MVT VT = Op.getSimpleValueType();
  SDLoc DL(Op);
  SDValue Shift = DAG.getConstant(BitsDiff, VT);
  // Shift left by 'Shift' bits.
  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, Op.getOperand(0), Shift);
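  // For example, sign-extending from i8 within an i32 uses BitsDiff = 24:
  // the shl moves bit 7 up to bit 31, and the arithmetic shift right smears
  // it back down across the upper 24 bits.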
  // Signed shift right by 'Shift' bits.
  return DAG.getNode(ISD::SRA, DL, VT, Shl, Shift);
}
SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                     SelectionDAG &DAG) const {
  EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
  MVT VT = Op.getSimpleValueType();
  MVT ScalarVT = VT.getScalarType();

  if (!VT.isVector())
    return SDValue();

  SDValue Src = Op.getOperand(0);
  SDLoc DL(Op);

  // TODO: Don't scalarize on Evergreen?
  unsigned NElts = VT.getVectorNumElements();
  SmallVector<SDValue, 8> Args;
  DAG.ExtractVectorElements(Src, Args, 0, NElts);

  SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType());
  for (unsigned I = 0; I < NElts; ++I)
    Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp);

  return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Args);
}
//===----------------------------------------------------------------------===//
// Custom DAG optimizations
//===----------------------------------------------------------------------===//
static bool isU24(SDValue Op, SelectionDAG &DAG) {
  APInt KnownZero, KnownOne;
  EVT VT = Op.getValueType();
  DAG.computeKnownBits(Op, KnownZero, KnownOne);
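
  // KnownZero.countLeadingOnes() is the number of high bits known to be zero;
  // e.g. a value zero-extended from i16 has 16 known-zero high bits in an
  // i32, leaving 32 - 16 = 16 significant bits, which fits in 24.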
  return (VT.getSizeInBits() - KnownZero.countLeadingOnes()) <= 24;
}
static bool isI24(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
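
  // A value fits in a signed 24-bit field when at most 23 of its bits carry
  // information beyond the sign; e.g. an i32 sign-extended from i16 has 17
  // sign bits, and 32 - 17 = 15 < 24.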
  // In order for this to be a signed 24-bit value, bit 23 must be a sign bit.
  return VT.getSizeInBits() >= 24 && // Types less than 24-bit should be treated
                                     // as unsigned 24-bit values.
         (VT.getSizeInBits() - DAG.ComputeNumSignBits(Op)) < 24;
}
static void simplifyI24(SDValue Op, TargetLowering::DAGCombinerInfo &DCI) {
  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT = Op.getValueType();

  APInt Demanded = APInt::getLowBitsSet(VT.getSizeInBits(), 24);
  APInt KnownZero, KnownOne;
  TargetLowering::TargetLoweringOpt TLO(DAG, true, true);
  if (TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO))
    DCI.CommitTargetLoweringOpt(TLO);
}
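
// Constant-fold a BFE (bit field extract) of a constant Src0. The field of
// Width bits at bit Offset is moved to the top with a left shift so that a
// plain right shift in IntTy (signed or unsigned) performs the extension.
// For example, BFE_I32 of 0x0000ff00 with Offset = 8, Width = 8:
// 0xff00 << 16 = 0xff000000, and an arithmetic >> 24 gives -1 (0xffffffff).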
template <typename IntTy>
static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0,
                               uint32_t Offset, uint32_t Width) {
  if (Width + Offset < 32) {
    uint32_t Shl = static_cast<uint32_t>(Src0) << (32 - Offset - Width);
    IntTy Result = static_cast<IntTy>(Shl) >> (32 - Width);
    return DAG.getConstant(Result, MVT::i32);
  }

  return DAG.getConstant(Src0 >> Offset, MVT::i32);
}
static bool usesAllNormalStores(SDNode *LoadVal) {
  for (SDNode::use_iterator I = LoadVal->use_begin(); !I.atEnd(); ++I) {
    if (!ISD::isNormalStore(*I))
      return false;
  }

  return true;
}
// If we have a copy of an illegal type, replace it with a load / store of an
// equivalently sized legal type. This avoids intermediate bit pack / unpack
// instructions emitted when handling extloads and truncstores. Ideally we could
// recognize the pack / unpack pattern to eliminate it.
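// For example, a v4i8 value copied through memory would otherwise be
// legalized as an extending load plus a truncating store; loading and storing
// the same 32 bits as a single i32 needs no repacking.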
SDValue AMDGPUTargetLowering::performStoreCombine(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  if (!DCI.isBeforeLegalize())
    return SDValue();

  StoreSDNode *SN = cast<StoreSDNode>(N);
  SDValue Value = SN->getValue();
  EVT VT = Value.getValueType();

  if (isTypeLegal(VT) || SN->isVolatile() || !ISD::isNormalLoad(Value.getNode()))
    return SDValue();

  LoadSDNode *LoadVal = cast<LoadSDNode>(Value);
  if (LoadVal->isVolatile() || !usesAllNormalStores(LoadVal))
    return SDValue();

  EVT MemVT = LoadVal->getMemoryVT();

  SDLoc SL(N);
  SelectionDAG &DAG = DCI.DAG;
  EVT LoadVT = getEquivalentMemType(*DAG.getContext(), MemVT);

  SDValue NewLoad = DAG.getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD,
                                LoadVT, SL,
                                LoadVal->getChain(),
                                LoadVal->getBasePtr(),
                                LoadVal->getOffset(),
                                LoadVT,
                                LoadVal->getMemOperand());

  SDValue CastLoad = DAG.getNode(ISD::BITCAST, SL, VT, NewLoad.getValue(0));
  DCI.CombineTo(LoadVal, CastLoad, NewLoad.getValue(1), false);

  return DAG.getStore(SN->getChain(), SL, NewLoad,
                      SN->getBasePtr(), SN->getMemOperand());
}
SDValue AMDGPUTargetLowering::performMulCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);

  if (VT.isVector() || VT.getSizeInBits() > 32)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue Mul;

  if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
    N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
    N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
    Mul = DAG.getNode(AMDGPUISD::MUL_U24, DL, MVT::i32, N0, N1);
  } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
    N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
    N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
    Mul = DAG.getNode(AMDGPUISD::MUL_I24, DL, MVT::i32, N0, N1);
  } else {
    return SDValue();
  }

  // We need to use sext even for MUL_U24, because MUL_U24 is used
  // for signed multiply of 8 and 16-bit types.
  return DAG.getSExtOrTrunc(Mul, DL, VT);
}
SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  switch (N->getOpcode()) {
  default:
    break;
  case ISD::MUL:
    return performMulCombine(N, DCI);
  case AMDGPUISD::MUL_I24:
  case AMDGPUISD::MUL_U24: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    simplifyI24(N0, DCI);
    simplifyI24(N1, DCI);
    return SDValue();
  }
  case ISD::SELECT_CC: {
    return CombineMinMax(N, DAG);
  }
  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    assert(!N->getValueType(0).isVector() &&
           "Vector handling of BFE not implemented");
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
    if (!Width)
      break;

    uint32_t WidthVal = Width->getZExtValue() & 0x1f;
    if (WidthVal == 0)
      return DAG.getConstant(0, MVT::i32);

    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (!Offset)
      break;

    SDValue BitsFrom = N->getOperand(0);
    uint32_t OffsetVal = Offset->getZExtValue() & 0x1f;

    bool Signed = N->getOpcode() == AMDGPUISD::BFE_I32;

    if (OffsetVal == 0) {
      // This is already sign / zero extended, so try to fold away extra BFEs.
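      //
      // A BFE_I32 of width W guarantees 32 - W + 1 sign bits (BFE_U32 one
      // fewer, since its extension bits are zeros rather than copies of bit
      // W - 1); if the source already provides at least that many, the BFE
      // is a no-op.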
      unsigned SignBits = Signed ? (32 - WidthVal + 1) : (32 - WidthVal);

      unsigned OpSignBits = DAG.ComputeNumSignBits(BitsFrom);
      if (OpSignBits >= SignBits)
        return BitsFrom;

      EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), WidthVal);
      if (Signed) {
        // This is a sign_extend_inreg. Replace it to take advantage of
        // existing DAG combines. If not eliminated, we will match back to BFE
        // during selection.

        // TODO: The sext_inreg of extended types ends up here; we could
        // handle them in a single BFE.
        return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, BitsFrom,
                           DAG.getValueType(SmallVT));
      }

      return DAG.getZeroExtendInReg(BitsFrom, DL, SmallVT);
    }

    if (ConstantSDNode *CVal = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      if (Signed) {
        // Avoid undefined left shift of a negative in the constant fold.
        // TODO: I'm not sure what the behavior of the hardware is; this
        // should probably follow that instead.
        return constantFoldBFE<int32_t>(DAG,
                                        CVal->getSExtValue(),
                                        OffsetVal,
                                        WidthVal);
      }

      return constantFoldBFE<uint32_t>(DAG,
                                       CVal->getZExtValue(),
                                       OffsetVal,
                                       WidthVal);
    }

    APInt Demanded = APInt::getBitsSet(32,
                                       OffsetVal,
                                       OffsetVal + WidthVal);
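
    // Only the field [OffsetVal, OffsetVal + WidthVal) of the source is read;
    // e.g. Offset = 8, Width = 8 demands the mask 0x0000ff00, letting
    // SimplifyDemandedBits drop whatever produces the remaining bits.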
    if ((OffsetVal + WidthVal) >= 32) {
      SDValue ShiftVal = DAG.getConstant(OffsetVal, MVT::i32);
      return DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, MVT::i32,
                         BitsFrom, ShiftVal);
    }

    APInt KnownZero, KnownOne;
    TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                          !DCI.isBeforeLegalizeOps());
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    if (TLO.ShrinkDemandedConstant(BitsFrom, Demanded) ||
        TLI.SimplifyDemandedBits(BitsFrom, Demanded, KnownZero, KnownOne, TLO)) {
      DCI.CommitTargetLoweringOpt(TLO);
    }

    break;
  }

  case ISD::STORE:
    return performStoreCombine(N, DCI);
  }

  return SDValue();
}
//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//
void AMDGPUTargetLowering::getOriginalFunctionArgs(
                               SelectionDAG &DAG,
                               const Function *F,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               SmallVectorImpl<ISD::InputArg> &OrigIns) const {

  for (unsigned i = 0, e = Ins.size(); i < e; ++i) {
    if (Ins[i].ArgVT == Ins[i].VT) {
      OrigIns.push_back(Ins[i]);
      continue;
    }

    EVT VT;
    if (Ins[i].ArgVT.isVector() && !Ins[i].VT.isVector()) {
      // Vector has been split into scalars.
      VT = Ins[i].ArgVT.getVectorElementType();
    } else if (Ins[i].VT.isVector() && Ins[i].ArgVT.isVector() &&
               Ins[i].ArgVT.getVectorElementType() !=
               Ins[i].VT.getVectorElementType()) {
      // Vector elements have been promoted.
      VT = Ins[i].ArgVT;
    } else {
      // Vector has been split into smaller vectors.
      VT = Ins[i].VT;
    }

    ISD::InputArg Arg(Ins[i].Flags, VT, VT, Ins[i].Used,
                      Ins[i].OrigArgIndex, Ins[i].PartOffset);
    OrigIns.push_back(Arg);
  }
}
bool AMDGPUTargetLowering::isHWTrueValue(SDValue Op) const {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->isExactlyValue(1.0);
  }
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
    return C->isAllOnesValue();
  }
  return false;
}

bool AMDGPUTargetLowering::isHWFalseValue(SDValue Op) const {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->getValueAPF().isZero();
  }
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
    return C->isNullValue();
  }
  return false;
}
SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
                                                   const TargetRegisterClass *RC,
                                                   unsigned Reg, EVT VT) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned VirtualRegister;
  if (!MRI.isLiveIn(Reg)) {
    VirtualRegister = MRI.createVirtualRegister(RC);
    MRI.addLiveIn(Reg, VirtualRegister);
  } else {
    VirtualRegister = MRI.getLiveInVirtReg(Reg);
  }
  return DAG.getRegister(VirtualRegister, VT);
}
#define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;

const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return nullptr;
  // AMDIL DAG nodes
  NODE_NAME_CASE(CALL);
  NODE_NAME_CASE(UMUL);
  NODE_NAME_CASE(RET_FLAG);
  NODE_NAME_CASE(BRANCH_COND);

  // AMDGPU DAG nodes
  NODE_NAME_CASE(DWORDADDR)
  NODE_NAME_CASE(FRACT)
  NODE_NAME_CASE(CLAMP)
  NODE_NAME_CASE(FMAX)
  NODE_NAME_CASE(SMAX)
  NODE_NAME_CASE(UMAX)
  NODE_NAME_CASE(FMIN)
  NODE_NAME_CASE(SMIN)
  NODE_NAME_CASE(UMIN)
  NODE_NAME_CASE(URECIP)
  NODE_NAME_CASE(DIV_SCALE)
  NODE_NAME_CASE(DIV_FMAS)
  NODE_NAME_CASE(DIV_FIXUP)
  NODE_NAME_CASE(TRIG_PREOP)
  NODE_NAME_CASE(RCP)
  NODE_NAME_CASE(RSQ)
  NODE_NAME_CASE(RSQ_LEGACY)
  NODE_NAME_CASE(RSQ_CLAMPED)
  NODE_NAME_CASE(LDEXP)
  NODE_NAME_CASE(DOT4)
  NODE_NAME_CASE(BFE_U32)
  NODE_NAME_CASE(BFE_I32)
  NODE_NAME_CASE(BFI)
  NODE_NAME_CASE(BREV)
  NODE_NAME_CASE(MUL_U24)
  NODE_NAME_CASE(MUL_I24)
  NODE_NAME_CASE(MAD_U24)
  NODE_NAME_CASE(MAD_I24)
  NODE_NAME_CASE(EXPORT)
  NODE_NAME_CASE(CONST_ADDRESS)
  NODE_NAME_CASE(REGISTER_LOAD)
  NODE_NAME_CASE(REGISTER_STORE)
  NODE_NAME_CASE(LOAD_CONSTANT)
  NODE_NAME_CASE(LOAD_INPUT)
  NODE_NAME_CASE(SAMPLE)
  NODE_NAME_CASE(SAMPLEB)
  NODE_NAME_CASE(SAMPLED)
  NODE_NAME_CASE(SAMPLEL)
  NODE_NAME_CASE(CVT_F32_UBYTE0)
  NODE_NAME_CASE(CVT_F32_UBYTE1)
  NODE_NAME_CASE(CVT_F32_UBYTE2)
  NODE_NAME_CASE(CVT_F32_UBYTE3)
  NODE_NAME_CASE(BUILD_VERTICAL_VECTOR)
  NODE_NAME_CASE(CONST_DATA_PTR)
  NODE_NAME_CASE(STORE_MSKOR)
  NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
  }
}
static void computeKnownBitsForMinMax(const SDValue Op0,
                                      const SDValue Op1,
                                      APInt &KnownZero,
                                      APInt &KnownOne,
                                      const SelectionDAG &DAG,
                                      unsigned Depth) {
  APInt Op0Zero, Op0One;
  APInt Op1Zero, Op1One;
  DAG.computeKnownBits(Op0, Op0Zero, Op0One, Depth);
  DAG.computeKnownBits(Op1, Op1Zero, Op1One, Depth);
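
  // Regardless of which operand the min / max actually selects, a bit of the
  // result is known only if it is known to be the same in both inputs, so
  // intersect the two known-bit sets.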
  KnownZero = Op0Zero & Op1Zero;
  KnownOne = Op0One & Op1One;
}
void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
  const SDValue Op,
  APInt &KnownZero,
  APInt &KnownOne,
  const SelectionDAG &DAG,
  unsigned Depth) const {

  KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0); // Don't know anything.

  unsigned Opc = Op.getOpcode();

  switch (Opc) {
  default:
    break;
  case ISD::INTRINSIC_WO_CHAIN: {
    // FIXME: The intrinsic should just use the node.
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
    case AMDGPUIntrinsic::AMDGPU_imax:
    case AMDGPUIntrinsic::AMDGPU_umax:
    case AMDGPUIntrinsic::AMDGPU_imin:
    case AMDGPUIntrinsic::AMDGPU_umin:
      computeKnownBitsForMinMax(Op.getOperand(1), Op.getOperand(2),
                                KnownZero, KnownOne, DAG, Depth);
      break;
    default:
      break;
    }

    break;
  }
  case AMDGPUISD::SMAX:
  case AMDGPUISD::UMAX:
  case AMDGPUISD::SMIN:
  case AMDGPUISD::UMIN:
    computeKnownBitsForMinMax(Op.getOperand(0), Op.getOperand(1),
                              KnownZero, KnownOne, DAG, Depth);
    break;
  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    ConstantSDNode *CWidth = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    if (!CWidth)
      return;

    unsigned BitWidth = 32;
    uint32_t Width = CWidth->getZExtValue() & 0x1f;
    if (Width == 0) {
      KnownZero = APInt::getAllOnesValue(BitWidth);
      KnownOne = APInt::getNullValue(BitWidth);
      return;
    }

    // FIXME: This could do a lot more. If offset is 0, should be the same as
    // sign_extend_inreg implementation, but that involves duplicating it.
    if (Opc == AMDGPUISD::BFE_I32)
      KnownOne = APInt::getHighBitsSet(BitWidth, BitWidth - Width);
    else
      KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - Width);

    break;
  }
  }
}
unsigned AMDGPUTargetLowering::ComputeNumSignBitsForTargetNode(
  SDValue Op,
  const SelectionDAG &DAG,
  unsigned Depth) const {
  switch (Op.getOpcode()) {
  case AMDGPUISD::BFE_I32: {
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    if (!Width)
      return 1;

    unsigned SignBits = 32 - Width->getZExtValue() + 1;
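    // An extract of Width bits sign-extends into the remaining 32 - Width
    // high bits, plus the field's own sign bit; e.g. Width = 8 guarantees
    // 32 - 8 + 1 = 25 sign bits.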
    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    if (!Offset || !Offset->isNullValue())
      return SignBits;

    // TODO: Could probably figure something out with non-0 offsets.
    unsigned Op0SignBits = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
    return std::max(SignBits, Op0SignBits);
  }

  case AMDGPUISD::BFE_U32: {
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    return Width ? 32 - (Width->getZExtValue() & 0x1f) : 1;