//===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This is the parent TargetLowering class for hardware code gen
/// targets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUISelLowering.h"
#include "AMDGPUFrameLowering.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDILIntrinsicInfo.h"
#include "R600MachineFunctionInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"

using namespace llvm;

namespace {

/// Diagnostic information for unimplemented or unsupported feature reporting.
class DiagnosticInfoUnsupported : public DiagnosticInfo {
private:
  const Twine &Description;
  const Function &Fn;

  static int KindID;

  static int getKindID() {
    if (KindID == 0)
      KindID = llvm::getNextAvailablePluginDiagnosticKind();
    return KindID;
  }

public:
  DiagnosticInfoUnsupported(const Function &Fn, const Twine &Desc,
                            DiagnosticSeverity Severity = DS_Error)
    : DiagnosticInfo(getKindID(), Severity),
      Description(Desc),
      Fn(Fn) { }

  const Function &getFunction() const { return Fn; }
  const Twine &getDescription() const { return Description; }

  void print(DiagnosticPrinter &DP) const override {
    DP << "unsupported " << getDescription() << " in " << Fn.getName();
  }

  static bool classof(const DiagnosticInfo *DI) {
    return DI->getKind() == getKindID();
  }
};

int DiagnosticInfoUnsupported::KindID = 0;
}

static bool allocateStack(unsigned ValNo, MVT ValVT, MVT LocVT,
                          CCValAssign::LocInfo LocInfo,
                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
  unsigned Offset = State.AllocateStack(ValVT.getStoreSize(),
                                        ArgFlags.getOrigAlign());
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));

  return true;
}

#include "AMDGPUGenCallingConv.inc"

// Find a larger type to do a load / store of a vector with.
EVT AMDGPUTargetLowering::getEquivalentMemType(LLVMContext &Ctx, EVT VT) {
  unsigned StoreSize = VT.getStoreSizeInBits();
  if (StoreSize <= 32)
    return EVT::getIntegerVT(Ctx, StoreSize);

  assert(StoreSize % 32 == 0 && "Store size not a multiple of 32");
  return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
}
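
// getEquivalentMemType examples: v2i16 (32 bits) maps to i32, and v4f32
// (128 bits) maps to v4i32; a hypothetical v8i16 input (128 bits) would
// likewise map to v4i32.
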
// Type for a vector that will be loaded to.
EVT AMDGPUTargetLowering::getEquivalentLoadRegType(LLVMContext &Ctx, EVT VT) {
  unsigned StoreSize = VT.getStoreSizeInBits();
  if (StoreSize <= 32)
    return EVT::getIntegerVT(Ctx, 32);

  return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
}

AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
  TargetLowering(TM, new TargetLoweringObjectFileELF()) {

  Subtarget = &TM.getSubtarget<AMDGPUSubtarget>();

  // Initialize target lowering borrowed from AMDIL
  InitAMDILLowering();

  // We need to custom lower some of the intrinsics
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // Library functions. These default to Expand, but we have instructions
  // for them.
  setOperationAction(ISD::FCEIL, MVT::f32, Legal);
  setOperationAction(ISD::FEXP2, MVT::f32, Legal);
  setOperationAction(ISD::FPOW, MVT::f32, Legal);
  setOperationAction(ISD::FLOG2, MVT::f32, Legal);
  setOperationAction(ISD::FABS, MVT::f32, Legal);
  setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
  setOperationAction(ISD::FRINT, MVT::f32, Legal);
  setOperationAction(ISD::FROUND, MVT::f32, Legal);
  setOperationAction(ISD::FTRUNC, MVT::f32, Legal);

  // Lower floating point store/load to integer store/load to reduce the number
  // of patterns in tablegen.
  setOperationAction(ISD::STORE, MVT::f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);

  setOperationAction(ISD::STORE, MVT::v2f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v4f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::v8f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::STORE, MVT::v16f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::STORE, MVT::f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::f64, MVT::i64);

  setOperationAction(ISD::STORE, MVT::v2f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f64, MVT::v2i64);

  // Custom lowering of vector stores is required for local address space
  // stores.
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
  // XXX: Native v2i32 local address space stores are possible, but not
  // currently implemented.
  setOperationAction(ISD::STORE, MVT::v2i32, Custom);

  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Custom);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Custom);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Custom);

  // XXX: This can be changed to Custom, once ExpandVectorStores can
  // handle 64-bit stores.
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);

  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i64, MVT::i1, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
  setTruncStoreAction(MVT::v4i64, MVT::v4i1, Expand);

  setOperationAction(ISD::LOAD, MVT::f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);

  setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::LOAD, MVT::f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f64, MVT::i64);

  setOperationAction(ISD::LOAD, MVT::v2f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f64, MVT::v2i64);

  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i32, Custom);

  setLoadExtAction(ISD::EXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4i16, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v4i16, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i16, Expand);

  setOperationAction(ISD::BR_CC, MVT::i1, Expand);

  if (Subtarget->getGeneration() < AMDGPUSubtarget::SEA_ISLANDS) {
    setOperationAction(ISD::FCEIL, MVT::f64, Custom);
    setOperationAction(ISD::FTRUNC, MVT::f64, Custom);
    setOperationAction(ISD::FRINT, MVT::f64, Custom);
    setOperationAction(ISD::FFLOOR, MVT::f64, Custom);
  }

  if (!Subtarget->hasBFI()) {
    // fcopysign can be done in a single instruction with BFI.
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  }

  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Custom);

    // GPU does not have divrem function for signed or unsigned.
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Custom);

    // GPU does not have [S|U]MUL_LOHI functions as a single instruction.
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);

    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
  }

  if (!Subtarget->hasBCNT(32))
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);

  if (!Subtarget->hasBCNT(64))
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);

  // The hardware supports 32-bit ROTR, but not ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i64, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Expand);
  setOperationAction(ISD::MUL, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i64, Expand);
  setOperationAction(ISD::MULHS, MVT::i64, Expand);
  setOperationAction(ISD::SUB, MVT::i64, Expand);
  setOperationAction(ISD::UDIV, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);

  static const MVT::SimpleValueType VectorIntTypes[] = {
    MVT::v2i32, MVT::v4i32
  };

  for (MVT VT : VectorIntTypes) {
    // Expand the following operations for the current type by default.
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::AND, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::OR, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    // TODO: Implement custom UREM / SREM routines.
    setOperationAction(ISD::SDIV, VT, Custom);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Custom);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::XOR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
  }

  static const MVT::SimpleValueType FloatVectorTypes[] = {
    MVT::v2f32, MVT::v4f32
  };

  for (MVT VT : FloatVectorTypes) {
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::FNEG, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
  }

  setOperationAction(ISD::FNEARBYINT, MVT::f32, Custom);
  setOperationAction(ISD::FNEARBYINT, MVT::f64, Custom);

  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::SELECT_CC);

  setSchedulingPreference(Sched::RegPressure);
  setJumpIsExpensive(true);

  // There are no integer divide instructions, and these expand to a pretty
  // large sequence of instructions.
  setIntDivIsCheap(false);

  // TODO: Investigate this when 64-bit divides are implemented.
  addBypassSlowDiv(64, 32);

  // FIXME: Need to really handle these.
  MaxStoresPerMemcpy = 4096;
  MaxStoresPerMemmove = 4096;
  MaxStoresPerMemset = 4096;
}

//===----------------------------------------------------------------------===//
// Target Information
//===----------------------------------------------------------------------===//

MVT AMDGPUTargetLowering::getVectorIdxTy() const {
  return MVT::i32;
}

// The backend supports 32 and 64 bit floating point immediates.
// FIXME: Why are we reporting vectors of FP immediates as legal?
bool AMDGPUTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  EVT ScalarVT = VT.getScalarType();
  return (ScalarVT == MVT::f32 || ScalarVT == MVT::f64);
}

// We don't want to shrink f64 / f32 constants.
bool AMDGPUTargetLowering::ShouldShrinkFPConstant(EVT VT) const {
  EVT ScalarVT = VT.getScalarType();
  return (ScalarVT != MVT::f32 && ScalarVT != MVT::f64);
}

bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy,
                                                   EVT CastTy) const {
  if (LoadTy.getSizeInBits() != CastTy.getSizeInBits())
    return true;

  unsigned LScalarSize = LoadTy.getScalarType().getSizeInBits();
  unsigned CastScalarSize = CastTy.getScalarType().getSizeInBits();

  return ((LScalarSize <= CastScalarSize) ||
          (CastScalarSize >= 32) ||
          (LScalarSize < 32));
}

//===---------------------------------------------------------------------===//
// Target Properties
//===---------------------------------------------------------------------===//

bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32;
}

bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32;
}

bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
  // Truncate is just accessing a subregister.
  return Dest.bitsLT(Source) && (Dest.getSizeInBits() % 32 == 0);
}

bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
  // Truncate is just accessing a subregister.
  return Dest->getPrimitiveSizeInBits() < Source->getPrimitiveSizeInBits() &&
         (Dest->getPrimitiveSizeInBits() % 32 == 0);
}

bool AMDGPUTargetLowering::isZExtFree(Type *Src, Type *Dest) const {
  const DataLayout *DL = getDataLayout();
  unsigned SrcSize = DL->getTypeSizeInBits(Src->getScalarType());
  unsigned DestSize = DL->getTypeSizeInBits(Dest->getScalarType());

  return SrcSize == 32 && DestSize == 64;
}

bool AMDGPUTargetLowering::isZExtFree(EVT Src, EVT Dest) const {
  // Any register load of a 64-bit value really requires 2 32-bit moves. For all
  // practical purposes, the extra mov 0 to load a 64-bit is free. As used,
  // this will enable reducing 64-bit operations to 32-bit, which is always
  // good.
  return Src == MVT::i32 && Dest == MVT::i64;
}

bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
  // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
  // limited number of native 64-bit operations. Shrinking an operation to fit
  // in a single 32-bit register should always be helpful. As currently used,
  // this is much less general than the name suggests, and is only used in
  // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits is
  // not profitable, and may actually be harmful.
  return SrcVT.getSizeInBits() > 32 && DestVT.getSizeInBits() == 32;
}

//===---------------------------------------------------------------------===//
// TargetLowering Callbacks
//===---------------------------------------------------------------------===//

void AMDGPUTargetLowering::AnalyzeFormalArguments(CCState &State,
                             const SmallVectorImpl<ISD::InputArg> &Ins) const {

  State.AnalyzeFormalArguments(Ins, CC_AMDGPU);
}

SDValue AMDGPUTargetLowering::LowerReturn(
                                     SDValue Chain,
                                     CallingConv::ID CallConv,
                                     bool isVarArg,
                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
                                     const SmallVectorImpl<SDValue> &OutVals,
                                     SDLoc DL, SelectionDAG &DAG) const {
  return DAG.getNode(AMDGPUISD::RET_FLAG, DL, MVT::Other, Chain);
}

//===---------------------------------------------------------------------===//
// Target specific lowering
//===---------------------------------------------------------------------===//

SDValue AMDGPUTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                        SmallVectorImpl<SDValue> &InVals) const {
  SDValue Callee = CLI.Callee;
  SelectionDAG &DAG = CLI.DAG;

  const Function &Fn = *DAG.getMachineFunction().getFunction();

  StringRef FuncName("<unknown>");

  if (const ExternalSymbolSDNode *G = dyn_cast<ExternalSymbolSDNode>(Callee))
    FuncName = G->getSymbol();
  else if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    FuncName = G->getGlobal()->getName();

  DiagnosticInfoUnsupported NoCalls(Fn, "call to function " + FuncName);
  DAG.getContext()->diagnose(NoCalls);
  return SDValue();
}

SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op,
                                             SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    Op.getNode()->dump();
    llvm_unreachable("Custom lowering code for this "
                     "instruction is not implemented yet!");
    break;
  // AMDGPU DAG lowering.
  case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
  case ISD::FrameIndex: return LowerFrameIndex(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::SDIV: return LowerSDIV(Op, DAG);
  case ISD::SREM: return LowerSREM(Op, DAG);
  case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
  case ISD::FCEIL: return LowerFCEIL(Op, DAG);
  case ISD::FTRUNC: return LowerFTRUNC(Op, DAG);
  case ISD::FRINT: return LowerFRINT(Op, DAG);
  case ISD::FNEARBYINT: return LowerFNEARBYINT(Op, DAG);
  case ISD::FFLOOR: return LowerFFLOOR(Op, DAG);
  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);

  // AMDIL DAG lowering.
  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
  }
  return Op;
}

void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N,
                                              SmallVectorImpl<SDValue> &Results,
                                              SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::SIGN_EXTEND_INREG:
    // Different parts of legalization seem to interpret which type of
    // sign_extend_inreg is the one to check for custom lowering. The extended
    // from type is what really matters, but some places check for custom
    // lowering of the result type. This results in trying to use
    // ReplaceNodeResults to sext_in_reg to an illegal type, so we'll just do
    // nothing here and let the illegal result integer be handled normally.
    return;
  case ISD::UDIV: {
    SDValue Op = SDValue(N, 0);
    SDLoc DL(Op);
    EVT VT = Op.getValueType();
    SDValue UDIVREM = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(VT, VT),
      N->getOperand(0), N->getOperand(1));
    Results.push_back(UDIVREM);
    break;
  }
  case ISD::UREM: {
    SDValue Op = SDValue(N, 0);
    SDLoc DL(Op);
    EVT VT = Op.getValueType();
    SDValue UDIVREM = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(VT, VT),
      N->getOperand(0), N->getOperand(1));
    Results.push_back(UDIVREM.getValue(1));
    break;
  }
  case ISD::UDIVREM: {
    SDValue Op = SDValue(N, 0);
    SDLoc DL(Op);
    EVT VT = Op.getValueType();
    EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());

    SDValue one = DAG.getConstant(1, HalfVT);
    SDValue zero = DAG.getConstant(0, HalfVT);

    // HiLo split
    SDValue LHS = N->getOperand(0);
    SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, zero);
    SDValue LHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, one);

    SDValue RHS = N->getOperand(1);
    SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, zero);
    SDValue RHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, one);

    // Get Speculative values
    SDValue DIV_Part = DAG.getNode(ISD::UDIV, DL, HalfVT, LHS_Hi, RHS_Lo);
    SDValue REM_Part = DAG.getNode(ISD::UREM, DL, HalfVT, LHS_Hi, RHS_Lo);

    SDValue REM_Hi = zero;
    SDValue REM_Lo = DAG.getSelectCC(DL, RHS_Hi, zero, REM_Part, LHS_Hi, ISD::SETEQ);

    SDValue DIV_Hi = DAG.getSelectCC(DL, RHS_Hi, zero, DIV_Part, zero, ISD::SETEQ);
    SDValue DIV_Lo = zero;

    const unsigned halfBitWidth = HalfVT.getSizeInBits();
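
    // What follows is a bit-at-a-time restoring division: each iteration
    // shifts the next bit of LHS_Lo into the running remainder REM, ORs the
    // corresponding quotient bit into DIV_Lo if REM >= RHS, and in that case
    // subtracts RHS back out of REM.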
    for (unsigned i = 0; i < halfBitWidth; ++i) {
      SDValue POS = DAG.getConstant(halfBitWidth - i - 1, HalfVT);
      // Get value of high bit
      SDValue HBit;
      if (halfBitWidth == 32 && Subtarget->hasBFE()) {
        HBit = DAG.getNode(AMDGPUISD::BFE_U32, DL, HalfVT, LHS_Lo, POS, one);
      } else {
        HBit = DAG.getNode(ISD::SRL, DL, HalfVT, LHS_Lo, POS);
        HBit = DAG.getNode(ISD::AND, DL, HalfVT, HBit, one);
      }

      SDValue Carry = DAG.getNode(ISD::SRL, DL, HalfVT, REM_Lo,
                                  DAG.getConstant(halfBitWidth - 1, HalfVT));
      REM_Hi = DAG.getNode(ISD::SHL, DL, HalfVT, REM_Hi, one);
      REM_Hi = DAG.getNode(ISD::OR, DL, HalfVT, REM_Hi, Carry);

      REM_Lo = DAG.getNode(ISD::SHL, DL, HalfVT, REM_Lo, one);
      REM_Lo = DAG.getNode(ISD::OR, DL, HalfVT, REM_Lo, HBit);

      SDValue REM = DAG.getNode(ISD::BUILD_PAIR, DL, VT, REM_Lo, REM_Hi);

      SDValue BIT = DAG.getConstant(1 << (halfBitWidth - i - 1), HalfVT);
      SDValue realBIT = DAG.getSelectCC(DL, REM, RHS, BIT, zero, ISD::SETGE);

      DIV_Lo = DAG.getNode(ISD::OR, DL, HalfVT, DIV_Lo, realBIT);

      // Update REM
      SDValue REM_sub = DAG.getNode(ISD::SUB, DL, VT, REM, RHS);

      REM = DAG.getSelectCC(DL, REM, RHS, REM_sub, REM, ISD::SETGE);
      REM_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, REM, zero);
      REM_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, REM, one);
    }

    SDValue REM = DAG.getNode(ISD::BUILD_PAIR, DL, VT, REM_Lo, REM_Hi);
    SDValue DIV = DAG.getNode(ISD::BUILD_PAIR, DL, VT, DIV_Lo, DIV_Hi);
    Results.push_back(DIV);
    Results.push_back(REM);
    break;
  }
  default:
    return;
  }
}

// FIXME: This implements accesses to initialized globals in the constant
// address space by copying them to private and accessing that. It does not
// properly handle illegal types or vectors. The private vector loads are not
// scalarized, and the illegal scalars hit an assertion. This technique will not
// work well with large initializers, and this should eventually be
// removed. Initialized globals should be placed into a data section that the
// runtime will load into a buffer before the kernel is executed. Uses of the
// global need to be replaced with a pointer loaded from an implicit kernel
// argument into this buffer holding the copy of the data, which will remove the
// need for any of this.
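//
// For illustration (hypothetical input): a kernel reading from
//   __constant int lut[4] = {0, 1, 2, 3};
// currently becomes a 16-byte private stack object, a chain of four i32
// stores at function entry, and the original constant-address loads are
// rewritten to read from that frame object.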
SDValue AMDGPUTargetLowering::LowerConstantInitializer(const Constant* Init,
                                                       const GlobalValue *GV,
                                                       const SDValue &InitPtr,
                                                       SDValue Chain,
                                                       SelectionDAG &DAG) const {
  const DataLayout *TD = getTargetMachine().getDataLayout();
  SDLoc DL(InitPtr);
  Type *InitTy = Init->getType();

  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Init)) {
    EVT VT = EVT::getEVT(InitTy);
    PointerType *PtrTy = PointerType::get(InitTy, AMDGPUAS::PRIVATE_ADDRESS);
    return DAG.getStore(Chain, DL, DAG.getConstant(*CI, VT), InitPtr,
                        MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
                        TD->getPrefTypeAlignment(InitTy));
  }

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(Init)) {
    EVT VT = EVT::getEVT(CFP->getType());
    PointerType *PtrTy = PointerType::get(CFP->getType(), 0);
    return DAG.getStore(Chain, DL, DAG.getConstantFP(*CFP, VT), InitPtr,
                        MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
                        TD->getPrefTypeAlignment(CFP->getType()));
  }

  if (StructType *ST = dyn_cast<StructType>(InitTy)) {
    const StructLayout *SL = TD->getStructLayout(ST);

    EVT PtrVT = InitPtr.getValueType();
    SmallVector<SDValue, 8> Chains;

    for (unsigned I = 0, N = ST->getNumElements(); I != N; ++I) {
      SDValue Offset = DAG.getConstant(SL->getElementOffset(I), PtrVT);
      SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, InitPtr, Offset);

      Constant *Elt = Init->getAggregateElement(I);
      Chains.push_back(LowerConstantInitializer(Elt, GV, Ptr, Chain, DAG));
    }

    return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  }

  if (SequentialType *SeqTy = dyn_cast<SequentialType>(InitTy)) {
    EVT PtrVT = InitPtr.getValueType();

    unsigned NumElements;
    if (ArrayType *AT = dyn_cast<ArrayType>(SeqTy))
      NumElements = AT->getNumElements();
    else if (VectorType *VT = dyn_cast<VectorType>(SeqTy))
      NumElements = VT->getNumElements();
    else
      llvm_unreachable("Unexpected type");

    unsigned EltSize = TD->getTypeAllocSize(SeqTy->getElementType());
    SmallVector<SDValue, 8> Chains;
    for (unsigned i = 0; i < NumElements; ++i) {
      SDValue Offset = DAG.getConstant(i * EltSize, PtrVT);
      SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, InitPtr, Offset);

      Constant *Elt = Init->getAggregateElement(i);
      Chains.push_back(LowerConstantInitializer(Elt, GV, Ptr, Chain, DAG));
    }

    return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  }

  if (isa<UndefValue>(Init)) {
    EVT VT = EVT::getEVT(InitTy);
    PointerType *PtrTy = PointerType::get(InitTy, AMDGPUAS::PRIVATE_ADDRESS);
    return DAG.getStore(Chain, DL, DAG.getUNDEF(VT), InitPtr,
                        MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
                        TD->getPrefTypeAlignment(InitTy));
  }

  Init->dump();
  llvm_unreachable("Unhandled constant initializer");
}

SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
                                                 SDValue Op,
                                                 SelectionDAG &DAG) const {

  const DataLayout *TD = getTargetMachine().getDataLayout();
  GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = G->getGlobal();

  switch (G->getAddressSpace()) {
  default: llvm_unreachable("Global Address lowering not implemented for this "
                            "address space");
  case AMDGPUAS::LOCAL_ADDRESS: {
    // XXX: What does the value of G->getOffset() mean?
    assert(G->getOffset() == 0 &&
           "Do not know what to do with a non-zero offset");

    unsigned Offset;
    if (MFI->LocalMemoryObjects.count(GV) == 0) {
      uint64_t Size = TD->getTypeAllocSize(GV->getType()->getElementType());
      Offset = MFI->LDSSize;
      MFI->LocalMemoryObjects[GV] = Offset;
      // XXX: Account for alignment?
      MFI->LDSSize += Size;
    } else {
      Offset = MFI->LocalMemoryObjects[GV];
    }

    return DAG.getConstant(Offset, getPointerTy(G->getAddressSpace()));
  }
  case AMDGPUAS::CONSTANT_ADDRESS: {
    MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
    Type *EltType = GV->getType()->getElementType();
    unsigned Size = TD->getTypeAllocSize(EltType);
    unsigned Alignment = TD->getPrefTypeAlignment(EltType);

    MVT PrivPtrVT = getPointerTy(AMDGPUAS::PRIVATE_ADDRESS);
    MVT ConstPtrVT = getPointerTy(AMDGPUAS::CONSTANT_ADDRESS);

    int FI = FrameInfo->CreateStackObject(Size, Alignment, false);
    SDValue InitPtr = DAG.getFrameIndex(FI, PrivPtrVT);

    const GlobalVariable *Var = cast<GlobalVariable>(GV);
    if (!Var->hasInitializer()) {
      // This has no use, but bugpoint will hit it.
      return DAG.getZExtOrTrunc(InitPtr, SDLoc(Op), ConstPtrVT);
    }

    const Constant *Init = Var->getInitializer();
    SmallVector<SDNode*, 8> WorkList;

    for (SDNode::use_iterator I = DAG.getEntryNode()->use_begin(),
                              E = DAG.getEntryNode()->use_end(); I != E; ++I) {
      if (I->getOpcode() != AMDGPUISD::REGISTER_LOAD && I->getOpcode() != ISD::LOAD)
        continue;
      WorkList.push_back(*I);
    }
    SDValue Chain = LowerConstantInitializer(Init, GV, InitPtr, DAG.getEntryNode(), DAG);
    for (SmallVector<SDNode*, 8>::iterator I = WorkList.begin(),
                                           E = WorkList.end(); I != E; ++I) {
      SmallVector<SDValue, 8> Ops;
      Ops.push_back(Chain);
      for (unsigned i = 1; i < (*I)->getNumOperands(); ++i) {
        Ops.push_back((*I)->getOperand(i));
      }
      DAG.UpdateNodeOperands(*I, Ops);
    }
    return DAG.getZExtOrTrunc(InitPtr, SDLoc(Op), ConstPtrVT);
  }
  }
}

SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SmallVector<SDValue, 8> Args;
  SDValue A = Op.getOperand(0);
  SDValue B = Op.getOperand(1);

  DAG.ExtractVectorElements(A, Args);
  DAG.ExtractVectorElements(B, Args);

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(), Args);
}

SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {

  SmallVector<SDValue, 8> Args;
  unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  EVT VT = Op.getValueType();
  DAG.ExtractVectorElements(Op.getOperand(0), Args, Start,
                            VT.getVectorNumElements());

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(), Args);
}

SDValue AMDGPUTargetLowering::LowerFrameIndex(SDValue Op,
                                              SelectionDAG &DAG) const {

  MachineFunction &MF = DAG.getMachineFunction();
  const AMDGPUFrameLowering *TFL =
   static_cast<const AMDGPUFrameLowering*>(getTargetMachine().getFrameLowering());

  FrameIndexSDNode *FIN = cast<FrameIndexSDNode>(Op);

  unsigned FrameIndex = FIN->getIndex();
  unsigned Offset = TFL->getFrameIndexOffset(MF, FrameIndex);
  return DAG.getConstant(Offset * 4 * TFL->getStackWidth(MF),
                         Op.getValueType());
}

SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
    SelectionDAG &DAG) const {
  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  switch (IntrinsicID) {
    default: return Op;
    case AMDGPUIntrinsic::AMDGPU_abs:
    case AMDGPUIntrinsic::AMDIL_abs: // Legacy name.
      return LowerIntrinsicIABS(Op, DAG);
    case AMDGPUIntrinsic::AMDGPU_lrp:
      return LowerIntrinsicLRP(Op, DAG);
    case AMDGPUIntrinsic::AMDGPU_fract:
    case AMDGPUIntrinsic::AMDIL_fraction: // Legacy name.
      return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));

    case AMDGPUIntrinsic::AMDGPU_clamp:
    case AMDGPUIntrinsic::AMDIL_clamp: // Legacy name.
      return DAG.getNode(AMDGPUISD::CLAMP, DL, VT,
                         Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

    case Intrinsic::AMDGPU_div_scale:
      return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, VT,
                         Op.getOperand(1), Op.getOperand(2));

    case Intrinsic::AMDGPU_div_fmas:
      return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
                         Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

    case Intrinsic::AMDGPU_div_fixup:
      return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
                         Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

    case Intrinsic::AMDGPU_trig_preop:
      return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT,
                         Op.getOperand(1), Op.getOperand(2));

    case Intrinsic::AMDGPU_rcp:
      return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1));

    case Intrinsic::AMDGPU_rsq:
      return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));

    case AMDGPUIntrinsic::AMDGPU_imax:
      return DAG.getNode(AMDGPUISD::SMAX, DL, VT, Op.getOperand(1),
                                                  Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_umax:
      return DAG.getNode(AMDGPUISD::UMAX, DL, VT, Op.getOperand(1),
                                                  Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_imin:
      return DAG.getNode(AMDGPUISD::SMIN, DL, VT, Op.getOperand(1),
                                                  Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_umin:
      return DAG.getNode(AMDGPUISD::UMIN, DL, VT, Op.getOperand(1),
                                                  Op.getOperand(2));

    case AMDGPUIntrinsic::AMDGPU_umul24:
      return DAG.getNode(AMDGPUISD::MUL_U24, DL, VT,
                         Op.getOperand(1), Op.getOperand(2));

    case AMDGPUIntrinsic::AMDGPU_imul24:
      return DAG.getNode(AMDGPUISD::MUL_I24, DL, VT,
                         Op.getOperand(1), Op.getOperand(2));

    case AMDGPUIntrinsic::AMDGPU_umad24:
      return DAG.getNode(AMDGPUISD::MAD_U24, DL, VT,
                         Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_imad24:
      return DAG.getNode(AMDGPUISD::MAD_I24, DL, VT,
                         Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte0:
      return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Op.getOperand(1));

    case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte1:
      return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE1, DL, VT, Op.getOperand(1));

    case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte2:
      return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE2, DL, VT, Op.getOperand(1));

    case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte3:
      return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE3, DL, VT, Op.getOperand(1));

    case AMDGPUIntrinsic::AMDGPU_bfe_i32:
      return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
                         Op.getOperand(1),
                         Op.getOperand(2),
                         Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_bfe_u32:
      return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
                         Op.getOperand(1),
                         Op.getOperand(2),
                         Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_bfi:
      return DAG.getNode(AMDGPUISD::BFI, DL, VT,
                         Op.getOperand(1),
                         Op.getOperand(2),
                         Op.getOperand(3));

    case AMDGPUIntrinsic::AMDGPU_bfm:
      return DAG.getNode(AMDGPUISD::BFM, DL, VT,
                         Op.getOperand(1),
                         Op.getOperand(2));

    case AMDGPUIntrinsic::AMDGPU_brev:
      return DAG.getNode(AMDGPUISD::BREV, DL, VT, Op.getOperand(1));

    case AMDGPUIntrinsic::AMDIL_exp: // Legacy name.
      return DAG.getNode(ISD::FEXP2, DL, VT, Op.getOperand(1));

    case AMDGPUIntrinsic::AMDIL_round_nearest: // Legacy name.
      return DAG.getNode(ISD::FRINT, DL, VT, Op.getOperand(1));
    case AMDGPUIntrinsic::AMDGPU_trunc:
      return DAG.getNode(ISD::FTRUNC, DL, VT, Op.getOperand(1));
  }
}

/// IABS(a) = SMAX(sub(0, a), a)
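/// For example, IABS(-5) = SMAX(sub(0, -5), -5) = SMAX(5, -5) = 5.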
SDValue AMDGPUTargetLowering::LowerIntrinsicIABS(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
                                              Op.getOperand(1));

  return DAG.getNode(AMDGPUISD::SMAX, DL, VT, Neg, Op.getOperand(1));
}

/// Linear Interpolation
/// LRP(a, b, c) = muladd(a, b, (1 - a) * c)
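/// For example (values chosen for illustration): LRP(0.25, 8.0, 4.0)
/// = 0.25 * 8.0 + (1 - 0.25) * 4.0 = 5.0.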
SDValue AMDGPUTargetLowering::LowerIntrinsicLRP(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue OneSubA = DAG.getNode(ISD::FSUB, DL, VT,
                                DAG.getConstantFP(1.0f, MVT::f32),
                                Op.getOperand(1));
  SDValue OneSubAC = DAG.getNode(ISD::FMUL, DL, VT, OneSubA,
                                 Op.getOperand(3));
  return DAG.getNode(ISD::FADD, DL, VT,
      DAG.getNode(ISD::FMUL, DL, VT, Op.getOperand(1), Op.getOperand(2)),
      OneSubAC);
}

/// \brief Generate Min/Max node
SDValue AMDGPUTargetLowering::CombineMinMax(SDNode *N,
                                            SelectionDAG &DAG) const {
  SDLoc DL(N);
  EVT VT = N->getValueType(0);

  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  SDValue True = N->getOperand(2);
  SDValue False = N->getOperand(3);
  SDValue CC = N->getOperand(4);

  if (VT != MVT::f32 ||
      !((LHS == True && RHS == False) || (LHS == False && RHS == True))) {
    return SDValue();
  }

  ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
  switch (CCOpcode) {
  case ISD::SETOEQ:
  case ISD::SETONE:
  case ISD::SETUNE:
  case ISD::SETNE:
  case ISD::SETUEQ:
  case ISD::SETEQ:
  case ISD::SETFALSE:
  case ISD::SETFALSE2:
  case ISD::SETTRUE:
  case ISD::SETTRUE2:
  case ISD::SETUO:
  case ISD::SETO:
    llvm_unreachable("Operation should already be optimised!");
  case ISD::SETULE:
  case ISD::SETULT:
  case ISD::SETOLE:
  case ISD::SETOLT:
  case ISD::SETLE:
  case ISD::SETLT: {
    unsigned Opc = (LHS == True) ? AMDGPUISD::FMIN : AMDGPUISD::FMAX;
    return DAG.getNode(Opc, DL, VT, LHS, RHS);
  }
  case ISD::SETGT:
  case ISD::SETGE:
  case ISD::SETUGE:
  case ISD::SETOGE:
  case ISD::SETUGT:
  case ISD::SETOGT: {
    unsigned Opc = (LHS == True) ? AMDGPUISD::FMAX : AMDGPUISD::FMIN;
    return DAG.getNode(Opc, DL, VT, LHS, RHS);
  }
  case ISD::SETCC_INVALID:
    llvm_unreachable("Invalid setcc condcode!");
  }
  return SDValue();
}

SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue &Op,
                                              SelectionDAG &DAG) const {
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  EVT MemEltVT = Load->getMemoryVT().getVectorElementType();
  EVT EltVT = Op.getValueType().getVectorElementType();
  EVT PtrVT = Load->getBasePtr().getValueType();
  unsigned NumElts = Load->getMemoryVT().getVectorNumElements();
  SmallVector<SDValue, 8> Loads;
  SDLoc SL(Op);

  for (unsigned i = 0, e = NumElts; i != e; ++i) {
    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, Load->getBasePtr(),
                    DAG.getConstant(i * (MemEltVT.getSizeInBits() / 8), PtrVT));
    Loads.push_back(DAG.getExtLoad(Load->getExtensionType(), SL, EltVT,
                    Load->getChain(), Ptr,
                    MachinePointerInfo(Load->getMemOperand()->getValue()),
                    MemEltVT, Load->isVolatile(), Load->isNonTemporal(),
                    Load->getAlignment()));
  }
  return DAG.getNode(ISD::BUILD_VECTOR, SL, Op.getValueType(), Loads);
}

SDValue AMDGPUTargetLowering::MergeVectorStore(const SDValue &Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  EVT MemVT = Store->getMemoryVT();
  unsigned MemBits = MemVT.getSizeInBits();

  // Byte stores are really expensive, so if possible, try to pack 32-bit
  // vector truncating stores into an i32 store.
  // XXX: We could also handle other vector bitwidths.
  if (!MemVT.isVector() || MemBits > 32) {
    return SDValue();
  }

  SDLoc DL(Op);
  SDValue Value = Store->getValue();
  EVT VT = Value.getValueType();
  EVT ElemVT = VT.getVectorElementType();
  SDValue Ptr = Store->getBasePtr();
  EVT MemEltVT = MemVT.getVectorElementType();
  unsigned MemEltBits = MemEltVT.getSizeInBits();
  unsigned MemNumElements = MemVT.getVectorNumElements();
  unsigned PackedSize = MemVT.getStoreSizeInBits();
  SDValue Mask = DAG.getConstant((1 << MemEltBits) - 1, MVT::i32);

  assert(Value.getValueType().getScalarSizeInBits() >= 32);

  SDValue PackedValue;
  for (unsigned i = 0; i < MemNumElements; ++i) {
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ElemVT, Value,
                              DAG.getConstant(i, MVT::i32));
    Elt = DAG.getZExtOrTrunc(Elt, DL, MVT::i32);
    Elt = DAG.getNode(ISD::AND, DL, MVT::i32, Elt, Mask); // getZeroExtendInReg

    SDValue Shift = DAG.getConstant(MemEltBits * i, MVT::i32);
    Elt = DAG.getNode(ISD::SHL, DL, MVT::i32, Elt, Shift);

    if (i == 0) {
      PackedValue = Elt;
    } else {
      PackedValue = DAG.getNode(ISD::OR, DL, MVT::i32, PackedValue, Elt);
    }
  }

  if (PackedSize < 32) {
    EVT PackedVT = EVT::getIntegerVT(*DAG.getContext(), PackedSize);
    return DAG.getTruncStore(Store->getChain(), DL, PackedValue, Ptr,
                             Store->getMemOperand()->getPointerInfo(),
                             PackedVT,
                             Store->isNonTemporal(), Store->isVolatile(),
                             Store->getAlignment());
  }

  return DAG.getStore(Store->getChain(), DL, PackedValue, Ptr,
                      Store->getMemOperand()->getPointerInfo(),
                      Store->isVolatile(), Store->isNonTemporal(),
                      Store->getAlignment());
}

SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  EVT MemEltVT = Store->getMemoryVT().getVectorElementType();
  EVT EltVT = Store->getValue().getValueType().getVectorElementType();
  EVT PtrVT = Store->getBasePtr().getValueType();
  unsigned NumElts = Store->getMemoryVT().getVectorNumElements();
  SDLoc SL(Op);

  SmallVector<SDValue, 8> Chains;

  for (unsigned i = 0, e = NumElts; i != e; ++i) {
    SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                              Store->getValue(), DAG.getConstant(i, MVT::i32));
    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT,
                              Store->getBasePtr(),
                              DAG.getConstant(i * (MemEltVT.getSizeInBits() / 8),
                                              PtrVT));
    Chains.push_back(DAG.getTruncStore(Store->getChain(), SL, Val, Ptr,
                         MachinePointerInfo(Store->getMemOperand()->getValue()),
                         MemEltVT, Store->isVolatile(), Store->isNonTemporal(),
                         Store->getAlignment()));
  }
  return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Chains);
}

SDValue AMDGPUTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  ISD::LoadExtType ExtType = Load->getExtensionType();
  EVT VT = Op.getValueType();
  EVT MemVT = Load->getMemoryVT();

  if (ExtType != ISD::NON_EXTLOAD && !VT.isVector() && VT.getSizeInBits() > 32) {
    // We can do the extload to 32-bits, and then need to separately extend to
    // 64-bits.

    SDValue ExtLoad32 = DAG.getExtLoad(ExtType, DL, MVT::i32,
                                       Load->getChain(),
                                       Load->getBasePtr(),
                                       MemVT,
                                       Load->getMemOperand());
    return DAG.getNode(ISD::getExtForLoadExtType(ExtType), DL, VT, ExtLoad32);
  }

  if (ExtType == ISD::NON_EXTLOAD && VT.getSizeInBits() < 32) {
    assert(VT == MVT::i1 && "Only i1 non-extloads expected");
    // FIXME: Copied from PPC
    // First, load into 32 bits, then truncate to 1 bit.

    SDValue Chain = Load->getChain();
    SDValue BasePtr = Load->getBasePtr();
    MachineMemOperand *MMO = Load->getMemOperand();

    SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
                                   BasePtr, MVT::i8, MMO);
    return DAG.getNode(ISD::TRUNCATE, DL, VT, NewLD);
  }

  // Lower loads of constant address space global variables.
  if (Load->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS &&
      isa<GlobalVariable>(
          GetUnderlyingObject(Load->getMemOperand()->getValue()))) {

    SDValue Ptr = DAG.getZExtOrTrunc(Load->getBasePtr(), DL,
        getPointerTy(AMDGPUAS::PRIVATE_ADDRESS));
    Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr,
        DAG.getConstant(2, MVT::i32));
    return DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
                       Load->getChain(), Ptr,
                       DAG.getTargetConstant(0, MVT::i32), Op.getOperand(2));
  }

  if (Load->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS ||
      ExtType == ISD::NON_EXTLOAD || Load->getMemoryVT().bitsGE(MVT::i32))
    return SDValue();

  SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Load->getBasePtr(),
                            DAG.getConstant(2, MVT::i32));
  SDValue Ret = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
                            Load->getChain(), Ptr,
                            DAG.getTargetConstant(0, MVT::i32),
                            Op.getOperand(2));
  SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32,
                                Load->getBasePtr(),
                                DAG.getConstant(0x3, MVT::i32));
  SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                 DAG.getConstant(3, MVT::i32));

  Ret = DAG.getNode(ISD::SRL, DL, MVT::i32, Ret, ShiftAmt);

  EVT MemEltVT = MemVT.getScalarType();
  if (ExtType == ISD::SEXTLOAD) {
    SDValue MemEltVTNode = DAG.getValueType(MemEltVT);
    return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, Ret, MemEltVTNode);
  }

  return DAG.getZeroExtendInReg(Ret, DL, MemEltVT);
}

SDValue AMDGPUTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Result = AMDGPUTargetLowering::MergeVectorStore(Op, DAG);
  if (Result.getNode()) {
    return Result;
  }

  StoreSDNode *Store = cast<StoreSDNode>(Op);
  SDValue Chain = Store->getChain();
  if ((Store->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
       Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) &&
      Store->getValue().getValueType().isVector()) {
    return SplitVectorStore(Op, DAG);
  }

  EVT MemVT = Store->getMemoryVT();
  if (Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS &&
      MemVT.bitsLT(MVT::i32)) {
    unsigned Mask = 0;
    if (Store->getMemoryVT() == MVT::i8) {
      Mask = 0xff;
    } else if (Store->getMemoryVT() == MVT::i16) {
      Mask = 0xffff;
    }
    SDValue BasePtr = Store->getBasePtr();
    SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, BasePtr,
                              DAG.getConstant(2, MVT::i32));
    SDValue Dst = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, MVT::i32,
                              Chain, Ptr, DAG.getTargetConstant(0, MVT::i32));

    SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32, BasePtr,
                                  DAG.getConstant(0x3, MVT::i32));

    SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                   DAG.getConstant(3, MVT::i32));

    SDValue SExtValue = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i32,
                                    Store->getValue());

    SDValue MaskedValue = DAG.getZeroExtendInReg(SExtValue, DL, MemVT);

    SDValue ShiftedValue = DAG.getNode(ISD::SHL, DL, MVT::i32,
                                       MaskedValue, ShiftAmt);

    SDValue DstMask = DAG.getNode(ISD::SHL, DL, MVT::i32, DAG.getConstant(Mask, MVT::i32),
                                  ShiftAmt);
    DstMask = DAG.getNode(ISD::XOR, DL, MVT::i32, DstMask,
                          DAG.getConstant(0xffffffff, MVT::i32));
    Dst = DAG.getNode(ISD::AND, DL, MVT::i32, Dst, DstMask);

    SDValue Value = DAG.getNode(ISD::OR, DL, MVT::i32, Dst, ShiftedValue);
    return DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other,
                       Chain, Value, Ptr, DAG.getTargetConstant(0, MVT::i32));
  }
  return SDValue();
}

SDValue AMDGPUTargetLowering::LowerSDIV24(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT OVT = Op.getValueType();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  MVT INTTY;
  MVT FLTTY;
  if (!OVT.isVector()) {
    INTTY = MVT::i32;
    FLTTY = MVT::f32;
  } else if (OVT.getVectorNumElements() == 2) {
    INTTY = MVT::v2i32;
    FLTTY = MVT::v2f32;
  } else if (OVT.getVectorNumElements() == 4) {
    INTTY = MVT::v4i32;
    FLTTY = MVT::v4f32;
  }
  unsigned bitsize = OVT.getScalarType().getSizeInBits();
  // char|short jq = ia ^ ib;
  SDValue jq = DAG.getNode(ISD::XOR, DL, OVT, LHS, RHS);

  // jq = jq >> (bitsize - 2)
  jq = DAG.getNode(ISD::SRA, DL, OVT, jq, DAG.getConstant(bitsize - 2, OVT));

  // jq = jq | 0x1
  jq = DAG.getNode(ISD::OR, DL, OVT, jq, DAG.getConstant(1, OVT));

  // jq = (int)jq
  jq = DAG.getSExtOrTrunc(jq, DL, INTTY);

  // int ia = (int)LHS;
  SDValue ia = DAG.getSExtOrTrunc(LHS, DL, INTTY);

  // int ib = (int)RHS;
  SDValue ib = DAG.getSExtOrTrunc(RHS, DL, INTTY);

  // float fa = (float)ia;
  SDValue fa = DAG.getNode(ISD::SINT_TO_FP, DL, FLTTY, ia);

  // float fb = (float)ib;
  SDValue fb = DAG.getNode(ISD::SINT_TO_FP, DL, FLTTY, ib);

  // float fq = native_divide(fa, fb);
  SDValue fq = DAG.getNode(AMDGPUISD::DIV_INF, DL, FLTTY, fa, fb);

  // fq = trunc(fq);
  fq = DAG.getNode(ISD::FTRUNC, DL, FLTTY, fq);

  // float fqneg = -fq;
  SDValue fqneg = DAG.getNode(ISD::FNEG, DL, FLTTY, fq);

  // float fr = mad(fqneg, fb, fa);
  SDValue fr = DAG.getNode(ISD::FADD, DL, FLTTY,
                           DAG.getNode(ISD::FMUL, DL, FLTTY, fqneg, fb), fa);

  // int iq = (int)fq;
  SDValue iq = DAG.getNode(ISD::FP_TO_SINT, DL, INTTY, fq);

  // fr = fabs(fr);
  fr = DAG.getNode(ISD::FABS, DL, FLTTY, fr);

  // fb = fabs(fb);
  fb = DAG.getNode(ISD::FABS, DL, FLTTY, fb);

  // int cv = fr >= fb;
  SDValue cv;
  if (INTTY == MVT::i32) {
    cv = DAG.getSetCC(DL, INTTY, fr, fb, ISD::SETOGE);
  } else {
    cv = DAG.getSetCC(DL, INTTY, fr, fb, ISD::SETOGE);
  }

  // jq = (cv ? jq : 0);
  jq = DAG.getNode(ISD::SELECT, DL, OVT, cv, jq,
                   DAG.getConstant(0, OVT));

  // dst = iq + jq;
  iq = DAG.getSExtOrTrunc(iq, DL, OVT);
  iq = DAG.getNode(ISD::ADD, DL, OVT, iq, jq);
  return iq;
}

SDValue AMDGPUTargetLowering::LowerSDIV32(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT OVT = Op.getValueType();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  // The LowerSDIV32 function generates code equivalent to the following IL:
  // mov r0, LHS
  // mov r1, RHS
  // ilt r10, r0, 0
  // ilt r11, r1, 0
  // iadd r0, r0, r10
  // iadd r1, r1, r11
  // ixor r0, r0, r10
  // ixor r1, r1, r11
  // udiv r0, r0, r1
  // ixor r10, r10, r11
  // iadd r0, r0, r10
  // ixor DST, r0, r10
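  //
  // In other words: r10 and r11 are all-ones masks for negative LHS and RHS
  // (iadd -1 followed by ixor -1 is two's-complement negation), so the
  // sequence computes |LHS| / |RHS| with an unsigned divide and then gives
  // the quotient the sign of LHS ^ RHS.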

  // mov r0, LHS
  SDValue r0 = LHS;

  // mov r1, RHS
  SDValue r1 = RHS;

  // ilt r10, r0, 0
  SDValue r10 = DAG.getSelectCC(DL,
      r0, DAG.getConstant(0, OVT),
      DAG.getConstant(-1, OVT),
      DAG.getConstant(0, OVT),
      ISD::SETLT);

  // ilt r11, r1, 0
  SDValue r11 = DAG.getSelectCC(DL,
      r1, DAG.getConstant(0, OVT),
      DAG.getConstant(-1, OVT),
      DAG.getConstant(0, OVT),
      ISD::SETLT);

  // iadd r0, r0, r10
  r0 = DAG.getNode(ISD::ADD, DL, OVT, r0, r10);

  // iadd r1, r1, r11
  r1 = DAG.getNode(ISD::ADD, DL, OVT, r1, r11);

  // ixor r0, r0, r10
  r0 = DAG.getNode(ISD::XOR, DL, OVT, r0, r10);

  // ixor r1, r1, r11
  r1 = DAG.getNode(ISD::XOR, DL, OVT, r1, r11);

  // udiv r0, r0, r1
  r0 = DAG.getNode(ISD::UDIV, DL, OVT, r0, r1);

  // ixor r10, r10, r11
  r10 = DAG.getNode(ISD::XOR, DL, OVT, r10, r11);

  // iadd r0, r0, r10
  r0 = DAG.getNode(ISD::ADD, DL, OVT, r0, r10);

  // ixor DST, r0, r10
  SDValue DST = DAG.getNode(ISD::XOR, DL, OVT, r0, r10);
  return DST;
}

SDValue AMDGPUTargetLowering::LowerSDIV64(SDValue Op, SelectionDAG &DAG) const {
  return SDValue(Op.getNode(), 0);
}

SDValue AMDGPUTargetLowering::LowerSDIV(SDValue Op, SelectionDAG &DAG) const {
  EVT OVT = Op.getValueType().getScalarType();

  if (OVT == MVT::i64)
    return LowerSDIV64(Op, DAG);

  if (OVT.getScalarType() == MVT::i32)
    return LowerSDIV32(Op, DAG);

  if (OVT == MVT::i16 || OVT == MVT::i8) {
    // FIXME: We should be checking for the masked bits. This isn't reached
    // because i8 and i16 are not legal types.
    return LowerSDIV24(Op, DAG);
  }

  return SDValue(Op.getNode(), 0);
}

SDValue AMDGPUTargetLowering::LowerSREM32(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT OVT = Op.getValueType();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  // The LowerSREM32 function generates code equivalent to the following IL:
  // mov r0, LHS
  // mov r1, RHS
  // ilt r10, r0, 0
  // ilt r11, r1, 0
  // iadd r0, r0, r10
  // iadd r1, r1, r11
  // ixor r0, r0, r10
  // ixor r1, r1, r11
  // udiv r20, r0, r1
  // umul r20, r20, r1
  // sub r0, r0, r20
  // iadd r0, r0, r10
  // ixor DST, r0, r10
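  //
  // i.e. the remainder is computed as |LHS| - (|LHS| / |RHS|) * |RHS| and is
  // then given the sign of LHS (the r10 mask), matching C's % semantics.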

  // mov r0, LHS
  SDValue r0 = LHS;

  // mov r1, RHS
  SDValue r1 = RHS;

  // ilt r10, r0, 0
  SDValue r10 = DAG.getSetCC(DL, OVT, r0, DAG.getConstant(0, OVT), ISD::SETLT);

  // ilt r11, r1, 0
  SDValue r11 = DAG.getSetCC(DL, OVT, r1, DAG.getConstant(0, OVT), ISD::SETLT);

  // iadd r0, r0, r10
  r0 = DAG.getNode(ISD::ADD, DL, OVT, r0, r10);

  // iadd r1, r1, r11
  r1 = DAG.getNode(ISD::ADD, DL, OVT, r1, r11);

  // ixor r0, r0, r10
  r0 = DAG.getNode(ISD::XOR, DL, OVT, r0, r10);

  // ixor r1, r1, r11
  r1 = DAG.getNode(ISD::XOR, DL, OVT, r1, r11);

  // udiv r20, r0, r1
  SDValue r20 = DAG.getNode(ISD::UDIV, DL, OVT, r0, r1);

  // umul r20, r20, r1
  r20 = DAG.getNode(AMDGPUISD::UMUL, DL, OVT, r20, r1);

  // sub r0, r0, r20
  r0 = DAG.getNode(ISD::SUB, DL, OVT, r0, r20);

  // iadd r0, r0, r10
  r0 = DAG.getNode(ISD::ADD, DL, OVT, r0, r10);

  // ixor DST, r0, r10
  SDValue DST = DAG.getNode(ISD::XOR, DL, OVT, r0, r10);
  return DST;
}

SDValue AMDGPUTargetLowering::LowerSREM64(SDValue Op, SelectionDAG &DAG) const {
  return SDValue(Op.getNode(), 0);
}

SDValue AMDGPUTargetLowering::LowerSREM(SDValue Op, SelectionDAG &DAG) const {
  EVT OVT = Op.getValueType();

  if (OVT.getScalarType() == MVT::i64)
    return LowerSREM64(Op, DAG);

  if (OVT.getScalarType() == MVT::i32)
    return LowerSREM32(Op, DAG);

  return SDValue(Op.getNode(), 0);
}

SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue Num = Op.getOperand(0);
  SDValue Den = Op.getOperand(1);
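
  // The code below implements the usual reciprocal-estimate unsigned
  // division: compute an approximate quotient Num * (2^32 / Den) >> 32 from
  // the hardware reciprocal, then correct the quotient and the remainder by
  // at most one Den in either direction.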

  // RCP = URECIP(Den) = 2^32 / Den + e
  // e is rounding error.
  SDValue RCP = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Den);

  // RCP_LO = umulo(RCP, Den)
  SDValue RCP_LO = DAG.getNode(ISD::UMULO, DL, VT, RCP, Den);

  // RCP_HI = mulhu(RCP, Den)
  SDValue RCP_HI = DAG.getNode(ISD::MULHU, DL, VT, RCP, Den);

  // NEG_RCP_LO = -RCP_LO
  SDValue NEG_RCP_LO = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
                                   RCP_LO);

  // ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO)
  SDValue ABS_RCP_LO = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, VT),
                                       NEG_RCP_LO, RCP_LO,
                                       ISD::SETEQ);
  // Calculate the rounding error from the URECIP instruction
  // E = mulhu(ABS_RCP_LO, RCP)
  SDValue E = DAG.getNode(ISD::MULHU, DL, VT, ABS_RCP_LO, RCP);

  // RCP_A_E = RCP + E
  SDValue RCP_A_E = DAG.getNode(ISD::ADD, DL, VT, RCP, E);

  // RCP_S_E = RCP - E
  SDValue RCP_S_E = DAG.getNode(ISD::SUB, DL, VT, RCP, E);

  // Tmp0 = (RCP_HI == 0 ? RCP_A_E : RCP_S_E)
  SDValue Tmp0 = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, VT),
                                 RCP_A_E, RCP_S_E,
                                 ISD::SETEQ);
  // Quotient = mulhu(Tmp0, Num)
  SDValue Quotient = DAG.getNode(ISD::MULHU, DL, VT, Tmp0, Num);

  // Num_S_Remainder = Quotient * Den
  SDValue Num_S_Remainder = DAG.getNode(ISD::UMULO, DL, VT, Quotient, Den);

  // Remainder = Num - Num_S_Remainder
  SDValue Remainder = DAG.getNode(ISD::SUB, DL, VT, Num, Num_S_Remainder);

  // Remainder_GE_Den = (Remainder >= Den ? -1 : 0)
  SDValue Remainder_GE_Den = DAG.getSelectCC(DL, Remainder, Den,
                                             DAG.getConstant(-1, VT),
                                             DAG.getConstant(0, VT),
                                             ISD::SETUGE);
  // Remainder_GE_Zero = (Num >= Num_S_Remainder ? -1 : 0)
  SDValue Remainder_GE_Zero = DAG.getSelectCC(DL, Num,
                                              Num_S_Remainder,
                                              DAG.getConstant(-1, VT),
                                              DAG.getConstant(0, VT),
                                              ISD::SETUGE);
  // Tmp1 = Remainder_GE_Den & Remainder_GE_Zero
  SDValue Tmp1 = DAG.getNode(ISD::AND, DL, VT, Remainder_GE_Den,
                             Remainder_GE_Zero);

  // Calculate Division result:

  // Quotient_A_One = Quotient + 1
  SDValue Quotient_A_One = DAG.getNode(ISD::ADD, DL, VT, Quotient,
                                       DAG.getConstant(1, VT));

  // Quotient_S_One = Quotient - 1
  SDValue Quotient_S_One = DAG.getNode(ISD::SUB, DL, VT, Quotient,
                                       DAG.getConstant(1, VT));

  // Div = (Tmp1 == 0 ? Quotient : Quotient_A_One)
  SDValue Div = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, VT),
                                Quotient, Quotient_A_One, ISD::SETEQ);

  // Div = (Remainder_GE_Zero == 0 ? Quotient_S_One : Div)
  Div = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, VT),
                        Quotient_S_One, Div, ISD::SETEQ);

  // Calculate Rem result:

  // Remainder_S_Den = Remainder - Den
  SDValue Remainder_S_Den = DAG.getNode(ISD::SUB, DL, VT, Remainder, Den);

  // Remainder_A_Den = Remainder + Den
  SDValue Remainder_A_Den = DAG.getNode(ISD::ADD, DL, VT, Remainder, Den);

  // Rem = (Tmp1 == 0 ? Remainder : Remainder_S_Den)
  SDValue Rem = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, VT),
                                Remainder, Remainder_S_Den, ISD::SETEQ);

  // Rem = (Remainder_GE_Zero == 0 ? Remainder_A_Den : Rem)
  Rem = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, VT),
                        Remainder_A_Den, Rem, ISD::SETEQ);

  SDValue Ops[2] = {
    Div,
    Rem
  };
  return DAG.getMergeValues(Ops, DL);
}

SDValue AMDGPUTargetLowering::LowerFCEIL(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);

  // result = trunc(src)
  // if (src > 0.0 && src != result)
  //   result += 1.0

  SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);

  const SDValue Zero = DAG.getConstantFP(0.0, MVT::f64);
  const SDValue One = DAG.getConstantFP(1.0, MVT::f64);

  EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::f64);

  SDValue Gt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOGT);
  SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
  SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Gt0, NeTrunc);

  SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, One, Zero);
  return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
}

SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);

  assert(Op.getValueType() == MVT::f64);

  const SDValue Zero = DAG.getConstant(0, MVT::i32);
  const SDValue One = DAG.getConstant(1, MVT::i32);

  SDValue VecSrc = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);

  // Extract the upper half, since this is where we will find the sign and
  // exponent.
  SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, VecSrc, One);

  const unsigned FractBits = 52;
  const unsigned ExpBits = 11;
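
  // An IEEE-754 double has 52 fraction bits. For an unbiased exponent Exp in
  // [0, 51], the low (52 - Exp) bits of the value are fractional and are
  // masked off below; Exp < 0 means |Src| < 1 (only the sign bit survives),
  // and Exp > 51 means Src is already an integer.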

  // Extract the exponent.
  SDValue ExpPart = DAG.getNode(AMDGPUISD::BFE_I32, SL, MVT::i32,
                                Hi,
                                DAG.getConstant(FractBits - 32, MVT::i32),
                                DAG.getConstant(ExpBits, MVT::i32));
  SDValue Exp = DAG.getNode(ISD::SUB, SL, MVT::i32, ExpPart,
                            DAG.getConstant(1023, MVT::i32));

  // Extract the sign bit.
  const SDValue SignBitMask = DAG.getConstant(UINT32_C(1) << 31, MVT::i32);
  SDValue SignBit = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, SignBitMask);

  // Extend back to 64 bits.
  SDValue SignBit64 = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
                                  Zero, SignBit);
  SignBit64 = DAG.getNode(ISD::BITCAST, SL, MVT::i64, SignBit64);

  SDValue BcInt = DAG.getNode(ISD::BITCAST, SL, MVT::i64, Src);
  const SDValue FractMask
    = DAG.getConstant((UINT64_C(1) << FractBits) - 1, MVT::i64);

  SDValue Shr = DAG.getNode(ISD::SRA, SL, MVT::i64, FractMask, Exp);
  SDValue Not = DAG.getNOT(SL, Shr, MVT::i64);
  SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, BcInt, Not);

  EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::i32);

  const SDValue FiftyOne = DAG.getConstant(FractBits - 1, MVT::i32);

  SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT);
  SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT);

  SDValue Tmp1 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpLt0, SignBit64, Tmp0);
  SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpGt51, BcInt, Tmp1);

  return DAG.getNode(ISD::BITCAST, SL, MVT::f64, Tmp2);
}

SDValue AMDGPUTargetLowering::LowerFRINT(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);

  assert(Op.getValueType() == MVT::f64);

  APFloat C1Val(APFloat::IEEEdouble, "0x1.0p+52");
  SDValue C1 = DAG.getConstantFP(C1Val, MVT::f64);
  SDValue CopySign = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, C1, Src);

  SDValue Tmp1 = DAG.getNode(ISD::FADD, SL, MVT::f64, Src, CopySign);
  SDValue Tmp2 = DAG.getNode(ISD::FSUB, SL, MVT::f64, Tmp1, CopySign);
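
  // Adding and then subtracting 2^52 (with Src's sign) forces the FPU to
  // round Src to an integer, because doubles with magnitude >= 2^52 have no
  // fractional bits. Inputs already at least that large are passed through
  // unchanged by the select below.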

  SDValue Fabs = DAG.getNode(ISD::FABS, SL, MVT::f64, Src);

  APFloat C2Val(APFloat::IEEEdouble, "0x1.fffffffffffffp+51");
  SDValue C2 = DAG.getConstantFP(C2Val, MVT::f64);

  EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::f64);
  SDValue Cond = DAG.getSetCC(SL, SetCCVT, Fabs, C2, ISD::SETOGT);

  return DAG.getSelect(SL, MVT::f64, Cond, Src, Tmp2);
}

SDValue AMDGPUTargetLowering::LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const {
  // FNEARBYINT and FRINT are the same, except in their handling of FP
  // exceptions. Those aren't really meaningful for us, and OpenCL only has
  // rint, so just treat them as equivalent.
  return DAG.getNode(ISD::FRINT, SDLoc(Op), Op.getValueType(), Op.getOperand(0));
}

SDValue AMDGPUTargetLowering::LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);

  // result = trunc(src);
  // if (src < 0.0 && src != result)
  //   result += -1.0;
  SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);

  const SDValue Zero = DAG.getConstantFP(0.0, MVT::f64);
  const SDValue NegOne = DAG.getConstantFP(-1.0, MVT::f64);

  EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::f64);

  SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOLT);
  SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
  SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);

  SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, NegOne, Zero);
  return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
}
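
// i64 -> f32 uint_to_fp is expanded as float(Hi) * 2^32 + float(Lo). Worked
// example (illustrative, not from the original source): 0x100000001 splits
// into Hi = 1 and Lo = 1, giving 1.0f * 4294967296.0f + 1.0f.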
SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue S0 = Op.getOperand(0);
  SDLoc DL(Op);
  if (Op.getValueType() != MVT::f32 || S0.getValueType() != MVT::i64)
    return SDValue();

  // f32 uint_to_fp i64
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
                           DAG.getConstant(0, MVT::i32));
  SDValue FloatLo = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Lo);
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
                           DAG.getConstant(1, MVT::i32));
  SDValue FloatHi = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Hi);
  FloatHi = DAG.getNode(ISD::FMUL, DL, MVT::f32, FloatHi,
                        DAG.getConstantFP(4294967296.0f, MVT::f32)); // 2^32
  return DAG.getNode(ISD::FADD, DL, MVT::f32, FloatLo, FloatHi);
}
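
// sign_extend_inreg can always be expanded as shl + ashr by the width
// difference. Worked example (illustrative, not from the original source):
// sign-extending an i8 held in an i32 uses BitsDiff = 24, so
// 0x000000FF << 24 >> 24 (arithmetic) gives 0xFFFFFFFF (-1).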
SDValue AMDGPUTargetLowering::ExpandSIGN_EXTEND_INREG(SDValue Op,
                                                      unsigned BitsDiff,
                                                      SelectionDAG &DAG) const {
  MVT VT = Op.getSimpleValueType();
  SDLoc DL(Op);
  SDValue Shift = DAG.getConstant(BitsDiff, VT);
  // Shift left by 'Shift' bits.
  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, Op.getOperand(0), Shift);
  // Signed shift right by 'Shift' bits.
  return DAG.getNode(ISD::SRA, DL, VT, Shl, Shift);
}

SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                     SelectionDAG &DAG) const {
  EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
  MVT VT = Op.getSimpleValueType();
  MVT ScalarVT = VT.getScalarType();

  if (!VT.isVector())
    return SDValue();

  SDValue Src = Op.getOperand(0);
  SDLoc DL(Op);

  // TODO: Don't scalarize on Evergreen?
  unsigned NElts = VT.getVectorNumElements();
  SmallVector<SDValue, 8> Args;
  DAG.ExtractVectorElements(Src, Args, 0, NElts);

  SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType());
  for (unsigned I = 0; I < NElts; ++I)
    Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp);

  return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Args);
}

//===----------------------------------------------------------------------===//
// Custom DAG optimizations
//===----------------------------------------------------------------------===//
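
// The next two helpers prove that a value fits in 24 bits so a multiply can
// use the hardware's 24-bit paths. For isU24, KnownZero.countLeadingOnes()
// counts the provably-zero high bits; e.g. (illustrative) an i32 whose top 8
// bits are known zero passes the <= 24 check.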
static bool isU24(SDValue Op, SelectionDAG &DAG) {
  APInt KnownZero, KnownOne;
  EVT VT = Op.getValueType();
  DAG.computeKnownBits(Op, KnownZero, KnownOne);

  return (VT.getSizeInBits() - KnownZero.countLeadingOnes()) <= 24;
}

static bool isI24(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();

  // In order for this to be a signed 24-bit value, bit 23 must be a sign bit.
  return VT.getSizeInBits() >= 24 && // Types less than 24-bit should be treated
                                     // as unsigned 24-bit values.
         (VT.getSizeInBits() - DAG.ComputeNumSignBits(Op)) < 24;
}
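
// A 24-bit multiply only reads the low 24 bits of each operand, so tell the
// demanded-bits machinery the high bits are dead and let it simplify the
// operands in place.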
static void simplifyI24(SDValue Op, TargetLowering::DAGCombinerInfo &DCI) {
  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT = Op.getValueType();

  APInt Demanded = APInt::getLowBitsSet(VT.getSizeInBits(), 24);
  APInt KnownZero, KnownOne;
  TargetLowering::TargetLoweringOpt TLO(DAG, true, true);
  if (TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO))
    DCI.CommitTargetLoweringOpt(TLO);
}
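
// The signedness of IntTy selects sign- vs. zero-extraction below: shifting
// the field to the top and back down is arithmetic for int32_t and logical
// for uint32_t. Worked example (illustrative, not from the original source):
// Src0 = 0x00FF0000, Offset = 16, Width = 8 computes (0x00FF0000 << 8) >> 24,
// which is -1 as int32_t but 0xFF as uint32_t.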
template <typename IntTy>
static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0,
                               uint32_t Offset, uint32_t Width) {
  if (Width + Offset < 32) {
    IntTy Result = (Src0 << (32 - Offset - Width)) >> (32 - Width);
    return DAG.getConstant(Result, MVT::i32);
  }

  return DAG.getConstant(Src0 >> Offset, MVT::i32);
}
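
// Combine a 32-bit (or narrower) multiply whose operands provably fit in 24
// bits into the dedicated MUL_U24 / MUL_I24 nodes when the subtarget supports
// them, and fold or shrink BFE bitfield extracts.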
SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  switch (N->getOpcode()) {
  default:
    break;
  case ISD::MUL: {
    EVT VT = N->getValueType(0);
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue Mul;

    // FIXME: Add support for 24-bit multiply with 64-bit output on SI.
    if (VT.isVector() || VT.getSizeInBits() > 32)
      break;

    if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
      N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
      N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
      Mul = DAG.getNode(AMDGPUISD::MUL_U24, DL, MVT::i32, N0, N1);
    } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
      N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
      N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
      Mul = DAG.getNode(AMDGPUISD::MUL_I24, DL, MVT::i32, N0, N1);
    } else {
      break;
    }

    // We need to use sext even for MUL_U24, because MUL_U24 is used
    // for signed multiply of 8 and 16-bit types.
    SDValue Reg = DAG.getSExtOrTrunc(Mul, DL, VT);

    return Reg;
  }
  case AMDGPUISD::MUL_I24:
  case AMDGPUISD::MUL_U24: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    simplifyI24(N0, DCI);
    simplifyI24(N1, DCI);
    return SDValue();
  }
  case ISD::SELECT_CC: {
    return CombineMinMax(N, DAG);
  }
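
  // BFE_I32 / BFE_U32 extract a Width-bit field starting at bit Offset and
  // sign- or zero-extend it; e.g. (illustrative) BFE_U32(x, 8, 4) is
  // (x >> 8) & 0xf.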
  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    assert(!N->getValueType(0).isVector() &&
           "Vector handling of BFE not implemented");
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
    if (!Width)
      break;

    uint32_t WidthVal = Width->getZExtValue() & 0x1f;
    if (WidthVal == 0)
      return DAG.getConstant(0, MVT::i32);

    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (!Offset)
      break;

    SDValue BitsFrom = N->getOperand(0);
    uint32_t OffsetVal = Offset->getZExtValue() & 0x1f;

    bool Signed = N->getOpcode() == AMDGPUISD::BFE_I32;

    if (OffsetVal == 0) {
      // This is already sign / zero extended, so try to fold away extra BFEs.
      unsigned SignBits = Signed ? (32 - WidthVal + 1) : (32 - WidthVal);

      unsigned OpSignBits = DAG.ComputeNumSignBits(BitsFrom);
      if (OpSignBits >= SignBits)
        return BitsFrom;

      EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), WidthVal);
      if (Signed) {
        // This is a sign_extend_inreg. Replace it to take advantage of
        // existing DAG Combines. If not eliminated, we will match back to BFE
        // during selection.

        // TODO: The sext_inreg of extended types is not combined here,
        // although we could handle them in a single BFE.
        return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, BitsFrom,
                           DAG.getValueType(SmallVT));
      }

      return DAG.getZeroExtendInReg(BitsFrom, DL, SmallVT);
    }

    if (ConstantSDNode *Val = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      if (Signed) {
        return constantFoldBFE<int32_t>(DAG,
                                        Val->getSExtValue(),
                                        OffsetVal,
                                        WidthVal);
      }

      return constantFoldBFE<uint32_t>(DAG,
                                       Val->getZExtValue(),
                                       OffsetVal,
                                       WidthVal);
    }

    APInt Demanded = APInt::getBitsSet(32,
                                       OffsetVal,
                                       OffsetVal + WidthVal);

    if ((OffsetVal + WidthVal) >= 32) {
      SDValue ShiftVal = DAG.getConstant(OffsetVal, MVT::i32);
      return DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, MVT::i32,
                         BitsFrom, ShiftVal);
    }

    APInt KnownZero, KnownOne;
    TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                          !DCI.isBeforeLegalizeOps());
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    if (TLO.ShrinkDemandedConstant(BitsFrom, Demanded) ||
        TLI.SimplifyDemandedBits(BitsFrom, Demanded, KnownZero, KnownOne, TLO)) {
      DCI.CommitTargetLoweringOpt(TLO);
    }

    break;
  }
  }

  return SDValue();
}

//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//
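
// Calling-convention lowering may have split or promoted aggregate arguments,
// e.g. (illustrative) a vector argument arriving as several scalar pieces;
// this rebuilds the incoming argument list with the original types.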
void AMDGPUTargetLowering::getOriginalFunctionArgs(
                               SelectionDAG &DAG,
                               const Function *F,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               SmallVectorImpl<ISD::InputArg> &OrigIns) const {

  for (unsigned i = 0, e = Ins.size(); i < e; ++i) {
    if (Ins[i].ArgVT == Ins[i].VT) {
      OrigIns.push_back(Ins[i]);
      continue;
    }

    EVT VT;
    if (Ins[i].ArgVT.isVector() && !Ins[i].VT.isVector()) {
      // Vector has been split into scalars.
      VT = Ins[i].ArgVT.getVectorElementType();
    } else if (Ins[i].VT.isVector() && Ins[i].ArgVT.isVector() &&
               Ins[i].ArgVT.getVectorElementType() !=
                 Ins[i].VT.getVectorElementType()) {
      // Vector elements have been promoted.
      VT = Ins[i].ArgVT;
    } else {
      // Vector has been split into smaller vectors.
      VT = Ins[i].VT;
    }

    ISD::InputArg Arg(Ins[i].Flags, VT, VT, Ins[i].Used,
                      Ins[i].OrigArgIndex, Ins[i].PartOffset);
    OrigIns.push_back(Arg);
  }
}

bool AMDGPUTargetLowering::isHWTrueValue(SDValue Op) const {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->isExactlyValue(1.0);
  }
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
    return C->isAllOnesValue();
  }
  return false;
}

bool AMDGPUTargetLowering::isHWFalseValue(SDValue Op) const {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->getValueAPF().isZero();
  }
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
    return C->isNullValue();
  }
  return false;
}
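
// The first request for a given physical live-in register creates and records
// a virtual register for it; later requests for the same physical register
// reuse the recorded mapping.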
SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
                                                   const TargetRegisterClass *RC,
                                                   unsigned Reg, EVT VT) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned VirtualRegister;
  if (!MRI.isLiveIn(Reg)) {
    VirtualRegister = MRI.createVirtualRegister(RC);
    MRI.addLiveIn(Reg, VirtualRegister);
  } else {
    VirtualRegister = MRI.getLiveInVirtReg(Reg);
  }
  return DAG.getRegister(VirtualRegister, VT);
}

#define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;

const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return nullptr;
  NODE_NAME_CASE(CALL);
  NODE_NAME_CASE(UMUL);
  NODE_NAME_CASE(DIV_INF);
  NODE_NAME_CASE(RET_FLAG);
  NODE_NAME_CASE(BRANCH_COND);

  NODE_NAME_CASE(DWORDADDR)
  NODE_NAME_CASE(FRACT)
  NODE_NAME_CASE(CLAMP)
  NODE_NAME_CASE(FMAX)
  NODE_NAME_CASE(SMAX)
  NODE_NAME_CASE(UMAX)
  NODE_NAME_CASE(FMIN)
  NODE_NAME_CASE(SMIN)
  NODE_NAME_CASE(UMIN)
  NODE_NAME_CASE(URECIP)
  NODE_NAME_CASE(DIV_SCALE)
  NODE_NAME_CASE(DIV_FMAS)
  NODE_NAME_CASE(DIV_FIXUP)
  NODE_NAME_CASE(TRIG_PREOP)
  NODE_NAME_CASE(DOT4)
  NODE_NAME_CASE(BFE_U32)
  NODE_NAME_CASE(BFE_I32)
  NODE_NAME_CASE(BREV)
  NODE_NAME_CASE(MUL_U24)
  NODE_NAME_CASE(MUL_I24)
  NODE_NAME_CASE(MAD_U24)
  NODE_NAME_CASE(MAD_I24)
  NODE_NAME_CASE(EXPORT)
  NODE_NAME_CASE(CONST_ADDRESS)
  NODE_NAME_CASE(REGISTER_LOAD)
  NODE_NAME_CASE(REGISTER_STORE)
  NODE_NAME_CASE(LOAD_CONSTANT)
  NODE_NAME_CASE(LOAD_INPUT)
  NODE_NAME_CASE(SAMPLE)
  NODE_NAME_CASE(SAMPLEB)
  NODE_NAME_CASE(SAMPLED)
  NODE_NAME_CASE(SAMPLEL)
  NODE_NAME_CASE(CVT_F32_UBYTE0)
  NODE_NAME_CASE(CVT_F32_UBYTE1)
  NODE_NAME_CASE(CVT_F32_UBYTE2)
  NODE_NAME_CASE(CVT_F32_UBYTE3)
  NODE_NAME_CASE(BUILD_VERTICAL_VECTOR)
  NODE_NAME_CASE(STORE_MSKOR)
  NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
  }
}
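
// Known bits for a min or max: whichever operand is selected, a bit is known
// only if it is known to have the same value in both inputs, so intersect the
// two known-bits sets.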
static void computeKnownBitsForMinMax(const SDValue Op0,
                                      const SDValue Op1,
                                      APInt &KnownZero,
                                      APInt &KnownOne,
                                      const SelectionDAG &DAG,
                                      unsigned Depth) {
  APInt Op0Zero, Op0One;
  APInt Op1Zero, Op1One;
  DAG.computeKnownBits(Op0, Op0Zero, Op0One, Depth);
  DAG.computeKnownBits(Op1, Op1Zero, Op1One, Depth);

  KnownZero = Op0Zero & Op1Zero;
  KnownOne = Op0One & Op1One;
}

void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
  const SDValue Op,
  APInt &KnownZero,
  APInt &KnownOne,
  const SelectionDAG &DAG,
  unsigned Depth) const {
  KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0); // Don't know anything.

  unsigned Opc = Op.getOpcode();

  switch (Opc) {
  default:
    break;
  case ISD::INTRINSIC_WO_CHAIN: {
    // FIXME: The intrinsic should just use the node.
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
    case AMDGPUIntrinsic::AMDGPU_imax:
    case AMDGPUIntrinsic::AMDGPU_umax:
    case AMDGPUIntrinsic::AMDGPU_imin:
    case AMDGPUIntrinsic::AMDGPU_umin:
      computeKnownBitsForMinMax(Op.getOperand(1), Op.getOperand(2),
                                KnownZero, KnownOne, DAG, Depth);
      break;
    default:
      break;
    }

    break;
  }
  case AMDGPUISD::SMAX:
  case AMDGPUISD::UMAX:
  case AMDGPUISD::SMIN:
  case AMDGPUISD::UMIN:
    computeKnownBitsForMinMax(Op.getOperand(0), Op.getOperand(1),
                              KnownZero, KnownOne, DAG, Depth);
    break;

  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    ConstantSDNode *CWidth = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    if (!CWidth)
      return;

    unsigned BitWidth = 32;
    uint32_t Width = CWidth->getZExtValue() & 0x1f;
    if (Width == 0) {
      KnownZero = APInt::getAllOnesValue(BitWidth);
      KnownOne = APInt::getNullValue(BitWidth);
      return;
    }

    // FIXME: This could do a lot more. If offset is 0, should be the same as
    // sign_extend_inreg implementation, but that involves duplicating it.
    if (Opc == AMDGPUISD::BFE_I32)
      KnownOne = APInt::getHighBitsSet(BitWidth, BitWidth - Width);
    else
      KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - Width);

    break;
  }
  }
}
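
// BFE_I32 with a constant width W sign-extends a W-bit field, so at least
// 32 - W + 1 of the result's top bits are copies of the sign bit; e.g.
// (illustrative) W = 8 guarantees 25 sign bits. For BFE_U32 the top 32 - W
// bits are zero, giving 32 - W known sign bits.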
unsigned AMDGPUTargetLowering::ComputeNumSignBitsForTargetNode(
  SDValue Op,
  const SelectionDAG &DAG,
  unsigned Depth) const {
  switch (Op.getOpcode()) {
  case AMDGPUISD::BFE_I32: {
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    if (!Width)
      return 1;

    unsigned SignBits = 32 - Width->getZExtValue() + 1;
    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    if (!Offset || !Offset->isNullValue())
      return SignBits;

    // TODO: Could probably figure something out with non-0 offsets.
    unsigned Op0SignBits = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
    return std::max(SignBits, Op0SignBits);
  }

  case AMDGPUISD::BFE_U32: {
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    return Width ? 32 - (Width->getZExtValue() & 0x1f) : 1;
  }