//===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This is the parent TargetLowering class for hardware code gen
/// targets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUISelLowering.h"
#include "AMDGPUFrameLowering.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDILIntrinsicInfo.h"
#include "R600MachineFunctionInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DataLayout.h"

using namespace llvm;

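/// Helper for the generated calling convention included below: reserves a
/// stack slot sized for the value being passed and records its location with
/// the CCState.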
static bool allocateStack(unsigned ValNo, MVT ValVT, MVT LocVT,
                          CCValAssign::LocInfo LocInfo,
                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
  unsigned Offset = State.AllocateStack(ValVT.getStoreSize(),
                                        ArgFlags.getOrigAlign());
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));

  return true;
}

#include "AMDGPUGenCallingConv.inc"

AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
  TargetLowering(TM, new TargetLoweringObjectFileELF()) {

  Subtarget = &TM.getSubtarget<AMDGPUSubtarget>();

  // Initialize target lowering borrowed from AMDIL.
  InitAMDILLowering();

  // We need to custom lower some of the intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // Library functions. These default to Expand, but we have instructions
  // for them.
  setOperationAction(ISD::FCEIL,  MVT::f32, Legal);
  setOperationAction(ISD::FEXP2,  MVT::f32, Legal);
  setOperationAction(ISD::FPOW,   MVT::f32, Legal);
  setOperationAction(ISD::FLOG2,  MVT::f32, Legal);
  setOperationAction(ISD::FABS,   MVT::f32, Legal);
  setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
  setOperationAction(ISD::FRINT,  MVT::f32, Legal);
  setOperationAction(ISD::FROUND, MVT::f32, Legal);
  setOperationAction(ISD::FTRUNC, MVT::f32, Legal);

  // The hardware supports ROTR, but not ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);

  // Lower floating point store/load to integer store/load to reduce the
  // number of patterns in tablegen.
  setOperationAction(ISD::STORE, MVT::f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);

  setOperationAction(ISD::STORE, MVT::v2f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v4f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::v8f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::STORE, MVT::v16f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::STORE, MVT::f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::f64, MVT::i64);

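  // With these promotions in place, the legalizer rewrites an FP store such
  // as (store f32:%v) into (store (i32 (bitcast %v))), so tablegen only needs
  // integer store patterns.
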
  // Custom lowering of vector stores is required for local address space
  // stores.
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);
  // XXX: Native v2i32 local address space stores are possible, but not
  // currently implemented.
  setOperationAction(ISD::STORE, MVT::v2i32, Custom);

  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Custom);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Custom);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Custom);

  // XXX: This can be changed to Custom, once ExpandVectorStores can
  // handle 64-bit stores.
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);

  setTruncStoreAction(MVT::i64, MVT::i1, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
  setTruncStoreAction(MVT::v4i64, MVT::v4i1, Expand);

  setOperationAction(ISD::LOAD, MVT::f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);

  setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::LOAD, MVT::f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f64, MVT::i64);

  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i32, Custom);

  setLoadExtAction(ISD::EXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i8, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i8, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4i16, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v4i16, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i16, Expand);

  setOperationAction(ISD::BR_CC, MVT::i1, Expand);

  setOperationAction(ISD::FNEG, MVT::v2f32, Expand);
  setOperationAction(ISD::FNEG, MVT::v4f32, Expand);

  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);

  setOperationAction(ISD::MUL, MVT::i64, Expand);

  setOperationAction(ISD::UDIV, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::VSELECT, MVT::v2f32, Expand);
  setOperationAction(ISD::VSELECT, MVT::v4f32, Expand);

  static const MVT::SimpleValueType IntTypes[] = {
    MVT::v2i32, MVT::v4i32
  };
  const size_t NumIntTypes = array_lengthof(IntTypes);

  for (unsigned int x = 0; x < NumIntTypes; ++x) {
    MVT::SimpleValueType VT = IntTypes[x];
    // Expand the following operations for the current type by default.
    setOperationAction(ISD::ADD,  VT, Expand);
    setOperationAction(ISD::AND,  VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::MUL,  VT, Expand);
    setOperationAction(ISD::OR,   VT, Expand);
    setOperationAction(ISD::SHL,  VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SRL,  VT, Expand);
    setOperationAction(ISD::SRA,  VT, Expand);
    setOperationAction(ISD::SUB,  VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::XOR,  VT, Expand);
  }

  static const MVT::SimpleValueType FloatTypes[] = {
    MVT::v2f32, MVT::v4f32
  };
  const size_t NumFloatTypes = array_lengthof(FloatTypes);

  for (unsigned int x = 0; x < NumFloatTypes; ++x) {
    MVT::SimpleValueType VT = FloatTypes[x];
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
  }

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i1, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i1, Custom);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Custom);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Custom);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Custom);
}

//===----------------------------------------------------------------------===//
// Target Information
//===----------------------------------------------------------------------===//

MVT AMDGPUTargetLowering::getVectorIdxTy() const {
  return MVT::i32;
}

bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy,
                                                   EVT CastTy) const {
  if (LoadTy.getSizeInBits() != CastTy.getSizeInBits())
    return true;

  unsigned LScalarSize = LoadTy.getScalarType().getSizeInBits();
  unsigned CastScalarSize = CastTy.getScalarType().getSizeInBits();

  return ((LScalarSize <= CastScalarSize) ||
          (CastScalarSize >= 32) ||
          (LScalarSize < 32));
}

//===---------------------------------------------------------------------===//
// Target Properties
//===---------------------------------------------------------------------===//

bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32;
}

bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32;
}

bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
  // Truncate is just accessing a subregister.
  return Dest.bitsLT(Source) && (Dest.getSizeInBits() % 32 == 0);
}

bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
  // Truncate is just accessing a subregister.
  return Dest->getPrimitiveSizeInBits() < Source->getPrimitiveSizeInBits() &&
         (Dest->getPrimitiveSizeInBits() % 32 == 0);
}

bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
  // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
  // limited number of native 64-bit operations. Shrinking an operation to fit
  // in a single 32-bit register should always be helpful. As currently used,
  // this is much less general than the name suggests, and is only used in
  // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits
  // is not profitable, and may actually be harmful.
  return SrcVT.getSizeInBits() > 32 && DestVT.getSizeInBits() == 32;
}

//===---------------------------------------------------------------------===//
// TargetLowering Callbacks
//===---------------------------------------------------------------------===//

void AMDGPUTargetLowering::AnalyzeFormalArguments(CCState &State,
                             const SmallVectorImpl<ISD::InputArg> &Ins) const {

  State.AnalyzeFormalArguments(Ins, CC_AMDGPU);
}

SDValue AMDGPUTargetLowering::LowerReturn(
                                     SDValue Chain,
                                     CallingConv::ID CallConv,
                                     bool isVarArg,
                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
                                     const SmallVectorImpl<SDValue> &OutVals,
                                     SDLoc DL, SelectionDAG &DAG) const {
  return DAG.getNode(AMDGPUISD::RET_FLAG, DL, MVT::Other, Chain);
}

//===---------------------------------------------------------------------===//
// Target specific lowering
//===---------------------------------------------------------------------===//

SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG)
    const {
  switch (Op.getOpcode()) {
  default:
    Op.getNode()->dump();
    llvm_unreachable("Custom lowering code for this "
                     "instruction is not implemented yet!");
  // AMDIL DAG lowering
  case ISD::SDIV: return LowerSDIV(Op, DAG);
  case ISD::SREM: return LowerSREM(Op, DAG);
  case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
  // AMDGPU DAG lowering
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
  case ISD::FrameIndex: return LowerFrameIndex(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
  }
  return Op;
}

SDValue AMDGPUTargetLowering::LowerConstantInitializer(const Constant* Init,
                                                       const GlobalValue *GV,
                                                       const SDValue &InitPtr,
                                                       SDValue Chain,
                                                       SelectionDAG &DAG) const {
  const DataLayout *TD = getTargetMachine().getDataLayout();
  SDLoc DL(InitPtr);
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Init)) {
    EVT VT = EVT::getEVT(CI->getType());
    PointerType *PtrTy = PointerType::get(CI->getType(), 0);
    return DAG.getStore(Chain, DL, DAG.getConstant(*CI, VT), InitPtr,
                 MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
                 TD->getPrefTypeAlignment(CI->getType()));
  } else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(Init)) {
    EVT VT = EVT::getEVT(CFP->getType());
    PointerType *PtrTy = PointerType::get(CFP->getType(), 0);
    return DAG.getStore(Chain, DL, DAG.getConstantFP(*CFP, VT), InitPtr,
                 MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
                 TD->getPrefTypeAlignment(CFP->getType()));
  } else if (Init->getType()->isAggregateType()) {
    EVT PtrVT = InitPtr.getValueType();
    unsigned NumElements = Init->getType()->getArrayNumElements();
    SmallVector<SDValue, 8> Chains;
    for (unsigned i = 0; i < NumElements; ++i) {
      SDValue Offset = DAG.getConstant(i * TD->getTypeAllocSize(
          Init->getType()->getArrayElementType()), PtrVT);
      SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, InitPtr, Offset);
      Chains.push_back(LowerConstantInitializer(Init->getAggregateElement(i),
                       GV, Ptr, Chain, DAG));
    }
    return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &Chains[0],
                       Chains.size());
  }

  llvm_unreachable("Unhandled constant initializer");
}

SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
                                                 SDValue Op,
                                                 SelectionDAG &DAG) const {

  const DataLayout *TD = getTargetMachine().getDataLayout();
  GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = G->getGlobal();

  switch (G->getAddressSpace()) {
  default: llvm_unreachable("Global Address lowering not implemented for this "
                            "address space");
  case AMDGPUAS::LOCAL_ADDRESS: {
    // XXX: What does the value of G->getOffset() mean?
    assert(G->getOffset() == 0 &&
           "Do not know what to do with a non-zero offset");

    unsigned Offset;
    if (MFI->LocalMemoryObjects.count(GV) == 0) {
      uint64_t Size = TD->getTypeAllocSize(GV->getType()->getElementType());
      Offset = MFI->LDSSize;
      MFI->LocalMemoryObjects[GV] = Offset;
      // XXX: Account for alignment?
      MFI->LDSSize += Size;
    } else {
      Offset = MFI->LocalMemoryObjects[GV];
    }

    return DAG.getConstant(Offset, getPointerTy(G->getAddressSpace()));
  }
  case AMDGPUAS::CONSTANT_ADDRESS: {
    MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
    Type *EltType = GV->getType()->getElementType();
    unsigned Size = TD->getTypeAllocSize(EltType);
    unsigned Alignment = TD->getPrefTypeAlignment(EltType);

    const GlobalVariable *Var = dyn_cast<GlobalVariable>(GV);
    const Constant *Init = Var->getInitializer();
    int FI = FrameInfo->CreateStackObject(Size, Alignment, false);
    SDValue InitPtr = DAG.getFrameIndex(FI,
        getPointerTy(AMDGPUAS::PRIVATE_ADDRESS));
    SmallVector<SDNode*, 8> WorkList;

    for (SDNode::use_iterator I = DAG.getEntryNode()->use_begin(),
                              E = DAG.getEntryNode()->use_end(); I != E; ++I) {
      if (I->getOpcode() != AMDGPUISD::REGISTER_LOAD && I->getOpcode() != ISD::LOAD)
        continue;

      WorkList.push_back(*I);
    }
    SDValue Chain = LowerConstantInitializer(Init, GV, InitPtr, DAG.getEntryNode(), DAG);
    for (SmallVector<SDNode*, 8>::iterator I = WorkList.begin(),
                                           E = WorkList.end(); I != E; ++I) {
      SmallVector<SDValue, 8> Ops;
      Ops.push_back(Chain);
      for (unsigned i = 1; i < (*I)->getNumOperands(); ++i) {
        Ops.push_back((*I)->getOperand(i));
      }
      DAG.UpdateNodeOperands(*I, &Ops[0], Ops.size());
    }
    return DAG.getZExtOrTrunc(InitPtr, SDLoc(Op),
        getPointerTy(AMDGPUAS::CONSTANT_ADDRESS));
  }
  }
}

void AMDGPUTargetLowering::ExtractVectorElements(SDValue Op, SelectionDAG &DAG,
                                                 SmallVectorImpl<SDValue> &Args,
                                                 unsigned Start,
                                                 unsigned Count) const {
  EVT VT = Op.getValueType();
  for (unsigned i = Start, e = Start + Count; i != e; ++i) {
    Args.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op),
                               VT.getVectorElementType(),
                               Op, DAG.getConstant(i, MVT::i32)));
  }
}

SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SmallVector<SDValue, 8> Args;
  SDValue A = Op.getOperand(0);
  SDValue B = Op.getOperand(1);

  ExtractVectorElements(A, DAG, Args, 0,
                        A.getValueType().getVectorNumElements());
  ExtractVectorElements(B, DAG, Args, 0,
                        B.getValueType().getVectorNumElements());

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(),
                     &Args[0], Args.size());
}

SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {

  SmallVector<SDValue, 8> Args;
  EVT VT = Op.getValueType();
  unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  ExtractVectorElements(Op.getOperand(0), DAG, Args, Start,
                        VT.getVectorNumElements());

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(),
                     &Args[0], Args.size());
}

SDValue AMDGPUTargetLowering::LowerFrameIndex(SDValue Op,
                                              SelectionDAG &DAG) const {

  MachineFunction &MF = DAG.getMachineFunction();
  const AMDGPUFrameLowering *TFL =
   static_cast<const AMDGPUFrameLowering*>(getTargetMachine().getFrameLowering());

  FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Op);
  assert(FIN);

  unsigned FrameIndex = FIN->getIndex();
  unsigned Offset = TFL->getFrameIndexOffset(MF, FrameIndex);
  return DAG.getConstant(Offset * 4 * TFL->getStackWidth(MF),
                         Op.getValueType());
}

SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
    SelectionDAG &DAG) const {
  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  switch (IntrinsicID) {
    default: return Op;
    case AMDGPUIntrinsic::AMDIL_abs:
      return LowerIntrinsicIABS(Op, DAG);
    case AMDGPUIntrinsic::AMDIL_exp:
      return DAG.getNode(ISD::FEXP2, DL, VT, Op.getOperand(1));
    case AMDGPUIntrinsic::AMDGPU_lrp:
      return LowerIntrinsicLRP(Op, DAG);
    case AMDGPUIntrinsic::AMDIL_fraction:
      return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
    case AMDGPUIntrinsic::AMDIL_max:
      return DAG.getNode(AMDGPUISD::FMAX, DL, VT, Op.getOperand(1),
                                                  Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_imax:
      return DAG.getNode(AMDGPUISD::SMAX, DL, VT, Op.getOperand(1),
                                                  Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_umax:
      return DAG.getNode(AMDGPUISD::UMAX, DL, VT, Op.getOperand(1),
                                                  Op.getOperand(2));
    case AMDGPUIntrinsic::AMDIL_min:
      return DAG.getNode(AMDGPUISD::FMIN, DL, VT, Op.getOperand(1),
                                                  Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_imin:
      return DAG.getNode(AMDGPUISD::SMIN, DL, VT, Op.getOperand(1),
                                                  Op.getOperand(2));
    case AMDGPUIntrinsic::AMDGPU_umin:
      return DAG.getNode(AMDGPUISD::UMIN, DL, VT, Op.getOperand(1),
                                                  Op.getOperand(2));
    case AMDGPUIntrinsic::AMDIL_round_nearest:
      return DAG.getNode(ISD::FRINT, DL, VT, Op.getOperand(1));
  }
}

/// IABS(a) = SMAX(sub(0, a), a)
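/// e.g. IABS(-5) = SMAX(5, -5) = 5 and IABS(3) = SMAX(-3, 3) = 3.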
SDValue AMDGPUTargetLowering::LowerIntrinsicIABS(SDValue Op,
    SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
                                              Op.getOperand(1));

  return DAG.getNode(AMDGPUISD::SMAX, DL, VT, Neg, Op.getOperand(1));
}

/// Linear Interpolation
/// LRP(a, b, c) = muladd(a, b, (1 - a) * c)
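/// e.g. LRP(0.25, x, y) = 0.25 * x + 0.75 * y.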
SDValue AMDGPUTargetLowering::LowerIntrinsicLRP(SDValue Op,
    SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue OneSubA = DAG.getNode(ISD::FSUB, DL, VT,
                                DAG.getConstantFP(1.0f, MVT::f32),
                                Op.getOperand(1));
  SDValue OneSubAC = DAG.getNode(ISD::FMUL, DL, VT, OneSubA,
                                 Op.getOperand(3));
  return DAG.getNode(ISD::FADD, DL, VT,
      DAG.getNode(ISD::FMUL, DL, VT, Op.getOperand(1), Op.getOperand(2)),
      OneSubAC);
}

/// \brief Generate Min/Max node
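/// A select_cc whose true and false values are its own compare operands is a
/// min or max, so it can be matched to a single FMIN/FMAX node.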
SDValue AMDGPUTargetLowering::LowerMinMax(SDValue Op,
    SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue True = Op.getOperand(2);
  SDValue False = Op.getOperand(3);
  SDValue CC = Op.getOperand(4);

  if (VT != MVT::f32 ||
      !((LHS == True && RHS == False) || (LHS == False && RHS == True))) {
    return SDValue();
  }

  ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
  switch (CCOpcode) {
  case ISD::SETOEQ: case ISD::SETONE: case ISD::SETUNE: case ISD::SETNE:
  case ISD::SETUEQ: case ISD::SETEQ: case ISD::SETFALSE: case ISD::SETFALSE2:
  case ISD::SETTRUE: case ISD::SETTRUE2: case ISD::SETUO: case ISD::SETO:
    llvm_unreachable("Operation should already be optimised!");
  case ISD::SETULE: case ISD::SETULT: case ISD::SETOLE: case ISD::SETOLT:
  case ISD::SETLE: case ISD::SETLT:
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMIN, DL, VT, LHS, RHS);
    return DAG.getNode(AMDGPUISD::FMAX, DL, VT, LHS, RHS);
  case ISD::SETGT: case ISD::SETGE: case ISD::SETUGE: case ISD::SETOGE:
  case ISD::SETUGT: case ISD::SETOGT:
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMAX, DL, VT, LHS, RHS);
    return DAG.getNode(AMDGPUISD::FMIN, DL, VT, LHS, RHS);
  case ISD::SETCC_INVALID:
    llvm_unreachable("Invalid setcc condcode!");
  }
  return Op;
}

SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue &Op,
                                              SelectionDAG &DAG) const {
  LoadSDNode *Load = dyn_cast<LoadSDNode>(Op);
  EVT MemEltVT = Load->getMemoryVT().getVectorElementType();
  EVT EltVT = Op.getValueType().getVectorElementType();
  EVT PtrVT = Load->getBasePtr().getValueType();
  unsigned NumElts = Load->getMemoryVT().getVectorNumElements();
  SmallVector<SDValue, 8> Loads;
  SDLoc SL(Op);

  for (unsigned i = 0, e = NumElts; i != e; ++i) {
    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, Load->getBasePtr(),
                    DAG.getConstant(i * (MemEltVT.getSizeInBits() / 8), PtrVT));
    Loads.push_back(DAG.getExtLoad(Load->getExtensionType(), SL, EltVT,
                    Load->getChain(), Ptr,
                    MachinePointerInfo(Load->getMemOperand()->getValue()),
                    MemEltVT, Load->isVolatile(), Load->isNonTemporal(),
                    Load->getAlignment()));
  }
  return DAG.getNode(ISD::BUILD_VECTOR, SL, Op.getValueType(),
                     Loads.data(), Loads.size());
}

SDValue AMDGPUTargetLowering::MergeVectorStore(const SDValue &Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = dyn_cast<StoreSDNode>(Op);
  EVT MemVT = Store->getMemoryVT();
  unsigned MemBits = MemVT.getSizeInBits();

  // Byte stores are really expensive, so if possible, try to pack a 32-bit
  // vector truncating store into an i32 store.
  // XXX: We could also optimize other vector bitwidths.
  if (!MemVT.isVector() || MemBits > 32) {
    return SDValue();
  }

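  // e.g. a truncating store of v4i32 as v4i8 becomes a single i32 store of
  // (e0 & 0xff) | (e1 & 0xff) << 8 | (e2 & 0xff) << 16 | (e3 & 0xff) << 24.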
  SDLoc DL(Op);
  const SDValue &Value = Store->getValue();
  EVT VT = Value.getValueType();
  const SDValue &Ptr = Store->getBasePtr();
  EVT MemEltVT = MemVT.getVectorElementType();
  unsigned MemEltBits = MemEltVT.getSizeInBits();
  unsigned MemNumElements = MemVT.getVectorNumElements();
  EVT PackedVT = EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits());
  SDValue Mask = DAG.getConstant((1 << MemEltBits) - 1, PackedVT);

  SDValue PackedValue;
  for (unsigned i = 0; i < MemNumElements; ++i) {
    EVT ElemVT = VT.getVectorElementType();
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ElemVT, Value,
                              DAG.getConstant(i, MVT::i32));
    Elt = DAG.getZExtOrTrunc(Elt, DL, PackedVT);
    Elt = DAG.getNode(ISD::AND, DL, PackedVT, Elt, Mask);
    SDValue Shift = DAG.getConstant(MemEltBits * i, PackedVT);
    Elt = DAG.getNode(ISD::SHL, DL, PackedVT, Elt, Shift);
    if (i == 0) {
      PackedValue = Elt;
    } else {
      PackedValue = DAG.getNode(ISD::OR, DL, PackedVT, PackedValue, Elt);
    }
  }
  return DAG.getStore(Store->getChain(), DL, PackedValue, Ptr,
                      MachinePointerInfo(Store->getMemOperand()->getValue()),
                      Store->isVolatile(), Store->isNonTemporal(),
                      Store->getAlignment());
}

SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  EVT MemEltVT = Store->getMemoryVT().getVectorElementType();
  EVT EltVT = Store->getValue().getValueType().getVectorElementType();
  EVT PtrVT = Store->getBasePtr().getValueType();
  unsigned NumElts = Store->getMemoryVT().getVectorNumElements();
  SDLoc SL(Op);

  SmallVector<SDValue, 8> Chains;

  for (unsigned i = 0, e = NumElts; i != e; ++i) {
    SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                              Store->getValue(), DAG.getConstant(i, MVT::i32));
    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT,
                              Store->getBasePtr(),
                            DAG.getConstant(i * (MemEltVT.getSizeInBits() / 8),
                                            PtrVT));
    Chains.push_back(DAG.getTruncStore(Store->getChain(), SL, Val, Ptr,
                         MachinePointerInfo(Store->getMemOperand()->getValue()),
                         MemEltVT, Store->isVolatile(), Store->isNonTemporal(),
                         Store->getAlignment()));
  }
  return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, &Chains[0], NumElts);
}

SDValue AMDGPUTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  ISD::LoadExtType ExtType = Load->getExtensionType();
  EVT VT = Op.getValueType();
  EVT MemVT = Load->getMemoryVT();

  if (ExtType != ISD::NON_EXTLOAD && !VT.isVector() && VT.getSizeInBits() > 32) {
    // We can do the extload to 32-bits, and then need to separately extend to
    // 64-bits.

    SDValue ExtLoad32 = DAG.getExtLoad(ExtType, DL, MVT::i32,
                                       Load->getChain(),
                                       Load->getBasePtr(),
                                       MemVT,
                                       Load->getMemOperand());
    return DAG.getNode(ISD::getExtForLoadExtType(ExtType), DL, VT, ExtLoad32);
  }

  // Lower loads of constant address space global variables.
  if (Load->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS &&
      isa<GlobalVariable>(GetUnderlyingObject(Load->getPointerInfo().V))) {

    SDValue Ptr = DAG.getZExtOrTrunc(Load->getBasePtr(), DL,
        getPointerTy(AMDGPUAS::PRIVATE_ADDRESS));
    Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr,
        DAG.getConstant(2, MVT::i32));
    return DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
                       Load->getChain(), Ptr,
                       DAG.getTargetConstant(0, MVT::i32), Op.getOperand(2));
  }

  if (Load->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS ||
      ExtType == ISD::NON_EXTLOAD || Load->getMemoryVT().bitsGE(MVT::i32))
    return SDValue();

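  // What follows handles sub-dword private loads: fetch the containing 32-bit
  // register, shift the addressed byte/halfword down by 8 * (ptr & 3) bits,
  // then sign- or zero-extend the result in place.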
  SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Load->getBasePtr(),
                            DAG.getConstant(2, MVT::i32));
  SDValue Ret = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
                            Load->getChain(), Ptr,
                            DAG.getTargetConstant(0, MVT::i32),
                            Op.getOperand(2));
  SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32,
                                Load->getBasePtr(),
                                DAG.getConstant(0x3, MVT::i32));
  SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                 DAG.getConstant(3, MVT::i32));

  Ret = DAG.getNode(ISD::SRL, DL, MVT::i32, Ret, ShiftAmt);

  EVT MemEltVT = MemVT.getScalarType();
  if (ExtType == ISD::SEXTLOAD) {
    SDValue MemEltVTNode = DAG.getValueType(MemEltVT);
    return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, Ret, MemEltVTNode);
  }

  return DAG.getZeroExtendInReg(Ret, DL, MemEltVT);
}

SDValue AMDGPUTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Result = AMDGPUTargetLowering::MergeVectorStore(Op, DAG);
  if (Result.getNode()) {
    return Result;
  }

  StoreSDNode *Store = cast<StoreSDNode>(Op);
  SDValue Chain = Store->getChain();
  if ((Store->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
       Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) &&
      Store->getValue().getValueType().isVector()) {
    return SplitVectorStore(Op, DAG);
  }

  EVT MemVT = Store->getMemoryVT();
  if (Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS &&
      MemVT.bitsLT(MVT::i32)) {
    unsigned Mask = 0;
    if (Store->getMemoryVT() == MVT::i8) {
      Mask = 0xff;
    } else if (Store->getMemoryVT() == MVT::i16) {
      Mask = 0xffff;
    }
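
    // The store itself is a read-modify-write of the containing 32-bit
    // register: load the dword, clear the destination field with DstMask,
    // OR in the shifted value, and write the dword back.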
    SDValue BasePtr = Store->getBasePtr();
    SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, BasePtr,
                              DAG.getConstant(2, MVT::i32));
    SDValue Dst = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, MVT::i32,
                              Chain, Ptr, DAG.getTargetConstant(0, MVT::i32));

    SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32, BasePtr,
                                  DAG.getConstant(0x3, MVT::i32));

    SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                   DAG.getConstant(3, MVT::i32));

    SDValue SExtValue = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i32,
                                    Store->getValue());

    SDValue MaskedValue = DAG.getZeroExtendInReg(SExtValue, DL, MemVT);

    SDValue ShiftedValue = DAG.getNode(ISD::SHL, DL, MVT::i32,
                                       MaskedValue, ShiftAmt);

    SDValue DstMask = DAG.getNode(ISD::SHL, DL, MVT::i32, DAG.getConstant(Mask, MVT::i32),
                                  ShiftAmt);
    DstMask = DAG.getNode(ISD::XOR, DL, MVT::i32, DstMask,
                          DAG.getConstant(0xffffffff, MVT::i32));
    Dst = DAG.getNode(ISD::AND, DL, MVT::i32, Dst, DstMask);

    SDValue Value = DAG.getNode(ISD::OR, DL, MVT::i32, Dst, ShiftedValue);
    return DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other,
                       Chain, Value, Ptr, DAG.getTargetConstant(0, MVT::i32));
  }
  return SDValue();
}

SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
    SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue Num = Op.getOperand(0);
  SDValue Den = Op.getOperand(1);

  SmallVector<SDValue, 8> Results;

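  // The sequence below is the classic reciprocal-based expansion: URECIP
  // returns an approximation of 2^32 / Den, the rounding error E is measured
  // and folded back in, and the resulting quotient estimate is off by at most
  // one, so the remainder comparisons select between Quotient - 1, Quotient,
  // and Quotient + 1 (and the matching remainder values).
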
  // RCP = URECIP(Den) = 2^32 / Den + e
  // e is rounding error.
  SDValue RCP = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Den);

  // RCP_LO = umulo(RCP, Den)
  SDValue RCP_LO = DAG.getNode(ISD::UMULO, DL, VT, RCP, Den);

  // RCP_HI = mulhu(RCP, Den)
  SDValue RCP_HI = DAG.getNode(ISD::MULHU, DL, VT, RCP, Den);

  // NEG_RCP_LO = -RCP_LO
  SDValue NEG_RCP_LO = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, VT),
                                                     RCP_LO);

  // ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO)
  SDValue ABS_RCP_LO = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, VT),
                                       NEG_RCP_LO, RCP_LO,
                                       ISD::SETEQ);
  // Calculate the rounding error from the URECIP instruction
  // E = mulhu(ABS_RCP_LO, RCP)
  SDValue E = DAG.getNode(ISD::MULHU, DL, VT, ABS_RCP_LO, RCP);

  // RCP_A_E = RCP + E
  SDValue RCP_A_E = DAG.getNode(ISD::ADD, DL, VT, RCP, E);

  // RCP_S_E = RCP - E
  SDValue RCP_S_E = DAG.getNode(ISD::SUB, DL, VT, RCP, E);

  // Tmp0 = (RCP_HI == 0 ? RCP_A_E : RCP_S_E)
  SDValue Tmp0 = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, VT),
                                 RCP_A_E, RCP_S_E,
                                 ISD::SETEQ);
  // Quotient = mulhu(Tmp0, Num)
  SDValue Quotient = DAG.getNode(ISD::MULHU, DL, VT, Tmp0, Num);

  // Num_S_Remainder = Quotient * Den
  SDValue Num_S_Remainder = DAG.getNode(ISD::UMULO, DL, VT, Quotient, Den);

  // Remainder = Num - Num_S_Remainder
  SDValue Remainder = DAG.getNode(ISD::SUB, DL, VT, Num, Num_S_Remainder);

  // Remainder_GE_Den = (Remainder >= Den ? -1 : 0)
  SDValue Remainder_GE_Den = DAG.getSelectCC(DL, Remainder, Den,
                                             DAG.getConstant(-1, VT),
                                             DAG.getConstant(0, VT),
                                             ISD::SETUGE);
  // Remainder_GE_Zero = (Num >= Num_S_Remainder ? -1 : 0)
  SDValue Remainder_GE_Zero = DAG.getSelectCC(DL, Num,
                                              Num_S_Remainder,
                                              DAG.getConstant(-1, VT),
                                              DAG.getConstant(0, VT),
                                              ISD::SETUGE);
  // Tmp1 = Remainder_GE_Den & Remainder_GE_Zero
  SDValue Tmp1 = DAG.getNode(ISD::AND, DL, VT, Remainder_GE_Den,
                             Remainder_GE_Zero);

  // Calculate Division result:

  // Quotient_A_One = Quotient + 1
  SDValue Quotient_A_One = DAG.getNode(ISD::ADD, DL, VT, Quotient,
                                       DAG.getConstant(1, VT));

  // Quotient_S_One = Quotient - 1
  SDValue Quotient_S_One = DAG.getNode(ISD::SUB, DL, VT, Quotient,
                                       DAG.getConstant(1, VT));

  // Div = (Tmp1 == 0 ? Quotient : Quotient_A_One)
  SDValue Div = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, VT),
                                Quotient, Quotient_A_One, ISD::SETEQ);

  // Div = (Remainder_GE_Zero == 0 ? Quotient_S_One : Div)
  Div = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, VT),
                        Quotient_S_One, Div, ISD::SETEQ);

  // Calculate Rem result:

  // Remainder_S_Den = Remainder - Den
  SDValue Remainder_S_Den = DAG.getNode(ISD::SUB, DL, VT, Remainder, Den);

  // Remainder_A_Den = Remainder + Den
  SDValue Remainder_A_Den = DAG.getNode(ISD::ADD, DL, VT, Remainder, Den);

  // Rem = (Tmp1 == 0 ? Remainder : Remainder_S_Den)
  SDValue Rem = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, VT),
                                Remainder, Remainder_S_Den, ISD::SETEQ);

  // Rem = (Remainder_GE_Zero == 0 ? Remainder_A_Den : Rem)
  Rem = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, VT),
                        Remainder_A_Den, Rem, ISD::SETEQ);

  SDValue Ops[2];
  Ops[0] = Div;
  Ops[1] = Rem;
  return DAG.getMergeValues(Ops, 2, DL);
}

SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue S0 = Op.getOperand(0);
  SDLoc DL(Op);
  if (Op.getValueType() != MVT::f32 || S0.getValueType() != MVT::i64)
    return SDValue();

  // f32 uint_to_fp i64
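  // Convert each 32-bit half separately and recombine:
  //   (float)x = (float)hi * 2^32 + (float)lo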
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
                           DAG.getConstant(0, MVT::i32));
  SDValue FloatLo = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Lo);
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, S0,
                           DAG.getConstant(1, MVT::i32));
  SDValue FloatHi = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, Hi);
  FloatHi = DAG.getNode(ISD::FMUL, DL, MVT::f32, FloatHi,
                        DAG.getConstantFP(4294967296.0f, MVT::f32)); // 2^32
  return DAG.getNode(ISD::FADD, DL, MVT::f32, FloatLo, FloatHi);
}

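/// Expand sign_extend_inreg using two shifts; e.g. sign-extending the low i8
/// of an i32 becomes (sra (shl x, 24), 24).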
SDValue AMDGPUTargetLowering::ExpandSIGN_EXTEND_INREG(SDValue Op,
                                                      unsigned BitsDiff,
                                                      SelectionDAG &DAG) const {
  MVT VT = Op.getSimpleValueType();
  SDLoc DL(Op);
  SDValue Shift = DAG.getConstant(BitsDiff, VT);
  // Shift left by 'Shift' bits.
  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, Op.getOperand(0), Shift);
  // Signed shift right by 'Shift' bits.
  return DAG.getNode(ISD::SRA, DL, VT, Shl, Shift);
}

SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                     SelectionDAG &DAG) const {
  EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
  MVT VT = Op.getSimpleValueType();
  MVT ScalarVT = VT.getScalarType();
  SDLoc DL(Op);

  unsigned SrcBits = ExtraVT.getScalarType().getSizeInBits();
  unsigned DestBits = ScalarVT.getSizeInBits();
  unsigned BitsDiff = DestBits - SrcBits;

  if (!Subtarget->hasBFE())
    return ExpandSIGN_EXTEND_INREG(Op, BitsDiff, DAG);

  SDValue Src = Op.getOperand(0);
  if (VT.isVector()) {
    // Need to scalarize this, and revisit each of the scalars later.
    // TODO: Don't scalarize on Evergreen?
    unsigned NElts = VT.getVectorNumElements();
    SmallVector<SDValue, 8> Args;
    ExtractVectorElements(Src, DAG, Args, 0, NElts);

    SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType());
    for (unsigned I = 0; I < NElts; ++I)
      Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp);

    return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Args.data(), Args.size());
  }

  if (SrcBits == 32) {
    // If the source is 32-bits, this is really half of a 2-register pair, and
    // we need to discard the unused half of the pair.
    SDValue TruncSrc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Src);
    return DAG.getNode(ISD::SIGN_EXTEND, DL, VT, TruncSrc);
  }

  unsigned NElts = VT.isVector() ? VT.getVectorNumElements() : 1;

  // TODO: Match 64-bit BFE. SI has a 64-bit BFE, but it's scalar only so it
  // might not be worth the effort, and will need to expand to shifts when
  // fixing SGPR copies.
  if (SrcBits < 32 && DestBits <= 32) {
    MVT ExtVT = (NElts == 1) ? MVT::i32 : MVT::getVectorVT(MVT::i32, NElts);

    if (DestBits != 32)
      Src = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVT, Src);

    // FIXME: This should use TargetConstant, but that hits assertions for
    // Evergreen.
    SDValue Ext = DAG.getNode(AMDGPUISD::BFE_I32, DL, ExtVT,
                              Op.getOperand(0), // Operand
                              DAG.getConstant(0, ExtVT), // Offset
                              DAG.getConstant(SrcBits, ExtVT)); // Width

    // Truncate to the original type if necessary.
    if (ScalarVT == MVT::i32)
      return Ext;
    return DAG.getNode(ISD::TRUNCATE, DL, VT, Ext);
  }

  // For small types, extend to 32-bits first.
  if (SrcBits < 32) {
    MVT ExtVT = (NElts == 1) ? MVT::i32 : MVT::getVectorVT(MVT::i32, NElts);

    SDValue TruncSrc = DAG.getNode(ISD::TRUNCATE, DL, ExtVT, Src);
    SDValue Ext32 = DAG.getNode(AMDGPUISD::BFE_I32, DL, ExtVT,
                                TruncSrc, // Operand
                                DAG.getConstant(0, ExtVT), // Offset
                                DAG.getConstant(SrcBits, ExtVT)); // Width

    return DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Ext32);
  }

  // For everything else, use the standard bitshift expansion.
  return ExpandSIGN_EXTEND_INREG(Op, BitsDiff, DAG);
}

//===----------------------------------------------------------------------===//
// Helper Functions
//===----------------------------------------------------------------------===//

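/// Reconstruct the original (pre-legalization) argument list: undo the vector
/// splitting and element promotion recorded in Ins so callers can reason
/// about the source function's argument types.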
void AMDGPUTargetLowering::getOriginalFunctionArgs(
                                      SelectionDAG &DAG,
                                      const Function *F,
                                      const SmallVectorImpl<ISD::InputArg> &Ins,
                                      SmallVectorImpl<ISD::InputArg> &OrigIns) const {

  for (unsigned i = 0, e = Ins.size(); i < e; ++i) {
    if (Ins[i].ArgVT == Ins[i].VT) {
      OrigIns.push_back(Ins[i]);
      continue;
    }

    EVT VT;
    if (Ins[i].ArgVT.isVector() && !Ins[i].VT.isVector()) {
      // Vector has been split into scalars.
      VT = Ins[i].ArgVT.getVectorElementType();
    } else if (Ins[i].VT.isVector() && Ins[i].ArgVT.isVector() &&
               Ins[i].ArgVT.getVectorElementType() !=
               Ins[i].VT.getVectorElementType()) {
      // Vector elements have been promoted.
      VT = Ins[i].ArgVT;
    } else {
      // Vector has been split into smaller vectors.
      VT = Ins[i].VT;
    }

    ISD::InputArg Arg(Ins[i].Flags, VT, VT, Ins[i].Used,
                      Ins[i].OrigArgIndex, Ins[i].PartOffset);
    OrigIns.push_back(Arg);
  }
}

bool AMDGPUTargetLowering::isHWTrueValue(SDValue Op) const {
  if (ConstantFPSDNode * CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->isExactlyValue(1.0);
  }
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
    return C->isAllOnesValue();
  }
  return false;
}

bool AMDGPUTargetLowering::isHWFalseValue(SDValue Op) const {
  if (ConstantFPSDNode * CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->getValueAPF().isZero();
  }
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
    return C->isNullValue();
  }
  return false;
}

SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
                                                  const TargetRegisterClass *RC,
                                                  unsigned Reg, EVT VT) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned VirtualRegister;
  if (!MRI.isLiveIn(Reg)) {
    VirtualRegister = MRI.createVirtualRegister(RC);
    MRI.addLiveIn(Reg, VirtualRegister);
  } else {
    VirtualRegister = MRI.getLiveInVirtReg(Reg);
  }
  return DAG.getRegister(VirtualRegister, VT);
}

#define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;

const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  // AMDIL DAG nodes
  NODE_NAME_CASE(CALL);
  NODE_NAME_CASE(UMUL);
  NODE_NAME_CASE(DIV_INF);
  NODE_NAME_CASE(RET_FLAG);
  NODE_NAME_CASE(BRANCH_COND);

  // AMDGPU DAG nodes
  NODE_NAME_CASE(DWORDADDR)
  NODE_NAME_CASE(FRACT)
  NODE_NAME_CASE(FMAX)
  NODE_NAME_CASE(SMAX)
  NODE_NAME_CASE(UMAX)
  NODE_NAME_CASE(FMIN)
  NODE_NAME_CASE(SMIN)
  NODE_NAME_CASE(UMIN)
  NODE_NAME_CASE(BFE_U32)
  NODE_NAME_CASE(BFE_I32)
  NODE_NAME_CASE(URECIP)
  NODE_NAME_CASE(DOT4)
  NODE_NAME_CASE(EXPORT)
  NODE_NAME_CASE(CONST_ADDRESS)
  NODE_NAME_CASE(REGISTER_LOAD)
  NODE_NAME_CASE(REGISTER_STORE)
  NODE_NAME_CASE(LOAD_CONSTANT)
  NODE_NAME_CASE(LOAD_INPUT)
  NODE_NAME_CASE(SAMPLE)
  NODE_NAME_CASE(SAMPLEB)
  NODE_NAME_CASE(SAMPLED)
  NODE_NAME_CASE(SAMPLEL)
  NODE_NAME_CASE(STORE_MSKOR)
  NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
  }
}