//===-- NVPTXISelLowering.cpp - NVPTX DAG Lowering Implementation --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that NVPTX uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#include "NVPTXISelLowering.h"
#include "NVPTX.h"
#include "NVPTXTargetMachine.h"
#include "NVPTXTargetObjectFile.h"
#include "NVPTXUtilities.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <sstream>

using namespace llvm;

#define DEBUG_TYPE "nvptx-lower"
static unsigned int uniqueCallSite = 0;

static cl::opt<bool> sched4reg(
    "nvptx-sched4reg",
    cl::desc("NVPTX Specific: schedule for register pressure"), cl::init(false));

static bool IsPTXVectorType(MVT VT) {
  switch (VT.SimpleTy) {
  default:
    return false;
  case MVT::v2i1:
  case MVT::v4i1:
  case MVT::v2i8:
  case MVT::v4i8:
  case MVT::v2i16:
  case MVT::v4i16:
  case MVT::v2i32:
  case MVT::v4i32:
  case MVT::v2i64:
  case MVT::v2f32:
  case MVT::v4f32:
  case MVT::v2f64:
    return true;
  }
}
/// ComputePTXValueVTs - For the given Type \p Ty, returns the set of primitive
/// EVTs that compose it. Unlike ComputeValueVTs, this will break apart vectors
/// into their primitive components.
/// NOTE: This is a band-aid for code that expects ComputeValueVTs to return
/// the same number of types as the Ins/Outs arrays in LowerFormalArguments,
/// LowerCall, and LowerReturn.
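/// For example, <4 x float> yields four f32 entries at offsets 0, 4, 8, and
/// 12, where ComputeValueVTs would return a single v4f32 at offset 0.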
static void ComputePTXValueVTs(const TargetLowering &TLI, Type *Ty,
                               SmallVectorImpl<EVT> &ValueVTs,
                               SmallVectorImpl<uint64_t> *Offsets = nullptr,
                               uint64_t StartingOffset = 0) {
  SmallVector<EVT, 16> TempVTs;
  SmallVector<uint64_t, 16> TempOffsets;

  ComputeValueVTs(TLI, Ty, TempVTs, &TempOffsets, StartingOffset);
  for (unsigned i = 0, e = TempVTs.size(); i != e; ++i) {
    EVT VT = TempVTs[i];
    uint64_t Off = TempOffsets[i];
    if (VT.isVector()) {
      for (unsigned j = 0, je = VT.getVectorNumElements(); j != je; ++j) {
        ValueVTs.push_back(VT.getVectorElementType());
        if (Offsets)
          Offsets->push_back(Off + j * VT.getVectorElementType().getStoreSize());
      }
    } else {
      ValueVTs.push_back(VT);
      if (Offsets)
        Offsets->push_back(Off);
    }
  }
}
// NVPTXTargetLowering Constructor.
NVPTXTargetLowering::NVPTXTargetLowering(NVPTXTargetMachine &TM)
    : TargetLowering(TM, new NVPTXTargetObjectFile()), nvTM(&TM),
      nvptxSubtarget(TM.getSubtarget<NVPTXSubtarget>()) {

  // Always lower memset, memcpy, and memmove intrinsics to load/store
  // instructions, rather than generating calls to memset, memcpy, or memmove.
  MaxStoresPerMemset = (unsigned) 0xFFFFFFFF;
  MaxStoresPerMemcpy = (unsigned) 0xFFFFFFFF;
  MaxStoresPerMemmove = (unsigned) 0xFFFFFFFF;

  setBooleanContents(ZeroOrNegativeOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // Jump is expensive. Don't create extra control flow for 'and' and 'or'
  // condition branches.
  setJumpIsExpensive(true);

  // By default, use the Source scheduling.
  if (sched4reg)
    setSchedulingPreference(Sched::RegPressure);
  else
    setSchedulingPreference(Sched::Source);
  addRegisterClass(MVT::i1, &NVPTX::Int1RegsRegClass);
  addRegisterClass(MVT::i16, &NVPTX::Int16RegsRegClass);
  addRegisterClass(MVT::i32, &NVPTX::Int32RegsRegClass);
  addRegisterClass(MVT::i64, &NVPTX::Int64RegsRegClass);
  addRegisterClass(MVT::f32, &NVPTX::Float32RegsRegClass);
  addRegisterClass(MVT::f64, &NVPTX::Float64RegsRegClass);

  // Operations not directly supported by NVPTX.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i8, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);
  setOperationAction(ISD::BR_CC, MVT::i1, Expand);
  setOperationAction(ISD::BR_CC, MVT::i8, Expand);
  setOperationAction(ISD::BR_CC, MVT::i16, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  // Some SIGN_EXTEND_INREG can be done using cvt instruction.
  // For others we will expand to a SHL/SRA pair.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i64, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
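  // For example (illustrative): sign-extending the low 8 bits of a 32-bit
  // register can be emitted as a single "cvt.s32.s8", whereas i1 has no cvt
  // form and falls back to the SHL/SRA expansion.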
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);

  if (nvptxSubtarget.hasROT64()) {
    setOperationAction(ISD::ROTL, MVT::i64, Legal);
    setOperationAction(ISD::ROTR, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::ROTL, MVT::i64, Expand);
    setOperationAction(ISD::ROTR, MVT::i64, Expand);
  }
  if (nvptxSubtarget.hasROT32()) {
    setOperationAction(ISD::ROTL, MVT::i32, Legal);
    setOperationAction(ISD::ROTR, MVT::i32, Legal);
  } else {
    setOperationAction(ISD::ROTL, MVT::i32, Expand);
    setOperationAction(ISD::ROTR, MVT::i32, Expand);
  }

  setOperationAction(ISD::ROTL, MVT::i16, Expand);
  setOperationAction(ISD::ROTR, MVT::i16, Expand);
  setOperationAction(ISD::ROTL, MVT::i8, Expand);
  setOperationAction(ISD::ROTR, MVT::i8, Expand);
  setOperationAction(ISD::BSWAP, MVT::i16, Expand);
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i64, Expand);
  // Indirect branch is not supported.
  // This also disables Jump Table creation.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRIND, MVT::Other, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);

  // We want to legalize constant-related memmove and memcpy intrinsics.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);

  // Turn FP extload into load/fextend.
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
  // Turn FP truncstore into trunc + store.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  // PTX does not support load / store predicate registers
  setOperationAction(ISD::LOAD, MVT::i1, Custom);
  setOperationAction(ISD::STORE, MVT::i1, Custom);

  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setTruncStoreAction(MVT::i64, MVT::i1, Expand);
  setTruncStoreAction(MVT::i32, MVT::i1, Expand);
  setTruncStoreAction(MVT::i16, MVT::i1, Expand);
  setTruncStoreAction(MVT::i8, MVT::i1, Expand);

  // This is legal in NVPTX
  setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);

  // TRAP can be lowered to PTX trap
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  setOperationAction(ISD::ADDC, MVT::i64, Expand);
  setOperationAction(ISD::ADDE, MVT::i64, Expand);

  // Register custom handling for vector loads/stores
  for (int i = MVT::FIRST_VECTOR_VALUETYPE; i <= MVT::LAST_VECTOR_VALUETYPE;
       ++i) {
    MVT VT = (MVT::SimpleValueType) i;
    if (IsPTXVectorType(VT)) {
      setOperationAction(ISD::LOAD, VT, Custom);
      setOperationAction(ISD::STORE, VT, Custom);
      setOperationAction(ISD::INTRINSIC_W_CHAIN, VT, Custom);
    }
  }

  // Custom handling for i8 intrinsics
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);

  setOperationAction(ISD::CTLZ, MVT::i16, Legal);
  setOperationAction(ISD::CTLZ, MVT::i32, Legal);
  setOperationAction(ISD::CTLZ, MVT::i64, Legal);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Legal);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Legal);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Legal);
  setOperationAction(ISD::CTTZ, MVT::i16, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i64, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  setOperationAction(ISD::CTPOP, MVT::i16, Legal);
  setOperationAction(ISD::CTPOP, MVT::i32, Legal);
  setOperationAction(ISD::CTPOP, MVT::i64, Legal);
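  // PTX provides clz and popc instructions directly, which is why CTLZ and
  // CTPOP are Legal here; PTX has no cttz equivalent, so CTTZ is expanded.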
  // We have some custom DAG combine patterns for these nodes
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::SHL);

  // Now deduce the information based on the above mentioned declarations.
  computeRegisterProperties();
}
const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default:
    return nullptr;
  case NVPTXISD::CALL:
    return "NVPTXISD::CALL";
  case NVPTXISD::RET_FLAG:
    return "NVPTXISD::RET_FLAG";
  case NVPTXISD::Wrapper:
    return "NVPTXISD::Wrapper";
  case NVPTXISD::DeclareParam:
    return "NVPTXISD::DeclareParam";
  case NVPTXISD::DeclareScalarParam:
    return "NVPTXISD::DeclareScalarParam";
  case NVPTXISD::DeclareRet:
    return "NVPTXISD::DeclareRet";
  case NVPTXISD::DeclareRetParam:
    return "NVPTXISD::DeclareRetParam";
  case NVPTXISD::PrintCall:
    return "NVPTXISD::PrintCall";
  case NVPTXISD::LoadParam:
    return "NVPTXISD::LoadParam";
  case NVPTXISD::LoadParamV2:
    return "NVPTXISD::LoadParamV2";
  case NVPTXISD::LoadParamV4:
    return "NVPTXISD::LoadParamV4";
  case NVPTXISD::StoreParam:
    return "NVPTXISD::StoreParam";
  case NVPTXISD::StoreParamV2:
    return "NVPTXISD::StoreParamV2";
  case NVPTXISD::StoreParamV4:
    return "NVPTXISD::StoreParamV4";
  case NVPTXISD::StoreParamS32:
    return "NVPTXISD::StoreParamS32";
  case NVPTXISD::StoreParamU32:
    return "NVPTXISD::StoreParamU32";
  case NVPTXISD::CallArgBegin:
    return "NVPTXISD::CallArgBegin";
  case NVPTXISD::CallArg:
    return "NVPTXISD::CallArg";
  case NVPTXISD::LastCallArg:
    return "NVPTXISD::LastCallArg";
  case NVPTXISD::CallArgEnd:
    return "NVPTXISD::CallArgEnd";
  case NVPTXISD::CallVoid:
    return "NVPTXISD::CallVoid";
  case NVPTXISD::CallVal:
    return "NVPTXISD::CallVal";
  case NVPTXISD::CallSymbol:
    return "NVPTXISD::CallSymbol";
  case NVPTXISD::Prototype:
    return "NVPTXISD::Prototype";
  case NVPTXISD::MoveParam:
    return "NVPTXISD::MoveParam";
  case NVPTXISD::StoreRetval:
    return "NVPTXISD::StoreRetval";
  case NVPTXISD::StoreRetvalV2:
    return "NVPTXISD::StoreRetvalV2";
  case NVPTXISD::StoreRetvalV4:
    return "NVPTXISD::StoreRetvalV4";
  case NVPTXISD::PseudoUseParam:
    return "NVPTXISD::PseudoUseParam";
  case NVPTXISD::RETURN:
    return "NVPTXISD::RETURN";
  case NVPTXISD::CallSeqBegin:
    return "NVPTXISD::CallSeqBegin";
  case NVPTXISD::CallSeqEnd:
    return "NVPTXISD::CallSeqEnd";
  case NVPTXISD::CallPrototype:
    return "NVPTXISD::CallPrototype";
  case NVPTXISD::LoadV2:
    return "NVPTXISD::LoadV2";
  case NVPTXISD::LoadV4:
    return "NVPTXISD::LoadV4";
  case NVPTXISD::LDGV2:
    return "NVPTXISD::LDGV2";
  case NVPTXISD::LDGV4:
    return "NVPTXISD::LDGV4";
  case NVPTXISD::LDUV2:
    return "NVPTXISD::LDUV2";
  case NVPTXISD::LDUV4:
    return "NVPTXISD::LDUV4";
  case NVPTXISD::StoreV2:
    return "NVPTXISD::StoreV2";
  case NVPTXISD::StoreV4:
    return "NVPTXISD::StoreV4";
  case NVPTXISD::FUN_SHFL_CLAMP:
    return "NVPTXISD::FUN_SHFL_CLAMP";
  case NVPTXISD::FUN_SHFR_CLAMP:
    return "NVPTXISD::FUN_SHFR_CLAMP";
  case NVPTXISD::IMAD:
    return "NVPTXISD::IMAD";
  case NVPTXISD::MUL_WIDE_SIGNED:
    return "NVPTXISD::MUL_WIDE_SIGNED";
  case NVPTXISD::MUL_WIDE_UNSIGNED:
    return "NVPTXISD::MUL_WIDE_UNSIGNED";
  case NVPTXISD::Tex1DFloatI32: return "NVPTXISD::Tex1DFloatI32";
  case NVPTXISD::Tex1DFloatFloat: return "NVPTXISD::Tex1DFloatFloat";
  case NVPTXISD::Tex1DFloatFloatLevel:
    return "NVPTXISD::Tex1DFloatFloatLevel";
  case NVPTXISD::Tex1DFloatFloatGrad:
    return "NVPTXISD::Tex1DFloatFloatGrad";
  case NVPTXISD::Tex1DI32I32: return "NVPTXISD::Tex1DI32I32";
  case NVPTXISD::Tex1DI32Float: return "NVPTXISD::Tex1DI32Float";
  case NVPTXISD::Tex1DI32FloatLevel:
    return "NVPTXISD::Tex1DI32FloatLevel";
  case NVPTXISD::Tex1DI32FloatGrad:
    return "NVPTXISD::Tex1DI32FloatGrad";
  case NVPTXISD::Tex1DArrayFloatI32: return "NVPTXISD::Tex1DArrayFloatI32";
  case NVPTXISD::Tex1DArrayFloatFloat: return "NVPTXISD::Tex1DArrayFloatFloat";
  case NVPTXISD::Tex1DArrayFloatFloatLevel:
    return "NVPTXISD::Tex1DArrayFloatFloatLevel";
  case NVPTXISD::Tex1DArrayFloatFloatGrad:
    return "NVPTXISD::Tex1DArrayFloatFloatGrad";
  case NVPTXISD::Tex1DArrayI32I32: return "NVPTXISD::Tex1DArrayI32I32";
  case NVPTXISD::Tex1DArrayI32Float: return "NVPTXISD::Tex1DArrayI32Float";
  case NVPTXISD::Tex1DArrayI32FloatLevel:
    return "NVPTXISD::Tex1DArrayI32FloatLevel";
  case NVPTXISD::Tex1DArrayI32FloatGrad:
    return "NVPTXISD::Tex1DArrayI32FloatGrad";
  case NVPTXISD::Tex2DFloatI32: return "NVPTXISD::Tex2DFloatI32";
  case NVPTXISD::Tex2DFloatFloat: return "NVPTXISD::Tex2DFloatFloat";
  case NVPTXISD::Tex2DFloatFloatLevel:
    return "NVPTXISD::Tex2DFloatFloatLevel";
  case NVPTXISD::Tex2DFloatFloatGrad:
    return "NVPTXISD::Tex2DFloatFloatGrad";
  case NVPTXISD::Tex2DI32I32: return "NVPTXISD::Tex2DI32I32";
  case NVPTXISD::Tex2DI32Float: return "NVPTXISD::Tex2DI32Float";
  case NVPTXISD::Tex2DI32FloatLevel:
    return "NVPTXISD::Tex2DI32FloatLevel";
  case NVPTXISD::Tex2DI32FloatGrad:
    return "NVPTXISD::Tex2DI32FloatGrad";
  case NVPTXISD::Tex2DArrayFloatI32: return "NVPTXISD::Tex2DArrayFloatI32";
  case NVPTXISD::Tex2DArrayFloatFloat: return "NVPTXISD::Tex2DArrayFloatFloat";
  case NVPTXISD::Tex2DArrayFloatFloatLevel:
    return "NVPTXISD::Tex2DArrayFloatFloatLevel";
  case NVPTXISD::Tex2DArrayFloatFloatGrad:
    return "NVPTXISD::Tex2DArrayFloatFloatGrad";
  case NVPTXISD::Tex2DArrayI32I32: return "NVPTXISD::Tex2DArrayI32I32";
  case NVPTXISD::Tex2DArrayI32Float: return "NVPTXISD::Tex2DArrayI32Float";
  case NVPTXISD::Tex2DArrayI32FloatLevel:
    return "NVPTXISD::Tex2DArrayI32FloatLevel";
  case NVPTXISD::Tex2DArrayI32FloatGrad:
    return "NVPTXISD::Tex2DArrayI32FloatGrad";
  case NVPTXISD::Tex3DFloatI32: return "NVPTXISD::Tex3DFloatI32";
  case NVPTXISD::Tex3DFloatFloat: return "NVPTXISD::Tex3DFloatFloat";
  case NVPTXISD::Tex3DFloatFloatLevel:
    return "NVPTXISD::Tex3DFloatFloatLevel";
  case NVPTXISD::Tex3DFloatFloatGrad:
    return "NVPTXISD::Tex3DFloatFloatGrad";
  case NVPTXISD::Tex3DI32I32: return "NVPTXISD::Tex3DI32I32";
  case NVPTXISD::Tex3DI32Float: return "NVPTXISD::Tex3DI32Float";
  case NVPTXISD::Tex3DI32FloatLevel:
    return "NVPTXISD::Tex3DI32FloatLevel";
  case NVPTXISD::Tex3DI32FloatGrad:
    return "NVPTXISD::Tex3DI32FloatGrad";

  case NVPTXISD::Suld1DI8Trap: return "NVPTXISD::Suld1DI8Trap";
  case NVPTXISD::Suld1DI16Trap: return "NVPTXISD::Suld1DI16Trap";
  case NVPTXISD::Suld1DI32Trap: return "NVPTXISD::Suld1DI32Trap";
  case NVPTXISD::Suld1DV2I8Trap: return "NVPTXISD::Suld1DV2I8Trap";
  case NVPTXISD::Suld1DV2I16Trap: return "NVPTXISD::Suld1DV2I16Trap";
  case NVPTXISD::Suld1DV2I32Trap: return "NVPTXISD::Suld1DV2I32Trap";
  case NVPTXISD::Suld1DV4I8Trap: return "NVPTXISD::Suld1DV4I8Trap";
  case NVPTXISD::Suld1DV4I16Trap: return "NVPTXISD::Suld1DV4I16Trap";
  case NVPTXISD::Suld1DV4I32Trap: return "NVPTXISD::Suld1DV4I32Trap";

  case NVPTXISD::Suld1DArrayI8Trap: return "NVPTXISD::Suld1DArrayI8Trap";
  case NVPTXISD::Suld1DArrayI16Trap: return "NVPTXISD::Suld1DArrayI16Trap";
  case NVPTXISD::Suld1DArrayI32Trap: return "NVPTXISD::Suld1DArrayI32Trap";
  case NVPTXISD::Suld1DArrayV2I8Trap: return "NVPTXISD::Suld1DArrayV2I8Trap";
  case NVPTXISD::Suld1DArrayV2I16Trap: return "NVPTXISD::Suld1DArrayV2I16Trap";
  case NVPTXISD::Suld1DArrayV2I32Trap: return "NVPTXISD::Suld1DArrayV2I32Trap";
  case NVPTXISD::Suld1DArrayV4I8Trap: return "NVPTXISD::Suld1DArrayV4I8Trap";
  case NVPTXISD::Suld1DArrayV4I16Trap: return "NVPTXISD::Suld1DArrayV4I16Trap";
  case NVPTXISD::Suld1DArrayV4I32Trap: return "NVPTXISD::Suld1DArrayV4I32Trap";

  case NVPTXISD::Suld2DI8Trap: return "NVPTXISD::Suld2DI8Trap";
  case NVPTXISD::Suld2DI16Trap: return "NVPTXISD::Suld2DI16Trap";
  case NVPTXISD::Suld2DI32Trap: return "NVPTXISD::Suld2DI32Trap";
  case NVPTXISD::Suld2DV2I8Trap: return "NVPTXISD::Suld2DV2I8Trap";
  case NVPTXISD::Suld2DV2I16Trap: return "NVPTXISD::Suld2DV2I16Trap";
  case NVPTXISD::Suld2DV2I32Trap: return "NVPTXISD::Suld2DV2I32Trap";
  case NVPTXISD::Suld2DV4I8Trap: return "NVPTXISD::Suld2DV4I8Trap";
  case NVPTXISD::Suld2DV4I16Trap: return "NVPTXISD::Suld2DV4I16Trap";
  case NVPTXISD::Suld2DV4I32Trap: return "NVPTXISD::Suld2DV4I32Trap";

  case NVPTXISD::Suld2DArrayI8Trap: return "NVPTXISD::Suld2DArrayI8Trap";
  case NVPTXISD::Suld2DArrayI16Trap: return "NVPTXISD::Suld2DArrayI16Trap";
  case NVPTXISD::Suld2DArrayI32Trap: return "NVPTXISD::Suld2DArrayI32Trap";
  case NVPTXISD::Suld2DArrayV2I8Trap: return "NVPTXISD::Suld2DArrayV2I8Trap";
  case NVPTXISD::Suld2DArrayV2I16Trap: return "NVPTXISD::Suld2DArrayV2I16Trap";
  case NVPTXISD::Suld2DArrayV2I32Trap: return "NVPTXISD::Suld2DArrayV2I32Trap";
  case NVPTXISD::Suld2DArrayV4I8Trap: return "NVPTXISD::Suld2DArrayV4I8Trap";
  case NVPTXISD::Suld2DArrayV4I16Trap: return "NVPTXISD::Suld2DArrayV4I16Trap";
  case NVPTXISD::Suld2DArrayV4I32Trap: return "NVPTXISD::Suld2DArrayV4I32Trap";

  case NVPTXISD::Suld3DI8Trap: return "NVPTXISD::Suld3DI8Trap";
  case NVPTXISD::Suld3DI16Trap: return "NVPTXISD::Suld3DI16Trap";
  case NVPTXISD::Suld3DI32Trap: return "NVPTXISD::Suld3DI32Trap";
  case NVPTXISD::Suld3DV2I8Trap: return "NVPTXISD::Suld3DV2I8Trap";
  case NVPTXISD::Suld3DV2I16Trap: return "NVPTXISD::Suld3DV2I16Trap";
  case NVPTXISD::Suld3DV2I32Trap: return "NVPTXISD::Suld3DV2I32Trap";
  case NVPTXISD::Suld3DV4I8Trap: return "NVPTXISD::Suld3DV4I8Trap";
  case NVPTXISD::Suld3DV4I16Trap: return "NVPTXISD::Suld3DV4I16Trap";
  case NVPTXISD::Suld3DV4I32Trap: return "NVPTXISD::Suld3DV4I32Trap";
  }
}
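// PTX predicate (i1) values live in individual .pred registers; there is no
// vector form, so vectors of i1 must be split back into scalar operations.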
bool NVPTXTargetLowering::shouldSplitVectorType(EVT VT) const {
  return VT.getScalarType() == MVT::i1;
}
SDValue
NVPTXTargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  Op = DAG.getTargetGlobalAddress(GV, dl, getPointerTy());
  return DAG.getNode(NVPTXISD::Wrapper, dl, getPointerTy(), Op);
}
std::string
NVPTXTargetLowering::getPrototype(Type *retTy, const ArgListTy &Args,
                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  unsigned retAlignment,
                                  const ImmutableCallSite *CS) const {

  bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
  assert(isABI && "Non-ABI compilation is not supported");
  if (!isABI)
    return "";

  std::stringstream O;
  O << "prototype_" << uniqueCallSite << " : .callprototype ";
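  // For example, for a callee returning a .b32 and taking a single .b32
  // parameter, this builds a string of the form
  //   prototype_0 : .callprototype (.param .b32 _) _ (.param .b32 _);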
  if (retTy->getTypeID() == Type::VoidTyID) {
    O << "()";
  } else {
    O << "(";
    if (retTy->isFloatingPointTy() || retTy->isIntegerTy()) {
      unsigned size = 0;
      if (const IntegerType *ITy = dyn_cast<IntegerType>(retTy)) {
        size = ITy->getBitWidth();
        if (size < 32)
          size = 32;
      } else {
        assert(retTy->isFloatingPointTy() &&
               "Floating point type expected here");
        size = retTy->getPrimitiveSizeInBits();
      }
      O << ".param .b" << size << " _";
    } else if (isa<PointerType>(retTy)) {
      O << ".param .b" << getPointerTy().getSizeInBits() << " _";
    } else {
      if ((retTy->getTypeID() == Type::StructTyID) || isa<VectorType>(retTy)) {
        O << ".param .align " << retAlignment << " .b8 _["
          << getDataLayout()->getTypeAllocSize(retTy) << "]";
      } else {
        assert(false && "Unknown return type");
      }
    }
    O << ") ";
  }
  O << "_ (";
  MVT thePointerTy = getPointerTy();

  unsigned OIdx = 0;
  for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {
    Type *Ty = Args[i].Ty;
    if (i != 0)
      O << ", ";

    if (!Outs[OIdx].Flags.isByVal()) {
      if (Ty->isAggregateType() || Ty->isVectorTy()) {
        unsigned align = 0;
        const CallInst *CallI = cast<CallInst>(CS->getInstruction());
        const DataLayout *TD = getDataLayout();
        // +1 because index 0 is reserved for return type alignment
        if (!llvm::getAlign(*CallI, i + 1, align))
          align = TD->getABITypeAlignment(Ty);
        unsigned sz = TD->getTypeAllocSize(Ty);
        O << ".param .align " << align << " .b8 ";
        O << "_";
        O << "[" << sz << "]";
        // update the index for Outs
        SmallVector<EVT, 16> vtparts;
        ComputeValueVTs(*this, Ty, vtparts);
        if (unsigned len = vtparts.size())
          OIdx += len - 1;
        continue;
      }
      // i8 types in IR will be i16 types in SDAG
      assert((getValueType(Ty) == Outs[OIdx].VT ||
              (getValueType(Ty) == MVT::i8 && Outs[OIdx].VT == MVT::i16)) &&
             "type mismatch between callee prototype and arguments");
      // scalar type
      unsigned sz = 0;
      if (isa<IntegerType>(Ty)) {
        sz = cast<IntegerType>(Ty)->getBitWidth();
        if (sz < 32)
          sz = 32;
      } else if (isa<PointerType>(Ty))
        sz = thePointerTy.getSizeInBits();
      else
        sz = Ty->getPrimitiveSizeInBits();
      O << ".param .b" << sz << " ";
      O << "_";
      continue;
    }
    const PointerType *PTy = dyn_cast<PointerType>(Ty);
    assert(PTy && "Param with byval attribute should be a pointer type");
    Type *ETy = PTy->getElementType();

    unsigned align = Outs[OIdx].Flags.getByValAlign();
    unsigned sz = getDataLayout()->getTypeAllocSize(ETy);
    O << ".param .align " << align << " .b8 ";
    O << "_";
    O << "[" << sz << "]";
  }
  O << ");";
  return O.str();
}
unsigned
NVPTXTargetLowering::getArgumentAlignment(SDValue Callee,
                                          const ImmutableCallSite *CS,
                                          Type *Ty,
                                          unsigned Idx) const {
  const DataLayout *TD = getDataLayout();
  unsigned Align = 0;
  const Value *DirectCallee = CS->getCalledFunction();

  if (!DirectCallee) {
    // We don't have a direct function symbol, but that may be because of
    // constant cast instructions in the call.
    const Instruction *CalleeI = CS->getInstruction();
    assert(CalleeI && "Call target is not a function or derived value?");

    // With bitcast'd call targets, the instruction will be the call
    if (isa<CallInst>(CalleeI)) {
      // Check if we have call alignment metadata
      if (llvm::getAlign(*cast<CallInst>(CalleeI), Idx, Align))
        return Align;

      const Value *CalleeV = cast<CallInst>(CalleeI)->getCalledValue();
      // Ignore any bitcast instructions
      while (isa<ConstantExpr>(CalleeV)) {
        const ConstantExpr *CE = cast<ConstantExpr>(CalleeV);
        if (!CE->isCast())
          break;
        // Look through the bitcast
        CalleeV = cast<ConstantExpr>(CalleeV)->getOperand(0);
      }

      // We have now looked past all of the bitcasts. Do we finally have a
      // Function?
      if (isa<Function>(CalleeV))
        DirectCallee = CalleeV;
    }
  }

  // Check for function alignment information if we found that the
  // ultimate target is a Function
  if (DirectCallee)
    if (llvm::getAlign(*cast<Function>(DirectCallee), Idx, Align))
      return Align;

  // Call is indirect or alignment information is not available, fall back to
  // the ABI type alignment
  return TD->getABITypeAlignment(Ty);
}
SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                       SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  ArgListTy &Args = CLI.getArgs();
  Type *retTy = CLI.RetTy;
  ImmutableCallSite *CS = CLI.CS;

  bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
  assert(isABI && "Non-ABI compilation is not supported");
  if (!isABI)
    return Chain;

  const DataLayout *TD = getDataLayout();
  MachineFunction &MF = DAG.getMachineFunction();
  const Function *F = MF.getFunction();

  SDValue tempChain = Chain;
  Chain = DAG.getCALLSEQ_START(Chain,
                               DAG.getIntPtrConstant(uniqueCallSite, true), dl);
  SDValue InFlag = Chain.getValue(1);

  unsigned paramCount = 0;
  // Args.size() and Outs.size() need not match.
  // Outs.size() will be larger
  //   * if there is an aggregate argument with multiple fields (each field
  //     showing up separately in Outs)
  //   * if there is a vector argument with more than typical vector-length
  //     elements (generally if more than 4) where each vector element is
  //     individually present in Outs.
  // So a different index should be used for indexing into Outs/OutVals.
  // See similar issue in LowerFormalArguments.
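  // For example (illustrative): a struct { i32, float } passed by value is a
  // single Args entry but two Outs/OutVals entries, one per scalar field.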
  // Declare the .params or .regs needed to pass values to the function.
  unsigned OIdx = 0;
  for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {
    EVT VT = Outs[OIdx].VT;
    Type *Ty = Args[i].Ty;

    if (!Outs[OIdx].Flags.isByVal()) {
      if (Ty->isAggregateType()) {
        // aggregate
        SmallVector<EVT, 16> vtparts;
        SmallVector<uint64_t, 16> Offsets;
        ComputePTXValueVTs(*this, Ty, vtparts, &Offsets, 0);

        unsigned align = getArgumentAlignment(Callee, CS, Ty, paramCount + 1);
        // declare .param .align <align> .b8 .param<n>[<size>];
        unsigned sz = TD->getTypeAllocSize(Ty);
        SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
        SDValue DeclareParamOps[] = { Chain, DAG.getConstant(align, MVT::i32),
                                      DAG.getConstant(paramCount, MVT::i32),
                                      DAG.getConstant(sz, MVT::i32), InFlag };
        Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
                            DeclareParamOps);
        InFlag = Chain.getValue(1);
        for (unsigned j = 0, je = vtparts.size(); j != je; ++j) {
          EVT elemtype = vtparts[j];
          unsigned ArgAlign = GreatestCommonDivisor64(align, Offsets[j]);
          if (elemtype.isInteger() && (sz < 8))
            sz = 8;
          SDValue StVal = OutVals[OIdx];
          if (elemtype.getSizeInBits() < 16) {
            StVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, StVal);
          }
          SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
          SDValue CopyParamOps[] = { Chain,
                                     DAG.getConstant(paramCount, MVT::i32),
                                     DAG.getConstant(Offsets[j], MVT::i32),
                                     StVal, InFlag };
          Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl,
                                          CopyParamVTs, CopyParamOps,
                                          elemtype, MachinePointerInfo(),
                                          ArgAlign);
          InFlag = Chain.getValue(1);
          ++OIdx;
        }
        if (vtparts.size() > 0)
          --OIdx;
        ++paramCount;
        continue;
      }
      if (Ty->isVectorTy()) {
        EVT ObjectVT = getValueType(Ty);
        unsigned align = getArgumentAlignment(Callee, CS, Ty, paramCount + 1);
        // declare .param .align <align> .b8 .param<n>[<size>];
        unsigned sz = TD->getTypeAllocSize(Ty);
        SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
        SDValue DeclareParamOps[] = { Chain, DAG.getConstant(align, MVT::i32),
                                      DAG.getConstant(paramCount, MVT::i32),
                                      DAG.getConstant(sz, MVT::i32), InFlag };
        Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
                            DeclareParamOps);
        InFlag = Chain.getValue(1);
        unsigned NumElts = ObjectVT.getVectorNumElements();
        EVT EltVT = ObjectVT.getVectorElementType();
        EVT MemVT = EltVT;
        bool NeedExtend = false;
        if (EltVT.getSizeInBits() < 16) {
          NeedExtend = true;
          EltVT = MVT::i16;
        }

        // V1 store
        if (NumElts == 1) {
          SDValue Elt = OutVals[OIdx++];
          if (NeedExtend)
            Elt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Elt);

          SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
          SDValue CopyParamOps[] = { Chain,
                                     DAG.getConstant(paramCount, MVT::i32),
                                     DAG.getConstant(0, MVT::i32), Elt,
                                     InFlag };
          Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl,
                                          CopyParamVTs, CopyParamOps,
                                          MemVT, MachinePointerInfo());
          InFlag = Chain.getValue(1);
        } else if (NumElts == 2) {
          SDValue Elt0 = OutVals[OIdx++];
          SDValue Elt1 = OutVals[OIdx++];
          if (NeedExtend) {
            Elt0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Elt0);
            Elt1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Elt1);
          }

          SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
          SDValue CopyParamOps[] = { Chain,
                                     DAG.getConstant(paramCount, MVT::i32),
                                     DAG.getConstant(0, MVT::i32), Elt0, Elt1,
                                     InFlag };
          Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParamV2, dl,
                                          CopyParamVTs, CopyParamOps,
                                          MemVT, MachinePointerInfo());
          InFlag = Chain.getValue(1);
        } else {
          unsigned curOffset = 0;
          // V4 stores
          // We have at least 4 elements (<3 x Ty> expands to 4 elements) and
          // the vector will be expanded to a power of 2 elements, so we know
          // we can always round up to the next multiple of 4 when creating
          // the vector stores.
          // e.g.  4 elem => 1 st.v4
          //       6 elem => 2 st.v4
          //       8 elem => 2 st.v4
          //      11 elem => 3 st.v4
          unsigned VecSize = 4;
          if (EltVT.getSizeInBits() == 64)
            VecSize = 2;

          // This is potentially only part of a vector, so assume all elements
          // are packed together.
          unsigned PerStoreOffset = MemVT.getStoreSizeInBits() / 8 * VecSize;

          for (unsigned i = 0; i < NumElts; i += VecSize) {
            // Get values
            SDValue StoreVal;
            SmallVector<SDValue, 8> Ops;
            Ops.push_back(Chain);
            Ops.push_back(DAG.getConstant(paramCount, MVT::i32));
            Ops.push_back(DAG.getConstant(curOffset, MVT::i32));

            unsigned Opc = NVPTXISD::StoreParamV2;

            StoreVal = OutVals[OIdx++];
            if (NeedExtend)
              StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
            Ops.push_back(StoreVal);

            if (i + 1 < NumElts) {
              StoreVal = OutVals[OIdx++];
              if (NeedExtend)
                StoreVal =
                    DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
            } else {
              StoreVal = DAG.getUNDEF(EltVT);
            }
            Ops.push_back(StoreVal);

            if (VecSize == 4) {
              Opc = NVPTXISD::StoreParamV4;
              if (i + 2 < NumElts) {
                StoreVal = OutVals[OIdx++];
                if (NeedExtend)
                  StoreVal =
                      DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
              } else {
                StoreVal = DAG.getUNDEF(EltVT);
              }
              Ops.push_back(StoreVal);

              if (i + 3 < NumElts) {
                StoreVal = OutVals[OIdx++];
                if (NeedExtend)
                  StoreVal =
                      DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
              } else {
                StoreVal = DAG.getUNDEF(EltVT);
              }
              Ops.push_back(StoreVal);
            }

            Ops.push_back(InFlag);

            SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
            Chain = DAG.getMemIntrinsicNode(Opc, dl, CopyParamVTs, Ops,
                                            MemVT, MachinePointerInfo());
            InFlag = Chain.getValue(1);
            curOffset += PerStoreOffset;
          }
        }
        ++paramCount;
        continue;
      }
      // Plain scalar
      // for ABI, declare .param .b<size> .param<n>;
      unsigned sz = VT.getSizeInBits();
      bool needExtend = false;
      if (VT.isInteger()) {
        if (sz < 16)
          needExtend = true;
        if (sz < 32)
          sz = 32;
      }
      SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
      SDValue DeclareParamOps[] = { Chain,
                                    DAG.getConstant(paramCount, MVT::i32),
                                    DAG.getConstant(sz, MVT::i32),
                                    DAG.getConstant(0, MVT::i32), InFlag };
      Chain = DAG.getNode(NVPTXISD::DeclareScalarParam, dl, DeclareParamVTs,
                          DeclareParamOps);
      InFlag = Chain.getValue(1);
      SDValue OutV = OutVals[OIdx];
      if (needExtend) {
        // zext/sext i1 to i16
        unsigned opc = ISD::ZERO_EXTEND;
        if (Outs[OIdx].Flags.isSExt())
          opc = ISD::SIGN_EXTEND;
        OutV = DAG.getNode(opc, dl, MVT::i16, OutV);
      }
      SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
      SDValue CopyParamOps[] = { Chain, DAG.getConstant(paramCount, MVT::i32),
                                 DAG.getConstant(0, MVT::i32), OutV, InFlag };

      unsigned opcode = NVPTXISD::StoreParam;
      if (Outs[OIdx].Flags.isZExt())
        opcode = NVPTXISD::StoreParamU32;
      else if (Outs[OIdx].Flags.isSExt())
        opcode = NVPTXISD::StoreParamS32;
      Chain = DAG.getMemIntrinsicNode(opcode, dl, CopyParamVTs, CopyParamOps,
                                      VT, MachinePointerInfo());

      InFlag = Chain.getValue(1);
      ++paramCount;
      continue;
    }
    // struct or vector
    SmallVector<EVT, 16> vtparts;
    SmallVector<uint64_t, 16> Offsets;
    const PointerType *PTy = dyn_cast<PointerType>(Args[i].Ty);
    assert(PTy && "Type of a byval parameter should be pointer");
    ComputePTXValueVTs(*this, PTy->getElementType(), vtparts, &Offsets, 0);

    // declare .param .align <align> .b8 .param<n>[<size>];
    unsigned sz = Outs[OIdx].Flags.getByValSize();
    SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
    unsigned ArgAlign = Outs[OIdx].Flags.getByValAlign();
    // The ByValAlign in the Outs[OIdx].Flags is always set at this point,
    // so we don't need to worry about natural alignment or not.
    // See TargetLowering::LowerCallTo().
    SDValue DeclareParamOps[] = {
      Chain, DAG.getConstant(Outs[OIdx].Flags.getByValAlign(), MVT::i32),
      DAG.getConstant(paramCount, MVT::i32), DAG.getConstant(sz, MVT::i32),
      InFlag
    };
    Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
                        DeclareParamOps);
    InFlag = Chain.getValue(1);
    for (unsigned j = 0, je = vtparts.size(); j != je; ++j) {
      EVT elemtype = vtparts[j];
      int curOffset = Offsets[j];
      unsigned PartAlign = GreatestCommonDivisor64(ArgAlign, curOffset);
      SDValue srcAddr =
          DAG.getNode(ISD::ADD, dl, getPointerTy(), OutVals[OIdx],
                      DAG.getConstant(curOffset, getPointerTy()));
      SDValue theVal = DAG.getLoad(elemtype, dl, tempChain, srcAddr,
                                   MachinePointerInfo(), false, false, false,
                                   PartAlign);
      if (elemtype.getSizeInBits() < 16) {
        theVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, theVal);
      }
      SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
      SDValue CopyParamOps[] = { Chain, DAG.getConstant(paramCount, MVT::i32),
                                 DAG.getConstant(curOffset, MVT::i32), theVal,
                                 InFlag };
      Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl, CopyParamVTs,
                                      CopyParamOps, elemtype,
                                      MachinePointerInfo());

      InFlag = Chain.getValue(1);
    }
    ++paramCount;
  }
  GlobalAddressSDNode *Func = dyn_cast<GlobalAddressSDNode>(Callee.getNode());
  unsigned retAlignment = 0;

  // Handle Result
  if (Ins.size() > 0) {
    SmallVector<EVT, 16> resvtparts;
    ComputeValueVTs(*this, retTy, resvtparts);

    // Declare
    //  .param .align 16 .b8 retval0[<size-in-bytes>], or
    //  .param .b<size-in-bits> retval0
    unsigned resultsz = TD->getTypeAllocSizeInBits(retTy);
    if (retTy->isSingleValueType()) {
      // Scalar needs to be at least 32bit wide
      if (resultsz < 32)
        resultsz = 32;
      SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
      SDValue DeclareRetOps[] = { Chain, DAG.getConstant(1, MVT::i32),
                                  DAG.getConstant(resultsz, MVT::i32),
                                  DAG.getConstant(0, MVT::i32), InFlag };
      Chain = DAG.getNode(NVPTXISD::DeclareRet, dl, DeclareRetVTs,
                          DeclareRetOps);
      InFlag = Chain.getValue(1);
    } else {
      retAlignment = getArgumentAlignment(Callee, CS, retTy, 0);
      SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
      SDValue DeclareRetOps[] = { Chain,
                                  DAG.getConstant(retAlignment, MVT::i32),
                                  DAG.getConstant(resultsz / 8, MVT::i32),
                                  DAG.getConstant(0, MVT::i32), InFlag };
      Chain = DAG.getNode(NVPTXISD::DeclareRetParam, dl, DeclareRetVTs,
                          DeclareRetOps);
      InFlag = Chain.getValue(1);
    }
  }
  if (!Func) {
    // This is an indirect function call: PTX requires a prototype of the
    // form
    //   proto_0 : .callprototype(.param .b32 _) _ (.param .b32 _);
    // to be emitted, and the label has to be used as the last arg of the call
    // instruction.
    // The prototype is embedded in a string and put as the operand for a
    // CallPrototype SDNode which will print out to the value of the string.
    SDVTList ProtoVTs = DAG.getVTList(MVT::Other, MVT::Glue);
    std::string Proto = getPrototype(retTy, Args, Outs, retAlignment, CS);
    const char *ProtoStr =
        nvTM->getManagedStrPool()->getManagedString(Proto.c_str())->c_str();
    SDValue ProtoOps[] = {
      Chain, DAG.getTargetExternalSymbol(ProtoStr, MVT::i32), InFlag,
    };
    Chain = DAG.getNode(NVPTXISD::CallPrototype, dl, ProtoVTs, ProtoOps);
    InFlag = Chain.getValue(1);
  }
  // Op to just print "call"
  SDVTList PrintCallVTs = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue PrintCallOps[] = {
    Chain, DAG.getConstant((Ins.size() == 0) ? 0 : 1, MVT::i32), InFlag
  };
  Chain = DAG.getNode(Func ? (NVPTXISD::PrintCallUni) : (NVPTXISD::PrintCall),
                      dl, PrintCallVTs, PrintCallOps);
  InFlag = Chain.getValue(1);

  // Ops to print out the function name
  SDVTList CallVoidVTs = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue CallVoidOps[] = { Chain, Callee, InFlag };
  Chain = DAG.getNode(NVPTXISD::CallVoid, dl, CallVoidVTs, CallVoidOps);
  InFlag = Chain.getValue(1);

  // Ops to print out the param list
  SDVTList CallArgBeginVTs = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue CallArgBeginOps[] = { Chain, InFlag };
  Chain = DAG.getNode(NVPTXISD::CallArgBegin, dl, CallArgBeginVTs,
                      CallArgBeginOps);
  InFlag = Chain.getValue(1);

  for (unsigned i = 0, e = paramCount; i != e; ++i) {
    unsigned opcode;
    if (i == (e - 1))
      opcode = NVPTXISD::LastCallArg;
    else
      opcode = NVPTXISD::CallArg;
    SDVTList CallArgVTs = DAG.getVTList(MVT::Other, MVT::Glue);
    SDValue CallArgOps[] = { Chain, DAG.getConstant(1, MVT::i32),
                             DAG.getConstant(i, MVT::i32), InFlag };
    Chain = DAG.getNode(opcode, dl, CallArgVTs, CallArgOps);
    InFlag = Chain.getValue(1);
  }
  SDVTList CallArgEndVTs = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue CallArgEndOps[] = { Chain, DAG.getConstant(Func ? 1 : 0, MVT::i32),
                              InFlag };
  Chain = DAG.getNode(NVPTXISD::CallArgEnd, dl, CallArgEndVTs, CallArgEndOps);
  InFlag = Chain.getValue(1);

  if (!Func) {
    SDVTList PrototypeVTs = DAG.getVTList(MVT::Other, MVT::Glue);
    SDValue PrototypeOps[] = { Chain, DAG.getConstant(uniqueCallSite, MVT::i32),
                               InFlag };
    Chain = DAG.getNode(NVPTXISD::Prototype, dl, PrototypeVTs, PrototypeOps);
    InFlag = Chain.getValue(1);
  }
  // Generate loads from param memory/moves from registers for result
  if (Ins.size() > 0) {
    if (retTy && retTy->isVectorTy()) {
      EVT ObjectVT = getValueType(retTy);
      unsigned NumElts = ObjectVT.getVectorNumElements();
      EVT EltVT = ObjectVT.getVectorElementType();
      assert(nvTM->getTargetLowering()->getNumRegisters(F->getContext(),
                                                        ObjectVT) == NumElts &&
             "Vector was not scalarized");
      unsigned sz = EltVT.getSizeInBits();
      bool needTruncate = sz < 8;

      if (NumElts == 1) {
        // Just a simple load
        SmallVector<EVT, 4> LoadRetVTs;
        if (EltVT == MVT::i1 || EltVT == MVT::i8) {
          // If loading i1/i8 result, generate
          //   load i16
          //   trunc i16 to i1/i8
          LoadRetVTs.push_back(MVT::i16);
        } else
          LoadRetVTs.push_back(EltVT);
        LoadRetVTs.push_back(MVT::Other);
        LoadRetVTs.push_back(MVT::Glue);
        SmallVector<SDValue, 4> LoadRetOps;
        LoadRetOps.push_back(Chain);
        LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
        LoadRetOps.push_back(DAG.getConstant(0, MVT::i32));
        LoadRetOps.push_back(InFlag);
        SDValue retval = DAG.getMemIntrinsicNode(
            NVPTXISD::LoadParam, dl,
            DAG.getVTList(LoadRetVTs), LoadRetOps, EltVT, MachinePointerInfo());
        Chain = retval.getValue(1);
        InFlag = retval.getValue(2);
        SDValue Ret0 = retval;
        if (needTruncate)
          Ret0 = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Ret0);
        InVals.push_back(Ret0);
      } else if (NumElts == 2) {
        // LoadV2
        SmallVector<EVT, 4> LoadRetVTs;
        if (EltVT == MVT::i1 || EltVT == MVT::i8) {
          // If loading i1/i8 result, generate
          //   load i16
          //   trunc i16 to i1/i8
          LoadRetVTs.push_back(MVT::i16);
          LoadRetVTs.push_back(MVT::i16);
        } else {
          LoadRetVTs.push_back(EltVT);
          LoadRetVTs.push_back(EltVT);
        }
        LoadRetVTs.push_back(MVT::Other);
        LoadRetVTs.push_back(MVT::Glue);
        SmallVector<SDValue, 4> LoadRetOps;
        LoadRetOps.push_back(Chain);
        LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
        LoadRetOps.push_back(DAG.getConstant(0, MVT::i32));
        LoadRetOps.push_back(InFlag);
        SDValue retval = DAG.getMemIntrinsicNode(
            NVPTXISD::LoadParamV2, dl,
            DAG.getVTList(LoadRetVTs), LoadRetOps, EltVT, MachinePointerInfo());
        Chain = retval.getValue(2);
        InFlag = retval.getValue(3);
        SDValue Ret0 = retval.getValue(0);
        SDValue Ret1 = retval.getValue(1);
        if (needTruncate) {
          Ret0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ret0);
          InVals.push_back(Ret0);
          Ret1 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ret1);
          InVals.push_back(Ret1);
        } else {
          InVals.push_back(Ret0);
          InVals.push_back(Ret1);
        }
      } else {
        // Split into N LoadV4
        unsigned Ofst = 0;
        unsigned VecSize = 4;
        unsigned Opc = NVPTXISD::LoadParamV4;
        if (EltVT.getSizeInBits() == 64) {
          VecSize = 2;
          Opc = NVPTXISD::LoadParamV2;
        }
        EVT VecVT = EVT::getVectorVT(F->getContext(), EltVT, VecSize);
        for (unsigned i = 0; i < NumElts; i += VecSize) {
          SmallVector<EVT, 8> LoadRetVTs;
          if (EltVT == MVT::i1 || EltVT == MVT::i8) {
            // If loading i1/i8 result, generate
            //   load i16
            //   trunc i16 to i1/i8
            for (unsigned j = 0; j < VecSize; ++j)
              LoadRetVTs.push_back(MVT::i16);
          } else {
            for (unsigned j = 0; j < VecSize; ++j)
              LoadRetVTs.push_back(EltVT);
          }
          LoadRetVTs.push_back(MVT::Other);
          LoadRetVTs.push_back(MVT::Glue);
          SmallVector<SDValue, 4> LoadRetOps;
          LoadRetOps.push_back(Chain);
          LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
          LoadRetOps.push_back(DAG.getConstant(Ofst, MVT::i32));
          LoadRetOps.push_back(InFlag);
          SDValue retval = DAG.getMemIntrinsicNode(
              Opc, dl, DAG.getVTList(LoadRetVTs),
              LoadRetOps, EltVT, MachinePointerInfo());
          if (VecSize == 2) {
            Chain = retval.getValue(2);
            InFlag = retval.getValue(3);
          } else {
            Chain = retval.getValue(4);
            InFlag = retval.getValue(5);
          }

          for (unsigned j = 0; j < VecSize; ++j) {
            if (i + j >= NumElts)
              break;
            SDValue Elt = retval.getValue(j);
            if (needTruncate)
              Elt = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
            InVals.push_back(Elt);
          }
          Ofst += TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
        }
      }
    } else {
      SmallVector<EVT, 16> VTs;
      SmallVector<uint64_t, 16> Offsets;
      ComputePTXValueVTs(*this, retTy, VTs, &Offsets, 0);
      assert(VTs.size() == Ins.size() && "Bad value decomposition");
      unsigned RetAlign = getArgumentAlignment(Callee, CS, retTy, 0);
      for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
        unsigned sz = VTs[i].getSizeInBits();
        unsigned AlignI = GreatestCommonDivisor64(RetAlign, Offsets[i]);
        bool needTruncate = sz < 8;
        if (VTs[i].isInteger() && (sz < 8))
          sz = 8;

        SmallVector<EVT, 4> LoadRetVTs;
        EVT TheLoadType = VTs[i];
        if (retTy->isIntegerTy() &&
            TD->getTypeAllocSizeInBits(retTy) < 32) {
          // This is for integer types only, and specifically not for
          // aggregates.
          LoadRetVTs.push_back(MVT::i32);
          TheLoadType = MVT::i32;
        } else if (sz < 16) {
          // If loading i1/i8 result, generate
          //   load i16
          //   trunc i16 to i1/i8
          LoadRetVTs.push_back(MVT::i16);
        } else
          LoadRetVTs.push_back(Ins[i].VT);
        LoadRetVTs.push_back(MVT::Other);
        LoadRetVTs.push_back(MVT::Glue);

        SmallVector<SDValue, 4> LoadRetOps;
        LoadRetOps.push_back(Chain);
        LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
        LoadRetOps.push_back(DAG.getConstant(Offsets[i], MVT::i32));
        LoadRetOps.push_back(InFlag);
        SDValue retval = DAG.getMemIntrinsicNode(
            NVPTXISD::LoadParam, dl,
            DAG.getVTList(LoadRetVTs), LoadRetOps,
            TheLoadType, MachinePointerInfo(), AlignI);
        Chain = retval.getValue(1);
        InFlag = retval.getValue(2);
        SDValue Ret0 = retval.getValue(0);
        if (needTruncate)
          Ret0 = DAG.getNode(ISD::TRUNCATE, dl, Ins[i].VT, Ret0);
        InVals.push_back(Ret0);
      }
    }
  }
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(uniqueCallSite, true),
                             DAG.getIntPtrConstant(uniqueCallSite + 1, true),
                             InFlag, dl);
  uniqueCallSite++;

  // Set isTailCall to false for now, until we figure out how to express
  // tail call optimization in PTX.
  isTailCall = false;
  return Chain;
}
// By default CONCAT_VECTORS is lowered by ExpandVectorBuildThroughStack()
// (see LegalizeDAG.cpp). This is slow and uses local memory.
// We use extract/insert/build-vector instead, just as LegalizeOp() did in
// LLVM 2.5.
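// For example (illustrative): concatenating two v2f32 operands becomes a
// BUILD_VECTOR of the four extracted f32 elements, avoiding a round-trip
// through a stack temporary in local memory.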
SDValue
NVPTXTargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
  SDNode *Node = Op.getNode();
  SDLoc dl(Node);
  SmallVector<SDValue, 8> Ops;
  unsigned NumOperands = Node->getNumOperands();
  for (unsigned i = 0; i < NumOperands; ++i) {
    SDValue SubOp = Node->getOperand(i);
    EVT VVT = SubOp.getNode()->getValueType(0);
    EVT EltVT = VVT.getVectorElementType();
    unsigned NumSubElem = VVT.getVectorNumElements();
    for (unsigned j = 0; j < NumSubElem; ++j) {
      Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, SubOp,
                                DAG.getIntPtrConstant(j)));
    }
  }
  return DAG.getNode(ISD::BUILD_VECTOR, dl, Node->getValueType(0), Ops);
}
/// LowerShiftRightParts - Lower SRL_PARTS and SRA_PARTS, which
/// 1) return two i32 values and take a 2 x i32 value to shift plus a shift
///    amount, or
/// 2) return two i64 values and take a 2 x i64 value to shift plus a shift
///    amount.
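/// For example (illustrative): with 32-bit halves and Amt = 40, the result is
/// dLo = aHi >> 8, while dHi becomes all sign bits (SRA) or all zeros (SRL).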
SDValue NVPTXTargetLowering::LowerShiftRightParts(SDValue Op,
                                                  SelectionDAG &DAG) const {
  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
  assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);

  EVT VT = Op.getValueType();
  unsigned VTBits = VT.getSizeInBits();
  SDLoc dl(Op);
  SDValue ShOpLo = Op.getOperand(0);
  SDValue ShOpHi = Op.getOperand(1);
  SDValue ShAmt = Op.getOperand(2);
  unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;

  if (VTBits == 32 && nvptxSubtarget.getSmVersion() >= 35) {

    // For 32bit and sm35, we can use the funnel shift 'shf' instruction.
    // {dHi, dLo} = {aHi, aLo} >> Amt
    //   dHi = aHi >> Amt
    //   dLo = shf.r.clamp aLo, aHi, Amt

    SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
    SDValue Lo = DAG.getNode(NVPTXISD::FUN_SHFR_CLAMP, dl, VT, ShOpLo, ShOpHi,
                             ShAmt);

    SDValue Ops[2] = { Lo, Hi };
    return DAG.getMergeValues(Ops, dl);
  } else {

    // {dHi, dLo} = {aHi, aLo} >> Amt
    // - if (Amt>=size) then
    //      dLo = aHi >> (Amt-size)
    //      dHi = aHi >> Amt (this is either all 0 or all 1)
    //   else
    //      dLo = (aLo >>logic Amt) | (aHi << (size-Amt))
    //      dHi = aHi >> Amt

    SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
                                   DAG.getConstant(VTBits, MVT::i32), ShAmt);
    SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
    SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
                                     DAG.getConstant(VTBits, MVT::i32));
    SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
    SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
    SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);

    SDValue Cmp = DAG.getSetCC(dl, MVT::i1, ShAmt,
                               DAG.getConstant(VTBits, MVT::i32), ISD::SETGE);
    SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
    SDValue Lo = DAG.getNode(ISD::SELECT, dl, VT, Cmp, TrueVal, FalseVal);

    SDValue Ops[2] = { Lo, Hi };
    return DAG.getMergeValues(Ops, dl);
  }
}
/// LowerShiftLeftParts - Lower SHL_PARTS, which
/// 1) returns two i32 values and takes a 2 x i32 value to shift plus a shift
///    amount, or
/// 2) returns two i64 values and takes a 2 x i64 value to shift plus a shift
///    amount.
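/// For example (illustrative): with 32-bit halves and Amt = 40, the result is
/// dHi = aLo << 8 and dLo = 0, since the entire low half shifts out.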
SDValue NVPTXTargetLowering::LowerShiftLeftParts(SDValue Op,
                                                 SelectionDAG &DAG) const {
  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
  assert(Op.getOpcode() == ISD::SHL_PARTS);

  EVT VT = Op.getValueType();
  unsigned VTBits = VT.getSizeInBits();
  SDLoc dl(Op);
  SDValue ShOpLo = Op.getOperand(0);
  SDValue ShOpHi = Op.getOperand(1);
  SDValue ShAmt = Op.getOperand(2);

  if (VTBits == 32 && nvptxSubtarget.getSmVersion() >= 35) {

    // For 32bit and sm35, we can use the funnel shift 'shf' instruction.
    // {dHi, dLo} = {aHi, aLo} << Amt
    //   dHi = shf.l.clamp aLo, aHi, Amt
    //   dLo = aLo << Amt

    SDValue Hi = DAG.getNode(NVPTXISD::FUN_SHFL_CLAMP, dl, VT, ShOpLo, ShOpHi,
                             ShAmt);
    SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);

    SDValue Ops[2] = { Lo, Hi };
    return DAG.getMergeValues(Ops, dl);
  } else {

    // {dHi, dLo} = {aHi, aLo} << Amt
    // - if (Amt>=size) then
    //      dLo = aLo << Amt (all 0)
    //      dHi = aLo << (Amt-size)
    //   else
    //      dLo = aLo << Amt
    //      dHi = (aHi << Amt) | (aLo >> (size-Amt))

    SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
                                   DAG.getConstant(VTBits, MVT::i32), ShAmt);
    SDValue Tmp1 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
    SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
                                     DAG.getConstant(VTBits, MVT::i32));
    SDValue Tmp2 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
    SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
    SDValue TrueVal = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);

    SDValue Cmp = DAG.getSetCC(dl, MVT::i1, ShAmt,
                               DAG.getConstant(VTBits, MVT::i32), ISD::SETGE);
    SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
    SDValue Hi = DAG.getNode(ISD::SELECT, dl, VT, Cmp, TrueVal, FalseVal);

    SDValue Ops[2] = { Lo, Hi };
    return DAG.getMergeValues(Ops, dl);
  }
}
SDValue
NVPTXTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  case ISD::RETURNADDR:
    return SDValue();
  case ISD::FRAMEADDR:
    return SDValue();
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::INTRINSIC_W_CHAIN:
    return Op;
  case ISD::BUILD_VECTOR:
  case ISD::EXTRACT_SUBVECTOR:
    return Op;
  case ISD::CONCAT_VECTORS:
    return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::STORE:
    return LowerSTORE(Op, DAG);
  case ISD::LOAD:
    return LowerLOAD(Op, DAG);
  case ISD::SHL_PARTS:
    return LowerShiftLeftParts(Op, DAG);
  case ISD::SRA_PARTS:
  case ISD::SRL_PARTS:
    return LowerShiftRightParts(Op, DAG);
  default:
    llvm_unreachable("Custom lowering not defined for operation");
  }
}
SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  if (Op.getValueType() == MVT::i1)
    return LowerLOADi1(Op, DAG);
  else
    return SDValue();
}
// v = ld i1* addr
//   =>
// v1 = ld i8* addr (-> i16)
// v = trunc i16 to i1
SDValue NVPTXTargetLowering::LowerLOADi1(SDValue Op, SelectionDAG &DAG) const {
  SDNode *Node = Op.getNode();
  LoadSDNode *LD = cast<LoadSDNode>(Node);
  SDLoc dl(Node);
  assert(LD->getExtensionType() == ISD::NON_EXTLOAD);
  assert(Node->getValueType(0) == MVT::i1 &&
         "Custom lowering for i1 load only");
  SDValue newLD =
      DAG.getLoad(MVT::i16, dl, LD->getChain(), LD->getBasePtr(),
                  LD->getPointerInfo(), LD->isVolatile(), LD->isNonTemporal(),
                  LD->isInvariant(), LD->getAlignment());
  SDValue result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, newLD);
  // The legalizer (the caller) is expecting two values from the legalized
  // load, so we build a MergeValues node for it. See ExpandUnalignedLoad()
  // in LegalizeDAG.cpp which also uses MergeValues.
  SDValue Ops[] = { result, LD->getChain() };
  return DAG.getMergeValues(Ops, dl);
}
SDValue NVPTXTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  EVT ValVT = Op.getOperand(1).getValueType();
  if (ValVT == MVT::i1)
    return LowerSTOREi1(Op, DAG);
  else if (ValVT.isVector())
    return LowerSTOREVector(Op, DAG);
  else
    return SDValue();
}
SDValue
NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
  SDNode *N = Op.getNode();
  SDValue Val = N->getOperand(1);
  SDLoc DL(N);
  EVT ValVT = Val.getValueType();

  if (ValVT.isVector()) {
    // We only handle "native" vector sizes for now, e.g. <4 x double> is not
    // legal. We can (and should) split that into 2 stores of <2 x double> here
    // but I'm leaving that as a TODO for now.
    if (!ValVT.isSimple())
      return SDValue();
    switch (ValVT.getSimpleVT().SimpleTy) {
    default:
      return SDValue();
    case MVT::v2i8:
    case MVT::v2i16:
    case MVT::v2i32:
    case MVT::v2i64:
    case MVT::v2f32:
    case MVT::v2f64:
    case MVT::v4i8:
    case MVT::v4i16:
    case MVT::v4i32:
    case MVT::v4f32:
      // This is a "native" vector type
      break;
    }

    unsigned Opcode = 0;
    EVT EltVT = ValVT.getVectorElementType();
    unsigned NumElts = ValVT.getVectorNumElements();

    // Since StoreV2 is a target node, we cannot rely on DAG type legalization.
    // Therefore, we must ensure the type is legal. For i1 and i8, we set the
    // stored type to i16 and propagate the "real" type as the memory type.
    bool NeedExt = false;
    if (EltVT.getSizeInBits() < 16)
      NeedExt = true;

    switch (NumElts) {
    default:
      return SDValue();
    case 2:
      Opcode = NVPTXISD::StoreV2;
      break;
    case 4:
      Opcode = NVPTXISD::StoreV4;
      break;
    }

    SmallVector<SDValue, 8> Ops;

    // First is the chain
    Ops.push_back(N->getOperand(0));

    // Then the split values
    for (unsigned i = 0; i < NumElts; ++i) {
      SDValue ExtVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Val,
                                   DAG.getIntPtrConstant(i));
      if (NeedExt)
        ExtVal = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i16, ExtVal);
      Ops.push_back(ExtVal);
    }

    // Then any remaining arguments
    for (unsigned i = 2, e = N->getNumOperands(); i != e; ++i) {
      Ops.push_back(N->getOperand(i));
    }

    MemSDNode *MemSD = cast<MemSDNode>(N);

    SDValue NewSt = DAG.getMemIntrinsicNode(
        Opcode, DL, DAG.getVTList(MVT::Other), Ops,
        MemSD->getMemoryVT(), MemSD->getMemOperand());

    //return DCI.CombineTo(N, NewSt, true);
    return NewSt;
  }

  return SDValue();
}

// st i1 v, addr
//    =>
// v1 = zxt v to i16
// st.u8 i16, addr
SDValue NVPTXTargetLowering::LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const {
  SDNode *Node = Op.getNode();
  SDLoc dl(Node);
  StoreSDNode *ST = cast<StoreSDNode>(Node);
  SDValue Tmp1 = ST->getChain();
  SDValue Tmp2 = ST->getBasePtr();
  SDValue Tmp3 = ST->getValue();
  assert(Tmp3.getValueType() == MVT::i1 && "Custom lowering for i1 store only");
  unsigned Alignment = ST->getAlignment();
  bool isVolatile = ST->isVolatile();
  bool isNonTemporal = ST->isNonTemporal();
  Tmp3 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Tmp3);
  SDValue Result = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2,
                                     ST->getPointerInfo(), MVT::i8,
                                     isNonTemporal, isVolatile, Alignment);
  return Result;
}
SDValue NVPTXTargetLowering::getExtSymb(SelectionDAG &DAG, const char *inname,
                                        int idx, EVT v) const {
  std::string *name = nvTM->getManagedStrPool()->getManagedString(inname);
  std::stringstream suffix;
  suffix << idx;
  *name += suffix.str();
  return DAG.getTargetExternalSymbol(name->c_str(), v);
}
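// A sketch of the name this produces: for a function named "foo" and idx 0,
// getParamSymbol below returns an external symbol named "foo_param_0".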
SDValue
NVPTXTargetLowering::getParamSymbol(SelectionDAG &DAG, int idx, EVT v) const {
  std::string ParamSym;
  raw_string_ostream ParamStr(ParamSym);

  ParamStr << DAG.getMachineFunction().getName() << "_param_" << idx;
  ParamStr.flush();

  std::string *SavedStr =
      nvTM->getManagedStrPool()->getManagedString(ParamSym.c_str());
  return DAG.getTargetExternalSymbol(SavedStr->c_str(), v);
}

SDValue NVPTXTargetLowering::getParamHelpSymbol(SelectionDAG &DAG, int idx) {
  return getExtSymb(DAG, ".HLPPARAM", idx);
}
// Check to see if the kernel argument is image*_t or sampler_t

bool llvm::isImageOrSamplerVal(const Value *arg, const Module *context) {
  static const char *const specialTypes[] = { "struct._image2d_t",
                                              "struct._image3d_t",
                                              "struct._sampler_t" };

  const Type *Ty = arg->getType();
  const PointerType *PTy = dyn_cast<PointerType>(Ty);

  if (!PTy)
    return false;

  if (!context)
    return false;

  const StructType *STy = dyn_cast<StructType>(PTy->getElementType());
  const std::string TypeName = STy && !STy->isLiteral() ? STy->getName() : "";

  for (int i = 0, e = array_lengthof(specialTypes); i != e; ++i)
    if (TypeName == specialTypes[i])
      return true;

  return false;
}
SDValue NVPTXTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc dl, SelectionDAG &DAG,
    SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  const DataLayout *TD = getDataLayout();

  const Function *F = MF.getFunction();
  const AttributeSet &PAL = F->getAttributes();
  const TargetLowering *TLI = DAG.getTarget().getTargetLowering();

  SDValue Root = DAG.getRoot();
  std::vector<SDValue> OutChains;

  bool isKernel = llvm::isKernelFunction(*F);
  bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
  assert(isABI && "Non-ABI compilation is not supported");
  if (!isABI)
    return Chain;

  std::vector<Type *> argTypes;
  std::vector<const Argument *> theArgs;
  for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
       I != E; ++I) {
    theArgs.push_back(I);
    argTypes.push_back(I->getType());
  }
  // argTypes.size() (or theArgs.size()) and Ins.size() need not match.
  // Ins.size() will be larger
  //   * if there is an aggregate argument with multiple fields (each field
  //     showing up separately in Ins)
  //   * if there is a vector argument with more than typical vector-length
  //     elements (generally if more than 4) where each vector element is
  //     individually present in Ins.
  // So a different index should be used for indexing into Ins.
  // See similar issue in LowerCall.
  unsigned InsIdx = 0;
  int idx = 0;
  for (unsigned i = 0, e = theArgs.size(); i != e; ++i, ++idx, ++InsIdx) {
    Type *Ty = argTypes[i];

    // If the kernel argument is image*_t or sampler_t, convert it to
    // a i32 constant holding the parameter position. This can later be
    // matched in the AsmPrinter to output the correct mangled name.
    if (isImageOrSamplerVal(
            theArgs[i],
            (theArgs[i]->getParent() ? theArgs[i]->getParent()->getParent()
                                     : nullptr))) {
      assert(isKernel && "Only kernels can have image/sampler params");
      InVals.push_back(DAG.getConstant(i + 1, MVT::i32));
      continue;
    }

    if (theArgs[i]->use_empty()) {
      // argument is dead
      if (Ty->isAggregateType()) {
        SmallVector<EVT, 16> vtparts;

        ComputePTXValueVTs(*this, Ty, vtparts);
        assert(vtparts.size() > 0 && "empty aggregate type not expected");
        for (unsigned parti = 0, parte = vtparts.size(); parti != parte;
             ++parti) {
          InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
          ++InsIdx;
        }
        if (vtparts.size() > 0)
          --InsIdx;
        continue;
      }
      if (Ty->isVectorTy()) {
        EVT ObjectVT = getValueType(Ty);
        unsigned NumRegs = TLI->getNumRegisters(F->getContext(), ObjectVT);
        for (unsigned parti = 0; parti < NumRegs; ++parti) {
          InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
          ++InsIdx;
        }
        if (NumRegs > 0)
          --InsIdx;
        continue;
      }
      InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
      continue;
    }
1707 // In the following cases, assign a node order of "idx+1"
1708 // to newly created nodes. The SDNodes for params have to
1709 // appear in the same order as their order of appearance
1710 // in the original function. "idx+1" holds that order.
    if (!PAL.hasAttribute(i + 1, Attribute::ByVal)) {
1712 if (Ty->isAggregateType()) {
1713 SmallVector<EVT, 16> vtparts;
1714 SmallVector<uint64_t, 16> offsets;
        // NOTE: Here, we lose the ability to issue vector loads for vectors
        // that are a part of a struct. This should be investigated in the
        // future.
        ComputePTXValueVTs(*this, Ty, vtparts, &offsets, 0);
1720 assert(vtparts.size() > 0 && "empty aggregate type not expected");
1721 bool aggregateIsPacked = false;
1722 if (StructType *STy = llvm::dyn_cast<StructType>(Ty))
1723 aggregateIsPacked = STy->isPacked();
        SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
        for (unsigned parti = 0, parte = vtparts.size(); parti != parte;
             ++parti) {
          EVT partVT = vtparts[parti];
          Value *srcValue = Constant::getNullValue(
              PointerType::get(partVT.getTypeForEVT(F->getContext()),
                               llvm::ADDRESS_SPACE_PARAM));
          SDValue srcAddr =
              DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
                          DAG.getConstant(offsets[parti], getPointerTy()));
          unsigned partAlign =
              aggregateIsPacked ? 1
                                : TD->getABITypeAlignment(
                                      partVT.getTypeForEVT(F->getContext()));
          SDValue p;
          if (Ins[InsIdx].VT.getSizeInBits() > partVT.getSizeInBits()) {
            ISD::LoadExtType ExtOp = Ins[InsIdx].Flags.isSExt() ?
                ISD::SEXTLOAD : ISD::ZEXTLOAD;
            p = DAG.getExtLoad(ExtOp, dl, Ins[InsIdx].VT, Root, srcAddr,
                               MachinePointerInfo(srcValue), partVT, false,
                               false, partAlign);
          } else {
            p = DAG.getLoad(partVT, dl, Root, srcAddr,
                            MachinePointerInfo(srcValue), false, false, false,
                            partAlign);
          }
          if (p.getNode())
            p.getNode()->setIROrder(idx + 1);
          InVals.push_back(p);
          ++InsIdx;
        }
        if (vtparts.size() > 0)
          --InsIdx;
        continue;
      }
1760 if (Ty->isVectorTy()) {
1761 EVT ObjectVT = getValueType(Ty);
1762 SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
1763 unsigned NumElts = ObjectVT.getVectorNumElements();
1764 assert(TLI->getNumRegisters(F->getContext(), ObjectVT) == NumElts &&
1765 "Vector was not scalarized");
1767 EVT EltVT = ObjectVT.getVectorElementType();
1772 // We only have one element, so just directly load it
1773 Value *SrcValue = Constant::getNullValue(PointerType::get(
1774 EltVT.getTypeForEVT(F->getContext()), llvm::ADDRESS_SPACE_PARAM));
1775 SDValue SrcAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
1776 DAG.getConstant(Ofst, getPointerTy()));
          SDValue P = DAG.getLoad(
              EltVT, dl, Root, SrcAddr, MachinePointerInfo(SrcValue), false,
              false, false,
              TD->getABITypeAlignment(EltVT.getTypeForEVT(F->getContext())));
1782 P.getNode()->setIROrder(idx + 1);
1784 if (Ins[InsIdx].VT.getSizeInBits() > EltVT.getSizeInBits())
1785 P = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, P);
1786 InVals.push_back(P);
1787 Ofst += TD->getTypeAllocSize(EltVT.getTypeForEVT(F->getContext()));
1789 } else if (NumElts == 2) {
1791 // f32,f32 = load ...
1792 EVT VecVT = EVT::getVectorVT(F->getContext(), EltVT, 2);
1793 Value *SrcValue = Constant::getNullValue(PointerType::get(
1794 VecVT.getTypeForEVT(F->getContext()), llvm::ADDRESS_SPACE_PARAM));
1795 SDValue SrcAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
1796 DAG.getConstant(Ofst, getPointerTy()));
          SDValue P = DAG.getLoad(
              VecVT, dl, Root, SrcAddr, MachinePointerInfo(SrcValue), false,
              false, false,
              TD->getABITypeAlignment(VecVT.getTypeForEVT(F->getContext())));
1802 P.getNode()->setIROrder(idx + 1);
1804 SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, P,
1805 DAG.getIntPtrConstant(0));
1806 SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, P,
1807 DAG.getIntPtrConstant(1));
1809 if (Ins[InsIdx].VT.getSizeInBits() > EltVT.getSizeInBits()) {
1810 Elt0 = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, Elt0);
1811 Elt1 = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, Elt1);
1814 InVals.push_back(Elt0);
1815 InVals.push_back(Elt1);
1816 Ofst += TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
        } else {
          // V4 loads
          // We have at least 4 elements (<3 x Ty> expands to 4 elements) and
          // the vector will be expanded to a power of 2 elements, so we know
          // we can always round up to the next multiple of 4 when creating
          // the vector loads.
          // e.g. 4 elem => 1 ld.v4
          //      6 elem => 2 ld.v4
          //      8 elem => 2 ld.v4
          //     11 elem => 3 ld.v4
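          // As a rough PTX sketch (register names are illustrative), a
          // <6 x float> parameter would be fetched as
          //   ld.param.v4.f32 {%f1, %f2, %f3, %f4}, [foo_param_0];
          //   ld.param.v4.f32 {%f5, %f6, %f7, %f8}, [foo_param_0+16];
          // with the two trailing lanes of the second load left unused.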
          unsigned VecSize = 4;
          if (EltVT.getSizeInBits() == 64) {
            VecSize = 2;
          }
1833 EVT VecVT = EVT::getVectorVT(F->getContext(), EltVT, VecSize);
1834 for (unsigned i = 0; i < NumElts; i += VecSize) {
1835 Value *SrcValue = Constant::getNullValue(
1836 PointerType::get(VecVT.getTypeForEVT(F->getContext()),
1837 llvm::ADDRESS_SPACE_PARAM));
            SDValue SrcAddr =
                DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
                            DAG.getConstant(Ofst, getPointerTy()));
            SDValue P = DAG.getLoad(
                VecVT, dl, Root, SrcAddr, MachinePointerInfo(SrcValue), false,
                false, false,
                TD->getABITypeAlignment(VecVT.getTypeForEVT(F->getContext())));
1846 P.getNode()->setIROrder(idx + 1);
            for (unsigned j = 0; j < VecSize; ++j) {
              if (i + j >= NumElts)
                break;
1851 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, P,
1852 DAG.getIntPtrConstant(j));
1853 if (Ins[InsIdx].VT.getSizeInBits() > EltVT.getSizeInBits())
1854 Elt = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, Elt);
1855 InVals.push_back(Elt);
1857 Ofst += TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
1867 EVT ObjectVT = getValueType(Ty);
1868 // If ABI, load from the param symbol
1869 SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
1870 Value *srcValue = Constant::getNullValue(PointerType::get(
1871 ObjectVT.getTypeForEVT(F->getContext()), llvm::ADDRESS_SPACE_PARAM));
      SDValue p;
      if (ObjectVT.getSizeInBits() < Ins[InsIdx].VT.getSizeInBits()) {
        ISD::LoadExtType ExtOp = Ins[InsIdx].Flags.isSExt() ?
            ISD::SEXTLOAD : ISD::ZEXTLOAD;
        p = DAG.getExtLoad(
            ExtOp, dl, Ins[InsIdx].VT, Root, Arg, MachinePointerInfo(srcValue),
            ObjectVT, false, false,
            TD->getABITypeAlignment(ObjectVT.getTypeForEVT(F->getContext())));
      } else {
        p = DAG.getLoad(
            Ins[InsIdx].VT, dl, Root, Arg, MachinePointerInfo(srcValue),
            false, false, false,
            TD->getABITypeAlignment(ObjectVT.getTypeForEVT(F->getContext())));
      }
      if (p.getNode())
        p.getNode()->setIROrder(idx + 1);
      InVals.push_back(p);
      continue;
    }
1890 // Param has ByVal attribute
1891 // Return MoveParam(param symbol).
    // Ideally, the param symbol could be returned directly,
    // but when the SDNode builder decides to use it in a CopyToReg(),
    // the machine instruction fails because a TargetExternalSymbol
    // (which is not lowered) is target dependent, and CopyToReg assumes
    // the source is lowered.
1897 EVT ObjectVT = getValueType(Ty);
1898 assert(ObjectVT == Ins[InsIdx].VT &&
1899 "Ins type did not match function type");
1900 SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
    SDValue p = DAG.getNode(NVPTXISD::MoveParam, dl, ObjectVT, Arg);
    if (p.getNode())
      p.getNode()->setIROrder(idx + 1);
    if (isKernel)
      InVals.push_back(p);
    else {
      SDValue p2 = DAG.getNode(
          ISD::INTRINSIC_WO_CHAIN, dl, ObjectVT,
          DAG.getConstant(Intrinsic::nvvm_ptr_local_to_gen, MVT::i32), p);
      InVals.push_back(p2);
    }
  }
  // Clang will check explicit VarArg and issue an error if any. However,
  // Clang will let code with an implicit vararg, such as f(), pass. See bug
  // 617733. We treat this case as if the arg list is empty.
  // if (F.isVarArg()) {
  //   assert(0 && "VarArg not supported yet!");
  // }
1922 if (!OutChains.empty())
    DAG.setRoot(DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains));

  return Chain;
}

SDValue
NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                 bool isVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 SDLoc dl, SelectionDAG &DAG) const {
1935 MachineFunction &MF = DAG.getMachineFunction();
1936 const Function *F = MF.getFunction();
1937 Type *RetTy = F->getReturnType();
1938 const DataLayout *TD = getDataLayout();
1940 bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
1941 assert(isABI && "Non-ABI compilation is not supported");
1945 if (VectorType *VTy = dyn_cast<VectorType>(RetTy)) {
    // If we have a vector type, the OutVals array will hold the scalarized
    // components, and we have to combine them into one or more vector stores.
1948 unsigned NumElts = VTy->getNumElements();
1949 assert(NumElts == Outs.size() && "Bad scalarization of return value");
1951 // const_cast can be removed in later LLVM versions
1952 EVT EltVT = getValueType(RetTy).getVectorElementType();
1953 bool NeedExtend = false;
    if (EltVT.getSizeInBits() < 16)
      NeedExtend = true;

    // V1 store
    if (NumElts == 1) {
      SDValue StoreVal = OutVals[0];
      // We only have one element, so just directly store it
      if (NeedExtend)
        StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
1963 SDValue Ops[] = { Chain, DAG.getConstant(0, MVT::i32), StoreVal };
1964 Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreRetval, dl,
1965 DAG.getVTList(MVT::Other), Ops,
1966 EltVT, MachinePointerInfo());
1968 } else if (NumElts == 2) {
      SDValue StoreVal0 = OutVals[0];
      SDValue StoreVal1 = OutVals[1];

      if (NeedExtend) {
        StoreVal0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal0);
        StoreVal1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal1);
      }

      SDValue Ops[] = { Chain, DAG.getConstant(0, MVT::i32), StoreVal0,
                        StoreVal1 };
      Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreRetvalV2, dl,
1981 DAG.getVTList(MVT::Other), Ops,
1982 EltVT, MachinePointerInfo());
    } else {
      // V4 stores
      // We have at least 4 elements (<3 x Ty> expands to 4 elements) and the
      // vector will be expanded to a power of 2 elements, so we know we can
      // always round up to the next multiple of 4 when creating the vector
      // stores.
      // e.g. 4 elem => 1 st.v4
      //      6 elem => 2 st.v4
      //      8 elem => 2 st.v4
      //     11 elem => 3 st.v4
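      // As a rough PTX sketch (illustrative only), a <6 x float> return value
      // would be stored as
      //   st.param.v4.f32 [func_retval0+0],  {%f1, %f2, %f3, %f4};
      //   st.param.v4.f32 [func_retval0+16], {%f5, %f6, %f7, %f8};
      // with undef values in the two trailing lanes of the second store.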
      unsigned VecSize = 4;
      if (OutVals[0].getValueType().getSizeInBits() == 64)
        VecSize = 2;

      unsigned Offset = 0;

      EVT VecVT =
          EVT::getVectorVT(F->getContext(), EltVT, VecSize);
2002 unsigned PerStoreOffset =
2003 TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
2005 for (unsigned i = 0; i < NumElts; i += VecSize) {
2008 SmallVector<SDValue, 8> Ops;
2009 Ops.push_back(Chain);
2010 Ops.push_back(DAG.getConstant(Offset, MVT::i32));
2011 unsigned Opc = NVPTXISD::StoreRetvalV2;
        EVT ExtendedVT = (NeedExtend) ? MVT::i16 : OutVals[0].getValueType();
        SDValue StoreVal;

        StoreVal = OutVals[i];
        if (NeedExtend)
          StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal);
        Ops.push_back(StoreVal);

        if (i + 1 < NumElts) {
          StoreVal = OutVals[i + 1];
          if (NeedExtend)
            StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal);
        } else {
          StoreVal = DAG.getUNDEF(ExtendedVT);
        }
        Ops.push_back(StoreVal);

        if (VecSize == 4) {
          Opc = NVPTXISD::StoreRetvalV4;
          if (i + 2 < NumElts) {
            StoreVal = OutVals[i + 2];
            if (NeedExtend)
              StoreVal =
                  DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal);
          } else {
            StoreVal = DAG.getUNDEF(ExtendedVT);
          }
          Ops.push_back(StoreVal);

          if (i + 3 < NumElts) {
            StoreVal = OutVals[i + 3];
            if (NeedExtend)
              StoreVal =
                  DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal);
          } else {
            StoreVal = DAG.getUNDEF(ExtendedVT);
          }
          Ops.push_back(StoreVal);
        }

        // Chain = DAG.getNode(Opc, dl, MVT::Other, &Ops[0], Ops.size());
        Chain =
            DAG.getMemIntrinsicNode(Opc, dl, DAG.getVTList(MVT::Other), Ops,
                                    EltVT, MachinePointerInfo());
        Offset += PerStoreOffset;
      }
    } else {
2059 SmallVector<EVT, 16> ValVTs;
2060 SmallVector<uint64_t, 16> Offsets;
2061 ComputePTXValueVTs(*this, RetTy, ValVTs, &Offsets, 0);
2062 assert(ValVTs.size() == OutVals.size() && "Bad return value decomposition");
2064 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
2065 SDValue theVal = OutVals[i];
2066 EVT TheValType = theVal.getValueType();
2067 unsigned numElems = 1;
2068 if (TheValType.isVector())
2069 numElems = TheValType.getVectorNumElements();
2070 for (unsigned j = 0, je = numElems; j != je; ++j) {
2071 SDValue TmpVal = theVal;
2072 if (TheValType.isVector())
2073 TmpVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
2074 TheValType.getVectorElementType(), TmpVal,
2075 DAG.getIntPtrConstant(j));
2076 EVT TheStoreType = ValVTs[i];
2077 if (RetTy->isIntegerTy() &&
2078 TD->getTypeAllocSizeInBits(RetTy) < 32) {
2079 // The following zero-extension is for integer types only, and
2080 // specifically not for aggregates.
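        // e.g. (illustrative) an i8 return value is widened to i32 here and
        // later stored with something like
        //   st.param.b32 [func_retval0+0], %r1;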
2081 TmpVal = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, TmpVal);
        TheStoreType = MVT::i32;
      }
      else if (TmpVal.getValueType().getSizeInBits() < 16)
        TmpVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, TmpVal);

      SDValue Ops[] = { Chain,
                        DAG.getConstant(Offsets[i], MVT::i32),
                        TmpVal };
      Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreRetval, dl,
                                      DAG.getVTList(MVT::Other), Ops,
                                      TheStoreType,
                                      MachinePointerInfo());
      }
    }
  }

  return DAG.getNode(NVPTXISD::RET_FLAG, dl, MVT::Other, Chain);
}
2103 void NVPTXTargetLowering::LowerAsmOperandForConstraint(
2104 SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
2105 SelectionDAG &DAG) const {
  if (Constraint.length() > 1)
    return;
  else
    TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
// NVPTX supports vectors of legal types of any length in intrinsics, because
// the NVPTX-specific type legalizer will legalize them to PTX-supported
// lengths.
bool NVPTXTargetLowering::isTypeSupportedInIntrinsic(MVT VT) const {
  if (isTypeLegal(VT))
    return true;
  if (VT.isVector()) {
    MVT eVT = VT.getVectorElementType();
    if (isTypeLegal(eVT))
      return true;
  }
  return false;
}
2126 static unsigned getOpcForTextureInstr(unsigned Intrinsic) {
2127 switch (Intrinsic) {
2131 case Intrinsic::nvvm_tex_1d_v4f32_i32:
2132 return NVPTXISD::Tex1DFloatI32;
2133 case Intrinsic::nvvm_tex_1d_v4f32_f32:
2134 return NVPTXISD::Tex1DFloatFloat;
2135 case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
2136 return NVPTXISD::Tex1DFloatFloatLevel;
2137 case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
2138 return NVPTXISD::Tex1DFloatFloatGrad;
2139 case Intrinsic::nvvm_tex_1d_v4i32_i32:
2140 return NVPTXISD::Tex1DI32I32;
2141 case Intrinsic::nvvm_tex_1d_v4i32_f32:
2142 return NVPTXISD::Tex1DI32Float;
2143 case Intrinsic::nvvm_tex_1d_level_v4i32_f32:
2144 return NVPTXISD::Tex1DI32FloatLevel;
2145 case Intrinsic::nvvm_tex_1d_grad_v4i32_f32:
2146 return NVPTXISD::Tex1DI32FloatGrad;
2148 case Intrinsic::nvvm_tex_1d_array_v4f32_i32:
2149 return NVPTXISD::Tex1DArrayFloatI32;
2150 case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
2151 return NVPTXISD::Tex1DArrayFloatFloat;
2152 case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
2153 return NVPTXISD::Tex1DArrayFloatFloatLevel;
2154 case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
2155 return NVPTXISD::Tex1DArrayFloatFloatGrad;
2156 case Intrinsic::nvvm_tex_1d_array_v4i32_i32:
2157 return NVPTXISD::Tex1DArrayI32I32;
2158 case Intrinsic::nvvm_tex_1d_array_v4i32_f32:
2159 return NVPTXISD::Tex1DArrayI32Float;
2160 case Intrinsic::nvvm_tex_1d_array_level_v4i32_f32:
2161 return NVPTXISD::Tex1DArrayI32FloatLevel;
2162 case Intrinsic::nvvm_tex_1d_array_grad_v4i32_f32:
2163 return NVPTXISD::Tex1DArrayI32FloatGrad;
2165 case Intrinsic::nvvm_tex_2d_v4f32_i32:
2166 return NVPTXISD::Tex2DFloatI32;
2167 case Intrinsic::nvvm_tex_2d_v4f32_f32:
2168 return NVPTXISD::Tex2DFloatFloat;
2169 case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
2170 return NVPTXISD::Tex2DFloatFloatLevel;
2171 case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
2172 return NVPTXISD::Tex2DFloatFloatGrad;
2173 case Intrinsic::nvvm_tex_2d_v4i32_i32:
2174 return NVPTXISD::Tex2DI32I32;
2175 case Intrinsic::nvvm_tex_2d_v4i32_f32:
2176 return NVPTXISD::Tex2DI32Float;
2177 case Intrinsic::nvvm_tex_2d_level_v4i32_f32:
2178 return NVPTXISD::Tex2DI32FloatLevel;
2179 case Intrinsic::nvvm_tex_2d_grad_v4i32_f32:
2180 return NVPTXISD::Tex2DI32FloatGrad;
2182 case Intrinsic::nvvm_tex_2d_array_v4f32_i32:
2183 return NVPTXISD::Tex2DArrayFloatI32;
2184 case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
2185 return NVPTXISD::Tex2DArrayFloatFloat;
2186 case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
2187 return NVPTXISD::Tex2DArrayFloatFloatLevel;
2188 case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
2189 return NVPTXISD::Tex2DArrayFloatFloatGrad;
2190 case Intrinsic::nvvm_tex_2d_array_v4i32_i32:
2191 return NVPTXISD::Tex2DArrayI32I32;
2192 case Intrinsic::nvvm_tex_2d_array_v4i32_f32:
2193 return NVPTXISD::Tex2DArrayI32Float;
2194 case Intrinsic::nvvm_tex_2d_array_level_v4i32_f32:
2195 return NVPTXISD::Tex2DArrayI32FloatLevel;
2196 case Intrinsic::nvvm_tex_2d_array_grad_v4i32_f32:
2197 return NVPTXISD::Tex2DArrayI32FloatGrad;
2199 case Intrinsic::nvvm_tex_3d_v4f32_i32:
2200 return NVPTXISD::Tex3DFloatI32;
2201 case Intrinsic::nvvm_tex_3d_v4f32_f32:
2202 return NVPTXISD::Tex3DFloatFloat;
2203 case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
2204 return NVPTXISD::Tex3DFloatFloatLevel;
2205 case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:
2206 return NVPTXISD::Tex3DFloatFloatGrad;
2207 case Intrinsic::nvvm_tex_3d_v4i32_i32:
2208 return NVPTXISD::Tex3DI32I32;
2209 case Intrinsic::nvvm_tex_3d_v4i32_f32:
2210 return NVPTXISD::Tex3DI32Float;
2211 case Intrinsic::nvvm_tex_3d_level_v4i32_f32:
2212 return NVPTXISD::Tex3DI32FloatLevel;
2213 case Intrinsic::nvvm_tex_3d_grad_v4i32_f32:
2214 return NVPTXISD::Tex3DI32FloatGrad;
2218 static unsigned getOpcForSurfaceInstr(unsigned Intrinsic) {
2219 switch (Intrinsic) {
2222 case Intrinsic::nvvm_suld_1d_i8_trap:
2223 return NVPTXISD::Suld1DI8Trap;
2224 case Intrinsic::nvvm_suld_1d_i16_trap:
2225 return NVPTXISD::Suld1DI16Trap;
2226 case Intrinsic::nvvm_suld_1d_i32_trap:
2227 return NVPTXISD::Suld1DI32Trap;
2228 case Intrinsic::nvvm_suld_1d_v2i8_trap:
2229 return NVPTXISD::Suld1DV2I8Trap;
2230 case Intrinsic::nvvm_suld_1d_v2i16_trap:
2231 return NVPTXISD::Suld1DV2I16Trap;
2232 case Intrinsic::nvvm_suld_1d_v2i32_trap:
2233 return NVPTXISD::Suld1DV2I32Trap;
2234 case Intrinsic::nvvm_suld_1d_v4i8_trap:
2235 return NVPTXISD::Suld1DV4I8Trap;
2236 case Intrinsic::nvvm_suld_1d_v4i16_trap:
2237 return NVPTXISD::Suld1DV4I16Trap;
2238 case Intrinsic::nvvm_suld_1d_v4i32_trap:
2239 return NVPTXISD::Suld1DV4I32Trap;
2240 case Intrinsic::nvvm_suld_1d_array_i8_trap:
2241 return NVPTXISD::Suld1DArrayI8Trap;
2242 case Intrinsic::nvvm_suld_1d_array_i16_trap:
2243 return NVPTXISD::Suld1DArrayI16Trap;
2244 case Intrinsic::nvvm_suld_1d_array_i32_trap:
2245 return NVPTXISD::Suld1DArrayI32Trap;
2246 case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
2247 return NVPTXISD::Suld1DArrayV2I8Trap;
2248 case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
2249 return NVPTXISD::Suld1DArrayV2I16Trap;
2250 case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
2251 return NVPTXISD::Suld1DArrayV2I32Trap;
2252 case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
2253 return NVPTXISD::Suld1DArrayV4I8Trap;
2254 case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
2255 return NVPTXISD::Suld1DArrayV4I16Trap;
2256 case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
2257 return NVPTXISD::Suld1DArrayV4I32Trap;
2258 case Intrinsic::nvvm_suld_2d_i8_trap:
2259 return NVPTXISD::Suld2DI8Trap;
2260 case Intrinsic::nvvm_suld_2d_i16_trap:
2261 return NVPTXISD::Suld2DI16Trap;
2262 case Intrinsic::nvvm_suld_2d_i32_trap:
2263 return NVPTXISD::Suld2DI32Trap;
2264 case Intrinsic::nvvm_suld_2d_v2i8_trap:
2265 return NVPTXISD::Suld2DV2I8Trap;
2266 case Intrinsic::nvvm_suld_2d_v2i16_trap:
2267 return NVPTXISD::Suld2DV2I16Trap;
2268 case Intrinsic::nvvm_suld_2d_v2i32_trap:
2269 return NVPTXISD::Suld2DV2I32Trap;
2270 case Intrinsic::nvvm_suld_2d_v4i8_trap:
2271 return NVPTXISD::Suld2DV4I8Trap;
2272 case Intrinsic::nvvm_suld_2d_v4i16_trap:
2273 return NVPTXISD::Suld2DV4I16Trap;
2274 case Intrinsic::nvvm_suld_2d_v4i32_trap:
2275 return NVPTXISD::Suld2DV4I32Trap;
2276 case Intrinsic::nvvm_suld_2d_array_i8_trap:
2277 return NVPTXISD::Suld2DArrayI8Trap;
2278 case Intrinsic::nvvm_suld_2d_array_i16_trap:
2279 return NVPTXISD::Suld2DArrayI16Trap;
2280 case Intrinsic::nvvm_suld_2d_array_i32_trap:
2281 return NVPTXISD::Suld2DArrayI32Trap;
2282 case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
2283 return NVPTXISD::Suld2DArrayV2I8Trap;
2284 case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
2285 return NVPTXISD::Suld2DArrayV2I16Trap;
2286 case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
2287 return NVPTXISD::Suld2DArrayV2I32Trap;
2288 case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
2289 return NVPTXISD::Suld2DArrayV4I8Trap;
2290 case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
2291 return NVPTXISD::Suld2DArrayV4I16Trap;
2292 case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
2293 return NVPTXISD::Suld2DArrayV4I32Trap;
2294 case Intrinsic::nvvm_suld_3d_i8_trap:
2295 return NVPTXISD::Suld3DI8Trap;
2296 case Intrinsic::nvvm_suld_3d_i16_trap:
2297 return NVPTXISD::Suld3DI16Trap;
2298 case Intrinsic::nvvm_suld_3d_i32_trap:
2299 return NVPTXISD::Suld3DI32Trap;
2300 case Intrinsic::nvvm_suld_3d_v2i8_trap:
2301 return NVPTXISD::Suld3DV2I8Trap;
2302 case Intrinsic::nvvm_suld_3d_v2i16_trap:
2303 return NVPTXISD::Suld3DV2I16Trap;
2304 case Intrinsic::nvvm_suld_3d_v2i32_trap:
2305 return NVPTXISD::Suld3DV2I32Trap;
2306 case Intrinsic::nvvm_suld_3d_v4i8_trap:
2307 return NVPTXISD::Suld3DV4I8Trap;
2308 case Intrinsic::nvvm_suld_3d_v4i16_trap:
2309 return NVPTXISD::Suld3DV4I16Trap;
2310 case Intrinsic::nvvm_suld_3d_v4i32_trap:
2311 return NVPTXISD::Suld3DV4I32Trap;
// llvm.ptx.memcpy.const and llvm.ptx.memmove.const need to be modeled as
// TgtMemIntrinsic because we need information that is only available in the
// "Value" type of the destination pointer, in particular its address space.
2320 bool NVPTXTargetLowering::getTgtMemIntrinsic(
2321 IntrinsicInfo &Info, const CallInst &I, unsigned Intrinsic) const {
  switch (Intrinsic) {
  default:
    return false;

2326 case Intrinsic::nvvm_atomic_load_add_f32:
2327 Info.opc = ISD::INTRINSIC_W_CHAIN;
2328 Info.memVT = MVT::f32;
2329 Info.ptrVal = I.getArgOperand(0);
2332 Info.readMem = true;
2333 Info.writeMem = true;
2337 case Intrinsic::nvvm_atomic_load_inc_32:
2338 case Intrinsic::nvvm_atomic_load_dec_32:
2339 Info.opc = ISD::INTRINSIC_W_CHAIN;
2340 Info.memVT = MVT::i32;
2341 Info.ptrVal = I.getArgOperand(0);
2344 Info.readMem = true;
2345 Info.writeMem = true;
2349 case Intrinsic::nvvm_ldu_global_i:
2350 case Intrinsic::nvvm_ldu_global_f:
2351 case Intrinsic::nvvm_ldu_global_p: {
2353 Info.opc = ISD::INTRINSIC_W_CHAIN;
    if (Intrinsic == Intrinsic::nvvm_ldu_global_i)
      Info.memVT = getValueType(I.getType());
    else if (Intrinsic == Intrinsic::nvvm_ldu_global_p)
      Info.memVT = getPointerTy();
    else
      Info.memVT = getValueType(I.getType());
2360 Info.ptrVal = I.getArgOperand(0);
2363 Info.readMem = true;
2364 Info.writeMem = false;
2366 // alignment is available as metadata.
2367 // Grab it and set the alignment.
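    // A sketch of the expected IR (names and the metadata id are
    // illustrative):
    //   %v = call float @llvm.nvvm.ldu.global.f.f32.p1f32(
    //            float addrspace(1)* %p), !align !0
    //   !0 = metadata !{i32 4}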
2368 assert(I.hasMetadataOtherThanDebugLoc() && "Must have alignment metadata");
2369 MDNode *AlignMD = I.getMetadata("align");
2370 assert(AlignMD && "Must have a non-null MDNode");
2371 assert(AlignMD->getNumOperands() == 1 && "Must have a single operand");
2372 Value *Align = AlignMD->getOperand(0);
2373 int64_t Alignment = cast<ConstantInt>(Align)->getZExtValue();
2374 Info.align = Alignment;
2378 case Intrinsic::nvvm_ldg_global_i:
2379 case Intrinsic::nvvm_ldg_global_f:
2380 case Intrinsic::nvvm_ldg_global_p: {
2382 Info.opc = ISD::INTRINSIC_W_CHAIN;
    if (Intrinsic == Intrinsic::nvvm_ldg_global_i)
      Info.memVT = getValueType(I.getType());
    else if (Intrinsic == Intrinsic::nvvm_ldg_global_p)
      Info.memVT = getPointerTy();
    else
      Info.memVT = getValueType(I.getType());
2389 Info.ptrVal = I.getArgOperand(0);
2392 Info.readMem = true;
2393 Info.writeMem = false;
2395 // alignment is available as metadata.
2396 // Grab it and set the alignment.
2397 assert(I.hasMetadataOtherThanDebugLoc() && "Must have alignment metadata");
2398 MDNode *AlignMD = I.getMetadata("align");
2399 assert(AlignMD && "Must have a non-null MDNode");
2400 assert(AlignMD->getNumOperands() == 1 && "Must have a single operand");
2401 Value *Align = AlignMD->getOperand(0);
2402 int64_t Alignment = cast<ConstantInt>(Align)->getZExtValue();
2403 Info.align = Alignment;
2408 case Intrinsic::nvvm_tex_1d_v4f32_i32:
2409 case Intrinsic::nvvm_tex_1d_v4f32_f32:
2410 case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
2411 case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
2412 case Intrinsic::nvvm_tex_1d_array_v4f32_i32:
2413 case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
2414 case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
2415 case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
2416 case Intrinsic::nvvm_tex_2d_v4f32_i32:
2417 case Intrinsic::nvvm_tex_2d_v4f32_f32:
2418 case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
2419 case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
2420 case Intrinsic::nvvm_tex_2d_array_v4f32_i32:
2421 case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
2422 case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
2423 case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
2424 case Intrinsic::nvvm_tex_3d_v4f32_i32:
2425 case Intrinsic::nvvm_tex_3d_v4f32_f32:
2426 case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
2427 case Intrinsic::nvvm_tex_3d_grad_v4f32_f32: {
2428 Info.opc = getOpcForTextureInstr(Intrinsic);
2429 Info.memVT = MVT::f32;
2430 Info.ptrVal = nullptr;
2433 Info.readMem = true;
2434 Info.writeMem = false;
2438 case Intrinsic::nvvm_tex_1d_v4i32_i32:
2439 case Intrinsic::nvvm_tex_1d_v4i32_f32:
2440 case Intrinsic::nvvm_tex_1d_level_v4i32_f32:
2441 case Intrinsic::nvvm_tex_1d_grad_v4i32_f32:
2442 case Intrinsic::nvvm_tex_1d_array_v4i32_i32:
2443 case Intrinsic::nvvm_tex_1d_array_v4i32_f32:
2444 case Intrinsic::nvvm_tex_1d_array_level_v4i32_f32:
2445 case Intrinsic::nvvm_tex_1d_array_grad_v4i32_f32:
2446 case Intrinsic::nvvm_tex_2d_v4i32_i32:
2447 case Intrinsic::nvvm_tex_2d_v4i32_f32:
2448 case Intrinsic::nvvm_tex_2d_level_v4i32_f32:
2449 case Intrinsic::nvvm_tex_2d_grad_v4i32_f32:
2450 case Intrinsic::nvvm_tex_2d_array_v4i32_i32:
2451 case Intrinsic::nvvm_tex_2d_array_v4i32_f32:
2452 case Intrinsic::nvvm_tex_2d_array_level_v4i32_f32:
2453 case Intrinsic::nvvm_tex_2d_array_grad_v4i32_f32:
2454 case Intrinsic::nvvm_tex_3d_v4i32_i32:
2455 case Intrinsic::nvvm_tex_3d_v4i32_f32:
2456 case Intrinsic::nvvm_tex_3d_level_v4i32_f32:
2457 case Intrinsic::nvvm_tex_3d_grad_v4i32_f32: {
2458 Info.opc = getOpcForTextureInstr(Intrinsic);
2459 Info.memVT = MVT::i32;
2460 Info.ptrVal = nullptr;
2463 Info.readMem = true;
2464 Info.writeMem = false;
2468 case Intrinsic::nvvm_suld_1d_i8_trap:
2469 case Intrinsic::nvvm_suld_1d_v2i8_trap:
2470 case Intrinsic::nvvm_suld_1d_v4i8_trap:
2471 case Intrinsic::nvvm_suld_1d_array_i8_trap:
2472 case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
2473 case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
2474 case Intrinsic::nvvm_suld_2d_i8_trap:
2475 case Intrinsic::nvvm_suld_2d_v2i8_trap:
2476 case Intrinsic::nvvm_suld_2d_v4i8_trap:
2477 case Intrinsic::nvvm_suld_2d_array_i8_trap:
2478 case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
2479 case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
2480 case Intrinsic::nvvm_suld_3d_i8_trap:
2481 case Intrinsic::nvvm_suld_3d_v2i8_trap:
2482 case Intrinsic::nvvm_suld_3d_v4i8_trap: {
2483 Info.opc = getOpcForSurfaceInstr(Intrinsic);
2484 Info.memVT = MVT::i8;
2485 Info.ptrVal = nullptr;
2488 Info.readMem = true;
2489 Info.writeMem = false;
2493 case Intrinsic::nvvm_suld_1d_i16_trap:
2494 case Intrinsic::nvvm_suld_1d_v2i16_trap:
2495 case Intrinsic::nvvm_suld_1d_v4i16_trap:
2496 case Intrinsic::nvvm_suld_1d_array_i16_trap:
2497 case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
2498 case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
2499 case Intrinsic::nvvm_suld_2d_i16_trap:
2500 case Intrinsic::nvvm_suld_2d_v2i16_trap:
2501 case Intrinsic::nvvm_suld_2d_v4i16_trap:
2502 case Intrinsic::nvvm_suld_2d_array_i16_trap:
2503 case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
2504 case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
2505 case Intrinsic::nvvm_suld_3d_i16_trap:
2506 case Intrinsic::nvvm_suld_3d_v2i16_trap:
2507 case Intrinsic::nvvm_suld_3d_v4i16_trap: {
2508 Info.opc = getOpcForSurfaceInstr(Intrinsic);
2509 Info.memVT = MVT::i16;
2510 Info.ptrVal = nullptr;
2513 Info.readMem = true;
2514 Info.writeMem = false;
2518 case Intrinsic::nvvm_suld_1d_i32_trap:
2519 case Intrinsic::nvvm_suld_1d_v2i32_trap:
2520 case Intrinsic::nvvm_suld_1d_v4i32_trap:
2521 case Intrinsic::nvvm_suld_1d_array_i32_trap:
2522 case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
2523 case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
2524 case Intrinsic::nvvm_suld_2d_i32_trap:
2525 case Intrinsic::nvvm_suld_2d_v2i32_trap:
2526 case Intrinsic::nvvm_suld_2d_v4i32_trap:
2527 case Intrinsic::nvvm_suld_2d_array_i32_trap:
2528 case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
2529 case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
2530 case Intrinsic::nvvm_suld_3d_i32_trap:
2531 case Intrinsic::nvvm_suld_3d_v2i32_trap:
2532 case Intrinsic::nvvm_suld_3d_v4i32_trap: {
2533 Info.opc = getOpcForSurfaceInstr(Intrinsic);
2534 Info.memVT = MVT::i32;
2535 Info.ptrVal = nullptr;
2538 Info.readMem = true;
    Info.writeMem = false;
    return true;
  }
  }
  return false;
}
2548 /// isLegalAddressingMode - Return true if the addressing mode represented
2549 /// by AM is legal for this target, for a load/store of the specified type.
2550 /// Used to guide target specific optimizations, like loop strength reduction
2551 /// (LoopStrengthReduce.cpp) and memory optimization for address mode
2552 /// (CodeGenPrepare.cpp)
bool NVPTXTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                                Type *Ty) const {
  // AddrMode - This represents an addressing mode of
  //    BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
  //
  // The legal address modes are
  // - [avar]
  // - [areg]
  // - [areg+immoff]
  // - [immAddr]

  if (AM.BaseGV) {
    if (AM.BaseOffs || AM.HasBaseReg || AM.Scale)
      return false;
    return true;
  }

  switch (AM.Scale) {
  case 0: // "r", "r+i" or "i" is allowed
    break;
  case 1:
    if (AM.HasBaseReg) // "r+r+i" or "r+r" is not allowed.
      return false;
    // Otherwise we have r+i.
    break;
  default:
    // No scale > 1 is allowed
    return false;
  }
  return true;
}
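
// Examples (illustrative): "[gbl]" (avar) and "[%r1+8]" (areg+immoff) are
// accepted by the logic above, while a reg+reg base ("r+r") or any scaled
// index register is rejected.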
2586 //===----------------------------------------------------------------------===//
2587 // NVPTX Inline Assembly Support
2588 //===----------------------------------------------------------------------===//
2590 /// getConstraintType - Given a constraint letter, return the type of
2591 /// constraint it is for this target.
2592 NVPTXTargetLowering::ConstraintType
2593 NVPTXTargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default:
      break;
    case 'b':
    case 'r':
    case 'h':
    case 'c':
    case 'l':
    case 'f':
    case 'd':
    case '0':
    case 'N':
      return C_RegisterClass;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}
2613 std::pair<unsigned, const TargetRegisterClass *>
NVPTXTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                  MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'b':
      return std::make_pair(0U, &NVPTX::Int1RegsRegClass);
    case 'c':
      return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
    case 'h':
      return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
    case 'r':
      return std::make_pair(0U, &NVPTX::Int32RegsRegClass);
    case 'l':
    case 'N':
      return std::make_pair(0U, &NVPTX::Int64RegsRegClass);
    case 'f':
      return std::make_pair(0U, &NVPTX::Float32RegsRegClass);
    case 'd':
      return std::make_pair(0U, &NVPTX::Float64RegsRegClass);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}
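
// For example (illustrative), in CUDA inline assembly
//   asm("add.s32 %0, %1, %2;" : "=r"(res) : "r"(a), "r"(b));
// each 'r' constraint above maps to Int32RegsRegClass.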
2638 /// getFunctionAlignment - Return the Log2 alignment of this function.
unsigned NVPTXTargetLowering::getFunctionAlignment(const Function *) const {
  return 4;
}
2643 //===----------------------------------------------------------------------===//
2644 // NVPTX DAG Combining
2645 //===----------------------------------------------------------------------===//
2647 extern unsigned FMAContractLevel;
2649 /// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
2650 /// operands N0 and N1. This is a helper for PerformADDCombine that is
/// called with the default operands, and if that fails, with commuted
/// operands.
2653 static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
2654 TargetLowering::DAGCombinerInfo &DCI,
2655 const NVPTXSubtarget &Subtarget,
2656 CodeGenOpt::Level OptLevel) {
2657 SelectionDAG &DAG = DCI.DAG;
  // Skip non-integer, non-scalar case
  EVT VT = N0.getValueType();
  if (VT.isVector())
    return SDValue();
2663 // fold (add (mul a, b), c) -> (mad a, b, c)
2665 if (N0.getOpcode() == ISD::MUL) {
2666 assert (VT.isInteger());
2668 // Since integer multiply-add costs the same as integer multiply
2669 // but is more costly than integer add, do the fusion only when
2670 // the mul is only used in the add.
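    // e.g. in PTX (a sketch): "mul.lo.s32 %r3, %r1, %r2; add.s32 %r4, %r3, %r0"
    // folds to "mad.lo.s32 %r4, %r1, %r2, %r0" when %r3 has no other use.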
    if (OptLevel == CodeGenOpt::None || VT != MVT::i32 ||
        !N0.getNode()->hasOneUse())
      return SDValue();

2676 return DAG.getNode(NVPTXISD::IMAD, SDLoc(N), VT,
2677 N0.getOperand(0), N0.getOperand(1), N1);
2679 else if (N0.getOpcode() == ISD::FMUL) {
2680 if (VT == MVT::f32 || VT == MVT::f64) {
      if (FMAContractLevel == 0)
        return SDValue();

      // For floating point:
      // Do the fusion only when the mul has less than 5 uses and all uses
      // are add.
      // The heuristic is that if a use is not an add, then that use cannot
      // be fused into fma, therefore mul is still needed anyway. If there
      // are more than 4 uses, even if they are all add, fusing them will
      // increase register pressure.
      //
      int numUses = 0;
      int nonAddCount = 0;
      for (SDNode::use_iterator UI = N0.getNode()->use_begin(),
                                UE = N0.getNode()->use_end();
           UI != UE; ++UI) {
        numUses++;
        SDNode *User = *UI;
        if (User->getOpcode() != ISD::FADD)
          ++nonAddCount;
      }
      if (numUses >= 5)
        return SDValue();
      if (nonAddCount) {
        int orderNo = N->getIROrder();
        int orderNo2 = N0.getNode()->getIROrder();
        // Simple heuristic for potential register pressure: the difference
        // in IR order measures the distance between def and use, and the
        // longer the distance, the more likely it is to cause register
        // pressure.
        if (orderNo - orderNo2 < 500)
          return SDValue();
2714 // Now, check if at least one of the FMUL's operands is live beyond the node N,
2715 // which guarantees that the FMA will not increase register pressure at node N.
2716 bool opIsLive = false;
2717 const SDNode *left = N0.getOperand(0).getNode();
2718 const SDNode *right = N0.getOperand(1).getNode();
        if (isa<ConstantSDNode>(left) || isa<ConstantSDNode>(right))
          opIsLive = true;

        if (!opIsLive)
          for (SDNode::use_iterator UI = left->use_begin(),
                                    UE = left->use_end();
               UI != UE; ++UI) {
            SDNode *User = *UI;
            int orderNo3 = User->getIROrder();
            if (orderNo3 > orderNo) {
              opIsLive = true;
              break;
            }
          }

        if (!opIsLive)
          for (SDNode::use_iterator UI = right->use_begin(),
                                    UE = right->use_end();
               UI != UE; ++UI) {
            SDNode *User = *UI;
            int orderNo3 = User->getIROrder();
            if (orderNo3 > orderNo) {
              opIsLive = true;
              break;
            }
          }

        if (!opIsLive)
          return SDValue();
      }

      return DAG.getNode(ISD::FMA, SDLoc(N), VT,
                         N0.getOperand(0), N0.getOperand(1), N1);
    }
  }

  return SDValue();
}
2755 /// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
2757 static SDValue PerformADDCombine(SDNode *N,
2758 TargetLowering::DAGCombinerInfo &DCI,
2759 const NVPTXSubtarget &Subtarget,
2760 CodeGenOpt::Level OptLevel) {
2761 SDValue N0 = N->getOperand(0);
2762 SDValue N1 = N->getOperand(1);
2764 // First try with the default operand order.
  SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget,
                                                 OptLevel);
  if (Result.getNode())
    return Result;
2770 // If that didn't work, try again with the operands commuted.
2771 return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget, OptLevel);
2774 static SDValue PerformANDCombine(SDNode *N,
2775 TargetLowering::DAGCombinerInfo &DCI) {
2776 // The type legalizer turns a vector load of i8 values into a zextload to i16
2777 // registers, optionally ANY_EXTENDs it (if target type is integer),
2778 // and ANDs off the high 8 bits. Since we turn this load into a
2779 // target-specific DAG node, the DAG combiner fails to eliminate these AND
2780 // nodes. Do that here.
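  // e.g. (a sketch): in (and (any_extend (v2i8 NVPTXISD::LoadV2 ...)), 0xff)
  // the AND is redundant, since the lowered load already zero-extends each
  // i8 lane; the combine below removes it.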
2781 SDValue Val = N->getOperand(0);
2782 SDValue Mask = N->getOperand(1);
  if (isa<ConstantSDNode>(Val)) {
    std::swap(Val, Mask);
  }

  SDValue AExt;
  // Generally, we will see zextload -> IMOV16rr -> ANY_EXTEND -> and
  if (Val.getOpcode() == ISD::ANY_EXTEND) {
    AExt = Val;
    Val = Val->getOperand(0);
  }

  if (Val->isMachineOpcode() && Val->getMachineOpcode() == NVPTX::IMOV16rr) {
    Val = Val->getOperand(0);
  }
2799 if (Val->getOpcode() == NVPTXISD::LoadV2 ||
2800 Val->getOpcode() == NVPTXISD::LoadV4) {
    ConstantSDNode *MaskCnst = dyn_cast<ConstantSDNode>(Mask);
    if (!MaskCnst) {
      // Not an AND with a constant
      return SDValue();
    }

    uint64_t MaskVal = MaskCnst->getZExtValue();
    if (MaskVal != 0xff) {
      // Not an AND that chops off top 8 bits
      return SDValue();
    }

    MemSDNode *Mem = dyn_cast<MemSDNode>(Val);
    if (!Mem) {
      // Not a MemSDNode?!?
      return SDValue();
    }

    EVT MemVT = Mem->getMemoryVT();
    if (MemVT != MVT::v2i8 && MemVT != MVT::v4i8) {
      // We only handle the i8 case
      return SDValue();
    }

    unsigned ExtType =
        cast<ConstantSDNode>(Val->getOperand(Val->getNumOperands() - 1))->
            getZExtValue();
    if (ExtType == ISD::SEXTLOAD) {
      // If for some reason the load is a sextload, the and is needed to zero
      // out the high 8 bits
      return SDValue();
    }

    bool AddTo = true;
    if (AExt.getNode() != 0) {
      // Re-insert the ext as a zext.
      Val = DCI.DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N),
                            AExt.getValueType(), Val);
      AddTo = false;
    }

    // If we get here, the AND is unnecessary. Just replace it with the load.
    DCI.CombineTo(N, Val, AddTo);
  }

  return SDValue();
}
enum OperandSignedness {
  Signed = 0,
  Unsigned,
  Unknown
};
2855 /// IsMulWideOperandDemotable - Checks if the provided DAG node is an operand
2856 /// that can be demoted to \p OptSize bits without loss of information. The
2857 /// signedness of the operand, if determinable, is placed in \p S.
static bool IsMulWideOperandDemotable(SDValue Op,
                                      unsigned OptSize,
                                      OperandSignedness &S) {
  S = Unknown;

  if (Op.getOpcode() == ISD::SIGN_EXTEND ||
      Op.getOpcode() == ISD::SIGN_EXTEND_INREG) {
    EVT OrigVT = Op.getOperand(0).getValueType();
    if (OrigVT.getSizeInBits() == OptSize) {
      S = Signed;
      return true;
    }
  } else if (Op.getOpcode() == ISD::ZERO_EXTEND) {
    EVT OrigVT = Op.getOperand(0).getValueType();
    if (OrigVT.getSizeInBits() == OptSize) {
      S = Unsigned;
      return true;
    }
  }

  return false;
}
2881 /// AreMulWideOperandsDemotable - Checks if the given LHS and RHS operands can
2882 /// be demoted to \p OptSize bits without loss of information. If the operands
2883 /// contain a constant, it should appear as the RHS operand. The signedness of
2884 /// the operands is placed in \p IsSigned.
static bool AreMulWideOperandsDemotable(SDValue LHS, SDValue RHS,
                                        unsigned OptSize,
                                        bool &IsSigned) {
  OperandSignedness LHSSign;

  // The LHS operand must be a demotable op
  if (!IsMulWideOperandDemotable(LHS, OptSize, LHSSign))
    return false;

  // We should have been able to determine the signedness from the LHS
  if (LHSSign == Unknown)
    return false;

  IsSigned = (LHSSign == Signed);

  // The RHS can be a demotable op or a constant
  if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(RHS)) {
    APInt Val = CI->getAPIntValue();
    if (LHSSign == Unsigned) {
      if (Val.isIntN(OptSize)) {
        return true;
      }
      return false;
    } else {
      if (Val.isSignedIntN(OptSize)) {
        return true;
      }
      return false;
    }
  } else {
    OperandSignedness RHSSign;
    if (!IsMulWideOperandDemotable(RHS, OptSize, RHSSign))
      return false;

    if (LHSSign != RHSSign)
      return false;

    return true;
  }
}
2927 /// TryMULWIDECombine - Attempt to replace a multiply of M bits with a multiply
2928 /// of M/2 bits that produces an M-bit result (i.e. mul.wide). This transform
/// works on both multiply DAG nodes and SHL DAG nodes with a constant shift
/// amount.
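/// For example (a sketch): (mul (sext i16:%a to i32), (sext i16:%b to i32))
/// can become (MUL_WIDE_SIGNED %a, %b) : i32, which selects to the PTX
/// instruction "mul.wide.s16".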
2931 static SDValue TryMULWIDECombine(SDNode *N,
2932 TargetLowering::DAGCombinerInfo &DCI) {
  EVT MulType = N->getValueType(0);
  if (MulType != MVT::i32 && MulType != MVT::i64) {
    return SDValue();
  }

  unsigned OptSize = MulType.getSizeInBits() >> 1;
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  // Canonicalize the multiply so the constant (if any) is on the right
  if (N->getOpcode() == ISD::MUL) {
    if (isa<ConstantSDNode>(LHS)) {
      std::swap(LHS, RHS);
    }
  }

  // If we have a SHL, determine the actual multiply amount
  if (N->getOpcode() == ISD::SHL) {
    ConstantSDNode *ShlRHS = dyn_cast<ConstantSDNode>(RHS);
    if (!ShlRHS) {
      return SDValue();
    }

    APInt ShiftAmt = ShlRHS->getAPIntValue();
    unsigned BitWidth = MulType.getSizeInBits();
    if (ShiftAmt.sge(0) && ShiftAmt.slt(BitWidth)) {
      APInt MulVal = APInt(BitWidth, 1) << ShiftAmt;
      RHS = DCI.DAG.getConstant(MulVal, MulType);
    } else {
      return SDValue();
    }
  }

  bool Signed;
  // Verify that our operands are demotable
  if (!AreMulWideOperandsDemotable(LHS, RHS, OptSize, Signed)) {
    return SDValue();
  }

  EVT DemotedVT;
  if (MulType == MVT::i32) {
    DemotedVT = MVT::i16;
  } else {
    DemotedVT = MVT::i32;
  }

  // Truncate the operands to the correct size. Note that these are just for
  // type consistency and will (likely) be eliminated in later phases.
  SDValue TruncLHS =
      DCI.DAG.getNode(ISD::TRUNCATE, SDLoc(N), DemotedVT, LHS);
  SDValue TruncRHS =
      DCI.DAG.getNode(ISD::TRUNCATE, SDLoc(N), DemotedVT, RHS);

  unsigned Opc;
  if (Signed) {
    Opc = NVPTXISD::MUL_WIDE_SIGNED;
  } else {
    Opc = NVPTXISD::MUL_WIDE_UNSIGNED;
  }

  return DCI.DAG.getNode(Opc, SDLoc(N), MulType, TruncLHS, TruncRHS);
}
2996 /// PerformMULCombine - Runs PTX-specific DAG combine patterns on MUL nodes.
2997 static SDValue PerformMULCombine(SDNode *N,
2998 TargetLowering::DAGCombinerInfo &DCI,
2999 CodeGenOpt::Level OptLevel) {
  if (OptLevel > 0) {
    // Try mul.wide combining at OptLevel > 0
    SDValue Ret = TryMULWIDECombine(N, DCI);
    if (Ret.getNode())
      return Ret;
  }

  return SDValue();
}
3010 /// PerformSHLCombine - Runs PTX-specific DAG combine patterns on SHL nodes.
3011 static SDValue PerformSHLCombine(SDNode *N,
3012 TargetLowering::DAGCombinerInfo &DCI,
3013 CodeGenOpt::Level OptLevel) {
  if (OptLevel > 0) {
    // Try mul.wide combining at OptLevel > 0
    SDValue Ret = TryMULWIDECombine(N, DCI);
    if (Ret.getNode())
      return Ret;
  }

  return SDValue();
}
3024 SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
3025 DAGCombinerInfo &DCI) const {
3026 // FIXME: Get this from the DAG somehow
3027 CodeGenOpt::Level OptLevel = CodeGenOpt::Aggressive;
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::ADD:
  case ISD::FADD:
    return PerformADDCombine(N, DCI, nvptxSubtarget, OptLevel);
  case ISD::MUL:
    return PerformMULCombine(N, DCI, OptLevel);
  case ISD::SHL:
    return PerformSHLCombine(N, DCI, OptLevel);
  case ISD::AND:
    return PerformANDCombine(N, DCI);
  }
  return SDValue();
}
3043 /// ReplaceVectorLoad - Convert vector loads into multi-output scalar loads.
3044 static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
3045 SmallVectorImpl<SDValue> &Results) {
  EVT ResVT = N->getValueType(0);
  SDLoc DL(N);
3049 assert(ResVT.isVector() && "Vector load must have vector type");
3051 // We only handle "native" vector sizes for now, e.g. <4 x double> is not
3052 // legal. We can (and should) split that into 2 loads of <2 x double> here
3053 // but I'm leaving that as a TODO for now.
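// For example (a sketch): a legal "<2 x float> = load %p" becomes one
// NVPTXISD::LoadV2 node with two f32 results plus a chain, later selected to
// something like "ld.v2.f32 {%f1, %f2}, [%rd1];".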
3054 assert(ResVT.isSimple() && "Can only handle simple types");
  switch (ResVT.getSimpleVT().SimpleTy) {
  default:
    return;
  case MVT::v2i8:
  case MVT::v2i16:
  case MVT::v2i32:
  case MVT::v2i64:
  case MVT::v2f32:
  case MVT::v2f64:
  case MVT::v4i8:
  case MVT::v4i16:
  case MVT::v4i32:
  case MVT::v4f32:
    // This is a "native" vector type
    break;
  }
3072 EVT EltVT = ResVT.getVectorElementType();
3073 unsigned NumElts = ResVT.getVectorNumElements();
3075 // Since LoadV2 is a target node, we cannot rely on DAG type legalization.
3076 // Therefore, we must ensure the type is legal. For i1 and i8, we set the
3077 // loaded type to i16 and propagate the "real" type as the memory type.
  bool NeedTrunc = false;
  if (EltVT.getSizeInBits() < 16) {
    NeedTrunc = true;
    EltVT = MVT::i16;
  }

  unsigned Opcode = 0;
  SDVTList LdResVTs;

  switch (NumElts) {
  default:
    return;
  case 2:
    Opcode = NVPTXISD::LoadV2;
    LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
    break;
  case 4: {
    Opcode = NVPTXISD::LoadV4;
    EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
    LdResVTs = DAG.getVTList(ListVTs);
    break;
  }
  }
3102 SmallVector<SDValue, 8> OtherOps;
3104 // Copy regular operands
3105 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
3106 OtherOps.push_back(N->getOperand(i));
3108 LoadSDNode *LD = cast<LoadSDNode>(N);
3110 // The select routine does not have access to the LoadSDNode instance, so
3111 // pass along the extension information
3112 OtherOps.push_back(DAG.getIntPtrConstant(LD->getExtensionType()));
  SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps,
                                          LD->getMemoryVT(),
                                          LD->getMemOperand());
3118 SmallVector<SDValue, 4> ScalarRes;
3120 for (unsigned i = 0; i < NumElts; ++i) {
3121 SDValue Res = NewLD.getValue(i);
    if (NeedTrunc)
      Res = DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
3124 ScalarRes.push_back(Res);
3127 SDValue LoadChain = NewLD.getValue(NumElts);
3129 SDValue BuildVec = DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, ScalarRes);
3131 Results.push_back(BuildVec);
  Results.push_back(LoadChain);
}
3135 static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
3136 SmallVectorImpl<SDValue> &Results) {
3137 SDValue Chain = N->getOperand(0);
  SDValue Intrin = N->getOperand(1);
  SDLoc DL(N);

  // Get the intrinsic ID
  unsigned IntrinNo = cast<ConstantSDNode>(Intrin.getNode())->getZExtValue();
  switch (IntrinNo) {
  default:
    return;
3146 case Intrinsic::nvvm_ldg_global_i:
3147 case Intrinsic::nvvm_ldg_global_f:
3148 case Intrinsic::nvvm_ldg_global_p:
3149 case Intrinsic::nvvm_ldu_global_i:
3150 case Intrinsic::nvvm_ldu_global_f:
3151 case Intrinsic::nvvm_ldu_global_p: {
3152 EVT ResVT = N->getValueType(0);
3154 if (ResVT.isVector()) {
3157 unsigned NumElts = ResVT.getVectorNumElements();
3158 EVT EltVT = ResVT.getVectorElementType();
3160 // Since LDU/LDG are target nodes, we cannot rely on DAG type
3162 // Therefore, we must ensure the type is legal. For i1 and i8, we set the
3163 // loaded type to i16 and propagate the "real" type as the memory type.
      bool NeedTrunc = false;
      if (EltVT.getSizeInBits() < 16) {
        NeedTrunc = true;
        EltVT = MVT::i16;
      }

      unsigned Opcode = 0;
      SDVTList LdResVTs;

      switch (NumElts) {
      default:
        return;
      case 2:
        switch (IntrinNo) {
        default:
          return;
        case Intrinsic::nvvm_ldg_global_i:
        case Intrinsic::nvvm_ldg_global_f:
        case Intrinsic::nvvm_ldg_global_p:
          Opcode = NVPTXISD::LDGV2;
          break;
        case Intrinsic::nvvm_ldu_global_i:
        case Intrinsic::nvvm_ldu_global_f:
        case Intrinsic::nvvm_ldu_global_p:
          Opcode = NVPTXISD::LDUV2;
          break;
        }
        LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
        break;
      case 4: {
        switch (IntrinNo) {
        default:
          return;
        case Intrinsic::nvvm_ldg_global_i:
        case Intrinsic::nvvm_ldg_global_f:
        case Intrinsic::nvvm_ldg_global_p:
          Opcode = NVPTXISD::LDGV4;
          break;
        case Intrinsic::nvvm_ldu_global_i:
        case Intrinsic::nvvm_ldu_global_f:
        case Intrinsic::nvvm_ldu_global_p:
          Opcode = NVPTXISD::LDUV4;
          break;
        }
        EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
        LdResVTs = DAG.getVTList(ListVTs);
        break;
      }
      }
3214 SmallVector<SDValue, 8> OtherOps;
3216 // Copy regular operands
3218 OtherOps.push_back(Chain); // Chain
3219 // Skip operand 1 (intrinsic ID)
3221 for (unsigned i = 2, e = N->getNumOperands(); i != e; ++i)
3222 OtherOps.push_back(N->getOperand(i));
3224 MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);
3226 SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps,
3227 MemSD->getMemoryVT(),
3228 MemSD->getMemOperand());
3230 SmallVector<SDValue, 4> ScalarRes;
3232 for (unsigned i = 0; i < NumElts; ++i) {
3233 SDValue Res = NewLD.getValue(i);
        if (NeedTrunc)
          Res =
              DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
3237 ScalarRes.push_back(Res);
3240 SDValue LoadChain = NewLD.getValue(NumElts);
      SDValue BuildVec =
          DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, ScalarRes);
3245 Results.push_back(BuildVec);
      Results.push_back(LoadChain);
    } else {
3249 assert(ResVT.isSimple() && ResVT.getSimpleVT().SimpleTy == MVT::i8 &&
3250 "Custom handling of non-i8 ldu/ldg?");
3252 // Just copy all operands as-is
3253 SmallVector<SDValue, 4> Ops;
3254 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
3255 Ops.push_back(N->getOperand(i));
3257 // Force output to i16
3258 SDVTList LdResVTs = DAG.getVTList(MVT::i16, MVT::Other);
3260 MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);
3262 // We make sure the memory type is i8, which will be used during isel
3263 // to select the proper instruction.
      SDValue NewLD =
          DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, LdResVTs, Ops,
                                  MVT::i8, MemSD->getMemOperand());
3268 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
3269 NewLD.getValue(0)));
      Results.push_back(NewLD.getValue(1));
    }
  }
  }
}
3276 void NVPTXTargetLowering::ReplaceNodeResults(
3277 SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  default:
    report_fatal_error("Unhandled custom legalization");
  case ISD::LOAD:
    ReplaceLoadVector(N, DAG, Results);
    return;
  case ISD::INTRINSIC_W_CHAIN:
    ReplaceINTRINSIC_W_CHAIN(N, DAG, Results);
    return;
  }
}
3290 // Pin NVPTXSection's and NVPTXTargetObjectFile's vtables to this file.
3291 void NVPTXSection::anchor() {}
NVPTXTargetObjectFile::~NVPTXTargetObjectFile() {
  delete TextSection;
  delete DataSection;
  delete BSSSection;
  delete ReadOnlySection;
3299 delete StaticCtorSection;
  delete StaticDtorSection;
  delete LSDASection;
  delete EHFrameSection;
3303 delete DwarfAbbrevSection;
3304 delete DwarfInfoSection;
3305 delete DwarfLineSection;
3306 delete DwarfFrameSection;
3307 delete DwarfPubTypesSection;
3308 delete DwarfDebugInlineSection;
3309 delete DwarfStrSection;
3310 delete DwarfLocSection;
3311 delete DwarfARangesSection;
3312 delete DwarfRangesSection;
3313 delete DwarfMacroInfoSection;