2 // The LLVM Compiler Infrastructure
4 // This file is distributed under the University of Illinois Open Source
5 // License. See LICENSE.TXT for details.
7 //===----------------------------------------------------------------------===//
9 // This file defines the interfaces that NVPTX uses to lower LLVM code into a
12 //===----------------------------------------------------------------------===//
14 #include "NVPTXISelLowering.h"
16 #include "NVPTXTargetMachine.h"
17 #include "NVPTXTargetObjectFile.h"
18 #include "NVPTXUtilities.h"
19 #include "llvm/CodeGen/Analysis.h"
20 #include "llvm/CodeGen/MachineFrameInfo.h"
21 #include "llvm/CodeGen/MachineFunction.h"
22 #include "llvm/CodeGen/MachineInstrBuilder.h"
23 #include "llvm/CodeGen/MachineRegisterInfo.h"
24 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
25 #include "llvm/IR/CallSite.h"
26 #include "llvm/IR/DerivedTypes.h"
27 #include "llvm/IR/Function.h"
28 #include "llvm/IR/GlobalValue.h"
29 #include "llvm/IR/IntrinsicInst.h"
30 #include "llvm/IR/Intrinsics.h"
31 #include "llvm/IR/Module.h"
32 #include "llvm/MC/MCSectionELF.h"
33 #include "llvm/Support/CommandLine.h"
34 #include "llvm/Support/Debug.h"
35 #include "llvm/Support/ErrorHandling.h"
36 #include "llvm/Support/raw_ostream.h"
40 #define DEBUG_TYPE "nvptx-lower"
// Monotonically increasing id used to name the emitted call prototypes
// ("prototype_<n>" — see getPrototype below) and passed to CALLSEQ_START in
// LowerCall. Presumably incremented once per lowered call site — the
// increment is outside this excerpt; confirm.
static unsigned int uniqueCallSite = 0;

// Command-line switch selecting register-pressure scheduling for NVPTX
// (consumed in the constructor's setSchedulingPreference choice below).
// NOTE(review): the option-name argument line is elided in this excerpt.
// NOTE(review): "pressue" in the help string is a typo for "pressure" —
// user-visible text, fix in a behavior-affecting change, not here.
static cl::opt<bool> sched4reg(
cl::desc("NVPTX Specific: schedule for register pressue"), cl::init(false));
// Returns true for the simple vector MVTs that get custom LOAD/STORE/
// INTRINSIC_W_CHAIN handling in the constructor's vector-type loop below.
// NOTE(review): the switch body (accepted cases) is elided in this excerpt.
static bool IsPTXVectorType(MVT VT) {
switch (VT.SimpleTy) {
// Greatest common divisor; used below (LowerCall) as GCD(align, offset) to
// derive the alignment of an aggregate element at a given byte offset.
// NOTE(review): parameters are signed int while callers pass unsigned
// alignments and uint64_t offsets — potential truncation for offsets
// >= 2^31; confirm and widen if reachable.
// NOTE(review): loop body and return are elided in this excerpt.
static uint64_t GCD( int a, int b)
{
if (a < b) std::swap(a,b);
/// ComputePTXValueVTs - For the given Type \p Ty, returns the set of primitive
/// EVTs that compose it. Unlike ComputeValueVTs, this will break apart vectors
/// into their primitive components.
/// NOTE: This is a band-aid for code that expects ComputeValueVTs to return the
/// same number of types as the Ins/Outs arrays in LowerFormalArguments,
/// LowerCall, and LowerReturn.
/// \param ValueVTs  [out] one EVT per scalarized component of \p Ty.
/// \param Offsets   [out, optional] byte offset of each component relative to
///                  \p StartingOffset; only filled when non-null.
// NOTE(review): this excerpt elides several lines (the per-part `VT`
// declaration, the isVector() guard, the null-Offsets checks, and closing
// braces); comments annotate only what is visible.
static void ComputePTXValueVTs(const TargetLowering &TLI, Type *Ty,
                               SmallVectorImpl<EVT> &ValueVTs,
                               SmallVectorImpl<uint64_t> *Offsets = nullptr,
                               uint64_t StartingOffset = 0) {
SmallVector<EVT, 16> TempVTs;
SmallVector<uint64_t, 16> TempOffsets;
// First flatten the type with the generic helper, then split any vector
// parts element-by-element below.
ComputeValueVTs(TLI, Ty, TempVTs, &TempOffsets, StartingOffset);
for (unsigned i = 0, e = TempVTs.size(); i != e; ++i) {
uint64_t Off = TempOffsets[i];
// Vector part: emit one entry per element; offsets advance by the
// element's store size so adjacent elements pack contiguously.
for (unsigned j = 0, je = VT.getVectorNumElements(); j != je; ++j) {
ValueVTs.push_back(VT.getVectorElementType());
Offsets->push_back(Off+j*VT.getVectorElementType().getStoreSize());
// Scalar part: pass the EVT and its offset through unchanged.
ValueVTs.push_back(VT);
Offsets->push_back(Off);
// NVPTXTargetLowering Constructor.
// Configures legality/actions of DAG operations for the NVPTX target:
// register classes, expanded/custom operations, load-extension and
// truncating-store rules, and target DAG combines.
// NOTE(review): this excerpt elides some lines (the sched4reg guard around
// the scheduling-preference calls, else-branches of the rotate blocks, the
// vector loop's increment/closing braces, and the function's closing
// brace); comments annotate only what is visible.
NVPTXTargetLowering::NVPTXTargetLowering(NVPTXTargetMachine &TM)
    : TargetLowering(TM, new NVPTXTargetObjectFile()), nvTM(&TM),
      nvptxSubtarget(TM.getSubtarget<NVPTXSubtarget>()) {

  // Always lower memset, memcpy, and memmove intrinsics to load/store
  // instructions rather than generating calls to memset, memcpy or memmove:
  // the thresholds are effectively unlimited.
  MaxStoresPerMemset = (unsigned) 0xFFFFFFFF;
  MaxStoresPerMemcpy = (unsigned) 0xFFFFFFFF;
  MaxStoresPerMemmove = (unsigned) 0xFFFFFFFF;

  setBooleanContents(ZeroOrNegativeOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // Jump is Expensive. Don't create extra control flow for 'and', 'or'
  // condition branches.
  setJumpIsExpensive(true);

  // By default, use the Source scheduling; register-pressure scheduling is
  // selected via the sched4reg option (guard elided in this excerpt).
  setSchedulingPreference(Sched::RegPressure);
  setSchedulingPreference(Sched::Source);

  // One register class per scalar PTX register kind.
  addRegisterClass(MVT::i1, &NVPTX::Int1RegsRegClass);
  addRegisterClass(MVT::i16, &NVPTX::Int16RegsRegClass);
  addRegisterClass(MVT::i32, &NVPTX::Int32RegsRegClass);
  addRegisterClass(MVT::i64, &NVPTX::Int64RegsRegClass);
  addRegisterClass(MVT::f32, &NVPTX::Float32RegsRegClass);
  addRegisterClass(MVT::f64, &NVPTX::Float64RegsRegClass);

  // Operations not directly supported by NVPTX.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i8, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);
  setOperationAction(ISD::BR_CC, MVT::i1, Expand);
  setOperationAction(ISD::BR_CC, MVT::i8, Expand);
  setOperationAction(ISD::BR_CC, MVT::i16, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);

  // Some SIGN_EXTEND_INREG can be done using cvt instruction.
  // For others we will expand to a SHL/SRA pair.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i64, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // Multi-word shifts get custom lowering (funnel-shift based, see the
  // FUN_SHFL/SHFR_CLAMP nodes).
  setOperationAction(ISD::SHL_PARTS, MVT::i32 , Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32 , Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32 , Custom);
  setOperationAction(ISD::SHL_PARTS, MVT::i64 , Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i64 , Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i64 , Custom);

  // Rotates are legal only when the subtarget has the instruction; the
  // Expand calls below are the (elided) else-branches.
  if (nvptxSubtarget.hasROT64()) {
    setOperationAction(ISD::ROTL, MVT::i64, Legal);
    setOperationAction(ISD::ROTR, MVT::i64, Legal);
    setOperationAction(ISD::ROTL, MVT::i64, Expand);
    setOperationAction(ISD::ROTR, MVT::i64, Expand);
  if (nvptxSubtarget.hasROT32()) {
    setOperationAction(ISD::ROTL, MVT::i32, Legal);
    setOperationAction(ISD::ROTR, MVT::i32, Legal);
    setOperationAction(ISD::ROTL, MVT::i32, Expand);
    setOperationAction(ISD::ROTR, MVT::i32, Expand);

  setOperationAction(ISD::ROTL, MVT::i16, Expand);
  setOperationAction(ISD::ROTR, MVT::i16, Expand);
  setOperationAction(ISD::ROTL, MVT::i8, Expand);
  setOperationAction(ISD::ROTR, MVT::i8, Expand);
  setOperationAction(ISD::BSWAP, MVT::i16, Expand);
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i64, Expand);

  // Indirect branch is not supported.
  // This also disables Jump Table creation.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRIND, MVT::Other, Expand);

  // Global addresses are wrapped via NVPTXISD::Wrapper (LowerGlobalAddress).
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);

  // We want to legalize constant related memmove and memcopy
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);

  // Turn FP extload into load/fextend
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
  // Turn FP truncstore into trunc + store.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PTX does not support load / store predicate registers
  setOperationAction(ISD::LOAD, MVT::i1, Custom);
  setOperationAction(ISD::STORE, MVT::i1, Custom);

  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setTruncStoreAction(MVT::i64, MVT::i1, Expand);
  setTruncStoreAction(MVT::i32, MVT::i1, Expand);
  setTruncStoreAction(MVT::i16, MVT::i1, Expand);
  setTruncStoreAction(MVT::i8, MVT::i1, Expand);

  // This is legal in NVPTX
  setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);

  // TRAP can be lowered to PTX trap
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  setOperationAction(ISD::ADDC, MVT::i64, Expand);
  setOperationAction(ISD::ADDE, MVT::i64, Expand);

  // Register custom handling for the vector loads/stores that PTX supports
  // natively (see IsPTXVectorType above). Loop increment/braces elided.
  for (int i = MVT::FIRST_VECTOR_VALUETYPE; i <= MVT::LAST_VECTOR_VALUETYPE;
    MVT VT = (MVT::SimpleValueType) i;
    if (IsPTXVectorType(VT)) {
      setOperationAction(ISD::LOAD, VT, Custom);
      setOperationAction(ISD::STORE, VT, Custom);
      setOperationAction(ISD::INTRINSIC_W_CHAIN, VT, Custom);

  // Custom handling for i8 intrinsics
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);

  // PTX has clz/popc; cttz must be expanded.
  setOperationAction(ISD::CTLZ, MVT::i16, Legal);
  setOperationAction(ISD::CTLZ, MVT::i32, Legal);
  setOperationAction(ISD::CTLZ, MVT::i64, Legal);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Legal);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Legal);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Legal);
  setOperationAction(ISD::CTTZ, MVT::i16, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i64, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  setOperationAction(ISD::CTPOP, MVT::i16, Legal);
  setOperationAction(ISD::CTPOP, MVT::i32, Legal);
  setOperationAction(ISD::CTPOP, MVT::i64, Legal);

  // We have some custom DAG combine patterns for these nodes
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::SHL);

  // Now deduce the information based on the above mentioned
  computeRegisterProperties();
277 const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const {
282 return "NVPTXISD::CALL";
283 case NVPTXISD::RET_FLAG:
284 return "NVPTXISD::RET_FLAG";
285 case NVPTXISD::Wrapper:
286 return "NVPTXISD::Wrapper";
287 case NVPTXISD::DeclareParam:
288 return "NVPTXISD::DeclareParam";
289 case NVPTXISD::DeclareScalarParam:
290 return "NVPTXISD::DeclareScalarParam";
291 case NVPTXISD::DeclareRet:
292 return "NVPTXISD::DeclareRet";
293 case NVPTXISD::DeclareRetParam:
294 return "NVPTXISD::DeclareRetParam";
295 case NVPTXISD::PrintCall:
296 return "NVPTXISD::PrintCall";
297 case NVPTXISD::LoadParam:
298 return "NVPTXISD::LoadParam";
299 case NVPTXISD::LoadParamV2:
300 return "NVPTXISD::LoadParamV2";
301 case NVPTXISD::LoadParamV4:
302 return "NVPTXISD::LoadParamV4";
303 case NVPTXISD::StoreParam:
304 return "NVPTXISD::StoreParam";
305 case NVPTXISD::StoreParamV2:
306 return "NVPTXISD::StoreParamV2";
307 case NVPTXISD::StoreParamV4:
308 return "NVPTXISD::StoreParamV4";
309 case NVPTXISD::StoreParamS32:
310 return "NVPTXISD::StoreParamS32";
311 case NVPTXISD::StoreParamU32:
312 return "NVPTXISD::StoreParamU32";
313 case NVPTXISD::CallArgBegin:
314 return "NVPTXISD::CallArgBegin";
315 case NVPTXISD::CallArg:
316 return "NVPTXISD::CallArg";
317 case NVPTXISD::LastCallArg:
318 return "NVPTXISD::LastCallArg";
319 case NVPTXISD::CallArgEnd:
320 return "NVPTXISD::CallArgEnd";
321 case NVPTXISD::CallVoid:
322 return "NVPTXISD::CallVoid";
323 case NVPTXISD::CallVal:
324 return "NVPTXISD::CallVal";
325 case NVPTXISD::CallSymbol:
326 return "NVPTXISD::CallSymbol";
327 case NVPTXISD::Prototype:
328 return "NVPTXISD::Prototype";
329 case NVPTXISD::MoveParam:
330 return "NVPTXISD::MoveParam";
331 case NVPTXISD::StoreRetval:
332 return "NVPTXISD::StoreRetval";
333 case NVPTXISD::StoreRetvalV2:
334 return "NVPTXISD::StoreRetvalV2";
335 case NVPTXISD::StoreRetvalV4:
336 return "NVPTXISD::StoreRetvalV4";
337 case NVPTXISD::PseudoUseParam:
338 return "NVPTXISD::PseudoUseParam";
339 case NVPTXISD::RETURN:
340 return "NVPTXISD::RETURN";
341 case NVPTXISD::CallSeqBegin:
342 return "NVPTXISD::CallSeqBegin";
343 case NVPTXISD::CallSeqEnd:
344 return "NVPTXISD::CallSeqEnd";
345 case NVPTXISD::CallPrototype:
346 return "NVPTXISD::CallPrototype";
347 case NVPTXISD::LoadV2:
348 return "NVPTXISD::LoadV2";
349 case NVPTXISD::LoadV4:
350 return "NVPTXISD::LoadV4";
351 case NVPTXISD::LDGV2:
352 return "NVPTXISD::LDGV2";
353 case NVPTXISD::LDGV4:
354 return "NVPTXISD::LDGV4";
355 case NVPTXISD::LDUV2:
356 return "NVPTXISD::LDUV2";
357 case NVPTXISD::LDUV4:
358 return "NVPTXISD::LDUV4";
359 case NVPTXISD::StoreV2:
360 return "NVPTXISD::StoreV2";
361 case NVPTXISD::StoreV4:
362 return "NVPTXISD::StoreV4";
363 case NVPTXISD::FUN_SHFL_CLAMP:
364 return "NVPTXISD::FUN_SHFL_CLAMP";
365 case NVPTXISD::FUN_SHFR_CLAMP:
366 return "NVPTXISD::FUN_SHFR_CLAMP";
368 return "NVPTXISD::IMAD";
369 case NVPTXISD::MUL_WIDE_SIGNED:
370 return "NVPTXISD::MUL_WIDE_SIGNED";
371 case NVPTXISD::MUL_WIDE_UNSIGNED:
372 return "NVPTXISD::MUL_WIDE_UNSIGNED";
373 case NVPTXISD::Tex1DFloatI32: return "NVPTXISD::Tex1DFloatI32";
374 case NVPTXISD::Tex1DFloatFloat: return "NVPTXISD::Tex1DFloatFloat";
375 case NVPTXISD::Tex1DFloatFloatLevel:
376 return "NVPTXISD::Tex1DFloatFloatLevel";
377 case NVPTXISD::Tex1DFloatFloatGrad:
378 return "NVPTXISD::Tex1DFloatFloatGrad";
379 case NVPTXISD::Tex1DI32I32: return "NVPTXISD::Tex1DI32I32";
380 case NVPTXISD::Tex1DI32Float: return "NVPTXISD::Tex1DI32Float";
381 case NVPTXISD::Tex1DI32FloatLevel:
382 return "NVPTXISD::Tex1DI32FloatLevel";
383 case NVPTXISD::Tex1DI32FloatGrad:
384 return "NVPTXISD::Tex1DI32FloatGrad";
385 case NVPTXISD::Tex1DArrayFloatI32: return "NVPTXISD::Tex2DArrayFloatI32";
386 case NVPTXISD::Tex1DArrayFloatFloat: return "NVPTXISD::Tex2DArrayFloatFloat";
387 case NVPTXISD::Tex1DArrayFloatFloatLevel:
388 return "NVPTXISD::Tex2DArrayFloatFloatLevel";
389 case NVPTXISD::Tex1DArrayFloatFloatGrad:
390 return "NVPTXISD::Tex2DArrayFloatFloatGrad";
391 case NVPTXISD::Tex1DArrayI32I32: return "NVPTXISD::Tex2DArrayI32I32";
392 case NVPTXISD::Tex1DArrayI32Float: return "NVPTXISD::Tex2DArrayI32Float";
393 case NVPTXISD::Tex1DArrayI32FloatLevel:
394 return "NVPTXISD::Tex2DArrayI32FloatLevel";
395 case NVPTXISD::Tex1DArrayI32FloatGrad:
396 return "NVPTXISD::Tex2DArrayI32FloatGrad";
397 case NVPTXISD::Tex2DFloatI32: return "NVPTXISD::Tex2DFloatI32";
398 case NVPTXISD::Tex2DFloatFloat: return "NVPTXISD::Tex2DFloatFloat";
399 case NVPTXISD::Tex2DFloatFloatLevel:
400 return "NVPTXISD::Tex2DFloatFloatLevel";
401 case NVPTXISD::Tex2DFloatFloatGrad:
402 return "NVPTXISD::Tex2DFloatFloatGrad";
403 case NVPTXISD::Tex2DI32I32: return "NVPTXISD::Tex2DI32I32";
404 case NVPTXISD::Tex2DI32Float: return "NVPTXISD::Tex2DI32Float";
405 case NVPTXISD::Tex2DI32FloatLevel:
406 return "NVPTXISD::Tex2DI32FloatLevel";
407 case NVPTXISD::Tex2DI32FloatGrad:
408 return "NVPTXISD::Tex2DI32FloatGrad";
409 case NVPTXISD::Tex2DArrayFloatI32: return "NVPTXISD::Tex2DArrayFloatI32";
410 case NVPTXISD::Tex2DArrayFloatFloat: return "NVPTXISD::Tex2DArrayFloatFloat";
411 case NVPTXISD::Tex2DArrayFloatFloatLevel:
412 return "NVPTXISD::Tex2DArrayFloatFloatLevel";
413 case NVPTXISD::Tex2DArrayFloatFloatGrad:
414 return "NVPTXISD::Tex2DArrayFloatFloatGrad";
415 case NVPTXISD::Tex2DArrayI32I32: return "NVPTXISD::Tex2DArrayI32I32";
416 case NVPTXISD::Tex2DArrayI32Float: return "NVPTXISD::Tex2DArrayI32Float";
417 case NVPTXISD::Tex2DArrayI32FloatLevel:
418 return "NVPTXISD::Tex2DArrayI32FloatLevel";
419 case NVPTXISD::Tex2DArrayI32FloatGrad:
420 return "NVPTXISD::Tex2DArrayI32FloatGrad";
421 case NVPTXISD::Tex3DFloatI32: return "NVPTXISD::Tex3DFloatI32";
422 case NVPTXISD::Tex3DFloatFloat: return "NVPTXISD::Tex3DFloatFloat";
423 case NVPTXISD::Tex3DFloatFloatLevel:
424 return "NVPTXISD::Tex3DFloatFloatLevel";
425 case NVPTXISD::Tex3DFloatFloatGrad:
426 return "NVPTXISD::Tex3DFloatFloatGrad";
427 case NVPTXISD::Tex3DI32I32: return "NVPTXISD::Tex3DI32I32";
428 case NVPTXISD::Tex3DI32Float: return "NVPTXISD::Tex3DI32Float";
429 case NVPTXISD::Tex3DI32FloatLevel:
430 return "NVPTXISD::Tex3DI32FloatLevel";
431 case NVPTXISD::Tex3DI32FloatGrad:
432 return "NVPTXISD::Tex3DI32FloatGrad";
434 case NVPTXISD::Suld1DI8Trap: return "NVPTXISD::Suld1DI8Trap";
435 case NVPTXISD::Suld1DI16Trap: return "NVPTXISD::Suld1DI16Trap";
436 case NVPTXISD::Suld1DI32Trap: return "NVPTXISD::Suld1DI32Trap";
437 case NVPTXISD::Suld1DV2I8Trap: return "NVPTXISD::Suld1DV2I8Trap";
438 case NVPTXISD::Suld1DV2I16Trap: return "NVPTXISD::Suld1DV2I16Trap";
439 case NVPTXISD::Suld1DV2I32Trap: return "NVPTXISD::Suld1DV2I32Trap";
440 case NVPTXISD::Suld1DV4I8Trap: return "NVPTXISD::Suld1DV4I8Trap";
441 case NVPTXISD::Suld1DV4I16Trap: return "NVPTXISD::Suld1DV4I16Trap";
442 case NVPTXISD::Suld1DV4I32Trap: return "NVPTXISD::Suld1DV4I32Trap";
444 case NVPTXISD::Suld1DArrayI8Trap: return "NVPTXISD::Suld1DArrayI8Trap";
445 case NVPTXISD::Suld1DArrayI16Trap: return "NVPTXISD::Suld1DArrayI16Trap";
446 case NVPTXISD::Suld1DArrayI32Trap: return "NVPTXISD::Suld1DArrayI32Trap";
447 case NVPTXISD::Suld1DArrayV2I8Trap: return "NVPTXISD::Suld1DArrayV2I8Trap";
448 case NVPTXISD::Suld1DArrayV2I16Trap: return "NVPTXISD::Suld1DArrayV2I16Trap";
449 case NVPTXISD::Suld1DArrayV2I32Trap: return "NVPTXISD::Suld1DArrayV2I32Trap";
450 case NVPTXISD::Suld1DArrayV4I8Trap: return "NVPTXISD::Suld1DArrayV4I8Trap";
451 case NVPTXISD::Suld1DArrayV4I16Trap: return "NVPTXISD::Suld1DArrayV4I16Trap";
452 case NVPTXISD::Suld1DArrayV4I32Trap: return "NVPTXISD::Suld1DArrayV4I32Trap";
454 case NVPTXISD::Suld2DI8Trap: return "NVPTXISD::Suld2DI8Trap";
455 case NVPTXISD::Suld2DI16Trap: return "NVPTXISD::Suld2DI16Trap";
456 case NVPTXISD::Suld2DI32Trap: return "NVPTXISD::Suld2DI32Trap";
457 case NVPTXISD::Suld2DV2I8Trap: return "NVPTXISD::Suld2DV2I8Trap";
458 case NVPTXISD::Suld2DV2I16Trap: return "NVPTXISD::Suld2DV2I16Trap";
459 case NVPTXISD::Suld2DV2I32Trap: return "NVPTXISD::Suld2DV2I32Trap";
460 case NVPTXISD::Suld2DV4I8Trap: return "NVPTXISD::Suld2DV4I8Trap";
461 case NVPTXISD::Suld2DV4I16Trap: return "NVPTXISD::Suld2DV4I16Trap";
462 case NVPTXISD::Suld2DV4I32Trap: return "NVPTXISD::Suld2DV4I32Trap";
464 case NVPTXISD::Suld2DArrayI8Trap: return "NVPTXISD::Suld2DArrayI8Trap";
465 case NVPTXISD::Suld2DArrayI16Trap: return "NVPTXISD::Suld2DArrayI16Trap";
466 case NVPTXISD::Suld2DArrayI32Trap: return "NVPTXISD::Suld2DArrayI32Trap";
467 case NVPTXISD::Suld2DArrayV2I8Trap: return "NVPTXISD::Suld2DArrayV2I8Trap";
468 case NVPTXISD::Suld2DArrayV2I16Trap: return "NVPTXISD::Suld2DArrayV2I16Trap";
469 case NVPTXISD::Suld2DArrayV2I32Trap: return "NVPTXISD::Suld2DArrayV2I32Trap";
470 case NVPTXISD::Suld2DArrayV4I8Trap: return "NVPTXISD::Suld2DArrayV4I8Trap";
471 case NVPTXISD::Suld2DArrayV4I16Trap: return "NVPTXISD::Suld2DArrayV4I16Trap";
472 case NVPTXISD::Suld2DArrayV4I32Trap: return "NVPTXISD::Suld2DArrayV4I32Trap";
474 case NVPTXISD::Suld3DI8Trap: return "NVPTXISD::Suld3DI8Trap";
475 case NVPTXISD::Suld3DI16Trap: return "NVPTXISD::Suld3DI16Trap";
476 case NVPTXISD::Suld3DI32Trap: return "NVPTXISD::Suld3DI32Trap";
477 case NVPTXISD::Suld3DV2I8Trap: return "NVPTXISD::Suld3DV2I8Trap";
478 case NVPTXISD::Suld3DV2I16Trap: return "NVPTXISD::Suld3DV2I16Trap";
479 case NVPTXISD::Suld3DV2I32Trap: return "NVPTXISD::Suld3DV2I32Trap";
480 case NVPTXISD::Suld3DV4I8Trap: return "NVPTXISD::Suld3DV4I8Trap";
481 case NVPTXISD::Suld3DV4I16Trap: return "NVPTXISD::Suld3DV4I16Trap";
482 case NVPTXISD::Suld3DV4I32Trap: return "NVPTXISD::Suld3DV4I32Trap";
// Ask the legalizer to split (rather than widen) illegal vector types whose
// scalar element is i1 — PTX predicate registers cannot be loaded/stored
// (see the i1 LOAD/STORE custom handling in the constructor), so widening
// i1 vectors would not help. Closing brace elided in this excerpt.
bool NVPTXTargetLowering::shouldSplitVectorType(EVT VT) const {
  return VT.getScalarType() == MVT::i1;
// Lower an ISD::GlobalAddress node: rewrap the global as a target global
// address inside an NVPTXISD::Wrapper node at pointer width.
// NOTE(review): the return-type line and the `SDLoc dl` declaration are
// elided in this excerpt (`dl` is used but not visible).
NVPTXTargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  Op = DAG.getTargetGlobalAddress(GV, dl, getPointerTy());
  return DAG.getNode(NVPTXISD::Wrapper, dl, getPointerTy(), Op);
// Build the textual PTX ".callprototype" declaration for a call site:
// "prototype_<n> : .callprototype <ret> _ (<params>);". Used for indirect
// calls, where the callee's signature must be spelled out explicitly.
// \param retTy        IR return type of the callee.
// \param Args         call arguments (IR types).
// \param Outs         lowered output args; indexed in parallel with Args.
// \param retAlignment alignment for aggregate/vector returns.
// \param CS           call site, used to read per-argument align metadata.
// NOTE(review): this excerpt elides several lines (the return-type line,
// stream setup, some else/closing braces, and the trailing ");" emission);
// comments annotate only what is visible.
NVPTXTargetLowering::getPrototype(Type *retTy, const ArgListTy &Args,
                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  unsigned retAlignment,
                                  const ImmutableCallSite *CS) const {

  // Only the PTX ABI path (sm_20+) is supported.
  bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
  assert(isABI && "Non-ABI compilation is not supported");

  O << "prototype_" << uniqueCallSite << " : .callprototype ";

  // --- Return-value clause ---
  if (retTy->getTypeID() == Type::VoidTyID) {
  if (retTy->isFloatingPointTy() || retTy->isIntegerTy()) {
    // Scalar return: .param .b<bits> _ (bit width from the integer type or
    // the FP primitive size).
    if (const IntegerType *ITy = dyn_cast<IntegerType>(retTy)) {
      size = ITy->getBitWidth();
      assert(retTy->isFloatingPointTy() &&
             "Floating point type expected here");
      size = retTy->getPrimitiveSizeInBits();
    O << ".param .b" << size << " _";
  } else if (isa<PointerType>(retTy)) {
    // Pointer return: parameter sized to the target pointer width.
    O << ".param .b" << getPointerTy().getSizeInBits() << " _";
    // Aggregate/vector return: byte array with explicit alignment.
    if ((retTy->getTypeID() == Type::StructTyID) ||
        isa<VectorType>(retTy)) {
      O << ".param .align "
        << getDataLayout()->getTypeAllocSize(retTy) << "]";
      assert(false && "Unknown return type");

  // --- Parameter clauses (one per IR argument; OIdx tracks Outs) ---
  MVT thePointerTy = getPointerTy();

  for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {
    Type *Ty = Args[i].Ty;
    if (Outs[OIdx].Flags.isByVal() == false) {
      if (Ty->isAggregateType() || Ty->isVectorTy()) {
        // Aggregate/vector passed by value: emit a byte array whose
        // alignment comes from call-site "align" metadata when present,
        // else from the ABI alignment of the type.
        const CallInst *CallI = cast<CallInst>(CS->getInstruction());
        const DataLayout *TD = getDataLayout();
        // +1 because index 0 is reserved for return type alignment
        if (!llvm::getAlign(*CallI, i + 1, align))
          align = TD->getABITypeAlignment(Ty);
        unsigned sz = TD->getTypeAllocSize(Ty);
        O << ".param .align " << align << " .b8 ";
        O << "[" << sz << "]";
        // update the index for Outs: an aggregate expands to multiple
        // entries in Outs, so OIdx must skip past all of its parts.
        SmallVector<EVT, 16> vtparts;
        ComputeValueVTs(*this, Ty, vtparts);
        if (unsigned len = vtparts.size())
      // Scalar (int/FP/pointer) parameter.
      // i8 types in IR will be i16 types in SDAG
      assert((getValueType(Ty) == Outs[OIdx].VT ||
              (getValueType(Ty) == MVT::i8 && Outs[OIdx].VT == MVT::i16)) &&
             "type mismatch between callee prototype and arguments");
      if (isa<IntegerType>(Ty)) {
        sz = cast<IntegerType>(Ty)->getBitWidth();
      } else if (isa<PointerType>(Ty))
        sz = thePointerTy.getSizeInBits();
        sz = Ty->getPrimitiveSizeInBits();
      O << ".param .b" << sz << " ";
    // byval pointer parameter: described as a byte array of the pointee,
    // using the byval size/alignment recorded in Outs flags.
    const PointerType *PTy = dyn_cast<PointerType>(Ty);
    assert(PTy && "Param with byval attribute should be a pointer type");
    Type *ETy = PTy->getElementType();

    unsigned align = Outs[OIdx].Flags.getByValAlign();
    unsigned sz = getDataLayout()->getTypeAllocSize(ETy);
    O << ".param .align " << align << " .b8 ";
    O << "[" << sz << "]";
// Determine the alignment to use for call argument/return slot \p Idx:
//   1. "align" metadata on the call instruction itself, if present;
//   2. otherwise, looking through constant-expression casts to find the
//      ultimate callee Function and its "align" metadata;
//   3. otherwise, the ABI alignment of the type.
// Idx 0 refers to the return value; arguments start at 1 (see the "+1"
// comment in getPrototype).
// NOTE(review): this excerpt elides the return-type line, the `Type *Ty`
// parameter, the `unsigned Align` local, and several braces/early returns;
// comments annotate only what is visible.
NVPTXTargetLowering::getArgumentAlignment(SDValue Callee,
                                          const ImmutableCallSite *CS,
                                          unsigned Idx) const {
  const DataLayout *TD = getDataLayout();

  const Value *DirectCallee = CS->getCalledFunction();

    // We don't have a direct function symbol, but that may be because of
    // constant cast instructions in the call.
    const Instruction *CalleeI = CS->getInstruction();
    assert(CalleeI && "Call target is not a function or derived value?");

    // With bitcast'd call targets, the instruction will be the call
    if (isa<CallInst>(CalleeI)) {
      // Check if we have call alignment metadata
      if (llvm::getAlign(*cast<CallInst>(CalleeI), Idx, Align))

      const Value *CalleeV = cast<CallInst>(CalleeI)->getCalledValue();
      // Ignore any bitcast instructions
      while (isa<ConstantExpr>(CalleeV)) {
        const ConstantExpr *CE = cast<ConstantExpr>(CalleeV);
        // Look through the bitcast
        CalleeV = cast<ConstantExpr>(CalleeV)->getOperand(0);

      // We have now looked past all of the bitcasts. Do we finally have a
      // Function?
      if (isa<Function>(CalleeV))
        DirectCallee = CalleeV;

  // Check for function alignment information if we found that the
  // ultimate target is a Function
  if (llvm::getAlign(*cast<Function>(DirectCallee), Idx, Align))

  // Call is indirect or alignment information is not available, fall back to
  // the ABI type alignment
  return TD->getABITypeAlignment(Ty);
657 SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
658 SmallVectorImpl<SDValue> &InVals) const {
659 SelectionDAG &DAG = CLI.DAG;
661 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
662 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
663 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
664 SDValue Chain = CLI.Chain;
665 SDValue Callee = CLI.Callee;
666 bool &isTailCall = CLI.IsTailCall;
667 ArgListTy &Args = CLI.getArgs();
668 Type *retTy = CLI.RetTy;
669 ImmutableCallSite *CS = CLI.CS;
671 bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
672 assert(isABI && "Non-ABI compilation is not supported");
675 const DataLayout *TD = getDataLayout();
676 MachineFunction &MF = DAG.getMachineFunction();
677 const Function *F = MF.getFunction();
679 SDValue tempChain = Chain;
681 DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(uniqueCallSite, true),
683 SDValue InFlag = Chain.getValue(1);
685 unsigned paramCount = 0;
686 // Args.size() and Outs.size() need not match.
687 // Outs.size() will be larger
688 // * if there is an aggregate argument with multiple fields (each field
689 // showing up separately in Outs)
690 // * if there is a vector argument with more than typical vector-length
691 // elements (generally if more than 4) where each vector element is
692 // individually present in Outs.
693 // So a different index should be used for indexing into Outs/OutVals.
694 // See similar issue in LowerFormalArguments.
696 // Declare the .params or .reg need to pass values
698 for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {
699 EVT VT = Outs[OIdx].VT;
700 Type *Ty = Args[i].Ty;
702 if (Outs[OIdx].Flags.isByVal() == false) {
703 if (Ty->isAggregateType()) {
705 SmallVector<EVT, 16> vtparts;
706 SmallVector<uint64_t, 16> Offsets;
707 ComputePTXValueVTs(*this, Ty, vtparts, &Offsets, 0);
709 unsigned align = getArgumentAlignment(Callee, CS, Ty, paramCount + 1);
710 // declare .param .align <align> .b8 .param<n>[<size>];
711 unsigned sz = TD->getTypeAllocSize(Ty);
712 SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
713 SDValue DeclareParamOps[] = { Chain, DAG.getConstant(align, MVT::i32),
714 DAG.getConstant(paramCount, MVT::i32),
715 DAG.getConstant(sz, MVT::i32), InFlag };
716 Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
718 InFlag = Chain.getValue(1);
719 for (unsigned j = 0, je = vtparts.size(); j != je; ++j) {
720 EVT elemtype = vtparts[j];
721 unsigned ArgAlign = GCD(align, Offsets[j]);
722 if (elemtype.isInteger() && (sz < 8))
724 SDValue StVal = OutVals[OIdx];
725 if (elemtype.getSizeInBits() < 16) {
726 StVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, StVal);
728 SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
729 SDValue CopyParamOps[] = { Chain,
730 DAG.getConstant(paramCount, MVT::i32),
731 DAG.getConstant(Offsets[j], MVT::i32),
733 Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl,
734 CopyParamVTs, CopyParamOps,
735 elemtype, MachinePointerInfo(),
737 InFlag = Chain.getValue(1);
740 if (vtparts.size() > 0)
745 if (Ty->isVectorTy()) {
746 EVT ObjectVT = getValueType(Ty);
747 unsigned align = getArgumentAlignment(Callee, CS, Ty, paramCount + 1);
748 // declare .param .align <align> .b8 .param<n>[<size>];
749 unsigned sz = TD->getTypeAllocSize(Ty);
750 SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
751 SDValue DeclareParamOps[] = { Chain, DAG.getConstant(align, MVT::i32),
752 DAG.getConstant(paramCount, MVT::i32),
753 DAG.getConstant(sz, MVT::i32), InFlag };
754 Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
756 InFlag = Chain.getValue(1);
757 unsigned NumElts = ObjectVT.getVectorNumElements();
758 EVT EltVT = ObjectVT.getVectorElementType();
760 bool NeedExtend = false;
761 if (EltVT.getSizeInBits() < 16) {
768 SDValue Elt = OutVals[OIdx++];
770 Elt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Elt);
772 SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
773 SDValue CopyParamOps[] = { Chain,
774 DAG.getConstant(paramCount, MVT::i32),
775 DAG.getConstant(0, MVT::i32), Elt,
777 Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl,
778 CopyParamVTs, CopyParamOps,
779 MemVT, MachinePointerInfo());
780 InFlag = Chain.getValue(1);
781 } else if (NumElts == 2) {
782 SDValue Elt0 = OutVals[OIdx++];
783 SDValue Elt1 = OutVals[OIdx++];
785 Elt0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Elt0);
786 Elt1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Elt1);
789 SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
790 SDValue CopyParamOps[] = { Chain,
791 DAG.getConstant(paramCount, MVT::i32),
792 DAG.getConstant(0, MVT::i32), Elt0, Elt1,
794 Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParamV2, dl,
795 CopyParamVTs, CopyParamOps,
796 MemVT, MachinePointerInfo());
797 InFlag = Chain.getValue(1);
799 unsigned curOffset = 0;
801 // We have at least 4 elements (<3 x Ty> expands to 4 elements) and
803 // vector will be expanded to a power of 2 elements, so we know we can
804 // always round up to the next multiple of 4 when creating the vector
806 // e.g. 4 elem => 1 st.v4
809 // 11 elem => 3 st.v4
810 unsigned VecSize = 4;
811 if (EltVT.getSizeInBits() == 64)
814 // This is potentially only part of a vector, so assume all elements
815 // are packed together.
816 unsigned PerStoreOffset = MemVT.getStoreSizeInBits() / 8 * VecSize;
818 for (unsigned i = 0; i < NumElts; i += VecSize) {
821 SmallVector<SDValue, 8> Ops;
822 Ops.push_back(Chain);
823 Ops.push_back(DAG.getConstant(paramCount, MVT::i32));
824 Ops.push_back(DAG.getConstant(curOffset, MVT::i32));
826 unsigned Opc = NVPTXISD::StoreParamV2;
828 StoreVal = OutVals[OIdx++];
830 StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
831 Ops.push_back(StoreVal);
833 if (i + 1 < NumElts) {
834 StoreVal = OutVals[OIdx++];
837 DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
839 StoreVal = DAG.getUNDEF(EltVT);
841 Ops.push_back(StoreVal);
844 Opc = NVPTXISD::StoreParamV4;
845 if (i + 2 < NumElts) {
846 StoreVal = OutVals[OIdx++];
849 DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
851 StoreVal = DAG.getUNDEF(EltVT);
853 Ops.push_back(StoreVal);
855 if (i + 3 < NumElts) {
856 StoreVal = OutVals[OIdx++];
859 DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
861 StoreVal = DAG.getUNDEF(EltVT);
863 Ops.push_back(StoreVal);
866 Ops.push_back(InFlag);
868 SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
869 Chain = DAG.getMemIntrinsicNode(Opc, dl, CopyParamVTs, Ops,
870 MemVT, MachinePointerInfo());
871 InFlag = Chain.getValue(1);
872 curOffset += PerStoreOffset;
880 // for ABI, declare .param .b<size> .param<n>;
881 unsigned sz = VT.getSizeInBits();
882 bool needExtend = false;
883 if (VT.isInteger()) {
889 SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
890 SDValue DeclareParamOps[] = { Chain,
891 DAG.getConstant(paramCount, MVT::i32),
892 DAG.getConstant(sz, MVT::i32),
893 DAG.getConstant(0, MVT::i32), InFlag };
894 Chain = DAG.getNode(NVPTXISD::DeclareScalarParam, dl, DeclareParamVTs,
896 InFlag = Chain.getValue(1);
897 SDValue OutV = OutVals[OIdx];
899 // zext/sext i1 to i16
900 unsigned opc = ISD::ZERO_EXTEND;
901 if (Outs[OIdx].Flags.isSExt())
902 opc = ISD::SIGN_EXTEND;
903 OutV = DAG.getNode(opc, dl, MVT::i16, OutV);
905 SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
906 SDValue CopyParamOps[] = { Chain, DAG.getConstant(paramCount, MVT::i32),
907 DAG.getConstant(0, MVT::i32), OutV, InFlag };
909 unsigned opcode = NVPTXISD::StoreParam;
910 if (Outs[OIdx].Flags.isZExt())
911 opcode = NVPTXISD::StoreParamU32;
912 else if (Outs[OIdx].Flags.isSExt())
913 opcode = NVPTXISD::StoreParamS32;
914 Chain = DAG.getMemIntrinsicNode(opcode, dl, CopyParamVTs, CopyParamOps,
915 VT, MachinePointerInfo());
917 InFlag = Chain.getValue(1);
922 SmallVector<EVT, 16> vtparts;
923 SmallVector<uint64_t, 16> Offsets;
924 const PointerType *PTy = dyn_cast<PointerType>(Args[i].Ty);
925 assert(PTy && "Type of a byval parameter should be pointer");
926 ComputePTXValueVTs(*this, PTy->getElementType(), vtparts, &Offsets, 0);
928 // declare .param .align <align> .b8 .param<n>[<size>];
929 unsigned sz = Outs[OIdx].Flags.getByValSize();
930 SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
931 unsigned ArgAlign = Outs[OIdx].Flags.getByValAlign();
932 // The ByValAlign in the Outs[OIdx].Flags is alway set at this point,
933 // so we don't need to worry about natural alignment or not.
934 // See TargetLowering::LowerCallTo().
935 SDValue DeclareParamOps[] = {
936 Chain, DAG.getConstant(Outs[OIdx].Flags.getByValAlign(), MVT::i32),
937 DAG.getConstant(paramCount, MVT::i32), DAG.getConstant(sz, MVT::i32),
940 Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
942 InFlag = Chain.getValue(1);
943 for (unsigned j = 0, je = vtparts.size(); j != je; ++j) {
944 EVT elemtype = vtparts[j];
945 int curOffset = Offsets[j];
946 unsigned PartAlign = GCD(ArgAlign, curOffset);
948 DAG.getNode(ISD::ADD, dl, getPointerTy(), OutVals[OIdx],
949 DAG.getConstant(curOffset, getPointerTy()));
950 SDValue theVal = DAG.getLoad(elemtype, dl, tempChain, srcAddr,
951 MachinePointerInfo(), false, false, false,
953 if (elemtype.getSizeInBits() < 16) {
954 theVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, theVal);
956 SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
957 SDValue CopyParamOps[] = { Chain, DAG.getConstant(paramCount, MVT::i32),
958 DAG.getConstant(curOffset, MVT::i32), theVal,
960 Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl, CopyParamVTs,
961 CopyParamOps, elemtype,
962 MachinePointerInfo());
964 InFlag = Chain.getValue(1);
969 GlobalAddressSDNode *Func = dyn_cast<GlobalAddressSDNode>(Callee.getNode());
970 unsigned retAlignment = 0;
973 if (Ins.size() > 0) {
974 SmallVector<EVT, 16> resvtparts;
975 ComputeValueVTs(*this, retTy, resvtparts);
978 // .param .align 16 .b8 retval0[<size-in-bytes>], or
979 // .param .b<size-in-bits> retval0
980 unsigned resultsz = TD->getTypeAllocSizeInBits(retTy);
981 if (retTy->isSingleValueType()) {
982 // Scalar needs to be at least 32bit wide
985 SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
986 SDValue DeclareRetOps[] = { Chain, DAG.getConstant(1, MVT::i32),
987 DAG.getConstant(resultsz, MVT::i32),
988 DAG.getConstant(0, MVT::i32), InFlag };
989 Chain = DAG.getNode(NVPTXISD::DeclareRet, dl, DeclareRetVTs,
991 InFlag = Chain.getValue(1);
993 retAlignment = getArgumentAlignment(Callee, CS, retTy, 0);
994 SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
995 SDValue DeclareRetOps[] = { Chain,
996 DAG.getConstant(retAlignment, MVT::i32),
997 DAG.getConstant(resultsz / 8, MVT::i32),
998 DAG.getConstant(0, MVT::i32), InFlag };
999 Chain = DAG.getNode(NVPTXISD::DeclareRetParam, dl, DeclareRetVTs,
1001 InFlag = Chain.getValue(1);
1006 // This is indirect function call case : PTX requires a prototype of the
1008 // proto_0 : .callprototype(.param .b32 _) _ (.param .b32 _);
1009 // to be emitted, and the label has to used as the last arg of call
1011 // The prototype is embedded in a string and put as the operand for a
1012 // CallPrototype SDNode which will print out to the value of the string.
1013 SDVTList ProtoVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1014 std::string Proto = getPrototype(retTy, Args, Outs, retAlignment, CS);
1015 const char *ProtoStr =
1016 nvTM->getManagedStrPool()->getManagedString(Proto.c_str())->c_str();
1017 SDValue ProtoOps[] = {
1018 Chain, DAG.getTargetExternalSymbol(ProtoStr, MVT::i32), InFlag,
1020 Chain = DAG.getNode(NVPTXISD::CallPrototype, dl, ProtoVTs, ProtoOps);
1021 InFlag = Chain.getValue(1);
1023 // Op to just print "call"
1024 SDVTList PrintCallVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1025 SDValue PrintCallOps[] = {
1026 Chain, DAG.getConstant((Ins.size() == 0) ? 0 : 1, MVT::i32), InFlag
1028 Chain = DAG.getNode(Func ? (NVPTXISD::PrintCallUni) : (NVPTXISD::PrintCall),
1029 dl, PrintCallVTs, PrintCallOps);
1030 InFlag = Chain.getValue(1);
1032 // Ops to print out the function name
1033 SDVTList CallVoidVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1034 SDValue CallVoidOps[] = { Chain, Callee, InFlag };
1035 Chain = DAG.getNode(NVPTXISD::CallVoid, dl, CallVoidVTs, CallVoidOps);
1036 InFlag = Chain.getValue(1);
1038 // Ops to print out the param list
1039 SDVTList CallArgBeginVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1040 SDValue CallArgBeginOps[] = { Chain, InFlag };
1041 Chain = DAG.getNode(NVPTXISD::CallArgBegin, dl, CallArgBeginVTs,
1043 InFlag = Chain.getValue(1);
1045 for (unsigned i = 0, e = paramCount; i != e; ++i) {
1048 opcode = NVPTXISD::LastCallArg;
1050 opcode = NVPTXISD::CallArg;
1051 SDVTList CallArgVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1052 SDValue CallArgOps[] = { Chain, DAG.getConstant(1, MVT::i32),
1053 DAG.getConstant(i, MVT::i32), InFlag };
1054 Chain = DAG.getNode(opcode, dl, CallArgVTs, CallArgOps);
1055 InFlag = Chain.getValue(1);
1057 SDVTList CallArgEndVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1058 SDValue CallArgEndOps[] = { Chain, DAG.getConstant(Func ? 1 : 0, MVT::i32),
1060 Chain = DAG.getNode(NVPTXISD::CallArgEnd, dl, CallArgEndVTs, CallArgEndOps);
1061 InFlag = Chain.getValue(1);
1064 SDVTList PrototypeVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1065 SDValue PrototypeOps[] = { Chain, DAG.getConstant(uniqueCallSite, MVT::i32),
1067 Chain = DAG.getNode(NVPTXISD::Prototype, dl, PrototypeVTs, PrototypeOps);
1068 InFlag = Chain.getValue(1);
1071 // Generate loads from param memory/moves from registers for result
1072 if (Ins.size() > 0) {
1073 if (retTy && retTy->isVectorTy()) {
1074 EVT ObjectVT = getValueType(retTy);
1075 unsigned NumElts = ObjectVT.getVectorNumElements();
1076 EVT EltVT = ObjectVT.getVectorElementType();
1077 assert(nvTM->getTargetLowering()->getNumRegisters(F->getContext(),
1078 ObjectVT) == NumElts &&
1079 "Vector was not scalarized");
1080 unsigned sz = EltVT.getSizeInBits();
1081 bool needTruncate = sz < 8 ? true : false;
1084 // Just a simple load
1085 SmallVector<EVT, 4> LoadRetVTs;
1086 if (EltVT == MVT::i1 || EltVT == MVT::i8) {
1087 // If loading i1/i8 result, generate
1091 LoadRetVTs.push_back(MVT::i16);
1093 LoadRetVTs.push_back(EltVT);
1094 LoadRetVTs.push_back(MVT::Other);
1095 LoadRetVTs.push_back(MVT::Glue);
1096 SmallVector<SDValue, 4> LoadRetOps;
1097 LoadRetOps.push_back(Chain);
1098 LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
1099 LoadRetOps.push_back(DAG.getConstant(0, MVT::i32));
1100 LoadRetOps.push_back(InFlag);
1101 SDValue retval = DAG.getMemIntrinsicNode(
1102 NVPTXISD::LoadParam, dl,
1103 DAG.getVTList(LoadRetVTs), LoadRetOps, EltVT, MachinePointerInfo());
1104 Chain = retval.getValue(1);
1105 InFlag = retval.getValue(2);
1106 SDValue Ret0 = retval;
1108 Ret0 = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Ret0);
1109 InVals.push_back(Ret0);
1110 } else if (NumElts == 2) {
1112 SmallVector<EVT, 4> LoadRetVTs;
1113 if (EltVT == MVT::i1 || EltVT == MVT::i8) {
1114 // If loading i1/i8 result, generate
1118 LoadRetVTs.push_back(MVT::i16);
1119 LoadRetVTs.push_back(MVT::i16);
1121 LoadRetVTs.push_back(EltVT);
1122 LoadRetVTs.push_back(EltVT);
1124 LoadRetVTs.push_back(MVT::Other);
1125 LoadRetVTs.push_back(MVT::Glue);
1126 SmallVector<SDValue, 4> LoadRetOps;
1127 LoadRetOps.push_back(Chain);
1128 LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
1129 LoadRetOps.push_back(DAG.getConstant(0, MVT::i32));
1130 LoadRetOps.push_back(InFlag);
1131 SDValue retval = DAG.getMemIntrinsicNode(
1132 NVPTXISD::LoadParamV2, dl,
1133 DAG.getVTList(LoadRetVTs), LoadRetOps, EltVT, MachinePointerInfo());
1134 Chain = retval.getValue(2);
1135 InFlag = retval.getValue(3);
1136 SDValue Ret0 = retval.getValue(0);
1137 SDValue Ret1 = retval.getValue(1);
1139 Ret0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ret0);
1140 InVals.push_back(Ret0);
1141 Ret1 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ret1);
1142 InVals.push_back(Ret1);
1144 InVals.push_back(Ret0);
1145 InVals.push_back(Ret1);
1148 // Split into N LoadV4
1150 unsigned VecSize = 4;
1151 unsigned Opc = NVPTXISD::LoadParamV4;
1152 if (EltVT.getSizeInBits() == 64) {
1154 Opc = NVPTXISD::LoadParamV2;
1156 EVT VecVT = EVT::getVectorVT(F->getContext(), EltVT, VecSize);
1157 for (unsigned i = 0; i < NumElts; i += VecSize) {
1158 SmallVector<EVT, 8> LoadRetVTs;
1159 if (EltVT == MVT::i1 || EltVT == MVT::i8) {
1160 // If loading i1/i8 result, generate
1164 for (unsigned j = 0; j < VecSize; ++j)
1165 LoadRetVTs.push_back(MVT::i16);
1167 for (unsigned j = 0; j < VecSize; ++j)
1168 LoadRetVTs.push_back(EltVT);
1170 LoadRetVTs.push_back(MVT::Other);
1171 LoadRetVTs.push_back(MVT::Glue);
1172 SmallVector<SDValue, 4> LoadRetOps;
1173 LoadRetOps.push_back(Chain);
1174 LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
1175 LoadRetOps.push_back(DAG.getConstant(Ofst, MVT::i32));
1176 LoadRetOps.push_back(InFlag);
1177 SDValue retval = DAG.getMemIntrinsicNode(
1178 Opc, dl, DAG.getVTList(LoadRetVTs),
1179 LoadRetOps, EltVT, MachinePointerInfo());
1181 Chain = retval.getValue(2);
1182 InFlag = retval.getValue(3);
1184 Chain = retval.getValue(4);
1185 InFlag = retval.getValue(5);
1188 for (unsigned j = 0; j < VecSize; ++j) {
1189 if (i + j >= NumElts)
1191 SDValue Elt = retval.getValue(j);
1193 Elt = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
1194 InVals.push_back(Elt);
1196 Ofst += TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
1200 SmallVector<EVT, 16> VTs;
1201 SmallVector<uint64_t, 16> Offsets;
1202 ComputePTXValueVTs(*this, retTy, VTs, &Offsets, 0);
1203 assert(VTs.size() == Ins.size() && "Bad value decomposition");
1204 unsigned RetAlign = getArgumentAlignment(Callee, CS, retTy, 0);
1205 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
1206 unsigned sz = VTs[i].getSizeInBits();
1207 unsigned AlignI = GCD(RetAlign, Offsets[i]);
1208 bool needTruncate = sz < 8 ? true : false;
1209 if (VTs[i].isInteger() && (sz < 8))
1212 SmallVector<EVT, 4> LoadRetVTs;
1213 EVT TheLoadType = VTs[i];
1214 if (retTy->isIntegerTy() &&
1215 TD->getTypeAllocSizeInBits(retTy) < 32) {
1216 // This is for integer types only, and specifically not for
1218 LoadRetVTs.push_back(MVT::i32);
1219 TheLoadType = MVT::i32;
1220 } else if (sz < 16) {
1221 // If loading i1/i8 result, generate
1223 // trunc i16 to i1/i8
1224 LoadRetVTs.push_back(MVT::i16);
1226 LoadRetVTs.push_back(Ins[i].VT);
1227 LoadRetVTs.push_back(MVT::Other);
1228 LoadRetVTs.push_back(MVT::Glue);
1230 SmallVector<SDValue, 4> LoadRetOps;
1231 LoadRetOps.push_back(Chain);
1232 LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
1233 LoadRetOps.push_back(DAG.getConstant(Offsets[i], MVT::i32));
1234 LoadRetOps.push_back(InFlag);
1235 SDValue retval = DAG.getMemIntrinsicNode(
1236 NVPTXISD::LoadParam, dl,
1237 DAG.getVTList(LoadRetVTs), LoadRetOps,
1238 TheLoadType, MachinePointerInfo(), AlignI);
1239 Chain = retval.getValue(1);
1240 InFlag = retval.getValue(2);
1241 SDValue Ret0 = retval.getValue(0);
1243 Ret0 = DAG.getNode(ISD::TRUNCATE, dl, Ins[i].VT, Ret0);
1244 InVals.push_back(Ret0);
1249 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(uniqueCallSite, true),
1250 DAG.getIntPtrConstant(uniqueCallSite + 1, true),
1254 // set isTailCall to false for now, until we figure out how to express
1255 // tail call optimization in PTX
1260 // By default CONCAT_VECTORS is lowered by ExpandVectorBuildThroughStack()
1261 // (see LegalizeDAG.cpp). This is slow and uses local memory.
1262 // We use extract/insert/build vector just as what LegalizeOp() does in llvm 2.5
1264 NVPTXTargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
1265 SDNode *Node = Op.getNode();
1267 SmallVector<SDValue, 8> Ops;
1268 unsigned NumOperands = Node->getNumOperands();
// Scalarize every input vector operand: extract each element so the whole
// concatenation can be rebuilt below as a single BUILD_VECTOR instead of
// going through the stack-based expansion.
1269 for (unsigned i = 0; i < NumOperands; ++i) {
1270 SDValue SubOp = Node->getOperand(i);
1271 EVT VVT = SubOp.getNode()->getValueType(0);
1272 EVT EltVT = VVT.getVectorElementType();
1273 unsigned NumSubElem = VVT.getVectorNumElements();
1274 for (unsigned j = 0; j < NumSubElem; ++j) {
1275 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, SubOp,
1276 DAG.getIntPtrConstant(j)));
// Reassemble the full-width result vector from the collected scalars.
1279 return DAG.getNode(ISD::BUILD_VECTOR, dl, Node->getValueType(0), Ops);
1282 /// LowerShiftRightParts - Lower SRL_PARTS, SRA_PARTS, which
1283 /// 1) returns two i32 values and take a 2 x i32 value to shift plus a shift
1285 /// 2) returns two i64 values and take a 2 x i64 value to shift plus a shift
1287 SDValue NVPTXTargetLowering::LowerShiftRightParts(SDValue Op,
1288 SelectionDAG &DAG) const {
1289 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
1290 assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
1292 EVT VT = Op.getValueType();
1293 unsigned VTBits = VT.getSizeInBits();
1295 SDValue ShOpLo = Op.getOperand(0);
1296 SDValue ShOpHi = Op.getOperand(1);
1297 SDValue ShAmt = Op.getOperand(2);
// SRA_PARTS shifts the high word arithmetically; SRL_PARTS logically.
1298 unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
// Fast path: sm_35+ has the 'shf' funnel-shift instruction, which produces
// the low word of the double-wide right shift in one operation.
1300 if (VTBits == 32 && nvptxSubtarget.getSmVersion() >= 35) {
1302 // For 32bit and sm35, we can use the funnel shift 'shf' instruction.
1303 // {dHi, dLo} = {aHi, aLo} >> Amt
1305 // dLo = shf.r.clamp aLo, aHi, Amt
1307 SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
1308 SDValue Lo = DAG.getNode(NVPTXISD::FUN_SHFR_CLAMP, dl, VT, ShOpLo, ShOpHi,
1311 SDValue Ops[2] = { Lo, Hi };
1312 return DAG.getMergeValues(Ops, dl);
// Generic expansion: compute the Lo result for both the Amt >= size and
// Amt < size cases and select between them with a SETGE compare
// (branchless), mirroring the comment below.
1316 // {dHi, dLo} = {aHi, aLo} >> Amt
1317 // - if (Amt>=size) then
1318 // dLo = aHi >> (Amt-size)
1319 // dHi = aHi >> Amt (this is either all 0 or all 1)
1321 // dLo = (aLo >>logic Amt) | (aHi << (size-Amt))
1324 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
1325 DAG.getConstant(VTBits, MVT::i32), ShAmt);
1326 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
1327 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
1328 DAG.getConstant(VTBits, MVT::i32));
1329 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
1330 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
1331 SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
1333 SDValue Cmp = DAG.getSetCC(dl, MVT::i1, ShAmt,
1334 DAG.getConstant(VTBits, MVT::i32), ISD::SETGE);
1335 SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
1336 SDValue Lo = DAG.getNode(ISD::SELECT, dl, VT, Cmp, TrueVal, FalseVal);
1338 SDValue Ops[2] = { Lo, Hi };
1339 return DAG.getMergeValues(Ops, dl);
1343 /// LowerShiftLeftParts - Lower SHL_PARTS, which
1344 /// 1) returns two i32 values and take a 2 x i32 value to shift plus a shift
1346 /// 2) returns two i64 values and take a 2 x i64 value to shift plus a shift
1348 SDValue NVPTXTargetLowering::LowerShiftLeftParts(SDValue Op,
1349 SelectionDAG &DAG) const {
1350 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
1351 assert(Op.getOpcode() == ISD::SHL_PARTS);
1353 EVT VT = Op.getValueType();
1354 unsigned VTBits = VT.getSizeInBits();
1356 SDValue ShOpLo = Op.getOperand(0);
1357 SDValue ShOpHi = Op.getOperand(1);
1358 SDValue ShAmt = Op.getOperand(2);
// Fast path: sm_35+ has the 'shf' funnel-shift instruction, which produces
// the high word of the double-wide left shift in one operation.
1360 if (VTBits == 32 && nvptxSubtarget.getSmVersion() >= 35) {
1362 // For 32bit and sm35, we can use the funnel shift 'shf' instruction.
1363 // {dHi, dLo} = {aHi, aLo} << Amt
1364 // dHi = shf.l.clamp aLo, aHi, Amt
1367 SDValue Hi = DAG.getNode(NVPTXISD::FUN_SHFL_CLAMP, dl, VT, ShOpLo, ShOpHi,
1369 SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
1371 SDValue Ops[2] = { Lo, Hi };
1372 return DAG.getMergeValues(Ops, dl);
// Generic expansion: compute the Hi result for both the Amt >= size and
// Amt < size cases and select between them with a SETGE compare
// (branchless), mirroring the comment below.
1376 // {dHi, dLo} = {aHi, aLo} << Amt
1377 // - if (Amt>=size) then
1378 // dLo = aLo << Amt (all 0)
1379 // dLo = aLo << (Amt-size)
1382 // dHi = (aHi << Amt) | (aLo >> (size-Amt))
1384 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
1385 DAG.getConstant(VTBits, MVT::i32), ShAmt);
1386 SDValue Tmp1 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
1387 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
1388 DAG.getConstant(VTBits, MVT::i32));
1389 SDValue Tmp2 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
1390 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
1391 SDValue TrueVal = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
1393 SDValue Cmp = DAG.getSetCC(dl, MVT::i1, ShAmt,
1394 DAG.getConstant(VTBits, MVT::i32), ISD::SETGE);
1395 SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
1396 SDValue Hi = DAG.getNode(ISD::SELECT, dl, VT, Cmp, TrueVal, FalseVal);
1398 SDValue Ops[2] = { Lo, Hi };
1399 return DAG.getMergeValues(Ops, dl);
// Central dispatch for every operation this target registered as 'Custom':
// routes each opcode to its dedicated Lower* helper.
1404 NVPTXTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
1405 switch (Op.getOpcode()) {
1406 case ISD::RETURNADDR:
1408 case ISD::FRAMEADDR:
1410 case ISD::GlobalAddress:
1411 return LowerGlobalAddress(Op, DAG);
1412 case ISD::INTRINSIC_W_CHAIN:
1414 case ISD::BUILD_VECTOR:
1415 case ISD::EXTRACT_SUBVECTOR:
1417 case ISD::CONCAT_VECTORS:
1418 return LowerCONCAT_VECTORS(Op, DAG);
1420 return LowerSTORE(Op, DAG);
1422 return LowerLOAD(Op, DAG);
1423 case ISD::SHL_PARTS:
1424 return LowerShiftLeftParts(Op, DAG);
1425 case ISD::SRA_PARTS:
1426 case ISD::SRL_PARTS:
1427 return LowerShiftRightParts(Op, DAG);
// Reaching here means an opcode was marked Custom but never handled above.
1429 llvm_unreachable("Custom lowering not defined for operation");
// Only i1 loads need custom handling (PTX has no 1-bit loads); all other
// load types fall through to the default lowering.
1433 SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
1434 if (Op.getValueType() == MVT::i1)
1435 return LowerLOADi1(Op, DAG);
1442 // v1 = ld i8* addr (-> i16)
1443 // v = trunc i16 to i1
1444 SDValue NVPTXTargetLowering::LowerLOADi1(SDValue Op, SelectionDAG &DAG) const {
1445 SDNode *Node = Op.getNode();
1446 LoadSDNode *LD = cast<LoadSDNode>(Node);
1448 assert(LD->getExtensionType() == ISD::NON_EXTLOAD);
1449 assert(Node->getValueType(0) == MVT::i1 &&
1450 "Custom lowering for i1 load only");
// Re-issue the load at i16 width, preserving the original load's memory
// operand flags, then truncate the loaded value back down to i1.
1452 DAG.getLoad(MVT::i16, dl, LD->getChain(), LD->getBasePtr(),
1453 LD->getPointerInfo(), LD->isVolatile(), LD->isNonTemporal(),
1454 LD->isInvariant(), LD->getAlignment());
1455 SDValue result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, newLD);
1456 // The legalizer (the caller) is expecting two values from the legalized
1457 // load, so we build a MergeValues node for it. See ExpandUnalignedLoad()
1458 // in LegalizeDAG.cpp which also uses MergeValues.
1459 SDValue Ops[] = { result, LD->getChain() };
1460 return DAG.getMergeValues(Ops, dl);
// Route stores that need custom handling: i1 stores are widened in
// LowerSTOREi1; vector stores become target StoreV2/StoreV4 nodes in
// LowerSTOREVector.
1463 SDValue NVPTXTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
1464 EVT ValVT = Op.getOperand(1).getValueType();
1465 if (ValVT == MVT::i1)
1466 return LowerSTOREi1(Op, DAG);
1467 else if (ValVT.isVector())
1468 return LowerSTOREVector(Op, DAG);
// Lower a vector store into an NVPTX StoreV2/StoreV4 target node whose
// operands are the individually extracted elements (widened to i16 for
// sub-16-bit element types).
1474 NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
1475 SDNode *N = Op.getNode();
1476 SDValue Val = N->getOperand(1);
1478 EVT ValVT = Val.getValueType();
1480 if (ValVT.isVector()) {
1481 // We only handle "native" vector sizes for now, e.g. <4 x double> is not
1482 // legal. We can (and should) split that into 2 stores of <2 x double> here
1483 // but I'm leaving that as a TODO for now.
1484 if (!ValVT.isSimple())
1486 switch (ValVT.getSimpleVT().SimpleTy) {
1499 // This is a "native" vector type
1503 unsigned Opcode = 0;
1504 EVT EltVT = ValVT.getVectorElementType();
1505 unsigned NumElts = ValVT.getVectorNumElements();
1507 // Since StoreV2 is a target node, we cannot rely on DAG type legalization.
1508 // Therefore, we must ensure the type is legal. For i1 and i8, we set the
1509 // stored type to i16 and propagate the "real" type as the memory type.
1510 bool NeedExt = false;
1511 if (EltVT.getSizeInBits() < 16)
1518 Opcode = NVPTXISD::StoreV2;
1521 Opcode = NVPTXISD::StoreV4;
// Build the operand list for the memory-intrinsic node:
1526 SmallVector<SDValue, 8> Ops;
1528 // First is the chain
1529 Ops.push_back(N->getOperand(0));
1531 // Then the split values
1532 for (unsigned i = 0; i < NumElts; ++i) {
1533 SDValue ExtVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Val,
1534 DAG.getIntPtrConstant(i));
1536 ExtVal = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i16, ExtVal);
1537 Ops.push_back(ExtVal);
1540 // Then any remaining arguments
1541 for (unsigned i = 2, e = N->getNumOperands(); i != e; ++i) {
1542 Ops.push_back(N->getOperand(i));
// Emit the target store with the ORIGINAL memory VT and operand so alias
// analysis and the memory subsystem still see the true access.
1545 MemSDNode *MemSD = cast<MemSDNode>(N);
1547 SDValue NewSt = DAG.getMemIntrinsicNode(
1548 Opcode, DL, DAG.getVTList(MVT::Other), Ops,
1549 MemSD->getMemoryVT(), MemSD->getMemOperand());
1551 //return DCI.CombineTo(N, NewSt, true);
1560 // v1 = zxt v to i16
1562 SDValue NVPTXTargetLowering::LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const {
1563 SDNode *Node = Op.getNode();
1565 StoreSDNode *ST = cast<StoreSDNode>(Node);
1566 SDValue Tmp1 = ST->getChain();
1567 SDValue Tmp2 = ST->getBasePtr();
1568 SDValue Tmp3 = ST->getValue();
1569 assert(Tmp3.getValueType() == MVT::i1 && "Custom lowering for i1 store only");
// Preserve the original store's memory-operand flags on the new store.
1570 unsigned Alignment = ST->getAlignment();
1571 bool isVolatile = ST->isVolatile();
1572 bool isNonTemporal = ST->isNonTemporal();
// PTX cannot store an i1 directly: zero-extend the value to i16, then emit
// a truncating store with i8 as the memory type.
1573 Tmp3 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Tmp3);
1574 SDValue Result = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2,
1575 ST->getPointerInfo(), MVT::i8, isNonTemporal,
1576 isVolatile, Alignment);
// Build an external symbol named "<inname><suffix>". The string is taken
// from the target machine's managed string pool so the c_str() handed to
// the DAG stays valid for the lifetime of compilation.
1580 SDValue NVPTXTargetLowering::getExtSymb(SelectionDAG &DAG, const char *inname,
1581 int idx, EVT v) const {
1582 std::string *name = nvTM->getManagedStrPool()->getManagedString(inname);
1583 std::stringstream suffix;
1585 *name += suffix.str();
1586 return DAG.getTargetExternalSymbol(name->c_str(), v);
// Returns the external symbol "<function-name>_param_<idx>" used to address
// formal parameter 'idx' of the current function. The string is saved in
// the managed string pool so the pointer given to the DAG stays valid.
1590 NVPTXTargetLowering::getParamSymbol(SelectionDAG &DAG, int idx, EVT v) const {
1591 std::string ParamSym;
1592 raw_string_ostream ParamStr(ParamSym);
1594 ParamStr << DAG.getMachineFunction().getName() << "_param_" << idx;
1597 std::string *SavedStr =
1598 nvTM->getManagedStrPool()->getManagedString(ParamSym.c_str());
1599 return DAG.getTargetExternalSymbol(SavedStr->c_str(), v);
// Convenience wrapper: the helper-parameter symbol ".HLPPARAM<idx>"
// (see getExtSymb for the naming/pooling mechanics).
1602 SDValue NVPTXTargetLowering::getParamHelpSymbol(SelectionDAG &DAG, int idx) {
1603 return getExtSymb(DAG, ".HLPPARAM", idx);
1606 // Check to see if the kernel argument is image*_t or sampler_t
1608 bool llvm::isImageOrSamplerVal(const Value *arg, const Module *context) {
// The OpenCL opaque image/sampler types are recognized purely by the name
// of the pointed-to struct type.
1609 static const char *const specialTypes[] = { "struct._image2d_t",
1610 "struct._image3d_t",
1611 "struct._sampler_t" };
1613 const Type *Ty = arg->getType();
1614 const PointerType *PTy = dyn_cast<PointerType>(Ty);
// Literal (unnamed) structs cannot match; map them to the empty string.
1622 const StructType *STy = dyn_cast<StructType>(PTy->getElementType());
1623 const std::string TypeName = STy && !STy->isLiteral() ? STy->getName() : "";
1625 for (int i = 0, e = array_lengthof(specialTypes); i != e; ++i)
1626 if (TypeName == specialTypes[i])
/// LowerFormalArguments - Materialize the formal arguments of the current
/// function. NVPTX parameters live in the .param address space; each
/// argument is loaded from its "<fn>_param_<idx>" symbol (aggregates piece
/// by piece, vectors via v2/v4 loads), while byval pointer params are
/// wrapped in a MoveParam node instead of being loaded.
1632 SDValue NVPTXTargetLowering::LowerFormalArguments(
1633 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1634 const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc dl, SelectionDAG &DAG,
1635 SmallVectorImpl<SDValue> &InVals) const {
1636 MachineFunction &MF = DAG.getMachineFunction();
1637 const DataLayout *TD = getDataLayout();
1639 const Function *F = MF.getFunction();
1640 const AttributeSet &PAL = F->getAttributes();
1641 const TargetLowering *TLI = DAG.getTarget().getTargetLowering();
1643 SDValue Root = DAG.getRoot();
1644 std::vector<SDValue> OutChains;
1646 bool isKernel = llvm::isKernelFunction(*F);
// Only the sm_20+ ABI is supported.
1647 bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
1648 assert(isABI && "Non-ABI compilation is not supported");
// Collect the IR-level arguments and their types up front; note Ins may
// have MORE entries than these lists (see the comment below).
1652 std::vector<Type *> argTypes;
1653 std::vector<const Argument *> theArgs;
1654 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
1656 theArgs.push_back(I);
1657 argTypes.push_back(I->getType());
1659 // argTypes.size() (or theArgs.size()) and Ins.size() need not match.
1660 // Ins.size() will be larger
1661 // * if there is an aggregate argument with multiple fields (each field
1662 // showing up separately in Ins)
1663 // * if there is a vector argument with more than typical vector-length
1664 // elements (generally if more than 4) where each vector element is
1665 // individually present in Ins.
1666 // So a different index should be used for indexing into Ins.
1667 // See similar issue in LowerCall.
1668 unsigned InsIdx = 0;
1671 for (unsigned i = 0, e = theArgs.size(); i != e; ++i, ++idx, ++InsIdx) {
1672 Type *Ty = argTypes[i];
1674 // If the kernel argument is image*_t or sampler_t, convert it to
1675 // a i32 constant holding the parameter position. This can later
1676 // matched in the AsmPrinter to output the correct mangled name.
1677 if (isImageOrSamplerVal(
1679 (theArgs[i]->getParent() ? theArgs[i]->getParent()->getParent()
1681 assert(isKernel && "Only kernels can have image/sampler params");
1682 InVals.push_back(DAG.getConstant(i + 1, MVT::i32));
// Unused arguments still need placeholders so InVals stays in sync with
// Ins: push UNDEFs of the expected register types.
1686 if (theArgs[i]->use_empty()) {
1688 if (Ty->isAggregateType()) {
1689 SmallVector<EVT, 16> vtparts;
1691 ComputePTXValueVTs(*this, Ty, vtparts);
1692 assert(vtparts.size() > 0 && "empty aggregate type not expected");
1693 for (unsigned parti = 0, parte = vtparts.size(); parti != parte;
1695 InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
1698 if (vtparts.size() > 0)
1702 if (Ty->isVectorTy()) {
1703 EVT ObjectVT = getValueType(Ty);
1704 unsigned NumRegs = TLI->getNumRegisters(F->getContext(), ObjectVT);
1705 for (unsigned parti = 0; parti < NumRegs; ++parti) {
1706 InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
1713 InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
1717 // In the following cases, assign a node order of "idx+1"
1718 // to newly created nodes. The SDNodes for params have to
1719 // appear in the same order as their order of appearance
1720 // in the original function. "idx+1" holds that order.
1721 if (PAL.hasAttribute(i + 1, Attribute::ByVal) == false) {
// --- Non-byval argument: load its value(s) from param space. ---
1722 if (Ty->isAggregateType()) {
1723 SmallVector<EVT, 16> vtparts;
1724 SmallVector<uint64_t, 16> offsets;
1726 // NOTE: Here, we lose the ability to issue vector loads for vectors
1727 // that are a part of a struct. This should be investigated in the
1729 ComputePTXValueVTs(*this, Ty, vtparts, &offsets, 0);
1730 assert(vtparts.size() > 0 && "empty aggregate type not expected");
1731 bool aggregateIsPacked = false;
1732 if (StructType *STy = llvm::dyn_cast<StructType>(Ty))
1733 aggregateIsPacked = STy->isPacked();
1735 SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
1736 for (unsigned parti = 0, parte = vtparts.size(); parti != parte;
1738 EVT partVT = vtparts[parti];
1739 Value *srcValue = Constant::getNullValue(
1740 PointerType::get(partVT.getTypeForEVT(F->getContext()),
1741 llvm::ADDRESS_SPACE_PARAM));
1743 DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
1744 DAG.getConstant(offsets[parti], getPointerTy()));
// Packed structs have no alignment guarantee beyond 1 byte.
1745 unsigned partAlign =
1746 aggregateIsPacked ? 1
1747 : TD->getABITypeAlignment(
1748 partVT.getTypeForEVT(F->getContext()));
// Widen sub-register-size parts with sext/zext load as the front end
// requested; otherwise use a plain load of the part type.
1750 if (Ins[InsIdx].VT.getSizeInBits() > partVT.getSizeInBits()) {
1751 ISD::LoadExtType ExtOp = Ins[InsIdx].Flags.isSExt() ?
1752 ISD::SEXTLOAD : ISD::ZEXTLOAD;
1753 p = DAG.getExtLoad(ExtOp, dl, Ins[InsIdx].VT, Root, srcAddr,
1754 MachinePointerInfo(srcValue), partVT, false,
1757 p = DAG.getLoad(partVT, dl, Root, srcAddr,
1758 MachinePointerInfo(srcValue), false, false, false,
1762 p.getNode()->setIROrder(idx + 1);
1763 InVals.push_back(p);
1766 if (vtparts.size() > 0)
1770 if (Ty->isVectorTy()) {
1771 EVT ObjectVT = getValueType(Ty);
1772 SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
1773 unsigned NumElts = ObjectVT.getVectorNumElements();
1774 assert(TLI->getNumRegisters(F->getContext(), ObjectVT) == NumElts &&
1775 "Vector was not scalarized");
1777 EVT EltVT = ObjectVT.getVectorElementType();
// One element: a single scalar load suffices.
1782 // We only have one element, so just directly load it
1783 Value *SrcValue = Constant::getNullValue(PointerType::get(
1784 EltVT.getTypeForEVT(F->getContext()), llvm::ADDRESS_SPACE_PARAM));
1785 SDValue SrcAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
1786 DAG.getConstant(Ofst, getPointerTy()));
1787 SDValue P = DAG.getLoad(
1788 EltVT, dl, Root, SrcAddr, MachinePointerInfo(SrcValue), false,
1790 TD->getABITypeAlignment(EltVT.getTypeForEVT(F->getContext())));
1792 P.getNode()->setIROrder(idx + 1);
1794 if (Ins[InsIdx].VT.getSizeInBits() > EltVT.getSizeInBits())
1795 P = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, P);
1796 InVals.push_back(P);
1797 Ofst += TD->getTypeAllocSize(EltVT.getTypeForEVT(F->getContext()));
1799 } else if (NumElts == 2) {
// Two elements: load them as one <2 x Elt> vector and split it.
1801 // f32,f32 = load ...
1802 EVT VecVT = EVT::getVectorVT(F->getContext(), EltVT, 2);
1803 Value *SrcValue = Constant::getNullValue(PointerType::get(
1804 VecVT.getTypeForEVT(F->getContext()), llvm::ADDRESS_SPACE_PARAM));
1805 SDValue SrcAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
1806 DAG.getConstant(Ofst, getPointerTy()));
1807 SDValue P = DAG.getLoad(
1808 VecVT, dl, Root, SrcAddr, MachinePointerInfo(SrcValue), false,
1810 TD->getABITypeAlignment(VecVT.getTypeForEVT(F->getContext())));
1812 P.getNode()->setIROrder(idx + 1);
1814 SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, P,
1815 DAG.getIntPtrConstant(0));
1816 SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, P,
1817 DAG.getIntPtrConstant(1));
1819 if (Ins[InsIdx].VT.getSizeInBits() > EltVT.getSizeInBits()) {
1820 Elt0 = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, Elt0);
1821 Elt1 = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, Elt1);
1824 InVals.push_back(Elt0);
1825 InVals.push_back(Elt1);
1826 Ofst += TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
1830 // We have at least 4 elements (<3 x Ty> expands to 4 elements) and
1832 // vector will be expanded to a power of 2 elements, so we know we can
1833 // always round up to the next multiple of 4 when creating the vector
1835 // e.g. 4 elem => 1 ld.v4
1836 // 6 elem => 2 ld.v4
1837 // 8 elem => 2 ld.v4
1838 // 11 elem => 3 ld.v4
1839 unsigned VecSize = 4;
// 64-bit elements use v2 loads instead of v4 — presumably to stay
// within the 128-bit vector-access width; confirm against PTX ISA.
1840 if (EltVT.getSizeInBits() == 64) {
1843 EVT VecVT = EVT::getVectorVT(F->getContext(), EltVT, VecSize);
1844 for (unsigned i = 0; i < NumElts; i += VecSize) {
1845 Value *SrcValue = Constant::getNullValue(
1846 PointerType::get(VecVT.getTypeForEVT(F->getContext()),
1847 llvm::ADDRESS_SPACE_PARAM));
1849 DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
1850 DAG.getConstant(Ofst, getPointerTy()));
1851 SDValue P = DAG.getLoad(
1852 VecVT, dl, Root, SrcAddr, MachinePointerInfo(SrcValue), false,
1854 TD->getABITypeAlignment(VecVT.getTypeForEVT(F->getContext())));
1856 P.getNode()->setIROrder(idx + 1);
// Scatter the loaded vector back into scalars, skipping the padding
// lanes past NumElts.
1858 for (unsigned j = 0; j < VecSize; ++j) {
1859 if (i + j >= NumElts)
1861 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, P,
1862 DAG.getIntPtrConstant(j));
1863 if (Ins[InsIdx].VT.getSizeInBits() > EltVT.getSizeInBits())
1864 Elt = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, Elt);
1865 InVals.push_back(Elt);
1867 Ofst += TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
// Plain scalar argument: a single load from its param symbol.
1877 EVT ObjectVT = getValueType(Ty);
1878 // If ABI, load from the param symbol
1879 SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
1880 Value *srcValue = Constant::getNullValue(PointerType::get(
1881 ObjectVT.getTypeForEVT(F->getContext()), llvm::ADDRESS_SPACE_PARAM));
1883 if (ObjectVT.getSizeInBits() < Ins[InsIdx].VT.getSizeInBits()) {
1884 ISD::LoadExtType ExtOp = Ins[InsIdx].Flags.isSExt() ?
1885 ISD::SEXTLOAD : ISD::ZEXTLOAD;
1886 p = DAG.getExtLoad(ExtOp, dl, Ins[InsIdx].VT, Root, Arg,
1887 MachinePointerInfo(srcValue), ObjectVT, false, false,
1888 TD->getABITypeAlignment(ObjectVT.getTypeForEVT(F->getContext())));
1890 p = DAG.getLoad(Ins[InsIdx].VT, dl, Root, Arg,
1891 MachinePointerInfo(srcValue), false, false, false,
1892 TD->getABITypeAlignment(ObjectVT.getTypeForEVT(F->getContext())));
1895 p.getNode()->setIROrder(idx + 1);
1896 InVals.push_back(p);
1900 // Param has ByVal attribute
1901 // Return MoveParam(param symbol).
1902 // Ideally, the param symbol can be returned directly,
1903 // but when SDNode builder decides to use it in a CopyToReg(),
1904 // machine instruction fails because TargetExternalSymbol
1905 // (not lowered) is target dependent, and CopyToReg assumes
1906 // the source is lowered.
1907 EVT ObjectVT = getValueType(Ty);
1908 assert(ObjectVT == Ins[InsIdx].VT &&
1909 "Ins type did not match function type");
1910 SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
1911 SDValue p = DAG.getNode(NVPTXISD::MoveParam, dl, ObjectVT, Arg);
1913 p.getNode()->setIROrder(idx + 1);
1915 InVals.push_back(p);
// Also hand back the generic-address-space view of the byval pointer
// via the nvvm_ptr_local_to_gen intrinsic.
1917 SDValue p2 = DAG.getNode(
1918 ISD::INTRINSIC_WO_CHAIN, dl, ObjectVT,
1919 DAG.getConstant(Intrinsic::nvvm_ptr_local_to_gen, MVT::i32), p);
1920 InVals.push_back(p2);
1924 // Clang will check explicit VarArg and issue error if any. However, Clang
1925 // will let code with
1926 // implicit var arg like f() pass. See bug 617733.
1927 // We treat this case as if the arg list is empty.
1928 // if (F.isVarArg()) {
1929 // assert(0 && "VarArg not supported yet!");
// Tie any produced chains into the DAG root so the parameter loads are
// not dropped as dead code.
1932 if (!OutChains.empty())
1933 DAG.setRoot(DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains))
// LowerReturn - Lower a function's return values into NVPTXISD::StoreRetval /
// StoreRetvalV2 / StoreRetvalV4 nodes, which store into the implicit
// return-value parameter space at increasing byte offsets.
// NOTE(review): this listing is elided (the embedded line numbers have gaps),
// so comments below describe only the code that is visible here; some
// conditions and closing braces are not shown.
1940 NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
1942 const SmallVectorImpl<ISD::OutputArg> &Outs,
1943 const SmallVectorImpl<SDValue> &OutVals,
1944 SDLoc dl, SelectionDAG &DAG) const {
1945 MachineFunction &MF = DAG.getMachineFunction();
1946 const Function *F = MF.getFunction();
1947 Type *RetTy = F->getReturnType();
1948 const DataLayout *TD = getDataLayout();
// Only the ABI compilation path (sm_20 or newer) is supported.
1950 bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
1951 assert(isABI && "Non-ABI compilation is not supported");
// --- Vector return path: OutVals holds the scalarized elements; recombine
// them into one or more vector stores of the return space.
1955 if (VectorType *VTy = dyn_cast<VectorType>(RetTy)) {
1956 // If we have a vector type, the OutVals array will be the scalarized
1957 // components and we have to combine them into 1 or more vector stores.
1958 unsigned NumElts = VTy->getNumElements();
1959 assert(NumElts == Outs.size() && "Bad scalarization of return value");
1961 // const_cast can be removed in later LLVM versions
1962 EVT EltVT = getValueType(RetTy).getVectorElementType();
// Sub-16-bit elements are widened (to i16) before storing; the ZERO_EXTEND
// calls below are guarded by NeedExtend (guards partially elided here).
1963 bool NeedExtend = false;
1964 if (EltVT.getSizeInBits() < 16)
1969 SDValue StoreVal = OutVals[0];
1970 // We only have one element, so just directly store it
1972 StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
1973 SDValue Ops[] = { Chain, DAG.getConstant(0, MVT::i32), StoreVal };
1974 Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreRetval, dl,
1975 DAG.getVTList(MVT::Other), Ops,
1976 EltVT, MachinePointerInfo());
// Two elements: emit a single StoreRetvalV2.
1978 } else if (NumElts == 2) {
1980 SDValue StoreVal0 = OutVals[0];
1981 SDValue StoreVal1 = OutVals[1];
1984 StoreVal0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal0);
1985 StoreVal1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal1);
1988 SDValue Ops[] = { Chain, DAG.getConstant(0, MVT::i32), StoreVal0,
1990 Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreRetvalV2, dl,
1991 DAG.getVTList(MVT::Other), Ops,
1992 EltVT, MachinePointerInfo());
1995 // We have at least 4 elements (<3 x Ty> expands to 4 elements) and the
1996 // vector will be expanded to a power of 2 elements, so we know we can
1997 // always round up to the next multiple of 4 when creating the vector
1999 // e.g. 4 elem => 1 st.v4
2000 // 6 elem => 2 st.v4
2001 // 8 elem => 2 st.v4
2002 // 11 elem => 3 st.v4
// 64-bit elements use v2 stores (VecSize adjustment partially elided).
2004 unsigned VecSize = 4;
2005 if (OutVals[0].getValueType().getSizeInBits() == 64)
2008 unsigned Offset = 0;
2011 EVT::getVectorVT(F->getContext(), EltVT, VecSize);
2012 unsigned PerStoreOffset =
2013 TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
// Emit one StoreRetvalV2/V4 per chunk of VecSize elements, padding the
// final partial chunk with UNDEF values.
2015 for (unsigned i = 0; i < NumElts; i += VecSize) {
2018 SmallVector<SDValue, 8> Ops;
2019 Ops.push_back(Chain);
2020 Ops.push_back(DAG.getConstant(Offset, MVT::i32));
2021 unsigned Opc = NVPTXISD::StoreRetvalV2;
2022 EVT ExtendedVT = (NeedExtend) ? MVT::i16 : OutVals[0].getValueType();
2024 StoreVal = OutVals[i];
2026 StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal);
2027 Ops.push_back(StoreVal);
2029 if (i + 1 < NumElts) {
2030 StoreVal = OutVals[i + 1];
2032 StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal);
2034 StoreVal = DAG.getUNDEF(ExtendedVT);
2036 Ops.push_back(StoreVal);
2039 Opc = NVPTXISD::StoreRetvalV4;
2040 if (i + 2 < NumElts) {
2041 StoreVal = OutVals[i + 2];
2044 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal);
2046 StoreVal = DAG.getUNDEF(ExtendedVT);
2048 Ops.push_back(StoreVal);
2050 if (i + 3 < NumElts) {
2051 StoreVal = OutVals[i + 3];
2054 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal);
2056 StoreVal = DAG.getUNDEF(ExtendedVT);
2058 Ops.push_back(StoreVal);
2061 // Chain = DAG.getNode(Opc, dl, MVT::Other, &Ops[0], Ops.size());
2063 DAG.getMemIntrinsicNode(Opc, dl, DAG.getVTList(MVT::Other), Ops,
2064 EltVT, MachinePointerInfo());
2065 Offset += PerStoreOffset;
// --- Scalar / aggregate return path: store each decomposed value at the
// offset computed by ComputePTXValueVTs.
2069 SmallVector<EVT, 16> ValVTs;
2070 SmallVector<uint64_t, 16> Offsets;
2071 ComputePTXValueVTs(*this, RetTy, ValVTs, &Offsets, 0);
2072 assert(ValVTs.size() == OutVals.size() && "Bad return value decomposition");
2074 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
2075 SDValue theVal = OutVals[i];
2076 EVT TheValType = theVal.getValueType();
2077 unsigned numElems = 1;
2078 if (TheValType.isVector())
2079 numElems = TheValType.getVectorNumElements();
2080 for (unsigned j = 0, je = numElems; j != je; ++j) {
2081 SDValue TmpVal = theVal;
2082 if (TheValType.isVector())
2083 TmpVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
2084 TheValType.getVectorElementType(), TmpVal,
2085 DAG.getIntPtrConstant(j));
2086 EVT TheStoreType = ValVTs[i];
2087 if (RetTy->isIntegerTy() &&
2088 TD->getTypeAllocSizeInBits(RetTy) < 32) {
2089 // The following zero-extension is for integer types only, and
2090 // specifically not for aggregates.
2091 TmpVal = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, TmpVal);
2092 TheStoreType = MVT::i32;
2094 else if (TmpVal.getValueType().getSizeInBits() < 16)
2095 TmpVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, TmpVal);
2099 DAG.getConstant(Offsets[i], MVT::i32),
2101 Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreRetval, dl,
2102 DAG.getVTList(MVT::Other), Ops,
2104 MachinePointerInfo());
// Glue all the stores into the final return node.
2109 return DAG.getNode(NVPTXISD::RET_FLAG, dl, MVT::Other, Chain);
// LowerAsmOperandForConstraint - Lower an inline-asm operand for the given
// constraint string. Multi-character constraints are handled specially
// (body elided in this listing); single-character constraints defer to the
// generic TargetLowering implementation.
2113 void NVPTXTargetLowering::LowerAsmOperandForConstraint(
2114 SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
2115 SelectionDAG &DAG) const {
2116 if (Constraint.length() > 1)
2119 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
2122 // NVPTX supports vectors of legal types of any length in Intrinsics because
2123 // the NVPTX specific type legalizer
2124 // will legalize them to the PTX supported length.
// Returns true if VT (or, for vectors, its element type) is legal for this
// target. The `return` statements are elided in this listing.
2125 bool NVPTXTargetLowering::isTypeSupportedInIntrinsic(MVT VT) const {
2126 if (isTypeLegal(VT))
2128 if (VT.isVector()) {
2129 MVT eVT = VT.getVectorElementType();
2130 if (isTypeLegal(eVT))
// getOpcForTextureInstr - Map an nvvm texture-fetch intrinsic ID to the
// corresponding NVPTXISD texture opcode. The intrinsic name encodes the
// geometry (1d / 1d_array / 2d / 2d_array / 3d), the result type (v4f32 or
// v4i32), the coordinate type (i32 or f32), and the optional level/grad
// variant; each maps 1:1 to an NVPTXISD node. (Default case elided in this
// listing.)
2136 static unsigned getOpcForTextureInstr(unsigned Intrinsic) {
2137 switch (Intrinsic) {
// 1D texture fetches.
2141 case Intrinsic::nvvm_tex_1d_v4f32_i32:
2142 return NVPTXISD::Tex1DFloatI32;
2143 case Intrinsic::nvvm_tex_1d_v4f32_f32:
2144 return NVPTXISD::Tex1DFloatFloat;
2145 case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
2146 return NVPTXISD::Tex1DFloatFloatLevel;
2147 case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
2148 return NVPTXISD::Tex1DFloatFloatGrad;
2149 case Intrinsic::nvvm_tex_1d_v4i32_i32:
2150 return NVPTXISD::Tex1DI32I32;
2151 case Intrinsic::nvvm_tex_1d_v4i32_f32:
2152 return NVPTXISD::Tex1DI32Float;
2153 case Intrinsic::nvvm_tex_1d_level_v4i32_f32:
2154 return NVPTXISD::Tex1DI32FloatLevel;
2155 case Intrinsic::nvvm_tex_1d_grad_v4i32_f32:
2156 return NVPTXISD::Tex1DI32FloatGrad;
// 1D array texture fetches.
2158 case Intrinsic::nvvm_tex_1d_array_v4f32_i32:
2159 return NVPTXISD::Tex1DArrayFloatI32;
2160 case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
2161 return NVPTXISD::Tex1DArrayFloatFloat;
2162 case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
2163 return NVPTXISD::Tex1DArrayFloatFloatLevel;
2164 case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
2165 return NVPTXISD::Tex1DArrayFloatFloatGrad;
2166 case Intrinsic::nvvm_tex_1d_array_v4i32_i32:
2167 return NVPTXISD::Tex1DArrayI32I32;
2168 case Intrinsic::nvvm_tex_1d_array_v4i32_f32:
2169 return NVPTXISD::Tex1DArrayI32Float;
2170 case Intrinsic::nvvm_tex_1d_array_level_v4i32_f32:
2171 return NVPTXISD::Tex1DArrayI32FloatLevel;
2172 case Intrinsic::nvvm_tex_1d_array_grad_v4i32_f32:
2173 return NVPTXISD::Tex1DArrayI32FloatGrad;
// 2D texture fetches.
2175 case Intrinsic::nvvm_tex_2d_v4f32_i32:
2176 return NVPTXISD::Tex2DFloatI32;
2177 case Intrinsic::nvvm_tex_2d_v4f32_f32:
2178 return NVPTXISD::Tex2DFloatFloat;
2179 case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
2180 return NVPTXISD::Tex2DFloatFloatLevel;
2181 case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
2182 return NVPTXISD::Tex2DFloatFloatGrad;
2183 case Intrinsic::nvvm_tex_2d_v4i32_i32:
2184 return NVPTXISD::Tex2DI32I32;
2185 case Intrinsic::nvvm_tex_2d_v4i32_f32:
2186 return NVPTXISD::Tex2DI32Float;
2187 case Intrinsic::nvvm_tex_2d_level_v4i32_f32:
2188 return NVPTXISD::Tex2DI32FloatLevel;
2189 case Intrinsic::nvvm_tex_2d_grad_v4i32_f32:
2190 return NVPTXISD::Tex2DI32FloatGrad;
// 2D array texture fetches.
2192 case Intrinsic::nvvm_tex_2d_array_v4f32_i32:
2193 return NVPTXISD::Tex2DArrayFloatI32;
2194 case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
2195 return NVPTXISD::Tex2DArrayFloatFloat;
2196 case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
2197 return NVPTXISD::Tex2DArrayFloatFloatLevel;
2198 case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
2199 return NVPTXISD::Tex2DArrayFloatFloatGrad;
2200 case Intrinsic::nvvm_tex_2d_array_v4i32_i32:
2201 return NVPTXISD::Tex2DArrayI32I32;
2202 case Intrinsic::nvvm_tex_2d_array_v4i32_f32:
2203 return NVPTXISD::Tex2DArrayI32Float;
2204 case Intrinsic::nvvm_tex_2d_array_level_v4i32_f32:
2205 return NVPTXISD::Tex2DArrayI32FloatLevel;
2206 case Intrinsic::nvvm_tex_2d_array_grad_v4i32_f32:
2207 return NVPTXISD::Tex2DArrayI32FloatGrad;
// 3D texture fetches.
2209 case Intrinsic::nvvm_tex_3d_v4f32_i32:
2210 return NVPTXISD::Tex3DFloatI32;
2211 case Intrinsic::nvvm_tex_3d_v4f32_f32:
2212 return NVPTXISD::Tex3DFloatFloat;
2213 case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
2214 return NVPTXISD::Tex3DFloatFloatLevel;
2215 case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:
2216 return NVPTXISD::Tex3DFloatFloatGrad;
2217 case Intrinsic::nvvm_tex_3d_v4i32_i32:
2218 return NVPTXISD::Tex3DI32I32;
2219 case Intrinsic::nvvm_tex_3d_v4i32_f32:
2220 return NVPTXISD::Tex3DI32Float;
2221 case Intrinsic::nvvm_tex_3d_level_v4i32_f32:
2222 return NVPTXISD::Tex3DI32FloatLevel;
2223 case Intrinsic::nvvm_tex_3d_grad_v4i32_f32:
2224 return NVPTXISD::Tex3DI32FloatGrad;
// getOpcForSurfaceInstr - Map an nvvm surface-load (suld) intrinsic ID to the
// corresponding NVPTXISD surface opcode. The intrinsic name encodes geometry
// (1d / 1d_array / 2d / 2d_array / 3d), element type/width (i8/i16/i32,
// scalar or v2/v4), and the out-of-bounds mode (all _trap here); each maps
// 1:1 to an NVPTXISD node. (Default case elided in this listing.)
2228 static unsigned getOpcForSurfaceInstr(unsigned Intrinsic) {
2229 switch (Intrinsic) {
// 1D surface loads.
2232 case Intrinsic::nvvm_suld_1d_i8_trap:
2233 return NVPTXISD::Suld1DI8Trap;
2234 case Intrinsic::nvvm_suld_1d_i16_trap:
2235 return NVPTXISD::Suld1DI16Trap;
2236 case Intrinsic::nvvm_suld_1d_i32_trap:
2237 return NVPTXISD::Suld1DI32Trap;
2238 case Intrinsic::nvvm_suld_1d_v2i8_trap:
2239 return NVPTXISD::Suld1DV2I8Trap;
2240 case Intrinsic::nvvm_suld_1d_v2i16_trap:
2241 return NVPTXISD::Suld1DV2I16Trap;
2242 case Intrinsic::nvvm_suld_1d_v2i32_trap:
2243 return NVPTXISD::Suld1DV2I32Trap;
2244 case Intrinsic::nvvm_suld_1d_v4i8_trap:
2245 return NVPTXISD::Suld1DV4I8Trap;
2246 case Intrinsic::nvvm_suld_1d_v4i16_trap:
2247 return NVPTXISD::Suld1DV4I16Trap;
2248 case Intrinsic::nvvm_suld_1d_v4i32_trap:
2249 return NVPTXISD::Suld1DV4I32Trap;
// 1D array surface loads.
2250 case Intrinsic::nvvm_suld_1d_array_i8_trap:
2251 return NVPTXISD::Suld1DArrayI8Trap;
2252 case Intrinsic::nvvm_suld_1d_array_i16_trap:
2253 return NVPTXISD::Suld1DArrayI16Trap;
2254 case Intrinsic::nvvm_suld_1d_array_i32_trap:
2255 return NVPTXISD::Suld1DArrayI32Trap;
2256 case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
2257 return NVPTXISD::Suld1DArrayV2I8Trap;
2258 case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
2259 return NVPTXISD::Suld1DArrayV2I16Trap;
2260 case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
2261 return NVPTXISD::Suld1DArrayV2I32Trap;
2262 case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
2263 return NVPTXISD::Suld1DArrayV4I8Trap;
2264 case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
2265 return NVPTXISD::Suld1DArrayV4I16Trap;
2266 case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
2267 return NVPTXISD::Suld1DArrayV4I32Trap;
// 2D surface loads.
2268 case Intrinsic::nvvm_suld_2d_i8_trap:
2269 return NVPTXISD::Suld2DI8Trap;
2270 case Intrinsic::nvvm_suld_2d_i16_trap:
2271 return NVPTXISD::Suld2DI16Trap;
2272 case Intrinsic::nvvm_suld_2d_i32_trap:
2273 return NVPTXISD::Suld2DI32Trap;
2274 case Intrinsic::nvvm_suld_2d_v2i8_trap:
2275 return NVPTXISD::Suld2DV2I8Trap;
2276 case Intrinsic::nvvm_suld_2d_v2i16_trap:
2277 return NVPTXISD::Suld2DV2I16Trap;
2278 case Intrinsic::nvvm_suld_2d_v2i32_trap:
2279 return NVPTXISD::Suld2DV2I32Trap;
2280 case Intrinsic::nvvm_suld_2d_v4i8_trap:
2281 return NVPTXISD::Suld2DV4I8Trap;
2282 case Intrinsic::nvvm_suld_2d_v4i16_trap:
2283 return NVPTXISD::Suld2DV4I16Trap;
2284 case Intrinsic::nvvm_suld_2d_v4i32_trap:
2285 return NVPTXISD::Suld2DV4I32Trap;
// 2D array surface loads.
2286 case Intrinsic::nvvm_suld_2d_array_i8_trap:
2287 return NVPTXISD::Suld2DArrayI8Trap;
2288 case Intrinsic::nvvm_suld_2d_array_i16_trap:
2289 return NVPTXISD::Suld2DArrayI16Trap;
2290 case Intrinsic::nvvm_suld_2d_array_i32_trap:
2291 return NVPTXISD::Suld2DArrayI32Trap;
2292 case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
2293 return NVPTXISD::Suld2DArrayV2I8Trap;
2294 case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
2295 return NVPTXISD::Suld2DArrayV2I16Trap;
2296 case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
2297 return NVPTXISD::Suld2DArrayV2I32Trap;
2298 case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
2299 return NVPTXISD::Suld2DArrayV4I8Trap;
2300 case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
2301 return NVPTXISD::Suld2DArrayV4I16Trap;
2302 case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
2303 return NVPTXISD::Suld2DArrayV4I32Trap;
// 3D surface loads.
2304 case Intrinsic::nvvm_suld_3d_i8_trap:
2305 return NVPTXISD::Suld3DI8Trap;
2306 case Intrinsic::nvvm_suld_3d_i16_trap:
2307 return NVPTXISD::Suld3DI16Trap;
2308 case Intrinsic::nvvm_suld_3d_i32_trap:
2309 return NVPTXISD::Suld3DI32Trap;
2310 case Intrinsic::nvvm_suld_3d_v2i8_trap:
2311 return NVPTXISD::Suld3DV2I8Trap;
2312 case Intrinsic::nvvm_suld_3d_v2i16_trap:
2313 return NVPTXISD::Suld3DV2I16Trap;
2314 case Intrinsic::nvvm_suld_3d_v2i32_trap:
2315 return NVPTXISD::Suld3DV2I32Trap;
2316 case Intrinsic::nvvm_suld_3d_v4i8_trap:
2317 return NVPTXISD::Suld3DV4I8Trap;
2318 case Intrinsic::nvvm_suld_3d_v4i16_trap:
2319 return NVPTXISD::Suld3DV4I16Trap;
2320 case Intrinsic::nvvm_suld_3d_v4i32_trap:
2321 return NVPTXISD::Suld3DV4I32Trap;
2325 // llvm.ptx.memcpy.const and llvm.ptx.memmove.const need to be modeled as
2327 // because we need the information that is only available in the "Value" type
2329 // pointer. In particular, the address space information.
// getTgtMemIntrinsic - Fill in IntrinsicInfo (opcode, memory VT, pointer
// operand, read/write flags, alignment) for target intrinsics that touch
// memory, so the SelectionDAG builder can create the right MemIntrinsicNode.
// NOTE(review): this listing is elided (gaps in embedded line numbers); the
// `return` statements and some Info field assignments per case are not shown.
2330 bool NVPTXTargetLowering::getTgtMemIntrinsic(
2331 IntrinsicInfo &Info, const CallInst &I, unsigned Intrinsic) const {
2332 switch (Intrinsic) {
// Atomic float add: reads and writes a 32-bit float through arg 0.
2336 case Intrinsic::nvvm_atomic_load_add_f32:
2337 Info.opc = ISD::INTRINSIC_W_CHAIN;
2338 Info.memVT = MVT::f32;
2339 Info.ptrVal = I.getArgOperand(0);
2342 Info.readMem = true;
2343 Info.writeMem = true;
// Atomic inc/dec: reads and writes a 32-bit integer through arg 0.
2347 case Intrinsic::nvvm_atomic_load_inc_32:
2348 case Intrinsic::nvvm_atomic_load_dec_32:
2349 Info.opc = ISD::INTRINSIC_W_CHAIN;
2350 Info.memVT = MVT::i32;
2351 Info.ptrVal = I.getArgOperand(0);
2354 Info.readMem = true;
2355 Info.writeMem = true;
// ldu (load-uniform): read-only load whose memVT comes from the call's
// result type (pointer variant uses the target pointer type).
2359 case Intrinsic::nvvm_ldu_global_i:
2360 case Intrinsic::nvvm_ldu_global_f:
2361 case Intrinsic::nvvm_ldu_global_p: {
2363 Info.opc = ISD::INTRINSIC_W_CHAIN;
2364 if (Intrinsic == Intrinsic::nvvm_ldu_global_i)
2365 Info.memVT = getValueType(I.getType());
2366 else if(Intrinsic == Intrinsic::nvvm_ldu_global_p)
2367 Info.memVT = getPointerTy();
2369 Info.memVT = getValueType(I.getType());
2370 Info.ptrVal = I.getArgOperand(0);
2373 Info.readMem = true;
2374 Info.writeMem = false;
2376 // alignment is available as metadata.
2377 // Grab it and set the alignment.
2378 assert(I.hasMetadataOtherThanDebugLoc() && "Must have alignment metadata");
2379 MDNode *AlignMD = I.getMetadata("align");
2380 assert(AlignMD && "Must have a non-null MDNode");
2381 assert(AlignMD->getNumOperands() == 1 && "Must have a single operand");
2382 Value *Align = AlignMD->getOperand(0);
2383 int64_t Alignment = cast<ConstantInt>(Align)->getZExtValue();
2384 Info.align = Alignment;
// ldg (load via read-only data cache): same handling as ldu above.
2388 case Intrinsic::nvvm_ldg_global_i:
2389 case Intrinsic::nvvm_ldg_global_f:
2390 case Intrinsic::nvvm_ldg_global_p: {
2392 Info.opc = ISD::INTRINSIC_W_CHAIN;
2393 if (Intrinsic == Intrinsic::nvvm_ldg_global_i)
2394 Info.memVT = getValueType(I.getType())
2395 else if(Intrinsic == Intrinsic::nvvm_ldg_global_p)
2396 Info.memVT = getPointerTy();
2398 Info.memVT = getValueType(I.getType());
2399 Info.ptrVal = I.getArgOperand(0);
2402 Info.readMem = true;
2403 Info.writeMem = false;
2405 // alignment is available as metadata.
2406 // Grab it and set the alignment.
2407 assert(I.hasMetadataOtherThanDebugLoc() && "Must have alignment metadata");
2408 MDNode *AlignMD = I.getMetadata("align");
2409 assert(AlignMD && "Must have a non-null MDNode");
2410 assert(AlignMD->getNumOperands() == 1 && "Must have a single operand");
2411 Value *Align = AlignMD->getOperand(0);
2412 int64_t Alignment = cast<ConstantInt>(Align)->getZExtValue();
2413 Info.align = Alignment;
// Texture fetches with v4f32 results: read-only, f32 memory type; no
// pointer operand (texture handle, not a generic pointer).
2418 case Intrinsic::nvvm_tex_1d_v4f32_i32:
2419 case Intrinsic::nvvm_tex_1d_v4f32_f32:
2420 case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
2421 case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
2422 case Intrinsic::nvvm_tex_1d_array_v4f32_i32:
2423 case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
2424 case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
2425 case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
2426 case Intrinsic::nvvm_tex_2d_v4f32_i32:
2427 case Intrinsic::nvvm_tex_2d_v4f32_f32:
2428 case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
2429 case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
2430 case Intrinsic::nvvm_tex_2d_array_v4f32_i32:
2431 case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
2432 case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
2433 case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
2434 case Intrinsic::nvvm_tex_3d_v4f32_i32:
2435 case Intrinsic::nvvm_tex_3d_v4f32_f32:
2436 case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
2437 case Intrinsic::nvvm_tex_3d_grad_v4f32_f32: {
2438 Info.opc = getOpcForTextureInstr(Intrinsic);
2439 Info.memVT = MVT::f32;
2440 Info.ptrVal = nullptr;
2443 Info.readMem = true;
2444 Info.writeMem = false;
// Texture fetches with v4i32 results: as above but i32 memory type.
2448 case Intrinsic::nvvm_tex_1d_v4i32_i32:
2449 case Intrinsic::nvvm_tex_1d_v4i32_f32:
2450 case Intrinsic::nvvm_tex_1d_level_v4i32_f32:
2451 case Intrinsic::nvvm_tex_1d_grad_v4i32_f32:
2452 case Intrinsic::nvvm_tex_1d_array_v4i32_i32:
2453 case Intrinsic::nvvm_tex_1d_array_v4i32_f32:
2454 case Intrinsic::nvvm_tex_1d_array_level_v4i32_f32:
2455 case Intrinsic::nvvm_tex_1d_array_grad_v4i32_f32:
2456 case Intrinsic::nvvm_tex_2d_v4i32_i32:
2457 case Intrinsic::nvvm_tex_2d_v4i32_f32:
2458 case Intrinsic::nvvm_tex_2d_level_v4i32_f32:
2459 case Intrinsic::nvvm_tex_2d_grad_v4i32_f32:
2460 case Intrinsic::nvvm_tex_2d_array_v4i32_i32:
2461 case Intrinsic::nvvm_tex_2d_array_v4i32_f32:
2462 case Intrinsic::nvvm_tex_2d_array_level_v4i32_f32:
2463 case Intrinsic::nvvm_tex_2d_array_grad_v4i32_f32:
2464 case Intrinsic::nvvm_tex_3d_v4i32_i32:
2465 case Intrinsic::nvvm_tex_3d_v4i32_f32:
2466 case Intrinsic::nvvm_tex_3d_level_v4i32_f32:
2467 case Intrinsic::nvvm_tex_3d_grad_v4i32_f32: {
2468 Info.opc = getOpcForTextureInstr(Intrinsic);
2469 Info.memVT = MVT::i32;
2470 Info.ptrVal = nullptr;
2473 Info.readMem = true;
2474 Info.writeMem = false;
// Surface loads grouped by element width: i8 variants.
2478 case Intrinsic::nvvm_suld_1d_i8_trap:
2479 case Intrinsic::nvvm_suld_1d_v2i8_trap:
2480 case Intrinsic::nvvm_suld_1d_v4i8_trap:
2481 case Intrinsic::nvvm_suld_1d_array_i8_trap:
2482 case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
2483 case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
2484 case Intrinsic::nvvm_suld_2d_i8_trap:
2485 case Intrinsic::nvvm_suld_2d_v2i8_trap:
2486 case Intrinsic::nvvm_suld_2d_v4i8_trap:
2487 case Intrinsic::nvvm_suld_2d_array_i8_trap:
2488 case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
2489 case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
2490 case Intrinsic::nvvm_suld_3d_i8_trap:
2491 case Intrinsic::nvvm_suld_3d_v2i8_trap:
2492 case Intrinsic::nvvm_suld_3d_v4i8_trap: {
2493 Info.opc = getOpcForSurfaceInstr(Intrinsic);
2494 Info.memVT = MVT::i8;
2495 Info.ptrVal = nullptr;
2498 Info.readMem = true;
2499 Info.writeMem = false;
// Surface loads: i16 variants.
2503 case Intrinsic::nvvm_suld_1d_i16_trap:
2504 case Intrinsic::nvvm_suld_1d_v2i16_trap:
2505 case Intrinsic::nvvm_suld_1d_v4i16_trap:
2506 case Intrinsic::nvvm_suld_1d_array_i16_trap:
2507 case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
2508 case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
2509 case Intrinsic::nvvm_suld_2d_i16_trap:
2510 case Intrinsic::nvvm_suld_2d_v2i16_trap:
2511 case Intrinsic::nvvm_suld_2d_v4i16_trap:
2512 case Intrinsic::nvvm_suld_2d_array_i16_trap:
2513 case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
2514 case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
2515 case Intrinsic::nvvm_suld_3d_i16_trap:
2516 case Intrinsic::nvvm_suld_3d_v2i16_trap:
2517 case Intrinsic::nvvm_suld_3d_v4i16_trap: {
2518 Info.opc = getOpcForSurfaceInstr(Intrinsic);
2519 Info.memVT = MVT::i16;
2520 Info.ptrVal = nullptr;
2523 Info.readMem = true;
2524 Info.writeMem = false;
// Surface loads: i32 variants.
2528 case Intrinsic::nvvm_suld_1d_i32_trap:
2529 case Intrinsic::nvvm_suld_1d_v2i32_trap:
2530 case Intrinsic::nvvm_suld_1d_v4i32_trap:
2531 case Intrinsic::nvvm_suld_1d_array_i32_trap:
2532 case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
2533 case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
2534 case Intrinsic::nvvm_suld_2d_i32_trap:
2535 case Intrinsic::nvvm_suld_2d_v2i32_trap:
2536 case Intrinsic::nvvm_suld_2d_v4i32_trap:
2537 case Intrinsic::nvvm_suld_2d_array_i32_trap:
2538 case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
2539 case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
2540 case Intrinsic::nvvm_suld_3d_i32_trap:
2541 case Intrinsic::nvvm_suld_3d_v2i32_trap:
2542 case Intrinsic::nvvm_suld_3d_v4i32_trap: {
2543 Info.opc = getOpcForSurfaceInstr(Intrinsic);
2544 Info.memVT = MVT::i32;
2545 Info.ptrVal = nullptr;
2548 Info.readMem = true;
2549 Info.writeMem = false;
2558 /// isLegalAddressingMode - Return true if the addressing mode represented
2559 /// by AM is legal for this target, for a load/store of the specified type.
2560 /// Used to guide target specific optimizations, like loop strength reduction
2561 /// (LoopStrengthReduce.cpp) and memory optimization for address mode
2562 /// (CodeGenPrepare.cpp)
// NOTE(review): this listing is elided; the switch header and several
// return statements between the visible lines are not shown.
2563 bool NVPTXTargetLowering::isLegalAddressingMode(const AddrMode &AM,
2566 // AddrMode - This represents an addressing mode of:
2567 // BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
2569 // The legal address modes are
// With a base global, no extra offset/register/scale is allowed.
2576 if (AM.BaseOffs || AM.HasBaseReg || AM.Scale)
2582 case 0: // "r", "r+i" or "i" is allowed
2585 if (AM.HasBaseReg) // "r+r+i" or "r+r" is not allowed.
2587 // Otherwise we have r+i.
2590 // No scale > 1 is allowed
2596 //===----------------------------------------------------------------------===//
2597 // NVPTX Inline Assembly Support
2598 //===----------------------------------------------------------------------===//
2600 /// getConstraintType - Given a constraint letter, return the type of
2601 /// constraint it is for this target.
// Single-letter register-class constraints (case labels elided in this
// listing) map to C_RegisterClass; everything else defers to the base class.
2602 NVPTXTargetLowering::ConstraintType
2603 NVPTXTargetLowering::getConstraintType(const std::string &Constraint) const {
2604 if (Constraint.size() == 1) {
2605 switch (Constraint[0]) {
2617 return C_RegisterClass;
2620 return TargetLowering::getConstraintType(Constraint);
// getRegForInlineAsmConstraint - Map a single-letter inline-asm constraint to
// the NVPTX register class it selects (case labels elided in this listing;
// the visible classes are i1, i16, i32, i64, f32 and f64 registers).
// Unknown constraints defer to the generic TargetLowering handling.
2623 std::pair<unsigned, const TargetRegisterClass *>
2624 NVPTXTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
2626 if (Constraint.size() == 1) {
2627 switch (Constraint[0]) {
2629 return std::make_pair(0U, &NVPTX::Int1RegsRegClass);
2631 return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
2633 return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
2635 return std::make_pair(0U, &NVPTX::Int32RegsRegClass);
2638 return std::make_pair(0U, &NVPTX::Int64RegsRegClass);
2640 return std::make_pair(0U, &NVPTX::Float32RegsRegClass);
2642 return std::make_pair(0U, &NVPTX::Float64RegsRegClass);
2645 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
2648 /// getFunctionAlignment - Return the Log2 alignment of this function.
// (Function body elided in this listing.)
2649 unsigned NVPTXTargetLowering::getFunctionAlignment(const Function *) const {
2653 //===----------------------------------------------------------------------===//
2654 // NVPTX DAG Combining
2655 //===----------------------------------------------------------------------===//
2657 extern unsigned FMAContractLevel;
2659 /// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
2660 /// operands N0 and N1. This is a helper for PerformADDCombine that is
2661 /// called with the default operands, and if that fails, with commuted
// Folds (add (mul a,b), c) -> IMAD for i32, and (add (fmul a,b), c) -> FMA
// for f32/f64 under register-pressure heuristics. NOTE(review): several
// guard/return lines are elided in this listing.
2663 static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
2664 TargetLowering::DAGCombinerInfo &DCI,
2665 const NVPTXSubtarget &Subtarget,
2666 CodeGenOpt::Level OptLevel) {
2667 SelectionDAG &DAG = DCI.DAG;
2668 // Skip non-integer, non-scalar case
2669 EVT VT=N0.getValueType();
2673 // fold (add (mul a, b), c) -> (mad a, b, c)
2675 if (N0.getOpcode() == ISD::MUL) {
2676 assert (VT.isInteger());
2678 // Since integer multiply-add costs the same as integer multiply
2679 // but is more costly than integer add, do the fusion only when
2680 // the mul is only used in the add.
2681 if (OptLevel==CodeGenOpt::None || VT != MVT::i32 ||
2682 !N0.getNode()->hasOneUse())
2686 return DAG.getNode(NVPTXISD::IMAD, SDLoc(N), VT,
2687 N0.getOperand(0), N0.getOperand(1), N1);
2689 else if (N0.getOpcode() == ISD::FMUL) {
2690 if (VT == MVT::f32 || VT == MVT::f64) {
// FMA contraction must be enabled (FMAContractLevel is an extern flag).
2691 if (FMAContractLevel == 0)
2694 // For floating point:
2695 // Do the fusion only when the mul has less than 5 uses and all
2697 // The heuristic is that if a use is not an add, then that use
2698 // cannot be fused into fma, therefore mul is still needed anyway.
2699 // If there are more than 4 uses, even if they are all add, fusing
2700 // them will increase register pressure.
2703 int nonAddCount = 0;
2704 for (SDNode::use_iterator UI = N0.getNode()->use_begin(),
2705 UE = N0.getNode()->use_end();
2709 if (User->getOpcode() != ISD::FADD)
2715 int orderNo = N->getIROrder();
2716 int orderNo2 = N0.getNode()->getIROrder();
2717 // Simple heuristic for potential register pressure: the IROrder
2718 // difference approximates the distance between def and use; the
2719 // longer the distance, the more likely the fusion causes register
2720 // pressure.
2721 if (orderNo - orderNo2 < 500)
2724 // Now, check if at least one of the FMUL's operands is live beyond the node N,
2725 // which guarantees that the FMA will not increase register pressure at node N.
2726 bool opIsLive = false;
2727 const SDNode *left = N0.getOperand(0).getNode();
2728 const SDNode *right = N0.getOperand(1).getNode();
// Constant operands are materialized freely, so treat them as live.
2730 if (dyn_cast<ConstantSDNode>(left) || dyn_cast<ConstantSDNode>(right))
// An operand is live beyond N if some user of it has a later IROrder.
2734 for (SDNode::use_iterator UI = left->use_begin(), UE = left->use_end(); UI != UE; ++UI) {
2736 int orderNo3 = User->getIROrder();
2737 if (orderNo3 > orderNo) {
2744 for (SDNode::use_iterator UI = right->use_begin(), UE = right->use_end(); UI != UE; ++UI) {
2746 int orderNo3 = User->getIROrder();
2747 if (orderNo3 > orderNo) {
2757 return DAG.getNode(ISD::FMA, SDLoc(N), VT,
2758 N0.getOperand(0), N0.getOperand(1), N1);
2765 /// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
// Tries PerformADDCombineWithOperands with (N0, N1), then commuted (N1, N0)
// since ADD is commutative but the mul/fmul pattern match is on N0 only.
2767 static SDValue PerformADDCombine(SDNode *N,
2768 TargetLowering::DAGCombinerInfo &DCI,
2769 const NVPTXSubtarget &Subtarget,
2770 CodeGenOpt::Level OptLevel) {
2771 SDValue N0 = N->getOperand(0);
2772 SDValue N1 = N->getOperand(1);
2774 // First try with the default operand order.
2775 SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget,
2777 if (Result.getNode())
2780 // If that didn't work, try again with the operands commuted.
2781 return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget, OptLevel);
// PerformANDCombine - Eliminate redundant (and (load ...), 0xff) produced by
// the type legalizer around target-specific i8 vector loads. NOTE(review):
// several early-return lines are elided in this listing.
2784 static SDValue PerformANDCombine(SDNode *N,
2785 TargetLowering::DAGCombinerInfo &DCI) {
2786 // The type legalizer turns a vector load of i8 values into a zextload to i16
2787 // registers, optionally ANY_EXTENDs it (if target type is integer),
2788 // and ANDs off the high 8 bits. Since we turn this load into a
2789 // target-specific DAG node, the DAG combiner fails to eliminate these AND
2790 // nodes. Do that here.
2791 SDValue Val = N->getOperand(0);
2792 SDValue Mask = N->getOperand(1);
// Canonicalize so the constant mask is in Mask, the value in Val.
2794 if (isa<ConstantSDNode>(Val)) {
2795 std::swap(Val, Mask);
2799 // Generally, we will see zextload -> IMOV16rr -> ANY_EXTEND -> and
2800 if (Val.getOpcode() == ISD::ANY_EXTEND) {
2802 Val = Val->getOperand(0);
// Look through an already-selected 16-bit register move.
2805 if (Val->isMachineOpcode() && Val->getMachineOpcode() == NVPTX::IMOV16rr) {
2806 Val = Val->getOperand(0);
2809 if (Val->getOpcode() == NVPTXISD::LoadV2 ||
2810 Val->getOpcode() == NVPTXISD::LoadV4) {
2811 ConstantSDNode *MaskCnst = dyn_cast<ConstantSDNode>(Mask);
2813 // Not an AND with a constant
2817 uint64_t MaskVal = MaskCnst->getZExtValue();
2818 if (MaskVal != 0xff) {
2819 // Not an AND that chops off top 8 bits
2823 MemSDNode *Mem = dyn_cast<MemSDNode>(Val);
2825 // Not a MemSDNode?!?
2829 EVT MemVT = Mem->getMemoryVT();
2830 if (MemVT != MVT::v2i8 && MemVT != MVT::v4i8) {
2831 // We only handle the i8 case
// The load's extension type is encoded as its last (constant) operand.
2836 cast<ConstantSDNode>(Val->getOperand(Val->getNumOperands()-1))->
2838 if (ExtType == ISD::SEXTLOAD) {
2839 // If for some reason the load is a sextload, the and is needed to zero
2840 // out the high 8 bits
2845 if (AExt.getNode() != 0) {
2846 // Re-insert the ext as a zext.
2847 Val = DCI.DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N),
2848 AExt.getValueType(), Val);
2852 // If we get here, the AND is unnecessary. Just replace it with the load
2853 DCI.CombineTo(N, Val, AddTo);
// Signedness classification for mul.wide operand demotion (enumerators,
// including the Signed/Unsigned/Unknown values referenced below, are elided
// in this listing).
2859 enum OperandSignedness {
2865 /// IsMulWideOperandDemotable - Checks if the provided DAG node is an operand
2866 /// that can be demoted to \p OptSize bits without loss of information. The
2867 /// signedness of the operand, if determinable, is placed in \p S.
// A sign-extension from exactly OptSize bits marks the operand signed; a
// zero-extension from exactly OptSize bits marks it unsigned. (The S
// assignments and return statements are elided in this listing.)
2868 static bool IsMulWideOperandDemotable(SDValue Op,
2870 OperandSignedness &S) {
2873 if (Op.getOpcode() == ISD::SIGN_EXTEND ||
2874 Op.getOpcode() == ISD::SIGN_EXTEND_INREG) {
2875 EVT OrigVT = Op.getOperand(0).getValueType();
2876 if (OrigVT.getSizeInBits() == OptSize) {
2880 } else if (Op.getOpcode() == ISD::ZERO_EXTEND) {
2881 EVT OrigVT = Op.getOperand(0).getValueType();
2882 if (OrigVT.getSizeInBits() == OptSize) {
2891 /// AreMulWideOperandsDemotable - Checks if the given LHS and RHS operands can
2892 /// be demoted to \p OptSize bits without loss of information. If the operands
2893 /// contain a constant, it should appear as the RHS operand. The signedness of
2894 /// the operands is placed in \p IsSigned.
2895 static bool AreMulWideOperandsDemotable(SDValue LHS, SDValue RHS,
2899 OperandSignedness LHSSign;
2901 // The LHS operand must be a demotable op
2902 if (!IsMulWideOperandDemotable(LHS, OptSize, LHSSign))
2905 // We should have been able to determine the signedness from the LHS
2906 if (LHSSign == Unknown)
2909 IsSigned = (LHSSign == Signed);
2911 // The RHS can be a demotable op or a constant
// A constant RHS demotes iff it fits in OptSize bits under the LHS's
// signedness (return statements elided in this listing).
2912 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(RHS)) {
2913 APInt Val = CI->getAPIntValue();
2914 if (LHSSign == Unsigned) {
2915 if (Val.isIntN(OptSize)) {
2920 if (Val.isSignedIntN(OptSize)) {
// Non-constant RHS: must itself be demotable with the same signedness.
2926 OperandSignedness RHSSign;
2927 if (!IsMulWideOperandDemotable(RHS, OptSize, RHSSign))
2930 if (LHSSign != RHSSign)
2937 /// TryMULWIDECombine - Attempt to replace a multiply of M bits with a multiply
2938 /// of M/2 bits that produces an M-bit result (i.e. mul.wide). This transform
2939 /// works on both multiply DAG nodes and SHL DAG nodes with a constant shift
// NOTE(review): several early-return lines are elided in this listing.
2941 static SDValue TryMULWIDECombine(SDNode *N,
2942 TargetLowering::DAGCombinerInfo &DCI) {
// Only i32 and i64 multiplies can be narrowed (to i16/i32 mul.wide).
2943 EVT MulType = N->getValueType(0);
2944 if (MulType != MVT::i32 && MulType != MVT::i64) {
2948 unsigned OptSize = MulType.getSizeInBits() >> 1;
2949 SDValue LHS = N->getOperand(0);
2950 SDValue RHS = N->getOperand(1);
2952 // Canonicalize the multiply so the constant (if any) is on the right
2953 if (N->getOpcode() == ISD::MUL) {
2954 if (isa<ConstantSDNode>(LHS)) {
2955 std::swap(LHS, RHS);
2959 // If we have a SHL, determine the actual multiply amount
2960 if (N->getOpcode() == ISD::SHL) {
2961 ConstantSDNode *ShlRHS = dyn_cast<ConstantSDNode>(RHS);
// Rewrite shl-by-k as multiply-by-2^k so the same demotion check applies.
2966 APInt ShiftAmt = ShlRHS->getAPIntValue();
2967 unsigned BitWidth = MulType.getSizeInBits();
2968 if (ShiftAmt.sge(0) && ShiftAmt.slt(BitWidth)) {
2969 APInt MulVal = APInt(BitWidth, 1) << ShiftAmt;
2970 RHS = DCI.DAG.getConstant(MulVal, MulType);
2977 // Verify that our operands are demotable
2978 if (!AreMulWideOperandsDemotable(LHS, RHS, OptSize, Signed)) {
2983 if (MulType == MVT::i32) {
2984 DemotedVT = MVT::i16;
2986 DemotedVT = MVT::i32;
2989 // Truncate the operands to the correct size. Note that these are just for
2990 // type consistency and will (likely) be eliminated in later phases.
2992 DCI.DAG.getNode(ISD::TRUNCATE, SDLoc(N), DemotedVT, LHS);
2994 DCI.DAG.getNode(ISD::TRUNCATE, SDLoc(N), DemotedVT, RHS);
// Pick the signed or unsigned widening multiply based on operand signedness.
2998 Opc = NVPTXISD::MUL_WIDE_SIGNED;
3000 Opc = NVPTXISD::MUL_WIDE_UNSIGNED;
3003 return DCI.DAG.getNode(Opc, SDLoc(N), MulType, TruncLHS, TruncRHS);
3006 /// PerformMULCombine - Runs PTX-specific DAG combine patterns on MUL nodes.
/// Currently this only attempts the mul.wide demotion via TryMULWIDECombine.
3007 static SDValue PerformMULCombine(SDNode *N,
3008 TargetLowering::DAGCombinerInfo &DCI,
3009 CodeGenOpt::Level OptLevel) {
3011 // Try mul.wide combining at OptLevel > 0
3012 SDValue Ret = TryMULWIDECombine(N, DCI);
3020 /// PerformSHLCombine - Runs PTX-specific DAG combine patterns on SHL nodes.
/// A shift-left by a constant is a multiply in disguise, so it shares the
/// mul.wide combine with PerformMULCombine.
3021 static SDValue PerformSHLCombine(SDNode *N,
3022 TargetLowering::DAGCombinerInfo &DCI,
3023 CodeGenOpt::Level OptLevel) {
3025 // Try mul.wide combining at OptLevel > 0
3026 SDValue Ret = TryMULWIDECombine(N, DCI);
/// PerformDAGCombine - Dispatch to the opcode-specific NVPTX DAG combines
/// (ADD, MUL, SHL, AND).
3034 SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
3035 DAGCombinerInfo &DCI) const {
3036 // FIXME: Get this from the DAG somehow
// Hard-coded to Aggressive until the real opt level is plumbed through.
3037 CodeGenOpt::Level OptLevel = CodeGenOpt::Aggressive;
3038 switch (N->getOpcode()) {
3042 return PerformADDCombine(N, DCI, nvptxSubtarget, OptLevel);
3044 return PerformMULCombine(N, DCI, OptLevel);
3046 return PerformSHLCombine(N, DCI, OptLevel);
3048 return PerformANDCombine(N, DCI);
3053 /// ReplaceLoadVector - Convert vector loads into multi-output scalar loads.
/// Emits an NVPTXISD::LoadV2/LoadV4 memory intrinsic and rebuilds the vector
/// result (plus the chain) into \p Results.
3054 static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
3055 SmallVectorImpl<SDValue> &Results) {
3056 EVT ResVT = N->getValueType(0);
3059 assert(ResVT.isVector() && "Vector load must have vector type");
3061 // We only handle "native" vector sizes for now, e.g. <4 x double> is not
3062 // legal. We can (and should) split that into 2 loads of <2 x double> here
3063 // but I'm leaving that as a TODO for now.
3064 assert(ResVT.isSimple() && "Can only handle simple types");
3065 switch (ResVT.getSimpleVT().SimpleTy) {
3078 // This is a "native" vector type
3082 EVT EltVT = ResVT.getVectorElementType();
3083 unsigned NumElts = ResVT.getVectorNumElements();
3085 // Since LoadV2 is a target node, we cannot rely on DAG type legalization.
3086 // Therefore, we must ensure the type is legal. For i1 and i8, we set the
3087 // loaded type to i16 and propagate the "real" type as the memory type.
3088 bool NeedTrunc = false;
3089 if (EltVT.getSizeInBits() < 16) {
3094 unsigned Opcode = 0;
3101 Opcode = NVPTXISD::LoadV2;
3102 LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
3105 Opcode = NVPTXISD::LoadV4;
3106 EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
3107 LdResVTs = DAG.getVTList(ListVTs);
3112 SmallVector<SDValue, 8> OtherOps;
3114 // Copy regular operands
3115 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
3116 OtherOps.push_back(N->getOperand(i));
3118 LoadSDNode *LD = cast<LoadSDNode>(N);
3120 // The select routine does not have access to the LoadSDNode instance, so
3121 // pass along the extension information
3122 OtherOps.push_back(DAG.getIntPtrConstant(LD->getExtensionType()));
3124 SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps,
3126 LD->getMemOperand());
3128 SmallVector<SDValue, 4> ScalarRes;
// Collect the scalar results, truncating each back to the original
// element type when the load was widened to i16 above.
3130 for (unsigned i = 0; i < NumElts; ++i) {
3131 SDValue Res = NewLD.getValue(i);
3133 Res = DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
3134 ScalarRes.push_back(Res);
// The chain is the result immediately after the NumElts scalar values.
3137 SDValue LoadChain = NewLD.getValue(NumElts);
3139 SDValue BuildVec = DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, ScalarRes);
3141 Results.push_back(BuildVec);
3142 Results.push_back(LoadChain);
/// ReplaceINTRINSIC_W_CHAIN - Custom-legalize ldu/ldg intrinsic results.
/// Vector results become NVPTXISD::LDGV2/LDGV4/LDUV2/LDUV4 nodes; scalar i8
/// results are widened to i16 (with the memory type kept at i8) and then
/// truncated back. The replacement value and chain are appended to
/// \p Results.
3145 static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
3146 SmallVectorImpl<SDValue> &Results) {
3147 SDValue Chain = N->getOperand(0);
3148 SDValue Intrin = N->getOperand(1);
3151 // Get the intrinsic ID
3152 unsigned IntrinNo = cast<ConstantSDNode>(Intrin.getNode())->getZExtValue();
3156 case Intrinsic::nvvm_ldg_global_i:
3157 case Intrinsic::nvvm_ldg_global_f:
3158 case Intrinsic::nvvm_ldg_global_p:
3159 case Intrinsic::nvvm_ldu_global_i:
3160 case Intrinsic::nvvm_ldu_global_f:
3161 case Intrinsic::nvvm_ldu_global_p: {
3162 EVT ResVT = N->getValueType(0);
3164 if (ResVT.isVector()) {
3167 unsigned NumElts = ResVT.getVectorNumElements();
3168 EVT EltVT = ResVT.getVectorElementType();
3170 // Since LDU/LDG are target nodes, we cannot rely on DAG type
3172 // Therefore, we must ensure the type is legal. For i1 and i8, we set the
3173 // loaded type to i16 and propagate the "real" type as the memory type.
3174 bool NeedTrunc = false;
3175 if (EltVT.getSizeInBits() < 16) {
3180 unsigned Opcode = 0;
// Two-element vectors map to the V2 flavors of LDG/LDU.
3190 case Intrinsic::nvvm_ldg_global_i:
3191 case Intrinsic::nvvm_ldg_global_f:
3192 case Intrinsic::nvvm_ldg_global_p:
3193 Opcode = NVPTXISD::LDGV2;
3195 case Intrinsic::nvvm_ldu_global_i:
3196 case Intrinsic::nvvm_ldu_global_f:
3197 case Intrinsic::nvvm_ldu_global_p:
3198 Opcode = NVPTXISD::LDUV2;
3201 LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
// Four-element vectors map to the V4 flavors of LDG/LDU.
3207 case Intrinsic::nvvm_ldg_global_i:
3208 case Intrinsic::nvvm_ldg_global_f:
3209 case Intrinsic::nvvm_ldg_global_p:
3210 Opcode = NVPTXISD::LDGV4;
3212 case Intrinsic::nvvm_ldu_global_i:
3213 case Intrinsic::nvvm_ldu_global_f:
3214 case Intrinsic::nvvm_ldu_global_p:
3215 Opcode = NVPTXISD::LDUV4;
3218 EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
3219 LdResVTs = DAG.getVTList(ListVTs);
3224 SmallVector<SDValue, 8> OtherOps;
3226 // Copy regular operands
3228 OtherOps.push_back(Chain); // Chain
3229 // Skip operand 1 (intrinsic ID)
3231 for (unsigned i = 2, e = N->getNumOperands(); i != e; ++i)
3232 OtherOps.push_back(N->getOperand(i));
3234 MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);
3236 SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps,
3237 MemSD->getMemoryVT(),
3238 MemSD->getMemOperand());
3240 SmallVector<SDValue, 4> ScalarRes;
// Collect the scalar results, truncating each back to the original
// element type when the load was widened to i16 above.
3242 for (unsigned i = 0; i < NumElts; ++i) {
3243 SDValue Res = NewLD.getValue(i);
3246 DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
3247 ScalarRes.push_back(Res);
// The chain is the result immediately after the NumElts scalar values.
3250 SDValue LoadChain = NewLD.getValue(NumElts);
3253 DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, ScalarRes);
3255 Results.push_back(BuildVec);
3256 Results.push_back(LoadChain);
// Scalar path: only i8 requires custom handling (i8 is not a legal
// result type here), so widen to i16 for the load itself.
3259 assert(ResVT.isSimple() && ResVT.getSimpleVT().SimpleTy == MVT::i8 &&
3260 "Custom handling of non-i8 ldu/ldg?");
3262 // Just copy all operands as-is
3263 SmallVector<SDValue, 4> Ops;
3264 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
3265 Ops.push_back(N->getOperand(i));
3267 // Force output to i16
3268 SDVTList LdResVTs = DAG.getVTList(MVT::i16, MVT::Other);
3270 MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);
3272 // We make sure the memory type is i8, which will be used during isel
3273 // to select the proper instruction.
3275 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, LdResVTs, Ops,
3276 MVT::i8, MemSD->getMemOperand());
// Truncate the widened i16 result back to the i8 the caller expects.
3278 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
3279 NewLD.getValue(0)));
3280 Results.push_back(NewLD.getValue(1));
/// ReplaceNodeResults - Custom legalization entry point: dispatch illegal
/// vector loads and ldu/ldg intrinsics to their replacement helpers.
3286 void NVPTXTargetLowering::ReplaceNodeResults(
3287 SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
3288 switch (N->getOpcode()) {
// Anything else reaching here was marked Custom but has no handler.
3290 report_fatal_error("Unhandled custom legalization");
3292 ReplaceLoadVector(N, DAG, Results);
3294 case ISD::INTRINSIC_W_CHAIN:
3295 ReplaceINTRINSIC_W_CHAIN(N, DAG, Results);
3300 // Pin NVPTXSection's and NVPTXTargetObjectFile's vtables to this file.
// Deliberately empty: an out-of-line virtual keeps the vtable emitted here.
3301 void NVPTXSection::anchor() {}
3303 NVPTXTargetObjectFile::~NVPTXTargetObjectFile() {
3307 delete ReadOnlySection;
3309 delete StaticCtorSection;
3310 delete StaticDtorSection;
3312 delete EHFrameSection;
3313 delete DwarfAbbrevSection;
3314 delete DwarfInfoSection;
3315 delete DwarfLineSection;
3316 delete DwarfFrameSection;
3317 delete DwarfPubTypesSection;
3318 delete DwarfDebugInlineSection;
3319 delete DwarfStrSection;
3320 delete DwarfLocSection;
3321 delete DwarfARangesSection;
3322 delete DwarfRangesSection;
3323 delete DwarfMacroInfoSection;