2 // The LLVM Compiler Infrastructure
4 // This file is distributed under the University of Illinois Open Source
5 // License. See LICENSE.TXT for details.
7 //===----------------------------------------------------------------------===//
// This file defines the interfaces that NVPTX uses to lower LLVM code into a
// selection DAG.
12 //===----------------------------------------------------------------------===//
14 #include "NVPTXISelLowering.h"
16 #include "NVPTXTargetMachine.h"
17 #include "NVPTXTargetObjectFile.h"
18 #include "NVPTXUtilities.h"
19 #include "llvm/CodeGen/Analysis.h"
20 #include "llvm/CodeGen/MachineFrameInfo.h"
21 #include "llvm/CodeGen/MachineFunction.h"
22 #include "llvm/CodeGen/MachineInstrBuilder.h"
23 #include "llvm/CodeGen/MachineRegisterInfo.h"
24 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
25 #include "llvm/IR/CallSite.h"
26 #include "llvm/IR/DerivedTypes.h"
27 #include "llvm/IR/Function.h"
28 #include "llvm/IR/GlobalValue.h"
29 #include "llvm/IR/IntrinsicInst.h"
30 #include "llvm/IR/Intrinsics.h"
31 #include "llvm/IR/Module.h"
32 #include "llvm/MC/MCSectionELF.h"
33 #include "llvm/Support/CommandLine.h"
34 #include "llvm/Support/Debug.h"
35 #include "llvm/Support/ErrorHandling.h"
36 #include "llvm/Support/raw_ostream.h"
40 #define DEBUG_TYPE "nvptx-lower"
44 static unsigned int uniqueCallSite = 0;
46 static cl::opt<bool> sched4reg(
48 cl::desc("NVPTX Specific: schedule for register pressue"), cl::init(false));
// IsPTXVectorType - Returns true when \p VT is one of the simple vector value
// types that PTX can load/store with a single vector instruction.
// NOTE(review): the switch's case list and the end of this function are
// missing from this chunk of the file — reconstruct against upstream before
// relying on the exact set of accepted types.
static bool IsPTXVectorType(MVT VT) {
  switch (VT.SimpleTy) {
// GCD - Greatest common divisor of two non-negative values.
// Used to derive the alignment of an aggregate element from the aggregate's
// base alignment and the element's byte offset (GCD(align, offset)).
// Returns the non-zero argument when the other is zero.
// NOTE(review): callers pass alignments/offsets, so inputs are assumed
// non-negative.
static uint64_t GCD( int a, int b)
{
  // Normalize so a >= b (not required by Euclid's algorithm, but keeps the
  // modulo operands in the conventional order).
  if (a < b) std::swap(a,b);
  // Euclid's algorithm.
  while (b != 0) {
    int t = b;
    b = a % b;
    a = t;
  }
  return static_cast<uint64_t>(a);
}
81 /// ComputePTXValueVTs - For the given Type \p Ty, returns the set of primitive
82 /// EVTs that compose it. Unlike ComputeValueVTs, this will break apart vectors
83 /// into their primitive components.
84 /// NOTE: This is a band-aid for code that expects ComputeValueVTs to return the
85 /// same number of types as the Ins/Outs arrays in LowerFormalArguments,
86 /// LowerCall, and LowerReturn.
87 static void ComputePTXValueVTs(const TargetLowering &TLI, Type *Ty,
88 SmallVectorImpl<EVT> &ValueVTs,
89 SmallVectorImpl<uint64_t> *Offsets = nullptr,
90 uint64_t StartingOffset = 0) {
91 SmallVector<EVT, 16> TempVTs;
92 SmallVector<uint64_t, 16> TempOffsets;
94 ComputeValueVTs(TLI, Ty, TempVTs, &TempOffsets, StartingOffset);
95 for (unsigned i = 0, e = TempVTs.size(); i != e; ++i) {
97 uint64_t Off = TempOffsets[i];
99 for (unsigned j = 0, je = VT.getVectorNumElements(); j != je; ++j) {
100 ValueVTs.push_back(VT.getVectorElementType());
102 Offsets->push_back(Off+j*VT.getVectorElementType().getStoreSize());
105 ValueVTs.push_back(VT);
107 Offsets->push_back(Off);
// NVPTXTargetLowering Constructor.
// Registers the legal register classes, declares which generic DAG operations
// PTX supports natively (Legal), which must be rewritten (Expand), and which
// get target-specific lowering (Custom), then derives register properties.
NVPTXTargetLowering::NVPTXTargetLowering(NVPTXTargetMachine &TM)
    : TargetLowering(TM, new NVPTXTargetObjectFile()), nvTM(&TM),
      nvptxSubtarget(TM.getSubtarget<NVPTXSubtarget>()) {
  // Always lower memset, memcpy, and memmove intrinsics to load/store
  // instructions, rather than generating calls to memset, memcpy or memmove
  // (no libc on the device).
  MaxStoresPerMemset = (unsigned) 0xFFFFFFFF;
  MaxStoresPerMemcpy = (unsigned) 0xFFFFFFFF;
  MaxStoresPerMemmove = (unsigned) 0xFFFFFFFF;

  // Booleans (scalar and vector) are all-zeros / all-ones.
  setBooleanContents(ZeroOrNegativeOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // Jump is Expensive. Don't create extra control flow for 'and', 'or'
  // condition branches.
  setJumpIsExpensive(true);

  // By default, use the Source scheduling.
  // NOTE(review): the if/else guard (presumably on the sched4reg flag)
  // selecting between these two preferences appears to be missing from this
  // chunk; as written, Source unconditionally overrides RegPressure.
  setSchedulingPreference(Sched::RegPressure);
  setSchedulingPreference(Sched::Source);

  // One virtual register class per legal scalar type.
  addRegisterClass(MVT::i1, &NVPTX::Int1RegsRegClass);
  addRegisterClass(MVT::i16, &NVPTX::Int16RegsRegClass);
  addRegisterClass(MVT::i32, &NVPTX::Int32RegsRegClass);
  addRegisterClass(MVT::i64, &NVPTX::Int64RegsRegClass);
  addRegisterClass(MVT::f32, &NVPTX::Float32RegsRegClass);
  addRegisterClass(MVT::f64, &NVPTX::Float64RegsRegClass);

  // Operations not directly supported by NVPTX.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i8, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);
  setOperationAction(ISD::BR_CC, MVT::i1, Expand);
  setOperationAction(ISD::BR_CC, MVT::i8, Expand);
  setOperationAction(ISD::BR_CC, MVT::i16, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  // Some SIGN_EXTEND_INREG can be done using cvt instruction.
  // For others we will expand to a SHL/SRA pair.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i64, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // Multi-word shifts get custom (funnel-shift based) lowering.
  setOperationAction(ISD::SHL_PARTS, MVT::i32 , Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32 , Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32 , Custom);
  setOperationAction(ISD::SHL_PARTS, MVT::i64 , Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i64 , Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i64 , Custom);

  // 64-bit rotates are only legal when the subtarget has the instruction.
  // NOTE(review): the '} else {' between the Legal and Expand pairs appears
  // to be missing from this chunk.
  if (nvptxSubtarget.hasROT64()) {
    setOperationAction(ISD::ROTL, MVT::i64, Legal);
    setOperationAction(ISD::ROTR, MVT::i64, Legal);
    setOperationAction(ISD::ROTL, MVT::i64, Expand);
    setOperationAction(ISD::ROTR, MVT::i64, Expand);
  // Same structure for 32-bit rotates (same missing '} else {' note applies).
  if (nvptxSubtarget.hasROT32()) {
    setOperationAction(ISD::ROTL, MVT::i32, Legal);
    setOperationAction(ISD::ROTR, MVT::i32, Legal);
    setOperationAction(ISD::ROTL, MVT::i32, Expand);
    setOperationAction(ISD::ROTR, MVT::i32, Expand);

  // Sub-32-bit rotates and all byte swaps are always expanded.
  setOperationAction(ISD::ROTL, MVT::i16, Expand);
  setOperationAction(ISD::ROTR, MVT::i16, Expand);
  setOperationAction(ISD::ROTL, MVT::i8, Expand);
  setOperationAction(ISD::ROTR, MVT::i8, Expand);
  setOperationAction(ISD::BSWAP, MVT::i16, Expand);
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i64, Expand);

  // Indirect branch is not supported.
  // This also disables Jump Table creation.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRIND, MVT::Other, Expand);

  // Global addresses get wrapped (see LowerGlobalAddress).
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);

  // We want to legalize constant related memmove and memcopy intrinsics.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);

  // Turn FP extload into load/fextend
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
  // Turn FP truncstore into trunc + store.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PTX does not support load / store predicate registers
  setOperationAction(ISD::LOAD, MVT::i1, Custom);
  setOperationAction(ISD::STORE, MVT::i1, Custom);

  // Extending i1 loads are promoted to a wider type; truncating stores to i1
  // are expanded (predicates can't be stored directly).
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setTruncStoreAction(MVT::i64, MVT::i1, Expand);
  setTruncStoreAction(MVT::i32, MVT::i1, Expand);
  setTruncStoreAction(MVT::i16, MVT::i1, Expand);
  setTruncStoreAction(MVT::i8, MVT::i1, Expand);

  // This is legal in NVPTX
  setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);

  // TRAP can be lowered to PTX trap
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // No flag-carrying add/add-with-carry for i64.
  setOperationAction(ISD::ADDC, MVT::i64, Expand);
  setOperationAction(ISD::ADDE, MVT::i64, Expand);

  // Register custom handling for vector loads/stores
  // NOTE(review): the loop header's '++i) {' tail appears to be missing from
  // this chunk.
  for (int i = MVT::FIRST_VECTOR_VALUETYPE; i <= MVT::LAST_VECTOR_VALUETYPE;
    MVT VT = (MVT::SimpleValueType) i;
    if (IsPTXVectorType(VT)) {
      setOperationAction(ISD::LOAD, VT, Custom);
      setOperationAction(ISD::STORE, VT, Custom);
      setOperationAction(ISD::INTRINSIC_W_CHAIN, VT, Custom);

  // Custom handling for i8 intrinsics
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);

  // clz/popc map to PTX instructions; ctz has no PTX equivalent and is
  // expanded.
  setOperationAction(ISD::CTLZ, MVT::i16, Legal);
  setOperationAction(ISD::CTLZ, MVT::i32, Legal);
  setOperationAction(ISD::CTLZ, MVT::i64, Legal);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Legal);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Legal);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Legal);
  setOperationAction(ISD::CTTZ, MVT::i16, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i64, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  setOperationAction(ISD::CTPOP, MVT::i16, Legal);
  setOperationAction(ISD::CTPOP, MVT::i32, Legal);
  setOperationAction(ISD::CTPOP, MVT::i64, Legal);

  // We have some custom DAG combine patterns for these nodes
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::SHL);

  // Now deduce the information based on the above mentioned actions.
  computeRegisterProperties();
277 const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const {
282 return "NVPTXISD::CALL";
283 case NVPTXISD::RET_FLAG:
284 return "NVPTXISD::RET_FLAG";
285 case NVPTXISD::Wrapper:
286 return "NVPTXISD::Wrapper";
287 case NVPTXISD::DeclareParam:
288 return "NVPTXISD::DeclareParam";
289 case NVPTXISD::DeclareScalarParam:
290 return "NVPTXISD::DeclareScalarParam";
291 case NVPTXISD::DeclareRet:
292 return "NVPTXISD::DeclareRet";
293 case NVPTXISD::DeclareRetParam:
294 return "NVPTXISD::DeclareRetParam";
295 case NVPTXISD::PrintCall:
296 return "NVPTXISD::PrintCall";
297 case NVPTXISD::LoadParam:
298 return "NVPTXISD::LoadParam";
299 case NVPTXISD::LoadParamV2:
300 return "NVPTXISD::LoadParamV2";
301 case NVPTXISD::LoadParamV4:
302 return "NVPTXISD::LoadParamV4";
303 case NVPTXISD::StoreParam:
304 return "NVPTXISD::StoreParam";
305 case NVPTXISD::StoreParamV2:
306 return "NVPTXISD::StoreParamV2";
307 case NVPTXISD::StoreParamV4:
308 return "NVPTXISD::StoreParamV4";
309 case NVPTXISD::StoreParamS32:
310 return "NVPTXISD::StoreParamS32";
311 case NVPTXISD::StoreParamU32:
312 return "NVPTXISD::StoreParamU32";
313 case NVPTXISD::CallArgBegin:
314 return "NVPTXISD::CallArgBegin";
315 case NVPTXISD::CallArg:
316 return "NVPTXISD::CallArg";
317 case NVPTXISD::LastCallArg:
318 return "NVPTXISD::LastCallArg";
319 case NVPTXISD::CallArgEnd:
320 return "NVPTXISD::CallArgEnd";
321 case NVPTXISD::CallVoid:
322 return "NVPTXISD::CallVoid";
323 case NVPTXISD::CallVal:
324 return "NVPTXISD::CallVal";
325 case NVPTXISD::CallSymbol:
326 return "NVPTXISD::CallSymbol";
327 case NVPTXISD::Prototype:
328 return "NVPTXISD::Prototype";
329 case NVPTXISD::MoveParam:
330 return "NVPTXISD::MoveParam";
331 case NVPTXISD::StoreRetval:
332 return "NVPTXISD::StoreRetval";
333 case NVPTXISD::StoreRetvalV2:
334 return "NVPTXISD::StoreRetvalV2";
335 case NVPTXISD::StoreRetvalV4:
336 return "NVPTXISD::StoreRetvalV4";
337 case NVPTXISD::PseudoUseParam:
338 return "NVPTXISD::PseudoUseParam";
339 case NVPTXISD::RETURN:
340 return "NVPTXISD::RETURN";
341 case NVPTXISD::CallSeqBegin:
342 return "NVPTXISD::CallSeqBegin";
343 case NVPTXISD::CallSeqEnd:
344 return "NVPTXISD::CallSeqEnd";
345 case NVPTXISD::CallPrototype:
346 return "NVPTXISD::CallPrototype";
347 case NVPTXISD::LoadV2:
348 return "NVPTXISD::LoadV2";
349 case NVPTXISD::LoadV4:
350 return "NVPTXISD::LoadV4";
351 case NVPTXISD::LDGV2:
352 return "NVPTXISD::LDGV2";
353 case NVPTXISD::LDGV4:
354 return "NVPTXISD::LDGV4";
355 case NVPTXISD::LDUV2:
356 return "NVPTXISD::LDUV2";
357 case NVPTXISD::LDUV4:
358 return "NVPTXISD::LDUV4";
359 case NVPTXISD::StoreV2:
360 return "NVPTXISD::StoreV2";
361 case NVPTXISD::StoreV4:
362 return "NVPTXISD::StoreV4";
363 case NVPTXISD::FUN_SHFL_CLAMP:
364 return "NVPTXISD::FUN_SHFL_CLAMP";
365 case NVPTXISD::FUN_SHFR_CLAMP:
366 return "NVPTXISD::FUN_SHFR_CLAMP";
368 return "NVPTXISD::IMAD";
369 case NVPTXISD::MUL_WIDE_SIGNED:
370 return "NVPTXISD::MUL_WIDE_SIGNED";
371 case NVPTXISD::MUL_WIDE_UNSIGNED:
372 return "NVPTXISD::MUL_WIDE_UNSIGNED";
373 case NVPTXISD::Tex1DFloatI32: return "NVPTXISD::Tex1DFloatI32";
374 case NVPTXISD::Tex1DFloatFloat: return "NVPTXISD::Tex1DFloatFloat";
375 case NVPTXISD::Tex1DFloatFloatLevel:
376 return "NVPTXISD::Tex1DFloatFloatLevel";
377 case NVPTXISD::Tex1DFloatFloatGrad:
378 return "NVPTXISD::Tex1DFloatFloatGrad";
379 case NVPTXISD::Tex1DI32I32: return "NVPTXISD::Tex1DI32I32";
380 case NVPTXISD::Tex1DI32Float: return "NVPTXISD::Tex1DI32Float";
381 case NVPTXISD::Tex1DI32FloatLevel:
382 return "NVPTXISD::Tex1DI32FloatLevel";
383 case NVPTXISD::Tex1DI32FloatGrad:
384 return "NVPTXISD::Tex1DI32FloatGrad";
385 case NVPTXISD::Tex1DArrayFloatI32: return "NVPTXISD::Tex2DArrayFloatI32";
386 case NVPTXISD::Tex1DArrayFloatFloat: return "NVPTXISD::Tex2DArrayFloatFloat";
387 case NVPTXISD::Tex1DArrayFloatFloatLevel:
388 return "NVPTXISD::Tex2DArrayFloatFloatLevel";
389 case NVPTXISD::Tex1DArrayFloatFloatGrad:
390 return "NVPTXISD::Tex2DArrayFloatFloatGrad";
391 case NVPTXISD::Tex1DArrayI32I32: return "NVPTXISD::Tex2DArrayI32I32";
392 case NVPTXISD::Tex1DArrayI32Float: return "NVPTXISD::Tex2DArrayI32Float";
393 case NVPTXISD::Tex1DArrayI32FloatLevel:
394 return "NVPTXISD::Tex2DArrayI32FloatLevel";
395 case NVPTXISD::Tex1DArrayI32FloatGrad:
396 return "NVPTXISD::Tex2DArrayI32FloatGrad";
397 case NVPTXISD::Tex2DFloatI32: return "NVPTXISD::Tex2DFloatI32";
398 case NVPTXISD::Tex2DFloatFloat: return "NVPTXISD::Tex2DFloatFloat";
399 case NVPTXISD::Tex2DFloatFloatLevel:
400 return "NVPTXISD::Tex2DFloatFloatLevel";
401 case NVPTXISD::Tex2DFloatFloatGrad:
402 return "NVPTXISD::Tex2DFloatFloatGrad";
403 case NVPTXISD::Tex2DI32I32: return "NVPTXISD::Tex2DI32I32";
404 case NVPTXISD::Tex2DI32Float: return "NVPTXISD::Tex2DI32Float";
405 case NVPTXISD::Tex2DI32FloatLevel:
406 return "NVPTXISD::Tex2DI32FloatLevel";
407 case NVPTXISD::Tex2DI32FloatGrad:
408 return "NVPTXISD::Tex2DI32FloatGrad";
409 case NVPTXISD::Tex2DArrayFloatI32: return "NVPTXISD::Tex2DArrayFloatI32";
410 case NVPTXISD::Tex2DArrayFloatFloat: return "NVPTXISD::Tex2DArrayFloatFloat";
411 case NVPTXISD::Tex2DArrayFloatFloatLevel:
412 return "NVPTXISD::Tex2DArrayFloatFloatLevel";
413 case NVPTXISD::Tex2DArrayFloatFloatGrad:
414 return "NVPTXISD::Tex2DArrayFloatFloatGrad";
415 case NVPTXISD::Tex2DArrayI32I32: return "NVPTXISD::Tex2DArrayI32I32";
416 case NVPTXISD::Tex2DArrayI32Float: return "NVPTXISD::Tex2DArrayI32Float";
417 case NVPTXISD::Tex2DArrayI32FloatLevel:
418 return "NVPTXISD::Tex2DArrayI32FloatLevel";
419 case NVPTXISD::Tex2DArrayI32FloatGrad:
420 return "NVPTXISD::Tex2DArrayI32FloatGrad";
421 case NVPTXISD::Tex3DFloatI32: return "NVPTXISD::Tex3DFloatI32";
422 case NVPTXISD::Tex3DFloatFloat: return "NVPTXISD::Tex3DFloatFloat";
423 case NVPTXISD::Tex3DFloatFloatLevel:
424 return "NVPTXISD::Tex3DFloatFloatLevel";
425 case NVPTXISD::Tex3DFloatFloatGrad:
426 return "NVPTXISD::Tex3DFloatFloatGrad";
427 case NVPTXISD::Tex3DI32I32: return "NVPTXISD::Tex3DI32I32";
428 case NVPTXISD::Tex3DI32Float: return "NVPTXISD::Tex3DI32Float";
429 case NVPTXISD::Tex3DI32FloatLevel:
430 return "NVPTXISD::Tex3DI32FloatLevel";
431 case NVPTXISD::Tex3DI32FloatGrad:
432 return "NVPTXISD::Tex3DI32FloatGrad";
434 case NVPTXISD::Suld1DI8Trap: return "NVPTXISD::Suld1DI8Trap";
435 case NVPTXISD::Suld1DI16Trap: return "NVPTXISD::Suld1DI16Trap";
436 case NVPTXISD::Suld1DI32Trap: return "NVPTXISD::Suld1DI32Trap";
437 case NVPTXISD::Suld1DV2I8Trap: return "NVPTXISD::Suld1DV2I8Trap";
438 case NVPTXISD::Suld1DV2I16Trap: return "NVPTXISD::Suld1DV2I16Trap";
439 case NVPTXISD::Suld1DV2I32Trap: return "NVPTXISD::Suld1DV2I32Trap";
440 case NVPTXISD::Suld1DV4I8Trap: return "NVPTXISD::Suld1DV4I8Trap";
441 case NVPTXISD::Suld1DV4I16Trap: return "NVPTXISD::Suld1DV4I16Trap";
442 case NVPTXISD::Suld1DV4I32Trap: return "NVPTXISD::Suld1DV4I32Trap";
444 case NVPTXISD::Suld1DArrayI8Trap: return "NVPTXISD::Suld1DArrayI8Trap";
445 case NVPTXISD::Suld1DArrayI16Trap: return "NVPTXISD::Suld1DArrayI16Trap";
446 case NVPTXISD::Suld1DArrayI32Trap: return "NVPTXISD::Suld1DArrayI32Trap";
447 case NVPTXISD::Suld1DArrayV2I8Trap: return "NVPTXISD::Suld1DArrayV2I8Trap";
448 case NVPTXISD::Suld1DArrayV2I16Trap: return "NVPTXISD::Suld1DArrayV2I16Trap";
449 case NVPTXISD::Suld1DArrayV2I32Trap: return "NVPTXISD::Suld1DArrayV2I32Trap";
450 case NVPTXISD::Suld1DArrayV4I8Trap: return "NVPTXISD::Suld1DArrayV4I8Trap";
451 case NVPTXISD::Suld1DArrayV4I16Trap: return "NVPTXISD::Suld1DArrayV4I16Trap";
452 case NVPTXISD::Suld1DArrayV4I32Trap: return "NVPTXISD::Suld1DArrayV4I32Trap";
454 case NVPTXISD::Suld2DI8Trap: return "NVPTXISD::Suld2DI8Trap";
455 case NVPTXISD::Suld2DI16Trap: return "NVPTXISD::Suld2DI16Trap";
456 case NVPTXISD::Suld2DI32Trap: return "NVPTXISD::Suld2DI32Trap";
457 case NVPTXISD::Suld2DV2I8Trap: return "NVPTXISD::Suld2DV2I8Trap";
458 case NVPTXISD::Suld2DV2I16Trap: return "NVPTXISD::Suld2DV2I16Trap";
459 case NVPTXISD::Suld2DV2I32Trap: return "NVPTXISD::Suld2DV2I32Trap";
460 case NVPTXISD::Suld2DV4I8Trap: return "NVPTXISD::Suld2DV4I8Trap";
461 case NVPTXISD::Suld2DV4I16Trap: return "NVPTXISD::Suld2DV4I16Trap";
462 case NVPTXISD::Suld2DV4I32Trap: return "NVPTXISD::Suld2DV4I32Trap";
464 case NVPTXISD::Suld2DArrayI8Trap: return "NVPTXISD::Suld2DArrayI8Trap";
465 case NVPTXISD::Suld2DArrayI16Trap: return "NVPTXISD::Suld2DArrayI16Trap";
466 case NVPTXISD::Suld2DArrayI32Trap: return "NVPTXISD::Suld2DArrayI32Trap";
467 case NVPTXISD::Suld2DArrayV2I8Trap: return "NVPTXISD::Suld2DArrayV2I8Trap";
468 case NVPTXISD::Suld2DArrayV2I16Trap: return "NVPTXISD::Suld2DArrayV2I16Trap";
469 case NVPTXISD::Suld2DArrayV2I32Trap: return "NVPTXISD::Suld2DArrayV2I32Trap";
470 case NVPTXISD::Suld2DArrayV4I8Trap: return "NVPTXISD::Suld2DArrayV4I8Trap";
471 case NVPTXISD::Suld2DArrayV4I16Trap: return "NVPTXISD::Suld2DArrayV4I16Trap";
472 case NVPTXISD::Suld2DArrayV4I32Trap: return "NVPTXISD::Suld2DArrayV4I32Trap";
474 case NVPTXISD::Suld3DI8Trap: return "NVPTXISD::Suld3DI8Trap";
475 case NVPTXISD::Suld3DI16Trap: return "NVPTXISD::Suld3DI16Trap";
476 case NVPTXISD::Suld3DI32Trap: return "NVPTXISD::Suld3DI32Trap";
477 case NVPTXISD::Suld3DV2I8Trap: return "NVPTXISD::Suld3DV2I8Trap";
478 case NVPTXISD::Suld3DV2I16Trap: return "NVPTXISD::Suld3DV2I16Trap";
479 case NVPTXISD::Suld3DV2I32Trap: return "NVPTXISD::Suld3DV2I32Trap";
480 case NVPTXISD::Suld3DV4I8Trap: return "NVPTXISD::Suld3DV4I8Trap";
481 case NVPTXISD::Suld3DV4I16Trap: return "NVPTXISD::Suld3DV4I16Trap";
482 case NVPTXISD::Suld3DV4I32Trap: return "NVPTXISD::Suld3DV4I32Trap";
486 bool NVPTXTargetLowering::shouldSplitVectorType(EVT VT) const {
487 return VT.getScalarType() == MVT::i1;
491 NVPTXTargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
493 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
494 Op = DAG.getTargetGlobalAddress(GV, dl, getPointerTy());
495 return DAG.getNode(NVPTXISD::Wrapper, dl, getPointerTy(), Op);
// getPrototype - Build the PTX ".callprototype" declaration string describing
// a callee's signature: a .param entry for the return value followed by one
// .param entry per argument. Used for indirect calls, where no concrete
// function declaration exists.
// NOTE(review): several source lines are missing from this chunk (the
// std::string return type, the declaration of the output stream 'O' and of
// the 'size'/'align'/'OIdx' locals, and a number of braces); the comments
// below only describe what is visible.
NVPTXTargetLowering::getPrototype(Type *retTy, const ArgListTy &Args,
                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  unsigned retAlignment,
                                  const ImmutableCallSite *CS) const {
  // Only the >= sm_20 ABI calling convention is supported.
  bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
  assert(isABI && "Non-ABI compilation is not supported");

  // Each call site gets a uniquely numbered prototype name.
  O << "prototype_" << uniqueCallSite << " : .callprototype ";

  // --- Return value declaration ---
  if (retTy->getTypeID() == Type::VoidTyID) {
  if (retTy->isFloatingPointTy() || retTy->isIntegerTy()) {
    // Scalar return value: ".param .b<bits> _".
    if (const IntegerType *ITy = dyn_cast<IntegerType>(retTy)) {
      size = ITy->getBitWidth();
      assert(retTy->isFloatingPointTy() &&
             "Floating point type expected here");
      size = retTy->getPrimitiveSizeInBits();
    O << ".param .b" << size << " _";
  } else if (isa<PointerType>(retTy)) {
    // Pointers are returned as a pointer-width scalar.
    O << ".param .b" << getPointerTy().getSizeInBits() << " _";
    if((retTy->getTypeID() == Type::StructTyID) ||
       isa<VectorType>(retTy)) {
      // Aggregate / vector return: an aligned byte array.
      O << ".param .align "
        << getDataLayout()->getTypeAllocSize(retTy) << "]";
    assert(false && "Unknown return type");

  MVT thePointerTy = getPointerTy();

  // --- Argument declarations ---
  // OIdx walks the flattened Outs array in parallel with the IR arg list.
  for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {
    Type *Ty = Args[i].Ty;
    if (Outs[OIdx].Flags.isByVal() == false) {
      if (Ty->isAggregateType() || Ty->isVectorTy()) {
        // Aggregates/vectors are passed as aligned byte arrays; alignment
        // comes from call-site metadata when present, else the ABI alignment.
        const CallInst *CallI = cast<CallInst>(CS->getInstruction());
        const DataLayout *TD = getDataLayout();
        // +1 because index 0 is reserved for return type alignment
        if (!llvm::getAlign(*CallI, i + 1, align))
          align = TD->getABITypeAlignment(Ty);
        unsigned sz = TD->getTypeAllocSize(Ty);
        O << ".param .align " << align << " .b8 ";
        O << "[" << sz << "]";
        // update the index for Outs
        SmallVector<EVT, 16> vtparts;
        ComputeValueVTs(*this, Ty, vtparts);
        if (unsigned len = vtparts.size())

      // i8 types in IR will be i16 types in SDAG
      assert((getValueType(Ty) == Outs[OIdx].VT ||
              (getValueType(Ty) == MVT::i8 && Outs[OIdx].VT == MVT::i16)) &&
             "type mismatch between callee prototype and arguments");

      // Scalar argument: ".param .b<bits>".
      if (isa<IntegerType>(Ty)) {
        sz = cast<IntegerType>(Ty)->getBitWidth();
      } else if (isa<PointerType>(Ty))
        sz = thePointerTy.getSizeInBits();
        sz = Ty->getPrimitiveSizeInBits();
      O << ".param .b" << sz << " ";

    // byval argument: the pointee is passed as an aligned byte array.
    const PointerType *PTy = dyn_cast<PointerType>(Ty);
    assert(PTy && "Param with byval attribute should be a pointer type");
    Type *ETy = PTy->getElementType();

    unsigned align = Outs[OIdx].Flags.getByValAlign();
    unsigned sz = getDataLayout()->getTypeAllocSize(ETy);
    O << ".param .align " << align << " .b8 ";
    O << "[" << sz << "]";
// getArgumentAlignment - Determine the alignment to use for argument/return
// slot Idx of a call. Prefers explicit "align" metadata on the call or on the
// ultimate callee function; otherwise falls back to the ABI type alignment.
// NOTE(review): the return type, the 'Type *Ty' parameter line, the 'Align'
// local, and several braces/returns are missing from this chunk; comments
// below describe only the visible code.
NVPTXTargetLowering::getArgumentAlignment(SDValue Callee,
                                          const ImmutableCallSite *CS,
                                          unsigned Idx) const {
  const DataLayout *TD = getDataLayout();
  // Non-null only when the call site names a Function directly.
  const Value *DirectCallee = CS->getCalledFunction();

  // We don't have a direct function symbol, but that may be because of
  // constant cast instructions in the call.
  const Instruction *CalleeI = CS->getInstruction();
  assert(CalleeI && "Call target is not a function or derived value?");

  // With bitcast'd call targets, the instruction will be the call
  if (isa<CallInst>(CalleeI)) {
    // Check if we have call alignment metadata
    if (llvm::getAlign(*cast<CallInst>(CalleeI), Idx, Align))

    const Value *CalleeV = cast<CallInst>(CalleeI)->getCalledValue();
    // Ignore any bitcast instructions
    while(isa<ConstantExpr>(CalleeV)) {
      const ConstantExpr *CE = cast<ConstantExpr>(CalleeV);
      // Look through the bitcast
      CalleeV = cast<ConstantExpr>(CalleeV)->getOperand(0);

    // We have now looked past all of the bitcasts. Do we finally have a
    // Function?
    if (isa<Function>(CalleeV))
      DirectCallee = CalleeV;

  // Check for function alignment information if we found that the
  // ultimate target is a Function
  if (llvm::getAlign(*cast<Function>(DirectCallee), Idx, Align))

  // Call is indirect or alignment information is not available, fall back to
  // the ABI type alignment
  return TD->getABITypeAlignment(Ty);
657 SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
658 SmallVectorImpl<SDValue> &InVals) const {
659 SelectionDAG &DAG = CLI.DAG;
661 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
662 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
663 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
664 SDValue Chain = CLI.Chain;
665 SDValue Callee = CLI.Callee;
666 bool &isTailCall = CLI.IsTailCall;
667 ArgListTy &Args = CLI.getArgs();
668 Type *retTy = CLI.RetTy;
669 ImmutableCallSite *CS = CLI.CS;
671 bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
672 assert(isABI && "Non-ABI compilation is not supported");
675 const DataLayout *TD = getDataLayout();
676 MachineFunction &MF = DAG.getMachineFunction();
677 const Function *F = MF.getFunction();
679 SDValue tempChain = Chain;
681 DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(uniqueCallSite, true),
683 SDValue InFlag = Chain.getValue(1);
685 unsigned paramCount = 0;
686 // Args.size() and Outs.size() need not match.
687 // Outs.size() will be larger
688 // * if there is an aggregate argument with multiple fields (each field
689 // showing up separately in Outs)
690 // * if there is a vector argument with more than typical vector-length
691 // elements (generally if more than 4) where each vector element is
692 // individually present in Outs.
693 // So a different index should be used for indexing into Outs/OutVals.
694 // See similar issue in LowerFormalArguments.
696 // Declare the .params or .reg need to pass values
698 for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {
699 EVT VT = Outs[OIdx].VT;
700 Type *Ty = Args[i].Ty;
702 if (Outs[OIdx].Flags.isByVal() == false) {
703 if (Ty->isAggregateType()) {
705 SmallVector<EVT, 16> vtparts;
706 SmallVector<uint64_t, 16> Offsets;
707 ComputePTXValueVTs(*this, Ty, vtparts, &Offsets, 0);
709 unsigned align = getArgumentAlignment(Callee, CS, Ty, paramCount + 1);
710 // declare .param .align <align> .b8 .param<n>[<size>];
711 unsigned sz = TD->getTypeAllocSize(Ty);
712 SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
713 SDValue DeclareParamOps[] = { Chain, DAG.getConstant(align, MVT::i32),
714 DAG.getConstant(paramCount, MVT::i32),
715 DAG.getConstant(sz, MVT::i32), InFlag };
716 Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
718 InFlag = Chain.getValue(1);
719 for (unsigned j = 0, je = vtparts.size(); j != je; ++j) {
720 EVT elemtype = vtparts[j];
721 unsigned ArgAlign = GCD(align, Offsets[j]);
722 if (elemtype.isInteger() && (sz < 8))
724 SDValue StVal = OutVals[OIdx];
725 if (elemtype.getSizeInBits() < 16) {
726 StVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, StVal);
728 SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
729 SDValue CopyParamOps[] = { Chain,
730 DAG.getConstant(paramCount, MVT::i32),
731 DAG.getConstant(Offsets[j], MVT::i32),
733 Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl,
734 CopyParamVTs, CopyParamOps,
735 elemtype, MachinePointerInfo(),
737 InFlag = Chain.getValue(1);
740 if (vtparts.size() > 0)
745 if (Ty->isVectorTy()) {
746 EVT ObjectVT = getValueType(Ty);
747 unsigned align = getArgumentAlignment(Callee, CS, Ty, paramCount + 1);
748 // declare .param .align <align> .b8 .param<n>[<size>];
749 unsigned sz = TD->getTypeAllocSize(Ty);
750 SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
751 SDValue DeclareParamOps[] = { Chain, DAG.getConstant(align, MVT::i32),
752 DAG.getConstant(paramCount, MVT::i32),
753 DAG.getConstant(sz, MVT::i32), InFlag };
754 Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
756 InFlag = Chain.getValue(1);
757 unsigned NumElts = ObjectVT.getVectorNumElements();
758 EVT EltVT = ObjectVT.getVectorElementType();
760 bool NeedExtend = false;
761 if (EltVT.getSizeInBits() < 16) {
768 SDValue Elt = OutVals[OIdx++];
770 Elt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Elt);
772 SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
773 SDValue CopyParamOps[] = { Chain,
774 DAG.getConstant(paramCount, MVT::i32),
775 DAG.getConstant(0, MVT::i32), Elt,
777 Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl,
778 CopyParamVTs, CopyParamOps,
779 MemVT, MachinePointerInfo());
780 InFlag = Chain.getValue(1);
781 } else if (NumElts == 2) {
782 SDValue Elt0 = OutVals[OIdx++];
783 SDValue Elt1 = OutVals[OIdx++];
785 Elt0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Elt0);
786 Elt1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Elt1);
789 SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
790 SDValue CopyParamOps[] = { Chain,
791 DAG.getConstant(paramCount, MVT::i32),
792 DAG.getConstant(0, MVT::i32), Elt0, Elt1,
794 Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParamV2, dl,
795 CopyParamVTs, CopyParamOps,
796 MemVT, MachinePointerInfo());
797 InFlag = Chain.getValue(1);
799 unsigned curOffset = 0;
801 // We have at least 4 elements (<3 x Ty> expands to 4 elements) and
803 // vector will be expanded to a power of 2 elements, so we know we can
804 // always round up to the next multiple of 4 when creating the vector
806 // e.g. 4 elem => 1 st.v4
809 // 11 elem => 3 st.v4
810 unsigned VecSize = 4;
811 if (EltVT.getSizeInBits() == 64)
814 // This is potentially only part of a vector, so assume all elements
815 // are packed together.
816 unsigned PerStoreOffset = MemVT.getStoreSizeInBits() / 8 * VecSize;
818 for (unsigned i = 0; i < NumElts; i += VecSize) {
821 SmallVector<SDValue, 8> Ops;
822 Ops.push_back(Chain);
823 Ops.push_back(DAG.getConstant(paramCount, MVT::i32));
824 Ops.push_back(DAG.getConstant(curOffset, MVT::i32));
826 unsigned Opc = NVPTXISD::StoreParamV2;
828 StoreVal = OutVals[OIdx++];
830 StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
831 Ops.push_back(StoreVal);
833 if (i + 1 < NumElts) {
834 StoreVal = OutVals[OIdx++];
837 DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
839 StoreVal = DAG.getUNDEF(EltVT);
841 Ops.push_back(StoreVal);
844 Opc = NVPTXISD::StoreParamV4;
845 if (i + 2 < NumElts) {
846 StoreVal = OutVals[OIdx++];
849 DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
851 StoreVal = DAG.getUNDEF(EltVT);
853 Ops.push_back(StoreVal);
855 if (i + 3 < NumElts) {
856 StoreVal = OutVals[OIdx++];
859 DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
861 StoreVal = DAG.getUNDEF(EltVT);
863 Ops.push_back(StoreVal);
866 Ops.push_back(InFlag);
868 SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
869 Chain = DAG.getMemIntrinsicNode(Opc, dl, CopyParamVTs, Ops,
870 MemVT, MachinePointerInfo());
871 InFlag = Chain.getValue(1);
872 curOffset += PerStoreOffset;
880 // for ABI, declare .param .b<size> .param<n>;
881 unsigned sz = VT.getSizeInBits();
882 bool needExtend = false;
883 if (VT.isInteger()) {
889 SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
890 SDValue DeclareParamOps[] = { Chain,
891 DAG.getConstant(paramCount, MVT::i32),
892 DAG.getConstant(sz, MVT::i32),
893 DAG.getConstant(0, MVT::i32), InFlag };
894 Chain = DAG.getNode(NVPTXISD::DeclareScalarParam, dl, DeclareParamVTs,
896 InFlag = Chain.getValue(1);
897 SDValue OutV = OutVals[OIdx];
899 // zext/sext i1 to i16
900 unsigned opc = ISD::ZERO_EXTEND;
901 if (Outs[OIdx].Flags.isSExt())
902 opc = ISD::SIGN_EXTEND;
903 OutV = DAG.getNode(opc, dl, MVT::i16, OutV);
905 SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
906 SDValue CopyParamOps[] = { Chain, DAG.getConstant(paramCount, MVT::i32),
907 DAG.getConstant(0, MVT::i32), OutV, InFlag };
909 unsigned opcode = NVPTXISD::StoreParam;
910 if (Outs[OIdx].Flags.isZExt())
911 opcode = NVPTXISD::StoreParamU32;
912 else if (Outs[OIdx].Flags.isSExt())
913 opcode = NVPTXISD::StoreParamS32;
914 Chain = DAG.getMemIntrinsicNode(opcode, dl, CopyParamVTs, CopyParamOps,
915 VT, MachinePointerInfo());
917 InFlag = Chain.getValue(1);
922 SmallVector<EVT, 16> vtparts;
923 SmallVector<uint64_t, 16> Offsets;
924 const PointerType *PTy = dyn_cast<PointerType>(Args[i].Ty);
925 assert(PTy && "Type of a byval parameter should be pointer");
926 ComputePTXValueVTs(*this, PTy->getElementType(), vtparts, &Offsets, 0);
928 // declare .param .align <align> .b8 .param<n>[<size>];
929 unsigned sz = Outs[OIdx].Flags.getByValSize();
930 SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
931 unsigned ArgAlign = Outs[OIdx].Flags.getByValAlign();
932 // The ByValAlign in the Outs[OIdx].Flags is alway set at this point,
933 // so we don't need to worry about natural alignment or not.
934 // See TargetLowering::LowerCallTo().
935 SDValue DeclareParamOps[] = {
936 Chain, DAG.getConstant(Outs[OIdx].Flags.getByValAlign(), MVT::i32),
937 DAG.getConstant(paramCount, MVT::i32), DAG.getConstant(sz, MVT::i32),
940 Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
942 InFlag = Chain.getValue(1);
943 for (unsigned j = 0, je = vtparts.size(); j != je; ++j) {
944 EVT elemtype = vtparts[j];
945 int curOffset = Offsets[j];
946 unsigned PartAlign = GCD(ArgAlign, curOffset);
948 DAG.getNode(ISD::ADD, dl, getPointerTy(), OutVals[OIdx],
949 DAG.getConstant(curOffset, getPointerTy()));
950 SDValue theVal = DAG.getLoad(elemtype, dl, tempChain, srcAddr,
951 MachinePointerInfo(), false, false, false,
953 if (elemtype.getSizeInBits() < 16) {
954 theVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, theVal);
956 SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
957 SDValue CopyParamOps[] = { Chain, DAG.getConstant(paramCount, MVT::i32),
958 DAG.getConstant(curOffset, MVT::i32), theVal,
960 Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl, CopyParamVTs,
961 CopyParamOps, elemtype,
962 MachinePointerInfo());
964 InFlag = Chain.getValue(1);
969 GlobalAddressSDNode *Func = dyn_cast<GlobalAddressSDNode>(Callee.getNode());
970 unsigned retAlignment = 0;
973 if (Ins.size() > 0) {
974 SmallVector<EVT, 16> resvtparts;
975 ComputeValueVTs(*this, retTy, resvtparts);
978 // .param .align 16 .b8 retval0[<size-in-bytes>], or
979 // .param .b<size-in-bits> retval0
980 unsigned resultsz = TD->getTypeAllocSizeInBits(retTy);
981 if (retTy->isSingleValueType()) {
982 // Scalar needs to be at least 32bit wide
985 SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
986 SDValue DeclareRetOps[] = { Chain, DAG.getConstant(1, MVT::i32),
987 DAG.getConstant(resultsz, MVT::i32),
988 DAG.getConstant(0, MVT::i32), InFlag };
989 Chain = DAG.getNode(NVPTXISD::DeclareRet, dl, DeclareRetVTs,
991 InFlag = Chain.getValue(1);
993 retAlignment = getArgumentAlignment(Callee, CS, retTy, 0);
994 SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
995 SDValue DeclareRetOps[] = { Chain,
996 DAG.getConstant(retAlignment, MVT::i32),
997 DAG.getConstant(resultsz / 8, MVT::i32),
998 DAG.getConstant(0, MVT::i32), InFlag };
999 Chain = DAG.getNode(NVPTXISD::DeclareRetParam, dl, DeclareRetVTs,
1001 InFlag = Chain.getValue(1);
1006 // This is indirect function call case : PTX requires a prototype of the
1008 // proto_0 : .callprototype(.param .b32 _) _ (.param .b32 _);
1009 // to be emitted, and the label has to used as the last arg of call
1011 // The prototype is embedded in a string and put as the operand for a
1012 // CallPrototype SDNode which will print out to the value of the string.
1013 SDVTList ProtoVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1014 std::string Proto = getPrototype(retTy, Args, Outs, retAlignment, CS);
1015 const char *ProtoStr =
1016 nvTM->getManagedStrPool()->getManagedString(Proto.c_str())->c_str();
1017 SDValue ProtoOps[] = {
1018 Chain, DAG.getTargetExternalSymbol(ProtoStr, MVT::i32), InFlag,
1020 Chain = DAG.getNode(NVPTXISD::CallPrototype, dl, ProtoVTs, ProtoOps);
1021 InFlag = Chain.getValue(1);
1023 // Op to just print "call"
1024 SDVTList PrintCallVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1025 SDValue PrintCallOps[] = {
1026 Chain, DAG.getConstant((Ins.size() == 0) ? 0 : 1, MVT::i32), InFlag
1028 Chain = DAG.getNode(Func ? (NVPTXISD::PrintCallUni) : (NVPTXISD::PrintCall),
1029 dl, PrintCallVTs, PrintCallOps);
1030 InFlag = Chain.getValue(1);
1032 // Ops to print out the function name
1033 SDVTList CallVoidVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1034 SDValue CallVoidOps[] = { Chain, Callee, InFlag };
1035 Chain = DAG.getNode(NVPTXISD::CallVoid, dl, CallVoidVTs, CallVoidOps);
1036 InFlag = Chain.getValue(1);
1038 // Ops to print out the param list
1039 SDVTList CallArgBeginVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1040 SDValue CallArgBeginOps[] = { Chain, InFlag };
1041 Chain = DAG.getNode(NVPTXISD::CallArgBegin, dl, CallArgBeginVTs,
1043 InFlag = Chain.getValue(1);
1045 for (unsigned i = 0, e = paramCount; i != e; ++i) {
1048 opcode = NVPTXISD::LastCallArg;
1050 opcode = NVPTXISD::CallArg;
1051 SDVTList CallArgVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1052 SDValue CallArgOps[] = { Chain, DAG.getConstant(1, MVT::i32),
1053 DAG.getConstant(i, MVT::i32), InFlag };
1054 Chain = DAG.getNode(opcode, dl, CallArgVTs, CallArgOps);
1055 InFlag = Chain.getValue(1);
1057 SDVTList CallArgEndVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1058 SDValue CallArgEndOps[] = { Chain, DAG.getConstant(Func ? 1 : 0, MVT::i32),
1060 Chain = DAG.getNode(NVPTXISD::CallArgEnd, dl, CallArgEndVTs, CallArgEndOps);
1061 InFlag = Chain.getValue(1);
1064 SDVTList PrototypeVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1065 SDValue PrototypeOps[] = { Chain, DAG.getConstant(uniqueCallSite, MVT::i32),
1067 Chain = DAG.getNode(NVPTXISD::Prototype, dl, PrototypeVTs, PrototypeOps);
1068 InFlag = Chain.getValue(1);
1071 // Generate loads from param memory/moves from registers for result
1072 if (Ins.size() > 0) {
1073 if (retTy && retTy->isVectorTy()) {
1074 EVT ObjectVT = getValueType(retTy);
1075 unsigned NumElts = ObjectVT.getVectorNumElements();
1076 EVT EltVT = ObjectVT.getVectorElementType();
1077 assert(nvTM->getTargetLowering()->getNumRegisters(F->getContext(),
1078 ObjectVT) == NumElts &&
1079 "Vector was not scalarized");
1080 unsigned sz = EltVT.getSizeInBits();
1081 bool needTruncate = sz < 8 ? true : false;
1084 // Just a simple load
1085 SmallVector<EVT, 4> LoadRetVTs;
1086 if (EltVT == MVT::i1 || EltVT == MVT::i8) {
1087 // If loading i1/i8 result, generate
1091 LoadRetVTs.push_back(MVT::i16);
1093 LoadRetVTs.push_back(EltVT);
1094 LoadRetVTs.push_back(MVT::Other);
1095 LoadRetVTs.push_back(MVT::Glue);
1096 SmallVector<SDValue, 4> LoadRetOps;
1097 LoadRetOps.push_back(Chain);
1098 LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
1099 LoadRetOps.push_back(DAG.getConstant(0, MVT::i32));
1100 LoadRetOps.push_back(InFlag);
1101 SDValue retval = DAG.getMemIntrinsicNode(
1102 NVPTXISD::LoadParam, dl,
1103 DAG.getVTList(LoadRetVTs), LoadRetOps, EltVT, MachinePointerInfo());
1104 Chain = retval.getValue(1);
1105 InFlag = retval.getValue(2);
1106 SDValue Ret0 = retval;
1108 Ret0 = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Ret0);
1109 InVals.push_back(Ret0);
1110 } else if (NumElts == 2) {
1112 SmallVector<EVT, 4> LoadRetVTs;
1113 if (EltVT == MVT::i1 || EltVT == MVT::i8) {
1114 // If loading i1/i8 result, generate
1118 LoadRetVTs.push_back(MVT::i16);
1119 LoadRetVTs.push_back(MVT::i16);
1121 LoadRetVTs.push_back(EltVT);
1122 LoadRetVTs.push_back(EltVT);
1124 LoadRetVTs.push_back(MVT::Other);
1125 LoadRetVTs.push_back(MVT::Glue);
1126 SmallVector<SDValue, 4> LoadRetOps;
1127 LoadRetOps.push_back(Chain);
1128 LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
1129 LoadRetOps.push_back(DAG.getConstant(0, MVT::i32));
1130 LoadRetOps.push_back(InFlag);
1131 SDValue retval = DAG.getMemIntrinsicNode(
1132 NVPTXISD::LoadParamV2, dl,
1133 DAG.getVTList(LoadRetVTs), LoadRetOps, EltVT, MachinePointerInfo());
1134 Chain = retval.getValue(2);
1135 InFlag = retval.getValue(3);
1136 SDValue Ret0 = retval.getValue(0);
1137 SDValue Ret1 = retval.getValue(1);
1139 Ret0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ret0);
1140 InVals.push_back(Ret0);
1141 Ret1 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ret1);
1142 InVals.push_back(Ret1);
1144 InVals.push_back(Ret0);
1145 InVals.push_back(Ret1);
1148 // Split into N LoadV4
1150 unsigned VecSize = 4;
1151 unsigned Opc = NVPTXISD::LoadParamV4;
1152 if (EltVT.getSizeInBits() == 64) {
1154 Opc = NVPTXISD::LoadParamV2;
1156 EVT VecVT = EVT::getVectorVT(F->getContext(), EltVT, VecSize);
1157 for (unsigned i = 0; i < NumElts; i += VecSize) {
1158 SmallVector<EVT, 8> LoadRetVTs;
1159 if (EltVT == MVT::i1 || EltVT == MVT::i8) {
1160 // If loading i1/i8 result, generate
1164 for (unsigned j = 0; j < VecSize; ++j)
1165 LoadRetVTs.push_back(MVT::i16);
1167 for (unsigned j = 0; j < VecSize; ++j)
1168 LoadRetVTs.push_back(EltVT);
1170 LoadRetVTs.push_back(MVT::Other);
1171 LoadRetVTs.push_back(MVT::Glue);
1172 SmallVector<SDValue, 4> LoadRetOps;
1173 LoadRetOps.push_back(Chain);
1174 LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
1175 LoadRetOps.push_back(DAG.getConstant(Ofst, MVT::i32));
1176 LoadRetOps.push_back(InFlag);
1177 SDValue retval = DAG.getMemIntrinsicNode(
1178 Opc, dl, DAG.getVTList(LoadRetVTs),
1179 LoadRetOps, EltVT, MachinePointerInfo());
1181 Chain = retval.getValue(2);
1182 InFlag = retval.getValue(3);
1184 Chain = retval.getValue(4);
1185 InFlag = retval.getValue(5);
1188 for (unsigned j = 0; j < VecSize; ++j) {
1189 if (i + j >= NumElts)
1191 SDValue Elt = retval.getValue(j);
1193 Elt = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
1194 InVals.push_back(Elt);
1196 Ofst += TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
1200 SmallVector<EVT, 16> VTs;
1201 SmallVector<uint64_t, 16> Offsets;
1202 ComputePTXValueVTs(*this, retTy, VTs, &Offsets, 0);
1203 assert(VTs.size() == Ins.size() && "Bad value decomposition");
1204 unsigned RetAlign = getArgumentAlignment(Callee, CS, retTy, 0);
1205 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
1206 unsigned sz = VTs[i].getSizeInBits();
1207 unsigned AlignI = GCD(RetAlign, Offsets[i]);
1208 bool needTruncate = sz < 8 ? true : false;
1209 if (VTs[i].isInteger() && (sz < 8))
1212 SmallVector<EVT, 4> LoadRetVTs;
1213 EVT TheLoadType = VTs[i];
1214 if (retTy->isIntegerTy() &&
1215 TD->getTypeAllocSizeInBits(retTy) < 32) {
1216 // This is for integer types only, and specifically not for
1218 LoadRetVTs.push_back(MVT::i32);
1219 TheLoadType = MVT::i32;
1220 } else if (sz < 16) {
1221 // If loading i1/i8 result, generate
1223 // trunc i16 to i1/i8
1224 LoadRetVTs.push_back(MVT::i16);
1226 LoadRetVTs.push_back(Ins[i].VT);
1227 LoadRetVTs.push_back(MVT::Other);
1228 LoadRetVTs.push_back(MVT::Glue);
1230 SmallVector<SDValue, 4> LoadRetOps;
1231 LoadRetOps.push_back(Chain);
1232 LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
1233 LoadRetOps.push_back(DAG.getConstant(Offsets[i], MVT::i32));
1234 LoadRetOps.push_back(InFlag);
1235 SDValue retval = DAG.getMemIntrinsicNode(
1236 NVPTXISD::LoadParam, dl,
1237 DAG.getVTList(LoadRetVTs), LoadRetOps,
1238 TheLoadType, MachinePointerInfo(), AlignI);
1239 Chain = retval.getValue(1);
1240 InFlag = retval.getValue(2);
1241 SDValue Ret0 = retval.getValue(0);
1243 Ret0 = DAG.getNode(ISD::TRUNCATE, dl, Ins[i].VT, Ret0);
1244 InVals.push_back(Ret0);
1249 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(uniqueCallSite, true),
1250 DAG.getIntPtrConstant(uniqueCallSite + 1, true),
1254 // set isTailCall to false for now, until we figure out how to express
1255 // tail call optimization in PTX
1260 // By default CONCAT_VECTORS is lowered by ExpandVectorBuildThroughStack()
1261 // (see LegalizeDAG.cpp). This is slow and uses local memory.
1262 // We use extract/insert/build vector just as LegalizeOp() does in llvm 2.5
// Lower CONCAT_VECTORS by extracting every scalar element of each input
// sub-vector and rebuilding the result with a single BUILD_VECTOR, instead
// of letting the legalizer expand through the stack (see comment above).
1264 NVPTXTargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
1265 SDNode *Node = Op.getNode();
1267 SmallVector<SDValue, 8> Ops;
1268 unsigned NumOperands = Node->getNumOperands();
// Walk each input sub-vector of the concat in order.
1269 for (unsigned i = 0; i < NumOperands; ++i) {
1270 SDValue SubOp = Node->getOperand(i);
1271 EVT VVT = SubOp.getNode()->getValueType(0);
1272 EVT EltVT = VVT.getVectorElementType();
1273 unsigned NumSubElem = VVT.getVectorNumElements();
// Pull out each scalar element; the collected scalars preserve the
// original element order across all operands.
1274 for (unsigned j = 0; j < NumSubElem; ++j) {
1275 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, SubOp,
1276 DAG.getIntPtrConstant(j)));
// Reassemble all extracted scalars into the full result vector type.
1279 return DAG.getNode(ISD::BUILD_VECTOR, dl, Node->getValueType(0), Ops);
1282 /// LowerShiftRightParts - Lower SRL_PARTS, SRA_PARTS, which
1283 /// 1) returns two i32 values and take a 2 x i32 value to shift plus a shift
1285 /// 2) returns two i64 values and take a 2 x i64 value to shift plus a shift
// Lower SRA_PARTS/SRL_PARTS (double-wide right shift split into a {Lo, Hi}
// pair). On sm_35+ the funnel-shift 'shf' instruction is used for the 32-bit
// case; otherwise the result is built from ordinary shifts and a select.
1287 SDValue NVPTXTargetLowering::LowerShiftRightParts(SDValue Op,
1288 SelectionDAG &DAG) const {
1289 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
1290 assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
1292 EVT VT = Op.getValueType();
1293 unsigned VTBits = VT.getSizeInBits();
1295 SDValue ShOpLo = Op.getOperand(0);
1296 SDValue ShOpHi = Op.getOperand(1);
1297 SDValue ShAmt = Op.getOperand(2);
// SRA_PARTS must shift the high word arithmetically; SRL_PARTS logically.
1298 unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
1300 if (VTBits == 32 && nvptxSubtarget.getSmVersion() >= 35) {
1302 // For 32bit and sm35, we can use the funnel shift 'shf' instruction.
1303 // {dHi, dLo} = {aHi, aLo} >> Amt
1305 // dLo = shf.r.clamp aLo, aHi, Amt
// Hi gets a plain (arithmetic or logical) shift; Lo gets the funnel shift
// that pulls bits down from Hi.
1307 SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
1308 SDValue Lo = DAG.getNode(NVPTXISD::FUN_SHFR_CLAMP, dl, VT, ShOpLo, ShOpHi,
1311 SDValue Ops[2] = { Lo, Hi };
1312 return DAG.getMergeValues(Ops, dl);
// Generic expansion, valid for any width and SM version:
1316 // {dHi, dLo} = {aHi, aLo} >> Amt
1317 // - if (Amt>=size) then
1318 // dLo = aHi >> (Amt-size)
1319 // dHi = aHi >> Amt (this is either all 0 or all 1)
1321 // dLo = (aLo >>logic Amt) | (aHi << (size-Amt))
// RevShAmt = size - Amt; ExtraShAmt = Amt - size (used for the Amt>=size case).
1324 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
1325 DAG.getConstant(VTBits, MVT::i32), ShAmt);
1326 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
1327 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
1328 DAG.getConstant(VTBits, MVT::i32));
1329 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
// FalseVal handles Amt < size; TrueVal handles Amt >= size.
1330 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
1331 SDValue TrueVal = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
// Select between the two expansions based on Amt >= size.
1333 SDValue Cmp = DAG.getSetCC(dl, MVT::i1, ShAmt,
1334 DAG.getConstant(VTBits, MVT::i32), ISD::SETGE);
1335 SDValue Hi = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
1336 SDValue Lo = DAG.getNode(ISD::SELECT, dl, VT, Cmp, TrueVal, FalseVal);
1338 SDValue Ops[2] = { Lo, Hi };
1339 return DAG.getMergeValues(Ops, dl);
1343 /// LowerShiftLeftParts - Lower SHL_PARTS, which
1344 /// 1) returns two i32 values and take a 2 x i32 value to shift plus a shift
1346 /// 2) returns two i64 values and take a 2 x i64 value to shift plus a shift
// Lower SHL_PARTS (double-wide left shift split into a {Lo, Hi} pair).
// Mirror image of LowerShiftRightParts: funnel shift on sm_35+ for 32-bit,
// otherwise a generic shift/or/select expansion.
1348 SDValue NVPTXTargetLowering::LowerShiftLeftParts(SDValue Op,
1349 SelectionDAG &DAG) const {
1350 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
1351 assert(Op.getOpcode() == ISD::SHL_PARTS);
1353 EVT VT = Op.getValueType();
1354 unsigned VTBits = VT.getSizeInBits();
1356 SDValue ShOpLo = Op.getOperand(0);
1357 SDValue ShOpHi = Op.getOperand(1);
1358 SDValue ShAmt = Op.getOperand(2);
1360 if (VTBits == 32 && nvptxSubtarget.getSmVersion() >= 35) {
1362 // For 32bit and sm35, we can use the funnel shift 'shf' instruction.
1363 // {dHi, dLo} = {aHi, aLo} << Amt
1364 // dHi = shf.l.clamp aLo, aHi, Amt
// Hi gets the funnel shift pulling bits up from Lo; Lo is a plain shl.
1367 SDValue Hi = DAG.getNode(NVPTXISD::FUN_SHFL_CLAMP, dl, VT, ShOpLo, ShOpHi,
1369 SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
1371 SDValue Ops[2] = { Lo, Hi };
1372 return DAG.getMergeValues(Ops, dl);
// Generic expansion, valid for any width and SM version:
1376 // {dHi, dLo} = {aHi, aLo} << Amt
1377 // - if (Amt>=size) then
1378 // dLo = aLo << Amt (all 0)
1379 // dLo = aLo << (Amt-size)
1382 // dHi = (aHi << Amt) | (aLo >> (size-Amt))
// RevShAmt = size - Amt; ExtraShAmt = Amt - size (used for the Amt>=size case).
1384 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
1385 DAG.getConstant(VTBits, MVT::i32), ShAmt);
1386 SDValue Tmp1 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
1387 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
1388 DAG.getConstant(VTBits, MVT::i32));
1389 SDValue Tmp2 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
// FalseVal handles Amt < size; TrueVal handles Amt >= size.
1390 SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
1391 SDValue TrueVal = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
// Select the Hi expansion based on Amt >= size; Lo is always aLo << Amt.
1393 SDValue Cmp = DAG.getSetCC(dl, MVT::i1, ShAmt,
1394 DAG.getConstant(VTBits, MVT::i32), ISD::SETGE);
1395 SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
1396 SDValue Hi = DAG.getNode(ISD::SELECT, dl, VT, Cmp, TrueVal, FalseVal);
1398 SDValue Ops[2] = { Lo, Hi };
1399 return DAG.getMergeValues(Ops, dl);
// Central dispatcher for all operations this target marked as Custom:
// routes each opcode to its dedicated Lower* helper.
1404 NVPTXTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
1405 switch (Op.getOpcode()) {
1406 case ISD::RETURNADDR:
1408 case ISD::FRAMEADDR:
1410 case ISD::GlobalAddress:
1411 return LowerGlobalAddress(Op, DAG);
1412 case ISD::INTRINSIC_W_CHAIN:
1414 case ISD::BUILD_VECTOR:
1415 case ISD::EXTRACT_SUBVECTOR:
1417 case ISD::CONCAT_VECTORS:
1418 return LowerCONCAT_VECTORS(Op, DAG);
1420 return LowerSTORE(Op, DAG);
1422 return LowerLOAD(Op, DAG);
1423 case ISD::SHL_PARTS:
1424 return LowerShiftLeftParts(Op, DAG);
1425 case ISD::SRA_PARTS:
1426 case ISD::SRL_PARTS:
// Both right-shift flavors share one helper (it inspects the opcode).
1427 return LowerShiftRightParts(Op, DAG);
// Reaching here means an opcode was marked Custom without a handler.
1429 llvm_unreachable("Custom lowering not defined for operation");
// Custom load lowering entry point: only i1 loads need special handling
// (they are widened to i8 memory accesses by LowerLOADi1).
1433 SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
1434 if (Op.getValueType() == MVT::i1)
1435 return LowerLOADi1(Op, DAG);
1442 // v1 = ld i8* addr (-> i16)
1443 // v = trunc i16 to i1
// Lower an i1 load by loading the byte as i16 and truncating to i1
// (PTX has no 1-bit memory access); see the "ld i8 -> trunc" comment above.
1444 SDValue NVPTXTargetLowering::LowerLOADi1(SDValue Op, SelectionDAG &DAG) const {
1445 SDNode *Node = Op.getNode();
1446 LoadSDNode *LD = cast<LoadSDNode>(Node);
// Only plain (non-extending) i1 loads are expected here.
1448 assert(LD->getExtensionType() == ISD::NON_EXTLOAD);
1449 assert(Node->getValueType(0) == MVT::i1 &&
1450 "Custom lowering for i1 load only");
// Re-issue the load at i16 width, preserving the original load's memory
// operand properties (volatility, temporality, invariance, alignment).
1452 DAG.getLoad(MVT::i16, dl, LD->getChain(), LD->getBasePtr(),
1453 LD->getPointerInfo(), LD->isVolatile(), LD->isNonTemporal(),
1454 LD->isInvariant(), LD->getAlignment());
1455 SDValue result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, newLD);
1456 // The legalizer (the caller) is expecting two values from the legalized
1457 // load, so we build a MergeValues node for it. See ExpandUnalignedLoad()
1458 // in LegalizeDAG.cpp which also uses MergeValues.
1459 SDValue Ops[] = { result, LD->getChain() };
1460 return DAG.getMergeValues(Ops, dl);
// Custom store lowering entry point: i1 stores are widened (LowerSTOREi1)
// and vector stores are split into StoreV2/StoreV4 (LowerSTOREVector).
1463 SDValue NVPTXTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
// Operand 1 of a store node is the value being stored.
1464 EVT ValVT = Op.getOperand(1).getValueType();
1465 if (ValVT == MVT::i1)
1466 return LowerSTOREi1(Op, DAG);
1467 else if (ValVT.isVector())
1468 return LowerSTOREVector(Op, DAG);
// Lower a vector store into a single NVPTX StoreV2/StoreV4 target node whose
// operands are the individual scalar elements, so it can be selected to a
// PTX st.v2/st.v4 instruction.
1474 NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
1475 SDNode *N = Op.getNode();
1476 SDValue Val = N->getOperand(1);
1478 EVT ValVT = Val.getValueType();
1480 if (ValVT.isVector()) {
1481 // We only handle "native" vector sizes for now, e.g. <4 x double> is not
1482 // legal. We can (and should) split that into 2 stores of <2 x double> here
1483 // but I'm leaving that as a TODO for now.
1484 if (!ValVT.isSimple())
1486 switch (ValVT.getSimpleVT().SimpleTy) {
1499 // This is a "native" vector type
1503 unsigned Opcode = 0;
1504 EVT EltVT = ValVT.getVectorElementType();
1505 unsigned NumElts = ValVT.getVectorNumElements();
1507 // Since StoreV2 is a target node, we cannot rely on DAG type legalization.
1508 // Therefore, we must ensure the type is legal. For i1 and i8, we set the
1509 // stored type to i16 and propagate the "real" type as the memory type.
1510 bool NeedExt = false;
1511 if (EltVT.getSizeInBits() < 16)
// Pick the target opcode from the element count (2 -> StoreV2, 4 -> StoreV4).
1518 Opcode = NVPTXISD::StoreV2;
1521 Opcode = NVPTXISD::StoreV4;
1526 SmallVector<SDValue, 8> Ops;
1528 // First is the chain
1529 Ops.push_back(N->getOperand(0));
1531 // Then the split values
1532 for (unsigned i = 0; i < NumElts; ++i) {
1533 SDValue ExtVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Val,
1534 DAG.getIntPtrConstant(i));
// Sub-16-bit elements are widened to i16 here; the memory VT below keeps
// the true element width.
1536 ExtVal = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i16, ExtVal);
1537 Ops.push_back(ExtVal);
1540 // Then any remaining arguments
1541 for (unsigned i = 2, e = N->getNumOperands(); i != e; ++i) {
1542 Ops.push_back(N->getOperand(i));
1545 MemSDNode *MemSD = cast<MemSDNode>(N);
// Build the target store, reusing the original node's memory VT and operand
// so alias analysis and memory attributes are preserved.
1547 SDValue NewSt = DAG.getMemIntrinsicNode(
1548 Opcode, DL, DAG.getVTList(MVT::Other), Ops,
1549 MemSD->getMemoryVT(), MemSD->getMemOperand());
1551 //return DCI.CombineTo(N, NewSt, true);
1560 // v1 = zext v to i16
// Lower an i1 store by zero-extending the value to i16 and emitting a
// truncating store to i8 (PTX has no 1-bit memory access).
1562 SDValue NVPTXTargetLowering::LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const {
1563 SDNode *Node = Op.getNode();
1565 StoreSDNode *ST = cast<StoreSDNode>(Node);
1566 SDValue Tmp1 = ST->getChain();
1567 SDValue Tmp2 = ST->getBasePtr();
1568 SDValue Tmp3 = ST->getValue();
1569 assert(Tmp3.getValueType() == MVT::i1 && "Custom lowering for i1 store only");
// Preserve the original store's memory attributes on the replacement.
1570 unsigned Alignment = ST->getAlignment();
1571 bool isVolatile = ST->isVolatile();
1572 bool isNonTemporal = ST->isNonTemporal();
// Widen the i1 value to i16, then truncate-store it as a single i8 byte.
1573 Tmp3 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Tmp3);
1574 SDValue Result = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2,
1575 ST->getPointerInfo(), MVT::i8, isNonTemporal,
1576 isVolatile, Alignment);
// Build a TargetExternalSymbol named "<inname><idx-suffix>" of type \p v.
// The string is interned in the target machine's managed string pool so the
// returned symbol's C-string stays alive for the duration of codegen.
1580 SDValue NVPTXTargetLowering::getExtSymb(SelectionDAG &DAG, const char *inname,
1581 int idx, EVT v) const {
1582 std::string *name = nvTM->getManagedStrPool()->getManagedString(inname);
1583 std::stringstream suffix;
// Append the formatted index suffix to the pooled name in place.
1585 *name += suffix.str();
1586 return DAG.getTargetExternalSymbol(name->c_str(), v);
// Return the external symbol "<function-name>_param_<idx>" used to address
// formal parameter \p idx in PTX param space.
1590 NVPTXTargetLowering::getParamSymbol(SelectionDAG &DAG, int idx, EVT v) const {
1591 std::string ParamSym;
1592 raw_string_ostream ParamStr(ParamSym);
// Symbol name follows the "<func>_param_<n>" naming used by the AsmPrinter.
1594 ParamStr << DAG.getMachineFunction().getName() << "_param_" << idx;
// Intern the string so the symbol's C-string outlives this function.
1597 std::string *SavedStr =
1598 nvTM->getManagedStrPool()->getManagedString(ParamSym.c_str());
1599 return DAG.getTargetExternalSymbol(SavedStr->c_str(), v);
// Return the helper-parameter symbol ".HLPPARAM<idx>" via getExtSymb
// (uses getExtSymb's default EVT for the symbol type).
1602 SDValue NVPTXTargetLowering::getParamHelpSymbol(SelectionDAG &DAG, int idx) {
1603 return getExtSymb(DAG, ".HLPPARAM", idx);
1606 // Check to see if the kernel argument is image*_t or sampler_t
// Return true if \p arg is a pointer to one of the OpenCL opaque image or
// sampler struct types (matched by the struct's name).
1608 bool llvm::isImageOrSamplerVal(const Value *arg, const Module *context) {
// Recognized opaque struct type names for image2d/image3d/sampler.
1609 static const char *const specialTypes[] = { "struct._image2d_t",
1610 "struct._image3d_t",
1611 "struct._sampler_t" };
1613 const Type *Ty = arg->getType();
1614 const PointerType *PTy = dyn_cast<PointerType>(Ty);
// Literal (unnamed) structs cannot match any of the special names.
1622 const StructType *STy = dyn_cast<StructType>(PTy->getElementType());
1623 const std::string TypeName = STy && !STy->isLiteral() ? STy->getName() : "";
1625 for (int i = 0, e = array_lengthof(specialTypes); i != e; ++i)
1626 if (TypeName == specialTypes[i])
// Lower incoming formal arguments: for each IR argument, either materialize
// UNDEF (unused args), emit loads from the "<func>_param_<n>" symbol in param
// space (aggregates, vectors, scalars), or wrap byval pointers in MoveParam.
1632 SDValue NVPTXTargetLowering::LowerFormalArguments(
1633 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1634 const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc dl, SelectionDAG &DAG,
1635 SmallVectorImpl<SDValue> &InVals) const {
1636 MachineFunction &MF = DAG.getMachineFunction();
1637 const DataLayout *TD = getDataLayout();
1639 const Function *F = MF.getFunction();
1640 const AttributeSet &PAL = F->getAttributes();
1641 const TargetLowering *TLI = DAG.getTarget().getTargetLowering();
1643 SDValue Root = DAG.getRoot();
1644 std::vector<SDValue> OutChains;
1646 bool isKernel = llvm::isKernelFunction(*F);
// Only the >= sm_20 ABI path is supported; pre-ABI lowering was removed.
1647 bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
1648 assert(isABI && "Non-ABI compilation is not supported");
// Snapshot the IR argument list so it can be indexed in parallel with Ins.
1652 std::vector<Type *> argTypes;
1653 std::vector<const Argument *> theArgs;
1654 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
1656 theArgs.push_back(I);
1657 argTypes.push_back(I->getType());
1659 // argTypes.size() (or theArgs.size()) and Ins.size() need not match.
1660 // Ins.size() will be larger
1661 // * if there is an aggregate argument with multiple fields (each field
1662 // showing up separately in Ins)
1663 // * if there is a vector argument with more than typical vector-length
1664 // elements (generally if more than 4) where each vector element is
1665 // individually present in Ins.
1666 // So a different index should be used for indexing into Ins.
1667 // See similar issue in LowerCall.
1668 unsigned InsIdx = 0;
1671 for (unsigned i = 0, e = theArgs.size(); i != e; ++i, ++idx, ++InsIdx) {
1672 Type *Ty = argTypes[i];
1674 // If the kernel argument is image*_t or sampler_t, convert it to
1675 // a i32 constant holding the parameter position. This can later
1676 // matched in the AsmPrinter to output the correct mangled name.
1677 if (isImageOrSamplerVal(
1679 (theArgs[i]->getParent() ? theArgs[i]->getParent()->getParent()
1681 assert(isKernel && "Only kernels can have image/sampler params");
// 1-based parameter position encoded as a constant for the AsmPrinter.
1682 InVals.push_back(DAG.getConstant(i + 1, MVT::i32));
// Dead arguments: no loads are emitted, but InVals must still receive one
// UNDEF per expected Ins entry so indices stay in sync.
1686 if (theArgs[i]->use_empty()) {
1688 if (Ty->isAggregateType()) {
1689 SmallVector<EVT, 16> vtparts;
1691 ComputePTXValueVTs(*this, Ty, vtparts);
1692 assert(vtparts.size() > 0 && "empty aggregate type not expected");
1693 for (unsigned parti = 0, parte = vtparts.size(); parti != parte;
1695 EVT partVT = vtparts[parti];
1696 InVals.push_back(DAG.getNode(ISD::UNDEF, dl, partVT));
1699 if (vtparts.size() > 0)
1703 if (Ty->isVectorTy()) {
1704 EVT ObjectVT = getValueType(Ty);
1705 unsigned NumRegs = TLI->getNumRegisters(F->getContext(), ObjectVT);
1706 for (unsigned parti = 0; parti < NumRegs; ++parti) {
1707 InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
1714 InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
1718 // In the following cases, assign a node order of "idx+1"
1719 // to newly created nodes. The SDNodes for params have to
1720 // appear in the same order as their order of appearance
1721 // in the original function. "idx+1" holds that order.
// Non-byval arguments are loaded from param space; byval is handled in the
// else branch at the bottom of the loop.
1722 if (PAL.hasAttribute(i + 1, Attribute::ByVal) == false) {
1723 if (Ty->isAggregateType()) {
1724 SmallVector<EVT, 16> vtparts;
1725 SmallVector<uint64_t, 16> offsets;
1727 // NOTE: Here, we lose the ability to issue vector loads for vectors
1728 // that are a part of a struct. This should be investigated in the
1730 ComputePTXValueVTs(*this, Ty, vtparts, &offsets, 0);
1731 assert(vtparts.size() > 0 && "empty aggregate type not expected");
// Packed structs drop to byte alignment for each member load.
1732 bool aggregateIsPacked = false;
1733 if (StructType *STy = llvm::dyn_cast<StructType>(Ty))
1734 aggregateIsPacked = STy->isPacked();
1736 SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
1737 for (unsigned parti = 0, parte = vtparts.size(); parti != parte;
1739 EVT partVT = vtparts[parti];
// Null pointer in ADDRESS_SPACE_PARAM serves as the MachinePointerInfo
// value identifying param-space memory for alias analysis.
1740 Value *srcValue = Constant::getNullValue(
1741 PointerType::get(partVT.getTypeForEVT(F->getContext()),
1742 llvm::ADDRESS_SPACE_PARAM));
1744 DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
1745 DAG.getConstant(offsets[parti], getPointerTy()));
1746 unsigned partAlign =
1747 aggregateIsPacked ? 1
1748 : TD->getABITypeAlignment(
1749 partVT.getTypeForEVT(F->getContext()));
// If the expected register type is wider than the member, use an
// extending load with the extension kind taken from the sext flag.
1751 if (Ins[InsIdx].VT.getSizeInBits() > partVT.getSizeInBits()) {
1752 ISD::LoadExtType ExtOp = Ins[InsIdx].Flags.isSExt() ?
1753 ISD::SEXTLOAD : ISD::ZEXTLOAD;
1754 p = DAG.getExtLoad(ExtOp, dl, Ins[InsIdx].VT, Root, srcAddr,
1755 MachinePointerInfo(srcValue), partVT, false,
1758 p = DAG.getLoad(partVT, dl, Root, srcAddr,
1759 MachinePointerInfo(srcValue), false, false, false,
// Preserve source ordering of the parameter nodes (see note above).
1763 p.getNode()->setIROrder(idx + 1);
1764 InVals.push_back(p);
1767 if (vtparts.size() > 0)
1771 if (Ty->isVectorTy()) {
1772 EVT ObjectVT = getValueType(Ty);
1773 SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
1774 unsigned NumElts = ObjectVT.getVectorNumElements();
1775 assert(TLI->getNumRegisters(F->getContext(), ObjectVT) == NumElts &&
1776 "Vector was not scalarized");
1778 EVT EltVT = ObjectVT.getVectorElementType();
1783 // We only have one element, so just directly load it
1784 Value *SrcValue = Constant::getNullValue(PointerType::get(
1785 EltVT.getTypeForEVT(F->getContext()), llvm::ADDRESS_SPACE_PARAM));
1786 SDValue SrcAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
1787 DAG.getConstant(Ofst, getPointerTy()));
1788 SDValue P = DAG.getLoad(
1789 EltVT, dl, Root, SrcAddr, MachinePointerInfo(SrcValue), false,
1791 TD->getABITypeAlignment(EltVT.getTypeForEVT(F->getContext())));
1793 P.getNode()->setIROrder(idx + 1);
1795 if (Ins[InsIdx].VT.getSizeInBits() > EltVT.getSizeInBits())
1796 P = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, P);
1797 InVals.push_back(P);
1798 Ofst += TD->getTypeAllocSize(EltVT.getTypeForEVT(F->getContext()));
// Two-element vectors: a single v2 load then extract both elements.
1800 } else if (NumElts == 2) {
1802 // f32,f32 = load ...
1803 EVT VecVT = EVT::getVectorVT(F->getContext(), EltVT, 2);
1804 Value *SrcValue = Constant::getNullValue(PointerType::get(
1805 VecVT.getTypeForEVT(F->getContext()), llvm::ADDRESS_SPACE_PARAM));
1806 SDValue SrcAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
1807 DAG.getConstant(Ofst, getPointerTy()));
1808 SDValue P = DAG.getLoad(
1809 VecVT, dl, Root, SrcAddr, MachinePointerInfo(SrcValue), false,
1811 TD->getABITypeAlignment(VecVT.getTypeForEVT(F->getContext())));
1813 P.getNode()->setIROrder(idx + 1);
1815 SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, P,
1816 DAG.getIntPtrConstant(0));
1817 SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, P,
1818 DAG.getIntPtrConstant(1));
1820 if (Ins[InsIdx].VT.getSizeInBits() > EltVT.getSizeInBits()) {
1821 Elt0 = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, Elt0);
1822 Elt1 = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, Elt1);
1825 InVals.push_back(Elt0);
1826 InVals.push_back(Elt1);
1827 Ofst += TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
1831 // We have at least 4 elements (<3 x Ty> expands to 4 elements) and
1833 // vector will be expanded to a power of 2 elements, so we know we can
1834 // always round up to the next multiple of 4 when creating the vector
1836 // e.g. 4 elem => 1 ld.v4
1837 // 6 elem => 2 ld.v4
1838 // 8 elem => 2 ld.v4
1839 // 11 elem => 3 ld.v4
1840 unsigned VecSize = 4;
// 64-bit elements use narrower vector loads (VecSize reduced here).
1841 if (EltVT.getSizeInBits() == 64) {
1844 EVT VecVT = EVT::getVectorVT(F->getContext(), EltVT, VecSize);
1845 for (unsigned i = 0; i < NumElts; i += VecSize) {
1846 Value *SrcValue = Constant::getNullValue(
1847 PointerType::get(VecVT.getTypeForEVT(F->getContext()),
1848 llvm::ADDRESS_SPACE_PARAM));
1850 DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
1851 DAG.getConstant(Ofst, getPointerTy()));
1852 SDValue P = DAG.getLoad(
1853 VecVT, dl, Root, SrcAddr, MachinePointerInfo(SrcValue), false,
1855 TD->getABITypeAlignment(VecVT.getTypeForEVT(F->getContext())));
1857 P.getNode()->setIROrder(idx + 1);
// Extract only the elements that exist; trailing lanes of the last
// (rounded-up) vector load are skipped.
1859 for (unsigned j = 0; j < VecSize; ++j) {
1860 if (i + j >= NumElts)
1862 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, P,
1863 DAG.getIntPtrConstant(j));
1864 if (Ins[InsIdx].VT.getSizeInBits() > EltVT.getSizeInBits())
1865 Elt = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, Elt);
1866 InVals.push_back(Elt);
1868 Ofst += TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
// Plain scalar argument: one load from the param symbol.
1878 EVT ObjectVT = getValueType(Ty);
1879 // If ABI, load from the param symbol
1880 SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
1881 Value *srcValue = Constant::getNullValue(PointerType::get(
1882 ObjectVT.getTypeForEVT(F->getContext()), llvm::ADDRESS_SPACE_PARAM));
1884 if (ObjectVT.getSizeInBits() < Ins[InsIdx].VT.getSizeInBits()) {
1885 ISD::LoadExtType ExtOp = Ins[InsIdx].Flags.isSExt() ?
1886 ISD::SEXTLOAD : ISD::ZEXTLOAD;
1887 p = DAG.getExtLoad(ExtOp, dl, Ins[InsIdx].VT, Root, Arg,
1888 MachinePointerInfo(srcValue), ObjectVT, false, false,
1889 TD->getABITypeAlignment(ObjectVT.getTypeForEVT(F->getContext())));
1891 p = DAG.getLoad(Ins[InsIdx].VT, dl, Root, Arg,
1892 MachinePointerInfo(srcValue), false, false, false,
1893 TD->getABITypeAlignment(ObjectVT.getTypeForEVT(F->getContext())));
1896 p.getNode()->setIROrder(idx + 1);
1897 InVals.push_back(p);
1901 // Param has ByVal attribute
1902 // Return MoveParam(param symbol).
1903 // Ideally, the param symbol can be returned directly,
1904 // but when SDNode builder decides to use it in a CopyToReg(),
1905 // machine instruction fails because TargetExternalSymbol
1906 // (not lowered) is target dependent, and CopyToReg assumes
1907 // the source is lowered.
1908 EVT ObjectVT = getValueType(Ty);
1909 assert(ObjectVT == Ins[InsIdx].VT &&
1910 "Ins type did not match function type");
1911 SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
1912 SDValue p = DAG.getNode(NVPTXISD::MoveParam, dl, ObjectVT, Arg);
1914 p.getNode()->setIROrder(idx + 1);
1916 InVals.push_back(p);
// Otherwise convert the param-space pointer to generic address space via
// the nvvm_ptr_local_to_gen intrinsic before handing it to the body.
1918 SDValue p2 = DAG.getNode(
1919 ISD::INTRINSIC_WO_CHAIN, dl, ObjectVT,
1920 DAG.getConstant(Intrinsic::nvvm_ptr_local_to_gen, MVT::i32), p);
1921 InVals.push_back(p2);
1925 // Clang will check explicit VarArg and issue error if any. However, Clang
1926 // will let code with
1927 // implicit var arg like f() pass. See bug 617733.
1928 // We treat this case as if the arg list is empty.
1929 // if (F.isVarArg()) {
1930 // assert(0 && "VarArg not supported yet!");
// Merge any chains produced above back into the DAG root.
1933 if (!OutChains.empty())
1934 DAG.setRoot(DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains));
// Lower the return value(s) of the current function.  On NVPTX, return
// values are not passed in registers; each component is written into the
// implicit return-parameter space via NVPTXISD::StoreRetval{,V2,V4}
// memory-intrinsic nodes, and the function is terminated with RET_FLAG.
NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 SDLoc dl, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  const Function *F = MF.getFunction();
  Type *RetTy = F->getReturnType();
  const DataLayout *TD = getDataLayout();

  // Only the ABI lowering scheme (sm_20 and newer) is supported.
  bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
  assert(isABI && "Non-ABI compilation is not supported");

  if (VectorType *VTy = dyn_cast<VectorType>(RetTy)) {
    // If we have a vector type, the OutVals array will be the scalarized
    // components and we have to combine them into 1 or more vector stores.
    unsigned NumElts = VTy->getNumElements();
    assert(NumElts == Outs.size() && "Bad scalarization of return value");

    // const_cast can be removed in later LLVM versions
    EVT EltVT = getValueType(RetTy).getVectorElementType();
    // Elements narrower than 16 bits are widened to i16 before being
    // stored, since PTX has no narrower register class.
    bool NeedExtend = false;
    if (EltVT.getSizeInBits() < 16)

    SDValue StoreVal = OutVals[0];
    // We only have one element, so just directly store it
      StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
    SDValue Ops[] = { Chain, DAG.getConstant(0, MVT::i32), StoreVal };
    // StoreRetval is a memory intrinsic, so the chain is threaded through.
    Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreRetval, dl,
                                    DAG.getVTList(MVT::Other), Ops,
                                    EltVT, MachinePointerInfo());
  } else if (NumElts == 2) {
    // Two elements: emit a single StoreRetvalV2.
    SDValue StoreVal0 = OutVals[0];
    SDValue StoreVal1 = OutVals[1];
      StoreVal0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal0);
      StoreVal1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal1);
    SDValue Ops[] = { Chain, DAG.getConstant(0, MVT::i32), StoreVal0,
    Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreRetvalV2, dl,
                                    DAG.getVTList(MVT::Other), Ops,
                                    EltVT, MachinePointerInfo());
    // We have at least 4 elements (<3 x Ty> expands to 4 elements) and the
    // vector will be expanded to a power of 2 elements, so we know we can
    // always round up to the next multiple of 4 when creating the vector
    // e.g. 4 elem => 1 st.v4
    //      6 elem => 2 st.v4
    //      8 elem => 2 st.v4
    //     11 elem => 3 st.v4
    // 64-bit elements are stored two per instruction; narrower elements
    // four per instruction.
    unsigned VecSize = 4;
    if (OutVals[0].getValueType().getSizeInBits() == 64)

    // Running byte offset into the return-parameter space.
    unsigned Offset = 0;
        EVT::getVectorVT(F->getContext(), OutVals[0].getValueType(), VecSize);
    unsigned PerStoreOffset =
        TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));

    for (unsigned i = 0; i < NumElts; i += VecSize) {
      // Build the operand list: chain, byte offset, then up to 4 values.
      SmallVector<SDValue, 8> Ops;
      Ops.push_back(Chain);
      Ops.push_back(DAG.getConstant(Offset, MVT::i32));
      unsigned Opc = NVPTXISD::StoreRetvalV2;
      EVT ExtendedVT = (NeedExtend) ? MVT::i16 : OutVals[0].getValueType();

      StoreVal = OutVals[i];
        StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal);
      Ops.push_back(StoreVal);

      if (i + 1 < NumElts) {
        StoreVal = OutVals[i + 1];
          StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal);
        // Missing lanes of a trailing partial vector are padded with undef.
        StoreVal = DAG.getUNDEF(ExtendedVT);
      Ops.push_back(StoreVal);

        Opc = NVPTXISD::StoreRetvalV4;
        if (i + 2 < NumElts) {
          StoreVal = OutVals[i + 2];
              DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal);
          StoreVal = DAG.getUNDEF(ExtendedVT);
        Ops.push_back(StoreVal);

        if (i + 3 < NumElts) {
          StoreVal = OutVals[i + 3];
              DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal);
          StoreVal = DAG.getUNDEF(ExtendedVT);
        Ops.push_back(StoreVal);

      // Chain = DAG.getNode(Opc, dl, MVT::Other, &Ops[0], Ops.size());
          DAG.getMemIntrinsicNode(Opc, dl, DAG.getVTList(MVT::Other), Ops,
                                  EltVT, MachinePointerInfo());
      Offset += PerStoreOffset;

    // Scalar / aggregate return path: decompose the return type into its
    // primitive EVTs and emit one StoreRetval per component.
    SmallVector<EVT, 16> ValVTs;
    // const_cast is necessary since we are still using an LLVM version from
    // before the type system re-write.
    ComputePTXValueVTs(*this, RetTy, ValVTs);
    assert(ValVTs.size() == OutVals.size() && "Bad return value decomposition");

    unsigned SizeSoFar = 0;
    for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
      SDValue theVal = OutVals[i];
      EVT TheValType = theVal.getValueType();
      unsigned numElems = 1;
      if (TheValType.isVector())
        numElems = TheValType.getVectorNumElements();
      for (unsigned j = 0, je = numElems; j != je; ++j) {
        SDValue TmpVal = theVal;
        if (TheValType.isVector())
          TmpVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
                               TheValType.getVectorElementType(), TmpVal,
                               DAG.getIntPtrConstant(j));
        EVT TheStoreType = ValVTs[i];
        // Integers narrower than 32 bits are promoted to i32 for the store,
        // per the NVPTX calling convention.
        if (RetTy->isIntegerTy() &&
            TD->getTypeAllocSizeInBits(RetTy) < 32) {
          // The following zero-extension is for integer types only, and
          // specifically not for aggregates.
          TmpVal = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, TmpVal);
          TheStoreType = MVT::i32;
        else if (TmpVal.getValueType().getSizeInBits() < 16)
          TmpVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, TmpVal);

        SDValue Ops[] = { Chain, DAG.getConstant(SizeSoFar, MVT::i32), TmpVal };
        Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreRetval, dl,
                                        DAG.getVTList(MVT::Other), Ops,
                                        MachinePointerInfo());
        // Advance the byte offset by the size of the component just stored.
        if(TheValType.isVector())
            TheStoreType.getVectorElementType().getStoreSizeInBits() / 8;
          SizeSoFar += TheStoreType.getStoreSizeInBits()/8;

  return DAG.getNode(NVPTXISD::RET_FLAG, dl, MVT::Other, Chain);
// Lower an operand appearing in an inline-asm constraint.  NVPTX only
// special-cases multi-character constraints; single-character constraints
// are delegated to the default TargetLowering implementation.
void NVPTXTargetLowering::LowerAsmOperandForConstraint(
    SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
    SelectionDAG &DAG) const {
  if (Constraint.length() > 1)
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
// NVPTX supports vectors of legal types of any length in Intrinsics because
// the NVPTX specific type legalizer
// will legalize them to the PTX supported length.
bool NVPTXTargetLowering::isTypeSupportedInIntrinsic(MVT VT) const {
  // Any already-legal type is supported directly.
  if (isTypeLegal(VT))
  // A vector is supported if its element type is legal, regardless of the
  // vector length (the type legalizer will split it as needed).
  if (VT.isVector()) {
    MVT eVT = VT.getVectorElementType();
    if (isTypeLegal(eVT))
// Map an NVVM texture-read intrinsic ID to the corresponding target-specific
// NVPTXISD texture opcode.  The naming scheme is
// Tex<Geometry><ResultTy><CoordTy>[Level|Grad] for plain, mip-level and
// gradient sampling variants respectively.
static unsigned getOpcForTextureInstr(unsigned Intrinsic) {
  switch (Intrinsic) {
  // 1D textures
  case Intrinsic::nvvm_tex_1d_v4f32_i32:
    return NVPTXISD::Tex1DFloatI32;
  case Intrinsic::nvvm_tex_1d_v4f32_f32:
    return NVPTXISD::Tex1DFloatFloat;
  case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
    return NVPTXISD::Tex1DFloatFloatLevel;
  case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
    return NVPTXISD::Tex1DFloatFloatGrad;
  case Intrinsic::nvvm_tex_1d_v4i32_i32:
    return NVPTXISD::Tex1DI32I32;
  case Intrinsic::nvvm_tex_1d_v4i32_f32:
    return NVPTXISD::Tex1DI32Float;
  case Intrinsic::nvvm_tex_1d_level_v4i32_f32:
    return NVPTXISD::Tex1DI32FloatLevel;
  case Intrinsic::nvvm_tex_1d_grad_v4i32_f32:
    return NVPTXISD::Tex1DI32FloatGrad;
  // 1D texture arrays
  case Intrinsic::nvvm_tex_1d_array_v4f32_i32:
    return NVPTXISD::Tex1DArrayFloatI32;
  case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
    return NVPTXISD::Tex1DArrayFloatFloat;
  case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
    return NVPTXISD::Tex1DArrayFloatFloatLevel;
  case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
    return NVPTXISD::Tex1DArrayFloatFloatGrad;
  case Intrinsic::nvvm_tex_1d_array_v4i32_i32:
    return NVPTXISD::Tex1DArrayI32I32;
  case Intrinsic::nvvm_tex_1d_array_v4i32_f32:
    return NVPTXISD::Tex1DArrayI32Float;
  case Intrinsic::nvvm_tex_1d_array_level_v4i32_f32:
    return NVPTXISD::Tex1DArrayI32FloatLevel;
  case Intrinsic::nvvm_tex_1d_array_grad_v4i32_f32:
    return NVPTXISD::Tex1DArrayI32FloatGrad;
  // 2D textures
  case Intrinsic::nvvm_tex_2d_v4f32_i32:
    return NVPTXISD::Tex2DFloatI32;
  case Intrinsic::nvvm_tex_2d_v4f32_f32:
    return NVPTXISD::Tex2DFloatFloat;
  case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
    return NVPTXISD::Tex2DFloatFloatLevel;
  case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
    return NVPTXISD::Tex2DFloatFloatGrad;
  case Intrinsic::nvvm_tex_2d_v4i32_i32:
    return NVPTXISD::Tex2DI32I32;
  case Intrinsic::nvvm_tex_2d_v4i32_f32:
    return NVPTXISD::Tex2DI32Float;
  case Intrinsic::nvvm_tex_2d_level_v4i32_f32:
    return NVPTXISD::Tex2DI32FloatLevel;
  case Intrinsic::nvvm_tex_2d_grad_v4i32_f32:
    return NVPTXISD::Tex2DI32FloatGrad;
  // 2D texture arrays
  case Intrinsic::nvvm_tex_2d_array_v4f32_i32:
    return NVPTXISD::Tex2DArrayFloatI32;
  case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
    return NVPTXISD::Tex2DArrayFloatFloat;
  case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
    return NVPTXISD::Tex2DArrayFloatFloatLevel;
  case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
    return NVPTXISD::Tex2DArrayFloatFloatGrad;
  case Intrinsic::nvvm_tex_2d_array_v4i32_i32:
    return NVPTXISD::Tex2DArrayI32I32;
  case Intrinsic::nvvm_tex_2d_array_v4i32_f32:
    return NVPTXISD::Tex2DArrayI32Float;
  case Intrinsic::nvvm_tex_2d_array_level_v4i32_f32:
    return NVPTXISD::Tex2DArrayI32FloatLevel;
  case Intrinsic::nvvm_tex_2d_array_grad_v4i32_f32:
    return NVPTXISD::Tex2DArrayI32FloatGrad;
  // 3D textures
  case Intrinsic::nvvm_tex_3d_v4f32_i32:
    return NVPTXISD::Tex3DFloatI32;
  case Intrinsic::nvvm_tex_3d_v4f32_f32:
    return NVPTXISD::Tex3DFloatFloat;
  case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
    return NVPTXISD::Tex3DFloatFloatLevel;
  case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:
    return NVPTXISD::Tex3DFloatFloatGrad;
  case Intrinsic::nvvm_tex_3d_v4i32_i32:
    return NVPTXISD::Tex3DI32I32;
  case Intrinsic::nvvm_tex_3d_v4i32_f32:
    return NVPTXISD::Tex3DI32Float;
  case Intrinsic::nvvm_tex_3d_level_v4i32_f32:
    return NVPTXISD::Tex3DI32FloatLevel;
  case Intrinsic::nvvm_tex_3d_grad_v4i32_f32:
    return NVPTXISD::Tex3DI32FloatGrad;
// Map an NVVM surface-load (suld) intrinsic ID to the corresponding
// target-specific NVPTXISD opcode.  The naming scheme is
// Suld<Geometry>[V2|V4]I<Bits>Trap; all variants here use the
// trap-on-out-of-bounds clamping mode.
static unsigned getOpcForSurfaceInstr(unsigned Intrinsic) {
  switch (Intrinsic) {
  // 1D surfaces
  case Intrinsic::nvvm_suld_1d_i8_trap:
    return NVPTXISD::Suld1DI8Trap;
  case Intrinsic::nvvm_suld_1d_i16_trap:
    return NVPTXISD::Suld1DI16Trap;
  case Intrinsic::nvvm_suld_1d_i32_trap:
    return NVPTXISD::Suld1DI32Trap;
  case Intrinsic::nvvm_suld_1d_v2i8_trap:
    return NVPTXISD::Suld1DV2I8Trap;
  case Intrinsic::nvvm_suld_1d_v2i16_trap:
    return NVPTXISD::Suld1DV2I16Trap;
  case Intrinsic::nvvm_suld_1d_v2i32_trap:
    return NVPTXISD::Suld1DV2I32Trap;
  case Intrinsic::nvvm_suld_1d_v4i8_trap:
    return NVPTXISD::Suld1DV4I8Trap;
  case Intrinsic::nvvm_suld_1d_v4i16_trap:
    return NVPTXISD::Suld1DV4I16Trap;
  case Intrinsic::nvvm_suld_1d_v4i32_trap:
    return NVPTXISD::Suld1DV4I32Trap;
  // 1D surface arrays
  case Intrinsic::nvvm_suld_1d_array_i8_trap:
    return NVPTXISD::Suld1DArrayI8Trap;
  case Intrinsic::nvvm_suld_1d_array_i16_trap:
    return NVPTXISD::Suld1DArrayI16Trap;
  case Intrinsic::nvvm_suld_1d_array_i32_trap:
    return NVPTXISD::Suld1DArrayI32Trap;
  case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
    return NVPTXISD::Suld1DArrayV2I8Trap;
  case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
    return NVPTXISD::Suld1DArrayV2I16Trap;
  case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
    return NVPTXISD::Suld1DArrayV2I32Trap;
  case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
    return NVPTXISD::Suld1DArrayV4I8Trap;
  case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
    return NVPTXISD::Suld1DArrayV4I16Trap;
  case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
    return NVPTXISD::Suld1DArrayV4I32Trap;
  // 2D surfaces
  case Intrinsic::nvvm_suld_2d_i8_trap:
    return NVPTXISD::Suld2DI8Trap;
  case Intrinsic::nvvm_suld_2d_i16_trap:
    return NVPTXISD::Suld2DI16Trap;
  case Intrinsic::nvvm_suld_2d_i32_trap:
    return NVPTXISD::Suld2DI32Trap;
  case Intrinsic::nvvm_suld_2d_v2i8_trap:
    return NVPTXISD::Suld2DV2I8Trap;
  case Intrinsic::nvvm_suld_2d_v2i16_trap:
    return NVPTXISD::Suld2DV2I16Trap;
  case Intrinsic::nvvm_suld_2d_v2i32_trap:
    return NVPTXISD::Suld2DV2I32Trap;
  case Intrinsic::nvvm_suld_2d_v4i8_trap:
    return NVPTXISD::Suld2DV4I8Trap;
  case Intrinsic::nvvm_suld_2d_v4i16_trap:
    return NVPTXISD::Suld2DV4I16Trap;
  case Intrinsic::nvvm_suld_2d_v4i32_trap:
    return NVPTXISD::Suld2DV4I32Trap;
  // 2D surface arrays
  case Intrinsic::nvvm_suld_2d_array_i8_trap:
    return NVPTXISD::Suld2DArrayI8Trap;
  case Intrinsic::nvvm_suld_2d_array_i16_trap:
    return NVPTXISD::Suld2DArrayI16Trap;
  case Intrinsic::nvvm_suld_2d_array_i32_trap:
    return NVPTXISD::Suld2DArrayI32Trap;
  case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
    return NVPTXISD::Suld2DArrayV2I8Trap;
  case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
    return NVPTXISD::Suld2DArrayV2I16Trap;
  case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
    return NVPTXISD::Suld2DArrayV2I32Trap;
  case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
    return NVPTXISD::Suld2DArrayV4I8Trap;
  case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
    return NVPTXISD::Suld2DArrayV4I16Trap;
  case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
    return NVPTXISD::Suld2DArrayV4I32Trap;
  // 3D surfaces
  case Intrinsic::nvvm_suld_3d_i8_trap:
    return NVPTXISD::Suld3DI8Trap;
  case Intrinsic::nvvm_suld_3d_i16_trap:
    return NVPTXISD::Suld3DI16Trap;
  case Intrinsic::nvvm_suld_3d_i32_trap:
    return NVPTXISD::Suld3DI32Trap;
  case Intrinsic::nvvm_suld_3d_v2i8_trap:
    return NVPTXISD::Suld3DV2I8Trap;
  case Intrinsic::nvvm_suld_3d_v2i16_trap:
    return NVPTXISD::Suld3DV2I16Trap;
  case Intrinsic::nvvm_suld_3d_v2i32_trap:
    return NVPTXISD::Suld3DV2I32Trap;
  case Intrinsic::nvvm_suld_3d_v4i8_trap:
    return NVPTXISD::Suld3DV4I8Trap;
  case Intrinsic::nvvm_suld_3d_v4i16_trap:
    return NVPTXISD::Suld3DV4I16Trap;
  case Intrinsic::nvvm_suld_3d_v4i32_trap:
    return NVPTXISD::Suld3DV4I32Trap;
// llvm.ptx.memcpy.const and llvm.ptx.memmove.const need to be modeled as
// MemIntrinsicNodes, because we need the information that is only available
// in the "Value" type of the destination
// pointer. In particular, the address space information.
//
// Fills in \p Info (opcode, memory VT, pointer operand, read/write flags)
// for intrinsics that touch memory, so the SelectionDAG builder can create
// a MemIntrinsicNode with correct memory operands.  Returns true when the
// intrinsic was recognized and \p Info was populated.
bool NVPTXTargetLowering::getTgtMemIntrinsic(
    IntrinsicInfo &Info, const CallInst &I, unsigned Intrinsic) const {
  switch (Intrinsic) {
  // Atomic float add: reads and writes its pointer operand.
  case Intrinsic::nvvm_atomic_load_add_f32:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::f32;
    Info.ptrVal = I.getArgOperand(0);
    Info.readMem = true;
    Info.writeMem = true;

  // Atomic 32-bit increment/decrement: read-modify-write on the pointer.
  case Intrinsic::nvvm_atomic_load_inc_32:
  case Intrinsic::nvvm_atomic_load_dec_32:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.readMem = true;
    Info.writeMem = true;

  // ldu (load via the read-only data cache): a pure read of global memory.
  case Intrinsic::nvvm_ldu_global_i:
  case Intrinsic::nvvm_ldu_global_f:
  case Intrinsic::nvvm_ldu_global_p:

    Info.opc = ISD::INTRINSIC_W_CHAIN;
    // Integer and pointer flavors take their memVT from the call's result
    // type; the float flavor is fixed at f32.
    if (Intrinsic == Intrinsic::nvvm_ldu_global_i)
      Info.memVT = getValueType(I.getType());
    else if (Intrinsic == Intrinsic::nvvm_ldu_global_p)
      Info.memVT = getValueType(I.getType());
      Info.memVT = MVT::f32;
    Info.ptrVal = I.getArgOperand(0);
    Info.readMem = true;
    Info.writeMem = false;

  // Texture reads returning v4f32: modeled as reads with no IR pointer
  // operand (the texture handle is not a generic pointer).
  case Intrinsic::nvvm_tex_1d_v4f32_i32:
  case Intrinsic::nvvm_tex_1d_v4f32_f32:
  case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
  case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
  case Intrinsic::nvvm_tex_1d_array_v4f32_i32:
  case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
  case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
  case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
  case Intrinsic::nvvm_tex_2d_v4f32_i32:
  case Intrinsic::nvvm_tex_2d_v4f32_f32:
  case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
  case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
  case Intrinsic::nvvm_tex_2d_array_v4f32_i32:
  case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
  case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
  case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
  case Intrinsic::nvvm_tex_3d_v4f32_i32:
  case Intrinsic::nvvm_tex_3d_v4f32_f32:
  case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
  case Intrinsic::nvvm_tex_3d_grad_v4f32_f32: {
    Info.opc = getOpcForTextureInstr(Intrinsic);
    Info.memVT = MVT::f32;
    Info.ptrVal = nullptr;
    Info.readMem = true;
    Info.writeMem = false;

  // Texture reads returning v4i32.
  case Intrinsic::nvvm_tex_1d_v4i32_i32:
  case Intrinsic::nvvm_tex_1d_v4i32_f32:
  case Intrinsic::nvvm_tex_1d_level_v4i32_f32:
  case Intrinsic::nvvm_tex_1d_grad_v4i32_f32:
  case Intrinsic::nvvm_tex_1d_array_v4i32_i32:
  case Intrinsic::nvvm_tex_1d_array_v4i32_f32:
  case Intrinsic::nvvm_tex_1d_array_level_v4i32_f32:
  case Intrinsic::nvvm_tex_1d_array_grad_v4i32_f32:
  case Intrinsic::nvvm_tex_2d_v4i32_i32:
  case Intrinsic::nvvm_tex_2d_v4i32_f32:
  case Intrinsic::nvvm_tex_2d_level_v4i32_f32:
  case Intrinsic::nvvm_tex_2d_grad_v4i32_f32:
  case Intrinsic::nvvm_tex_2d_array_v4i32_i32:
  case Intrinsic::nvvm_tex_2d_array_v4i32_f32:
  case Intrinsic::nvvm_tex_2d_array_level_v4i32_f32:
  case Intrinsic::nvvm_tex_2d_array_grad_v4i32_f32:
  case Intrinsic::nvvm_tex_3d_v4i32_i32:
  case Intrinsic::nvvm_tex_3d_v4i32_f32:
  case Intrinsic::nvvm_tex_3d_level_v4i32_f32:
  case Intrinsic::nvvm_tex_3d_grad_v4i32_f32: {
    Info.opc = getOpcForTextureInstr(Intrinsic);
    Info.memVT = MVT::i32;
    Info.ptrVal = nullptr;
    Info.readMem = true;
    Info.writeMem = false;

  // Surface loads of i8 elements.
  case Intrinsic::nvvm_suld_1d_i8_trap:
  case Intrinsic::nvvm_suld_1d_v2i8_trap:
  case Intrinsic::nvvm_suld_1d_v4i8_trap:
  case Intrinsic::nvvm_suld_1d_array_i8_trap:
  case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
  case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
  case Intrinsic::nvvm_suld_2d_i8_trap:
  case Intrinsic::nvvm_suld_2d_v2i8_trap:
  case Intrinsic::nvvm_suld_2d_v4i8_trap:
  case Intrinsic::nvvm_suld_2d_array_i8_trap:
  case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
  case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
  case Intrinsic::nvvm_suld_3d_i8_trap:
  case Intrinsic::nvvm_suld_3d_v2i8_trap:
  case Intrinsic::nvvm_suld_3d_v4i8_trap: {
    Info.opc = getOpcForSurfaceInstr(Intrinsic);
    Info.memVT = MVT::i8;
    Info.ptrVal = nullptr;
    Info.readMem = true;
    Info.writeMem = false;

  // Surface loads of i16 elements.
  case Intrinsic::nvvm_suld_1d_i16_trap:
  case Intrinsic::nvvm_suld_1d_v2i16_trap:
  case Intrinsic::nvvm_suld_1d_v4i16_trap:
  case Intrinsic::nvvm_suld_1d_array_i16_trap:
  case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
  case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
  case Intrinsic::nvvm_suld_2d_i16_trap:
  case Intrinsic::nvvm_suld_2d_v2i16_trap:
  case Intrinsic::nvvm_suld_2d_v4i16_trap:
  case Intrinsic::nvvm_suld_2d_array_i16_trap:
  case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
  case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
  case Intrinsic::nvvm_suld_3d_i16_trap:
  case Intrinsic::nvvm_suld_3d_v2i16_trap:
  case Intrinsic::nvvm_suld_3d_v4i16_trap: {
    Info.opc = getOpcForSurfaceInstr(Intrinsic);
    Info.memVT = MVT::i16;
    Info.ptrVal = nullptr;
    Info.readMem = true;
    Info.writeMem = false;

  // Surface loads of i32 elements.
  case Intrinsic::nvvm_suld_1d_i32_trap:
  case Intrinsic::nvvm_suld_1d_v2i32_trap:
  case Intrinsic::nvvm_suld_1d_v4i32_trap:
  case Intrinsic::nvvm_suld_1d_array_i32_trap:
  case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
  case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
  case Intrinsic::nvvm_suld_2d_i32_trap:
  case Intrinsic::nvvm_suld_2d_v2i32_trap:
  case Intrinsic::nvvm_suld_2d_v4i32_trap:
  case Intrinsic::nvvm_suld_2d_array_i32_trap:
  case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
  case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
  case Intrinsic::nvvm_suld_3d_i32_trap:
  case Intrinsic::nvvm_suld_3d_v2i32_trap:
  case Intrinsic::nvvm_suld_3d_v4i32_trap: {
    Info.opc = getOpcForSurfaceInstr(Intrinsic);
    Info.memVT = MVT::i32;
    Info.ptrVal = nullptr;
    Info.readMem = true;
    Info.writeMem = false;
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
/// Used to guide target specific optimizations, like loop strength reduction
/// (LoopStrengthReduce.cpp) and memory optimization for address mode
/// (CodeGenPrepare.cpp)
bool NVPTXTargetLowering::isLegalAddressingMode(const AddrMode &AM,

  // AddrMode - This represents an addressing mode of:
  // BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
  //
  // The legal address modes are

    // A global base cannot be combined with any offset, register or scale.
    if (AM.BaseOffs || AM.HasBaseReg || AM.Scale)

  case 0: // "r", "r+i" or "i" is allowed
    if (AM.HasBaseReg) // "r+r+i" or "r+r" is not allowed.
    // Otherwise we have r+i.
  // No scale > 1 is allowed
//===----------------------------------------------------------------------===//
// NVPTX Inline Assembly Support
//===----------------------------------------------------------------------===//

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
NVPTXTargetLowering::ConstraintType
NVPTXTargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
      // Single-letter register constraints map to register classes.
      return C_RegisterClass;
  // Everything else is handled by the generic implementation.
  return TargetLowering::getConstraintType(Constraint);
// Map a single-letter inline-asm register constraint to the NVPTX register
// class that satisfies it; unhandled constraints fall through to the generic
// TargetLowering implementation.
std::pair<unsigned, const TargetRegisterClass *>
NVPTXTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
      return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
      return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
      return std::make_pair(0U, &NVPTX::Int32RegsRegClass);
      return std::make_pair(0U, &NVPTX::Int64RegsRegClass);
      return std::make_pair(0U, &NVPTX::Float32RegsRegClass);
      return std::make_pair(0U, &NVPTX::Float64RegsRegClass);
  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
/// getFunctionAlignment - Return the Log2 alignment of this function.
unsigned NVPTXTargetLowering::getFunctionAlignment(const Function *) const {
2615 //===----------------------------------------------------------------------===//
2616 // NVPTX DAG Combining
2617 //===----------------------------------------------------------------------===//
2619 extern unsigned FMAContractLevel;
/// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
/// operands N0 and N1.  This is a helper for PerformADDCombine that is
/// called with the default operands, and if that fails, with commuted
static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
                                             TargetLowering::DAGCombinerInfo &DCI,
                                             const NVPTXSubtarget &Subtarget,
                                             CodeGenOpt::Level OptLevel) {
  SelectionDAG &DAG = DCI.DAG;
  // Skip non-integer, non-scalar case
  EVT VT=N0.getValueType();

  // fold (add (mul a, b), c) -> (mad a, b, c)
  //
  if (N0.getOpcode() == ISD::MUL) {
    assert (VT.isInteger());
    // Since integer multiply-add costs the same as integer multiply
    // but is more costly than integer add, do the fusion only when
    // the mul is only used in the add.
    if (OptLevel==CodeGenOpt::None || VT != MVT::i32 ||
        !N0.getNode()->hasOneUse())

    // Fold the mul+add into a single integer multiply-add node.
    return DAG.getNode(NVPTXISD::IMAD, SDLoc(N), VT,
                       N0.getOperand(0), N0.getOperand(1), N1);
  else if (N0.getOpcode() == ISD::FMUL) {
    if (VT == MVT::f32 || VT == MVT::f64) {
      // fp contraction must be enabled for FMA fusion.
      if (FMAContractLevel == 0)

      // For floating point:
      // Do the fusion only when the mul has less than 5 uses and all
      // The heuristic is that if a use is not an add, then that use
      // cannot be fused into fma, therefore mul is still needed anyway.
      // If there are more than 4 uses, even if they are all add, fusing
      // them will increase register pressure.
      //
      int nonAddCount = 0;
      for (SDNode::use_iterator UI = N0.getNode()->use_begin(),
                                UE = N0.getNode()->use_end();
        if (User->getOpcode() != ISD::FADD)

      int orderNo = N->getIROrder();
      int orderNo2 = N0.getNode()->getIROrder();
      // simple heuristics here for considering potential register
      // pressure, the logic here is that the difference is used
      // to measure the distance between def and use; the longer the
      // distance, the more likely it causes register pressure.
      if (orderNo - orderNo2 < 500)

      // Now, check if at least one of the FMUL's operands is live beyond the node N,
      // which guarantees that the FMA will not increase register pressure at node N.
      bool opIsLive = false;
      const SDNode *left = N0.getOperand(0).getNode();
      const SDNode *right = N0.getOperand(1).getNode();

      // Constant operands can always be rematerialized, so treat them as live.
      if (dyn_cast<ConstantSDNode>(left) || dyn_cast<ConstantSDNode>(right))

      // An operand is live past N if some user of it has a later IR order.
      for (SDNode::use_iterator UI = left->use_begin(), UE = left->use_end(); UI != UE; ++UI) {
        int orderNo3 = User->getIROrder();
        if (orderNo3 > orderNo) {

      for (SDNode::use_iterator UI = right->use_begin(), UE = right->use_end(); UI != UE; ++UI) {
        int orderNo3 = User->getIROrder();
        if (orderNo3 > orderNo) {

      // All heuristics passed: fuse the fmul+fadd into a single FMA.
      return DAG.getNode(ISD::FMA, SDLoc(N), VT,
                         N0.getOperand(0), N0.getOperand(1), N1);
/// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
/// ADD is commutative, so try both operand orders before giving up.
static SDValue PerformADDCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const NVPTXSubtarget &Subtarget,
                                 CodeGenOpt::Level OptLevel) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // First try with the default operand order.
  SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget,
  if (Result.getNode())

  // If that didn't work, try again with the operands commuted.
  return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget, OptLevel);
static SDValue PerformANDCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  // The type legalizer turns a vector load of i8 values into a zextload to i16
  // registers, optionally ANY_EXTENDs it (if target type is integer),
  // and ANDs off the high 8 bits. Since we turn this load into a
  // target-specific DAG node, the DAG combiner fails to eliminate these AND
  // nodes. Do that here.
  SDValue Val = N->getOperand(0);
  SDValue Mask = N->getOperand(1);

  // Canonicalize so that any constant mask ends up in Mask (RHS).
  if (isa<ConstantSDNode>(Val)) {
    std::swap(Val, Mask);

  // Generally, we will see zextload -> IMOV16rr -> ANY_EXTEND -> and
  // Peel the ANY_EXTEND / IMOV16rr layers to reach the underlying load.
  if (Val.getOpcode() == ISD::ANY_EXTEND) {
    Val = Val->getOperand(0);

  if (Val->isMachineOpcode() && Val->getMachineOpcode() == NVPTX::IMOV16rr) {
    Val = Val->getOperand(0);

  if (Val->getOpcode() == NVPTXISD::LoadV2 ||
      Val->getOpcode() == NVPTXISD::LoadV4) {
    ConstantSDNode *MaskCnst = dyn_cast<ConstantSDNode>(Mask);
      // Not an AND with a constant

    uint64_t MaskVal = MaskCnst->getZExtValue();
    if (MaskVal != 0xff) {
      // Not an AND that chops off top 8 bits

    MemSDNode *Mem = dyn_cast<MemSDNode>(Val);
      // Not a MemSDNode?!?

    EVT MemVT = Mem->getMemoryVT();
    if (MemVT != MVT::v2i8 && MemVT != MVT::v4i8) {
      // We only handle the i8 case

    // The extension kind is encoded as the load node's last operand.
      cast<ConstantSDNode>(Val->getOperand(Val->getNumOperands()-1))->
    if (ExtType == ISD::SEXTLOAD) {
      // If for some reason the load is a sextload, the and is needed to zero
      // out the high 8 bits

    if (AExt.getNode() != 0) {
      // Re-insert the ext as a zext.
      Val = DCI.DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N),
                            AExt.getValueType(), Val);

    // If we get here, the AND is unnecessary.  Just replace it with the load
    DCI.CombineTo(N, Val, AddTo);
// Signedness classification used by the mul.wide demotion helpers below.
enum OperandSignedness {

/// IsMulWideOperandDemotable - Checks if the provided DAG node is an operand
/// that can be demoted to \p OptSize bits without loss of information. The
/// signedness of the operand, if determinable, is placed in \p S.
static bool IsMulWideOperandDemotable(SDValue Op,
                                      OperandSignedness &S) {

  // A sign-extension from exactly OptSize bits proves the value fits in
  // OptSize bits as a signed quantity.
  if (Op.getOpcode() == ISD::SIGN_EXTEND ||
      Op.getOpcode() == ISD::SIGN_EXTEND_INREG) {
    EVT OrigVT = Op.getOperand(0).getValueType();
    if (OrigVT.getSizeInBits() == OptSize) {
  // Likewise a zero-extension proves it fits as an unsigned quantity.
  } else if (Op.getOpcode() == ISD::ZERO_EXTEND) {
    EVT OrigVT = Op.getOperand(0).getValueType();
    if (OrigVT.getSizeInBits() == OptSize) {
/// AreMulWideOperandsDemotable - Checks if the given LHS and RHS operands can
/// be demoted to \p OptSize bits without loss of information. If the operands
/// contain a constant, it should appear as the RHS operand. The signedness of
/// the operands is placed in \p IsSigned.
static bool AreMulWideOperandsDemotable(SDValue LHS, SDValue RHS,

  OperandSignedness LHSSign;

  // The LHS operand must be a demotable op
  if (!IsMulWideOperandDemotable(LHS, OptSize, LHSSign))

  // We should have been able to determine the signedness from the LHS
  if (LHSSign == Unknown)

  IsSigned = (LHSSign == Signed);

  // The RHS can be a demotable op or a constant
  if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(RHS)) {
    APInt Val = CI->getAPIntValue();
    if (LHSSign == Unsigned) {
      // Unsigned path: the constant must fit in OptSize unsigned bits.
      if (Val.isIntN(OptSize)) {
      // Signed path: the constant must fit in OptSize signed bits.
      if (Val.isSignedIntN(OptSize)) {

    OperandSignedness RHSSign;
    if (!IsMulWideOperandDemotable(RHS, OptSize, RHSSign))

    // Both operands must agree on signedness to pick one mul.wide flavor.
    if (LHSSign != RHSSign)
/// TryMULWIDECombine - Attempt to replace a multiply of M bits with a multiply
/// of M/2 bits that produces an M-bit result (i.e. mul.wide). This transform
/// works on both multiply DAG nodes and SHL DAG nodes with a constant shift
static SDValue TryMULWIDECombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  // Only i32 and i64 results have a corresponding mul.wide instruction.
  EVT MulType = N->getValueType(0);
  if (MulType != MVT::i32 && MulType != MVT::i64) {

  // Half the result width: the width we try to demote the operands to.
  unsigned OptSize = MulType.getSizeInBits() >> 1;
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  // Canonicalize the multiply so the constant (if any) is on the right
  if (N->getOpcode() == ISD::MUL) {
    if (isa<ConstantSDNode>(LHS)) {
      std::swap(LHS, RHS);

  // If we have a SHL, determine the actual multiply amount
  if (N->getOpcode() == ISD::SHL) {
    ConstantSDNode *ShlRHS = dyn_cast<ConstantSDNode>(RHS);

    APInt ShiftAmt = ShlRHS->getAPIntValue();
    unsigned BitWidth = MulType.getSizeInBits();
    if (ShiftAmt.sge(0) && ShiftAmt.slt(BitWidth)) {
      // Rewrite shl-by-k as multiply-by-2^k so the mul.wide matching below
      // applies uniformly.
      APInt MulVal = APInt(BitWidth, 1) << ShiftAmt;
      RHS = DCI.DAG.getConstant(MulVal, MulType);

  // Verify that our operands are demotable
  if (!AreMulWideOperandsDemotable(LHS, RHS, OptSize, Signed)) {

  // Pick the demoted operand type (half the result width).
  if (MulType == MVT::i32) {
    DemotedVT = MVT::i16;
    DemotedVT = MVT::i32;

  // Truncate the operands to the correct size. Note that these are just for
  // type consistency and will (likely) be eliminated in later phases.
      DCI.DAG.getNode(ISD::TRUNCATE, SDLoc(N), DemotedVT, LHS);
      DCI.DAG.getNode(ISD::TRUNCATE, SDLoc(N), DemotedVT, RHS);

    Opc = NVPTXISD::MUL_WIDE_SIGNED;
    Opc = NVPTXISD::MUL_WIDE_UNSIGNED;

  return DCI.DAG.getNode(Opc, SDLoc(N), MulType, TruncLHS, TruncRHS);
/// PerformMULCombine - Runs PTX-specific DAG combine patterns on MUL nodes.
static SDValue PerformMULCombine(SDNode *N,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 CodeGenOpt::Level OptLevel) {
    // Try mul.wide combining at OptLevel > 0
    SDValue Ret = TryMULWIDECombine(N, DCI);
2982 /// PerformSHLCombine - Runs PTX-specific DAG combine patterns on SHL nodes.
// SHL-by-constant is a multiply in disguise, so the same mul.wide combine
// applies; the OptLevel gate and fallthrough return are on elided lines.
2983 static SDValue PerformSHLCombine(SDNode *N,
2984 TargetLowering::DAGCombinerInfo &DCI,
2985 CodeGenOpt::Level OptLevel) {
2987 // Try mul.wide combining at OptLevel > 0
2988 SDValue Ret = TryMULWIDECombine(N, DCI);
// Target hook: dispatches PTX-specific combines by opcode. The `case` labels
// (ISD::ADD/FADD, ISD::MUL, ISD::SHL, ISD::AND) sit on lines elided from
// this excerpt — confirm against the full file.
2996 SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
2997 DAGCombinerInfo &DCI) const {
2998 // FIXME: Get this from the DAG somehow
// Hard-coded to Aggressive because the real opt level is not reachable here.
2999 CodeGenOpt::Level OptLevel = CodeGenOpt::Aggressive;
3000 switch (N->getOpcode()) {
3004 return PerformADDCombine(N, DCI, nvptxSubtarget, OptLevel);
3006 return PerformMULCombine(N, DCI, OptLevel);
3008 return PerformSHLCombine(N, DCI, OptLevel);
3010 return PerformANDCombine(N, DCI);
3015 /// ReplaceVectorLoad - Convert vector loads into multi-output scalar loads.
// Emits NVPTXISD::LoadV2/LoadV4 (target nodes with one result per element
// plus a chain), then rebuilds the original vector value with BUILD_VECTOR.
// Pushes two results: the vector value and the load chain.
3016 static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
3017 SmallVectorImpl<SDValue> &Results) {
3018 EVT ResVT = N->getValueType(0);
3021 assert(ResVT.isVector() && "Vector load must have vector type");
3023 // We only handle "native" vector sizes for now, e.g. <4 x double> is not
3024 // legal. We can (and should) split that into 2 loads of <2 x double> here
3025 // but I'm leaving that as a TODO for now.
3026 assert(ResVT.isSimple() && "Can only handle simple types");
// NOTE(review): the accepted case labels of this switch (and the bail-out
// default) are on elided lines — confirm against the full file.
3027 switch (ResVT.getSimpleVT().SimpleTy) {
3040 // This is a "native" vector type
3044 EVT EltVT = ResVT.getVectorElementType();
3045 unsigned NumElts = ResVT.getVectorNumElements();
3047 // Since LoadV2 is a target node, we cannot rely on DAG type legalization.
3048 // Therefore, we must ensure the type is legal. For i1 and i8, we set the
3049 // loaded type to i16 and propagate the "real" type as the memory type.
3050 bool NeedTrunc = false;
3051 if (EltVT.getSizeInBits() < 16) {
// Opcode/LdResVTs are selected by element count: 2 -> LoadV2, 4 -> LoadV4
// (the switch header and NumElts cases are on elided lines).
3056 unsigned Opcode = 0;
3063 Opcode = NVPTXISD::LoadV2;
3064 LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
3067 Opcode = NVPTXISD::LoadV4;
3068 EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
3069 LdResVTs = DAG.getVTList(ListVTs);
3074 SmallVector<SDValue, 8> OtherOps;
3076 // Copy regular operands
3077 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
3078 OtherOps.push_back(N->getOperand(i));
3080 LoadSDNode *LD = cast<LoadSDNode>(N);
3082 // The select routine does not have access to the LoadSDNode instance, so
3083 // pass along the extension information
3084 OtherOps.push_back(DAG.getIntPtrConstant(LD->getExtensionType()));
// Memory VT/operand come from the original load so alias info is preserved.
3086 SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps,
3088 LD->getMemOperand());
3090 SmallVector<SDValue, 4> ScalarRes;
3092 for (unsigned i = 0; i < NumElts; ++i) {
3093 SDValue Res = NewLD.getValue(i);
// Truncate i16-widened results back to the real (sub-16-bit) element type;
// the NeedTrunc guard around this line is elided in this excerpt.
3095 Res = DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
3096 ScalarRes.push_back(Res);
// Result NumElts of the new node is the chain.
3099 SDValue LoadChain = NewLD.getValue(NumElts);
3101 SDValue BuildVec = DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, ScalarRes);
3103 Results.push_back(BuildVec);
3104 Results.push_back(LoadChain);
// Custom-legalizes INTRINSIC_W_CHAIN nodes for the ldu/ldg intrinsics:
// vector results become LDGV2/LDGV4/LDUV2/LDUV4 target nodes; scalar i8
// results are widened to i16 loads and truncated back.
3107 static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
3108 SmallVectorImpl<SDValue> &Results) {
3109 SDValue Chain = N->getOperand(0);
3110 SDValue Intrin = N->getOperand(1);
3113 // Get the intrinsic ID
3114 unsigned IntrinNo = cast<ConstantSDNode>(Intrin.getNode())->getZExtValue();
3118 case Intrinsic::nvvm_ldg_global_i:
3119 case Intrinsic::nvvm_ldg_global_f:
3120 case Intrinsic::nvvm_ldg_global_p:
3121 case Intrinsic::nvvm_ldu_global_i:
3122 case Intrinsic::nvvm_ldu_global_f:
3123 case Intrinsic::nvvm_ldu_global_p: {
3124 EVT ResVT = N->getValueType(0);
// Vector path: split into one result per element, mirroring
// ReplaceLoadVector above.
3126 if (ResVT.isVector()) {
3129 unsigned NumElts = ResVT.getVectorNumElements();
3130 EVT EltVT = ResVT.getVectorElementType();
3132 // Since LDU/LDG are target nodes, we cannot rely on DAG type
3134 // Therefore, we must ensure the type is legal. For i1 and i8, we set the
3135 // loaded type to i16 and propagate the "real" type as the memory type.
3136 bool NeedTrunc = false;
3137 if (EltVT.getSizeInBits() < 16) {
// Inner switches pick V2 vs. V4 opcodes by NumElts, and LDG vs. LDU by
// intrinsic ID (several switch headers/breaks are elided in this excerpt).
3142 unsigned Opcode = 0;
3152 case Intrinsic::nvvm_ldg_global_i:
3153 case Intrinsic::nvvm_ldg_global_f:
3154 case Intrinsic::nvvm_ldg_global_p:
3155 Opcode = NVPTXISD::LDGV2;
3157 case Intrinsic::nvvm_ldu_global_i:
3158 case Intrinsic::nvvm_ldu_global_f:
3159 case Intrinsic::nvvm_ldu_global_p:
3160 Opcode = NVPTXISD::LDUV2;
3163 LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
3169 case Intrinsic::nvvm_ldg_global_i:
3170 case Intrinsic::nvvm_ldg_global_f:
3171 case Intrinsic::nvvm_ldg_global_p:
3172 Opcode = NVPTXISD::LDGV4;
3174 case Intrinsic::nvvm_ldu_global_i:
3175 case Intrinsic::nvvm_ldu_global_f:
3176 case Intrinsic::nvvm_ldu_global_p:
3177 Opcode = NVPTXISD::LDUV4;
3180 EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
3181 LdResVTs = DAG.getVTList(ListVTs);
3186 SmallVector<SDValue, 8> OtherOps;
3188 // Copy regular operands
3190 OtherOps.push_back(Chain); // Chain
3191 // Skip operand 1 (intrinsic ID)
3193 for (unsigned i = 2, e = N->getNumOperands(); i != e; ++i)
3194 OtherOps.push_back(N->getOperand(i));
3196 MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);
// Preserve the original memory VT/operand for alias analysis and isel.
3198 SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps,
3199 MemSD->getMemoryVT(),
3200 MemSD->getMemOperand());
3202 SmallVector<SDValue, 4> ScalarRes;
3204 for (unsigned i = 0; i < NumElts; ++i) {
3205 SDValue Res = NewLD.getValue(i);
// Truncate i16-widened elements back to the real element type; the
// NeedTrunc guard around this line is elided in this excerpt.
3208 DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
3209 ScalarRes.push_back(Res);
3212 SDValue LoadChain = NewLD.getValue(NumElts);
3215 DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, ScalarRes);
3217 Results.push_back(BuildVec);
3218 Results.push_back(LoadChain);
// Scalar path: only i8 results need custom handling here.
3221 assert(ResVT.isSimple() && ResVT.getSimpleVT().SimpleTy == MVT::i8 &&
3222 "Custom handling of non-i8 ldu/ldg?");
3224 // Just copy all operands as-is
3225 SmallVector<SDValue, 4> Ops;
3226 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
3227 Ops.push_back(N->getOperand(i));
3229 // Force output to i16
3230 SDVTList LdResVTs = DAG.getVTList(MVT::i16, MVT::Other);
3232 MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);
3234 // We make sure the memory type is i8, which will be used during isel
3235 // to select the proper instruction.
3237 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, LdResVTs, Ops,
3238 MVT::i8, MemSD->getMemOperand());
// Result 0: value truncated back to i8; result 1: the chain.
3240 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
3241 NewLD.getValue(0)));
3242 Results.push_back(NewLD.getValue(1));
// Target hook invoked by type legalization for nodes with illegal result
// types. The `default:`, `case ISD::LOAD:` labels and the `return`s after
// each helper call are on lines elided from this excerpt.
3248 void NVPTXTargetLowering::ReplaceNodeResults(
3249 SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
3250 switch (N->getOpcode()) {
// Anything not explicitly handled below is a legalization bug.
3252 report_fatal_error("Unhandled custom legalization");
3254 ReplaceLoadVector(N, DAG, Results);
3256 case ISD::INTRINSIC_W_CHAIN:
3257 ReplaceINTRINSIC_W_CHAIN(N, DAG, Results);
3262 // Pin NVPTXSection's and NVPTXTargetObjectFile's vtables to this file.
// Out-of-line definition of the virtual anchor() forces the compiler to
// emit the vtable in this translation unit (standard LLVM idiom).
3263 void NVPTXSection::anchor() {}
3265 NVPTXTargetObjectFile::~NVPTXTargetObjectFile() {
3269 delete ReadOnlySection;
3271 delete StaticCtorSection;
3272 delete StaticDtorSection;
3274 delete EHFrameSection;
3275 delete DwarfAbbrevSection;
3276 delete DwarfInfoSection;
3277 delete DwarfLineSection;
3278 delete DwarfFrameSection;
3279 delete DwarfPubTypesSection;
3280 delete DwarfDebugInlineSection;
3281 delete DwarfStrSection;
3282 delete DwarfLocSection;
3283 delete DwarfARangesSection;
3284 delete DwarfRangesSection;
3285 delete DwarfMacroInfoSection;