//===-- NVPTXISelLowering.cpp - NVPTX DAG Lowering Implementation ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that NVPTX uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#include "NVPTXISelLowering.h"
#include "NVPTXTargetMachine.h"
#include "NVPTXTargetObjectFile.h"
#include "NVPTXUtilities.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <sstream>
40 #define DEBUG_TYPE "nvptx-lower"
44 static unsigned int uniqueCallSite = 0;
46 static cl::opt<bool> sched4reg(
48 cl::desc("NVPTX Specific: schedule for register pressue"), cl::init(false));
50 static bool IsPTXVectorType(MVT VT) {
51 switch (VT.SimpleTy) {
70 /// ComputePTXValueVTs - For the given Type \p Ty, returns the set of primitive
71 /// EVTs that compose it. Unlike ComputeValueVTs, this will break apart vectors
72 /// into their primitive components.
73 /// NOTE: This is a band-aid for code that expects ComputeValueVTs to return the
74 /// same number of types as the Ins/Outs arrays in LowerFormalArguments,
75 /// LowerCall, and LowerReturn.
76 static void ComputePTXValueVTs(const TargetLowering &TLI, Type *Ty,
77 SmallVectorImpl<EVT> &ValueVTs,
78 SmallVectorImpl<uint64_t> *Offsets = nullptr,
79 uint64_t StartingOffset = 0) {
80 SmallVector<EVT, 16> TempVTs;
81 SmallVector<uint64_t, 16> TempOffsets;
83 ComputeValueVTs(TLI, Ty, TempVTs, &TempOffsets, StartingOffset);
84 for (unsigned i = 0, e = TempVTs.size(); i != e; ++i) {
86 uint64_t Off = TempOffsets[i];
88 for (unsigned j = 0, je = VT.getVectorNumElements(); j != je; ++j) {
89 ValueVTs.push_back(VT.getVectorElementType());
91 Offsets->push_back(Off+j*VT.getVectorElementType().getStoreSize());
94 ValueVTs.push_back(VT);
96 Offsets->push_back(Off);
101 // NVPTXTargetLowering Constructor.
102 NVPTXTargetLowering::NVPTXTargetLowering(NVPTXTargetMachine &TM)
103 : TargetLowering(TM, new NVPTXTargetObjectFile()), nvTM(&TM),
104 nvptxSubtarget(TM.getSubtarget<NVPTXSubtarget>()) {
106 // always lower memset, memcpy, and memmove intrinsics to load/store
107 // instructions, rather
108 // then generating calls to memset, mempcy or memmove.
109 MaxStoresPerMemset = (unsigned) 0xFFFFFFFF;
110 MaxStoresPerMemcpy = (unsigned) 0xFFFFFFFF;
111 MaxStoresPerMemmove = (unsigned) 0xFFFFFFFF;
113 setBooleanContents(ZeroOrNegativeOneBooleanContent);
115 // Jump is Expensive. Don't create extra control flow for 'and', 'or'
116 // condition branches.
117 setJumpIsExpensive(true);
119 // By default, use the Source scheduling
121 setSchedulingPreference(Sched::RegPressure);
123 setSchedulingPreference(Sched::Source);
125 addRegisterClass(MVT::i1, &NVPTX::Int1RegsRegClass);
126 addRegisterClass(MVT::i16, &NVPTX::Int16RegsRegClass);
127 addRegisterClass(MVT::i32, &NVPTX::Int32RegsRegClass);
128 addRegisterClass(MVT::i64, &NVPTX::Int64RegsRegClass);
129 addRegisterClass(MVT::f32, &NVPTX::Float32RegsRegClass);
130 addRegisterClass(MVT::f64, &NVPTX::Float64RegsRegClass);
132 // Operations not directly supported by NVPTX.
133 setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
134 setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
135 setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);
136 setOperationAction(ISD::SELECT_CC, MVT::i8, Expand);
137 setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);
138 setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
139 setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
140 setOperationAction(ISD::BR_CC, MVT::f32, Expand);
141 setOperationAction(ISD::BR_CC, MVT::f64, Expand);
142 setOperationAction(ISD::BR_CC, MVT::i1, Expand);
143 setOperationAction(ISD::BR_CC, MVT::i8, Expand);
144 setOperationAction(ISD::BR_CC, MVT::i16, Expand);
145 setOperationAction(ISD::BR_CC, MVT::i32, Expand);
146 setOperationAction(ISD::BR_CC, MVT::i64, Expand);
147 // Some SIGN_EXTEND_INREG can be done using cvt instruction.
148 // For others we will expand to a SHL/SRA pair.
149 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i64, Legal);
150 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
151 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
152 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);
153 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
155 if (nvptxSubtarget.hasROT64()) {
156 setOperationAction(ISD::ROTL, MVT::i64, Legal);
157 setOperationAction(ISD::ROTR, MVT::i64, Legal);
159 setOperationAction(ISD::ROTL, MVT::i64, Expand);
160 setOperationAction(ISD::ROTR, MVT::i64, Expand);
162 if (nvptxSubtarget.hasROT32()) {
163 setOperationAction(ISD::ROTL, MVT::i32, Legal);
164 setOperationAction(ISD::ROTR, MVT::i32, Legal);
166 setOperationAction(ISD::ROTL, MVT::i32, Expand);
167 setOperationAction(ISD::ROTR, MVT::i32, Expand);
170 setOperationAction(ISD::ROTL, MVT::i16, Expand);
171 setOperationAction(ISD::ROTR, MVT::i16, Expand);
172 setOperationAction(ISD::ROTL, MVT::i8, Expand);
173 setOperationAction(ISD::ROTR, MVT::i8, Expand);
174 setOperationAction(ISD::BSWAP, MVT::i16, Expand);
175 setOperationAction(ISD::BSWAP, MVT::i32, Expand);
176 setOperationAction(ISD::BSWAP, MVT::i64, Expand);
178 // Indirect branch is not supported.
179 // This also disables Jump Table creation.
180 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
181 setOperationAction(ISD::BRIND, MVT::Other, Expand);
183 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
184 setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
186 // We want to legalize constant related memmove and memcopy
188 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
190 // Turn FP extload into load/fextend
191 setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
192 // Turn FP truncstore into trunc + store.
193 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
195 // PTX does not support load / store predicate registers
196 setOperationAction(ISD::LOAD, MVT::i1, Custom);
197 setOperationAction(ISD::STORE, MVT::i1, Custom);
199 setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
200 setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
201 setTruncStoreAction(MVT::i64, MVT::i1, Expand);
202 setTruncStoreAction(MVT::i32, MVT::i1, Expand);
203 setTruncStoreAction(MVT::i16, MVT::i1, Expand);
204 setTruncStoreAction(MVT::i8, MVT::i1, Expand);
206 // This is legal in NVPTX
207 setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
208 setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
210 // TRAP can be lowered to PTX trap
211 setOperationAction(ISD::TRAP, MVT::Other, Legal);
213 setOperationAction(ISD::ADDC, MVT::i64, Expand);
214 setOperationAction(ISD::ADDE, MVT::i64, Expand);
216 // Register custom handling for vector loads/stores
217 for (int i = MVT::FIRST_VECTOR_VALUETYPE; i <= MVT::LAST_VECTOR_VALUETYPE;
219 MVT VT = (MVT::SimpleValueType) i;
220 if (IsPTXVectorType(VT)) {
221 setOperationAction(ISD::LOAD, VT, Custom);
222 setOperationAction(ISD::STORE, VT, Custom);
223 setOperationAction(ISD::INTRINSIC_W_CHAIN, VT, Custom);
227 // Custom handling for i8 intrinsics
228 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
230 setOperationAction(ISD::CTLZ, MVT::i16, Legal);
231 setOperationAction(ISD::CTLZ, MVT::i32, Legal);
232 setOperationAction(ISD::CTLZ, MVT::i64, Legal);
233 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Legal);
234 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Legal);
235 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Legal);
236 setOperationAction(ISD::CTTZ, MVT::i16, Expand);
237 setOperationAction(ISD::CTTZ, MVT::i32, Expand);
238 setOperationAction(ISD::CTTZ, MVT::i64, Expand);
239 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Expand);
240 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
241 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
242 setOperationAction(ISD::CTPOP, MVT::i16, Legal);
243 setOperationAction(ISD::CTPOP, MVT::i32, Legal);
244 setOperationAction(ISD::CTPOP, MVT::i64, Legal);
246 // Now deduce the information based on the above mentioned
248 computeRegisterProperties();
251 const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const {
256 return "NVPTXISD::CALL";
257 case NVPTXISD::RET_FLAG:
258 return "NVPTXISD::RET_FLAG";
259 case NVPTXISD::Wrapper:
260 return "NVPTXISD::Wrapper";
261 case NVPTXISD::DeclareParam:
262 return "NVPTXISD::DeclareParam";
263 case NVPTXISD::DeclareScalarParam:
264 return "NVPTXISD::DeclareScalarParam";
265 case NVPTXISD::DeclareRet:
266 return "NVPTXISD::DeclareRet";
267 case NVPTXISD::DeclareRetParam:
268 return "NVPTXISD::DeclareRetParam";
269 case NVPTXISD::PrintCall:
270 return "NVPTXISD::PrintCall";
271 case NVPTXISD::LoadParam:
272 return "NVPTXISD::LoadParam";
273 case NVPTXISD::LoadParamV2:
274 return "NVPTXISD::LoadParamV2";
275 case NVPTXISD::LoadParamV4:
276 return "NVPTXISD::LoadParamV4";
277 case NVPTXISD::StoreParam:
278 return "NVPTXISD::StoreParam";
279 case NVPTXISD::StoreParamV2:
280 return "NVPTXISD::StoreParamV2";
281 case NVPTXISD::StoreParamV4:
282 return "NVPTXISD::StoreParamV4";
283 case NVPTXISD::StoreParamS32:
284 return "NVPTXISD::StoreParamS32";
285 case NVPTXISD::StoreParamU32:
286 return "NVPTXISD::StoreParamU32";
287 case NVPTXISD::CallArgBegin:
288 return "NVPTXISD::CallArgBegin";
289 case NVPTXISD::CallArg:
290 return "NVPTXISD::CallArg";
291 case NVPTXISD::LastCallArg:
292 return "NVPTXISD::LastCallArg";
293 case NVPTXISD::CallArgEnd:
294 return "NVPTXISD::CallArgEnd";
295 case NVPTXISD::CallVoid:
296 return "NVPTXISD::CallVoid";
297 case NVPTXISD::CallVal:
298 return "NVPTXISD::CallVal";
299 case NVPTXISD::CallSymbol:
300 return "NVPTXISD::CallSymbol";
301 case NVPTXISD::Prototype:
302 return "NVPTXISD::Prototype";
303 case NVPTXISD::MoveParam:
304 return "NVPTXISD::MoveParam";
305 case NVPTXISD::StoreRetval:
306 return "NVPTXISD::StoreRetval";
307 case NVPTXISD::StoreRetvalV2:
308 return "NVPTXISD::StoreRetvalV2";
309 case NVPTXISD::StoreRetvalV4:
310 return "NVPTXISD::StoreRetvalV4";
311 case NVPTXISD::PseudoUseParam:
312 return "NVPTXISD::PseudoUseParam";
313 case NVPTXISD::RETURN:
314 return "NVPTXISD::RETURN";
315 case NVPTXISD::CallSeqBegin:
316 return "NVPTXISD::CallSeqBegin";
317 case NVPTXISD::CallSeqEnd:
318 return "NVPTXISD::CallSeqEnd";
319 case NVPTXISD::CallPrototype:
320 return "NVPTXISD::CallPrototype";
321 case NVPTXISD::LoadV2:
322 return "NVPTXISD::LoadV2";
323 case NVPTXISD::LoadV4:
324 return "NVPTXISD::LoadV4";
325 case NVPTXISD::LDGV2:
326 return "NVPTXISD::LDGV2";
327 case NVPTXISD::LDGV4:
328 return "NVPTXISD::LDGV4";
329 case NVPTXISD::LDUV2:
330 return "NVPTXISD::LDUV2";
331 case NVPTXISD::LDUV4:
332 return "NVPTXISD::LDUV4";
333 case NVPTXISD::StoreV2:
334 return "NVPTXISD::StoreV2";
335 case NVPTXISD::StoreV4:
336 return "NVPTXISD::StoreV4";
337 case NVPTXISD::Tex1DFloatI32: return "NVPTXISD::Tex1DFloatI32";
338 case NVPTXISD::Tex1DFloatFloat: return "NVPTXISD::Tex1DFloatFloat";
339 case NVPTXISD::Tex1DFloatFloatLevel:
340 return "NVPTXISD::Tex1DFloatFloatLevel";
341 case NVPTXISD::Tex1DFloatFloatGrad:
342 return "NVPTXISD::Tex1DFloatFloatGrad";
343 case NVPTXISD::Tex1DI32I32: return "NVPTXISD::Tex1DI32I32";
344 case NVPTXISD::Tex1DI32Float: return "NVPTXISD::Tex1DI32Float";
345 case NVPTXISD::Tex1DI32FloatLevel:
346 return "NVPTXISD::Tex1DI32FloatLevel";
347 case NVPTXISD::Tex1DI32FloatGrad:
348 return "NVPTXISD::Tex1DI32FloatGrad";
349 case NVPTXISD::Tex1DArrayFloatI32: return "NVPTXISD::Tex2DArrayFloatI32";
350 case NVPTXISD::Tex1DArrayFloatFloat: return "NVPTXISD::Tex2DArrayFloatFloat";
351 case NVPTXISD::Tex1DArrayFloatFloatLevel:
352 return "NVPTXISD::Tex2DArrayFloatFloatLevel";
353 case NVPTXISD::Tex1DArrayFloatFloatGrad:
354 return "NVPTXISD::Tex2DArrayFloatFloatGrad";
355 case NVPTXISD::Tex1DArrayI32I32: return "NVPTXISD::Tex2DArrayI32I32";
356 case NVPTXISD::Tex1DArrayI32Float: return "NVPTXISD::Tex2DArrayI32Float";
357 case NVPTXISD::Tex1DArrayI32FloatLevel:
358 return "NVPTXISD::Tex2DArrayI32FloatLevel";
359 case NVPTXISD::Tex1DArrayI32FloatGrad:
360 return "NVPTXISD::Tex2DArrayI32FloatGrad";
361 case NVPTXISD::Tex2DFloatI32: return "NVPTXISD::Tex2DFloatI32";
362 case NVPTXISD::Tex2DFloatFloat: return "NVPTXISD::Tex2DFloatFloat";
363 case NVPTXISD::Tex2DFloatFloatLevel:
364 return "NVPTXISD::Tex2DFloatFloatLevel";
365 case NVPTXISD::Tex2DFloatFloatGrad:
366 return "NVPTXISD::Tex2DFloatFloatGrad";
367 case NVPTXISD::Tex2DI32I32: return "NVPTXISD::Tex2DI32I32";
368 case NVPTXISD::Tex2DI32Float: return "NVPTXISD::Tex2DI32Float";
369 case NVPTXISD::Tex2DI32FloatLevel:
370 return "NVPTXISD::Tex2DI32FloatLevel";
371 case NVPTXISD::Tex2DI32FloatGrad:
372 return "NVPTXISD::Tex2DI32FloatGrad";
373 case NVPTXISD::Tex2DArrayFloatI32: return "NVPTXISD::Tex2DArrayFloatI32";
374 case NVPTXISD::Tex2DArrayFloatFloat: return "NVPTXISD::Tex2DArrayFloatFloat";
375 case NVPTXISD::Tex2DArrayFloatFloatLevel:
376 return "NVPTXISD::Tex2DArrayFloatFloatLevel";
377 case NVPTXISD::Tex2DArrayFloatFloatGrad:
378 return "NVPTXISD::Tex2DArrayFloatFloatGrad";
379 case NVPTXISD::Tex2DArrayI32I32: return "NVPTXISD::Tex2DArrayI32I32";
380 case NVPTXISD::Tex2DArrayI32Float: return "NVPTXISD::Tex2DArrayI32Float";
381 case NVPTXISD::Tex2DArrayI32FloatLevel:
382 return "NVPTXISD::Tex2DArrayI32FloatLevel";
383 case NVPTXISD::Tex2DArrayI32FloatGrad:
384 return "NVPTXISD::Tex2DArrayI32FloatGrad";
385 case NVPTXISD::Tex3DFloatI32: return "NVPTXISD::Tex3DFloatI32";
386 case NVPTXISD::Tex3DFloatFloat: return "NVPTXISD::Tex3DFloatFloat";
387 case NVPTXISD::Tex3DFloatFloatLevel:
388 return "NVPTXISD::Tex3DFloatFloatLevel";
389 case NVPTXISD::Tex3DFloatFloatGrad:
390 return "NVPTXISD::Tex3DFloatFloatGrad";
391 case NVPTXISD::Tex3DI32I32: return "NVPTXISD::Tex3DI32I32";
392 case NVPTXISD::Tex3DI32Float: return "NVPTXISD::Tex3DI32Float";
393 case NVPTXISD::Tex3DI32FloatLevel:
394 return "NVPTXISD::Tex3DI32FloatLevel";
395 case NVPTXISD::Tex3DI32FloatGrad:
396 return "NVPTXISD::Tex3DI32FloatGrad";
398 case NVPTXISD::Suld1DI8Trap: return "NVPTXISD::Suld1DI8Trap";
399 case NVPTXISD::Suld1DI16Trap: return "NVPTXISD::Suld1DI16Trap";
400 case NVPTXISD::Suld1DI32Trap: return "NVPTXISD::Suld1DI32Trap";
401 case NVPTXISD::Suld1DV2I8Trap: return "NVPTXISD::Suld1DV2I8Trap";
402 case NVPTXISD::Suld1DV2I16Trap: return "NVPTXISD::Suld1DV2I16Trap";
403 case NVPTXISD::Suld1DV2I32Trap: return "NVPTXISD::Suld1DV2I32Trap";
404 case NVPTXISD::Suld1DV4I8Trap: return "NVPTXISD::Suld1DV4I8Trap";
405 case NVPTXISD::Suld1DV4I16Trap: return "NVPTXISD::Suld1DV4I16Trap";
406 case NVPTXISD::Suld1DV4I32Trap: return "NVPTXISD::Suld1DV4I32Trap";
408 case NVPTXISD::Suld1DArrayI8Trap: return "NVPTXISD::Suld1DArrayI8Trap";
409 case NVPTXISD::Suld1DArrayI16Trap: return "NVPTXISD::Suld1DArrayI16Trap";
410 case NVPTXISD::Suld1DArrayI32Trap: return "NVPTXISD::Suld1DArrayI32Trap";
411 case NVPTXISD::Suld1DArrayV2I8Trap: return "NVPTXISD::Suld1DArrayV2I8Trap";
412 case NVPTXISD::Suld1DArrayV2I16Trap: return "NVPTXISD::Suld1DArrayV2I16Trap";
413 case NVPTXISD::Suld1DArrayV2I32Trap: return "NVPTXISD::Suld1DArrayV2I32Trap";
414 case NVPTXISD::Suld1DArrayV4I8Trap: return "NVPTXISD::Suld1DArrayV4I8Trap";
415 case NVPTXISD::Suld1DArrayV4I16Trap: return "NVPTXISD::Suld1DArrayV4I16Trap";
416 case NVPTXISD::Suld1DArrayV4I32Trap: return "NVPTXISD::Suld1DArrayV4I32Trap";
418 case NVPTXISD::Suld2DI8Trap: return "NVPTXISD::Suld2DI8Trap";
419 case NVPTXISD::Suld2DI16Trap: return "NVPTXISD::Suld2DI16Trap";
420 case NVPTXISD::Suld2DI32Trap: return "NVPTXISD::Suld2DI32Trap";
421 case NVPTXISD::Suld2DV2I8Trap: return "NVPTXISD::Suld2DV2I8Trap";
422 case NVPTXISD::Suld2DV2I16Trap: return "NVPTXISD::Suld2DV2I16Trap";
423 case NVPTXISD::Suld2DV2I32Trap: return "NVPTXISD::Suld2DV2I32Trap";
424 case NVPTXISD::Suld2DV4I8Trap: return "NVPTXISD::Suld2DV4I8Trap";
425 case NVPTXISD::Suld2DV4I16Trap: return "NVPTXISD::Suld2DV4I16Trap";
426 case NVPTXISD::Suld2DV4I32Trap: return "NVPTXISD::Suld2DV4I32Trap";
428 case NVPTXISD::Suld2DArrayI8Trap: return "NVPTXISD::Suld2DArrayI8Trap";
429 case NVPTXISD::Suld2DArrayI16Trap: return "NVPTXISD::Suld2DArrayI16Trap";
430 case NVPTXISD::Suld2DArrayI32Trap: return "NVPTXISD::Suld2DArrayI32Trap";
431 case NVPTXISD::Suld2DArrayV2I8Trap: return "NVPTXISD::Suld2DArrayV2I8Trap";
432 case NVPTXISD::Suld2DArrayV2I16Trap: return "NVPTXISD::Suld2DArrayV2I16Trap";
433 case NVPTXISD::Suld2DArrayV2I32Trap: return "NVPTXISD::Suld2DArrayV2I32Trap";
434 case NVPTXISD::Suld2DArrayV4I8Trap: return "NVPTXISD::Suld2DArrayV4I8Trap";
435 case NVPTXISD::Suld2DArrayV4I16Trap: return "NVPTXISD::Suld2DArrayV4I16Trap";
436 case NVPTXISD::Suld2DArrayV4I32Trap: return "NVPTXISD::Suld2DArrayV4I32Trap";
438 case NVPTXISD::Suld3DI8Trap: return "NVPTXISD::Suld3DI8Trap";
439 case NVPTXISD::Suld3DI16Trap: return "NVPTXISD::Suld3DI16Trap";
440 case NVPTXISD::Suld3DI32Trap: return "NVPTXISD::Suld3DI32Trap";
441 case NVPTXISD::Suld3DV2I8Trap: return "NVPTXISD::Suld3DV2I8Trap";
442 case NVPTXISD::Suld3DV2I16Trap: return "NVPTXISD::Suld3DV2I16Trap";
443 case NVPTXISD::Suld3DV2I32Trap: return "NVPTXISD::Suld3DV2I32Trap";
444 case NVPTXISD::Suld3DV4I8Trap: return "NVPTXISD::Suld3DV4I8Trap";
445 case NVPTXISD::Suld3DV4I16Trap: return "NVPTXISD::Suld3DV4I16Trap";
446 case NVPTXISD::Suld3DV4I32Trap: return "NVPTXISD::Suld3DV4I32Trap";
450 bool NVPTXTargetLowering::shouldSplitVectorType(EVT VT) const {
451 return VT.getScalarType() == MVT::i1;
455 NVPTXTargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
457 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
458 Op = DAG.getTargetGlobalAddress(GV, dl, getPointerTy());
459 return DAG.getNode(NVPTXISD::Wrapper, dl, getPointerTy(), Op);
463 NVPTXTargetLowering::getPrototype(Type *retTy, const ArgListTy &Args,
464 const SmallVectorImpl<ISD::OutputArg> &Outs,
465 unsigned retAlignment,
466 const ImmutableCallSite *CS) const {
468 bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
469 assert(isABI && "Non-ABI compilation is not supported");
474 O << "prototype_" << uniqueCallSite << " : .callprototype ";
476 if (retTy->getTypeID() == Type::VoidTyID) {
480 if (retTy->isFloatingPointTy() || retTy->isIntegerTy()) {
482 if (const IntegerType *ITy = dyn_cast<IntegerType>(retTy)) {
483 size = ITy->getBitWidth();
487 assert(retTy->isFloatingPointTy() &&
488 "Floating point type expected here");
489 size = retTy->getPrimitiveSizeInBits();
492 O << ".param .b" << size << " _";
493 } else if (isa<PointerType>(retTy)) {
494 O << ".param .b" << getPointerTy().getSizeInBits() << " _";
496 if ((retTy->getTypeID() == Type::StructTyID) || isa<VectorType>(retTy)) {
497 SmallVector<EVT, 16> vtparts;
498 ComputeValueVTs(*this, retTy, vtparts);
499 unsigned totalsz = 0;
500 for (unsigned i = 0, e = vtparts.size(); i != e; ++i) {
502 EVT elemtype = vtparts[i];
503 if (vtparts[i].isVector()) {
504 elems = vtparts[i].getVectorNumElements();
505 elemtype = vtparts[i].getVectorElementType();
507 // TODO: no need to loop
508 for (unsigned j = 0, je = elems; j != je; ++j) {
509 unsigned sz = elemtype.getSizeInBits();
510 if (elemtype.isInteger() && (sz < 8))
515 O << ".param .align " << retAlignment << " .b8 _[" << totalsz << "]";
517 assert(false && "Unknown return type");
525 MVT thePointerTy = getPointerTy();
528 for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {
529 Type *Ty = Args[i].Ty;
535 if (Outs[OIdx].Flags.isByVal() == false) {
536 if (Ty->isAggregateType() || Ty->isVectorTy()) {
538 const CallInst *CallI = cast<CallInst>(CS->getInstruction());
539 const DataLayout *TD = getDataLayout();
540 // +1 because index 0 is reserved for return type alignment
541 if (!llvm::getAlign(*CallI, i + 1, align))
542 align = TD->getABITypeAlignment(Ty);
543 unsigned sz = TD->getTypeAllocSize(Ty);
544 O << ".param .align " << align << " .b8 ";
546 O << "[" << sz << "]";
547 // update the index for Outs
548 SmallVector<EVT, 16> vtparts;
549 ComputeValueVTs(*this, Ty, vtparts);
550 if (unsigned len = vtparts.size())
554 // i8 types in IR will be i16 types in SDAG
555 assert((getValueType(Ty) == Outs[OIdx].VT ||
556 (getValueType(Ty) == MVT::i8 && Outs[OIdx].VT == MVT::i16)) &&
557 "type mismatch between callee prototype and arguments");
560 if (isa<IntegerType>(Ty)) {
561 sz = cast<IntegerType>(Ty)->getBitWidth();
564 } else if (isa<PointerType>(Ty))
565 sz = thePointerTy.getSizeInBits();
567 sz = Ty->getPrimitiveSizeInBits();
568 O << ".param .b" << sz << " ";
572 const PointerType *PTy = dyn_cast<PointerType>(Ty);
573 assert(PTy && "Param with byval attribute should be a pointer type");
574 Type *ETy = PTy->getElementType();
576 unsigned align = Outs[OIdx].Flags.getByValAlign();
577 unsigned sz = getDataLayout()->getTypeAllocSize(ETy);
578 O << ".param .align " << align << " .b8 ";
580 O << "[" << sz << "]";
587 NVPTXTargetLowering::getArgumentAlignment(SDValue Callee,
588 const ImmutableCallSite *CS,
590 unsigned Idx) const {
591 const DataLayout *TD = getDataLayout();
593 const Value *DirectCallee = CS->getCalledFunction();
596 // We don't have a direct function symbol, but that may be because of
597 // constant cast instructions in the call.
598 const Instruction *CalleeI = CS->getInstruction();
599 assert(CalleeI && "Call target is not a function or derived value?");
601 // With bitcast'd call targets, the instruction will be the call
602 if (isa<CallInst>(CalleeI)) {
603 // Check if we have call alignment metadata
604 if (llvm::getAlign(*cast<CallInst>(CalleeI), Idx, Align))
607 const Value *CalleeV = cast<CallInst>(CalleeI)->getCalledValue();
608 // Ignore any bitcast instructions
609 while(isa<ConstantExpr>(CalleeV)) {
610 const ConstantExpr *CE = cast<ConstantExpr>(CalleeV);
613 // Look through the bitcast
614 CalleeV = cast<ConstantExpr>(CalleeV)->getOperand(0);
617 // We have now looked past all of the bitcasts. Do we finally have a
619 if (isa<Function>(CalleeV))
620 DirectCallee = CalleeV;
624 // Check for function alignment information if we found that the
625 // ultimate target is a Function
627 if (llvm::getAlign(*cast<Function>(DirectCallee), Idx, Align))
630 // Call is indirect or alignment information is not available, fall back to
631 // the ABI type alignment
632 return TD->getABITypeAlignment(Ty);
635 SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
636 SmallVectorImpl<SDValue> &InVals) const {
637 SelectionDAG &DAG = CLI.DAG;
639 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
640 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
641 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
642 SDValue Chain = CLI.Chain;
643 SDValue Callee = CLI.Callee;
644 bool &isTailCall = CLI.IsTailCall;
645 ArgListTy &Args = CLI.getArgs();
646 Type *retTy = CLI.RetTy;
647 ImmutableCallSite *CS = CLI.CS;
649 bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
650 assert(isABI && "Non-ABI compilation is not supported");
653 const DataLayout *TD = getDataLayout();
654 MachineFunction &MF = DAG.getMachineFunction();
655 const Function *F = MF.getFunction();
657 SDValue tempChain = Chain;
659 DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(uniqueCallSite, true),
661 SDValue InFlag = Chain.getValue(1);
663 unsigned paramCount = 0;
664 // Args.size() and Outs.size() need not match.
665 // Outs.size() will be larger
666 // * if there is an aggregate argument with multiple fields (each field
667 // showing up separately in Outs)
668 // * if there is a vector argument with more than typical vector-length
669 // elements (generally if more than 4) where each vector element is
670 // individually present in Outs.
671 // So a different index should be used for indexing into Outs/OutVals.
672 // See similar issue in LowerFormalArguments.
674 // Declare the .params or .reg need to pass values
676 for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {
677 EVT VT = Outs[OIdx].VT;
678 Type *Ty = Args[i].Ty;
680 if (Outs[OIdx].Flags.isByVal() == false) {
681 if (Ty->isAggregateType()) {
683 SmallVector<EVT, 16> vtparts;
684 ComputeValueVTs(*this, Ty, vtparts);
686 unsigned align = getArgumentAlignment(Callee, CS, Ty, paramCount + 1);
687 // declare .param .align <align> .b8 .param<n>[<size>];
688 unsigned sz = TD->getTypeAllocSize(Ty);
689 SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
690 SDValue DeclareParamOps[] = { Chain, DAG.getConstant(align, MVT::i32),
691 DAG.getConstant(paramCount, MVT::i32),
692 DAG.getConstant(sz, MVT::i32), InFlag };
693 Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
695 InFlag = Chain.getValue(1);
696 unsigned curOffset = 0;
697 for (unsigned j = 0, je = vtparts.size(); j != je; ++j) {
699 EVT elemtype = vtparts[j];
700 if (vtparts[j].isVector()) {
701 elems = vtparts[j].getVectorNumElements();
702 elemtype = vtparts[j].getVectorElementType();
704 for (unsigned k = 0, ke = elems; k != ke; ++k) {
705 unsigned sz = elemtype.getSizeInBits();
706 if (elemtype.isInteger() && (sz < 8))
708 SDValue StVal = OutVals[OIdx];
709 if (elemtype.getSizeInBits() < 16) {
710 StVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, StVal);
712 SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
713 SDValue CopyParamOps[] = { Chain,
714 DAG.getConstant(paramCount, MVT::i32),
715 DAG.getConstant(curOffset, MVT::i32),
717 Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl,
718 CopyParamVTs, CopyParamOps,
719 elemtype, MachinePointerInfo());
720 InFlag = Chain.getValue(1);
725 if (vtparts.size() > 0)
730 if (Ty->isVectorTy()) {
731 EVT ObjectVT = getValueType(Ty);
732 unsigned align = getArgumentAlignment(Callee, CS, Ty, paramCount + 1);
733 // declare .param .align <align> .b8 .param<n>[<size>];
734 unsigned sz = TD->getTypeAllocSize(Ty);
735 SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
736 SDValue DeclareParamOps[] = { Chain, DAG.getConstant(align, MVT::i32),
737 DAG.getConstant(paramCount, MVT::i32),
738 DAG.getConstant(sz, MVT::i32), InFlag };
739 Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
741 InFlag = Chain.getValue(1);
742 unsigned NumElts = ObjectVT.getVectorNumElements();
743 EVT EltVT = ObjectVT.getVectorElementType();
745 bool NeedExtend = false;
746 if (EltVT.getSizeInBits() < 16) {
753 SDValue Elt = OutVals[OIdx++];
755 Elt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Elt);
757 SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
758 SDValue CopyParamOps[] = { Chain,
759 DAG.getConstant(paramCount, MVT::i32),
760 DAG.getConstant(0, MVT::i32), Elt,
762 Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl,
763 CopyParamVTs, CopyParamOps,
764 MemVT, MachinePointerInfo());
765 InFlag = Chain.getValue(1);
766 } else if (NumElts == 2) {
767 SDValue Elt0 = OutVals[OIdx++];
768 SDValue Elt1 = OutVals[OIdx++];
770 Elt0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Elt0);
771 Elt1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Elt1);
774 SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
775 SDValue CopyParamOps[] = { Chain,
776 DAG.getConstant(paramCount, MVT::i32),
777 DAG.getConstant(0, MVT::i32), Elt0, Elt1,
779 Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParamV2, dl,
780 CopyParamVTs, CopyParamOps,
781 MemVT, MachinePointerInfo());
782 InFlag = Chain.getValue(1);
784 unsigned curOffset = 0;
786 // We have at least 4 elements (<3 x Ty> expands to 4 elements) and
788 // vector will be expanded to a power of 2 elements, so we know we can
789 // always round up to the next multiple of 4 when creating the vector
791 // e.g. 4 elem => 1 st.v4
794 // 11 elem => 3 st.v4
795 unsigned VecSize = 4;
796 if (EltVT.getSizeInBits() == 64)
799 // This is potentially only part of a vector, so assume all elements
800 // are packed together.
801 unsigned PerStoreOffset = MemVT.getStoreSizeInBits() / 8 * VecSize;
803 for (unsigned i = 0; i < NumElts; i += VecSize) {
806 SmallVector<SDValue, 8> Ops;
807 Ops.push_back(Chain);
808 Ops.push_back(DAG.getConstant(paramCount, MVT::i32));
809 Ops.push_back(DAG.getConstant(curOffset, MVT::i32));
811 unsigned Opc = NVPTXISD::StoreParamV2;
813 StoreVal = OutVals[OIdx++];
815 StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
816 Ops.push_back(StoreVal);
818 if (i + 1 < NumElts) {
819 StoreVal = OutVals[OIdx++];
822 DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
824 StoreVal = DAG.getUNDEF(EltVT);
826 Ops.push_back(StoreVal);
829 Opc = NVPTXISD::StoreParamV4;
830 if (i + 2 < NumElts) {
831 StoreVal = OutVals[OIdx++];
834 DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
836 StoreVal = DAG.getUNDEF(EltVT);
838 Ops.push_back(StoreVal);
840 if (i + 3 < NumElts) {
841 StoreVal = OutVals[OIdx++];
844 DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
846 StoreVal = DAG.getUNDEF(EltVT);
848 Ops.push_back(StoreVal);
851 Ops.push_back(InFlag);
853 SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
854 Chain = DAG.getMemIntrinsicNode(Opc, dl, CopyParamVTs, Ops,
855 MemVT, MachinePointerInfo());
856 InFlag = Chain.getValue(1);
857 curOffset += PerStoreOffset;
865 // for ABI, declare .param .b<size> .param<n>;
866 unsigned sz = VT.getSizeInBits();
867 bool needExtend = false;
868 if (VT.isInteger()) {
874 SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
875 SDValue DeclareParamOps[] = { Chain,
876 DAG.getConstant(paramCount, MVT::i32),
877 DAG.getConstant(sz, MVT::i32),
878 DAG.getConstant(0, MVT::i32), InFlag };
879 Chain = DAG.getNode(NVPTXISD::DeclareScalarParam, dl, DeclareParamVTs,
881 InFlag = Chain.getValue(1);
882 SDValue OutV = OutVals[OIdx];
884 // zext/sext i1 to i16
885 unsigned opc = ISD::ZERO_EXTEND;
886 if (Outs[OIdx].Flags.isSExt())
887 opc = ISD::SIGN_EXTEND;
888 OutV = DAG.getNode(opc, dl, MVT::i16, OutV);
890 SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
891 SDValue CopyParamOps[] = { Chain, DAG.getConstant(paramCount, MVT::i32),
892 DAG.getConstant(0, MVT::i32), OutV, InFlag };
894 unsigned opcode = NVPTXISD::StoreParam;
895 if (Outs[OIdx].Flags.isZExt())
896 opcode = NVPTXISD::StoreParamU32;
897 else if (Outs[OIdx].Flags.isSExt())
898 opcode = NVPTXISD::StoreParamS32;
899 Chain = DAG.getMemIntrinsicNode(opcode, dl, CopyParamVTs, CopyParamOps,
900 VT, MachinePointerInfo());
902 InFlag = Chain.getValue(1);
907 SmallVector<EVT, 16> vtparts;
908 const PointerType *PTy = dyn_cast<PointerType>(Args[i].Ty);
909 assert(PTy && "Type of a byval parameter should be pointer");
910 ComputeValueVTs(*this, PTy->getElementType(), vtparts);
912 // declare .param .align <align> .b8 .param<n>[<size>];
913 unsigned sz = Outs[OIdx].Flags.getByValSize();
914 SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
915 // The ByValAlign in the Outs[OIdx].Flags is alway set at this point,
916 // so we don't need to worry about natural alignment or not.
917 // See TargetLowering::LowerCallTo().
918 SDValue DeclareParamOps[] = {
919 Chain, DAG.getConstant(Outs[OIdx].Flags.getByValAlign(), MVT::i32),
920 DAG.getConstant(paramCount, MVT::i32), DAG.getConstant(sz, MVT::i32),
923 Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
925 InFlag = Chain.getValue(1);
926 unsigned curOffset = 0;
927 for (unsigned j = 0, je = vtparts.size(); j != je; ++j) {
929 EVT elemtype = vtparts[j];
930 if (vtparts[j].isVector()) {
931 elems = vtparts[j].getVectorNumElements();
932 elemtype = vtparts[j].getVectorElementType();
934 for (unsigned k = 0, ke = elems; k != ke; ++k) {
935 unsigned sz = elemtype.getSizeInBits();
936 if (elemtype.isInteger() && (sz < 8))
939 DAG.getNode(ISD::ADD, dl, getPointerTy(), OutVals[OIdx],
940 DAG.getConstant(curOffset, getPointerTy()));
941 SDValue theVal = DAG.getLoad(elemtype, dl, tempChain, srcAddr,
942 MachinePointerInfo(), false, false, false,
944 if (elemtype.getSizeInBits() < 16) {
945 theVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, theVal);
947 SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
948 SDValue CopyParamOps[] = { Chain, DAG.getConstant(paramCount, MVT::i32),
949 DAG.getConstant(curOffset, MVT::i32), theVal,
951 Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl, CopyParamVTs,
952 CopyParamOps, elemtype,
953 MachinePointerInfo());
955 InFlag = Chain.getValue(1);
962 GlobalAddressSDNode *Func = dyn_cast<GlobalAddressSDNode>(Callee.getNode());
963 unsigned retAlignment = 0;
966 if (Ins.size() > 0) {
967 SmallVector<EVT, 16> resvtparts;
968 ComputeValueVTs(*this, retTy, resvtparts);
971 // .param .align 16 .b8 retval0[<size-in-bytes>], or
972 // .param .b<size-in-bits> retval0
973 unsigned resultsz = TD->getTypeAllocSizeInBits(retTy);
974 if (retTy->isSingleValueType()) {
975 // Scalar needs to be at least 32bit wide
978 SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
979 SDValue DeclareRetOps[] = { Chain, DAG.getConstant(1, MVT::i32),
980 DAG.getConstant(resultsz, MVT::i32),
981 DAG.getConstant(0, MVT::i32), InFlag };
982 Chain = DAG.getNode(NVPTXISD::DeclareRet, dl, DeclareRetVTs,
984 InFlag = Chain.getValue(1);
986 retAlignment = getArgumentAlignment(Callee, CS, retTy, 0);
987 SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
988 SDValue DeclareRetOps[] = { Chain,
989 DAG.getConstant(retAlignment, MVT::i32),
990 DAG.getConstant(resultsz / 8, MVT::i32),
991 DAG.getConstant(0, MVT::i32), InFlag };
992 Chain = DAG.getNode(NVPTXISD::DeclareRetParam, dl, DeclareRetVTs,
994 InFlag = Chain.getValue(1);
999 // This is indirect function call case : PTX requires a prototype of the
1001 // proto_0 : .callprototype(.param .b32 _) _ (.param .b32 _);
1002 // to be emitted, and the label has to used as the last arg of call
1004 // The prototype is embedded in a string and put as the operand for a
1005 // CallPrototype SDNode which will print out to the value of the string.
1006 SDVTList ProtoVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1007 std::string Proto = getPrototype(retTy, Args, Outs, retAlignment, CS);
1008 const char *ProtoStr =
1009 nvTM->getManagedStrPool()->getManagedString(Proto.c_str())->c_str();
1010 SDValue ProtoOps[] = {
1011 Chain, DAG.getTargetExternalSymbol(ProtoStr, MVT::i32), InFlag,
1013 Chain = DAG.getNode(NVPTXISD::CallPrototype, dl, ProtoVTs, ProtoOps);
1014 InFlag = Chain.getValue(1);
1016 // Op to just print "call"
1017 SDVTList PrintCallVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1018 SDValue PrintCallOps[] = {
1019 Chain, DAG.getConstant((Ins.size() == 0) ? 0 : 1, MVT::i32), InFlag
1021 Chain = DAG.getNode(Func ? (NVPTXISD::PrintCallUni) : (NVPTXISD::PrintCall),
1022 dl, PrintCallVTs, PrintCallOps);
1023 InFlag = Chain.getValue(1);
1025 // Ops to print out the function name
1026 SDVTList CallVoidVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1027 SDValue CallVoidOps[] = { Chain, Callee, InFlag };
1028 Chain = DAG.getNode(NVPTXISD::CallVoid, dl, CallVoidVTs, CallVoidOps);
1029 InFlag = Chain.getValue(1);
1031 // Ops to print out the param list
1032 SDVTList CallArgBeginVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1033 SDValue CallArgBeginOps[] = { Chain, InFlag };
1034 Chain = DAG.getNode(NVPTXISD::CallArgBegin, dl, CallArgBeginVTs,
1036 InFlag = Chain.getValue(1);
1038 for (unsigned i = 0, e = paramCount; i != e; ++i) {
1041 opcode = NVPTXISD::LastCallArg;
1043 opcode = NVPTXISD::CallArg;
1044 SDVTList CallArgVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1045 SDValue CallArgOps[] = { Chain, DAG.getConstant(1, MVT::i32),
1046 DAG.getConstant(i, MVT::i32), InFlag };
1047 Chain = DAG.getNode(opcode, dl, CallArgVTs, CallArgOps);
1048 InFlag = Chain.getValue(1);
1050 SDVTList CallArgEndVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1051 SDValue CallArgEndOps[] = { Chain, DAG.getConstant(Func ? 1 : 0, MVT::i32),
1053 Chain = DAG.getNode(NVPTXISD::CallArgEnd, dl, CallArgEndVTs, CallArgEndOps);
1054 InFlag = Chain.getValue(1);
1057 SDVTList PrototypeVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1058 SDValue PrototypeOps[] = { Chain, DAG.getConstant(uniqueCallSite, MVT::i32),
1060 Chain = DAG.getNode(NVPTXISD::Prototype, dl, PrototypeVTs, PrototypeOps);
1061 InFlag = Chain.getValue(1);
1064 // Generate loads from param memory/moves from registers for result
1065 if (Ins.size() > 0) {
1066 unsigned resoffset = 0;
1067 if (retTy && retTy->isVectorTy()) {
1068 EVT ObjectVT = getValueType(retTy);
1069 unsigned NumElts = ObjectVT.getVectorNumElements();
1070 EVT EltVT = ObjectVT.getVectorElementType();
1071 assert(nvTM->getTargetLowering()->getNumRegisters(F->getContext(),
1072 ObjectVT) == NumElts &&
1073 "Vector was not scalarized");
1074 unsigned sz = EltVT.getSizeInBits();
1075 bool needTruncate = sz < 16 ? true : false;
1078 // Just a simple load
1079 SmallVector<EVT, 4> LoadRetVTs;
1081 // If loading i1 result, generate
1084 LoadRetVTs.push_back(MVT::i16);
1086 LoadRetVTs.push_back(EltVT);
1087 LoadRetVTs.push_back(MVT::Other);
1088 LoadRetVTs.push_back(MVT::Glue);
1089 SmallVector<SDValue, 4> LoadRetOps;
1090 LoadRetOps.push_back(Chain);
1091 LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
1092 LoadRetOps.push_back(DAG.getConstant(0, MVT::i32));
1093 LoadRetOps.push_back(InFlag);
1094 SDValue retval = DAG.getMemIntrinsicNode(
1095 NVPTXISD::LoadParam, dl,
1096 DAG.getVTList(LoadRetVTs), LoadRetOps, EltVT, MachinePointerInfo());
1097 Chain = retval.getValue(1);
1098 InFlag = retval.getValue(2);
1099 SDValue Ret0 = retval;
1101 Ret0 = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Ret0);
1102 InVals.push_back(Ret0);
1103 } else if (NumElts == 2) {
1105 SmallVector<EVT, 4> LoadRetVTs;
1107 // If loading i1 result, generate
1110 LoadRetVTs.push_back(MVT::i16);
1111 LoadRetVTs.push_back(MVT::i16);
1113 LoadRetVTs.push_back(EltVT);
1114 LoadRetVTs.push_back(EltVT);
1116 LoadRetVTs.push_back(MVT::Other);
1117 LoadRetVTs.push_back(MVT::Glue);
1118 SmallVector<SDValue, 4> LoadRetOps;
1119 LoadRetOps.push_back(Chain);
1120 LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
1121 LoadRetOps.push_back(DAG.getConstant(0, MVT::i32));
1122 LoadRetOps.push_back(InFlag);
1123 SDValue retval = DAG.getMemIntrinsicNode(
1124 NVPTXISD::LoadParamV2, dl,
1125 DAG.getVTList(LoadRetVTs), LoadRetOps, EltVT, MachinePointerInfo());
1126 Chain = retval.getValue(2);
1127 InFlag = retval.getValue(3);
1128 SDValue Ret0 = retval.getValue(0);
1129 SDValue Ret1 = retval.getValue(1);
1131 Ret0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ret0);
1132 InVals.push_back(Ret0);
1133 Ret1 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ret1);
1134 InVals.push_back(Ret1);
1136 InVals.push_back(Ret0);
1137 InVals.push_back(Ret1);
1140 // Split into N LoadV4
1142 unsigned VecSize = 4;
1143 unsigned Opc = NVPTXISD::LoadParamV4;
1144 if (EltVT.getSizeInBits() == 64) {
1146 Opc = NVPTXISD::LoadParamV2;
1148 EVT VecVT = EVT::getVectorVT(F->getContext(), EltVT, VecSize);
1149 for (unsigned i = 0; i < NumElts; i += VecSize) {
1150 SmallVector<EVT, 8> LoadRetVTs;
1152 // If loading i1 result, generate
1155 for (unsigned j = 0; j < VecSize; ++j)
1156 LoadRetVTs.push_back(MVT::i16);
1158 for (unsigned j = 0; j < VecSize; ++j)
1159 LoadRetVTs.push_back(EltVT);
1161 LoadRetVTs.push_back(MVT::Other);
1162 LoadRetVTs.push_back(MVT::Glue);
1163 SmallVector<SDValue, 4> LoadRetOps;
1164 LoadRetOps.push_back(Chain);
1165 LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
1166 LoadRetOps.push_back(DAG.getConstant(Ofst, MVT::i32));
1167 LoadRetOps.push_back(InFlag);
1168 SDValue retval = DAG.getMemIntrinsicNode(
1169 Opc, dl, DAG.getVTList(LoadRetVTs),
1170 LoadRetOps, EltVT, MachinePointerInfo());
1172 Chain = retval.getValue(2);
1173 InFlag = retval.getValue(3);
1175 Chain = retval.getValue(4);
1176 InFlag = retval.getValue(5);
1179 for (unsigned j = 0; j < VecSize; ++j) {
1180 if (i + j >= NumElts)
1182 SDValue Elt = retval.getValue(j);
1184 Elt = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
1185 InVals.push_back(Elt);
1187 Ofst += TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
1191 SmallVector<EVT, 16> VTs;
1192 ComputePTXValueVTs(*this, retTy, VTs);
1193 assert(VTs.size() == Ins.size() && "Bad value decomposition");
1194 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
1195 unsigned sz = VTs[i].getSizeInBits();
1196 bool needTruncate = sz < 8 ? true : false;
1197 if (VTs[i].isInteger() && (sz < 8))
1200 SmallVector<EVT, 4> LoadRetVTs;
1201 EVT TheLoadType = VTs[i];
1202 if (retTy->isIntegerTy() &&
1203 TD->getTypeAllocSizeInBits(retTy) < 32) {
1204 // This is for integer types only, and specifically not for
1206 LoadRetVTs.push_back(MVT::i32);
1207 TheLoadType = MVT::i32;
1208 } else if (sz < 16) {
1209 // If loading i1/i8 result, generate
1211 // trunc i16 to i1/i8
1212 LoadRetVTs.push_back(MVT::i16);
1214 LoadRetVTs.push_back(Ins[i].VT);
1215 LoadRetVTs.push_back(MVT::Other);
1216 LoadRetVTs.push_back(MVT::Glue);
1218 SmallVector<SDValue, 4> LoadRetOps;
1219 LoadRetOps.push_back(Chain);
1220 LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
1221 LoadRetOps.push_back(DAG.getConstant(resoffset, MVT::i32));
1222 LoadRetOps.push_back(InFlag);
1223 SDValue retval = DAG.getMemIntrinsicNode(
1224 NVPTXISD::LoadParam, dl,
1225 DAG.getVTList(LoadRetVTs), LoadRetOps,
1226 TheLoadType, MachinePointerInfo());
1227 Chain = retval.getValue(1);
1228 InFlag = retval.getValue(2);
1229 SDValue Ret0 = retval.getValue(0);
1231 Ret0 = DAG.getNode(ISD::TRUNCATE, dl, Ins[i].VT, Ret0);
1232 InVals.push_back(Ret0);
1233 resoffset += sz / 8;
1238 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(uniqueCallSite, true),
1239 DAG.getIntPtrConstant(uniqueCallSite + 1, true),
1243 // set isTailCall to false for now, until we figure out how to express
1244 // tail call optimization in PTX
1249 // By default CONCAT_VECTORS is lowered by ExpandVectorBuildThroughStack()
1250 // (see LegalizeDAG.cpp). This is slow and uses local memory.
1251 // We use extract/insert/build vector just as what LegalizeOp() does in llvm 2.5
// Lower CONCAT_VECTORS by pulling every scalar element out of each input
// vector (EXTRACT_VECTOR_ELT) and rebuilding the full result with a single
// BUILD_VECTOR, avoiding the stack-based default expansion.
1253 NVPTXTargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
1254 SDNode *Node = Op.getNode();
1256 SmallVector<SDValue, 8> Ops;
1257 unsigned NumOperands = Node->getNumOperands();
// Visit the input sub-vectors in order so the concatenated element order
// is preserved in Ops.
1258 for (unsigned i = 0; i < NumOperands; ++i) {
1259 SDValue SubOp = Node->getOperand(i);
1260 EVT VVT = SubOp.getNode()->getValueType(0);
1261 EVT EltVT = VVT.getVectorElementType();
1262 unsigned NumSubElem = VVT.getVectorNumElements();
1263 for (unsigned j = 0; j < NumSubElem; ++j) {
1264 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, SubOp,
1265 DAG.getIntPtrConstant(j)));
1268 return DAG.getNode(ISD::BUILD_VECTOR, dl, Node->getValueType(0), Ops);
// Central dispatch for every operation this target marked as Custom in its
// lowering tables; each case forwards to the matching Lower* helper.
// Any opcode that reaches the end without a handler is a backend bug.
1272 NVPTXTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
1273 switch (Op.getOpcode()) {
1274 case ISD::RETURNADDR:
1276 case ISD::FRAMEADDR:
1278 case ISD::GlobalAddress:
1279 return LowerGlobalAddress(Op, DAG);
1280 case ISD::INTRINSIC_W_CHAIN:
1282 case ISD::BUILD_VECTOR:
1283 case ISD::EXTRACT_SUBVECTOR:
1285 case ISD::CONCAT_VECTORS:
1286 return LowerCONCAT_VECTORS(Op, DAG);
1288 return LowerSTORE(Op, DAG);
1290 return LowerLOAD(Op, DAG);
1292 llvm_unreachable("Custom lowering not defined for operation");
// Custom load lowering entry point: i1 loads need special handling because
// PTX has no 1-bit load (see LowerLOADi1 below).
1296 SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
1297 if (Op.getValueType() == MVT::i1)
1298 return LowerLOADi1(Op, DAG);
// Lower an i1 load by loading i16 from the same address and truncating:
1305 // v1 = ld i8* addr (-> i16)
1306 // v = trunc i16 to i1
1307 SDValue NVPTXTargetLowering::LowerLOADi1(SDValue Op, SelectionDAG &DAG) const {
1308 SDNode *Node = Op.getNode();
1309 LoadSDNode *LD = cast<LoadSDNode>(Node);
// Only plain (non-extending) i1 loads are expected here.
1311 assert(LD->getExtensionType() == ISD::NON_EXTLOAD);
1312 assert(Node->getValueType(0) == MVT::i1 &&
1313 "Custom lowering for i1 load only");
// Re-issue the load at i16 width, preserving the original load's
// volatility, temporality, invariance and alignment.
1315 DAG.getLoad(MVT::i16, dl, LD->getChain(), LD->getBasePtr(),
1316 LD->getPointerInfo(), LD->isVolatile(), LD->isNonTemporal(),
1317 LD->isInvariant(), LD->getAlignment());
1318 SDValue result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, newLD);
1319 // The legalizer (the caller) is expecting two values from the legalized
1320 // load, so we build a MergeValues node for it. See ExpandUnalignedLoad()
1321 // in LegalizeDAG.cpp which also uses MergeValues.
1322 SDValue Ops[] = { result, LD->getChain() };
1323 return DAG.getMergeValues(Ops, dl);
// Custom store lowering entry point: i1 stores and vector stores each get
// dedicated handling (i1 has no PTX store width; vectors become StoreV2/V4).
1326 SDValue NVPTXTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
1327 EVT ValVT = Op.getOperand(1).getValueType();
1328 if (ValVT == MVT::i1)
1329 return LowerSTOREi1(Op, DAG);
1330 else if (ValVT.isVector())
1331 return LowerSTOREVector(Op, DAG);
// Lower a vector store into a target StoreV2/StoreV4 memory-intrinsic node.
// Only "native" (simple, PTX-supported) vector types are handled; the
// original memory VT and MachineMemOperand are propagated so the memory
// semantics of the store are preserved.
1337 NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
1338 SDNode *N = Op.getNode();
1339 SDValue Val = N->getOperand(1);
1341 EVT ValVT = Val.getValueType();
1343 if (ValVT.isVector()) {
1344 // We only handle "native" vector sizes for now, e.g. <4 x double> is not
1345 // legal. We can (and should) split that into 2 stores of <2 x double> here
1346 // but I'm leaving that as a TODO for now.
1347 if (!ValVT.isSimple())
1349 switch (ValVT.getSimpleVT().SimpleTy) {
1362 // This is a "native" vector type
1366 unsigned Opcode = 0;
1367 EVT EltVT = ValVT.getVectorElementType();
1368 unsigned NumElts = ValVT.getVectorNumElements();
1370 // Since StoreV2 is a target node, we cannot rely on DAG type legalization.
1371 // Therefore, we must ensure the type is legal. For i1 and i8, we set the
1372 // stored type to i16 and propagate the "real" type as the memory type.
1373 bool NeedExt = false;
1374 if (EltVT.getSizeInBits() < 16)
// Choose the vector-store opcode by element count (2 -> StoreV2, 4 -> StoreV4).
1381 Opcode = NVPTXISD::StoreV2;
1384 Opcode = NVPTXISD::StoreV4;
1389 SmallVector<SDValue, 8> Ops;
1391 // First is the chain
1392 Ops.push_back(N->getOperand(0));
1394 // Then the split values
1395 for (unsigned i = 0; i < NumElts; ++i) {
1396 SDValue ExtVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Val,
1397 DAG.getIntPtrConstant(i));
// Sub-16-bit elements are widened to i16 so the target node sees a legal type.
1399 ExtVal = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i16, ExtVal);
1400 Ops.push_back(ExtVal);
1403 // Then any remaining arguments
1404 for (unsigned i = 2, e = N->getNumOperands(); i != e; ++i) {
1405 Ops.push_back(N->getOperand(i));
1408 MemSDNode *MemSD = cast<MemSDNode>(N);
1410 SDValue NewSt = DAG.getMemIntrinsicNode(
1411 Opcode, DL, DAG.getVTList(MVT::Other), Ops,
1412 MemSD->getMemoryVT(), MemSD->getMemOperand());
1414 //return DCI.CombineTo(N, NewSt, true);
// Lower an i1 store by zero-extending the value to i16 and emitting a
// truncating store with i8 as the memory type:
1423 // v1 = zxt v to i16
1425 SDValue NVPTXTargetLowering::LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const {
1426 SDNode *Node = Op.getNode();
1428 StoreSDNode *ST = cast<StoreSDNode>(Node);
1429 SDValue Tmp1 = ST->getChain();
1430 SDValue Tmp2 = ST->getBasePtr();
1431 SDValue Tmp3 = ST->getValue();
1432 assert(Tmp3.getValueType() == MVT::i1 && "Custom lowering for i1 store only");
// Preserve the original store's alignment/volatility/temporality flags.
1433 unsigned Alignment = ST->getAlignment();
1434 bool isVolatile = ST->isVolatile();
1435 bool isNonTemporal = ST->isNonTemporal();
1436 Tmp3 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Tmp3);
1437 SDValue Result = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2,
1438 ST->getPointerInfo(), MVT::i8, isNonTemporal,
1439 isVolatile, Alignment);
// Build a TargetExternalSymbol named "<inname><suffix>" of type \p v.
// The string is interned in the target machine's managed string pool so the
// symbol's character data outlives the SelectionDAG node referring to it.
1443 SDValue NVPTXTargetLowering::getExtSymb(SelectionDAG &DAG, const char *inname,
1444 int idx, EVT v) const {
1445 std::string *name = nvTM->getManagedStrPool()->getManagedString(inname);
// The suffix stream presumably encodes idx — the line writing into it is not
// visible here; TODO confirm against the full source.
1446 std::stringstream suffix;
1448 *name += suffix.str();
1449 return DAG.getTargetExternalSymbol(name->c_str(), v);
// Return the external symbol "<function-name>_param_<idx>" used to address
// the idx'th formal parameter in PTX param space. The name is interned in
// the managed string pool so its storage remains valid for the DAG node.
1453 NVPTXTargetLowering::getParamSymbol(SelectionDAG &DAG, int idx, EVT v) const {
1454 std::string ParamSym;
1455 raw_string_ostream ParamStr(ParamSym);
1457 ParamStr << DAG.getMachineFunction().getName() << "_param_" << idx;
1460 std::string *SavedStr =
1461 nvTM->getManagedStrPool()->getManagedString(ParamSym.c_str());
1462 return DAG.getTargetExternalSymbol(SavedStr->c_str(), v);
// Convenience wrapper: the ".HLPPARAM<idx>" helper-parameter symbol.
1465 SDValue NVPTXTargetLowering::getParamHelpSymbol(SelectionDAG &DAG, int idx) {
1466 return getExtSymb(DAG, ".HLPPARAM", idx);
1469 // Check to see if the kernel argument is image*_t or sampler_t
// Matches the argument's pointee struct name against the known OpenCL
// image/sampler struct names. Literal (unnamed) structs never match.
1471 bool llvm::isImageOrSamplerVal(const Value *arg, const Module *context) {
1472 static const char *const specialTypes[] = { "struct._image2d_t",
1473 "struct._image3d_t",
1474 "struct._sampler_t" };
1476 const Type *Ty = arg->getType();
1477 const PointerType *PTy = dyn_cast<PointerType>(Ty);
// NOTE(review): the early-return guards for a null PTy (and presumably for a
// null context) are not visible in this excerpt — confirm PTy is non-null
// before the dereference below.
1485 const StructType *STy = dyn_cast<StructType>(PTy->getElementType());
1486 const std::string TypeName = STy && !STy->isLiteral() ? STy->getName() : "";
1488 for (int i = 0, e = array_lengthof(specialTypes); i != e; ++i)
1489 if (TypeName == specialTypes[i])
// Lower the incoming formal arguments of the current function into SDValues
// (one entry pushed into InVals per element of Ins). Non-byval arguments are
// loaded from their ".param" symbols (aggregates part-by-part, vectors via
// scalar/v2/v4 loads); byval arguments are represented as MoveParam of the
// param symbol. Image/sampler kernel arguments become i32 position constants
// that the AsmPrinter later pattern-matches.
1495 SDValue NVPTXTargetLowering::LowerFormalArguments(
1496 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1497 const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc dl, SelectionDAG &DAG,
1498 SmallVectorImpl<SDValue> &InVals) const {
1499 MachineFunction &MF = DAG.getMachineFunction();
1500 const DataLayout *TD = getDataLayout();
1502 const Function *F = MF.getFunction();
1503 const AttributeSet &PAL = F->getAttributes();
1504 const TargetLowering *TLI = nvTM->getTargetLowering();
1506 SDValue Root = DAG.getRoot();
1507 std::vector<SDValue> OutChains;
1509 bool isKernel = llvm::isKernelFunction(*F);
// Only the ABI path (sm_20+) is supported by this backend.
1510 bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
1511 assert(isABI && "Non-ABI compilation is not supported");
1515 std::vector<Type *> argTypes;
1516 std::vector<const Argument *> theArgs;
1517 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
1519 theArgs.push_back(I);
1520 argTypes.push_back(I->getType());
1522 // argTypes.size() (or theArgs.size()) and Ins.size() need not match.
1523 // Ins.size() will be larger
1524 // * if there is an aggregate argument with multiple fields (each field
1525 // showing up separately in Ins)
1526 // * if there is a vector argument with more than typical vector-length
1527 // elements (generally if more than 4) where each vector element is
1528 // individually present in Ins.
1529 // So a different index should be used for indexing into Ins.
1530 // See similar issue in LowerCall.
1531 unsigned InsIdx = 0;
1534 for (unsigned i = 0, e = theArgs.size(); i != e; ++i, ++idx, ++InsIdx) {
1535 Type *Ty = argTypes[i];
1537 // If the kernel argument is image*_t or sampler_t, convert it to
1538 // a i32 constant holding the parameter position. This can later
1539 // matched in the AsmPrinter to output the correct mangled name.
1540 if (isImageOrSamplerVal(
1542 (theArgs[i]->getParent() ? theArgs[i]->getParent()->getParent()
1544 assert(isKernel && "Only kernels can have image/sampler params");
1545 InVals.push_back(DAG.getConstant(i + 1, MVT::i32));
// Unused arguments still need placeholder values so InVals stays aligned
// with Ins: push UNDEFs of the appropriate types.
1549 if (theArgs[i]->use_empty()) {
1551 if (Ty->isAggregateType()) {
1552 SmallVector<EVT, 16> vtparts;
1554 ComputePTXValueVTs(*this, Ty, vtparts);
1555 assert(vtparts.size() > 0 && "empty aggregate type not expected");
1556 for (unsigned parti = 0, parte = vtparts.size(); parti != parte;
1558 EVT partVT = vtparts[parti];
1559 InVals.push_back(DAG.getNode(ISD::UNDEF, dl, partVT));
1562 if (vtparts.size() > 0)
1566 if (Ty->isVectorTy()) {
1567 EVT ObjectVT = getValueType(Ty);
1568 unsigned NumRegs = TLI->getNumRegisters(F->getContext(), ObjectVT);
1569 for (unsigned parti = 0; parti < NumRegs; ++parti) {
1570 InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
1577 InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
1581 // In the following cases, assign a node order of "idx+1"
1582 // to newly created nodes. The SDNodes for params have to
1583 // appear in the same order as their order of appearance
1584 // in the original function. "idx+1" holds that order.
1585 if (PAL.hasAttribute(i + 1, Attribute::ByVal) == false) {
// Non-byval aggregate: load each primitive part from the param symbol at
// its computed offset.
1586 if (Ty->isAggregateType()) {
1587 SmallVector<EVT, 16> vtparts;
1588 SmallVector<uint64_t, 16> offsets;
1590 // NOTE: Here, we lose the ability to issue vector loads for vectors
1591 // that are a part of a struct. This should be investigated in the
1593 ComputePTXValueVTs(*this, Ty, vtparts, &offsets, 0);
1594 assert(vtparts.size() > 0 && "empty aggregate type not expected");
1595 bool aggregateIsPacked = false;
1596 if (StructType *STy = llvm::dyn_cast<StructType>(Ty))
1597 aggregateIsPacked = STy->isPacked();
1599 SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
1600 for (unsigned parti = 0, parte = vtparts.size(); parti != parte;
1602 EVT partVT = vtparts[parti];
1603 Value *srcValue = Constant::getNullValue(
1604 PointerType::get(partVT.getTypeForEVT(F->getContext()),
1605 llvm::ADDRESS_SPACE_PARAM));
1607 DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
1608 DAG.getConstant(offsets[parti], getPointerTy()));
// Packed aggregates have byte alignment; otherwise use the part's ABI alignment.
1609 unsigned partAlign =
1610 aggregateIsPacked ? 1
1611 : TD->getABITypeAlignment(
1612 partVT.getTypeForEVT(F->getContext()));
// Widen with a sign/zero-extending load when Ins expects a wider type.
1614 if (Ins[InsIdx].VT.getSizeInBits() > partVT.getSizeInBits()) {
1615 ISD::LoadExtType ExtOp = Ins[InsIdx].Flags.isSExt() ?
1616 ISD::SEXTLOAD : ISD::ZEXTLOAD;
1617 p = DAG.getExtLoad(ExtOp, dl, Ins[InsIdx].VT, Root, srcAddr,
1618 MachinePointerInfo(srcValue), partVT, false,
1621 p = DAG.getLoad(partVT, dl, Root, srcAddr,
1622 MachinePointerInfo(srcValue), false, false, false,
1626 p.getNode()->setIROrder(idx + 1);
1627 InVals.push_back(p);
1630 if (vtparts.size() > 0)
// Non-byval vector: load 1 element directly, 2 as a v2 load, and larger
// counts in chunks of ld.v4 (v2 for 64-bit elements).
1634 if (Ty->isVectorTy()) {
1635 EVT ObjectVT = getValueType(Ty);
1636 SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
1637 unsigned NumElts = ObjectVT.getVectorNumElements();
1638 assert(TLI->getNumRegisters(F->getContext(), ObjectVT) == NumElts &&
1639 "Vector was not scalarized");
1641 EVT EltVT = ObjectVT.getVectorElementType();
1646 // We only have one element, so just directly load it
1647 Value *SrcValue = Constant::getNullValue(PointerType::get(
1648 EltVT.getTypeForEVT(F->getContext()), llvm::ADDRESS_SPACE_PARAM));
1649 SDValue SrcAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
1650 DAG.getConstant(Ofst, getPointerTy()));
1651 SDValue P = DAG.getLoad(
1652 EltVT, dl, Root, SrcAddr, MachinePointerInfo(SrcValue), false,
1654 TD->getABITypeAlignment(EltVT.getTypeForEVT(F->getContext())));
1656 P.getNode()->setIROrder(idx + 1);
1658 if (Ins[InsIdx].VT.getSizeInBits() > EltVT.getSizeInBits())
1659 P = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, P);
1660 InVals.push_back(P);
1661 Ofst += TD->getTypeAllocSize(EltVT.getTypeForEVT(F->getContext()));
1663 } else if (NumElts == 2) {
1665 // f32,f32 = load ...
1666 EVT VecVT = EVT::getVectorVT(F->getContext(), EltVT, 2);
1667 Value *SrcValue = Constant::getNullValue(PointerType::get(
1668 VecVT.getTypeForEVT(F->getContext()), llvm::ADDRESS_SPACE_PARAM));
1669 SDValue SrcAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
1670 DAG.getConstant(Ofst, getPointerTy()));
1671 SDValue P = DAG.getLoad(
1672 VecVT, dl, Root, SrcAddr, MachinePointerInfo(SrcValue), false,
1674 TD->getABITypeAlignment(VecVT.getTypeForEVT(F->getContext())));
1676 P.getNode()->setIROrder(idx + 1);
1678 SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, P,
1679 DAG.getIntPtrConstant(0));
1680 SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, P,
1681 DAG.getIntPtrConstant(1));
1683 if (Ins[InsIdx].VT.getSizeInBits() > EltVT.getSizeInBits()) {
1684 Elt0 = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, Elt0);
1685 Elt1 = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, Elt1);
1688 InVals.push_back(Elt0);
1689 InVals.push_back(Elt1);
1690 Ofst += TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
1694 // We have at least 4 elements (<3 x Ty> expands to 4 elements) and
1696 // vector will be expanded to a power of 2 elements, so we know we can
1697 // always round up to the next multiple of 4 when creating the vector
1699 // e.g. 4 elem => 1 ld.v4
1700 // 6 elem => 2 ld.v4
1701 // 8 elem => 2 ld.v4
1702 // 11 elem => 3 ld.v4
1703 unsigned VecSize = 4;
1704 if (EltVT.getSizeInBits() == 64) {
1707 EVT VecVT = EVT::getVectorVT(F->getContext(), EltVT, VecSize);
1708 for (unsigned i = 0; i < NumElts; i += VecSize) {
1709 Value *SrcValue = Constant::getNullValue(
1710 PointerType::get(VecVT.getTypeForEVT(F->getContext()),
1711 llvm::ADDRESS_SPACE_PARAM));
1713 DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
1714 DAG.getConstant(Ofst, getPointerTy()));
1715 SDValue P = DAG.getLoad(
1716 VecVT, dl, Root, SrcAddr, MachinePointerInfo(SrcValue), false,
1718 TD->getABITypeAlignment(VecVT.getTypeForEVT(F->getContext())));
1720 P.getNode()->setIROrder(idx + 1);
// Extract only the elements that exist; the tail of a padded chunk is skipped.
1722 for (unsigned j = 0; j < VecSize; ++j) {
1723 if (i + j >= NumElts)
1725 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, P,
1726 DAG.getIntPtrConstant(j));
1727 if (Ins[InsIdx].VT.getSizeInBits() > EltVT.getSizeInBits())
1728 Elt = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, Elt);
1729 InVals.push_back(Elt);
1731 Ofst += TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
// Non-byval scalar (and pointer) arguments: one load from the param symbol.
1741 EVT ObjectVT = getValueType(Ty);
1742 // If ABI, load from the param symbol
1743 SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
1744 Value *srcValue = Constant::getNullValue(PointerType::get(
1745 ObjectVT.getTypeForEVT(F->getContext()), llvm::ADDRESS_SPACE_PARAM));
1747 if (ObjectVT.getSizeInBits() < Ins[InsIdx].VT.getSizeInBits()) {
1748 ISD::LoadExtType ExtOp = Ins[InsIdx].Flags.isSExt() ?
1749 ISD::SEXTLOAD : ISD::ZEXTLOAD;
1750 p = DAG.getExtLoad(ExtOp, dl, Ins[InsIdx].VT, Root, Arg,
1751 MachinePointerInfo(srcValue), ObjectVT, false, false,
1752 TD->getABITypeAlignment(ObjectVT.getTypeForEVT(F->getContext())));
1754 p = DAG.getLoad(Ins[InsIdx].VT, dl, Root, Arg,
1755 MachinePointerInfo(srcValue), false, false, false,
1756 TD->getABITypeAlignment(ObjectVT.getTypeForEVT(F->getContext())));
1759 p.getNode()->setIROrder(idx + 1);
1760 InVals.push_back(p);
1764 // Param has ByVal attribute
1765 // Return MoveParam(param symbol).
1766 // Ideally, the param symbol can be returned directly,
1767 // but when SDNode builder decides to use it in a CopyToReg(),
1768 // machine instruction fails because TargetExternalSymbol
1769 // (not lowered) is target dependent, and CopyToReg assumes
1770 // the source is lowered.
1771 EVT ObjectVT = getValueType(Ty);
1772 assert(ObjectVT == Ins[InsIdx].VT &&
1773 "Ins type did not match function type");
1774 SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
1775 SDValue p = DAG.getNode(NVPTXISD::MoveParam, dl, ObjectVT, Arg);
1777 p.getNode()->setIROrder(idx + 1);
1779 InVals.push_back(p);
// Otherwise convert the param pointer from local to generic address space
// via the nvvm_ptr_local_to_gen intrinsic before handing it to the caller.
1781 SDValue p2 = DAG.getNode(
1782 ISD::INTRINSIC_WO_CHAIN, dl, ObjectVT,
1783 DAG.getConstant(Intrinsic::nvvm_ptr_local_to_gen, MVT::i32), p);
1784 InVals.push_back(p2);
1788 // Clang will check explicit VarArg and issue error if any. However, Clang
1789 // will let code with
1790 // implicit var arg like f() pass. See bug 617733.
1791 // We treat this case as if the arg list is empty.
1792 // if (F.isVarArg()) {
1793 // assert(0 && "VarArg not supported yet!");
// Merge any chains produced above back into the DAG root.
1796 if (!OutChains.empty())
1797 DAG.setRoot(DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains));
// Lower a function return: emit StoreRetval/StoreRetvalV2/StoreRetvalV4
// nodes that write the (possibly scalarized) return value into the PTX
// return-value param space, then terminate with RET_FLAG. Sub-16-bit values
// are widened (zext/anyext) because PTX has no narrower store widths here.
1804 NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
1806 const SmallVectorImpl<ISD::OutputArg> &Outs,
1807 const SmallVectorImpl<SDValue> &OutVals,
1808 SDLoc dl, SelectionDAG &DAG) const {
1809 MachineFunction &MF = DAG.getMachineFunction();
1810 const Function *F = MF.getFunction();
1811 Type *RetTy = F->getReturnType();
1812 const DataLayout *TD = getDataLayout();
// Only the ABI path (sm_20+) is supported by this backend.
1814 bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
1815 assert(isABI && "Non-ABI compilation is not supported");
1819 if (VectorType *VTy = dyn_cast<VectorType>(RetTy)) {
1820 // If we have a vector type, the OutVals array will be the scalarized
1821 // components and we have to combine them into 1 or more vector stores.
1822 unsigned NumElts = VTy->getNumElements();
1823 assert(NumElts == Outs.size() && "Bad scalarization of return value");
1825 // const_cast can be removed in later LLVM versions
1826 EVT EltVT = getValueType(RetTy).getVectorElementType();
1827 bool NeedExtend = false;
1828 if (EltVT.getSizeInBits() < 16)
1833 SDValue StoreVal = OutVals[0];
1834 // We only have one element, so just directly store it
1836 StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
1837 SDValue Ops[] = { Chain, DAG.getConstant(0, MVT::i32), StoreVal };
1838 Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreRetval, dl,
1839 DAG.getVTList(MVT::Other), Ops,
1840 EltVT, MachinePointerInfo());
// Two elements: store both at once with StoreRetvalV2.
1842 } else if (NumElts == 2) {
1844 SDValue StoreVal0 = OutVals[0];
1845 SDValue StoreVal1 = OutVals[1];
1848 StoreVal0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal0);
1849 StoreVal1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal1);
1852 SDValue Ops[] = { Chain, DAG.getConstant(0, MVT::i32), StoreVal0,
1854 Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreRetvalV2, dl,
1855 DAG.getVTList(MVT::Other), Ops,
1856 EltVT, MachinePointerInfo());
1859 // We have at least 4 elements (<3 x Ty> expands to 4 elements) and the
1860 // vector will be expanded to a power of 2 elements, so we know we can
1861 // always round up to the next multiple of 4 when creating the vector
1863 // e.g. 4 elem => 1 st.v4
1864 // 6 elem => 2 st.v4
1865 // 8 elem => 2 st.v4
1866 // 11 elem => 3 st.v4
1868 unsigned VecSize = 4;
// 64-bit elements go out as v2 chunks instead of v4.
1869 if (OutVals[0].getValueType().getSizeInBits() == 64)
1872 unsigned Offset = 0;
1875 EVT::getVectorVT(F->getContext(), OutVals[0].getValueType(), VecSize);
1876 unsigned PerStoreOffset =
1877 TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
1879 for (unsigned i = 0; i < NumElts; i += VecSize) {
// Operand layout: chain, byte offset, then up to VecSize values (missing
// tail elements are padded with UNDEF so the node shape stays fixed).
1882 SmallVector<SDValue, 8> Ops;
1883 Ops.push_back(Chain);
1884 Ops.push_back(DAG.getConstant(Offset, MVT::i32));
1885 unsigned Opc = NVPTXISD::StoreRetvalV2;
1886 EVT ExtendedVT = (NeedExtend) ? MVT::i16 : OutVals[0].getValueType();
1888 StoreVal = OutVals[i];
1890 StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal);
1891 Ops.push_back(StoreVal);
1893 if (i + 1 < NumElts) {
1894 StoreVal = OutVals[i + 1];
1896 StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal);
1898 StoreVal = DAG.getUNDEF(ExtendedVT);
1900 Ops.push_back(StoreVal);
1903 Opc = NVPTXISD::StoreRetvalV4;
1904 if (i + 2 < NumElts) {
1905 StoreVal = OutVals[i + 2];
1908 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal);
1910 StoreVal = DAG.getUNDEF(ExtendedVT);
1912 Ops.push_back(StoreVal);
1914 if (i + 3 < NumElts) {
1915 StoreVal = OutVals[i + 3];
1918 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal);
1920 StoreVal = DAG.getUNDEF(ExtendedVT);
1922 Ops.push_back(StoreVal);
1925 // Chain = DAG.getNode(Opc, dl, MVT::Other, &Ops[0], Ops.size());
1927 DAG.getMemIntrinsicNode(Opc, dl, DAG.getVTList(MVT::Other), Ops,
1928 EltVT, MachinePointerInfo());
1929 Offset += PerStoreOffset;
// Non-vector return: store each decomposed part at its running byte offset.
1933 SmallVector<EVT, 16> ValVTs;
1934 // const_cast is necessary since we are still using an LLVM version from
1935 // before the type system re-write.
1936 ComputePTXValueVTs(*this, RetTy, ValVTs);
1937 assert(ValVTs.size() == OutVals.size() && "Bad return value decomposition");
1939 unsigned SizeSoFar = 0;
1940 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
1941 SDValue theVal = OutVals[i];
1942 EVT TheValType = theVal.getValueType();
1943 unsigned numElems = 1;
1944 if (TheValType.isVector())
1945 numElems = TheValType.getVectorNumElements();
1946 for (unsigned j = 0, je = numElems; j != je; ++j) {
1947 SDValue TmpVal = theVal;
1948 if (TheValType.isVector())
1949 TmpVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
1950 TheValType.getVectorElementType(), TmpVal,
1951 DAG.getIntPtrConstant(j));
1952 EVT TheStoreType = ValVTs[i];
// Small integer returns are widened to i32, matching the declared
// ".param .b32" retval in the callee prototype.
1953 if (RetTy->isIntegerTy() &&
1954 TD->getTypeAllocSizeInBits(RetTy) < 32) {
1955 // The following zero-extension is for integer types only, and
1956 // specifically not for aggregates.
1957 TmpVal = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, TmpVal);
1958 TheStoreType = MVT::i32;
1960 else if (TmpVal.getValueType().getSizeInBits() < 16)
1961 TmpVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, TmpVal);
1963 SDValue Ops[] = { Chain, DAG.getConstant(SizeSoFar, MVT::i32), TmpVal };
1964 Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreRetval, dl,
1965 DAG.getVTList(MVT::Other), Ops,
1967 MachinePointerInfo());
// Advance the offset by the element (vector case) or whole-part store size.
1968 if(TheValType.isVector())
1970 TheStoreType.getVectorElementType().getStoreSizeInBits() / 8;
1972 SizeSoFar += TheStoreType.getStoreSizeInBits()/8;
1977 return DAG.getNode(NVPTXISD::RET_FLAG, dl, MVT::Other, Chain);
// LowerAsmOperandForConstraint - Lower an inline-asm operand for the given
// constraint string.  NVPTX defines no multi-character constraints of its
// own, so single-letter constraints are simply delegated to the generic
// TargetLowering implementation.
// NOTE(review): the handling after the length() > 1 check is elided here --
// presumably an early return for multi-letter constraints; confirm against
// the full file.
1981 void NVPTXTargetLowering::LowerAsmOperandForConstraint(
1982 SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
1983 SelectionDAG &DAG) const {
1984 if (Constraint.length() > 1)
1987 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
1990 // NVPTX supports vectors of legal element types of any length in intrinsics,
1991 // because the NVPTX-specific type legalizer
1992 // will legalize them to a PTX-supported length.
1993 bool NVPTXTargetLowering::isTypeSupportedInIntrinsic(MVT VT) const {
// A type that is already legal is trivially supported.
1994 if (isTypeLegal(VT))
1996 if (VT.isVector()) {
// A vector is supported if its element type is legal; the NVPTX type
// legalizer will later split/widen it to a PTX-supported vector length.
1997 MVT eVT = VT.getVectorElementType();
1998 if (isTypeLegal(eVT))
// getOpcForTextureInstr - Translate an NVVM texture-read intrinsic ID into
// the corresponding NVPTXISD texture opcode.  The opcode name encodes the
// texture geometry (1D / 1D-array / 2D / 2D-array / 3D), the result element
// type (Float for the *_v4f32_* intrinsics, I32 for *_v4i32_*), and the
// coordinate flavor: I32 or Float coordinates, with Level/Grad variants for
// explicit mip-level and gradient sampling.
// NOTE(review): the switch default is elided in this view; confirm its
// behavior for unknown intrinsic IDs against the full file.
2004 static unsigned getOpcForTextureInstr(unsigned Intrinsic) {
2005 switch (Intrinsic) {
// 1D textures.
2009 case Intrinsic::nvvm_tex_1d_v4f32_i32:
2010 return NVPTXISD::Tex1DFloatI32;
2011 case Intrinsic::nvvm_tex_1d_v4f32_f32:
2012 return NVPTXISD::Tex1DFloatFloat;
2013 case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
2014 return NVPTXISD::Tex1DFloatFloatLevel;
2015 case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
2016 return NVPTXISD::Tex1DFloatFloatGrad;
2017 case Intrinsic::nvvm_tex_1d_v4i32_i32:
2018 return NVPTXISD::Tex1DI32I32;
2019 case Intrinsic::nvvm_tex_1d_v4i32_f32:
2020 return NVPTXISD::Tex1DI32Float;
2021 case Intrinsic::nvvm_tex_1d_level_v4i32_f32:
2022 return NVPTXISD::Tex1DI32FloatLevel;
2023 case Intrinsic::nvvm_tex_1d_grad_v4i32_f32:
2024 return NVPTXISD::Tex1DI32FloatGrad;
// 1D array textures.
2026 case Intrinsic::nvvm_tex_1d_array_v4f32_i32:
2027 return NVPTXISD::Tex1DArrayFloatI32;
2028 case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
2029 return NVPTXISD::Tex1DArrayFloatFloat;
2030 case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
2031 return NVPTXISD::Tex1DArrayFloatFloatLevel;
2032 case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
2033 return NVPTXISD::Tex1DArrayFloatFloatGrad;
2034 case Intrinsic::nvvm_tex_1d_array_v4i32_i32:
2035 return NVPTXISD::Tex1DArrayI32I32;
2036 case Intrinsic::nvvm_tex_1d_array_v4i32_f32:
2037 return NVPTXISD::Tex1DArrayI32Float;
2038 case Intrinsic::nvvm_tex_1d_array_level_v4i32_f32:
2039 return NVPTXISD::Tex1DArrayI32FloatLevel;
2040 case Intrinsic::nvvm_tex_1d_array_grad_v4i32_f32:
2041 return NVPTXISD::Tex1DArrayI32FloatGrad;
// 2D textures.
2043 case Intrinsic::nvvm_tex_2d_v4f32_i32:
2044 return NVPTXISD::Tex2DFloatI32;
2045 case Intrinsic::nvvm_tex_2d_v4f32_f32:
2046 return NVPTXISD::Tex2DFloatFloat;
2047 case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
2048 return NVPTXISD::Tex2DFloatFloatLevel;
2049 case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
2050 return NVPTXISD::Tex2DFloatFloatGrad;
2051 case Intrinsic::nvvm_tex_2d_v4i32_i32:
2052 return NVPTXISD::Tex2DI32I32;
2053 case Intrinsic::nvvm_tex_2d_v4i32_f32:
2054 return NVPTXISD::Tex2DI32Float;
2055 case Intrinsic::nvvm_tex_2d_level_v4i32_f32:
2056 return NVPTXISD::Tex2DI32FloatLevel;
2057 case Intrinsic::nvvm_tex_2d_grad_v4i32_f32:
2058 return NVPTXISD::Tex2DI32FloatGrad;
// 2D array textures.
2060 case Intrinsic::nvvm_tex_2d_array_v4f32_i32:
2061 return NVPTXISD::Tex2DArrayFloatI32;
2062 case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
2063 return NVPTXISD::Tex2DArrayFloatFloat;
2064 case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
2065 return NVPTXISD::Tex2DArrayFloatFloatLevel;
2066 case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
2067 return NVPTXISD::Tex2DArrayFloatFloatGrad;
2068 case Intrinsic::nvvm_tex_2d_array_v4i32_i32:
2069 return NVPTXISD::Tex2DArrayI32I32;
2070 case Intrinsic::nvvm_tex_2d_array_v4i32_f32:
2071 return NVPTXISD::Tex2DArrayI32Float;
2072 case Intrinsic::nvvm_tex_2d_array_level_v4i32_f32:
2073 return NVPTXISD::Tex2DArrayI32FloatLevel;
2074 case Intrinsic::nvvm_tex_2d_array_grad_v4i32_f32:
2075 return NVPTXISD::Tex2DArrayI32FloatGrad;
// 3D textures.
2077 case Intrinsic::nvvm_tex_3d_v4f32_i32:
2078 return NVPTXISD::Tex3DFloatI32;
2079 case Intrinsic::nvvm_tex_3d_v4f32_f32:
2080 return NVPTXISD::Tex3DFloatFloat;
2081 case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
2082 return NVPTXISD::Tex3DFloatFloatLevel;
2083 case Intrinsic::nvvm_tex_3d_grad_v4f32_f32:
2084 return NVPTXISD::Tex3DFloatFloatGrad;
2085 case Intrinsic::nvvm_tex_3d_v4i32_i32:
2086 return NVPTXISD::Tex3DI32I32;
2087 case Intrinsic::nvvm_tex_3d_v4i32_f32:
2088 return NVPTXISD::Tex3DI32Float;
2089 case Intrinsic::nvvm_tex_3d_level_v4i32_f32:
2090 return NVPTXISD::Tex3DI32FloatLevel;
2091 case Intrinsic::nvvm_tex_3d_grad_v4i32_f32:
2092 return NVPTXISD::Tex3DI32FloatGrad;
// getOpcForSurfaceInstr - Translate an NVVM surface-load intrinsic ID into
// the corresponding NVPTXISD surface opcode.  The opcode name encodes the
// surface geometry (1D / 1D-array / 2D / 2D-array / 3D), the element width
// (I8/I16/I32), and the vector arity (scalar, V2, V4).  All intrinsics here
// are the *_trap variants (trap on out-of-bounds access).
// NOTE(review): the switch default is elided in this view; confirm its
// behavior for unknown intrinsic IDs against the full file.
2096 static unsigned getOpcForSurfaceInstr(unsigned Intrinsic) {
2097 switch (Intrinsic) {
// 1D surface loads.
2100 case Intrinsic::nvvm_suld_1d_i8_trap:
2101 return NVPTXISD::Suld1DI8Trap;
2102 case Intrinsic::nvvm_suld_1d_i16_trap:
2103 return NVPTXISD::Suld1DI16Trap;
2104 case Intrinsic::nvvm_suld_1d_i32_trap:
2105 return NVPTXISD::Suld1DI32Trap;
2106 case Intrinsic::nvvm_suld_1d_v2i8_trap:
2107 return NVPTXISD::Suld1DV2I8Trap;
2108 case Intrinsic::nvvm_suld_1d_v2i16_trap:
2109 return NVPTXISD::Suld1DV2I16Trap;
2110 case Intrinsic::nvvm_suld_1d_v2i32_trap:
2111 return NVPTXISD::Suld1DV2I32Trap;
2112 case Intrinsic::nvvm_suld_1d_v4i8_trap:
2113 return NVPTXISD::Suld1DV4I8Trap;
2114 case Intrinsic::nvvm_suld_1d_v4i16_trap:
2115 return NVPTXISD::Suld1DV4I16Trap;
2116 case Intrinsic::nvvm_suld_1d_v4i32_trap:
2117 return NVPTXISD::Suld1DV4I32Trap;
// 1D array surface loads.
2118 case Intrinsic::nvvm_suld_1d_array_i8_trap:
2119 return NVPTXISD::Suld1DArrayI8Trap;
2120 case Intrinsic::nvvm_suld_1d_array_i16_trap:
2121 return NVPTXISD::Suld1DArrayI16Trap;
2122 case Intrinsic::nvvm_suld_1d_array_i32_trap:
2123 return NVPTXISD::Suld1DArrayI32Trap;
2124 case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
2125 return NVPTXISD::Suld1DArrayV2I8Trap;
2126 case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
2127 return NVPTXISD::Suld1DArrayV2I16Trap;
2128 case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
2129 return NVPTXISD::Suld1DArrayV2I32Trap;
2130 case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
2131 return NVPTXISD::Suld1DArrayV4I8Trap;
2132 case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
2133 return NVPTXISD::Suld1DArrayV4I16Trap;
2134 case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
2135 return NVPTXISD::Suld1DArrayV4I32Trap;
// 2D surface loads.
2136 case Intrinsic::nvvm_suld_2d_i8_trap:
2137 return NVPTXISD::Suld2DI8Trap;
2138 case Intrinsic::nvvm_suld_2d_i16_trap:
2139 return NVPTXISD::Suld2DI16Trap;
2140 case Intrinsic::nvvm_suld_2d_i32_trap:
2141 return NVPTXISD::Suld2DI32Trap;
2142 case Intrinsic::nvvm_suld_2d_v2i8_trap:
2143 return NVPTXISD::Suld2DV2I8Trap;
2144 case Intrinsic::nvvm_suld_2d_v2i16_trap:
2145 return NVPTXISD::Suld2DV2I16Trap;
2146 case Intrinsic::nvvm_suld_2d_v2i32_trap:
2147 return NVPTXISD::Suld2DV2I32Trap;
2148 case Intrinsic::nvvm_suld_2d_v4i8_trap:
2149 return NVPTXISD::Suld2DV4I8Trap;
2150 case Intrinsic::nvvm_suld_2d_v4i16_trap:
2151 return NVPTXISD::Suld2DV4I16Trap;
2152 case Intrinsic::nvvm_suld_2d_v4i32_trap:
2153 return NVPTXISD::Suld2DV4I32Trap;
// 2D array surface loads.
2154 case Intrinsic::nvvm_suld_2d_array_i8_trap:
2155 return NVPTXISD::Suld2DArrayI8Trap;
2156 case Intrinsic::nvvm_suld_2d_array_i16_trap:
2157 return NVPTXISD::Suld2DArrayI16Trap;
2158 case Intrinsic::nvvm_suld_2d_array_i32_trap:
2159 return NVPTXISD::Suld2DArrayI32Trap;
2160 case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
2161 return NVPTXISD::Suld2DArrayV2I8Trap;
2162 case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
2163 return NVPTXISD::Suld2DArrayV2I16Trap;
2164 case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
2165 return NVPTXISD::Suld2DArrayV2I32Trap;
2166 case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
2167 return NVPTXISD::Suld2DArrayV4I8Trap;
2168 case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
2169 return NVPTXISD::Suld2DArrayV4I16Trap;
2170 case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
2171 return NVPTXISD::Suld2DArrayV4I32Trap;
// 3D surface loads.
2172 case Intrinsic::nvvm_suld_3d_i8_trap:
2173 return NVPTXISD::Suld3DI8Trap;
2174 case Intrinsic::nvvm_suld_3d_i16_trap:
2175 return NVPTXISD::Suld3DI16Trap;
2176 case Intrinsic::nvvm_suld_3d_i32_trap:
2177 return NVPTXISD::Suld3DI32Trap;
2178 case Intrinsic::nvvm_suld_3d_v2i8_trap:
2179 return NVPTXISD::Suld3DV2I8Trap;
2180 case Intrinsic::nvvm_suld_3d_v2i16_trap:
2181 return NVPTXISD::Suld3DV2I16Trap;
2182 case Intrinsic::nvvm_suld_3d_v2i32_trap:
2183 return NVPTXISD::Suld3DV2I32Trap;
2184 case Intrinsic::nvvm_suld_3d_v4i8_trap:
2185 return NVPTXISD::Suld3DV4I8Trap;
2186 case Intrinsic::nvvm_suld_3d_v4i16_trap:
2187 return NVPTXISD::Suld3DV4I16Trap;
2188 case Intrinsic::nvvm_suld_3d_v4i32_trap:
2189 return NVPTXISD::Suld3DV4I32Trap;
2193 // llvm.ptx.memcpy.const and llvm.ptx.memmove.const need to be modeled as
2195 // memory intrinsics, because we need the information that is only available
2197 // in the "Value" type of the pointer argument -- in particular, the
// address space information.
//
// getTgtMemIntrinsic - Describe the memory behavior of NVVM intrinsics to
// the SelectionDAG builder: fills in Info with the node opcode, the memory
// VT, the pointer operand (nullptr for texture/surface reads, which access
// memory through opaque handles rather than pointers), and read/write
// flags.  Returns true for intrinsics handled here.
2198 bool NVPTXTargetLowering::getTgtMemIntrinsic(
2199 IntrinsicInfo &Info, const CallInst &I, unsigned Intrinsic) const {
2200 switch (Intrinsic) {
// Atomic float add: a read-modify-write on an f32 location.
2204 case Intrinsic::nvvm_atomic_load_add_f32:
2205 Info.opc = ISD::INTRINSIC_W_CHAIN;
2206 Info.memVT = MVT::f32;
2207 Info.ptrVal = I.getArgOperand(0);
2210 Info.readMem = true;
2211 Info.writeMem = true;
// Atomic 32-bit increment/decrement: read-modify-write on an i32 location.
2215 case Intrinsic::nvvm_atomic_load_inc_32:
2216 case Intrinsic::nvvm_atomic_load_dec_32:
2217 Info.opc = ISD::INTRINSIC_W_CHAIN;
2218 Info.memVT = MVT::i32;
2219 Info.ptrVal = I.getArgOperand(0);
2222 Info.readMem = true;
2223 Info.writeMem = true;
// ldu.global.* loads: read-only. For the integer and pointer forms the
// memory VT is taken from the IR result type; the float form uses f32.
// NOTE(review): the _i and _p branches below are identical and could be
// combined -- kept as-is since this is a documentation-only pass.
2227 case Intrinsic::nvvm_ldu_global_i:
2228 case Intrinsic::nvvm_ldu_global_f:
2229 case Intrinsic::nvvm_ldu_global_p:
2231 Info.opc = ISD::INTRINSIC_W_CHAIN;
2232 if (Intrinsic == Intrinsic::nvvm_ldu_global_i)
2233 Info.memVT = getValueType(I.getType());
2234 else if (Intrinsic == Intrinsic::nvvm_ldu_global_p)
2235 Info.memVT = getValueType(I.getType());
2237 Info.memVT = MVT::f32;
2238 Info.ptrVal = I.getArgOperand(0);
2241 Info.readMem = true;
2242 Info.writeMem = false;
// Texture reads returning v4f32: read-only; no pointer operand (textures
// are addressed through handles), element memory type f32.
2246 case Intrinsic::nvvm_tex_1d_v4f32_i32:
2247 case Intrinsic::nvvm_tex_1d_v4f32_f32:
2248 case Intrinsic::nvvm_tex_1d_level_v4f32_f32:
2249 case Intrinsic::nvvm_tex_1d_grad_v4f32_f32:
2250 case Intrinsic::nvvm_tex_1d_array_v4f32_i32:
2251 case Intrinsic::nvvm_tex_1d_array_v4f32_f32:
2252 case Intrinsic::nvvm_tex_1d_array_level_v4f32_f32:
2253 case Intrinsic::nvvm_tex_1d_array_grad_v4f32_f32:
2254 case Intrinsic::nvvm_tex_2d_v4f32_i32:
2255 case Intrinsic::nvvm_tex_2d_v4f32_f32:
2256 case Intrinsic::nvvm_tex_2d_level_v4f32_f32:
2257 case Intrinsic::nvvm_tex_2d_grad_v4f32_f32:
2258 case Intrinsic::nvvm_tex_2d_array_v4f32_i32:
2259 case Intrinsic::nvvm_tex_2d_array_v4f32_f32:
2260 case Intrinsic::nvvm_tex_2d_array_level_v4f32_f32:
2261 case Intrinsic::nvvm_tex_2d_array_grad_v4f32_f32:
2262 case Intrinsic::nvvm_tex_3d_v4f32_i32:
2263 case Intrinsic::nvvm_tex_3d_v4f32_f32:
2264 case Intrinsic::nvvm_tex_3d_level_v4f32_f32:
2265 case Intrinsic::nvvm_tex_3d_grad_v4f32_f32: {
2266 Info.opc = getOpcForTextureInstr(Intrinsic);
2267 Info.memVT = MVT::f32;
2268 Info.ptrVal = nullptr;
2271 Info.readMem = true;
2272 Info.writeMem = false;
// Texture reads returning v4i32: same shape as above with i32 elements.
2276 case Intrinsic::nvvm_tex_1d_v4i32_i32:
2277 case Intrinsic::nvvm_tex_1d_v4i32_f32:
2278 case Intrinsic::nvvm_tex_1d_level_v4i32_f32:
2279 case Intrinsic::nvvm_tex_1d_grad_v4i32_f32:
2280 case Intrinsic::nvvm_tex_1d_array_v4i32_i32:
2281 case Intrinsic::nvvm_tex_1d_array_v4i32_f32:
2282 case Intrinsic::nvvm_tex_1d_array_level_v4i32_f32:
2283 case Intrinsic::nvvm_tex_1d_array_grad_v4i32_f32:
2284 case Intrinsic::nvvm_tex_2d_v4i32_i32:
2285 case Intrinsic::nvvm_tex_2d_v4i32_f32:
2286 case Intrinsic::nvvm_tex_2d_level_v4i32_f32:
2287 case Intrinsic::nvvm_tex_2d_grad_v4i32_f32:
2288 case Intrinsic::nvvm_tex_2d_array_v4i32_i32:
2289 case Intrinsic::nvvm_tex_2d_array_v4i32_f32:
2290 case Intrinsic::nvvm_tex_2d_array_level_v4i32_f32:
2291 case Intrinsic::nvvm_tex_2d_array_grad_v4i32_f32:
2292 case Intrinsic::nvvm_tex_3d_v4i32_i32:
2293 case Intrinsic::nvvm_tex_3d_v4i32_f32:
2294 case Intrinsic::nvvm_tex_3d_level_v4i32_f32:
2295 case Intrinsic::nvvm_tex_3d_grad_v4i32_f32: {
2296 Info.opc = getOpcForTextureInstr(Intrinsic);
2297 Info.memVT = MVT::i32;
2298 Info.ptrVal = nullptr;
2301 Info.readMem = true;
2302 Info.writeMem = false;
// Surface loads of i8 elements: read-only, handle-addressed (no pointer).
2306 case Intrinsic::nvvm_suld_1d_i8_trap:
2307 case Intrinsic::nvvm_suld_1d_v2i8_trap:
2308 case Intrinsic::nvvm_suld_1d_v4i8_trap:
2309 case Intrinsic::nvvm_suld_1d_array_i8_trap:
2310 case Intrinsic::nvvm_suld_1d_array_v2i8_trap:
2311 case Intrinsic::nvvm_suld_1d_array_v4i8_trap:
2312 case Intrinsic::nvvm_suld_2d_i8_trap:
2313 case Intrinsic::nvvm_suld_2d_v2i8_trap:
2314 case Intrinsic::nvvm_suld_2d_v4i8_trap:
2315 case Intrinsic::nvvm_suld_2d_array_i8_trap:
2316 case Intrinsic::nvvm_suld_2d_array_v2i8_trap:
2317 case Intrinsic::nvvm_suld_2d_array_v4i8_trap:
2318 case Intrinsic::nvvm_suld_3d_i8_trap:
2319 case Intrinsic::nvvm_suld_3d_v2i8_trap:
2320 case Intrinsic::nvvm_suld_3d_v4i8_trap: {
2321 Info.opc = getOpcForSurfaceInstr(Intrinsic);
2322 Info.memVT = MVT::i8;
2323 Info.ptrVal = nullptr;
2326 Info.readMem = true;
2327 Info.writeMem = false;
// Surface loads of i16 elements.
2331 case Intrinsic::nvvm_suld_1d_i16_trap:
2332 case Intrinsic::nvvm_suld_1d_v2i16_trap:
2333 case Intrinsic::nvvm_suld_1d_v4i16_trap:
2334 case Intrinsic::nvvm_suld_1d_array_i16_trap:
2335 case Intrinsic::nvvm_suld_1d_array_v2i16_trap:
2336 case Intrinsic::nvvm_suld_1d_array_v4i16_trap:
2337 case Intrinsic::nvvm_suld_2d_i16_trap:
2338 case Intrinsic::nvvm_suld_2d_v2i16_trap:
2339 case Intrinsic::nvvm_suld_2d_v4i16_trap:
2340 case Intrinsic::nvvm_suld_2d_array_i16_trap:
2341 case Intrinsic::nvvm_suld_2d_array_v2i16_trap:
2342 case Intrinsic::nvvm_suld_2d_array_v4i16_trap:
2343 case Intrinsic::nvvm_suld_3d_i16_trap:
2344 case Intrinsic::nvvm_suld_3d_v2i16_trap:
2345 case Intrinsic::nvvm_suld_3d_v4i16_trap: {
2346 Info.opc = getOpcForSurfaceInstr(Intrinsic);
2347 Info.memVT = MVT::i16;
2348 Info.ptrVal = nullptr;
2351 Info.readMem = true;
2352 Info.writeMem = false;
// Surface loads of i32 elements.
2356 case Intrinsic::nvvm_suld_1d_i32_trap:
2357 case Intrinsic::nvvm_suld_1d_v2i32_trap:
2358 case Intrinsic::nvvm_suld_1d_v4i32_trap:
2359 case Intrinsic::nvvm_suld_1d_array_i32_trap:
2360 case Intrinsic::nvvm_suld_1d_array_v2i32_trap:
2361 case Intrinsic::nvvm_suld_1d_array_v4i32_trap:
2362 case Intrinsic::nvvm_suld_2d_i32_trap:
2363 case Intrinsic::nvvm_suld_2d_v2i32_trap:
2364 case Intrinsic::nvvm_suld_2d_v4i32_trap:
2365 case Intrinsic::nvvm_suld_2d_array_i32_trap:
2366 case Intrinsic::nvvm_suld_2d_array_v2i32_trap:
2367 case Intrinsic::nvvm_suld_2d_array_v4i32_trap:
2368 case Intrinsic::nvvm_suld_3d_i32_trap:
2369 case Intrinsic::nvvm_suld_3d_v2i32_trap:
2370 case Intrinsic::nvvm_suld_3d_v4i32_trap: {
2371 Info.opc = getOpcForSurfaceInstr(Intrinsic);
2372 Info.memVT = MVT::i32;
2373 Info.ptrVal = nullptr;
2376 Info.readMem = true;
2377 Info.writeMem = false;
2386 /// isLegalAddressingMode - Return true if the addressing mode represented
2387 /// by AM is legal for this target, for a load/store of the specified type.
2388 /// Used to guide target specific optimizations, like loop strength reduction
2389 /// (LoopStrengthReduce.cpp) and memory optimization for address mode
2390 /// (CodeGenPrepare.cpp)
2391 bool NVPTXTargetLowering::isLegalAddressingMode(const AddrMode &AM,
2394 // AddrMode - This represents an addressing mode of:
2395 // BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
2397 // The legal address modes are
// NOTE(review): when a base global (BaseGV) is present, no other addend is
// allowed -- the global must be the entire address.
2404 if (AM.BaseOffs || AM.HasBaseReg || AM.Scale)
2410 case 0: // "r", "r+i" or "i" is allowed
2413 if (AM.HasBaseReg) // "r+r+i" or "r+r" is not allowed.
2415 // Otherwise we have r+i.
2418 // No scale > 1 is allowed
2424 //===----------------------------------------------------------------------===//
2425 // NVPTX Inline Assembly Support
2426 //===----------------------------------------------------------------------===//
2428 /// getConstraintType - Given a constraint letter, return the type of
2429 /// constraint it is for this target.  NVPTX's single-letter register
2429 /// constraints map to C_RegisterClass; everything else is delegated to
2429 /// the generic TargetLowering implementation.
2430 NVPTXTargetLowering::ConstraintType
2431 NVPTXTargetLowering::getConstraintType(const std::string &Constraint) const {
2432 if (Constraint.size() == 1) {
2433 switch (Constraint[0]) {
// NOTE(review): the case labels are elided in this view; they presumably
// match the register letters handled in getRegForInlineAsmConstraint.
2444 return C_RegisterClass;
2447 return TargetLowering::getConstraintType(Constraint);
// getRegForInlineAsmConstraint - Map a single-letter inline-asm register
// constraint to an NVPTX register class (16/32/64-bit integer, 32/64-bit
// float).  Unhandled constraints fall through to the generic lowering.
// NOTE(review): the case labels are elided in this view -- the register
// classes below suggest the usual NVPTX letters (e.g. 'h'/'r'/'l'/'f'/'d');
// confirm against the full file.
2450 std::pair<unsigned, const TargetRegisterClass *>
2451 NVPTXTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
2453 if (Constraint.size() == 1) {
2454 switch (Constraint[0]) {
2456 return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
2458 return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
2460 return std::make_pair(0U, &NVPTX::Int32RegsRegClass);
2463 return std::make_pair(0U, &NVPTX::Int64RegsRegClass);
2465 return std::make_pair(0U, &NVPTX::Float32RegsRegClass);
2467 return std::make_pair(0U, &NVPTX::Float64RegsRegClass);
2470 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
2473 /// getFunctionAlignment - Return the Log2 alignment of this function.
2473 /// The Function argument is unused: NVPTX uses one fixed alignment for
2473 /// all functions (the returned constant is elided in this view).
2474 unsigned NVPTXTargetLowering::getFunctionAlignment(const Function *) const {
2478 /// ReplaceLoadVector - Convert vector loads into multi-output scalar loads.
2478 /// Emits an NVPTXISD::LoadV2/LoadV4 target node in place of a generic
2478 /// vector LOAD and rebuilds the vector result with BUILD_VECTOR, pushing
2478 /// the new value and chain into Results.
2479 static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
2480 SmallVectorImpl<SDValue> &Results) {
2481 EVT ResVT = N->getValueType(0);
2484 assert(ResVT.isVector() && "Vector load must have vector type");
2486 // We only handle "native" vector sizes for now, e.g. <4 x double> is not
2487 // legal. We can (and should) split that into 2 loads of <2 x double> here
2488 // but I'm leaving that as a TODO for now.
2489 assert(ResVT.isSimple() && "Can only handle simple types");
2490 switch (ResVT.getSimpleVT().SimpleTy) {
// (Non-native vector types return without replacement; the case labels
// are elided in this view.)
2503 // This is a "native" vector type
2507 EVT EltVT = ResVT.getVectorElementType();
2508 unsigned NumElts = ResVT.getVectorNumElements();
2510 // Since LoadV2 is a target node, we cannot rely on DAG type legalization.
2511 // Therefore, we must ensure the type is legal. For i1 and i8, we set the
2512 // loaded type to i16 and propagate the "real" type as the memory type.
2513 bool NeedTrunc = false;
2514 if (EltVT.getSizeInBits() < 16) {
// Pick LoadV2 or LoadV4 based on element count; the result VT list has
// one entry per element plus the trailing chain.
2519 unsigned Opcode = 0;
2526 Opcode = NVPTXISD::LoadV2;
2527 LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
2530 Opcode = NVPTXISD::LoadV4;
2531 EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
2532 LdResVTs = DAG.getVTList(ListVTs);
2537 SmallVector<SDValue, 8> OtherOps;
2539 // Copy regular operands
2540 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
2541 OtherOps.push_back(N->getOperand(i));
2543 LoadSDNode *LD = cast<LoadSDNode>(N);
2545 // The select routine does not have access to the LoadSDNode instance, so
2546 // pass along the extension information
2547 OtherOps.push_back(DAG.getIntPtrConstant(LD->getExtensionType()));
2549 SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps,
2551 LD->getMemOperand());
// Collect the scalar results, truncating the widened i16 temporaries back
// to the original narrow element type when NeedTrunc is set.
2553 SmallVector<SDValue, 4> ScalarRes;
2555 for (unsigned i = 0; i < NumElts; ++i) {
2556 SDValue Res = NewLD.getValue(i);
2558 Res = DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
2559 ScalarRes.push_back(Res);
// The chain is the value right after the last element.
2562 SDValue LoadChain = NewLD.getValue(NumElts);
2564 SDValue BuildVec = DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, ScalarRes);
2566 Results.push_back(BuildVec);
2567 Results.push_back(LoadChain);
// ReplaceINTRINSIC_W_CHAIN - Custom-legalize ldg/ldu global-load intrinsics.
// Vector results are rewritten to NVPTXISD::LDGV2/LDGV4 or LDUV2/LDUV4
// multi-output nodes and reassembled with BUILD_VECTOR; scalar i8 results
// are widened to i16 (with the memory type kept at i8) and truncated back.
2570 static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
2571 SmallVectorImpl<SDValue> &Results) {
2572 SDValue Chain = N->getOperand(0);
2573 SDValue Intrin = N->getOperand(1);
2576 // Get the intrinsic ID
2577 unsigned IntrinNo = cast<ConstantSDNode>(Intrin.getNode())->getZExtValue();
2581 case Intrinsic::nvvm_ldg_global_i:
2582 case Intrinsic::nvvm_ldg_global_f:
2583 case Intrinsic::nvvm_ldg_global_p:
2584 case Intrinsic::nvvm_ldu_global_i:
2585 case Intrinsic::nvvm_ldu_global_f:
2586 case Intrinsic::nvvm_ldu_global_p: {
2587 EVT ResVT = N->getValueType(0);
2589 if (ResVT.isVector()) {
2592 unsigned NumElts = ResVT.getVectorNumElements();
2593 EVT EltVT = ResVT.getVectorElementType();
2595 // Since LDU/LDG are target nodes, we cannot rely on DAG type
2597 // Therefore, we must ensure the type is legal. For i1 and i8, we set the
2598 // loaded type to i16 and propagate the "real" type as the memory type.
2599 bool NeedTrunc = false;
2600 if (EltVT.getSizeInBits() < 16) {
// Select the 2- or 4-element variant and LDG vs LDU based on the
// intrinsic ID (the surrounding element-count switch is elided here).
2605 unsigned Opcode = 0;
2615 case Intrinsic::nvvm_ldg_global_i:
2616 case Intrinsic::nvvm_ldg_global_f:
2617 case Intrinsic::nvvm_ldg_global_p:
2618 Opcode = NVPTXISD::LDGV2;
2620 case Intrinsic::nvvm_ldu_global_i:
2621 case Intrinsic::nvvm_ldu_global_f:
2622 case Intrinsic::nvvm_ldu_global_p:
2623 Opcode = NVPTXISD::LDUV2;
2626 LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
2632 case Intrinsic::nvvm_ldg_global_i:
2633 case Intrinsic::nvvm_ldg_global_f:
2634 case Intrinsic::nvvm_ldg_global_p:
2635 Opcode = NVPTXISD::LDGV4;
2637 case Intrinsic::nvvm_ldu_global_i:
2638 case Intrinsic::nvvm_ldu_global_f:
2639 case Intrinsic::nvvm_ldu_global_p:
2640 Opcode = NVPTXISD::LDUV4;
2643 EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
2644 LdResVTs = DAG.getVTList(ListVTs);
2649 SmallVector<SDValue, 8> OtherOps;
2651 // Copy regular operands
2653 OtherOps.push_back(Chain); // Chain
2654 // Skip operand 1 (intrinsic ID)
2656 for (unsigned i = 2, e = N->getNumOperands(); i != e; ++i)
2657 OtherOps.push_back(N->getOperand(i));
2659 MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);
2661 SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, OtherOps,
2662 MemSD->getMemoryVT(),
2663 MemSD->getMemOperand());
// Reassemble the vector result, truncating widened elements back to the
// original narrow element type when NeedTrunc is set.
2665 SmallVector<SDValue, 4> ScalarRes;
2667 for (unsigned i = 0; i < NumElts; ++i) {
2668 SDValue Res = NewLD.getValue(i);
2671 DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
2672 ScalarRes.push_back(Res);
2675 SDValue LoadChain = NewLD.getValue(NumElts);
2678 DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, ScalarRes);
2680 Results.push_back(BuildVec);
2681 Results.push_back(LoadChain);
// Scalar path: only i8 needs custom handling (i8 is not a legal NVPTX
// register type, so the load result is forced to i16).
2684 assert(ResVT.isSimple() && ResVT.getSimpleVT().SimpleTy == MVT::i8 &&
2685 "Custom handling of non-i8 ldu/ldg?");
2687 // Just copy all operands as-is
2688 SmallVector<SDValue, 4> Ops;
2689 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
2690 Ops.push_back(N->getOperand(i));
2692 // Force output to i16
2693 SDVTList LdResVTs = DAG.getVTList(MVT::i16, MVT::Other);
2695 MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);
2697 // We make sure the memory type is i8, which will be used during isel
2698 // to select the proper instruction.
2700 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, LdResVTs, Ops,
2701 MVT::i8, MemSD->getMemOperand());
// Truncate the widened i16 result back to the i8 the callers expect.
2703 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
2704 NewLD.getValue(0)));
2705 Results.push_back(NewLD.getValue(1));
// ReplaceNodeResults - Custom legalization entry point: dispatch nodes whose
// results need target-specific replacement.  Vector LOADs go to
// ReplaceLoadVector and chained intrinsics (ldg/ldu) go to
// ReplaceINTRINSIC_W_CHAIN; anything else is a fatal error.
2711 void NVPTXTargetLowering::ReplaceNodeResults(
2712 SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
2713 switch (N->getOpcode()) {
2715 report_fatal_error("Unhandled custom legalization");
2717 ReplaceLoadVector(N, DAG, Results);
2719 case ISD::INTRINSIC_W_CHAIN:
2720 ReplaceINTRINSIC_W_CHAIN(N, DAG, Results);
2725 // Pin NVPTXSection's and NVPTXTargetObjectFile's vtables to this file.
// Out-of-line "key function": giving the class one non-inline virtual
// function definition here keeps the vtable from being emitted in every
// translation unit that includes the header.
2726 void NVPTXSection::anchor() {}
2728 NVPTXTargetObjectFile::~NVPTXTargetObjectFile() {
2732 delete ReadOnlySection;
2734 delete StaticCtorSection;
2735 delete StaticDtorSection;
2737 delete EHFrameSection;
2738 delete DwarfAbbrevSection;
2739 delete DwarfInfoSection;
2740 delete DwarfLineSection;
2741 delete DwarfFrameSection;
2742 delete DwarfPubTypesSection;
2743 delete DwarfDebugInlineSection;
2744 delete DwarfStrSection;
2745 delete DwarfLocSection;
2746 delete DwarfARangesSection;
2747 delete DwarfRangesSection;
2748 delete DwarfMacroInfoSection;