2 // The LLVM Compiler Infrastructure
4 // This file is distributed under the University of Illinois Open Source
5 // License. See LICENSE.TXT for details.
7 //===----------------------------------------------------------------------===//
9 // This file defines the interfaces that NVPTX uses to lower LLVM code into a
12 //===----------------------------------------------------------------------===//
14 #include "NVPTXISelLowering.h"
16 #include "NVPTXTargetMachine.h"
17 #include "NVPTXTargetObjectFile.h"
18 #include "NVPTXUtilities.h"
19 #include "llvm/CodeGen/Analysis.h"
20 #include "llvm/CodeGen/MachineFrameInfo.h"
21 #include "llvm/CodeGen/MachineFunction.h"
22 #include "llvm/CodeGen/MachineInstrBuilder.h"
23 #include "llvm/CodeGen/MachineRegisterInfo.h"
24 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
25 #include "llvm/IR/DerivedTypes.h"
26 #include "llvm/IR/Function.h"
27 #include "llvm/IR/GlobalValue.h"
28 #include "llvm/IR/IntrinsicInst.h"
29 #include "llvm/IR/Intrinsics.h"
30 #include "llvm/IR/Module.h"
31 #include "llvm/MC/MCSectionELF.h"
32 #include "llvm/Support/CallSite.h"
33 #include "llvm/Support/CommandLine.h"
34 #include "llvm/Support/Debug.h"
35 #include "llvm/Support/ErrorHandling.h"
36 #include "llvm/Support/raw_ostream.h"
40 #define DEBUG_TYPE "nvptx-lower"
// Per-call-site counter used to give each emitted call prototype a unique
// label ("prototype_<N>"); read by getPrototype and LowerCall below.
// NOTE(review): the increment site is not visible in this excerpt --
// presumably bumped once per lowered call; confirm against the full file.
44 static unsigned int uniqueCallSite = 0;
46 static cl::opt<bool> sched4reg(
48 cl::desc("NVPTX Specific: schedule for register pressue"), cl::init(false));
// Predicate used by the constructor to decide which vector MVTs get custom
// LOAD/STORE/INTRINSIC_W_CHAIN lowering (see the vector loop below).
// NOTE(review): the switch cases and closing brace are elided in this
// excerpt; the supported-type list cannot be documented from here.
50 static bool IsPTXVectorType(MVT VT) {
51 switch (VT.SimpleTy) {
70 /// ComputePTXValueVTs - For the given Type \p Ty, returns the set of primitive
71 /// EVTs that compose it. Unlike ComputeValueVTs, this will break apart vectors
72 /// into their primitive components.
73 /// NOTE: This is a band-aid for code that expects ComputeValueVTs to return the
74 /// same number of types as the Ins/Outs arrays in LowerFormalArguments,
75 /// LowerCall, and LowerReturn.
/// \param Offsets optional (may be null); when non-null, receives the byte
///        offset of each produced EVT, with vector elements spaced by the
///        element's store size.
/// \param StartingOffset byte offset added to every reported offset.
76 static void ComputePTXValueVTs(const TargetLowering &TLI, Type *Ty,
77 SmallVectorImpl<EVT> &ValueVTs,
78 SmallVectorImpl<uint64_t> *Offsets = 0,
79 uint64_t StartingOffset = 0) {
// First get the regular decomposition, then post-process it below to split
// any vector parts into scalar elements.
80 SmallVector<EVT, 16> TempVTs;
81 SmallVector<uint64_t, 16> TempOffsets;
83 ComputeValueVTs(TLI, Ty, TempVTs, &TempOffsets, StartingOffset);
84 for (unsigned i = 0, e = TempVTs.size(); i != e; ++i) {
// NOTE(review): the `EVT VT = TempVTs[i];`, the isVector() guard, the null
// checks on Offsets, and the closing braces are elided in this excerpt.
86 uint64_t Off = TempOffsets[i];
// Vector part: emit one entry per element, offset by element store size.
88 for (unsigned j = 0, je = VT.getVectorNumElements(); j != je; ++j) {
89 ValueVTs.push_back(VT.getVectorElementType());
91 Offsets->push_back(Off+j*VT.getVectorElementType().getStoreSize());
// Scalar part: pass straight through.
94 ValueVTs.push_back(VT);
96 Offsets->push_back(Off);
101 // NVPTXTargetLowering Constructor.
// Sets up legalization actions, register classes, and scheduling for the
// NVPTX target. Many `else`/closing-brace lines are elided in this excerpt.
102 NVPTXTargetLowering::NVPTXTargetLowering(NVPTXTargetMachine &TM)
103 : TargetLowering(TM, new NVPTXTargetObjectFile()), nvTM(&TM),
104 nvptxSubtarget(TM.getSubtarget<NVPTXSubtarget>()) {
// Always lower memset, memcpy, and memmove intrinsics to load/store
// instructions, rather than generating calls to memset, memcpy or memmove.
109 MaxStoresPerMemset = (unsigned) 0xFFFFFFFF;
110 MaxStoresPerMemcpy = (unsigned) 0xFFFFFFFF;
111 MaxStoresPerMemmove = (unsigned) 0xFFFFFFFF;
113 setBooleanContents(ZeroOrNegativeOneBooleanContent);
115 // Jump is Expensive. Don't create extra control flow for 'and', 'or'
116 // condition branches.
117 setJumpIsExpensive(true);
119 // By default, use the Source scheduling
// NOTE(review): both preference calls appear back-to-back here; the guard
// selecting RegPressure (presumably the sched4reg flag) and its else branch
// are elided in this excerpt -- confirm against the full file.
121 setSchedulingPreference(Sched::RegPressure);
123 setSchedulingPreference(Sched::Source);
// Register classes for each directly-supported scalar type.
125 addRegisterClass(MVT::i1, &NVPTX::Int1RegsRegClass);
126 addRegisterClass(MVT::i16, &NVPTX::Int16RegsRegClass);
127 addRegisterClass(MVT::i32, &NVPTX::Int32RegsRegClass);
128 addRegisterClass(MVT::i64, &NVPTX::Int64RegsRegClass);
129 addRegisterClass(MVT::f32, &NVPTX::Float32RegsRegClass);
130 addRegisterClass(MVT::f64, &NVPTX::Float64RegsRegClass);
132 // Operations not directly supported by NVPTX.
133 setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
134 setOperationAction(ISD::BR_CC, MVT::f32, Expand);
135 setOperationAction(ISD::BR_CC, MVT::f64, Expand);
136 setOperationAction(ISD::BR_CC, MVT::i1, Expand);
137 setOperationAction(ISD::BR_CC, MVT::i8, Expand);
138 setOperationAction(ISD::BR_CC, MVT::i16, Expand);
139 setOperationAction(ISD::BR_CC, MVT::i32, Expand);
140 setOperationAction(ISD::BR_CC, MVT::i64, Expand);
141 // Some SIGN_EXTEND_INREG can be done using cvt instruction.
142 // For others we will expand to a SHL/SRA pair.
143 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i64, Legal);
144 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
145 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
146 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);
147 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
// 64-bit rotates are Legal only when the subtarget reports native support;
// otherwise they are expanded. NOTE(review): the `} else {` lines between
// the Legal and Expand pairs are elided in this excerpt.
149 if (nvptxSubtarget.hasROT64()) {
150 setOperationAction(ISD::ROTL, MVT::i64, Legal);
151 setOperationAction(ISD::ROTR, MVT::i64, Legal);
153 setOperationAction(ISD::ROTL, MVT::i64, Expand);
154 setOperationAction(ISD::ROTR, MVT::i64, Expand);
// Same pattern for 32-bit rotates.
156 if (nvptxSubtarget.hasROT32()) {
157 setOperationAction(ISD::ROTL, MVT::i32, Legal);
158 setOperationAction(ISD::ROTR, MVT::i32, Legal);
160 setOperationAction(ISD::ROTL, MVT::i32, Expand);
161 setOperationAction(ISD::ROTR, MVT::i32, Expand);
// Sub-32-bit rotates and all byte swaps are always expanded.
164 setOperationAction(ISD::ROTL, MVT::i16, Expand);
165 setOperationAction(ISD::ROTR, MVT::i16, Expand);
166 setOperationAction(ISD::ROTL, MVT::i8, Expand);
167 setOperationAction(ISD::ROTR, MVT::i8, Expand);
168 setOperationAction(ISD::BSWAP, MVT::i16, Expand);
169 setOperationAction(ISD::BSWAP, MVT::i32, Expand);
170 setOperationAction(ISD::BSWAP, MVT::i64, Expand);
172 // Indirect branch is not supported.
173 // This also disables Jump Table creation.
174 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
175 setOperationAction(ISD::BRIND, MVT::Other, Expand);
// Global addresses are wrapped via LowerGlobalAddress (NVPTXISD::Wrapper).
177 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
178 setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
180 // We want to legalize constant related memmove and memcopy
182 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
184 // Turn FP extload into load/fextend
185 setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
186 // Turn FP truncstore into trunc + store.
187 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
189 // PTX does not support load / store predicate registers
190 setOperationAction(ISD::LOAD, MVT::i1, Custom);
191 setOperationAction(ISD::STORE, MVT::i1, Custom);
// Extending i1 loads are promoted; truncating stores to i1 are expanded.
193 setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
194 setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
195 setTruncStoreAction(MVT::i64, MVT::i1, Expand);
196 setTruncStoreAction(MVT::i32, MVT::i1, Expand);
197 setTruncStoreAction(MVT::i16, MVT::i1, Expand);
198 setTruncStoreAction(MVT::i8, MVT::i1, Expand);
200 // This is legal in NVPTX
201 setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
202 setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
204 // TRAP can be lowered to PTX trap
205 setOperationAction(ISD::TRAP, MVT::Other, Legal);
// 64-bit add-with-carry is not supported; expand it.
207 setOperationAction(ISD::ADDC, MVT::i64, Expand);
208 setOperationAction(ISD::ADDE, MVT::i64, Expand);
210 // Register custom handling for vector loads/stores
211 for (int i = MVT::FIRST_VECTOR_VALUETYPE; i <= MVT::LAST_VECTOR_VALUETYPE;
213 MVT VT = (MVT::SimpleValueType) i;
214 if (IsPTXVectorType(VT)) {
215 setOperationAction(ISD::LOAD, VT, Custom);
216 setOperationAction(ISD::STORE, VT, Custom);
217 setOperationAction(ISD::INTRINSIC_W_CHAIN, VT, Custom);
221 // Custom handling for i8 intrinsics
222 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
// Count-leading-zeros and popcount are Legal; count-trailing-zeros is not
// and gets expanded.
224 setOperationAction(ISD::CTLZ, MVT::i16, Legal);
225 setOperationAction(ISD::CTLZ, MVT::i32, Legal);
226 setOperationAction(ISD::CTLZ, MVT::i64, Legal);
227 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Legal);
228 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Legal);
229 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Legal);
230 setOperationAction(ISD::CTTZ, MVT::i16, Expand);
231 setOperationAction(ISD::CTTZ, MVT::i32, Expand);
232 setOperationAction(ISD::CTTZ, MVT::i64, Expand);
233 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Expand);
234 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
235 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
236 setOperationAction(ISD::CTPOP, MVT::i16, Legal);
237 setOperationAction(ISD::CTPOP, MVT::i32, Legal);
238 setOperationAction(ISD::CTPOP, MVT::i64, Legal);
240 // Now deduce the information based on the above mentioned
// actions: derives register/value-type properties from the settings above.
242 computeRegisterProperties();
// Returns the printable name for an NVPTX-specific SelectionDAG node opcode
// (used by DAG debug dumps). NOTE(review): the opening `switch (Opcode)` and
// its default case are elided in this excerpt.
245 const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const {
250 return "NVPTXISD::CALL";
251 case NVPTXISD::RET_FLAG:
252 return "NVPTXISD::RET_FLAG";
253 case NVPTXISD::Wrapper:
254 return "NVPTXISD::Wrapper";
255 case NVPTXISD::DeclareParam:
256 return "NVPTXISD::DeclareParam";
257 case NVPTXISD::DeclareScalarParam:
258 return "NVPTXISD::DeclareScalarParam";
259 case NVPTXISD::DeclareRet:
260 return "NVPTXISD::DeclareRet";
261 case NVPTXISD::DeclareRetParam:
262 return "NVPTXISD::DeclareRetParam";
263 case NVPTXISD::PrintCall:
264 return "NVPTXISD::PrintCall";
265 case NVPTXISD::LoadParam:
266 return "NVPTXISD::LoadParam";
267 case NVPTXISD::LoadParamV2:
268 return "NVPTXISD::LoadParamV2";
269 case NVPTXISD::LoadParamV4:
270 return "NVPTXISD::LoadParamV4";
271 case NVPTXISD::StoreParam:
272 return "NVPTXISD::StoreParam";
273 case NVPTXISD::StoreParamV2:
274 return "NVPTXISD::StoreParamV2";
275 case NVPTXISD::StoreParamV4:
276 return "NVPTXISD::StoreParamV4";
277 case NVPTXISD::StoreParamS32:
278 return "NVPTXISD::StoreParamS32";
279 case NVPTXISD::StoreParamU32:
280 return "NVPTXISD::StoreParamU32";
281 case NVPTXISD::CallArgBegin:
282 return "NVPTXISD::CallArgBegin";
283 case NVPTXISD::CallArg:
284 return "NVPTXISD::CallArg";
285 case NVPTXISD::LastCallArg:
286 return "NVPTXISD::LastCallArg";
287 case NVPTXISD::CallArgEnd:
288 return "NVPTXISD::CallArgEnd";
289 case NVPTXISD::CallVoid:
290 return "NVPTXISD::CallVoid";
291 case NVPTXISD::CallVal:
292 return "NVPTXISD::CallVal";
293 case NVPTXISD::CallSymbol:
294 return "NVPTXISD::CallSymbol";
295 case NVPTXISD::Prototype:
296 return "NVPTXISD::Prototype";
297 case NVPTXISD::MoveParam:
298 return "NVPTXISD::MoveParam";
299 case NVPTXISD::StoreRetval:
300 return "NVPTXISD::StoreRetval";
301 case NVPTXISD::StoreRetvalV2:
302 return "NVPTXISD::StoreRetvalV2";
303 case NVPTXISD::StoreRetvalV4:
304 return "NVPTXISD::StoreRetvalV4";
305 case NVPTXISD::PseudoUseParam:
306 return "NVPTXISD::PseudoUseParam";
307 case NVPTXISD::RETURN:
308 return "NVPTXISD::RETURN";
309 case NVPTXISD::CallSeqBegin:
310 return "NVPTXISD::CallSeqBegin";
311 case NVPTXISD::CallSeqEnd:
312 return "NVPTXISD::CallSeqEnd";
313 case NVPTXISD::LoadV2:
314 return "NVPTXISD::LoadV2";
315 case NVPTXISD::LoadV4:
316 return "NVPTXISD::LoadV4";
317 case NVPTXISD::LDGV2:
318 return "NVPTXISD::LDGV2";
319 case NVPTXISD::LDGV4:
320 return "NVPTXISD::LDGV4";
321 case NVPTXISD::LDUV2:
322 return "NVPTXISD::LDUV2";
323 case NVPTXISD::LDUV4:
324 return "NVPTXISD::LDUV4";
325 case NVPTXISD::StoreV2:
326 return "NVPTXISD::StoreV2";
327 case NVPTXISD::StoreV4:
328 return "NVPTXISD::StoreV4";
// Only i1 vector elements are split out: i1 loads/stores are custom-lowered
// above (PTX has no predicate-register load/store), so i1 vectors must be
// handled element-wise.
332 bool NVPTXTargetLowering::shouldSplitVectorElementType(EVT VT) const {
333 return VT == MVT::i1;
// Custom lowering for ISD::GlobalAddress (registered in the constructor):
// wraps the target global address in an NVPTXISD::Wrapper node.
// NOTE(review): the `SDValue` return-type line and the definition of `dl`
// (presumably `SDLoc dl(Op);`) are elided in this excerpt.
337 NVPTXTargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
339 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
340 Op = DAG.getTargetGlobalAddress(GV, dl, getPointerTy());
341 return DAG.getNode(NVPTXISD::Wrapper, dl, getPointerTy(), Op);
// Builds the PTX ".callprototype" string ("prototype_<N> : .callprototype
// ...") describing the return value and each parameter of a call; used by
// LowerCall for indirect calls, where PTX requires an explicit prototype.
// NOTE(review): many interior lines (stream declaration `O`, else branches,
// closing braces, and the final `return O.str();`) are elided in this
// excerpt; comments below annotate only what is visible.
345 NVPTXTargetLowering::getPrototype(Type *retTy, const ArgListTy &Args,
346 const SmallVectorImpl<ISD::OutputArg> &Outs,
347 unsigned retAlignment,
348 const ImmutableCallSite *CS) const {
// Only the ABI (sm_20+) calling convention is supported.
350 bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
351 assert(isABI && "Non-ABI compilation is not supported");
356 O << "prototype_" << uniqueCallSite << " : .callprototype ";
// --- Return-value portion of the prototype ---
358 if (retTy->getTypeID() == Type::VoidTyID) {
// Scalar return: emit ".param .b<bits> _" sized from the integer width or
// the floating-point primitive size.
362 if (retTy->isPrimitiveType() || retTy->isIntegerTy()) {
364 if (const IntegerType *ITy = dyn_cast<IntegerType>(retTy)) {
365 size = ITy->getBitWidth();
369 assert(retTy->isFloatingPointTy() &&
370 "Floating point type expected here");
371 size = retTy->getPrimitiveSizeInBits();
374 O << ".param .b" << size << " _";
// Pointer return: sized by the target pointer width.
375 } else if (isa<PointerType>(retTy)) {
376 O << ".param .b" << getPointerTy().getSizeInBits() << " _";
// Aggregate/vector return: emit an aligned byte array covering all parts.
378 if ((retTy->getTypeID() == Type::StructTyID) || isa<VectorType>(retTy)) {
379 SmallVector<EVT, 16> vtparts;
380 ComputeValueVTs(*this, retTy, vtparts);
381 unsigned totalsz = 0;
382 for (unsigned i = 0, e = vtparts.size(); i != e; ++i) {
384 EVT elemtype = vtparts[i];
385 if (vtparts[i].isVector()) {
386 elems = vtparts[i].getVectorNumElements();
387 elemtype = vtparts[i].getVectorElementType();
389 // TODO: no need to loop
390 for (unsigned j = 0, je = elems; j != je; ++j) {
391 unsigned sz = elemtype.getSizeInBits();
// Sub-byte integers are rounded up to at least 8 bits (the rounding
// statement itself is elided here).
392 if (elemtype.isInteger() && (sz < 8))
397 O << ".param .align " << retAlignment << " .b8 _[" << totalsz << "]";
399 assert(false && "Unknown return type");
// --- Parameter portion of the prototype ---
407 MVT thePointerTy = getPointerTy();
// OIdx tracks the position in Outs, which may advance faster than the Args
// index when an argument decomposes into multiple parts.
410 for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {
411 Type *Ty = Args[i].Ty;
417 if (Outs[OIdx].Flags.isByVal() == false) {
// Non-byval aggregate/vector argument: aligned byte-array param.
418 if (Ty->isAggregateType() || Ty->isVectorTy()) {
420 const CallInst *CallI = cast<CallInst>(CS->getInstruction());
421 const DataLayout *TD = getDataLayout();
422 // +1 because index 0 is reserved for return type alignment
423 if (!llvm::getAlign(*CallI, i + 1, align))
424 align = TD->getABITypeAlignment(Ty);
425 unsigned sz = TD->getTypeAllocSize(Ty);
426 O << ".param .align " << align << " .b8 ";
428 O << "[" << sz << "]";
429 // update the index for Outs
430 SmallVector<EVT, 16> vtparts;
431 ComputeValueVTs(*this, Ty, vtparts);
// Skip past the extra Outs entries this argument produced (the OIdx
// adjustment statement is elided here).
432 if (unsigned len = vtparts.size())
436 // i8 types in IR will be i16 types in SDAG
437 assert((getValueType(Ty) == Outs[OIdx].VT ||
438 (getValueType(Ty) == MVT::i8 && Outs[OIdx].VT == MVT::i16)) &&
439 "type mismatch between callee prototype and arguments");
// Scalar argument: ".param .b<bits>" sized by integer width, pointer
// width, or primitive size.
442 if (isa<IntegerType>(Ty)) {
443 sz = cast<IntegerType>(Ty)->getBitWidth();
446 } else if (isa<PointerType>(Ty))
447 sz = thePointerTy.getSizeInBits();
449 sz = Ty->getPrimitiveSizeInBits();
450 O << ".param .b" << sz << " ";
// Byval argument: must be a pointer; the pointee type determines the
// param array size, alignment comes from the byval attribute.
454 const PointerType *PTy = dyn_cast<PointerType>(Ty);
455 assert(PTy && "Param with byval attribute should be a pointer type");
456 Type *ETy = PTy->getElementType();
458 unsigned align = Outs[OIdx].Flags.getByValAlign();
459 unsigned sz = getDataLayout()->getTypeAllocSize(ETy);
460 O << ".param .align " << align << " .b8 ";
462 O << "[" << sz << "]";
// Determines the alignment to use for call argument/return slot Idx
// (Idx 0 is the return value): prefer explicit alignment metadata on the
// call or on the resolved callee Function, looking through constant-expr
// bitcasts of the call target; otherwise fall back to ABI type alignment.
// NOTE(review): the `unsigned Align` declaration, several return/brace
// lines, and the bitcast-opcode check inside the while loop are elided in
// this excerpt.
469 NVPTXTargetLowering::getArgumentAlignment(SDValue Callee,
470 const ImmutableCallSite *CS,
472 unsigned Idx) const {
473 const DataLayout *TD = getDataLayout();
475 const Value *DirectCallee = CS->getCalledFunction();
478 // We don't have a direct function symbol, but that may be because of
479 // constant cast instructions in the call.
480 const Instruction *CalleeI = CS->getInstruction();
481 assert(CalleeI && "Call target is not a function or derived value?");
483 // With bitcast'd call targets, the instruction will be the call
484 if (isa<CallInst>(CalleeI)) {
485 // Check if we have call alignment metadata
486 if (llvm::getAlign(*cast<CallInst>(CalleeI), Idx, Align))
489 const Value *CalleeV = cast<CallInst>(CalleeI)->getCalledValue();
490 // Ignore any bitcast instructions
491 while(isa<ConstantExpr>(CalleeV)) {
492 const ConstantExpr *CE = cast<ConstantExpr>(CalleeV);
495 // Look through the bitcast
496 CalleeV = cast<ConstantExpr>(CalleeV)->getOperand(0);
499 // We have now looked past all of the bitcasts. Do we finally have a
501 if (isa<Function>(CalleeV))
502 DirectCallee = CalleeV;
506 // Check for function alignment information if we found that the
507 // ultimate target is a Function
509 if (llvm::getAlign(*cast<Function>(DirectCallee), Idx, Align))
512 // Call is indirect or alignment information is not available, fall back to
513 // the ABI type alignment
514 return TD->getABITypeAlignment(Ty);
517 SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
518 SmallVectorImpl<SDValue> &InVals) const {
519 SelectionDAG &DAG = CLI.DAG;
521 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
522 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
523 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
524 SDValue Chain = CLI.Chain;
525 SDValue Callee = CLI.Callee;
526 bool &isTailCall = CLI.IsTailCall;
527 ArgListTy &Args = CLI.Args;
528 Type *retTy = CLI.RetTy;
529 ImmutableCallSite *CS = CLI.CS;
531 bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
532 assert(isABI && "Non-ABI compilation is not supported");
535 const DataLayout *TD = getDataLayout();
536 MachineFunction &MF = DAG.getMachineFunction();
537 const Function *F = MF.getFunction();
539 SDValue tempChain = Chain;
541 DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(uniqueCallSite, true),
543 SDValue InFlag = Chain.getValue(1);
545 unsigned paramCount = 0;
546 // Args.size() and Outs.size() need not match.
547 // Outs.size() will be larger
548 // * if there is an aggregate argument with multiple fields (each field
549 // showing up separately in Outs)
550 // * if there is a vector argument with more than typical vector-length
551 // elements (generally if more than 4) where each vector element is
552 // individually present in Outs.
553 // So a different index should be used for indexing into Outs/OutVals.
554 // See similar issue in LowerFormalArguments.
556 // Declare the .params or .reg need to pass values
558 for (unsigned i = 0, e = Args.size(); i != e; ++i, ++OIdx) {
559 EVT VT = Outs[OIdx].VT;
560 Type *Ty = Args[i].Ty;
562 if (Outs[OIdx].Flags.isByVal() == false) {
563 if (Ty->isAggregateType()) {
565 SmallVector<EVT, 16> vtparts;
566 ComputeValueVTs(*this, Ty, vtparts);
568 unsigned align = getArgumentAlignment(Callee, CS, Ty, paramCount + 1);
569 // declare .param .align <align> .b8 .param<n>[<size>];
570 unsigned sz = TD->getTypeAllocSize(Ty);
571 SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
572 SDValue DeclareParamOps[] = { Chain, DAG.getConstant(align, MVT::i32),
573 DAG.getConstant(paramCount, MVT::i32),
574 DAG.getConstant(sz, MVT::i32), InFlag };
575 Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
577 InFlag = Chain.getValue(1);
578 unsigned curOffset = 0;
579 for (unsigned j = 0, je = vtparts.size(); j != je; ++j) {
581 EVT elemtype = vtparts[j];
582 if (vtparts[j].isVector()) {
583 elems = vtparts[j].getVectorNumElements();
584 elemtype = vtparts[j].getVectorElementType();
586 for (unsigned k = 0, ke = elems; k != ke; ++k) {
587 unsigned sz = elemtype.getSizeInBits();
588 if (elemtype.isInteger() && (sz < 8))
590 SDValue StVal = OutVals[OIdx];
591 if (elemtype.getSizeInBits() < 16) {
592 StVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, StVal);
594 SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
595 SDValue CopyParamOps[] = { Chain,
596 DAG.getConstant(paramCount, MVT::i32),
597 DAG.getConstant(curOffset, MVT::i32),
599 Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl,
600 CopyParamVTs, &CopyParamOps[0], 5,
601 elemtype, MachinePointerInfo());
602 InFlag = Chain.getValue(1);
607 if (vtparts.size() > 0)
612 if (Ty->isVectorTy()) {
613 EVT ObjectVT = getValueType(Ty);
614 unsigned align = getArgumentAlignment(Callee, CS, Ty, paramCount + 1);
615 // declare .param .align <align> .b8 .param<n>[<size>];
616 unsigned sz = TD->getTypeAllocSize(Ty);
617 SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
618 SDValue DeclareParamOps[] = { Chain, DAG.getConstant(align, MVT::i32),
619 DAG.getConstant(paramCount, MVT::i32),
620 DAG.getConstant(sz, MVT::i32), InFlag };
621 Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
623 InFlag = Chain.getValue(1);
624 unsigned NumElts = ObjectVT.getVectorNumElements();
625 EVT EltVT = ObjectVT.getVectorElementType();
627 bool NeedExtend = false;
628 if (EltVT.getSizeInBits() < 16) {
635 SDValue Elt = OutVals[OIdx++];
637 Elt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Elt);
639 SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
640 SDValue CopyParamOps[] = { Chain,
641 DAG.getConstant(paramCount, MVT::i32),
642 DAG.getConstant(0, MVT::i32), Elt,
644 Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl,
645 CopyParamVTs, &CopyParamOps[0], 5,
646 MemVT, MachinePointerInfo());
647 InFlag = Chain.getValue(1);
648 } else if (NumElts == 2) {
649 SDValue Elt0 = OutVals[OIdx++];
650 SDValue Elt1 = OutVals[OIdx++];
652 Elt0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Elt0);
653 Elt1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Elt1);
656 SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
657 SDValue CopyParamOps[] = { Chain,
658 DAG.getConstant(paramCount, MVT::i32),
659 DAG.getConstant(0, MVT::i32), Elt0, Elt1,
661 Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParamV2, dl,
662 CopyParamVTs, &CopyParamOps[0], 6,
663 MemVT, MachinePointerInfo());
664 InFlag = Chain.getValue(1);
666 unsigned curOffset = 0;
668 // We have at least 4 elements (<3 x Ty> expands to 4 elements) and
670 // vector will be expanded to a power of 2 elements, so we know we can
671 // always round up to the next multiple of 4 when creating the vector
673 // e.g. 4 elem => 1 st.v4
676 // 11 elem => 3 st.v4
677 unsigned VecSize = 4;
678 if (EltVT.getSizeInBits() == 64)
681 // This is potentially only part of a vector, so assume all elements
682 // are packed together.
683 unsigned PerStoreOffset = MemVT.getStoreSizeInBits() / 8 * VecSize;
685 for (unsigned i = 0; i < NumElts; i += VecSize) {
688 SmallVector<SDValue, 8> Ops;
689 Ops.push_back(Chain);
690 Ops.push_back(DAG.getConstant(paramCount, MVT::i32));
691 Ops.push_back(DAG.getConstant(curOffset, MVT::i32));
693 unsigned Opc = NVPTXISD::StoreParamV2;
695 StoreVal = OutVals[OIdx++];
697 StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
698 Ops.push_back(StoreVal);
700 if (i + 1 < NumElts) {
701 StoreVal = OutVals[OIdx++];
704 DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
706 StoreVal = DAG.getUNDEF(EltVT);
708 Ops.push_back(StoreVal);
711 Opc = NVPTXISD::StoreParamV4;
712 if (i + 2 < NumElts) {
713 StoreVal = OutVals[OIdx++];
716 DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
718 StoreVal = DAG.getUNDEF(EltVT);
720 Ops.push_back(StoreVal);
722 if (i + 3 < NumElts) {
723 StoreVal = OutVals[OIdx++];
726 DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
728 StoreVal = DAG.getUNDEF(EltVT);
730 Ops.push_back(StoreVal);
733 Ops.push_back(InFlag);
735 SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
736 Chain = DAG.getMemIntrinsicNode(Opc, dl, CopyParamVTs, &Ops[0],
738 MachinePointerInfo());
739 InFlag = Chain.getValue(1);
740 curOffset += PerStoreOffset;
748 // for ABI, declare .param .b<size> .param<n>;
749 unsigned sz = VT.getSizeInBits();
750 bool needExtend = false;
751 if (VT.isInteger()) {
757 SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
758 SDValue DeclareParamOps[] = { Chain,
759 DAG.getConstant(paramCount, MVT::i32),
760 DAG.getConstant(sz, MVT::i32),
761 DAG.getConstant(0, MVT::i32), InFlag };
762 Chain = DAG.getNode(NVPTXISD::DeclareScalarParam, dl, DeclareParamVTs,
764 InFlag = Chain.getValue(1);
765 SDValue OutV = OutVals[OIdx];
767 // zext/sext i1 to i16
768 unsigned opc = ISD::ZERO_EXTEND;
769 if (Outs[OIdx].Flags.isSExt())
770 opc = ISD::SIGN_EXTEND;
771 OutV = DAG.getNode(opc, dl, MVT::i16, OutV);
773 SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
774 SDValue CopyParamOps[] = { Chain, DAG.getConstant(paramCount, MVT::i32),
775 DAG.getConstant(0, MVT::i32), OutV, InFlag };
777 unsigned opcode = NVPTXISD::StoreParam;
778 if (Outs[OIdx].Flags.isZExt())
779 opcode = NVPTXISD::StoreParamU32;
780 else if (Outs[OIdx].Flags.isSExt())
781 opcode = NVPTXISD::StoreParamS32;
782 Chain = DAG.getMemIntrinsicNode(opcode, dl, CopyParamVTs, CopyParamOps, 5,
783 VT, MachinePointerInfo());
785 InFlag = Chain.getValue(1);
790 SmallVector<EVT, 16> vtparts;
791 const PointerType *PTy = dyn_cast<PointerType>(Args[i].Ty);
792 assert(PTy && "Type of a byval parameter should be pointer");
793 ComputeValueVTs(*this, PTy->getElementType(), vtparts);
795 // declare .param .align <align> .b8 .param<n>[<size>];
796 unsigned sz = Outs[OIdx].Flags.getByValSize();
797 SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
798 // The ByValAlign in the Outs[OIdx].Flags is alway set at this point,
799 // so we don't need to worry about natural alignment or not.
800 // See TargetLowering::LowerCallTo().
801 SDValue DeclareParamOps[] = {
802 Chain, DAG.getConstant(Outs[OIdx].Flags.getByValAlign(), MVT::i32),
803 DAG.getConstant(paramCount, MVT::i32), DAG.getConstant(sz, MVT::i32),
806 Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
808 InFlag = Chain.getValue(1);
809 unsigned curOffset = 0;
810 for (unsigned j = 0, je = vtparts.size(); j != je; ++j) {
812 EVT elemtype = vtparts[j];
813 if (vtparts[j].isVector()) {
814 elems = vtparts[j].getVectorNumElements();
815 elemtype = vtparts[j].getVectorElementType();
817 for (unsigned k = 0, ke = elems; k != ke; ++k) {
818 unsigned sz = elemtype.getSizeInBits();
819 if (elemtype.isInteger() && (sz < 8))
822 DAG.getNode(ISD::ADD, dl, getPointerTy(), OutVals[OIdx],
823 DAG.getConstant(curOffset, getPointerTy()));
824 SDValue theVal = DAG.getLoad(elemtype, dl, tempChain, srcAddr,
825 MachinePointerInfo(), false, false, false,
827 if (elemtype.getSizeInBits() < 16) {
828 theVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, theVal);
830 SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
831 SDValue CopyParamOps[] = { Chain, DAG.getConstant(paramCount, MVT::i32),
832 DAG.getConstant(curOffset, MVT::i32), theVal,
834 Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreParam, dl, CopyParamVTs,
835 CopyParamOps, 5, elemtype,
836 MachinePointerInfo());
838 InFlag = Chain.getValue(1);
845 GlobalAddressSDNode *Func = dyn_cast<GlobalAddressSDNode>(Callee.getNode());
846 unsigned retAlignment = 0;
849 if (Ins.size() > 0) {
850 SmallVector<EVT, 16> resvtparts;
851 ComputeValueVTs(*this, retTy, resvtparts);
854 // .param .align 16 .b8 retval0[<size-in-bytes>], or
855 // .param .b<size-in-bits> retval0
856 unsigned resultsz = TD->getTypeAllocSizeInBits(retTy);
857 if (retTy->isPrimitiveType() || retTy->isIntegerTy() ||
858 retTy->isPointerTy()) {
859 // Scalar needs to be at least 32bit wide
862 SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
863 SDValue DeclareRetOps[] = { Chain, DAG.getConstant(1, MVT::i32),
864 DAG.getConstant(resultsz, MVT::i32),
865 DAG.getConstant(0, MVT::i32), InFlag };
866 Chain = DAG.getNode(NVPTXISD::DeclareRet, dl, DeclareRetVTs,
868 InFlag = Chain.getValue(1);
870 retAlignment = getArgumentAlignment(Callee, CS, retTy, 0);
871 SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
872 SDValue DeclareRetOps[] = { Chain,
873 DAG.getConstant(retAlignment, MVT::i32),
874 DAG.getConstant(resultsz / 8, MVT::i32),
875 DAG.getConstant(0, MVT::i32), InFlag };
876 Chain = DAG.getNode(NVPTXISD::DeclareRetParam, dl, DeclareRetVTs,
878 InFlag = Chain.getValue(1);
883 // This is indirect function call case : PTX requires a prototype of the
885 // proto_0 : .callprototype(.param .b32 _) _ (.param .b32 _);
886 // to be emitted, and the label has to used as the last arg of call
888 // The prototype is embedded in a string and put as the operand for an
890 SDVTList InlineAsmVTs = DAG.getVTList(MVT::Other, MVT::Glue);
891 std::string proto_string =
892 getPrototype(retTy, Args, Outs, retAlignment, CS);
893 const char *asmstr = nvTM->getManagedStrPool()
894 ->getManagedString(proto_string.c_str())->c_str();
895 SDValue InlineAsmOps[] = {
896 Chain, DAG.getTargetExternalSymbol(asmstr, getPointerTy()),
897 DAG.getMDNode(0), DAG.getTargetConstant(0, MVT::i32), InFlag
899 Chain = DAG.getNode(ISD::INLINEASM, dl, InlineAsmVTs, InlineAsmOps, 5);
900 InFlag = Chain.getValue(1);
902 // Op to just print "call"
903 SDVTList PrintCallVTs = DAG.getVTList(MVT::Other, MVT::Glue);
904 SDValue PrintCallOps[] = {
905 Chain, DAG.getConstant((Ins.size() == 0) ? 0 : 1, MVT::i32), InFlag
907 Chain = DAG.getNode(Func ? (NVPTXISD::PrintCallUni) : (NVPTXISD::PrintCall),
908 dl, PrintCallVTs, PrintCallOps, 3);
909 InFlag = Chain.getValue(1);
911 // Ops to print out the function name
912 SDVTList CallVoidVTs = DAG.getVTList(MVT::Other, MVT::Glue);
913 SDValue CallVoidOps[] = { Chain, Callee, InFlag };
914 Chain = DAG.getNode(NVPTXISD::CallVoid, dl, CallVoidVTs, CallVoidOps, 3);
915 InFlag = Chain.getValue(1);
917 // Ops to print out the param list
918 SDVTList CallArgBeginVTs = DAG.getVTList(MVT::Other, MVT::Glue);
919 SDValue CallArgBeginOps[] = { Chain, InFlag };
920 Chain = DAG.getNode(NVPTXISD::CallArgBegin, dl, CallArgBeginVTs,
922 InFlag = Chain.getValue(1);
924 for (unsigned i = 0, e = paramCount; i != e; ++i) {
927 opcode = NVPTXISD::LastCallArg;
929 opcode = NVPTXISD::CallArg;
930 SDVTList CallArgVTs = DAG.getVTList(MVT::Other, MVT::Glue);
931 SDValue CallArgOps[] = { Chain, DAG.getConstant(1, MVT::i32),
932 DAG.getConstant(i, MVT::i32), InFlag };
933 Chain = DAG.getNode(opcode, dl, CallArgVTs, CallArgOps, 4);
934 InFlag = Chain.getValue(1);
936 SDVTList CallArgEndVTs = DAG.getVTList(MVT::Other, MVT::Glue);
937 SDValue CallArgEndOps[] = { Chain, DAG.getConstant(Func ? 1 : 0, MVT::i32),
940 DAG.getNode(NVPTXISD::CallArgEnd, dl, CallArgEndVTs, CallArgEndOps, 3);
941 InFlag = Chain.getValue(1);
944 SDVTList PrototypeVTs = DAG.getVTList(MVT::Other, MVT::Glue);
945 SDValue PrototypeOps[] = { Chain, DAG.getConstant(uniqueCallSite, MVT::i32),
947 Chain = DAG.getNode(NVPTXISD::Prototype, dl, PrototypeVTs, PrototypeOps, 3);
948 InFlag = Chain.getValue(1);
951 // Generate loads from param memory/moves from registers for result
952 if (Ins.size() > 0) {
953 unsigned resoffset = 0;
954 if (retTy && retTy->isVectorTy()) {
955 EVT ObjectVT = getValueType(retTy);
956 unsigned NumElts = ObjectVT.getVectorNumElements();
957 EVT EltVT = ObjectVT.getVectorElementType();
958 assert(nvTM->getTargetLowering()->getNumRegisters(F->getContext(),
959 ObjectVT) == NumElts &&
960 "Vector was not scalarized");
961 unsigned sz = EltVT.getSizeInBits();
962 bool needTruncate = sz < 16 ? true : false;
965 // Just a simple load
966 std::vector<EVT> LoadRetVTs;
968 // If loading i1 result, generate
971 LoadRetVTs.push_back(MVT::i16);
973 LoadRetVTs.push_back(EltVT);
974 LoadRetVTs.push_back(MVT::Other);
975 LoadRetVTs.push_back(MVT::Glue);
976 std::vector<SDValue> LoadRetOps;
977 LoadRetOps.push_back(Chain);
978 LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
979 LoadRetOps.push_back(DAG.getConstant(0, MVT::i32));
980 LoadRetOps.push_back(InFlag);
981 SDValue retval = DAG.getMemIntrinsicNode(
982 NVPTXISD::LoadParam, dl,
983 DAG.getVTList(&LoadRetVTs[0], LoadRetVTs.size()), &LoadRetOps[0],
984 LoadRetOps.size(), EltVT, MachinePointerInfo());
985 Chain = retval.getValue(1);
986 InFlag = retval.getValue(2);
987 SDValue Ret0 = retval;
989 Ret0 = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Ret0);
990 InVals.push_back(Ret0);
991 } else if (NumElts == 2) {
993 std::vector<EVT> LoadRetVTs;
995 // If loading i1 result, generate
998 LoadRetVTs.push_back(MVT::i16);
999 LoadRetVTs.push_back(MVT::i16);
1001 LoadRetVTs.push_back(EltVT);
1002 LoadRetVTs.push_back(EltVT);
1004 LoadRetVTs.push_back(MVT::Other);
1005 LoadRetVTs.push_back(MVT::Glue);
1006 std::vector<SDValue> LoadRetOps;
1007 LoadRetOps.push_back(Chain);
1008 LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
1009 LoadRetOps.push_back(DAG.getConstant(0, MVT::i32));
1010 LoadRetOps.push_back(InFlag);
1011 SDValue retval = DAG.getMemIntrinsicNode(
1012 NVPTXISD::LoadParamV2, dl,
1013 DAG.getVTList(&LoadRetVTs[0], LoadRetVTs.size()), &LoadRetOps[0],
1014 LoadRetOps.size(), EltVT, MachinePointerInfo());
1015 Chain = retval.getValue(2);
1016 InFlag = retval.getValue(3);
1017 SDValue Ret0 = retval.getValue(0);
1018 SDValue Ret1 = retval.getValue(1);
1020 Ret0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ret0);
1021 InVals.push_back(Ret0);
1022 Ret1 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ret1);
1023 InVals.push_back(Ret1);
1025 InVals.push_back(Ret0);
1026 InVals.push_back(Ret1);
1029 // Split into N LoadV4
1031 unsigned VecSize = 4;
1032 unsigned Opc = NVPTXISD::LoadParamV4;
1033 if (EltVT.getSizeInBits() == 64) {
1035 Opc = NVPTXISD::LoadParamV2;
1037 EVT VecVT = EVT::getVectorVT(F->getContext(), EltVT, VecSize);
1038 for (unsigned i = 0; i < NumElts; i += VecSize) {
1039 SmallVector<EVT, 8> LoadRetVTs;
1041 // If loading i1 result, generate
1044 for (unsigned j = 0; j < VecSize; ++j)
1045 LoadRetVTs.push_back(MVT::i16);
1047 for (unsigned j = 0; j < VecSize; ++j)
1048 LoadRetVTs.push_back(EltVT);
1050 LoadRetVTs.push_back(MVT::Other);
1051 LoadRetVTs.push_back(MVT::Glue);
1052 SmallVector<SDValue, 4> LoadRetOps;
1053 LoadRetOps.push_back(Chain);
1054 LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
1055 LoadRetOps.push_back(DAG.getConstant(Ofst, MVT::i32));
1056 LoadRetOps.push_back(InFlag);
1057 SDValue retval = DAG.getMemIntrinsicNode(
1058 Opc, dl, DAG.getVTList(&LoadRetVTs[0], LoadRetVTs.size()),
1059 &LoadRetOps[0], LoadRetOps.size(), EltVT, MachinePointerInfo());
1061 Chain = retval.getValue(2);
1062 InFlag = retval.getValue(3);
1064 Chain = retval.getValue(4);
1065 InFlag = retval.getValue(5);
1068 for (unsigned j = 0; j < VecSize; ++j) {
1069 if (i + j >= NumElts)
1071 SDValue Elt = retval.getValue(j);
1073 Elt = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
1074 InVals.push_back(Elt);
1076 Ofst += TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
1080 SmallVector<EVT, 16> VTs;
1081 ComputePTXValueVTs(*this, retTy, VTs);
1082 assert(VTs.size() == Ins.size() && "Bad value decomposition");
1083 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
1084 unsigned sz = VTs[i].getSizeInBits();
1085 bool needTruncate = sz < 8 ? true : false;
1086 if (VTs[i].isInteger() && (sz < 8))
1089 SmallVector<EVT, 4> LoadRetVTs;
1090 EVT TheLoadType = VTs[i];
1091 if (retTy->isIntegerTy() &&
1092 TD->getTypeAllocSizeInBits(retTy) < 32) {
1093 // This is for integer types only, and specifically not for
1095 LoadRetVTs.push_back(MVT::i32);
1096 TheLoadType = MVT::i32;
1097 } else if (sz < 16) {
1098 // If loading i1/i8 result, generate
1100 // trunc i16 to i1/i8
1101 LoadRetVTs.push_back(MVT::i16);
1103 LoadRetVTs.push_back(Ins[i].VT);
1104 LoadRetVTs.push_back(MVT::Other);
1105 LoadRetVTs.push_back(MVT::Glue);
1107 SmallVector<SDValue, 4> LoadRetOps;
1108 LoadRetOps.push_back(Chain);
1109 LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
1110 LoadRetOps.push_back(DAG.getConstant(resoffset, MVT::i32));
1111 LoadRetOps.push_back(InFlag);
1112 SDValue retval = DAG.getMemIntrinsicNode(
1113 NVPTXISD::LoadParam, dl,
1114 DAG.getVTList(&LoadRetVTs[0], LoadRetVTs.size()), &LoadRetOps[0],
1115 LoadRetOps.size(), TheLoadType, MachinePointerInfo());
1116 Chain = retval.getValue(1);
1117 InFlag = retval.getValue(2);
1118 SDValue Ret0 = retval.getValue(0);
1120 Ret0 = DAG.getNode(ISD::TRUNCATE, dl, Ins[i].VT, Ret0);
1121 InVals.push_back(Ret0);
1122 resoffset += sz / 8;
1127 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(uniqueCallSite, true),
1128 DAG.getIntPtrConstant(uniqueCallSite + 1, true),
1132 // set isTailCall to false for now, until we figure out how to express
1133 // tail call optimization in PTX
1138 // By default CONCAT_VECTORS is lowered by ExpandVectorBuildThroughStack()
1139 // (see LegalizeDAG.cpp). This is slow and uses local memory.
1140 // We use extract/insert/build vector just as what LegalizeOp() does in llvm 2.5
// Lower CONCAT_VECTORS by extracting every scalar element of each source
// vector (in operand order) and rebuilding the full-width result with a
// single BUILD_VECTOR node, avoiding the stack-based expansion.
1142 NVPTXTargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
1143 SDNode *Node = Op.getNode();
1145 SmallVector<SDValue, 8> Ops;
1146 unsigned NumOperands = Node->getNumOperands();
// Walk the source vectors in order so the concatenated element order is
// preserved in Ops.
1147 for (unsigned i = 0; i < NumOperands; ++i) {
1148 SDValue SubOp = Node->getOperand(i);
1149 EVT VVT = SubOp.getNode()->getValueType(0);
1150 EVT EltVT = VVT.getVectorElementType();
1151 unsigned NumSubElem = VVT.getVectorNumElements();
// Pull each element out of the current source vector.
1152 for (unsigned j = 0; j < NumSubElem; ++j) {
1153 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, SubOp,
1154 DAG.getIntPtrConstant(j)));
// Rebuild the result vector from the collected scalars.
1157 return DAG.getNode(ISD::BUILD_VECTOR, dl, Node->getValueType(0), &Ops[0],
// Top-level dispatcher for operations marked as Custom lowering: route
// each opcode to its dedicated Lower* helper.  Reaching the end of the
// switch for an unlisted opcode is a backend bug (llvm_unreachable).
1162 NVPTXTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
1163 switch (Op.getOpcode()) {
1164 case ISD::RETURNADDR:
1166 case ISD::FRAMEADDR:
1168 case ISD::GlobalAddress:
1169 return LowerGlobalAddress(Op, DAG);
1170 case ISD::INTRINSIC_W_CHAIN:
1172 case ISD::BUILD_VECTOR:
1173 case ISD::EXTRACT_SUBVECTOR:
1175 case ISD::CONCAT_VECTORS:
1176 return LowerCONCAT_VECTORS(Op, DAG);
1178 return LowerSTORE(Op, DAG);
1180 return LowerLOAD(Op, DAG);
1182 llvm_unreachable("Custom lowering not defined for operation");
// Custom load lowering: only i1 loads need target-specific handling here;
// they are forwarded to LowerLOADi1.
1186 SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
1187 if (Op.getValueType() == MVT::i1)
1188 return LowerLOADi1(Op, DAG);
1195 // v1 = ld i8* addr (-> i16)
1196 // v = trunc i16 to i1
// i1 loads are lowered as a plain (non-extending) load at i16 followed by
// a truncate back to i1, since a 1-bit value cannot be loaded directly.
1197 SDValue NVPTXTargetLowering::LowerLOADi1(SDValue Op, SelectionDAG &DAG) const {
1198 SDNode *Node = Op.getNode();
1199 LoadSDNode *LD = cast<LoadSDNode>(Node);
1201 assert(LD->getExtensionType() == ISD::NON_EXTLOAD);
1202 assert(Node->getValueType(0) == MVT::i1 &&
1203 "Custom lowering for i1 load only");
// Re-issue the load at i16, carrying over all memory-operand flags
// (volatile, non-temporal, invariant, alignment).
1205 DAG.getLoad(MVT::i16, dl, LD->getChain(), LD->getBasePtr(),
1206 LD->getPointerInfo(), LD->isVolatile(), LD->isNonTemporal(),
1207 LD->isInvariant(), LD->getAlignment());
1208 SDValue result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, newLD);
1209 // The legalizer (the caller) is expecting two values from the legalized
1210 // load, so we build a MergeValues node for it. See ExpandUnalignedLoad()
1211 // in LegalizeDAG.cpp which also uses MergeValues.
1212 SDValue Ops[] = { result, LD->getChain() };
1213 return DAG.getMergeValues(Ops, 2, dl);
// Custom store lowering: dispatch on the type of the stored value
// (operand 1) — i1 stores and vector stores each get a dedicated path.
1216 SDValue NVPTXTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
1217 EVT ValVT = Op.getOperand(1).getValueType();
1218 if (ValVT == MVT::i1)
1219 return LowerSTOREi1(Op, DAG);
1220 else if (ValVT.isVector())
1221 return LowerSTOREVector(Op, DAG);
// Lower a vector store into a single NVPTX StoreV2/StoreV4 target node:
// the vector is split into its scalar elements, which become explicit
// operands of the new memory intrinsic node.
1227 NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
1228 SDNode *N = Op.getNode();
1229 SDValue Val = N->getOperand(1);
1231 EVT ValVT = Val.getValueType();
1233 if (ValVT.isVector()) {
1234 // We only handle "native" vector sizes for now, e.g. <4 x double> is not
1235 // legal. We can (and should) split that into 2 stores of <2 x double> here
1236 // but I'm leaving that as a TODO for now.
1237 if (!ValVT.isSimple())
1239 switch (ValVT.getSimpleVT().SimpleTy) {
1252 // This is a "native" vector type
1256 unsigned Opcode = 0;
1257 EVT EltVT = ValVT.getVectorElementType();
1258 unsigned NumElts = ValVT.getVectorNumElements();
1260 // Since StoreV2 is a target node, we cannot rely on DAG type legalization.
1261 // Therefore, we must ensure the type is legal. For i1 and i8, we set the
1262 // stored type to i16 and propagate the "real" type as the memory type.
1263 bool NeedExt = false;
1264 if (EltVT.getSizeInBits() < 16)
1271 Opcode = NVPTXISD::StoreV2;
1274 Opcode = NVPTXISD::StoreV4;
// Build the operand list for the new store node:
1279 SmallVector<SDValue, 8> Ops;
1281 // First is the chain
1282 Ops.push_back(N->getOperand(0));
1284 // Then the split values
1285 for (unsigned i = 0; i < NumElts; ++i) {
1286 SDValue ExtVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Val,
1287 DAG.getIntPtrConstant(i));
// Sub-16-bit elements are widened to i16 (NeedExt path); the memory VT
// below still records the original element type.
1289 ExtVal = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i16, ExtVal);
1290 Ops.push_back(ExtVal);
1293 // Then any remaining arguments
1294 for (unsigned i = 2, e = N->getNumOperands(); i != e; ++i) {
1295 Ops.push_back(N->getOperand(i));
// Reuse the original node's memory VT and memory operand so alias info
// and ordering are preserved on the replacement node.
1298 MemSDNode *MemSD = cast<MemSDNode>(N);
1300 SDValue NewSt = DAG.getMemIntrinsicNode(
1301 Opcode, DL, DAG.getVTList(MVT::Other), &Ops[0], Ops.size(),
1302 MemSD->getMemoryVT(), MemSD->getMemOperand());
1304 //return DCI.CombineTo(N, NewSt, true);
1313 // v1 = zxt v to i16
// i1 stores are lowered by zero-extending the value to i16 and then
// emitting a truncating store at i8, since a 1-bit value cannot be
// stored directly.
1315 SDValue NVPTXTargetLowering::LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const {
1316 SDNode *Node = Op.getNode();
1318 StoreSDNode *ST = cast<StoreSDNode>(Node);
1319 SDValue Tmp1 = ST->getChain();
1320 SDValue Tmp2 = ST->getBasePtr();
1321 SDValue Tmp3 = ST->getValue();
1322 assert(Tmp3.getValueType() == MVT::i1 && "Custom lowering for i1 store only");
// Preserve the original store's memory flags on the replacement.
1323 unsigned Alignment = ST->getAlignment();
1324 bool isVolatile = ST->isVolatile();
1325 bool isNonTemporal = ST->isNonTemporal();
1326 Tmp3 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Tmp3);
1327 SDValue Result = DAG.getTruncStore(Tmp1, dl, Tmp3, Tmp2,
1328 ST->getPointerInfo(), MVT::i8, isNonTemporal,
1329 isVolatile, Alignment);
// Build an external symbol "<inname><idx>".  The string lives in the
// target machine's managed string pool so it outlives this call, which is
// required because getTargetExternalSymbol does not copy the name.
1333 SDValue NVPTXTargetLowering::getExtSymb(SelectionDAG &DAG, const char *inname,
1334 int idx, EVT v) const {
1335 std::string *name = nvTM->getManagedStrPool()->getManagedString(inname);
1336 std::stringstream suffix;
1338 *name += suffix.str();
1339 return DAG.getTargetExternalSymbol(name->c_str(), v);
// Build the external symbol "<function-name>_param_<idx>" naming formal
// parameter \p idx of the current function.  The name is interned in the
// managed string pool so the char* handed to getTargetExternalSymbol
// stays valid for the lifetime of the compilation.
1343 NVPTXTargetLowering::getParamSymbol(SelectionDAG &DAG, int idx, EVT v) const {
1344 std::string ParamSym;
1345 raw_string_ostream ParamStr(ParamSym);
1347 ParamStr << DAG.getMachineFunction().getName() << "_param_" << idx;
1350 std::string *SavedStr =
1351 nvTM->getManagedStrPool()->getManagedString(ParamSym.c_str());
1352 return DAG.getTargetExternalSymbol(SavedStr->c_str(), v);
// Convenience wrapper: helper-parameter symbol ".HLPPARAM<idx>".
1355 SDValue NVPTXTargetLowering::getParamHelpSymbol(SelectionDAG &DAG, int idx) {
1356 return getExtSymb(DAG, ".HLPPARAM", idx);
1359 // Check to see if the kernel argument is image*_t or sampler_t
// Returns true iff \p arg is a pointer to one of the named OpenCL special
// struct types listed below.  Non-pointer arguments and literal (unnamed)
// struct types never match.
1361 bool llvm::isImageOrSamplerVal(const Value *arg, const Module *context) {
1362 static const char *const specialTypes[] = { "struct._image2d_t",
1363 "struct._image3d_t",
1364 "struct._sampler_t" };
1366 const Type *Ty = arg->getType();
1367 const PointerType *PTy = dyn_cast<PointerType>(Ty);
// Literal structs have no name; use "" so they compare unequal below.
1375 const StructType *STy = dyn_cast<StructType>(PTy->getElementType());
1376 const std::string TypeName = STy && !STy->isLiteral() ? STy->getName() : "";
// Compare the pointee's struct name against each known special type.
1378 for (int i = 0, e = array_lengthof(specialTypes); i != e; ++i)
1379 if (TypeName == specialTypes[i])
// LowerFormalArguments - Materialize the incoming arguments of the current
// function as SDValues in InVals.  Each non-byval argument is loaded from
// its ".param" symbol (see getParamSymbol); aggregates are decomposed into
// their parts, vectors are loaded with scalar/v2/v4 loads, byval arguments
// become MoveParam nodes, and image/sampler kernel params are encoded as
// their 1-based position constant.
1385 SDValue NVPTXTargetLowering::LowerFormalArguments(
1386 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1387 const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc dl, SelectionDAG &DAG,
1388 SmallVectorImpl<SDValue> &InVals) const {
1389 MachineFunction &MF = DAG.getMachineFunction();
1390 const DataLayout *TD = getDataLayout();
1392 const Function *F = MF.getFunction();
1393 const AttributeSet &PAL = F->getAttributes();
1394 const TargetLowering *TLI = nvTM->getTargetLowering();
1396 SDValue Root = DAG.getRoot();
1397 std::vector<SDValue> OutChains;
// Only the ABI path (sm_20+) is supported.
1399 bool isKernel = llvm::isKernelFunction(*F);
1400 bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
1401 assert(isABI && "Non-ABI compilation is not supported");
// Snapshot the IR arguments and their types for index-based access below.
1405 std::vector<Type *> argTypes;
1406 std::vector<const Argument *> theArgs;
1407 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
1409 theArgs.push_back(I);
1410 argTypes.push_back(I->getType());
1412 // argTypes.size() (or theArgs.size()) and Ins.size() need not match.
1413 // Ins.size() will be larger
1414 // * if there is an aggregate argument with multiple fields (each field
1415 // showing up separately in Ins)
1416 // * if there is a vector argument with more than typical vector-length
1417 // elements (generally if more than 4) where each vector element is
1418 // individually present in Ins.
1419 // So a different index should be used for indexing into Ins.
1420 // See similar issue in LowerCall.
1421 unsigned InsIdx = 0;
1424 for (unsigned i = 0, e = theArgs.size(); i != e; ++i, ++idx, ++InsIdx) {
1425 Type *Ty = argTypes[i];
1427 // If the kernel argument is image*_t or sampler_t, convert it to
1428 // a i32 constant holding the parameter position. This can later
1429 // matched in the AsmPrinter to output the correct mangled name.
1430 if (isImageOrSamplerVal(
1432 (theArgs[i]->getParent() ? theArgs[i]->getParent()->getParent()
1434 assert(isKernel && "Only kernels can have image/sampler params");
1435 InVals.push_back(DAG.getConstant(i + 1, MVT::i32));
// Dead arguments still need placeholder values (UNDEF) so InVals stays
// in sync with Ins; no loads are emitted for them.
1439 if (theArgs[i]->use_empty()) {
1441 if (Ty->isAggregateType()) {
1442 SmallVector<EVT, 16> vtparts;
1444 ComputePTXValueVTs(*this, Ty, vtparts);
1445 assert(vtparts.size() > 0 && "empty aggregate type not expected");
1446 for (unsigned parti = 0, parte = vtparts.size(); parti != parte;
1448 EVT partVT = vtparts[parti];
1449 InVals.push_back(DAG.getNode(ISD::UNDEF, dl, partVT));
1452 if (vtparts.size() > 0)
1456 if (Ty->isVectorTy()) {
1457 EVT ObjectVT = getValueType(Ty);
1458 unsigned NumRegs = TLI->getNumRegisters(F->getContext(), ObjectVT);
1459 for (unsigned parti = 0; parti < NumRegs; ++parti) {
1460 InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
1467 InVals.push_back(DAG.getNode(ISD::UNDEF, dl, Ins[InsIdx].VT));
1471 // In the following cases, assign a node order of "idx+1"
1472 // to newly created nodes. The SDNodes for params have to
1473 // appear in the same order as their order of appearance
1474 // in the original function. "idx+1" holds that order.
1475 if (PAL.hasAttribute(i + 1, Attribute::ByVal) == false) {
// Non-byval aggregate: load each decomposed part from the param symbol
// at its computed offset.
1476 if (Ty->isAggregateType()) {
1477 SmallVector<EVT, 16> vtparts;
1478 SmallVector<uint64_t, 16> offsets;
1480 // NOTE: Here, we lose the ability to issue vector loads for vectors
1481 // that are a part of a struct. This should be investigated in the
1483 ComputePTXValueVTs(*this, Ty, vtparts, &offsets, 0);
1484 assert(vtparts.size() > 0 && "empty aggregate type not expected");
1485 bool aggregateIsPacked = false;
1486 if (StructType *STy = llvm::dyn_cast<StructType>(Ty))
1487 aggregateIsPacked = STy->isPacked();
1489 SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
1490 for (unsigned parti = 0, parte = vtparts.size(); parti != parte;
1492 EVT partVT = vtparts[parti];
// The source "value" is a null pointer in the param address space; it
// only carries address-space information for the MachinePointerInfo.
1493 Value *srcValue = Constant::getNullValue(
1494 PointerType::get(partVT.getTypeForEVT(F->getContext()),
1495 llvm::ADDRESS_SPACE_PARAM));
1497 DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
1498 DAG.getConstant(offsets[parti], getPointerTy()));
// Packed structs have no padding, so parts are only byte-aligned.
1499 unsigned partAlign =
1500 aggregateIsPacked ? 1
1501 : TD->getABITypeAlignment(
1502 partVT.getTypeForEVT(F->getContext()));
// Widen sub-register parts with an extending load, honouring the
// argument's signedness flag.
1504 if (Ins[InsIdx].VT.getSizeInBits() > partVT.getSizeInBits()) {
1505 ISD::LoadExtType ExtOp = Ins[InsIdx].Flags.isSExt() ?
1506 ISD::SEXTLOAD : ISD::ZEXTLOAD;
1507 p = DAG.getExtLoad(ExtOp, dl, Ins[InsIdx].VT, Root, srcAddr,
1508 MachinePointerInfo(srcValue), partVT, false,
1511 p = DAG.getLoad(partVT, dl, Root, srcAddr,
1512 MachinePointerInfo(srcValue), false, false, false,
1516 p.getNode()->setIROrder(idx + 1);
1517 InVals.push_back(p);
1520 if (vtparts.size() > 0)
// Non-byval vector argument: load with scalar, v2, or v4 loads depending
// on the (register-scalarized) element count.
1524 if (Ty->isVectorTy()) {
1525 EVT ObjectVT = getValueType(Ty);
1526 SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
1527 unsigned NumElts = ObjectVT.getVectorNumElements();
1528 assert(TLI->getNumRegisters(F->getContext(), ObjectVT) == NumElts &&
1529 "Vector was not scalarized");
1531 EVT EltVT = ObjectVT.getVectorElementType();
1536 // We only have one element, so just directly load it
1537 Value *SrcValue = Constant::getNullValue(PointerType::get(
1538 EltVT.getTypeForEVT(F->getContext()), llvm::ADDRESS_SPACE_PARAM));
1539 SDValue SrcAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
1540 DAG.getConstant(Ofst, getPointerTy()));
1541 SDValue P = DAG.getLoad(
1542 EltVT, dl, Root, SrcAddr, MachinePointerInfo(SrcValue), false,
1544 TD->getABITypeAlignment(EltVT.getTypeForEVT(F->getContext())));
1546 P.getNode()->setIROrder(idx + 1);
1548 if (Ins[InsIdx].VT.getSizeInBits() > EltVT.getSizeInBits())
1549 P = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, P);
1550 InVals.push_back(P);
1551 Ofst += TD->getTypeAllocSize(EltVT.getTypeForEVT(F->getContext()));
1553 } else if (NumElts == 2) {
1555 // f32,f32 = load ...
// Two elements: one <2 x Elt> load, then split with EXTRACT_VECTOR_ELT.
1556 EVT VecVT = EVT::getVectorVT(F->getContext(), EltVT, 2);
1557 Value *SrcValue = Constant::getNullValue(PointerType::get(
1558 VecVT.getTypeForEVT(F->getContext()), llvm::ADDRESS_SPACE_PARAM));
1559 SDValue SrcAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
1560 DAG.getConstant(Ofst, getPointerTy()));
1561 SDValue P = DAG.getLoad(
1562 VecVT, dl, Root, SrcAddr, MachinePointerInfo(SrcValue), false,
1564 TD->getABITypeAlignment(VecVT.getTypeForEVT(F->getContext())));
1566 P.getNode()->setIROrder(idx + 1);
1568 SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, P,
1569 DAG.getIntPtrConstant(0));
1570 SDValue Elt1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, P,
1571 DAG.getIntPtrConstant(1));
1573 if (Ins[InsIdx].VT.getSizeInBits() > EltVT.getSizeInBits()) {
1574 Elt0 = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, Elt0);
1575 Elt1 = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, Elt1);
1578 InVals.push_back(Elt0);
1579 InVals.push_back(Elt1);
1580 Ofst += TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
1584 // We have at least 4 elements (<3 x Ty> expands to 4 elements) and
1586 // vector will be expanded to a power of 2 elements, so we know we can
1587 // always round up to the next multiple of 4 when creating the vector
1589 // e.g. 4 elem => 1 ld.v4
1590 // 6 elem => 2 ld.v4
1591 // 8 elem => 2 ld.v4
1592 // 11 elem => 3 ld.v4
1593 unsigned VecSize = 4;
1594 if (EltVT.getSizeInBits() == 64) {
1597 EVT VecVT = EVT::getVectorVT(F->getContext(), EltVT, VecSize);
// Load the argument VecSize elements at a time, extracting only the
// elements that actually exist (i + j < NumElts).
1598 for (unsigned i = 0; i < NumElts; i += VecSize) {
1599 Value *SrcValue = Constant::getNullValue(
1600 PointerType::get(VecVT.getTypeForEVT(F->getContext()),
1601 llvm::ADDRESS_SPACE_PARAM));
1603 DAG.getNode(ISD::ADD, dl, getPointerTy(), Arg,
1604 DAG.getConstant(Ofst, getPointerTy()));
1605 SDValue P = DAG.getLoad(
1606 VecVT, dl, Root, SrcAddr, MachinePointerInfo(SrcValue), false,
1608 TD->getABITypeAlignment(VecVT.getTypeForEVT(F->getContext())));
1610 P.getNode()->setIROrder(idx + 1);
1612 for (unsigned j = 0; j < VecSize; ++j) {
1613 if (i + j >= NumElts)
1615 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, P,
1616 DAG.getIntPtrConstant(j));
1617 if (Ins[InsIdx].VT.getSizeInBits() > EltVT.getSizeInBits())
1618 Elt = DAG.getNode(ISD::ANY_EXTEND, dl, Ins[InsIdx].VT, Elt);
1619 InVals.push_back(Elt);
1621 Ofst += TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
// Non-byval scalar argument: a single (possibly extending) load from the
// param symbol.
1631 EVT ObjectVT = getValueType(Ty);
1632 // If ABI, load from the param symbol
1633 SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
1634 Value *srcValue = Constant::getNullValue(PointerType::get(
1635 ObjectVT.getTypeForEVT(F->getContext()), llvm::ADDRESS_SPACE_PARAM));
1637 if (ObjectVT.getSizeInBits() < Ins[InsIdx].VT.getSizeInBits()) {
1638 ISD::LoadExtType ExtOp = Ins[InsIdx].Flags.isSExt() ?
1639 ISD::SEXTLOAD : ISD::ZEXTLOAD;
1640 p = DAG.getExtLoad(ExtOp, dl, Ins[InsIdx].VT, Root, Arg,
1641 MachinePointerInfo(srcValue), ObjectVT, false, false,
1642 TD->getABITypeAlignment(ObjectVT.getTypeForEVT(F->getContext())));
1644 p = DAG.getLoad(Ins[InsIdx].VT, dl, Root, Arg,
1645 MachinePointerInfo(srcValue), false, false, false,
1646 TD->getABITypeAlignment(ObjectVT.getTypeForEVT(F->getContext())));
1649 p.getNode()->setIROrder(idx + 1);
1650 InVals.push_back(p);
1654 // Param has ByVal attribute
1655 // Return MoveParam(param symbol).
1656 // Ideally, the param symbol can be returned directly,
1657 // but when SDNode builder decides to use it in a CopyToReg(),
1658 // machine instruction fails because TargetExternalSymbol
1659 // (not lowered) is target dependent, and CopyToReg assumes
1660 // the source is lowered.
1661 EVT ObjectVT = getValueType(Ty);
1662 assert(ObjectVT == Ins[InsIdx].VT &&
1663 "Ins type did not match function type");
1664 SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
1665 SDValue p = DAG.getNode(NVPTXISD::MoveParam, dl, ObjectVT, Arg);
1667 p.getNode()->setIROrder(idx + 1);
1669 InVals.push_back(p);
// Also provide a generic-address-space view of the byval parameter.
1671 SDValue p2 = DAG.getNode(
1672 ISD::INTRINSIC_WO_CHAIN, dl, ObjectVT,
1673 DAG.getConstant(Intrinsic::nvvm_ptr_local_to_gen, MVT::i32), p);
1674 InVals.push_back(p2);
1678 // Clang will check explicit VarArg and issue error if any. However, Clang
1679 // will let code with
1680 // implicit var arg like f() pass. See bug 617733.
1681 // We treat this case as if the arg list is empty.
1682 // if (F.isVarArg()) {
1683 // assert(0 && "VarArg not supported yet!");
// Merge any chains produced above back into the DAG root.
1686 if (!OutChains.empty())
1687 DAG.setRoot(DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &OutChains[0],
// LowerReturn - Store the function's return value(s) into the implicit
// return-value slot via NVPTXISD::StoreRetval{,V2,V4} nodes and emit the
// final RET_FLAG.  Vector returns use v2/v4 stores; everything else is
// decomposed with ComputePTXValueVTs and stored part by part.
1695 NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
1697 const SmallVectorImpl<ISD::OutputArg> &Outs,
1698 const SmallVectorImpl<SDValue> &OutVals,
1699 SDLoc dl, SelectionDAG &DAG) const {
1700 MachineFunction &MF = DAG.getMachineFunction();
1701 const Function *F = MF.getFunction();
1702 Type *RetTy = F->getReturnType();
1703 const DataLayout *TD = getDataLayout();
// Only the ABI path (sm_20+) is supported.
1705 bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
1706 assert(isABI && "Non-ABI compilation is not supported");
1710 if (VectorType *VTy = dyn_cast<VectorType>(RetTy)) {
1711 // If we have a vector type, the OutVals array will be the scalarized
1712 // components and we have combine them into 1 or more vector stores.
1713 unsigned NumElts = VTy->getNumElements();
1714 assert(NumElts == Outs.size() && "Bad scalarization of return value");
1716 // const_cast can be removed in later LLVM versions
1717 EVT EltVT = getValueType(RetTy).getVectorElementType();
// Elements narrower than 16 bits are zero-extended to i16 before being
// stored (NeedExtend path below).
1718 bool NeedExtend = false;
1719 if (EltVT.getSizeInBits() < 16)
1724 SDValue StoreVal = OutVals[0];
1725 // We only have one element, so just directly store it
1727 StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal);
1728 SDValue Ops[] = { Chain, DAG.getConstant(0, MVT::i32), StoreVal };
1729 Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreRetval, dl,
1730 DAG.getVTList(MVT::Other), &Ops[0], 3,
1731 EltVT, MachinePointerInfo());
1733 } else if (NumElts == 2) {
// Two elements: a single StoreRetvalV2 covers the whole return value.
1735 SDValue StoreVal0 = OutVals[0];
1736 SDValue StoreVal1 = OutVals[1];
1739 StoreVal0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal0);
1740 StoreVal1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, StoreVal1);
1743 SDValue Ops[] = { Chain, DAG.getConstant(0, MVT::i32), StoreVal0,
1745 Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreRetvalV2, dl,
1746 DAG.getVTList(MVT::Other), &Ops[0], 4,
1747 EltVT, MachinePointerInfo());
1750 // We have at least 4 elements (<3 x Ty> expands to 4 elements) and the
1751 // vector will be expanded to a power of 2 elements, so we know we can
1752 // always round up to the next multiple of 4 when creating the vector
1754 // e.g. 4 elem => 1 st.v4
1755 // 6 elem => 2 st.v4
1756 // 8 elem => 2 st.v4
1757 // 11 elem => 3 st.v4
1759 unsigned VecSize = 4;
1760 if (OutVals[0].getValueType().getSizeInBits() == 64)
1763 unsigned Offset = 0;
1766 EVT::getVectorVT(F->getContext(), OutVals[0].getValueType(), VecSize);
1767 unsigned PerStoreOffset =
1768 TD->getTypeAllocSize(VecVT.getTypeForEVT(F->getContext()));
// Emit one StoreRetvalV2/V4 per chunk of VecSize elements, padding the
// tail of the last chunk with UNDEF values.
1770 for (unsigned i = 0; i < NumElts; i += VecSize) {
1773 SmallVector<SDValue, 8> Ops;
1774 Ops.push_back(Chain);
1775 Ops.push_back(DAG.getConstant(Offset, MVT::i32));
1776 unsigned Opc = NVPTXISD::StoreRetvalV2;
1777 EVT ExtendedVT = (NeedExtend) ? MVT::i16 : OutVals[0].getValueType();
1779 StoreVal = OutVals[i];
1781 StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal);
1782 Ops.push_back(StoreVal);
1784 if (i + 1 < NumElts) {
1785 StoreVal = OutVals[i + 1];
1787 StoreVal = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal);
1789 StoreVal = DAG.getUNDEF(ExtendedVT);
1791 Ops.push_back(StoreVal);
// With more than two elements per chunk, upgrade to a v4 store.
1794 Opc = NVPTXISD::StoreRetvalV4;
1795 if (i + 2 < NumElts) {
1796 StoreVal = OutVals[i + 2];
1799 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal);
1801 StoreVal = DAG.getUNDEF(ExtendedVT);
1803 Ops.push_back(StoreVal);
1805 if (i + 3 < NumElts) {
1806 StoreVal = OutVals[i + 3];
1809 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtendedVT, StoreVal);
1811 StoreVal = DAG.getUNDEF(ExtendedVT);
1813 Ops.push_back(StoreVal);
1816 // Chain = DAG.getNode(Opc, dl, MVT::Other, &Ops[0], Ops.size());
1818 DAG.getMemIntrinsicNode(Opc, dl, DAG.getVTList(MVT::Other), &Ops[0],
1819 Ops.size(), EltVT, MachinePointerInfo());
1820 Offset += PerStoreOffset;
// Non-vector return: decompose the return type into primitive parts and
// store each at its running byte offset (SizeSoFar).
1824 SmallVector<EVT, 16> ValVTs;
1825 // const_cast is necessary since we are still using an LLVM version from
1826 // before the type system re-write.
1827 ComputePTXValueVTs(*this, RetTy, ValVTs);
1828 assert(ValVTs.size() == OutVals.size() && "Bad return value decomposition");
1830 unsigned SizeSoFar = 0;
1831 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
1832 SDValue theVal = OutVals[i];
1833 EVT TheValType = theVal.getValueType();
1834 unsigned numElems = 1;
1835 if (TheValType.isVector())
1836 numElems = TheValType.getVectorNumElements();
1837 for (unsigned j = 0, je = numElems; j != je; ++j) {
1838 SDValue TmpVal = theVal;
1839 if (TheValType.isVector())
1840 TmpVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
1841 TheValType.getVectorElementType(), TmpVal,
1842 DAG.getIntPtrConstant(j));
1843 EVT TheStoreType = ValVTs[i];
1844 if (RetTy->isIntegerTy() &&
1845 TD->getTypeAllocSizeInBits(RetTy) < 32) {
1846 // The following zero-extension is for integer types only, and
1847 // specifically not for aggregates.
1848 TmpVal = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, TmpVal);
1849 TheStoreType = MVT::i32;
1851 else if (TmpVal.getValueType().getSizeInBits() < 16)
1852 TmpVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, TmpVal);
1854 SDValue Ops[] = { Chain, DAG.getConstant(SizeSoFar, MVT::i32), TmpVal };
1855 Chain = DAG.getMemIntrinsicNode(NVPTXISD::StoreRetval, dl,
1856 DAG.getVTList(MVT::Other), &Ops[0],
1858 MachinePointerInfo());
// Advance the offset by the store size of the part (element size for
// the vector case, whole-part size otherwise).
1859 if(TheValType.isVector())
1861 TheStoreType.getVectorElementType().getStoreSizeInBits() / 8;
1863 SizeSoFar += TheStoreType.getStoreSizeInBits()/8;
1868 return DAG.getNode(NVPTXISD::RET_FLAG, dl, MVT::Other, Chain);
// NVPTX only understands single-character inline-asm constraints; longer
// constraint strings are rejected here, and supported ones are delegated
// to the generic TargetLowering implementation.
1872 void NVPTXTargetLowering::LowerAsmOperandForConstraint(
1873 SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
1874 SelectionDAG &DAG) const {
1875 if (Constraint.length() > 1)
1878 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
1881 // NVPTX supports vectors of legal element types of any length in intrinsics
1882 // because the NVPTX-specific type legalizer
1883 // will legalize them to the PTX supported length.
1884 bool NVPTXTargetLowering::isTypeSupportedInIntrinsic(MVT VT) const {
1885 if (isTypeLegal(VT))
// A vector type is acceptable as long as its element type is legal.
1887 if (VT.isVector()) {
1888 MVT eVT = VT.getVectorElementType();
1889 if (isTypeLegal(eVT))
1895 // llvm.ptx.memcpy.const and llvm.ptx.memmove.const need to be modeled as
1897 // because we need the information that is only available in the "Value" type
1899 // pointer. In particular, the address space information.
// Describe the memory behavior of NVPTX target intrinsics so the DAG
// builder can attach the right memory operands (memVT, pointer operand,
// read/write flags) to the resulting INTRINSIC_W_CHAIN nodes.
1900 bool NVPTXTargetLowering::getTgtMemIntrinsic(
1901 IntrinsicInfo &Info, const CallInst &I, unsigned Intrinsic) const {
1902 switch (Intrinsic) {
// Atomic float add: reads and writes the f32 pointed to by operand 0.
1906 case Intrinsic::nvvm_atomic_load_add_f32:
1907 Info.opc = ISD::INTRINSIC_W_CHAIN;
1908 Info.memVT = MVT::f32;
1909 Info.ptrVal = I.getArgOperand(0);
1912 Info.readMem = true;
1913 Info.writeMem = true;
// Atomic inc/dec: read-modify-write on an i32 location.
1917 case Intrinsic::nvvm_atomic_load_inc_32:
1918 case Intrinsic::nvvm_atomic_load_dec_32:
1919 Info.opc = ISD::INTRINSIC_W_CHAIN;
1920 Info.memVT = MVT::i32;
1921 Info.ptrVal = I.getArgOperand(0);
1924 Info.readMem = true;
1925 Info.writeMem = true;
// ldu (load-uniform) intrinsics: read-only loads; the memVT comes from
// the call's result type for the integer/pointer variants and is f32
// for the float variant.
1929 case Intrinsic::nvvm_ldu_global_i:
1930 case Intrinsic::nvvm_ldu_global_f:
1931 case Intrinsic::nvvm_ldu_global_p:
1933 Info.opc = ISD::INTRINSIC_W_CHAIN;
1934 if (Intrinsic == Intrinsic::nvvm_ldu_global_i)
1935 Info.memVT = getValueType(I.getType());
1936 else if (Intrinsic == Intrinsic::nvvm_ldu_global_p)
1937 Info.memVT = getValueType(I.getType());
1939 Info.memVT = MVT::f32;
1940 Info.ptrVal = I.getArgOperand(0);
1943 Info.readMem = true;
1944 Info.writeMem = false;
1952 /// isLegalAddressingMode - Return true if the addressing mode represented
1953 /// by AM is legal for this target, for a load/store of the specified type.
1954 /// Used to guide target specific optimizations, like loop strength reduction
1955 /// (LoopStrengthReduce.cpp) and memory optimization for address mode
1956 /// (CodeGenPrepare.cpp)
1957 bool NVPTXTargetLowering::isLegalAddressingMode(const AddrMode &AM,
1960 // AddrMode - This represents an addressing mode of:
1961 // BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
1963 // The legal address modes are
// A global base may not be combined with an offset, a base register, or
// a scaled register.
1970 if (AM.BaseOffs || AM.HasBaseReg || AM.Scale)
1976 case 0: // "r", "r+i" or "i" is allowed
// Scale of 1 with a base register would mean "r+r(+i)", which PTX
// addressing cannot express.
1979 if (AM.HasBaseReg) // "r+r+i" or "r+r" is not allowed.
1981 // Otherwise we have r+i.
1984 // No scale > 1 is allowed
1990 //===----------------------------------------------------------------------===//
1991 // NVPTX Inline Assembly Support
1992 //===----------------------------------------------------------------------===//
1994 /// getConstraintType - Given a constraint letter, return the type of
1995 /// constraint it is for this target.
1996 NVPTXTargetLowering::ConstraintType
1997 NVPTXTargetLowering::getConstraintType(const std::string &Constraint) const {
// Single-letter constraints recognized by NVPTX map to register classes;
// everything else defers to the generic implementation.
1998 if (Constraint.size() == 1) {
1999 switch (Constraint[0]) {
2010 return C_RegisterClass;
2013 return TargetLowering::getConstraintType(Constraint);
// Map each single-letter inline-asm constraint to the NVPTX register
// class that satisfies it; unrecognized constraints fall back to the
// generic TargetLowering handling.
2016 std::pair<unsigned, const TargetRegisterClass *>
2017 NVPTXTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
2019 if (Constraint.size() == 1) {
2020 switch (Constraint[0]) {
2022 return std::make_pair(0U, &NVPTX::Int16RegsRegClass)
2024 return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
2026 return std::make_pair(0U, &NVPTX::Int32RegsRegClass);
2029 return std::make_pair(0U, &NVPTX::Int64RegsRegClass);
2031 return std::make_pair(0U, &NVPTX::Float32RegsRegClass);
2033 return std::make_pair(0U, &NVPTX::Float64RegsRegClass);
2036 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
2039 /// getFunctionAlignment - Return the Log2 alignment of this function.
/// The Function argument is intentionally unnamed/unused: the alignment
/// returned does not depend on the particular function.
2040 unsigned NVPTXTargetLowering::getFunctionAlignment(const Function *) const {
2044 /// ReplaceVectorLoad - Convert vector loads into multi-output scalar loads.
/// Emits one NVPTXISD::LoadV2/LoadV4 target node whose scalar results are
/// recombined with BUILD_VECTOR; pushes the vector and the load chain
/// into \p Results for the type legalizer.
2045 static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
2046 SmallVectorImpl<SDValue> &Results) {
2047 EVT ResVT = N->getValueType(0);
2050 assert(ResVT.isVector() && "Vector load must have vector type");
2052 // We only handle "native" vector sizes for now, e.g. <4 x double> is not
2053 // legal. We can (and should) split that into 2 loads of <2 x double> here
2054 // but I'm leaving that as a TODO for now.
2055 assert(ResVT.isSimple() && "Can only handle simple types");
2056 switch (ResVT.getSimpleVT().SimpleTy) {
2069 // This is a "native" vector type
2073 EVT EltVT = ResVT.getVectorElementType();
2074 unsigned NumElts = ResVT.getVectorNumElements();
2076 // Since LoadV2 is a target node, we cannot rely on DAG type legalization.
2077 // Therefore, we must ensure the type is legal. For i1 and i8, we set the
2078 // loaded type to i16 and propagate the "real" type as the memory type.
2079 bool NeedTrunc = false;
2080 if (EltVT.getSizeInBits() < 16) {
// Choose LoadV2 or LoadV4 and the matching result-VT list based on the
// element count.
2085 unsigned Opcode = 0;
2092 Opcode = NVPTXISD::LoadV2;
2093 LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
2096 Opcode = NVPTXISD::LoadV4;
2097 EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
2098 LdResVTs = DAG.getVTList(ListVTs, 5);
2103 SmallVector<SDValue, 8> OtherOps;
2105 // Copy regular operands
2106 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
2107 OtherOps.push_back(N->getOperand(i));
2109 LoadSDNode *LD = cast<LoadSDNode>(N);
2111 // The select routine does not have access to the LoadSDNode instance, so
2112 // pass along the extension information
2113 OtherOps.push_back(DAG.getIntPtrConstant(LD->getExtensionType()));
2115 SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, &OtherOps[0],
2116 OtherOps.size(), LD->getMemoryVT(),
2117 LD->getMemOperand());
2119 SmallVector<SDValue, 4> ScalarRes;
// Collect the scalar results, truncating back to the real element type
// if they were widened to i16 above.
2121 for (unsigned i = 0; i < NumElts; ++i) {
2122 SDValue Res = NewLD.getValue(i);
2124 Res = DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
2125 ScalarRes.push_back(Res);
// The chain is the result right after the last element.
2128 SDValue LoadChain = NewLD.getValue(NumElts);
2131 DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, &ScalarRes[0], NumElts);
2133 Results.push_back(BuildVec);
2134 Results.push_back(LoadChain);
// Custom result-type legalization for the nvvm ldg/ldu load intrinsics.
// Vector results are lowered to NVPTX LDGV2/LDGV4/LDUV2/LDUV4 target nodes
// (one scalar result per element plus a chain); a scalar i8 result is
// widened to i16 with the memory type forced to i8.
// NOTE(review): several interior lines (switch/case labels, braces, guard
// bodies) are missing from this excerpt; comments describe only the visible
// code.
2137 static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG,
2138 SmallVectorImpl<SDValue> &Results) {
2139 SDValue Chain = N->getOperand(0);
2140 SDValue Intrin = N->getOperand(1);
2143 // Get the intrinsic ID
2144 unsigned IntrinNo = cast<ConstantSDNode>(Intrin.getNode())->getZExtValue();
2148 case Intrinsic::nvvm_ldg_global_i:
2149 case Intrinsic::nvvm_ldg_global_f:
2150 case Intrinsic::nvvm_ldg_global_p:
2151 case Intrinsic::nvvm_ldu_global_i:
2152 case Intrinsic::nvvm_ldu_global_f:
2153 case Intrinsic::nvvm_ldu_global_p: {
2154 EVT ResVT = N->getValueType(0);
2156 if (ResVT.isVector()) {
2159 unsigned NumElts = ResVT.getVectorNumElements();
2160 EVT EltVT = ResVT.getVectorElementType();
2162 // Since LDU/LDG are target nodes, we cannot rely on DAG type
2164 // Therefore, we must ensure the type is legal. For i1 and i8, we set the
2165 // loaded type to i16 and propagate the "real" type as the memory type.
2166 bool NeedTrunc = false;
// Sub-16-bit elements are widened for the load; the body of this guard
// (presumably setting EltVT = MVT::i16 and NeedTrunc = true) is not visible
// in this excerpt -- confirm against the full file.
2167 if (EltVT.getSizeInBits() < 16) {
2168 unsigned Opcode = 0;
// 2-element case: pick LDGV2 or LDUV2 depending on the intrinsic family.
2182 case Intrinsic::nvvm_ldg_global_i:
2183 case Intrinsic::nvvm_ldg_global_f:
2184 case Intrinsic::nvvm_ldg_global_p:
2185 Opcode = NVPTXISD::LDGV2;
2187 case Intrinsic::nvvm_ldu_global_i:
2188 case Intrinsic::nvvm_ldu_global_f:
2189 case Intrinsic::nvvm_ldu_global_p:
2190 Opcode = NVPTXISD::LDUV2;
2193 LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
// 4-element case: pick LDGV4 or LDUV4 and a 4-value + chain VT list.
2199 case Intrinsic::nvvm_ldg_global_i:
2200 case Intrinsic::nvvm_ldg_global_f:
2201 case Intrinsic::nvvm_ldg_global_p:
2202 Opcode = NVPTXISD::LDGV4;
2204 case Intrinsic::nvvm_ldu_global_i:
2205 case Intrinsic::nvvm_ldu_global_f:
2206 case Intrinsic::nvvm_ldu_global_p:
2207 Opcode = NVPTXISD::LDUV4;
2210 EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
2211 LdResVTs = DAG.getVTList(ListVTs, 5);
2216 SmallVector<SDValue, 8> OtherOps;
2218 // Copy regular operands
2220 OtherOps.push_back(Chain); // Chain
2221 // Skip operand 1 (intrinsic ID)
2223 for (unsigned i = 2, e = N->getNumOperands(); i != e; ++i)
2224 OtherOps.push_back(N->getOperand(i));
2226 MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);
// Build the target load, preserving the original memory VT and memory
// operand so isel sees the "real" (possibly narrower) in-memory type.
2228 SDValue NewLD = DAG.getMemIntrinsicNode(
2229 Opcode, DL, LdResVTs, &OtherOps[0], OtherOps.size(),
2230 MemSD->getMemoryVT(), MemSD->getMemOperand());
2232 SmallVector<SDValue, 4> ScalarRes;
// Collect the NumElts scalar results, truncating widened lanes back to the
// original element type. NOTE(review): the NeedTrunc guard around this
// TRUNCATE is not visible in this excerpt -- presumably it is conditional.
2234 for (unsigned i = 0; i < NumElts; ++i) {
2235 SDValue Res = NewLD.getValue(i);
2238 DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
2239 ScalarRes.push_back(Res);
// The chain output follows the NumElts scalar results.
2242 SDValue LoadChain = NewLD.getValue(NumElts);
2245 DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT, &ScalarRes[0], NumElts);
// Hand the rebuilt vector and the new chain back to the legalizer.
2247 Results.push_back(BuildVec);
2248 Results.push_back(LoadChain);
// Scalar path: only i8 results are expected to reach custom legalization.
2251 assert(ResVT.isSimple() && ResVT.getSimpleVT().SimpleTy == MVT::i8 &&
2252 "Custom handling of non-i8 ldu/ldg?");
2254 // Just copy all operands as-is
2255 SmallVector<SDValue, 4> Ops;
2256 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
2257 Ops.push_back(N->getOperand(i));
2259 // Force output to i16
2260 SDVTList LdResVTs = DAG.getVTList(MVT::i16, MVT::Other);
2262 MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);
2264 // We make sure the memory type is i8, which will be used during isel
2265 // to select the proper instruction.
2267 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, LdResVTs, &Ops[0],
2268 Ops.size(), MVT::i8, MemSD->getMemOperand());
// Truncate the widened i16 result back to i8 and return it with the chain.
2270 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
2271 NewLD.getValue(0)));
2272 Results.push_back(NewLD.getValue(1));
2278 void NVPTXTargetLowering::ReplaceNodeResults(
2279 SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
2280 switch (N->getOpcode()) {
2282 report_fatal_error("Unhandled custom legalization");
2284 ReplaceLoadVector(N, DAG, Results);
2286 case ISD::INTRINSIC_W_CHAIN:
2287 ReplaceINTRINSIC_W_CHAIN(N, DAG, Results);