//===-- NVPTXISelLowering.cpp - NVPTX DAG Lowering Implementation --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that NVPTX uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#include "NVPTXISelLowering.h"
#include "NVPTX.h"
#include "NVPTXTargetMachine.h"
#include "NVPTXTargetObjectFile.h"
#include "NVPTXUtilities.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <sstream>

#undef DEBUG_TYPE
#define DEBUG_TYPE "nvptx-lower"

using namespace llvm;

static unsigned int uniqueCallSite = 0;
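// Each lowered call site gets a unique id from this counter; it names the
// emitted "prototype_<n>" label and the matching CALLSEQ markers (see
// getPrototype and LowerCall below).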
static cl::opt<bool>
sched4reg("nvptx-sched4reg",
          cl::desc("NVPTX Specific: schedule for register pressure"),
          cl::init(false));
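// Example use (assuming the standard llc driver; the flag spelling is as
// registered above):
//   llc -march=nvptx64 -mcpu=sm_20 -nvptx-sched4reg kernel.ll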
static bool IsPTXVectorType(MVT VT) {
  switch (VT.SimpleTy) {
  default: return false;
// NVPTXTargetLowering Constructor.
NVPTXTargetLowering::NVPTXTargetLowering(NVPTXTargetMachine &TM)
: TargetLowering(TM, new NVPTXTargetObjectFile()),
  nvTM(&TM),
  nvptxSubtarget(TM.getSubtarget<NVPTXSubtarget>()) {

  // Always lower memset, memcpy, and memmove intrinsics to load/store
  // instructions, rather than generating calls to memset, memcpy, or memmove.
  MaxStoresPerMemset = (unsigned)0xFFFFFFFF;
  MaxStoresPerMemcpy = (unsigned)0xFFFFFFFF;
  MaxStoresPerMemmove = (unsigned)0xFFFFFFFF;
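  // (PTX device code has no C runtime to provide memset/memcpy/memmove, so
  // inline expansion is effectively mandatory here.)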
  setBooleanContents(ZeroOrNegativeOneBooleanContent);

  // Jump is expensive. Don't create extra control flow for 'and' and 'or'
  // conditional branches.
  setJumpIsExpensive(true);

  // By default, use the Source scheduling
  if (sched4reg)
    setSchedulingPreference(Sched::RegPressure);
  else
    setSchedulingPreference(Sched::Source);
  addRegisterClass(MVT::i1, &NVPTX::Int1RegsRegClass);
  addRegisterClass(MVT::i8, &NVPTX::Int8RegsRegClass);
  addRegisterClass(MVT::i16, &NVPTX::Int16RegsRegClass);
  addRegisterClass(MVT::i32, &NVPTX::Int32RegsRegClass);
  addRegisterClass(MVT::i64, &NVPTX::Int64RegsRegClass);
  addRegisterClass(MVT::f32, &NVPTX::Float32RegsRegClass);
  addRegisterClass(MVT::f64, &NVPTX::Float64RegsRegClass);
  // Operations not directly supported by NVPTX.
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);
  setOperationAction(ISD::BR_CC, MVT::i1, Expand);
  setOperationAction(ISD::BR_CC, MVT::i8, Expand);
  setOperationAction(ISD::BR_CC, MVT::i16, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i64, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  if (nvptxSubtarget.hasROT64()) {
    setOperationAction(ISD::ROTL, MVT::i64, Legal);
    setOperationAction(ISD::ROTR, MVT::i64, Legal);
  }
  else {
    setOperationAction(ISD::ROTL, MVT::i64, Expand);
    setOperationAction(ISD::ROTR, MVT::i64, Expand);
  }
  if (nvptxSubtarget.hasROT32()) {
    setOperationAction(ISD::ROTL, MVT::i32, Legal);
    setOperationAction(ISD::ROTR, MVT::i32, Legal);
  }
  else {
    setOperationAction(ISD::ROTL, MVT::i32, Expand);
    setOperationAction(ISD::ROTR, MVT::i32, Expand);
  }

  setOperationAction(ISD::ROTL, MVT::i16, Expand);
  setOperationAction(ISD::ROTR, MVT::i16, Expand);
  setOperationAction(ISD::ROTL, MVT::i8, Expand);
  setOperationAction(ISD::ROTR, MVT::i8, Expand);
  setOperationAction(ISD::BSWAP, MVT::i16, Expand);
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i64, Expand);
  // Indirect branch is not supported.
  // This also disables Jump Table creation.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRIND, MVT::Other, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);

  // We want to legalize constant-related memmove and memcpy
  // intrinsics.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);

  // Turn FP extload into load/fextend
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
  // Turn FP truncstore into trunc + store.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PTX does not support load / store predicate registers
  setOperationAction(ISD::LOAD, MVT::i1, Custom);
  setOperationAction(ISD::STORE, MVT::i1, Custom);

  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setTruncStoreAction(MVT::i64, MVT::i1, Expand);
  setTruncStoreAction(MVT::i32, MVT::i1, Expand);
  setTruncStoreAction(MVT::i16, MVT::i1, Expand);
  setTruncStoreAction(MVT::i8, MVT::i1, Expand);

  // This is legal in NVPTX
  setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);

  // TRAP can be lowered to PTX trap
  setOperationAction(ISD::TRAP, MVT::Other, Legal);
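  // (So an "llvm.trap" call lowers straight to the PTX "trap;" instruction.)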
  // Register custom handling for vector loads/stores
  for (int i = MVT::FIRST_VECTOR_VALUETYPE;
       i <= MVT::LAST_VECTOR_VALUETYPE; ++i) {
    MVT VT = (MVT::SimpleValueType)i;
    if (IsPTXVectorType(VT)) {
      setOperationAction(ISD::LOAD, VT, Custom);
      setOperationAction(ISD::STORE, VT, Custom);
      setOperationAction(ISD::INTRINSIC_W_CHAIN, VT, Custom);
    }
  }

  // Now deduce the information based on the above-mentioned actions.
  computeRegisterProperties();
}
const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return 0;
  case NVPTXISD::CALL: return "NVPTXISD::CALL";
  case NVPTXISD::RET_FLAG: return "NVPTXISD::RET_FLAG";
  case NVPTXISD::Wrapper: return "NVPTXISD::Wrapper";
  case NVPTXISD::NVBuiltin: return "NVPTXISD::NVBuiltin";
  case NVPTXISD::DeclareParam: return "NVPTXISD::DeclareParam";
  case NVPTXISD::DeclareScalarParam:
    return "NVPTXISD::DeclareScalarParam";
  case NVPTXISD::DeclareRet: return "NVPTXISD::DeclareRet";
  case NVPTXISD::DeclareRetParam: return "NVPTXISD::DeclareRetParam";
  case NVPTXISD::PrintCall: return "NVPTXISD::PrintCall";
  case NVPTXISD::LoadParam: return "NVPTXISD::LoadParam";
  case NVPTXISD::StoreParam: return "NVPTXISD::StoreParam";
  case NVPTXISD::StoreParamS32: return "NVPTXISD::StoreParamS32";
  case NVPTXISD::StoreParamU32: return "NVPTXISD::StoreParamU32";
  case NVPTXISD::MoveToParam: return "NVPTXISD::MoveToParam";
  case NVPTXISD::CallArgBegin: return "NVPTXISD::CallArgBegin";
  case NVPTXISD::CallArg: return "NVPTXISD::CallArg";
  case NVPTXISD::LastCallArg: return "NVPTXISD::LastCallArg";
  case NVPTXISD::CallArgEnd: return "NVPTXISD::CallArgEnd";
  case NVPTXISD::CallVoid: return "NVPTXISD::CallVoid";
  case NVPTXISD::CallVal: return "NVPTXISD::CallVal";
  case NVPTXISD::CallSymbol: return "NVPTXISD::CallSymbol";
  case NVPTXISD::Prototype: return "NVPTXISD::Prototype";
  case NVPTXISD::MoveParam: return "NVPTXISD::MoveParam";
  case NVPTXISD::MoveRetval: return "NVPTXISD::MoveRetval";
  case NVPTXISD::MoveToRetval: return "NVPTXISD::MoveToRetval";
  case NVPTXISD::StoreRetval: return "NVPTXISD::StoreRetval";
  case NVPTXISD::PseudoUseParam: return "NVPTXISD::PseudoUseParam";
  case NVPTXISD::RETURN: return "NVPTXISD::RETURN";
  case NVPTXISD::CallSeqBegin: return "NVPTXISD::CallSeqBegin";
  case NVPTXISD::CallSeqEnd: return "NVPTXISD::CallSeqEnd";
  case NVPTXISD::LoadV2: return "NVPTXISD::LoadV2";
  case NVPTXISD::LoadV4: return "NVPTXISD::LoadV4";
  case NVPTXISD::LDGV2: return "NVPTXISD::LDGV2";
  case NVPTXISD::LDGV4: return "NVPTXISD::LDGV4";
  case NVPTXISD::LDUV2: return "NVPTXISD::LDUV2";
  case NVPTXISD::LDUV4: return "NVPTXISD::LDUV4";
  case NVPTXISD::StoreV2: return "NVPTXISD::StoreV2";
  case NVPTXISD::StoreV4: return "NVPTXISD::StoreV4";
  }
}
bool NVPTXTargetLowering::shouldSplitVectorElementType(EVT VT) const {
  return VT == MVT::i1;
}

SDValue
NVPTXTargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
  DebugLoc dl = Op.getDebugLoc();
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  Op = DAG.getTargetGlobalAddress(GV, dl, getPointerTy());
  return DAG.getNode(NVPTXISD::Wrapper, dl, getPointerTy(), Op);
}
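// getPrototype builds the ".callprototype" string PTX needs for indirect
// calls. For example, a call to "float f(float)" under the >= sm_20 ABI
// produces something like:
//   prototype_0 : .callprototype (.param .b32 _) _ (.param .b32 _);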
std::string NVPTXTargetLowering::getPrototype(Type *retTy,
                                              const ArgListTy &Args,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                              unsigned retAlignment) const {
  bool isABI = (nvptxSubtarget.getSmVersion() >= 20);

  std::stringstream O;
  O << "prototype_" << uniqueCallSite << " : .callprototype ";

  if (retTy->getTypeID() == Type::VoidTyID)
    O << "()";
  else {
    O << "(";
    if (isABI) {
      if (retTy->isPrimitiveType() || retTy->isIntegerTy()) {
        unsigned size = 0;
        if (const IntegerType *ITy = dyn_cast<IntegerType>(retTy)) {
          size = ITy->getBitWidth();
          if (size < 32) size = 32;
        }
        else {
          assert(retTy->isFloatingPointTy() &&
                 "Floating point type expected here");
          size = retTy->getPrimitiveSizeInBits();
        }

        O << ".param .b" << size << " _";
      }
      else if (isa<PointerType>(retTy))
        O << ".param .b" << getPointerTy().getSizeInBits()
          << " _";
      else {
        if ((retTy->getTypeID() == Type::StructTyID) ||
            isa<VectorType>(retTy)) {
          SmallVector<EVT, 16> vtparts;
          ComputeValueVTs(*this, retTy, vtparts);
          unsigned totalsz = 0;
          for (unsigned i=0,e=vtparts.size(); i!=e; ++i) {
            unsigned elems = 1;
            EVT elemtype = vtparts[i];
            if (vtparts[i].isVector()) {
              elems = vtparts[i].getVectorNumElements();
              elemtype = vtparts[i].getVectorElementType();
            }
            for (unsigned j=0, je=elems; j!=je; ++j) {
              unsigned sz = elemtype.getSizeInBits();
              if (elemtype.isInteger() && (sz < 8)) sz = 8;
              totalsz += sz/8;
            }
          }
          O << ".param .align " << retAlignment
            << " .b8 _[" << totalsz << "]";
        }
        else {
          assert(false &&
                 "Unknown return type");
        }
      }
    }
    else {
      SmallVector<EVT, 16> vtparts;
      ComputeValueVTs(*this, retTy, vtparts);
      for (unsigned i=0,e=vtparts.size(); i!=e; ++i) {
        unsigned elems = 1;
        EVT elemtype = vtparts[i];
        if (vtparts[i].isVector()) {
          elems = vtparts[i].getVectorNumElements();
          elemtype = vtparts[i].getVectorElementType();
        }

        for (unsigned j=0, je=elems; j!=je; ++j) {
          unsigned sz = elemtype.getSizeInBits();
          if (elemtype.isInteger() && (sz < 32)) sz = 32;
          O << ".reg .b" << sz << " _";
          if (j<je-1) O << ", ";
        }
        if (i < e-1)
          O << ", ";
      }
    }
    O << ") ";
  }
  O << "_ (";

  bool first = true;
  MVT thePointerTy = getPointerTy();

  for (unsigned i=0,e=Args.size(); i!=e; ++i) {
    const Type *Ty = Args[i].Ty;
    if (!first)
      O << ", ";
    first = false;

    if (Outs[i].Flags.isByVal() == false) {
      unsigned sz = 0;
      if (isa<IntegerType>(Ty)) {
        sz = cast<IntegerType>(Ty)->getBitWidth();
        if (sz < 32) sz = 32;
      }
      else if (isa<PointerType>(Ty))
        sz = thePointerTy.getSizeInBits();
      else
        sz = Ty->getPrimitiveSizeInBits();
      if (isABI)
        O << ".param .b" << sz << " ";
      else
        O << ".reg .b" << sz << " ";
      O << "_";
      continue;
    }
    const PointerType *PTy = dyn_cast<PointerType>(Ty);
    assert(PTy &&
           "Param with byval attribute should be a pointer type");
    Type *ETy = PTy->getElementType();

    if (isABI) {
      unsigned align = Outs[i].Flags.getByValAlign();
      unsigned sz = getDataLayout()->getTypeAllocSize(ETy);
      O << ".param .align " << align
        << " .b8 ";
      O << "_";
      O << "[" << sz << "]";
      continue;
    }
    else {
      SmallVector<EVT, 16> vtparts;
      ComputeValueVTs(*this, ETy, vtparts);
      for (unsigned i=0,e=vtparts.size(); i!=e; ++i) {
        unsigned elems = 1;
        EVT elemtype = vtparts[i];
        if (vtparts[i].isVector()) {
          elems = vtparts[i].getVectorNumElements();
          elemtype = vtparts[i].getVectorElementType();
        }

        for (unsigned j=0,je=elems; j!=je; ++j) {
          unsigned sz = elemtype.getSizeInBits();
          if (elemtype.isInteger() && (sz < 32)) sz = 32;
          O << ".reg .b" << sz << " ";
          if (j<je-1) O << ", ";
        }
        if (i < e-1)
          O << ", ";
      }
      continue;
    }
  }

  O << ");";
  return O.str();
}
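// LowerCall below emits the PTX call sequence as a chain of glued target
// nodes: DeclareParam/StoreParam (or MoveToParam) per argument, then
// DeclareRet, PrintCall, CallVoid, CallArgBegin/CallArg/CallArgEnd, and
// finally LoadParam nodes that fetch the results.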
SDValue
NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                               SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  DebugLoc &dl = CLI.DL;
  SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
  SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
  SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  ArgListTy &Args = CLI.Args;
  Type *retTy = CLI.RetTy;
  ImmutableCallSite *CS = CLI.CS;
  bool isABI = (nvptxSubtarget.getSmVersion() >= 20);

  SDValue tempChain = Chain;
  Chain = DAG.getCALLSEQ_START(Chain,
                               DAG.getIntPtrConstant(uniqueCallSite, true));
  SDValue InFlag = Chain.getValue(1);

  assert((Outs.size() == Args.size()) &&
         "Unexpected number of arguments to function call");
  unsigned paramCount = 0;
  // Declare the .param or .reg needed to pass values
  // to the function
  for (unsigned i=0, e=Outs.size(); i!=e; ++i) {
    EVT VT = Outs[i].VT;

    if (Outs[i].Flags.isByVal() == false) {
      // Plain scalar:
      // for ABI,    declare .param .b<size> .param<n>;
      // for nonABI, declare .reg .b<size> .param<n>;
      unsigned isReg = 1;
      if (isABI)
        isReg = 0;
      unsigned sz = VT.getSizeInBits();
      if (VT.isInteger() && (sz < 32)) sz = 32;
      SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
      SDValue DeclareParamOps[] = { Chain,
                                    DAG.getConstant(paramCount, MVT::i32),
                                    DAG.getConstant(sz, MVT::i32),
                                    DAG.getConstant(isReg, MVT::i32),
                                    InFlag };
      Chain = DAG.getNode(NVPTXISD::DeclareScalarParam, dl, DeclareParamVTs,
                          DeclareParamOps, 5);
      InFlag = Chain.getValue(1);
      SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
      SDValue CopyParamOps[] = { Chain, DAG.getConstant(paramCount, MVT::i32),
                                 DAG.getConstant(0, MVT::i32), OutVals[i],
                                 InFlag };

      unsigned opcode = NVPTXISD::StoreParam;
      if (isReg)
        opcode = NVPTXISD::MoveToParam;
      else {
        if (Outs[i].Flags.isZExt())
          opcode = NVPTXISD::StoreParamU32;
        else if (Outs[i].Flags.isSExt())
          opcode = NVPTXISD::StoreParamS32;
      }
      Chain = DAG.getNode(opcode, dl, CopyParamVTs, CopyParamOps, 5);

      InFlag = Chain.getValue(1);
      ++paramCount;
      continue;
    }
    // struct or vector
    SmallVector<EVT, 16> vtparts;
    const PointerType *PTy = dyn_cast<PointerType>(Args[i].Ty);
    assert(PTy &&
           "Type of a byval parameter should be pointer");
    ComputeValueVTs(*this, PTy->getElementType(), vtparts);

    if (isABI) {
      // declare .param .align 16 .b8 .param<n>[<size>];
      unsigned sz = Outs[i].Flags.getByValSize();
      SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
      // The ByValAlign in Outs[i].Flags is always set at this point, so we
      // don't need to worry about natural alignment or not. See
      // TargetLowering::LowerCallTo().
      SDValue DeclareParamOps[] = { Chain,
                      DAG.getConstant(Outs[i].Flags.getByValAlign(), MVT::i32),
                                    DAG.getConstant(paramCount, MVT::i32),
                                    DAG.getConstant(sz, MVT::i32),
                                    InFlag };
      Chain = DAG.getNode(NVPTXISD::DeclareParam, dl, DeclareParamVTs,
                          DeclareParamOps, 5);
      InFlag = Chain.getValue(1);
      unsigned curOffset = 0;
      for (unsigned j=0,je=vtparts.size(); j!=je; ++j) {
        unsigned elems = 1;
        EVT elemtype = vtparts[j];
        if (vtparts[j].isVector()) {
          elems = vtparts[j].getVectorNumElements();
          elemtype = vtparts[j].getVectorElementType();
        }
        for (unsigned k=0,ke=elems; k!=ke; ++k) {
          unsigned sz = elemtype.getSizeInBits();
          if (elemtype.isInteger() && (sz < 8)) sz = 8;
          SDValue srcAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(),
                                        OutVals[i],
                                        DAG.getConstant(curOffset,
                                                        getPointerTy()));
          SDValue theVal = DAG.getLoad(elemtype, dl, tempChain, srcAddr,
                                MachinePointerInfo(), false, false, false, 0);
          SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
          SDValue CopyParamOps[] = { Chain, DAG.getConstant(paramCount,
                                                            MVT::i32),
                                     DAG.getConstant(curOffset, MVT::i32),
                                     theVal, InFlag };
          Chain = DAG.getNode(NVPTXISD::StoreParam, dl, CopyParamVTs,
                              CopyParamOps, 5);
          InFlag = Chain.getValue(1);
          curOffset += sz/8;
        }
      }
      ++paramCount;
      continue;
    }
    // Non-ABI, struct or vector:
    // declare a bunch of .reg .b<size> .param<n>
    unsigned curOffset = 0;
    for (unsigned j=0,je=vtparts.size(); j!=je; ++j) {
      unsigned elems = 1;
      EVT elemtype = vtparts[j];
      if (vtparts[j].isVector()) {
        elems = vtparts[j].getVectorNumElements();
        elemtype = vtparts[j].getVectorElementType();
      }
      for (unsigned k=0,ke=elems; k!=ke; ++k) {
        unsigned sz = elemtype.getSizeInBits();
        if (elemtype.isInteger() && (sz < 32)) sz = 32;
        SDVTList DeclareParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
        SDValue DeclareParamOps[] = { Chain, DAG.getConstant(paramCount,
                                                             MVT::i32),
                                      DAG.getConstant(sz, MVT::i32),
                                      DAG.getConstant(1, MVT::i32),
                                      InFlag };
        Chain = DAG.getNode(NVPTXISD::DeclareScalarParam, dl, DeclareParamVTs,
                            DeclareParamOps, 5);
        InFlag = Chain.getValue(1);
        SDValue srcAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), OutVals[i],
                                      DAG.getConstant(curOffset,
                                                      getPointerTy()));
        SDValue theVal = DAG.getLoad(elemtype, dl, tempChain, srcAddr,
                              MachinePointerInfo(), false, false, false, 0);
        SDVTList CopyParamVTs = DAG.getVTList(MVT::Other, MVT::Glue);
        SDValue CopyParamOps[] = { Chain, DAG.getConstant(paramCount, MVT::i32),
                                   DAG.getConstant(0, MVT::i32), theVal,
                                   InFlag };
        Chain = DAG.getNode(NVPTXISD::MoveToParam, dl, CopyParamVTs,
                            CopyParamOps, 5);
        InFlag = Chain.getValue(1);
        curOffset += sz/8;
        ++paramCount;
      }
    }
  }
  GlobalAddressSDNode *Func = dyn_cast<GlobalAddressSDNode>(Callee.getNode());
  unsigned retAlignment = 0;

  unsigned retCount = 0;
  if (Ins.size() > 0) {
    SmallVector<EVT, 16> resvtparts;
    ComputeValueVTs(*this, retTy, resvtparts);

    // Declare one .param .align 16 .b8 func_retval0[<size>] for ABI or
    // individual .reg .b<size> func_retval<0..> for non-ABI
    unsigned resultsz = 0;
    for (unsigned i=0,e=resvtparts.size(); i!=e; ++i) {
      unsigned elems = 1;
      EVT elemtype = resvtparts[i];
      if (resvtparts[i].isVector()) {
        elems = resvtparts[i].getVectorNumElements();
        elemtype = resvtparts[i].getVectorElementType();
      }
      for (unsigned j=0,je=elems; j!=je; ++j) {
        unsigned sz = elemtype.getSizeInBits();
        if (isABI == false) {
          if (elemtype.isInteger() && (sz < 32)) sz = 32;
        }
        else {
          if (elemtype.isInteger() && (sz < 8)) sz = 8;
        }
        if (isABI == false) {
          SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
          SDValue DeclareRetOps[] = { Chain, DAG.getConstant(2, MVT::i32),
                                      DAG.getConstant(sz, MVT::i32),
                                      DAG.getConstant(retCount, MVT::i32),
                                      InFlag };
          Chain = DAG.getNode(NVPTXISD::DeclareRet, dl, DeclareRetVTs,
                              DeclareRetOps, 5);
          InFlag = Chain.getValue(1);
          ++retCount;
        }
        resultsz += sz;
      }
    }
    if (isABI) {
      if (retTy->isPrimitiveType() || retTy->isIntegerTy() ||
          retTy->isPointerTy()) {
        // Scalar needs to be at least 32bit wide
        if (resultsz < 32)
          resultsz = 32;
        SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
        SDValue DeclareRetOps[] = { Chain, DAG.getConstant(1, MVT::i32),
                                    DAG.getConstant(resultsz, MVT::i32),
                                    DAG.getConstant(0, MVT::i32), InFlag };
        Chain = DAG.getNode(NVPTXISD::DeclareRet, dl, DeclareRetVTs,
                            DeclareRetOps, 5);
        InFlag = Chain.getValue(1);
      }
      else {
        if (Func) { // direct call
          if (!llvm::getAlign(*(CS->getCalledFunction()), 0, retAlignment))
            retAlignment = getDataLayout()->getABITypeAlignment(retTy);
        } else { // indirect call
          const CallInst *CallI = dyn_cast<CallInst>(CS->getInstruction());
          if (!llvm::getAlign(*CallI, 0, retAlignment))
            retAlignment = getDataLayout()->getABITypeAlignment(retTy);
        }
        SDVTList DeclareRetVTs = DAG.getVTList(MVT::Other, MVT::Glue);
        SDValue DeclareRetOps[] = { Chain, DAG.getConstant(retAlignment,
                                                           MVT::i32),
                                    DAG.getConstant(resultsz/8, MVT::i32),
                                    DAG.getConstant(0, MVT::i32), InFlag };
        Chain = DAG.getNode(NVPTXISD::DeclareRetParam, dl, DeclareRetVTs,
                            DeclareRetOps, 5);
        InFlag = Chain.getValue(1);
      }
    }
  }
  if (!Func) {
    // This is the indirect function call case: PTX requires a prototype of
    // the form
    // proto_0 : .callprototype(.param .b32 _) _ (.param .b32 _);
    // to be emitted, and the label has to be used as the last arg of the call
    // instruction.
    // The prototype is embedded in a string and put as the operand for an
    // INLINEASM SDNode.
    SDVTList InlineAsmVTs = DAG.getVTList(MVT::Other, MVT::Glue);
    std::string proto_string = getPrototype(retTy, Args, Outs, retAlignment);
    const char *asmstr = nvTM->getManagedStrPool()->
        getManagedString(proto_string.c_str())->c_str();
    SDValue InlineAsmOps[] = { Chain,
                               DAG.getTargetExternalSymbol(asmstr,
                                                           getPointerTy()),
                               DAG.getMDNode(0),
                               DAG.getTargetConstant(0, MVT::i32), InFlag };
    Chain = DAG.getNode(ISD::INLINEASM, dl, InlineAsmVTs, InlineAsmOps, 5);
    InFlag = Chain.getValue(1);
  }
  // Op to just print "call"
  SDVTList PrintCallVTs = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue PrintCallOps[] = { Chain,
                             DAG.getConstant(isABI ? ((Ins.size()==0) ? 0 : 1)
                                                   : retCount, MVT::i32),
                             InFlag };
  Chain = DAG.getNode(Func ? (NVPTXISD::PrintCallUni) : (NVPTXISD::PrintCall),
                      dl, PrintCallVTs, PrintCallOps, 3);
  InFlag = Chain.getValue(1);

  // Ops to print out the function name
  SDVTList CallVoidVTs = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue CallVoidOps[] = { Chain, Callee, InFlag };
  Chain = DAG.getNode(NVPTXISD::CallVoid, dl, CallVoidVTs, CallVoidOps, 3);
  InFlag = Chain.getValue(1);

  // Ops to print out the param list
  SDVTList CallArgBeginVTs = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue CallArgBeginOps[] = { Chain, InFlag };
  Chain = DAG.getNode(NVPTXISD::CallArgBegin, dl, CallArgBeginVTs,
                      CallArgBeginOps, 2);
  InFlag = Chain.getValue(1);
  for (unsigned i=0, e=paramCount; i!=e; ++i) {
    unsigned opcode;
    if (i == (e-1))
      opcode = NVPTXISD::LastCallArg;
    else
      opcode = NVPTXISD::CallArg;
    SDVTList CallArgVTs = DAG.getVTList(MVT::Other, MVT::Glue);
    SDValue CallArgOps[] = { Chain, DAG.getConstant(1, MVT::i32),
                             DAG.getConstant(i, MVT::i32),
                             InFlag };
    Chain = DAG.getNode(opcode, dl, CallArgVTs, CallArgOps, 4);
    InFlag = Chain.getValue(1);
  }
  SDVTList CallArgEndVTs = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue CallArgEndOps[] = { Chain,
                              DAG.getConstant(Func ? 1 : 0, MVT::i32),
                              InFlag };
  Chain = DAG.getNode(NVPTXISD::CallArgEnd, dl, CallArgEndVTs, CallArgEndOps,
                      3);
  InFlag = Chain.getValue(1);
  if (!Func) {
    SDVTList PrototypeVTs = DAG.getVTList(MVT::Other, MVT::Glue);
    SDValue PrototypeOps[] = { Chain,
                               DAG.getConstant(uniqueCallSite, MVT::i32),
                               InFlag };
    Chain = DAG.getNode(NVPTXISD::Prototype, dl, PrototypeVTs, PrototypeOps, 3);
    InFlag = Chain.getValue(1);
  }
  // Generate loads from param memory/moves from registers for result
  if (Ins.size() > 0) {
    if (isABI) {
      unsigned resoffset = 0;
      for (unsigned i=0,e=Ins.size(); i!=e; ++i) {
        unsigned sz = Ins[i].VT.getSizeInBits();
        if (Ins[i].VT.isInteger() && (sz < 8)) sz = 8;
        EVT LoadRetVTs[] = { Ins[i].VT, MVT::Other, MVT::Glue };
        SDValue LoadRetOps[] = {
          Chain,
          DAG.getConstant(1, MVT::i32),
          DAG.getConstant(resoffset, MVT::i32),
          InFlag
        };
        SDValue retval = DAG.getNode(NVPTXISD::LoadParam, dl, LoadRetVTs,
                                     LoadRetOps, array_lengthof(LoadRetOps));
        Chain = retval.getValue(1);
        InFlag = retval.getValue(2);
        InVals.push_back(retval);
        resoffset += sz/8;
      }
    }
    else {
      SmallVector<EVT, 16> resvtparts;
      ComputeValueVTs(*this, retTy, resvtparts);

      assert(Ins.size() == resvtparts.size() &&
             "Unexpected number of return values in non-ABI case");
      unsigned paramNum = 0;
      for (unsigned i=0,e=Ins.size(); i!=e; ++i) {
        assert(EVT(Ins[i].VT) == resvtparts[i] &&
               "Unexpected EVT type in non-ABI case");
        unsigned numelems = 1;
        EVT elemtype = Ins[i].VT;
        if (Ins[i].VT.isVector()) {
          numelems = Ins[i].VT.getVectorNumElements();
          elemtype = Ins[i].VT.getVectorElementType();
        }
        std::vector<SDValue> tempRetVals;
        for (unsigned j=0; j<numelems; ++j) {
          EVT MoveRetVTs[] = { elemtype, MVT::Other, MVT::Glue };
          SDValue MoveRetOps[] = {
            Chain,
            DAG.getConstant(0, MVT::i32),
            DAG.getConstant(paramNum, MVT::i32),
            InFlag
          };
          SDValue retval = DAG.getNode(NVPTXISD::LoadParam, dl, MoveRetVTs,
                                       MoveRetOps, array_lengthof(MoveRetOps));
          Chain = retval.getValue(1);
          InFlag = retval.getValue(2);
          tempRetVals.push_back(retval);
          ++paramNum;
        }
        if (Ins[i].VT.isVector())
          InVals.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, Ins[i].VT,
                                       &tempRetVals[0], tempRetVals.size()));
        else
          InVals.push_back(tempRetVals[0]);
      }
    }
  }
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getIntPtrConstant(uniqueCallSite, true),
                             DAG.getIntPtrConstant(uniqueCallSite+1, true),
                             InFlag);
  uniqueCallSite++;

  // Set isTailCall to false for now, until we figure out how to express
  // tail call optimization in PTX.
  isTailCall = false;
  return Chain;
}
// By default CONCAT_VECTORS is lowered by ExpandVectorBuildThroughStack()
// (see LegalizeDAG.cpp). This is slow and uses local memory.
// We use extract/insert/build vector, just as LegalizeOp() did in LLVM 2.5.
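// For example, concat(<2 x float> %a, <2 x float> %b) becomes
//   build_vector(a[0], a[1], b[0], b[1])
// where each element is produced by an EXTRACT_VECTOR_ELT node.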
SDValue NVPTXTargetLowering::
LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
  SDNode *Node = Op.getNode();
  DebugLoc dl = Node->getDebugLoc();
  SmallVector<SDValue, 8> Ops;
  unsigned NumOperands = Node->getNumOperands();
  for (unsigned i=0; i < NumOperands; ++i) {
    SDValue SubOp = Node->getOperand(i);
    EVT VVT = SubOp.getNode()->getValueType(0);
    EVT EltVT = VVT.getVectorElementType();
    unsigned NumSubElem = VVT.getVectorNumElements();
    for (unsigned j=0; j < NumSubElem; ++j) {
      Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, SubOp,
                                DAG.getIntPtrConstant(j)));
    }
  }
  return DAG.getNode(ISD::BUILD_VECTOR, dl, Node->getValueType(0),
                     &Ops[0], Ops.size());
}
SDValue NVPTXTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  case ISD::RETURNADDR: return SDValue();
  case ISD::FRAMEADDR: return SDValue();
  case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
  case ISD::INTRINSIC_W_CHAIN: return Op;
  case ISD::BUILD_VECTOR:
  case ISD::EXTRACT_SUBVECTOR:
    return Op;
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::STORE: return LowerSTORE(Op, DAG);
  case ISD::LOAD: return LowerLOAD(Op, DAG);
  default:
    llvm_unreachable("Custom lowering not defined for operation");
  }
}
SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  if (Op.getValueType() == MVT::i1)
    return LowerLOADi1(Op, DAG);
  else
    return SDValue();
}
// v = load i1* addr
//   =>
// v1 = load i8* addr
// v = trunc v1 to i1
SDValue NVPTXTargetLowering::
LowerLOADi1(SDValue Op, SelectionDAG &DAG) const {
  SDNode *Node = Op.getNode();
  LoadSDNode *LD = cast<LoadSDNode>(Node);
  DebugLoc dl = Node->getDebugLoc();
  assert(LD->getExtensionType() == ISD::NON_EXTLOAD);
  assert(Node->getValueType(0) == MVT::i1 &&
         "Custom lowering for i1 load only");
  SDValue newLD = DAG.getLoad(MVT::i8, dl, LD->getChain(), LD->getBasePtr(),
                              LD->getPointerInfo(),
                              LD->isVolatile(), LD->isNonTemporal(),
                              LD->isInvariant(),
                              LD->getAlignment());
  SDValue result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, newLD);
  // The legalizer (the caller) is expecting two values from the legalized
  // load, so we build a MergeValues node for it. See ExpandUnalignedLoad()
  // in LegalizeDAG.cpp which also uses MergeValues.
  SDValue Ops[] = {result, LD->getChain()};
  return DAG.getMergeValues(Ops, 2, dl);
}
SDValue NVPTXTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  EVT ValVT = Op.getOperand(1).getValueType();
  if (ValVT == MVT::i1)
    return LowerSTOREi1(Op, DAG);
  else if (ValVT.isVector())
    return LowerSTOREVector(Op, DAG);
  else
    return SDValue();
}
SDValue
NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
  SDNode *N = Op.getNode();
  SDValue Val = N->getOperand(1);
  DebugLoc DL = N->getDebugLoc();
  EVT ValVT = Val.getValueType();

  if (ValVT.isVector()) {
    // We only handle "native" vector sizes for now, e.g. <4 x double> is not
    // legal. We can (and should) split that into 2 stores of <2 x double> here
    // but I'm leaving that as a TODO for now.
    if (!ValVT.isSimple())
      return SDValue();
    switch (ValVT.getSimpleVT().SimpleTy) {
    default: return SDValue();
    case MVT::v2i8:
    case MVT::v2i16:
    case MVT::v2i32:
    case MVT::v2i64:
    case MVT::v2f32:
    case MVT::v2f64:
    case MVT::v4i8:
    case MVT::v4i16:
    case MVT::v4i32:
    case MVT::v4f32:
      // This is a "native" vector type
      break;
    }

    EVT EltVT = ValVT.getVectorElementType();
    unsigned NumElts = ValVT.getVectorNumElements();

    // Since StoreV2 is a target node, we cannot rely on DAG type legalization.
    // Therefore, we must ensure the type is legal. For i1 and i8, we set the
    // stored type to i16 and propagate the "real" type as the memory type.
    bool NeedExt = false;
    if (EltVT.getSizeInBits() < 16)
      NeedExt = true;

    unsigned Opcode = 0;
    switch (NumElts) {
    default: return SDValue();
    case 2:
      Opcode = NVPTXISD::StoreV2;
      break;
    case 4:
      Opcode = NVPTXISD::StoreV4;
      break;
    }

    SmallVector<SDValue, 8> Ops;

    // First is the chain
    Ops.push_back(N->getOperand(0));

    // Then the split values
    for (unsigned i = 0; i < NumElts; ++i) {
      SDValue ExtVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Val,
                                   DAG.getIntPtrConstant(i));
      if (NeedExt)
        // ANY_EXTEND is correct here since the store will only look at the
        // lower-order bits anyway.
        ExtVal = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i16, ExtVal);
      Ops.push_back(ExtVal);
    }

    // Then any remaining arguments
    for (unsigned i = 2, e = N->getNumOperands(); i != e; ++i) {
      Ops.push_back(N->getOperand(i));
    }

    MemSDNode *MemSD = cast<MemSDNode>(N);

    SDValue NewSt = DAG.getMemIntrinsicNode(Opcode, DL,
                                            DAG.getVTList(MVT::Other), &Ops[0],
                                            Ops.size(), MemSD->getMemoryVT(),
                                            MemSD->getMemOperand());

    //return DCI.CombineTo(N, NewSt, true);
    return NewSt;
  }

  return SDValue();
}
SDValue NVPTXTargetLowering::
LowerSTOREi1(SDValue Op, SelectionDAG &DAG) const {
  SDNode *Node = Op.getNode();
  DebugLoc dl = Node->getDebugLoc();
  StoreSDNode *ST = cast<StoreSDNode>(Node);
  SDValue Tmp1 = ST->getChain();
  SDValue Tmp2 = ST->getBasePtr();
  SDValue Tmp3 = ST->getValue();
  assert(Tmp3.getValueType() == MVT::i1 && "Custom lowering for i1 store only");
  unsigned Alignment = ST->getAlignment();
  bool isVolatile = ST->isVolatile();
  bool isNonTemporal = ST->isNonTemporal();
  Tmp3 = DAG.getNode(ISD::ZERO_EXTEND, dl,
                     MVT::i8, Tmp3);
  SDValue Result = DAG.getStore(Tmp1, dl, Tmp3, Tmp2,
                                ST->getPointerInfo(), isVolatile,
                                isNonTemporal, Alignment);
  return Result;
}
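// Net effect of the two i1 hooks above: "load i1" becomes an i8 load plus a
// truncate, and "store i1" becomes a zero-extend to i8 plus an i8 store,
// since PTX cannot load or store predicate registers directly.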
SDValue
NVPTXTargetLowering::getExtSymb(SelectionDAG &DAG, const char *inname, int idx,
                                EVT v) const {
  std::string *name = nvTM->getManagedStrPool()->getManagedString(inname);
  std::stringstream suffix;
  suffix << idx;
  *name += suffix.str();
  return DAG.getTargetExternalSymbol(name->c_str(), v);
}

SDValue
NVPTXTargetLowering::getParamSymbol(SelectionDAG &DAG, int idx, EVT v) const {
  return getExtSymb(DAG, ".PARAM", idx, v);
}

SDValue
NVPTXTargetLowering::getParamHelpSymbol(SelectionDAG &DAG, int idx) {
  return getExtSymb(DAG, ".HLPPARAM", idx);
}
// Check to see if the kernel argument is image*_t or sampler_t

bool llvm::isImageOrSamplerVal(const Value *arg, const Module *context) {
  static const char *const specialTypes[] = {
    "struct._image2d_t",
    "struct._image3d_t",
    "struct._sampler_t"
  };

  const Type *Ty = arg->getType();
  const PointerType *PTy = dyn_cast<PointerType>(Ty);

  if (!PTy)
    return false;

  if (!context)
    return false;

  const StructType *STy = dyn_cast<StructType>(PTy->getElementType());
  const std::string TypeName = STy && !STy->isLiteral() ? STy->getName() : "";

  for (int i = 0, e = array_lengthof(specialTypes); i != e; ++i)
    if (TypeName == specialTypes[i])
      return true;

  return false;
}
SDValue
NVPTXTargetLowering::LowerFormalArguments(SDValue Chain,
                                          CallingConv::ID CallConv,
                                          bool isVarArg,
                                      const SmallVectorImpl<ISD::InputArg> &Ins,
                                          DebugLoc dl, SelectionDAG &DAG,
                                          SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  const DataLayout *TD = getDataLayout();

  const Function *F = MF.getFunction();
  const AttributeSet &PAL = F->getAttributes();

  SDValue Root = DAG.getRoot();
  std::vector<SDValue> OutChains;

  bool isKernel = llvm::isKernelFunction(*F);
  bool isABI = (nvptxSubtarget.getSmVersion() >= 20);

  std::vector<Type *> argTypes;
  std::vector<const Argument *> theArgs;
  for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
       I != E; ++I) {
    theArgs.push_back(I);
    argTypes.push_back(I->getType());
  }
  //assert(argTypes.size() == Ins.size() &&
  //       "Ins types and function types did not match");
  int idx = 0;
  for (unsigned i=0, e=argTypes.size(); i!=e; ++i, ++idx) {
    Type *Ty = argTypes[i];
    EVT ObjectVT = getValueType(Ty);
    //assert(ObjectVT == Ins[i].VT &&
    //       "Ins type did not match function type");

    // If the kernel argument is image*_t or sampler_t, convert it to
    // an i32 constant holding the parameter position. This can later be
    // matched in the AsmPrinter to output the correct mangled name.
    if (isImageOrSamplerVal(theArgs[i],
                            (theArgs[i]->getParent() ?
                                theArgs[i]->getParent()->getParent() : 0))) {
      assert(isKernel && "Only kernels can have image/sampler params");
      InVals.push_back(DAG.getConstant(i+1, MVT::i32));
      continue;
    }

    if (theArgs[i]->use_empty()) {
      // argument is dead
      if (ObjectVT.isVector()) {
        EVT EltVT = ObjectVT.getVectorElementType();
        unsigned NumElts = ObjectVT.getVectorNumElements();
        for (unsigned vi = 0; vi < NumElts; ++vi) {
          InVals.push_back(DAG.getNode(ISD::UNDEF, dl, EltVT));
        }
      } else {
        InVals.push_back(DAG.getNode(ISD::UNDEF, dl, ObjectVT));
      }
      continue;
    }

    // In the following cases, assign a node order of "idx+1"
    // to newly created nodes. The SDNodes for params have to
    // appear in the same order as their order of appearance
    // in the original function. "idx+1" holds that order.
    if (PAL.hasAttribute(i+1, Attribute::ByVal) == false) {
      if (ObjectVT.isVector()) {
        unsigned NumElts = ObjectVT.getVectorNumElements();
        EVT EltVT = ObjectVT.getVectorElementType();
        unsigned Offset = 0;
        for (unsigned vi = 0; vi < NumElts; ++vi) {
          SDValue A = getParamSymbol(DAG, idx, getPointerTy());
          SDValue B = DAG.getIntPtrConstant(Offset);
          SDValue Addr = DAG.getNode(ISD::ADD, dl, getPointerTy(),
                                     //getParamSymbol(DAG, idx, EltVT),
                                     //DAG.getConstant(Offset, getPointerTy()));
                                     A, B);
          Value *SrcValue = Constant::getNullValue(PointerType::get(
              EltVT.getTypeForEVT(F->getContext()),
              llvm::ADDRESS_SPACE_PARAM));
          SDValue Ld = DAG.getLoad(EltVT, dl, Root, Addr,
                                   MachinePointerInfo(SrcValue),
                                   false, false, false,
                                   TD->getABITypeAlignment(EltVT.getTypeForEVT(
                                     F->getContext())));
          Offset += EltVT.getStoreSizeInBits()/8;
          InVals.push_back(Ld);
        }
        continue;
      }
      if (isABI || isKernel) {
        // If ABI, load from the param symbol
        SDValue Arg = getParamSymbol(DAG, idx);
        // Conjure up a value that we can get the address space from.
        // FIXME: Using a constant here is a hack.
        Value *srcValue = Constant::getNullValue(PointerType::get(
            ObjectVT.getTypeForEVT(F->getContext()),
            llvm::ADDRESS_SPACE_PARAM));
        SDValue p = DAG.getLoad(ObjectVT, dl, Root, Arg,
                                MachinePointerInfo(srcValue), false, false,
                                false,
                                TD->getABITypeAlignment(ObjectVT.getTypeForEVT(
                                  F->getContext())));
        if (p.getNode())
          DAG.AssignOrdering(p.getNode(), idx+1);
        InVals.push_back(p);
      }
      else {
        // If no ABI, just move the param symbol
        SDValue Arg = getParamSymbol(DAG, idx, ObjectVT);
        SDValue p = DAG.getNode(NVPTXISD::MoveParam, dl, ObjectVT, Arg);
        if (p.getNode())
          DAG.AssignOrdering(p.getNode(), idx+1);
        InVals.push_back(p);
      }
      continue;
    }
    // Param has ByVal attribute
    if (isABI || isKernel) {
      // Return MoveParam(param symbol).
      // Ideally, the param symbol can be returned directly,
      // but when the SDNode builder decides to use it in a CopyToReg(),
      // the machine instruction fails because TargetExternalSymbol
      // (not lowered) is target dependent, and CopyToReg assumes
      // the source is lowered.
      SDValue Arg = getParamSymbol(DAG, idx, getPointerTy());
      SDValue p = DAG.getNode(NVPTXISD::MoveParam, dl, ObjectVT, Arg);
      if (p.getNode())
        DAG.AssignOrdering(p.getNode(), idx+1);
      if (isKernel)
        InVals.push_back(p);
      else {
        SDValue p2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, ObjectVT,
                 DAG.getConstant(Intrinsic::nvvm_ptr_local_to_gen, MVT::i32),
                                 p);
        InVals.push_back(p2);
      }
    } else {
      // Have to move a set of param symbols to registers and
      // store them locally and return the local pointer in InVals
      const PointerType *elemPtrType = dyn_cast<PointerType>(argTypes[i]);
      assert(elemPtrType &&
             "Byval parameter should be a pointer type");
      Type *elemType = elemPtrType->getElementType();
      // Compute the constituent parts
      SmallVector<EVT, 16> vtparts;
      SmallVector<uint64_t, 16> offsets;
      ComputeValueVTs(*this, elemType, vtparts, &offsets, 0);
      unsigned totalsize = 0;
      for (unsigned j=0, je=vtparts.size(); j!=je; ++j)
        totalsize += vtparts[j].getStoreSizeInBits();
      SDValue localcopy = DAG.getFrameIndex(MF.getFrameInfo()->
                                      CreateStackObject(totalsize/8, 16, false),
                                            getPointerTy());
      unsigned sizesofar = 0;
      std::vector<SDValue> theChains;
      for (unsigned j=0, je=vtparts.size(); j!=je; ++j) {
        unsigned numElems = 1;
        if (vtparts[j].isVector()) numElems = vtparts[j].getVectorNumElements();
        for (unsigned k=0, ke=numElems; k!=ke; ++k) {
          EVT tmpvt = vtparts[j];
          if (tmpvt.isVector()) tmpvt = tmpvt.getVectorElementType();
          SDValue arg = DAG.getNode(NVPTXISD::MoveParam, dl, tmpvt,
                                    getParamSymbol(DAG, idx, tmpvt));
          SDValue addr = DAG.getNode(ISD::ADD, dl, getPointerTy(), localcopy,
                                     DAG.getConstant(sizesofar, getPointerTy()));
          theChains.push_back(DAG.getStore(Chain, dl, arg, addr,
                                           MachinePointerInfo(), false, false, 0));
          sizesofar += tmpvt.getStoreSizeInBits()/8;
        }
      }
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &theChains[0],
                          theChains.size());
      InVals.push_back(localcopy);
    }
  }
  // Clang will check explicit VarArg and issue an error if any. However,
  // Clang will let code with an implicit var arg list like f() pass.
  // We treat this case as if the arg list is empty.
  //if (F.isVarArg()) {
  //  assert(0 && "VarArg not supported yet!");
  //}

  if (!OutChains.empty())
    DAG.setRoot(DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                            &OutChains[0], OutChains.size()));

  return Chain;
}
SDValue
NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                 bool isVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 DebugLoc dl, SelectionDAG &DAG) const {

  bool isABI = (nvptxSubtarget.getSmVersion() >= 20);

  unsigned sizesofar = 0;
  unsigned idx = 0;
  for (unsigned i=0, e=Outs.size(); i!=e; ++i) {
    SDValue theVal = OutVals[i];
    EVT theValType = theVal.getValueType();
    unsigned numElems = 1;
    if (theValType.isVector()) numElems = theValType.getVectorNumElements();
    for (unsigned j=0,je=numElems; j!=je; ++j) {
      SDValue tmpval = theVal;
      if (theValType.isVector())
        tmpval = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
                             theValType.getVectorElementType(),
                             tmpval, DAG.getIntPtrConstant(j));
      Chain = DAG.getNode(isABI ? NVPTXISD::StoreRetval : NVPTXISD::MoveToRetval,
                          dl, MVT::Other, Chain,
                          DAG.getConstant(isABI ? sizesofar : idx, MVT::i32),
                          tmpval);
      if (theValType.isVector())
        sizesofar += theValType.getVectorElementType().getStoreSizeInBits()/8;
      else
        sizesofar += theValType.getStoreSizeInBits()/8;
      ++idx;
    }
  }

  return DAG.getNode(NVPTXISD::RET_FLAG, dl, MVT::Other, Chain);
}
void
NVPTXTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                  std::string &Constraint,
                                                  std::vector<SDValue> &Ops,
                                                  SelectionDAG &DAG) const
{
  if (Constraint.length() > 1)
    return;
  else
    TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
// NVPTX supports vectors of legal types of any length in intrinsics, because
// the NVPTX-specific type legalizer will legalize them to the PTX-supported
// length.
bool
NVPTXTargetLowering::isTypeSupportedInIntrinsic(MVT VT) const {
  if (isTypeLegal(VT))
    return true;
  if (VT.isVector()) {
    MVT eVT = VT.getVectorElementType();
    if (isTypeLegal(eVT))
      return true;
  }
  return false;
}
// llvm.ptx.memcpy.const and llvm.ptx.memmove.const need to be modeled as
// TgtMemIntrinsic, because we need the information that is only available in
// the "Value" type of the destination pointer. In particular, the address
// space information.
bool
NVPTXTargetLowering::getTgtMemIntrinsic(IntrinsicInfo& Info, const CallInst &I,
                                        unsigned Intrinsic) const {
  switch (Intrinsic) {
  default:
    return false;

  case Intrinsic::nvvm_atomic_load_add_f32:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::f32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.vol = 0;
    Info.readMem = true;
    Info.writeMem = true;
    Info.align = 0;
    return true;

  case Intrinsic::nvvm_atomic_load_inc_32:
  case Intrinsic::nvvm_atomic_load_dec_32:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.vol = 0;
    Info.readMem = true;
    Info.writeMem = true;
    Info.align = 0;
    return true;

  case Intrinsic::nvvm_ldu_global_i:
  case Intrinsic::nvvm_ldu_global_f:
  case Intrinsic::nvvm_ldu_global_p:

    Info.opc = ISD::INTRINSIC_W_CHAIN;
    if (Intrinsic == Intrinsic::nvvm_ldu_global_i)
      Info.memVT = MVT::i32;
    else if (Intrinsic == Intrinsic::nvvm_ldu_global_p)
      Info.memVT = getPointerTy();
    else
      Info.memVT = MVT::f32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.vol = 0;
    Info.readMem = true;
    Info.writeMem = false;
    Info.align = 0;
    return true;
  }
  return false;
}
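// E.g. for nvvm_atomic_load_add_f32 the IntrinsicInfo filled in above makes
// the node an INTRINSIC_W_CHAIN carrying a memory operand for arg 0, so the
// scheduler and alias analysis treat it as a real f32 memory access.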
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
/// Used to guide target specific optimizations, like loop strength reduction
/// (LoopStrengthReduce.cpp) and memory optimization for address mode
/// (CodeGenPrepare.cpp)
bool
NVPTXTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                           Type *Ty) const {

  // AddrMode - This represents an addressing mode of:
  //    BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
  //
  // The legal address modes are
  // - [avar]
  // - [areg]
  // - [areg+immoff]
  // - [immAddr]

  if (AM.BaseGV) {
    if (AM.BaseOffs || AM.HasBaseReg || AM.Scale)
      return false;
    return true;
  }

  switch (AM.Scale) {
  case 0: // "r", "r+i" or "i" is allowed
    break;
  case 1:
    if (AM.HasBaseReg) // "r+r+i" or "r+r" is not allowed.
      return false;
    // Otherwise we have r+i.
    break;
  default:
    // No scale > 1 is allowed
    return false;
  }
  return true;
}
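// For example, "[%r1+16]" (BaseReg + immediate, Scale == 0) is accepted,
// while a scaled form like base + 4*index has no PTX addressing mode and is
// rejected above.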
//===----------------------------------------------------------------------===//
//                         NVPTX Inline Assembly Support
//===----------------------------------------------------------------------===//

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
NVPTXTargetLowering::ConstraintType
NVPTXTargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default:
      break;
    case 'r':
    case 'h':
    case 'c':
    case 'l':
    case 'f':
    case 'd':
    case '0':
    case 'N':
      return C_RegisterClass;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

std::pair<unsigned, const TargetRegisterClass*>
NVPTXTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                  EVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'c':
      return std::make_pair(0U, &NVPTX::Int8RegsRegClass);
    case 'h':
      return std::make_pair(0U, &NVPTX::Int16RegsRegClass);
    case 'r':
      return std::make_pair(0U, &NVPTX::Int32RegsRegClass);
    case 'l':
    case 'N':
      return std::make_pair(0U, &NVPTX::Int64RegsRegClass);
    case 'f':
      return std::make_pair(0U, &NVPTX::Float32RegsRegClass);
    case 'd':
      return std::make_pair(0U, &NVPTX::Float64RegsRegClass);
    }
  }
  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}
/// getFunctionAlignment - Return the Log2 alignment of this function.
unsigned NVPTXTargetLowering::getFunctionAlignment(const Function *) const {
  return 4;
}
/// ReplaceLoadVector - Convert vector loads into multi-output scalar loads.
static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
                              SmallVectorImpl<SDValue>& Results) {
  EVT ResVT = N->getValueType(0);
  DebugLoc DL = N->getDebugLoc();

  assert(ResVT.isVector() && "Vector load must have vector type");

  // We only handle "native" vector sizes for now, e.g. <4 x double> is not
  // legal. We can (and should) split that into 2 loads of <2 x double> here
  // but I'm leaving that as a TODO for now.
  assert(ResVT.isSimple() && "Can only handle simple types");
  switch (ResVT.getSimpleVT().SimpleTy) {
  default: return;
  case MVT::v2i8:
  case MVT::v2i16:
  case MVT::v2i32:
  case MVT::v2i64:
  case MVT::v2f32:
  case MVT::v2f64:
  case MVT::v4i8:
  case MVT::v4i16:
  case MVT::v4i32:
  case MVT::v4f32:
    // This is a "native" vector type
    break;
  }

  EVT EltVT = ResVT.getVectorElementType();
  unsigned NumElts = ResVT.getVectorNumElements();

  // Since LoadV2 is a target node, we cannot rely on DAG type legalization.
  // Therefore, we must ensure the type is legal. For i1 and i8, we set the
  // loaded type to i16 and propagate the "real" type as the memory type.
  bool NeedTrunc = false;
  if (EltVT.getSizeInBits() < 16) {
    EltVT = MVT::i16;
    NeedTrunc = true;
  }

  unsigned Opcode = 0;
  SDVTList LdResVTs;

  switch (NumElts) {
  default: return;
  case 2:
    Opcode = NVPTXISD::LoadV2;
    LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
    break;
  case 4: {
    Opcode = NVPTXISD::LoadV4;
    EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
    LdResVTs = DAG.getVTList(ListVTs, 5);
    break;
  }
  }

  SmallVector<SDValue, 8> OtherOps;

  // Copy regular operands
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
    OtherOps.push_back(N->getOperand(i));

  LoadSDNode *LD = cast<LoadSDNode>(N);

  // The select routine does not have access to the LoadSDNode instance, so
  // pass along the extension information
  OtherOps.push_back(DAG.getIntPtrConstant(LD->getExtensionType()));

  SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, &OtherOps[0],
                                          OtherOps.size(), LD->getMemoryVT(),
                                          LD->getMemOperand());

  SmallVector<SDValue, 4> ScalarRes;

  for (unsigned i = 0; i < NumElts; ++i) {
    SDValue Res = NewLD.getValue(i);
    if (NeedTrunc)
      Res = DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
    ScalarRes.push_back(Res);
  }

  SDValue LoadChain = NewLD.getValue(NumElts);

  SDValue BuildVec = DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT,
                                 &ScalarRes[0], NumElts);

  Results.push_back(BuildVec);
  Results.push_back(LoadChain);
}
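// Sketch of the effect: a legal "load <4 x float>" is rebuilt as one
// NVPTXISD::LoadV4 node with four f32 results plus a chain, then reassembled
// into a vector value with BUILD_VECTOR.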
static void ReplaceINTRINSIC_W_CHAIN(SDNode *N,
                                     SelectionDAG &DAG,
                                     SmallVectorImpl<SDValue> &Results) {
  SDValue Chain = N->getOperand(0);
  SDValue Intrin = N->getOperand(1);
  DebugLoc DL = N->getDebugLoc();

  // Get the intrinsic ID
  unsigned IntrinNo = cast<ConstantSDNode>(Intrin.getNode())->getZExtValue();
  switch (IntrinNo) {
  default: return;
  case Intrinsic::nvvm_ldg_global_i:
  case Intrinsic::nvvm_ldg_global_f:
  case Intrinsic::nvvm_ldg_global_p:
  case Intrinsic::nvvm_ldu_global_i:
  case Intrinsic::nvvm_ldu_global_f:
  case Intrinsic::nvvm_ldu_global_p: {
    EVT ResVT = N->getValueType(0);

    if (ResVT.isVector()) {
      // Vector LDG/LDU

      unsigned NumElts = ResVT.getVectorNumElements();
      EVT EltVT = ResVT.getVectorElementType();

      // Since LDU/LDG are target nodes, we cannot rely on DAG type
      // legalization. Therefore, we must ensure the type is legal. For i1 and
      // i8, we set the loaded type to i16 and propagate the "real" type as the
      // memory type.
      bool NeedTrunc = false;
      if (EltVT.getSizeInBits() < 16) {
        EltVT = MVT::i16;
        NeedTrunc = true;
      }

      unsigned Opcode = 0;
      SDVTList LdResVTs;

      switch (NumElts) {
      default: return;
      case 2:
        switch (IntrinNo) {
        default: return;
        case Intrinsic::nvvm_ldg_global_i:
        case Intrinsic::nvvm_ldg_global_f:
        case Intrinsic::nvvm_ldg_global_p:
          Opcode = NVPTXISD::LDGV2;
          break;
        case Intrinsic::nvvm_ldu_global_i:
        case Intrinsic::nvvm_ldu_global_f:
        case Intrinsic::nvvm_ldu_global_p:
          Opcode = NVPTXISD::LDUV2;
          break;
        }
        LdResVTs = DAG.getVTList(EltVT, EltVT, MVT::Other);
        break;
      case 4: {
        switch (IntrinNo) {
        default: return;
        case Intrinsic::nvvm_ldg_global_i:
        case Intrinsic::nvvm_ldg_global_f:
        case Intrinsic::nvvm_ldg_global_p:
          Opcode = NVPTXISD::LDGV4;
          break;
        case Intrinsic::nvvm_ldu_global_i:
        case Intrinsic::nvvm_ldu_global_f:
        case Intrinsic::nvvm_ldu_global_p:
          Opcode = NVPTXISD::LDUV4;
          break;
        }
        EVT ListVTs[] = { EltVT, EltVT, EltVT, EltVT, MVT::Other };
        LdResVTs = DAG.getVTList(ListVTs, 5);
        break;
      }
      }
      SmallVector<SDValue, 8> OtherOps;

      // Copy regular operands

      OtherOps.push_back(Chain); // Chain
      // Skip operand 1 (intrinsic ID)

      for (unsigned i = 2, e = N->getNumOperands(); i != e; ++i)
        OtherOps.push_back(N->getOperand(i));

      MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);

      SDValue NewLD = DAG.getMemIntrinsicNode(Opcode, DL, LdResVTs, &OtherOps[0],
                                              OtherOps.size(), MemSD->getMemoryVT(),
                                              MemSD->getMemOperand());

      SmallVector<SDValue, 4> ScalarRes;

      for (unsigned i = 0; i < NumElts; ++i) {
        SDValue Res = NewLD.getValue(i);
        if (NeedTrunc)
          Res = DAG.getNode(ISD::TRUNCATE, DL, ResVT.getVectorElementType(), Res);
        ScalarRes.push_back(Res);
      }

      SDValue LoadChain = NewLD.getValue(NumElts);

      SDValue BuildVec = DAG.getNode(ISD::BUILD_VECTOR, DL, ResVT,
                                     &ScalarRes[0], NumElts);

      Results.push_back(BuildVec);
      Results.push_back(LoadChain);
    } else {
      // i8 LDG/LDU
      assert(ResVT.isSimple() && ResVT.getSimpleVT().SimpleTy == MVT::i8 &&
             "Custom handling of non-i8 ldu/ldg?");

      // Just copy all operands as-is
      SmallVector<SDValue, 4> Ops;
      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
        Ops.push_back(N->getOperand(i));

      // Force output to i16
      SDVTList LdResVTs = DAG.getVTList(MVT::i16, MVT::Other);

      MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);

      // We make sure the memory type is i8, which will be used during isel
      // to select the proper instruction.
      SDValue NewLD = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL,
                                              LdResVTs, &Ops[0],
                                              Ops.size(), MVT::i8,
                                              MemSD->getMemOperand());

      Results.push_back(NewLD.getValue(0));
      Results.push_back(NewLD.getValue(1));
    }
    return;
  }
  }
}
void NVPTXTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue> &Results,
                                             SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  default: report_fatal_error("Unhandled custom legalization");
  case ISD::LOAD:
    ReplaceLoadVector(N, DAG, Results);
    return;
  case ISD::INTRINSIC_W_CHAIN:
    ReplaceINTRINSIC_W_CHAIN(N, DAG, Results);
    return;
  }
}