1 //===-- IA64ISelLowering.cpp - IA64 DAG Lowering Implementation -----------===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by Duraid Madina and is distributed under
6 // the University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the IA64ISelLowering class.
12 //===----------------------------------------------------------------------===//
14 #include "IA64ISelLowering.h"
15 #include "IA64MachineFunctionInfo.h"
16 #include "IA64TargetMachine.h"
17 #include "llvm/CodeGen/MachineFrameInfo.h"
18 #include "llvm/CodeGen/MachineFunction.h"
19 #include "llvm/CodeGen/MachineInstrBuilder.h"
20 #include "llvm/CodeGen/SelectionDAG.h"
21 #include "llvm/CodeGen/SSARegMap.h"
22 #include "llvm/Constants.h"
23 #include "llvm/Function.h"
//===--------------------------------------------------------------------===//
// IA64TargetLowering constructor: registers the legal register classes and
// tells the SelectionDAG legalizer which operations the IA64 backend
// supports natively, which must be expanded into other operations, and
// which receive custom lowering code (see LowerOperation below).
// NOTE(review): this listing is non-contiguous (embedded line numbers jump),
// so some statements and the closing brace are not visible here.
26 IA64TargetLowering::IA64TargetLowering(TargetMachine &TM)
27 : TargetLowering(TM) {
29 // register class for general registers
30 addRegisterClass(MVT::i64, IA64::GRRegisterClass);
32 // register class for FP registers
33 addRegisterClass(MVT::f64, IA64::FPRegisterClass);
35 // register class for predicate registers
36 addRegisterClass(MVT::i1, IA64::PRRegisterClass);
// i1 has no native load form: extending loads of i1 are promoted/expanded.
38 setLoadXAction(ISD::EXTLOAD , MVT::i1 , Promote);
40 setLoadXAction(ISD::ZEXTLOAD , MVT::i1 , Expand);
42 setLoadXAction(ISD::SEXTLOAD , MVT::i1 , Expand);
// Sign-extending loads of sub-64-bit integers are synthesized by the
// legalizer (load + explicit sign extension).
43 setLoadXAction(ISD::SEXTLOAD , MVT::i8 , Expand);
44 setLoadXAction(ISD::SEXTLOAD , MVT::i16 , Expand);
45 setLoadXAction(ISD::SEXTLOAD , MVT::i32 , Expand);
// No jump tables / conditional branch-and-compare nodes: expand them into
// plain compares and branches.
47 setOperationAction(ISD::BRIND , MVT::Other, Expand);
48 setOperationAction(ISD::BR_JT , MVT::Other, Expand);
49 setOperationAction(ISD::BR_CC , MVT::Other, Expand);
50 setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);
52 // ia64 uses SELECT not SELECT_CC
53 setOperationAction(ISD::SELECT_CC , MVT::Other, Expand);
55 // We need to handle ISD::RET for void functions ourselves,
56 // so we get a chance to restore ar.pfs before adding a
// [comment continuation not visible in this listing — the ar.pfs restore
// must happen before the return instruction; see the RET case in
// LowerOperation]
58 setOperationAction(ISD::RET, MVT::Other, Custom);
// setcc produces a predicate register value (i1); shifts take i64 amounts.
60 setSetCCResultType(MVT::i1);
61 setShiftAmountType(MVT::i64);
// No hardware FP remainder.
63 setOperationAction(ISD::FREM , MVT::f32 , Expand);
64 setOperationAction(ISD::FREM , MVT::f64 , Expand);
// NOTE(review): ISD::UREM is an *integer* operation; expanding it for the
// FP types f32/f64 looks like a typo (FREM is already expanded above, and
// integer UREM would be the usual target) — confirm against other backends.
66 setOperationAction(ISD::UREM , MVT::f32 , Expand);
67 setOperationAction(ISD::UREM , MVT::f64 , Expand);
// Memory intrinsics become library calls.
69 setOperationAction(ISD::MEMMOVE , MVT::Other, Expand);
70 setOperationAction(ISD::MEMSET , MVT::Other, Expand);
71 setOperationAction(ISD::MEMCPY , MVT::Other, Expand);
// i1 -> FP conversions go through a wider integer type first.
73 setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote);
74 setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote);
76 // We don't support sin/cos/sqrt
77 setOperationAction(ISD::FSIN , MVT::f64, Expand);
78 setOperationAction(ISD::FCOS , MVT::f64, Expand);
79 setOperationAction(ISD::FSQRT, MVT::f64, Expand);
80 setOperationAction(ISD::FSIN , MVT::f32, Expand);
81 setOperationAction(ISD::FCOS , MVT::f32, Expand);
82 setOperationAction(ISD::FSQRT, MVT::f32, Expand);
84 // FIXME: IA64 supports fcopysign natively!
85 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
86 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
88 // We don't have line number support yet.
89 setOperationAction(ISD::LOCATION, MVT::Other, Expand);
90 setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
91 setOperationAction(ISD::DEBUG_LABEL, MVT::Other, Expand);
93 //IA64 has these, but they are not implemented
94 setOperationAction(ISD::CTTZ , MVT::i64 , Expand);
95 setOperationAction(ISD::CTLZ , MVT::i64 , Expand);
96 setOperationAction(ISD::ROTL , MVT::i64 , Expand);
97 setOperationAction(ISD::ROTR , MVT::i64 , Expand);
98 setOperationAction(ISD::BSWAP, MVT::i64 , Expand); // mux @rev
100 // VASTART needs to be custom lowered to use the VarArgsFrameIndex
101 setOperationAction(ISD::VAARG , MVT::Other, Custom);
102 setOperationAction(ISD::VASTART , MVT::Other, Custom);
104 // Use the default implementation.
105 setOperationAction(ISD::VACOPY , MVT::Other, Expand);
106 setOperationAction(ISD::VAEND , MVT::Other, Expand);
107 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
108 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
109 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
// r12 is the IA64 stack pointer; used by stacksave/stackrestore expansion.
111 setStackPointerRegisterToSaveRestore(IA64::r12);
113 setJumpBufSize(704); // on ia64-linux, jmp_bufs are 704 bytes..
114 setJumpBufAlignment(16); // ...and must be 16-byte aligned
116 computeRegisterProperties();
// FP constants other than +0.0 and +1.0 are loaded from the constant pool.
118 setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
119 addLegalFPImmediate(+0.0);
120 addLegalFPImmediate(+1.0);
//===--------------------------------------------------------------------===//
// getTargetNodeName - Return a human-readable name for an IA64-specific
// SelectionDAG node opcode; used when dumping/printing DAGs for debugging.
// NOTE(review): the switch's opening line and its default case (original
// lines 124-125) are not visible in this listing.
123 const char *IA64TargetLowering::getTargetNodeName(unsigned Opcode) const {
126 case IA64ISD::GETFD: return "IA64ISD::GETFD";
127 case IA64ISD::BRCALL: return "IA64ISD::BRCALL";
128 case IA64ISD::RET_FLAG: return "IA64ISD::RET_FLAG";
//===--------------------------------------------------------------------===//
// LowerArguments - Lower the incoming (formal) arguments of function F into
// SelectionDAG values, following the Itanium software conventions:
//   - the first 8 arguments arrive in registers: integers in the stacked
//     registers r32..r39, FP values in f8..f15 (allocated as needed);
//   - arguments beyond the 8th are loaded from the caller's frame, starting
//     at offset 16 (past the 16-byte scratch area), 8 bytes per slot.
// Also emits the prologue scaffolding: a PSEUDO_ALLOC to reserve the
// register stack frame, copies that preserve GP (r1), SP (r12) and RP into
// virtual registers, varargs register spills, and live-out marking of the
// return-value registers r8 / F8.
// NOTE(review): this listing is non-contiguous — several statements, case
// labels and closing braces are not visible here.
133 std::vector<SDOperand>
134 IA64TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
135 std::vector<SDOperand> ArgValues;
137 // add beautiful description of IA64 stack frame format
138 // here (from intel 24535803.pdf most likely)
140 MachineFunction &MF = DAG.getMachineFunction();
141 MachineFrameInfo *MFI = MF.getFrameInfo();
// Virtual registers that will hold the preserved GP/SP/RP values.
143 GP = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
144 SP = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
145 RP = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
// All prologue machine instructions go into the entry block.
147 MachineBasicBlock& BB = MF.front();
// Physical argument registers per the IA64 calling convention.
149 unsigned args_int[] = {IA64::r32, IA64::r33, IA64::r34, IA64::r35,
150 IA64::r36, IA64::r37, IA64::r38, IA64::r39};
152 unsigned args_FP[] = {IA64::F8, IA64::F9, IA64::F10, IA64::F11,
153 IA64::F12,IA64::F13,IA64::F14, IA64::F15};
159 unsigned used_FPArgs = 0; // how many FP args have been used so far?
161 unsigned ArgOffset = 0;
// Walk the formal arguments, producing one SDOperand per argument.
164 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I)
166 SDOperand newroot, argt;
167 if(count < 8) { // need to fix this logic? maybe.
// Dispatch on the argument's legalized value type.
169 switch (getValueType(I->getType())) {
171 assert(0 && "ERROR in LowerArgs: can't lower this type of arg.\n");
173 // fixme? (well, will need to for weird FP structy stuff,
174 // see intel ABI docs)
176 //XXX BuildMI(&BB, IA64::IDEF, 0, args_FP[used_FPArgs]);
177 MF.addLiveIn(args_FP[used_FPArgs]); // mark this reg as liveIn
178 // floating point args go into f8..f15 as-needed, the increment
179 argVreg[count] = // is below..:
180 MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::f64));
181 // FP args go into f8..f15 as needed: (hence the ++)
182 argPreg[count] = args_FP[used_FPArgs++];
183 argOpc[count] = IA64::FMOV;
184 argt = newroot = DAG.getCopyFromReg(DAG.getRoot(), argVreg[count],
// FP args always arrive as f64; round back down for 'float' parameters.
186 if (I->getType() == Type::FloatTy)
187 argt = DAG.getNode(ISD::FP_ROUND, MVT::f32, argt);
189 case MVT::i1: // NOTE: as far as C abi stuff goes,
190 // bools are just boring old ints
195 //XXX BuildMI(&BB, IA64::IDEF, 0, args_int[count]);
196 MF.addLiveIn(args_int[count]); // mark this register as liveIn
198 MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
199 argPreg[count] = args_int[count];
200 argOpc[count] = IA64::MOV;
202 DAG.getCopyFromReg(DAG.getRoot(), argVreg[count], MVT::i64);
// Integer args arrive widened to i64; truncate to the declared type.
203 if ( getValueType(I->getType()) != MVT::i64)
204 argt = DAG.getNode(ISD::TRUNCATE, getValueType(I->getType()),
208 } else { // more than 8 args go into the frame
209 // Create the frame index object for this incoming parameter...
// 16-byte scratch area precedes the memory argument slots; 8 bytes each.
210 ArgOffset = 16 + 8 * (count - 8);
211 int FI = MFI->CreateFixedObject(8, ArgOffset);
213 // Create the SelectionDAG nodes corresponding to a load
214 //from this parameter
215 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i64);
216 argt = newroot = DAG.getLoad(getValueType(I->getType()),
217 DAG.getEntryNode(), FIN, NULL, 0);
// Chain the copy/load so later nodes are ordered after it.
220 DAG.setRoot(newroot.getValue(1));
221 ArgValues.push_back(argt);
225 // Create a vreg to hold the output of (what will become)
226 // the "alloc" instruction
227 VirtGPR = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
228 BuildMI(&BB, IA64::PSEUDO_ALLOC, 0, VirtGPR);
229 // we create a PSEUDO_ALLOC (pseudo)instruction for now
// IDEFs tell the register allocator these physregs are live on entry.
231 BuildMI(&BB, IA64::IDEF, 0, IA64::r1);
234 BuildMI(&BB, IA64::IDEF, 0, IA64::r12);
235 BuildMI(&BB, IA64::IDEF, 0, IA64::rp);
// Preserve GP/SP/RP in virtual registers so calls can restore them.
238 BuildMI(&BB, IA64::MOV, 1, GP).addReg(IA64::r1);
241 BuildMI(&BB, IA64::MOV, 1, SP).addReg(IA64::r12);
242 BuildMI(&BB, IA64::MOV, 1, RP).addReg(IA64::rp);
246 unsigned tempOffset=0;
248 // if this is a varargs function, we simply lower llvm.va_start by
249 // pointing to the first entry
252 VarArgsFrameIndex = MFI->CreateFixedObject(8, tempOffset);
255 // here we actually do the moving of args, and store them to the stack
256 // too if this is a varargs function:
257 for (int i = 0; i < count && i < 8; ++i) {
// Copy each incoming physreg into the vreg chosen above.
258 BuildMI(&BB, argOpc[i], 1, argVreg[i]).addReg(argPreg[i]);
260 // if this is a varargs function, we copy the input registers to the stack
261 int FI = MFI->CreateFixedObject(8, tempOffset);
262 tempOffset+=8; //XXX: is it safe to use r22 like this?
263 BuildMI(&BB, IA64::MOV, 1, IA64::r22).addFrameIndex(FI);
264 // FIXME: we should use st8.spill here, one day
265 BuildMI(&BB, IA64::ST8, 1, IA64::r22).addReg(argPreg[i]);
269 // Finally, inform the code generator which regs we return values in.
270 // (see the ISD::RET: case in the instruction selector)
271 switch (getValueType(F.getReturnType())) {
272 default: assert(0 && "i have no idea where to return this type!");
273 case MVT::isVoid: break;
// Integer results come back in r8; FP results in F8.
279 MF.addLiveOut(IA64::r8);
283 MF.addLiveOut(IA64::F8);
//===--------------------------------------------------------------------===//
// LowerCallTo - Lower an outgoing call into SelectionDAG nodes.
// Overview of the visible logic:
//   - compute the stack bytes needed (16-byte scratch area plus 8 bytes per
//     argument beyond the 8th) and keep the frame 16-byte aligned;
//   - promote integer args to i64 and float args to f64; args past the 8th
//     are stored to the outgoing-argument area on the stack;
//   - FP register args are additionally converted to their integer bit
//     pattern via IA64ISD::GETFD and copied into the integer out-registers
//     (needed for varargs/external callees);
//   - save GP (r1), SP (r12) and RP around the IA64ISD::BRCALL node and
//     restore them afterwards;
//   - copy the return value out of r8 (integer/bool) or F8 (FP), with
//     AssertSext/AssertZext + TRUNCATE for sub-64-bit integer returns.
// Returns the (result value, chain) pair.
// NOTE(review): this listing is non-contiguous — case labels, braces and
// some statements are not visible here.
290 std::pair<SDOperand, SDOperand>
291 IA64TargetLowering::LowerCallTo(SDOperand Chain,
292 const Type *RetTy, bool isVarArg,
293 unsigned CallingConv, bool isTailCall,
294 SDOperand Callee, ArgListTy &Args,
297 MachineFunction &MF = DAG.getMachineFunction();
// 16 bytes of scratch area always precede any memory arguments.
299 unsigned NumBytes = 16;
300 unsigned outRegsUsed = 0;
302 if (Args.size() > 8) {
303 NumBytes += (Args.size() - 8) * 8;
306 outRegsUsed = Args.size();
309 // FIXME? this WILL fail if we ever try to pass around an arg that
310 // consumes more than a single output slot (a 'real' double, int128
311 // some sort of aggregate etc.), as we'll underestimate how many 'outX'
312 // registers we use. Hopefully, the assembler will notice.
// Record the high-water mark of out-registers for the function's 'alloc'.
313 MF.getInfo<IA64FunctionInfo>()->outRegsUsed=
314 std::max(outRegsUsed, MF.getInfo<IA64FunctionInfo>()->outRegsUsed);
316 // keep stack frame 16-byte aligned
317 //assert(NumBytes==((NumBytes+15) & ~15) && "stack frame not 16-byte aligned!");
318 NumBytes = (NumBytes+15) & ~15;
320 Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy()));
// Stores: args that go to the stack. Converts: FP->int bit-pattern copies.
// RegValuesToPass: args that travel in registers.
323 std::vector<SDOperand> Stores;
324 std::vector<SDOperand> Converts;
325 std::vector<SDOperand> RegValuesToPass;
326 unsigned ArgOffset = 16;
328 for (unsigned i = 0, e = Args.size(); i != e; ++i)
330 SDOperand Val = Args[i].first;
331 MVT::ValueType ObjectVT = Val.getValueType();
332 SDOperand ValToStore(0, 0), ValToConvert(0, 0);
335 default: assert(0 && "unexpected argument type!");
340 //promote to 64-bits, sign/zero extending based on type
342 if(Args[i].second->isSigned())
343 Val = DAG.getNode(ISD::SIGN_EXTEND, MVT::i64, Val);
345 Val = DAG.getNode(ISD::ZERO_EXTEND, MVT::i64, Val);
// First 8 args travel in registers; the rest are stored to the stack.
349 if(RegValuesToPass.size() >= 8) {
352 RegValuesToPass.push_back(Val);
// Floats are widened to f64 before being passed.
357 Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val);
360 if(RegValuesToPass.size() >= 8) {
363 RegValuesToPass.push_back(Val);
364 if(1 /* TODO: if(calling external or varadic function)*/ ) {
365 ValToConvert = Val; // additionally pass this FP value as an int
// Build the stack-slot address relative to SP (r12) and store the arg.
373 StackPtr = DAG.getRegister(IA64::r12, MVT::i64);
375 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
376 PtrOff = DAG.getNode(ISD::ADD, MVT::i64, StackPtr, PtrOff);
377 Stores.push_back(DAG.getStore(Chain, ValToStore, PtrOff, NULL, 0));
378 ArgOffset += ObjSize;
// GETFD extracts the raw 64-bit bit pattern of an FP register value.
381 if(ValToConvert.Val) {
382 Converts.push_back(DAG.getNode(IA64ISD::GETFD, MVT::i64, ValToConvert));
386 // Emit all stores, make sure they occur before any copies into physregs.
388 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, &Stores[0],Stores.size());
// Physical out-registers used for register arguments.
390 static const unsigned IntArgRegs[] = {
391 IA64::out0, IA64::out1, IA64::out2, IA64::out3,
392 IA64::out4, IA64::out5, IA64::out6, IA64::out7
395 static const unsigned FPArgRegs[] = {
396 IA64::F8, IA64::F9, IA64::F10, IA64::F11,
397 IA64::F12, IA64::F13, IA64::F14, IA64::F15
402 // save the current GP, SP and RP : FIXME: do we need to do all 3 always?
403 SDOperand GPBeforeCall = DAG.getCopyFromReg(Chain, IA64::r1, MVT::i64, InFlag);
404 Chain = GPBeforeCall.getValue(1);
405 InFlag = Chain.getValue(2);
406 SDOperand SPBeforeCall = DAG.getCopyFromReg(Chain, IA64::r12, MVT::i64, InFlag);
407 Chain = SPBeforeCall.getValue(1);
408 InFlag = Chain.getValue(2);
409 SDOperand RPBeforeCall = DAG.getCopyFromReg(Chain, IA64::rp, MVT::i64, InFlag);
410 Chain = RPBeforeCall.getValue(1);
411 InFlag = Chain.getValue(2);
413 // Build a sequence of copy-to-reg nodes chained together with token chain
414 // and flag operands which copy the outgoing integer args into regs out[0-7]
415 // mapped 1:1 and the FP args into regs F8-F15 "lazily"
416 // TODO: for performance, we should only copy FP args into int regs when we
417 // know this is required (i.e. for varardic or external (unknown) functions)
419 // first to the FP->(integer representation) conversions, these are
420 // flagged for now, but shouldn't have to be (TODO)
421 unsigned seenConverts = 0;
422 for (unsigned i = 0, e = RegValuesToPass.size(); i != e; ++i) {
423 if(MVT::isFloatingPoint(RegValuesToPass[i].getValueType())) {
// FP arg i also occupies integer out-register i (as its bit pattern).
424 Chain = DAG.getCopyToReg(Chain, IntArgRegs[i], Converts[seenConverts++], InFlag);
425 InFlag = Chain.getValue(1);
429 // next copy args into the usual places, these are flagged
430 unsigned usedFPArgs = 0;
431 for (unsigned i = 0, e = RegValuesToPass.size(); i != e; ++i) {
432 Chain = DAG.getCopyToReg(Chain,
433 MVT::isInteger(RegValuesToPass[i].getValueType()) ?
434 IntArgRegs[i] : FPArgRegs[usedFPArgs++],
435 RegValuesToPass[i], InFlag);
436 InFlag = Chain.getValue(1);
439 // If the callee is a GlobalAddress node (quite common, every direct call is)
440 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
442 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
443 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), MVT::i64);
447 std::vector<MVT::ValueType> NodeTys;
448 std::vector<SDOperand> CallOperands;
449 NodeTys.push_back(MVT::Other); // Returns a chain
450 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
451 CallOperands.push_back(Chain);
452 CallOperands.push_back(Callee);
454 // emit the call itself
456 CallOperands.push_back(InFlag);
458 assert(0 && "this should never happen!\n");
460 // to make way for a hack:
461 Chain = DAG.getNode(IA64ISD::BRCALL, NodeTys,
462 &CallOperands[0], CallOperands.size());
463 InFlag = Chain.getValue(1);
465 // restore the GP, SP and RP after the call
466 Chain = DAG.getCopyToReg(Chain, IA64::r1, GPBeforeCall, InFlag);
467 InFlag = Chain.getValue(1);
468 Chain = DAG.getCopyToReg(Chain, IA64::r12, SPBeforeCall, InFlag);
469 InFlag = Chain.getValue(1);
470 Chain = DAG.getCopyToReg(Chain, IA64::rp, RPBeforeCall, InFlag);
471 InFlag = Chain.getValue(1);
473 std::vector<MVT::ValueType> RetVals;
474 RetVals.push_back(MVT::Other);
475 RetVals.push_back(MVT::Flag);
477 MVT::ValueType RetTyVT = getValueType(RetTy);
// Copy the result out of the convention's return register, if any.
479 if (RetTyVT != MVT::isVoid) {
481 default: assert(0 && "Unknown value type to return!");
482 case MVT::i1: { // bools are just like other integers (returned in r8)
483 // we *could* fall through to the truncate below, but this saves a
484 // few redundant predicate ops
485 SDOperand boolInR8 = DAG.getCopyFromReg(Chain, IA64::r8, MVT::i64, InFlag);
486 InFlag = boolInR8.getValue(2);
487 Chain = boolInR8.getValue(1);
488 SDOperand zeroReg = DAG.getCopyFromReg(Chain, IA64::r0, MVT::i64, InFlag);
489 InFlag = zeroReg.getValue(2);
490 Chain = zeroReg.getValue(1);
// Materialize the i1 by comparing r8 against the hardwired zero (r0).
492 RetVal = DAG.getSetCC(MVT::i1, boolInR8, zeroReg, ISD::SETNE);
498 RetVal = DAG.getCopyFromReg(Chain, IA64::r8, MVT::i64, InFlag);
499 Chain = RetVal.getValue(1);
501 // keep track of whether it is sign or zero extended (todo: bools?)
503 RetVal = DAG.getNode(RetTy->isSigned() ? ISD::AssertSext :ISD::AssertZext,
504 MVT::i64, RetVal, DAG.getValueType(RetTyVT));
506 RetVal = DAG.getNode(ISD::TRUNCATE, RetTyVT, RetVal);
509 RetVal = DAG.getCopyFromReg(Chain, IA64::r8, MVT::i64, InFlag);
510 Chain = RetVal.getValue(1);
511 InFlag = RetVal.getValue(2); // XXX dead
// f32 results come back in F8 widened to f64; truncate back down.
514 RetVal = DAG.getCopyFromReg(Chain, IA64::F8, MVT::f64, InFlag);
515 Chain = RetVal.getValue(1);
516 RetVal = DAG.getNode(ISD::TRUNCATE, MVT::f32, RetVal);
519 RetVal = DAG.getCopyFromReg(Chain, IA64::F8, MVT::f64, InFlag);
520 Chain = RetVal.getValue(1);
521 InFlag = RetVal.getValue(2); // XXX dead
// Close the call sequence, releasing the reserved stack bytes.
526 Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, Chain,
527 DAG.getConstant(NumBytes, getPointerTy()));
529 return std::make_pair(RetVal, Chain);
//===--------------------------------------------------------------------===//
// LowerFrameReturnAddress - Lower llvm.frameaddress / llvm.returnaddress.
// Not implemented for IA64: unconditionally aborts at runtime if reached.
// NOTE(review): the function's return statement/closing brace are not
// visible in this listing.
532 std::pair<SDOperand, SDOperand> IA64TargetLowering::
533 LowerFrameReturnAddress(bool isFrameAddress, SDOperand Chain, unsigned Depth,
535 assert(0 && "LowerFrameReturnAddress unimplemented");
539 SDOperand IA64TargetLowering::
540 LowerOperation(SDOperand Op, SelectionDAG &DAG) {
541 switch (Op.getOpcode()) {
542 default: assert(0 && "Should not custom lower this!");
544 SDOperand AR_PFSVal, Copy;
546 switch(Op.getNumOperands()) {
548 assert(0 && "Do not know how to return this many arguments!");
551 AR_PFSVal = DAG.getCopyFromReg(Op.getOperand(0), VirtGPR, MVT::i64);
552 AR_PFSVal = DAG.getCopyToReg(AR_PFSVal.getValue(1), IA64::AR_PFS,
554 return DAG.getNode(IA64ISD::RET_FLAG, MVT::Other, AR_PFSVal);
556 // Copy the result into the output register & restore ar.pfs
557 MVT::ValueType ArgVT = Op.getOperand(1).getValueType();
558 unsigned ArgReg = MVT::isInteger(ArgVT) ? IA64::r8 : IA64::F8;
560 AR_PFSVal = DAG.getCopyFromReg(Op.getOperand(0), VirtGPR, MVT::i64);
561 Copy = DAG.getCopyToReg(AR_PFSVal.getValue(1), ArgReg, Op.getOperand(1),
563 AR_PFSVal = DAG.getCopyToReg(Copy.getValue(0), IA64::AR_PFS, AR_PFSVal,
565 return DAG.getNode(IA64ISD::RET_FLAG, MVT::Other,
566 AR_PFSVal, AR_PFSVal.getValue(1));
572 MVT::ValueType VT = getPointerTy();
573 SrcValueSDNode *SV = cast<SrcValueSDNode>(Op.getOperand(2));
574 SDOperand VAList = DAG.getLoad(VT, Op.getOperand(0), Op.getOperand(1),
575 SV->getValue(), SV->getOffset());
576 // Increment the pointer, VAList, to the next vaarg
577 SDOperand VAIncr = DAG.getNode(ISD::ADD, VT, VAList,
578 DAG.getConstant(MVT::getSizeInBits(VT)/8,
580 // Store the incremented VAList to the legalized pointer
581 VAIncr = DAG.getStore(VAList.getValue(1), VAIncr,
582 Op.getOperand(1), SV->getValue(), SV->getOffset());
583 // Load the actual argument out of the pointer VAList
584 return DAG.getLoad(Op.getValueType(), VAIncr, VAList, NULL, 0);
587 // vastart just stores the address of the VarArgsFrameIndex slot into the
588 // memory location argument.
589 SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, MVT::i64);
590 SrcValueSDNode *SV = cast<SrcValueSDNode>(Op.getOperand(2));
591 return DAG.getStore(Op.getOperand(0), FR,
592 Op.getOperand(1), SV->getValue(), SV->getOffset());