1 //===-- IA64ISelLowering.cpp - IA64 DAG Lowering Implementation -----------===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by Duraid Madina and is distributed under
6 // the University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the IA64ISelLowering class.
12 //===----------------------------------------------------------------------===//
14 #include "IA64ISelLowering.h"
15 #include "IA64MachineFunctionInfo.h"
16 #include "IA64TargetMachine.h"
17 #include "llvm/CodeGen/MachineFrameInfo.h"
18 #include "llvm/CodeGen/MachineFunction.h"
19 #include "llvm/CodeGen/MachineInstrBuilder.h"
20 #include "llvm/CodeGen/SelectionDAG.h"
21 #include "llvm/CodeGen/SSARegMap.h"
22 #include "llvm/Constants.h"
23 #include "llvm/Function.h"
// IA64TargetLowering constructor - configure how LLVM IR operations are
// lowered for IA64: which register classes back each value type, which
// ISD nodes are natively supported, and which must be Expanded (turned into
// simpler ops / libcalls) or Custom-lowered (handled in LowerOperation below).
26 IA64TargetLowering::IA64TargetLowering(TargetMachine &TM)
27 : TargetLowering(TM) {
29 // register class for general registers
30 addRegisterClass(MVT::i64, IA64::GRRegisterClass);
32 // register class for FP registers
33 addRegisterClass(MVT::f64, IA64::FPRegisterClass);
35 // register class for predicate registers
36 addRegisterClass(MVT::i1, IA64::PRRegisterClass);
// Extending loads of i1 are promoted to a wider type; sign-extending loads
// of the small integer types have no native form and must be expanded.
38 setLoadXAction(ISD::EXTLOAD , MVT::i1 , Promote);
40 setLoadXAction(ISD::ZEXTLOAD , MVT::i1 , Expand);
42 setLoadXAction(ISD::SEXTLOAD , MVT::i1 , Expand);
43 setLoadXAction(ISD::SEXTLOAD , MVT::i8 , Expand);
44 setLoadXAction(ISD::SEXTLOAD , MVT::i16 , Expand);
45 setLoadXAction(ISD::SEXTLOAD , MVT::i32 , Expand);
// No native indirect/jump-table/compare-and-branch forms at this level.
47 setOperationAction(ISD::BRIND , MVT::Other, Expand);
48 setOperationAction(ISD::BR_JT , MVT::Other, Expand);
49 setOperationAction(ISD::BR_CC , MVT::Other, Expand);
50 setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);
52 // ia64 uses SELECT not SELECT_CC
53 setOperationAction(ISD::SELECT_CC , MVT::Other, Expand);
55 // We need to handle ISD::RET for void functions ourselves,
56 // so we get a chance to restore ar.pfs before adding a
// (NOTE(review): the sentence above is cut off — presumably "...before
// adding a branch/return instruction". The Custom RET handling lives in
// LowerOperation.)
58 setOperationAction(ISD::RET, MVT::Other, Custom);
// setcc produces a predicate (i1); shift amounts are full 64-bit values.
60 setSetCCResultType(MVT::i1);
61 setShiftAmountType(MVT::i64);
63 setOperationAction(ISD::FREM , MVT::f32 , Expand);
64 setOperationAction(ISD::FREM , MVT::f64 , Expand);
// NOTE(review): UREM on *floating-point* types looks suspicious — UREM is
// an integer op; these two lines may have been intended for integer types
// (or are harmless no-ops). Confirm against the ISD opcode definitions.
66 setOperationAction(ISD::UREM , MVT::f32 , Expand);
67 setOperationAction(ISD::UREM , MVT::f64 , Expand);
// memory intrinsics become libcalls
69 setOperationAction(ISD::MEMMOVE , MVT::Other, Expand);
70 setOperationAction(ISD::MEMSET , MVT::Other, Expand);
71 setOperationAction(ISD::MEMCPY , MVT::Other, Expand);
// i1 -> FP conversions go via a wider integer type
73 setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote);
74 setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote);
76 // We don't support sin/cos/sqrt
77 setOperationAction(ISD::FSIN , MVT::f64, Expand);
78 setOperationAction(ISD::FCOS , MVT::f64, Expand);
79 setOperationAction(ISD::FSQRT, MVT::f64, Expand);
80 setOperationAction(ISD::FSIN , MVT::f32, Expand);
81 setOperationAction(ISD::FCOS , MVT::f32, Expand);
82 setOperationAction(ISD::FSQRT, MVT::f32, Expand);
84 // FIXME: IA64 supports fcopysign natively!
85 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
86 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
88 // We don't have line number support yet.
89 setOperationAction(ISD::LOCATION, MVT::Other, Expand);
90 setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
91 setOperationAction(ISD::LABEL, MVT::Other, Expand);
93 //IA64 has these, but they are not implemented
94 setOperationAction(ISD::CTTZ , MVT::i64 , Expand);
95 setOperationAction(ISD::CTLZ , MVT::i64 , Expand);
96 setOperationAction(ISD::ROTL , MVT::i64 , Expand);
97 setOperationAction(ISD::ROTR , MVT::i64 , Expand);
98 setOperationAction(ISD::BSWAP, MVT::i64 , Expand); // mux @rev
100 // VASTART needs to be custom lowered to use the VarArgsFrameIndex
101 setOperationAction(ISD::VAARG , MVT::Other, Custom);
102 setOperationAction(ISD::VASTART , MVT::Other, Custom);
104 // Use the default implementation.
105 setOperationAction(ISD::VACOPY , MVT::Other, Expand);
106 setOperationAction(ISD::VAEND , MVT::Other, Expand);
107 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
108 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
109 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
111 // Thread Local Storage
// Custom-lowered, but LowerOperation currently just asserts (unimplemented).
112 setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
// r12 is the IA64 stack pointer.
114 setStackPointerRegisterToSaveRestore(IA64::r12);
116 setJumpBufSize(704); // on ia64-linux, jmp_bufs are 704 bytes..
117 setJumpBufAlignment(16); // ...and must be 16-byte aligned
// Must run after all register classes are added.
119 computeRegisterProperties();
// FP constants are materialized from memory, except +0.0 and +1.0 which
// IA64 has in registers (f0/f1).
121 setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
122 addLegalFPImmediate(+0.0);
123 addLegalFPImmediate(+1.0);
/// getTargetNodeName - Return a human-readable name for the given
/// target-specific (IA64ISD) DAG node opcode; used for DAG dumps/debugging.
126 const char *IA64TargetLowering::getTargetNodeName(unsigned Opcode) const {
129 case IA64ISD::GETFD: return "IA64ISD::GETFD";
130 case IA64ISD::BRCALL: return "IA64ISD::BRCALL";
131 case IA64ISD::RET_FLAG: return "IA64ISD::RET_FLAG";
/// LowerArguments - Lower the incoming arguments of function F into
/// SelectionDAG values. Per the IA64 software convention visible below:
/// the first 8 arguments arrive in the rotating/stacked registers
/// r32-r39 (integers) and f8-f15 (floating point, allocated as-needed);
/// arguments beyond 8 are loaded from fixed stack slots at offset
/// 16 + 8*(argno-8). Also emits the prologue-side machine instructions
/// (PSEUDO_ALLOC, saves of GP/SP/RP) and records which physical registers
/// carry the return value. Returns one SDOperand per IR argument.
136 std::vector<SDOperand>
137 IA64TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
138 std::vector<SDOperand> ArgValues;
140 // add beautiful description of IA64 stack frame format
141 // here (from intel 24535803.pdf most likely)
143 MachineFunction &MF = DAG.getMachineFunction();
144 MachineFrameInfo *MFI = MF.getFrameInfo();
145 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
// GP/SP/RP are member vregs: virtual copies of r1/r12/rp saved in the
// entry block (see the MOVs below) and restored around calls.
147 GP = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
148 SP = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
149 RP = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
// All prologue MachineInstrs below are appended to the entry block.
151 MachineBasicBlock& BB = MF.front();
// Physical registers used by the first 8 incoming int / FP arguments.
153 unsigned args_int[] = {IA64::r32, IA64::r33, IA64::r34, IA64::r35,
154 IA64::r36, IA64::r37, IA64::r38, IA64::r39};
156 unsigned args_FP[] = {IA64::F8, IA64::F9, IA64::F10, IA64::F11,
157 IA64::F12,IA64::F13,IA64::F14, IA64::F15};
163 unsigned used_FPArgs = 0; // how many FP args have been used so far?
165 unsigned ArgOffset = 0;
// Walk the IR arguments; `count` indexes into the argVreg/argPreg/argOpc
// arrays that record the reg-to-vreg moves emitted further down.
168 for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I)
170 SDOperand newroot, argt;
171 if(count < 8) { // need to fix this logic? maybe.
173 switch (getValueType(I->getType())) {
175 assert(0 && "ERROR in LowerArgs: can't lower this type of arg.\n");
177 // fixme? (well, will need to for weird FP structy stuff,
178 // see intel ABI docs)
180 //XXX BuildMI(&BB, IA64::IDEF, 0, args_FP[used_FPArgs]);
181 MF.addLiveIn(args_FP[used_FPArgs]); // mark this reg as liveIn
182 // floating point args go into f8..f15 as-needed, the increment
183 argVreg[count] = // is below..:
184 MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::f64));
185 // FP args go into f8..f15 as needed: (hence the ++)
186 argPreg[count] = args_FP[used_FPArgs++];
187 argOpc[count] = IA64::FMOV;
188 argt = newroot = DAG.getCopyFromReg(DAG.getRoot(), argVreg[count],
// Incoming floats are passed widened; round back down to f32.
190 if (I->getType() == Type::FloatTy)
191 argt = DAG.getNode(ISD::FP_ROUND, MVT::f32, argt);
193 case MVT::i1: // NOTE: as far as C abi stuff goes,
194 // bools are just boring old ints
199 //XXX BuildMI(&BB, IA64::IDEF, 0, args_int[count]);
200 MF.addLiveIn(args_int[count]); // mark this register as liveIn
202 MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
203 argPreg[count] = args_int[count];
204 argOpc[count] = IA64::MOV;
206 DAG.getCopyFromReg(DAG.getRoot(), argVreg[count], MVT::i64);
// Narrow integer args arrive in a full 64-bit register; truncate to
// the declared IR type.
207 if ( getValueType(I->getType()) != MVT::i64)
208 argt = DAG.getNode(ISD::TRUNCATE, getValueType(I->getType()),
212 } else { // more than 8 args go into the frame
213 // Create the frame index object for this incoming parameter...
// 16-byte scratch area, then one 8-byte slot per stacked argument.
214 ArgOffset = 16 + 8 * (count - 8);
215 int FI = MFI->CreateFixedObject(8, ArgOffset);
217 // Create the SelectionDAG nodes corresponding to a load
218 //from this parameter
219 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i64);
220 argt = newroot = DAG.getLoad(getValueType(I->getType()),
221 DAG.getEntryNode(), FIN, NULL, 0);
224 DAG.setRoot(newroot.getValue(1));
225 ArgValues.push_back(argt);
229 // Create a vreg to hold the output of (what will become)
230 // the "alloc" instruction
// VirtGPR is consumed later when lowering ISD::RET, to restore ar.pfs.
231 VirtGPR = MF.getSSARegMap()->createVirtualRegister(getRegClassFor(MVT::i64));
232 BuildMI(&BB, TII->get(IA64::PSEUDO_ALLOC), VirtGPR);
233 // we create a PSEUDO_ALLOC (pseudo)instruction for now
// Pretend-define GP (r1), SP (r12) and RP so the copies below are legal
// uses from the register allocator's point of view.
235 BuildMI(&BB, IA64::IDEF, 0, IA64::r1);
238 BuildMI(&BB, IA64::IDEF, 0, IA64::r12);
239 BuildMI(&BB, IA64::IDEF, 0, IA64::rp);
// Save GP, SP and RP into the virtual registers created above.
242 BuildMI(&BB, IA64::MOV, 1, GP).addReg(IA64::r1);
245 BuildMI(&BB, IA64::MOV, 1, SP).addReg(IA64::r12);
246 BuildMI(&BB, IA64::MOV, 1, RP).addReg(IA64::rp);
250 unsigned tempOffset=0;
252 // if this is a varargs function, we simply lower llvm.va_start by
253 // pointing to the first entry
256 VarArgsFrameIndex = MFI->CreateFixedObject(8, tempOffset);
259 // here we actually do the moving of args, and store them to the stack
260 // too if this is a varargs function:
261 for (int i = 0; i < count && i < 8; ++i) {
262 BuildMI(&BB, TII->get(argOpc[i]), argVreg[i]).addReg(argPreg[i]);
264 // if this is a varargs function, we copy the input registers to the stack
265 int FI = MFI->CreateFixedObject(8, tempOffset);
266 tempOffset+=8; //XXX: is it safe to use r22 like this?
// r22 is clobbered here as a scratch register to hold the slot address.
267 BuildMI(&BB, TII->get(IA64::MOV), IA64::r22).addFrameIndex(FI);
268 // FIXME: we should use st8.spill here, one day
269 BuildMI(&BB, TII->get(IA64::ST8), IA64::r22).addReg(argPreg[i]);
273 // Finally, inform the code generator which regs we return values in.
274 // (see the ISD::RET: case in the instruction selector)
// Integer-class results live out in r8, FP results in f8.
275 switch (getValueType(F.getReturnType())) {
276 default: assert(0 && "i have no idea where to return this type!");
277 case MVT::isVoid: break;
283 MF.addLiveOut(IA64::r8);
287 MF.addLiveOut(IA64::F8);
/// LowerCallTo - Lower an outgoing call: marshal the arguments (first 8 in
/// out0-out7 / f8-f15, the rest into stack slots starting at offset 16),
/// save GP/SP/RP around the call, emit the IA64ISD::BRCALL node, restore
/// GP/SP/RP, and copy the return value out of r8 (integer) or f8 (FP).
/// Returns the (result value, chain) pair.
/// FP register args are additionally copied into the corresponding integer
/// out-register via IA64ISD::GETFD, as required for varargs/unknown callees.
294 std::pair<SDOperand, SDOperand>
295 IA64TargetLowering::LowerCallTo(SDOperand Chain,
296 const Type *RetTy, bool RetTyIsSigned,
297 bool isVarArg, unsigned CallingConv,
298 bool isTailCall, SDOperand Callee,
299 ArgListTy &Args, SelectionDAG &DAG) {
301 MachineFunction &MF = DAG.getMachineFunction();
// 16 bytes of scratch area always, plus 8 bytes per argument past the
// first 8 (those go on the stack, the first 8 go in registers).
303 unsigned NumBytes = 16;
304 unsigned outRegsUsed = 0;
306 if (Args.size() > 8) {
307 NumBytes += (Args.size() - 8) * 8;
310 outRegsUsed = Args.size();
313 // FIXME? this WILL fail if we ever try to pass around an arg that
314 // consumes more than a single output slot (a 'real' double, int128
315 // some sort of aggregate etc.), as we'll underestimate how many 'outX'
316 // registers we use. Hopefully, the assembler will notice.
// Record the high-water mark of out-registers used by any call in this
// function; the prologue's 'alloc' needs it.
317 MF.getInfo<IA64FunctionInfo>()->outRegsUsed=
318 std::max(outRegsUsed, MF.getInfo<IA64FunctionInfo>()->outRegsUsed);
320 // keep stack frame 16-byte aligned
321 // assert(NumBytes==((NumBytes+15) & ~15) &&
322 // "stack frame not 16-byte aligned!");
323 NumBytes = (NumBytes+15) & ~15;
325 Chain = DAG.getCALLSEQ_START(Chain,DAG.getConstant(NumBytes, getPointerTy()));
// Stores: spills of stack-passed args. Converts: GETFD int images of FP
// register args. RegValuesToPass: values destined for out0-7/f8-15.
328 std::vector<SDOperand> Stores;
329 std::vector<SDOperand> Converts;
330 std::vector<SDOperand> RegValuesToPass;
331 unsigned ArgOffset = 16;
333 for (unsigned i = 0, e = Args.size(); i != e; ++i)
335 SDOperand Val = Args[i].Node;
336 MVT::ValueType ObjectVT = Val.getValueType();
337 SDOperand ValToStore(0, 0), ValToConvert(0, 0);
340 default: assert(0 && "unexpected argument type!");
345 //promote to 64-bits, sign/zero extending based on type
347 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
349 ExtendKind = ISD::SIGN_EXTEND;
350 else if (Args[i].isZExt)
351 ExtendKind = ISD::ZERO_EXTEND;
352 Val = DAG.getNode(ExtendKind, MVT::i64, Val);
// First 8 args ride in registers; overflow goes to the stack.
357 if(RegValuesToPass.size() >= 8) {
360 RegValuesToPass.push_back(Val);
// f32 args are widened to f64 before being passed.
365 Val = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Val);
368 if(RegValuesToPass.size() >= 8) {
371 RegValuesToPass.push_back(Val);
372 if(1 /* TODO: if(calling external or varadic function)*/ ) {
373 ValToConvert = Val; // additionally pass this FP value as an int
// Stack-passed argument: store at r12 + ArgOffset.
381 StackPtr = DAG.getRegister(IA64::r12, MVT::i64);
383 SDOperand PtrOff = DAG.getConstant(ArgOffset, getPointerTy());
384 PtrOff = DAG.getNode(ISD::ADD, MVT::i64, StackPtr, PtrOff);
385 Stores.push_back(DAG.getStore(Chain, ValToStore, PtrOff, NULL, 0));
386 ArgOffset += ObjSize;
389 if(ValToConvert.Val) {
// GETFD moves the raw bits of an FP register into an integer register.
390 Converts.push_back(DAG.getNode(IA64ISD::GETFD, MVT::i64, ValToConvert));
394 // Emit all stores, make sure they occur before any copies into physregs.
396 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, &Stores[0],Stores.size());
398 static const unsigned IntArgRegs[] = {
399 IA64::out0, IA64::out1, IA64::out2, IA64::out3,
400 IA64::out4, IA64::out5, IA64::out6, IA64::out7
403 static const unsigned FPArgRegs[] = {
404 IA64::F8, IA64::F9, IA64::F10, IA64::F11,
405 IA64::F12, IA64::F13, IA64::F14, IA64::F15
410 // save the current GP, SP and RP : FIXME: do we need to do all 3 always?
411 SDOperand GPBeforeCall = DAG.getCopyFromReg(Chain, IA64::r1, MVT::i64, InFlag);
412 Chain = GPBeforeCall.getValue(1);
413 InFlag = Chain.getValue(2);
414 SDOperand SPBeforeCall = DAG.getCopyFromReg(Chain, IA64::r12, MVT::i64, InFlag);
415 Chain = SPBeforeCall.getValue(1);
416 InFlag = Chain.getValue(2);
417 SDOperand RPBeforeCall = DAG.getCopyFromReg(Chain, IA64::rp, MVT::i64, InFlag);
418 Chain = RPBeforeCall.getValue(1);
419 InFlag = Chain.getValue(2);
421 // Build a sequence of copy-to-reg nodes chained together with token chain
422 // and flag operands which copy the outgoing integer args into regs out[0-7]
423 // mapped 1:1 and the FP args into regs F8-F15 "lazily"
424 // TODO: for performance, we should only copy FP args into int regs when we
425 // know this is required (i.e. for varardic or external (unknown) functions)
427 // first to the FP->(integer representation) conversions, these are
428 // flagged for now, but shouldn't have to be (TODO)
429 unsigned seenConverts = 0;
430 for (unsigned i = 0, e = RegValuesToPass.size(); i != e; ++i) {
431 if(MVT::isFloatingPoint(RegValuesToPass[i].getValueType())) {
432 Chain = DAG.getCopyToReg(Chain, IntArgRegs[i], Converts[seenConverts++],
434 InFlag = Chain.getValue(1);
438 // next copy args into the usual places, these are flagged
439 unsigned usedFPArgs = 0;
440 for (unsigned i = 0, e = RegValuesToPass.size(); i != e; ++i) {
441 Chain = DAG.getCopyToReg(Chain,
442 MVT::isInteger(RegValuesToPass[i].getValueType()) ?
443 IntArgRegs[i] : FPArgRegs[usedFPArgs++], RegValuesToPass[i], InFlag);
444 InFlag = Chain.getValue(1);
447 // If the callee is a GlobalAddress node (quite common, every direct call is)
448 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
450 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
451 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), MVT::i64);
455 std::vector<MVT::ValueType> NodeTys;
456 std::vector<SDOperand> CallOperands;
457 NodeTys.push_back(MVT::Other); // Returns a chain
458 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
459 CallOperands.push_back(Chain);
460 CallOperands.push_back(Callee);
462 // emit the call itself
464 CallOperands.push_back(InFlag);
466 assert(0 && "this should never happen!\n");
468 // to make way for a hack:
469 Chain = DAG.getNode(IA64ISD::BRCALL, NodeTys,
470 &CallOperands[0], CallOperands.size());
471 InFlag = Chain.getValue(1);
473 // restore the GP, SP and RP after the call
474 Chain = DAG.getCopyToReg(Chain, IA64::r1, GPBeforeCall, InFlag);
475 InFlag = Chain.getValue(1);
476 Chain = DAG.getCopyToReg(Chain, IA64::r12, SPBeforeCall, InFlag);
477 InFlag = Chain.getValue(1);
478 Chain = DAG.getCopyToReg(Chain, IA64::rp, RPBeforeCall, InFlag);
479 InFlag = Chain.getValue(1);
481 std::vector<MVT::ValueType> RetVals;
482 RetVals.push_back(MVT::Other);
483 RetVals.push_back(MVT::Flag);
485 MVT::ValueType RetTyVT = getValueType(RetTy);
// Copy the return value out of the ABI result register, per type.
487 if (RetTyVT != MVT::isVoid) {
489 default: assert(0 && "Unknown value type to return!");
490 case MVT::i1: { // bools are just like other integers (returned in r8)
491 // we *could* fall through to the truncate below, but this saves a
492 // few redundant predicate ops
// Materialize the i1 as (r8 != r0); r0 is the hardwired zero register.
493 SDOperand boolInR8 = DAG.getCopyFromReg(Chain, IA64::r8, MVT::i64,InFlag);
494 InFlag = boolInR8.getValue(2);
495 Chain = boolInR8.getValue(1);
496 SDOperand zeroReg = DAG.getCopyFromReg(Chain, IA64::r0, MVT::i64, InFlag);
497 InFlag = zeroReg.getValue(2);
498 Chain = zeroReg.getValue(1);
500 RetVal = DAG.getSetCC(MVT::i1, boolInR8, zeroReg, ISD::SETNE);
// Narrow integer results: read r8, assert the extension kind, truncate.
506 RetVal = DAG.getCopyFromReg(Chain, IA64::r8, MVT::i64, InFlag);
507 Chain = RetVal.getValue(1);
509 // keep track of whether it is sign or zero extended (todo: bools?)
511 RetVal = DAG.getNode(RetTy->isSigned() ? ISD::AssertSext :ISD::AssertZext,
512 MVT::i64, RetVal, DAG.getValueType(RetTyVT));
514 RetVal = DAG.getNode(ISD::TRUNCATE, RetTyVT, RetVal);
517 RetVal = DAG.getCopyFromReg(Chain, IA64::r8, MVT::i64, InFlag);
518 Chain = RetVal.getValue(1);
519 InFlag = RetVal.getValue(2); // XXX dead
// f32 results come back widened in f8 and are truncated back down.
522 RetVal = DAG.getCopyFromReg(Chain, IA64::F8, MVT::f64, InFlag);
523 Chain = RetVal.getValue(1);
524 RetVal = DAG.getNode(ISD::TRUNCATE, MVT::f32, RetVal);
527 RetVal = DAG.getCopyFromReg(Chain, IA64::F8, MVT::f64, InFlag);
528 Chain = RetVal.getValue(1);
529 InFlag = RetVal.getValue(2); // XXX dead
534 Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, Chain,
535 DAG.getConstant(NumBytes, getPointerTy()));
537 return std::make_pair(RetVal, Chain);
/// LowerOperation - Custom-lower the operations marked Custom in the
/// constructor: ISD::RET (restore ar.pfs from VirtGPR before returning,
/// copying any result into r8/F8 first), ISD::VAARG and ISD::VASTART
/// (simple pointer-bump varargs using VarArgsFrameIndex). GlobalTLSAddress
/// is registered as Custom but simply asserts — TLS is unimplemented.
540 SDOperand IA64TargetLowering::
541 LowerOperation(SDOperand Op, SelectionDAG &DAG) {
542 switch (Op.getOpcode()) {
543 default: assert(0 && "Should not custom lower this!");
544 case ISD::GlobalTLSAddress:
545 assert(0 && "TLS not implemented for IA64.");
546 SDOperand AR_PFSVal, Copy;
// Dispatch on operand count: chain-only (void return) vs. chain + value.
549 switch(Op.getNumOperands()) {
551 assert(0 && "Do not know how to return this many arguments!");
// Void return: reload ar.pfs (saved in VirtGPR by the prologue's
// PSEUDO_ALLOC) back into AR_PFS, then emit RET_FLAG.
554 AR_PFSVal = DAG.getCopyFromReg(Op.getOperand(0), VirtGPR, MVT::i64);
555 AR_PFSVal = DAG.getCopyToReg(AR_PFSVal.getValue(1), IA64::AR_PFS,
557 return DAG.getNode(IA64ISD::RET_FLAG, MVT::Other, AR_PFSVal);
559 // Copy the result into the output register & restore ar.pfs
560 MVT::ValueType ArgVT = Op.getOperand(1).getValueType();
561 unsigned ArgReg = MVT::isInteger(ArgVT) ? IA64::r8 : IA64::F8;
563 AR_PFSVal = DAG.getCopyFromReg(Op.getOperand(0), VirtGPR, MVT::i64);
564 Copy = DAG.getCopyToReg(AR_PFSVal.getValue(1), ArgReg, Op.getOperand(1),
566 AR_PFSVal = DAG.getCopyToReg(Copy.getValue(0), IA64::AR_PFS, AR_PFSVal,
568 return DAG.getNode(IA64ISD::RET_FLAG, MVT::Other,
569 AR_PFSVal, AR_PFSVal.getValue(1));
// VAARG: load the current va_list pointer, bump it by one (pointer-sized)
// slot, store it back, then load the argument from the old pointer.
575 MVT::ValueType VT = getPointerTy();
576 SrcValueSDNode *SV = cast<SrcValueSDNode>(Op.getOperand(2));
577 SDOperand VAList = DAG.getLoad(VT, Op.getOperand(0), Op.getOperand(1),
578 SV->getValue(), SV->getOffset());
579 // Increment the pointer, VAList, to the next vaarg
580 SDOperand VAIncr = DAG.getNode(ISD::ADD, VT, VAList,
581 DAG.getConstant(MVT::getSizeInBits(VT)/8,
583 // Store the incremented VAList to the legalized pointer
584 VAIncr = DAG.getStore(VAList.getValue(1), VAIncr,
585 Op.getOperand(1), SV->getValue(), SV->getOffset());
586 // Load the actual argument out of the pointer VAList
587 return DAG.getLoad(Op.getValueType(), VAIncr, VAList, NULL, 0);
590 // vastart just stores the address of the VarArgsFrameIndex slot into the
591 // memory location argument.
592 SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, MVT::i64);
593 SrcValueSDNode *SV = cast<SrcValueSDNode>(Op.getOperand(2));
594 return DAG.getStore(Op.getOperand(0), FR,
595 Op.getOperand(1), SV->getValue(), SV->getOffset());
597 // Frame & Return address. Currently unimplemented
598 case ISD::RETURNADDR: break;
599 case ISD::FRAMEADDR: break;