//===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the interfaces that Sparc uses to lower LLVM code into
// a selection DAG.
//
//===----------------------------------------------------------------------===//
#include "SparcISelLowering.h"
#include "MCTargetDesc/SparcMCExpr.h"
#include "SparcMachineFunctionInfo.h"
#include "SparcRegisterInfo.h"
#include "SparcTargetMachine.h"
#include "SparcTargetObjectFile.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"

using namespace llvm;
//===----------------------------------------------------------------------===//
// Calling Convention Implementation
//===----------------------------------------------------------------------===//
static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT,
                                 MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                 ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
  assert (ArgFlags.isSRet());

  // Assign SRet argument.
  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
                                         0,
                                         LocVT, LocInfo));
  return true;
}
static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT,
                                     MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                     ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
  static const MCPhysReg RegList[] = {
    SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
  };
  // Try to get the first register.
  if (unsigned Reg = State.AllocateReg(RegList)) {
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  } else {
    // Assign the whole value on the stack.
    State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
                                           State.AllocateStack(8, 4),
                                           LocVT, LocInfo));
    return true;
  }

  // Try to get the second register.
  if (unsigned Reg = State.AllocateReg(RegList))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
                                           State.AllocateStack(4, 4),
                                           LocVT, LocInfo));
  return true;
}
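// An illustrative case (hypothetical signature, not taken from this file):
// for
//
//   define void @f(i32 %a, double %d)
//
// %a takes %i0, so the f64 goes through the split path above: its high word
// lands in %i1 and its low word in %i2 (or on the stack once the six integer
// registers run out), each location marked custom so LowerFormalArguments_32
// and LowerCall_32 can reassemble the pair with BUILD_PAIR/BITCAST.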
static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT,
                                         MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                                         ISD::ArgFlagsTy &ArgFlags, CCState &State)
{
  static const MCPhysReg RegList[] = {
    SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
  };
  // Try to get the first register.
  if (unsigned Reg = State.AllocateReg(RegList))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    return false;

  // Try to get the second register.
  if (unsigned Reg = State.AllocateReg(RegList))
    State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
  else
    return false;

  return true;
}
// Allocate a full-sized argument for the 64-bit ABI.
static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT,
                            MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                            ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  assert((LocVT == MVT::f32 || LocVT == MVT::f128
          || LocVT.getSizeInBits() == 64) &&
         "Can't handle non-64 bits locations");

  // Stack space is allocated for all arguments starting from [%fp+BIAS+128].
  unsigned size      = (LocVT == MVT::f128) ? 16 : 8;
  unsigned alignment = (LocVT == MVT::f128) ? 16 : 8;
  unsigned Offset = State.AllocateStack(size, alignment);
  unsigned Reg = 0;

  if (LocVT == MVT::i64 && Offset < 6*8)
    // Promote integers to %i0-%i5.
    Reg = SP::I0 + Offset/8;
  else if (LocVT == MVT::f64 && Offset < 16*8)
    // Promote doubles to %d0-%d30. (Which LLVM calls D0-D15).
    Reg = SP::D0 + Offset/8;
  else if (LocVT == MVT::f32 && Offset < 16*8)
    // Promote floats to %f1, %f3, ...
    Reg = SP::F1 + Offset/4;
  else if (LocVT == MVT::f128 && Offset < 16*8)
    // Promote long doubles to %q0-%q28. (Which LLVM calls Q0-Q7).
    Reg = SP::Q0 + Offset/16;

  // Promote to register when possible, otherwise use the stack slot.
  if (Reg) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return true;
  }

  // This argument goes on the stack in an 8-byte slot.
  // When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to
  // the right-aligned float. The first 4 bytes of the stack slot are undefined.
  if (LocVT == MVT::f32)
    Offset += 4;

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return true;
}
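// Illustrative reading of the checks above (not additional ABI rules): the
// 8-byte slot at Offset 0 maps to %i0 for i64, to D0 for f64, and to Q0 for
// f128; the slot at Offset 8 maps to %i1 or D1, and so on. Integer arguments
// fall back to the stack once Offset reaches 6*8, while FP arguments keep
// using registers until Offset reaches 16*8.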
// Allocate a half-sized argument for the 64-bit ABI.
//
// This is used when passing { float, int } structs by value in registers.
static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT,
                            MVT &LocVT, CCValAssign::LocInfo &LocInfo,
                            ISD::ArgFlagsTy &ArgFlags, CCState &State) {
  assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
  unsigned Offset = State.AllocateStack(4, 4);

  if (LocVT == MVT::f32 && Offset < 16*8) {
    // Promote floats to %f0-%f31.
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, SP::F0 + Offset/4,
                                     LocVT, LocInfo));
    return true;
  }

  if (LocVT == MVT::i32 && Offset < 6*8) {
    // Promote integers to %i0-%i5, using half the register.
    unsigned Reg = SP::I0 + Offset/8;
    LocVT = MVT::i64;
    LocInfo = CCValAssign::AExt;

    // Set the Custom bit if this i32 goes in the high bits of a register.
    if (Offset % 8 == 0)
      State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg,
                                             LocVT, LocInfo));
    else
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return true;
  }

  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return true;
}
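// Example (a sketch of the case this convention exists for): passing a
// { float, int } struct by value assigns the float member the first 4-byte
// slot (Offset 0 -> %f0 under this scheme) and the int member the second
// slot (Offset 4 -> the low half of %i0, not marked custom because
// Offset % 8 != 0).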
#include "SparcGenCallingConv.inc"

// The calling conventions in SparcCallingConv.td are described in terms of the
// callee's register window. This function translates registers to the
// corresponding caller window %o register.
static unsigned toCallerWindow(unsigned Reg) {
  assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7 && "Unexpected enum");
  if (Reg >= SP::I0 && Reg <= SP::I7)
    return Reg - SP::I0 + SP::O0;
  return Reg;
}
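// For example, toCallerWindow(SP::I0) == SP::O0 and toCallerWindow(SP::I7) ==
// SP::O7; registers outside %i0-%i7 (e.g. %f0 or %o6) are returned unchanged.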
SDValue
SparcTargetLowering::LowerReturn(SDValue Chain,
                                 CallingConv::ID CallConv, bool IsVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 SDLoc DL, SelectionDAG &DAG) const {
  if (Subtarget->is64Bit())
    return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
  return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
}
SDValue
SparcTargetLowering::LowerReturn_32(SDValue Chain,
                                    CallingConv::ID CallConv, bool IsVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    SDLoc DL, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);
  // Make room for the return address offset.
  RetOps.push_back(SDValue());

  // Copy the result values into the output registers.
  for (unsigned i = 0, realRVLocIdx = 0;
       i != RVLocs.size();
       ++i, ++realRVLocIdx) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Arg = OutVals[realRVLocIdx];

    if (VA.needsCustom()) {
      assert(VA.getLocVT() == MVT::v2i32);
      // Legalize ret v2i32 -> ret 2 x i32 (basically: do what would
      // happen by default if this weren't a legal type).

      SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, Arg,
                                  DAG.getConstant(0, DL, getVectorIdxTy(DAG.getDataLayout())));
      SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, Arg,
                                  DAG.getConstant(1, DL, getVectorIdxTy(DAG.getDataLayout())));

      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part0, Flag);
      Flag = Chain.getValue(1);
      RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
      VA = RVLocs[++i]; // Skip ahead to the next location.
      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part1, Flag);
    } else
      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);

    // Guarantee that all emitted copies are stuck together with flags.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  unsigned RetAddrOffset = 8; // Call instruction + delay slot.
  // If the function returns a struct, copy the SRetReturnReg to %i0.
  if (MF.getFunction()->hasStructRetAttr()) {
    SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
    unsigned Reg = SFI->getSRetReturnReg();
    if (!Reg)
      llvm_unreachable("sret virtual register not created in the entry block");
    auto PtrVT = getPointerTy(DAG.getDataLayout());
    SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, PtrVT);
    Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Flag);
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(SP::I0, PtrVT));
    RetAddrOffset = 12; // Call instruction + delay slot + unimp.
  }

  RetOps[0] = Chain;  // Update chain.
  RetOps[1] = DAG.getConstant(RetAddrOffset, DL, MVT::i32);

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
}
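// Note on the 12-byte offset above: for functions returning a struct, the
// 32-bit SPARC ABI has the caller place an "unimp <size>" word after the call
// and its delay slot, and the callee returns to %o7+12 to skip it. That is why
// RetAddrOffset switches from 8 (call + delay slot) to 12 when the function
// carries the sret attribute.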
// Lower return values for the 64-bit ABI.
// Return values are passed exactly the same way as function arguments.
SDValue
SparcTargetLowering::LowerReturn_64(SDValue Chain,
                                    CallingConv::ID CallConv, bool IsVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    SDLoc DL, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // The second operand on the return instruction is the return address offset.
  // The return address is always %i7+8 with the 64-bit ABI.
  RetOps.push_back(DAG.getConstant(8, DL, MVT::i32));

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    SDValue OutVal = OutVals[i];

    // Integer return values must be sign or zero extended by the callee.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
      OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    case CCValAssign::ZExt:
      OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    case CCValAssign::AExt:
      OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
      break;
    default:
      llvm_unreachable("Unknown loc info!");
    }

    // The custom bit on an i32 return value indicates that it should be passed
    // in the high bits of the register.
    if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
      OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal,
                           DAG.getConstant(32, DL, MVT::i32));

      // The next value may go in the low bits of the same register.
      // Handle both at once.
      if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) {
        SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]);
        OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV);
        // Skip the next value, it's already done.
        ++i;
      }
    }

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Flag);

    // Guarantee that all emitted copies are stuck together with flags.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
}
366 SDValue SparcTargetLowering::
367 LowerFormalArguments(SDValue Chain,
368 CallingConv::ID CallConv,
370 const SmallVectorImpl<ISD::InputArg> &Ins,
373 SmallVectorImpl<SDValue> &InVals) const {
374 if (Subtarget->is64Bit())
375 return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins,
377 return LowerFormalArguments_32(Chain, CallConv, IsVarArg, Ins,
381 /// LowerFormalArguments32 - V8 uses a very simple ABI, where all values are
382 /// passed in either one or two GPRs, including FP values. TODO: we should
383 /// pass FP values in FP registers for fastcc functions.
384 SDValue SparcTargetLowering::
385 LowerFormalArguments_32(SDValue Chain,
386 CallingConv::ID CallConv,
388 const SmallVectorImpl<ISD::InputArg> &Ins,
391 SmallVectorImpl<SDValue> &InVals) const {
392 MachineFunction &MF = DAG.getMachineFunction();
393 MachineRegisterInfo &RegInfo = MF.getRegInfo();
394 SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
396 // Assign locations to all of the incoming arguments.
397 SmallVector<CCValAssign, 16> ArgLocs;
398 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
400 CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);
402 const unsigned StackOffset = 92;
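    // The 92 bytes break down as: 64 bytes for the register window save area,
    // 4 bytes for the struct-return pointer slot at %sp+64, and 24 bytes for
    // the six outgoing argument words at %sp+68..%sp+91, so the seventh and
    // later incoming argument words live at %fp+92 upward.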
405 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++InIdx) {
406 CCValAssign &VA = ArgLocs[i];
408 if (Ins[InIdx].Flags.isSRet()) {
410 report_fatal_error("sparc only supports sret on the first parameter");
411 // Get SRet from [%fp+64].
412 int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, 64, true);
413 SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
414 SDValue Arg = DAG.getLoad(MVT::i32, dl, Chain, FIPtr,
415 MachinePointerInfo(),
416 false, false, false, 0);
417 InVals.push_back(Arg);
422 if (VA.needsCustom()) {
423 assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
425 unsigned VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
426 MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi);
427 SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);
430 CCValAssign &NextVA = ArgLocs[++i];
433 if (NextVA.isMemLoc()) {
434 int FrameIdx = MF.getFrameInfo()->
435 CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(),true);
436 SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
437 LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr,
438 MachinePointerInfo(),
439 false, false, false, 0);
441 unsigned loReg = MF.addLiveIn(NextVA.getLocReg(),
442 &SP::IntRegsRegClass);
443 LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32);
446 DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
447 WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), WholeValue);
448 InVals.push_back(WholeValue);
451 unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
452 MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg);
453 SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
454 if (VA.getLocVT() == MVT::f32)
455 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg);
456 else if (VA.getLocVT() != MVT::i32) {
457 Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg,
458 DAG.getValueType(VA.getLocVT()));
459 Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg);
461 InVals.push_back(Arg);
465 assert(VA.isMemLoc());
467 unsigned Offset = VA.getLocMemOffset()+StackOffset;
468 auto PtrVT = getPointerTy(DAG.getDataLayout());
470 if (VA.needsCustom()) {
      assert(VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::v2i32);
472 // If it is double-word aligned, just load.
473 if (Offset % 8 == 0) {
474 int FI = MF.getFrameInfo()->CreateFixedObject(8,
477 SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
478 SDValue Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr,
479 MachinePointerInfo(),
480 false,false, false, 0);
481 InVals.push_back(Load);
485 int FI = MF.getFrameInfo()->CreateFixedObject(4,
488 SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
489 SDValue HiVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr,
490 MachinePointerInfo(),
491 false, false, false, 0);
492 int FI2 = MF.getFrameInfo()->CreateFixedObject(4,
495 SDValue FIPtr2 = DAG.getFrameIndex(FI2, PtrVT);
497 SDValue LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr2,
498 MachinePointerInfo(),
499 false, false, false, 0);
502 DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
503 WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), WholeValue);
504 InVals.push_back(WholeValue);
508 int FI = MF.getFrameInfo()->CreateFixedObject(4,
511 SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
513 if (VA.getValVT() == MVT::i32 || VA.getValVT() == MVT::f32) {
514 Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr,
515 MachinePointerInfo(),
516 false, false, false, 0);
518 ISD::LoadExtType LoadOp = ISD::SEXTLOAD;
519 // Sparc is big endian, so add an offset based on the ObjectVT.
520 unsigned Offset = 4-std::max(1U, VA.getValVT().getSizeInBits()/8);
521 FIPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, FIPtr,
522 DAG.getConstant(Offset, dl, MVT::i32));
523 Load = DAG.getExtLoad(LoadOp, dl, MVT::i32, Chain, FIPtr,
524 MachinePointerInfo(),
525 VA.getValVT(), false, false, false,0);
526 Load = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Load);
528 InVals.push_back(Load);
531 if (MF.getFunction()->hasStructRetAttr()) {
532 // Copy the SRet Argument to SRetReturnReg.
533 SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
534 unsigned Reg = SFI->getSRetReturnReg();
536 Reg = MF.getRegInfo().createVirtualRegister(&SP::IntRegsRegClass);
537 SFI->setSRetReturnReg(Reg);
539 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
540 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
543 // Store remaining ArgRegs to the stack if this is a varargs function.
545 static const MCPhysReg ArgRegs[] = {
546 SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
548 unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs);
549 const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
550 unsigned ArgOffset = CCInfo.getNextStackOffset();
551 if (NumAllocated == 6)
552 ArgOffset += StackOffset;
555 ArgOffset = 68+4*NumAllocated;
558 // Remember the vararg offset for the va_start implementation.
559 FuncInfo->setVarArgsFrameOffset(ArgOffset);
561 std::vector<SDValue> OutChains;
563 for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
564 unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
565 MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
566 SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);
568 int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset,
570 SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
572 OutChains.push_back(DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr,
573 MachinePointerInfo(),
578 if (!OutChains.empty()) {
579 OutChains.push_back(Chain);
580 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
587 // Lower formal arguments for the 64 bit ABI.
588 SDValue SparcTargetLowering::
589 LowerFormalArguments_64(SDValue Chain,
590 CallingConv::ID CallConv,
592 const SmallVectorImpl<ISD::InputArg> &Ins,
595 SmallVectorImpl<SDValue> &InVals) const {
596 MachineFunction &MF = DAG.getMachineFunction();
598 // Analyze arguments according to CC_Sparc64.
599 SmallVector<CCValAssign, 16> ArgLocs;
600 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
602 CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64);
604 // The argument array begins at %fp+BIAS+128, after the register save area.
605 const unsigned ArgArea = 128;
607 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
608 CCValAssign &VA = ArgLocs[i];
610 // This argument is passed in a register.
611 // All integer register arguments are promoted by the caller to i64.
613 // Create a virtual register for the promoted live-in value.
614 unsigned VReg = MF.addLiveIn(VA.getLocReg(),
615 getRegClassFor(VA.getLocVT()));
616 SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());
618 // Get the high bits for i32 struct elements.
619 if (VA.getValVT() == MVT::i32 && VA.needsCustom())
620 Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg,
621 DAG.getConstant(32, DL, MVT::i32));
623 // The caller promoted the argument, so insert an Assert?ext SDNode so we
624 // won't promote the value again in this function.
625 switch (VA.getLocInfo()) {
626 case CCValAssign::SExt:
627 Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg,
628 DAG.getValueType(VA.getValVT()));
630 case CCValAssign::ZExt:
631 Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg,
632 DAG.getValueType(VA.getValVT()));
638 // Truncate the register down to the argument type.
640 Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);
642 InVals.push_back(Arg);
646 // The registers are exhausted. This argument was passed on the stack.
647 assert(VA.isMemLoc());
648 // The CC_Sparc64_Full/Half functions compute stack offsets relative to the
649 // beginning of the arguments area at %fp+BIAS+128.
650 unsigned Offset = VA.getLocMemOffset() + ArgArea;
651 unsigned ValSize = VA.getValVT().getSizeInBits() / 8;
652 // Adjust offset for extended arguments, SPARC is big-endian.
653 // The caller will have written the full slot with extended bytes, but we
654 // prefer our own extending loads.
656 Offset += 8 - ValSize;
657 int FI = MF.getFrameInfo()->CreateFixedObject(ValSize, Offset, true);
658 InVals.push_back(DAG.getLoad(
659 VA.getValVT(), DL, Chain,
660 DAG.getFrameIndex(FI, getPointerTy(MF.getDataLayout())),
661 MachinePointerInfo::getFixedStack(MF, FI), false, false, false, 0));
667 // This function takes variable arguments, some of which may have been passed
668 // in registers %i0-%i5. Variable floating point arguments are never passed
669 // in floating point registers. They go on %i0-%i5 or on the stack like
670 // integer arguments.
672 // The va_start intrinsic needs to know the offset to the first variable
674 unsigned ArgOffset = CCInfo.getNextStackOffset();
675 SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
676 // Skip the 128 bytes of register save area.
677 FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea +
678 Subtarget->getStackPointerBias());
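  // On SPARC v9 the stack pointer bias is 2047 bytes, so, for example, a
  // vararg whose argument-area offset is 48 ends up at %fp + 2047 + 128 + 48.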
680 // Save the variable arguments that were passed in registers.
681 // The caller is required to reserve stack space for 6 arguments regardless
682 // of how many arguments were actually passed.
683 SmallVector<SDValue, 8> OutChains;
684 for (; ArgOffset < 6*8; ArgOffset += 8) {
685 unsigned VReg = MF.addLiveIn(SP::I0 + ArgOffset/8, &SP::I64RegsRegClass);
686 SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
687 int FI = MF.getFrameInfo()->CreateFixedObject(8, ArgOffset + ArgArea, true);
688 auto PtrVT = getPointerTy(MF.getDataLayout());
689 OutChains.push_back(DAG.getStore(
690 Chain, DL, VArg, DAG.getFrameIndex(FI, PtrVT),
691 MachinePointerInfo::getFixedStack(MF, FI), false, false, 0));
694 if (!OutChains.empty())
695 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
701 SparcTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
702 SmallVectorImpl<SDValue> &InVals) const {
703 if (Subtarget->is64Bit())
704 return LowerCall_64(CLI, InVals);
705 return LowerCall_32(CLI, InVals);
708 static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee,
709 ImmutableCallSite *CS) {
711 return CS->hasFnAttr(Attribute::ReturnsTwice);
713 const Function *CalleeFn = nullptr;
714 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
715 CalleeFn = dyn_cast<Function>(G->getGlobal());
716 } else if (ExternalSymbolSDNode *E =
717 dyn_cast<ExternalSymbolSDNode>(Callee)) {
718 const Function *Fn = DAG.getMachineFunction().getFunction();
719 const Module *M = Fn->getParent();
720 const char *CalleeName = E->getSymbol();
721 CalleeFn = M->getFunction(CalleeName);
726 return CalleeFn->hasFnAttribute(Attribute::ReturnsTwice);
729 // Lower a call for the 32-bit ABI.
731 SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
732 SmallVectorImpl<SDValue> &InVals) const {
733 SelectionDAG &DAG = CLI.DAG;
735 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
736 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
737 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
738 SDValue Chain = CLI.Chain;
739 SDValue Callee = CLI.Callee;
740 bool &isTailCall = CLI.IsTailCall;
741 CallingConv::ID CallConv = CLI.CallConv;
742 bool isVarArg = CLI.IsVarArg;
744 // Sparc target does not yet support tail call optimization.
747 // Analyze operands of the call, assigning locations to each operand.
748 SmallVector<CCValAssign, 16> ArgLocs;
749 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
751 CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);
753 // Get the size of the outgoing arguments stack space requirement.
754 unsigned ArgsSize = CCInfo.getNextStackOffset();
756 // Keep stack frames 8-byte aligned.
757 ArgsSize = (ArgsSize+7) & ~7;
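  // Round up to the next multiple of 8, e.g. 13 -> 16 and 16 -> 16.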
759 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
761 // Create local copies for byval args.
762 SmallVector<SDValue, 8> ByValArgs;
763 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
764 ISD::ArgFlagsTy Flags = Outs[i].Flags;
765 if (!Flags.isByVal())
768 SDValue Arg = OutVals[i];
769 unsigned Size = Flags.getByValSize();
770 unsigned Align = Flags.getByValAlign();
772 int FI = MFI->CreateStackObject(Size, Align, false);
773 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
774 SDValue SizeNode = DAG.getConstant(Size, dl, MVT::i32);
776 Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Align,
777 false, // isVolatile,
778 (Size <= 32), // AlwaysInline if size <= 32,
780 MachinePointerInfo(), MachinePointerInfo());
781 ByValArgs.push_back(FIPtr);
784 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(ArgsSize, dl, true),
787 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
788 SmallVector<SDValue, 8> MemOpChains;
790 const unsigned StackOffset = 92;
791 bool hasStructRetAttr = false;
792 // Walk the register/memloc assignments, inserting copies/loads.
793 for (unsigned i = 0, realArgIdx = 0, byvalArgIdx = 0, e = ArgLocs.size();
796 CCValAssign &VA = ArgLocs[i];
797 SDValue Arg = OutVals[realArgIdx];
799 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
801 // Use local copy if it is a byval arg.
803 Arg = ByValArgs[byvalArgIdx++];
805 // Promote the value if needed.
806 switch (VA.getLocInfo()) {
807 default: llvm_unreachable("Unknown loc info!");
808 case CCValAssign::Full: break;
809 case CCValAssign::SExt:
810 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
812 case CCValAssign::ZExt:
813 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
815 case CCValAssign::AExt:
816 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
818 case CCValAssign::BCvt:
819 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
823 if (Flags.isSRet()) {
824 assert(VA.needsCustom());
825 // store SRet argument in %sp+64
826 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
827 SDValue PtrOff = DAG.getIntPtrConstant(64, dl);
828 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
829 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
830 MachinePointerInfo(),
832 hasStructRetAttr = true;
836 if (VA.needsCustom()) {
837 assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
840 unsigned Offset = VA.getLocMemOffset() + StackOffset;
841 // if it is double-word aligned, just store.
842 if (Offset % 8 == 0) {
843 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
844 SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
845 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
846 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
847 MachinePointerInfo(),
853 if (VA.getLocVT() == MVT::f64) {
        // Move the float value from float registers into the
        // integer registers.
857 // TODO: The f64 -> v2i32 conversion is super-inefficient for
858 // constants: it sticks them in the constant pool, then loads
859 // to a fp register, then stores to temp memory, then loads to
860 // integer registers.
861 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, Arg);
864 SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
866 DAG.getConstant(0, dl, getVectorIdxTy(DAG.getDataLayout())));
867 SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
869 DAG.getConstant(1, dl, getVectorIdxTy(DAG.getDataLayout())));
872 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Part0));
874 CCValAssign &NextVA = ArgLocs[++i];
875 if (NextVA.isRegLoc()) {
876 RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Part1));
878 // Store the second part in stack.
879 unsigned Offset = NextVA.getLocMemOffset() + StackOffset;
880 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
881 SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
882 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
883 MemOpChains.push_back(DAG.getStore(Chain, dl, Part1, PtrOff,
884 MachinePointerInfo(),
888 unsigned Offset = VA.getLocMemOffset() + StackOffset;
889 // Store the first part.
890 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
891 SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
892 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
893 MemOpChains.push_back(DAG.getStore(Chain, dl, Part0, PtrOff,
894 MachinePointerInfo(),
896 // Store the second part.
897 PtrOff = DAG.getIntPtrConstant(Offset + 4, dl);
898 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
899 MemOpChains.push_back(DAG.getStore(Chain, dl, Part1, PtrOff,
900 MachinePointerInfo(),
      // Arguments that can be passed in a register must be kept in the
      // RegsToPass vector.
909 if (VA.getLocVT() != MVT::f32) {
910 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
913 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
914 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
918 assert(VA.isMemLoc());
920 // Create a store off the stack pointer for this argument.
921 SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
922 SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() + StackOffset,
924 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
925 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
926 MachinePointerInfo(),
  // Emit all stores, making sure they occur before any copies into physregs.
932 if (!MemOpChains.empty())
933 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
935 // Build a sequence of copy-to-reg nodes chained together with token
936 // chain and flag operands which copy the outgoing args into registers.
  // The InFlag is necessary since all emitted instructions must be
  // stuck together.
940 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
941 unsigned Reg = toCallerWindow(RegsToPass[i].first);
942 Chain = DAG.getCopyToReg(Chain, dl, Reg, RegsToPass[i].second, InFlag);
943 InFlag = Chain.getValue(1);
946 unsigned SRetArgSize = (hasStructRetAttr)? getSRetArgSize(DAG, Callee):0;
947 bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CS);
949 // If the callee is a GlobalAddress node (quite common, every direct call is)
950 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
951 // Likewise ExternalSymbol -> TargetExternalSymbol.
952 unsigned TF = ((getTargetMachine().getRelocationModel() == Reloc::PIC_)
953 ? SparcMCExpr::VK_Sparc_WPLT30 : 0);
954 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
955 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32, 0, TF);
956 else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
957 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32, TF);
959 // Returns a chain & a flag for retval copy to use
960 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
961 SmallVector<SDValue, 8> Ops;
962 Ops.push_back(Chain);
963 Ops.push_back(Callee);
964 if (hasStructRetAttr)
965 Ops.push_back(DAG.getTargetConstant(SRetArgSize, dl, MVT::i32));
966 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
967 Ops.push_back(DAG.getRegister(toCallerWindow(RegsToPass[i].first),
968 RegsToPass[i].second.getValueType()));
970 // Add a register mask operand representing the call-preserved registers.
971 const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
972 const uint32_t *Mask =
974 ? TRI->getRTCallPreservedMask(CallConv)
975 : TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv));
976 assert(Mask && "Missing call preserved mask for calling convention");
977 Ops.push_back(DAG.getRegisterMask(Mask));
979 if (InFlag.getNode())
980 Ops.push_back(InFlag);
982 Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops);
983 InFlag = Chain.getValue(1);
985 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, dl, true),
986 DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
987 InFlag = Chain.getValue(1);
989 // Assign locations to each value returned by this call.
990 SmallVector<CCValAssign, 16> RVLocs;
991 CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
994 RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);
996 // Copy all of the result registers out of their specified physreg.
997 for (unsigned i = 0; i != RVLocs.size(); ++i) {
998 Chain = DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),
999 RVLocs[i].getValVT(), InFlag).getValue(1);
1000 InFlag = Chain.getValue(2);
1001 InVals.push_back(Chain.getValue(0));
// This function returns true if CalleeName is an ABI function that returns
// a long double (fp128).
1009 static bool isFP128ABICall(const char *CalleeName)
1011 static const char *const ABICalls[] =
1012 { "_Q_add", "_Q_sub", "_Q_mul", "_Q_div",
1013 "_Q_sqrt", "_Q_neg",
1014 "_Q_itoq", "_Q_stoq", "_Q_dtoq", "_Q_utoq",
1015 "_Q_lltoq", "_Q_ulltoq",
1018 for (const char * const *I = ABICalls; *I != nullptr; ++I)
1019 if (strcmp(CalleeName, *I) == 0)
1025 SparcTargetLowering::getSRetArgSize(SelectionDAG &DAG, SDValue Callee) const
1027 const Function *CalleeFn = nullptr;
1028 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
1029 CalleeFn = dyn_cast<Function>(G->getGlobal());
1030 } else if (ExternalSymbolSDNode *E =
1031 dyn_cast<ExternalSymbolSDNode>(Callee)) {
1032 const Function *Fn = DAG.getMachineFunction().getFunction();
1033 const Module *M = Fn->getParent();
1034 const char *CalleeName = E->getSymbol();
1035 CalleeFn = M->getFunction(CalleeName);
1036 if (!CalleeFn && isFP128ABICall(CalleeName))
1037 return 16; // Return sizeof(fp128)
1043 // It would be nice to check for the sret attribute on CalleeFn here,
1044 // but since it is not part of the function type, any check will misfire.
1046 PointerType *Ty = cast<PointerType>(CalleeFn->arg_begin()->getType());
1047 Type *ElementTy = Ty->getElementType();
1048 return DAG.getDataLayout().getTypeAllocSize(ElementTy);
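// The 32-bit ABI expects a struct-returning call to be followed by an
// "unimp <size>" word; the size computed here is attached to the call node as
// the SRetArgSize operand in LowerCall_32 so that convention can be honored.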
1052 // Fixup floating point arguments in the ... part of a varargs call.
1054 // The SPARC v9 ABI requires that floating point arguments are treated the same
1055 // as integers when calling a varargs function. This does not apply to the
1056 // fixed arguments that are part of the function's prototype.
1058 // This function post-processes a CCValAssign array created by
1059 // AnalyzeCallOperands().
1060 static void fixupVariableFloatArgs(SmallVectorImpl<CCValAssign> &ArgLocs,
1061 ArrayRef<ISD::OutputArg> Outs) {
1062 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1063 const CCValAssign &VA = ArgLocs[i];
1064 MVT ValTy = VA.getLocVT();
1065 // FIXME: What about f32 arguments? C promotes them to f64 when calling
1066 // varargs functions.
1067 if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128))
1069 // The fixed arguments to a varargs function still go in FP registers.
1070 if (Outs[VA.getValNo()].IsFixed)
1073 // This floating point argument should be reassigned.
1076 // Determine the offset into the argument array.
1077 unsigned firstReg = (ValTy == MVT::f64) ? SP::D0 : SP::Q0;
1078 unsigned argSize = (ValTy == MVT::f64) ? 8 : 16;
1079 unsigned Offset = argSize * (VA.getLocReg() - firstReg);
1080 assert(Offset < 16*8 && "Offset out of range, bad register enum?");
1083 // This argument should go in %i0-%i5.
1084 unsigned IReg = SP::I0 + Offset/8;
1085 if (ValTy == MVT::f64)
1086 // Full register, just bitconvert into i64.
1087 NewVA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
1088 IReg, MVT::i64, CCValAssign::BCvt);
1090 assert(ValTy == MVT::f128 && "Unexpected type!");
1091 // Full register, just bitconvert into i128 -- We will lower this into
1092 // two i64s in LowerCall_64.
1093 NewVA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(),
1094 IReg, MVT::i128, CCValAssign::BCvt);
1097 // This needs to go to memory, we're out of integer registers.
1098 NewVA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
1099 Offset, VA.getLocVT(), VA.getLocInfo());
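// Concrete effect (sketch): in a call like printf("%f\n", X) with X of type
// double, X is a variable argument, so the f64 that CC_Sparc64_Full assigned
// to D0 is reassigned here to the corresponding integer register (%o0 from
// the caller's side) as an i64 bitcast, exactly as the v9 varargs rules
// require.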
1105 // Lower a call for the 64-bit ABI.
1107 SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
1108 SmallVectorImpl<SDValue> &InVals) const {
1109 SelectionDAG &DAG = CLI.DAG;
1111 SDValue Chain = CLI.Chain;
1112 auto PtrVT = getPointerTy(DAG.getDataLayout());
1114 // Sparc target does not yet support tail call optimization.
1115 CLI.IsTailCall = false;
1117 // Analyze operands of the call, assigning locations to each operand.
1118 SmallVector<CCValAssign, 16> ArgLocs;
1119 CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
1121 CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);
1123 // Get the size of the outgoing arguments stack space requirement.
1124 // The stack offset computed by CC_Sparc64 includes all arguments.
1125 // Called functions expect 6 argument words to exist in the stack frame, used
1127 unsigned ArgsSize = std::max(6*8u, CCInfo.getNextStackOffset());
1129 // Keep stack frames 16-byte aligned.
1130 ArgsSize = RoundUpToAlignment(ArgsSize, 16);
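  // For example, a call with two i64 arguments still reserves 6*8 = 48 bytes
  // (already 16-byte aligned), because the callee may store its register
  // arguments back into this area, e.g. for varargs handling.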
1132 // Varargs calls require special treatment.
1134 fixupVariableFloatArgs(ArgLocs, CLI.Outs);
1136 // Adjust the stack pointer to make room for the arguments.
1137 // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
1138 // with more than 6 arguments.
1139 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(ArgsSize, DL, true),
1142 // Collect the set of registers to pass to the function and their values.
1143 // This will be emitted as a sequence of CopyToReg nodes glued to the call
1145 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  // Collect chains from all the memory operations that copy arguments to the
1148 // stack. They must follow the stack pointer adjustment above and precede the
1149 // call instruction itself.
1150 SmallVector<SDValue, 8> MemOpChains;
1152 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1153 const CCValAssign &VA = ArgLocs[i];
1154 SDValue Arg = CLI.OutVals[i];
1156 // Promote the value if needed.
1157 switch (VA.getLocInfo()) {
1159 llvm_unreachable("Unknown location info!");
1160 case CCValAssign::Full:
1162 case CCValAssign::SExt:
1163 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
1165 case CCValAssign::ZExt:
1166 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
1168 case CCValAssign::AExt:
1169 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
1171 case CCValAssign::BCvt:
1172 // fixupVariableFloatArgs() may create bitcasts from f128 to i128. But
1173 // SPARC does not support i128 natively. Lower it into two i64, see below.
1174 if (!VA.needsCustom() || VA.getValVT() != MVT::f128
1175 || VA.getLocVT() != MVT::i128)
1176 Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
1180 if (VA.isRegLoc()) {
1181 if (VA.needsCustom() && VA.getValVT() == MVT::f128
1182 && VA.getLocVT() == MVT::i128) {
        // Store and reload into the integer registers Reg and Reg+1.
1184 unsigned Offset = 8 * (VA.getLocReg() - SP::I0);
1185 unsigned StackOffset = Offset + Subtarget->getStackPointerBias() + 128;
1186 SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
1187 SDValue HiPtrOff = DAG.getIntPtrConstant(StackOffset, DL);
1188 HiPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, HiPtrOff);
1189 SDValue LoPtrOff = DAG.getIntPtrConstant(StackOffset + 8, DL);
1190 LoPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, LoPtrOff);
1192 // Store to %sp+BIAS+128+Offset
1193 SDValue Store = DAG.getStore(Chain, DL, Arg, HiPtrOff,
1194 MachinePointerInfo(),
1196 // Load into Reg and Reg+1
1197 SDValue Hi64 = DAG.getLoad(MVT::i64, DL, Store, HiPtrOff,
1198 MachinePointerInfo(),
1199 false, false, false, 0);
1200 SDValue Lo64 = DAG.getLoad(MVT::i64, DL, Store, LoPtrOff,
1201 MachinePointerInfo(),
1202 false, false, false, 0);
1203 RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()),
1205 RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()+1),
1210 // The custom bit on an i32 return value indicates that it should be
1211 // passed in the high bits of the register.
1212 if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
1213 Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg,
1214 DAG.getConstant(32, DL, MVT::i32));
1216 // The next value may go in the low bits of the same register.
1217 // Handle both at once.
1218 if (i+1 < ArgLocs.size() && ArgLocs[i+1].isRegLoc() &&
1219 ArgLocs[i+1].getLocReg() == VA.getLocReg()) {
1220 SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64,
1222 Arg = DAG.getNode(ISD::OR, DL, MVT::i64, Arg, NV);
1223 // Skip the next value, it's already done.
1227 RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()), Arg));
1231 assert(VA.isMemLoc());
1233 // Create a store off the stack pointer for this argument.
1234 SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
1235 // The argument area starts at %fp+BIAS+128 in the callee frame,
1236 // %sp+BIAS+128 in ours.
1237 SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() +
1238 Subtarget->getStackPointerBias() +
1240 PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
1241 MemOpChains.push_back(DAG.getStore(Chain, DL, Arg, PtrOff,
1242 MachinePointerInfo(),
1246 // Emit all stores, make sure they occur before the call.
1247 if (!MemOpChains.empty())
1248 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
1250 // Build a sequence of CopyToReg nodes glued together with token chain and
1251 // glue operands which copy the outgoing args into registers. The InGlue is
1252 // necessary since all emitted instructions must be stuck together in order
1253 // to pass the live physical registers.
1255 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1256 Chain = DAG.getCopyToReg(Chain, DL,
1257 RegsToPass[i].first, RegsToPass[i].second, InGlue);
1258 InGlue = Chain.getValue(1);
1261 // If the callee is a GlobalAddress node (quite common, every direct call is)
1262 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
1263 // Likewise ExternalSymbol -> TargetExternalSymbol.
1264 SDValue Callee = CLI.Callee;
1265 bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CS);
1266 unsigned TF = ((getTargetMachine().getRelocationModel() == Reloc::PIC_)
1267 ? SparcMCExpr::VK_Sparc_WPLT30 : 0);
1268 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
1269 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT, 0, TF);
1270 else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
1271 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, TF);
1273 // Build the operands for the call instruction itself.
1274 SmallVector<SDValue, 8> Ops;
1275 Ops.push_back(Chain);
1276 Ops.push_back(Callee);
1277 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1278 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1279 RegsToPass[i].second.getValueType()));
1281 // Add a register mask operand representing the call-preserved registers.
1282 const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
1283 const uint32_t *Mask =
1284 ((hasReturnsTwice) ? TRI->getRTCallPreservedMask(CLI.CallConv)
1285 : TRI->getCallPreservedMask(DAG.getMachineFunction(),
1287 assert(Mask && "Missing call preserved mask for calling convention");
1288 Ops.push_back(DAG.getRegisterMask(Mask));
1290 // Make sure the CopyToReg nodes are glued to the call instruction which
1291 // consumes the registers.
1292 if (InGlue.getNode())
1293 Ops.push_back(InGlue);
1295 // Now the call itself.
1296 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1297 Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, Ops);
1298 InGlue = Chain.getValue(1);
1300 // Revert the stack pointer immediately after the call.
1301 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, DL, true),
1302 DAG.getIntPtrConstant(0, DL, true), InGlue, DL);
1303 InGlue = Chain.getValue(1);
1305 // Now extract the return values. This is more or less the same as
1306 // LowerFormalArguments_64.
1308 // Assign locations to each value returned by this call.
1309 SmallVector<CCValAssign, 16> RVLocs;
1310 CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), RVLocs,
1313 // Set inreg flag manually for codegen generated library calls that
1315 if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && CLI.CS == nullptr)
1316 CLI.Ins[0].Flags.setInReg();
1318 RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64);
1320 // Copy all of the result registers out of their specified physreg.
1321 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1322 CCValAssign &VA = RVLocs[i];
1323 unsigned Reg = toCallerWindow(VA.getLocReg());
1325 // When returning 'inreg {i32, i32 }', two consecutive i32 arguments can
1326 // reside in the same register in the high and low bits. Reuse the
1327 // CopyFromReg previous node to avoid duplicate copies.
1329 if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Chain.getOperand(1)))
1330 if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg)
1331 RV = Chain.getValue(0);
1333 // But usually we'll create a new CopyFromReg for a different register.
1334 if (!RV.getNode()) {
1335 RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue);
1336 Chain = RV.getValue(1);
1337 InGlue = Chain.getValue(2);
1340 // Get the high bits for i32 struct elements.
1341 if (VA.getValVT() == MVT::i32 && VA.needsCustom())
1342 RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV,
1343 DAG.getConstant(32, DL, MVT::i32));
1345 // The callee promoted the return value, so insert an Assert?ext SDNode so
1346 // we won't promote the value again in this function.
1347 switch (VA.getLocInfo()) {
1348 case CCValAssign::SExt:
1349 RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV,
1350 DAG.getValueType(VA.getValVT()));
1352 case CCValAssign::ZExt:
1353 RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV,
1354 DAG.getValueType(VA.getValVT()));
1360 // Truncate the register down to the return value type.
1361 if (VA.isExtInLoc())
1362 RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV);
1364 InVals.push_back(RV);
1370 //===----------------------------------------------------------------------===//
1371 // TargetLowering Implementation
1372 //===----------------------------------------------------------------------===//
/// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC
/// condition.
1376 static SPCC::CondCodes IntCondCCodeToICC(ISD::CondCode CC) {
1378 default: llvm_unreachable("Unknown integer condition code!");
1379 case ISD::SETEQ: return SPCC::ICC_E;
1380 case ISD::SETNE: return SPCC::ICC_NE;
1381 case ISD::SETLT: return SPCC::ICC_L;
1382 case ISD::SETGT: return SPCC::ICC_G;
1383 case ISD::SETLE: return SPCC::ICC_LE;
1384 case ISD::SETGE: return SPCC::ICC_GE;
1385 case ISD::SETULT: return SPCC::ICC_CS;
1386 case ISD::SETULE: return SPCC::ICC_LEU;
1387 case ISD::SETUGT: return SPCC::ICC_GU;
1388 case ISD::SETUGE: return SPCC::ICC_CC;
/// FPCondCCodeToFCC - Convert a DAG floating point condition code to a SPARC
/// FCC condition.
1394 static SPCC::CondCodes FPCondCCodeToFCC(ISD::CondCode CC) {
1396 default: llvm_unreachable("Unknown fp condition code!");
1398 case ISD::SETOEQ: return SPCC::FCC_E;
1400 case ISD::SETUNE: return SPCC::FCC_NE;
1402 case ISD::SETOLT: return SPCC::FCC_L;
1404 case ISD::SETOGT: return SPCC::FCC_G;
1406 case ISD::SETOLE: return SPCC::FCC_LE;
1408 case ISD::SETOGE: return SPCC::FCC_GE;
1409 case ISD::SETULT: return SPCC::FCC_UL;
1410 case ISD::SETULE: return SPCC::FCC_ULE;
1411 case ISD::SETUGT: return SPCC::FCC_UG;
1412 case ISD::SETUGE: return SPCC::FCC_UGE;
1413 case ISD::SETUO: return SPCC::FCC_U;
1414 case ISD::SETO: return SPCC::FCC_O;
1415 case ISD::SETONE: return SPCC::FCC_LG;
1416 case ISD::SETUEQ: return SPCC::FCC_UE;
1420 SparcTargetLowering::SparcTargetLowering(TargetMachine &TM,
1421 const SparcSubtarget &STI)
1422 : TargetLowering(TM), Subtarget(&STI) {
1423 MVT PtrVT = MVT::getIntegerVT(8 * TM.getPointerSize());
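  // PtrVT is i32 for 32-bit Sparc and i64 for Sparc v9, derived from the
  // target's pointer size rather than hard-coded.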
1425 // Instructions which use registers as conditionals examine all the
1426 // bits (as does the pseudo SELECT_CC expansion). I don't think it
1427 // matters much whether it's ZeroOrOneBooleanContent, or
1428 // ZeroOrNegativeOneBooleanContent, so, arbitrarily choose the
1430 setBooleanContents(ZeroOrOneBooleanContent);
1431 setBooleanVectorContents(ZeroOrOneBooleanContent);
1433 // Set up the register classes.
1434 addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
1435 addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
1436 addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);
1437 addRegisterClass(MVT::f128, &SP::QFPRegsRegClass);
1438 if (Subtarget->is64Bit()) {
1439 addRegisterClass(MVT::i64, &SP::I64RegsRegClass);
1441 // On 32bit sparc, we define a double-register 32bit register
1442 // class, as well. This is modeled in LLVM as a 2-vector of i32.
1443 addRegisterClass(MVT::v2i32, &SP::IntPairRegClass);
1445 // ...but almost all operations must be expanded, so set that as
1447 for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
1448 setOperationAction(Op, MVT::v2i32, Expand);
1450 // Truncating/extending stores/loads are also not supported.
1451 for (MVT VT : MVT::integer_vector_valuetypes()) {
1452 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Expand);
1453 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i32, Expand);
1454 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Expand);
1456 setLoadExtAction(ISD::SEXTLOAD, MVT::v2i32, VT, Expand);
1457 setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i32, VT, Expand);
1458 setLoadExtAction(ISD::EXTLOAD, MVT::v2i32, VT, Expand);
1460 setTruncStoreAction(VT, MVT::v2i32, Expand);
1461 setTruncStoreAction(MVT::v2i32, VT, Expand);
1463 // However, load and store *are* legal.
1464 setOperationAction(ISD::LOAD, MVT::v2i32, Legal);
1465 setOperationAction(ISD::STORE, MVT::v2i32, Legal);
1466 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i32, Legal);
1467 setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Legal);
1469 // And we need to promote i64 loads/stores into vector load/store
1470 setOperationAction(ISD::LOAD, MVT::i64, Custom);
1471 setOperationAction(ISD::STORE, MVT::i64, Custom);
1473 // Sadly, this doesn't work:
1474 // AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
1475 // AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
1478 // Turn FP extload into load/fextend
1479 for (MVT VT : MVT::fp_valuetypes()) {
1480 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
1481 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f64, Expand);
1484 // Sparc doesn't have i1 sign extending load
1485 for (MVT VT : MVT::integer_valuetypes())
1486 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
1488 // Turn FP truncstore into trunc + store.
1489 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
1490 setTruncStoreAction(MVT::f128, MVT::f32, Expand);
1491 setTruncStoreAction(MVT::f128, MVT::f64, Expand);
1493 // Custom legalize GlobalAddress nodes into LO/HI parts.
1494 setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
1495 setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
1496 setOperationAction(ISD::ConstantPool, PtrVT, Custom);
1497 setOperationAction(ISD::BlockAddress, PtrVT, Custom);
1499 // Sparc doesn't have sext_inreg, replace them with shl/sra
1500 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
1501 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Expand);
1502 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
1504 // Sparc has no REM or DIVREM operations.
1505 setOperationAction(ISD::UREM, MVT::i32, Expand);
1506 setOperationAction(ISD::SREM, MVT::i32, Expand);
1507 setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
1508 setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
1510 // ... nor does SparcV9.
1511 if (Subtarget->is64Bit()) {
1512 setOperationAction(ISD::UREM, MVT::i64, Expand);
1513 setOperationAction(ISD::SREM, MVT::i64, Expand);
1514 setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
1515 setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
1518 // Custom expand fp<->sint
1519 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
1520 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
1521 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
1522 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
1524 // Custom Expand fp<->uint
1525 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
1526 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
1527 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
1528 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
1530 setOperationAction(ISD::BITCAST, MVT::f32, Expand);
1531 setOperationAction(ISD::BITCAST, MVT::i32, Expand);
1533 // Sparc has no select or setcc: expand to SELECT_CC.
1534 setOperationAction(ISD::SELECT, MVT::i32, Expand);
1535 setOperationAction(ISD::SELECT, MVT::f32, Expand);
1536 setOperationAction(ISD::SELECT, MVT::f64, Expand);
1537 setOperationAction(ISD::SELECT, MVT::f128, Expand);
1539 setOperationAction(ISD::SETCC, MVT::i32, Expand);
1540 setOperationAction(ISD::SETCC, MVT::f32, Expand);
1541 setOperationAction(ISD::SETCC, MVT::f64, Expand);
1542 setOperationAction(ISD::SETCC, MVT::f128, Expand);
1544 // Sparc doesn't have BRCOND either, it has BR_CC.
1545 setOperationAction(ISD::BRCOND, MVT::Other, Expand);
1546 setOperationAction(ISD::BRIND, MVT::Other, Expand);
1547 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
1548 setOperationAction(ISD::BR_CC, MVT::i32, Custom);
1549 setOperationAction(ISD::BR_CC, MVT::f32, Custom);
1550 setOperationAction(ISD::BR_CC, MVT::f64, Custom);
1551 setOperationAction(ISD::BR_CC, MVT::f128, Custom);
1553 setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
1554 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
1555 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
1556 setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
1558 if (Subtarget->is64Bit()) {
1559 setOperationAction(ISD::ADDC, MVT::i64, Custom);
1560 setOperationAction(ISD::ADDE, MVT::i64, Custom);
1561 setOperationAction(ISD::SUBC, MVT::i64, Custom);
1562 setOperationAction(ISD::SUBE, MVT::i64, Custom);
1563 setOperationAction(ISD::BITCAST, MVT::f64, Expand);
1564 setOperationAction(ISD::BITCAST, MVT::i64, Expand);
1565 setOperationAction(ISD::SELECT, MVT::i64, Expand);
1566 setOperationAction(ISD::SETCC, MVT::i64, Expand);
1567 setOperationAction(ISD::BR_CC, MVT::i64, Custom);
1568 setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
1570 setOperationAction(ISD::CTPOP, MVT::i64,
1571 Subtarget->usePopc() ? Legal : Expand);
1572 setOperationAction(ISD::CTTZ , MVT::i64, Expand);
1573 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
1574 setOperationAction(ISD::CTLZ , MVT::i64, Expand);
1575 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
1576 setOperationAction(ISD::BSWAP, MVT::i64, Expand);
1577 setOperationAction(ISD::ROTL , MVT::i64, Expand);
1578 setOperationAction(ISD::ROTR , MVT::i64, Expand);
1579 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  // FIXME: We insert fences for each atomic operation and generate sub-optimal
  // code for PSO/TSO. Also, implement other atomicrmw operations.
1586 setInsertFencesForAtomic(true);
1588 setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Legal);
1589 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32,
1590 (Subtarget->isV9() ? Legal: Expand));
1593 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Legal);
1595 // Custom Lower Atomic LOAD/STORE
1596 setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
1597 setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
1599 if (Subtarget->is64Bit()) {
1600 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Legal);
1601 setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Legal);
1602 setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
1603 setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Custom);
1606 if (!Subtarget->isV9()) {
1607 // SparcV8 does not have FNEGD and FABSD.
1608 setOperationAction(ISD::FNEG, MVT::f64, Custom);
1609 setOperationAction(ISD::FABS, MVT::f64, Custom);
1612 setOperationAction(ISD::FSIN , MVT::f128, Expand);
1613 setOperationAction(ISD::FCOS , MVT::f128, Expand);
1614 setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
1615 setOperationAction(ISD::FREM , MVT::f128, Expand);
1616 setOperationAction(ISD::FMA , MVT::f128, Expand);
1617 setOperationAction(ISD::FSIN , MVT::f64, Expand);
1618 setOperationAction(ISD::FCOS , MVT::f64, Expand);
1619 setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
1620 setOperationAction(ISD::FREM , MVT::f64, Expand);
1621 setOperationAction(ISD::FMA , MVT::f64, Expand);
1622 setOperationAction(ISD::FSIN , MVT::f32, Expand);
1623 setOperationAction(ISD::FCOS , MVT::f32, Expand);
1624 setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
1625 setOperationAction(ISD::FREM , MVT::f32, Expand);
1626 setOperationAction(ISD::FMA , MVT::f32, Expand);
1627 setOperationAction(ISD::CTTZ , MVT::i32, Expand);
1628 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
1629 setOperationAction(ISD::CTLZ , MVT::i32, Expand);
1630 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
1631 setOperationAction(ISD::ROTL , MVT::i32, Expand);
1632 setOperationAction(ISD::ROTR , MVT::i32, Expand);
1633 setOperationAction(ISD::BSWAP, MVT::i32, Expand);
1634 setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
1635 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
1636 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
1637 setOperationAction(ISD::FPOW , MVT::f128, Expand);
1638 setOperationAction(ISD::FPOW , MVT::f64, Expand);
1639 setOperationAction(ISD::FPOW , MVT::f32, Expand);
1641 setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
1642 setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
1643 setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
1645 // FIXME: Sparc provides these multiplies, but we don't have them yet.
1646 setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
1647 setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
1649 if (Subtarget->is64Bit()) {
1650 setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
1651 setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
1652 setOperationAction(ISD::MULHU, MVT::i64, Expand);
1653 setOperationAction(ISD::MULHS, MVT::i64, Expand);
1655 setOperationAction(ISD::UMULO, MVT::i64, Custom);
1656 setOperationAction(ISD::SMULO, MVT::i64, Custom);
1658 setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
1659 setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
1660 setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
1663 // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
1664 setOperationAction(ISD::VASTART , MVT::Other, Custom);
1665 // VAARG needs to be custom lowered so it does not perform unaligned accesses for doubles.
1666 setOperationAction(ISD::VAARG , MVT::Other, Custom);
1668 setOperationAction(ISD::TRAP , MVT::Other, Legal);
1670 // Use the default implementation.
1671 setOperationAction(ISD::VACOPY , MVT::Other, Expand);
1672 setOperationAction(ISD::VAEND , MVT::Other, Expand);
1673 setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
1674 setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand);
1675 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom);
1677 setStackPointerRegisterToSaveRestore(SP::O6);
1679 setOperationAction(ISD::CTPOP, MVT::i32,
1680 Subtarget->usePopc() ? Legal : Expand);
1682 if (Subtarget->isV9() && Subtarget->hasHardQuad()) {
1683 setOperationAction(ISD::LOAD, MVT::f128, Legal);
1684 setOperationAction(ISD::STORE, MVT::f128, Legal);
1686 setOperationAction(ISD::LOAD, MVT::f128, Custom);
1687 setOperationAction(ISD::STORE, MVT::f128, Custom);
1690 if (Subtarget->hasHardQuad()) {
1691 setOperationAction(ISD::FADD, MVT::f128, Legal);
1692 setOperationAction(ISD::FSUB, MVT::f128, Legal);
1693 setOperationAction(ISD::FMUL, MVT::f128, Legal);
1694 setOperationAction(ISD::FDIV, MVT::f128, Legal);
1695 setOperationAction(ISD::FSQRT, MVT::f128, Legal);
1696 setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
1697 setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
1698 if (Subtarget->isV9()) {
1699 setOperationAction(ISD::FNEG, MVT::f128, Legal);
1700 setOperationAction(ISD::FABS, MVT::f128, Legal);
1702 setOperationAction(ISD::FNEG, MVT::f128, Custom);
1703 setOperationAction(ISD::FABS, MVT::f128, Custom);
1706 if (!Subtarget->is64Bit()) {
1707 setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
1708 setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
1709 setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
1710 setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
1714 // Custom legalize f128 operations.
1716 setOperationAction(ISD::FADD, MVT::f128, Custom);
1717 setOperationAction(ISD::FSUB, MVT::f128, Custom);
1718 setOperationAction(ISD::FMUL, MVT::f128, Custom);
1719 setOperationAction(ISD::FDIV, MVT::f128, Custom);
1720 setOperationAction(ISD::FSQRT, MVT::f128, Custom);
1721 setOperationAction(ISD::FNEG, MVT::f128, Custom);
1722 setOperationAction(ISD::FABS, MVT::f128, Custom);
1724 setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
1725 setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
1726 setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
1728 // Set up runtime library names.
1729 if (Subtarget->is64Bit()) {
1730 setLibcallName(RTLIB::ADD_F128, "_Qp_add");
1731 setLibcallName(RTLIB::SUB_F128, "_Qp_sub");
1732 setLibcallName(RTLIB::MUL_F128, "_Qp_mul");
1733 setLibcallName(RTLIB::DIV_F128, "_Qp_div");
1734 setLibcallName(RTLIB::SQRT_F128, "_Qp_sqrt");
1735 setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Qp_qtoi");
1736 setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Qp_qtoui");
1737 setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Qp_itoq");
1738 setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Qp_uitoq");
1739 setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Qp_qtox");
1740 setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Qp_qtoux");
1741 setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Qp_xtoq");
1742 setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Qp_uxtoq");
1743 setLibcallName(RTLIB::FPEXT_F32_F128, "_Qp_stoq");
1744 setLibcallName(RTLIB::FPEXT_F64_F128, "_Qp_dtoq");
1745 setLibcallName(RTLIB::FPROUND_F128_F32, "_Qp_qtos");
1746 setLibcallName(RTLIB::FPROUND_F128_F64, "_Qp_qtod");
1748 setLibcallName(RTLIB::ADD_F128, "_Q_add");
1749 setLibcallName(RTLIB::SUB_F128, "_Q_sub");
1750 setLibcallName(RTLIB::MUL_F128, "_Q_mul");
1751 setLibcallName(RTLIB::DIV_F128, "_Q_div");
1752 setLibcallName(RTLIB::SQRT_F128, "_Q_sqrt");
1753 setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Q_qtoi");
1754 setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Q_qtou");
1755 setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Q_itoq");
1756 setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Q_utoq");
1757 setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
1758 setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
1759 setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
1760 setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
1761 setLibcallName(RTLIB::FPEXT_F32_F128, "_Q_stoq");
1762 setLibcallName(RTLIB::FPEXT_F64_F128, "_Q_dtoq");
1763 setLibcallName(RTLIB::FPROUND_F128_F32, "_Q_qtos");
1764 setLibcallName(RTLIB::FPROUND_F128_F64, "_Q_qtod");
1768 setMinFunctionAlignment(2);
1770 computeRegisterProperties(Subtarget->getRegisterInfo());
1773 const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
1774 switch ((SPISD::NodeType)Opcode) {
1775 case SPISD::FIRST_NUMBER: break;
1776 case SPISD::CMPICC: return "SPISD::CMPICC";
1777 case SPISD::CMPFCC: return "SPISD::CMPFCC";
1778 case SPISD::BRICC: return "SPISD::BRICC";
1779 case SPISD::BRXCC: return "SPISD::BRXCC";
1780 case SPISD::BRFCC: return "SPISD::BRFCC";
1781 case SPISD::SELECT_ICC: return "SPISD::SELECT_ICC";
1782 case SPISD::SELECT_XCC: return "SPISD::SELECT_XCC";
1783 case SPISD::SELECT_FCC: return "SPISD::SELECT_FCC";
1784 case SPISD::Hi: return "SPISD::Hi";
1785 case SPISD::Lo: return "SPISD::Lo";
1786 case SPISD::FTOI: return "SPISD::FTOI";
1787 case SPISD::ITOF: return "SPISD::ITOF";
1788 case SPISD::FTOX: return "SPISD::FTOX";
1789 case SPISD::XTOF: return "SPISD::XTOF";
1790 case SPISD::CALL: return "SPISD::CALL";
1791 case SPISD::RET_FLAG: return "SPISD::RET_FLAG";
1792 case SPISD::GLOBAL_BASE_REG: return "SPISD::GLOBAL_BASE_REG";
1793 case SPISD::FLUSHW: return "SPISD::FLUSHW";
1794 case SPISD::TLS_ADD: return "SPISD::TLS_ADD";
1795 case SPISD::TLS_LD: return "SPISD::TLS_LD";
1796 case SPISD::TLS_CALL: return "SPISD::TLS_CALL";
1801 EVT SparcTargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &,
1805 return VT.changeVectorElementTypeToInteger();
1808 /// computeKnownBitsForTargetNode - Determine which bits of Op are known to be
1809 /// zero or one. Op is expected to be a target-specific node. Used by DAG combiner.
1811 void SparcTargetLowering::computeKnownBitsForTargetNode
1815 const SelectionDAG &DAG,
1816 unsigned Depth) const {
1817 APInt KnownZero2, KnownOne2;
1818 KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0);
1820 switch (Op.getOpcode()) {
1822 case SPISD::SELECT_ICC:
1823 case SPISD::SELECT_XCC:
1824 case SPISD::SELECT_FCC:
1825 DAG.computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
1826 DAG.computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1828 // Only known if known in both the LHS and RHS.
1829 KnownOne &= KnownOne2;
1830 KnownZero &= KnownZero2;
1835 // Look at LHS/RHS/CC and see if they are a lowered setcc instruction. If so,
1836 // set LHS/RHS to the operands of the original compare and SPCC to its condition.
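// For example, a setcc lowered to
//   (SELECT_ICC 1, 0, cond, (CMPICC a, b))
// that is then compared against zero can be folded back into a compare of
// (a, b) under condition 'cond'.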
1837 static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
1838 ISD::CondCode CC, unsigned &SPCC) {
1839 if (isNullConstant(RHS) &&
1841 (((LHS.getOpcode() == SPISD::SELECT_ICC ||
1842 LHS.getOpcode() == SPISD::SELECT_XCC) &&
1843 LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
1844 (LHS.getOpcode() == SPISD::SELECT_FCC &&
1845 LHS.getOperand(3).getOpcode() == SPISD::CMPFCC)) &&
1846 isOneConstant(LHS.getOperand(0)) &&
1847 isNullConstant(LHS.getOperand(1))) {
1848 SDValue CMPCC = LHS.getOperand(3);
1849 SPCC = cast<ConstantSDNode>(LHS.getOperand(2))->getZExtValue();
1850 LHS = CMPCC.getOperand(0);
1851 RHS = CMPCC.getOperand(1);
1855 // Convert to a target node and set target flags.
1856 SDValue SparcTargetLowering::withTargetFlags(SDValue Op, unsigned TF,
1857 SelectionDAG &DAG) const {
1858 if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
1859 return DAG.getTargetGlobalAddress(GA->getGlobal(),
1861 GA->getValueType(0),
1862 GA->getOffset(), TF);
1864 if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op))
1865 return DAG.getTargetConstantPool(CP->getConstVal(),
1866 CP->getValueType(0),
1868 CP->getOffset(), TF);
1870 if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op))
1871 return DAG.getTargetBlockAddress(BA->getBlockAddress(),
1876 if (const ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op))
1877 return DAG.getTargetExternalSymbol(ES->getSymbol(),
1878 ES->getValueType(0), TF);
1880 llvm_unreachable("Unhandled address SDNode");
1883 // Split Op into high and low parts according to HiTF and LoTF.
1884 // Return an ADD node combining the parts.
1885 SDValue SparcTargetLowering::makeHiLoPair(SDValue Op,
1886 unsigned HiTF, unsigned LoTF,
1887 SelectionDAG &DAG) const {
1889 EVT VT = Op.getValueType();
1890 SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG));
1891 SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG));
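// For the 32-bit absolute code model (HiTF = %hi, LoTF = %lo) the combined
// node typically selects to something like (register names are placeholders):
//   sethi %hi(sym), %tmp
//   add   %tmp, %lo(sym), %reg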
1892 return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
1895 // Build SDNodes for producing an address from a GlobalAddress, ConstantPool,
1896 // or ExternalSymbol SDNode.
1897 SDValue SparcTargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const {
1899 EVT VT = getPointerTy(DAG.getDataLayout());
1901 // Handle PIC mode first.
1902 if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
1903 // This is the pic32 code model; the GOT is known to be smaller than 4GB.
1904 SDValue HiLo = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_GOT22,
1905 SparcMCExpr::VK_Sparc_GOT10, DAG);
1906 SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT);
1907 SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, HiLo);
1908 // GLOBAL_BASE_REG is codegen'ed with a call. Inform MFI that this
1909 // function has calls.
1910 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
1911 MFI->setHasCalls(true);
1912 return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr,
1913 MachinePointerInfo::getGOT(DAG.getMachineFunction()),
1914 false, false, false, 0);
1917 // This is one of the absolute code models.
1918 switch(getTargetMachine().getCodeModel()) {
1920 llvm_unreachable("Unsupported absolute code model");
1921 case CodeModel::Small:
1923 return makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HI,
1924 SparcMCExpr::VK_Sparc_LO, DAG);
1925 case CodeModel::Medium: {
1927 SDValue H44 = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_H44,
1928 SparcMCExpr::VK_Sparc_M44, DAG);
1929 H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, DL, MVT::i32));
1930 SDValue L44 = withTargetFlags(Op, SparcMCExpr::VK_Sparc_L44, DAG);
1931 L44 = DAG.getNode(SPISD::Lo, DL, VT, L44);
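// Rough abs44 sequence this builds (register names are placeholders):
//   sethi %h44(sym), %t
//   add   %t, %m44(sym), %t
//   sllx  %t, 12, %t
//   add   %t, %l44(sym), %reg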
1932 return DAG.getNode(ISD::ADD, DL, VT, H44, L44);
1934 case CodeModel::Large: {
1936 SDValue Hi = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HH,
1937 SparcMCExpr::VK_Sparc_HM, DAG);
1938 Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, DL, MVT::i32));
1939 SDValue Lo = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HI,
1940 SparcMCExpr::VK_Sparc_LO, DAG);
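// Rough abs64 sequence this builds (register names are placeholders):
//   sethi %hh(sym), %t1 ; add %t1, %hm(sym), %t1 ; sllx %t1, 32, %t1
//   sethi %hi(sym), %t2 ; add %t2, %lo(sym), %t2
//   add   %t1, %t2, %reg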
1941 return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
1946 SDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op,
1947 SelectionDAG &DAG) const {
1948 return makeAddress(Op, DAG);
1951 SDValue SparcTargetLowering::LowerConstantPool(SDValue Op,
1952 SelectionDAG &DAG) const {
1953 return makeAddress(Op, DAG);
1956 SDValue SparcTargetLowering::LowerBlockAddress(SDValue Op,
1957 SelectionDAG &DAG) const {
1958 return makeAddress(Op, DAG);
1961 SDValue SparcTargetLowering::LowerGlobalTLSAddress(SDValue Op,
1962 SelectionDAG &DAG) const {
1964 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
1965 if (DAG.getTarget().Options.EmulatedTLS)
1966 return LowerToTLSEmulatedModel(GA, DAG);
1969 const GlobalValue *GV = GA->getGlobal();
1970 EVT PtrVT = getPointerTy(DAG.getDataLayout());
1972 TLSModel::Model model = getTargetMachine().getTLSModel(GV);
1974 if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
1975 unsigned HiTF = ((model == TLSModel::GeneralDynamic)
1976 ? SparcMCExpr::VK_Sparc_TLS_GD_HI22
1977 : SparcMCExpr::VK_Sparc_TLS_LDM_HI22);
1978 unsigned LoTF = ((model == TLSModel::GeneralDynamic)
1979 ? SparcMCExpr::VK_Sparc_TLS_GD_LO10
1980 : SparcMCExpr::VK_Sparc_TLS_LDM_LO10);
1981 unsigned addTF = ((model == TLSModel::GeneralDynamic)
1982 ? SparcMCExpr::VK_Sparc_TLS_GD_ADD
1983 : SparcMCExpr::VK_Sparc_TLS_LDM_ADD);
1984 unsigned callTF = ((model == TLSModel::GeneralDynamic)
1985 ? SparcMCExpr::VK_Sparc_TLS_GD_CALL
1986 : SparcMCExpr::VK_Sparc_TLS_LDM_CALL);
1988 SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG);
1989 SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
1990 SDValue Argument = DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Base, HiLo,
1991 withTargetFlags(Op, addTF, DAG));
1993 SDValue Chain = DAG.getEntryNode();
1996 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(1, DL, true), DL);
1997 Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InFlag);
1998 InFlag = Chain.getValue(1);
1999 SDValue Callee = DAG.getTargetExternalSymbol("__tls_get_addr", PtrVT);
2000 SDValue Symbol = withTargetFlags(Op, callTF, DAG);
2002 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2003 SmallVector<SDValue, 4> Ops;
2004 Ops.push_back(Chain);
2005 Ops.push_back(Callee);
2006 Ops.push_back(Symbol);
2007 Ops.push_back(DAG.getRegister(SP::O0, PtrVT));
2008 const uint32_t *Mask = Subtarget->getRegisterInfo()->getCallPreservedMask(
2009 DAG.getMachineFunction(), CallingConv::C);
2010 assert(Mask && "Missing call preserved mask for calling convention");
2011 Ops.push_back(DAG.getRegisterMask(Mask));
2012 Ops.push_back(InFlag);
2013 Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops);
2014 InFlag = Chain.getValue(1);
2015 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(1, DL, true),
2016 DAG.getIntPtrConstant(0, DL, true), InFlag, DL);
2017 InFlag = Chain.getValue(1);
2018 SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InFlag);
2020 if (model != TLSModel::LocalDynamic)
2023 SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
2024 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_HIX22, DAG));
2025 SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
2026 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_LOX10, DAG));
2027 HiLo = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
2028 return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Ret, HiLo,
2029 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_ADD, DAG));
2032 if (model == TLSModel::InitialExec) {
2033 unsigned ldTF = ((PtrVT == MVT::i64)? SparcMCExpr::VK_Sparc_TLS_IE_LDX
2034 : SparcMCExpr::VK_Sparc_TLS_IE_LD);
2036 SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
2038 // GLOBAL_BASE_REG is codegen'ed with a call. Inform MFI that this
2039 // function has calls.
2040 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
2041 MFI->setHasCalls(true);
2043 SDValue TGA = makeHiLoPair(Op,
2044 SparcMCExpr::VK_Sparc_TLS_IE_HI22,
2045 SparcMCExpr::VK_Sparc_TLS_IE_LO10, DAG);
2046 SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, TGA);
2047 SDValue Offset = DAG.getNode(SPISD::TLS_LD,
2049 withTargetFlags(Op, ldTF, DAG));
2050 return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT,
2051 DAG.getRegister(SP::G7, PtrVT), Offset,
2053 SparcMCExpr::VK_Sparc_TLS_IE_ADD, DAG));
2056 assert(model == TLSModel::LocalExec);
2057 SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
2058 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LE_HIX22, DAG));
2059 SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
2060 withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LE_LOX10, DAG));
2061 SDValue Offset = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
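// This is the usual SPARC local-exec TLS sequence, roughly:
//   sethi %tle_hix22(sym), %t
//   xor   %t, %tle_lox10(sym), %t
//   add   %g7, %t, %reg          ! %g7 holds the thread pointer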
2063 return DAG.getNode(ISD::ADD, DL, PtrVT,
2064 DAG.getRegister(SP::G7, PtrVT), Offset);
2068 SparcTargetLowering::LowerF128_LibCallArg(SDValue Chain, ArgListTy &Args,
2069 SDValue Arg, SDLoc DL,
2070 SelectionDAG &DAG) const {
2071 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
2072 EVT ArgVT = Arg.getValueType();
2073 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
2079 if (ArgTy->isFP128Ty()) {
2080 // Create a stack object and pass the pointer to the library function.
2081 int FI = MFI->CreateStackObject(16, 8, false);
2082 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
2083 Chain = DAG.getStore(Chain,
2087 MachinePointerInfo(),
2093 Entry.Ty = PointerType::getUnqual(ArgTy);
2095 Args.push_back(Entry);
2100 SparcTargetLowering::LowerF128Op(SDValue Op, SelectionDAG &DAG,
2101 const char *LibFuncName,
2102 unsigned numArgs) const {
2106 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
2107 auto PtrVT = getPointerTy(DAG.getDataLayout());
2109 SDValue Callee = DAG.getExternalSymbol(LibFuncName, PtrVT);
2110 Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
2111 Type *RetTyABI = RetTy;
2112 SDValue Chain = DAG.getEntryNode();
2115 if (RetTy->isFP128Ty()) {
2116 // Create a stack object to receive the return value of type f128.
2118 int RetFI = MFI->CreateStackObject(16, 8, false);
2119 RetPtr = DAG.getFrameIndex(RetFI, PtrVT);
2120 Entry.Node = RetPtr;
2121 Entry.Ty = PointerType::getUnqual(RetTy);
2122 if (!Subtarget->is64Bit())
2123 Entry.isSRet = true;
2124 Entry.isReturned = false;
2125 Args.push_back(Entry);
2126 RetTyABI = Type::getVoidTy(*DAG.getContext());
2129 assert(Op->getNumOperands() >= numArgs && "Not enough operands!");
2130 for (unsigned i = 0, e = numArgs; i != e; ++i) {
2131 Chain = LowerF128_LibCallArg(Chain, Args, Op.getOperand(i), SDLoc(Op), DAG);
2133 TargetLowering::CallLoweringInfo CLI(DAG);
2134 CLI.setDebugLoc(SDLoc(Op)).setChain(Chain)
2135 .setCallee(CallingConv::C, RetTyABI, Callee, std::move(Args), 0);
2137 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2139 // The chain is in the second result.
2140 if (RetTyABI == RetTy)
2141 return CallInfo.first;
2143 assert (RetTy->isFP128Ty() && "Unexpected return type!");
2145 Chain = CallInfo.second;
2147 // Load RetPtr to get the return value.
2148 return DAG.getLoad(Op.getValueType(),
2152 MachinePointerInfo(),
2153 false, false, false, 8);
2157 SparcTargetLowering::LowerF128Compare(SDValue LHS, SDValue RHS,
2160 SelectionDAG &DAG) const {
2162 const char *LibCall = nullptr;
2163 bool is64Bit = Subtarget->is64Bit();
2165 default: llvm_unreachable("Unhandled conditional code!");
2166 case SPCC::FCC_E : LibCall = is64Bit? "_Qp_feq" : "_Q_feq"; break;
2167 case SPCC::FCC_NE : LibCall = is64Bit? "_Qp_fne" : "_Q_fne"; break;
2168 case SPCC::FCC_L : LibCall = is64Bit? "_Qp_flt" : "_Q_flt"; break;
2169 case SPCC::FCC_G : LibCall = is64Bit? "_Qp_fgt" : "_Q_fgt"; break;
2170 case SPCC::FCC_LE : LibCall = is64Bit? "_Qp_fle" : "_Q_fle"; break;
2171 case SPCC::FCC_GE : LibCall = is64Bit? "_Qp_fge" : "_Q_fge"; break;
2179 case SPCC::FCC_UE : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
2182 auto PtrVT = getPointerTy(DAG.getDataLayout());
2183 SDValue Callee = DAG.getExternalSymbol(LibCall, PtrVT);
2184 Type *RetTy = Type::getInt32Ty(*DAG.getContext());
2186 SDValue Chain = DAG.getEntryNode();
2187 Chain = LowerF128_LibCallArg(Chain, Args, LHS, DL, DAG);
2188 Chain = LowerF128_LibCallArg(Chain, Args, RHS, DL, DAG);
2190 TargetLowering::CallLoweringInfo CLI(DAG);
2191 CLI.setDebugLoc(DL).setChain(Chain)
2192 .setCallee(CallingConv::C, RetTy, Callee, std::move(Args), 0);
2194 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2196 // The result is in the first result, and the chain is in the second.
2197 SDValue Result = CallInfo.first;
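// The cases below assume the usual encoding of the comparison libcall result:
// 0 = equal, 1 = less, 2 = greater, 3 = unordered.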
2201 SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType());
2202 SPCC = SPCC::ICC_NE;
2203 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2205 case SPCC::FCC_UL : {
2206 SDValue Mask = DAG.getTargetConstant(1, DL, Result.getValueType());
2207 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2208 SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType());
2209 SPCC = SPCC::ICC_NE;
2210 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2212 case SPCC::FCC_ULE: {
2213 SDValue RHS = DAG.getTargetConstant(2, DL, Result.getValueType());
2214 SPCC = SPCC::ICC_NE;
2215 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2217 case SPCC::FCC_UG : {
2218 SDValue RHS = DAG.getTargetConstant(1, DL, Result.getValueType());
2220 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2222 case SPCC::FCC_UGE: {
2223 SDValue RHS = DAG.getTargetConstant(1, DL, Result.getValueType());
2224 SPCC = SPCC::ICC_NE;
2225 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2228 case SPCC::FCC_U : {
2229 SDValue RHS = DAG.getTargetConstant(3, DL, Result.getValueType());
2231 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2233 case SPCC::FCC_O : {
2234 SDValue RHS = DAG.getTargetConstant(3, DL, Result.getValueType());
2235 SPCC = SPCC::ICC_NE;
2236 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2238 case SPCC::FCC_LG : {
2239 SDValue Mask = DAG.getTargetConstant(3, DL, Result.getValueType());
2240 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2241 SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType());
2242 SPCC = SPCC::ICC_NE;
2243 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2245 case SPCC::FCC_UE : {
2246 SDValue Mask = DAG.getTargetConstant(3, DL, Result.getValueType());
2247 Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
2248 SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType());
2250 return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
2256 LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG,
2257 const SparcTargetLowering &TLI) {
2259 if (Op.getOperand(0).getValueType() == MVT::f64)
2260 return TLI.LowerF128Op(Op, DAG,
2261 TLI.getLibcallName(RTLIB::FPEXT_F64_F128), 1);
2263 if (Op.getOperand(0).getValueType() == MVT::f32)
2264 return TLI.LowerF128Op(Op, DAG,
2265 TLI.getLibcallName(RTLIB::FPEXT_F32_F128), 1);
2267 llvm_unreachable("fpextend with non-float operand!");
2272 LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG,
2273 const SparcTargetLowering &TLI) {
2274 // FP_ROUND on f64 and f32 is legal.
2275 if (Op.getOperand(0).getValueType() != MVT::f128)
2278 if (Op.getValueType() == MVT::f64)
2279 return TLI.LowerF128Op(Op, DAG,
2280 TLI.getLibcallName(RTLIB::FPROUND_F128_F64), 1);
2281 if (Op.getValueType() == MVT::f32)
2282 return TLI.LowerF128Op(Op, DAG,
2283 TLI.getLibcallName(RTLIB::FPROUND_F128_F32), 1);
2285 llvm_unreachable("fpround to non-float!");
2289 static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG,
2290 const SparcTargetLowering &TLI,
2293 EVT VT = Op.getValueType();
2294 assert(VT == MVT::i32 || VT == MVT::i64);
2296 // Expand f128 operations to fp128 ABI calls.
2297 if (Op.getOperand(0).getValueType() == MVT::f128
2298 && (!hasHardQuad || !TLI.isTypeLegal(VT))) {
2299 const char *libName = TLI.getLibcallName(VT == MVT::i32
2300 ? RTLIB::FPTOSINT_F128_I32
2301 : RTLIB::FPTOSINT_F128_I64);
2302 return TLI.LowerF128Op(Op, DAG, libName, 1);
2305 // Expand if the resulting type is illegal.
2306 if (!TLI.isTypeLegal(VT))
2309 // Otherwise, convert the FP value to an integer in an FP register.
2311 Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
2313 Op = DAG.getNode(SPISD::FTOX, dl, MVT::f64, Op.getOperand(0));
2315 return DAG.getNode(ISD::BITCAST, dl, VT, Op);
2318 static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG,
2319 const SparcTargetLowering &TLI,
2322 EVT OpVT = Op.getOperand(0).getValueType();
2323 assert(OpVT == MVT::i32 || (OpVT == MVT::i64));
2325 EVT floatVT = (OpVT == MVT::i32) ? MVT::f32 : MVT::f64;
2327 // Expand f128 operations to fp128 ABI calls.
2328 if (Op.getValueType() == MVT::f128
2329 && (!hasHardQuad || !TLI.isTypeLegal(OpVT))) {
2330 const char *libName = TLI.getLibcallName(OpVT == MVT::i32
2331 ? RTLIB::SINTTOFP_I32_F128
2332 : RTLIB::SINTTOFP_I64_F128);
2333 return TLI.LowerF128Op(Op, DAG, libName, 1);
2336 // Expand if the operand type is illegal.
2337 if (!TLI.isTypeLegal(OpVT))
2340 // Otherwise, convert the integer value to FP in an FP register.
2341 SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0));
2342 unsigned opcode = (OpVT == MVT::i32)? SPISD::ITOF : SPISD::XTOF;
2343 return DAG.getNode(opcode, dl, Op.getValueType(), Tmp);
2346 static SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG,
2347 const SparcTargetLowering &TLI,
2350 EVT VT = Op.getValueType();
2352 // Expand if it does not involve f128 or the target has support for
2353 // quad floating point instructions and the resulting type is legal.
2354 if (Op.getOperand(0).getValueType() != MVT::f128 ||
2355 (hasHardQuad && TLI.isTypeLegal(VT)))
2358 assert(VT == MVT::i32 || VT == MVT::i64);
2360 return TLI.LowerF128Op(Op, DAG,
2361 TLI.getLibcallName(VT == MVT::i32
2362 ? RTLIB::FPTOUINT_F128_I32
2363 : RTLIB::FPTOUINT_F128_I64),
2367 static SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG,
2368 const SparcTargetLowering &TLI,
2371 EVT OpVT = Op.getOperand(0).getValueType();
2372 assert(OpVT == MVT::i32 || OpVT == MVT::i64);
2374 // Expand if it does not involve f128 or the target has support for
2375 // quad floating point instructions and the operand type is legal.
2376 if (Op.getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(OpVT)))
2379 return TLI.LowerF128Op(Op, DAG,
2380 TLI.getLibcallName(OpVT == MVT::i32
2381 ? RTLIB::UINTTOFP_I32_F128
2382 : RTLIB::UINTTOFP_I64_F128),
2386 static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG,
2387 const SparcTargetLowering &TLI,
2389 SDValue Chain = Op.getOperand(0);
2390 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
2391 SDValue LHS = Op.getOperand(2);
2392 SDValue RHS = Op.getOperand(3);
2393 SDValue Dest = Op.getOperand(4);
2395 unsigned Opc, SPCC = ~0U;
2397 // If this is a br_cc of a "setcc", and if the setcc got lowered into
2398 // a CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2399 LookThroughSetCC(LHS, RHS, CC, SPCC);
2401 // Get the condition flag.
2402 SDValue CompareFlag;
2403 if (LHS.getValueType().isInteger()) {
2404 CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2405 if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2406 // 32-bit compares use the icc flags, 64-bit uses the xcc flags.
2407 Opc = LHS.getValueType() == MVT::i32 ? SPISD::BRICC : SPISD::BRXCC;
2409 if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2410 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2411 CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
2414 CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
2415 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2419 return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
2420 DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2423 static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
2424 const SparcTargetLowering &TLI,
2426 SDValue LHS = Op.getOperand(0);
2427 SDValue RHS = Op.getOperand(1);
2428 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
2429 SDValue TrueVal = Op.getOperand(2);
2430 SDValue FalseVal = Op.getOperand(3);
2432 unsigned Opc, SPCC = ~0U;
2434 // If this is a select_cc of a "setcc", and if the setcc got lowered into
2435 // a CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
2436 LookThroughSetCC(LHS, RHS, CC, SPCC);
2438 SDValue CompareFlag;
2439 if (LHS.getValueType().isInteger()) {
2440 CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
2441 Opc = LHS.getValueType() == MVT::i32 ?
2442 SPISD::SELECT_ICC : SPISD::SELECT_XCC;
2443 if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
2445 if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
2446 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2447 CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
2448 Opc = SPISD::SELECT_ICC;
2450 CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
2451 Opc = SPISD::SELECT_FCC;
2452 if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
2455 return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
2456 DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
2459 static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
2460 const SparcTargetLowering &TLI) {
2461 MachineFunction &MF = DAG.getMachineFunction();
2462 SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
2463 auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2465 // Need frame address to find the address of VarArgsFrameIndex.
2466 MF.getFrameInfo()->setFrameAddressIsTaken(true);
2468 // vastart just stores the address of the VarArgsFrameIndex slot into the
2469 // memory location argument.
2472 DAG.getNode(ISD::ADD, DL, PtrVT, DAG.getRegister(SP::I6, PtrVT),
2473 DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset(), DL));
2474 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2475 return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1),
2476 MachinePointerInfo(SV), false, false, 0);
2479 static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) {
2480 SDNode *Node = Op.getNode();
2481 EVT VT = Node->getValueType(0);
2482 SDValue InChain = Node->getOperand(0);
2483 SDValue VAListPtr = Node->getOperand(1);
2484 EVT PtrVT = VAListPtr.getValueType();
2485 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2487 SDValue VAList = DAG.getLoad(PtrVT, DL, InChain, VAListPtr,
2488 MachinePointerInfo(SV), false, false, false, 0);
2489 // Increment the pointer, VAList, to the next vaarg.
2490 SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
2491 DAG.getIntPtrConstant(VT.getSizeInBits()/8,
2493 // Store the incremented VAList to the legalized pointer.
2494 InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr,
2495 VAListPtr, MachinePointerInfo(SV), false, false, 0);
2496 // Load the actual argument out of the pointer VAList.
2497 // We can't count on greater alignment than the word size.
2498 return DAG.getLoad(VT, DL, InChain, VAList, MachinePointerInfo(),
2499 false, false, false,
2500 std::min(PtrVT.getSizeInBits(), VT.getSizeInBits())/8);
2503 static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG,
2504 const SparcSubtarget *Subtarget) {
2505 SDValue Chain = Op.getOperand(0); // Legalize the chain.
2506 SDValue Size = Op.getOperand(1); // Legalize the size.
2507 EVT VT = Size->getValueType(0);
2510 unsigned SPReg = SP::O6;
2511 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
2512 SDValue NewSP = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
2513 Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP); // Output chain
2515 // The resultant pointer is actually 16 words from the bottom of the stack,
2516 // to provide a register spill area.
2517 unsigned regSpillArea = Subtarget->is64Bit() ? 128 : 96;
2518 regSpillArea += Subtarget->getStackPointerBias();
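// A register window overflow trap may spill the current window's %l and %i
// registers to the save area at the bottom of the frame at any time, so the
// dynamically allocated memory must start above it (plus the stack bias on
// 64-bit targets).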
2520 SDValue NewVal = DAG.getNode(ISD::ADD, dl, VT, NewSP,
2521 DAG.getConstant(regSpillArea, dl, VT));
2522 SDValue Ops[2] = { NewVal, Chain };
2523 return DAG.getMergeValues(Ops, dl);
2527 static SDValue getFLUSHW(SDValue Op, SelectionDAG &DAG) {
2529 SDValue Chain = DAG.getNode(SPISD::FLUSHW,
2530 dl, MVT::Other, DAG.getEntryNode());
2534 static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG,
2535 const SparcSubtarget *Subtarget) {
2536 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
2537 MFI->setFrameAddressIsTaken(true);
2539 EVT VT = Op.getValueType();
2541 unsigned FrameReg = SP::I6;
2542 unsigned stackBias = Subtarget->getStackPointerBias();
2547 FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
2548 if (Subtarget->is64Bit())
2549 FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2550 DAG.getIntPtrConstant(stackBias, dl));
2554 // Flush first to make sure the windowed registers' values are on the stack.
2555 SDValue Chain = getFLUSHW(Op, DAG);
2556 FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT);
2558 unsigned Offset = (Subtarget->is64Bit()) ? (stackBias + 112) : 56;
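// The caller's frame pointer is %i6, stored at word 14 of the 16-word register
// window save area: offset 14*4 = 56 on 32-bit, BIAS + 14*8 = BIAS + 112 on
// 64-bit.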
2561 SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2562 DAG.getIntPtrConstant(Offset, dl));
2563 FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo(),
2564 false, false, false, 0);
2566 if (Subtarget->is64Bit())
2567 FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
2568 DAG.getIntPtrConstant(stackBias, dl));
2573 static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG,
2574 const SparcSubtarget *Subtarget) {
2576 uint64_t depth = Op.getConstantOperandVal(0);
2578 return getFRAMEADDR(depth, Op, DAG, Subtarget);
2582 static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG,
2583 const SparcTargetLowering &TLI,
2584 const SparcSubtarget *Subtarget) {
2585 MachineFunction &MF = DAG.getMachineFunction();
2586 MachineFrameInfo *MFI = MF.getFrameInfo();
2587 MFI->setReturnAddressIsTaken(true);
2589 if (TLI.verifyReturnAddressArgumentIsConstant(Op, DAG))
2592 EVT VT = Op.getValueType();
2594 uint64_t depth = Op.getConstantOperandVal(0);
2598 auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
2599 unsigned RetReg = MF.addLiveIn(SP::I7, TLI.getRegClassFor(PtrVT));
2600 RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT);
2604 // Need the frame address to find the return address of the caller.
2605 SDValue FrameAddr = getFRAMEADDR(depth - 1, Op, DAG, Subtarget);
2607 unsigned Offset = (Subtarget->is64Bit()) ? 120 : 60;
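// The return address is %i7, stored at word 15 of the register window save
// area: offset 15*4 = 60 on 32-bit, 15*8 = 120 on 64-bit (FrameAddr already
// includes the stack bias there).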
2608 SDValue Ptr = DAG.getNode(ISD::ADD,
2611 DAG.getIntPtrConstant(Offset, dl));
2612 RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr,
2613 MachinePointerInfo(), false, false, false, 0);
2618 static SDValue LowerF64Op(SDValue Op, SelectionDAG &DAG, unsigned opcode)
2622 assert(Op.getValueType() == MVT::f64 && "LowerF64Op called on non-double!");
2623 assert(opcode == ISD::FNEG || opcode == ISD::FABS);
2625 // Lower fneg/fabs on f64 to fneg/fabs on f32.
2626 // fneg f64 => fneg f32:sub_even, fmov f32:sub_odd.
2627 // fabs f64 => fabs f32:sub_even, fmov f32:sub_odd.
2629 SDValue SrcReg64 = Op.getOperand(0);
2630 SDValue Hi32 = DAG.getTargetExtractSubreg(SP::sub_even, dl, MVT::f32,
2632 SDValue Lo32 = DAG.getTargetExtractSubreg(SP::sub_odd, dl, MVT::f32,
2635 Hi32 = DAG.getNode(opcode, dl, MVT::f32, Hi32);
2637 SDValue DstReg64 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2639 DstReg64 = DAG.getTargetInsertSubreg(SP::sub_even, dl, MVT::f64,
2641 DstReg64 = DAG.getTargetInsertSubreg(SP::sub_odd, dl, MVT::f64,
2646 // Lower an f128 load into two f64 loads.
2647 static SDValue LowerF128Load(SDValue Op, SelectionDAG &DAG)
2650 LoadSDNode *LdNode = dyn_cast<LoadSDNode>(Op.getNode());
2651 assert(LdNode && LdNode->getOffset().getOpcode() == ISD::UNDEF
2652 && "Unexpected node type");
2654 unsigned alignment = LdNode->getAlignment();
2658 SDValue Hi64 = DAG.getLoad(MVT::f64,
2661 LdNode->getBasePtr(),
2662 LdNode->getPointerInfo(),
2663 false, false, false, alignment);
2664 EVT addrVT = LdNode->getBasePtr().getValueType();
2665 SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
2666 LdNode->getBasePtr(),
2667 DAG.getConstant(8, dl, addrVT));
2668 SDValue Lo64 = DAG.getLoad(MVT::f64,
2672 LdNode->getPointerInfo(),
2673 false, false, false, alignment);
2675 SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
2676 SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
2678 SDNode *InFP128 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2680 InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2682 SDValue(InFP128, 0),
2685 InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
2687 SDValue(InFP128, 0),
2690 SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1),
2691 SDValue(Lo64.getNode(), 1) };
2692 SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
2693 SDValue Ops[2] = {SDValue(InFP128,0), OutChain};
2694 return DAG.getMergeValues(Ops, dl);
2697 static SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG)
2699 LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
2701 EVT MemVT = LdNode->getMemoryVT();
2702 if (MemVT == MVT::f128)
2703 return LowerF128Load(Op, DAG);
2709 // Lower an f128 store into two f64 stores.
2709 static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG) {
2711 StoreSDNode *StNode = dyn_cast<StoreSDNode>(Op.getNode());
2712 assert(StNode && StNode->getOffset().getOpcode() == ISD::UNDEF
2713 && "Unexpected node type");
2714 SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
2715 SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
2717 SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
2722 SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
2728 unsigned alignment = StNode->getAlignment();
2732 SDValue OutChains[2];
2733 OutChains[0] = DAG.getStore(StNode->getChain(),
2736 StNode->getBasePtr(),
2737 MachinePointerInfo(),
2738 false, false, alignment);
2739 EVT addrVT = StNode->getBasePtr().getValueType();
2740 SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
2741 StNode->getBasePtr(),
2742 DAG.getConstant(8, dl, addrVT));
2743 OutChains[1] = DAG.getStore(StNode->getChain(),
2747 MachinePointerInfo(),
2748 false, false, alignment);
2749 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
2752 static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG)
2755 StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
2757 EVT MemVT = St->getMemoryVT();
2758 if (MemVT == MVT::f128)
2759 return LowerF128Store(Op, DAG);
2761 if (MemVT == MVT::i64) {
2762 // Custom handling for i64 stores: turn the store into a bitcast and a v2i32 store.
2764 SDValue Val = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, St->getValue());
2765 SDValue Chain = DAG.getStore(
2766 St->getChain(), dl, Val, St->getBasePtr(), St->getPointerInfo(),
2767 St->isVolatile(), St->isNonTemporal(), St->getAlignment(),
2775 static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9) {
2776 assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS)
2777 && "invalid opcode");
2779 if (Op.getValueType() == MVT::f64)
2780 return LowerF64Op(Op, DAG, Op.getOpcode());
2781 if (Op.getValueType() != MVT::f128)
2784 // Lower fabs/fneg on f128 to fabs/fneg on f64
2785 // fabs/fneg f128 => fabs/fneg f64:sub_even64, fmov f64:sub_odd64
2788 SDValue SrcReg128 = Op.getOperand(0);
2789 SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64,
2791 SDValue Lo64 = DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64,
2794 Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64);
2796 Hi64 = LowerF64Op(Hi64, DAG, Op.getOpcode());
2798 SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
2800 DstReg128 = DAG.getTargetInsertSubreg(SP::sub_even64, dl, MVT::f128,
2802 DstReg128 = DAG.getTargetInsertSubreg(SP::sub_odd64, dl, MVT::f128,
2807 static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
2809 if (Op.getValueType() != MVT::i64)
2813 SDValue Src1 = Op.getOperand(0);
2814 SDValue Src1Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1);
2815 SDValue Src1Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src1,
2816 DAG.getConstant(32, dl, MVT::i64));
2817 Src1Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1Hi);
2819 SDValue Src2 = Op.getOperand(1);
2820 SDValue Src2Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2);
2821 SDValue Src2Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src2,
2822 DAG.getConstant(32, dl, MVT::i64));
2823 Src2Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2Hi);
2826 bool hasChain = false;
2827 unsigned hiOpc = Op.getOpcode();
2828 switch (Op.getOpcode()) {
2829 default: llvm_unreachable("Invalid opcode");
2830 case ISD::ADDC: hiOpc = ISD::ADDE; break;
2831 case ISD::ADDE: hasChain = true; break;
2832 case ISD::SUBC: hiOpc = ISD::SUBE; break;
2833 case ISD::SUBE: hasChain = true; break;
2836 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Glue);
2838 Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo,
2841 Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo);
2843 SDValue Hi = DAG.getNode(hiOpc, dl, VTs, Src1Hi, Src2Hi, Lo.getValue(1));
2844 SDValue Carry = Hi.getValue(1);
2846 Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Lo);
2847 Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Hi);
2848 Hi = DAG.getNode(ISD::SHL, dl, MVT::i64, Hi,
2849 DAG.getConstant(32, dl, MVT::i64));
2851 SDValue Dst = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, Lo);
2852 SDValue Ops[2] = { Dst, Carry };
2853 return DAG.getMergeValues(Ops, dl);
2856 // Custom lower UMULO/SMULO for SPARC. This code is similar to ExpandNode()
2857 // in LegalizeDAG.cpp except the order of arguments to the library function.
2858 static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG,
2859 const SparcTargetLowering &TLI)
2861 unsigned opcode = Op.getOpcode();
2862 assert((opcode == ISD::UMULO || opcode == ISD::SMULO) && "Invalid Opcode.");
2864 bool isSigned = (opcode == ISD::SMULO);
2866 EVT WideVT = MVT::i128;
2868 SDValue LHS = Op.getOperand(0);
2870 if (LHS.getValueType() != VT)
2873 SDValue ShiftAmt = DAG.getConstant(63, dl, VT);
2875 SDValue RHS = Op.getOperand(1);
2876 SDValue HiLHS = DAG.getNode(ISD::SRA, dl, VT, LHS, ShiftAmt);
2877 SDValue HiRHS = DAG.getNode(ISD::SRA, dl, MVT::i64, RHS, ShiftAmt);
2878 SDValue Args[] = { HiLHS, LHS, HiRHS, RHS };
2880 SDValue MulResult = TLI.makeLibCall(DAG,
2881 RTLIB::MUL_I128, WideVT,
2882 Args, isSigned, dl).first;
2883 SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
2884 MulResult, DAG.getIntPtrConstant(0, dl));
2885 SDValue TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
2886 MulResult, DAG.getIntPtrConstant(1, dl));
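// Overflow is then flagged when the high half of the 128-bit product differs
// from the sign extension of the low half (SMULO) or from zero (UMULO).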
2888 SDValue Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt);
2889 TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, Tmp1, ISD::SETNE);
2891 TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, DAG.getConstant(0, dl, VT),
2894 // MulResult is a node with an illegal type. Because such things are not
2895 // generally permitted during this phase of legalization, ensure that
2896 // nothing is left using the node. The above EXTRACT_ELEMENT nodes should have been folded away.
2898 assert(MulResult->use_empty() && "Illegally typed node still in use!");
2900 SDValue Ops[2] = { BottomHalf, TopHalf } ;
2901 return DAG.getMergeValues(Ops, dl);
2904 static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) {
2905 // Monotonic load/stores are legal.
2906 if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic)
2909 // Otherwise, expand with a fence.
2913 SDValue SparcTargetLowering::
2914 LowerOperation(SDValue Op, SelectionDAG &DAG) const {
2916 bool hasHardQuad = Subtarget->hasHardQuad();
2917 bool isV9 = Subtarget->isV9();
2919 switch (Op.getOpcode()) {
2920 default: llvm_unreachable("Should not custom lower this!");
2922 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG, *this,
2924 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG,
2926 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
2927 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
2928 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
2929 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
2930 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG, *this,
2932 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG, *this,
2934 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG, *this,
2936 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG, *this,
2938 case ISD::BR_CC: return LowerBR_CC(Op, DAG, *this,
2940 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG, *this,
2942 case ISD::VASTART: return LowerVASTART(Op, DAG, *this);
2943 case ISD::VAARG: return LowerVAARG(Op, DAG);
2944 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG,
2947 case ISD::LOAD: return LowerLOAD(Op, DAG);
2948 case ISD::STORE: return LowerSTORE(Op, DAG);
2949 case ISD::FADD: return LowerF128Op(Op, DAG,
2950 getLibcallName(RTLIB::ADD_F128), 2);
2951 case ISD::FSUB: return LowerF128Op(Op, DAG,
2952 getLibcallName(RTLIB::SUB_F128), 2);
2953 case ISD::FMUL: return LowerF128Op(Op, DAG,
2954 getLibcallName(RTLIB::MUL_F128), 2);
2955 case ISD::FDIV: return LowerF128Op(Op, DAG,
2956 getLibcallName(RTLIB::DIV_F128), 2);
2957 case ISD::FSQRT: return LowerF128Op(Op, DAG,
2958 getLibcallName(RTLIB::SQRT_F128),1);
2960 case ISD::FNEG: return LowerFNEGorFABS(Op, DAG, isV9);
2961 case ISD::FP_EXTEND: return LowerF128_FPEXTEND(Op, DAG, *this);
2962 case ISD::FP_ROUND: return LowerF128_FPROUND(Op, DAG, *this);
2966 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
2968 case ISD::SMULO: return LowerUMULO_SMULO(Op, DAG, *this);
2969 case ISD::ATOMIC_LOAD:
2970 case ISD::ATOMIC_STORE: return LowerATOMIC_LOAD_STORE(Op, DAG);
2975 SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
2976 MachineBasicBlock *BB) const {
2977 switch (MI->getOpcode()) {
2978 default: llvm_unreachable("Unknown SELECT_CC!");
2979 case SP::SELECT_CC_Int_ICC:
2980 case SP::SELECT_CC_FP_ICC:
2981 case SP::SELECT_CC_DFP_ICC:
2982 case SP::SELECT_CC_QFP_ICC:
2983 return expandSelectCC(MI, BB, SP::BCOND);
2984 case SP::SELECT_CC_Int_FCC:
2985 case SP::SELECT_CC_FP_FCC:
2986 case SP::SELECT_CC_DFP_FCC:
2987 case SP::SELECT_CC_QFP_FCC:
2988 return expandSelectCC(MI, BB, SP::FBCOND);
2990 case SP::ATOMIC_LOAD_ADD_32:
2991 return expandAtomicRMW(MI, BB, SP::ADDrr);
2992 case SP::ATOMIC_LOAD_ADD_64:
2993 return expandAtomicRMW(MI, BB, SP::ADDXrr);
2994 case SP::ATOMIC_LOAD_SUB_32:
2995 return expandAtomicRMW(MI, BB, SP::SUBrr);
2996 case SP::ATOMIC_LOAD_SUB_64:
2997 return expandAtomicRMW(MI, BB, SP::SUBXrr);
2998 case SP::ATOMIC_LOAD_AND_32:
2999 return expandAtomicRMW(MI, BB, SP::ANDrr);
3000 case SP::ATOMIC_LOAD_AND_64:
3001 return expandAtomicRMW(MI, BB, SP::ANDXrr);
3002 case SP::ATOMIC_LOAD_OR_32:
3003 return expandAtomicRMW(MI, BB, SP::ORrr);
3004 case SP::ATOMIC_LOAD_OR_64:
3005 return expandAtomicRMW(MI, BB, SP::ORXrr);
3006 case SP::ATOMIC_LOAD_XOR_32:
3007 return expandAtomicRMW(MI, BB, SP::XORrr);
3008 case SP::ATOMIC_LOAD_XOR_64:
3009 return expandAtomicRMW(MI, BB, SP::XORXrr);
3010 case SP::ATOMIC_LOAD_NAND_32:
3011 return expandAtomicRMW(MI, BB, SP::ANDrr);
3012 case SP::ATOMIC_LOAD_NAND_64:
3013 return expandAtomicRMW(MI, BB, SP::ANDXrr);
3015 case SP::ATOMIC_SWAP_64:
3016 return expandAtomicRMW(MI, BB, 0);
3018 case SP::ATOMIC_LOAD_MAX_32:
3019 return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_G);
3020 case SP::ATOMIC_LOAD_MAX_64:
3021 return expandAtomicRMW(MI, BB, SP::MOVXCCrr, SPCC::ICC_G);
3022 case SP::ATOMIC_LOAD_MIN_32:
3023 return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_LE);
3024 case SP::ATOMIC_LOAD_MIN_64:
3025 return expandAtomicRMW(MI, BB, SP::MOVXCCrr, SPCC::ICC_LE);
3026 case SP::ATOMIC_LOAD_UMAX_32:
3027 return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_GU);
3028 case SP::ATOMIC_LOAD_UMAX_64:
3029 return expandAtomicRMW(MI, BB, SP::MOVXCCrr, SPCC::ICC_GU);
3030 case SP::ATOMIC_LOAD_UMIN_32:
3031 return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_LEU);
3032 case SP::ATOMIC_LOAD_UMIN_64:
3033 return expandAtomicRMW(MI, BB, SP::MOVXCCrr, SPCC::ICC_LEU);
3038 SparcTargetLowering::expandSelectCC(MachineInstr *MI,
3039 MachineBasicBlock *BB,
3040 unsigned BROpcode) const {
3041 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
3042 DebugLoc dl = MI->getDebugLoc();
3043 unsigned CC = (SPCC::CondCodes)MI->getOperand(3).getImm();
3045 // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
3046 // control-flow pattern. The incoming instruction knows the destination vreg
3047 // to set, the condition code register to branch on, the true/false values to
3048 // select between, and a branch opcode to use.
3049 const BasicBlock *LLVM_BB = BB->getBasicBlock();
3050 MachineFunction::iterator It = ++BB->getIterator();
3056 // fallthrough --> copy0MBB
3057 MachineBasicBlock *thisMBB = BB;
3058 MachineFunction *F = BB->getParent();
3059 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
3060 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
3061 F->insert(It, copy0MBB);
3062 F->insert(It, sinkMBB);
3064 // Transfer the remainder of BB and its successor edges to sinkMBB.
3065 sinkMBB->splice(sinkMBB->begin(), BB,
3066 std::next(MachineBasicBlock::iterator(MI)),
3068 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
3070 // Add the true and fallthrough blocks as its successors.
3071 BB->addSuccessor(copy0MBB);
3072 BB->addSuccessor(sinkMBB);
3074 BuildMI(BB, dl, TII.get(BROpcode)).addMBB(sinkMBB).addImm(CC);
3077 // %FalseValue = ...
3078 // # fallthrough to sinkMBB
3081 // Update machine-CFG edges
3082 BB->addSuccessor(sinkMBB);
3085 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
3088 BuildMI(*BB, BB->begin(), dl, TII.get(SP::PHI), MI->getOperand(0).getReg())
3089 .addReg(MI->getOperand(2).getReg()).addMBB(copy0MBB)
3090 .addReg(MI->getOperand(1).getReg()).addMBB(thisMBB);
3092 MI->eraseFromParent(); // The pseudo instruction is gone now.
3097 SparcTargetLowering::expandAtomicRMW(MachineInstr *MI,
3098 MachineBasicBlock *MBB,
3100 unsigned CondCode) const {
3101 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
3102 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
3103 DebugLoc DL = MI->getDebugLoc();
3105 // MI is an atomic read-modify-write instruction of the form:
3107 // rd = atomicrmw<op> addr, rs2
3109 // All three operands are registers.
3110 unsigned DestReg = MI->getOperand(0).getReg();
3111 unsigned AddrReg = MI->getOperand(1).getReg();
3112 unsigned Rs2Reg = MI->getOperand(2).getReg();
3114 // SelectionDAG has already inserted memory barriers before and after MI, so
3115 // we simply have to implement the operation in terms of compare-and-swap.
3117 // %val0 = load %addr
3119 // %val = phi %val0, %dest
3120 // %upd = op %val, %rs2
3121 // %dest = cas %addr, %val, %upd
3126 bool is64Bit = SP::I64RegsRegClass.hasSubClassEq(MRI.getRegClass(DestReg));
3127 const TargetRegisterClass *ValueRC =
3128 is64Bit ? &SP::I64RegsRegClass : &SP::IntRegsRegClass;
3129 unsigned Val0Reg = MRI.createVirtualRegister(ValueRC);
3131 BuildMI(*MBB, MI, DL, TII.get(is64Bit ? SP::LDXri : SP::LDri), Val0Reg)
3132 .addReg(AddrReg).addImm(0);
3134 // Split the basic block MBB before MI and insert the loop block in the hole.
3135 MachineFunction::iterator MFI = MBB->getIterator();
3136 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
3137 MachineFunction *MF = MBB->getParent();
3138 MachineBasicBlock *LoopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
3139 MachineBasicBlock *DoneMBB = MF->CreateMachineBasicBlock(LLVM_BB);
3141 MF->insert(MFI, LoopMBB);
3142 MF->insert(MFI, DoneMBB);
3144 // Move MI and following instructions to DoneMBB.
3145 DoneMBB->splice(DoneMBB->begin(), MBB, MI, MBB->end());
3146 DoneMBB->transferSuccessorsAndUpdatePHIs(MBB);
3148 // Connect the CFG again.
3149 MBB->addSuccessor(LoopMBB);
3150 LoopMBB->addSuccessor(LoopMBB);
3151 LoopMBB->addSuccessor(DoneMBB);
3153 // Build the loop block.
3154 unsigned ValReg = MRI.createVirtualRegister(ValueRC);
3155 // Opcode == 0 means try to write Rs2Reg directly (ATOMIC_SWAP).
3156 unsigned UpdReg = (Opcode ? MRI.createVirtualRegister(ValueRC) : Rs2Reg);
3158 BuildMI(LoopMBB, DL, TII.get(SP::PHI), ValReg)
3159 .addReg(Val0Reg).addMBB(MBB)
3160 .addReg(DestReg).addMBB(LoopMBB);
3163 // This is one of the min/max operations. We need a CMPrr followed by a MOVICC/MOVXCC selecting the new value.
3165 BuildMI(LoopMBB, DL, TII.get(SP::CMPrr)).addReg(ValReg).addReg(Rs2Reg);
3166 BuildMI(LoopMBB, DL, TII.get(Opcode), UpdReg)
3167 .addReg(ValReg).addReg(Rs2Reg).addImm(CondCode);
3168 } else if (Opcode) {
3169 BuildMI(LoopMBB, DL, TII.get(Opcode), UpdReg)
3170 .addReg(ValReg).addReg(Rs2Reg);
3173 if (MI->getOpcode() == SP::ATOMIC_LOAD_NAND_32 ||
3174 MI->getOpcode() == SP::ATOMIC_LOAD_NAND_64) {
3175 unsigned TmpReg = UpdReg;
3176 UpdReg = MRI.createVirtualRegister(ValueRC);
3177 BuildMI(LoopMBB, DL, TII.get(SP::XORri), UpdReg).addReg(TmpReg).addImm(-1);
3180 BuildMI(LoopMBB, DL, TII.get(is64Bit ? SP::CASXrr : SP::CASrr), DestReg)
3181 .addReg(AddrReg).addReg(ValReg).addReg(UpdReg)
3182 .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
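// cas [rs1], rs2, rd compares the memory word at [rs1] with rs2; if they are
// equal the word and rd are swapped, and rd always receives the old memory
// value, so comparing it against %val below tells us whether the update won.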
3183 BuildMI(LoopMBB, DL, TII.get(SP::CMPrr)).addReg(ValReg).addReg(DestReg);
3184 BuildMI(LoopMBB, DL, TII.get(is64Bit ? SP::BPXCC : SP::BCOND))
3185 .addMBB(LoopMBB).addImm(SPCC::ICC_NE);
3187 MI->eraseFromParent();
3191 //===----------------------------------------------------------------------===//
3192 // Sparc Inline Assembly Support
3193 //===----------------------------------------------------------------------===//
3195 /// getConstraintType - Given a constraint letter, return the type of
3196 /// constraint it is for this target.
3197 SparcTargetLowering::ConstraintType
3198 SparcTargetLowering::getConstraintType(StringRef Constraint) const {
3199 if (Constraint.size() == 1) {
3200 switch (Constraint[0]) {
3202 case 'r': return C_RegisterClass;
3208 return TargetLowering::getConstraintType(Constraint);
3211 TargetLowering::ConstraintWeight SparcTargetLowering::
3212 getSingleConstraintMatchWeight(AsmOperandInfo &info,
3213 const char *constraint) const {
3214 ConstraintWeight weight = CW_Invalid;
3215 Value *CallOperandVal = info.CallOperandVal;
3216 // If we don't have a value, we can't do a match,
3217 // but allow it at the lowest weight.
3218 if (!CallOperandVal)
3221 // Look at the constraint type.
3222 switch (*constraint) {
3224 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
3227 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
3228 if (isInt<13>(C->getSExtValue()))
3229 weight = CW_Constant;
3236 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
3237 /// vector. If it is invalid, don't add anything to Ops.
3238 void SparcTargetLowering::
3239 LowerAsmOperandForConstraint(SDValue Op,
3240 std::string &Constraint,
3241 std::vector<SDValue> &Ops,
3242 SelectionDAG &DAG) const {
3243 SDValue Result(nullptr, 0);
3245 // Only support length 1 constraints for now.
3246 if (Constraint.length() > 1)
3249 char ConstraintLetter = Constraint[0];
3250 switch (ConstraintLetter) {
3253 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
3254 if (isInt<13>(C->getSExtValue())) {
3255 Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
3263 if (Result.getNode()) {
3264 Ops.push_back(Result);
3267 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
3270 std::pair<unsigned, const TargetRegisterClass *>
3271 SparcTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
3272 StringRef Constraint,
3274 if (Constraint.size() == 1) {
3275 switch (Constraint[0]) {
3277 if (VT == MVT::v2i32)
3278 return std::make_pair(0U, &SP::IntPairRegClass);
3280 return std::make_pair(0U, &SP::IntRegsRegClass);
3282 } else if (!Constraint.empty() && Constraint.size() <= 5
3283 && Constraint[0] == '{' && *(Constraint.end()-1) == '}') {
3284 // constraint = '{r<d>}'
3285 // Remove the braces from around the name.
3286 StringRef name(Constraint.data()+1, Constraint.size()-2);
3287 // Handle register aliases:
3292 uint64_t intVal = 0;
3293 if (name.substr(0, 1).equals("r")
3294 && !name.substr(1).getAsInteger(10, intVal) && intVal <= 31) {
3295 const char regTypes[] = { 'g', 'o', 'l', 'i' };
3296 char regType = regTypes[intVal/8];
3297 char regIdx = '0' + (intVal % 8);
3298 char tmp[] = { '{', regType, regIdx, '}', 0 };
3299 std::string newConstraint = std::string(tmp);
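// e.g. "{r13}" maps to group 13/8 = 1 ('o') and index 13%8 = 5, so the
// constraint is rewritten to "{o5}" before the generic lookup.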
3300 return TargetLowering::getRegForInlineAsmConstraint(TRI, newConstraint,
3305 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
3309 SparcTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
3310 // The Sparc target isn't yet aware of offsets.
3314 void SparcTargetLowering::ReplaceNodeResults(SDNode *N,
3315 SmallVectorImpl<SDValue>& Results,
3316 SelectionDAG &DAG) const {
3320 RTLIB::Libcall libCall = RTLIB::UNKNOWN_LIBCALL;
3322 switch (N->getOpcode()) {
3324 llvm_unreachable("Do not know how to custom type legalize this operation!");
3326 case ISD::FP_TO_SINT:
3327 case ISD::FP_TO_UINT:
3328 // Custom lower only if it involves f128 or i64.
3329 if (N->getOperand(0).getValueType() != MVT::f128
3330 || N->getValueType(0) != MVT::i64)
3332 libCall = ((N->getOpcode() == ISD::FP_TO_SINT)
3333 ? RTLIB::FPTOSINT_F128_I64
3334 : RTLIB::FPTOUINT_F128_I64);
3336 Results.push_back(LowerF128Op(SDValue(N, 0),
3338 getLibcallName(libCall),
3342 case ISD::SINT_TO_FP:
3343 case ISD::UINT_TO_FP:
3344 // Custom lower only if it involves f128 or i64.
3345 if (N->getValueType(0) != MVT::f128
3346 || N->getOperand(0).getValueType() != MVT::i64)
3349 libCall = ((N->getOpcode() == ISD::SINT_TO_FP)
3350 ? RTLIB::SINTTOFP_I64_F128
3351 : RTLIB::UINTTOFP_I64_F128);
3353 Results.push_back(LowerF128Op(SDValue(N, 0),
3355 getLibcallName(libCall),
3359 LoadSDNode *Ld = cast<LoadSDNode>(N);
3360 // Custom handling only for i64: turn an i64 load into a v2i32 load, followed by a bitcast back to i64.
3362 if (Ld->getValueType(0) != MVT::i64 || Ld->getMemoryVT() != MVT::i64)
3366 SDValue LoadRes = DAG.getExtLoad(
3367 Ld->getExtensionType(), dl, MVT::v2i32,
3368 Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(),
3369 MVT::v2i32, Ld->isVolatile(), Ld->isNonTemporal(),
3370 Ld->isInvariant(), Ld->getAlignment(), Ld->getAAInfo());
3372 SDValue Res = DAG.getNode(ISD::BITCAST, dl, MVT::i64, LoadRes);
3373 Results.push_back(Res);
3374 Results.push_back(LoadRes.getValue(1));