1 //===-- MipsISelLowering.cpp - Mips DAG Lowering Implementation -----------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines the interfaces that Mips uses to lower LLVM code into a
13 //===----------------------------------------------------------------------===//
14 #include "MipsISelLowering.h"
15 #include "InstPrinter/MipsInstPrinter.h"
16 #include "MCTargetDesc/MipsBaseInfo.h"
17 #include "MipsMachineFunction.h"
18 #include "MipsSubtarget.h"
19 #include "MipsTargetMachine.h"
20 #include "MipsTargetObjectFile.h"
21 #include "llvm/ADT/Statistic.h"
22 #include "llvm/ADT/StringSwitch.h"
23 #include "llvm/CodeGen/CallingConvLower.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineInstrBuilder.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/CodeGen/SelectionDAGISel.h"
29 #include "llvm/CodeGen/ValueTypes.h"
30 #include "llvm/IR/CallingConv.h"
31 #include "llvm/IR/DerivedTypes.h"
32 #include "llvm/IR/GlobalVariable.h"
33 #include "llvm/Support/CommandLine.h"
34 #include "llvm/Support/Debug.h"
35 #include "llvm/Support/ErrorHandling.h"
36 #include "llvm/Support/raw_ostream.h"
// NOTE(review): the embedded original line numbers in this extraction are
// non-contiguous, so some source lines are elided here — e.g. the
// "static cl::opt<bool>" headers for the three command-line options below.
41 #define DEBUG_TYPE "mips-lower"
// Counts tail calls emitted by this backend (shown with -stats).
43 STATISTIC(NumTailCalls, "Number of tail calls");
// -mxgot: enable large-GOT (multi-instruction) GOT accesses.
46 LargeGOT("mxgot", cl::Hidden,
47 cl::desc("MIPS: Enable GOT larger than 64k."), cl::init(false));
// -mno-check-zero-division: suppress the trap normally inserted before
// integer division (see insertDivByZeroTrap below).
50 NoZeroDivCheck("mno-check-zero-division", cl::Hidden,
51 cl::desc("MIPS: Don't trap on integer division by zero."),
// -mips-fast-isel: opt into the Mips FastISel implementation.
55 EnableMipsFastISel("mips-fast-isel", cl::Hidden,
56 cl::desc("Allow mips-fast-isel to be used"),
// Integer argument registers for the O32 calling convention.
59 static const MCPhysReg O32IntRegs[4] = {
60 Mips::A0, Mips::A1, Mips::A2, Mips::A3
// Integer argument registers for the N32/N64 (64-bit) calling conventions.
63 static const MCPhysReg Mips64IntRegs[8] = {
64 Mips::A0_64, Mips::A1_64, Mips::A2_64, Mips::A3_64,
65 Mips::T0_64, Mips::T1_64, Mips::T2_64, Mips::T3_64
// Double-precision FP argument registers for the 64-bit ABIs.
68 static const MCPhysReg Mips64DPRegs[8] = {
69 Mips::D12_64, Mips::D13_64, Mips::D14_64, Mips::D15_64,
70 Mips::D16_64, Mips::D17_64, Mips::D18_64, Mips::D19_64
73 // If I is a shifted mask, set the size (Size) and the first bit of the
74 // mask (Pos), and return true.
75 // For example, if I is 0x003ff800, (Pos, Size) = (11, 11).
76 static bool isShiftedMask(uint64_t I, uint64_t &Pos, uint64_t &Size) {
// Reject values that are not a single contiguous run of set bits.
77 if (!isShiftedMask_64(I))
// Size = number of set bits; Pos = bit index of the lowest set bit.
80 Size = CountPopulation_64(I);
81 Pos = countTrailingZeros(I);
// Returns a register SDValue for this function's global base register
// (the GOT pointer), as recorded in MipsFunctionInfo.
85 SDValue MipsTargetLowering::getGlobalReg(SelectionDAG &DAG, EVT Ty) const {
86 MipsFunctionInfo *FI = DAG.getMachineFunction().getInfo<MipsFunctionInfo>();
87 return DAG.getRegister(FI->getGlobalBaseReg(), Ty);
// getTargetNode overloads: wrap each kind of address node (global, external
// symbol, block address, jump table, constant pool) in its target-specific
// form, attaching the relocation modifier Flag (e.g. %hi/%lo/%got).
// NOTE(review): the "SelectionDAG &DAG" parameter lines appear to be elided
// from this extraction (original line numbers skip one line per overload).
90 SDValue MipsTargetLowering::getTargetNode(GlobalAddressSDNode *N, EVT Ty,
92 unsigned Flag) const {
93 return DAG.getTargetGlobalAddress(N->getGlobal(), SDLoc(N), Ty, 0, Flag);
96 SDValue MipsTargetLowering::getTargetNode(ExternalSymbolSDNode *N, EVT Ty,
98 unsigned Flag) const {
99 return DAG.getTargetExternalSymbol(N->getSymbol(), Ty, Flag);
102 SDValue MipsTargetLowering::getTargetNode(BlockAddressSDNode *N, EVT Ty,
104 unsigned Flag) const {
105 return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, 0, Flag);
108 SDValue MipsTargetLowering::getTargetNode(JumpTableSDNode *N, EVT Ty,
110 unsigned Flag) const {
111 return DAG.getTargetJumpTable(N->getIndex(), Ty, Flag);
114 SDValue MipsTargetLowering::getTargetNode(ConstantPoolSDNode *N, EVT Ty,
116 unsigned Flag) const {
117 return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlignment(),
118 N->getOffset(), Flag);
// Maps a MipsISD opcode to a printable name for SelectionDAG debug dumps;
// returns nullptr for opcodes not listed here.
121 const char *MipsTargetLowering::getTargetNodeName(unsigned Opcode) const {
123 case MipsISD::JmpLink: return "MipsISD::JmpLink";
124 case MipsISD::TailCall: return "MipsISD::TailCall";
125 case MipsISD::Hi: return "MipsISD::Hi";
126 case MipsISD::Lo: return "MipsISD::Lo";
127 case MipsISD::GPRel: return "MipsISD::GPRel";
128 case MipsISD::ThreadPointer: return "MipsISD::ThreadPointer";
129 case MipsISD::Ret: return "MipsISD::Ret";
130 case MipsISD::EH_RETURN: return "MipsISD::EH_RETURN";
131 case MipsISD::FPBrcond: return "MipsISD::FPBrcond";
132 case MipsISD::FPCmp: return "MipsISD::FPCmp";
133 case MipsISD::CMovFP_T: return "MipsISD::CMovFP_T";
134 case MipsISD::CMovFP_F: return "MipsISD::CMovFP_F";
135 case MipsISD::TruncIntFP: return "MipsISD::TruncIntFP";
136 case MipsISD::MFHI: return "MipsISD::MFHI";
137 case MipsISD::MFLO: return "MipsISD::MFLO";
138 case MipsISD::MTLOHI: return "MipsISD::MTLOHI";
139 case MipsISD::Mult: return "MipsISD::Mult";
140 case MipsISD::Multu: return "MipsISD::Multu";
141 case MipsISD::MAdd: return "MipsISD::MAdd";
142 case MipsISD::MAddu: return "MipsISD::MAddu";
143 case MipsISD::MSub: return "MipsISD::MSub";
144 case MipsISD::MSubu: return "MipsISD::MSubu";
145 case MipsISD::DivRem: return "MipsISD::DivRem";
146 case MipsISD::DivRemU: return "MipsISD::DivRemU";
147 case MipsISD::DivRem16: return "MipsISD::DivRem16";
148 case MipsISD::DivRemU16: return "MipsISD::DivRemU16";
149 case MipsISD::BuildPairF64: return "MipsISD::BuildPairF64";
150 case MipsISD::ExtractElementF64: return "MipsISD::ExtractElementF64";
151 case MipsISD::Wrapper: return "MipsISD::Wrapper";
152 case MipsISD::Sync: return "MipsISD::Sync";
153 case MipsISD::Ext: return "MipsISD::Ext";
154 case MipsISD::Ins: return "MipsISD::Ins";
// Unaligned load/store helper nodes (load/store word/doubleword left/right).
155 case MipsISD::LWL: return "MipsISD::LWL";
156 case MipsISD::LWR: return "MipsISD::LWR";
157 case MipsISD::SWL: return "MipsISD::SWL";
158 case MipsISD::SWR: return "MipsISD::SWR";
159 case MipsISD::LDL: return "MipsISD::LDL";
160 case MipsISD::LDR: return "MipsISD::LDR";
161 case MipsISD::SDL: return "MipsISD::SDL";
162 case MipsISD::SDR: return "MipsISD::SDR";
// DSP-ASE specific nodes.
163 case MipsISD::EXTP: return "MipsISD::EXTP";
164 case MipsISD::EXTPDP: return "MipsISD::EXTPDP";
165 case MipsISD::EXTR_S_H: return "MipsISD::EXTR_S_H";
166 case MipsISD::EXTR_W: return "MipsISD::EXTR_W";
167 case MipsISD::EXTR_R_W: return "MipsISD::EXTR_R_W";
168 case MipsISD::EXTR_RS_W: return "MipsISD::EXTR_RS_W";
169 case MipsISD::SHILO: return "MipsISD::SHILO";
170 case MipsISD::MTHLIP: return "MipsISD::MTHLIP";
171 case MipsISD::MULT: return "MipsISD::MULT";
172 case MipsISD::MULTU: return "MipsISD::MULTU";
173 case MipsISD::MADD_DSP: return "MipsISD::MADD_DSP";
174 case MipsISD::MADDU_DSP: return "MipsISD::MADDU_DSP";
175 case MipsISD::MSUB_DSP: return "MipsISD::MSUB_DSP";
176 case MipsISD::MSUBU_DSP: return "MipsISD::MSUBU_DSP";
177 case MipsISD::SHLL_DSP: return "MipsISD::SHLL_DSP";
178 case MipsISD::SHRA_DSP: return "MipsISD::SHRA_DSP";
179 case MipsISD::SHRL_DSP: return "MipsISD::SHRL_DSP";
180 case MipsISD::SETCC_DSP: return "MipsISD::SETCC_DSP";
181 case MipsISD::SELECT_CC_DSP: return "MipsISD::SELECT_CC_DSP";
// MSA (vector) specific nodes.
182 case MipsISD::VALL_ZERO: return "MipsISD::VALL_ZERO";
183 case MipsISD::VANY_ZERO: return "MipsISD::VANY_ZERO";
184 case MipsISD::VALL_NONZERO: return "MipsISD::VALL_NONZERO";
185 case MipsISD::VANY_NONZERO: return "MipsISD::VANY_NONZERO";
186 case MipsISD::VCEQ: return "MipsISD::VCEQ";
187 case MipsISD::VCLE_S: return "MipsISD::VCLE_S";
188 case MipsISD::VCLE_U: return "MipsISD::VCLE_U";
189 case MipsISD::VCLT_S: return "MipsISD::VCLT_S";
190 case MipsISD::VCLT_U: return "MipsISD::VCLT_U";
191 case MipsISD::VSMAX: return "MipsISD::VSMAX";
192 case MipsISD::VSMIN: return "MipsISD::VSMIN";
193 case MipsISD::VUMAX: return "MipsISD::VUMAX";
194 case MipsISD::VUMIN: return "MipsISD::VUMIN";
195 case MipsISD::VEXTRACT_SEXT_ELT: return "MipsISD::VEXTRACT_SEXT_ELT";
196 case MipsISD::VEXTRACT_ZEXT_ELT: return "MipsISD::VEXTRACT_ZEXT_ELT";
197 case MipsISD::VNOR: return "MipsISD::VNOR";
198 case MipsISD::VSHF: return "MipsISD::VSHF";
199 case MipsISD::SHF: return "MipsISD::SHF";
200 case MipsISD::ILVEV: return "MipsISD::ILVEV";
201 case MipsISD::ILVOD: return "MipsISD::ILVOD";
202 case MipsISD::ILVL: return "MipsISD::ILVL";
203 case MipsISD::ILVR: return "MipsISD::ILVR";
204 case MipsISD::PCKEV: return "MipsISD::PCKEV";
205 case MipsISD::PCKOD: return "MipsISD::PCKOD";
206 case MipsISD::INSVE: return "MipsISD::INSVE";
207 default: return nullptr;
// Constructor: registers, per operation and value type, how each generic
// ISD node should be handled on Mips (Legal / Expand / Promote / Custom),
// and configures ABI-dependent lowering parameters.
211 MipsTargetLowering::MipsTargetLowering(MipsTargetMachine &TM,
212 const MipsSubtarget &STI)
213 : TargetLowering(TM, new MipsTargetObjectFile()), Subtarget(STI) {
214 // Mips does not have i1 type, so use i32 for
215 // setcc operations results (slt, sgt, ...).
216 setBooleanContents(ZeroOrOneBooleanContent);
217 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
218 // The cmp.cond.fmt instruction in MIPS32r6/MIPS64r6 uses 0 and -1 like MSA
219 // does. Integer booleans still use 0 and 1.
220 if (Subtarget.hasMips32r6())
221 setBooleanContents(ZeroOrOneBooleanContent,
222 ZeroOrNegativeOneBooleanContent);
224 // Load extented operations for i1 types must be promoted
225 setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
226 setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
227 setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
229 // MIPS doesn't have extending float->double load/store
230 setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
231 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
233 // Used by legalize types to correctly generate the setcc result.
234 // Without this, every float setcc comes with a AND/OR with the result,
235 // we don't want this, since the fpcmp result goes to a flag register,
236 // which is used implicitly by brcond and select operations.
237 AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);
239 // Mips Custom Operations
240 setOperationAction(ISD::BR_JT, MVT::Other, Custom);
241 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
242 setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
243 setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
244 setOperationAction(ISD::JumpTable, MVT::i32, Custom);
245 setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
246 setOperationAction(ISD::SELECT, MVT::f32, Custom);
247 setOperationAction(ISD::SELECT, MVT::f64, Custom);
248 setOperationAction(ISD::SELECT, MVT::i32, Custom);
249 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
250 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
251 setOperationAction(ISD::SETCC, MVT::f32, Custom);
252 setOperationAction(ISD::SETCC, MVT::f64, Custom);
253 setOperationAction(ISD::BRCOND, MVT::Other, Custom);
254 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
255 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
256 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
// 64-bit targets additionally custom-lower the i64 forms.
258 if (Subtarget.isGP64bit()) {
259 setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
260 setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
261 setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
262 setOperationAction(ISD::JumpTable, MVT::i64, Custom);
263 setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
264 setOperationAction(ISD::SELECT, MVT::i64, Custom);
265 setOperationAction(ISD::LOAD, MVT::i64, Custom);
266 setOperationAction(ISD::STORE, MVT::i64, Custom);
267 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
// On 32-bit targets, 64-bit shifts are split into *_PARTS pairs.
270 if (!Subtarget.isGP64bit()) {
271 setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
272 setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
273 setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
276 setOperationAction(ISD::ADD, MVT::i32, Custom);
277 if (Subtarget.isGP64bit())
278 setOperationAction(ISD::ADD, MVT::i64, Custom);
// Division/remainder are expanded; DAG combines later form DivRem nodes.
280 setOperationAction(ISD::SDIV, MVT::i32, Expand);
281 setOperationAction(ISD::SREM, MVT::i32, Expand);
282 setOperationAction(ISD::UDIV, MVT::i32, Expand);
283 setOperationAction(ISD::UREM, MVT::i32, Expand);
284 setOperationAction(ISD::SDIV, MVT::i64, Expand);
285 setOperationAction(ISD::SREM, MVT::i64, Expand);
286 setOperationAction(ISD::UDIV, MVT::i64, Expand);
287 setOperationAction(ISD::UREM, MVT::i64, Expand);
289 // Operations not directly supported by Mips.
290 setOperationAction(ISD::BR_CC, MVT::f32, Expand);
291 setOperationAction(ISD::BR_CC, MVT::f64, Expand);
292 setOperationAction(ISD::BR_CC, MVT::i32, Expand);
293 setOperationAction(ISD::BR_CC, MVT::i64, Expand);
294 setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
295 setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
296 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
297 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
298 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
299 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
300 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
// Cavium Octeon (cnMIPS) has a hardware population-count instruction.
301 if (Subtarget.hasCnMips()) {
302 setOperationAction(ISD::CTPOP, MVT::i32, Legal);
303 setOperationAction(ISD::CTPOP, MVT::i64, Legal);
// NOTE(review): an "} else {" appears to be elided here — these two lines
// are presumably the non-cnMIPS fallback.
305 setOperationAction(ISD::CTPOP, MVT::i32, Expand);
306 setOperationAction(ISD::CTPOP, MVT::i64, Expand);
308 setOperationAction(ISD::CTTZ, MVT::i32, Expand);
309 setOperationAction(ISD::CTTZ, MVT::i64, Expand);
310 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
311 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
312 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
313 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
314 setOperationAction(ISD::ROTL, MVT::i32, Expand);
315 setOperationAction(ISD::ROTL, MVT::i64, Expand);
316 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
317 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
// Rotate-right instructions only exist from r2 of each ISA onwards.
319 if (!Subtarget.hasMips32r2())
320 setOperationAction(ISD::ROTR, MVT::i32, Expand);
322 if (!Subtarget.hasMips64r2())
323 setOperationAction(ISD::ROTR, MVT::i64, Expand);
// No hardware transcendental/remainder FP ops; expand to libcalls.
325 setOperationAction(ISD::FSIN, MVT::f32, Expand);
326 setOperationAction(ISD::FSIN, MVT::f64, Expand);
327 setOperationAction(ISD::FCOS, MVT::f32, Expand);
328 setOperationAction(ISD::FCOS, MVT::f64, Expand);
329 setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
330 setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
331 setOperationAction(ISD::FPOWI, MVT::f32, Expand);
332 setOperationAction(ISD::FPOW, MVT::f32, Expand);
333 setOperationAction(ISD::FPOW, MVT::f64, Expand);
334 setOperationAction(ISD::FLOG, MVT::f32, Expand);
335 setOperationAction(ISD::FLOG2, MVT::f32, Expand);
336 setOperationAction(ISD::FLOG10, MVT::f32, Expand);
337 setOperationAction(ISD::FEXP, MVT::f32, Expand);
338 setOperationAction(ISD::FMA, MVT::f32, Expand);
339 setOperationAction(ISD::FMA, MVT::f64, Expand);
340 setOperationAction(ISD::FREM, MVT::f32, Expand);
341 setOperationAction(ISD::FREM, MVT::f64, Expand);
343 setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
345 setOperationAction(ISD::VASTART, MVT::Other, Custom);
346 setOperationAction(ISD::VAARG, MVT::Other, Custom);
347 setOperationAction(ISD::VACOPY, MVT::Other, Expand);
348 setOperationAction(ISD::VAEND, MVT::Other, Expand);
350 // Use the default for now
351 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
352 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
// Atomic load/store are expanded into plain load/store plus fences.
354 setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Expand);
355 setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
356 setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Expand);
357 setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
359 setInsertFencesForAtomic(true);
// seb/seh (sign-extend byte/halfword) only exist from MIPS32r2.
361 if (!Subtarget.hasMips32r2()) {
362 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
363 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
366 // MIPS16 lacks MIPS32's clz and clo instructions.
367 if (!Subtarget.hasMips32() || Subtarget.inMips16Mode())
368 setOperationAction(ISD::CTLZ, MVT::i32, Expand);
369 if (!Subtarget.hasMips64())
370 setOperationAction(ISD::CTLZ, MVT::i64, Expand);
// wsbh/dsbh-based byte swap requires r2 of each ISA.
372 if (!Subtarget.hasMips32r2())
373 setOperationAction(ISD::BSWAP, MVT::i32, Expand);
374 if (!Subtarget.hasMips64r2())
375 setOperationAction(ISD::BSWAP, MVT::i64, Expand);
// On 64-bit GPRs, 32-bit extending loads / truncating stores get custom
// handling (see lowerLOAD/lowerSTORE).
377 if (Subtarget.isGP64bit()) {
378 setLoadExtAction(ISD::SEXTLOAD, MVT::i32, Custom);
379 setLoadExtAction(ISD::ZEXTLOAD, MVT::i32, Custom);
380 setLoadExtAction(ISD::EXTLOAD, MVT::i32, Custom);
381 setTruncStoreAction(MVT::i64, MVT::i32, Custom);
384 setOperationAction(ISD::TRAP, MVT::Other, Legal);
// DAG-combine hooks dispatched from PerformDAGCombine below.
386 setTargetDAGCombine(ISD::SDIVREM);
387 setTargetDAGCombine(ISD::UDIVREM);
388 setTargetDAGCombine(ISD::SELECT);
389 setTargetDAGCombine(ISD::AND);
390 setTargetDAGCombine(ISD::OR);
391 setTargetDAGCombine(ISD::ADD);
// Function alignment is log2: 8 bytes on 64-bit, 4 bytes on 32-bit.
393 setMinFunctionAlignment(Subtarget.isGP64bit() ? 3 : 2);
395 // The arguments on the stack are defined in terms of 4-byte slots on O32
396 // and 8-byte slots on N32/N64.
397 setMinStackArgumentAlignment(
398 (Subtarget.isABI_N32() || Subtarget.isABI_N64()) ? 8 : 4);
400 setStackPointerRegisterToSaveRestore(Subtarget.isABI_N64() ? Mips::SP_64
403 setExceptionPointerRegister(Subtarget.isABI_N64() ? Mips::A0_64 : Mips::A0);
404 setExceptionSelectorRegister(Subtarget.isABI_N64() ? Mips::A1_64 : Mips::A1);
406 MaxStoresPerMemcpy = 16;
408 isMicroMips = Subtarget.inMicroMipsMode();
// Factory: picks the Mips16 lowering variant when the subtarget is in
// MIPS16 mode, otherwise the standard-encoding (SE) variant.
411 const MipsTargetLowering *MipsTargetLowering::create(MipsTargetMachine &TM,
412 const MipsSubtarget &STI) {
413 if (STI.inMips16Mode())
414 return llvm::createMips16TargetLowering(TM, STI);
416 return llvm::createMipsSETargetLowering(TM, STI);
419 // Create a fast isel object. Only returns the Mips FastISel when the
// -mips-fast-isel flag is set; otherwise defers to the base class (which
// effectively disables fast-isel).
421 MipsTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
422 const TargetLibraryInfo *libInfo) const {
423 if (!EnableMipsFastISel)
424 return TargetLowering::createFastISel(funcInfo, libInfo);
425 return Mips::createFastISel(funcInfo, libInfo);
// Result type of a SETCC: for vectors, an integer vector of matching shape.
// NOTE(review): the scalar case (presumably returning MVT::i32) appears to
// be elided from this extraction.
428 EVT MipsTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
431 return VT.changeVectorElementTypeToInteger();
// Combine (S|U)DIVREM into a MipsISD::DivRem(U)16 glued node whose quotient
// and remainder are read back from the LO/HI registers with CopyFromReg,
// replacing the original node's used results.
434 static SDValue performDivRemCombine(SDNode *N, SelectionDAG &DAG,
435 TargetLowering::DAGCombinerInfo &DCI,
436 const MipsSubtarget &Subtarget) {
437 if (DCI.isBeforeLegalizeOps())
// Pick the 32- or 64-bit LO/HI registers to match the value type.
440 EVT Ty = N->getValueType(0);
441 unsigned LO = (Ty == MVT::i32) ? Mips::LO0 : Mips::LO0_64;
442 unsigned HI = (Ty == MVT::i32) ? Mips::HI0 : Mips::HI0_64;
443 unsigned Opc = N->getOpcode() == ISD::SDIVREM ? MipsISD::DivRem16 :
447 SDValue DivRem = DAG.getNode(Opc, DL, MVT::Glue,
448 N->getOperand(0), N->getOperand(1));
449 SDValue InChain = DAG.getEntryNode();
450 SDValue InGlue = DivRem;
// Result 0 (quotient) comes from LO.
453 if (N->hasAnyUseOfValue(0)) {
454 SDValue CopyFromLo = DAG.getCopyFromReg(InChain, DL, LO, Ty,
456 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), CopyFromLo);
// Thread the chain/glue so the HI read is ordered after the LO read.
457 InChain = CopyFromLo.getValue(1);
458 InGlue = CopyFromLo.getValue(2);
// Result 1 (remainder) comes from HI.
462 if (N->hasAnyUseOfValue(1)) {
463 SDValue CopyFromHi = DAG.getCopyFromReg(InChain, DL,
465 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), CopyFromHi);
// Translates a generic ISD floating-point condition code into the
// corresponding Mips FP condition code (FCOND_*).
471 static Mips::CondCode condCodeToFCC(ISD::CondCode CC) {
473 default: llvm_unreachable("Unknown fp condition code!");
475 case ISD::SETOEQ: return Mips::FCOND_OEQ;
476 case ISD::SETUNE: return Mips::FCOND_UNE;
478 case ISD::SETOLT: return Mips::FCOND_OLT;
480 case ISD::SETOGT: return Mips::FCOND_OGT;
482 case ISD::SETOLE: return Mips::FCOND_OLE;
484 case ISD::SETOGE: return Mips::FCOND_OGE;
485 case ISD::SETULT: return Mips::FCOND_ULT;
486 case ISD::SETULE: return Mips::FCOND_ULE;
487 case ISD::SETUGT: return Mips::FCOND_UGT;
488 case ISD::SETUGE: return Mips::FCOND_UGE;
489 case ISD::SETUO: return Mips::FCOND_UN;
490 case ISD::SETO: return Mips::FCOND_OR;
492 case ISD::SETONE: return Mips::FCOND_ONE;
493 case ISD::SETUEQ: return Mips::FCOND_UEQ;
498 /// This function returns true if the floating point conditional branches and
499 /// conditional moves which use condition code CC should be inverted.
500 static bool invertFPCondCodeUser(Mips::CondCode CC) {
// Codes in [FCOND_F, FCOND_NGT] are directly encodable; the remaining
// range [FCOND_T, FCOND_GT] must be realized by inverting the user.
501 if (CC >= Mips::FCOND_F && CC <= Mips::FCOND_NGT)
504 assert((CC >= Mips::FCOND_T && CC <= Mips::FCOND_GT) &&
505 "Illegal Condition Code");
510 // Creates and returns an FPCmp node from a setcc node.
511 // Returns Op if setcc is not a floating point comparison.
512 static SDValue createFPCmp(SelectionDAG &DAG, const SDValue &Op) {
513 // must be a SETCC node
514 if (Op.getOpcode() != ISD::SETCC)
517 SDValue LHS = Op.getOperand(0);
// Only FP comparisons are turned into FPCmp; integer setcc passes through.
519 if (!LHS.getValueType().isFloatingPoint())
522 SDValue RHS = Op.getOperand(1);
525 // Assume the 3rd operand is a CondCodeSDNode. Add code to check the type of
526 // node if necessary.
527 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
// FPCmp produces glue that is consumed by CMovFP_T/F or FPBrcond.
529 return DAG.getNode(MipsISD::FPCmp, DL, MVT::Glue, LHS, RHS,
530 DAG.getConstant(condCodeToFCC(CC), MVT::i32));
533 // Creates and returns a CMovFPT/F node.
// Cond is an FPCmp node; the CMovFP_F form is used when the condition code
// must be inverted to be encodable (see invertFPCondCodeUser).
534 static SDValue createCMovFP(SelectionDAG &DAG, SDValue Cond, SDValue True,
535 SDValue False, SDLoc DL) {
536 ConstantSDNode *CC = cast<ConstantSDNode>(Cond.getOperand(2));
537 bool invert = invertFPCondCodeUser((Mips::CondCode)CC->getSExtValue());
// The comparison result lives in FP condition-code register FCC0.
538 SDValue FCC0 = DAG.getRegister(Mips::FCC0, MVT::i32);
540 return DAG.getNode((invert ? MipsISD::CMovFP_F : MipsISD::CMovFP_T), DL,
541 True.getValueType(), True, FCC0, False, Cond);
// Combine integer SELECT nodes: (a) swap operands so a zero False value can
// use the $0 register in a conditional move; (b) turn selects between two
// constants differing by 1 into setcc + add.
544 static SDValue performSELECTCombine(SDNode *N, SelectionDAG &DAG,
545 TargetLowering::DAGCombinerInfo &DCI,
546 const MipsSubtarget &Subtarget) {
547 if (DCI.isBeforeLegalizeOps())
550 SDValue SetCC = N->getOperand(0);
// Only handle selects whose condition is an integer SETCC.
552 if ((SetCC.getOpcode() != ISD::SETCC) ||
553 !SetCC.getOperand(0).getValueType().isInteger())
556 SDValue False = N->getOperand(2);
557 EVT FalseTy = False.getValueType();
559 if (!FalseTy.isInteger())
562 ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(False);
564 // If the RHS (False) is 0, we swap the order of the operands
565 // of ISD::SELECT (obviously also inverting the condition) so that we can
566 // take advantage of conditional moves using the $0 register.
568 // return (a != 0) ? x : 0;
576 if (!FalseC->getZExtValue()) {
577 ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
578 SDValue True = N->getOperand(1);
// Rebuild the setcc with the inverse condition, then swap True/False.
580 SetCC = DAG.getSetCC(DL, SetCC.getValueType(), SetCC.getOperand(0),
581 SetCC.getOperand(1), ISD::getSetCCInverse(CC, true));
583 return DAG.getNode(ISD::SELECT, DL, FalseTy, SetCC, False, True);
586 // If both operands are integer constants there's a possibility that we
587 // can do some interesting optimizations.
588 SDValue True = N->getOperand(1);
589 ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(True);
591 if (!TrueC || !True.getValueType().isInteger())
594 // We'll also ignore MVT::i64 operands as this optimizations proves
595 // to be ineffective because of the required sign extensions as the result
596 // of a SETCC operator is always MVT::i32 for non-vector types.
597 if (True.getValueType() == MVT::i64)
600 int64_t Diff = TrueC->getSExtValue() - FalseC->getSExtValue();
602 // 1) (a < x) ? y : y-1
604 // addiu $reg2, $reg1, y-1
// Diff == 1: select reduces to setcc-result + False.
606 return DAG.getNode(ISD::ADD, DL, SetCC.getValueType(), SetCC, False);
608 // 2) (a < x) ? y-1 : y
610 // xor $reg1, $reg1, 1
611 // addiu $reg2, $reg1, y-1
// Diff == -1: invert the condition, then add True.
613 ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
614 SetCC = DAG.getSetCC(DL, SetCC.getValueType(), SetCC.getOperand(0),
615 SetCC.getOperand(1), ISD::getSetCCInverse(CC, true));
616 return DAG.getNode(ISD::ADD, DL, SetCC.getValueType(), SetCC, True);
619 // Couldn't optimize.
// AND combine: recognize (srl/sra $src, pos) & (2**size - 1) and emit the
// Mips bit-field extract node (ext) when the subtarget has ext/ins.
623 static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
624 TargetLowering::DAGCombinerInfo &DCI,
625 const MipsSubtarget &Subtarget) {
626 // Pattern match EXT.
627 // $dst = and ((sra or srl) $src , pos), (2**size - 1)
628 // => ext $dst, $src, size, pos
629 if (DCI.isBeforeLegalizeOps() || !Subtarget.hasExtractInsert())
632 SDValue ShiftRight = N->getOperand(0), Mask = N->getOperand(1);
633 unsigned ShiftRightOpc = ShiftRight.getOpcode();
635 // Op's first operand must be a shift right.
636 if (ShiftRightOpc != ISD::SRA && ShiftRightOpc != ISD::SRL)
639 // The second operand of the shift must be an immediate.
641 if (!(CN = dyn_cast<ConstantSDNode>(ShiftRight.getOperand(1))))
644 uint64_t Pos = CN->getZExtValue();
645 uint64_t SMPos, SMSize;
647 // Op's second operand must be a shifted mask.
648 if (!(CN = dyn_cast<ConstantSDNode>(Mask)) ||
649 !isShiftedMask(CN->getZExtValue(), SMPos, SMSize))
652 // Return if the shifted mask does not start at bit 0 or the sum of its size
653 // and Pos exceeds the word's size.
654 EVT ValTy = N->getValueType(0);
655 if (SMPos != 0 || Pos + SMSize > ValTy.getSizeInBits())
// ext operands: source, position, size.
658 return DAG.getNode(MipsISD::Ext, SDLoc(N), ValTy,
659 ShiftRight.getOperand(0), DAG.getConstant(Pos, MVT::i32),
660 DAG.getConstant(SMSize, MVT::i32));
// OR combine: recognize the masked-merge idiom and emit the Mips bit-field
// insert node (ins) when the subtarget has ext/ins.
663 static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
664 TargetLowering::DAGCombinerInfo &DCI,
665 const MipsSubtarget &Subtarget) {
666 // Pattern match INS.
667 // $dst = or (and $src1 , mask0), (and (shl $src, pos), mask1),
668 // where mask1 = (2**size - 1) << pos, mask0 = ~mask1
669 // => ins $dst, $src, size, pos, $src1
670 if (DCI.isBeforeLegalizeOps() || !Subtarget.hasExtractInsert())
673 SDValue And0 = N->getOperand(0), And1 = N->getOperand(1);
674 uint64_t SMPos0, SMSize0, SMPos1, SMSize1;
677 // See if Op's first operand matches (and $src1 , mask0).
678 if (And0.getOpcode() != ISD::AND)
// mask0 must be the complement of a shifted mask.
681 if (!(CN = dyn_cast<ConstantSDNode>(And0.getOperand(1))) ||
682 !isShiftedMask(~CN->getSExtValue(), SMPos0, SMSize0))
685 // See if Op's second operand matches (and (shl $src, pos), mask1).
686 if (And1.getOpcode() != ISD::AND)
689 if (!(CN = dyn_cast<ConstantSDNode>(And1.getOperand(1))) ||
690 !isShiftedMask(CN->getZExtValue(), SMPos1, SMSize1))
693 // The shift masks must have the same position and size.
694 if (SMPos0 != SMPos1 || SMSize0 != SMSize1)
697 SDValue Shl = And1.getOperand(0);
698 if (Shl.getOpcode() != ISD::SHL)
// The shift amount must be an immediate.
701 if (!(CN = dyn_cast<ConstantSDNode>(Shl.getOperand(1))))
704 unsigned Shamt = CN->getZExtValue();
706 // Return if the shift amount and the first bit position of mask are not the
708 EVT ValTy = N->getValueType(0);
709 if ((Shamt != SMPos0) || (SMPos0 + SMSize0 > ValTy.getSizeInBits()))
// ins operands: inserted source, position, size, destination value.
712 return DAG.getNode(MipsISD::Ins, SDLoc(N), ValTy, Shl.getOperand(0),
713 DAG.getConstant(SMPos0, MVT::i32),
714 DAG.getConstant(SMSize0, MVT::i32), And0.getOperand(0));
// ADD combine: re-associate an add whose RHS is (add v1, %lo(jumptable)) so
// the %lo term is applied last, enabling better addressing of jump tables.
717 static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
718 TargetLowering::DAGCombinerInfo &DCI,
719 const MipsSubtarget &Subtarget) {
720 // (add v0, (add v1, abs_lo(tjt))) => (add (add v0, v1), abs_lo(tjt))
722 if (DCI.isBeforeLegalizeOps())
725 SDValue Add = N->getOperand(1);
727 if (Add.getOpcode() != ISD::ADD)
730 SDValue Lo = Add.getOperand(1);
// Only fire when the low part wraps a jump-table address.
732 if ((Lo.getOpcode() != MipsISD::Lo) ||
733 (Lo.getOperand(0).getOpcode() != ISD::TargetJumpTable))
736 EVT ValTy = N->getValueType(0);
739 SDValue Add1 = DAG.getNode(ISD::ADD, DL, ValTy, N->getOperand(0),
741 return DAG.getNode(ISD::ADD, DL, ValTy, Add1, Lo);
// Dispatches the target DAG combines registered in the constructor
// (SDIVREM/UDIVREM, SELECT, AND, OR, ADD) to the static helpers above.
// NOTE(review): the switch-on-opcode lines appear to be elided from this
// extraction; only the dispatch calls remain visible.
744 SDValue MipsTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI)
746 SelectionDAG &DAG = DCI.DAG;
747 unsigned Opc = N->getOpcode();
753 return performDivRemCombine(N, DAG, DCI, Subtarget);
755 return performSELECTCombine(N, DAG, DCI, Subtarget);
757 return performANDCombine(N, DAG, DCI, Subtarget);
759 return performORCombine(N, DAG, DCI, Subtarget);
761 return performADDCombine(N, DAG, DCI, Subtarget);
// Runs LowerOperation on N's first result and pushes each produced value
// into Results for the type legalizer.
768 MipsTargetLowering::LowerOperationWrapper(SDNode *N,
769 SmallVectorImpl<SDValue> &Results,
770 SelectionDAG &DAG) const {
771 SDValue Res = LowerOperation(SDValue(N, 0), DAG);
773 for (unsigned I = 0, E = Res->getNumValues(); I != E; ++I)
774 Results.push_back(Res.getValue(I));
// Custom result-type legalization simply reuses LowerOperationWrapper.
778 MipsTargetLowering::ReplaceNodeResults(SDNode *N,
779 SmallVectorImpl<SDValue> &Results,
780 SelectionDAG &DAG) const {
781 return LowerOperationWrapper(N, Results, DAG);
// Central dispatcher for all operations the constructor marked Custom;
// routes each opcode to its dedicated lower* helper.
784 SDValue MipsTargetLowering::
785 LowerOperation(SDValue Op, SelectionDAG &DAG) const
787 switch (Op.getOpcode())
789 case ISD::BR_JT: return lowerBR_JT(Op, DAG);
790 case ISD::BRCOND: return lowerBRCOND(Op, DAG);
791 case ISD::ConstantPool: return lowerConstantPool(Op, DAG);
792 case ISD::GlobalAddress: return lowerGlobalAddress(Op, DAG);
793 case ISD::BlockAddress: return lowerBlockAddress(Op, DAG);
794 case ISD::GlobalTLSAddress: return lowerGlobalTLSAddress(Op, DAG);
795 case ISD::JumpTable: return lowerJumpTable(Op, DAG);
796 case ISD::SELECT: return lowerSELECT(Op, DAG);
797 case ISD::SELECT_CC: return lowerSELECT_CC(Op, DAG);
798 case ISD::SETCC: return lowerSETCC(Op, DAG);
799 case ISD::VASTART: return lowerVASTART(Op, DAG);
800 case ISD::VAARG: return lowerVAARG(Op, DAG);
801 case ISD::FCOPYSIGN: return lowerFCOPYSIGN(Op, DAG);
802 case ISD::FRAMEADDR: return lowerFRAMEADDR(Op, DAG);
803 case ISD::RETURNADDR: return lowerRETURNADDR(Op, DAG);
804 case ISD::EH_RETURN: return lowerEH_RETURN(Op, DAG);
805 case ISD::ATOMIC_FENCE: return lowerATOMIC_FENCE(Op, DAG);
806 case ISD::SHL_PARTS: return lowerShiftLeftParts(Op, DAG);
807 case ISD::SRA_PARTS: return lowerShiftRightParts(Op, DAG, true);
808 case ISD::SRL_PARTS: return lowerShiftRightParts(Op, DAG, false);
809 case ISD::LOAD: return lowerLOAD(Op, DAG);
810 case ISD::STORE: return lowerSTORE(Op, DAG);
811 case ISD::ADD: return lowerADD(Op, DAG);
812 case ISD::FP_TO_SINT: return lowerFP_TO_SINT(Op, DAG);
817 //===----------------------------------------------------------------------===//
818 // Lower helper functions
819 //===----------------------------------------------------------------------===//
821 // addLiveIn - This helper function adds the specified physical register to the
822 // MachineFunction as a live in value. It also creates a corresponding
823 // virtual register for it.
// Returns the created virtual register (return statement elided in this
// extraction, per the original line numbering).
825 addLiveIn(MachineFunction &MF, unsigned PReg, const TargetRegisterClass *RC)
827 unsigned VReg = MF.getRegInfo().createVirtualRegister(RC);
828 MF.getRegInfo().addLiveIn(PReg, VReg);
// Inserts a trap-if-equal (teq divisor, $zero, 7) immediately after a
// division pseudo so a zero divisor raises a trap at run time.
832 static MachineBasicBlock *insertDivByZeroTrap(MachineInstr *MI,
833 MachineBasicBlock &MBB,
834 const TargetInstrInfo &TII,
839 // Insert instruction "teq $divisor_reg, $zero, 7".
840 MachineBasicBlock::iterator I(MI);
841 MachineInstrBuilder MIB;
842 MachineOperand &Divisor = MI->getOperand(2);
// The teq goes AFTER the division instruction (std::next(I)).
843 MIB = BuildMI(MBB, std::next(I), MI->getDebugLoc(), TII.get(Mips::TEQ))
844 .addReg(Divisor.getReg(), getKillRegState(Divisor.isKill()))
845 .addReg(Mips::ZERO).addImm(7);
847 // Use the 32-bit sub-register if this is a 64-bit division.
849 MIB->getOperand(0).setSubReg(Mips::sub_32);
851 // Clear Divisor's kill flag.
// The divisor is now also read by the teq, so the division must not kill it.
852 Divisor.setIsKill(false);
854 // We would normally delete the original instruction here but in this case
855 // we only needed to inject an additional instruction rather than replace it.
// Expands pseudo-instructions that need custom MachineBasicBlock surgery:
// the atomic read-modify-write / swap / compare-and-swap pseudos, division
// pseudos (which get a divide-by-zero trap), and SEL_D.
861 MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
862 MachineBasicBlock *BB) const {
863 switch (MI->getOpcode()) {
865 llvm_unreachable("Unexpected instr type to insert");
// 8/16-bit atomics need the sub-word (partword) expansion; 32/64-bit use
// the plain ll/sc loop. The last argument is the binary opcode to apply.
866 case Mips::ATOMIC_LOAD_ADD_I8:
867 return emitAtomicBinaryPartword(MI, BB, 1, Mips::ADDu);
868 case Mips::ATOMIC_LOAD_ADD_I16:
869 return emitAtomicBinaryPartword(MI, BB, 2, Mips::ADDu);
870 case Mips::ATOMIC_LOAD_ADD_I32:
871 return emitAtomicBinary(MI, BB, 4, Mips::ADDu);
872 case Mips::ATOMIC_LOAD_ADD_I64:
873 return emitAtomicBinary(MI, BB, 8, Mips::DADDu);
875 case Mips::ATOMIC_LOAD_AND_I8:
876 return emitAtomicBinaryPartword(MI, BB, 1, Mips::AND);
877 case Mips::ATOMIC_LOAD_AND_I16:
878 return emitAtomicBinaryPartword(MI, BB, 2, Mips::AND);
879 case Mips::ATOMIC_LOAD_AND_I32:
880 return emitAtomicBinary(MI, BB, 4, Mips::AND);
881 case Mips::ATOMIC_LOAD_AND_I64:
882 return emitAtomicBinary(MI, BB, 8, Mips::AND64);
884 case Mips::ATOMIC_LOAD_OR_I8:
885 return emitAtomicBinaryPartword(MI, BB, 1, Mips::OR);
886 case Mips::ATOMIC_LOAD_OR_I16:
887 return emitAtomicBinaryPartword(MI, BB, 2, Mips::OR);
888 case Mips::ATOMIC_LOAD_OR_I32:
889 return emitAtomicBinary(MI, BB, 4, Mips::OR);
890 case Mips::ATOMIC_LOAD_OR_I64:
891 return emitAtomicBinary(MI, BB, 8, Mips::OR64);
893 case Mips::ATOMIC_LOAD_XOR_I8:
894 return emitAtomicBinaryPartword(MI, BB, 1, Mips::XOR);
895 case Mips::ATOMIC_LOAD_XOR_I16:
896 return emitAtomicBinaryPartword(MI, BB, 2, Mips::XOR);
897 case Mips::ATOMIC_LOAD_XOR_I32:
898 return emitAtomicBinary(MI, BB, 4, Mips::XOR);
899 case Mips::ATOMIC_LOAD_XOR_I64:
900 return emitAtomicBinary(MI, BB, 8, Mips::XOR64);
// NAND has no single opcode: pass 0 with the Nand flag set.
902 case Mips::ATOMIC_LOAD_NAND_I8:
903 return emitAtomicBinaryPartword(MI, BB, 1, 0, true);
904 case Mips::ATOMIC_LOAD_NAND_I16:
905 return emitAtomicBinaryPartword(MI, BB, 2, 0, true);
906 case Mips::ATOMIC_LOAD_NAND_I32:
907 return emitAtomicBinary(MI, BB, 4, 0, true);
908 case Mips::ATOMIC_LOAD_NAND_I64:
909 return emitAtomicBinary(MI, BB, 8, 0, true);
911 case Mips::ATOMIC_LOAD_SUB_I8:
912 return emitAtomicBinaryPartword(MI, BB, 1, Mips::SUBu);
913 case Mips::ATOMIC_LOAD_SUB_I16:
914 return emitAtomicBinaryPartword(MI, BB, 2, Mips::SUBu);
915 case Mips::ATOMIC_LOAD_SUB_I32:
916 return emitAtomicBinary(MI, BB, 4, Mips::SUBu);
917 case Mips::ATOMIC_LOAD_SUB_I64:
918 return emitAtomicBinary(MI, BB, 8, Mips::DSUBu);
// Swap is the binary expansion with opcode 0 (store the new value as-is).
920 case Mips::ATOMIC_SWAP_I8:
921 return emitAtomicBinaryPartword(MI, BB, 1, 0);
922 case Mips::ATOMIC_SWAP_I16:
923 return emitAtomicBinaryPartword(MI, BB, 2, 0);
924 case Mips::ATOMIC_SWAP_I32:
925 return emitAtomicBinary(MI, BB, 4, 0);
926 case Mips::ATOMIC_SWAP_I64:
927 return emitAtomicBinary(MI, BB, 8, 0);
929 case Mips::ATOMIC_CMP_SWAP_I8:
930 return emitAtomicCmpSwapPartword(MI, BB, 1);
931 case Mips::ATOMIC_CMP_SWAP_I16:
932 return emitAtomicCmpSwapPartword(MI, BB, 2);
933 case Mips::ATOMIC_CMP_SWAP_I32:
934 return emitAtomicCmpSwap(MI, BB, 4);
935 case Mips::ATOMIC_CMP_SWAP_I64:
936 return emitAtomicCmpSwap(MI, BB, 8);
// Division pseudos: guard against divide-by-zero (see NoZeroDivCheck).
937 case Mips::PseudoSDIV:
938 case Mips::PseudoUDIV:
943 return insertDivByZeroTrap(MI, *BB, *getTargetMachine().getInstrInfo(),
945 case Mips::PseudoDSDIV:
946 case Mips::PseudoDUDIV:
951 return insertDivByZeroTrap(MI, *BB, *getTargetMachine().getInstrInfo(),
954 return emitSEL_D(MI, BB);
958 // This function also handles Mips::ATOMIC_SWAP_I32 (when BinOpcode == 0), and
959 // Mips::ATOMIC_LOAD_NAND_I32 (when Nand == true)
// Expands a full-width (32- or 64-bit) atomic read-modify-write pseudo into
// an explicit LL/SC retry loop:
//   loopMBB:  ll oldval, 0(ptr); <combine>; sc success, storeval, 0(ptr);
//             beq success, $0, loopMBB
// The remainder of BB after MI is spliced into a new exit block.
961 MipsTargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
962 unsigned Size, unsigned BinOpcode,
964 assert((Size == 4 || Size == 8) && "Unsupported size for EmitAtomicBinary.");
966 MachineFunction *MF = BB->getParent();
967 MachineRegisterInfo &RegInfo = MF->getRegInfo();
968 const TargetRegisterClass *RC = getRegClassFor(MVT::getIntegerVT(Size * 8));
969 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
970 DebugLoc DL = MI->getDebugLoc();
971 unsigned LL, SC, AND, NOR, ZERO, BEQ;
// Pick the 32- or 64-bit flavor of LL/SC; R6 uses distinct encodings.
978 LL = Subtarget.hasMips32r6() ? Mips::LL_R6 : Mips::LL;
979 SC = Subtarget.hasMips32r6() ? Mips::SC_R6 : Mips::SC;
986 LL = Subtarget.hasMips64r6() ? Mips::LLD_R6 : Mips::LLD;
987 SC = Subtarget.hasMips64r6() ? Mips::SCD_R6 : Mips::SCD;
990 ZERO = Mips::ZERO_64;
994 unsigned OldVal = MI->getOperand(0).getReg();
995 unsigned Ptr = MI->getOperand(1).getReg();
996 unsigned Incr = MI->getOperand(2).getReg();
998 unsigned StoreVal = RegInfo.createVirtualRegister(RC);
999 unsigned AndRes = RegInfo.createVirtualRegister(RC);
1000 unsigned Success = RegInfo.createVirtualRegister(RC);
1002 // insert new blocks after the current block
1003 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1004 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
1005 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
1006 MachineFunction::iterator It = BB;
1008 MF->insert(It, loopMBB);
1009 MF->insert(It, exitMBB);
1011 // Transfer the remainder of BB and its successor edges to exitMBB.
1012 exitMBB->splice(exitMBB->begin(), BB,
1013 std::next(MachineBasicBlock::iterator(MI)), BB->end());
1014 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
// loopMBB loops back to itself on a failed SC and falls through to exitMBB.
1018 // fallthrough --> loopMBB
1019 BB->addSuccessor(loopMBB);
1020 loopMBB->addSuccessor(loopMBB);
1021 loopMBB->addSuccessor(exitMBB);
1024 // ll oldval, 0(ptr)
1025 // <binop> storeval, oldval, incr
1026 // sc success, storeval, 0(ptr)
1027 // beq success, $0, loopMBB
1029 BuildMI(BB, DL, TII->get(LL), OldVal).addReg(Ptr).addImm(0);
// NAND path: synthesized as AND followed by NOR against $zero.
1031 // and andres, oldval, incr
1032 // nor storeval, $0, andres
1033 BuildMI(BB, DL, TII->get(AND), AndRes).addReg(OldVal).addReg(Incr);
1034 BuildMI(BB, DL, TII->get(NOR), StoreVal).addReg(ZERO).addReg(AndRes);
1035 } else if (BinOpcode) {
1036 // <binop> storeval, oldval, incr
1037 BuildMI(BB, DL, TII->get(BinOpcode), StoreVal).addReg(OldVal).addReg(Incr);
// SC writes success/failure into Success; retry the loop on failure (== 0).
1041 BuildMI(BB, DL, TII->get(SC), Success).addReg(StoreVal).addReg(Ptr).addImm(0);
1042 BuildMI(BB, DL, TII->get(BEQ)).addReg(Success).addReg(ZERO).addMBB(loopMBB);
1044 MI->eraseFromParent(); // The instruction is gone now.
// Sign-extend the low Size bytes of SrcReg into DstReg (as i32).
// Uses the single-instruction SEB/SEH on MIPS32r2+; otherwise falls back to
// the classic shift-left/shift-right-arithmetic pair.
1049 MachineBasicBlock *MipsTargetLowering::emitSignExtendToI32InReg(
1050 MachineInstr *MI, MachineBasicBlock *BB, unsigned Size, unsigned DstReg,
1051 unsigned SrcReg) const {
1052 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
1053 DebugLoc DL = MI->getDebugLoc();
// r2+ has dedicated sign-extend-byte/halfword instructions.
1055 if (Subtarget.hasMips32r2() && Size == 1) {
1056 BuildMI(BB, DL, TII->get(Mips::SEB), DstReg).addReg(SrcReg);
1060 if (Subtarget.hasMips32r2() && Size == 2) {
1061 BuildMI(BB, DL, TII->get(Mips::SEH), DstReg).addReg(SrcReg);
1065 MachineFunction *MF = BB->getParent();
1066 MachineRegisterInfo &RegInfo = MF->getRegInfo();
1067 const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
1068 unsigned ScrReg = RegInfo.createVirtualRegister(RC);
// Shift the value to the top of the register, then arithmetic-shift it back
// down so the sign bit is replicated: 24 for bytes, 16 for halfwords.
1071 int64_t ShiftImm = 32 - (Size * 8);
1073 BuildMI(BB, DL, TII->get(Mips::SLL), ScrReg).addReg(SrcReg).addImm(ShiftImm);
1074 BuildMI(BB, DL, TII->get(Mips::SRA), DstReg).addReg(ScrReg).addImm(ShiftImm);
// Expands an i8/i16 atomic read-modify-write into a word-sized LL/SC loop.
// MIPS LL/SC operate only on aligned words, so the byte/halfword is accessed
// through its containing aligned word: a shifted mask selects the subword,
// the binop is applied in the shifted position, and the untouched lanes of
// the old word are merged back in before the SC. The loaded old subword is
// finally shifted down and sign-extended into Dest.
1079 MachineBasicBlock *MipsTargetLowering::emitAtomicBinaryPartword(
1080 MachineInstr *MI, MachineBasicBlock *BB, unsigned Size, unsigned BinOpcode,
1082 assert((Size == 1 || Size == 2) &&
1083 "Unsupported size for EmitAtomicBinaryPartial.");
1085 MachineFunction *MF = BB->getParent();
1086 MachineRegisterInfo &RegInfo = MF->getRegInfo();
1087 const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
1088 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
1089 DebugLoc DL = MI->getDebugLoc();
1091 unsigned Dest = MI->getOperand(0).getReg();
1092 unsigned Ptr = MI->getOperand(1).getReg();
1093 unsigned Incr = MI->getOperand(2).getReg();
// One virtual register per intermediate value of the expansion; names match
// the pseudo-assembly in the comments below.
1095 unsigned AlignedAddr = RegInfo.createVirtualRegister(RC);
1096 unsigned ShiftAmt = RegInfo.createVirtualRegister(RC);
1097 unsigned Mask = RegInfo.createVirtualRegister(RC);
1098 unsigned Mask2 = RegInfo.createVirtualRegister(RC);
1099 unsigned NewVal = RegInfo.createVirtualRegister(RC);
1100 unsigned OldVal = RegInfo.createVirtualRegister(RC);
1101 unsigned Incr2 = RegInfo.createVirtualRegister(RC);
1102 unsigned MaskLSB2 = RegInfo.createVirtualRegister(RC);
1103 unsigned PtrLSB2 = RegInfo.createVirtualRegister(RC);
1104 unsigned MaskUpper = RegInfo.createVirtualRegister(RC);
1105 unsigned AndRes = RegInfo.createVirtualRegister(RC);
1106 unsigned BinOpRes = RegInfo.createVirtualRegister(RC);
1107 unsigned MaskedOldVal0 = RegInfo.createVirtualRegister(RC);
1108 unsigned StoreVal = RegInfo.createVirtualRegister(RC);
1109 unsigned MaskedOldVal1 = RegInfo.createVirtualRegister(RC);
1110 unsigned SrlRes = RegInfo.createVirtualRegister(RC);
1111 unsigned Success = RegInfo.createVirtualRegister(RC);
1113 // insert new blocks after the current block
1114 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1115 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
1116 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(LLVM_BB);
1117 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
1118 MachineFunction::iterator It = BB;
1120 MF->insert(It, loopMBB);
1121 MF->insert(It, sinkMBB);
1122 MF->insert(It, exitMBB);
1124 // Transfer the remainder of BB and its successor edges to exitMBB.
1125 exitMBB->splice(exitMBB->begin(), BB,
1126 std::next(MachineBasicBlock::iterator(MI)), BB->end());
1127 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
// BB computes masks, loopMBB retries until SC succeeds, sinkMBB extracts
// and sign-extends the old subword value.
1129 BB->addSuccessor(loopMBB);
1130 loopMBB->addSuccessor(loopMBB);
1131 loopMBB->addSuccessor(sinkMBB);
1132 sinkMBB->addSuccessor(exitMBB);
1135 // addiu masklsb2,$0,-4 # 0xfffffffc
1136 // and alignedaddr,ptr,masklsb2
1137 // andi ptrlsb2,ptr,3
1138 // sll shiftamt,ptrlsb2,3
1139 // ori maskupper,$0,255 # 0xff
1140 // sll mask,maskupper,shiftamt
1141 // nor mask2,$0,mask
1142 // sll incr2,incr,shiftamt
1144 int64_t MaskImm = (Size == 1) ? 255 : 65535;
1145 BuildMI(BB, DL, TII->get(Mips::ADDiu), MaskLSB2)
1146 .addReg(Mips::ZERO).addImm(-4);
1147 BuildMI(BB, DL, TII->get(Mips::AND), AlignedAddr)
1148 .addReg(Ptr).addReg(MaskLSB2);
1149 BuildMI(BB, DL, TII->get(Mips::ANDi), PtrLSB2).addReg(Ptr).addImm(3);
// The bit offset of the subword within the word depends on endianness:
// on big-endian the byte offset is flipped before converting to bits.
1150 if (Subtarget.isLittle()) {
1151 BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
1153 unsigned Off = RegInfo.createVirtualRegister(RC);
1154 BuildMI(BB, DL, TII->get(Mips::XORi), Off)
1155 .addReg(PtrLSB2).addImm((Size == 1) ? 3 : 2);
1156 BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(Off).addImm(3);
1158 BuildMI(BB, DL, TII->get(Mips::ORi), MaskUpper)
1159 .addReg(Mips::ZERO).addImm(MaskImm);
1160 BuildMI(BB, DL, TII->get(Mips::SLLV), Mask)
1161 .addReg(MaskUpper).addReg(ShiftAmt);
1162 BuildMI(BB, DL, TII->get(Mips::NOR), Mask2).addReg(Mips::ZERO).addReg(Mask);
1163 BuildMI(BB, DL, TII->get(Mips::SLLV), Incr2).addReg(Incr).addReg(ShiftAmt);
1165 // atomic.load.binop
1167 // ll oldval,0(alignedaddr)
1168 // binop binopres,oldval,incr2
1169 // and newval,binopres,mask
1170 // and maskedoldval0,oldval,mask2
1171 // or storeval,maskedoldval0,newval
1172 // sc success,storeval,0(alignedaddr)
1173 // beq success,$0,loopMBB
// atomic.swap variant (BinOpcode == 0): the incoming value replaces the
// subword directly, so no binop is emitted.
1177 // ll oldval,0(alignedaddr)
1178 // and newval,incr2,mask
1179 // and maskedoldval0,oldval,mask2
1180 // or storeval,maskedoldval0,newval
1181 // sc success,storeval,0(alignedaddr)
1182 // beq success,$0,loopMBB
1185 BuildMI(BB, DL, TII->get(Mips::LL), OldVal).addReg(AlignedAddr).addImm(0);
// NAND: AND then NOR against $zero, since MIPS has no nand instruction.
1187 // and andres, oldval, incr2
1188 // nor binopres, $0, andres
1189 // and newval, binopres, mask
1190 BuildMI(BB, DL, TII->get(Mips::AND), AndRes).addReg(OldVal).addReg(Incr2);
1191 BuildMI(BB, DL, TII->get(Mips::NOR), BinOpRes)
1192 .addReg(Mips::ZERO).addReg(AndRes);
1193 BuildMI(BB, DL, TII->get(Mips::AND), NewVal).addReg(BinOpRes).addReg(Mask);
1194 } else if (BinOpcode) {
1195 // <binop> binopres, oldval, incr2
1196 // and newval, binopres, mask
1197 BuildMI(BB, DL, TII->get(BinOpcode), BinOpRes).addReg(OldVal).addReg(Incr2);
1198 BuildMI(BB, DL, TII->get(Mips::AND), NewVal).addReg(BinOpRes).addReg(Mask);
1199 } else { // atomic.swap
1200 // and newval, incr2, mask
1201 BuildMI(BB, DL, TII->get(Mips::AND), NewVal).addReg(Incr2).addReg(Mask);
// Merge the unchanged lanes (oldval & ~mask) with the new subword and
// conditionally store; retry from loopMBB if the SC failed.
1204 BuildMI(BB, DL, TII->get(Mips::AND), MaskedOldVal0)
1205 .addReg(OldVal).addReg(Mask2);
1206 BuildMI(BB, DL, TII->get(Mips::OR), StoreVal)
1207 .addReg(MaskedOldVal0).addReg(NewVal);
1208 BuildMI(BB, DL, TII->get(Mips::SC), Success)
1209 .addReg(StoreVal).addReg(AlignedAddr).addImm(0);
1210 BuildMI(BB, DL, TII->get(Mips::BEQ))
1211 .addReg(Success).addReg(Mips::ZERO).addMBB(loopMBB);
// sinkMBB: recover the original subword value and sign-extend it into Dest.
1214 // and maskedoldval1,oldval,mask
1215 // srl srlres,maskedoldval1,shiftamt
1216 // sign_extend dest,srlres
1219 BuildMI(BB, DL, TII->get(Mips::AND), MaskedOldVal1)
1220 .addReg(OldVal).addReg(Mask)
1221 BuildMI(BB, DL, TII->get(Mips::SRLV), SrlRes)
1222 .addReg(MaskedOldVal1).addReg(ShiftAmt);
1223 BB = emitSignExtendToI32InReg(MI, BB, Size, Dest, SrlRes);
1225 MI->eraseFromParent(); // The instruction is gone now.
// Expands a full-width (32- or 64-bit) atomic compare-and-swap pseudo into a
// two-block LL/SC sequence:
//   loop1MBB: ll dest, 0(ptr); bne dest, oldval, exitMBB   (compare)
//   loop2MBB: sc success, newval, 0(ptr); beq success, $0, loop1MBB  (store)
// Dest receives the loaded value either way, matching cmpxchg semantics.
1230 MachineBasicBlock * MipsTargetLowering::emitAtomicCmpSwap(MachineInstr *MI,
1231 MachineBasicBlock *BB,
1232 unsigned Size) const {
1233 assert((Size == 4 || Size == 8) && "Unsupported size for EmitAtomicCmpSwap.");
1235 MachineFunction *MF = BB->getParent();
1236 MachineRegisterInfo &RegInfo = MF->getRegInfo();
1237 const TargetRegisterClass *RC = getRegClassFor(MVT::getIntegerVT(Size * 8));
1238 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
1239 DebugLoc DL = MI->getDebugLoc();
1240 unsigned LL, SC, ZERO, BNE, BEQ;
// microMIPS has its own LL/SC encodings for the 32-bit case.
1243 LL = isMicroMips ? Mips::LL_MM : Mips::LL;
1244 SC = isMicroMips ? Mips::SC_MM : Mips::SC;
1251 ZERO = Mips::ZERO_64;
1256 unsigned Dest = MI->getOperand(0).getReg();
1257 unsigned Ptr = MI->getOperand(1).getReg();
1258 unsigned OldVal = MI->getOperand(2).getReg();
1259 unsigned NewVal = MI->getOperand(3).getReg();
1261 unsigned Success = RegInfo.createVirtualRegister(RC);
1263 // insert new blocks after the current block
1264 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1265 MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
1266 MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
1267 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
1268 MachineFunction::iterator It = BB;
1270 MF->insert(It, loop1MBB);
1271 MF->insert(It, loop2MBB);
1272 MF->insert(It, exitMBB);
1274 // Transfer the remainder of BB and its successor edges to exitMBB.
1275 exitMBB->splice(exitMBB->begin(), BB,
1276 std::next(MachineBasicBlock::iterator(MI)), BB->end());
1277 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
// loop1 exits early on compare failure; loop2 retries from loop1 on SC
// failure and falls through to exitMBB on success.
1281 // fallthrough --> loop1MBB
1282 BB->addSuccessor(loop1MBB);
1283 loop1MBB->addSuccessor(exitMBB);
1284 loop1MBB->addSuccessor(loop2MBB);
1285 loop2MBB->addSuccessor(loop1MBB);
1286 loop2MBB->addSuccessor(exitMBB);
1290 // bne dest, oldval, exitMBB
1292 BuildMI(BB, DL, TII->get(LL), Dest).addReg(Ptr).addImm(0);
1293 BuildMI(BB, DL, TII->get(BNE))
1294 .addReg(Dest).addReg(OldVal).addMBB(exitMBB);
1297 // sc success, newval, 0(ptr)
1298 // beq success, $0, loop1MBB
1300 BuildMI(BB, DL, TII->get(SC), Success)
1301 .addReg(NewVal).addReg(Ptr).addImm(0);
1302 BuildMI(BB, DL, TII->get(BEQ))
1303 .addReg(Success).addReg(ZERO).addMBB(loop1MBB);
1305 MI->eraseFromParent(); // The instruction is gone now.
// Expands an i8/i16 atomic compare-and-swap into a word-sized LL/SC sequence.
// As in emitAtomicBinaryPartword, the subword is accessed through its aligned
// containing word with a shifted mask; the compare is done against the
// shifted/masked expected value, and only the subword's lanes are replaced
// on a successful compare. The old subword is sign-extended into Dest.
1311 MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI,
1312 MachineBasicBlock *BB,
1313 unsigned Size) const {
1314 assert((Size == 1 || Size == 2) &&
1315 "Unsupported size for EmitAtomicCmpSwapPartial.");
1317 MachineFunction *MF = BB->getParent();
1318 MachineRegisterInfo &RegInfo = MF->getRegInfo();
1319 const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
1320 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
1321 DebugLoc DL = MI->getDebugLoc();
1323 unsigned Dest = MI->getOperand(0).getReg();
1324 unsigned Ptr = MI->getOperand(1).getReg();
1325 unsigned CmpVal = MI->getOperand(2).getReg();
1326 unsigned NewVal = MI->getOperand(3).getReg();
// Intermediate virtual registers; names match the pseudo-assembly comments.
1328 unsigned AlignedAddr = RegInfo.createVirtualRegister(RC);
1329 unsigned ShiftAmt = RegInfo.createVirtualRegister(RC);
1330 unsigned Mask = RegInfo.createVirtualRegister(RC);
1331 unsigned Mask2 = RegInfo.createVirtualRegister(RC);
1332 unsigned ShiftedCmpVal = RegInfo.createVirtualRegister(RC);
1333 unsigned OldVal = RegInfo.createVirtualRegister(RC);
1334 unsigned MaskedOldVal0 = RegInfo.createVirtualRegister(RC);
1335 unsigned ShiftedNewVal = RegInfo.createVirtualRegister(RC);
1336 unsigned MaskLSB2 = RegInfo.createVirtualRegister(RC);
1337 unsigned PtrLSB2 = RegInfo.createVirtualRegister(RC);
1338 unsigned MaskUpper = RegInfo.createVirtualRegister(RC);
1339 unsigned MaskedCmpVal = RegInfo.createVirtualRegister(RC);
1340 unsigned MaskedNewVal = RegInfo.createVirtualRegister(RC);
1341 unsigned MaskedOldVal1 = RegInfo.createVirtualRegister(RC);
1342 unsigned StoreVal = RegInfo.createVirtualRegister(RC);
1343 unsigned SrlRes = RegInfo.createVirtualRegister(RC);
1344 unsigned Success = RegInfo.createVirtualRegister(RC);
1346 // insert new blocks after the current block
1347 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1348 MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
1349 MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
1350 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(LLVM_BB);
1351 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
1352 MachineFunction::iterator It = BB;
1354 MF->insert(It, loop1MBB);
1355 MF->insert(It, loop2MBB);
1356 MF->insert(It, sinkMBB);
1357 MF->insert(It, exitMBB);
1359 // Transfer the remainder of BB and its successor edges to exitMBB.
1360 exitMBB->splice(exitMBB->begin(), BB,
1361 std::next(MachineBasicBlock::iterator(MI)), BB->end());
1362 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
// loop1 branches to sinkMBB on compare failure; loop2 retries from loop1 on
// SC failure; sinkMBB extracts the old subword and sign-extends it.
1364 BB->addSuccessor(loop1MBB);
1365 loop1MBB->addSuccessor(sinkMBB);
1366 loop1MBB->addSuccessor(loop2MBB);
1367 loop2MBB->addSuccessor(loop1MBB);
1368 loop2MBB->addSuccessor(sinkMBB);
1369 sinkMBB->addSuccessor(exitMBB);
1371 // FIXME: computation of newval2 can be moved to loop2MBB.
1373 // addiu masklsb2,$0,-4 # 0xfffffffc
1374 // and alignedaddr,ptr,masklsb2
1375 // andi ptrlsb2,ptr,3
1376 // sll shiftamt,ptrlsb2,3
1377 // ori maskupper,$0,255 # 0xff
1378 // sll mask,maskupper,shiftamt
1379 // nor mask2,$0,mask
1380 // andi maskedcmpval,cmpval,255
1381 // sll shiftedcmpval,maskedcmpval,shiftamt
1382 // andi maskednewval,newval,255
1383 // sll shiftednewval,maskednewval,shiftamt
1384 int64_t MaskImm = (Size == 1) ? 255 : 65535;
1385 BuildMI(BB, DL, TII->get(Mips::ADDiu), MaskLSB2)
1386 .addReg(Mips::ZERO).addImm(-4);
1387 BuildMI(BB, DL, TII->get(Mips::AND), AlignedAddr)
1388 .addReg(Ptr).addReg(MaskLSB2);
1389 BuildMI(BB, DL, TII->get(Mips::ANDi), PtrLSB2).addReg(Ptr).addImm(3);
// Subword bit offset within the word; flipped for big-endian targets.
1390 if (Subtarget.isLittle()) {
1391 BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
1393 unsigned Off = RegInfo.createVirtualRegister(RC);
1394 BuildMI(BB, DL, TII->get(Mips::XORi), Off)
1395 .addReg(PtrLSB2).addImm((Size == 1) ? 3 : 2);
1396 BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(Off).addImm(3);
1398 BuildMI(BB, DL, TII->get(Mips::ORi), MaskUpper)
1399 .addReg(Mips::ZERO).addImm(MaskImm);
1400 BuildMI(BB, DL, TII->get(Mips::SLLV), Mask)
1401 .addReg(MaskUpper).addReg(ShiftAmt);
1402 BuildMI(BB, DL, TII->get(Mips::NOR), Mask2).addReg(Mips::ZERO).addReg(Mask);
// Pre-shift the compare and new values into the subword's position so the
// comparison and merge can operate on whole words.
1403 BuildMI(BB, DL, TII->get(Mips::ANDi), MaskedCmpVal)
1404 .addReg(CmpVal).addImm(MaskImm);
1405 BuildMI(BB, DL, TII->get(Mips::SLLV), ShiftedCmpVal)
1406 .addReg(MaskedCmpVal).addReg(ShiftAmt);
1407 BuildMI(BB, DL, TII->get(Mips::ANDi), MaskedNewVal)
1408 .addReg(NewVal).addImm(MaskImm);
1409 BuildMI(BB, DL, TII->get(Mips::SLLV), ShiftedNewVal)
1410 .addReg(MaskedNewVal).addReg(ShiftAmt);
1413 // ll oldval,0(alignedaddr)
1414 // and maskedoldval0,oldval,mask
1415 // bne maskedoldval0,shiftedcmpval,sinkMBB
1417 BuildMI(BB, DL, TII->get(Mips::LL), OldVal).addReg(AlignedAddr).addImm(0);
1418 BuildMI(BB, DL, TII->get(Mips::AND), MaskedOldVal0)
1419 .addReg(OldVal).addReg(Mask);
1420 BuildMI(BB, DL, TII->get(Mips::BNE))
1421 .addReg(MaskedOldVal0).addReg(ShiftedCmpVal).addMBB(sinkMBB);
1424 // and maskedoldval1,oldval,mask2
1425 // or storeval,maskedoldval1,shiftednewval
1426 // sc success,storeval,0(alignedaddr)
1427 // beq success,$0,loop1MBB
1429 BuildMI(BB, DL, TII->get(Mips::AND), MaskedOldVal1)
1430 .addReg(OldVal).addReg(Mask2);
1431 BuildMI(BB, DL, TII->get(Mips::OR), StoreVal)
1432 .addReg(MaskedOldVal1).addReg(ShiftedNewVal);
1433 BuildMI(BB, DL, TII->get(Mips::SC), Success)
1434 .addReg(StoreVal).addReg(AlignedAddr).addImm(0);
1435 BuildMI(BB, DL, TII->get(Mips::BEQ))
1436 .addReg(Success).addReg(Mips::ZERO).addMBB(loop1MBB);
1439 // srl srlres,maskedoldval0,shiftamt
1440 // sign_extend dest,srlres
1443 BuildMI(BB, DL, TII->get(Mips::SRLV), SrlRes)
1444 .addReg(MaskedOldVal0).addReg(ShiftAmt);
1445 BB = emitSignExtendToI32InReg(MI, BB, Size, Dest, SrlRes);
1447 MI->eraseFromParent(); // The instruction is gone now.
// Widen the condition operand of a SEL_D pseudo from the 32-bit FPR to the
// 64-bit FGR64 super-register via SUBREG_TO_REG. The pseudo itself is kept;
// only its condition-register operand is rewritten in place.
1452 MachineBasicBlock *MipsTargetLowering::emitSEL_D(MachineInstr *MI,
1453 MachineBasicBlock *BB) const {
1454 MachineFunction *MF = BB->getParent();
1455 const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
1456 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
1457 MachineRegisterInfo &RegInfo = MF->getRegInfo();
1458 DebugLoc DL = MI->getDebugLoc();
// Insert the widening copy immediately before MI.
1459 MachineBasicBlock::iterator II(MI);
1461 unsigned Fc = MI->getOperand(1).getReg();
1462 const auto &FGR64RegClass = TRI->getRegClass(Mips::FGR64RegClassID);
1464 unsigned Fc2 = RegInfo.createVirtualRegister(FGR64RegClass);
// Place the 32-bit condition into the low half (sub_lo) of a fresh FGR64.
1466 BuildMI(*BB, II, DL, TII->get(Mips::SUBREG_TO_REG), Fc2)
1469 .addImm(Mips::sub_lo);
1471 // We don't erase the original instruction, we just replace the condition
1472 // register with the 64-bit super-register.
1473 MI->getOperand(1).setReg(Fc2);
1478 //===----------------------------------------------------------------------===//
1479 // Misc Lower Operation implementation
1480 //===----------------------------------------------------------------------===//
// Lower BR_JT: index into the jump table, load the (sign-extended) entry,
// add the PIC relocation base when needed, and emit an indirect branch.
1481 SDValue MipsTargetLowering::lowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
1482 SDValue Chain = Op.getOperand(0);
1483 SDValue Table = Op.getOperand(1);
1484 SDValue Index = Op.getOperand(2);
1486 EVT PTy = getPointerTy();
1487 unsigned EntrySize =
1488 DAG.getMachineFunction().getJumpTableInfo()->getEntrySize(*getDataLayout());
// Byte offset of the selected entry = Index * EntrySize.
1490 Index = DAG.getNode(ISD::MUL, DL, PTy, Index,
1491 DAG.getConstant(EntrySize, PTy));
1492 SDValue Addr = DAG.getNode(ISD::ADD, DL, PTy, Index, Table);
// Entries may be narrower than a pointer; sign-extend on load.
1494 EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), EntrySize * 8);
1495 Addr = DAG.getExtLoad(ISD::SEXTLOAD, DL, PTy, Chain, Addr,
1496 MachinePointerInfo::getJumpTable(), MemVT, false, false,
1498 Chain = Addr.getValue(1);
1500 if ((getTargetMachine().getRelocationModel() == Reloc::PIC_) ||
1501 Subtarget.isABI_N64()) {
1502 // For PIC, the sequence is:
1503 // BRIND(load(Jumptable + index) + RelocBase)
1504 // RelocBase can be JumpTable, GOT or some sort of global base.
1505 Addr = DAG.getNode(ISD::ADD, DL, PTy, Addr,
1506 getPICJumpTableRelocBase(Table, DAG));
1509 return DAG.getNode(ISD::BRIND, DL, MVT::Other, Chain, Addr);
// Lower BRCOND whose condition is an FP comparison into an FPBrcond node
// reading FCC0; non-FP conditions are returned unchanged.
1512 SDValue MipsTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
1513 // The first operand is the chain, the second is the condition, the third is
1514 // the block to branch to if the condition is true.
1515 SDValue Chain = Op.getOperand(0);
1516 SDValue Dest = Op.getOperand(2);
// FCC-based branches don't exist on R6; this path must not be reached there.
1519 assert(!Subtarget.hasMips32r6() && !Subtarget.hasMips64r6());
1520 SDValue CondRes = createFPCmp(DAG, Op.getOperand(1));
1522 // Return if flag is not set by a floating point comparison.
1523 if (CondRes.getOpcode() != MipsISD::FPCmp)
1526 SDValue CCNode = CondRes.getOperand(2);
1528 (Mips::CondCode)cast<ConstantSDNode>(CCNode)->getZExtValue();
// Some condition codes are only encodable as the inverse test, in which
// case we branch on false (BRANCH_F) instead of true (BRANCH_T).
1529 unsigned Opc = invertFPCondCodeUser(CC) ? Mips::BRANCH_F : Mips::BRANCH_T;
1530 SDValue BrCode = DAG.getConstant(Opc, MVT::i32);
1531 SDValue FCC0 = DAG.getRegister(Mips::FCC0, MVT::i32);
1532 return DAG.getNode(MipsISD::FPBrcond, DL, Op.getValueType(), Chain, BrCode,
1533 FCC0, Dest, CondRes);
// Lower SELECT on an FP-comparison condition to a conditional-move-on-FP
// (CMovFP) node; other conditions are returned unchanged.
1536 SDValue MipsTargetLowering::
1537 lowerSELECT(SDValue Op, SelectionDAG &DAG) const
// FCC-based conditional moves don't exist on R6.
1539 assert(!Subtarget.hasMips32r6() && !Subtarget.hasMips64r6());
1540 SDValue Cond = createFPCmp(DAG, Op.getOperand(0));
1542 // Return if flag is not set by a floating point comparison.
1543 if (Cond.getOpcode() != MipsISD::FPCmp)
1546 return createCMovFP(DAG, Cond, Op.getOperand(1), Op.getOperand(2),
// Lower SELECT_CC by materializing the comparison as an explicit SETCC and
// re-expressing the operation as a plain SELECT on its result.
1550 SDValue MipsTargetLowering::
1551 lowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
1554 EVT Ty = Op.getOperand(0).getValueType();
1555 SDValue Cond = DAG.getNode(ISD::SETCC, DL,
1556 getSetCCResultType(*DAG.getContext(), Ty),
1557 Op.getOperand(0), Op.getOperand(1),
1560 return DAG.getNode(ISD::SELECT, DL, Op.getValueType(), Cond, Op.getOperand(2),
// Lower an FP SETCC to a CMovFP selecting between the i32 constants 1 and 0
// based on the FP comparison flag.
1564 SDValue MipsTargetLowering::lowerSETCC(SDValue Op, SelectionDAG &DAG) const {
// FCC-based conditional moves don't exist on R6.
1565 assert(!Subtarget.hasMips32r6() && !Subtarget.hasMips64r6());
1566 SDValue Cond = createFPCmp(DAG, Op);
1568 assert(Cond.getOpcode() == MipsISD::FPCmp &&
1569 "Floating point operand expected.");
1571 SDValue True = DAG.getConstant(1, MVT::i32);
1572 SDValue False = DAG.getConstant(0, MVT::i32);
1574 return createCMovFP(DAG, Cond, True, False, SDLoc(Op));
// Lower a GlobalAddress node. Non-PIC (and non-N64): %gp_rel addressing for
// small-section globals, %hi/%lo otherwise. PIC/N64: GOT-relative access,
// with local-access forms for internal/local symbols and the large-GOT
// (%got_hi/%got_lo) sequence when -mxgot is in effect.
1577 SDValue MipsTargetLowering::lowerGlobalAddress(SDValue Op,
1578 SelectionDAG &DAG) const {
1579 // FIXME there isn't actually debug info here
1581 EVT Ty = Op.getValueType();
1582 GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
1583 const GlobalValue *GV = N->getGlobal();
1585 if (getTargetMachine().getRelocationModel() != Reloc::PIC_ &&
1586 !Subtarget.isABI_N64()) {
1587 const MipsTargetObjectFile &TLOF =
1588 (const MipsTargetObjectFile&)getObjFileLowering();
1590 // %gp_rel relocation
1591 if (TLOF.IsGlobalInSmallSection(GV, getTargetMachine())) {
1592 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, 0,
1594 SDValue GPRelNode = DAG.getNode(MipsISD::GPRel, DL,
1595 DAG.getVTList(MVT::i32), GA);
1596 SDValue GPReg = DAG.getRegister(Mips::GP, MVT::i32);
1597 return DAG.getNode(ISD::ADD, DL, MVT::i32, GPReg, GPRelNode);
1600 // %hi/%lo relocation
1601 return getAddrNonPIC(N, Ty, DAG);
// Symbols that cannot be preempted get the cheaper local-access sequence.
1604 if (GV->hasInternalLinkage() || (GV->hasLocalLinkage() && !isa<Function>(GV)))
1605 return getAddrLocal(N, Ty, DAG,
1606 Subtarget.isABI_N32() || Subtarget.isABI_N64());
// Large GOT (-mxgot): a %got_hi/%got_lo pair covers GOTs beyond 64k.
1609 return getAddrGlobalLargeGOT(N, Ty, DAG, MipsII::MO_GOT_HI16,
1610 MipsII::MO_GOT_LO16, DAG.getEntryNode(),
1611 MachinePointerInfo::getGOT());
1613 return getAddrGlobal(N, Ty, DAG,
1614 (Subtarget.isABI_N32() || Subtarget.isABI_N64())
1615 ? MipsII::MO_GOT_DISP
1617 DAG.getEntryNode(), MachinePointerInfo::getGOT());
// Lower a BlockAddress node: %hi/%lo addressing for non-PIC (non-N64),
// GOT-local addressing otherwise.
1620 SDValue MipsTargetLowering::lowerBlockAddress(SDValue Op,
1621 SelectionDAG &DAG) const {
1622 BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
1623 EVT Ty = Op.getValueType();
1625 if (getTargetMachine().getRelocationModel() != Reloc::PIC_ &&
1626 !Subtarget.isABI_N64())
1627 return getAddrNonPIC(N, Ty, DAG);
1629 return getAddrLocal(N, Ty, DAG,
1630 Subtarget.isABI_N32() || Subtarget.isABI_N64());
// Lower a TLS global address according to the selected TLS model:
//  - General/Local Dynamic: call __tls_get_addr on the GOT argument; for
//    Local Dynamic, add the module-relative %dtprel_hi/%dtprel_lo on top.
//  - Initial Exec: load the thread-pointer offset through %gottprel.
//  - Local Exec: compute the offset directly with %tprel_hi/%tprel_lo.
// The non-dynamic models finish by adding the offset to the thread pointer.
1633 SDValue MipsTargetLowering::
1634 lowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
1636 // If the relocation model is PIC, use the General Dynamic TLS Model or
1637 // Local Dynamic TLS model, otherwise use the Initial Exec or
1638 // Local Exec TLS Model.
1640 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
1642 const GlobalValue *GV = GA->getGlobal();
1643 EVT PtrVT = getPointerTy();
1645 TLSModel::Model model = getTargetMachine().getTLSModel(GV);
1647 if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
1648 // General Dynamic and Local Dynamic TLS Model.
1649 unsigned Flag = (model == TLSModel::LocalDynamic) ? MipsII::MO_TLSLDM
1652 SDValue TGA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, Flag);
1653 SDValue Argument = DAG.getNode(MipsISD::Wrapper, DL, PtrVT,
1654 getGlobalReg(DAG, PtrVT), TGA);
1655 unsigned PtrSize = PtrVT.getSizeInBits();
1656 IntegerType *PtrTy = Type::getIntNTy(*DAG.getContext(), PtrSize);
// Lower the runtime call to __tls_get_addr(Argument).
1658 SDValue TlsGetAddr = DAG.getExternalSymbol("__tls_get_addr", PtrVT);
1662 Entry.Node = Argument;
1664 Args.push_back(Entry);
1666 TargetLowering::CallLoweringInfo CLI(DAG);
1667 CLI.setDebugLoc(DL).setChain(DAG.getEntryNode())
1668 .setCallee(CallingConv::C, PtrTy, TlsGetAddr, std::move(Args), 0);
1669 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
1671 SDValue Ret = CallResult.first;
// General Dynamic: the call result is already the variable's address.
1673 if (model != TLSModel::LocalDynamic)
// Local Dynamic: add the per-symbol DTP-relative offset to the call result.
1676 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
1677 MipsII::MO_DTPREL_HI);
1678 SDValue Hi = DAG.getNode(MipsISD::Hi, DL, PtrVT, TGAHi);
1679 SDValue TGALo = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
1680 MipsII::MO_DTPREL_LO);
1681 SDValue Lo = DAG.getNode(MipsISD::Lo, DL, PtrVT, TGALo);
1682 SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Ret);
1683 return DAG.getNode(ISD::ADD, DL, PtrVT, Add, Lo);
1687 if (model == TLSModel::InitialExec) {
1688 // Initial Exec TLS Model
1689 SDValue TGA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
1690 MipsII::MO_GOTTPREL);
1691 TGA = DAG.getNode(MipsISD::Wrapper, DL, PtrVT, getGlobalReg(DAG, PtrVT),
1693 Offset = DAG.getLoad(PtrVT, DL,
1694 DAG.getEntryNode(), TGA, MachinePointerInfo(),
1695 false, false, false, 0);
1697 // Local Exec TLS Model
1698 assert(model == TLSModel::LocalExec);
1699 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
1700 MipsII::MO_TPREL_HI);
1701 SDValue TGALo = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
1702 MipsII::MO_TPREL_LO);
1703 SDValue Hi = DAG.getNode(MipsISD::Hi, DL, PtrVT, TGAHi);
1704 SDValue Lo = DAG.getNode(MipsISD::Lo, DL, PtrVT, TGALo);
1705 Offset = DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
// Final address = thread pointer + TP-relative offset.
1708 SDValue ThreadPointer = DAG.getNode(MipsISD::ThreadPointer, DL, PtrVT);
1709 return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadPointer, Offset);
// Lower a JumpTable node: %hi/%lo addressing for non-PIC (non-N64),
// GOT-local addressing otherwise.
1712 SDValue MipsTargetLowering::
1713 lowerJumpTable(SDValue Op, SelectionDAG &DAG) const
1715 JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
1716 EVT Ty = Op.getValueType();
1718 if (getTargetMachine().getRelocationModel() != Reloc::PIC_ &&
1719 !Subtarget.isABI_N64())
1720 return getAddrNonPIC(N, Ty, DAG);
1722 return getAddrLocal(N, Ty, DAG,
1723 Subtarget.isABI_N32() || Subtarget.isABI_N64());
// Lower a ConstantPool node: %hi/%lo addressing for non-PIC (non-N64),
// GOT-local addressing otherwise. gp_rel addressing for constants is not
// implemented yet (see the commented-out sketch below).
1726 SDValue MipsTargetLowering::
1727 lowerConstantPool(SDValue Op, SelectionDAG &DAG) const
1729 // gp_rel relocation
1730 // FIXME: we should reference the constant pool using small data sections,
1731 // but the asm printer currently doesn't support this feature without
1732 // hacking it. This feature should come soon so we can uncomment the
1734 //if (IsInSmallSection(C->getType())) {
1735 // SDValue GPRelNode = DAG.getNode(MipsISD::GPRel, MVT::i32, CP);
1736 // SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(MVT::i32);
1737 // ResNode = DAG.getNode(ISD::ADD, MVT::i32, GOT, GPRelNode);
1738 ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
1739 EVT Ty = Op.getValueType();
1741 if (getTargetMachine().getRelocationModel() != Reloc::PIC_ &&
1742 !Subtarget.isABI_N64())
1743 return getAddrNonPIC(N, Ty, DAG);
1745 return getAddrLocal(N, Ty, DAG,
1746 Subtarget.isABI_N32() || Subtarget.isABI_N64());
// Lower VASTART: store the address of the var-args frame slot into the
// va_list object given by operand 1.
1749 SDValue MipsTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
1750 MachineFunction &MF = DAG.getMachineFunction();
1751 MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
1754 SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
1757 // vastart just stores the address of the VarArgsFrameIndex slot into the
1758 // memory location argument.
1759 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
1760 return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
1761 MachinePointerInfo(SV), false, false, 0);
// Lower VAARG: load the current va_list pointer, re-align it when the
// argument's alignment exceeds the minimum stack-argument alignment (O32
// i64 case), bump the stored pointer past the slot-size-rounded argument,
// then load the value — with a big-endian adjustment when the value is
// narrower than its argument slot.
1764 SDValue MipsTargetLowering::lowerVAARG(SDValue Op, SelectionDAG &DAG) const {
1765 SDNode *Node = Op.getNode();
1766 EVT VT = Node->getValueType(0);
1767 SDValue Chain = Node->getOperand(0);
1768 SDValue VAListPtr = Node->getOperand(1);
1769 unsigned Align = Node->getConstantOperandVal(3);
1770 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
// Argument slots are 8 bytes on N32/N64, 4 on O32.
1772 unsigned ArgSlotSizeInBytes =
1773 (Subtarget.isABI_N32() || Subtarget.isABI_N64()) ? 8 : 4;
1775 SDValue VAListLoad = DAG.getLoad(getPointerTy(), DL, Chain, VAListPtr,
1776 MachinePointerInfo(SV), false, false, false,
1778 SDValue VAList = VAListLoad;
1780 // Re-align the pointer if necessary.
1781 // It should only ever be necessary for 64-bit types on O32 since the minimum
1782 // argument alignment is the same as the maximum type alignment for N32/N64.
1784 // FIXME: We currently align too often. The code generator doesn't notice
1785 // when the pointer is still aligned from the last va_arg (or pair of
1786 // va_args for the i64 on O32 case).
1787 if (Align > getMinStackArgumentAlignment()) {
1788 assert(((Align & (Align-1)) == 0) && "Expected Align to be a power of 2");
// Round VAList up to the next Align boundary: (p + Align-1) & -Align.
1790 VAList = DAG.getNode(ISD::ADD, DL, VAList.getValueType(), VAList,
1791 DAG.getConstant(Align - 1,
1792 VAList.getValueType()));
1794 VAList = DAG.getNode(ISD::AND, DL, VAList.getValueType(), VAList,
1795 DAG.getConstant(-(int64_t)Align,
1796 VAList.getValueType()));
1799 // Increment the pointer, VAList, to the next vaarg.
1800 unsigned ArgSizeInBytes = getDataLayout()->getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext()));
1801 SDValue Tmp3 = DAG.getNode(ISD::ADD, DL, VAList.getValueType(), VAList,
1802 DAG.getConstant(RoundUpToAlignment(ArgSizeInBytes, ArgSlotSizeInBytes),
1803 VAList.getValueType()));
1804 // Store the incremented VAList to the legalized pointer
1805 Chain = DAG.getStore(VAListLoad.getValue(1), DL, Tmp3, VAListPtr,
1806 MachinePointerInfo(SV), false, false, 0);
1808 // In big-endian mode we must adjust the pointer when the load size is smaller
1809 // than the argument slot size. We must also reduce the known alignment to
1810 // match. For example in the N64 ABI, we must add 4 bytes to the offset to get
1811 // the correct half of the slot, and reduce the alignment from 8 (slot
1812 // alignment) down to 4 (type alignment).
1813 if (!Subtarget.isLittle() && ArgSizeInBytes < ArgSlotSizeInBytes) {
1814 unsigned Adjustment = ArgSlotSizeInBytes - ArgSizeInBytes;
1815 VAList = DAG.getNode(ISD::ADD, DL, VAListPtr.getValueType(), VAList,
1816 DAG.getIntPtrConstant(Adjustment));
1818 // Load the actual argument out of the pointer VAList
1819 return DAG.getLoad(VT, DL, Chain, VAList, MachinePointerInfo(), false, false,
// Lower FCOPYSIGN for 32-bit register targets: copy the sign bit of operand 1
// into operand 0 using either the ext/ins bitfield instructions (when
// HasExtractInsert) or a shift/or sequence. f64 operands are handled a word
// at a time via ExtractElementF64/BuildPairF64.
static SDValue lowerFCOPYSIGN32(SDValue Op, SelectionDAG &DAG,
                                bool HasExtractInsert) {
  EVT TyX = Op.getOperand(0).getValueType();
  EVT TyY = Op.getOperand(1).getValueType();
  SDValue Const1 = DAG.getConstant(1, MVT::i32);
  SDValue Const31 = DAG.getConstant(31, MVT::i32);
  // If operand is of type f64, extract the upper 32-bit. Otherwise, bitcast it
  SDValue X = (TyX == MVT::f32) ?
    DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(0)) :
    DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(0),
  SDValue Y = (TyY == MVT::f32) ?
    DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(1)) :
    DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(1),
  if (HasExtractInsert) {
    // ext  E, Y, 31, 1  ; extract bit31 of Y
    // ins  X, E, 31, 1  ; insert extracted bit at bit31 of X
    SDValue E = DAG.getNode(MipsISD::Ext, DL, MVT::i32, Y, Const31, Const1);
    Res = DAG.getNode(MipsISD::Ins, DL, MVT::i32, E, Const31, Const1, X);
    // Clear X's sign bit (shift out and back in), isolate Y's sign bit,
    // then OR them together:
    // srl SrlX, SllX, 1
    // sll SllY, SrlX, 31
    // or  Or, SrlX, SllY
    SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i32, X, Const1);
    SDValue SrlX = DAG.getNode(ISD::SRL, DL, MVT::i32, SllX, Const1);
    SDValue SrlY = DAG.getNode(ISD::SRL, DL, MVT::i32, Y, Const31);
    SDValue SllY = DAG.getNode(ISD::SHL, DL, MVT::i32, SrlY, Const31);
    Res = DAG.getNode(ISD::OR, DL, MVT::i32, SrlX, SllY);
  if (TyX == MVT::f32)
    return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), Res);
  // f64 result: rebuild the pair from the untouched low word and the
  // sign-patched high word.
  SDValue LowX = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
                             Op.getOperand(0), DAG.getConstant(0, MVT::i32));
  return DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, LowX, Res);
// Lower FCOPYSIGN for 64-bit register targets. Operates on the full-width
// integer images of both operands (i32 for f32, i64 for f64), extending or
// truncating the sign bit's word when the operand widths differ.
static SDValue lowerFCOPYSIGN64(SDValue Op, SelectionDAG &DAG,
                                bool HasExtractInsert) {
  unsigned WidthX = Op.getOperand(0).getValueSizeInBits();
  unsigned WidthY = Op.getOperand(1).getValueSizeInBits();
  EVT TyX = MVT::getIntegerVT(WidthX), TyY = MVT::getIntegerVT(WidthY);
  SDValue Const1 = DAG.getConstant(1, MVT::i32);
  // Bitcast to integer nodes.
  SDValue X = DAG.getNode(ISD::BITCAST, DL, TyX, Op.getOperand(0));
  SDValue Y = DAG.getNode(ISD::BITCAST, DL, TyY, Op.getOperand(1));
  if (HasExtractInsert) {
    // ext  E, Y, width(Y) - 1, 1  ; extract bit width(Y)-1 of Y
    // ins  X, E, width(X) - 1, 1  ; insert extracted bit at bit width(X)-1 of X
    SDValue E = DAG.getNode(MipsISD::Ext, DL, TyY, Y,
                            DAG.getConstant(WidthY - 1, MVT::i32), Const1);
    // Match the extracted bit's type to X's type before inserting.
    if (WidthX > WidthY)
      E = DAG.getNode(ISD::ZERO_EXTEND, DL, TyX, E);
    else if (WidthY > WidthX)
      E = DAG.getNode(ISD::TRUNCATE, DL, TyX, E);
    SDValue I = DAG.getNode(MipsISD::Ins, DL, TyX, E,
                            DAG.getConstant(WidthX - 1, MVT::i32), Const1, X);
    return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), I);
  // Shift/or fallback: clear X's sign bit, isolate Y's sign bit, combine.
  // (d)sll SllX, X, 1
  // (d)srl SrlX, SllX, 1
  // (d)srl SrlY, Y, width(Y)-1
  // (d)sll SllY, SrlX, width(Y)-1
  // or     Or, SrlX, SllY
  SDValue SllX = DAG.getNode(ISD::SHL, DL, TyX, X, Const1);
  SDValue SrlX = DAG.getNode(ISD::SRL, DL, TyX, SllX, Const1);
  SDValue SrlY = DAG.getNode(ISD::SRL, DL, TyY, Y,
                             DAG.getConstant(WidthY - 1, MVT::i32));
  if (WidthX > WidthY)
    SrlY = DAG.getNode(ISD::ZERO_EXTEND, DL, TyX, SrlY);
  else if (WidthY > WidthX)
    SrlY = DAG.getNode(ISD::TRUNCATE, DL, TyX, SrlY);
  SDValue SllY = DAG.getNode(ISD::SHL, DL, TyX, SrlY,
                             DAG.getConstant(WidthX - 1, MVT::i32));
  SDValue Or = DAG.getNode(ISD::OR, DL, TyX, SrlX, SllY);
  return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), Or);
// Dispatch FCOPYSIGN lowering based on GPR width of the subtarget.
MipsTargetLowering::lowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
  if (Subtarget.isGP64bit())
    return lowerFCOPYSIGN64(Op, DAG, Subtarget.hasExtractInsert());
  return lowerFCOPYSIGN32(Op, DAG, Subtarget.hasExtractInsert());
// Lower FRAMEADDR: only depth 0 is supported; return a copy of the frame
// pointer register ($fp / $fp_64 depending on ABI).
SDValue MipsTargetLowering::
lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
  assert((cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() == 0) &&
         "Frame address can only be determined for current frame.");
  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
  // Force the frame lowering code to materialize a frame pointer.
  MFI->setFrameAddressIsTaken(true);
  EVT VT = Op.getValueType();
    DAG.getCopyFromReg(DAG.getEntryNode(), DL,
                       Subtarget.isABI_N64() ? Mips::FP_64 : Mips::FP, VT);
// Lower RETURNADDR: only depth 0 is supported; mark $ra as a live-in and
// return a copy of it.
SDValue MipsTargetLowering::lowerRETURNADDR(SDValue Op,
                                            SelectionDAG &DAG) const {
  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
  assert((cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() == 0) &&
         "Return address can be determined only for current frame.");
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MVT VT = Op.getSimpleValueType();
  unsigned RA = Subtarget.isABI_N64() ? Mips::RA_64 : Mips::RA;
  MFI->setReturnAddressIsTaken(true);
  // Return RA, which contains the return address. Mark it an implicit live-in.
  unsigned Reg = MF.addLiveIn(RA, getRegClassFor(VT));
  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), Reg, VT);
// An EH_RETURN is the result of lowering llvm.eh.return which in turn is
// generated from __builtin_eh_return (offset, handler)
// The effect of this is to adjust the stack pointer by "offset"
// and then branch to "handler".
SDValue MipsTargetLowering::lowerEH_RETURN(SDValue Op, SelectionDAG &DAG)
  MachineFunction &MF = DAG.getMachineFunction();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
  // Record that this function performs an eh.return so the prologue/epilogue
  // emitter can spill/restore the callee-saved registers appropriately.
  MipsFI->setCallsEhReturn();
  SDValue Chain = Op.getOperand(0);
  SDValue Offset = Op.getOperand(1);
  SDValue Handler = Op.getOperand(2);
  EVT Ty = Subtarget.isABI_N64() ? MVT::i64 : MVT::i32;
  // Store stack offset in V1, store jump target in V0. Glue CopyToReg and
  // EH_RETURN nodes, so that instructions are emitted back-to-back.
  unsigned OffsetReg = Subtarget.isABI_N64() ? Mips::V1_64 : Mips::V1;
  unsigned AddrReg = Subtarget.isABI_N64() ? Mips::V0_64 : Mips::V0;
  Chain = DAG.getCopyToReg(Chain, DL, OffsetReg, Offset, SDValue());
  Chain = DAG.getCopyToReg(Chain, DL, AddrReg, Handler, Chain.getValue(1));
  return DAG.getNode(MipsISD::EH_RETURN, DL, MVT::Other, Chain,
                     DAG.getRegister(OffsetReg, Ty),
                     DAG.getRegister(AddrReg, getPointerTy()),
// Lower ATOMIC_FENCE to a MIPS SYNC instruction.
SDValue MipsTargetLowering::lowerATOMIC_FENCE(SDValue Op,
                                              SelectionDAG &DAG) const {
  // FIXME: Need pseudo-fence for 'singlethread' fences
  // FIXME: Set SType for weaker fences where supported/appropriate.
  return DAG.getNode(MipsISD::Sync, DL, MVT::Other, Op.getOperand(0),
                     DAG.getConstant(SType, MVT::i32));
// Lower SHL_PARTS: 64-bit shift-left expressed on an i32 (Lo, Hi) pair.
// Bit 5 of the shift amount selects between the "shamt < 32" and
// "shamt >= 32" results via SELECT nodes.
SDValue MipsTargetLowering::lowerShiftLeftParts(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1);
  SDValue Shamt = Op.getOperand(2);
  // if shamt < 32:
  //  lo = (shl lo, shamt)
  //  hi = (or (shl hi, shamt) (srl (srl lo, 1), ~shamt))
  // else (shamt >= 32): lo = 0 and
  //  hi = (shl lo, shamt[4:0])
  SDValue Not = DAG.getNode(ISD::XOR, DL, MVT::i32, Shamt,
                            DAG.getConstant(-1, MVT::i32));
  // Shift Lo right by 1 first so the combined (1 + ~shamt) never shifts by 32.
  SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, MVT::i32, Lo,
                                      DAG.getConstant(1, MVT::i32));
  SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, MVT::i32, ShiftRight1Lo,
  SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, MVT::i32, Hi, Shamt);
  SDValue Or = DAG.getNode(ISD::OR, DL, MVT::i32, ShiftLeftHi, ShiftRightLo);
  SDValue ShiftLeftLo = DAG.getNode(ISD::SHL, DL, MVT::i32, Lo, Shamt);
  // Cond is nonzero iff shamt >= 32 (bit 5 set).
  SDValue Cond = DAG.getNode(ISD::AND, DL, MVT::i32, Shamt,
                             DAG.getConstant(0x20, MVT::i32));
  Lo = DAG.getNode(ISD::SELECT, DL, MVT::i32, Cond,
                   DAG.getConstant(0, MVT::i32), ShiftLeftLo);
  Hi = DAG.getNode(ISD::SELECT, DL, MVT::i32, Cond, ShiftLeftLo, Or);
  SDValue Ops[2] = {Lo, Hi};
  return DAG.getMergeValues(Ops, DL);
// Lower SRA_PARTS/SRL_PARTS: 64-bit right shift on an i32 (Lo, Hi) pair.
// IsSRA selects arithmetic vs logical treatment of the high word; bit 5 of
// the shift amount selects the "shamt >= 32" results via SELECT nodes.
SDValue MipsTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
  SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1);
  SDValue Shamt = Op.getOperand(2);
  // if shamt < 32:
  //  lo = (or (shl (shl hi, 1), ~shamt) (srl lo, shamt))
  //  if isSRA:
  //   hi = (sra hi, shamt)
  //  else:
  //   hi = (srl hi, shamt)
  // else (shamt >= 32), isSRA:
  //  lo = (sra hi, shamt[4:0])
  //  hi = (sra hi, 31)
  // else (shamt >= 32), logical:
  //  lo = (srl hi, shamt[4:0])
  SDValue Not = DAG.getNode(ISD::XOR, DL, MVT::i32, Shamt,
                            DAG.getConstant(-1, MVT::i32));
  // Shift Hi left by 1 first so the combined (1 + ~shamt) never shifts by 32.
  SDValue ShiftLeft1Hi = DAG.getNode(ISD::SHL, DL, MVT::i32, Hi,
                                     DAG.getConstant(1, MVT::i32));
  SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, MVT::i32, ShiftLeft1Hi, Not);
  SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, MVT::i32, Lo, Shamt);
  SDValue Or = DAG.getNode(ISD::OR, DL, MVT::i32, ShiftLeftHi, ShiftRightLo);
  SDValue ShiftRightHi = DAG.getNode(IsSRA ? ISD::SRA : ISD::SRL, DL, MVT::i32,
  // Cond is nonzero iff shamt >= 32 (bit 5 set).
  SDValue Cond = DAG.getNode(ISD::AND, DL, MVT::i32, Shamt,
                             DAG.getConstant(0x20, MVT::i32));
  // Sign-fill value for the high word in the arithmetic shamt >= 32 case.
  SDValue Shift31 = DAG.getNode(ISD::SRA, DL, MVT::i32, Hi,
                                DAG.getConstant(31, MVT::i32));
  Lo = DAG.getNode(ISD::SELECT, DL, MVT::i32, Cond, ShiftRightHi, Or);
  Hi = DAG.getNode(ISD::SELECT, DL, MVT::i32, Cond,
                   IsSRA ? Shift31 : DAG.getConstant(0, MVT::i32),
  SDValue Ops[2] = {Lo, Hi};
  return DAG.getMergeValues(Ops, DL);
// Build one half of an unaligned-load expansion: a LWL/LWR/LDL/LDR memory
// intrinsic node with opcode Opc, reading at LD's base pointer plus Offset.
// Src is the partial result being merged into (undef for the first half).
static SDValue createLoadLR(unsigned Opc, SelectionDAG &DAG, LoadSDNode *LD,
                            SDValue Chain, SDValue Src, unsigned Offset) {
  SDValue Ptr = LD->getBasePtr();
  EVT VT = LD->getValueType(0), MemVT = LD->getMemoryVT();
  EVT BasePtrVT = Ptr.getValueType();
  SDVTList VTList = DAG.getVTList(VT, MVT::Other);
    // Offset the pointer only when a nonzero adjustment is requested.
    Ptr = DAG.getNode(ISD::ADD, DL, BasePtrVT, Ptr,
                      DAG.getConstant(Offset, BasePtrVT));
  SDValue Ops[] = { Chain, Ptr, Src };
  return DAG.getMemIntrinsicNode(Opc, DL, VTList, Ops, MemVT,
                                 LD->getMemOperand());
// Expand an unaligned 32 or 64-bit integer load node.
// Uses paired LWL/LWR (32-bit) or LDL/LDR (64-bit) instructions; i64 zextload
// additionally masks the high word with a shl/srl pair.
SDValue MipsTargetLowering::lowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  LoadSDNode *LD = cast<LoadSDNode>(Op);
  EVT MemVT = LD->getMemoryVT();
  // No expansion needed when the CPU handles unaligned accesses natively.
  if (Subtarget.systemSupportsUnalignedAccess())
  // Return if load is aligned or if MemVT is neither i32 nor i64.
  if ((LD->getAlignment() >= MemVT.getSizeInBits() / 8) ||
      ((MemVT != MVT::i32) && (MemVT != MVT::i64)))
  bool IsLittle = Subtarget.isLittle();
  EVT VT = Op.getValueType();
  ISD::LoadExtType ExtType = LD->getExtensionType();
  SDValue Chain = LD->getChain(), Undef = DAG.getUNDEF(VT);
  assert((VT == MVT::i32) || (VT == MVT::i64));
  // Expand
  //  (set dst, (i64 (load baseptr)))
  // to
  //  (set tmp, (ldl (add baseptr, 7), undef))
  //  (set dst, (ldr baseptr, tmp))
  if ((VT == MVT::i64) && (ExtType == ISD::NON_EXTLOAD)) {
    SDValue LDL = createLoadLR(MipsISD::LDL, DAG, LD, Chain, Undef,
    return createLoadLR(MipsISD::LDR, DAG, LD, LDL.getValue(1), LDL,
  SDValue LWL = createLoadLR(MipsISD::LWL, DAG, LD, Chain, Undef,
  SDValue LWR = createLoadLR(MipsISD::LWR, DAG, LD, LWL.getValue(1), LWL,
  // Expand
  //  (set dst, (i32 (load baseptr))) or
  //  (set dst, (i64 (sextload baseptr))) or
  //  (set dst, (i64 (extload baseptr)))
  // to
  //  (set tmp, (lwl (add baseptr, 3), undef))
  //  (set dst, (lwr baseptr, tmp))
  if ((VT == MVT::i32) || (ExtType == ISD::SEXTLOAD) ||
      (ExtType == ISD::EXTLOAD))
  assert((VT == MVT::i64) && (ExtType == ISD::ZEXTLOAD));
  // Expand
  //  (set dst, (i64 (zextload baseptr)))
  // to
  //  (set tmp0, (lwl (add baseptr, 3), undef))
  //  (set tmp1, (lwr baseptr, tmp0))
  //  (set tmp2, (shl tmp1, 32))
  //  (set dst, (srl tmp2, 32))
  SDValue Const32 = DAG.getConstant(32, MVT::i32);
  SDValue SLL = DAG.getNode(ISD::SHL, DL, MVT::i64, LWR, Const32);
  SDValue SRL = DAG.getNode(ISD::SRL, DL, MVT::i64, SLL, Const32);
  SDValue Ops[] = { SRL, LWR.getValue(1) };
  return DAG.getMergeValues(Ops, DL);
// Build one half of an unaligned-store expansion: a SWL/SWR/SDL/SDR memory
// intrinsic node with opcode Opc, writing SD's value at its base pointer
// plus Offset.
static SDValue createStoreLR(unsigned Opc, SelectionDAG &DAG, StoreSDNode *SD,
                             SDValue Chain, unsigned Offset) {
  SDValue Ptr = SD->getBasePtr(), Value = SD->getValue();
  EVT MemVT = SD->getMemoryVT(), BasePtrVT = Ptr.getValueType();
  SDVTList VTList = DAG.getVTList(MVT::Other);
    // Offset the pointer only when a nonzero adjustment is requested.
    Ptr = DAG.getNode(ISD::ADD, DL, BasePtrVT, Ptr,
                      DAG.getConstant(Offset, BasePtrVT));
  SDValue Ops[] = { Chain, Value, Ptr };
  return DAG.getMemIntrinsicNode(Opc, DL, VTList, Ops, MemVT,
                                 SD->getMemOperand());
// Expand an unaligned 32 or 64-bit integer store node.
// Uses paired SWL/SWR (32-bit and truncating) or SDL/SDR (64-bit)
// instructions; the pointer offsets depend on endianness.
static SDValue lowerUnalignedIntStore(StoreSDNode *SD, SelectionDAG &DAG,
  SDValue Value = SD->getValue(), Chain = SD->getChain();
  EVT VT = Value.getValueType();
  // Expand
  //  (store val, baseptr) or
  //  (truncstore val, baseptr)
  // to
  //  (swl val, (add baseptr, 3))
  //  (swr val, baseptr)
  if ((VT == MVT::i32) || SD->isTruncatingStore()) {
    SDValue SWL = createStoreLR(MipsISD::SWL, DAG, SD, Chain,
    return createStoreLR(MipsISD::SWR, DAG, SD, SWL, IsLittle ? 0 : 3);
  assert(VT == MVT::i64);
  // Expand
  //  (store val, baseptr)
  // to
  //  (sdl val, (add baseptr, 7))
  //  (sdr val, baseptr)
  SDValue SDL = createStoreLR(MipsISD::SDL, DAG, SD, Chain, IsLittle ? 7 : 0);
  return createStoreLR(MipsISD::SDR, DAG, SD, SDL, IsLittle ? 0 : 7);
// Lower (store (fp_to_sint $fp) $ptr) to (store (TruncIntFP $fp), $ptr).
static SDValue lowerFP_TO_SINT_STORE(StoreSDNode *SD, SelectionDAG &DAG) {
  SDValue Val = SD->getValue();
  // Only rewrite stores whose value is an FP_TO_SINT conversion.
  if (Val.getOpcode() != ISD::FP_TO_SINT)
  // The MIPS trunc instruction leaves its integer result in an FP register,
  // so the node's result type is an FP type of the same width.
  EVT FPTy = EVT::getFloatingPointVT(Val.getValueSizeInBits());
  SDValue Tr = DAG.getNode(MipsISD::TruncIntFP, SDLoc(Val), FPTy,
  // Preserve the original store's memory operand flags.
  return DAG.getStore(SD->getChain(), SDLoc(SD), Tr, SD->getBasePtr(),
                      SD->getPointerInfo(), SD->isVolatile(),
                      SD->isNonTemporal(), SD->getAlignment());
// Lower a STORE node: expand unaligned i32/i64 stores when the CPU cannot
// handle them, otherwise try the fp_to_sint store combine.
SDValue MipsTargetLowering::lowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  StoreSDNode *SD = cast<StoreSDNode>(Op);
  EVT MemVT = SD->getMemoryVT();
  // Lower unaligned integer stores.
  if (!Subtarget.systemSupportsUnalignedAccess() &&
      (SD->getAlignment() < MemVT.getSizeInBits() / 8) &&
      ((MemVT == MVT::i32) || (MemVT == MVT::i64)))
    return lowerUnalignedIntStore(SD, DAG, Subtarget.isLittle());
  return lowerFP_TO_SINT_STORE(SD, DAG);
// Lower ADD only for the llvm.eh.dwarf.cfa pattern; all other adds are
// left to generic handling.
SDValue MipsTargetLowering::lowerADD(SDValue Op, SelectionDAG &DAG) const {
  // Bail out unless this is exactly (add (frameaddr 0), frame_to_args_offset).
  if (Op->getOperand(0).getOpcode() != ISD::FRAMEADDR
      || cast<ConstantSDNode>
        (Op->getOperand(0).getOperand(0))->getZExtValue() != 0
      || Op->getOperand(1).getOpcode() != ISD::FRAME_TO_ARGS_OFFSET)
  // The pattern
  // (add (frameaddr 0), (frame_to_args_offset))
  // results from lowering llvm.eh.dwarf.cfa intrinsic. Transform it to
  // (add FrameObject, 0)
  // where FrameObject is a fixed StackObject with offset 0 which points to
  // the old stack pointer.
  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
  EVT ValTy = Op->getValueType(0);
  int FI = MFI->CreateFixedObject(Op.getValueSizeInBits() / 8, 0, false);
  SDValue InArgsAddr = DAG.getFrameIndex(FI, ValTy);
  return DAG.getNode(ISD::ADD, SDLoc(Op), ValTy, InArgsAddr,
                     DAG.getConstant(0, ValTy));
// Lower FP_TO_SINT to TruncIntFP followed by a bitcast: the MIPS trunc
// instruction produces its integer result in an FP register of equal width.
SDValue MipsTargetLowering::lowerFP_TO_SINT(SDValue Op,
                                            SelectionDAG &DAG) const {
  EVT FPTy = EVT::getFloatingPointVT(Op.getValueSizeInBits());
  SDValue Trunc = DAG.getNode(MipsISD::TruncIntFP, SDLoc(Op), FPTy,
  return DAG.getNode(ISD::BITCAST, SDLoc(Op), Op.getValueType(), Trunc);
2257 //===----------------------------------------------------------------------===//
2258 // Calling Convention Implementation
2259 //===----------------------------------------------------------------------===//
2261 //===----------------------------------------------------------------------===//
2262 // TODO: Implement a generic logic using tblgen that can support this.
2263 // Mips O32 ABI rules:
2265 // i32 - Passed in A0, A1, A2, A3 and stack
2266 // f32 - Only passed in f32 registers if no int reg has been used yet to hold
2267 // an argument. Otherwise, passed in A1, A2, A3 and stack.
2268 // f64 - Only passed in two aliased f32 registers if no int reg has been used
2269 // yet to hold an argument. Otherwise, use A2, A3 and stack. If A1 is
// not used, it must be shadowed. If only A3 is available, shadow it and
2273 // For vararg functions, all arguments are passed in A0, A1, A2, A3 and stack.
2274 //===----------------------------------------------------------------------===//
// Custom O32 calling-convention handler shared by the FP32 and FP64 variants.
// Assigns ValNo either a register (int regs A0-A3, float regs F12/F14, or the
// caller-supplied F64Regs pair) or a stack slot, honoring the O32 rules for
// register pairing and int/float register shadowing. Returns false on success
// (the CCState convention for "handled").
static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
                       CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                       CCState &State, const MCPhysReg *F64Regs) {
  static const unsigned IntRegsSize = 4, FloatRegsSize = 2;
  static const MCPhysReg IntRegs[] = { Mips::A0, Mips::A1, Mips::A2, Mips::A3 };
  static const MCPhysReg F32Regs[] = { Mips::F12, Mips::F14 };
  // Do not process byval args here.
  if (ArgFlags.isByVal())
  // Promote i8 and i16
  if (LocVT == MVT::i8 || LocVT == MVT::i16) {
    if (ArgFlags.isSExt())
      LocInfo = CCValAssign::SExt;
    else if (ArgFlags.isZExt())
      LocInfo = CCValAssign::ZExt;
      LocInfo = CCValAssign::AExt;
  // f32 and f64 are allocated in A0, A1, A2, A3 when either of the following
  // is true: function is vararg, argument is 3rd or higher, there is previous
  // argument which is not f32 or f64.
  bool AllocateFloatsInIntReg = State.isVarArg() || ValNo > 1
      || State.getFirstUnallocated(F32Regs, FloatRegsSize) != ValNo;
  unsigned OrigAlign = ArgFlags.getOrigAlign();
  // An i32 with 8-byte alignment is one half of a split i64 argument.
  bool isI64 = (ValVT == MVT::i32 && OrigAlign == 8);
  if (ValVT == MVT::i32 || (ValVT == MVT::f32 && AllocateFloatsInIntReg)) {
    Reg = State.AllocateReg(IntRegs, IntRegsSize);
    // If this is the first part of an i64 arg,
    // the allocated register must be either A0 or A2.
    if (isI64 && (Reg == Mips::A1 || Reg == Mips::A3))
      Reg = State.AllocateReg(IntRegs, IntRegsSize);
  } else if (ValVT == MVT::f64 && AllocateFloatsInIntReg) {
    // Allocate int register and shadow next int register. If first
    // available register is Mips::A1 or Mips::A3, shadow it too.
    Reg = State.AllocateReg(IntRegs, IntRegsSize);
    if (Reg == Mips::A1 || Reg == Mips::A3)
      Reg = State.AllocateReg(IntRegs, IntRegsSize);
    State.AllocateReg(IntRegs, IntRegsSize);
  } else if (ValVT.isFloatingPoint() && !AllocateFloatsInIntReg) {
    // we are guaranteed to find an available float register
    if (ValVT == MVT::f32) {
      Reg = State.AllocateReg(F32Regs, FloatRegsSize);
      // Shadow int register
      State.AllocateReg(IntRegs, IntRegsSize);
      Reg = State.AllocateReg(F64Regs, FloatRegsSize);
      // Shadow int registers
      unsigned Reg2 = State.AllocateReg(IntRegs, IntRegsSize);
      if (Reg2 == Mips::A1 || Reg2 == Mips::A3)
        State.AllocateReg(IntRegs, IntRegsSize);
      State.AllocateReg(IntRegs, IntRegsSize);
    llvm_unreachable("Cannot handle this ValVT.");
  // No register was available: assign a stack slot of the value's size.
  unsigned Offset = State.AllocateStack(ValVT.getSizeInBits() >> 3,
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
// O32 calling convention with 32-bit FPU registers: f64 arguments use the
// D6/D7 double-precision register pairs.
static bool CC_MipsO32_FP32(unsigned ValNo, MVT ValVT,
                            MVT LocVT, CCValAssign::LocInfo LocInfo,
                            ISD::ArgFlagsTy ArgFlags, CCState &State) {
  static const MCPhysReg F64Regs[] = { Mips::D6, Mips::D7 };
  return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);
// O32 calling convention with 64-bit FPU registers: f64 arguments use the
// D12_64/D14_64 registers.
static bool CC_MipsO32_FP64(unsigned ValNo, MVT ValVT,
                            MVT LocVT, CCValAssign::LocInfo LocInfo,
                            ISD::ArgFlagsTy ArgFlags, CCState &State) {
  static const MCPhysReg F64Regs[] = { Mips::D12_64, Mips::D14_64 };
  return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);
2368 #include "MipsGenCallingConv.inc"
2370 //===----------------------------------------------------------------------===//
2371 // Call Calling Convention Implementation
2372 //===----------------------------------------------------------------------===//
// Return next O32 integer argument register.
// Only the even members of an i64 register pair (A0 or A2) are valid inputs;
// the result is the odd register completing that pair.
static unsigned getNextIntArgReg(unsigned Reg) {
  assert((Reg == Mips::A0) || (Reg == Mips::A2));
  return (Reg == Mips::A0) ? Mips::A1 : Mips::A3;
// Store an outgoing call argument to the stack. For ordinary calls the store
// goes at StackPtr + Offset; for tail calls it targets a fixed frame object
// at Offset (the caller's own incoming-argument area), and the store is
// marked volatile so it is not moved or removed.
MipsTargetLowering::passArgOnStack(SDValue StackPtr, unsigned Offset,
                                   SDValue Chain, SDValue Arg, SDLoc DL,
                                   bool IsTailCall, SelectionDAG &DAG) const {
    SDValue PtrOff = DAG.getNode(ISD::ADD, DL, getPointerTy(), StackPtr,
                                 DAG.getIntPtrConstant(Offset));
    return DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo(), false,
  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
  int FI = MFI->CreateFixedObject(Arg.getValueSizeInBits() / 8, Offset, false);
  SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
  return DAG.getStore(Chain, DL, Arg, FIN, MachinePointerInfo(),
                      /*isVolatile=*/ true, false, 0);
// Assemble the operand list for the call node: glue CopyToReg nodes for all
// argument registers onto the chain, append the register operands, the
// call-preserved register mask, and the trailing glue value.
void MipsTargetLowering::
getOpndList(SmallVectorImpl<SDValue> &Ops,
            std::deque< std::pair<unsigned, SDValue> > &RegsToPass,
            bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage,
            CallLoweringInfo &CLI, SDValue Callee, SDValue Chain) const {
  // Insert node "GP copy globalreg" before call to function.
  //
  // R_MIPS_CALL* operators (emitted when non-internal functions are called
  // in PIC mode) allow symbols to be resolved via lazy binding.
  // The lazy binding stub requires GP to point to the GOT.
  if (IsPICCall && !InternalLinkage) {
    unsigned GPReg = Subtarget.isABI_N64() ? Mips::GP_64 : Mips::GP;
    EVT Ty = Subtarget.isABI_N64() ? MVT::i64 : MVT::i32;
    RegsToPass.push_back(std::make_pair(GPReg, getGlobalReg(CLI.DAG, Ty)));
  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InFlag is necessary since all emitted instructions must be
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = CLI.DAG.getCopyToReg(Chain, CLI.DL, RegsToPass[i].first,
                                 RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(CLI.DAG.getRegister(RegsToPass[i].first,
                                      RegsToPass[i].second.getValueType()));
  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(CLI.CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  if (Subtarget.inMips16HardFloat()) {
    // Mips16 hard-float helper functions use a special preserved-register
    // mask, identified by the "__Mips16RetHelper" function attribute.
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(CLI.Callee)) {
      llvm::StringRef Sym = G->getGlobal()->getName();
      Function *F = G->getGlobal()->getParent()->getFunction(Sym);
      if (F && F->hasFnAttribute("__Mips16RetHelper")) {
        Mask = MipsRegisterInfo::getMips16RetHelperMask();
  Ops.push_back(CLI.DAG.getRegisterMask(Mask));
  if (InFlag.getNode())
    Ops.push_back(InFlag);
/// LowerCall - functions arguments are copied from virtual regs to
/// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted.
MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                              SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const TargetFrameLowering *TFL = MF.getTarget().getFrameLowering();
  MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
  bool IsPIC = getTargetMachine().getRelocationModel() == Reloc::PIC_;
  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());
  // Some callees (e.g. mips16 helpers) need a special calling convention.
  MipsCC::SpecialCallingConvType SpecialCallingConv =
    getSpecialCallingConv(Callee);
  MipsCC MipsCCInfo(CallConv, Subtarget.isABI_O32(), Subtarget.isFP64bit(),
                    CCInfo, SpecialCallingConv);
  MipsCCInfo.analyzeCallOperands(Outs, IsVarArg,
                                 Subtarget.abiUsesSoftFloat(),
                                 Callee.getNode(), CLI.getArgs());
  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NextStackOffset = CCInfo.getNextStackOffset();
  // Check if it's really possible to do a tail call.
    isEligibleForTailCallOptimization(MipsCCInfo, NextStackOffset,
                                      *MF.getInfo<MipsFunctionInfo>());
  if (!IsTailCall && CLI.CS && CLI.CS->isMustTailCall())
    report_fatal_error("failed to perform tail call elimination on a call "
                       "site marked musttail");
  // Chain is the output chain of the last Load/Store or CopyToReg node.
  // ByValChain is the output chain of the last Memcpy node created for copying
  // byval arguments to the stack.
  unsigned StackAlignment = TFL->getStackAlignment();
  NextStackOffset = RoundUpToAlignment(NextStackOffset, StackAlignment);
  SDValue NextStackOffsetVal = DAG.getIntPtrConstant(NextStackOffset, true);
    Chain = DAG.getCALLSEQ_START(Chain, NextStackOffsetVal, DL);
  SDValue StackPtr = DAG.getCopyFromReg(
      Chain, DL, Subtarget.isABI_N64() ? Mips::SP_64 : Mips::SP,
  // With EABI it is possible to have 16 args on registers.
  std::deque< std::pair<unsigned, SDValue> > RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  MipsCC::byval_iterator ByValArg = MipsCCInfo.byval_begin();
  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    SDValue Arg = OutVals[i];
    CCValAssign &VA = ArgLocs[i];
    MVT ValVT = VA.getValVT(), LocVT = VA.getLocVT();
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    // ByVal Arg.
    if (Flags.isByVal()) {
      assert(Flags.getByValSize() &&
             "ByVal args of size 0 should have been ignored by front-end.");
      assert(ByValArg != MipsCCInfo.byval_end());
      assert(!IsTailCall &&
             "Do not tail-call optimize if there is a byval argument.");
      passByValArg(Chain, DL, RegsToPass, MemOpChains, StackPtr, MFI, DAG, Arg,
                   MipsCCInfo, *ByValArg, Flags, Subtarget.isLittle());
    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full:
      if (VA.isRegLoc()) {
        if ((ValVT == MVT::f32 && LocVT == MVT::i32) ||
            (ValVT == MVT::f64 && LocVT == MVT::i64) ||
            (ValVT == MVT::i64 && LocVT == MVT::f64))
          Arg = DAG.getNode(ISD::BITCAST, DL, LocVT, Arg);
        else if (ValVT == MVT::f64 && LocVT == MVT::i32) {
          // f64 passed in an O32 integer register pair: split into two i32
          // halves and assign them to consecutive registers.
          SDValue Lo = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
                                   Arg, DAG.getConstant(0, MVT::i32));
          SDValue Hi = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
                                   Arg, DAG.getConstant(1, MVT::i32));
          // Big-endian targets swap the halves.
          if (!Subtarget.isLittle())
          unsigned LocRegLo = VA.getLocReg();
          unsigned LocRegHigh = getNextIntArgReg(LocRegLo);
          RegsToPass.push_back(std::make_pair(LocRegLo, Lo));
          RegsToPass.push_back(std::make_pair(LocRegHigh, Hi));
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, LocVT, Arg);
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, LocVT, Arg);
    case CCValAssign::AExt:
      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, LocVT, Arg);
    // Arguments that can be passed on register must be kept at
    // RegsToPass vector
    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    // Register can't get to this point...
    assert(VA.isMemLoc());
    // emit ISD::STORE which stores the
    // parameter value to a stack Location
    MemOpChains.push_back(passArgOnStack(StackPtr, VA.getLocMemOffset(),
                                         Chain, Arg, DL, IsTailCall, DAG));
  // Transform all store nodes into one single node because all store
  // nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
    (Subtarget.isABI_N64() || IsPIC); // true if calls are translated to
  bool GlobalOrExternal = false, InternalLinkage = false;
  EVT Ty = Callee.getValueType();
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *Val = G->getGlobal();
    InternalLinkage = Val->hasInternalLinkage();
    // Internal-linkage callees can be reached without going through the GOT's
    // lazy-binding machinery.
    if (InternalLinkage)
      Callee = getAddrLocal(G, Ty, DAG,
                            Subtarget.isABI_N32() || Subtarget.isABI_N64());
      Callee = getAddrGlobalLargeGOT(G, Ty, DAG, MipsII::MO_CALL_HI16,
                                     MipsII::MO_CALL_LO16, Chain,
                                     FuncInfo->callPtrInfo(Val));
      Callee = getAddrGlobal(G, Ty, DAG, MipsII::MO_GOT_CALL, Chain,
                             FuncInfo->callPtrInfo(Val));
      Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, getPointerTy(), 0,
                                          MipsII::MO_NO_FLAG);
    GlobalOrExternal = true;
  else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    const char *Sym = S->getSymbol();
    if (!Subtarget.isABI_N64() && !IsPIC) // !N64 && static
      Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy(),
                                           MipsII::MO_NO_FLAG);
      Callee = getAddrGlobalLargeGOT(S, Ty, DAG, MipsII::MO_CALL_HI16,
                                     MipsII::MO_CALL_LO16, Chain,
                                     FuncInfo->callPtrInfo(Sym));
      Callee = getAddrGlobal(S, Ty, DAG, MipsII::MO_GOT_CALL, Chain,
                             FuncInfo->callPtrInfo(Sym));
    GlobalOrExternal = true;
  SmallVector<SDValue, 8> Ops(1, Chain);
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  getOpndList(Ops, RegsToPass, IsPICCall, GlobalOrExternal, InternalLinkage,
              CLI, Callee, Chain);
    return DAG.getNode(MipsISD::TailCall, DL, MVT::Other, Ops);
  Chain = DAG.getNode(MipsISD::JmpLink, DL, NodeTys, Ops);
  SDValue InFlag = Chain.getValue(1);
  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain, NextStackOffsetVal,
                             DAG.getIntPtrConstant(0, true), InFlag, DL);
  InFlag = Chain.getValue(1);
  // Handle result values, copying them out of physregs into vregs that we
  return LowerCallResult(Chain, InFlag, CallConv, IsVarArg,
                         Ins, DL, DAG, InVals, CLI.Callee.getNode(), CLI.RetTy);
/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
MipsTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                    CallingConv::ID CallConv, bool IsVarArg,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    SDLoc DL, SelectionDAG &DAG,
                                    SmallVectorImpl<SDValue> &InVals,
                                    const SDNode *CallNode,
                                    const Type *RetTy) const {
  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), RVLocs, *DAG.getContext());
  MipsCC MipsCCInfo(CallConv, Subtarget.isABI_O32(), Subtarget.isFP64bit(),
  MipsCCInfo.analyzeCallResult(Ins, Subtarget.abiUsesSoftFloat(),
  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    SDValue Val = DAG.getCopyFromReg(Chain, DL, RVLocs[i].getLocReg(),
                                     RVLocs[i].getLocVT(), InFlag);
    // Thread the chain and glue through each copy so they stay ordered.
    Chain = Val.getValue(1);
    InFlag = Val.getValue(2);
    // Bitcast back to the value type when it differs from the location type
    // (e.g. float returned in an integer register).
    if (RVLocs[i].getValVT() != RVLocs[i].getLocVT())
      Val = DAG.getNode(ISD::BITCAST, DL, RVLocs[i].getValVT(), Val);
    InVals.push_back(Val);
2703 //===----------------------------------------------------------------------===//
2704 // Formal Arguments Calling Convention Implementation
2705 //===----------------------------------------------------------------------===//
2706 /// LowerFormalArguments - transform physical registers into virtual registers
2707 /// and generate load operations for arguments places on the stack.
2709 MipsTargetLowering::LowerFormalArguments(SDValue Chain,
2710 CallingConv::ID CallConv,
2712 const SmallVectorImpl<ISD::InputArg> &Ins,
2713 SDLoc DL, SelectionDAG &DAG,
2714 SmallVectorImpl<SDValue> &InVals)
2716 MachineFunction &MF = DAG.getMachineFunction();
2717 MachineFrameInfo *MFI = MF.getFrameInfo();
2718 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
2720 MipsFI->setVarArgsFrameIndex(0);
2722 // Used with vargs to acumulate store chains.
2723 std::vector<SDValue> OutChains;
2725 // Assign locations to all of the incoming arguments.
2726 SmallVector<CCValAssign, 16> ArgLocs;
2727 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
2728 getTargetMachine(), ArgLocs, *DAG.getContext());
2729 MipsCC MipsCCInfo(CallConv, Subtarget.isABI_O32(), Subtarget.isFP64bit(),
2731 Function::const_arg_iterator FuncArg =
2732 DAG.getMachineFunction().getFunction()->arg_begin();
2733 bool UseSoftFloat = Subtarget.abiUsesSoftFloat();
2735 MipsCCInfo.analyzeFormalArguments(Ins, UseSoftFloat, FuncArg);
2736 MipsFI->setFormalArgInfo(CCInfo.getNextStackOffset(),
2737 MipsCCInfo.hasByValArg());
2739 unsigned CurArgIdx = 0;
2740 MipsCC::byval_iterator ByValArg = MipsCCInfo.byval_begin();
2742 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2743 CCValAssign &VA = ArgLocs[i];
2744 std::advance(FuncArg, Ins[i].OrigArgIndex - CurArgIdx);
2745 CurArgIdx = Ins[i].OrigArgIndex;
2746 EVT ValVT = VA.getValVT();
2747 ISD::ArgFlagsTy Flags = Ins[i].Flags;
2748 bool IsRegLoc = VA.isRegLoc();
2750 if (Flags.isByVal()) {
2751 assert(Flags.getByValSize() &&
2752 "ByVal args of size 0 should have been ignored by front-end.");
2753 assert(ByValArg != MipsCCInfo.byval_end());
2754 copyByValRegs(Chain, DL, OutChains, DAG, Flags, InVals, &*FuncArg,
2755 MipsCCInfo, *ByValArg);
2760 // Arguments stored on registers
2762 MVT RegVT = VA.getLocVT();
2763 unsigned ArgReg = VA.getLocReg();
2764 const TargetRegisterClass *RC = getRegClassFor(RegVT);
2766 // Transform the arguments stored on
2767 // physical registers into virtual ones
2768 unsigned Reg = addLiveIn(DAG.getMachineFunction(), ArgReg, RC);
2769 SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT);
2771 // If this is an 8 or 16-bit value, it has been passed promoted
2772 // to 32 bits. Insert an assert[sz]ext to capture this, then
2773 // truncate to the right size.
2774 if (VA.getLocInfo() != CCValAssign::Full) {
2775 unsigned Opcode = 0;
2776 if (VA.getLocInfo() == CCValAssign::SExt)
2777 Opcode = ISD::AssertSext;
2778 else if (VA.getLocInfo() == CCValAssign::ZExt)
2779 Opcode = ISD::AssertZext;
2781 ArgValue = DAG.getNode(Opcode, DL, RegVT, ArgValue,
2782 DAG.getValueType(ValVT));
2783 ArgValue = DAG.getNode(ISD::TRUNCATE, DL, ValVT, ArgValue);
2786 // Handle floating point arguments passed in integer registers and
2787 // long double arguments passed in floating point registers.
2788 if ((RegVT == MVT::i32 && ValVT == MVT::f32) ||
2789 (RegVT == MVT::i64 && ValVT == MVT::f64) ||
2790 (RegVT == MVT::f64 && ValVT == MVT::i64))
2791 ArgValue = DAG.getNode(ISD::BITCAST, DL, ValVT, ArgValue);
2792 else if (Subtarget.isABI_O32() && RegVT == MVT::i32 &&
2793 ValVT == MVT::f64) {
2794 unsigned Reg2 = addLiveIn(DAG.getMachineFunction(),
2795 getNextIntArgReg(ArgReg), RC);
2796 SDValue ArgValue2 = DAG.getCopyFromReg(Chain, DL, Reg2, RegVT);
2797 if (!Subtarget.isLittle())
2798 std::swap(ArgValue, ArgValue2);
2799 ArgValue = DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64,
2800 ArgValue, ArgValue2);
2803 InVals.push_back(ArgValue);
2804 } else { // VA.isRegLoc()
2807 assert(VA.isMemLoc());
2809 // The stack pointer offset is relative to the caller stack frame.
2810 int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
2811 VA.getLocMemOffset(), true);
2813 // Create load nodes to retrieve arguments from the stack
2814 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
2815 SDValue Load = DAG.getLoad(ValVT, DL, Chain, FIN,
2816 MachinePointerInfo::getFixedStack(FI),
2817 false, false, false, 0);
2818 InVals.push_back(Load);
2819 OutChains.push_back(Load.getValue(1));
2823 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2824 // The mips ABIs for returning structs by value requires that we copy
2825 // the sret argument into $v0 for the return. Save the argument into
2826 // a virtual register so that we can access it from the return points.
2827 if (Ins[i].Flags.isSRet()) {
2828 unsigned Reg = MipsFI->getSRetReturnReg();
2830 Reg = MF.getRegInfo().createVirtualRegister(
2831 getRegClassFor(Subtarget.isABI_N64() ? MVT::i64 : MVT::i32));
2832 MipsFI->setSRetReturnReg(Reg);
2834 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), DL, Reg, InVals[i]);
2835 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Copy, Chain);
2841 writeVarArgRegs(OutChains, MipsCCInfo, Chain, DL, DAG);
2843 // All stores are grouped in one node to allow the matching between
2844 // the size of Ins and InVals. This only happens when on varg functions
2845 if (!OutChains.empty()) {
2846 OutChains.push_back(Chain);
2847 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
2853 //===----------------------------------------------------------------------===//
2854 // Return Value Calling Convention Implementation
2855 //===----------------------------------------------------------------------===//
2858 MipsTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
2859 MachineFunction &MF, bool IsVarArg,
2860 const SmallVectorImpl<ISD::OutputArg> &Outs,
2861 LLVMContext &Context) const {
2862 SmallVector<CCValAssign, 16> RVLocs;
2863 CCState CCInfo(CallConv, IsVarArg, MF, getTargetMachine(),
2865 return CCInfo.CheckReturn(Outs, RetCC_Mips);
2869 MipsTargetLowering::LowerReturn(SDValue Chain,
2870 CallingConv::ID CallConv, bool IsVarArg,
2871 const SmallVectorImpl<ISD::OutputArg> &Outs,
2872 const SmallVectorImpl<SDValue> &OutVals,
2873 SDLoc DL, SelectionDAG &DAG) const {
2874 // CCValAssign - represent the assignment of
2875 // the return value to a location
2876 SmallVector<CCValAssign, 16> RVLocs;
2877 MachineFunction &MF = DAG.getMachineFunction();
2879 // CCState - Info about the registers and stack slot.
2880 CCState CCInfo(CallConv, IsVarArg, MF, getTargetMachine(), RVLocs,
2882 MipsCC MipsCCInfo(CallConv, Subtarget.isABI_O32(), Subtarget.isFP64bit(),
2885 // Analyze return values.
2886 MipsCCInfo.analyzeReturn(Outs, Subtarget.abiUsesSoftFloat(),
2887 MF.getFunction()->getReturnType());
2890 SmallVector<SDValue, 4> RetOps(1, Chain);
2892 // Copy the result values into the output registers.
2893 for (unsigned i = 0; i != RVLocs.size(); ++i) {
2894 SDValue Val = OutVals[i];
2895 CCValAssign &VA = RVLocs[i];
2896 assert(VA.isRegLoc() && "Can only return in registers!");
2898 if (RVLocs[i].getValVT() != RVLocs[i].getLocVT())
2899 Val = DAG.getNode(ISD::BITCAST, DL, RVLocs[i].getLocVT(), Val);
2901 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Flag);
2903 // Guarantee that all emitted copies are stuck together with flags.
2904 Flag = Chain.getValue(1);
2905 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2908 // The mips ABIs for returning structs by value requires that we copy
2909 // the sret argument into $v0 for the return. We saved the argument into
2910 // a virtual register in the entry block, so now we copy the value out
2912 if (MF.getFunction()->hasStructRetAttr()) {
2913 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
2914 unsigned Reg = MipsFI->getSRetReturnReg();
2917 llvm_unreachable("sret virtual register not created in the entry block");
2918 SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy());
2919 unsigned V0 = Subtarget.isABI_N64() ? Mips::V0_64 : Mips::V0;
2921 Chain = DAG.getCopyToReg(Chain, DL, V0, Val, Flag);
2922 Flag = Chain.getValue(1);
2923 RetOps.push_back(DAG.getRegister(V0, getPointerTy()));
2926 RetOps[0] = Chain; // Update chain.
2928 // Add the flag if we have it.
2930 RetOps.push_back(Flag);
2932 // Return on Mips is always a "jr $ra"
2933 return DAG.getNode(MipsISD::Ret, DL, MVT::Other, RetOps);
2936 //===----------------------------------------------------------------------===//
2937 // Mips Inline Assembly Support
2938 //===----------------------------------------------------------------------===//
2940 /// getConstraintType - Given a constraint letter, return the type of
2941 /// constraint it is for this target.
2942 MipsTargetLowering::ConstraintType MipsTargetLowering::
2943 getConstraintType(const std::string &Constraint) const
2945 // Mips specific constraints
2946 // GCC config/mips/constraints.md
2948 // 'd' : An address register. Equivalent to r
2949 // unless generating MIPS16 code.
2950 // 'y' : Equivalent to r; retained for
2951 // backwards compatibility.
2952 // 'c' : A register suitable for use in an indirect
2953 // jump. This will always be $25 for -mabicalls.
2954 // 'l' : The lo register. 1 word storage.
2955 // 'x' : The hilo register pair. Double word storage.
2956 if (Constraint.size() == 1) {
2957 switch (Constraint[0]) {
2965 return C_RegisterClass;
2970 return TargetLowering::getConstraintType(Constraint);
2973 /// Examine constraint type and operand type and determine a weight value.
2974 /// This object must already have been set up with the operand type
2975 /// and the current alternative constraint selected.
2976 TargetLowering::ConstraintWeight
2977 MipsTargetLowering::getSingleConstraintMatchWeight(
2978 AsmOperandInfo &info, const char *constraint) const {
2979 ConstraintWeight weight = CW_Invalid;
2980 Value *CallOperandVal = info.CallOperandVal;
2981 // If we don't have a value, we can't do a match,
2982 // but allow it at the lowest weight.
2983 if (!CallOperandVal)
2985 Type *type = CallOperandVal->getType();
2986 // Look at the constraint type.
2987 switch (*constraint) {
2989 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
2993 if (type->isIntegerTy())
2994 weight = CW_Register;
2996 case 'f': // FPU or MSA register
2997 if (Subtarget.hasMSA() && type->isVectorTy() &&
2998 cast<VectorType>(type)->getBitWidth() == 128)
2999 weight = CW_Register;
3000 else if (type->isFloatTy())
3001 weight = CW_Register;
3003 case 'c': // $25 for indirect jumps
3004 case 'l': // lo register
3005 case 'x': // hilo register pair
3006 if (type->isIntegerTy())
3007 weight = CW_SpecificReg;
3009 case 'I': // signed 16 bit immediate
3010 case 'J': // integer zero
3011 case 'K': // unsigned 16 bit immediate
3012 case 'L': // signed 32 bit immediate where lower 16 bits are 0
3013 case 'N': // immediate in the range of -65535 to -1 (inclusive)
3014 case 'O': // signed 15 bit immediate (+- 16383)
3015 case 'P': // immediate in the range of 65535 to 1 (inclusive)
3016 if (isa<ConstantInt>(CallOperandVal))
3017 weight = CW_Constant;
3026 /// This is a helper function to parse a physical register string and split it
3027 /// into non-numeric and numeric parts (Prefix and Reg). The first boolean flag
3028 /// that is returned indicates whether parsing was successful. The second flag
3029 /// is true if the numeric part exists.
3030 static std::pair<bool, bool>
3031 parsePhysicalReg(const StringRef &C, std::string &Prefix,
3032 unsigned long long &Reg) {
3033 if (C.front() != '{' || C.back() != '}')
3034 return std::make_pair(false, false);
3036 // Search for the first numeric character.
3037 StringRef::const_iterator I, B = C.begin() + 1, E = C.end() - 1;
3038 I = std::find_if(B, E, std::ptr_fun(isdigit));
3040 Prefix.assign(B, I - B);
3042 // The second flag is set to false if no numeric characters were found.
3044 return std::make_pair(true, false);
3046 // Parse the numeric characters.
3047 return std::make_pair(!getAsUnsignedInteger(StringRef(I, E - I), 10, Reg),
/// Parse an explicit physical-register constraint such as "{$f2}" or "{lo}"
/// and return the matching (register, register class) pair, or (0, nullptr)
/// if the string does not name a valid Mips register for this subtarget.
std::pair<unsigned, const TargetRegisterClass *> MipsTargetLowering::
parseRegForInlineAsmConstraint(const StringRef &C, MVT VT) const {
  const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
  const TargetRegisterClass *RC;
  std::string Prefix;
  unsigned long long Reg;

  std::pair<bool, bool> R = parsePhysicalReg(C, Prefix, Reg);

  if (!R.first)
    return std::make_pair(0U, nullptr);

  if ((Prefix == "hi" || Prefix == "lo")) { // Parse hi/lo.
    // No numeric characters follow "hi" or "lo".
    if (R.second)
      return std::make_pair(0U, nullptr);

    RC = TRI->getRegClass(Prefix == "hi" ?
                          Mips::HI32RegClassID : Mips::LO32RegClassID);
    return std::make_pair(*(RC->begin()), RC);
  } else if (Prefix.compare(0, 4, "$msa") == 0) {
    // Parse $msa(ir|csr|access|save|modify|request|map|unmap)

    // No numeric characters follow the name.
    if (R.second)
      return std::make_pair(0U, nullptr);

    Reg = StringSwitch<unsigned long long>(Prefix)
              .Case("$msair", Mips::MSAIR)
              .Case("$msacsr", Mips::MSACSR)
              .Case("$msaaccess", Mips::MSAAccess)
              .Case("$msasave", Mips::MSASave)
              .Case("$msamodify", Mips::MSAModify)
              .Case("$msarequest", Mips::MSARequest)
              .Case("$msamap", Mips::MSAMap)
              .Case("$msaunmap", Mips::MSAUnmap)
              .Default(0);

    if (!Reg)
      return std::make_pair(0U, nullptr);

    RC = TRI->getRegClass(Mips::MSACtrlRegClassID);
    return std::make_pair(Reg, RC);
  }

  // All remaining prefixes require a register number.
  if (!R.second)
    return std::make_pair(0U, nullptr);

  if (Prefix == "$f") { // Parse $f0-$f31.
    // If the size of FP registers is 64-bit or Reg is an even number, select
    // the 64-bit register class. Otherwise, select the 32-bit register class.
    if (VT == MVT::Other)
      VT = (Subtarget.isFP64bit() || !(Reg % 2)) ? MVT::f64 : MVT::f32;

    RC = getRegClassFor(VT);

    if (RC == &Mips::AFGR64RegClass) {
      // AFGR64 registers are even/odd pairs; the index is half the $f number.
      assert(Reg % 2 == 0);
      Reg >>= 1;
    }
  } else if (Prefix == "$fcc") // Parse $fcc0-$fcc7.
    RC = TRI->getRegClass(Mips::FCCRegClassID);
  else if (Prefix == "$w") { // Parse $w0-$w31.
    RC = getRegClassFor((VT == MVT::Other) ? MVT::v16i8 : VT);
  } else { // Parse $0-$31.
    assert(Prefix == "$");
    RC = getRegClassFor((VT == MVT::Other) ? MVT::i32 : VT);
  }

  assert(Reg < RC->getNumRegs());
  return std::make_pair(*(RC->begin() + Reg), RC);
}
3124 /// Given a register class constraint, like 'r', if this corresponds directly
3125 /// to an LLVM register class, return a register of 0 and the register class
3127 std::pair<unsigned, const TargetRegisterClass*> MipsTargetLowering::
3128 getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const
3130 if (Constraint.size() == 1) {
3131 switch (Constraint[0]) {
3132 case 'd': // Address register. Same as 'r' unless generating MIPS16 code.
3133 case 'y': // Same as 'r'. Exists for compatibility.
3135 if (VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8) {
3136 if (Subtarget.inMips16Mode())
3137 return std::make_pair(0U, &Mips::CPU16RegsRegClass);
3138 return std::make_pair(0U, &Mips::GPR32RegClass);
3140 if (VT == MVT::i64 && !Subtarget.isGP64bit())
3141 return std::make_pair(0U, &Mips::GPR32RegClass);
3142 if (VT == MVT::i64 && Subtarget.isGP64bit())
3143 return std::make_pair(0U, &Mips::GPR64RegClass);
3144 // This will generate an error message
3145 return std::make_pair(0U, nullptr);
3146 case 'f': // FPU or MSA register
3147 if (VT == MVT::v16i8)
3148 return std::make_pair(0U, &Mips::MSA128BRegClass);
3149 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
3150 return std::make_pair(0U, &Mips::MSA128HRegClass);
3151 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
3152 return std::make_pair(0U, &Mips::MSA128WRegClass);
3153 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
3154 return std::make_pair(0U, &Mips::MSA128DRegClass);
3155 else if (VT == MVT::f32)
3156 return std::make_pair(0U, &Mips::FGR32RegClass);
3157 else if ((VT == MVT::f64) && (!Subtarget.isSingleFloat())) {
3158 if (Subtarget.isFP64bit())
3159 return std::make_pair(0U, &Mips::FGR64RegClass);
3160 return std::make_pair(0U, &Mips::AFGR64RegClass);
3163 case 'c': // register suitable for indirect jump
3165 return std::make_pair((unsigned)Mips::T9, &Mips::GPR32RegClass);
3166 assert(VT == MVT::i64 && "Unexpected type.");
3167 return std::make_pair((unsigned)Mips::T9_64, &Mips::GPR64RegClass);
3168 case 'l': // register suitable for indirect jump
3170 return std::make_pair((unsigned)Mips::LO0, &Mips::LO32RegClass);
3171 return std::make_pair((unsigned)Mips::LO0_64, &Mips::LO64RegClass);
3172 case 'x': // register suitable for indirect jump
3173 // Fixme: Not triggering the use of both hi and low
3174 // This will generate an error message
3175 return std::make_pair(0U, nullptr);
3179 std::pair<unsigned, const TargetRegisterClass *> R;
3180 R = parseRegForInlineAsmConstraint(Constraint, VT);
3185 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
3188 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
3189 /// vector. If it is invalid, don't add anything to Ops.
3190 void MipsTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
3191 std::string &Constraint,
3192 std::vector<SDValue>&Ops,
3193 SelectionDAG &DAG) const {
3196 // Only support length 1 constraints for now.
3197 if (Constraint.length() > 1) return;
3199 char ConstraintLetter = Constraint[0];
3200 switch (ConstraintLetter) {
3201 default: break; // This will fall through to the generic implementation
3202 case 'I': // Signed 16 bit constant
3203 // If this fails, the parent routine will give an error
3204 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
3205 EVT Type = Op.getValueType();
3206 int64_t Val = C->getSExtValue();
3207 if (isInt<16>(Val)) {
3208 Result = DAG.getTargetConstant(Val, Type);
3213 case 'J': // integer zero
3214 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
3215 EVT Type = Op.getValueType();
3216 int64_t Val = C->getZExtValue();
3218 Result = DAG.getTargetConstant(0, Type);
3223 case 'K': // unsigned 16 bit immediate
3224 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
3225 EVT Type = Op.getValueType();
3226 uint64_t Val = (uint64_t)C->getZExtValue();
3227 if (isUInt<16>(Val)) {
3228 Result = DAG.getTargetConstant(Val, Type);
3233 case 'L': // signed 32 bit immediate where lower 16 bits are 0
3234 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
3235 EVT Type = Op.getValueType();
3236 int64_t Val = C->getSExtValue();
3237 if ((isInt<32>(Val)) && ((Val & 0xffff) == 0)){
3238 Result = DAG.getTargetConstant(Val, Type);
3243 case 'N': // immediate in the range of -65535 to -1 (inclusive)
3244 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
3245 EVT Type = Op.getValueType();
3246 int64_t Val = C->getSExtValue();
3247 if ((Val >= -65535) && (Val <= -1)) {
3248 Result = DAG.getTargetConstant(Val, Type);
3253 case 'O': // signed 15 bit immediate
3254 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
3255 EVT Type = Op.getValueType();
3256 int64_t Val = C->getSExtValue();
3257 if ((isInt<15>(Val))) {
3258 Result = DAG.getTargetConstant(Val, Type);
3263 case 'P': // immediate in the range of 1 to 65535 (inclusive)
3264 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
3265 EVT Type = Op.getValueType();
3266 int64_t Val = C->getSExtValue();
3267 if ((Val <= 65535) && (Val >= 1)) {
3268 Result = DAG.getTargetConstant(Val, Type);
3275 if (Result.getNode()) {
3276 Ops.push_back(Result);
3280 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
3283 bool MipsTargetLowering::isLegalAddressingMode(const AddrMode &AM,
3285 // No global is ever allowed as a base.
3290 case 0: // "r+i" or just "i", depending on HasBaseReg.
3293 if (!AM.HasBaseReg) // allow "r+i".
3295 return false; // disallow "r+r" or "r+r+i".
3304 MipsTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
3305 // The Mips target isn't yet aware of offsets.
3309 EVT MipsTargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
3311 bool IsMemset, bool ZeroMemset,
3313 MachineFunction &MF) const {
3314 if (Subtarget.hasMips64())
3320 bool MipsTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
3321 if (VT != MVT::f32 && VT != MVT::f64)
3323 if (Imm.isNegZero())
3325 return Imm.isZero();
3328 unsigned MipsTargetLowering::getJumpTableEncoding() const {
3329 if (Subtarget.isABI_N64())
3330 return MachineJumpTableInfo::EK_GPRel64BlockAddress;
3332 return TargetLowering::getJumpTableEncoding();
3335 /// This function returns true if CallSym is a long double emulation routine.
3336 static bool isF128SoftLibCall(const char *CallSym) {
3337 const char *const LibCalls[] =
3338 {"__addtf3", "__divtf3", "__eqtf2", "__extenddftf2", "__extendsftf2",
3339 "__fixtfdi", "__fixtfsi", "__fixtfti", "__fixunstfdi", "__fixunstfsi",
3340 "__fixunstfti", "__floatditf", "__floatsitf", "__floattitf",
3341 "__floatunditf", "__floatunsitf", "__floatuntitf", "__getf2", "__gttf2",
3342 "__letf2", "__lttf2", "__multf3", "__netf2", "__powitf2", "__subtf3",
3343 "__trunctfdf2", "__trunctfsf2", "__unordtf2",
3344 "ceill", "copysignl", "cosl", "exp2l", "expl", "floorl", "fmal", "fmodl",
3345 "log10l", "log2l", "logl", "nearbyintl", "powl", "rintl", "sinl", "sqrtl",
3348 const char *const *End = LibCalls + array_lengthof(LibCalls);
3350 // Check that LibCalls is sorted alphabetically.
3351 MipsTargetLowering::LTStr Comp;
3354 for (const char *const *I = LibCalls; I < End - 1; ++I)
3355 assert(Comp(*I, *(I + 1)));
3358 return std::binary_search(LibCalls, End, CallSym, Comp);
3361 /// This function returns true if Ty is fp128 or i128 which was originally a
3363 static bool originalTypeIsF128(const Type *Ty, const SDNode *CallNode) {
3364 if (Ty->isFP128Ty())
3367 const ExternalSymbolSDNode *ES =
3368 dyn_cast_or_null<const ExternalSymbolSDNode>(CallNode);
3370 // If the Ty is i128 and the function being called is a long double emulation
3371 // routine, then the original type is f128.
3372 return (ES && Ty->isIntegerTy(128) && isF128SoftLibCall(ES->getSymbol()));
3375 MipsTargetLowering::MipsCC::SpecialCallingConvType
3376 MipsTargetLowering::getSpecialCallingConv(SDValue Callee) const {
3377 MipsCC::SpecialCallingConvType SpecialCallingConv =
3378 MipsCC::NoSpecialCallingConv;
3379 if (Subtarget.inMips16HardFloat()) {
3380 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
3381 llvm::StringRef Sym = G->getGlobal()->getName();
3382 Function *F = G->getGlobal()->getParent()->getFunction(Sym);
3383 if (F && F->hasFnAttribute("__Mips16RetHelper")) {
3384 SpecialCallingConv = MipsCC::Mips16RetHelperConv;
3388 return SpecialCallingConv;
3391 MipsTargetLowering::MipsCC::MipsCC(
3392 CallingConv::ID CC, bool IsO32_, bool IsFP64_, CCState &Info,
3393 MipsCC::SpecialCallingConvType SpecialCallingConv_)
3394 : CCInfo(Info), CallConv(CC), IsO32(IsO32_), IsFP64(IsFP64_),
3395 SpecialCallingConv(SpecialCallingConv_){
3396 // Pre-allocate reserved argument area.
3397 CCInfo.AllocateStack(reservedArgArea(), 1);
3401 void MipsTargetLowering::MipsCC::
3402 analyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Args,
3403 bool IsVarArg, bool IsSoftFloat, const SDNode *CallNode,
3404 std::vector<ArgListEntry> &FuncArgs) {
3405 assert((CallConv != CallingConv::Fast || !IsVarArg) &&
3406 "CallingConv::Fast shouldn't be used for vararg functions.");
3408 unsigned NumOpnds = Args.size();
3409 llvm::CCAssignFn *FixedFn = fixedArgFn(), *VarFn = varArgFn();
3411 for (unsigned I = 0; I != NumOpnds; ++I) {
3412 MVT ArgVT = Args[I].VT;
3413 ISD::ArgFlagsTy ArgFlags = Args[I].Flags;
3416 if (ArgFlags.isByVal()) {
3417 handleByValArg(I, ArgVT, ArgVT, CCValAssign::Full, ArgFlags);
3421 if (IsVarArg && !Args[I].IsFixed)
3422 R = VarFn(I, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo);
3424 MVT RegVT = getRegVT(ArgVT, FuncArgs[Args[I].OrigArgIndex].Ty, CallNode,
3426 R = FixedFn(I, ArgVT, RegVT, CCValAssign::Full, ArgFlags, CCInfo);
3431 dbgs() << "Call operand #" << I << " has unhandled type "
3432 << EVT(ArgVT).getEVTString();
3434 llvm_unreachable(nullptr);
3439 void MipsTargetLowering::MipsCC::
3440 analyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Args,
3441 bool IsSoftFloat, Function::const_arg_iterator FuncArg) {
3442 unsigned NumArgs = Args.size();
3443 llvm::CCAssignFn *FixedFn = fixedArgFn();
3444 unsigned CurArgIdx = 0;
3446 for (unsigned I = 0; I != NumArgs; ++I) {
3447 MVT ArgVT = Args[I].VT;
3448 ISD::ArgFlagsTy ArgFlags = Args[I].Flags;
3449 std::advance(FuncArg, Args[I].OrigArgIndex - CurArgIdx);
3450 CurArgIdx = Args[I].OrigArgIndex;
3452 if (ArgFlags.isByVal()) {
3453 handleByValArg(I, ArgVT, ArgVT, CCValAssign::Full, ArgFlags);
3457 MVT RegVT = getRegVT(ArgVT, FuncArg->getType(), nullptr, IsSoftFloat);
3459 if (!FixedFn(I, ArgVT, RegVT, CCValAssign::Full, ArgFlags, CCInfo))
3463 dbgs() << "Formal Arg #" << I << " has unhandled type "
3464 << EVT(ArgVT).getEVTString();
3466 llvm_unreachable(nullptr);
3470 template<typename Ty>
3471 void MipsTargetLowering::MipsCC::
3472 analyzeReturn(const SmallVectorImpl<Ty> &RetVals, bool IsSoftFloat,
3473 const SDNode *CallNode, const Type *RetTy) const {
3476 if (IsSoftFloat && originalTypeIsF128(RetTy, CallNode))
3477 Fn = RetCC_F128Soft;
3481 for (unsigned I = 0, E = RetVals.size(); I < E; ++I) {
3482 MVT VT = RetVals[I].VT;
3483 ISD::ArgFlagsTy Flags = RetVals[I].Flags;
3484 MVT RegVT = this->getRegVT(VT, RetTy, CallNode, IsSoftFloat);
3486 if (Fn(I, VT, RegVT, CCValAssign::Full, Flags, this->CCInfo)) {
3488 dbgs() << "Call result #" << I << " has unhandled type "
3489 << EVT(VT).getEVTString() << '\n';
3491 llvm_unreachable(nullptr);
3496 void MipsTargetLowering::MipsCC::
3497 analyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins, bool IsSoftFloat,
3498 const SDNode *CallNode, const Type *RetTy) const {
3499 analyzeReturn(Ins, IsSoftFloat, CallNode, RetTy);
3502 void MipsTargetLowering::MipsCC::
3503 analyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsSoftFloat,
3504 const Type *RetTy) const {
3505 analyzeReturn(Outs, IsSoftFloat, nullptr, RetTy);
3508 void MipsTargetLowering::MipsCC::handleByValArg(unsigned ValNo, MVT ValVT,
3510 CCValAssign::LocInfo LocInfo,
3511 ISD::ArgFlagsTy ArgFlags) {
3512 assert(ArgFlags.getByValSize() && "Byval argument's size shouldn't be 0.");
3514 struct ByValArgInfo ByVal;
3515 unsigned RegSize = regSize();
3516 unsigned ByValSize = RoundUpToAlignment(ArgFlags.getByValSize(), RegSize);
3517 unsigned Align = std::min(std::max(ArgFlags.getByValAlign(), RegSize),
3520 if (useRegsForByval())
3521 allocateRegs(ByVal, ByValSize, Align);
3523 // Allocate space on caller's stack.
3524 ByVal.Address = CCInfo.AllocateStack(ByValSize - RegSize * ByVal.NumRegs,
3526 CCInfo.addLoc(CCValAssign::getMem(ValNo, ValVT, ByVal.Address, LocVT,
3528 ByValArgs.push_back(ByVal);
3531 unsigned MipsTargetLowering::MipsCC::numIntArgRegs() const {
3532 return IsO32 ? array_lengthof(O32IntRegs) : array_lengthof(Mips64IntRegs);
3535 unsigned MipsTargetLowering::MipsCC::reservedArgArea() const {
3536 return (IsO32 && (CallConv != CallingConv::Fast)) ? 16 : 0;
3539 const MCPhysReg *MipsTargetLowering::MipsCC::intArgRegs() const {
3540 return IsO32 ? O32IntRegs : Mips64IntRegs;
3543 llvm::CCAssignFn *MipsTargetLowering::MipsCC::fixedArgFn() const {
3544 if (CallConv == CallingConv::Fast)
3545 return CC_Mips_FastCC;
3547 if (SpecialCallingConv == Mips16RetHelperConv)
3548 return CC_Mips16RetHelper;
3549 return IsO32 ? (IsFP64 ? CC_MipsO32_FP64 : CC_MipsO32_FP32) : CC_MipsN;
3552 llvm::CCAssignFn *MipsTargetLowering::MipsCC::varArgFn() const {
3553 return IsO32 ? (IsFP64 ? CC_MipsO32_FP64 : CC_MipsO32_FP32) : CC_MipsN_VarArg;
3556 const MCPhysReg *MipsTargetLowering::MipsCC::shadowRegs() const {
3557 return IsO32 ? O32IntRegs : Mips64DPRegs;
3560 void MipsTargetLowering::MipsCC::allocateRegs(ByValArgInfo &ByVal,
3563 unsigned RegSize = regSize(), NumIntArgRegs = numIntArgRegs();
3564 const MCPhysReg *IntArgRegs = intArgRegs(), *ShadowRegs = shadowRegs();
3565 assert(!(ByValSize % RegSize) && !(Align % RegSize) &&
3566 "Byval argument's size and alignment should be a multiple of"
3569 ByVal.FirstIdx = CCInfo.getFirstUnallocated(IntArgRegs, NumIntArgRegs);
3571 // If Align > RegSize, the first arg register must be even.
3572 if ((Align > RegSize) && (ByVal.FirstIdx % 2)) {
3573 CCInfo.AllocateReg(IntArgRegs[ByVal.FirstIdx], ShadowRegs[ByVal.FirstIdx]);
3577 // Mark the registers allocated.
3578 for (unsigned I = ByVal.FirstIdx; ByValSize && (I < NumIntArgRegs);
3579 ByValSize -= RegSize, ++I, ++ByVal.NumRegs)
3580 CCInfo.AllocateReg(IntArgRegs[I], ShadowRegs[I]);
3583 MVT MipsTargetLowering::MipsCC::getRegVT(MVT VT, const Type *OrigTy,
3584 const SDNode *CallNode,
3585 bool IsSoftFloat) const {
3586 if (IsSoftFloat || IsO32)
3589 // Check if the original type was fp128.
3590 if (originalTypeIsF128(OrigTy, CallNode)) {
3591 assert(VT == MVT::i64);
3598 void MipsTargetLowering::
3599 copyByValRegs(SDValue Chain, SDLoc DL, std::vector<SDValue> &OutChains,
3600 SelectionDAG &DAG, const ISD::ArgFlagsTy &Flags,
3601 SmallVectorImpl<SDValue> &InVals, const Argument *FuncArg,
3602 const MipsCC &CC, const ByValArgInfo &ByVal) const {
3603 MachineFunction &MF = DAG.getMachineFunction();
3604 MachineFrameInfo *MFI = MF.getFrameInfo();
3605 unsigned RegAreaSize = ByVal.NumRegs * CC.regSize();
3606 unsigned FrameObjSize = std::max(Flags.getByValSize(), RegAreaSize);
3610 FrameObjOffset = (int)CC.reservedArgArea() -
3611 (int)((CC.numIntArgRegs() - ByVal.FirstIdx) * CC.regSize());
3613 FrameObjOffset = ByVal.Address;
3615 // Create frame object.
3616 EVT PtrTy = getPointerTy();
3617 int FI = MFI->CreateFixedObject(FrameObjSize, FrameObjOffset, true);
3618 SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
3619 InVals.push_back(FIN);
3624 // Copy arg registers.
3625 MVT RegTy = MVT::getIntegerVT(CC.regSize() * 8);
3626 const TargetRegisterClass *RC = getRegClassFor(RegTy);
3628 for (unsigned I = 0; I < ByVal.NumRegs; ++I) {
3629 unsigned ArgReg = CC.intArgRegs()[ByVal.FirstIdx + I];
3630 unsigned VReg = addLiveIn(MF, ArgReg, RC);
3631 unsigned Offset = I * CC.regSize();
3632 SDValue StorePtr = DAG.getNode(ISD::ADD, DL, PtrTy, FIN,
3633 DAG.getConstant(Offset, PtrTy));
3634 SDValue Store = DAG.getStore(Chain, DL, DAG.getRegister(VReg, RegTy),
3635 StorePtr, MachinePointerInfo(FuncArg, Offset),
3637 OutChains.push_back(Store);
3641 // Copy byVal arg to registers and stack.
// Caller-side lowering of a byval argument: whole argument registers are
// filled with word-sized loads from the source aggregate, a trailing
// sub-word piece is assembled from zero-extended partial loads shifted into
// position, and whatever does not fit in registers is copied into the
// outgoing argument area with a memcpy.
3642 void MipsTargetLowering::
3643 passByValArg(SDValue Chain, SDLoc DL,
3644 std::deque< std::pair<unsigned, SDValue> > &RegsToPass,
3645 SmallVectorImpl<SDValue> &MemOpChains, SDValue StackPtr,
3646 MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg,
3647 const MipsCC &CC, const ByValArgInfo &ByVal,
3648 const ISD::ArgFlagsTy &Flags, bool isLittle) const {
3649 unsigned ByValSizeInBytes = Flags.getByValSize();
3650 unsigned OffsetInBytes = 0; // From beginning of struct
3651 unsigned RegSizeInBytes = CC.regSize();
// Memory accesses below never exceed a register, so the effective
// alignment for the final memcpy is capped at the register size.
3652 unsigned Alignment = std::min(Flags.getByValAlign(), RegSizeInBytes);
3653 EVT PtrTy = getPointerTy(), RegTy = MVT::getIntegerVT(RegSizeInBytes * 8);
// Pass as much of the aggregate as possible in integer argument registers.
3655 if (ByVal.NumRegs) {
3656 const MCPhysReg *ArgRegs = CC.intArgRegs();
// True when the last register is only partially covered by the struct,
// i.e. a sub-word tail remains after the full-word copies.
3657 bool LeftoverBytes = (ByVal.NumRegs * RegSizeInBytes > ByValSizeInBytes);
3660 // Copy words to registers.
// NOTE(review): the declaration of loop counter I falls in an elided span.
3661 for (; I < ByVal.NumRegs - LeftoverBytes;
3662 ++I, OffsetInBytes += RegSizeInBytes) {
3663 SDValue LoadPtr = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
3664 DAG.getConstant(OffsetInBytes, PtrTy));
3665 SDValue LoadVal = DAG.getLoad(RegTy, DL, Chain, LoadPtr,
3666 MachinePointerInfo(), false, false, false,
// Record the load's chain so it is ordered with the other memory ops.
3668 MemOpChains.push_back(LoadVal.getValue(1));
3669 unsigned ArgReg = ArgRegs[ByVal.FirstIdx + I];
3670 RegsToPass.push_back(std::make_pair(ArgReg, LoadVal));
3673 // Return if the struct has been fully copied.
3674 if (ByValSizeInBytes == OffsetInBytes)
3677 // Copy the remainder of the byval argument with sub-word loads and shifts.
3678 if (LeftoverBytes) {
3679 assert((ByValSizeInBytes > OffsetInBytes) &&
3680 (ByValSizeInBytes < OffsetInBytes + RegSizeInBytes) &&
3681 "Size of the remainder should be smaller than RegSizeInBytes.");
// Assemble the final register value from progressively smaller
// zero-extended loads (half-word, byte, ...) OR'd together.
3684 for (unsigned LoadSizeInBytes = RegSizeInBytes / 2, TotalBytesLoaded = 0;
3685 OffsetInBytes < ByValSizeInBytes; LoadSizeInBytes /= 2) {
3686 unsigned RemainingSizeInBytes = ByValSizeInBytes - OffsetInBytes;
// Skip to the next smaller load size if this one would read past the end.
3688 if (RemainingSizeInBytes < LoadSizeInBytes)
3692 SDValue LoadPtr = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
3693 DAG.getConstant(OffsetInBytes, PtrTy));
3694 SDValue LoadVal = DAG.getExtLoad(
3695 ISD::ZEXTLOAD, DL, RegTy, Chain, LoadPtr, MachinePointerInfo(),
3696 MVT::getIntegerVT(LoadSizeInBytes * 8), false, false, false,
3698 MemOpChains.push_back(LoadVal.getValue(1));
3700 // Shift the loaded value.
// Place the piece at its byte position within the register; presumably
// the elided branch selects on isLittle (little- vs big-endian layout)
// — TODO confirm against the full source.
3704 Shamt = TotalBytesLoaded * 8;
3706 Shamt = (RegSizeInBytes - (TotalBytesLoaded + LoadSizeInBytes)) * 8;
3708 SDValue Shift = DAG.getNode(ISD::SHL, DL, RegTy, LoadVal,
3709 DAG.getConstant(Shamt, MVT::i32));
// Merge this piece into the accumulated register value.
3712 Val = DAG.getNode(ISD::OR, DL, RegTy, Val, Shift);
3716 OffsetInBytes += LoadSizeInBytes;
3717 TotalBytesLoaded += LoadSizeInBytes;
// Tighten memcpy alignment to the smallest access performed so far.
3718 Alignment = std::min(Alignment, LoadSizeInBytes);
3721 unsigned ArgReg = ArgRegs[ByVal.FirstIdx + I];
3722 RegsToPass.push_back(std::make_pair(ArgReg, Val));
3727 // Copy remainder of byval arg to it with memcpy.
3728 unsigned MemCpySize = ByValSizeInBytes - OffsetInBytes;
3729 SDValue Src = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
3730 DAG.getConstant(OffsetInBytes, PtrTy));
// Destination: this argument's assigned slot in the outgoing argument
// area, addressed relative to the stack pointer.
3731 SDValue Dst = DAG.getNode(ISD::ADD, DL, PtrTy, StackPtr,
3732 DAG.getIntPtrConstant(ByVal.Address));
3733 Chain = DAG.getMemcpy(Chain, DL, Dst, Src, DAG.getConstant(MemCpySize, PtrTy),
3734 Alignment, /*isVolatile=*/false, /*AlwaysInline=*/false,
3735 MachinePointerInfo(), MachinePointerInfo());
3736 MemOpChains.push_back(Chain);
3739 void MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,
3740 const MipsCC &CC, SDValue Chain,
3741 SDLoc DL, SelectionDAG &DAG) const {
3742 unsigned NumRegs = CC.numIntArgRegs();
3743 const MCPhysReg *ArgRegs = CC.intArgRegs();
3744 const CCState &CCInfo = CC.getCCInfo();
3745 unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs, NumRegs);
3746 unsigned RegSize = CC.regSize();
3747 MVT RegTy = MVT::getIntegerVT(RegSize * 8);
3748 const TargetRegisterClass *RC = getRegClassFor(RegTy);
3749 MachineFunction &MF = DAG.getMachineFunction();
3750 MachineFrameInfo *MFI = MF.getFrameInfo();
3751 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
3753 // Offset of the first variable argument from stack pointer.
3757 VaArgOffset = RoundUpToAlignment(CCInfo.getNextStackOffset(), RegSize);
3759 VaArgOffset = (int)CC.reservedArgArea() - (int)(RegSize * (NumRegs - Idx));
3761 // Record the frame index of the first variable argument
3762 // which is a value necessary to VASTART.
3763 int FI = MFI->CreateFixedObject(RegSize, VaArgOffset, true);
3764 MipsFI->setVarArgsFrameIndex(FI);
3766 // Copy the integer registers that have not been used for argument passing
3767 // to the argument register save area. For O32, the save area is allocated
3768 // in the caller's stack frame, while for N32/64, it is allocated in the
3769 // callee's stack frame.
3770 for (unsigned I = Idx; I < NumRegs; ++I, VaArgOffset += RegSize) {
3771 unsigned Reg = addLiveIn(MF, ArgRegs[I], RC);
3772 SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegTy);
3773 FI = MFI->CreateFixedObject(RegSize, VaArgOffset, true);
3774 SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy());
3775 SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
3776 MachinePointerInfo(), false, false, 0);
3777 cast<StoreSDNode>(Store.getNode())->getMemOperand()->setValue(
3779 OutChains.push_back(Store);