1 //===-- MipsISelLowering.cpp - Mips DAG Lowering Implementation -----------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
// This file defines the interfaces that Mips uses to lower LLVM code into a
// selection DAG.
13 //===----------------------------------------------------------------------===//
15 #define DEBUG_TYPE "mips-lower"
16 #include "MipsISelLowering.h"
17 #include "MipsMachineFunction.h"
18 #include "MipsTargetMachine.h"
19 #include "MipsTargetObjectFile.h"
20 #include "MipsSubtarget.h"
21 #include "InstPrinter/MipsInstPrinter.h"
22 #include "MCTargetDesc/MipsBaseInfo.h"
23 #include "llvm/DerivedTypes.h"
24 #include "llvm/Function.h"
25 #include "llvm/GlobalVariable.h"
26 #include "llvm/Intrinsics.h"
27 #include "llvm/CallingConv.h"
28 #include "llvm/CodeGen/CallingConvLower.h"
29 #include "llvm/CodeGen/MachineFrameInfo.h"
30 #include "llvm/CodeGen/MachineFunction.h"
31 #include "llvm/CodeGen/MachineInstrBuilder.h"
32 #include "llvm/CodeGen/MachineRegisterInfo.h"
33 #include "llvm/CodeGen/SelectionDAGISel.h"
34 #include "llvm/CodeGen/ValueTypes.h"
35 #include "llvm/Support/Debug.h"
36 #include "llvm/Support/ErrorHandling.h"
37 #include "llvm/Support/raw_ostream.h"
41 // If I is a shifted mask, set the size (Size) and the first bit of the
42 // mask (Pos), and return true.
43 // For example, if I is 0x003ff800, (Pos, Size) = (11, 11).
44 static bool IsShiftedMask(uint64_t I, uint64_t &Pos, uint64_t &Size) {
45 if (!isShiftedMask_64(I))
48 Size = CountPopulation_64(I);
49 Pos = CountTrailingZeros_64(I);
53 static SDValue GetGlobalReg(SelectionDAG &DAG, EVT Ty) {
54 MipsFunctionInfo *FI = DAG.getMachineFunction().getInfo<MipsFunctionInfo>();
55 return DAG.getRegister(FI->getGlobalBaseReg(), Ty);
58 const char *MipsTargetLowering::getTargetNodeName(unsigned Opcode) const {
60 case MipsISD::JmpLink: return "MipsISD::JmpLink";
61 case MipsISD::TailCall: return "MipsISD::TailCall";
62 case MipsISD::Hi: return "MipsISD::Hi";
63 case MipsISD::Lo: return "MipsISD::Lo";
64 case MipsISD::GPRel: return "MipsISD::GPRel";
65 case MipsISD::ThreadPointer: return "MipsISD::ThreadPointer";
66 case MipsISD::Ret: return "MipsISD::Ret";
67 case MipsISD::FPBrcond: return "MipsISD::FPBrcond";
68 case MipsISD::FPCmp: return "MipsISD::FPCmp";
69 case MipsISD::CMovFP_T: return "MipsISD::CMovFP_T";
70 case MipsISD::CMovFP_F: return "MipsISD::CMovFP_F";
71 case MipsISD::FPRound: return "MipsISD::FPRound";
72 case MipsISD::MAdd: return "MipsISD::MAdd";
73 case MipsISD::MAddu: return "MipsISD::MAddu";
74 case MipsISD::MSub: return "MipsISD::MSub";
75 case MipsISD::MSubu: return "MipsISD::MSubu";
76 case MipsISD::DivRem: return "MipsISD::DivRem";
77 case MipsISD::DivRemU: return "MipsISD::DivRemU";
78 case MipsISD::BuildPairF64: return "MipsISD::BuildPairF64";
79 case MipsISD::ExtractElementF64: return "MipsISD::ExtractElementF64";
80 case MipsISD::Wrapper: return "MipsISD::Wrapper";
81 case MipsISD::DynAlloc: return "MipsISD::DynAlloc";
82 case MipsISD::Sync: return "MipsISD::Sync";
83 case MipsISD::Ext: return "MipsISD::Ext";
84 case MipsISD::Ins: return "MipsISD::Ins";
85 case MipsISD::LWL: return "MipsISD::LWL";
86 case MipsISD::LWR: return "MipsISD::LWR";
87 case MipsISD::SWL: return "MipsISD::SWL";
88 case MipsISD::SWR: return "MipsISD::SWR";
89 case MipsISD::LDL: return "MipsISD::LDL";
90 case MipsISD::LDR: return "MipsISD::LDR";
91 case MipsISD::SDL: return "MipsISD::SDL";
92 case MipsISD::SDR: return "MipsISD::SDR";
93 case MipsISD::EXTP: return "MipsISD::EXTP";
94 case MipsISD::EXTPDP: return "MipsISD::EXTPDP";
95 case MipsISD::EXTR_S_H: return "MipsISD::EXTR_S_H";
96 case MipsISD::EXTR_W: return "MipsISD::EXTR_W";
97 case MipsISD::EXTR_R_W: return "MipsISD::EXTR_R_W";
98 case MipsISD::EXTR_RS_W: return "MipsISD::EXTR_RS_W";
99 case MipsISD::SHILO: return "MipsISD::SHILO";
100 case MipsISD::MTHLIP: return "MipsISD::MTHLIP";
101 case MipsISD::MULT: return "MipsISD::MULT";
102 case MipsISD::MULTU: return "MipsISD::MULTU";
103 case MipsISD::MADD_DSP: return "MipsISD::MADD_DSPDSP";
104 case MipsISD::MADDU_DSP: return "MipsISD::MADDU_DSP";
105 case MipsISD::MSUB_DSP: return "MipsISD::MSUB_DSP";
106 case MipsISD::MSUBU_DSP: return "MipsISD::MSUBU_DSP";
107 default: return NULL;
// MipsTargetLowering constructor: registers the legal register classes,
// declares which operations need Custom lowering or Expand legalization,
// and installs the target's DAG-combine hooks.
// NOTE(review): several enclosing guards/braces from the full listing are
// not visible in this chunk; statement order is preserved as-is.
MipsTargetLowering(MipsTargetMachine &TM)
  : TargetLowering(TM, new MipsTargetObjectFile()),
    Subtarget(&TM.getSubtarget<MipsSubtarget>()),
    HasMips64(Subtarget->hasMips64()), IsN64(Subtarget->isABI_N64()),
    IsO32(Subtarget->isABI_O32()) {

  // Mips does not have i1 type, so use i32 for
  // setcc operations results (slt, sgt, ...).
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?

  // Set up the register classes
  addRegisterClass(MVT::i32, &Mips::CPURegsRegClass);
  // presumably guarded by HasMips64 in the full listing — TODO confirm
  addRegisterClass(MVT::i64, &Mips::CPU64RegsRegClass);

  if (Subtarget->inMips16Mode()) {
    addRegisterClass(MVT::i32, &Mips::CPU16RegsRegClass);

  if (Subtarget->hasDSP()) {
    MVT::SimpleValueType VecTys[2] = {MVT::v2i16, MVT::v4i8};

    for (unsigned i = 0; i < array_lengthof(VecTys); ++i) {
      addRegisterClass(VecTys[i], &Mips::DSPRegsRegClass);

      // Expand all builtin opcodes.
      for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
        setOperationAction(Opc, VecTys[i], Expand);

      // ...except plain load/store/bitcast, which DSP registers support.
      setOperationAction(ISD::LOAD, VecTys[i], Legal);
      setOperationAction(ISD::STORE, VecTys[i], Legal);
      setOperationAction(ISD::BITCAST, VecTys[i], Legal);

  if (!TM.Options.UseSoftFloat) {
    addRegisterClass(MVT::f32, &Mips::FGR32RegClass);

    // When dealing with single precision only, use libcalls
    if (!Subtarget->isSingleFloat()) {
      // FGR64 vs AFGR64 choice (presumably keyed off HasMips64 in the full
      // listing — TODO confirm; the guard is not visible here).
      addRegisterClass(MVT::f64, &Mips::FGR64RegClass);
      addRegisterClass(MVT::f64, &Mips::AFGR64RegClass);

  // Load extended operations for i1 types must be promoted
  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  // MIPS doesn't have extending float->double load/store
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // Used by legalize types to correctly generate the setcc result.
  // Without this, every float setcc comes with a AND/OR with the result,
  // we don't want this, since the fpcmp result goes to a flag register,
  // which is used implicitly by brcond and select operations.
  AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);

  // Mips Custom Operations
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
  setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // In non-Mips16 mode, lower loads/stores specially (e.g. for unaligned
  // access via LWL/LWR — see the MipsISD::LWL/LWR nodes above).
  if (!Subtarget->inMips16Mode()) {
    setOperationAction(ISD::LOAD, MVT::i32, Custom);
    setOperationAction(ISD::STORE, MVT::i32, Custom);

  if (!TM.Options.NoNaNsFPMath) {
    setOperationAction(ISD::FABS, MVT::f32, Custom);
    setOperationAction(ISD::FABS, MVT::f64, Custom);

  // 64-bit custom lowerings (presumably guarded by HasMips64 in the full
  // listing — TODO confirm).
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::i64, Custom);
  setOperationAction(ISD::LOAD, MVT::i64, Custom);
  setOperationAction(ISD::STORE, MVT::i64, Custom);

  // Multi-part shifts for i32 pairs.
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);

  // div/rem are expanded so the DAG combiner can form (S|U)DIVREM, which
  // PerformDivRemCombine later maps onto MipsISD::DivRem(U).
  setOperationAction(ISD::SDIV, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIV, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIV, MVT::i64, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UDIV, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // Operations not directly supported by Mips.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i64, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i64, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);

  // Rotate-right exists only from MIPS32r2/MIPS64r2 on.
  if (!Subtarget->hasMips32r2())
    setOperationAction(ISD::ROTR, MVT::i32, Expand);

  if (!Subtarget->hasMips64r2())
    setOperationAction(ISD::ROTR, MVT::i64, Expand);

  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FPOWI, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FLOG, MVT::f32, Expand);
  setOperationAction(ISD::FLOG2, MVT::f32, Expand);
  setOperationAction(ISD::FLOG10, MVT::f32, Expand);
  setOperationAction(ISD::FEXP, MVT::f32, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);

  // FNEG cannot be implemented as a sign-bit flip when NaNs must be honored.
  if (!TM.Options.NoNaNsFPMath) {
    setOperationAction(ISD::FNEG, MVT::f32, Expand);
    setOperationAction(ISD::FNEG, MVT::f64, Expand);

  setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand);
  setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i32, Expand);
  setOperationAction(ISD::EHSELECTION, MVT::i64, Expand);

  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);

  // Use the default for now
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Expand);
  setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
  setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Expand);
  setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);

  // Emit sync instructions around atomics instead of folding ordering in.
  setInsertFencesForAtomic(true);

  // seb/seh only exist on cores with the SEInReg feature.
  if (!Subtarget->hasSEInReg()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);

  if (!Subtarget->hasBitCount()) {
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);
    setOperationAction(ISD::CTLZ, MVT::i64, Expand);

  if (!Subtarget->hasSwap()) {
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);
    setOperationAction(ISD::BSWAP, MVT::i64, Expand);

  setLoadExtAction(ISD::SEXTLOAD, MVT::i32, Custom);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i32, Custom);
  setLoadExtAction(ISD::EXTLOAD, MVT::i32, Custom);
  setTruncStoreAction(MVT::i64, MVT::i32, Custom);

  // DAG-combine hooks; dispatched in PerformDAGCombine below.
  setTargetDAGCombine(ISD::ADDE);
  setTargetDAGCombine(ISD::SUBE);
  setTargetDAGCombine(ISD::SDIVREM);
  setTargetDAGCombine(ISD::UDIVREM);
  setTargetDAGCombine(ISD::SELECT);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::ADD);

  // log2 alignment: 8 bytes on Mips64, 4 bytes otherwise.
  setMinFunctionAlignment(HasMips64 ? 3 : 2);

  setStackPointerRegisterToSaveRestore(IsN64 ? Mips::SP_64 : Mips::SP);
  computeRegisterProperties();

  setExceptionPointerRegister(IsN64 ? Mips::A0_64 : Mips::A0);
  setExceptionSelectorRegister(IsN64 ? Mips::A1_64 : Mips::A1);

  maxStoresPerMemcpy = 16;
// Returns whether unaligned memory accesses of type VT are acceptable on
// this subtarget. Mips16 has no unaligned-access support instructions.
// NOTE(review): the per-type decision logic after the Mips16 check is not
// visible in this chunk.
bool MipsTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const {
  MVT::SimpleValueType SVT = VT.getSimpleVT().SimpleTy;

  if (Subtarget->inMips16Mode())
// Result type produced by SETCC nodes (body not visible in this chunk;
// presumably i32, matching setBooleanContents above — TODO confirm).
EVT MipsTargetLowering::getSetCCResultType(EVT VT) const {
365 // Transforms a subgraph in CurDAG if the following pattern is found:
366 // (addc multLo, Lo0), (adde multHi, Hi0),
368 // multHi/Lo: product of multiplication
369 // Lo0: initial value of Lo register
370 // Hi0: initial value of Hi register
371 // Return true if pattern matching was successful.
// SelectMadd - rewrite a matched (addc, adde, mul_lohi) subgraph into a
// single MipsISD::MAdd(u) node whose Lo/Hi results are read back through
// the physical LO/HI registers.
static bool SelectMadd(SDNode *ADDENode, SelectionDAG *CurDAG) {
  // ADDENode's second operand must be a flag output of an ADDC node in order
  // for the matching to be successful.
  SDNode *ADDCNode = ADDENode->getOperand(2).getNode();

  if (ADDCNode->getOpcode() != ISD::ADDC)

  SDValue MultHi = ADDENode->getOperand(0);
  SDValue MultLo = ADDCNode->getOperand(0);
  SDNode *MultNode = MultHi.getNode();
  unsigned MultOpc = MultHi.getOpcode();

  // MultHi and MultLo must be generated by the same node,
  if (MultLo.getNode() != MultNode)

  // and it must be a multiplication.
  if (MultOpc != ISD::SMUL_LOHI && MultOpc != ISD::UMUL_LOHI)

  // MultLo and MultHi must be the first and second output of MultNode
  // respectively.
  if (MultHi.getResNo() != 1 || MultLo.getResNo() != 0)

  // Transform this to a MADD only if ADDENode and ADDCNode are the only users
  // of the values of MultNode, in which case MultNode will be removed in later
  // phases.
  // If there exist users other than ADDENode or ADDCNode, this function returns
  // here, which will result in MultNode being mapped to a single MULT
  // instruction node rather than a pair of MULT and MADD instructions being
  // produced.
  if (!MultHi.hasOneUse() || !MultLo.hasOneUse())

  SDValue Chain = CurDAG->getEntryNode();
  DebugLoc dl = ADDENode->getDebugLoc();

  // create MipsMAdd(u) node
  MultOpc = MultOpc == ISD::UMUL_LOHI ? MipsISD::MAddu : MipsISD::MAdd;

  SDValue MAdd = CurDAG->getNode(MultOpc, dl, MVT::Glue,
                                 MultNode->getOperand(0),// Factor 0
                                 MultNode->getOperand(1),// Factor 1
                                 ADDCNode->getOperand(1),// Lo0
                                 ADDENode->getOperand(1));// Hi0

  // create CopyFromReg nodes: Lo first, then Hi chained/glued behind it.
  SDValue CopyFromLo = CurDAG->getCopyFromReg(Chain, dl, Mips::LO, MVT::i32,
  SDValue CopyFromHi = CurDAG->getCopyFromReg(CopyFromLo.getValue(1), dl,
                                              CopyFromLo.getValue(2));

  // replace uses of adde and addc here
  if (!SDValue(ADDCNode, 0).use_empty())
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(ADDCNode, 0), CopyFromLo);

  if (!SDValue(ADDENode, 0).use_empty())
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(ADDENode, 0), CopyFromHi);
438 // Transforms a subgraph in CurDAG if the following pattern is found:
439 // (addc Lo0, multLo), (sube Hi0, multHi),
441 // multHi/Lo: product of multiplication
442 // Lo0: initial value of Lo register
443 // Hi0: initial value of Hi register
444 // Return true if pattern matching was successful.
// SelectMsub - mirror of SelectMadd: rewrite a matched (subc, sube,
// mul_lohi) subgraph into a single MipsISD::MSub(u) node whose Lo/Hi
// results are read back through the physical LO/HI registers.
static bool SelectMsub(SDNode *SUBENode, SelectionDAG *CurDAG) {
  // SUBENode's second operand must be a flag output of an SUBC node in order
  // for the matching to be successful.
  SDNode *SUBCNode = SUBENode->getOperand(2).getNode();

  if (SUBCNode->getOpcode() != ISD::SUBC)

  SDValue MultHi = SUBENode->getOperand(1);
  SDValue MultLo = SUBCNode->getOperand(1);
  SDNode *MultNode = MultHi.getNode();
  unsigned MultOpc = MultHi.getOpcode();

  // MultHi and MultLo must be generated by the same node,
  if (MultLo.getNode() != MultNode)

  // and it must be a multiplication.
  if (MultOpc != ISD::SMUL_LOHI && MultOpc != ISD::UMUL_LOHI)

  // MultLo and MultHi must be the first and second output of MultNode
  // respectively.
  if (MultHi.getResNo() != 1 || MultLo.getResNo() != 0)

  // Transform this to a MSUB only if SUBENode and SUBCNode are the only users
  // of the values of MultNode, in which case MultNode will be removed in later
  // phases.
  // If there exist users other than SUBENode or SUBCNode, this function returns
  // here, which will result in MultNode being mapped to a single MULT
  // instruction node rather than a pair of MULT and MSUB instructions being
  // produced.
  if (!MultHi.hasOneUse() || !MultLo.hasOneUse())

  SDValue Chain = CurDAG->getEntryNode();
  DebugLoc dl = SUBENode->getDebugLoc();

  // create MipsSub(u) node
  MultOpc = MultOpc == ISD::UMUL_LOHI ? MipsISD::MSubu : MipsISD::MSub;

  SDValue MSub = CurDAG->getNode(MultOpc, dl, MVT::Glue,
                                 MultNode->getOperand(0),// Factor 0
                                 MultNode->getOperand(1),// Factor 1
                                 SUBCNode->getOperand(0),// Lo0
                                 SUBENode->getOperand(0));// Hi0

  // create CopyFromReg nodes: Lo first, then Hi chained/glued behind it.
  SDValue CopyFromLo = CurDAG->getCopyFromReg(Chain, dl, Mips::LO, MVT::i32,
  SDValue CopyFromHi = CurDAG->getCopyFromReg(CopyFromLo.getValue(1), dl,
                                              CopyFromLo.getValue(2));

  // replace uses of sube and subc here
  if (!SDValue(SUBCNode, 0).use_empty())
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(SUBCNode, 0), CopyFromLo);

  if (!SDValue(SUBENode, 0).use_empty())
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(SUBENode, 0), CopyFromHi);
// Combine hook for ISD::ADDE: try to fold (addc, adde, mul) into a
// Mips madd via SelectMadd. Requires MIPS32 and an i32 result.
static SDValue PerformADDECombine(SDNode *N, SelectionDAG &DAG,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const MipsSubtarget *Subtarget) {
  if (DCI.isBeforeLegalize())

  // Condition tail (presumably the SelectMadd call) not visible in this chunk.
  if (Subtarget->hasMips32() && N->getValueType(0) == MVT::i32 &&
    return SDValue(N, 0);
// Combine hook for ISD::SUBE: try to fold (subc, sube, mul) into a
// Mips msub via SelectMsub. Requires MIPS32 and an i32 result.
static SDValue PerformSUBECombine(SDNode *N, SelectionDAG &DAG,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const MipsSubtarget *Subtarget) {
  if (DCI.isBeforeLegalize())

  // Condition tail (presumably the SelectMsub call) not visible in this chunk.
  if (Subtarget->hasMips32() && N->getValueType(0) == MVT::i32 &&
    return SDValue(N, 0);
// Combine hook for ISD::SDIVREM/UDIVREM: emit a glued MipsISD::DivRem(U)
// node and read the quotient/remainder back from the LO/HI physical
// registers, replacing only the results that actually have uses.
static SDValue PerformDivRemCombine(SDNode *N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const MipsSubtarget *Subtarget) {
  if (DCI.isBeforeLegalizeOps())

  EVT Ty = N->getValueType(0);
  // Pick 32- or 64-bit LO/HI registers to match the result type.
  unsigned LO = (Ty == MVT::i32) ? Mips::LO : Mips::LO64;
  unsigned HI = (Ty == MVT::i32) ? Mips::HI : Mips::HI64;
  unsigned opc = N->getOpcode() == ISD::SDIVREM ? MipsISD::DivRem :
  DebugLoc dl = N->getDebugLoc();

  SDValue DivRem = DAG.getNode(opc, dl, MVT::Glue,
                               N->getOperand(0), N->getOperand(1));
  SDValue InChain = DAG.getEntryNode();
  SDValue InGlue = DivRem;

  // Result 0 (quotient) lives in LO.
  if (N->hasAnyUseOfValue(0)) {
    SDValue CopyFromLo = DAG.getCopyFromReg(InChain, dl, LO, Ty,
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), CopyFromLo);
    // Chain/glue the HI copy behind the LO copy.
    InChain = CopyFromLo.getValue(1);
    InGlue = CopyFromLo.getValue(2);

  // Result 1 (remainder) lives in HI.
  if (N->hasAnyUseOfValue(1)) {
    SDValue CopyFromHi = DAG.getCopyFromReg(InChain, dl,
    DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), CopyFromHi);
// Map an ISD floating-point condition code to the corresponding Mips FP
// condition code (FCOND_*). Unknown codes are a programmer error.
// NOTE(review): the switch header and some fall-through case labels
// (e.g. the non-ordered SETEQ/SETLT aliases) are not visible in this chunk.
static Mips::CondCode FPCondCCodeToFCC(ISD::CondCode CC) {
  default: llvm_unreachable("Unknown fp condition code!");
  case ISD::SETOEQ: return Mips::FCOND_OEQ;
  case ISD::SETUNE: return Mips::FCOND_UNE;
  case ISD::SETOLT: return Mips::FCOND_OLT;
  case ISD::SETOGT: return Mips::FCOND_OGT;
  case ISD::SETOLE: return Mips::FCOND_OLE;
  case ISD::SETOGE: return Mips::FCOND_OGE;
  case ISD::SETULT: return Mips::FCOND_ULT;
  case ISD::SETULE: return Mips::FCOND_ULE;
  case ISD::SETUGT: return Mips::FCOND_UGT;
  case ISD::SETUGE: return Mips::FCOND_UGE;
  case ISD::SETUO:  return Mips::FCOND_UN;
  case ISD::SETO:   return Mips::FCOND_OR;
  case ISD::SETONE: return Mips::FCOND_ONE;
  case ISD::SETUEQ: return Mips::FCOND_UEQ;
600 // Returns true if condition code has to be inverted.
// Condition codes in the [FCOND_F, FCOND_NGT] range are encoded directly;
// codes in [FCOND_T, FCOND_GT] are emitted as the inverted form of their
// counterpart, so the caller must invert the branch/select sense.
static bool InvertFPCondCode(Mips::CondCode CC) {
  if (CC >= Mips::FCOND_F && CC <= Mips::FCOND_NGT)

  assert((CC >= Mips::FCOND_T && CC <= Mips::FCOND_GT) &&
         "Illegal Condition Code");
611 // Creates and returns an FPCmp node from a setcc node.
612 // Returns Op if setcc is not a floating point comparison.
// Creates and returns an FPCmp node from a setcc node.
// Returns Op if setcc is not a floating point comparison.
static SDValue CreateFPCmp(SelectionDAG &DAG, const SDValue &Op) {
  // must be a SETCC node
  if (Op.getOpcode() != ISD::SETCC)

  SDValue LHS = Op.getOperand(0);

  // Non-FP comparisons are left untouched.
  if (!LHS.getValueType().isFloatingPoint())

  SDValue RHS = Op.getOperand(1);
  DebugLoc dl = Op.getDebugLoc();

  // Assume the 3rd operand is a CondCodeSDNode. Add code to check the type of
  // node if necessary.
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();

  // FPCmp glues the Mips FP condition code as a constant third operand.
  return DAG.getNode(MipsISD::FPCmp, dl, MVT::Glue, LHS, RHS,
                     DAG.getConstant(FPCondCCodeToFCC(CC), MVT::i32));
634 // Creates and returns a CMovFPT/F node.
// Creates and returns a CMovFPT/F node, picking the inverted variant when
// the FP condition code is one that must be emitted inverted (see
// InvertFPCondCode). Cond is an FPCmp node produced by CreateFPCmp.
static SDValue CreateCMovFP(SelectionDAG &DAG, SDValue Cond, SDValue True,
                            SDValue False, DebugLoc DL) {
  bool invert = InvertFPCondCode((Mips::CondCode)
                                 cast<ConstantSDNode>(Cond.getOperand(2))

  return DAG.getNode((invert ? MipsISD::CMovFP_F : MipsISD::CMovFP_T), DL,
                     True.getValueType(), True, False, Cond);
// Combine hook for ISD::SELECT: when the false operand is the constant 0
// and the condition is an integer setcc, invert the condition and swap the
// operands so the zero ends up in the true slot (maps better onto movz/movn).
static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const MipsSubtarget *Subtarget) {
  if (DCI.isBeforeLegalizeOps())

  SDValue SetCC = N->getOperand(0);

  // Only integer setcc conditions are handled.
  if ((SetCC.getOpcode() != ISD::SETCC) ||
      !SetCC.getOperand(0).getValueType().isInteger())

  SDValue False = N->getOperand(2);
  EVT FalseTy = False.getValueType();

  if (!FalseTy.isInteger())

  ConstantSDNode *CN = dyn_cast<ConstantSDNode>(False);

  // Bail out unless the false operand is exactly constant zero.
  if (!CN || CN->getZExtValue())

  const DebugLoc DL = N->getDebugLoc();
  ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
  SDValue True = N->getOperand(1);

  // Invert the condition; the operands below are swapped to compensate.
  SetCC = DAG.getSetCC(DL, SetCC.getValueType(), SetCC.getOperand(0),
                       SetCC.getOperand(1), ISD::getSetCCInverse(CC, true));

  return DAG.getNode(ISD::SELECT, DL, FalseTy, SetCC, False, True);
static SDValue PerformANDCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const MipsSubtarget *Subtarget) {
  // Pattern match EXT.
  //  $dst = and ((sra or srl) $src , pos), (2**size - 1)
  //  => ext $dst, $src, size, pos
  // EXT is only available from MIPS32r2 on.
  if (DCI.isBeforeLegalizeOps() || !Subtarget->hasMips32r2())

  SDValue ShiftRight = N->getOperand(0), Mask = N->getOperand(1);
  unsigned ShiftRightOpc = ShiftRight.getOpcode();

  // Op's first operand must be a shift right.
  if (ShiftRightOpc != ISD::SRA && ShiftRightOpc != ISD::SRL)

  // The second operand of the shift must be an immediate.
  if (!(CN = dyn_cast<ConstantSDNode>(ShiftRight.getOperand(1))))

  uint64_t Pos = CN->getZExtValue();
  uint64_t SMPos, SMSize;

  // Op's second operand must be a shifted mask.
  if (!(CN = dyn_cast<ConstantSDNode>(Mask)) ||
      !IsShiftedMask(CN->getZExtValue(), SMPos, SMSize))

  // Return if the shifted mask does not start at bit 0 or the sum of its size
  // and Pos exceeds the word's size.
  EVT ValTy = N->getValueType(0);
  if (SMPos != 0 || Pos + SMSize > ValTy.getSizeInBits())

  // Emit the EXT pseudo-node with (pos, size) immediates.
  return DAG.getNode(MipsISD::Ext, N->getDebugLoc(), ValTy,
                     ShiftRight.getOperand(0), DAG.getConstant(Pos, MVT::i32),
                     DAG.getConstant(SMSize, MVT::i32));
static SDValue PerformORCombine(SDNode *N, SelectionDAG &DAG,
                                TargetLowering::DAGCombinerInfo &DCI,
                                const MipsSubtarget *Subtarget) {
  // Pattern match INS.
  //  $dst = or (and $src1 , mask0), (and (shl $src, pos), mask1),
  //  where mask1 = (2**size - 1) << pos, mask0 = ~mask1
  //  => ins $dst, $src, size, pos, $src1
  // INS is only available from MIPS32r2 on.
  if (DCI.isBeforeLegalizeOps() || !Subtarget->hasMips32r2())

  SDValue And0 = N->getOperand(0), And1 = N->getOperand(1);
  uint64_t SMPos0, SMSize0, SMPos1, SMSize1;

  // See if Op's first operand matches (and $src1 , mask0).
  if (And0.getOpcode() != ISD::AND)

  // mask0 must be the complement of a shifted mask (note ~ before use).
  if (!(CN = dyn_cast<ConstantSDNode>(And0.getOperand(1))) ||
      !IsShiftedMask(~CN->getSExtValue(), SMPos0, SMSize0))

  // See if Op's second operand matches (and (shl $src, pos), mask1).
  if (And1.getOpcode() != ISD::AND)

  if (!(CN = dyn_cast<ConstantSDNode>(And1.getOperand(1))) ||
      !IsShiftedMask(CN->getZExtValue(), SMPos1, SMSize1))

  // The shift masks must have the same position and size.
  if (SMPos0 != SMPos1 || SMSize0 != SMSize1)

  SDValue Shl = And1.getOperand(0);
  if (Shl.getOpcode() != ISD::SHL)

  if (!(CN = dyn_cast<ConstantSDNode>(Shl.getOperand(1))))

  unsigned Shamt = CN->getZExtValue();

  // Return if the shift amount and the first bit position of mask are not the
  // same, or if the inserted field would run off the end of the word.
  EVT ValTy = N->getValueType(0);
  if ((Shamt != SMPos0) || (SMPos0 + SMSize0 > ValTy.getSizeInBits()))

  // Emit the INS pseudo-node: insert $src into $src1 at (pos, size).
  return DAG.getNode(MipsISD::Ins, N->getDebugLoc(), ValTy, Shl.getOperand(0),
                     DAG.getConstant(SMPos0, MVT::i32),
                     DAG.getConstant(SMSize0, MVT::i32), And0.getOperand(0));
static SDValue PerformADDCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const MipsSubtarget *Subtarget) {
  // (add v0, (add v1, abs_lo(tjt))) => (add (add v0, v1), abs_lo(tjt))
  // Reassociates so the jump-table low-part add stays adjacent for isel.
  if (DCI.isBeforeLegalizeOps())

  SDValue Add = N->getOperand(1);

  if (Add.getOpcode() != ISD::ADD)

  SDValue Lo = Add.getOperand(1);

  // Only fire for the low part of a jump-table address.
  if ((Lo.getOpcode() != MipsISD::Lo) ||
      (Lo.getOperand(0).getOpcode() != ISD::TargetJumpTable))

  EVT ValTy = N->getValueType(0);
  DebugLoc DL = N->getDebugLoc();

  // (v0 + v1), then + abs_lo(tjt).
  SDValue Add1 = DAG.getNode(ISD::ADD, DL, ValTy, N->getOperand(0),
  return DAG.getNode(ISD::ADD, DL, ValTy, Add1, Lo);
// Central DAG-combine dispatcher: routes each opcode registered via
// setTargetDAGCombine in the constructor to its Perform*Combine helper.
// NOTE(review): the switch header and case labels are not visible in this
// chunk; the calls below correspond, in order, to ADDE, SUBE,
// SDIVREM/UDIVREM, SELECT, AND, OR and ADD.
SDValue MipsTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI)
  SelectionDAG &DAG = DCI.DAG;
  unsigned opc = N->getOpcode();

    return PerformADDECombine(N, DAG, DCI, Subtarget);
    return PerformSUBECombine(N, DAG, DCI, Subtarget);
    return PerformDivRemCombine(N, DAG, DCI, Subtarget);
    return PerformSELECTCombine(N, DAG, DCI, Subtarget);
    return PerformANDCombine(N, DAG, DCI, Subtarget);
    return PerformORCombine(N, DAG, DCI, Subtarget);
    return PerformADDCombine(N, DAG, DCI, Subtarget);
// LowerOperationWrapper - run LowerOperation on N and push each produced
// value into Results (one entry per result of the lowered node).
MipsTargetLowering::LowerOperationWrapper(SDNode *N,
                                          SmallVectorImpl<SDValue> &Results,
                                          SelectionDAG &DAG) const {
  SDValue Res = LowerOperation(SDValue(N, 0), DAG);

  for (unsigned I = 0, E = Res->getNumValues(); I != E; ++I)
    Results.push_back(Res.getValue(I));
// ReplaceNodeResults - type-legalization entry point; delegates to
// LowerOperation exactly like LowerOperationWrapper and collects all
// resulting values into Results.
MipsTargetLowering::ReplaceNodeResults(SDNode *N,
                                       SmallVectorImpl<SDValue> &Results,
                                       SelectionDAG &DAG) const {
  SDValue Res = LowerOperation(SDValue(N, 0), DAG);

  for (unsigned I = 0, E = Res->getNumValues(); I != E; ++I)
    Results.push_back(Res.getValue(I));
// LowerOperation - dispatch every operation marked Custom in the
// constructor to its dedicated Lower* routine.
SDValue MipsTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const
  switch (Op.getOpcode())
    case ISD::BRCOND:             return LowerBRCOND(Op, DAG);
    case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
    case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
    case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
    case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
    case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
    case ISD::SELECT:             return LowerSELECT(Op, DAG);
    case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
    case ISD::SETCC:              return LowerSETCC(Op, DAG);
    case ISD::VASTART:            return LowerVASTART(Op, DAG);
    case ISD::FCOPYSIGN:          return LowerFCOPYSIGN(Op, DAG);
    case ISD::FABS:               return LowerFABS(Op, DAG);
    case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
    case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
    case ISD::MEMBARRIER:         return LowerMEMBARRIER(Op, DAG);
    case ISD::ATOMIC_FENCE:       return LowerATOMIC_FENCE(Op, DAG);
    case ISD::SHL_PARTS:          return LowerShiftLeftParts(Op, DAG);
    case ISD::SRA_PARTS:          return LowerShiftRightParts(Op, DAG, true);
    case ISD::SRL_PARTS:          return LowerShiftRightParts(Op, DAG, false);
    case ISD::LOAD:               return LowerLOAD(Op, DAG);
    case ISD::STORE:              return LowerSTORE(Op, DAG);
    case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
    case ISD::INTRINSIC_W_CHAIN:  return LowerINTRINSIC_W_CHAIN(Op, DAG);
878 //===----------------------------------------------------------------------===//
879 // Lower helper functions
880 //===----------------------------------------------------------------------===//
882 // AddLiveIn - This helper function adds the specified physical register to the
883 // MachineFunction as a live in value. It also creates a corresponding
884 // virtual register for it.
// Marks physical register PReg live-in to MF, creates a fresh virtual
// register of class RC for it, and records the pairing in MachineRegisterInfo.
// (The `static unsigned` return-type line and `return VReg;` are not
// visible in this chunk.)
AddLiveIn(MachineFunction &MF, unsigned PReg, const TargetRegisterClass *RC)
  assert(RC->contains(PReg) && "Not the correct regclass!");
  unsigned VReg = MF.getRegInfo().createVirtualRegister(RC);
  MF.getRegInfo().addLiveIn(PReg, VReg);
894 // Get fp branch code (not opcode) from condition code.
// Get fp branch code (not opcode) from condition code: codes in
// [FCOND_F, FCOND_NGT] branch on true; codes in [FCOND_T, FCOND_GT] are
// inverted encodings and branch on false.
static Mips::FPBranchCode GetFPBranchCodeFromCond(Mips::CondCode CC) {
  if (CC >= Mips::FCOND_F && CC <= Mips::FCOND_NGT)
    return Mips::BRANCH_T;

  assert((CC >= Mips::FCOND_T && CC <= Mips::FCOND_GT) &&
         "Invalid CondCode.");

  return Mips::BRANCH_F;
// Expands a conditional-move pseudo into an explicit diamond CFG
// (thisMBB -> {copy0MBB, sinkMBB} -> sinkMBB + PHI) when the subtarget
// lacks hardware conditional moves.
// NOTE(review): the DebugLoc parameter ("dl", used by BuildMI below) and
// several early-return/brace lines are not visible in this chunk.
static MachineBasicBlock* ExpandCondMov(MachineInstr *MI, MachineBasicBlock *BB,
                                        const MipsSubtarget *Subtarget,
                                        const TargetInstrInfo *TII,
                                        bool isFPCmp, unsigned Opc) {
  // There is no need to expand CMov instructions if target has
  // conditional moves.
  if (Subtarget->hasCondMov())

  // To "insert" a SELECT_CC instruction, we actually have to insert the
  // diamond control-flow pattern. The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = BB;

  //  bNE   r1, r0, copy1MBB
  //  fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)),
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  // Emit the right instruction according to the type of the operands compared:
  // FP compares branch on the condition flag; integer compares test against
  // $zero.
  BuildMI(BB, dl, TII->get(Opc)).addMBB(sinkMBB);
  BuildMI(BB, dl, TII->get(Opc)).addReg(MI->getOperand(2).getReg())
    .addReg(Mips::ZERO).addMBB(sinkMBB);

  //  # fallthrough to sinkMBB

  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);

  //  %Result = phi [ %TrueValue, thisMBB ], [ %FalseValue, copy0MBB ]
  // FP variant: operands 2 (true) and 1 (false).
  BuildMI(*BB, BB->begin(), dl,
          TII->get(Mips::PHI), MI->getOperand(0).getReg())
    .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB)
    .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB);

  // Integer variant: operands 3 (true) and 1 (false).
  BuildMI(*BB, BB->begin(), dl,
          TII->get(Mips::PHI), MI->getOperand(0).getReg())
    .addReg(MI->getOperand(3).getReg()).addMBB(thisMBB)
    .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB);

  MI->eraseFromParent();   // The pseudo instruction is gone now.
// Expand the BPOSGE32 pseudo into a real bposge32 branch plus a diamond:
//   BB:   bposge32 -> TBB, fallthrough FBB
//   FBB:  $vr2 = addiu $zero, 0 ; b Sink      (condition false -> 0)
//   TBB:  $vr1 = addiu $zero, 1              (condition true  -> 1)
//   Sink: result = phi($vr2 from FBB, $vr1 from TBB)
984 MipsTargetLowering::EmitBPOSGE32(MachineInstr *MI, MachineBasicBlock *BB) const{
986 //  bposge32_pseudo $vr0
996 //  $vr0 = phi($vr2, $fbb, $vr1, $tbb)
998 MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
999 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
1000 const TargetRegisterClass *RC = &Mips::CPURegsRegClass;
1001 DebugLoc DL = MI->getDebugLoc();
1002 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1003 MachineFunction::iterator It = llvm::next(MachineFunction::iterator(BB));
1004 MachineFunction *F = BB->getParent();
1005 MachineBasicBlock *FBB = F->CreateMachineBasicBlock(LLVM_BB);
1006 MachineBasicBlock *TBB = F->CreateMachineBasicBlock(LLVM_BB);
1007 MachineBasicBlock *Sink  = F->CreateMachineBasicBlock(LLVM_BB);
1010 F->insert(It, Sink);
1012 // Transfer the remainder of BB and its successor edges to Sink.
1013 Sink->splice(Sink->begin(), BB, llvm::next(MachineBasicBlock::iterator(MI)),
1015 Sink->transferSuccessorsAndUpdatePHIs(BB);
1018 BB->addSuccessor(FBB);
1019 BB->addSuccessor(TBB);
1020 FBB->addSuccessor(Sink);
1021 TBB->addSuccessor(Sink);
1023 // Insert the real bposge32 instruction to $BB.
1024 BuildMI(BB, DL, TII->get(Mips::BPOSGE32)).addMBB(TBB);
// False path: materialize 0 and jump to the sink.
1027 unsigned VR2 = RegInfo.createVirtualRegister(RC);
1028 BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::ADDiu), VR2)
1029   .addReg(Mips::ZERO).addImm(0);
1030 BuildMI(*FBB, FBB->end(), DL, TII->get(Mips::B)).addMBB(Sink);
// True path: materialize 1; falls through to the sink.
1033 unsigned VR1 = RegInfo.createVirtualRegister(RC);
1034 BuildMI(*TBB, TBB->end(), DL, TII->get(Mips::ADDiu), VR1)
1035   .addReg(Mips::ZERO).addImm(1);
1037 // Insert phi function to $Sink.
1038 BuildMI(*Sink, Sink->begin(), DL, TII->get(Mips::PHI),
1039         MI->getOperand(0).getReg())
1040   .addReg(VR2).addMBB(FBB).addReg(VR1).addMBB(TBB);
1042 MI->eraseFromParent();   // The pseudo instruction is gone now.
// Dispatch pseudo-instructions that need custom MBB-level expansion.
// Atomic pseudos are routed by operand width: 8/16-bit forms go through the
// subword expanders (EmitAtomicBinaryPartword / EmitAtomicCmpSwapPartword),
// 32/64-bit forms through the full-word ones. The *_P8 variants share the
// same expansion as their base opcodes. A BinOpcode of 0 with Nand=true
// selects the NAND expansion; 0 without Nand selects swap.
1047 MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
1048                                                 MachineBasicBlock *BB) const {
1049 switch (MI->getOpcode()) {
1050 default: llvm_unreachable("Unexpected instr type to insert");
1051 case Mips::ATOMIC_LOAD_ADD_I8:
1052 case Mips::ATOMIC_LOAD_ADD_I8_P8:
1053   return EmitAtomicBinaryPartword(MI, BB, 1, Mips::ADDu);
1054 case Mips::ATOMIC_LOAD_ADD_I16:
1055 case Mips::ATOMIC_LOAD_ADD_I16_P8:
1056   return EmitAtomicBinaryPartword(MI, BB, 2, Mips::ADDu);
1057 case Mips::ATOMIC_LOAD_ADD_I32:
1058 case Mips::ATOMIC_LOAD_ADD_I32_P8:
1059   return EmitAtomicBinary(MI, BB, 4, Mips::ADDu);
1060 case Mips::ATOMIC_LOAD_ADD_I64:
1061 case Mips::ATOMIC_LOAD_ADD_I64_P8:
1062   return EmitAtomicBinary(MI, BB, 8, Mips::DADDu);
1064 case Mips::ATOMIC_LOAD_AND_I8:
1065 case Mips::ATOMIC_LOAD_AND_I8_P8:
1066   return EmitAtomicBinaryPartword(MI, BB, 1, Mips::AND);
1067 case Mips::ATOMIC_LOAD_AND_I16:
1068 case Mips::ATOMIC_LOAD_AND_I16_P8:
1069   return EmitAtomicBinaryPartword(MI, BB, 2, Mips::AND);
1070 case Mips::ATOMIC_LOAD_AND_I32:
1071 case Mips::ATOMIC_LOAD_AND_I32_P8:
1072   return EmitAtomicBinary(MI, BB, 4, Mips::AND);
1073 case Mips::ATOMIC_LOAD_AND_I64:
1074 case Mips::ATOMIC_LOAD_AND_I64_P8:
1075   return EmitAtomicBinary(MI, BB, 8, Mips::AND64);
1077 case Mips::ATOMIC_LOAD_OR_I8:
1078 case Mips::ATOMIC_LOAD_OR_I8_P8:
1079   return EmitAtomicBinaryPartword(MI, BB, 1, Mips::OR);
1080 case Mips::ATOMIC_LOAD_OR_I16:
1081 case Mips::ATOMIC_LOAD_OR_I16_P8:
1082   return EmitAtomicBinaryPartword(MI, BB, 2, Mips::OR);
1083 case Mips::ATOMIC_LOAD_OR_I32:
1084 case Mips::ATOMIC_LOAD_OR_I32_P8:
1085   return EmitAtomicBinary(MI, BB, 4, Mips::OR);
1086 case Mips::ATOMIC_LOAD_OR_I64:
1087 case Mips::ATOMIC_LOAD_OR_I64_P8:
1088   return EmitAtomicBinary(MI, BB, 8, Mips::OR64);
1090 case Mips::ATOMIC_LOAD_XOR_I8:
1091 case Mips::ATOMIC_LOAD_XOR_I8_P8:
1092   return EmitAtomicBinaryPartword(MI, BB, 1, Mips::XOR);
1093 case Mips::ATOMIC_LOAD_XOR_I16:
1094 case Mips::ATOMIC_LOAD_XOR_I16_P8:
1095   return EmitAtomicBinaryPartword(MI, BB, 2, Mips::XOR);
1096 case Mips::ATOMIC_LOAD_XOR_I32:
1097 case Mips::ATOMIC_LOAD_XOR_I32_P8:
1098   return EmitAtomicBinary(MI, BB, 4, Mips::XOR);
1099 case Mips::ATOMIC_LOAD_XOR_I64:
1100 case Mips::ATOMIC_LOAD_XOR_I64_P8:
1101   return EmitAtomicBinary(MI, BB, 8, Mips::XOR64);
1103 case Mips::ATOMIC_LOAD_NAND_I8:
1104 case Mips::ATOMIC_LOAD_NAND_I8_P8:
1105   return EmitAtomicBinaryPartword(MI, BB, 1, 0, true);
1106 case Mips::ATOMIC_LOAD_NAND_I16:
1107 case Mips::ATOMIC_LOAD_NAND_I16_P8:
1108   return EmitAtomicBinaryPartword(MI, BB, 2, 0, true);
1109 case Mips::ATOMIC_LOAD_NAND_I32:
1110 case Mips::ATOMIC_LOAD_NAND_I32_P8:
1111   return EmitAtomicBinary(MI, BB, 4, 0, true);
1112 case Mips::ATOMIC_LOAD_NAND_I64:
1113 case Mips::ATOMIC_LOAD_NAND_I64_P8:
1114   return EmitAtomicBinary(MI, BB, 8, 0, true);
1116 case Mips::ATOMIC_LOAD_SUB_I8:
1117 case Mips::ATOMIC_LOAD_SUB_I8_P8:
1118   return EmitAtomicBinaryPartword(MI, BB, 1, Mips::SUBu);
1119 case Mips::ATOMIC_LOAD_SUB_I16:
1120 case Mips::ATOMIC_LOAD_SUB_I16_P8:
1121   return EmitAtomicBinaryPartword(MI, BB, 2, Mips::SUBu);
1122 case Mips::ATOMIC_LOAD_SUB_I32:
1123 case Mips::ATOMIC_LOAD_SUB_I32_P8:
1124   return EmitAtomicBinary(MI, BB, 4, Mips::SUBu);
1125 case Mips::ATOMIC_LOAD_SUB_I64:
1126 case Mips::ATOMIC_LOAD_SUB_I64_P8:
1127   return EmitAtomicBinary(MI, BB, 8, Mips::DSUBu);
1129 case Mips::ATOMIC_SWAP_I8:
1130 case Mips::ATOMIC_SWAP_I8_P8:
1131   return EmitAtomicBinaryPartword(MI, BB, 1, 0);
1132 case Mips::ATOMIC_SWAP_I16:
1133 case Mips::ATOMIC_SWAP_I16_P8:
1134   return EmitAtomicBinaryPartword(MI, BB, 2, 0);
1135 case Mips::ATOMIC_SWAP_I32:
1136 case Mips::ATOMIC_SWAP_I32_P8:
1137   return EmitAtomicBinary(MI, BB, 4, 0);
1138 case Mips::ATOMIC_SWAP_I64:
1139 case Mips::ATOMIC_SWAP_I64_P8:
1140   return EmitAtomicBinary(MI, BB, 8, 0);
1142 case Mips::ATOMIC_CMP_SWAP_I8:
1143 case Mips::ATOMIC_CMP_SWAP_I8_P8:
1144   return EmitAtomicCmpSwapPartword(MI, BB, 1);
1145 case Mips::ATOMIC_CMP_SWAP_I16:
1146 case Mips::ATOMIC_CMP_SWAP_I16_P8:
1147   return EmitAtomicCmpSwapPartword(MI, BB, 2);
1148 case Mips::ATOMIC_CMP_SWAP_I32:
1149 case Mips::ATOMIC_CMP_SWAP_I32_P8:
1150   return EmitAtomicCmpSwap(MI, BB, 4);
1151 case Mips::ATOMIC_CMP_SWAP_I64:
1152 case Mips::ATOMIC_CMP_SWAP_I64_P8:
1153   return EmitAtomicCmpSwap(MI, BB, 8);
1154 case Mips::BPOSGE32_PSEUDO:
1155   return EmitBPOSGE32(MI, BB);
1159 // This function also handles Mips::ATOMIC_SWAP_I32 (when BinOpcode == 0), and
1160 // Mips::ATOMIC_LOAD_NAND_I32 (when Nand == true)
// Emit a full-word (4- or 8-byte) atomic read-modify-write as an LL/SC
// retry loop:
//   loopMBB: oldval = ll 0(ptr); storeval = op(oldval, incr);
//            success = sc storeval, 0(ptr); beq success, $0, loopMBB
// Opcodes (LL/SC vs LLD/SCD, 32- vs 64-bit AND/NOR/$zero/branch) are chosen
// by Size; the _P8 load-linked/store-conditional variants are used under N64.
1162 MipsTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
1163                                      unsigned Size, unsigned BinOpcode,
1165 assert((Size == 4 || Size == 8) && "Unsupported size for EmitAtomicBinary.");
1167 MachineFunction *MF = BB->getParent();
1168 MachineRegisterInfo &RegInfo = MF->getRegInfo();
1169 const TargetRegisterClass *RC = getRegClassFor(MVT::getIntegerVT(Size * 8));
1170 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
1171 DebugLoc dl = MI->getDebugLoc();
1172 unsigned LL, SC, AND, NOR, ZERO, BEQ;
// NOTE(review): the Size==4 / Size==8 branch lines are elided in this
// excerpt; the first LL/SC pair below is the 32-bit selection, the
// LLD/SCD pair the 64-bit one.
1175   LL = IsN64 ? Mips::LL_P8 : Mips::LL;
1176   SC = IsN64 ? Mips::SC_P8 : Mips::SC;
1183   LL = IsN64 ? Mips::LLD_P8 : Mips::LLD;
1184   SC = IsN64 ? Mips::SCD_P8 : Mips::SCD;
1187   ZERO = Mips::ZERO_64;
1191 unsigned OldVal = MI->getOperand(0).getReg();
1192 unsigned Ptr = MI->getOperand(1).getReg();
1193 unsigned Incr = MI->getOperand(2).getReg();
1195 unsigned StoreVal = RegInfo.createVirtualRegister(RC);
1196 unsigned AndRes = RegInfo.createVirtualRegister(RC);
1197 unsigned Success = RegInfo.createVirtualRegister(RC);
1199 // insert new blocks after the current block
1200 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1201 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
1202 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
1203 MachineFunction::iterator It = BB;
1205 MF->insert(It, loopMBB);
1206 MF->insert(It, exitMBB);
1208 // Transfer the remainder of BB and its successor edges to exitMBB.
1209 exitMBB->splice(exitMBB->begin(), BB,
1210                 llvm::next(MachineBasicBlock::iterator(MI)),
1212 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
1216 //   fallthrough --> loopMBB
1217 BB->addSuccessor(loopMBB);
1218 loopMBB->addSuccessor(loopMBB);
1219 loopMBB->addSuccessor(exitMBB);
1222 //   ll oldval, 0(ptr)
1223 //   <binop> storeval, oldval, incr
1224 //   sc success, storeval, 0(ptr)
1225 //   beq success, $0, loopMBB
1227 BuildMI(BB, dl, TII->get(LL), OldVal).addReg(Ptr).addImm(0);
// NAND path: and + nor (guarding `if (Nand)` is elided in this excerpt).
1229   // and andres, oldval, incr
1230   // nor storeval, $0, andres
1231   BuildMI(BB, dl, TII->get(AND), AndRes).addReg(OldVal).addReg(Incr);
1232   BuildMI(BB, dl, TII->get(NOR), StoreVal).addReg(ZERO).addReg(AndRes);
1233 } else if (BinOpcode) {
1234   // <binop> storeval, oldval, incr
1235   BuildMI(BB, dl, TII->get(BinOpcode), StoreVal).addReg(OldVal).addReg(Incr);
// BinOpcode == 0 and !Nand is the swap case: storeval is the incoming value.
1239 BuildMI(BB, dl, TII->get(SC), Success).addReg(StoreVal).addReg(Ptr).addImm(0);
1240 BuildMI(BB, dl, TII->get(BEQ)).addReg(Success).addReg(ZERO).addMBB(loopMBB);
1242 MI->eraseFromParent();   // The instruction is gone now.
// Emit a subword (1- or 2-byte) atomic read-modify-write. Since LL/SC only
// operate on aligned words, the sequence aligns the pointer down to a word,
// computes a shift amount and mask for the addressed byte/halfword, performs
// the LL/SC loop on the full word (merging the modified lanes with the
// untouched ones), then shifts and sign-extends the old value into Dest.
1248 MipsTargetLowering::EmitAtomicBinaryPartword(MachineInstr *MI,
1249                                              MachineBasicBlock *BB,
1250                                              unsigned Size, unsigned BinOpcode,
1252 assert((Size == 1 || Size == 2) &&
1253        "Unsupported size for EmitAtomicBinaryPartial.");
1255 MachineFunction *MF = BB->getParent();
1256 MachineRegisterInfo &RegInfo = MF->getRegInfo();
1257 const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
1258 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
1259 DebugLoc dl = MI->getDebugLoc();
1260 unsigned LL = IsN64 ? Mips::LL_P8 : Mips::LL;
1261 unsigned SC = IsN64 ? Mips::SC_P8 : Mips::SC;
1263 unsigned Dest = MI->getOperand(0).getReg();
1264 unsigned Ptr = MI->getOperand(1).getReg();
1265 unsigned Incr = MI->getOperand(2).getReg();
1267 unsigned AlignedAddr = RegInfo.createVirtualRegister(RC);
1268 unsigned ShiftAmt = RegInfo.createVirtualRegister(RC);
1269 unsigned Mask = RegInfo.createVirtualRegister(RC);
1270 unsigned Mask2 = RegInfo.createVirtualRegister(RC);
1271 unsigned NewVal = RegInfo.createVirtualRegister(RC);
1272 unsigned OldVal = RegInfo.createVirtualRegister(RC);
1273 unsigned Incr2 = RegInfo.createVirtualRegister(RC);
1274 unsigned MaskLSB2 = RegInfo.createVirtualRegister(RC);
1275 unsigned PtrLSB2 = RegInfo.createVirtualRegister(RC);
1276 unsigned MaskUpper = RegInfo.createVirtualRegister(RC);
1277 unsigned AndRes = RegInfo.createVirtualRegister(RC);
1278 unsigned BinOpRes = RegInfo.createVirtualRegister(RC);
1279 unsigned MaskedOldVal0 = RegInfo.createVirtualRegister(RC);
1280 unsigned StoreVal = RegInfo.createVirtualRegister(RC);
1281 unsigned MaskedOldVal1 = RegInfo.createVirtualRegister(RC);
1282 unsigned SrlRes = RegInfo.createVirtualRegister(RC);
1283 unsigned SllRes = RegInfo.createVirtualRegister(RC);
1284 unsigned Success = RegInfo.createVirtualRegister(RC);
1286 // insert new blocks after the current block
1287 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1288 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
1289 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(LLVM_BB);
1290 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
1291 MachineFunction::iterator It = BB;
1293 MF->insert(It, loopMBB);
1294 MF->insert(It, sinkMBB);
1295 MF->insert(It, exitMBB);
1297 // Transfer the remainder of BB and its successor edges to exitMBB.
1298 exitMBB->splice(exitMBB->begin(), BB,
1299                 llvm::next(MachineBasicBlock::iterator(MI)), BB->end());
1300 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
1302 BB->addSuccessor(loopMBB);
1303 loopMBB->addSuccessor(loopMBB);
1304 loopMBB->addSuccessor(sinkMBB);
1305 sinkMBB->addSuccessor(exitMBB);
// Prologue in BB: compute aligned address, per-lane shift amount, and masks.
1308 //    addiu   masklsb2,$0,-4                # 0xfffffffc
1309 //    and     alignedaddr,ptr,masklsb2
1310 //    andi    ptrlsb2,ptr,3
1311 //    sll     shiftamt,ptrlsb2,3
1312 //    ori     maskupper,$0,255              # 0xff
1313 //    sll     mask,maskupper,shiftamt
1314 //    nor     mask2,$0,mask
1315 //    sll     incr2,incr,shiftamt
1317 int64_t MaskImm = (Size == 1) ? 255 : 65535;
1318 BuildMI(BB, dl, TII->get(Mips::ADDiu), MaskLSB2)
1319   .addReg(Mips::ZERO).addImm(-4);
1320 BuildMI(BB, dl, TII->get(Mips::AND), AlignedAddr)
1321   .addReg(Ptr).addReg(MaskLSB2);
1322 BuildMI(BB, dl, TII->get(Mips::ANDi), PtrLSB2).addReg(Ptr).addImm(3);
1323 BuildMI(BB, dl, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
1324 BuildMI(BB, dl, TII->get(Mips::ORi), MaskUpper)
1325   .addReg(Mips::ZERO).addImm(MaskImm);
1326 BuildMI(BB, dl, TII->get(Mips::SLLV), Mask)
1327   .addReg(ShiftAmt).addReg(MaskUpper);
1328 BuildMI(BB, dl, TII->get(Mips::NOR), Mask2).addReg(Mips::ZERO).addReg(Mask);
1329 BuildMI(BB, dl, TII->get(Mips::SLLV), Incr2).addReg(ShiftAmt).addReg(Incr);
1331 // atomic.load.binop
1333 //   ll      oldval,0(alignedaddr)
1334 //   binop   binopres,oldval,incr2
1335 //   and     newval,binopres,mask
1336 //   and     maskedoldval0,oldval,mask2
1337 //   or      storeval,maskedoldval0,newval
1338 //   sc      success,storeval,0(alignedaddr)
1339 //   beq     success,$0,loopMBB
1343 //   ll      oldval,0(alignedaddr)
1344 //   and     newval,incr2,mask
1345 //   and     maskedoldval0,oldval,mask2
1346 //   or      storeval,maskedoldval0,newval
1347 //   sc      success,storeval,0(alignedaddr)
1348 //   beq     success,$0,loopMBB
1351 BuildMI(BB, dl, TII->get(LL), OldVal).addReg(AlignedAddr).addImm(0);
// NAND path (guarding `if (Nand)` is elided in this excerpt).
1353   //  and andres, oldval, incr2
1354   //  nor binopres, $0, andres
1355   //  and newval, binopres, mask
1356   BuildMI(BB, dl, TII->get(Mips::AND), AndRes).addReg(OldVal).addReg(Incr2);
1357   BuildMI(BB, dl, TII->get(Mips::NOR), BinOpRes)
1358     .addReg(Mips::ZERO).addReg(AndRes);
1359   BuildMI(BB, dl, TII->get(Mips::AND), NewVal).addReg(BinOpRes).addReg(Mask);
1360 } else if (BinOpcode) {
1361   //  <binop> binopres, oldval, incr2
1362   //  and newval, binopres, mask
1363   BuildMI(BB, dl, TII->get(BinOpcode), BinOpRes).addReg(OldVal).addReg(Incr2);
1364   BuildMI(BB, dl, TII->get(Mips::AND), NewVal).addReg(BinOpRes).addReg(Mask);
1365 } else {// atomic.swap
1366   //  and newval, incr2, mask
1367   BuildMI(BB, dl, TII->get(Mips::AND), NewVal).addReg(Incr2).addReg(Mask);
// Merge untouched lanes (mask2) with the new lane value, then SC and retry
// on failure.
1370 BuildMI(BB, dl, TII->get(Mips::AND), MaskedOldVal0)
1371   .addReg(OldVal).addReg(Mask2);
1372 BuildMI(BB, dl, TII->get(Mips::OR), StoreVal)
1373   .addReg(MaskedOldVal0).addReg(NewVal);
1374 BuildMI(BB, dl, TII->get(SC), Success)
1375   .addReg(StoreVal).addReg(AlignedAddr).addImm(0);
1376 BuildMI(BB, dl, TII->get(Mips::BEQ))
1377   .addReg(Success).addReg(Mips::ZERO).addMBB(loopMBB);
// Sink: extract the old lane and sign-extend it into Dest via sll+sra.
1380 //    and     maskedoldval1,oldval,mask
1381 //    srl     srlres,maskedoldval1,shiftamt
1382 //    sll     sllres,srlres,24
1383 //    sra     dest,sllres,24
1385 int64_t ShiftImm = (Size == 1) ? 24 : 16;
1387 BuildMI(BB, dl, TII->get(Mips::AND), MaskedOldVal1)
1388   .addReg(OldVal).addReg(Mask);
1389 BuildMI(BB, dl, TII->get(Mips::SRLV), SrlRes)
1390   .addReg(ShiftAmt).addReg(MaskedOldVal1);
1391 BuildMI(BB, dl, TII->get(Mips::SLL), SllRes)
1392   .addReg(SrlRes).addImm(ShiftImm);
1393 BuildMI(BB, dl, TII->get(Mips::SRA), Dest)
1394   .addReg(SllRes).addImm(ShiftImm);
1396 MI->eraseFromParent();   // The instruction is gone now.
// Emit a full-word (4- or 8-byte) atomic compare-and-swap as a two-block
// LL/SC sequence:
//   loop1: dest = ll 0(ptr); bne dest, oldval, exit
//   loop2: success = sc newval, 0(ptr); beq success, $0, loop1
1402 MipsTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI,
1403                                       MachineBasicBlock *BB,
1404                                       unsigned Size) const {
1405 assert((Size == 4 || Size == 8) && "Unsupported size for EmitAtomicCmpSwap.");
1407 MachineFunction *MF = BB->getParent();
1408 MachineRegisterInfo &RegInfo = MF->getRegInfo();
1409 const TargetRegisterClass *RC = getRegClassFor(MVT::getIntegerVT(Size * 8));
1410 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
1411 DebugLoc dl = MI->getDebugLoc();
1412 unsigned LL, SC, ZERO, BNE, BEQ;
// NOTE(review): the Size==4 / Size==8 branch lines are elided in this
// excerpt; the first LL/SC pair below is the 32-bit selection, the
// LLD/SCD pair the 64-bit one.
1415   LL = IsN64 ? Mips::LL_P8 : Mips::LL;
1416   SC = IsN64 ? Mips::SC_P8 : Mips::SC;
1422   LL = IsN64 ? Mips::LLD_P8 : Mips::LLD;
1423   SC = IsN64 ? Mips::SCD_P8 : Mips::SCD;
1424   ZERO = Mips::ZERO_64;
1429 unsigned Dest    = MI->getOperand(0).getReg();
1430 unsigned Ptr     = MI->getOperand(1).getReg();
1431 unsigned OldVal  = MI->getOperand(2).getReg();
1432 unsigned NewVal  = MI->getOperand(3).getReg();
1434 unsigned Success = RegInfo.createVirtualRegister(RC);
1436 // insert new blocks after the current block
1437 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1438 MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
1439 MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
1440 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
1441 MachineFunction::iterator It = BB;
1443 MF->insert(It, loop1MBB);
1444 MF->insert(It, loop2MBB);
1445 MF->insert(It, exitMBB);
1447 // Transfer the remainder of BB and its successor edges to exitMBB.
1448 exitMBB->splice(exitMBB->begin(), BB,
1449                 llvm::next(MachineBasicBlock::iterator(MI)), BB->end());
1450 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
1454 //   fallthrough --> loop1MBB
1455 BB->addSuccessor(loop1MBB);
1456 loop1MBB->addSuccessor(exitMBB);
1457 loop1MBB->addSuccessor(loop2MBB);
1458 loop2MBB->addSuccessor(loop1MBB);
1459 loop2MBB->addSuccessor(exitMBB);
1463 //   bne dest, oldval, exitMBB
1465 BuildMI(BB, dl, TII->get(LL), Dest).addReg(Ptr).addImm(0);
1466 BuildMI(BB, dl, TII->get(BNE))
1467   .addReg(Dest).addReg(OldVal).addMBB(exitMBB);
1470 //   sc success, newval, 0(ptr)
1471 //   beq success, $0, loop1MBB
1473 BuildMI(BB, dl, TII->get(SC), Success)
1474   .addReg(NewVal).addReg(Ptr).addImm(0);
1475 BuildMI(BB, dl, TII->get(BEQ))
1476   .addReg(Success).addReg(ZERO).addMBB(loop1MBB);
1478 MI->eraseFromParent();   // The instruction is gone now.
// Emit a subword (1- or 2-byte) atomic compare-and-swap. As in
// EmitAtomicBinaryPartword, the address is aligned down to a word and a
// shifted mask isolates the addressed byte/halfword; the compare and the
// store-conditional operate on the full word, and the sink block shifts and
// sign-extends the observed old value into Dest.
1484 MipsTargetLowering::EmitAtomicCmpSwapPartword(MachineInstr *MI,
1485                                               MachineBasicBlock *BB,
1486                                               unsigned Size) const {
1487 assert((Size == 1 || Size == 2) &&
1488        "Unsupported size for EmitAtomicCmpSwapPartial.");
1490 MachineFunction *MF = BB->getParent();
1491 MachineRegisterInfo &RegInfo = MF->getRegInfo();
1492 const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
1493 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
1494 DebugLoc dl = MI->getDebugLoc();
1495 unsigned LL = IsN64 ? Mips::LL_P8 : Mips::LL;
1496 unsigned SC = IsN64 ? Mips::SC_P8 : Mips::SC;
1498 unsigned Dest    = MI->getOperand(0).getReg();
1499 unsigned Ptr     = MI->getOperand(1).getReg();
1500 unsigned CmpVal  = MI->getOperand(2).getReg();
1501 unsigned NewVal  = MI->getOperand(3).getReg();
1503 unsigned AlignedAddr = RegInfo.createVirtualRegister(RC);
1504 unsigned ShiftAmt = RegInfo.createVirtualRegister(RC);
1505 unsigned Mask = RegInfo.createVirtualRegister(RC);
1506 unsigned Mask2 = RegInfo.createVirtualRegister(RC);
1507 unsigned ShiftedCmpVal = RegInfo.createVirtualRegister(RC);
1508 unsigned OldVal = RegInfo.createVirtualRegister(RC);
1509 unsigned MaskedOldVal0 = RegInfo.createVirtualRegister(RC);
1510 unsigned ShiftedNewVal = RegInfo.createVirtualRegister(RC);
1511 unsigned MaskLSB2 = RegInfo.createVirtualRegister(RC);
1512 unsigned PtrLSB2 = RegInfo.createVirtualRegister(RC);
1513 unsigned MaskUpper = RegInfo.createVirtualRegister(RC);
1514 unsigned MaskedCmpVal = RegInfo.createVirtualRegister(RC);
1515 unsigned MaskedNewVal = RegInfo.createVirtualRegister(RC);
1516 unsigned MaskedOldVal1 = RegInfo.createVirtualRegister(RC);
1517 unsigned StoreVal = RegInfo.createVirtualRegister(RC);
1518 unsigned SrlRes = RegInfo.createVirtualRegister(RC);
1519 unsigned SllRes = RegInfo.createVirtualRegister(RC);
1520 unsigned Success = RegInfo.createVirtualRegister(RC);
1522 // insert new blocks after the current block
1523 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1524 MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
1525 MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
1526 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(LLVM_BB);
1527 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
1528 MachineFunction::iterator It = BB;
1530 MF->insert(It, loop1MBB);
1531 MF->insert(It, loop2MBB);
1532 MF->insert(It, sinkMBB);
1533 MF->insert(It, exitMBB);
1535 // Transfer the remainder of BB and its successor edges to exitMBB.
1536 exitMBB->splice(exitMBB->begin(), BB,
1537                 llvm::next(MachineBasicBlock::iterator(MI)), BB->end());
1538 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
1540 BB->addSuccessor(loop1MBB);
1541 loop1MBB->addSuccessor(sinkMBB);
1542 loop1MBB->addSuccessor(loop2MBB);
1543 loop2MBB->addSuccessor(loop1MBB);
1544 loop2MBB->addSuccessor(sinkMBB);
1545 sinkMBB->addSuccessor(exitMBB);
1547 // FIXME: computation of newval2 can be moved to loop2MBB.
// Prologue in BB: aligned address, shift amount, lane mask, and the
// pre-shifted compare/new values.
1549 //    addiu   masklsb2,$0,-4                # 0xfffffffc
1550 //    and     alignedaddr,ptr,masklsb2
1551 //    andi    ptrlsb2,ptr,3
1552 //    sll     shiftamt,ptrlsb2,3
1553 //    ori     maskupper,$0,255              # 0xff
1554 //    sll     mask,maskupper,shiftamt
1555 //    nor     mask2,$0,mask
1556 //    andi    maskedcmpval,cmpval,255
1557 //    sll     shiftedcmpval,maskedcmpval,shiftamt
1558 //    andi    maskednewval,newval,255
1559 //    sll     shiftednewval,maskednewval,shiftamt
1560 int64_t MaskImm = (Size == 1) ? 255 : 65535;
1561 BuildMI(BB, dl, TII->get(Mips::ADDiu), MaskLSB2)
1562   .addReg(Mips::ZERO).addImm(-4);
1563 BuildMI(BB, dl, TII->get(Mips::AND), AlignedAddr)
1564   .addReg(Ptr).addReg(MaskLSB2);
1565 BuildMI(BB, dl, TII->get(Mips::ANDi), PtrLSB2).addReg(Ptr).addImm(3);
1566 BuildMI(BB, dl, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
1567 BuildMI(BB, dl, TII->get(Mips::ORi), MaskUpper)
1568   .addReg(Mips::ZERO).addImm(MaskImm);
1569 BuildMI(BB, dl, TII->get(Mips::SLLV), Mask)
1570   .addReg(ShiftAmt).addReg(MaskUpper);
1571 BuildMI(BB, dl, TII->get(Mips::NOR), Mask2).addReg(Mips::ZERO).addReg(Mask);
1572 BuildMI(BB, dl, TII->get(Mips::ANDi), MaskedCmpVal)
1573   .addReg(CmpVal).addImm(MaskImm);
1574 BuildMI(BB, dl, TII->get(Mips::SLLV), ShiftedCmpVal)
1575   .addReg(ShiftAmt).addReg(MaskedCmpVal);
1576 BuildMI(BB, dl, TII->get(Mips::ANDi), MaskedNewVal)
1577   .addReg(NewVal).addImm(MaskImm);
1578 BuildMI(BB, dl, TII->get(Mips::SLLV), ShiftedNewVal)
1579   .addReg(ShiftAmt).addReg(MaskedNewVal);
// loop1: load-linked the word, isolate the lane, bail to sink on mismatch.
1582 //    ll      oldval,0(alginedaddr)
1583 //    and     maskedoldval0,oldval,mask
1584 //    bne     maskedoldval0,shiftedcmpval,sinkMBB
1586 BuildMI(BB, dl, TII->get(LL), OldVal).addReg(AlignedAddr).addImm(0);
1587 BuildMI(BB, dl, TII->get(Mips::AND), MaskedOldVal0)
1588   .addReg(OldVal).addReg(Mask);
1589 BuildMI(BB, dl, TII->get(Mips::BNE))
1590   .addReg(MaskedOldVal0).addReg(ShiftedCmpVal).addMBB(sinkMBB);
// loop2: splice the new lane into the untouched lanes, SC, retry on failure.
1593 //    and     maskedoldval1,oldval,mask2
1594 //    or      storeval,maskedoldval1,shiftednewval
1595 //    sc      success,storeval,0(alignedaddr)
1596 //    beq     success,$0,loop1MBB
1598 BuildMI(BB, dl, TII->get(Mips::AND), MaskedOldVal1)
1599   .addReg(OldVal).addReg(Mask2);
1600 BuildMI(BB, dl, TII->get(Mips::OR), StoreVal)
1601   .addReg(MaskedOldVal1).addReg(ShiftedNewVal);
1602 BuildMI(BB, dl, TII->get(SC), Success)
1603   .addReg(StoreVal).addReg(AlignedAddr).addImm(0);
1604 BuildMI(BB, dl, TII->get(Mips::BEQ))
1605   .addReg(Success).addReg(Mips::ZERO).addMBB(loop1MBB);
// Sink: shift the observed lane down and sign-extend into Dest.
1608 //    srl     srlres,maskedoldval0,shiftamt
1609 //    sll     sllres,srlres,24
1610 //    sra     dest,sllres,24
1612 int64_t ShiftImm = (Size == 1) ? 24 : 16;
1614 BuildMI(BB, dl, TII->get(Mips::SRLV), SrlRes)
1615   .addReg(ShiftAmt).addReg(MaskedOldVal0);
1616 BuildMI(BB, dl, TII->get(Mips::SLL), SllRes)
1617   .addReg(SrlRes).addImm(ShiftImm);
1618 BuildMI(BB, dl, TII->get(Mips::SRA), Dest)
1619   .addReg(SllRes).addImm(ShiftImm);
1621 MI->eraseFromParent();   // The instruction is gone now.
1626 //===----------------------------------------------------------------------===//
1627 // Misc Lower Operation implementation
1628 //===----------------------------------------------------------------------===//
// Lower ISD::BRCOND. If the condition is (or folds into) a Mips FP compare,
// emit a MipsISD::FPBrcond carrying the branch sense derived from the FP
// condition code; otherwise (elided path) the node is presumably returned
// unchanged — TODO confirm against the full source.
1629 SDValue MipsTargetLowering::
1630 LowerBRCOND(SDValue Op, SelectionDAG &DAG) const
1632 // The first operand is the chain, the second is the condition, the third is
1633 // the block to branch to if the condition is true.
1634 SDValue Chain = Op.getOperand(0);
1635 SDValue Dest = Op.getOperand(2);
1636 DebugLoc dl = Op.getDebugLoc();
1638 SDValue CondRes = CreateFPCmp(DAG, Op.getOperand(1));
1640 // Return if flag is not set by a floating point comparison.
1641 if (CondRes.getOpcode() != MipsISD::FPCmp)
1644 SDValue CCNode  = CondRes.getOperand(2);
// Extract the Mips FP condition code and translate it to a branch code.
1646   (Mips::CondCode)cast<ConstantSDNode>(CCNode)->getZExtValue();
1647 SDValue BrCode = DAG.getConstant(GetFPBranchCodeFromCond(CC), MVT::i32);
1649 return DAG.getNode(MipsISD::FPBrcond, dl, Op.getValueType(), Chain, BrCode,
// Lower ISD::SELECT. When the condition is an FP compare, emit a
// conditional-move-on-FP-flag node (CreateCMovFP); otherwise the select is
// left for the elided fallback path.
1653 SDValue MipsTargetLowering::
1654 LowerSELECT(SDValue Op, SelectionDAG &DAG) const
1656 SDValue Cond = CreateFPCmp(DAG, Op.getOperand(0));
1658 // Return if flag is not set by a floating point comparison.
1659 if (Cond.getOpcode() != MipsISD::FPCmp)
1662 return CreateCMovFP(DAG, Cond, Op.getOperand(1), Op.getOperand(2),
// Lower ISD::SELECT_CC by re-expressing it as SETCC + SELECT, which the
// other lowering paths then handle.
1666 SDValue MipsTargetLowering::
1667 LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
1669 DebugLoc DL = Op.getDebugLoc();
1670 EVT Ty = Op.getOperand(0).getValueType();
1671 SDValue Cond = DAG.getNode(ISD::SETCC, DL, getSetCCResultType(Ty),
1672                            Op.getOperand(0), Op.getOperand(1),
1675 return DAG.getNode(ISD::SELECT, DL, Op.getValueType(), Cond, Op.getOperand(2),
// Lower ISD::SETCC for FP operands: build the FP compare, then materialize
// the boolean result as a conditional move selecting between 1 and 0.
1679 SDValue MipsTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
1680 SDValue Cond = CreateFPCmp(DAG, Op);
1682 assert(Cond.getOpcode() == MipsISD::FPCmp &&
1683        "Floating point operand expected.");
1685 SDValue True  = DAG.getConstant(1, MVT::i32);
1686 SDValue False = DAG.getConstant(0, MVT::i32);
1688 return CreateCMovFP(DAG, Cond, True, False, Op.getDebugLoc());
// Lower a GlobalAddress node. Non-PIC O32: either a %gp_rel access for
// globals placed in the small-data section, or the classic %hi/%lo pair.
// PIC (or N64): load the address from the GOT through the global base
// register, choosing GOT relocation flags by ABI and linkage, and adding a
// %lo/%got_ofst term for locally-bound symbols.
1691 SDValue MipsTargetLowering::LowerGlobalAddress(SDValue Op,
1692                                                SelectionDAG &DAG) const {
1693 // FIXME there isn't actually debug info here
1694 DebugLoc dl = Op.getDebugLoc();
1695 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
1697 if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !IsN64) {
1698   SDVTList VTs = DAG.getVTList(MVT::i32);
1700   const MipsTargetObjectFile &TLOF =
1701     (const MipsTargetObjectFile&)getObjFileLowering();
1703   // %gp_rel relocation
1704   if (TLOF.IsGlobalInSmallSection(GV, getTargetMachine())) {
1705     SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32, 0,
1707     SDValue GPRelNode = DAG.getNode(MipsISD::GPRel, dl, VTs, &GA, 1);
1708     SDValue GPReg = DAG.getRegister(Mips::GP, MVT::i32);
1709     return DAG.getNode(ISD::ADD, dl, MVT::i32, GPReg, GPRelNode);
1711   // %hi/%lo relocation
1712   SDValue GAHi = DAG.getTargetGlobalAddress(GV, dl, MVT::i32, 0,
1714   SDValue GALo = DAG.getTargetGlobalAddress(GV, dl, MVT::i32, 0,
1716   SDValue HiPart = DAG.getNode(MipsISD::Hi, dl, VTs, &GAHi, 1);
1717   SDValue Lo = DAG.getNode(MipsISD::Lo, dl, MVT::i32, GALo);
1718   return DAG.getNode(ISD::ADD, dl, MVT::i32, HiPart, Lo);
// PIC / N64 path: symbols with internal/local (non-function) linkage use
// page+offset GOT addressing; others use a direct GOT entry.
1721 EVT ValTy = Op.getValueType();
1722 bool HasGotOfst = (GV->hasInternalLinkage() ||
1723                    (GV->hasLocalLinkage() && !isa<Function>(GV)));
1724 unsigned GotFlag = HasMips64 ?
1725                    (HasGotOfst ? MipsII::MO_GOT_PAGE : MipsII::MO_GOT_DISP) :
1726                    (HasGotOfst ? MipsII::MO_GOT : MipsII::MO_GOT16);
1727 SDValue GA = DAG.getTargetGlobalAddress(GV, dl, ValTy, 0, GotFlag);
1728 GA = DAG.getNode(MipsISD::Wrapper, dl, ValTy, GetGlobalReg(DAG, ValTy), GA);
1729 SDValue ResNode = DAG.getLoad(ValTy, dl, DAG.getEntryNode(), GA,
1730                               MachinePointerInfo(), false, false, false, 0);
1731 // On functions and global targets not internal linked only
1732 // a load from got/GP is necessary for PIC to work.
1735 SDValue GALo = DAG.getTargetGlobalAddress(GV, dl, ValTy, 0,
1736                                           HasMips64 ? MipsII::MO_GOT_OFST :
1738 SDValue Lo = DAG.getNode(MipsISD::Lo, dl, ValTy, GALo);
1739 return DAG.getNode(ISD::ADD, dl, ValTy, ResNode, Lo);
// Lower a BlockAddress node. Non-PIC O32 uses an %hi/%lo pair; PIC (or N64)
// loads the page address from the GOT via the global base register and adds
// the %got_ofst/%lo offset.
1742 SDValue MipsTargetLowering::LowerBlockAddress(SDValue Op,
1743                                               SelectionDAG &DAG) const {
1744 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
1745 // FIXME there isn't actually debug info here
1746 DebugLoc dl = Op.getDebugLoc();
1748 if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !IsN64) {
1749   // %hi/%lo relocation
1751     DAG.getTargetBlockAddress(BA, MVT::i32, 0, MipsII::MO_ABS_HI);
1753     DAG.getTargetBlockAddress(BA, MVT::i32, 0, MipsII::MO_ABS_LO);
1754   SDValue Hi = DAG.getNode(MipsISD::Hi, dl, MVT::i32, BAHi);
1755   SDValue Lo = DAG.getNode(MipsISD::Lo, dl, MVT::i32, BALo);
1756   return DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, Lo);
// PIC / N64 path: GOT page load + offset add.
1759 EVT ValTy = Op.getValueType();
1760 unsigned GOTFlag = HasMips64 ? MipsII::MO_GOT_PAGE : MipsII::MO_GOT;
1761 unsigned OFSTFlag = HasMips64 ? MipsII::MO_GOT_OFST : MipsII::MO_ABS_LO;
1762 SDValue BAGOTOffset = DAG.getTargetBlockAddress(BA, ValTy, 0, GOTFlag);
1763 BAGOTOffset = DAG.getNode(MipsISD::Wrapper, dl, ValTy,
1764                           GetGlobalReg(DAG, ValTy), BAGOTOffset);
1765 SDValue BALOOffset = DAG.getTargetBlockAddress(BA, ValTy, 0, OFSTFlag);
1766 SDValue Load = DAG.getLoad(ValTy, dl, DAG.getEntryNode(), BAGOTOffset,
1767                            MachinePointerInfo(), false, false, false, 0);
1768 SDValue Lo = DAG.getNode(MipsISD::Lo, dl, ValTy, BALOOffset);
1769 return DAG.getNode(ISD::ADD, dl, ValTy, Load, Lo);
// Lower a TLS GlobalAddress according to the chosen TLS model:
//  - General/Local Dynamic: call __tls_get_addr with a GOT-relative TLS
//    argument; Local Dynamic additionally adds %dtprel_hi/%dtprel_lo of the
//    symbol to the returned module base.
//  - Initial Exec: load the %gottprel offset from the GOT.
//  - Local Exec: materialize %tprel_hi/%tprel_lo directly.
// The non-dynamic results are added to MipsISD::ThreadPointer at the end.
1772 SDValue MipsTargetLowering::
1773 LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
1775 // If the relocation model is PIC, use the General Dynamic TLS Model or
1776 // Local Dynamic TLS model, otherwise use the Initial Exec or
1777 // Local Exec TLS Model.
1779 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
1780 DebugLoc dl = GA->getDebugLoc();
1781 const GlobalValue *GV = GA->getGlobal();
1782 EVT PtrVT = getPointerTy();
1784 TLSModel::Model model = getTargetMachine().getTLSModel(GV);
1786 if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
1787   // General Dynamic and Local Dynamic TLS Model.
1788   unsigned Flag = (model == TLSModel::LocalDynamic) ? MipsII::MO_TLSLDM
1791   SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, Flag);
1792   SDValue Argument = DAG.getNode(MipsISD::Wrapper, dl, PtrVT,
1793                                  GetGlobalReg(DAG, PtrVT), TGA);
1794   unsigned PtrSize = PtrVT.getSizeInBits();
1795   IntegerType *PtrTy = Type::getIntNTy(*DAG.getContext(), PtrSize);
1797   SDValue TlsGetAddr = DAG.getExternalSymbol("__tls_get_addr", PtrVT);
// Build the argument list and lower the libcall to __tls_get_addr.
1801   Entry.Node = Argument;
1803   Args.push_back(Entry);
1805   TargetLowering::CallLoweringInfo CLI(DAG.getEntryNode(), PtrTy,
1806                 false, false, false, false, 0, CallingConv::C,
1807                 /*isTailCall=*/false, /*doesNotRet=*/false,
1808                 /*isReturnValueUsed=*/true,
1809                 TlsGetAddr, Args, DAG, dl);
1810   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
1812   SDValue Ret = CallResult.first;
// General Dynamic returns the call result directly (the early return for
// that case falls in an elided line); Local Dynamic continues below.
1814   if (model != TLSModel::LocalDynamic)
1817   SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
1818                                              MipsII::MO_DTPREL_HI);
1819   SDValue Hi = DAG.getNode(MipsISD::Hi, dl, PtrVT, TGAHi);
1820   SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
1821                                              MipsII::MO_DTPREL_LO);
1822   SDValue Lo = DAG.getNode(MipsISD::Lo, dl, PtrVT, TGALo);
1823   SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Ret);
1824   return DAG.getNode(ISD::ADD, dl, PtrVT, Add, Lo);
1828 if (model == TLSModel::InitialExec) {
1829   // Initial Exec TLS Model
1830   SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
1831                                            MipsII::MO_GOTTPREL);
1832   TGA = DAG.getNode(MipsISD::Wrapper, dl, PtrVT, GetGlobalReg(DAG, PtrVT),
1834   Offset = DAG.getLoad(PtrVT, dl,
1835                        DAG.getEntryNode(), TGA, MachinePointerInfo(),
1836                        false, false, false, 0);
1838   // Local Exec TLS Model
1839   assert(model == TLSModel::LocalExec);
1840   SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
1841                                              MipsII::MO_TPREL_HI);
1842   SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
1843                                              MipsII::MO_TPREL_LO);
1844   SDValue Hi = DAG.getNode(MipsISD::Hi, dl, PtrVT, TGAHi);
1845   SDValue Lo = DAG.getNode(MipsISD::Lo, dl, PtrVT, TGALo);
1846   Offset = DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo);
1849 SDValue ThreadPointer = DAG.getNode(MipsISD::ThreadPointer, dl, PtrVT);
1850 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
// Lowers an ISD::JumpTable node to target form. For static (non-PIC, non-N64)
// code the jump-table address is built from %hi/%lo target jump-table nodes;
// otherwise the high part is loaded from the GOT through the global pointer.
1853 SDValue MipsTargetLowering::
1854 LowerJumpTable(SDValue Op, SelectionDAG &DAG) const
1856 SDValue HiPart, JTI, JTILo;
1857 // FIXME there isn't actually debug info here
1858 DebugLoc dl = Op.getDebugLoc();
1859 bool IsPIC = getTargetMachine().getRelocationModel() == Reloc::PIC_;
1860 EVT PtrVT = Op.getValueType();
1861 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
1863 if (!IsPIC && !IsN64) {
1864 JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MipsII::MO_ABS_HI);
1865 HiPart = DAG.getNode(MipsISD::Hi, dl, PtrVT, JTI);
1866 JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MipsII::MO_ABS_LO);
1867 } else {// Emit Load from Global Pointer
// GOT_PAGE/GOT_OFST relocations are used for 64-bit targets; 32-bit PIC uses
// GOT plus an %lo offset.
1868 unsigned GOTFlag = HasMips64 ? MipsII::MO_GOT_PAGE : MipsII::MO_GOT;
1869 unsigned OfstFlag = HasMips64 ? MipsII::MO_GOT_OFST : MipsII::MO_ABS_LO;
1870 JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, GOTFlag);
1871 JTI = DAG.getNode(MipsISD::Wrapper, dl, PtrVT, GetGlobalReg(DAG, PtrVT),
1873 HiPart = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), JTI,
1874 MachinePointerInfo(), false, false, false, 0);
1875 JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, OfstFlag);
// Final address = HiPart + %lo(jump table).
1878 SDValue Lo = DAG.getNode(MipsISD::Lo, dl, PtrVT, JTILo);
1879 return DAG.getNode(ISD::ADD, dl, PtrVT, HiPart, Lo);
// Lowers an ISD::ConstantPool node. Static non-N64 code uses a %hi/%lo pair
// (MVT::i32); PIC/N64 code loads the address from the GOT via the global
// pointer and adds an offset (%lo or GOT_OFST).
1882 SDValue MipsTargetLowering::
1883 LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
1886 ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
1887 const Constant *C = N->getConstVal();
1888 // FIXME there isn't actually debug info here
1889 DebugLoc dl = Op.getDebugLoc();
1891 // gp_rel relocation
1892 // FIXME: we should reference the constant pool using small data sections,
1893 // but the asm printer currently doesn't support this feature without
1894 // hacking it. This feature should come soon so we can uncomment the
1896 //if (IsInSmallSection(C->getType())) {
1897 // SDValue GPRelNode = DAG.getNode(MipsISD::GPRel, MVT::i32, CP);
1898 // SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(MVT::i32);
1899 // ResNode = DAG.getNode(ISD::ADD, MVT::i32, GOT, GPRelNode);
1901 if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !IsN64) {
1902 SDValue CPHi = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment(),
1903 N->getOffset(), MipsII::MO_ABS_HI);
1904 SDValue CPLo = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment(),
1905 N->getOffset(), MipsII::MO_ABS_LO);
1906 SDValue HiPart = DAG.getNode(MipsISD::Hi, dl, MVT::i32, CPHi);
1907 SDValue Lo = DAG.getNode(MipsISD::Lo, dl, MVT::i32, CPLo);
1908 ResNode = DAG.getNode(ISD::ADD, dl, MVT::i32, HiPart, Lo);
// PIC / N64 path: load the page address from the GOT, then add the offset.
1910 EVT ValTy = Op.getValueType();
1911 unsigned GOTFlag = HasMips64 ? MipsII::MO_GOT_PAGE : MipsII::MO_GOT;
1912 unsigned OFSTFlag = HasMips64 ? MipsII::MO_GOT_OFST : MipsII::MO_ABS_LO;
1913 SDValue CP = DAG.getTargetConstantPool(C, ValTy, N->getAlignment(),
1914 N->getOffset(), GOTFlag);
1915 CP = DAG.getNode(MipsISD::Wrapper, dl, ValTy, GetGlobalReg(DAG, ValTy), CP);
1916 SDValue Load = DAG.getLoad(ValTy, dl, DAG.getEntryNode(), CP,
1917 MachinePointerInfo::getConstantPool(), false,
1919 SDValue CPLo = DAG.getTargetConstantPool(C, ValTy, N->getAlignment(),
1920 N->getOffset(), OFSTFlag);
1921 SDValue Lo = DAG.getNode(MipsISD::Lo, dl, ValTy, CPLo);
1922 ResNode = DAG.getNode(ISD::ADD, dl, ValTy, Load, Lo);
// Lowers ISD::VASTART: stores the address of the function's vararg save-area
// frame slot into the va_list pointer operand (operand 1).
1928 SDValue MipsTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
1929 MachineFunction &MF = DAG.getMachineFunction();
1930 MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
1932 DebugLoc dl = Op.getDebugLoc();
1933 SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
1936 // vastart just stores the address of the VarArgsFrameIndex slot into the
1937 // memory location argument.
1938 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
1939 return DAG.getStore(Op.getOperand(0), dl, FI, Op.getOperand(1),
1940 MachinePointerInfo(SV), false, false, 0);
// Lowers FCOPYSIGN on 32-bit-FPU subtargets: the sign bit of Y is copied into
// X in i32 registers. With MIPS32r2 (HasR2) the ext/ins bit-field instructions
// are used; otherwise the sign bit is moved with shift/or sequences.
1943 static SDValue LowerFCOPYSIGN32(SDValue Op, SelectionDAG &DAG, bool HasR2) {
1944 EVT TyX = Op.getOperand(0).getValueType();
1945 EVT TyY = Op.getOperand(1).getValueType();
1946 SDValue Const1 = DAG.getConstant(1, MVT::i32);
1947 SDValue Const31 = DAG.getConstant(31, MVT::i32);
1948 DebugLoc DL = Op.getDebugLoc();
1951 // If operand is of type f64, extract the upper 32-bit. Otherwise, bitcast it
1953 SDValue X = (TyX == MVT::f32) ?
1954 DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(0)) :
1955 DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(0),
1957 SDValue Y = (TyY == MVT::f32) ?
1958 DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(1)) :
1959 DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(1),
1963 // ext E, Y, 31, 1 ; extract bit31 of Y
1964 // ins X, E, 31, 1 ; insert extracted bit at bit31 of X
1965 SDValue E = DAG.getNode(MipsISD::Ext, DL, MVT::i32, Y, Const31, Const1);
1966 Res = DAG.getNode(MipsISD::Ins, DL, MVT::i32, E, Const31, Const1, X);
// Non-R2 path: clear X's sign bit with shl/srl, isolate Y's sign bit with
// srl/sll, then combine with or.
1969 // srl SrlX, SllX, 1
1971 // sll SllY, SrlX, 31
1972 // or Or, SrlX, SllY
1973 SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i32, X, Const1);
1974 SDValue SrlX = DAG.getNode(ISD::SRL, DL, MVT::i32, SllX, Const1);
1975 SDValue SrlY = DAG.getNode(ISD::SRL, DL, MVT::i32, Y, Const31);
1976 SDValue SllY = DAG.getNode(ISD::SHL, DL, MVT::i32, SrlY, Const31);
1977 Res = DAG.getNode(ISD::OR, DL, MVT::i32, SrlX, SllY);
1980 if (TyX == MVT::f32)
1981 return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), Res);
// f64 result: rebuild the pair from the original low word and the new high
// word carrying the copied sign.
1983 SDValue LowX = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
1984 Op.getOperand(0), DAG.getConstant(0, MVT::i32));
1985 return DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, LowX, Res);
// Lowers FCOPYSIGN on 64-bit subtargets. The operands are bitcast to integers
// of their own width (i32 or i64); the sign bit of Y is extracted and placed
// at the sign-bit position of X, zero-extending or truncating the moved bit
// when the operand widths differ.
1988 static SDValue LowerFCOPYSIGN64(SDValue Op, SelectionDAG &DAG, bool HasR2) {
1989 unsigned WidthX = Op.getOperand(0).getValueSizeInBits();
1990 unsigned WidthY = Op.getOperand(1).getValueSizeInBits();
1991 EVT TyX = MVT::getIntegerVT(WidthX), TyY = MVT::getIntegerVT(WidthY);
1992 SDValue Const1 = DAG.getConstant(1, MVT::i32);
1993 DebugLoc DL = Op.getDebugLoc();
1995 // Bitcast to integer nodes.
1996 SDValue X = DAG.getNode(ISD::BITCAST, DL, TyX, Op.getOperand(0));
1997 SDValue Y = DAG.getNode(ISD::BITCAST, DL, TyY, Op.getOperand(1));
2000 // ext E, Y, width(Y) - 1, 1 ; extract bit width(Y)-1 of Y
2001 // ins X, E, width(X) - 1, 1 ; insert extracted bit at bit width(X)-1 of X
2002 SDValue E = DAG.getNode(MipsISD::Ext, DL, TyY, Y,
2003 DAG.getConstant(WidthY - 1, MVT::i32), Const1);
// Bring the extracted bit to X's width before inserting.
2005 if (WidthX > WidthY)
2006 E = DAG.getNode(ISD::ZERO_EXTEND, DL, TyX, E);
2007 else if (WidthY > WidthX)
2008 E = DAG.getNode(ISD::TRUNCATE, DL, TyX, E);
2010 SDValue I = DAG.getNode(MipsISD::Ins, DL, TyX, E,
2011 DAG.getConstant(WidthX - 1, MVT::i32), Const1, X);
2012 return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), I);
// Fallback without ext/ins: clear X's sign bit and or in Y's sign bit using
// shift pairs, widening/narrowing SrlY to X's width as needed.
2015 // (d)sll SllX, X, 1
2016 // (d)srl SrlX, SllX, 1
2017 // (d)srl SrlY, Y, width(Y)-1
2018 // (d)sll SllY, SrlX, width(Y)-1
2019 // or Or, SrlX, SllY
2020 SDValue SllX = DAG.getNode(ISD::SHL, DL, TyX, X, Const1);
2021 SDValue SrlX = DAG.getNode(ISD::SRL, DL, TyX, SllX, Const1);
2022 SDValue SrlY = DAG.getNode(ISD::SRL, DL, TyY, Y,
2023 DAG.getConstant(WidthY - 1, MVT::i32));
2025 if (WidthX > WidthY)
2026 SrlY = DAG.getNode(ISD::ZERO_EXTEND, DL, TyX, SrlY);
2027 else if (WidthY > WidthX)
2028 SrlY = DAG.getNode(ISD::TRUNCATE, DL, TyX, SrlY);
2030 SDValue SllY = DAG.getNode(ISD::SHL, DL, TyX, SrlY,
2031 DAG.getConstant(WidthX - 1, MVT::i32));
2032 SDValue Or = DAG.getNode(ISD::OR, DL, TyX, SrlX, SllY);
2033 return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), Or);
// Dispatches FCOPYSIGN lowering to the 64-bit or 32-bit implementation based
// on the subtarget, passing whether MIPS32r2 ext/ins are available.
2037 MipsTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
2038 if (Subtarget->hasMips64())
2039 return LowerFCOPYSIGN64(Op, DAG, Subtarget->hasMips32r2());
2041 return LowerFCOPYSIGN32(Op, DAG, Subtarget->hasMips32r2());
// Lowers FABS on 32-bit-FPU subtargets by clearing the sign bit in an i32
// register: with MIPS32r2, ins of a zero bit at bit 31; otherwise shl/srl by 1.
2044 static SDValue LowerFABS32(SDValue Op, SelectionDAG &DAG, bool HasR2) {
2045 SDValue Res, Const1 = DAG.getConstant(1, MVT::i32);
2046 DebugLoc DL = Op.getDebugLoc();
2048 // If operand is of type f64, extract the upper 32-bit. Otherwise, bitcast it
2050 SDValue X = (Op.getValueType() == MVT::f32) ?
2051 DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(0)) :
2052 DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(0),
// R2 path: insert bit 0 of $zero at bit 31, clearing the sign.
2057 Res = DAG.getNode(MipsISD::Ins, DL, MVT::i32,
2058 DAG.getRegister(Mips::ZERO, MVT::i32),
2059 DAG.getConstant(31, MVT::i32), Const1, X);
// Non-R2 path: shift out and back to zero the sign bit.
2061 SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i32, X, Const1);
2062 Res = DAG.getNode(ISD::SRL, DL, MVT::i32, SllX, Const1);
2065 if (Op.getValueType() == MVT::f32)
2066 return DAG.getNode(ISD::BITCAST, DL, MVT::f32, Res);
2068 SDValue LowX = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
2069 Op.getOperand(0), DAG.getConstant(0, MVT::i32));
2070 return DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, LowX, Res);
// Lowers FABS for f64 on 64-bit subtargets: bitcast to i64 and clear bit 63,
// either with dins of $zero_64 (R2) or a dsll/dsrl pair.
2073 static SDValue LowerFABS64(SDValue Op, SelectionDAG &DAG, bool HasR2) {
2074 SDValue Res, Const1 = DAG.getConstant(1, MVT::i32);
2075 DebugLoc DL = Op.getDebugLoc();
2077 // Bitcast to integer node.
2078 SDValue X = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Op.getOperand(0));
2082 Res = DAG.getNode(MipsISD::Ins, DL, MVT::i64,
2083 DAG.getRegister(Mips::ZERO_64, MVT::i64),
2084 DAG.getConstant(63, MVT::i32), Const1, X);
2086 SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i64, X, Const1);
2087 Res = DAG.getNode(ISD::SRL, DL, MVT::i64, SllX, Const1);
2090 return DAG.getNode(ISD::BITCAST, DL, MVT::f64, Res);
// Dispatches FABS lowering: the 64-bit path only for f64 on a MIPS64
// subtarget, the 32-bit path otherwise.
2094 MipsTargetLowering::LowerFABS(SDValue Op, SelectionDAG &DAG) const {
2095 if (Subtarget->hasMips64() && (Op.getValueType() == MVT::f64))
2096 return LowerFABS64(Op, DAG, Subtarget->hasMips32r2());
2098 return LowerFABS32(Op, DAG, Subtarget->hasMips32r2());
// Lowers ISD::FRAMEADDR. Only depth 0 (the current frame) is supported; the
// frame address is read from $fp ($fp_64 on N64).
2101 SDValue MipsTargetLowering::
2102 LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
2104 assert((cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() == 0) &&
2105 "Frame address can only be determined for current frame.");
2107 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
2108 MFI->setFrameAddressIsTaken(true);
2109 EVT VT = Op.getValueType();
2110 DebugLoc dl = Op.getDebugLoc();
2111 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
2112 IsN64 ? Mips::FP_64 : Mips::FP, VT);
// Lowers ISD::RETURNADDR. Only depth 0 is supported; $ra ($ra_64 on N64) is
// marked as an implicit live-in and its value is returned.
2116 SDValue MipsTargetLowering::LowerRETURNADDR(SDValue Op,
2117 SelectionDAG &DAG) const {
2119 assert((cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() == 0) &&
2120 "Return address can be determined only for current frame.");
2122 MachineFunction &MF = DAG.getMachineFunction();
2123 MachineFrameInfo *MFI = MF.getFrameInfo();
2124 EVT VT = Op.getValueType();
2125 unsigned RA = IsN64 ? Mips::RA_64 : Mips::RA;
2126 MFI->setReturnAddressIsTaken(true);
2128 // Return RA, which contains the return address. Mark it an implicit live-in.
2129 unsigned Reg = MF.addLiveIn(RA, getRegClassFor(VT));
2130 return DAG.getCopyFromReg(DAG.getEntryNode(), Op.getDebugLoc(), Reg, VT);
2133 // TODO: set SType according to the desired memory barrier behavior.
// Lowers ISD::MEMBARRIER to a MIPS sync node on the incoming chain.
2135 MipsTargetLowering::LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const {
2137 DebugLoc dl = Op.getDebugLoc();
2138 return DAG.getNode(MipsISD::Sync, dl, MVT::Other, Op.getOperand(0),
2139 DAG.getConstant(SType, MVT::i32));
// Lowers ISD::ATOMIC_FENCE to a MIPS sync node on the incoming chain.
2142 SDValue MipsTargetLowering::LowerATOMIC_FENCE(SDValue Op,
2143 SelectionDAG &DAG) const {
2144 // FIXME: Need pseudo-fence for 'singlethread' fences
2145 // FIXME: Set SType for weaker fences where supported/appropriate.
2147 DebugLoc dl = Op.getDebugLoc();
2148 return DAG.getNode(MipsISD::Sync, dl, MVT::Other, Op.getOperand(0),
2149 DAG.getConstant(SType, MVT::i32));
// Lowers SHL_PARTS (64-bit shift left expressed on i32 (lo, hi) halves).
// Builds both the shamt<32 and shamt>=32 results and selects on bit 5 of the
// shift amount (0x20).
2152 SDValue MipsTargetLowering::LowerShiftLeftParts(SDValue Op,
2153 SelectionDAG &DAG) const {
2154 DebugLoc DL = Op.getDebugLoc();
2155 SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1);
2156 SDValue Shamt = Op.getOperand(2);
2159 // lo = (shl lo, shamt)
2160 // hi = (or (shl hi, shamt) (srl (srl lo, 1), ~shamt))
2163 // hi = (shl lo, shamt[4:0])
2164 SDValue Not = DAG.getNode(ISD::XOR, DL, MVT::i32, Shamt,
2165 DAG.getConstant(-1, MVT::i32));
2166 SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, MVT::i32, Lo,
2167 DAG.getConstant(1, MVT::i32));
2168 SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, MVT::i32, ShiftRight1Lo,
2170 SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, MVT::i32, Hi, Shamt);
2171 SDValue Or = DAG.getNode(ISD::OR, DL, MVT::i32, ShiftLeftHi, ShiftRightLo);
2172 SDValue ShiftLeftLo = DAG.getNode(ISD::SHL, DL, MVT::i32, Lo, Shamt);
// Cond is nonzero when shamt >= 32; in that case lo becomes 0 and hi takes
// the shifted low word.
2173 SDValue Cond = DAG.getNode(ISD::AND, DL, MVT::i32, Shamt,
2174 DAG.getConstant(0x20, MVT::i32));
2175 Lo = DAG.getNode(ISD::SELECT, DL, MVT::i32, Cond,
2176 DAG.getConstant(0, MVT::i32), ShiftLeftLo);
2177 Hi = DAG.getNode(ISD::SELECT, DL, MVT::i32, Cond, ShiftLeftLo, Or);
2179 SDValue Ops[2] = {Lo, Hi};
2180 return DAG.getMergeValues(Ops, 2, DL);
// Lowers SRL_PARTS/SRA_PARTS (64-bit right shift on i32 (lo, hi) halves).
// IsSRA selects arithmetic vs. logical; the shamt<32 and shamt>=32 results
// are computed and selected on bit 5 of the shift amount.
2183 SDValue MipsTargetLowering::LowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
2185 DebugLoc DL = Op.getDebugLoc();
2186 SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1);
2187 SDValue Shamt = Op.getOperand(2);
2190 // lo = (or (shl (shl hi, 1), ~shamt) (srl lo, shamt))
2192 // hi = (sra hi, shamt)
2194 // hi = (srl hi, shamt)
2197 // lo = (sra hi, shamt[4:0])
2198 // hi = (sra hi, 31)
2200 // lo = (srl hi, shamt[4:0])
2202 SDValue Not = DAG.getNode(ISD::XOR, DL, MVT::i32, Shamt,
2203 DAG.getConstant(-1, MVT::i32));
2204 SDValue ShiftLeft1Hi = DAG.getNode(ISD::SHL, DL, MVT::i32, Hi,
2205 DAG.getConstant(1, MVT::i32));
2206 SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, MVT::i32, ShiftLeft1Hi, Not);
2207 SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, MVT::i32, Lo, Shamt);
2208 SDValue Or = DAG.getNode(ISD::OR, DL, MVT::i32, ShiftLeftHi, ShiftRightLo);
2209 SDValue ShiftRightHi = DAG.getNode(IsSRA ? ISD::SRA : ISD::SRL, DL, MVT::i32,
// Cond is nonzero when shamt >= 32; Shift31 is the sign fill for SRA.
2211 SDValue Cond = DAG.getNode(ISD::AND, DL, MVT::i32, Shamt,
2212 DAG.getConstant(0x20, MVT::i32));
2213 SDValue Shift31 = DAG.getNode(ISD::SRA, DL, MVT::i32, Hi,
2214 DAG.getConstant(31, MVT::i32));
2215 Lo = DAG.getNode(ISD::SELECT, DL, MVT::i32, Cond, ShiftRightHi, Or);
2216 Hi = DAG.getNode(ISD::SELECT, DL, MVT::i32, Cond,
2217 IsSRA ? Shift31 : DAG.getConstant(0, MVT::i32),
2220 SDValue Ops[2] = {Lo, Hi};
2221 return DAG.getMergeValues(Ops, 2, DL);
// Builds one half of an unaligned-load expansion: a MipsISD LWL/LWR/LDL/LDR
// memory-intrinsic node reading at LD's base pointer plus Offset, merging
// into Src, and carrying LD's memory operand.
2224 static SDValue CreateLoadLR(unsigned Opc, SelectionDAG &DAG, LoadSDNode *LD,
2225 SDValue Chain, SDValue Src, unsigned Offset) {
2226 SDValue Ptr = LD->getBasePtr();
2227 EVT VT = LD->getValueType(0), MemVT = LD->getMemoryVT();
2228 EVT BasePtrVT = Ptr.getValueType();
2229 DebugLoc DL = LD->getDebugLoc();
2230 SDVTList VTList = DAG.getVTList(VT, MVT::Other);
2233 Ptr = DAG.getNode(ISD::ADD, DL, BasePtrVT, Ptr,
2234 DAG.getConstant(Offset, BasePtrVT));
2236 SDValue Ops[] = { Chain, Ptr, Src };
2237 return DAG.getMemIntrinsicNode(Opc, DL, VTList, Ops, 3, MemVT,
2238 LD->getMemOperand());
2241 // Expand an unaligned 32 or 64-bit integer load node.
// Expands into LWL/LWR (i32) or LDL/LDR (i64) pairs; a zextload of i32 into
// i64 additionally masks the result with a shl/srl-by-32 pair.
2242 SDValue MipsTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
2243 LoadSDNode *LD = cast<LoadSDNode>(Op);
2244 EVT MemVT = LD->getMemoryVT();
2246 // Return if load is aligned or if MemVT is neither i32 nor i64.
2247 if ((LD->getAlignment() >= MemVT.getSizeInBits() / 8) ||
2248 ((MemVT != MVT::i32) && (MemVT != MVT::i64)))
2251 bool IsLittle = Subtarget->isLittle();
2252 EVT VT = Op.getValueType();
2253 ISD::LoadExtType ExtType = LD->getExtensionType();
2254 SDValue Chain = LD->getChain(), Undef = DAG.getUNDEF(VT);
2256 assert((VT == MVT::i32) || (VT == MVT::i64));
2259 // (set dst, (i64 (load baseptr)))
2261 // (set tmp, (ldl (add baseptr, 7), undef))
2262 // (set dst, (ldr baseptr, tmp))
2263 if ((VT == MVT::i64) && (ExtType == ISD::NON_EXTLOAD)) {
2264 SDValue LDL = CreateLoadLR(MipsISD::LDL, DAG, LD, Chain, Undef,
2266 return CreateLoadLR(MipsISD::LDR, DAG, LD, LDL.getValue(1), LDL,
// 32-bit (or extending) case: an LWL feeding an LWR.
2270 SDValue LWL = CreateLoadLR(MipsISD::LWL, DAG, LD, Chain, Undef,
2272 SDValue LWR = CreateLoadLR(MipsISD::LWR, DAG, LD, LWL.getValue(1), LWL,
2276 // (set dst, (i32 (load baseptr))) or
2277 // (set dst, (i64 (sextload baseptr))) or
2278 // (set dst, (i64 (extload baseptr)))
2280 // (set tmp, (lwl (add baseptr, 3), undef))
2281 // (set dst, (lwr baseptr, tmp))
2282 if ((VT == MVT::i32) || (ExtType == ISD::SEXTLOAD) ||
2283 (ExtType == ISD::EXTLOAD))
2286 assert((VT == MVT::i64) && (ExtType == ISD::ZEXTLOAD));
2289 // (set dst, (i64 (zextload baseptr)))
2291 // (set tmp0, (lwl (add baseptr, 3), undef))
2292 // (set tmp1, (lwr baseptr, tmp0))
2293 // (set tmp2, (shl tmp1, 32))
2294 // (set dst, (srl tmp2, 32))
2295 DebugLoc DL = LD->getDebugLoc();
2296 SDValue Const32 = DAG.getConstant(32, MVT::i32);
2297 SDValue SLL = DAG.getNode(ISD::SHL, DL, MVT::i64, LWR, Const32);
2298 SDValue SRL = DAG.getNode(ISD::SRL, DL, MVT::i64, SLL, Const32);
2299 SDValue Ops[] = { SRL, LWR.getValue(1) };
2300 return DAG.getMergeValues(Ops, 2, DL);
// Builds one half of an unaligned-store expansion: a MipsISD SWL/SWR/SDL/SDR
// memory-intrinsic node writing SD's value at its base pointer plus Offset,
// carrying SD's memory operand.
2303 static SDValue CreateStoreLR(unsigned Opc, SelectionDAG &DAG, StoreSDNode *SD,
2304 SDValue Chain, unsigned Offset) {
2305 SDValue Ptr = SD->getBasePtr(), Value = SD->getValue();
2306 EVT MemVT = SD->getMemoryVT(), BasePtrVT = Ptr.getValueType();
2307 DebugLoc DL = SD->getDebugLoc();
2308 SDVTList VTList = DAG.getVTList(MVT::Other);
2311 Ptr = DAG.getNode(ISD::ADD, DL, BasePtrVT, Ptr,
2312 DAG.getConstant(Offset, BasePtrVT));
2314 SDValue Ops[] = { Chain, Value, Ptr };
2315 return DAG.getMemIntrinsicNode(Opc, DL, VTList, Ops, 3, MemVT,
2316 SD->getMemOperand());
2319 // Expand an unaligned 32 or 64-bit integer store node.
// Expands into SWL/SWR (i32 or truncating store) or SDL/SDR (i64) pairs, with
// the left/right offsets swapped for little- vs. big-endian targets.
2320 SDValue MipsTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
2321 StoreSDNode *SD = cast<StoreSDNode>(Op);
2322 EVT MemVT = SD->getMemoryVT();
2324 // Return if store is aligned or if MemVT is neither i32 nor i64.
2325 if ((SD->getAlignment() >= MemVT.getSizeInBits() / 8) ||
2326 ((MemVT != MVT::i32) && (MemVT != MVT::i64)))
2329 bool IsLittle = Subtarget->isLittle();
2330 SDValue Value = SD->getValue(), Chain = SD->getChain();
2331 EVT VT = Value.getValueType();
2334 // (store val, baseptr) or
2335 // (truncstore val, baseptr)
2337 // (swl val, (add baseptr, 3))
2338 // (swr val, baseptr)
2339 if ((VT == MVT::i32) || SD->isTruncatingStore()) {
2340 SDValue SWL = CreateStoreLR(MipsISD::SWL, DAG, SD, Chain,
2342 return CreateStoreLR(MipsISD::SWR, DAG, SD, SWL, IsLittle ? 0 : 3);
2345 assert(VT == MVT::i64);
2348 // (store val, baseptr)
2350 // (sdl val, (add baseptr, 7))
2351 // (sdr val, baseptr)
2352 SDValue SDL = CreateStoreLR(MipsISD::SDL, DAG, SD, Chain, IsLittle ? 7 : 0);
2353 return CreateStoreLR(MipsISD::SDR, DAG, SD, SDL, IsLittle ? 0 : 7);
2356 // This function expands mips intrinsic nodes which have 64-bit input operands
2357 // or output values.
2359 // out64 = intrinsic-node in64
2361 // lo = copy (extract-element (in64, 0))
2362 // hi = copy (extract-element (in64, 1))
2363 // mips-specific-node
2366 // out64 = merge-values (v0, v1)
// HasI64In: the i64 input is split and copied into the LO/HI registers before
// the node. HasI64Out: LO/HI are read back after the node and merged into an
// i64 result (plus the output chain).
2368 static SDValue LowerDSPIntr(SDValue Op, SelectionDAG &DAG,
2369 unsigned Opc, bool HasI64In, bool HasI64Out) {
2370 DebugLoc DL = Op.getDebugLoc();
2371 bool HasChainIn = Op->getOperand(0).getValueType() == MVT::Other;
2372 SDValue Chain = HasChainIn ? Op->getOperand(0) : DAG.getEntryNode();
2373 SmallVector<SDValue, 3> Ops;
// Split the i64 operand into its low/high i32 halves and copy them into the
// LO and HI registers, glueing the copies into the chain.
2376 SDValue InLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32,
2377 Op->getOperand(1 + HasChainIn),
2378 DAG.getConstant(0, MVT::i32));
2379 SDValue InHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32,
2380 Op->getOperand(1 + HasChainIn),
2381 DAG.getConstant(1, MVT::i32));
2383 Chain = DAG.getCopyToReg(Chain, DL, Mips::LO, InLo, SDValue());
2384 Chain = DAG.getCopyToReg(Chain, DL, Mips::HI, InHi, Chain.getValue(1));
2386 Ops.push_back(Chain);
2387 Ops.append(Op->op_begin() + HasChainIn + 2, Op->op_end());
2388 Ops.push_back(Chain.getValue(1));
2390 Ops.push_back(Chain);
2391 Ops.append(Op->op_begin() + HasChainIn + 1, Op->op_end());
2395 return DAG.getNode(Opc, DL, Op->value_begin(), Op->getNumValues(),
2396 Ops.begin(), Ops.size());
// i64-output path: emit the node with (chain, glue) results, then read LO and
// HI back and build the i64 result pair.
2398 SDValue Intr = DAG.getNode(Opc, DL, DAG.getVTList(MVT::Other, MVT::Glue),
2399 Ops.begin(), Ops.size());
2400 SDValue OutLo = DAG.getCopyFromReg(Intr.getValue(0), DL, Mips::LO, MVT::i32,
2402 SDValue OutHi = DAG.getCopyFromReg(OutLo.getValue(1), DL, Mips::HI, MVT::i32,
2404 SDValue Out = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, OutLo, OutHi);
2409 SDValue Vals[] = { Out, OutHi.getValue(1) };
2410 return DAG.getMergeValues(Vals, 2, DL);
// Lowers chainless MIPS DSP intrinsics (intrinsic id is operand 0) to their
// MipsISD nodes via LowerDSPIntr. The two trailing bools are HasI64In and
// HasI64Out: mult/multu take no i64 input but produce an i64 accumulator.
2413 SDValue MipsTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
2414 SelectionDAG &DAG) const {
2415 switch (cast<ConstantSDNode>(Op->getOperand(0))->getZExtValue()) {
2418 case Intrinsic::mips_shilo:
2419 return LowerDSPIntr(Op, DAG, MipsISD::SHILO, true, true);
2420 case Intrinsic::mips_dpau_h_qbl:
2421 return LowerDSPIntr(Op, DAG, MipsISD::DPAU_H_QBL, true, true);
2422 case Intrinsic::mips_dpau_h_qbr:
2423 return LowerDSPIntr(Op, DAG, MipsISD::DPAU_H_QBR, true, true);
2424 case Intrinsic::mips_dpsu_h_qbl:
2425 return LowerDSPIntr(Op, DAG, MipsISD::DPSU_H_QBL, true, true);
2426 case Intrinsic::mips_dpsu_h_qbr:
2427 return LowerDSPIntr(Op, DAG, MipsISD::DPSU_H_QBR, true, true);
2428 case Intrinsic::mips_dpa_w_ph:
2429 return LowerDSPIntr(Op, DAG, MipsISD::DPA_W_PH, true, true);
2430 case Intrinsic::mips_dps_w_ph:
2431 return LowerDSPIntr(Op, DAG, MipsISD::DPS_W_PH, true, true);
2432 case Intrinsic::mips_dpax_w_ph:
2433 return LowerDSPIntr(Op, DAG, MipsISD::DPAX_W_PH, true, true);
2434 case Intrinsic::mips_dpsx_w_ph:
2435 return LowerDSPIntr(Op, DAG, MipsISD::DPSX_W_PH, true, true);
2436 case Intrinsic::mips_mulsa_w_ph:
2437 return LowerDSPIntr(Op, DAG, MipsISD::MULSA_W_PH, true, true);
2438 case Intrinsic::mips_mult:
2439 return LowerDSPIntr(Op, DAG, MipsISD::MULT, false, true);
2440 case Intrinsic::mips_multu:
2441 return LowerDSPIntr(Op, DAG, MipsISD::MULTU, false, true);
2442 case Intrinsic::mips_madd:
2443 return LowerDSPIntr(Op, DAG, MipsISD::MADD_DSP, true, true);
2444 case Intrinsic::mips_maddu:
2445 return LowerDSPIntr(Op, DAG, MipsISD::MADDU_DSP, true, true);
2446 case Intrinsic::mips_msub:
2447 return LowerDSPIntr(Op, DAG, MipsISD::MSUB_DSP, true, true);
2448 case Intrinsic::mips_msubu:
2449 return LowerDSPIntr(Op, DAG, MipsISD::MSUBU_DSP, true, true);
// Lowers chained MIPS DSP intrinsics (intrinsic id is operand 1, after the
// chain) to their MipsISD nodes via LowerDSPIntr. The extp/extr family takes
// an i64 accumulator input but produces no i64 output.
2453 SDValue MipsTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
2454 SelectionDAG &DAG) const {
2455 switch (cast<ConstantSDNode>(Op->getOperand(1))->getZExtValue()) {
2458 case Intrinsic::mips_extp:
2459 return LowerDSPIntr(Op, DAG, MipsISD::EXTP, true, false);
2460 case Intrinsic::mips_extpdp:
2461 return LowerDSPIntr(Op, DAG, MipsISD::EXTPDP, true, false);
2462 case Intrinsic::mips_extr_w:
2463 return LowerDSPIntr(Op, DAG, MipsISD::EXTR_W, true, false);
2464 case Intrinsic::mips_extr_r_w:
2465 return LowerDSPIntr(Op, DAG, MipsISD::EXTR_R_W, true, false);
2466 case Intrinsic::mips_extr_rs_w:
2467 return LowerDSPIntr(Op, DAG, MipsISD::EXTR_RS_W, true, false);
2468 case Intrinsic::mips_extr_s_h:
2469 return LowerDSPIntr(Op, DAG, MipsISD::EXTR_S_H, true, false);
2470 case Intrinsic::mips_mthlip:
2471 return LowerDSPIntr(Op, DAG, MipsISD::MTHLIP, true, true);
2472 case Intrinsic::mips_mulsaq_s_w_ph:
2473 return LowerDSPIntr(Op, DAG, MipsISD::MULSAQ_S_W_PH, true, true);
2474 case Intrinsic::mips_maq_s_w_phl:
2475 return LowerDSPIntr(Op, DAG, MipsISD::MAQ_S_W_PHL, true, true);
2476 case Intrinsic::mips_maq_s_w_phr:
2477 return LowerDSPIntr(Op, DAG, MipsISD::MAQ_S_W_PHR, true, true);
2478 case Intrinsic::mips_maq_sa_w_phl:
2479 return LowerDSPIntr(Op, DAG, MipsISD::MAQ_SA_W_PHL, true, true);
2480 case Intrinsic::mips_maq_sa_w_phr:
2481 return LowerDSPIntr(Op, DAG, MipsISD::MAQ_SA_W_PHR, true, true);
2482 case Intrinsic::mips_dpaq_s_w_ph:
2483 return LowerDSPIntr(Op, DAG, MipsISD::DPAQ_S_W_PH, true, true);
2484 case Intrinsic::mips_dpsq_s_w_ph:
2485 return LowerDSPIntr(Op, DAG, MipsISD::DPSQ_S_W_PH, true, true);
2486 case Intrinsic::mips_dpaq_sa_l_w:
2487 return LowerDSPIntr(Op, DAG, MipsISD::DPAQ_SA_L_W, true, true);
2488 case Intrinsic::mips_dpsq_sa_l_w:
2489 return LowerDSPIntr(Op, DAG, MipsISD::DPSQ_SA_L_W, true, true);
2490 case Intrinsic::mips_dpaqx_s_w_ph:
2491 return LowerDSPIntr(Op, DAG, MipsISD::DPAQX_S_W_PH, true, true);
2492 case Intrinsic::mips_dpaqx_sa_w_ph:
2493 return LowerDSPIntr(Op, DAG, MipsISD::DPAQX_SA_W_PH, true, true);
2494 case Intrinsic::mips_dpsqx_s_w_ph:
2495 return LowerDSPIntr(Op, DAG, MipsISD::DPSQX_S_W_PH, true, true);
2496 case Intrinsic::mips_dpsqx_sa_w_ph:
2497 return LowerDSPIntr(Op, DAG, MipsISD::DPSQX_SA_W_PH, true, true);
2501 //===----------------------------------------------------------------------===//
2502 // Calling Convention Implementation
2503 //===----------------------------------------------------------------------===//
2505 //===----------------------------------------------------------------------===//
2506 // TODO: Implement a generic logic using tblgen that can support this.
2507 // Mips O32 ABI rules:
2509 // i32 - Passed in A0, A1, A2, A3 and stack
2510 // f32 - Only passed in f32 registers if no int reg has been used yet to hold
2511 // an argument. Otherwise, passed in A1, A2, A3 and stack.
2512 // f64 - Only passed in two aliased f32 registers if no int reg has been used
2513 // yet to hold an argument. Otherwise, use A2, A3 and stack. If A1 is
2514 // not used, it must be shadowed. If only A3 is available, shadow it and
2517 // For vararg functions, all arguments are passed in A0, A1, A2, A3 and stack.
2518 //===----------------------------------------------------------------------===//
// Custom O32 calling-convention assignment routine (see the ABI rules in the
// comment block above). Assigns each argument to A0-A3, F12/F14, D6/D7
// (presumably — F64Regs' initializer is not visible here; TODO confirm), or a
// stack slot, honoring the register-shadowing rules for f32/f64 arguments.
// Returns false to indicate the argument was always handled.
2520 static bool CC_MipsO32(unsigned ValNo, MVT ValVT,
2521 MVT LocVT, CCValAssign::LocInfo LocInfo,
2522 ISD::ArgFlagsTy ArgFlags, CCState &State) {
2524 static const unsigned IntRegsSize=4, FloatRegsSize=2;
2526 static const uint16_t IntRegs[] = {
2527 Mips::A0, Mips::A1, Mips::A2, Mips::A3
2529 static const uint16_t F32Regs[] = {
2530 Mips::F12, Mips::F14
2532 static const uint16_t F64Regs[] = {
// Byval aggregates: hand off to HandleByVal, then mark as allocated any int
// arg registers covered by the byval's stack footprint.
2537 if (ArgFlags.isByVal()) {
2538 State.HandleByVal(ValNo, ValVT, LocVT, LocInfo,
2539 1 /*MinSize*/, 4 /*MinAlign*/, ArgFlags);
2540 unsigned NextReg = (State.getNextStackOffset() + 3) / 4;
2541 for (unsigned r = State.getFirstUnallocated(IntRegs, IntRegsSize);
2542 r < std::min(IntRegsSize, NextReg); ++r)
2543 State.AllocateReg(IntRegs[r]);
2547 // Promote i8 and i16
2548 if (LocVT == MVT::i8 || LocVT == MVT::i16) {
2550 if (ArgFlags.isSExt())
2551 LocInfo = CCValAssign::SExt;
2552 else if (ArgFlags.isZExt())
2553 LocInfo = CCValAssign::ZExt;
2555 LocInfo = CCValAssign::AExt;
2560 // f32 and f64 are allocated in A0, A1, A2, A3 when either of the following
2561 // is true: function is vararg, argument is 3rd or higher, there is previous
2562 // argument which is not f32 or f64.
2563 bool AllocateFloatsInIntReg = State.isVarArg() || ValNo > 1
2564 || State.getFirstUnallocated(F32Regs, FloatRegsSize) != ValNo;
2565 unsigned OrigAlign = ArgFlags.getOrigAlign();
// An i32 with 8-byte original alignment is half of a split i64.
2566 bool isI64 = (ValVT == MVT::i32 && OrigAlign == 8);
2568 if (ValVT == MVT::i32 || (ValVT == MVT::f32 && AllocateFloatsInIntReg)) {
2569 Reg = State.AllocateReg(IntRegs, IntRegsSize);
2570 // If this is the first part of an i64 arg,
2571 // the allocated register must be either A0 or A2.
2572 if (isI64 && (Reg == Mips::A1 || Reg == Mips::A3))
2573 Reg = State.AllocateReg(IntRegs, IntRegsSize);
2575 } else if (ValVT == MVT::f64 && AllocateFloatsInIntReg) {
2576 // Allocate int register and shadow next int register. If first
2577 // available register is Mips::A1 or Mips::A3, shadow it too.
2578 Reg = State.AllocateReg(IntRegs, IntRegsSize);
2579 if (Reg == Mips::A1 || Reg == Mips::A3)
2580 Reg = State.AllocateReg(IntRegs, IntRegsSize);
2581 State.AllocateReg(IntRegs, IntRegsSize);
2583 } else if (ValVT.isFloatingPoint() && !AllocateFloatsInIntReg) {
2584 // we are guaranteed to find an available float register
2585 if (ValVT == MVT::f32) {
2586 Reg = State.AllocateReg(F32Regs, FloatRegsSize);
2587 // Shadow int register
2588 State.AllocateReg(IntRegs, IntRegsSize);
2590 Reg = State.AllocateReg(F64Regs, FloatRegsSize);
2591 // Shadow int registers
2592 unsigned Reg2 = State.AllocateReg(IntRegs, IntRegsSize);
2593 if (Reg2 == Mips::A1 || Reg2 == Mips::A3)
2594 State.AllocateReg(IntRegs, IntRegsSize);
2595 State.AllocateReg(IntRegs, IntRegsSize);
2598 llvm_unreachable("Cannot handle this ValVT.");
// No register found (or sret): the argument goes on the stack.
2600 unsigned SizeInBytes = ValVT.getSizeInBits() >> 3;
2602 if (!ArgFlags.isSRet())
2603 Offset = State.AllocateStack(SizeInBytes, OrigAlign);
2605 Offset = State.AllocateStack(SizeInBytes, SizeInBytes);
2608 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
2610 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2612 return false; // CC must always match
// N32/N64 argument registers: the eight integer arg registers A0-A3, T0-T3
// (64-bit views) and the eight double-precision arg registers D12-D19 that
// shadow them position-for-position.
2615 static const uint16_t Mips64IntRegs[8] =
2616 {Mips::A0_64, Mips::A1_64, Mips::A2_64, Mips::A3_64,
2617 Mips::T0_64, Mips::T1_64, Mips::T2_64, Mips::T3_64};
2618 static const uint16_t Mips64DPRegs[8] =
2619 {Mips::D12_64, Mips::D13_64, Mips::D14_64, Mips::D15_64,
2620 Mips::D16_64, Mips::D17_64, Mips::D18_64, Mips::D19_64};
// N32/N64 byval argument assignment: rounds size up to a multiple of 8, keeps
// alignment at least 8 (at most 16), consumes int/FP arg-register pairs that
// the byval occupies, and allocates the caller-stack area.
2622 static bool CC_Mips64Byval(unsigned ValNo, MVT ValVT, MVT LocVT,
2623 CCValAssign::LocInfo LocInfo,
2624 ISD::ArgFlagsTy ArgFlags, CCState &State) {
2625 unsigned Align = std::max(ArgFlags.getByValAlign(), (unsigned)8);
2626 unsigned Size = (ArgFlags.getByValSize() + 7) / 8 * 8;
2627 unsigned FirstIdx = State.getFirstUnallocated(Mips64IntRegs, 8);
2629 assert(Align <= 16 && "Cannot handle alignments larger than 16.");
2631 // If byval is 16-byte aligned, the first arg register must be even.
2632 if ((Align == 16) && (FirstIdx % 2)) {
2633 State.AllocateReg(Mips64IntRegs[FirstIdx], Mips64DPRegs[FirstIdx]);
2637 // Mark the registers allocated.
2638 for (unsigned I = FirstIdx; Size && (I < 8); Size -= 8, ++I)
2639 State.AllocateReg(Mips64IntRegs[I], Mips64DPRegs[I]);
2641 // Allocate space on caller's stack.
2642 unsigned Offset = State.AllocateStack(Size, Align);
2645 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Mips64IntRegs[FirstIdx],
2648 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
2653 #include "MipsGenCallingConv.inc"
// Runs the N32/N64 calling convention over each outgoing call operand, using
// CC_MipsN for fixed arguments and CC_MipsN_VarArg for variadic ones, and
// aborts with a diagnostic if any operand type is unhandled.
2656 AnalyzeMips64CallOperands(CCState &CCInfo,
2657 const SmallVectorImpl<ISD::OutputArg> &Outs) {
2658 unsigned NumOps = Outs.size();
2659 for (unsigned i = 0; i != NumOps; ++i) {
2660 MVT ArgVT = Outs[i].VT;
2661 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
2664 if (Outs[i].IsFixed)
2665 R = CC_MipsN(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo);
2667 R = CC_MipsN_VarArg(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo);
2671 dbgs() << "Call operand #" << i << " has unhandled type "
2672 << EVT(ArgVT).getEVTString();
2674 llvm_unreachable(0);
2679 //===----------------------------------------------------------------------===//
2680 // Call Calling Convention Implementation
2681 //===----------------------------------------------------------------------===//
// The four O32 integer argument registers.
2683 static const unsigned O32IntRegsSize = 4;
2685 static const uint16_t O32IntRegs[] = {
2686 Mips::A0, Mips::A1, Mips::A2, Mips::A3
2689 // Return next O32 integer argument register.
// Only valid for the even registers A0 and A2; maps A0 -> A1 and A2 -> A3
// (the second half of a register pair).
2690 static unsigned getNextIntArgReg(unsigned Reg) {
2691 assert((Reg == Mips::A0) || (Reg == Mips::A2));
2692 return (Reg == Mips::A0) ? Mips::A1 : Mips::A3;
2695 // Write ByVal Arg to arg registers and stack.
// O32 byval lowering for an outgoing call. The struct is copied in up to
// three phases:
//   1. whole 32-bit words that land in $a0-$a3 are loaded and queued in
//      RegsToPass;
//   2. a 1-3 byte tail that still fits in a register is assembled from
//      zero-extending sub-word loads plus shifts/ORs (endian-dependent);
//   3. anything past the fourth word is copied to the caller's outgoing
//      argument area with a memcpy node.
// Loads/stores are chained through MemOpChains so the caller can glue
// them into a single TokenFactor.
2697 WriteByValArg(SDValue Chain, DebugLoc dl,
2698 SmallVector<std::pair<unsigned, SDValue>, 16> &RegsToPass,
2699 SmallVector<SDValue, 8> &MemOpChains, SDValue StackPtr,
2700 MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg,
2701 const CCValAssign &VA, const ISD::ArgFlagsTy &Flags,
2702 MVT PtrType, bool isLittle) {
2703 unsigned LocMemOffset = VA.getLocMemOffset();
// Offset walks through the source struct; LocMemOffset tracks the ABI
// slot (register while < 16, stack memory afterwards).
2704 unsigned Offset = 0;
2705 uint32_t RemainingSize = Flags.getByValSize();
2706 unsigned ByValAlign = Flags.getByValAlign();
2708 // Copy the first 4 words of byval arg to registers A0 - A3.
2709 // FIXME: Use a stricter alignment if it enables better optimization in passes
2711 for (; RemainingSize >= 4 && LocMemOffset < 4 * 4;
2712 Offset += 4, RemainingSize -= 4, LocMemOffset += 4) {
2713 SDValue LoadPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, Arg,
2714 DAG.getConstant(Offset, MVT::i32));
2715 SDValue LoadVal = DAG.getLoad(MVT::i32, dl, Chain, LoadPtr,
2716 MachinePointerInfo(), false, false, false,
2717 std::min(ByValAlign, (unsigned )4));
2718 MemOpChains.push_back(LoadVal.getValue(1));
// LocMemOffset / 4 indexes $a0..$a3 for this word.
2719 unsigned DstReg = O32IntRegs[LocMemOffset / 4];
2720 RegsToPass.push_back(std::make_pair(DstReg, LoadVal));
2723 if (RemainingSize == 0)
2726 // If there still is a register available for argument passing, write the
2727 // remaining part of the structure to it using subword loads and shifts.
2728 if (LocMemOffset < 4 * 4) {
2729 assert(RemainingSize <= 3 && RemainingSize >= 1 &&
2730 "There must be one to three bytes remaining.");
// A 3-byte tail is read as a halfword followed by a byte; 1- and 2-byte
// tails need a single load.
2731 unsigned LoadSize = (RemainingSize == 3 ? 2 : RemainingSize);
2732 SDValue LoadPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, Arg,
2733 DAG.getConstant(Offset, MVT::i32));
2734 unsigned Alignment = std::min(ByValAlign, (unsigned )4);
2735 SDValue LoadVal = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, Chain,
2736 LoadPtr, MachinePointerInfo(),
2737 MVT::getIntegerVT(LoadSize * 8), false,
2739 MemOpChains.push_back(LoadVal.getValue(1));
2741 // If target is big endian, shift it to the most significant half-word or
2744 LoadVal = DAG.getNode(ISD::SHL, dl, MVT::i32, LoadVal,
2745 DAG.getConstant(32 - LoadSize * 8, MVT::i32));
2748 RemainingSize -= LoadSize;
2750 // Read second subword if necessary.
2751 if (RemainingSize != 0) {
2752 assert(RemainingSize == 1 && "There must be one byte remaining.");
2753 LoadPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, Arg,
2754 DAG.getConstant(Offset, MVT::i32));
2755 unsigned Alignment = std::min(ByValAlign, (unsigned )2);
2756 SDValue Subword = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, Chain,
2757 LoadPtr, MachinePointerInfo(),
2758 MVT::i8, false, false, Alignment);
2759 MemOpChains.push_back(Subword.getValue(1));
2760 // Insert the loaded byte to LoadVal.
2761 // FIXME: Use INS if supported by target.
// The byte sits at bit 16 (little-endian) or bit 8 (big-endian) of the
// assembled register value.
2762 unsigned ShiftAmt = isLittle ? 16 : 8;
2763 SDValue Shift = DAG.getNode(ISD::SHL, dl, MVT::i32, Subword,
2764 DAG.getConstant(ShiftAmt, MVT::i32));
2765 LoadVal = DAG.getNode(ISD::OR, dl, MVT::i32, LoadVal, Shift);
2768 unsigned DstReg = O32IntRegs[LocMemOffset / 4];
2769 RegsToPass.push_back(std::make_pair(DstReg, LoadVal));
2773 // Copy remaining part of byval arg using memcpy.
2774 SDValue Src = DAG.getNode(ISD::ADD, dl, MVT::i32, Arg,
2775 DAG.getConstant(Offset, MVT::i32));
// Destination is the outgoing-argument area at SP + LocMemOffset.
2776 SDValue Dst = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr,
2777 DAG.getIntPtrConstant(LocMemOffset));
2778 Chain = DAG.getMemcpy(Chain, dl, Dst, Src,
2779 DAG.getConstant(RemainingSize, MVT::i32),
2780 std::min(ByValAlign, (unsigned)4),
2781 /*isVolatile=*/false, /*AlwaysInline=*/false,
2782 MachinePointerInfo(0), MachinePointerInfo(0));
2783 MemOpChains.push_back(Chain);
2786 // Copy Mips64 byVal arg to registers and stack.
// N32/N64 analogue of WriteByValArg: up to eight doublewords of the
// struct go into the 64-bit integer argument registers, a sub-doubleword
// tail is assembled with zero-extending loads and shifts, and whatever
// remains after the register file is exhausted is copied to the outgoing
// argument area with a memcpy node.
2788 PassByValArg64(SDValue Chain, DebugLoc dl,
2789 SmallVector<std::pair<unsigned, SDValue>, 16> &RegsToPass,
2790 SmallVector<SDValue, 8> &MemOpChains, SDValue StackPtr,
2791 MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg,
2792 const CCValAssign &VA, const ISD::ArgFlagsTy &Flags,
2793 EVT PtrTy, bool isLittle) {
2794 unsigned ByValSize = Flags.getByValSize();
// Doubleword copies never need more than 8-byte alignment.
2795 unsigned Alignment = std::min(Flags.getByValAlign(), (unsigned)8);
2796 bool IsRegLoc = VA.isRegLoc();
2797 unsigned Offset = 0; // Offset in # of bytes from the beginning of struct.
2798 unsigned LocMemOffset = 0;
2799 unsigned MemCpySize = ByValSize;
2802 LocMemOffset = VA.getLocMemOffset();
// Locate the first register assigned to this argument within the
// Mips64IntRegs array so we can walk forward from it.
2804 const uint16_t *Reg = std::find(Mips64IntRegs, Mips64IntRegs + 8,
2806 const uint16_t *RegEnd = Mips64IntRegs + 8;
2808 // Copy double words to registers.
2809 for (; (Reg != RegEnd) && (ByValSize >= Offset + 8); ++Reg, Offset += 8) {
2810 SDValue LoadPtr = DAG.getNode(ISD::ADD, dl, PtrTy, Arg,
2811 DAG.getConstant(Offset, PtrTy));
2812 SDValue LoadVal = DAG.getLoad(MVT::i64, dl, Chain, LoadPtr,
2813 MachinePointerInfo(), false, false, false,
2815 MemOpChains.push_back(LoadVal.getValue(1));
2816 RegsToPass.push_back(std::make_pair(*Reg, LoadVal));
2819 // Return if the struct has been fully copied.
2820 if (!(MemCpySize = ByValSize - Offset))
2823 // If there is an argument register available, copy the remainder of the
2824 // byval argument with sub-doubleword loads and shifts.
2825 if (Reg != RegEnd) {
2826 assert((ByValSize < Offset + 8) &&
2827 "Size of the remainder should be smaller than 8-byte.");
// Assemble the tail from progressively smaller power-of-two loads
// (4, then 2, then 1 bytes), OR-ing each piece into position.
2829 for (unsigned LoadSize = 4; Offset < ByValSize; LoadSize /= 2) {
2830 unsigned RemSize = ByValSize - Offset;
2832 if (RemSize < LoadSize)
2835 SDValue LoadPtr = DAG.getNode(ISD::ADD, dl, PtrTy, Arg,
2836 DAG.getConstant(Offset, PtrTy));
2838 DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i64, Chain, LoadPtr,
2839 MachinePointerInfo(), MVT::getIntegerVT(LoadSize * 8),
2840 false, false, Alignment);
2841 MemOpChains.push_back(LoadVal.getValue(1));
2843 // Offset in number of bits from double word boundary.
2844 unsigned OffsetDW = (Offset % 8) * 8;
// Little-endian: piece i goes at bit position OffsetDW. Big-endian:
// mirror it from the top of the doubleword.
2845 unsigned Shamt = isLittle ? OffsetDW : 64 - (OffsetDW + LoadSize * 8);
2846 SDValue Shift = DAG.getNode(ISD::SHL, dl, MVT::i64, LoadVal,
2847 DAG.getConstant(Shamt, MVT::i32));
2849 Val = Val.getNode() ? DAG.getNode(ISD::OR, dl, MVT::i64, Val, Shift) :
// Subsequent, smaller loads can only assume the smaller alignment.
2852 Alignment = std::min(Alignment, LoadSize);
2855 RegsToPass.push_back(std::make_pair(*Reg, Val));
2860 assert(MemCpySize && "MemCpySize must not be zero.");
2862 // Copy remainder of byval arg to it with memcpy.
2863 SDValue Src = DAG.getNode(ISD::ADD, dl, PtrTy, Arg,
2864 DAG.getConstant(Offset, PtrTy));
2865 SDValue Dst = DAG.getNode(ISD::ADD, dl, MVT::i64, StackPtr,
2866 DAG.getIntPtrConstant(LocMemOffset));
2867 Chain = DAG.getMemcpy(Chain, dl, Dst, Src,
2868 DAG.getConstant(MemCpySize, PtrTy), Alignment,
2869 /*isVolatile=*/false, /*AlwaysInline=*/false,
2870 MachinePointerInfo(0), MachinePointerInfo(0));
2871 MemOpChains.push_back(Chain);
2874 /// LowerCall - functions arguments are copied from virtual regs to
2875 /// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted.
2876 /// TODO: isTailCall.
//
// High-level flow:
//   1. analyze operands with the ABI-appropriate calling convention;
//   2. emit CALLSEQ_START and materialize the stack pointer;
//   3. for each argument: byval structs go through WriteByValArg /
//      PassByValArg64, register args are promoted/bitcast and queued in
//      RegsToPass, stack args become stores relative to SP;
//   4. resolve the callee (GOT-relative wrapper for PIC, plain target
//      symbol for static), loading its address into $t9 for PIC or
//      indirect calls;
//   5. glue the argument copies, emit MipsISD::JmpLink with a register
//      mask, close with CALLSEQ_END, and copy results out via
//      LowerCallResult.
2878 MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
2879 SmallVectorImpl<SDValue> &InVals) const {
2880 SelectionDAG &DAG = CLI.DAG;
2881 DebugLoc &dl = CLI.DL;
2882 SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
2883 SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
2884 SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
2885 SDValue Chain = CLI.Chain;
2886 SDValue Callee = CLI.Callee;
2887 bool &isTailCall = CLI.IsTailCall;
2888 CallingConv::ID CallConv = CLI.CallConv;
2889 bool isVarArg = CLI.IsVarArg;
2891 // MIPs target does not yet support tail call optimization.
2894 MachineFunction &MF = DAG.getMachineFunction();
2895 MachineFrameInfo *MFI = MF.getFrameInfo();
2896 const TargetFrameLowering *TFL = MF.getTarget().getFrameLowering();
2897 bool IsPIC = getTargetMachine().getRelocationModel() == Reloc::PIC_;
2899 // Analyze operands of the call, assigning locations to each operand.
2900 SmallVector<CCValAssign, 16> ArgLocs;
2901 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
2902 getTargetMachine(), ArgLocs, *DAG.getContext());
// Pick the convention: FastCC, O32, N32/N64 (via the helper that
// distinguishes fixed/vararg operands), or the generic table.
2904 if (CallConv == CallingConv::Fast)
2905 CCInfo.AnalyzeCallOperands(Outs, CC_Mips_FastCC);
2907 CCInfo.AnalyzeCallOperands(Outs, CC_MipsO32);
2909 AnalyzeMips64CallOperands(CCInfo, Outs);
2911 CCInfo.AnalyzeCallOperands(Outs, CC_Mips);
2913 // Get a count of how many bytes are to be pushed on the stack.
2914 unsigned NextStackOffset = CCInfo.getNextStackOffset();
2915 unsigned StackAlignment = TFL->getStackAlignment();
2916 NextStackOffset = RoundUpToAlignment(NextStackOffset, StackAlignment);
2918 // Update size of the maximum argument space.
2919 // For O32, a minimum of four words (16 bytes) of argument space is
2921 if (IsO32 && (CallConv != CallingConv::Fast))
2922 NextStackOffset = std::max(NextStackOffset, (unsigned)16);
2924 // Chain is the output chain of the last Load/Store or CopyToReg node.
2925 // ByValChain is the output chain of the last Memcpy node created for copying
2926 // byval arguments to the stack.
2927 SDValue NextStackOffsetVal = DAG.getIntPtrConstant(NextStackOffset, true);
2928 Chain = DAG.getCALLSEQ_START(Chain, NextStackOffsetVal);
2930 SDValue StackPtr = DAG.getCopyFromReg(Chain, dl,
2931 IsN64 ? Mips::SP_64 : Mips::SP,
2934 // With EABI is it possible to have 16 args on registers.
2935 SmallVector<std::pair<unsigned, SDValue>, 16> RegsToPass;
2936 SmallVector<SDValue, 8> MemOpChains;
2938 // Walk the register/memloc assignments, inserting copies/loads.
2939 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2940 SDValue Arg = OutVals[i];
2941 CCValAssign &VA = ArgLocs[i];
2942 MVT ValVT = VA.getValVT(), LocVT = VA.getLocVT();
2943 ISD::ArgFlagsTy Flags = Outs[i].Flags;
// Byval aggregates are handled entirely by the ABI-specific helpers.
2946 if (Flags.isByVal()) {
2947 assert(Flags.getByValSize() &&
2948 "ByVal args of size 0 should have been ignored by front-end.");
2950 WriteByValArg(Chain, dl, RegsToPass, MemOpChains, StackPtr,
2951 MFI, DAG, Arg, VA, Flags, getPointerTy(),
2952 Subtarget->isLittle());
2954 PassByValArg64(Chain, dl, RegsToPass, MemOpChains, StackPtr,
2955 MFI, DAG, Arg, VA, Flags, getPointerTy(),
2956 Subtarget->isLittle());
2960 // Promote the value if needed.
2961 switch (VA.getLocInfo()) {
2962 default: llvm_unreachable("Unknown loc info!");
2963 case CCValAssign::Full:
2964 if (VA.isRegLoc()) {
// FP values passed in integer registers are bitcast; an f64 in a pair
// of i32 registers is split into its halves via ExtractElementF64.
2965 if ((ValVT == MVT::f32 && LocVT == MVT::i32) ||
2966 (ValVT == MVT::f64 && LocVT == MVT::i64))
2967 Arg = DAG.getNode(ISD::BITCAST, dl, LocVT, Arg);
2968 else if (ValVT == MVT::f64 && LocVT == MVT::i32) {
2969 SDValue Lo = DAG.getNode(MipsISD::ExtractElementF64, dl, MVT::i32,
2970 Arg, DAG.getConstant(0, MVT::i32));
2971 SDValue Hi = DAG.getNode(MipsISD::ExtractElementF64, dl, MVT::i32,
2972 Arg, DAG.getConstant(1, MVT::i32));
// On big-endian targets the register pair holds the halves swapped.
2973 if (!Subtarget->isLittle())
2975 unsigned LocRegLo = VA.getLocReg();
2976 unsigned LocRegHigh = getNextIntArgReg(LocRegLo);
2977 RegsToPass.push_back(std::make_pair(LocRegLo, Lo));
2978 RegsToPass.push_back(std::make_pair(LocRegHigh, Hi));
2983 case CCValAssign::SExt:
2984 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, LocVT, Arg);
2986 case CCValAssign::ZExt:
2987 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, LocVT, Arg);
2989 case CCValAssign::AExt:
2990 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, LocVT, Arg);
2994 // Arguments that can be passed on register must be kept at
2995 // RegsToPass vector
2996 if (VA.isRegLoc()) {
2997 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
3001 // Register can't get to this point...
3002 assert(VA.isMemLoc());
3004 // emit ISD::STORE whichs stores the
3005 // parameter value to a stack Location
3006 SDValue PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr,
3007 DAG.getIntPtrConstant(VA.getLocMemOffset()));
3008 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
3009 MachinePointerInfo(), false, false, 0));
3012 // Transform all store nodes into one single node because all store
3013 // nodes are independent of each other.
3014 if (!MemOpChains.empty())
3015 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3016 &MemOpChains[0], MemOpChains.size());
3018 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
3019 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
3020 // node so that legalize doesn't hack it.
3021 unsigned char OpFlag;
3022 bool IsPICCall = (IsN64 || IsPIC); // true if calls are translated to jalr $25
3023 bool GlobalOrExternal = false;
3026 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
// Internal-linkage globals in PIC code use GOT + lo-offset addressing;
// everything else uses a single GOT_CALL (PIC) or absolute reference.
3027 if (IsPICCall && G->getGlobal()->hasInternalLinkage()) {
3028 OpFlag = IsO32 ? MipsII::MO_GOT : MipsII::MO_GOT_PAGE;
3029 unsigned char LoFlag = IsO32 ? MipsII::MO_ABS_LO : MipsII::MO_GOT_OFST;
3030 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, getPointerTy(), 0,
3032 CalleeLo = DAG.getTargetGlobalAddress(G->getGlobal(), dl, getPointerTy(),
3035 OpFlag = IsPICCall ? MipsII::MO_GOT_CALL : MipsII::MO_NO_FLAG;
3036 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl,
3037 getPointerTy(), 0, OpFlag);
3040 GlobalOrExternal = true;
3042 else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
3043 if (IsN64 || (!IsO32 && IsPIC))
3044 OpFlag = MipsII::MO_GOT_DISP;
3045 else if (!IsPIC) // !N64 && static
3046 OpFlag = MipsII::MO_NO_FLAG;
3048 OpFlag = MipsII::MO_GOT_CALL;
3049 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(),
3051 GlobalOrExternal = true;
3056 // Create nodes that load address of callee and copy it to T9
3058 if (GlobalOrExternal) {
3059 // Load callee address
3060 Callee = DAG.getNode(MipsISD::Wrapper, dl, getPointerTy(),
3061 GetGlobalReg(DAG, getPointerTy()), Callee);
3062 SDValue LoadValue = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
3063 Callee, MachinePointerInfo::getGOT(),
3064 false, false, false, 0);
3066 // Use GOT+LO if callee has internal linkage.
3067 if (CalleeLo.getNode()) {
3068 SDValue Lo = DAG.getNode(MipsISD::Lo, dl, getPointerTy(), CalleeLo);
3069 Callee = DAG.getNode(ISD::ADD, dl, getPointerTy(), LoadValue, Lo);
3075 // T9 register operand.
3078 // T9 should contain the address of the callee function if
3079 // -reloction-model=pic or it is an indirect call.
3080 if (IsPICCall || !GlobalOrExternal) {
3082 unsigned T9Reg = IsN64 ? Mips::T9_64 : Mips::T9;
3083 Chain = DAG.getCopyToReg(Chain, dl, T9Reg, Callee, SDValue(0, 0));
3084 InFlag = Chain.getValue(1);
// In MIPS16 mode T9 is passed as an extra operand; otherwise the call
// itself targets $t9 (jalr $25).
3086 if (Subtarget->inMips16Mode())
3087 T9 = DAG.getRegister(T9Reg, getPointerTy());
3089 Callee = DAG.getRegister(T9Reg, getPointerTy());
3092 // Insert node "GP copy globalreg" before call to function.
3093 // Lazy-binding stubs require GP to point to the GOT.
3095 unsigned GPReg = IsN64 ? Mips::GP_64 : Mips::GP;
3096 EVT Ty = IsN64 ? MVT::i64 : MVT::i32;
3097 RegsToPass.push_back(std::make_pair(GPReg, GetGlobalReg(DAG, Ty)));
3100 // Build a sequence of copy-to-reg nodes chained together with token
3101 // chain and flag operands which copy the outgoing args into registers.
3102 // The InFlag in necessary since all emitted instructions must be
3104 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
3105 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
3106 RegsToPass[i].second, InFlag);
3107 InFlag = Chain.getValue(1);
3110 // MipsJmpLink = #chain, #target_address, #opt_in_flags...
3111 // = Chain, Callee, Reg#1, Reg#2, ...
3113 // Returns a chain & a flag for retval copy to use.
3114 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
3115 SmallVector<SDValue, 8> Ops;
3116 Ops.push_back(Chain);
3117 Ops.push_back(Callee);
3119 // Add argument registers to the end of the list so that they are
3120 // known live into the call.
3121 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
3122 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
3123 RegsToPass[i].second.getValueType()));
3125 // Add T9 register operand.
3129 // Add a register mask operand representing the call-preserved registers.
3130 const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
3131 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
3132 assert(Mask && "Missing call preserved mask for calling convention");
3133 Ops.push_back(DAG.getRegisterMask(Mask));
3135 if (InFlag.getNode())
3136 Ops.push_back(InFlag);
3138 Chain = DAG.getNode(MipsISD::JmpLink, dl, NodeTys, &Ops[0], Ops.size());
3139 InFlag = Chain.getValue(1);
3141 // Create the CALLSEQ_END node.
3142 Chain = DAG.getCALLSEQ_END(Chain, NextStackOffsetVal,
3143 DAG.getIntPtrConstant(0, true), InFlag);
3144 InFlag = Chain.getValue(1);
3146 // Handle result values, copying them out of physregs into vregs that we
3148 return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
3149 Ins, dl, DAG, InVals);
3152 /// LowerCallResult - Lower the result values of a call into the
3153 /// appropriate copies out of appropriate physical registers.
//
// Runs RetCC_Mips over the expected results, then emits a glued chain of
// CopyFromReg nodes — one per return location — appending each value to
// InVals. Returns the updated chain.
3155 MipsTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
3156 CallingConv::ID CallConv, bool isVarArg,
3157 const SmallVectorImpl<ISD::InputArg> &Ins,
3158 DebugLoc dl, SelectionDAG &DAG,
3159 SmallVectorImpl<SDValue> &InVals) const {
3160 // Assign locations to each value returned by this call.
3161 SmallVector<CCValAssign, 16> RVLocs;
3162 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
3163 getTargetMachine(), RVLocs, *DAG.getContext());
3165 CCInfo.AnalyzeCallResult(Ins, RetCC_Mips);
3167 // Copy all of the result registers out of their specified physreg.
3168 for (unsigned i = 0; i != RVLocs.size(); ++i) {
// CopyFromReg produces (value, chain, glue); take the chain here and
// thread the glue into the next copy so the sequence stays contiguous.
3169 Chain = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
3170 RVLocs[i].getValVT(), InFlag).getValue(1);
3171 InFlag = Chain.getValue(2);
3172 InVals.push_back(Chain.getValue(0));
3178 //===----------------------------------------------------------------------===//
3179 // Formal Arguments Calling Convention Implementation
3180 //===----------------------------------------------------------------------===//
// Spill the portion of an incoming O32 byval argument that arrived in
// registers ($a0-$a3) to its frame object. NumWords is the struct size
// in 32-bit words; only the words whose ABI slot falls within the four
// register slots are copied — the rest already live in the caller's
// stack frame. Stores are collected in OutChains.
3181 static void ReadByValArg(MachineFunction &MF, SDValue Chain, DebugLoc dl,
3182 std::vector<SDValue> &OutChains,
3183 SelectionDAG &DAG, unsigned NumWords, SDValue FIN,
3184 const CCValAssign &VA, const ISD::ArgFlagsTy &Flags,
3185 const Argument *FuncArg) {
3186 unsigned LocMem = VA.getLocMemOffset();
3187 unsigned FirstWord = LocMem / 4;
3189 // copy register A0 - A3 to frame object
3190 for (unsigned i = 0; i < NumWords; ++i) {
3191 unsigned CurWord = FirstWord + i;
// Words past the fourth slot were passed on the stack, not registers.
3192 if (CurWord >= O32IntRegsSize)
3195 unsigned SrcReg = O32IntRegs[CurWord];
3196 unsigned Reg = AddLiveIn(MF, SrcReg, &Mips::CPURegsRegClass);
3197 SDValue StorePtr = DAG.getNode(ISD::ADD, dl, MVT::i32, FIN,
3198 DAG.getConstant(i * 4, MVT::i32));
3199 SDValue Store = DAG.getStore(Chain, dl, DAG.getRegister(Reg, MVT::i32),
3200 StorePtr, MachinePointerInfo(FuncArg, i * 4),
3202 OutChains.push_back(Store);
3206 // Create frame object on stack and copy registers used for byval passing to it.
//
// N32/N64 counterpart of ReadByValArg: creates a fixed frame object
// covering the incoming byval struct (rounded up to whole doublewords),
// pushes its frame-index SDValue into InVals, and stores any 64-bit
// argument registers that carried part of the struct into it. Returns
// the frame index of the created object (the return statement lies past
// the lines shown here).
3208 CopyMips64ByValRegs(MachineFunction &MF, SDValue Chain, DebugLoc dl,
3209 std::vector<SDValue> &OutChains, SelectionDAG &DAG,
3210 const CCValAssign &VA, const ISD::ArgFlagsTy &Flags,
3211 MachineFrameInfo *MFI, bool IsRegLoc,
3212 SmallVectorImpl<SDValue> &InVals, MipsFunctionInfo *MipsFI,
3213 EVT PtrTy, const Argument *FuncArg) {
// Default Reg to "no registers used"; overwritten below if the first
// piece of the struct was assigned to a register.
3214 const uint16_t *Reg = Mips64IntRegs + 8;
3215 int FOOffset; // Frame object offset from virtual frame pointer.
3218 Reg = std::find(Mips64IntRegs, Mips64IntRegs + 8, VA.getLocReg());
// Register-resident pieces map into the 64-byte (8 x 8) register save
// area that sits below the virtual frame pointer.
3219 FOOffset = (Reg - Mips64IntRegs) * 8 - 8 * 8;
3222 FOOffset = VA.getLocMemOffset();
3224 // Create frame object.
3225 unsigned NumRegs = (Flags.getByValSize() + 7) / 8;
3226 unsigned LastFI = MFI->CreateFixedObject(NumRegs * 8, FOOffset, true);
3227 SDValue FIN = DAG.getFrameIndex(LastFI, PtrTy);
3228 InVals.push_back(FIN);
3230 // Copy arg registers.
3231 for (unsigned I = 0; (Reg != Mips64IntRegs + 8) && (I < NumRegs);
3233 unsigned VReg = AddLiveIn(MF, *Reg, &Mips::CPU64RegsRegClass);
3234 SDValue StorePtr = DAG.getNode(ISD::ADD, dl, PtrTy, FIN,
3235 DAG.getConstant(I * 8, PtrTy));
3236 SDValue Store = DAG.getStore(Chain, dl, DAG.getRegister(VReg, MVT::i64),
3237 StorePtr, MachinePointerInfo(FuncArg, I * 8),
3239 OutChains.push_back(Store);
3245 /// LowerFormalArguments - transform physical registers into virtual registers
3246 /// and generate load operations for arguments places on the stack.
//
// Mirrors LowerCall on the callee side: analyzes incoming args with the
// matching calling convention, then for each location either (a) routes
// byval structs through ReadByValArg / CopyMips64ByValRegs, (b) copies a
// register argument into a fresh virtual register (undoing promotion and
// reassembling f64 values split across i32 pairs), or (c) creates a
// fixed frame object and load for a stack argument. Afterwards it wires
// up sret handling and, for vararg functions, saves the unused argument
// registers so VASTART can find them.
3248 MipsTargetLowering::LowerFormalArguments(SDValue Chain,
3249 CallingConv::ID CallConv,
3251 const SmallVectorImpl<ISD::InputArg> &Ins,
3252 DebugLoc dl, SelectionDAG &DAG,
3253 SmallVectorImpl<SDValue> &InVals)
3255 MachineFunction &MF = DAG.getMachineFunction();
3256 MachineFrameInfo *MFI = MF.getFrameInfo();
3257 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
3259 MipsFI->setVarArgsFrameIndex(0);
3261 // Used with vargs to acumulate store chains.
3262 std::vector<SDValue> OutChains;
3264 // Assign locations to all of the incoming arguments.
3265 SmallVector<CCValAssign, 16> ArgLocs;
3266 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
3267 getTargetMachine(), ArgLocs, *DAG.getContext());
// Convention selection matches LowerCall so caller and callee agree.
3269 if (CallConv == CallingConv::Fast)
3270 CCInfo.AnalyzeFormalArguments(Ins, CC_Mips_FastCC);
3272 CCInfo.AnalyzeFormalArguments(Ins, CC_MipsO32);
3274 CCInfo.AnalyzeFormalArguments(Ins, CC_Mips);
3276 Function::const_arg_iterator FuncArg =
3277 DAG.getMachineFunction().getFunction()->arg_begin();
3278 int LastFI = 0;// MipsFI->LastInArgFI is 0 at the entry of this function.
3280 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++FuncArg) {
3281 CCValAssign &VA = ArgLocs[i];
3282 EVT ValVT = VA.getValVT();
3283 ISD::ArgFlagsTy Flags = Ins[i].Flags;
3284 bool IsRegLoc = VA.isRegLoc();
3286 if (Flags.isByVal()) {
3287 assert(Flags.getByValSize() &&
3288 "ByVal args of size 0 should have been ignored by front-end.");
// O32 path: create the frame object here and let ReadByValArg spill
// the register-resident words into it.
3290 unsigned NumWords = (Flags.getByValSize() + 3) / 4;
3291 LastFI = MFI->CreateFixedObject(NumWords * 4, VA.getLocMemOffset(),
3293 SDValue FIN = DAG.getFrameIndex(LastFI, getPointerTy());
3294 InVals.push_back(FIN);
3295 ReadByValArg(MF, Chain, dl, OutChains, DAG, NumWords, FIN, VA, Flags,
3298 LastFI = CopyMips64ByValRegs(MF, Chain, dl, OutChains, DAG, VA, Flags,
3299 MFI, IsRegLoc, InVals, MipsFI,
3300 getPointerTy(), &*FuncArg);
3304 // Arguments stored on registers
3306 EVT RegVT = VA.getLocVT();
3307 unsigned ArgReg = VA.getLocReg();
3308 const TargetRegisterClass *RC;
3310 if (RegVT == MVT::i32)
3311 RC = &Mips::CPURegsRegClass;
3312 else if (RegVT == MVT::i64)
3313 RC = &Mips::CPU64RegsRegClass;
3314 else if (RegVT == MVT::f32)
3315 RC = &Mips::FGR32RegClass;
3316 else if (RegVT == MVT::f64)
3317 RC = HasMips64 ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
3319 llvm_unreachable("RegVT not supported by FormalArguments Lowering");
3321 // Transform the arguments stored on
3322 // physical registers into virtual ones
3323 unsigned Reg = AddLiveIn(DAG.getMachineFunction(), ArgReg, RC);
3324 SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
3326 // If this is an 8 or 16-bit value, it has been passed promoted
3327 // to 32 bits. Insert an assert[sz]ext to capture this, then
3328 // truncate to the right size.
3329 if (VA.getLocInfo() != CCValAssign::Full) {
3330 unsigned Opcode = 0;
3331 if (VA.getLocInfo() == CCValAssign::SExt)
3332 Opcode = ISD::AssertSext;
3333 else if (VA.getLocInfo() == CCValAssign::ZExt)
3334 Opcode = ISD::AssertZext;
3336 ArgValue = DAG.getNode(Opcode, dl, RegVT, ArgValue,
3337 DAG.getValueType(ValVT));
3338 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, ValVT, ArgValue);
3341 // Handle floating point arguments passed in integer registers.
3342 if ((RegVT == MVT::i32 && ValVT == MVT::f32) ||
3343 (RegVT == MVT::i64 && ValVT == MVT::f64))
3344 ArgValue = DAG.getNode(ISD::BITCAST, dl, ValVT, ArgValue);
3345 else if (IsO32 && RegVT == MVT::i32 && ValVT == MVT::f64) {
// An f64 in an O32 i32 register pair: grab the partner register and
// rebuild the double (halves swapped on big-endian).
3346 unsigned Reg2 = AddLiveIn(DAG.getMachineFunction(),
3347 getNextIntArgReg(ArgReg), RC);
3348 SDValue ArgValue2 = DAG.getCopyFromReg(Chain, dl, Reg2, RegVT);
3349 if (!Subtarget->isLittle())
3350 std::swap(ArgValue, ArgValue2);
3351 ArgValue = DAG.getNode(MipsISD::BuildPairF64, dl, MVT::f64,
3352 ArgValue, ArgValue2);
3355 InVals.push_back(ArgValue);
3356 } else { // VA.isRegLoc()
3359 assert(VA.isMemLoc());
3361 // The stack pointer offset is relative to the caller stack frame.
3362 LastFI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
3363 VA.getLocMemOffset(), true);
3365 // Create load nodes to retrieve arguments from the stack
3366 SDValue FIN = DAG.getFrameIndex(LastFI, getPointerTy());
3367 InVals.push_back(DAG.getLoad(ValVT, dl, Chain, FIN,
3368 MachinePointerInfo::getFixedStack(LastFI),
3369 false, false, false, 0));
3373 // The mips ABIs for returning structs by value requires that we copy
3374 // the sret argument into $v0 for the return. Save the argument into
3375 // a virtual register so that we can access it from the return points.
3376 if (DAG.getMachineFunction().getFunction()->hasStructRetAttr()) {
3377 unsigned Reg = MipsFI->getSRetReturnReg();
3379 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i32));
3380 MipsFI->setSRetReturnReg(Reg);
3382 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
3383 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
// Vararg prologue: figure out which argument registers were not used
// for named parameters and spill them to a save area.
3387 unsigned NumOfRegs = IsO32 ? 4 : 8;
3388 const uint16_t *ArgRegs = IsO32 ? O32IntRegs : Mips64IntRegs;
3389 unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs, NumOfRegs);
3390 int FirstRegSlotOffset = IsO32 ? 0 : -64 ; // offset of $a0's slot.
3391 const TargetRegisterClass *RC = IsO32 ?
3392 (const TargetRegisterClass*)&Mips::CPURegsRegClass :
3393 (const TargetRegisterClass*)&Mips::CPU64RegsRegClass;
3394 unsigned RegSize = RC->getSize();
3395 int RegSlotOffset = FirstRegSlotOffset + Idx * RegSize;
3397 // Offset of the first variable argument from stack pointer.
3398 int FirstVaArgOffset;
3400 if (IsO32 || (Idx == NumOfRegs)) {
3402 (CCInfo.getNextStackOffset() + RegSize - 1) / RegSize * RegSize;
3404 FirstVaArgOffset = RegSlotOffset;
3406 // Record the frame index of the first variable argument
3407 // which is a value necessary to VASTART.
3408 LastFI = MFI->CreateFixedObject(RegSize, FirstVaArgOffset, true);
3409 MipsFI->setVarArgsFrameIndex(LastFI);
3411 // Copy the integer registers that have not been used for argument passing
3412 // to the argument register save area. For O32, the save area is allocated
3413 // in the caller's stack frame, while for N32/64, it is allocated in the
3414 // callee's stack frame.
3415 for (int StackOffset = RegSlotOffset;
3416 Idx < NumOfRegs; ++Idx, StackOffset += RegSize) {
3417 unsigned Reg = AddLiveIn(DAG.getMachineFunction(), ArgRegs[Idx], RC);
3418 SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
3419 MVT::getIntegerVT(RegSize * 8));
3420 LastFI = MFI->CreateFixedObject(RegSize, StackOffset, true);
3421 SDValue PtrOff = DAG.getFrameIndex(LastFI, getPointerTy());
3422 OutChains.push_back(DAG.getStore(Chain, dl, ArgValue, PtrOff,
3423 MachinePointerInfo(), false, false, 0));
3427 MipsFI->setLastInArgFI(LastFI);
3429 // All stores are grouped in one node to allow the matching between
3430 // the size of Ins and InVals. This only happens when on varg functions
3431 if (!OutChains.empty()) {
3432 OutChains.push_back(Chain);
3433 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3434 &OutChains[0], OutChains.size());
3440 //===----------------------------------------------------------------------===//
3441 // Return Value Calling Convention Implementation
3442 //===----------------------------------------------------------------------===//
// Return true if the function's return values can all be lowered with
// RetCC_Mips (i.e. fit in the ABI's return registers); otherwise the
// caller will fall back to sret-demotion.
3445 MipsTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
3446 MachineFunction &MF, bool isVarArg,
3447 const SmallVectorImpl<ISD::OutputArg> &Outs,
3448 LLVMContext &Context) const {
3449 SmallVector<CCValAssign, 16> RVLocs;
3450 CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(),
3452 return CCInfo.CheckReturn(Outs, RetCC_Mips);
// Lower a function return: assign each return value to its ABI location
// with RetCC_Mips, mark those registers live-out, copy the values into
// them (glued so they stay together), handle the sret-in-$v0 rule, and
// finish with a MipsISD::Ret ("jr $ra") node.
3456 MipsTargetLowering::LowerReturn(SDValue Chain,
3457 CallingConv::ID CallConv, bool isVarArg,
3458 const SmallVectorImpl<ISD::OutputArg> &Outs,
3459 const SmallVectorImpl<SDValue> &OutVals,
3460 DebugLoc dl, SelectionDAG &DAG) const {
3462 // CCValAssign - represent the assignment of
3463 // the return value to a location
3464 SmallVector<CCValAssign, 16> RVLocs;
3466 // CCState - Info about the registers and stack slot.
3467 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
3468 getTargetMachine(), RVLocs, *DAG.getContext());
3470 // Analize return values.
3471 CCInfo.AnalyzeReturn(Outs, RetCC_Mips);
3473 // If this is the first return lowered for this function, add
3474 // the regs to the liveout set for the function.
3475 if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
3476 for (unsigned i = 0; i != RVLocs.size(); ++i)
3477 if (RVLocs[i].isRegLoc())
3478 DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
3483 // Copy the result values into the output registers.
3484 for (unsigned i = 0; i != RVLocs.size(); ++i) {
3485 CCValAssign &VA = RVLocs[i];
3486 assert(VA.isRegLoc() && "Can only return in registers!");
3488 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);
3490 // guarantee that all emitted copies are
3491 // stuck together, avoiding something bad
3492 Flag = Chain.getValue(1);
3495 // The mips ABIs for returning structs by value requires that we copy
3496 // the sret argument into $v0 for the return. We saved the argument into
3497 // a virtual register in the entry block, so now we copy the value out
3499 if (DAG.getMachineFunction().getFunction()->hasStructRetAttr()) {
3500 MachineFunction &MF = DAG.getMachineFunction();
3501 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
3502 unsigned Reg = MipsFI->getSRetReturnReg();
// LowerFormalArguments is responsible for creating this register; a
// missing one here means the two stages disagree.
3505 llvm_unreachable("sret virtual register not created in the entry block");
3506 SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy());
3508 Chain = DAG.getCopyToReg(Chain, dl, Mips::V0, Val, Flag);
3509 Flag = Chain.getValue(1);
3512 // Return on Mips is always a "jr $ra"
// Attach the glue only if copies were emitted above.
3514 return DAG.getNode(MipsISD::Ret, dl, MVT::Other, Chain, Flag);
3517 return DAG.getNode(MipsISD::Ret, dl, MVT::Other, Chain);
3520 //===----------------------------------------------------------------------===//
3521 // Mips Inline Assembly Support
3522 //===----------------------------------------------------------------------===//
3524 /// getConstraintType - Given a constraint letter, return the type of
3525 /// constraint it is for this target.
//
// The Mips-specific single-letter constraints listed below are all
// register-class constraints; anything else is delegated to the generic
// TargetLowering implementation.
3526 MipsTargetLowering::ConstraintType MipsTargetLowering::
3527 getConstraintType(const std::string &Constraint) const
3529 // Mips specific constrainy
3530 // GCC config/mips/constraints.md
3532 // 'd' : An address register. Equivalent to r
3533 // unless generating MIPS16 code.
3534 // 'y' : Equivalent to r; retained for
3535 // backwards compatibility.
3536 // 'c' : A register suitable for use in an indirect
3537 // jump. This will always be $25 for -mabicalls.
3538 // 'l' : The lo register. 1 word storage.
3539 // 'x' : The hilo register pair. Double word storage.
3540 if (Constraint.size() == 1) {
3541 switch (Constraint[0]) {
3549 return C_RegisterClass;
3552 return TargetLowering::getConstraintType(Constraint);
3555 /// Examine constraint type and operand type and determine a weight value.
3556 /// This object must already have been set up with the operand type
3557 /// and the current alternative constraint selected.
//
// Weights steer inline-asm constraint selection: generic register
// classes score CW_Register when the operand type matches, the
// register-specific constraints ('c'/'l'/'x') score CW_SpecificReg, and
// the immediate-range letters score CW_Constant only for constant
// operands.
3558 TargetLowering::ConstraintWeight
3559 MipsTargetLowering::getSingleConstraintMatchWeight(
3560 AsmOperandInfo &info, const char *constraint) const {
3561 ConstraintWeight weight = CW_Invalid;
3562 Value *CallOperandVal = info.CallOperandVal;
3563 // If we don't have a value, we can't do a match,
3564 // but allow it at the lowest weight.
3565 if (CallOperandVal == NULL)
3567 Type *type = CallOperandVal->getType();
3568 // Look at the constraint type.
3569 switch (*constraint) {
// Unrecognized letters fall back to the target-independent weighting.
3571 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
3575 if (type->isIntegerTy())
3576 weight = CW_Register;
3579 if (type->isFloatTy())
3580 weight = CW_Register;
3582 case 'c': // $25 for indirect jumps
3583 case 'l': // lo register
3584 case 'x': // hilo register pair
3585 if (type->isIntegerTy())
3586 weight = CW_SpecificReg;
3588 case 'I': // signed 16 bit immediate
3589 case 'J': // integer zero
3590 case 'K': // unsigned 16 bit immediate
3591 case 'L': // signed 32 bit immediate where lower 16 bits are 0
3592 case 'N': // immediate in the range of -65535 to -1 (inclusive)
3593 case 'O': // signed 15 bit immediate (+- 16383)
3594 case 'P': // immediate in the range of 65535 to 1 (inclusive)
3595 if (isa<ConstantInt>(CallOperandVal))
3596 weight = CW_Constant;
3602 /// Given a register class constraint, like 'r', if this corresponds directly
3603 /// to an LLVM register class, return a register of 0 and the register class
3605 std::pair<unsigned, const TargetRegisterClass*> MipsTargetLowering::
3606 getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const
3608 if (Constraint.size() == 1) {
3609 switch (Constraint[0]) {
3610 case 'd': // Address register. Same as 'r' unless generating MIPS16 code.
3611 case 'y': // Same as 'r'. Exists for compatibility.
3613 if (VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8) {
3614 if (Subtarget->inMips16Mode())
3615 return std::make_pair(0U, &Mips::CPU16RegsRegClass);
3616 return std::make_pair(0U, &Mips::CPURegsRegClass);
3618 if (VT == MVT::i64 && !HasMips64)
3619 return std::make_pair(0U, &Mips::CPURegsRegClass);
3620 if (VT == MVT::i64 && HasMips64)
3621 return std::make_pair(0U, &Mips::CPU64RegsRegClass);
3622 // This will generate an error message
3623 return std::make_pair(0u, static_cast<const TargetRegisterClass*>(0));
3626 return std::make_pair(0U, &Mips::FGR32RegClass);
3627 if ((VT == MVT::f64) && (!Subtarget->isSingleFloat())) {
3628 if (Subtarget->isFP64bit())
3629 return std::make_pair(0U, &Mips::FGR64RegClass);
3630 return std::make_pair(0U, &Mips::AFGR64RegClass);
3633 case 'c': // register suitable for indirect jump
3635 return std::make_pair((unsigned)Mips::T9, &Mips::CPURegsRegClass);
3636 assert(VT == MVT::i64 && "Unexpected type.");
3637 return std::make_pair((unsigned)Mips::T9_64, &Mips::CPU64RegsRegClass);
3638 case 'l': // register suitable for indirect jump
3640 return std::make_pair((unsigned)Mips::LO, &Mips::HILORegClass);
3641 return std::make_pair((unsigned)Mips::LO64, &Mips::HILO64RegClass);
3642 case 'x': // register suitable for indirect jump
3643 // Fixme: Not triggering the use of both hi and low
3644 // This will generate an error message
3645 return std::make_pair(0u, static_cast<const TargetRegisterClass*>(0));
3648 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
3651 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
3652 /// vector. If it is invalid, don't add anything to Ops.
3653 void MipsTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
3654 std::string &Constraint,
3655 std::vector<SDValue>&Ops,
3656 SelectionDAG &DAG) const {
3657 SDValue Result(0, 0);
3659 // Only support length 1 constraints for now.
3660 if (Constraint.length() > 1) return;
3662 char ConstraintLetter = Constraint[0];
3663 switch (ConstraintLetter) {
3664 default: break; // This will fall through to the generic implementation
3665 case 'I': // Signed 16 bit constant
3666 // If this fails, the parent routine will give an error
3667 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
3668 EVT Type = Op.getValueType();
3669 int64_t Val = C->getSExtValue();
3670 if (isInt<16>(Val)) {
3671 Result = DAG.getTargetConstant(Val, Type);
3676 case 'J': // integer zero
3677 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
3678 EVT Type = Op.getValueType();
3679 int64_t Val = C->getZExtValue();
3681 Result = DAG.getTargetConstant(0, Type);
3686 case 'K': // unsigned 16 bit immediate
3687 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
3688 EVT Type = Op.getValueType();
3689 uint64_t Val = (uint64_t)C->getZExtValue();
3690 if (isUInt<16>(Val)) {
3691 Result = DAG.getTargetConstant(Val, Type);
3696 case 'L': // signed 32 bit immediate where lower 16 bits are 0
3697 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
3698 EVT Type = Op.getValueType();
3699 int64_t Val = C->getSExtValue();
3700 if ((isInt<32>(Val)) && ((Val & 0xffff) == 0)){
3701 Result = DAG.getTargetConstant(Val, Type);
3706 case 'N': // immediate in the range of -65535 to -1 (inclusive)
3707 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
3708 EVT Type = Op.getValueType();
3709 int64_t Val = C->getSExtValue();
3710 if ((Val >= -65535) && (Val <= -1)) {
3711 Result = DAG.getTargetConstant(Val, Type);
3716 case 'O': // signed 15 bit immediate
3717 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
3718 EVT Type = Op.getValueType();
3719 int64_t Val = C->getSExtValue();
3720 if ((isInt<15>(Val))) {
3721 Result = DAG.getTargetConstant(Val, Type);
3726 case 'P': // immediate in the range of 1 to 65535 (inclusive)
3727 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
3728 EVT Type = Op.getValueType();
3729 int64_t Val = C->getSExtValue();
3730 if ((Val <= 65535) && (Val >= 1)) {
3731 Result = DAG.getTargetConstant(Val, Type);
3738 if (Result.getNode()) {
3739 Ops.push_back(Result);
3743 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
3747 MipsTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
3748 // The Mips target isn't yet aware of offsets.
3752 EVT MipsTargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
3753 unsigned SrcAlign, bool IsZeroVal,
3755 MachineFunction &MF) const {
3756 if (Subtarget->hasMips64())
3762 bool MipsTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
3763 if (VT != MVT::f32 && VT != MVT::f64)
3765 if (Imm.isNegZero())
3767 return Imm.isZero();
3770 unsigned MipsTargetLowering::getJumpTableEncoding() const {
3772 return MachineJumpTableInfo::EK_GPRel64BlockAddress;
3774 return TargetLowering::getJumpTableEncoding();