X-Git-Url: http://plrg.eecs.uci.edu/git/?p=oota-llvm.git;a=blobdiff_plain;f=lib%2FTarget%2FMips%2FMipsISelLowering.cpp;h=bc0924eac7fa7606b28fc5b9c9ca98b9dde85f18;hp=bfecb965521e71f3b1b21bd2adbddf71b9c46ebd;hb=cfe761c9e6cf97dc804725df08247b9402085f84;hpb=7d54c5b0f2d4923c58ba8cf70d0526cc24783fc6

diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp
index bfecb965521..bc0924eac7f 100644
--- a/lib/Target/Mips/MipsISelLowering.cpp
+++ b/lib/Target/Mips/MipsISelLowering.cpp
@@ -24,6 +24,7 @@
 #include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineJumpTableInfo.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/SelectionDAGISel.h"
 #include "llvm/CodeGen/ValueTypes.h"
@@ -70,6 +71,106 @@ static const MCPhysReg Mips64DPRegs[8] = {
   Mips::D16_64, Mips::D17_64, Mips::D18_64, Mips::D19_64
 };
 
+static bool originalTypeIsF128(const Type *Ty, const SDNode *CallNode);
+
+namespace {
+class MipsCCState : public CCState {
+private:
+  /// Identify lowered values that originated from f128 arguments and record
+  /// this for use by RetCC_MipsN.
+  void
+  PreAnalyzeCallResultForF128(const SmallVectorImpl &Ins,
+                              const TargetLowering::CallLoweringInfo &CLI) {
+    for (unsigned i = 0; i < Ins.size(); ++i)
+      OriginalArgWasF128.push_back(
+          originalTypeIsF128(CLI.RetTy, CLI.Callee.getNode()));
+  }
+
+  /// Identify lowered values that originated from f128 arguments and record
+  /// this for use by RetCC_MipsN.
+  void PreAnalyzeReturnForF128(const SmallVectorImpl &Outs) {
+    const MachineFunction &MF = getMachineFunction();
+    for (unsigned i = 0; i < Outs.size(); ++i)
+      OriginalArgWasF128.push_back(
+          originalTypeIsF128(MF.getFunction()->getReturnType(), nullptr));
+  }
+
+  /// Identify lowered values that originated from f128 arguments and record
+  /// this.
+  void PreAnalyzeCallOperandsForF128(
+      const SmallVectorImpl &Outs,
+      std::vector &FuncArgs, SDNode *CallNode) {
+    for (unsigned i = 0; i < Outs.size(); ++i)
+      OriginalArgWasF128.push_back(
+          originalTypeIsF128(FuncArgs[Outs[i].OrigArgIndex].Ty, CallNode));
+  }
+
+  /// Identify lowered values that originated from f128 arguments and record
+  /// this.
+  void
+  PreAnalyzeFormalArgumentsForF128(const SmallVectorImpl &Ins) {
+    const MachineFunction &MF = getMachineFunction();
+    for (unsigned i = 0; i < Ins.size(); ++i) {
+      Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin();
+      std::advance(FuncArg, Ins[i].OrigArgIndex);
+
+      OriginalArgWasF128.push_back(
+          originalTypeIsF128(FuncArg->getType(), nullptr));
+    }
+  }
+
+  /// Records whether the value has been lowered from an f128.
+  SmallVector OriginalArgWasF128;
+
+public:
+  // FIXME: Remove this from a public interface ASAP. It's a temporary trap door
+  // to allow analyzeCallOperands to be removed incrementally.
+  void PreAnalyzeCallOperandsForF128_(
+      const SmallVectorImpl &Outs,
+      std::vector &FuncArgs, SDNode *CallNode) {
+    PreAnalyzeCallOperandsForF128(Outs, FuncArgs, CallNode);
+  }
+  // FIXME: Remove this from a public interface ASAP. It's a temporary trap door
+  // to allow analyzeFormalArguments to be removed incrementally.
+  void
+  PreAnalyzeFormalArgumentsForF128_(const SmallVectorImpl &Ins) {
+    PreAnalyzeFormalArgumentsForF128(Ins);
+  }
+  // FIXME: Remove this from a public interface ASAP. It's a temporary trap door
+  // to clean up after the above functions.
+ void ClearOriginalArgWasF128() { OriginalArgWasF128.clear(); } + + MipsCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF, + SmallVectorImpl &locs, LLVMContext &C) + : CCState(CC, isVarArg, MF, locs, C) {} + + void AnalyzeCallResult(const SmallVectorImpl &Ins, + CCAssignFn Fn, + const TargetLowering::CallLoweringInfo &CLI) { + PreAnalyzeCallResultForF128(Ins, CLI); + CCState::AnalyzeCallResult(Ins, Fn); + OriginalArgWasF128.clear(); + } + + void AnalyzeReturn(const SmallVectorImpl &Outs, + CCAssignFn Fn) { + PreAnalyzeReturnForF128(Outs); + CCState::AnalyzeReturn(Outs, Fn); + OriginalArgWasF128.clear(); + } + + bool CheckReturn(const SmallVectorImpl &ArgsFlags, + CCAssignFn Fn) { + PreAnalyzeReturnForF128(ArgsFlags); + bool Return = CCState::CheckReturn(ArgsFlags, Fn); + OriginalArgWasF128.clear(); + return Return; + } + + bool WasOriginalArgF128(unsigned ValNo) { return OriginalArgWasF128[ValNo]; } +}; +} + // If I is a shifted mask, set the size (Size) and the first bit of the // mask (Pos), and return true. // For example, if I is 0x003ff800, (Pos, Size) = (11, 11). @@ -208,7 +309,7 @@ const char *MipsTargetLowering::getTargetNodeName(unsigned Opcode) const { } } -MipsTargetLowering::MipsTargetLowering(MipsTargetMachine &TM, +MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM, const MipsSubtarget &STI) : TargetLowering(TM, new MipsTargetObjectFile()), Subtarget(STI) { // Mips does not have i1 type, so use i32 for @@ -251,7 +352,6 @@ MipsTargetLowering::MipsTargetLowering(MipsTargetMachine &TM, setOperationAction(ISD::SETCC, MVT::f32, Custom); setOperationAction(ISD::SETCC, MVT::f64, Custom); setOperationAction(ISD::BRCOND, MVT::Other, Custom); - setOperationAction(ISD::VASTART, MVT::Other, Custom); setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom); setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom); @@ -343,7 +443,8 @@ MipsTargetLowering::MipsTargetLowering(MipsTargetMachine &TM, setOperationAction(ISD::EH_RETURN, MVT::Other, Custom); - setOperationAction(ISD::VAARG, MVT::Other, Expand); + setOperationAction(ISD::VASTART, MVT::Other, Custom); + setOperationAction(ISD::VAARG, MVT::Other, Custom); setOperationAction(ISD::VACOPY, MVT::Other, Expand); setOperationAction(ISD::VAEND, MVT::Other, Expand); @@ -392,6 +493,11 @@ MipsTargetLowering::MipsTargetLowering(MipsTargetMachine &TM, setMinFunctionAlignment(Subtarget.isGP64bit() ? 3 : 2); + // The arguments on the stack are defined in terms of 4-byte slots on O32 + // and 8-byte slots on N32/N64. + setMinStackArgumentAlignment( + (Subtarget.isABI_N32() || Subtarget.isABI_N64()) ? 8 : 4); + setStackPointerRegisterToSaveRestore(Subtarget.isABI_N64() ? 
Mips::SP_64 : Mips::SP); @@ -403,7 +509,7 @@ MipsTargetLowering::MipsTargetLowering(MipsTargetMachine &TM, isMicroMips = Subtarget.inMicroMipsMode(); } -const MipsTargetLowering *MipsTargetLowering::create(MipsTargetMachine &TM, +const MipsTargetLowering *MipsTargetLowering::create(const MipsTargetMachine &TM, const MipsSubtarget &STI) { if (STI.inMips16Mode()) return llvm::createMips16TargetLowering(TM, STI); @@ -792,6 +898,7 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const case ISD::SELECT_CC: return lowerSELECT_CC(Op, DAG); case ISD::SETCC: return lowerSETCC(Op, DAG); case ISD::VASTART: return lowerVASTART(Op, DAG); + case ISD::VAARG: return lowerVAARG(Op, DAG); case ISD::FCOPYSIGN: return lowerFCOPYSIGN(Op, DAG); case ISD::FRAMEADDR: return lowerFRAMEADDR(Op, DAG); case ISD::RETURNADDR: return lowerRETURNADDR(Op, DAG); @@ -934,16 +1041,16 @@ MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, case Mips::DIVU: case Mips::MOD: case Mips::MODU: - return insertDivByZeroTrap(MI, *BB, *getTargetMachine().getInstrInfo(), - false); + return insertDivByZeroTrap( + MI, *BB, *getTargetMachine().getSubtargetImpl()->getInstrInfo(), false); case Mips::PseudoDSDIV: case Mips::PseudoDUDIV: case Mips::DDIV: case Mips::DDIVU: case Mips::DMOD: case Mips::DMODU: - return insertDivByZeroTrap(MI, *BB, *getTargetMachine().getInstrInfo(), - true); + return insertDivByZeroTrap( + MI, *BB, *getTargetMachine().getSubtargetImpl()->getInstrInfo(), true); case Mips::SEL_D: return emitSEL_D(MI, BB); } @@ -960,7 +1067,8 @@ MipsTargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, MachineFunction *MF = BB->getParent(); MachineRegisterInfo &RegInfo = MF->getRegInfo(); const TargetRegisterClass *RC = getRegClassFor(MVT::getIntegerVT(Size * 8)); - const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); + const TargetInstrInfo *TII = + getTargetMachine().getSubtargetImpl()->getInstrInfo(); DebugLoc DL = MI->getDebugLoc(); unsigned LL, SC, AND, NOR, ZERO, BEQ; @@ -1043,7 +1151,8 @@ MipsTargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, MachineBasicBlock *MipsTargetLowering::emitSignExtendToI32InReg( MachineInstr *MI, MachineBasicBlock *BB, unsigned Size, unsigned DstReg, unsigned SrcReg) const { - const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); + const TargetInstrInfo *TII = + getTargetMachine().getSubtargetImpl()->getInstrInfo(); DebugLoc DL = MI->getDebugLoc(); if (Subtarget.hasMips32r2() && Size == 1) { @@ -1079,7 +1188,8 @@ MachineBasicBlock *MipsTargetLowering::emitAtomicBinaryPartword( MachineFunction *MF = BB->getParent(); MachineRegisterInfo &RegInfo = MF->getRegInfo(); const TargetRegisterClass *RC = getRegClassFor(MVT::i32); - const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); + const TargetInstrInfo *TII = + getTargetMachine().getSubtargetImpl()->getInstrInfo(); DebugLoc DL = MI->getDebugLoc(); unsigned Dest = MI->getOperand(0).getReg(); @@ -1229,7 +1339,8 @@ MachineBasicBlock * MipsTargetLowering::emitAtomicCmpSwap(MachineInstr *MI, MachineFunction *MF = BB->getParent(); MachineRegisterInfo &RegInfo = MF->getRegInfo(); const TargetRegisterClass *RC = getRegClassFor(MVT::getIntegerVT(Size * 8)); - const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); + const TargetInstrInfo *TII = + getTargetMachine().getSubtargetImpl()->getInstrInfo(); DebugLoc DL = MI->getDebugLoc(); unsigned LL, SC, ZERO, BNE, BEQ; @@ -1311,7 +1422,8 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI, MachineFunction 
*MF = BB->getParent(); MachineRegisterInfo &RegInfo = MF->getRegInfo(); const TargetRegisterClass *RC = getRegClassFor(MVT::i32); - const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); + const TargetInstrInfo *TII = + getTargetMachine().getSubtargetImpl()->getInstrInfo(); DebugLoc DL = MI->getDebugLoc(); unsigned Dest = MI->getOperand(0).getReg(); @@ -1446,8 +1558,10 @@ MipsTargetLowering::emitAtomicCmpSwapPartword(MachineInstr *MI, MachineBasicBlock *MipsTargetLowering::emitSEL_D(MachineInstr *MI, MachineBasicBlock *BB) const { MachineFunction *MF = BB->getParent(); - const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo(); - const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); + const TargetRegisterInfo *TRI = + getTargetMachine().getSubtargetImpl()->getRegisterInfo(); + const TargetInstrInfo *TII = + getTargetMachine().getSubtargetImpl()->getInstrInfo(); MachineRegisterInfo &RegInfo = MF->getRegInfo(); DebugLoc DL = MI->getDebugLoc(); MachineBasicBlock::iterator II(MI); @@ -1755,6 +1869,65 @@ SDValue MipsTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const { MachinePointerInfo(SV), false, false, 0); } +SDValue MipsTargetLowering::lowerVAARG(SDValue Op, SelectionDAG &DAG) const { + SDNode *Node = Op.getNode(); + EVT VT = Node->getValueType(0); + SDValue Chain = Node->getOperand(0); + SDValue VAListPtr = Node->getOperand(1); + unsigned Align = Node->getConstantOperandVal(3); + const Value *SV = cast(Node->getOperand(2))->getValue(); + SDLoc DL(Node); + unsigned ArgSlotSizeInBytes = + (Subtarget.isABI_N32() || Subtarget.isABI_N64()) ? 8 : 4; + + SDValue VAListLoad = DAG.getLoad(getPointerTy(), DL, Chain, VAListPtr, + MachinePointerInfo(SV), false, false, false, + 0); + SDValue VAList = VAListLoad; + + // Re-align the pointer if necessary. + // It should only ever be necessary for 64-bit types on O32 since the minimum + // argument alignment is the same as the maximum type alignment for N32/N64. + // + // FIXME: We currently align too often. The code generator doesn't notice + // when the pointer is still aligned from the last va_arg (or pair of + // va_args for the i64 on O32 case). + if (Align > getMinStackArgumentAlignment()) { + assert(((Align & (Align-1)) == 0) && "Expected Align to be a power of 2"); + + VAList = DAG.getNode(ISD::ADD, DL, VAList.getValueType(), VAList, + DAG.getConstant(Align - 1, + VAList.getValueType())); + + VAList = DAG.getNode(ISD::AND, DL, VAList.getValueType(), VAList, + DAG.getConstant(-(int64_t)Align, + VAList.getValueType())); + } + + // Increment the pointer, VAList, to the next vaarg. + unsigned ArgSizeInBytes = getDataLayout()->getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext())); + SDValue Tmp3 = DAG.getNode(ISD::ADD, DL, VAList.getValueType(), VAList, + DAG.getConstant(RoundUpToAlignment(ArgSizeInBytes, ArgSlotSizeInBytes), + VAList.getValueType())); + // Store the incremented VAList to the legalized pointer + Chain = DAG.getStore(VAListLoad.getValue(1), DL, Tmp3, VAListPtr, + MachinePointerInfo(SV), false, false, 0); + + // In big-endian mode we must adjust the pointer when the load size is smaller + // than the argument slot size. We must also reduce the known alignment to + // match. For example in the N64 ABI, we must add 4 bytes to the offset to get + // the correct half of the slot, and reduce the alignment from 8 (slot + // alignment) down to 4 (type alignment). 
+ if (!Subtarget.isLittle() && ArgSizeInBytes < ArgSlotSizeInBytes) { + unsigned Adjustment = ArgSlotSizeInBytes - ArgSizeInBytes; + VAList = DAG.getNode(ISD::ADD, DL, VAListPtr.getValueType(), VAList, + DAG.getIntPtrConstant(Adjustment)); + } + // Load the actual argument out of the pointer VAList + return DAG.getLoad(VT, DL, Chain, VAList, MachinePointerInfo(), false, false, + false, 0); +} + static SDValue lowerFCOPYSIGN32(SDValue Op, SelectionDAG &DAG, bool HasExtractInsert) { EVT TyX = Op.getOperand(0).getValueType(); @@ -2202,7 +2375,7 @@ SDValue MipsTargetLowering::lowerFP_TO_SINT(SDValue Op, // an argument. Otherwise, passed in A1, A2, A3 and stack. // f64 - Only passed in two aliased f32 registers if no int reg has been used // yet to hold an argument. Otherwise, use A2, A3 and stack. If A1 is -// not used, it must be shadowed. If only A3 is avaiable, shadow it and +// not used, it must be shadowed. If only A3 is available, shadow it and // go to stack. // // For vararg functions, all arguments are passed in A0, A1, A2, A3 and stack. @@ -2334,13 +2507,19 @@ void MipsTargetLowering:: getOpndList(SmallVectorImpl &Ops, std::deque< std::pair > &RegsToPass, bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage, - CallLoweringInfo &CLI, SDValue Callee, SDValue Chain) const { + bool IsCallReloc, CallLoweringInfo &CLI, SDValue Callee, + SDValue Chain) const { // Insert node "GP copy globalreg" before call to function. // // R_MIPS_CALL* operators (emitted when non-internal functions are called // in PIC mode) allow symbols to be resolved via lazy binding. // The lazy binding stub requires GP to point to the GOT. - if (IsPICCall && !InternalLinkage) { + // Note that we don't need GP to point to the GOT for indirect calls + // (when R_MIPS_CALL* is not used for the call) because Mips linker generates + // lazy binding stub for a function only when R_MIPS_CALL* are the only relocs + // used for the function (that is, Mips linker doesn't generate lazy binding + // stub for a function whose address is taken in the program). + if (IsPICCall && !InternalLinkage && IsCallReloc) { unsigned GPReg = Subtarget.isABI_N64() ? Mips::GP_64 : Mips::GP; EVT Ty = Subtarget.isABI_N64() ? MVT::i64 : MVT::i32; RegsToPass.push_back(std::make_pair(GPReg, getGlobalReg(CLI.DAG, Ty))); @@ -2365,7 +2544,8 @@ getOpndList(SmallVectorImpl &Ops, RegsToPass[i].second.getValueType())); // Add a register mask operand representing the call-preserved registers. - const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo(); + const TargetRegisterInfo *TRI = + getTargetMachine().getSubtargetImpl()->getRegisterInfo(); const uint32_t *Mask = TRI->getCallPreservedMask(CLI.CallConv); assert(Mask && "Missing call preserved mask for calling convention"); if (Subtarget.inMips16HardFloat()) { @@ -2401,31 +2581,28 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, MachineFunction &MF = DAG.getMachineFunction(); MachineFrameInfo *MFI = MF.getFrameInfo(); - const TargetFrameLowering *TFL = MF.getTarget().getFrameLowering(); + const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering(); MipsFunctionInfo *FuncInfo = MF.getInfo(); bool IsPIC = getTargetMachine().getRelocationModel() == Reloc::PIC_; // Analyze operands of the call, assigning locations to each operand. 
SmallVector ArgLocs; - CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), - getTargetMachine(), ArgLocs, *DAG.getContext()); - MipsCC::SpecialCallingConvType SpecialCallingConv = - getSpecialCallingConv(Callee); - MipsCC MipsCCInfo(CallConv, Subtarget.isABI_O32(), Subtarget.isFP64bit(), - CCInfo, SpecialCallingConv); + MipsCCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs, + *DAG.getContext()); + MipsCC MipsCCInfo(CallConv, Subtarget, CCInfo); - MipsCCInfo.analyzeCallOperands(Outs, IsVarArg, - Subtarget.abiUsesSoftFloat(), - Callee.getNode(), CLI.getArgs()); + CCInfo.PreAnalyzeCallOperandsForF128_(Outs, CLI.getArgs(), Callee.getNode()); + MipsCCInfo.analyzeCallOperands(Outs, IsVarArg, Subtarget.abiUsesSoftFloat(), + Callee.getNode(), CLI.getArgs(), CCInfo); + CCInfo.ClearOriginalArgWasF128(); // Get a count of how many bytes are to be pushed on the stack. unsigned NextStackOffset = CCInfo.getNextStackOffset(); // Check if it's really possible to do a tail call. if (IsTailCall) - IsTailCall = - isEligibleForTailCallOptimization(MipsCCInfo, NextStackOffset, - *MF.getInfo()); + IsTailCall = isEligibleForTailCallOptimization( + CCInfo, NextStackOffset, *MF.getInfo()); if (!IsTailCall && CLI.CS && CLI.CS->isMustTailCall()) report_fatal_error("failed to perform tail call elimination on a call " @@ -2451,7 +2628,8 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, // With EABI is it possible to have 16 args on registers. std::deque< std::pair > RegsToPass; SmallVector MemOpChains; - MipsCC::byval_iterator ByValArg = MipsCCInfo.byval_begin(); + + CCInfo.rewindByValRegsInfo(); // Walk the register/memloc assignments, inserting copies/loads. for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { @@ -2462,14 +2640,19 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, // ByVal Arg. 
if (Flags.isByVal()) { + unsigned FirstByValReg, LastByValReg; + unsigned ByValIdx = CCInfo.getInRegsParamsProcessed(); + CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg); + assert(Flags.getByValSize() && "ByVal args of size 0 should have been ignored by front-end."); - assert(ByValArg != MipsCCInfo.byval_end()); + assert(ByValIdx < CCInfo.getInRegsParamsCount()); assert(!IsTailCall && "Do not tail-call optimize if there is a byval argument."); passByValArg(Chain, DL, RegsToPass, MemOpChains, StackPtr, MFI, DAG, Arg, - MipsCCInfo, *ByValArg, Flags, Subtarget.isLittle()); - ++ByValArg; + MipsCCInfo, FirstByValReg, LastByValReg, Flags, + Subtarget.isLittle(), VA); + CCInfo.nextInRegsParam(); continue; } @@ -2497,6 +2680,9 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, } } break; + case CCValAssign::BCvt: + Arg = DAG.getNode(ISD::BITCAST, DL, LocVT, Arg); + break; case CCValAssign::SExt: Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, LocVT, Arg); break; @@ -2535,7 +2721,7 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, bool IsPICCall = (Subtarget.isABI_N64() || IsPIC); // true if calls are translated to // jalr $25 - bool GlobalOrExternal = false, InternalLinkage = false; + bool GlobalOrExternal = false, InternalLinkage = false, IsCallReloc = false; SDValue CalleeLo; EVT Ty = Callee.getValueType(); @@ -2547,13 +2733,16 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, if (InternalLinkage) Callee = getAddrLocal(G, Ty, DAG, Subtarget.isABI_N32() || Subtarget.isABI_N64()); - else if (LargeGOT) + else if (LargeGOT) { Callee = getAddrGlobalLargeGOT(G, Ty, DAG, MipsII::MO_CALL_HI16, MipsII::MO_CALL_LO16, Chain, FuncInfo->callPtrInfo(Val)); - else + IsCallReloc = true; + } else { Callee = getAddrGlobal(G, Ty, DAG, MipsII::MO_GOT_CALL, Chain, FuncInfo->callPtrInfo(Val)); + IsCallReloc = true; + } } else Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, getPointerTy(), 0, MipsII::MO_NO_FLAG); @@ -2565,13 +2754,16 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, if (!Subtarget.isABI_N64() && !IsPIC) // !N64 && static Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy(), MipsII::MO_NO_FLAG); - else if (LargeGOT) + else if (LargeGOT) { Callee = getAddrGlobalLargeGOT(S, Ty, DAG, MipsII::MO_CALL_HI16, MipsII::MO_CALL_LO16, Chain, FuncInfo->callPtrInfo(Sym)); - else // N64 || PIC + IsCallReloc = true; + } else { // N64 || PIC Callee = getAddrGlobal(S, Ty, DAG, MipsII::MO_GOT_CALL, Chain, FuncInfo->callPtrInfo(Sym)); + IsCallReloc = true; + } GlobalOrExternal = true; } @@ -2580,7 +2772,7 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); getOpndList(Ops, RegsToPass, IsPICCall, GlobalOrExternal, InternalLinkage, - CLI, Callee, Chain); + IsCallReloc, CLI, Callee, Chain); if (IsTailCall) return DAG.getNode(MipsISD::TailCall, DL, MVT::Other, Ops); @@ -2595,39 +2787,68 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, // Handle result values, copying them out of physregs into vregs that we // return. - return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, - Ins, DL, DAG, InVals, CLI.Callee.getNode(), CLI.RetTy); + return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG, + InVals, CLI); } /// LowerCallResult - Lower the result values of a call into the /// appropriate copies out of appropriate physical registers. 
-SDValue -MipsTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, - CallingConv::ID CallConv, bool IsVarArg, - const SmallVectorImpl &Ins, - SDLoc DL, SelectionDAG &DAG, - SmallVectorImpl &InVals, - const SDNode *CallNode, - const Type *RetTy) const { +SDValue MipsTargetLowering::LowerCallResult( + SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg, + const SmallVectorImpl &Ins, SDLoc DL, SelectionDAG &DAG, + SmallVectorImpl &InVals, + TargetLowering::CallLoweringInfo &CLI) const { // Assign locations to each value returned by this call. SmallVector RVLocs; - CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), - getTargetMachine(), RVLocs, *DAG.getContext()); - MipsCC MipsCCInfo(CallConv, Subtarget.isABI_O32(), Subtarget.isFP64bit(), - CCInfo); - - MipsCCInfo.analyzeCallResult(Ins, Subtarget.abiUsesSoftFloat(), - CallNode, RetTy); + MipsCCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs, + *DAG.getContext()); + CCInfo.AnalyzeCallResult(Ins, RetCC_Mips, CLI); // Copy all of the result registers out of their specified physreg. for (unsigned i = 0; i != RVLocs.size(); ++i) { + CCValAssign &VA = RVLocs[i]; + assert(VA.isRegLoc() && "Can only return in registers!"); + SDValue Val = DAG.getCopyFromReg(Chain, DL, RVLocs[i].getLocReg(), RVLocs[i].getLocVT(), InFlag); Chain = Val.getValue(1); InFlag = Val.getValue(2); - if (RVLocs[i].getValVT() != RVLocs[i].getLocVT()) - Val = DAG.getNode(ISD::BITCAST, DL, RVLocs[i].getValVT(), Val); + if (VA.isUpperBitsInLoc()) { + unsigned ValSizeInBits = Ins[i].ArgVT.getSizeInBits(); + unsigned LocSizeInBits = VA.getLocVT().getSizeInBits(); + unsigned Shift = + VA.getLocInfo() == CCValAssign::ZExtUpper ? ISD::SRL : ISD::SRA; + Val = DAG.getNode( + Shift, DL, VA.getLocVT(), Val, + DAG.getConstant(LocSizeInBits - ValSizeInBits, VA.getLocVT())); + } + + switch (VA.getLocInfo()) { + default: + llvm_unreachable("Unknown loc info!"); + case CCValAssign::Full: + break; + case CCValAssign::BCvt: + Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val); + break; + case CCValAssign::AExt: + case CCValAssign::AExtUpper: + Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val); + break; + case CCValAssign::ZExt: + case CCValAssign::ZExtUpper: + Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val, + DAG.getValueType(VA.getValVT())); + Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val); + break; + case CCValAssign::SExt: + case CCValAssign::SExtUpper: + Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val, + DAG.getValueType(VA.getValVT())); + Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val); + break; + } InVals.push_back(Val); } @@ -2659,20 +2880,21 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain, // Assign locations to all of the incoming arguments. 
SmallVector ArgLocs; - CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), - getTargetMachine(), ArgLocs, *DAG.getContext()); - MipsCC MipsCCInfo(CallConv, Subtarget.isABI_O32(), Subtarget.isFP64bit(), - CCInfo); + MipsCCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs, + *DAG.getContext()); + MipsCC MipsCCInfo(CallConv, Subtarget, CCInfo); Function::const_arg_iterator FuncArg = DAG.getMachineFunction().getFunction()->arg_begin(); bool UseSoftFloat = Subtarget.abiUsesSoftFloat(); - MipsCCInfo.analyzeFormalArguments(Ins, UseSoftFloat, FuncArg); + CCInfo.PreAnalyzeFormalArgumentsForF128_(Ins); + MipsCCInfo.analyzeFormalArguments(Ins, UseSoftFloat, CCInfo); + CCInfo.ClearOriginalArgWasF128(); MipsFI->setFormalArgInfo(CCInfo.getNextStackOffset(), - MipsCCInfo.hasByValArg()); + CCInfo.getInRegsParamsCount() > 0); unsigned CurArgIdx = 0; - MipsCC::byval_iterator ByValArg = MipsCCInfo.byval_begin(); + CCInfo.rewindByValRegsInfo(); for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { CCValAssign &VA = ArgLocs[i]; @@ -2683,12 +2905,16 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain, bool IsRegLoc = VA.isRegLoc(); if (Flags.isByVal()) { + unsigned FirstByValReg, LastByValReg; + unsigned ByValIdx = CCInfo.getInRegsParamsProcessed(); + CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg); + assert(Flags.getByValSize() && "ByVal args of size 0 should have been ignored by front-end."); - assert(ByValArg != MipsCCInfo.byval_end()); + assert(ByValIdx < CCInfo.getInRegsParamsCount()); copyByValRegs(Chain, DL, OutChains, DAG, Flags, InVals, &*FuncArg, - MipsCCInfo, *ByValArg); - ++ByValArg; + MipsCCInfo, FirstByValReg, LastByValReg, VA); + CCInfo.nextInRegsParam(); continue; } @@ -2706,16 +2932,24 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain, // If this is an 8 or 16-bit value, it has been passed promoted // to 32 bits. Insert an assert[sz]ext to capture this, then // truncate to the right size. - if (VA.getLocInfo() != CCValAssign::Full) { - unsigned Opcode = 0; - if (VA.getLocInfo() == CCValAssign::SExt) - Opcode = ISD::AssertSext; - else if (VA.getLocInfo() == CCValAssign::ZExt) - Opcode = ISD::AssertZext; - if (Opcode) - ArgValue = DAG.getNode(Opcode, DL, RegVT, ArgValue, - DAG.getValueType(ValVT)); + switch (VA.getLocInfo()) { + default: + llvm_unreachable("Unknown loc info!"); + case CCValAssign::Full: + break; + case CCValAssign::SExt: + ArgValue = DAG.getNode(ISD::AssertSext, DL, RegVT, ArgValue, + DAG.getValueType(ValVT)); + ArgValue = DAG.getNode(ISD::TRUNCATE, DL, ValVT, ArgValue); + break; + case CCValAssign::ZExt: + ArgValue = DAG.getNode(ISD::AssertZext, DL, RegVT, ArgValue, + DAG.getValueType(ValVT)); ArgValue = DAG.getNode(ISD::TRUNCATE, DL, ValVT, ArgValue); + break; + case CCValAssign::BCvt: + ArgValue = DAG.getNode(ISD::BITCAST, DL, ValVT, ArgValue); + break; } // Handle floating point arguments passed in integer registers and @@ -2773,7 +3007,7 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain, } if (IsVarArg) - writeVarArgRegs(OutChains, MipsCCInfo, Chain, DL, DAG); + writeVarArgRegs(OutChains, MipsCCInfo, Chain, DL, DAG, CCInfo); // All stores are grouped in one node to allow the matching between // the size of Ins and InVals. 
This only happens when on varg functions @@ -2795,8 +3029,7 @@ MipsTargetLowering::CanLowerReturn(CallingConv::ID CallConv, const SmallVectorImpl &Outs, LLVMContext &Context) const { SmallVector RVLocs; - CCState CCInfo(CallConv, IsVarArg, MF, getTargetMachine(), - RVLocs, Context); + MipsCCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context); return CCInfo.CheckReturn(Outs, RetCC_Mips); } @@ -2812,14 +3045,11 @@ MipsTargetLowering::LowerReturn(SDValue Chain, MachineFunction &MF = DAG.getMachineFunction(); // CCState - Info about the registers and stack slot. - CCState CCInfo(CallConv, IsVarArg, MF, getTargetMachine(), RVLocs, - *DAG.getContext()); - MipsCC MipsCCInfo(CallConv, Subtarget.isABI_O32(), Subtarget.isFP64bit(), - CCInfo); + MipsCCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext()); + MipsCC MipsCCInfo(CallConv, Subtarget, CCInfo); // Analyze return values. - MipsCCInfo.analyzeReturn(Outs, Subtarget.abiUsesSoftFloat(), - MF.getFunction()->getReturnType()); + CCInfo.AnalyzeReturn(Outs, RetCC_Mips); SDValue Flag; SmallVector RetOps(1, Chain); @@ -2829,9 +3059,43 @@ MipsTargetLowering::LowerReturn(SDValue Chain, SDValue Val = OutVals[i]; CCValAssign &VA = RVLocs[i]; assert(VA.isRegLoc() && "Can only return in registers!"); + bool UseUpperBits = false; - if (RVLocs[i].getValVT() != RVLocs[i].getLocVT()) - Val = DAG.getNode(ISD::BITCAST, DL, RVLocs[i].getLocVT(), Val); + switch (VA.getLocInfo()) { + default: + llvm_unreachable("Unknown loc info!"); + case CCValAssign::Full: + break; + case CCValAssign::BCvt: + Val = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Val); + break; + case CCValAssign::AExtUpper: + UseUpperBits = true; + // Fallthrough + case CCValAssign::AExt: + Val = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Val); + break; + case CCValAssign::ZExtUpper: + UseUpperBits = true; + // Fallthrough + case CCValAssign::ZExt: + Val = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Val); + break; + case CCValAssign::SExtUpper: + UseUpperBits = true; + // Fallthrough + case CCValAssign::SExt: + Val = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Val); + break; + } + + if (UseUpperBits) { + unsigned ValSizeInBits = Outs[i].ArgVT.getSizeInBits(); + unsigned LocSizeInBits = VA.getLocVT().getSizeInBits(); + Val = DAG.getNode( + ISD::SHL, DL, VA.getLocVT(), Val, + DAG.getConstant(LocSizeInBits - ValSizeInBits, VA.getLocVT())); + } Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Flag); @@ -2963,7 +3227,7 @@ MipsTargetLowering::getSingleConstraintMatchWeight( /// that is returned indicates whether parsing was successful. The second flag /// is true if the numeric part exists. 
static std::pair -parsePhysicalReg(const StringRef &C, std::string &Prefix, +parsePhysicalReg(StringRef C, std::string &Prefix, unsigned long long &Reg) { if (C.front() != '{' || C.back() != '}') return std::make_pair(false, false); @@ -2984,8 +3248,9 @@ parsePhysicalReg(const StringRef &C, std::string &Prefix, } std::pair MipsTargetLowering:: -parseRegForInlineAsmConstraint(const StringRef &C, MVT VT) const { - const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo(); +parseRegForInlineAsmConstraint(StringRef C, MVT VT) const { + const TargetRegisterInfo *TRI = + getTargetMachine().getSubtargetImpl()->getRegisterInfo(); const TargetRegisterClass *RC; std::string Prefix; unsigned long long Reg; @@ -3293,12 +3558,16 @@ static bool isF128SoftLibCall(const char *CallSym) { return std::binary_search(LibCalls, End, CallSym, Comp); } -/// This function returns true if Ty is fp128 or i128 which was originally a -/// fp128. +/// This function returns true if Ty is fp128, {f128} or i128 which was +/// originally a fp128. static bool originalTypeIsF128(const Type *Ty, const SDNode *CallNode) { if (Ty->isFP128Ty()) return true; + if (Ty->isStructTy() && Ty->getStructNumElements() == 1 && + Ty->getStructElementType(0)->isFP128Ty()) + return true; + const ExternalSymbolSDNode *ES = dyn_cast_or_null(CallNode); @@ -3308,11 +3577,12 @@ static bool originalTypeIsF128(const Type *Ty, const SDNode *CallNode) { } MipsTargetLowering::MipsCC::SpecialCallingConvType - MipsTargetLowering::getSpecialCallingConv(SDValue Callee) const { +MipsTargetLowering::MipsCC::getSpecialCallingConv(const SDNode *Callee) const { MipsCC::SpecialCallingConvType SpecialCallingConv = MipsCC::NoSpecialCallingConv; if (Subtarget.inMips16HardFloat()) { - if (GlobalAddressSDNode *G = dyn_cast(Callee)) { + if (const GlobalAddressSDNode *G = + dyn_cast(Callee)) { llvm::StringRef Sym = G->getGlobal()->getName(); Function *F = G->getGlobal()->getParent()->getFunction(Sym); if (F && F->hasFnAttribute("__Mips16RetHelper")) { @@ -3323,43 +3593,38 @@ MipsTargetLowering::MipsCC::SpecialCallingConvType return SpecialCallingConv; } -MipsTargetLowering::MipsCC::MipsCC( - CallingConv::ID CC, bool IsO32_, bool IsFP64_, CCState &Info, - MipsCC::SpecialCallingConvType SpecialCallingConv_) - : CCInfo(Info), CallConv(CC), IsO32(IsO32_), IsFP64(IsFP64_), - SpecialCallingConv(SpecialCallingConv_){ +MipsTargetLowering::MipsCC::MipsCC(CallingConv::ID CC, + const MipsSubtarget &Subtarget_, + CCState &Info) + : CallConv(CC), Subtarget(Subtarget_) { // Pre-allocate reserved argument area. 
- CCInfo.AllocateStack(reservedArgArea(), 1); + Info.AllocateStack(reservedArgArea(), 1); } - -void MipsTargetLowering::MipsCC:: -analyzeCallOperands(const SmallVectorImpl &Args, - bool IsVarArg, bool IsSoftFloat, const SDNode *CallNode, - std::vector &FuncArgs) { +void MipsTargetLowering::MipsCC::analyzeCallOperands( + const SmallVectorImpl &Args, bool IsVarArg, + bool IsSoftFloat, const SDNode *CallNode, + std::vector &FuncArgs, CCState &State) { + MipsCC::SpecialCallingConvType SpecialCallingConv = + getSpecialCallingConv(CallNode); assert((CallConv != CallingConv::Fast || !IsVarArg) && "CallingConv::Fast shouldn't be used for vararg functions."); unsigned NumOpnds = Args.size(); - llvm::CCAssignFn *FixedFn = fixedArgFn(), *VarFn = varArgFn(); + llvm::CCAssignFn *FixedFn = CC_Mips_FixedArg; + if (CallConv != CallingConv::Fast && + SpecialCallingConv == Mips16RetHelperConv) + FixedFn = CC_Mips16RetHelper; for (unsigned I = 0; I != NumOpnds; ++I) { MVT ArgVT = Args[I].VT; ISD::ArgFlagsTy ArgFlags = Args[I].Flags; bool R; - if (ArgFlags.isByVal()) { - handleByValArg(I, ArgVT, ArgVT, CCValAssign::Full, ArgFlags); - continue; - } - if (IsVarArg && !Args[I].IsFixed) - R = VarFn(I, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo); - else { - MVT RegVT = getRegVT(ArgVT, FuncArgs[Args[I].OrigArgIndex].Ty, CallNode, - IsSoftFloat); - R = FixedFn(I, ArgVT, RegVT, CCValAssign::Full, ArgFlags, CCInfo); - } + R = CC_Mips_VarArg(I, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, State); + else + R = FixedFn(I, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, State); if (R) { #ifndef NDEBUG @@ -3371,27 +3636,16 @@ analyzeCallOperands(const SmallVectorImpl &Args, } } -void MipsTargetLowering::MipsCC:: -analyzeFormalArguments(const SmallVectorImpl &Args, - bool IsSoftFloat, Function::const_arg_iterator FuncArg) { +void MipsTargetLowering::MipsCC::analyzeFormalArguments( + const SmallVectorImpl &Args, bool IsSoftFloat, + CCState &State) { unsigned NumArgs = Args.size(); - llvm::CCAssignFn *FixedFn = fixedArgFn(); - unsigned CurArgIdx = 0; for (unsigned I = 0; I != NumArgs; ++I) { MVT ArgVT = Args[I].VT; ISD::ArgFlagsTy ArgFlags = Args[I].Flags; - std::advance(FuncArg, Args[I].OrigArgIndex - CurArgIdx); - CurArgIdx = Args[I].OrigArgIndex; - - if (ArgFlags.isByVal()) { - handleByValArg(I, ArgVT, ArgVT, CCValAssign::Full, ArgFlags); - continue; - } - - MVT RegVT = getRegVT(ArgVT, FuncArg->getType(), nullptr, IsSoftFloat); - if (!FixedFn(I, ArgVT, RegVT, CCValAssign::Full, ArgFlags, CCInfo)) + if (!CC_Mips_FixedArg(I, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, State)) continue; #ifndef NDEBUG @@ -3402,123 +3656,20 @@ analyzeFormalArguments(const SmallVectorImpl &Args, } } -template -void MipsTargetLowering::MipsCC:: -analyzeReturn(const SmallVectorImpl &RetVals, bool IsSoftFloat, - const SDNode *CallNode, const Type *RetTy) const { - CCAssignFn *Fn; - - if (IsSoftFloat && originalTypeIsF128(RetTy, CallNode)) - Fn = RetCC_F128Soft; - else - Fn = RetCC_Mips; - - for (unsigned I = 0, E = RetVals.size(); I < E; ++I) { - MVT VT = RetVals[I].VT; - ISD::ArgFlagsTy Flags = RetVals[I].Flags; - MVT RegVT = this->getRegVT(VT, RetTy, CallNode, IsSoftFloat); - - if (Fn(I, VT, RegVT, CCValAssign::Full, Flags, this->CCInfo)) { -#ifndef NDEBUG - dbgs() << "Call result #" << I << " has unhandled type " - << EVT(VT).getEVTString() << '\n'; -#endif - llvm_unreachable(nullptr); - } - } -} - -void MipsTargetLowering::MipsCC:: -analyzeCallResult(const SmallVectorImpl &Ins, bool IsSoftFloat, - const SDNode *CallNode, const Type *RetTy) 
const { - analyzeReturn(Ins, IsSoftFloat, CallNode, RetTy); -} - -void MipsTargetLowering::MipsCC:: -analyzeReturn(const SmallVectorImpl &Outs, bool IsSoftFloat, - const Type *RetTy) const { - analyzeReturn(Outs, IsSoftFloat, nullptr, RetTy); -} - -void MipsTargetLowering::MipsCC::handleByValArg(unsigned ValNo, MVT ValVT, - MVT LocVT, - CCValAssign::LocInfo LocInfo, - ISD::ArgFlagsTy ArgFlags) { - assert(ArgFlags.getByValSize() && "Byval argument's size shouldn't be 0."); - - struct ByValArgInfo ByVal; - unsigned RegSize = regSize(); - unsigned ByValSize = RoundUpToAlignment(ArgFlags.getByValSize(), RegSize); - unsigned Align = std::min(std::max(ArgFlags.getByValAlign(), RegSize), - RegSize * 2); - - if (useRegsForByval()) - allocateRegs(ByVal, ByValSize, Align); - - // Allocate space on caller's stack. - ByVal.Address = CCInfo.AllocateStack(ByValSize - RegSize * ByVal.NumRegs, - Align); - CCInfo.addLoc(CCValAssign::getMem(ValNo, ValVT, ByVal.Address, LocVT, - LocInfo)); - ByValArgs.push_back(ByVal); -} - -unsigned MipsTargetLowering::MipsCC::numIntArgRegs() const { - return IsO32 ? array_lengthof(O32IntRegs) : array_lengthof(Mips64IntRegs); -} - unsigned MipsTargetLowering::MipsCC::reservedArgArea() const { - return (IsO32 && (CallConv != CallingConv::Fast)) ? 16 : 0; + return (Subtarget.isABI_O32() && (CallConv != CallingConv::Fast)) ? 16 : 0; } -const MCPhysReg *MipsTargetLowering::MipsCC::intArgRegs() const { - return IsO32 ? O32IntRegs : Mips64IntRegs; -} - -llvm::CCAssignFn *MipsTargetLowering::MipsCC::fixedArgFn() const { - if (CallConv == CallingConv::Fast) - return CC_Mips_FastCC; - - if (SpecialCallingConv == Mips16RetHelperConv) - return CC_Mips16RetHelper; - return IsO32 ? (IsFP64 ? CC_MipsO32_FP64 : CC_MipsO32_FP32) : CC_MipsN; -} - -llvm::CCAssignFn *MipsTargetLowering::MipsCC::varArgFn() const { - return IsO32 ? (IsFP64 ? CC_MipsO32_FP64 : CC_MipsO32_FP32) : CC_MipsN_VarArg; -} - -const MCPhysReg *MipsTargetLowering::MipsCC::shadowRegs() const { - return IsO32 ? O32IntRegs : Mips64DPRegs; -} - -void MipsTargetLowering::MipsCC::allocateRegs(ByValArgInfo &ByVal, - unsigned ByValSize, - unsigned Align) { - unsigned RegSize = regSize(), NumIntArgRegs = numIntArgRegs(); - const MCPhysReg *IntArgRegs = intArgRegs(), *ShadowRegs = shadowRegs(); - assert(!(ByValSize % RegSize) && !(Align % RegSize) && - "Byval argument's size and alignment should be a multiple of" - "RegSize."); - - ByVal.FirstIdx = CCInfo.getFirstUnallocated(IntArgRegs, NumIntArgRegs); - - // If Align > RegSize, the first arg register must be even. - if ((Align > RegSize) && (ByVal.FirstIdx % 2)) { - CCInfo.AllocateReg(IntArgRegs[ByVal.FirstIdx], ShadowRegs[ByVal.FirstIdx]); - ++ByVal.FirstIdx; - } - - // Mark the registers allocated. - for (unsigned I = ByVal.FirstIdx; ByValSize && (I < NumIntArgRegs); - ByValSize -= RegSize, ++I, ++ByVal.NumRegs) - CCInfo.AllocateReg(IntArgRegs[I], ShadowRegs[I]); +const ArrayRef MipsTargetLowering::MipsCC::intArgRegs() const { + if (Subtarget.isABI_O32()) + return makeArrayRef(O32IntRegs); + return makeArrayRef(Mips64IntRegs); } MVT MipsTargetLowering::MipsCC::getRegVT(MVT VT, const Type *OrigTy, const SDNode *CallNode, bool IsSoftFloat) const { - if (IsSoftFloat || IsO32) + if (IsSoftFloat || Subtarget.isABI_O32()) return VT; // Check if the original type was fp128. 
@@ -3530,22 +3681,25 @@ MVT MipsTargetLowering::MipsCC::getRegVT(MVT VT, const Type *OrigTy, return VT; } -void MipsTargetLowering:: -copyByValRegs(SDValue Chain, SDLoc DL, std::vector &OutChains, - SelectionDAG &DAG, const ISD::ArgFlagsTy &Flags, - SmallVectorImpl &InVals, const Argument *FuncArg, - const MipsCC &CC, const ByValArgInfo &ByVal) const { +void MipsTargetLowering::copyByValRegs( + SDValue Chain, SDLoc DL, std::vector &OutChains, SelectionDAG &DAG, + const ISD::ArgFlagsTy &Flags, SmallVectorImpl &InVals, + const Argument *FuncArg, const MipsCC &CC, unsigned FirstReg, + unsigned LastReg, const CCValAssign &VA) const { MachineFunction &MF = DAG.getMachineFunction(); MachineFrameInfo *MFI = MF.getFrameInfo(); - unsigned RegAreaSize = ByVal.NumRegs * CC.regSize(); + unsigned GPRSizeInBytes = Subtarget.getGPRSizeInBytes(); + unsigned NumRegs = LastReg - FirstReg; + unsigned RegAreaSize = NumRegs * GPRSizeInBytes; unsigned FrameObjSize = std::max(Flags.getByValSize(), RegAreaSize); int FrameObjOffset; if (RegAreaSize) - FrameObjOffset = (int)CC.reservedArgArea() - - (int)((CC.numIntArgRegs() - ByVal.FirstIdx) * CC.regSize()); + FrameObjOffset = + (int)CC.reservedArgArea() - + (int)((CC.intArgRegs().size() - FirstReg) * GPRSizeInBytes); else - FrameObjOffset = ByVal.Address; + FrameObjOffset = VA.getLocMemOffset(); // Create frame object. EVT PtrTy = getPointerTy(); @@ -3553,17 +3707,17 @@ copyByValRegs(SDValue Chain, SDLoc DL, std::vector &OutChains, SDValue FIN = DAG.getFrameIndex(FI, PtrTy); InVals.push_back(FIN); - if (!ByVal.NumRegs) + if (!NumRegs) return; // Copy arg registers. - MVT RegTy = MVT::getIntegerVT(CC.regSize() * 8); + MVT RegTy = MVT::getIntegerVT(GPRSizeInBytes * 8); const TargetRegisterClass *RC = getRegClassFor(RegTy); - for (unsigned I = 0; I < ByVal.NumRegs; ++I) { - unsigned ArgReg = CC.intArgRegs()[ByVal.FirstIdx + I]; + for (unsigned I = 0; I < NumRegs; ++I) { + unsigned ArgReg = CC.intArgRegs()[FirstReg + I]; unsigned VReg = addLiveIn(MF, ArgReg, RC); - unsigned Offset = I * CC.regSize(); + unsigned Offset = I * GPRSizeInBytes; SDValue StorePtr = DAG.getNode(ISD::ADD, DL, PtrTy, FIN, DAG.getConstant(Offset, PtrTy)); SDValue Store = DAG.getStore(Chain, DL, DAG.getRegister(VReg, RegTy), @@ -3574,34 +3728,34 @@ copyByValRegs(SDValue Chain, SDLoc DL, std::vector &OutChains, } // Copy byVal arg to registers and stack. 
-void MipsTargetLowering:: -passByValArg(SDValue Chain, SDLoc DL, - std::deque< std::pair > &RegsToPass, - SmallVectorImpl &MemOpChains, SDValue StackPtr, - MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg, - const MipsCC &CC, const ByValArgInfo &ByVal, - const ISD::ArgFlagsTy &Flags, bool isLittle) const { +void MipsTargetLowering::passByValArg( + SDValue Chain, SDLoc DL, + std::deque> &RegsToPass, + SmallVectorImpl &MemOpChains, SDValue StackPtr, + MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg, const MipsCC &CC, + unsigned FirstReg, unsigned LastReg, const ISD::ArgFlagsTy &Flags, + bool isLittle, const CCValAssign &VA) const { unsigned ByValSizeInBytes = Flags.getByValSize(); unsigned OffsetInBytes = 0; // From beginning of struct - unsigned RegSizeInBytes = CC.regSize(); + unsigned RegSizeInBytes = Subtarget.getGPRSizeInBytes(); unsigned Alignment = std::min(Flags.getByValAlign(), RegSizeInBytes); EVT PtrTy = getPointerTy(), RegTy = MVT::getIntegerVT(RegSizeInBytes * 8); + unsigned NumRegs = LastReg - FirstReg; - if (ByVal.NumRegs) { - const MCPhysReg *ArgRegs = CC.intArgRegs(); - bool LeftoverBytes = (ByVal.NumRegs * RegSizeInBytes > ByValSizeInBytes); + if (NumRegs) { + const ArrayRef ArgRegs = CC.intArgRegs(); + bool LeftoverBytes = (NumRegs * RegSizeInBytes > ByValSizeInBytes); unsigned I = 0; // Copy words to registers. - for (; I < ByVal.NumRegs - LeftoverBytes; - ++I, OffsetInBytes += RegSizeInBytes) { + for (; I < NumRegs - LeftoverBytes; ++I, OffsetInBytes += RegSizeInBytes) { SDValue LoadPtr = DAG.getNode(ISD::ADD, DL, PtrTy, Arg, DAG.getConstant(OffsetInBytes, PtrTy)); SDValue LoadVal = DAG.getLoad(RegTy, DL, Chain, LoadPtr, MachinePointerInfo(), false, false, false, Alignment); MemOpChains.push_back(LoadVal.getValue(1)); - unsigned ArgReg = ArgRegs[ByVal.FirstIdx + I]; + unsigned ArgReg = ArgRegs[FirstReg + I]; RegsToPass.push_back(std::make_pair(ArgReg, LoadVal)); } @@ -3611,9 +3765,6 @@ passByValArg(SDValue Chain, SDLoc DL, // Copy the remainder of the byval argument with sub-word loads and shifts. 
if (LeftoverBytes) { - assert((ByValSizeInBytes > OffsetInBytes) && - (ByValSizeInBytes < OffsetInBytes + RegSizeInBytes) && - "Size of the remainder should be smaller than RegSizeInBytes."); SDValue Val; for (unsigned LoadSizeInBytes = RegSizeInBytes / 2, TotalBytesLoaded = 0; @@ -3653,7 +3804,7 @@ passByValArg(SDValue Chain, SDLoc DL, Alignment = std::min(Alignment, LoadSizeInBytes); } - unsigned ArgReg = ArgRegs[ByVal.FirstIdx + I]; + unsigned ArgReg = ArgRegs[FirstReg + I]; RegsToPass.push_back(std::make_pair(ArgReg, Val)); return; } @@ -3664,7 +3815,7 @@ passByValArg(SDValue Chain, SDLoc DL, SDValue Src = DAG.getNode(ISD::ADD, DL, PtrTy, Arg, DAG.getConstant(OffsetInBytes, PtrTy)); SDValue Dst = DAG.getNode(ISD::ADD, DL, PtrTy, StackPtr, - DAG.getIntPtrConstant(ByVal.Address)); + DAG.getIntPtrConstant(VA.getLocMemOffset())); Chain = DAG.getMemcpy(Chain, DL, Dst, Src, DAG.getConstant(MemCpySize, PtrTy), Alignment, /*isVolatile=*/false, /*AlwaysInline=*/false, MachinePointerInfo(), MachinePointerInfo()); @@ -3673,13 +3824,12 @@ passByValArg(SDValue Chain, SDLoc DL, void MipsTargetLowering::writeVarArgRegs(std::vector &OutChains, const MipsCC &CC, SDValue Chain, - SDLoc DL, SelectionDAG &DAG) const { - unsigned NumRegs = CC.numIntArgRegs(); - const MCPhysReg *ArgRegs = CC.intArgRegs(); - const CCState &CCInfo = CC.getCCInfo(); - unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs, NumRegs); - unsigned RegSize = CC.regSize(); - MVT RegTy = MVT::getIntegerVT(RegSize * 8); + SDLoc DL, SelectionDAG &DAG, + CCState &State) const { + const ArrayRef ArgRegs = CC.intArgRegs(); + unsigned Idx = State.getFirstUnallocated(ArgRegs.data(), ArgRegs.size()); + unsigned RegSizeInBytes = Subtarget.getGPRSizeInBytes(); + MVT RegTy = MVT::getIntegerVT(RegSizeInBytes * 8); const TargetRegisterClass *RC = getRegClassFor(RegTy); MachineFunction &MF = DAG.getMachineFunction(); MachineFrameInfo *MFI = MF.getFrameInfo(); @@ -3688,24 +3838,27 @@ void MipsTargetLowering::writeVarArgRegs(std::vector &OutChains, // Offset of the first variable argument from stack pointer. int VaArgOffset; - if (NumRegs == Idx) - VaArgOffset = RoundUpToAlignment(CCInfo.getNextStackOffset(), RegSize); + if (ArgRegs.size() == Idx) + VaArgOffset = + RoundUpToAlignment(State.getNextStackOffset(), RegSizeInBytes); else - VaArgOffset = (int)CC.reservedArgArea() - (int)(RegSize * (NumRegs - Idx)); + VaArgOffset = (int)CC.reservedArgArea() - + (int)(RegSizeInBytes * (ArgRegs.size() - Idx)); // Record the frame index of the first variable argument // which is a value necessary to VASTART. - int FI = MFI->CreateFixedObject(RegSize, VaArgOffset, true); + int FI = MFI->CreateFixedObject(RegSizeInBytes, VaArgOffset, true); MipsFI->setVarArgsFrameIndex(FI); // Copy the integer registers that have not been used for argument passing // to the argument register save area. For O32, the save area is allocated // in the caller's stack frame, while for N32/64, it is allocated in the // callee's stack frame. 
-  for (unsigned I = Idx; I < NumRegs; ++I, VaArgOffset += RegSize) {
+  for (unsigned I = Idx; I < ArgRegs.size();
+       ++I, VaArgOffset += RegSizeInBytes) {
     unsigned Reg = addLiveIn(MF, ArgRegs[I], RC);
     SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegTy);
-    FI = MFI->CreateFixedObject(RegSize, VaArgOffset, true);
+    FI = MFI->CreateFixedObject(RegSizeInBytes, VaArgOffset, true);
     SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy());
     SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
                                  MachinePointerInfo(), false, false, 0);
@@ -3714,3 +3867,49 @@ void MipsTargetLowering::writeVarArgRegs(std::vector &OutChains,
     OutChains.push_back(Store);
   }
 }
+
+void MipsTargetLowering::HandleByVal(CCState *State, unsigned &Size,
+                                     unsigned Align) const {
+  MachineFunction &MF = State->getMachineFunction();
+  const TargetFrameLowering *TFL = MF.getSubtarget().getFrameLowering();
+
+  assert(Size && "Byval argument's size shouldn't be 0.");
+
+  Align = std::min(Align, TFL->getStackAlignment());
+
+  unsigned FirstReg = 0;
+  unsigned NumRegs = 0;
+
+  if (State->getCallingConv() != CallingConv::Fast) {
+    unsigned RegSizeInBytes = Subtarget.getGPRSizeInBytes();
+    const ArrayRef IntArgRegs = Subtarget.getABI().GetByValArgRegs();
+    // FIXME: The O32 case actually describes no shadow registers.
+    const MCPhysReg *ShadowRegs =
+        Subtarget.isABI_O32() ? IntArgRegs.data() : Mips64DPRegs;
+
+    // We used to check the size as well but we can't do that anymore since
+    // CCState::HandleByVal() rounds up the size after calling this function.
+    assert(!(Align % RegSizeInBytes) &&
+           "Byval argument's alignment should be a multiple of"
+           "RegSizeInBytes.");
+
+    FirstReg = State->getFirstUnallocated(IntArgRegs.data(), IntArgRegs.size());
+
+    // If Align > RegSizeInBytes, the first arg register must be even.
+    // FIXME: This condition happens to do the right thing but it's not the
+    // right way to test it. We want to check that the stack frame offset
+    // of the register is aligned.
+    if ((Align > RegSizeInBytes) && (FirstReg % 2)) {
+      State->AllocateReg(IntArgRegs[FirstReg], ShadowRegs[FirstReg]);
+      ++FirstReg;
+    }
+
+    // Mark the registers allocated.
+    Size = RoundUpToAlignment(Size, RegSizeInBytes);
+    for (unsigned I = FirstReg; Size > 0 && (I < IntArgRegs.size());
+         Size -= RegSizeInBytes, ++I, ++NumRegs)
+      State->AllocateReg(IntArgRegs[I], ShadowRegs[I]);
+  }
+
+  State->addInRegsParamInfo(FirstReg, FirstReg + NumRegs);
+}