X-Git-Url: http://plrg.eecs.uci.edu/git/?p=oota-llvm.git;a=blobdiff_plain;f=lib%2FTarget%2FARM%2FARMISelLowering.cpp;h=2cfbc39a05bf857e480338cd6d2d0de81ee73cbf;hp=4b1fd22b4ef9cfa11fb537f82cd069451a6c7f10;hb=7963762fcef932e36d0ac0e7ee0af823eefbe2c2;hpb=d811b4bacb44065e4a5b7b477e35ec7e5a02acbd

diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 4b1fd22b4ef..2cfbc39a05b 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -1483,9 +1483,10 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
   bool isStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
   bool isThisReturn = false;
   bool isSibCall = false;
+  auto Attr = MF.getFunction()->getFnAttribute("disable-tail-calls");
 
   // Disable tail calls if they're not supported.
-  if (!Subtarget->supportsTailCall() || MF.getTarget().Options.DisableTailCalls)
+  if (!Subtarget->supportsTailCall() || Attr.getValueAsString() == "true")
     isTailCall = false;
 
   if (isTailCall) {
@@ -1933,7 +1934,6 @@ void ARMTargetLowering::HandleByVal(CCState *State, unsigned &Size,
   Size = std::max<int>(Size - Excess, 0);
 }
 
-
 /// MatchingStackOffset - Return true if the given stack call argument is
 /// already available in the same position (relatively) of the caller's
 /// incoming argument stack.
@@ -2376,7 +2376,9 @@ bool ARMTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
   if (!Subtarget->supportsTailCall())
     return false;
 
-  if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls)
+  auto Attr =
+      CI->getParent()->getParent()->getFnAttribute("disable-tail-calls");
+  if (!CI->isTailCall() || Attr.getValueAsString() == "true")
     return false;
 
   return !Subtarget->isThumb1Only();
@@ -4113,20 +4115,23 @@ unsigned ARMTargetLowering::getRegisterByName(const char* RegName,
 
 // Result is 64 bit value so split into two 32 bit values and return as a
 // pair of values.
-static SDValue ExpandREAD_REGISTER(SDNode *N, SelectionDAG &DAG) {
+static void ExpandREAD_REGISTER(SDNode *N, SmallVectorImpl<SDValue> &Results,
+                                SelectionDAG &DAG) {
   SDLoc DL(N);
-  SDValue Op = N->getOperand(0);
 
   // This function is only supposed to be called for i64 type destination.
   assert(N->getValueType(0) == MVT::i64
          && "ExpandREAD_REGISTER called for non-i64 type result.");
 
   SDValue Read = DAG.getNode(ISD::READ_REGISTER, DL,
-                             DAG.getVTList(MVT::i32, MVT::i32), Op);
-  return DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Read.getValue(0),
-                     Read.getValue(1));
-}
+                             DAG.getVTList(MVT::i32, MVT::i32, MVT::Other),
+                             N->getOperand(0),
+                             N->getOperand(1));
+  Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Read.getValue(0),
+                    Read.getValue(1)));
+  Results.push_back(Read.getOperand(0));
+}
 
 /// ExpandBITCAST - If the target supports VFP, this function is called to
 /// expand a bit convert where either the source or destination type is i64 to
@@ -6481,7 +6486,7 @@ void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
   default:
     llvm_unreachable("Don't know how to custom expand this!");
   case ISD::READ_REGISTER:
-    Res = ExpandREAD_REGISTER(N, DAG);
+    ExpandREAD_REGISTER(N, Results, DAG);
     break;
   case ISD::BITCAST:
     Res = ExpandBITCAST(N, DAG);
@@ -7957,7 +7962,7 @@ static SDValue AddCombineToVPADDL(SDNode *N, SDValue N0, SDValue N1,
   // Get widened type and narrowed type.
   MVT widenType;
   unsigned numElem = VT.getVectorNumElements();
-  
+
   EVT inputLaneType = Vec.getValueType().getVectorElementType();
   switch (inputLaneType.getSimpleVT().SimpleTy) {
     case MVT::i8: widenType = MVT::getVectorVT(MVT::i16, numElem); break;
@@ -7994,13 +7999,13 @@ static SDValue AddCombineTo64bitMLAL(SDNode *AddcNode,
   // a glue link from the first add to the second add.
   // If we find this pattern, we can replace the U/SMUL_LOHI, ADDC, and ADDE by
   // a S/UMLAL instruction.
-  //          loAdd   UMUL_LOHI
-  //           \    / :lo    \ :hi
-  //            \  /          \          [no multiline comment]
-  //             ADDC         |  hiAdd
-  //              \ :glue    /  /
-  //               \        /  /
-  //                 ADDE
+  //                  UMUL_LOHI
+  //                 / :lo    \ :hi
+  //                /          \          [no multiline comment]
+  //    loAdd ->  ADDE         |
+  //                 \ :glue  /
+  //                  \      /
+  //                   ADDC  <- hiAdd
   //
   assert(AddcNode->getOpcode() == ISD::ADDC && "Expect an ADDC");
   SDValue AddcOp0 = AddcNode->getOperand(0);
@@ -10266,7 +10271,8 @@ bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM,
 /// isLegalAddressingMode - Return true if the addressing mode represented
 /// by AM is legal for this target, for a load/store of the specified type.
 bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM,
-                                              Type *Ty) const {
+                                              Type *Ty,
+                                              unsigned AS) const {
   EVT VT = getValueType(Ty, true);
   if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget))
     return false;
@@ -11338,17 +11344,17 @@ Value *ARMTargetLowering::emitStoreConditional(IRBuilder<> &Builder, Value *Val,
     if (!Subtarget->isLittle())
       std::swap (Lo, Hi);
     Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
-    return Builder.CreateCall3(Strex, Lo, Hi, Addr);
+    return Builder.CreateCall(Strex, {Lo, Hi, Addr});
   }
 
   Intrinsic::ID Int = IsRelease ? Intrinsic::arm_stlex : Intrinsic::arm_strex;
   Type *Tys[] = { Addr->getType() };
   Function *Strex = Intrinsic::getDeclaration(M, Int, Tys);
 
-  return Builder.CreateCall2(
-      Strex, Builder.CreateZExtOrBitCast(
-                 Val, Strex->getFunctionType()->getParamType(0)),
-      Addr);
+  return Builder.CreateCall(
+      Strex, {Builder.CreateZExtOrBitCast(
+                  Val, Strex->getFunctionType()->getParamType(0)),
+              Addr});
 }
 
 enum HABaseType {
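Note on the tail-call hunks above: the patch stops consulting the global TargetOptions.DisableTailCalls flag and instead reads a per-function string attribute via getFnAttribute("disable-tail-calls") in both LowerCall and mayBeEmittedAsTailCall. As a minimal sketch (not part of this patch; the helper name is purely illustrative), a client of the LLVM C++ API could mark a single function so the new checks see the value "true":

#include "llvm/IR/Function.h"

// Illustrative helper, not from this patch: attach the string attribute the
// ARM backend now reads, so tail calls are disabled for this one function
// instead of globally through TargetOptions.
static void markNoTailCalls(llvm::Function &F) {
  F.addFnAttr("disable-tail-calls", "true");
}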