From 41612a9b850407d4b2aaf33de5c49f9f5e5359e8 Mon Sep 17 00:00:00 2001
From: Eric Christopher
Date: Wed, 6 Aug 2014 18:45:26 +0000
Subject: [PATCH] Remove the target machine from CCState.

Previously it was only used to get the subtarget and that's accessible
from the MachineFunction now. This helps clear the way for smaller
changes where getting a subtarget will require passing in a
MachineFunction/Function as well.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@214988 91177308-0d34-0410-b5e6-96231b3b80d8
---
 include/llvm/CodeGen/CallingConvLower.h    |  5 +--
 lib/CodeGen/CallingConvLower.cpp           |  9 +++--
 .../AArch64/AArch64CallingConvention.td    |  2 +-
 lib/Target/AArch64/AArch64FastISel.cpp     |  7 ++--
 lib/Target/AArch64/AArch64ISelLowering.cpp | 34 +++++++++---------
 lib/Target/ARM/ARMFastISel.cpp             | 10 +++---
 lib/Target/ARM/ARMISelLowering.cpp         | 36 +++++++++----------
 lib/Target/Hexagon/HexagonISelLowering.cpp | 23 ++++++------
 lib/Target/MSP430/MSP430ISelLowering.cpp   | 16 ++++-----
 lib/Target/Mips/MipsCallingConv.td         |  7 ++--
 lib/Target/Mips/MipsISelLowering.cpp       | 18 +++++-----
 lib/Target/PowerPC/PPCCallingConv.td       | 10 ++++--
 lib/Target/PowerPC/PPCFastISel.cpp         |  8 ++---
 lib/Target/PowerPC/PPCISelLowering.cpp     | 23 ++++++------
 lib/Target/R600/AMDGPUCallingConv.td       | 32 ++++++++++-------
 lib/Target/R600/R600ISelLowering.cpp       |  4 +--
 lib/Target/R600/SIISelLowering.cpp         |  4 +--
 lib/Target/Sparc/SparcISelLowering.cpp     | 32 ++++++++---------
 lib/Target/SystemZ/SystemZISelLowering.cpp | 12 +++---
 lib/Target/X86/X86CallingConv.td           |  4 ++-
 lib/Target/X86/X86FastISel.cpp             |  8 ++---
 lib/Target/X86/X86ISelLowering.cpp         | 36 +++++++++----------
 lib/Target/XCore/XCoreISelLowering.cpp     | 18 +++++-----
 utils/TableGen/CallingConvEmitter.cpp      |  4 +--
 24 files changed, 181 insertions(+), 181 deletions(-)

diff --git a/include/llvm/CodeGen/CallingConvLower.h b/include/llvm/CodeGen/CallingConvLower.h
index 04af4bd4d59..866b3c8c6bc 100644
--- a/include/llvm/CodeGen/CallingConvLower.h
+++ b/include/llvm/CodeGen/CallingConvLower.h
@@ -174,7 +174,6 @@ private:
   CallingConv::ID CallingConv;
   bool IsVarArg;
   MachineFunction &MF;
-  const TargetMachine &TM;
   const TargetRegisterInfo &TRI;
   SmallVectorImpl &Locs;
   LLVMContext &Context;
@@ -238,15 +237,13 @@ protected:
 public:
   CCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
-          const TargetMachine &TM, SmallVectorImpl &locs,
-          LLVMContext &C);
+          SmallVectorImpl &locs, LLVMContext &C);

   void addLoc(const CCValAssign &V) {
     Locs.push_back(V);
   }

   LLVMContext &getContext() const { return Context; }
-  const TargetMachine &getTarget() const { return TM; }
   MachineFunction &getMachineFunction() const { return MF; }
   CallingConv::ID getCallingConv() const { return CallingConv; }
   bool isVarArg() const { return IsVarArg; }

diff --git a/lib/CodeGen/CallingConvLower.cpp b/lib/CodeGen/CallingConvLower.cpp
index cfcc9eae14e..9c1cbaf971a 100644
--- a/lib/CodeGen/CallingConvLower.cpp
+++ b/lib/CodeGen/CallingConvLower.cpp
@@ -25,10 +25,9 @@
 using namespace llvm;

 CCState::CCState(CallingConv::ID CC, bool isVarArg, MachineFunction &mf,
-                 const TargetMachine &tm, SmallVectorImpl &locs,
-                 LLVMContext &C)
-    : CallingConv(CC), IsVarArg(isVarArg), MF(mf), TM(tm),
-      TRI(*TM.getSubtargetImpl()->getRegisterInfo()), Locs(locs), Context(C),
+                 SmallVectorImpl &locs, LLVMContext &C)
+    : CallingConv(CC), IsVarArg(isVarArg), MF(mf),
+      TRI(*MF.getSubtarget().getRegisterInfo()), Locs(locs), Context(C),
       CallOrPrologue(Unknown) {
   // No stack is used.
   StackOffset = 0;
@@ -51,7 +50,7 @@ void CCState::HandleByVal(unsigned ValNo, MVT ValVT,
   if (MinAlign > (int)Align)
     Align = MinAlign;
   MF.getFrameInfo()->ensureMaxAlignment(Align);
-  TM.getSubtargetImpl()->getTargetLowering()->HandleByVal(this, Size, Align);
+  MF.getSubtarget().getTargetLowering()->HandleByVal(this, Size, Align);
   Size = unsigned(RoundUpToAlignment(Size, MinAlign));
   unsigned Offset = AllocateStack(Size, Align);
   addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));

diff --git a/lib/Target/AArch64/AArch64CallingConvention.td b/lib/Target/AArch64/AArch64CallingConvention.td
index dd63271ad30..c163d22006a 100644
--- a/lib/Target/AArch64/AArch64CallingConvention.td
+++ b/lib/Target/AArch64/AArch64CallingConvention.td
@@ -16,7 +16,7 @@ class CCIfAlign : CCIf;
 /// CCIfBigEndian - Match only if we're in big endian mode.
 class CCIfBigEndian :
-  CCIf<"State.getTarget().getSubtargetImpl()->getDataLayout()->isBigEndian()", A>;
+  CCIf<"State.getMachineFunction().getSubtarget().getDataLayout()->isBigEndian()", A>;

 //===----------------------------------------------------------------------===//
 // ARM AAPCS64 Calling Convention

diff --git a/lib/Target/AArch64/AArch64FastISel.cpp b/lib/Target/AArch64/AArch64FastISel.cpp
index 7919e47f5df..68531dbf1e6 100644
--- a/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/lib/Target/AArch64/AArch64FastISel.cpp
@@ -1421,7 +1421,7 @@ bool AArch64FastISel::ProcessCallArgs(CallLoweringInfo &CLI,
                                       unsigned &NumBytes) {
   CallingConv::ID CC = CLI.CallConv;
   SmallVector ArgLocs;
-  CCState CCInfo(CC, false, *FuncInfo.MF, TM, ArgLocs, *Context);
+  CCState CCInfo(CC, false, *FuncInfo.MF, ArgLocs, *Context);
   CCInfo.AnalyzeCallOperands(OutVTs, CLI.OutFlags, CCAssignFnForCall(CC));

   // Get a count of how many bytes are to be pushed on the stack.
@@ -1514,7 +1514,7 @@ bool AArch64FastISel::FinishCall(CallLoweringInfo &CLI, MVT RetVT,
   // Now the return value.
   if (RetVT != MVT::isVoid) {
     SmallVector RVLocs;
-    CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context);
+    CCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);
     CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC));

     // Only handle a single return value.
@@ -2068,8 +2068,7 @@ bool AArch64FastISel::SelectRet(const Instruction *I) {
     // Analyze operands of the call, assigning locations to each operand.
     SmallVector ValLocs;
-    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,
-                   I->getContext());
+    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs, I->getContext());
     CCAssignFn *RetCC = CC == CallingConv::WebKit_JS ? RetCC_AArch64_WebKit_JS
                                                      : RetCC_AArch64_AAPCS;
     CCInfo.AnalyzeReturn(Outs, RetCC);

diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp
index 44d3b34cccc..f236e6f6803 100644
--- a/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1674,8 +1674,8 @@ SDValue AArch64TargetLowering::LowerFormalArguments(
   // Assign locations to all of the incoming arguments.
   SmallVector ArgLocs;
-  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                 getTargetMachine(), ArgLocs, *DAG.getContext());
+  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
+                 *DAG.getContext());

   // At this point, Ins[].VT may already be promoted to i32. To correctly
   // handle passing i8 as i8 instead of i32 on stack, we pass in both i32 and
@@ -1946,8 +1946,8 @@ SDValue AArch64TargetLowering::LowerCallResult(
                                   : RetCC_AArch64_AAPCS;
   // Assign locations to each value returned by this call.
   SmallVector RVLocs;
-  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                 getTargetMachine(), RVLocs, *DAG.getContext());
+  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
+                 *DAG.getContext());
   CCInfo.AnalyzeCallResult(Ins, RetCC);

   // Copy all of the result registers out of their specified physreg.
@@ -2033,8 +2033,8 @@ bool AArch64TargetLowering::isEligibleForTailCallOptimization(
   // FIXME: for now we take the most conservative of these in both cases:
   // disallow all variadic memory operands.
   SmallVector ArgLocs;
-  CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(),
-                 getTargetMachine(), ArgLocs, *DAG.getContext());
+  CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
+                 *DAG.getContext());

   CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, true));
   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
@@ -2046,13 +2046,13 @@ bool AArch64TargetLowering::isEligibleForTailCallOptimization(
   // results are returned in the same way as what the caller expects.
   if (!CCMatch) {
     SmallVector RVLocs1;
-    CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(),
-                    getTargetMachine(), RVLocs1, *DAG.getContext());
+    CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), RVLocs1,
+                    *DAG.getContext());
     CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForCall(CalleeCC, isVarArg));

     SmallVector RVLocs2;
-    CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(),
-                    getTargetMachine(), RVLocs2, *DAG.getContext());
+    CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), RVLocs2,
+                    *DAG.getContext());
     CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForCall(CallerCC, isVarArg));

     if (RVLocs1.size() != RVLocs2.size())
@@ -2077,8 +2077,8 @@ bool AArch64TargetLowering::isEligibleForTailCallOptimization(
     return true;

   SmallVector ArgLocs;
-  CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(),
-                 getTargetMachine(), ArgLocs, *DAG.getContext());
+  CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
+                 *DAG.getContext());

   CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, isVarArg));

@@ -2175,8 +2175,8 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
   // Analyze operands of the call, assigning locations to each operand.
   SmallVector ArgLocs;
-  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
-                 getTargetMachine(), ArgLocs, *DAG.getContext());
+  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
+                 *DAG.getContext());

   if (IsVarArg) {
     // Handle fixed and variable vector arguments differently.
@@ -2500,7 +2500,7 @@ bool AArch64TargetLowering::CanLowerReturn(
       ? RetCC_AArch64_WebKit_JS : RetCC_AArch64_AAPCS;
   SmallVector RVLocs;
-  CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), RVLocs, Context);
+  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
   return CCInfo.CheckReturn(Outs, RetCC);
 }

@@ -2514,8 +2514,8 @@ AArch64TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
       ? RetCC_AArch64_WebKit_JS : RetCC_AArch64_AAPCS;
   SmallVector RVLocs;
-  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                 getTargetMachine(), RVLocs, *DAG.getContext());
+  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
+                 *DAG.getContext());
   CCInfo.AnalyzeReturn(Outs, RetCC);

   // Copy the result values into the output registers.
diff --git a/lib/Target/ARM/ARMFastISel.cpp b/lib/Target/ARM/ARMFastISel.cpp
index 4bcfbbf6f1a..aff2fd9ba4d 100644
--- a/lib/Target/ARM/ARMFastISel.cpp
+++ b/lib/Target/ARM/ARMFastISel.cpp
@@ -1885,7 +1885,7 @@ bool ARMFastISel::ProcessCallArgs(SmallVectorImpl &Args,
                                   unsigned &NumBytes, bool isVarArg) {
   SmallVector ArgLocs;
-  CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, ArgLocs, *Context);
+  CCState CCInfo(CC, isVarArg, *FuncInfo.MF, ArgLocs, *Context);
   CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags,
                              CCAssignFnForCall(CC, false, isVarArg));

@@ -2034,7 +2034,7 @@ bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl &UsedRegs,
   // Now the return value.
   if (RetVT != MVT::isVoid) {
     SmallVector RVLocs;
-    CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, RVLocs, *Context);
+    CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *Context);
     CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));

     // Copy all of the result registers out of their specified physreg.
@@ -2095,7 +2095,7 @@ bool ARMFastISel::SelectRet(const Instruction *I) {
     // Analyze operands of the call, assigning locations to each operand.
     SmallVector ValLocs;
-    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,I->getContext());
+    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs, I->getContext());
     CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */,
                                                  F.isVarArg()));

@@ -2200,7 +2200,7 @@ bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
   // Can't handle non-double multi-reg retvals.
   if (RetVT != MVT::isVoid && RetVT != MVT::i32) {
     SmallVector RVLocs;
-    CCState CCInfo(CC, false, *FuncInfo.MF, TM, RVLocs, *Context);
+    CCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);
     CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, false));
     if (RVLocs.size() >= 2 && RetVT != MVT::f64)
       return false;
@@ -2311,7 +2311,7 @@ bool ARMFastISel::SelectCall(const Instruction *I,
   if (RetVT != MVT::isVoid && RetVT != MVT::i1 && RetVT != MVT::i8 &&
       RetVT != MVT::i16 && RetVT != MVT::i32) {
     SmallVector RVLocs;
-    CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, RVLocs, *Context);
+    CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *Context);
     CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));
     if (RVLocs.size() >= 2 && RetVT != MVT::f64)
       return false;

diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 9232e37573e..3a4f788c848 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -70,9 +70,9 @@ namespace {
   class ARMCCState : public CCState {
   public:
     ARMCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
-               const TargetMachine &TM, SmallVectorImpl &locs,
-               LLVMContext &C, ParmContext PC)
-        : CCState(CC, isVarArg, MF, TM, locs, C) {
+               SmallVectorImpl &locs, LLVMContext &C,
+               ParmContext PC)
+        : CCState(CC, isVarArg, MF, locs, C) {
       assert(((PC == Call) || (PC == Prologue)) &&
              "ARMCCState users must specify whether their context is call"
              "or prologue generation.");
@@ -1269,8 +1269,8 @@ ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
   // Assign locations to each value returned by this call.
   SmallVector RVLocs;
-  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                    getTargetMachine(), RVLocs, *DAG.getContext(), Call);
+  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
+                    *DAG.getContext(), Call);
   CCInfo.AnalyzeCallResult(Ins,
                            CCAssignFnForNode(CallConv, /* Return*/ true,
                                              isVarArg));

@@ -1430,8 +1430,8 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
   // Analyze operands of the call, assigning locations to each operand.
   SmallVector ArgLocs;
-  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                    getTargetMachine(), ArgLocs, *DAG.getContext(), Call);
+  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
+                    *DAG.getContext(), Call);
   CCInfo.AnalyzeCallOperands(Outs,
                              CCAssignFnForNode(CallConv, /* Return*/ false,
                                                isVarArg));

@@ -1961,13 +1961,13 @@ ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
   // results are returned in the same way as what the caller expects.
   if (!CCMatch) {
     SmallVector RVLocs1;
-    ARMCCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(),
-                       getTargetMachine(), RVLocs1, *DAG.getContext(), Call);
+    ARMCCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), RVLocs1,
+                       *DAG.getContext(), Call);
     CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC, true, isVarArg));

     SmallVector RVLocs2;
-    ARMCCState CCInfo2(CallerCC, false, DAG.getMachineFunction(),
-                       getTargetMachine(), RVLocs2, *DAG.getContext(), Call);
+    ARMCCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), RVLocs2,
+                       *DAG.getContext(), Call);
     CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC, true, isVarArg));

     if (RVLocs1.size() != RVLocs2.size())
@@ -2001,8 +2001,8 @@ ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
     // Check if stack adjustment is needed. For now, do not do this if any
     // argument is passed on the stack.
     SmallVector ArgLocs;
-    ARMCCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(),
-                      getTargetMachine(), ArgLocs, *DAG.getContext(), Call);
+    ARMCCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
+                      *DAG.getContext(), Call);
     CCInfo.AnalyzeCallOperands(Outs,
                                CCAssignFnForNode(CalleeCC, false, isVarArg));
     if (CCInfo.getNextStackOffset()) {

@@ -2056,7 +2056,7 @@ ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
                                   const SmallVectorImpl &Outs,
                                   LLVMContext &Context) const {
   SmallVector RVLocs;
-  CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), RVLocs, Context);
+  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
   return CCInfo.CheckReturn(Outs, CCAssignFnForNode(CallConv, /*Return=*/true,
                                                     isVarArg));
 }

@@ -2104,8 +2104,8 @@ ARMTargetLowering::LowerReturn(SDValue Chain,
   SmallVector RVLocs;

   // CCState - Info about the registers and stack slots.
-  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                    getTargetMachine(), RVLocs, *DAG.getContext(), Call);
+  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
+                    *DAG.getContext(), Call);

   // Analyze outgoing return values.
   CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv, /* Return */ true,

@@ -2941,8 +2941,8 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
   // Assign locations to all of the incoming arguments.
   SmallVector ArgLocs;
-  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                    getTargetMachine(), ArgLocs, *DAG.getContext(), Prologue);
+  ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
+                    *DAG.getContext(), Prologue);
   CCInfo.AnalyzeFormalArguments(Ins,
                                 CCAssignFnForNode(CallConv, /* Return*/ false,
                                                   isVarArg));

diff --git a/lib/Target/Hexagon/HexagonISelLowering.cpp b/lib/Target/Hexagon/HexagonISelLowering.cpp
index 714d51f51eb..ac297124508 100644
--- a/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -51,9 +51,9 @@ class HexagonCCState : public CCState {
 public:
   HexagonCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
-                 const TargetMachine &TM, SmallVectorImpl &locs,
-                 LLVMContext &C, int NumNamedVarArgParams)
-      : CCState(CC, isVarArg, MF, TM, locs, C),
+                 SmallVectorImpl &locs, LLVMContext &C,
+                 int NumNamedVarArgParams)
+      : CCState(CC, isVarArg, MF, locs, C),
         NumNamedVarArgParams(NumNamedVarArgParams) {}

   int getNumNamedVarArgParams() const { return NumNamedVarArgParams; }
@@ -322,8 +322,8 @@ HexagonTargetLowering::LowerReturn(SDValue Chain,
   SmallVector RVLocs;

   // CCState - Info about the registers and stack slot.
-  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                 getTargetMachine(), RVLocs, *DAG.getContext());
+  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
+                 *DAG.getContext());

   // Analyze return values of ISD::RET
   CCInfo.AnalyzeReturn(Outs, RetCC_Hexagon);
@@ -372,8 +372,8 @@ HexagonTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
   // Assign locations to each value returned by this call.
   SmallVector RVLocs;
-  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                 getTargetMachine(), RVLocs, *DAG.getContext());
+  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
+                 *DAG.getContext());

   CCInfo.AnalyzeCallResult(Ins, RetCC_Hexagon);

@@ -427,9 +427,8 @@ HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
   // Analyze operands of the call, assigning locations to each operand.
   SmallVector ArgLocs;
-  HexagonCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                        getTargetMachine(), ArgLocs, *DAG.getContext(),
-                        NumNamedVarArgParams);
+  HexagonCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
+                        *DAG.getContext(), NumNamedVarArgParams);

   if (NumNamedVarArgParams > 0)
     CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_VarArg);
@@ -843,8 +842,8 @@ const {
   // Assign locations to all of the incoming arguments.
   SmallVector ArgLocs;
-  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                 getTargetMachine(), ArgLocs, *DAG.getContext());
+  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
+                 *DAG.getContext());

   CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon);

diff --git a/lib/Target/MSP430/MSP430ISelLowering.cpp b/lib/Target/MSP430/MSP430ISelLowering.cpp
index 900f36d543b..e79373058f4 100644
--- a/lib/Target/MSP430/MSP430ISelLowering.cpp
+++ b/lib/Target/MSP430/MSP430ISelLowering.cpp
@@ -437,8 +437,8 @@ MSP430TargetLowering::LowerCCCArguments(SDValue Chain,
   // Assign locations to all of the incoming arguments.
   SmallVector ArgLocs;
-  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                 getTargetMachine(), ArgLocs, *DAG.getContext());
+  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
+                 *DAG.getContext());
   AnalyzeArguments(CCInfo, ArgLocs, Ins);

   // Create frame index for the start of the first vararg value
@@ -533,8 +533,8 @@ MSP430TargetLowering::LowerReturn(SDValue Chain,
     report_fatal_error("ISRs cannot return any value");

   // CCState - Info about the registers and stack slot.
-  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                 getTargetMachine(), RVLocs, *DAG.getContext());
+  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
+                 *DAG.getContext());

   // Analize return values.
   AnalyzeReturnValues(CCInfo, RVLocs, Outs);
@@ -583,8 +583,8 @@ MSP430TargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
                                      SmallVectorImpl &InVals) const {
   // Analyze operands of the call, assigning locations to each operand.
   SmallVector ArgLocs;
-  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                 getTargetMachine(), ArgLocs, *DAG.getContext());
+  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
+                 *DAG.getContext());
   AnalyzeArguments(CCInfo, ArgLocs, Outs);

   // Get a count of how many bytes are to be pushed on the stack.
@@ -719,8 +719,8 @@ MSP430TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
   // Assign locations to each value returned by this call.
   SmallVector RVLocs;
-  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                 getTargetMachine(), RVLocs, *DAG.getContext());
+  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
+                 *DAG.getContext());

   AnalyzeReturnValues(CCInfo, RVLocs, Ins);

diff --git a/lib/Target/Mips/MipsCallingConv.td b/lib/Target/Mips/MipsCallingConv.td
index b1cd3c35782..3b239358ebf 100644
--- a/lib/Target/Mips/MipsCallingConv.td
+++ b/lib/Target/Mips/MipsCallingConv.td
@@ -10,8 +10,11 @@
 //===----------------------------------------------------------------------===//

 /// CCIfSubtarget - Match if the current subtarget has a feature F.
-class CCIfSubtarget:
-  CCIf().", F), A>;
+class CCIfSubtarget
+    : CCIf"
+          "(State.getMachineFunction().getSubtarget()).",
+      F),
+      A>;

 //===----------------------------------------------------------------------===//
 // Mips O32 Calling Convention

diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp
index c90b9a5674e..416de552b4a 100644
--- a/lib/Target/Mips/MipsISelLowering.cpp
+++ b/lib/Target/Mips/MipsISelLowering.cpp
@@ -2480,8 +2480,8 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
   // Analyze operands of the call, assigning locations to each operand.
   SmallVector ArgLocs;
-  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
-                 getTargetMachine(), ArgLocs, *DAG.getContext());
+  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
+                 *DAG.getContext());
   MipsCC::SpecialCallingConvType SpecialCallingConv =
     getSpecialCallingConv(Callee);
   MipsCC MipsCCInfo(CallConv, Subtarget.isABI_O32(), Subtarget.isFP64bit(),

@@ -2684,8 +2684,8 @@ MipsTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                     const Type *RetTy) const {
   // Assign locations to each value returned by this call.
   SmallVector RVLocs;
-  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
-                 getTargetMachine(), RVLocs, *DAG.getContext());
+  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
+                 *DAG.getContext());
   MipsCC MipsCCInfo(CallConv, Subtarget.isABI_O32(), Subtarget.isFP64bit(),
                     CCInfo);

@@ -2732,8 +2732,8 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
   // Assign locations to all of the incoming arguments.
   SmallVector ArgLocs;
-  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
-                 getTargetMachine(), ArgLocs, *DAG.getContext());
+  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
+                 *DAG.getContext());
   MipsCC MipsCCInfo(CallConv, Subtarget.isABI_O32(), Subtarget.isFP64bit(),
                     CCInfo);
   Function::const_arg_iterator FuncArg =

@@ -2868,8 +2868,7 @@ MipsTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
                                    const SmallVectorImpl &Outs,
                                    LLVMContext &Context) const {
   SmallVector RVLocs;
-  CCState CCInfo(CallConv, IsVarArg, MF, getTargetMachine(),
-                 RVLocs, Context);
+  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
   return CCInfo.CheckReturn(Outs, RetCC_Mips);
 }

@@ -2885,8 +2884,7 @@ MipsTargetLowering::LowerReturn(SDValue Chain,
   MachineFunction &MF = DAG.getMachineFunction();

   // CCState - Info about the registers and stack slot.
-  CCState CCInfo(CallConv, IsVarArg, MF, getTargetMachine(), RVLocs,
-                 *DAG.getContext());
+  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
   MipsCC MipsCCInfo(CallConv, Subtarget.isABI_O32(), Subtarget.isFP64bit(),
                     CCInfo);

diff --git a/lib/Target/PowerPC/PPCCallingConv.td b/lib/Target/PowerPC/PPCCallingConv.td
index 222760a0cb9..cf8fee49197 100644
--- a/lib/Target/PowerPC/PPCCallingConv.td
+++ b/lib/Target/PowerPC/PPCCallingConv.td
@@ -14,9 +14,15 @@
 /// CCIfSubtarget - Match if the current subtarget has a feature F.
 class CCIfSubtarget
-  : CCIf().", F), A>;
+    : CCIf"
+          "(State.getMachineFunction().getSubtarget()).",
+      F),
+      A>;
 class CCIfNotSubtarget
-  : CCIf().", F), A>;
+    : CCIf"
+          "(State.getMachineFunction().getSubtarget()).",
+      F),
+      A>;

 //===----------------------------------------------------------------------===//
 // Return Value Calling Convention

diff --git a/lib/Target/PowerPC/PPCFastISel.cpp b/lib/Target/PowerPC/PPCFastISel.cpp
index a397e8f2c39..04a391a92db 100644
--- a/lib/Target/PowerPC/PPCFastISel.cpp
+++ b/lib/Target/PowerPC/PPCFastISel.cpp
@@ -1199,7 +1199,7 @@ bool PPCFastISel::processCallArgs(SmallVectorImpl &Args,
                                   unsigned &NumBytes, bool IsVarArg) {
   SmallVector ArgLocs;
-  CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, TM, ArgLocs, *Context);
+  CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, ArgLocs, *Context);

   // Reserve space for the linkage area on the stack.
   bool isELFv2ABI = PPCSubTarget->isELFv2ABI();
@@ -1320,7 +1320,7 @@ void PPCFastISel::finishCall(MVT RetVT, SmallVectorImpl &UsedRegs,
   // any real difficulties there.
   if (RetVT != MVT::isVoid) {
     SmallVector RVLocs;
-    CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, TM, RVLocs, *Context);
+    CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, RVLocs, *Context);
     CCInfo.AnalyzeCallResult(RetVT, RetCC_PPC64_ELF_FIS);
     CCValAssign &VA = RVLocs[0];
     assert(RVLocs.size() == 1 && "No support for multi-reg return values!");

@@ -1410,7 +1410,7 @@ bool PPCFastISel::SelectCall(const Instruction *I) {
       RetVT != MVT::i32 && RetVT != MVT::i64 && RetVT != MVT::f32 &&
       RetVT != MVT::f64) {
     SmallVector RVLocs;
-    CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, TM, RVLocs, *Context);
+    CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, RVLocs, *Context);
     CCInfo.AnalyzeCallResult(RetVT, RetCC_PPC64_ELF_FIS);
     if (RVLocs.size() > 1)
       return false;

@@ -1537,7 +1537,7 @@ bool PPCFastISel::SelectRet(const Instruction *I) {
     // Analyze operands of the call, assigning locations to each operand.
     SmallVector ValLocs;
-    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs, *Context);
+    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs, *Context);
     CCInfo.AnalyzeReturn(Outs, RetCC_PPC64_ELF_FIS);
     const Value *RV = Ret->getOperand(0);

diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp
index 113db311a47..36e1e1334e5 100644
--- a/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -2386,8 +2386,8 @@ PPCTargetLowering::LowerFormalArguments_32SVR4(
   // Assign locations to all of the incoming arguments.
   SmallVector ArgLocs;
-  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                 getTargetMachine(), ArgLocs, *DAG.getContext());
+  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
+                 *DAG.getContext());

   // Reserve space for the linkage area on the stack.
   unsigned LinkageSize = PPCFrameLowering::getLinkageSize(false, false, false);
@@ -2461,7 +2461,7 @@ PPCTargetLowering::LowerFormalArguments_32SVR4(
   // caller's stack frame, right above the parameter list area.
   SmallVector ByValArgLocs;
   CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                      getTargetMachine(), ByValArgLocs, *DAG.getContext());
+                      ByValArgLocs, *DAG.getContext());

   // Reserve stack space for the allocations in CCInfo.
   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
@@ -3743,8 +3743,8 @@ PPCTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                    SmallVectorImpl &InVals) const {
   SmallVector RVLocs;
-  CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                    getTargetMachine(), RVLocs, *DAG.getContext());
+  CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
+                    *DAG.getContext());
   CCRetInfo.AnalyzeCallResult(Ins, RetCC_PPC);

   // Copy all of the result registers out of their specified physreg.
@@ -3959,8 +3959,8 @@ PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee,
   // Assign locations to all of the outgoing arguments.
   SmallVector ArgLocs;
-  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                 getTargetMachine(), ArgLocs, *DAG.getContext());
+  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
+                 *DAG.getContext());

   // Reserve space for the linkage area on the stack.
   CCInfo.AllocateStack(PPCFrameLowering::getLinkageSize(false, false, false),

@@ -4001,7 +4001,7 @@ PPCTargetLowering::LowerCall_32SVR4(SDValue Chain, SDValue Callee,
   // Assign locations to all of the outgoing aggregate by value arguments.
   SmallVector ByValArgLocs;
   CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                      getTargetMachine(), ByValArgLocs, *DAG.getContext());
+                      ByValArgLocs, *DAG.getContext());

   // Reserve stack space for the allocations in CCInfo.
   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
@@ -5037,8 +5037,7 @@ PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
                                   const SmallVectorImpl &Outs,
                                   LLVMContext &Context) const {
   SmallVector RVLocs;
-  CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(),
-                 RVLocs, Context);
+  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
   return CCInfo.CheckReturn(Outs, RetCC_PPC);
 }

@@ -5050,8 +5049,8 @@ PPCTargetLowering::LowerReturn(SDValue Chain,
                                SDLoc dl, SelectionDAG &DAG) const {
   SmallVector RVLocs;
-  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                 getTargetMachine(), RVLocs, *DAG.getContext());
+  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
+                 *DAG.getContext());
   CCInfo.AnalyzeReturn(Outs, RetCC_PPC);

   SDValue Flag;

diff --git a/lib/Target/R600/AMDGPUCallingConv.td b/lib/Target/R600/AMDGPUCallingConv.td
index 3586c882690..6ffa7a08358 100644
--- a/lib/Target/R600/AMDGPUCallingConv.td
+++ b/lib/Target/R600/AMDGPUCallingConv.td
@@ -59,16 +59,24 @@ def CC_AMDGPU_Kernel : CallingConv<[
 ]>;

 def CC_AMDGPU : CallingConv<[
-  CCIf<"State.getTarget().getSubtarget().getGeneration() >= "
-       "AMDGPUSubtarget::SOUTHERN_ISLANDS && "
-       "State.getMachineFunction().getInfo()->"#
-       "getShaderType() == ShaderType::COMPUTE", CCDelegateTo>,
-  CCIf<"State.getTarget().getSubtarget().getGeneration() < "
-       "AMDGPUSubtarget::SOUTHERN_ISLANDS && "
-       "State.getMachineFunction().getInfo()->"
-       "getShaderType() == ShaderType::COMPUTE", CCDelegateTo>,
-  CCIf<"State.getTarget().getSubtarget()"#
-       ".getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS", CCDelegateTo>,
-  CCIf<"State.getTarget().getSubtarget()"#
-       ".getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS", CCDelegateTo>
+  CCIf<"static_cast"
+         "(State.getMachineFunction().getSubtarget()).getGeneration() >="
+         "AMDGPUSubtarget::SOUTHERN_ISLANDS && "
+         "State.getMachineFunction().getInfo()"
+         "->getShaderType() == ShaderType::COMPUTE",
+       CCDelegateTo>,
+  CCIf<"static_cast"
+         "(State.getMachineFunction().getSubtarget()).getGeneration() < "
+         "AMDGPUSubtarget::SOUTHERN_ISLANDS && "
+         "State.getMachineFunction().getInfo()"
+         "->getShaderType() == ShaderType::COMPUTE",
+       CCDelegateTo>,
+  CCIf<"static_cast"
+         "(State.getMachineFunction().getSubtarget()).getGeneration() >= "
+         "AMDGPUSubtarget::SOUTHERN_ISLANDS",
+       CCDelegateTo>,
+  CCIf<"static_cast"
+         "(State.getMachineFunction().getSubtarget()).getGeneration() < "
+         "AMDGPUSubtarget::SOUTHERN_ISLANDS",
+       CCDelegateTo>
 ]>;

diff --git a/lib/Target/R600/R600ISelLowering.cpp b/lib/Target/R600/R600ISelLowering.cpp
index aa51ac8ae39..8877cc85504 100644
--- a/lib/Target/R600/R600ISelLowering.cpp
+++ b/lib/Target/R600/R600ISelLowering.cpp
@@ -1692,8 +1692,8 @@ SDValue R600TargetLowering::LowerFormalArguments(
                                                  SDLoc DL, SelectionDAG &DAG,
                                                  SmallVectorImpl &InVals) const {
   SmallVector ArgLocs;
-  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                 getTargetMachine(), ArgLocs, *DAG.getContext());
+  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
+                 *DAG.getContext());
   MachineFunction &MF = DAG.getMachineFunction();
   unsigned ShaderType = MF.getInfo()->getShaderType();

diff --git a/lib/Target/R600/SIISelLowering.cpp b/lib/Target/R600/SIISelLowering.cpp
index 2e982e2c0a3..f7717dacfc6 100644
--- a/lib/Target/R600/SIISelLowering.cpp
+++ b/lib/Target/R600/SIISelLowering.cpp
@@ -395,8 +395,8 @@ SDValue SITargetLowering::LowerFormalArguments(
   }

   SmallVector ArgLocs;
-  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                 getTargetMachine(), ArgLocs, *DAG.getContext());
+  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
+                 *DAG.getContext());

   // At least one interpolation mode must be enabled or else the GPU will hang.
   if (Info->getShaderType() == ShaderType::PIXEL &&

diff --git a/lib/Target/Sparc/SparcISelLowering.cpp b/lib/Target/Sparc/SparcISelLowering.cpp
index bda07e39b26..8a7b437c881 100644
--- a/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/lib/Target/Sparc/SparcISelLowering.cpp
@@ -190,8 +190,8 @@ SparcTargetLowering::LowerReturn_32(SDValue Chain,
   SmallVector RVLocs;

   // CCState - Info about the registers and stack slot.
-  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
-                 DAG.getTarget(), RVLocs, *DAG.getContext());
+  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
+                 *DAG.getContext());

   // Analyze return values.
   CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);
@@ -250,8 +250,8 @@ SparcTargetLowering::LowerReturn_64(SDValue Chain,
   SmallVector RVLocs;

   // CCState - Info about the registers and stack slot.
-  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
-                 DAG.getTarget(), RVLocs, *DAG.getContext());
+  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
+                 *DAG.getContext());

   // Analyze return values.
   CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64);
@@ -349,8 +349,8 @@ LowerFormalArguments_32(SDValue Chain,
   // Assign locations to all of the incoming arguments.
   SmallVector ArgLocs;
-  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                 getTargetMachine(), ArgLocs, *DAG.getContext());
+  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
+                 *DAG.getContext());
   CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);

   const unsigned StackOffset = 92;
@@ -549,8 +549,8 @@ LowerFormalArguments_64(SDValue Chain,
   // Analyze arguments according to CC_Sparc64.
   SmallVector ArgLocs;
-  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
-                 getTargetMachine(), ArgLocs, *DAG.getContext());
+  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
+                 *DAG.getContext());
   CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64);

   // The argument array begins at %fp+BIAS+128, after the register save area.
@@ -698,8 +698,8 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
   // Analyze operands of the call, assigning locations to each operand.
   SmallVector ArgLocs;
-  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                 DAG.getTarget(), ArgLocs, *DAG.getContext());
+  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
+                 *DAG.getContext());
   CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);

   // Get the size of the outgoing arguments stack space requirement.
@@ -934,8 +934,8 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
   // Assign locations to each value returned by this call.
   SmallVector RVLocs;
-  CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                 DAG.getTarget(), RVLocs, *DAG.getContext());
+  CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
+                 *DAG.getContext());

   RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);

@@ -1061,8 +1061,8 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
   // Analyze operands of the call, assigning locations to each operand.
   SmallVector ArgLocs;
-  CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(),
-                 DAG.getTarget(), ArgLocs, *DAG.getContext());
+  CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
+                 *DAG.getContext());
   CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);

   // Get the size of the outgoing arguments stack space requirement.
@@ -1255,8 +1255,8 @@ SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
   // Assign locations to each value returned by this call.
   SmallVector RVLocs;
-  CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(),
-                 DAG.getTarget(), RVLocs, *DAG.getContext());
+  CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), RVLocs,
+                 *DAG.getContext());

   // Set inreg flag manually for codegen generated library calls that
   // return float.

diff --git a/lib/Target/SystemZ/SystemZISelLowering.cpp b/lib/Target/SystemZ/SystemZISelLowering.cpp
index 721d3c2a36e..228ca6e3d1e 100644
--- a/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -679,8 +679,7 @@ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
   // Assign locations to all of the incoming arguments.
   SmallVector ArgLocs;
-  CCState CCInfo(CallConv, IsVarArg, MF, DAG.getTarget(), ArgLocs,
-                 *DAG.getContext());
+  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
   CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);

   unsigned NumFixedGPRs = 0;
@@ -818,8 +817,7 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
   // Analyze the operands of the call, assigning locations to each operand.
   SmallVector ArgLocs;
-  CCState ArgCCInfo(CallConv, IsVarArg, MF, DAG.getTarget(), ArgLocs,
-                    *DAG.getContext());
+  CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
   ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);

   // We don't support GuaranteedTailCallOpt, only automatically-detected
@@ -942,8 +940,7 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
   // Assign locations to each value returned by this call.
   SmallVector RetLocs;
-  CCState RetCCInfo(CallConv, IsVarArg, MF, DAG.getTarget(), RetLocs,
-                    *DAG.getContext());
+  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
   RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);

   // Copy all of the result registers out of their specified physreg.
@@ -974,8 +971,7 @@ SystemZTargetLowering::LowerReturn(SDValue Chain,
   // Assign locations to each returned value.
   SmallVector RetLocs;
-  CCState RetCCInfo(CallConv, IsVarArg, MF, DAG.getTarget(), RetLocs,
-                    *DAG.getContext());
+  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
   RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ);

   // Quick exit for void returns

diff --git a/lib/Target/X86/X86CallingConv.td b/lib/Target/X86/X86CallingConv.td
index 1f3cde828b6..899a9600857 100644
--- a/lib/Target/X86/X86CallingConv.td
+++ b/lib/Target/X86/X86CallingConv.td
@@ -14,7 +14,9 @@
 /// CCIfSubtarget - Match if the current subtarget has a feature F.
 class CCIfSubtarget
-  : CCIf().", F), A>;
+    : CCIf"
+          "(State.getMachineFunction().getSubtarget()).", F),
+      A>;

 //===----------------------------------------------------------------------===//
 // Return Value Calling Conventions

diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp
index ca631ea4688..101c723e6a5 100644
--- a/lib/Target/X86/X86FastISel.cpp
+++ b/lib/Target/X86/X86FastISel.cpp
@@ -993,8 +993,7 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
     // Analyze operands of the call, assigning locations to each operand.
     SmallVector ValLocs;
-    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,
-                   I->getContext());
+    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs, I->getContext());
     CCInfo.AnalyzeReturn(Outs, RetCC_X86);

     const Value *RV = Ret->getOperand(0);
@@ -2730,8 +2729,7 @@ bool X86FastISel::FastLowerCall(CallLoweringInfo &CLI) {
   // Analyze operands of the call, assigning locations to each operand.
   SmallVector ArgLocs;
-  CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, TM, ArgLocs,
-                 CLI.RetTy->getContext());
+  CCState CCInfo(CC, IsVarArg, *FuncInfo.MF, ArgLocs, CLI.RetTy->getContext());

   // Allocate shadow area for Win64
   if (IsWin64)
@@ -2972,7 +2970,7 @@ bool X86FastISel::FastLowerCall(CallLoweringInfo &CLI) {
   // Now handle call return values.
   SmallVector RVLocs;
-  CCState CCRetInfo(CC, IsVarArg, *FuncInfo.MF, TM, RVLocs,
+  CCState CCRetInfo(CC, IsVarArg, *FuncInfo.MF, RVLocs,
                     CLI.RetTy->getContext());
   CCRetInfo.AnalyzeCallResult(Ins, RetCC_X86);

diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index ab87d172297..47e3a5a6dac 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -1901,8 +1901,7 @@ X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv,
                                   const SmallVectorImpl &Outs,
                                   LLVMContext &Context) const {
   SmallVector RVLocs;
-  CCState CCInfo(CallConv, isVarArg, MF, MF.getTarget(),
-                 RVLocs, Context);
+  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
   return CCInfo.CheckReturn(Outs, RetCC_X86);
 }

@@ -1921,8 +1920,7 @@ X86TargetLowering::LowerReturn(SDValue Chain,
   X86MachineFunctionInfo *FuncInfo = MF.getInfo();

   SmallVector RVLocs;
-  CCState CCInfo(CallConv, isVarArg, MF, DAG.getTarget(),
-                 RVLocs, *DAG.getContext());
+  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
   CCInfo.AnalyzeReturn(Outs, RetCC_X86);

   SDValue Flag;
@@ -2092,8 +2090,8 @@ X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
   // Assign locations to each value returned by this call.
   SmallVector RVLocs;
   bool Is64Bit = Subtarget->is64Bit();
-  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                 DAG.getTarget(), RVLocs, *DAG.getContext());
+  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
+                 *DAG.getContext());
   CCInfo.AnalyzeCallResult(Ins, RetCC_X86);

   // Copy all of the result registers out of their specified physreg.
@@ -2289,8 +2287,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
   // Assign locations to all of the incoming arguments.
   SmallVector ArgLocs;
-  CCState CCInfo(CallConv, isVarArg, MF, DAG.getTarget(),
-                 ArgLocs, *DAG.getContext());
+  CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());

   // Allocate shadow area for Win64
   if (IsWin64)
@@ -2656,8 +2653,7 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
   // Analyze operands of the call, assigning locations to each operand.
   SmallVector ArgLocs;
-  CCState CCInfo(CallConv, isVarArg, MF, MF.getTarget(),
-                 ArgLocs, *DAG.getContext());
+  CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());

   // Allocate shadow area for Win64
   if (IsWin64)
@@ -3253,8 +3249,8 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
       return false;

     SmallVector ArgLocs;
-    CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(),
-                   DAG.getTarget(), ArgLocs, *DAG.getContext());
+    CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
+                   *DAG.getContext());

     CCInfo.AnalyzeCallOperands(Outs, CC_X86);
     for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
@@ -3274,8 +3270,8 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
   }
   if (Unused) {
     SmallVector RVLocs;
-    CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(),
-                   DAG.getTarget(), RVLocs, *DAG.getContext());
+    CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(), RVLocs,
+                   *DAG.getContext());
     CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
     for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
       CCValAssign &VA = RVLocs[i];
@@ -3288,13 +3284,13 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
   // results are returned in the same way as what the caller expects.
   if (!CCMatch) {
     SmallVector RVLocs1;
-    CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(),
-                    DAG.getTarget(), RVLocs1, *DAG.getContext());
+    CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), RVLocs1,
+                    *DAG.getContext());
     CCInfo1.AnalyzeCallResult(Ins, RetCC_X86);

     SmallVector RVLocs2;
-    CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(),
-                    DAG.getTarget(), RVLocs2, *DAG.getContext());
+    CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), RVLocs2,
+                    *DAG.getContext());
     CCInfo2.AnalyzeCallResult(Ins, RetCC_X86);

     if (RVLocs1.size() != RVLocs2.size())
@@ -3320,8 +3316,8 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
     // Check if stack adjustment is needed. For now, do not do this if any
     // argument is passed on the stack.
     SmallVector ArgLocs;
-    CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(),
-                   DAG.getTarget(), ArgLocs, *DAG.getContext());
+    CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
+                   *DAG.getContext());

     // Allocate shadow area for Win64
     if (IsCalleeWin64)

diff --git a/lib/Target/XCore/XCoreISelLowering.cpp b/lib/Target/XCore/XCoreISelLowering.cpp
index 39fd26e1d37..76c2d9549d4 100644
--- a/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/lib/Target/XCore/XCoreISelLowering.cpp
@@ -1125,8 +1125,8 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
   // Analyze operands of the call, assigning locations to each operand.
   SmallVector ArgLocs;
-  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                 getTargetMachine(), ArgLocs, *DAG.getContext());
+  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
+                 *DAG.getContext());

   // The ABI dictates there should be one stack slot available to the callee
   // on function entry (for saving lr).
@@ -1136,8 +1136,8 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
   SmallVector RVLocs;
   // Analyze return values to determine the number of bytes of stack required.
-  CCState RetCCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                    getTargetMachine(), RVLocs, *DAG.getContext());
+  CCState RetCCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
+                    *DAG.getContext());
   RetCCInfo.AllocateStack(CCInfo.getNextStackOffset(), 4);
   RetCCInfo.AnalyzeCallResult(Ins, RetCC_XCore);

@@ -1291,8 +1291,8 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
   // Assign locations to all of the incoming arguments.
   SmallVector ArgLocs;
-  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                 getTargetMachine(), ArgLocs, *DAG.getContext());
+  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
+                 *DAG.getContext());

   CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);

@@ -1450,7 +1450,7 @@ CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                const SmallVectorImpl &Outs,
                LLVMContext &Context) const {
   SmallVector RVLocs;
-  CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), RVLocs, Context);
+  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
   if (!CCInfo.CheckReturn(Outs, RetCC_XCore))
     return false;
   if (CCInfo.getNextStackOffset() != 0 && isVarArg)
@@ -1474,8 +1474,8 @@ XCoreTargetLowering::LowerReturn(SDValue Chain,
   SmallVector RVLocs;

   // CCState - Info about the registers and stack slot.
-  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
-                 getTargetMachine(), RVLocs, *DAG.getContext());
+  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
+                 *DAG.getContext());

   // Analyze return values.
   if (!isVarArg)

diff --git a/utils/TableGen/CallingConvEmitter.cpp b/utils/TableGen/CallingConvEmitter.cpp
index 435d3f001d2..907a90c76df 100644
--- a/utils/TableGen/CallingConvEmitter.cpp
+++ b/utils/TableGen/CallingConvEmitter.cpp
@@ -179,14 +179,14 @@ void CallingConvEmitter::EmitAction(Record *Action,
         O << Size << ", ";
       else
         O << "\n" << IndentStr
-          << " State.getTarget().getSubtargetImpl()->getDataLayout()"
+          << " State.getMachineFunction().getSubtarget().getDataLayout()"
             "->getTypeAllocSize(EVT(LocVT).getTypeForEVT(State.getContext())),"
            " ";
       if (Align)
        O << Align;
      else
        O << "\n" << IndentStr
-         << " State.getTarget().getSubtargetImpl()->getDataLayout()"
+         << " State.getMachineFunction().getSubtarget().getDataLayout()"
           "->getABITypeAlignment(EVT(LocVT).getTypeForEVT(State.getContext()"
           "))";
      O << ");\n" << IndentStr
--
2.34.1
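
Every hunk in this patch applies the same mechanical change: `CCState` (and the target-specific wrappers `ARMCCState` and `HexagonCCState`) stops storing a `TargetMachine` reference, because the `MachineFunction` it already receives can hand out the subtarget, and every call site simply drops the `TM`/`getTargetMachine()`/`DAG.getTarget()` argument. The toy program below is a minimal sketch of that pattern using stand-in types; it is not LLVM code, and `RegisterInfo`, `Subtarget`, and `MachineFunction` here are simplified placeholders for the real classes.

```cpp
#include <iostream>

// Stand-ins for the real LLVM classes, reduced to what the pattern needs.
struct RegisterInfo { int NumRegs = 32; };

struct Subtarget {
  RegisterInfo RI;
  const RegisterInfo &getRegisterInfo() const { return RI; }
};

struct MachineFunction {
  Subtarget ST;
  const Subtarget &getSubtarget() const { return ST; }
};

// Before the patch, the analogous class also took a TargetMachine-like
// parameter and cached it only to reach the subtarget.  After the patch it
// derives everything from the MachineFunction it is handed anyway.
class CCState {
  MachineFunction &MF;
  const RegisterInfo &TRI;

public:
  explicit CCState(MachineFunction &mf)
      : MF(mf), TRI(MF.getSubtarget().getRegisterInfo()) {}

  MachineFunction &getMachineFunction() const { return MF; }
  int availableRegs() const { return TRI.NumRegs; }
};

int main() {
  MachineFunction MF;
  CCState Info(MF); // call sites drop the extra target-machine argument
  std::cout << Info.availableRegs() << "\n";
  return 0;
}
```

The TableGen edits follow the same reasoning: calling-convention predicates that previously went through `State.getTarget()` to reach the subtarget now use `State.getMachineFunction().getSubtarget()` instead.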