X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FCodeGen%2FCallingConvLower.cpp;h=23c0d542560ec0a0ce1cb10af997bb3f54daf092;hb=114db87382813de635450d88dded16511aea1fda;hp=56ecde0936c04b5af64b2462b19d6429ca55d01a;hpb=ded375f282a873c0852422f070e65a97f9a4ef74;p=oota-llvm.git

diff --git a/lib/CodeGen/CallingConvLower.cpp b/lib/CodeGen/CallingConvLower.cpp
index 56ecde0936c..23c0d542560 100644
--- a/lib/CodeGen/CallingConvLower.cpp
+++ b/lib/CodeGen/CallingConvLower.cpp
@@ -14,9 +14,11 @@
 #include "llvm/CodeGen/CallingConvLower.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/IR/DataLayout.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/SaveAndRestore.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Target/TargetLowering.h"
 #include "llvm/Target/TargetRegisterInfo.h"
@@ -30,14 +32,15 @@ CCState::CCState(CallingConv::ID CC, bool isVarArg, MachineFunction &mf,
       CallOrPrologue(Unknown) {
   // No stack is used.
   StackOffset = 0;
+  MaxStackArgAlign = 1;
 
   clearByValRegsInfo();
   UsedRegs.resize((TRI.getNumRegs()+31)/32);
 }
 
-// HandleByVal - Allocate space on the stack large enough to pass an argument
-// by value. The size and alignment information of the argument is encoded in
-// its parameter attribute.
+/// Allocate space on the stack large enough to pass an argument by value.
+/// The size and alignment information of the argument is encoded in
+/// its parameter attribute.
 void CCState::HandleByVal(unsigned ValNo, MVT ValVT,
                           MVT LocVT, CCValAssign::LocInfo LocInfo,
                           int MinSize, int MinAlign,
@@ -55,13 +58,13 @@ void CCState::HandleByVal(unsigned ValNo, MVT ValVT,
   addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
 }
 
-/// MarkAllocated - Mark a register and all of its aliases as allocated.
+/// Mark a register and all of its aliases as allocated.
 void CCState::MarkAllocated(unsigned Reg) {
   for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI)
     UsedRegs[*AI/32] |= 1 << (*AI&31);
 }
 
-/// AnalyzeFormalArguments - Analyze an array of argument values,
+/// Analyze an array of argument values,
 /// incorporating info about the formals into this state.
 void
 CCState::AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
@@ -81,8 +84,8 @@ CCState::AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
   }
 }
 
-/// CheckReturn - Analyze the return values of a function, returning true if
-/// the return can be performed without sret-demotion, and false otherwise.
+/// Analyze the return values of a function, returning true if the return can
+/// be performed without sret-demotion and false otherwise.
 bool CCState::CheckReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
                           CCAssignFn Fn) {
   // Determine which register each value should be copied into.
@@ -95,7 +98,7 @@ bool CCState::CheckReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
   return true;
 }
 
-/// AnalyzeReturn - Analyze the returned values of a return,
+/// Analyze the returned values of a return,
 /// incorporating info about the result values into this state.
 void CCState::AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
                             CCAssignFn Fn) {
@@ -113,7 +116,7 @@ void CCState::AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
   }
 }
 
-/// AnalyzeCallOperands - Analyze the outgoing arguments to a call,
+/// Analyze the outgoing arguments to a call,
 /// incorporating info about the passed values into this state.
 void CCState::AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   CCAssignFn Fn) {
@@ -131,8 +134,7 @@ void CCState::AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
   }
 }
 
-/// AnalyzeCallOperands - Same as above except it takes vectors of types
-/// and argument flags.
+/// Same as above except it takes vectors of types and argument flags.
 void CCState::AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs,
                                   SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
                                   CCAssignFn Fn) {
@@ -150,8 +152,8 @@ void CCState::AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs,
   }
 }
 
-/// AnalyzeCallResult - Analyze the return values of a call,
-/// incorporating info about the passed values into this state.
+/// Analyze the return values of a call, incorporating info about the passed
+/// values into this state.
 void CCState::AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
                                 CCAssignFn Fn) {
   for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
@@ -167,8 +169,7 @@ void CCState::AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
   }
 }
 
-/// AnalyzeCallResult - Same as above except it's specialized for calls which
-/// produce a single value.
+/// Same as above except it's specialized for calls that produce a single value.
 void CCState::AnalyzeCallResult(MVT VT, CCAssignFn Fn) {
   if (Fn(0, VT, VT, CCValAssign::Full, ISD::ArgFlagsTy(), *this)) {
 #ifndef NDEBUG
@@ -178,3 +179,72 @@ void CCState::AnalyzeCallResult(MVT VT, CCAssignFn Fn) {
     llvm_unreachable(nullptr);
   }
 }
+
+static bool isValueTypeInRegForCC(CallingConv::ID CC, MVT VT) {
+  if (VT.isVector())
+    return true; // Assume -msse-regparm might be in effect.
+  if (!VT.isInteger())
+    return false;
+  if (CC == CallingConv::X86_VectorCall || CC == CallingConv::X86_FastCall)
+    return true;
+  return false;
+}
+
+void CCState::getRemainingRegParmsForType(SmallVectorImpl<MCPhysReg> &Regs,
+                                          MVT VT, CCAssignFn Fn) {
+  unsigned SavedStackOffset = StackOffset;
+  unsigned SavedMaxStackArgAlign = MaxStackArgAlign;
+  unsigned NumLocs = Locs.size();
+
+  // Set the 'inreg' flag if it is used for this calling convention.
+  ISD::ArgFlagsTy Flags;
+  if (isValueTypeInRegForCC(CallingConv, VT))
+    Flags.setInReg();
+
+  // Allocate something of this value type repeatedly until we get assigned a
+  // location in memory.
+  bool HaveRegParm = true;
+  while (HaveRegParm) {
+    if (Fn(0, VT, VT, CCValAssign::Full, Flags, *this)) {
+#ifndef NDEBUG
+      dbgs() << "Call has unhandled type " << EVT(VT).getEVTString()
+             << " while computing remaining regparms\n";
+#endif
+      llvm_unreachable(nullptr);
+    }
+    HaveRegParm = Locs.back().isRegLoc();
+  }
+
+  // Copy all the registers from the value locations we added.
+  assert(NumLocs < Locs.size() && "CC assignment failed to add location");
+  for (unsigned I = NumLocs, E = Locs.size(); I != E; ++I)
+    if (Locs[I].isRegLoc())
+      Regs.push_back(MCPhysReg(Locs[I].getLocReg()));
+
+  // Clear the assigned values and stack memory. We leave the registers marked
+  // as allocated so that future queries don't return the same registers, i.e.
+  // when i64 and f64 are both passed in GPRs.
+  StackOffset = SavedStackOffset;
+  MaxStackArgAlign = SavedMaxStackArgAlign;
+  Locs.resize(NumLocs);
+}
+
+void CCState::analyzeMustTailForwardedRegisters(
+    SmallVectorImpl<ForwardedRegister> &Forwards, ArrayRef<MVT> RegParmTypes,
+    CCAssignFn Fn) {
+  // Oftentimes calling conventions will not use register parameters for
+  // variadic functions, so we need to assume we're not variadic so that we get
+  // all the registers that might be used in a non-variadic call.
+  SaveAndRestore<bool> SavedVarArg(IsVarArg, false);
+
+  for (MVT RegVT : RegParmTypes) {
+    SmallVector<MCPhysReg, 8> RemainingRegs;
+    getRemainingRegParmsForType(RemainingRegs, RegVT, Fn);
+    const TargetLowering *TL = MF.getSubtarget().getTargetLowering();
+    const TargetRegisterClass *RC = TL->getRegClassFor(RegVT);
+    for (MCPhysReg PReg : RemainingRegs) {
+      unsigned VReg = MF.addLiveIn(PReg, RC);
+      Forwards.push_back(ForwardedRegister(VReg, PReg, RegVT));
+    }
+  }
+}
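
For readers unfamiliar with how the new entry point is meant to be consumed, the fragment below is a sketch of a caller, modeled loosely on how an x86-style backend lowers the formal arguments of a variadic function that may later be the target of a musttail call. Only analyzeMustTailForwardedRegisters and ForwardedRegister come from this patch; CCInfo, FuncInfo->getForwardedMustTailRegParms(), RegParmTypes, and CC_X86 are assumed surrounding context, not part of this diff.

// Sketch only (assumed caller, not part of this diff): inside a target's
// LowerFormalArguments for a variadic, potentially musttail-called function.
// CCInfo is the CCState that already assigned the fixed arguments, and
// CC_X86 stands in for the target's CCAssignFn.
SmallVector<MVT, 2> RegParmTypes;       // value types this target passes in regs
RegParmTypes.push_back(MVT::i64);
RegParmTypes.push_back(MVT::f64);

// Ask CCState which parameter registers are still unallocated; for each one
// it records a (virtual reg, physical reg, type) triple and marks the
// physical register live-in, so the incoming value survives until the call.
SmallVectorImpl<ForwardedRegister> &Forwards =
    FuncInfo->getForwardedMustTailRegParms();  // assumed per-function storage
CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);

// When the musttail call itself is lowered, each entry is copied from its
// virtual register (F.VReg) back into the physical register (F.PReg) so the
// callee sees exactly the registers this function received.

The split into getRemainingRegParmsForType and analyzeMustTailForwardedRegisters keeps the register-exhaustion loop reusable per value type while leaving the registers marked allocated, so querying i64 and then f64 cannot hand back the same GPRs twice.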