#include "ARMTargetMachine.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
+#include "llvm/Function.h"
#include "llvm/Instruction.h"
#include "llvm/Intrinsics.h"
#include "llvm/GlobalValue.h"
+#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;
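+// Forward declarations for the custom f64 assignment handlers defined below;
+// they must be visible before ARMGenCallingConv.inc (included further down)
+// refers to them.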
+static bool CC_ARM_APCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+ CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags,
+ CCState &State);
+static bool CC_ARM_AAPCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+ CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags,
+ CCState &State);
+static bool RetCC_ARM_APCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+ CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags,
+ CCState &State);
+static bool RetCC_ARM_AAPCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+ CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags,
+ CCState &State);
+
ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
: TargetLowering(TM), ARMPCLabelIndex(0) {
Subtarget = &TM.getSubtarget<ARMSubtarget>();
// Integer to floating-point conversions.
// i64 conversions are done via library routines even when generating VFP
// instructions, so use the same ones.
- // FIXME: There appears to be some naming inconsistency in ARM libgcc: e.g.
- // __floatunsidf vs. __floatunssidfvfp.
+ // FIXME: There appears to be some naming inconsistency in ARM libgcc:
+ // e.g., __floatunsidf vs. __floatunssidfvfp.
setLibcallName(RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp");
setLibcallName(RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp");
setLibcallName(RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp");
}
}
- addRegisterClass(MVT::i32, ARM::GPRRegisterClass);
+ if (Subtarget->isThumb())
+ addRegisterClass(MVT::i32, ARM::tGPRRegisterClass);
+ else
+ addRegisterClass(MVT::i32, ARM::GPRRegisterClass);
if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb()) {
addRegisterClass(MVT::f32, ARM::SPRRegisterClass);
addRegisterClass(MVT::f64, ARM::DPRRegisterClass);
-
+
setTruncStoreAction(MVT::f64, MVT::f32, Expand);
}
computeRegisterProperties();
// ARM does not have ROTL.
setOperationAction(ISD::ROTL, MVT::i32, Expand);
- setOperationAction(ISD::CTTZ , MVT::i32, Expand);
+ setOperationAction(ISD::CTTZ, MVT::i32, Expand);
setOperationAction(ISD::CTPOP, MVT::i32, Expand);
if (!Subtarget->hasV5TOps() || Subtarget->isThumb())
setOperationAction(ISD::CTLZ, MVT::i32, Expand);
setOperationAction(ISD::UREM, MVT::i32, Expand);
setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
-
+
// Support label based line numbers.
setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
// Use the default implementation.
- setOperationAction(ISD::VASTART , MVT::Other, Custom);
- setOperationAction(ISD::VAARG , MVT::Other, Expand);
- setOperationAction(ISD::VACOPY , MVT::Other, Expand);
- setOperationAction(ISD::VAEND , MVT::Other, Expand);
- setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
+ setOperationAction(ISD::VASTART, MVT::Other, Custom);
+ setOperationAction(ISD::VAARG, MVT::Other, Expand);
+ setOperationAction(ISD::VACOPY, MVT::Other, Expand);
+ setOperationAction(ISD::VAEND, MVT::Other, Expand);
+ setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
- setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Expand);
- setOperationAction(ISD::MEMBARRIER , MVT::Other, Expand);
+ setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
+ setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
if (!Subtarget->hasV6Ops()) {
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
// We want to custom lower some of our intrinsics.
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
- setOperationAction(ISD::SETCC , MVT::i32, Expand);
- setOperationAction(ISD::SETCC , MVT::f32, Expand);
- setOperationAction(ISD::SETCC , MVT::f64, Expand);
- setOperationAction(ISD::SELECT , MVT::i32, Expand);
- setOperationAction(ISD::SELECT , MVT::f32, Expand);
- setOperationAction(ISD::SELECT , MVT::f64, Expand);
+ setOperationAction(ISD::SETCC, MVT::i32, Expand);
+ setOperationAction(ISD::SETCC, MVT::f32, Expand);
+ setOperationAction(ISD::SETCC, MVT::f64, Expand);
+ setOperationAction(ISD::SELECT, MVT::i32, Expand);
+ setOperationAction(ISD::SELECT, MVT::f32, Expand);
+ setOperationAction(ISD::SELECT, MVT::f64, Expand);
setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
- setOperationAction(ISD::BRCOND , MVT::Other, Expand);
- setOperationAction(ISD::BR_CC , MVT::i32, Custom);
- setOperationAction(ISD::BR_CC , MVT::f32, Custom);
- setOperationAction(ISD::BR_CC , MVT::f64, Custom);
- setOperationAction(ISD::BR_JT , MVT::Other, Custom);
+ setOperationAction(ISD::BRCOND, MVT::Other, Expand);
+ setOperationAction(ISD::BR_CC, MVT::i32, Custom);
+ setOperationAction(ISD::BR_CC, MVT::f32, Custom);
+ setOperationAction(ISD::BR_CC, MVT::f64, Custom);
+ setOperationAction(ISD::BR_JT, MVT::Other, Custom);
// We don't support sin/cos/fmod/copysign/pow
- setOperationAction(ISD::FSIN , MVT::f64, Expand);
- setOperationAction(ISD::FSIN , MVT::f32, Expand);
- setOperationAction(ISD::FCOS , MVT::f32, Expand);
- setOperationAction(ISD::FCOS , MVT::f64, Expand);
- setOperationAction(ISD::FREM , MVT::f64, Expand);
- setOperationAction(ISD::FREM , MVT::f32, Expand);
+ setOperationAction(ISD::FSIN, MVT::f64, Expand);
+ setOperationAction(ISD::FSIN, MVT::f32, Expand);
+ setOperationAction(ISD::FCOS, MVT::f32, Expand);
+ setOperationAction(ISD::FCOS, MVT::f64, Expand);
+ setOperationAction(ISD::FREM, MVT::f64, Expand);
+ setOperationAction(ISD::FREM, MVT::f32, Expand);
if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb()) {
setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
}
- setOperationAction(ISD::FPOW , MVT::f64, Expand);
- setOperationAction(ISD::FPOW , MVT::f32, Expand);
-
+ setOperationAction(ISD::FPOW, MVT::f64, Expand);
+ setOperationAction(ISD::FPOW, MVT::f32, Expand);
+
// int <-> fp are custom expanded into bit_convert + ARMISD ops.
if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb()) {
setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
// ARMISD::FMRRD - No need to call setTargetDAGCombine
setTargetDAGCombine(ISD::ADD);
setTargetDAGCombine(ISD::SUB);
-
-
+
setStackPointerRegisterToSaveRestore(ARM::SP);
setSchedulingPreference(SchedulingForRegPressure);
setIfCvtBlockSizeLimit(Subtarget->isThumb() ? 0 : 10);
  maxStoresPerMemcpy = 1;   // temporary - rewrite interface to use type
}
-
const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
switch (Opcode) {
default: return 0;
case ARMISD::FMSTAT: return "ARMISD::FMSTAT";
case ARMISD::CMOV: return "ARMISD::CMOV";
case ARMISD::CNEG: return "ARMISD::CNEG";
-
+
case ARMISD::FTOSI: return "ARMISD::FTOSI";
case ARMISD::FTOUI: return "ARMISD::FTOUI";
case ARMISD::SITOF: return "ARMISD::SITOF";
case ARMISD::SRL_FLAG: return "ARMISD::SRL_FLAG";
case ARMISD::SRA_FLAG: return "ARMISD::SRA_FLAG";
case ARMISD::RRX: return "ARMISD::RRX";
-
+
case ARMISD::FMRRD: return "ARMISD::FMRRD";
case ARMISD::FMDRR: return "ARMISD::FMDRR";
// Lowering Code
//===----------------------------------------------------------------------===//
-
/// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
switch (CC) {
return Invert;
}
-static void
-HowToPassArgument(MVT ObjectVT, unsigned NumGPRs,
- unsigned StackOffset, unsigned &NeededGPRs,
- unsigned &NeededStackSize, unsigned &GPRPad,
- unsigned &StackPad, ISD::ArgFlagsTy Flags) {
- NeededStackSize = 0;
- NeededGPRs = 0;
- StackPad = 0;
- GPRPad = 0;
- unsigned align = Flags.getOrigAlign();
- GPRPad = NumGPRs % ((align + 3)/4);
- StackPad = StackOffset % align;
- unsigned firstGPR = NumGPRs + GPRPad;
- switch (ObjectVT.getSimpleVT()) {
- default: assert(0 && "Unhandled argument type!");
- case MVT::i32:
- case MVT::f32:
- if (firstGPR < 4)
- NeededGPRs = 1;
- else
- NeededStackSize = 4;
- break;
- case MVT::i64:
- case MVT::f64:
- if (firstGPR < 3)
- NeededGPRs = 2;
- else if (firstGPR == 3) {
- NeededGPRs = 1;
- NeededStackSize = 4;
- } else
- NeededStackSize = 8;
+//===----------------------------------------------------------------------===//
+// Calling Convention Implementation
+//
+// The lowering operations for the calling convention run in this order:
+//     LowerCALL (virt regs --> phys regs, virt regs --> stack)
+//     LowerFORMAL_ARGUMENTS (phys regs --> virt regs, stack --> virt regs)
+//     LowerRET (virt regs --> phys regs)
+//     LowerCallResult, called from LowerCALL (phys regs --> virt regs)
+//
+//===----------------------------------------------------------------------===//
+
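+// ARMGenCallingConv.inc is generated by TableGen from ARMCallingConv.td and
+// defines the CC_ARM and RetCC_ARM assignment functions used below.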
+#include "ARMGenCallingConv.inc"
+
+// APCS f64 is in register pairs, possibly split to stack
+static bool CC_ARM_APCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+ CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags,
+ CCState &State) {
+ static const unsigned HiRegList[] = { ARM::R0, ARM::R1, ARM::R2, ARM::R3 };
+ static const unsigned LoRegList[] = { ARM::R1,
+ ARM::R2,
+ ARM::R3,
+ ARM::NoRegister };
+
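+  // AllocateReg claims the first free register in HiRegList and also marks
+  // the matching LoRegList entry as used, so both halves of the f64 are
+  // reserved together. The NoRegister shadow for R3 covers the case where
+  // only R3 is left and the second half must go on the stack (handled below).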
+ unsigned Reg = State.AllocateReg(HiRegList, LoRegList, 4);
+ if (Reg == 0)
+ return false; // we didn't handle it
+
+ unsigned i;
+ for (i = 0; i < 4; ++i)
+ if (HiRegList[i] == Reg)
+ break;
+
+ State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, MVT::i32, LocInfo));
+ if (LoRegList[i] != ARM::NoRegister)
+ State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, LoRegList[i],
+ MVT::i32, LocInfo));
+ else
+ State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
+ State.AllocateStack(4, 4),
+ MVT::i32, LocInfo));
+ return true; // we handled it
+}
+
+// AAPCS f64 is in aligned register pairs
+static bool CC_ARM_AAPCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+ CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags,
+ CCState &State) {
+ static const unsigned HiRegList[] = { ARM::R0, ARM::R2 };
+ static const unsigned LoRegList[] = { ARM::R1, ARM::R3 };
+
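+  // AAPCS wants f64 in an aligned register pair, R0+R1 or R2+R3. There is no
+  // register/stack split here: if no pair is free, returning false defers to
+  // the remaining calling-convention rules, which place the whole value on
+  // the stack.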
+ unsigned Reg = State.AllocateReg(HiRegList, LoRegList, 2);
+ if (Reg == 0)
+ return false; // we didn't handle it
+
+ unsigned i;
+ for (i = 0; i < 2; ++i)
+ if (HiRegList[i] == Reg)
+ break;
+
+ State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, MVT::i32, LocInfo));
+ State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, LoRegList[i],
+ MVT::i32, LocInfo));
+ return true; // we handled it
+}
+
+static bool RetCC_ARM_APCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+ CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags,
+ CCState &State) {
+ static const unsigned HiRegList[] = { ARM::R0, ARM::R2 };
+ static const unsigned LoRegList[] = { ARM::R1, ARM::R3 };
+
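+  // A double result always comes back in a full register pair; there is no
+  // stack fallback for return values.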
+ unsigned Reg = State.AllocateReg(HiRegList, LoRegList, 2);
+ if (Reg == 0)
+ return false; // we didn't handle it
+
+ unsigned i;
+ for (i = 0; i < 2; ++i)
+ if (HiRegList[i] == Reg)
+ break;
+
+ State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, MVT::i32, LocInfo));
+ State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, LoRegList[i],
+ MVT::i32, LocInfo));
+ return true; // we handled it
+}
+
+static bool RetCC_ARM_AAPCS_Custom_f64(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
+ CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags,
+ CCState &State) {
+ return RetCC_ARM_APCS_Custom_f64(ValNo, ValVT, LocVT, LocInfo, ArgFlags,
+ State);
+}
+
+/// LowerCallResult - Lower the result values of an ISD::CALL into the
+/// appropriate copies out of the physical registers they were assigned to.
+/// This assumes that Chain/InFlag are the input chain/flag to use, and that
+/// TheCall is the call being lowered. It returns an SDNode with the same
+/// number of values as the ISD::CALL.
+SDNode *ARMTargetLowering::
+LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall,
+ unsigned CallingConv, SelectionDAG &DAG) {
+
+ DebugLoc dl = TheCall->getDebugLoc();
+ // Assign locations to each value returned by this call.
+ SmallVector<CCValAssign, 16> RVLocs;
+ bool isVarArg = TheCall->isVarArg();
+ CCState CCInfo(CallingConv, isVarArg, getTargetMachine(), RVLocs);
+ CCInfo.AnalyzeCallResult(TheCall, RetCC_ARM);
+
+ SmallVector<SDValue, 8> ResultVals;
+
+ // Copy all of the result registers out of their specified physreg.
+ for (unsigned i = 0; i != RVLocs.size(); ++i) {
+ CCValAssign VA = RVLocs[i];
+
+    // f64 is returned in a custom i32 register pair and must be recombined.
+ if (VA.needsCustom()) {
+ SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
+ InFlag);
+ VA = RVLocs[++i]; // skip ahead to next loc
+ SDValue Hi = DAG.getCopyFromReg(Lo, dl, VA.getLocReg(), VA.getLocVT(),
+ Lo.getValue(2));
+ ResultVals.push_back(DAG.getNode(ARMISD::FMDRR, dl, VA.getValVT(), Lo,
+ Hi));
+ } else {
+ Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
+ InFlag).getValue(1);
+ SDValue Val = Chain.getValue(0);
+ InFlag = Chain.getValue(2);
+
+ switch (VA.getLocInfo()) {
+ default: assert(0 && "Unknown loc info!");
+ case CCValAssign::Full: break;
+ case CCValAssign::BCvt:
+ Val = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(),
+ Chain.getValue(0));
+ break;
+ }
+
+ ResultVals.push_back(Val);
+ }
}
+
+ // Merge everything together with a MERGE_VALUES node.
+ ResultVals.push_back(Chain);
+ return DAG.getNode(ISD::MERGE_VALUES, dl, TheCall->getVTList(),
+ &ResultVals[0], ResultVals.size()).getNode();
+}
+
+/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
+/// by "Src" to address "Dst" of size "Size". Alignment information is
+/// specified by the specific parameter attribute. The copy will be passed as
+/// a byval function parameter.
+/// Sometimes what we are copying is the end of a larger object, the part that
+/// does not fit in registers.
+static SDValue
+CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
+ ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
+ DebugLoc dl) {
+ SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
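+  // The trailing NULL/0 pairs are the destination and source SrcValue info,
+  // which is not known here.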
+ return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
+ /*AlwaysInline=*/false, NULL, 0, NULL, 0);
+}
+
+/// LowerMemOpCallTo - Store the argument to the stack.
+SDValue
+ARMTargetLowering::LowerMemOpCallTo(CallSDNode *TheCall, SelectionDAG &DAG,
+ const SDValue &StackPtr,
+ const CCValAssign &VA, SDValue Chain,
+ SDValue Arg, ISD::ArgFlagsTy Flags) {
+ DebugLoc dl = TheCall->getDebugLoc();
+ unsigned LocMemOffset = VA.getLocMemOffset();
+ SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
+ PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
+ if (Flags.isByVal()) {
+ return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
+ }
+ return DAG.getStore(Chain, dl, Arg, PtrOff,
+ PseudoSourceValue::getStack(), LocMemOffset);
}
/// LowerCALL - Lowering a ISD::CALL node into a callseq_start <-
/// ARMISD:CALL <- callseq_end chain. Also add input and output parameter
/// nodes.
SDValue ARMTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
CallSDNode *TheCall = cast<CallSDNode>(Op.getNode());
- MVT RetVT = TheCall->getRetValType(0);
- SDValue Chain = TheCall->getChain();
- assert((TheCall->getCallingConv() == CallingConv::C ||
- TheCall->getCallingConv() == CallingConv::Fast) &&
- "unknown calling convention");
- SDValue Callee = TheCall->getCallee();
- unsigned NumOps = TheCall->getNumArgs();
- DebugLoc dl = TheCall->getDebugLoc();
- unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot
- unsigned NumGPRs = 0; // GPRs used for parameter passing.
-
- // Count how many bytes are to be pushed on the stack.
- unsigned NumBytes = 0;
-
- // Add up all the space actually used.
- for (unsigned i = 0; i < NumOps; ++i) {
- unsigned ObjSize;
- unsigned ObjGPRs;
- unsigned StackPad;
- unsigned GPRPad;
- MVT ObjectVT = TheCall->getArg(i).getValueType();
- ISD::ArgFlagsTy Flags = TheCall->getArgFlags(i);
- HowToPassArgument(ObjectVT, NumGPRs, NumBytes, ObjGPRs, ObjSize,
- GPRPad, StackPad, Flags);
- NumBytes += ObjSize + StackPad;
- NumGPRs += ObjGPRs + GPRPad;
- }
+ MVT RetVT = TheCall->getRetValType(0);
+ SDValue Chain = TheCall->getChain();
+ unsigned CC = TheCall->getCallingConv();
+ assert((CC == CallingConv::C ||
+ CC == CallingConv::Fast) && "unknown calling convention");
+ bool isVarArg = TheCall->isVarArg();
+ SDValue Callee = TheCall->getCallee();
+ DebugLoc dl = TheCall->getDebugLoc();
+
+ // Analyze operands of the call, assigning locations to each operand.
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
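+  // CC_ARM (from ARMGenCallingConv.inc) applies the APCS or AAPCS assignment
+  // rules for the target ABI, recording a register or stack location for
+  // each operand in ArgLocs.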
+ CCInfo.AnalyzeCallOperands(TheCall, CC_ARM);
+
+ // Get a count of how many bytes are to be pushed on the stack.
+ unsigned NumBytes = CCInfo.getNextStackOffset();
// Adjust the stack pointer for the new arguments...
// These operations are automatically eliminated by the prolog/epilog pass
SDValue StackPtr = DAG.getRegister(ARM::SP, MVT::i32);
- static const unsigned GPRArgRegs[] = {
- ARM::R0, ARM::R1, ARM::R2, ARM::R3
- };
-
- NumGPRs = 0;
- std::vector<std::pair<unsigned, SDValue> > RegsToPass;
- std::vector<SDValue> MemOpChains;
- for (unsigned i = 0; i != NumOps; ++i) {
- SDValue Arg = TheCall->getArg(i);
- ISD::ArgFlagsTy Flags = TheCall->getArgFlags(i);
- MVT ArgVT = Arg.getValueType();
-
- unsigned ObjSize;
- unsigned ObjGPRs;
- unsigned GPRPad;
- unsigned StackPad;
- HowToPassArgument(ArgVT, NumGPRs, ArgOffset, ObjGPRs,
- ObjSize, GPRPad, StackPad, Flags);
- NumGPRs += GPRPad;
- ArgOffset += StackPad;
- if (ObjGPRs > 0) {
- switch (ArgVT.getSimpleVT()) {
- default: assert(0 && "Unexpected ValueType for argument!");
- case MVT::i32:
- RegsToPass.push_back(std::make_pair(GPRArgRegs[NumGPRs], Arg));
- break;
- case MVT::f32:
- RegsToPass.push_back(std::make_pair(GPRArgRegs[NumGPRs],
- DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Arg)));
- break;
- case MVT::i64: {
- SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Arg,
- DAG.getConstant(0, getPointerTy()));
- SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Arg,
- DAG.getConstant(1, getPointerTy()));
- RegsToPass.push_back(std::make_pair(GPRArgRegs[NumGPRs], Lo));
- if (ObjGPRs == 2)
- RegsToPass.push_back(std::make_pair(GPRArgRegs[NumGPRs+1], Hi));
- else {
- SDValue PtrOff= DAG.getConstant(ArgOffset, StackPtr.getValueType());
- PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
- MemOpChains.push_back(DAG.getStore(Chain, dl, Hi, PtrOff, NULL, 0));
- }
- break;
- }
- case MVT::f64: {
- SDValue Cvt = DAG.getNode(ARMISD::FMRRD, dl,
- DAG.getVTList(MVT::i32, MVT::i32),
- &Arg, 1);
- RegsToPass.push_back(std::make_pair(GPRArgRegs[NumGPRs], Cvt));
- if (ObjGPRs == 2)
- RegsToPass.push_back(std::make_pair(GPRArgRegs[NumGPRs+1],
- Cvt.getValue(1)));
- else {
- SDValue PtrOff= DAG.getConstant(ArgOffset, StackPtr.getValueType());
- PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
- MemOpChains.push_back(DAG.getStore(Chain, dl, Cvt.getValue(1), PtrOff,
- NULL, 0));
- }
- break;
- }
+ SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
+ SmallVector<SDValue, 8> MemOpChains;
+
+ // Walk the register/memloc assignments, inserting copies/loads. In the case
+ // of tail call optimization, arguments are handled later.
+ for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
+ i != e;
+ ++i, ++realArgIdx) {
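+    // 'i' walks ArgLocs, where a custom-handled f64 takes two entries (note
+    // the ++i below); 'realArgIdx' walks the actual call operands.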
+ CCValAssign &VA = ArgLocs[i];
+ SDValue Arg = TheCall->getArg(realArgIdx);
+ ISD::ArgFlagsTy Flags = TheCall->getArgFlags(realArgIdx);
+
+ // Promote the value if needed.
+ switch (VA.getLocInfo()) {
+ default: assert(0 && "Unknown loc info!");
+ case CCValAssign::Full: break;
+ case CCValAssign::SExt:
+ Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
+ break;
+ case CCValAssign::ZExt:
+ Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
+ break;
+ case CCValAssign::AExt:
+ Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
+ break;
+ case CCValAssign::BCvt:
+ Arg = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), Arg);
+ break;
+ }
+
+    // f64 must be split into an i32 pair to be passed; FMRRD extracts the
+    // two halves of the double.
+ if (VA.needsCustom()) {
+ SDValue fmrrd = DAG.getNode(ARMISD::FMRRD, dl,
+ DAG.getVTList(MVT::i32, MVT::i32), &Arg, 1);
+ RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd));
+ VA = ArgLocs[++i]; // skip ahead to next loc
+ if (VA.isRegLoc())
+ RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd.getValue(1)));
+ else {
+ assert(VA.isMemLoc());
+ if (StackPtr.getNode() == 0)
+ StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());
+
+ MemOpChains.push_back(LowerMemOpCallTo(TheCall, DAG, StackPtr, VA,
+ Chain, fmrrd.getValue(1),
+ Flags));
}
+ } else if (VA.isRegLoc()) {
+ RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
} else {
- assert(ObjSize != 0);
- SDValue PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
- PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
- MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, NULL, 0));
- }
+ assert(VA.isMemLoc());
+ if (StackPtr.getNode() == 0)
+ StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy());
- NumGPRs += ObjGPRs;
- ArgOffset += ObjSize;
+ MemOpChains.push_back(LowerMemOpCallTo(TheCall, DAG, StackPtr, VA,
+ Chain, Arg, Flags));
+ }
}
if (!MemOpChains.empty())
// and flag operands which copy the outgoing args into the appropriate regs.
SDValue InFlag;
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
- Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
+ Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
RegsToPass[i].second, InFlag);
InFlag = Chain.getValue(1);
}
ARMCP::CPStub, 4);
SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
- Callee = DAG.getLoad(getPointerTy(), dl,
- DAG.getEntryNode(), CPAddr, NULL, 0);
+ Callee = DAG.getLoad(getPointerTy(), dl,
+ DAG.getEntryNode(), CPAddr, NULL, 0);
SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32);
- Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
+ Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
getPointerTy(), Callee, PICLabel);
} else
Callee = DAG.getTargetGlobalAddress(GV, getPointerTy());
SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
Callee = DAG.getLoad(getPointerTy(), dl,
- DAG.getEntryNode(), CPAddr, NULL, 0);
+ DAG.getEntryNode(), CPAddr, NULL, 0);
SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex++, MVT::i32);
- Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
+ Callee = DAG.getNode(ARMISD::PIC_ADD, dl,
getPointerTy(), Callee, PICLabel);
} else
Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy());
if (RetVT != MVT::Other)
InFlag = Chain.getValue(1);
- std::vector<SDValue> ResultVals;
-
- // If the call has results, copy the values out of the ret val registers.
- switch (RetVT.getSimpleVT()) {
- default: assert(0 && "Unexpected ret value!");
- case MVT::Other:
- break;
- case MVT::i32:
- Chain = DAG.getCopyFromReg(Chain, dl, ARM::R0,
- MVT::i32, InFlag).getValue(1);
- ResultVals.push_back(Chain.getValue(0));
- if (TheCall->getNumRetVals() > 1 &&
- TheCall->getRetValType(1) == MVT::i32) {
- // Returns a i64 value.
- Chain = DAG.getCopyFromReg(Chain, dl, ARM::R1, MVT::i32,
- Chain.getValue(2)).getValue(1);
- ResultVals.push_back(Chain.getValue(0));
- }
- break;
- case MVT::f32:
- Chain = DAG.getCopyFromReg(Chain, dl, ARM::R0,
- MVT::i32, InFlag).getValue(1);
- ResultVals.push_back(DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32,
- Chain.getValue(0)));
- break;
- case MVT::f64: {
- SDValue Lo = DAG.getCopyFromReg(Chain, dl, ARM::R0, MVT::i32, InFlag);
- SDValue Hi = DAG.getCopyFromReg(Lo, dl, ARM::R1, MVT::i32, Lo.getValue(2));
- ResultVals.push_back(DAG.getNode(ARMISD::FMDRR, dl, MVT::f64, Lo, Hi));
- break;
- }
- }
-
- if (ResultVals.empty())
- return Chain;
-
- ResultVals.push_back(Chain);
- SDValue Res = DAG.getMergeValues(&ResultVals[0], ResultVals.size(), dl);
- return Res.getValue(Op.getResNo());
+ // Handle result values, copying them out of physregs into vregs that we
+ // return.
+ return SDValue(LowerCallResult(Chain, InFlag, TheCall, CC, DAG),
+ Op.getResNo());
}
-static SDValue LowerRET(SDValue Op, SelectionDAG &DAG) {
- SDValue Copy;
+SDValue ARMTargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) {
+ // The chain is always operand #0
SDValue Chain = Op.getOperand(0);
DebugLoc dl = Op.getDebugLoc();
- switch(Op.getNumOperands()) {
- default:
- assert(0 && "Do not know how to return this many arguments!");
- abort();
- case 1: {
- SDValue LR = DAG.getRegister(ARM::LR, MVT::i32);
- return DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain);
+
+  // CCValAssign - represents the assignment of a return value to a location.
+ SmallVector<CCValAssign, 16> RVLocs;
+ unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
+ bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
+
+ // CCState - Info about the registers and stack slots.
+ CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs);
+
+ // Analyze return values of ISD::RET.
+ CCInfo.AnalyzeReturn(Op.getNode(), RetCC_ARM);
+
+ // If this is the first return lowered for this function, add
+ // the regs to the liveout set for the function.
+ if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
+ for (unsigned i = 0; i != RVLocs.size(); ++i)
+ if (RVLocs[i].isRegLoc())
+ DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
}
- case 3:
- Op = Op.getOperand(1);
- if (Op.getValueType() == MVT::f32) {
- Op = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
- } else if (Op.getValueType() == MVT::f64) {
- // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is
- // available.
- Op = DAG.getNode(ARMISD::FMRRD, dl,
- DAG.getVTList(MVT::i32, MVT::i32), &Op,1);
- SDValue Sign = DAG.getConstant(0, MVT::i32);
- return DAG.getNode(ISD::RET, dl, MVT::Other, Chain, Op, Sign,
- Op.getValue(1), Sign);
- }
- Copy = DAG.getCopyToReg(Chain, dl, ARM::R0, Op, SDValue());
- if (DAG.getMachineFunction().getRegInfo().liveout_empty())
- DAG.getMachineFunction().getRegInfo().addLiveOut(ARM::R0);
- break;
- case 5:
- Copy = DAG.getCopyToReg(Chain, dl, ARM::R1, Op.getOperand(3), SDValue());
- Copy = DAG.getCopyToReg(Copy, dl, ARM::R0, Op.getOperand(1),
- Copy.getValue(1));
- // If we haven't noted the R0+R1 are live out, do so now.
- if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
- DAG.getMachineFunction().getRegInfo().addLiveOut(ARM::R0);
- DAG.getMachineFunction().getRegInfo().addLiveOut(ARM::R1);
- }
- break;
- case 9: // i128 -> 4 regs
- Copy = DAG.getCopyToReg(Chain, dl, ARM::R3, Op.getOperand(7), SDValue());
- Copy = DAG.getCopyToReg(Copy , dl, ARM::R2, Op.getOperand(5),
- Copy.getValue(1));
- Copy = DAG.getCopyToReg(Copy , dl, ARM::R1, Op.getOperand(3),
- Copy.getValue(1));
- Copy = DAG.getCopyToReg(Copy , dl, ARM::R0, Op.getOperand(1),
- Copy.getValue(1));
- // If we haven't noted the R0+R1 are live out, do so now.
- if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
- DAG.getMachineFunction().getRegInfo().addLiveOut(ARM::R0);
- DAG.getMachineFunction().getRegInfo().addLiveOut(ARM::R1);
- DAG.getMachineFunction().getRegInfo().addLiveOut(ARM::R2);
- DAG.getMachineFunction().getRegInfo().addLiveOut(ARM::R3);
+
+ SDValue Flag;
+
+ // Copy the result values into the output registers.
+ for (unsigned i = 0, realRVLocIdx = 0;
+ i != RVLocs.size();
+ ++i, ++realRVLocIdx) {
+ CCValAssign &VA = RVLocs[i];
+ assert(VA.isRegLoc() && "Can only return in registers!");
+
+    // The operands of ISD::RET are the chain followed by (value, signness)
+    // pairs, so operand realRVLocIdx*2+1 is the value itself.
+ SDValue Arg = Op.getOperand(realRVLocIdx*2+1);
+
+ switch (VA.getLocInfo()) {
+ default: assert(0 && "Unknown loc info!");
+ case CCValAssign::Full: break;
+ case CCValAssign::BCvt:
+ Arg = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), Arg);
+ break;
}
- break;
-
+
+ // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is
+ // available.
+ if (VA.needsCustom()) {
+ SDValue fmrrd = DAG.getNode(ARMISD::FMRRD, dl,
+ DAG.getVTList(MVT::i32, MVT::i32), &Arg, 1);
+ Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd, Flag);
+ VA = RVLocs[++i]; // skip ahead to next loc
+ Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), fmrrd.getValue(1),
+ Flag);
+ } else
+ Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
+
+    // Guarantee that all emitted copies are stuck together by threading the
+    // flag through them, so they cannot be scheduled apart.
+ Flag = Chain.getValue(1);
}
- //We must use RET_FLAG instead of BRIND because BRIND doesn't have a flag
- return DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Copy, Copy.getValue(1));
+ SDValue result;
+ if (Flag.getNode())
+ result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
+ else // Return Void
+ result = DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, Chain);
+
+ return result;
}
-// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
+// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is
// one of the above mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// "local exec" model.
SDValue
ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
- SelectionDAG &DAG) {
+ SelectionDAG &DAG) {
GlobalValue *GV = GA->getGlobal();
DebugLoc dl = GA->getDebugLoc();
SDValue Offset;
}
SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
- SelectionDAG &DAG) {
+ SelectionDAG &DAG) {
MVT PtrVT = getPointerTy();
DebugLoc dl = Op.getDebugLoc();
GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
new ARMConstantPoolValue(GV, ARMCP::CPValue, UseGOTOFF ? "GOTOFF":"GOT");
SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
- SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
+ SDValue Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
CPAddr, NULL, 0);
SDValue Chain = Result.getValue(1);
SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
}
SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
- SelectionDAG &DAG) {
+ SelectionDAG &DAG) {
MVT PtrVT = getPointerTy();
DebugLoc dl = Op.getDebugLoc();
GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
}
SDValue ARMTargetLowering::LowerGLOBAL_OFFSET_TABLE(SDValue Op,
- SelectionDAG &DAG){
+ SelectionDAG &DAG){
assert(Subtarget->isTargetELF() &&
"GLOBAL OFFSET TABLE not implemented for non-ELF targets");
MVT PtrVT = getPointerTy();
}
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
- unsigned VarArgsFrameIndex) {
+ unsigned VarArgsFrameIndex) {
// vastart just stores the address of the VarArgsFrameIndex slot into the
// memory location argument.
DebugLoc dl = Op.getDebugLoc();
return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1), SV, 0);
}
-static SDValue LowerFORMAL_ARGUMENT(SDValue Op, SelectionDAG &DAG,
- unsigned ArgNo, unsigned &NumGPRs,
- unsigned &ArgOffset, DebugLoc dl) {
+SDValue
+ARMTargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) {
MachineFunction &MF = DAG.getMachineFunction();
- MVT ObjectVT = Op.getValue(ArgNo).getValueType();
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+
SDValue Root = Op.getOperand(0);
- MachineRegisterInfo &RegInfo = MF.getRegInfo();
-
- static const unsigned GPRArgRegs[] = {
- ARM::R0, ARM::R1, ARM::R2, ARM::R3
- };
-
- unsigned ObjSize;
- unsigned ObjGPRs;
- unsigned GPRPad;
- unsigned StackPad;
- ISD::ArgFlagsTy Flags =
- cast<ARG_FLAGSSDNode>(Op.getOperand(ArgNo + 3))->getArgFlags();
- HowToPassArgument(ObjectVT, NumGPRs, ArgOffset, ObjGPRs,
- ObjSize, GPRPad, StackPad, Flags);
- NumGPRs += GPRPad;
- ArgOffset += StackPad;
-
- SDValue ArgValue;
- if (ObjGPRs == 1) {
- unsigned VReg = RegInfo.createVirtualRegister(&ARM::GPRRegClass);
- RegInfo.addLiveIn(GPRArgRegs[NumGPRs], VReg);
- ArgValue = DAG.getCopyFromReg(Root, dl, VReg, MVT::i32);
- if (ObjectVT == MVT::f32)
- ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, ArgValue);
- } else if (ObjGPRs == 2) {
- unsigned VReg = RegInfo.createVirtualRegister(&ARM::GPRRegClass);
- RegInfo.addLiveIn(GPRArgRegs[NumGPRs], VReg);
- ArgValue = DAG.getCopyFromReg(Root, dl, VReg, MVT::i32);
-
- VReg = RegInfo.createVirtualRegister(&ARM::GPRRegClass);
- RegInfo.addLiveIn(GPRArgRegs[NumGPRs+1], VReg);
- SDValue ArgValue2 = DAG.getCopyFromReg(Root, dl, VReg, MVT::i32);
-
- assert(ObjectVT != MVT::i64 && "i64 should already be lowered");
- ArgValue = DAG.getNode(ARMISD::FMDRR, dl, MVT::f64, ArgValue, ArgValue2);
- }
- NumGPRs += ObjGPRs;
-
- if (ObjSize) {
- MachineFrameInfo *MFI = MF.getFrameInfo();
- int FI = MFI->CreateFixedObject(ObjSize, ArgOffset);
- SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
- if (ObjGPRs == 0)
- ArgValue = DAG.getLoad(ObjectVT, dl, Root, FIN, NULL, 0);
- else {
- SDValue ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN, NULL, 0);
- assert(ObjectVT != MVT::i64 && "i64 should already be lowered");
- ArgValue = DAG.getNode(ARMISD::FMDRR, dl, MVT::f64, ArgValue, ArgValue2);
- }
+ DebugLoc dl = Op.getDebugLoc();
+ bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() != 0;
+ unsigned CC = MF.getFunction()->getCallingConv();
+ ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
+
+ // Assign locations to all of the incoming arguments.
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs);
+ CCInfo.AnalyzeFormalArguments(Op.getNode(), CC_ARM);
+
+ SmallVector<SDValue, 16> ArgValues;
+
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ CCValAssign &VA = ArgLocs[i];
+
+ // Arguments stored in registers.
+ if (VA.isRegLoc()) {
+ MVT RegVT = VA.getLocVT();
+ TargetRegisterClass *RC;
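+      // Thumb restricts most instructions to the low registers, so incoming
+      // arguments use the tGPR class (R0-R7) there.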
+ if (AFI->isThumbFunction())
+ RC = ARM::tGPRRegisterClass;
+ else
+ RC = ARM::GPRRegisterClass;
+
+ if (RegVT == MVT::f64) {
+ // f64 is passed in pairs of GPRs and must be combined.
+ RegVT = MVT::i32;
+ } else if (!((RegVT == MVT::i32) || (RegVT == MVT::f32)))
+ assert(0 && "RegVT not supported by FORMAL_ARGUMENTS Lowering");
+
+ // Transform the arguments stored in physical registers into virtual ones.
+ unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
+ SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, RegVT);
+
+ // f64 is passed in i32 pairs and must be combined.
+ if (VA.needsCustom()) {
+ SDValue ArgValue2;
+
+ VA = ArgLocs[++i]; // skip ahead to next loc
+ if (VA.isMemLoc()) {
+ // must be APCS and older than V5T to split like this
+ unsigned ArgSize = VA.getLocVT().getSizeInBits()/8;
+ int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset());
+
+ // Create load node to retrieve arguments from the stack.
+ SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
+ ArgValue2 = DAG.getLoad(MVT::i32, dl, Root, FIN, NULL, 0);
+ } else {
+ Reg = MF.addLiveIn(VA.getLocReg(), RC);
+ ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
+ }
- ArgOffset += ObjSize; // Move on to the next argument.
- }
+ ArgValue = DAG.getNode(ARMISD::FMDRR, dl, MVT::f64,
+ ArgValue, ArgValue2);
+ }
- return ArgValue;
-}
+ // If this is an 8 or 16-bit value, it is really passed promoted
+ // to 32 bits. Insert an assert[sz]ext to capture this, then
+ // truncate to the right size.
+ switch (VA.getLocInfo()) {
+ default: assert(0 && "Unknown loc info!");
+ case CCValAssign::Full: break;
+ case CCValAssign::BCvt:
+ ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), ArgValue);
+ break;
+ case CCValAssign::SExt:
+ ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
+ DAG.getValueType(VA.getValVT()));
+ ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
+ break;
+ case CCValAssign::ZExt:
+ ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
+ DAG.getValueType(VA.getValVT()));
+ ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
+ break;
+ }
-SDValue
-ARMTargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) {
- std::vector<SDValue> ArgValues;
- SDValue Root = Op.getOperand(0);
- DebugLoc dl = Op.getDebugLoc();
- unsigned ArgOffset = 0; // Frame mechanisms handle retaddr slot
- unsigned NumGPRs = 0; // GPRs used for parameter passing.
+ ArgValues.push_back(ArgValue);
- unsigned NumArgs = Op.getNode()->getNumValues()-1;
- for (unsigned ArgNo = 0; ArgNo < NumArgs; ++ArgNo)
- ArgValues.push_back(LowerFORMAL_ARGUMENT(Op, DAG, ArgNo,
- NumGPRs, ArgOffset, dl));
+ } else { // VA.isRegLoc()
- bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() != 0;
+ // sanity check
+ assert(VA.isMemLoc());
+ assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");
+
+ unsigned ArgSize = VA.getLocVT().getSizeInBits()/8;
+ int FI = MFI->CreateFixedObject(ArgSize, VA.getLocMemOffset());
+
+ // Create load nodes to retrieve arguments from the stack.
+ SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
+ ArgValues.push_back(DAG.getLoad(VA.getValVT(), dl, Root, FIN, NULL, 0));
+ }
+ }
+
+ // varargs
if (isVarArg) {
static const unsigned GPRArgRegs[] = {
ARM::R0, ARM::R1, ARM::R2, ARM::R3
};
- MachineFunction &MF = DAG.getMachineFunction();
- MachineRegisterInfo &RegInfo = MF.getRegInfo();
- MachineFrameInfo *MFI = MF.getFrameInfo();
- ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
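+    // getFirstUnallocated reports how many of R0-R3 were consumed by the
+    // fixed arguments; the remaining argument registers must be saved so
+    // va_arg can find them.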
+ unsigned NumGPRs = CCInfo.getFirstUnallocated
+ (GPRArgRegs, sizeof(GPRArgRegs) / sizeof(GPRArgRegs[0]));
+
unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
unsigned VARegSize = (4 - NumGPRs) * 4;
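+    // Round the register save area up to the stack alignment.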
unsigned VARegSaveSize = (VARegSize + Align - 1) & ~(Align - 1);
+ unsigned ArgOffset = 0;
if (VARegSaveSize) {
// If this function is vararg, store any remaining integer argument regs
      // to their spots on the stack so that they may be loaded by dereferencing
// the result of va_next.
AFI->setVarArgsRegSaveSize(VARegSaveSize);
+ ArgOffset = CCInfo.getNextStackOffset();
VarArgsFrameIndex = MFI->CreateFixedObject(VARegSaveSize, ArgOffset +
VARegSaveSize - VARegSize);
SDValue FIN = DAG.getFrameIndex(VarArgsFrameIndex, getPointerTy());
SmallVector<SDValue, 4> MemOps;
for (; NumGPRs < 4; ++NumGPRs) {
- unsigned VReg = RegInfo.createVirtualRegister(&ARM::GPRRegClass);
- RegInfo.addLiveIn(GPRArgRegs[NumGPRs], VReg);
+ TargetRegisterClass *RC;
+ if (AFI->isThumbFunction())
+ RC = ARM::tGPRRegisterClass;
+ else
+ RC = ARM::GPRRegisterClass;
+
+ unsigned VReg = MF.addLiveIn(GPRArgRegs[NumGPRs], RC);
SDValue Val = DAG.getCopyFromReg(Root, dl, VReg, MVT::i32);
SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0);
MemOps.push_back(Store);
// Return the new list of results.
return DAG.getNode(ISD::MERGE_VALUES, dl, Op.getNode()->getVTList(),
- &ArgValues[0], ArgValues.size());
+ &ArgValues[0], ArgValues.size()).getValue(Op.getResNo());
}
/// isFloatingPointZero - Return true if this is +0.0.
}
/// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
-static SDValue getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
+static SDValue getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
DebugLoc dl) {
SDValue Cmp;
if (!isFloatingPointZero(RHS))
}
static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
- const ARMSubtarget *ST) {
+ const ARMSubtarget *ST) {
MVT VT = Op.getValueType();
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
SDValue ARMCC2 = DAG.getConstant(CondCode2, MVT::i32);
// FIXME: Needs another CMP because flag can have but one use.
SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl);
- Result = DAG.getNode(ARMISD::CMOV, dl, VT,
+ Result = DAG.getNode(ARMISD::CMOV, dl, VT,
Result, TrueVal, ARMCC2, CCR, Cmp2);
}
return Result;
}
static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG,
- const ARMSubtarget *ST) {
+ const ARMSubtarget *ST) {
SDValue Chain = Op.getOperand(0);
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
SDValue LHS = Op.getOperand(2);
SDValue ARMCC;
SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMCC, DAG, ST->isThumb(), dl);
- return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
+ return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
                     Chain, Dest, ARMCC, CCR, Cmp);
}
if (FPCCToARMCC(CC, CondCode, CondCode2))
// Swap the LHS/RHS of the comparison if needed.
std::swap(LHS, RHS);
-
+
SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
SDValue ARMCC = DAG.getConstant(CondCode, MVT::i32);
SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
for (i = 0;
i < MAX_LOADS_IN_LDM && EmittedNumMemOps + i < NumMemOps; ++i) {
TFOps[i] = DAG.getStore(Chain, dl, Loads[i],
- DAG.getNode(ISD::ADD, dl, MVT::i32, Dst,
+ DAG.getNode(ISD::ADD, dl, MVT::i32, Dst,
DAG.getConstant(DstOff, MVT::i32)),
DstSV, DstSVOff + DstOff);
DstOff += VTSize;
EmittedNumMemOps += i;
}
- if (BytesLeft == 0)
+ if (BytesLeft == 0)
return Chain;
// Issue loads / stores for the trailing (1 - 3) bytes.
}
TFOps[i] = DAG.getStore(Chain, dl, Loads[i],
- DAG.getNode(ISD::ADD, dl, MVT::i32, Dst,
+ DAG.getNode(ISD::ADD, dl, MVT::i32, Dst,
DAG.getConstant(DstOff, MVT::i32)),
DstSV, DstSVOff + DstOff);
++i;
DAG.getConstant(1, MVT::i32));
return DAG.getNode(ARMISD::FMDRR, dl, MVT::f64, Lo, Hi);
}
-
+
// Turn f64->i64 into FMRRD.
- SDValue Cvt = DAG.getNode(ARMISD::FMRRD, dl,
+ SDValue Cvt = DAG.getNode(ARMISD::FMRRD, dl,
DAG.getVTList(MVT::i32, MVT::i32), &Op, 1);
-
+
// Merge the pieces into a single i64 value.
return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1));
}
if (!isa<ConstantSDNode>(N->getOperand(1)) ||
cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() != 1)
return SDValue();
-
+
// If we are in thumb mode, we don't have RRX.
if (ST->isThumb()) return SDValue();
-
+
// Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr.
DebugLoc dl = N->getDebugLoc();
SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
DAG.getConstant(0, MVT::i32));
SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
DAG.getConstant(1, MVT::i32));
-
+
// First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and
// captures the result into a carry flag.
unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG;
Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Flag), &Hi, 1);
-
+
// The low part is an ARMISD::RRX operand, which shifts the carry in.
Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1));
-
+
// Merge the pieces into a single i64 value.
return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}
-
SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
switch (Op.getOpcode()) {
default: assert(0 && "Don't know how to custom lower this!"); abort();
return SDValue();
}
-
/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.
-///
void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
SmallVectorImpl<SDValue>&Results,
SelectionDAG &DAG) {
}
}
}
-
//===----------------------------------------------------------------------===//
// ARM Scheduler Hooks
static
SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
TargetLowering::DAGCombinerInfo &DCI) {
-
SelectionDAG &DAG = DCI.DAG;
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
MVT VT = N->getValueType(0);
TargetLowering::DAGCombinerInfo &DCI) {
// added by evan in r37685 with no testcase.
SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
-
+
// fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c))
if (N0.getOpcode() == ISD::SELECT && N0.getNode()->hasOneUse()) {
SDValue Result = combineSelectAndUse(N, N0, N1, DCI);
SDValue Result = combineSelectAndUse(N, N1, N0, DCI);
if (Result.getNode()) return Result;
}
-
+
return SDValue();
}
TargetLowering::DAGCombinerInfo &DCI) {
// added by evan in r37685 with no testcase.
SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
-
+
// fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c))
if (N1.getOpcode() == ISD::SELECT && N1.getNode()->hasOneUse()) {
SDValue Result = combineSelectAndUse(N, N1, N0, DCI);
if (Result.getNode()) return Result;
}
-
+
return SDValue();
}
/// PerformFMRRDCombine - Target-specific dag combine xforms for ARMISD::FMRRD.
-static SDValue PerformFMRRDCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI) {
+static SDValue PerformFMRRDCombine(SDNode *N,
+ TargetLowering::DAGCombinerInfo &DCI) {
// fmrrd(fmdrr x, y) -> x,y
SDValue InDouble = N->getOperand(0);
if (InDouble.getOpcode() == ARMISD::FMDRR)
}
SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
- DAGCombinerInfo &DCI) const {
+ DAGCombinerInfo &DCI) const {
switch (N->getOpcode()) {
default: break;
case ISD::ADD: return PerformADDCombine(N, DCI);
case ISD::SUB: return PerformSUBCombine(N, DCI);
case ARMISD::FMRRD: return PerformFMRRDCombine(N, DCI);
}
-
+
return SDValue();
}
-
/// isLegalAddressImmediate - Return true if the integer value can be used
/// as the offset of the target addressing mode for load / store of the
/// given type.
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
-bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM,
+bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM,
const Type *Ty) const {
- if (!isLegalAddressImmediate(AM.BaseOffs, getValueType(Ty, true), Subtarget))
+ MVT VT = getValueType(Ty, true);
+ if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget))
return false;
-
+
// Can never fold addr of global into load/store.
- if (AM.BaseGV)
+ if (AM.BaseGV)
return false;
-
+
switch (AM.Scale) {
case 0: // no scale reg, must be "r+i" or "r", or "i".
break;
// ARM doesn't support any R+R*scale+imm addr modes.
if (AM.BaseOffs)
return false;
-
+
+ if (!VT.isSimple())
+ return false;
+
int Scale = AM.Scale;
- switch (getValueType(Ty).getSimpleVT()) {
+ switch (VT.getSimpleVT()) {
default: return false;
case MVT::i1:
case MVT::i8:
if (((unsigned)AM.HasBaseReg + Scale) <= 2)
return true;
return false;
-
+
case MVT::isVoid:
// Note, we allow "void" uses (basically, uses that aren't loads or
// stores), because arm allows folding a scale into many arithmetic
// operations. This should be made more precise and revisited later.
-
+
    // Allow r << imm: the scale is then 1 << imm, i.e. an even power of two.
if (AM.Scale & 1) return false;
return isPowerOf2_32(AM.Scale);
return true;
}
-
static bool getIndexedAddressParts(SDNode *Ptr, MVT VT,
bool isSEXTLoad, SDValue &Base,
SDValue &Offset, bool &isInc,
void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
const APInt &Mask,
- APInt &KnownZero,
+ APInt &KnownZero,
APInt &KnownOne,
const SelectionDAG &DAG,
unsigned Depth) const {
return TargetLowering::getConstraintType(Constraint);
}
-std::pair<unsigned, const TargetRegisterClass*>
+std::pair<unsigned, const TargetRegisterClass*>
ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
MVT VT) const {
if (Constraint.size() == 1) {
// GCC RS6000 Constraint Letters
switch (Constraint[0]) {
case 'l':
- // FIXME: in thumb mode, 'l' is only low-regs.
- // FALL THROUGH.
+ if (Subtarget->isThumb())
+ return std::make_pair(0U, ARM::tGPRRegisterClass);
+ else
+ return std::make_pair(0U, ARM::GPRRegisterClass);
case 'r':
return std::make_pair(0U, ARM::GPRRegisterClass);
case 'w':
switch (Constraint[0]) { // GCC ARM Constraint Letters
default: break;
case 'l':
+ return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3,
+ ARM::R4, ARM::R5, ARM::R6, ARM::R7,
+ 0);
case 'r':
return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3,
ARM::R4, ARM::R5, ARM::R6, ARM::R7,
return std::vector<unsigned>();
}
+
+/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
+/// vector. If it is invalid, don't add anything to Ops.
+void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
+ char Constraint,
+ bool hasMemory,
+ std::vector<SDValue>&Ops,
+ SelectionDAG &DAG) const {
+ SDValue Result(0, 0);
+
+ switch (Constraint) {
+ default: break;
+ case 'I': case 'J': case 'K': case 'L':
+ case 'M': case 'N': case 'O':
+ ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
+ if (!C)
+ return;
+
+ int64_t CVal64 = C->getSExtValue();
+ int CVal = (int) CVal64;
+ // None of these constraints allow values larger than 32 bits. Check
+ // that the value fits in an int.
+ if (CVal != CVal64)
+ return;
+
+ switch (Constraint) {
+ case 'I':
+ if (Subtarget->isThumb()) {
+ // This must be a constant between 0 and 255, for ADD immediates.
+ if (CVal >= 0 && CVal <= 255)
+ break;
+ } else {
+ // A constant that can be used as an immediate value in a
+ // data-processing instruction.
+ if (ARM_AM::getSOImmVal(CVal) != -1)
+ break;
+ }
+ return;
+
+ case 'J':
+ if (Subtarget->isThumb()) {
+ // This must be a constant between -255 and -1, for negated ADD
+ // immediates. This can be used in GCC with an "n" modifier that
+ // prints the negated value, for use with SUB instructions. It is
+ // not useful otherwise but is implemented for compatibility.
+ if (CVal >= -255 && CVal <= -1)
+ break;
+ } else {
+ // This must be a constant between -4095 and 4095. It is not clear
+ // what this constraint is intended for. Implemented for
+ // compatibility with GCC.
+ if (CVal >= -4095 && CVal <= 4095)
+ break;
+ }
+ return;
+
+ case 'K':
+ if (Subtarget->isThumb()) {
+ // A 32-bit value where only one byte has a nonzero value. Exclude
+ // zero to match GCC. This constraint is used by GCC internally for
+ // constants that can be loaded with a move/shift combination.
+ // It is not useful otherwise but is implemented for compatibility.
+ if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal))
+ break;
+ } else {
+ // A constant whose bitwise inverse can be used as an immediate
+ // value in a data-processing instruction. This can be used in GCC
+ // with a "B" modifier that prints the inverted value, for use with
+ // BIC and MVN instructions. It is not useful otherwise but is
+ // implemented for compatibility.
+ if (ARM_AM::getSOImmVal(~CVal) != -1)
+ break;
+ }
+ return;
+
+ case 'L':
+ if (Subtarget->isThumb()) {
+ // This must be a constant between -7 and 7,
+ // for 3-operand ADD/SUB immediate instructions.
+          if (CVal >= -7 && CVal <= 7)
+ break;
+ } else {
+ // A constant whose negation can be used as an immediate value in a
+ // data-processing instruction. This can be used in GCC with an "n"
+ // modifier that prints the negated value, for use with SUB
+ // instructions. It is not useful otherwise but is implemented for
+ // compatibility.
+ if (ARM_AM::getSOImmVal(-CVal) != -1)
+ break;
+ }
+ return;
+
+ case 'M':
+ if (Subtarget->isThumb()) {
+ // This must be a multiple of 4 between 0 and 1020, for
+ // ADD sp + immediate.
+ if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
+ break;
+ } else {
+ // A power of two or a constant between 0 and 32. This is used in
+ // GCC for the shift amount on shifted register operands, but it is
+ // useful in general for any shift amounts.
+ if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0))
+ break;
+ }
+ return;
+
+ case 'N':
+ if (Subtarget->isThumb()) {
+ // This must be a constant between 0 and 31, for shift amounts.
+ if (CVal >= 0 && CVal <= 31)
+ break;
+ }
+ return;
+
+ case 'O':
+ if (Subtarget->isThumb()) {
+ // This must be a multiple of 4 between -508 and 508, for
+ // ADD/SUB sp = sp + immediate.
+ if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
+ break;
+ }
+ return;
+ }
+ Result = DAG.getTargetConstant(CVal, Op.getValueType());
+ break;
+ }
+
+ if (Result.getNode()) {
+ Ops.push_back(Result);
+ return;
+ }
+ return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, hasMemory,
+ Ops, DAG);
+}