llvm_unreachable("unknown subtarget type");
}
-
AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM)
- : TargetLowering(TM, createTLOF(TM)),
- Subtarget(&TM.getSubtarget<AArch64Subtarget>()),
- RegInfo(TM.getRegisterInfo()),
- Itins(TM.getInstrItineraryData()) {
+ : TargetLowering(TM, createTLOF(TM)), Itins(TM.getInstrItineraryData()) {
// SIMD compares set the entire lane's bits to 1
setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
computeRegisterProperties();
- // We have particularly efficient implementations of atomic fences if they can
- // be combined with nearby atomic loads and stores.
- setShouldFoldAtomicFences(true);
-
// We combine OR nodes for bitfield and NEON BSL operations.
setTargetDAGCombine(ISD::OR);
setExceptionSelectorRegister(AArch64::X1);
}
-EVT AArch64TargetLowering::getSetCCResultType(EVT VT) const {
+EVT AArch64TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
// It's reasonably important that this value matches the "natural" legal
// promotion from i1 for scalar types. Otherwise LegalizeTypes can get itself
// in a twist (e.g. inserting an any_extend which then becomes i64 -> i64).
case AArch64ISD::TC_RETURN: return "AArch64ISD::TC_RETURN";
case AArch64ISD::THREAD_POINTER: return "AArch64ISD::THREAD_POINTER";
case AArch64ISD::TLSDESCCALL: return "AArch64ISD::TLSDESCCALL";
+ case AArch64ISD::WrapperLarge: return "AArch64ISD::WrapperLarge";
case AArch64ISD::WrapperSmall: return "AArch64ISD::WrapperSmall";
default: return NULL;
void
AArch64TargetLowering::SaveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG,
- DebugLoc DL, SDValue &Chain) const {
+ SDLoc DL, SDValue &Chain) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo();
AArch64MachineFunctionInfo *FuncInfo
AArch64TargetLowering::LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
AArch64MachineFunctionInfo *FuncInfo
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
- DebugLoc dl, SelectionDAG &DAG) const {
+ SDLoc dl, SelectionDAG &DAG) const {
// CCValAssign - represent the assignment of the return value to a location.
SmallVector<CCValAssign, 16> RVLocs;
AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
SelectionDAG &DAG = CLI.DAG;
- DebugLoc &dl = CLI.DL;
+ SDLoc &dl = CLI.DL;
SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
}
if (!IsSibCall)
- Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
+ Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
+ dl);
SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, AArch64::XSP,
getPointerTy());
// in the correct location.
if (IsTailCall && !IsSibCall) {
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
- DAG.getIntPtrConstant(0, true), InFlag);
+ DAG.getIntPtrConstant(0, true), InFlag, dl);
InFlag = Chain.getValue(1);
}
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
DAG.getIntPtrConstant(CalleePopBytes, true),
- InFlag);
+ InFlag, dl);
InFlag = Chain.getValue(1);
}
AArch64TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool IsVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
- DebugLoc dl, SelectionDAG &DAG,
+ SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const {
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs;
}
// Build a tokenfactor for all the chains.
- return DAG.getNode(ISD::TokenFactor, Chain.getDebugLoc(), MVT::Other,
+ return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other,
&ArgChains[0], ArgChains.size());
}
SDValue AArch64TargetLowering::getSelectableIntSetCC(SDValue LHS, SDValue RHS,
ISD::CondCode CC, SDValue &A64cc,
- SelectionDAG &DAG, DebugLoc &dl) const {
+ SelectionDAG &DAG, SDLoc &dl) const {
if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
int64_t C = 0;
EVT VT = RHSC->getValueType(0);
SDValue
AArch64TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
EVT PtrVT = getPointerTy();
const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
- assert(getTargetMachine().getCodeModel() == CodeModel::Small
- && "Only small code model supported at the moment");
-
- // The most efficient code is PC-relative anyway for the small memory model,
- // so we don't need to worry about relocation model.
- return DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
- DAG.getTargetBlockAddress(BA, PtrVT, 0,
- AArch64II::MO_NO_FLAG),
- DAG.getTargetBlockAddress(BA, PtrVT, 0,
- AArch64II::MO_LO12),
- DAG.getConstant(/*Alignment=*/ 4, MVT::i32));
+ switch (getTargetMachine().getCodeModel()) {
+ case CodeModel::Small:
+ // The most efficient code is PC-relative anyway for the small memory model,
+ // so we don't need to worry about relocation model.
+ return DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
+ DAG.getTargetBlockAddress(BA, PtrVT, 0,
+ AArch64II::MO_NO_FLAG),
+ DAG.getTargetBlockAddress(BA, PtrVT, 0,
+ AArch64II::MO_LO12),
+ DAG.getConstant(/*Alignment=*/ 4, MVT::i32));
+ case CodeModel::Large:
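+ // In the large code model the absolute address is materialized with a
+ // MOVZ/MOVK sequence, so WrapperLarge is given all four 16-bit fragments
+ // of the address (G3 down to G0).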
+ return DAG.getNode(
+ AArch64ISD::WrapperLarge, DL, PtrVT,
+ DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G3),
+ DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G2_NC),
+ DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G1_NC),
+ DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G0_NC));
+ default:
+ llvm_unreachable("Only small and large code models supported now");
+ }
}
// (BRCOND chain, val, dest)
SDValue
AArch64TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
SDValue Chain = Op.getOperand(0);
SDValue TheBit = Op.getOperand(1);
SDValue DestBB = Op.getOperand(2);
// (BR_CC chain, condcode, lhs, rhs, dest)
SDValue
AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
SDValue Chain = Op.getOperand(0);
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
SDValue LHS = Op.getOperand(2);
CallLoweringInfo CLI(InChain, RetTy, false, false, false, false,
0, getLibcallCallingConv(Call), isTailCall,
/*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
- Callee, Args, DAG, Op->getDebugLoc());
+ Callee, Args, DAG, SDLoc(Op));
std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
if (!CallInfo.second.getNode())
SDValue SrcVal = Op.getOperand(0);
return makeLibCall(DAG, LC, Op.getValueType(), &SrcVal, 1,
- /*isSigned*/ false, Op.getDebugLoc());
+ /*isSigned*/ false, SDLoc(Op));
}
SDValue
}
SDValue
-AArch64TargetLowering::LowerGlobalAddressELF(SDValue Op,
- SelectionDAG &DAG) const {
- // TableGen doesn't have easy access to the CodeModel or RelocationModel, so
- // we make that distinction here.
+AArch64TargetLowering::LowerGlobalAddressELFLarge(SDValue Op,
+ SelectionDAG &DAG) const {
+ assert(getTargetMachine().getCodeModel() == CodeModel::Large);
+ assert(getTargetMachine().getRelocationModel() == Reloc::Static);
+
+ EVT PtrVT = getPointerTy();
+ SDLoc dl(Op);
+ const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
+ const GlobalValue *GV = GN->getGlobal();
+
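+ // Build the full 64-bit absolute address from four MOVZ/MOVK fragments;
+ // this is only meaningful with static relocations, as asserted above.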
+ SDValue GlobalAddr = DAG.getNode(
+ AArch64ISD::WrapperLarge, dl, PtrVT,
+ DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G3),
+ DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G2_NC),
+ DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G1_NC),
+ DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G0_NC));
+
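+ // Any constant offset on the GlobalAddressSDNode is applied with an
+ // explicit ADD rather than being folded into the relocations.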
+ if (GN->getOffset() != 0)
+ return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalAddr,
+ DAG.getConstant(GN->getOffset(), PtrVT));
+ return GlobalAddr;
+}
+
+SDValue
+AArch64TargetLowering::LowerGlobalAddressELFSmall(SDValue Op,
+ SelectionDAG &DAG) const {
- // We support the small memory model for now.
assert(getTargetMachine().getCodeModel() == CodeModel::Small);
EVT PtrVT = getPointerTy();
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
const GlobalValue *GV = GN->getGlobal();
unsigned Alignment = GV->getAlignment();
}
unsigned char HiFixup, LoFixup;
- bool UseGOT = Subtarget->GVIsIndirectSymbol(GV, RelocM);
+ bool UseGOT = getSubtarget()->GVIsIndirectSymbol(GV, RelocM);
if (UseGOT) {
HiFixup = AArch64II::MO_GOT;
return GlobalRef;
}
+SDValue
+AArch64TargetLowering::LowerGlobalAddressELF(SDValue Op,
+ SelectionDAG &DAG) const {
+ // TableGen doesn't have easy access to the CodeModel or RelocationModel, so
+ // we make those distinctions here.
+
+ switch (getTargetMachine().getCodeModel()) {
+ case CodeModel::Small:
+ return LowerGlobalAddressELFSmall(Op, DAG);
+ case CodeModel::Large:
+ return LowerGlobalAddressELFLarge(Op, DAG);
+ default:
+ llvm_unreachable("Only small and large code models supported now");
+ }
+}
+
SDValue AArch64TargetLowering::LowerTLSDescCall(SDValue SymAddr,
SDValue DescAddr,
- DebugLoc DL,
+ SDLoc DL,
SelectionDAG &DAG) const {
EVT PtrVT = getPointerTy();
SDValue
AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op,
SelectionDAG &DAG) const {
- assert(Subtarget->isTargetELF() &&
+ assert(getSubtarget()->isTargetELF() &&
"TLS not implemented for non-ELF targets");
+ assert(getTargetMachine().getCodeModel() == CodeModel::Small
+ && "TLS only supported in small memory model");
const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
TLSModel::Model Model = getTargetMachine().getTLSModel(GA->getGlobal());
SDValue TPOff;
EVT PtrVT = getPointerTy();
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
const GlobalValue *GV = GA->getGlobal();
SDValue ThreadBase = DAG.getNode(AArch64ISD::THREAD_POINTER, DL, PtrVT);
SDValue
AArch64TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
- DebugLoc dl = JT->getDebugLoc();
+ SDLoc dl(JT);
+ EVT PtrVT = getPointerTy();
// When compiling PIC, jump tables get put in the code section so a static
// relocation-style is acceptable for both cases.
- return DAG.getNode(AArch64ISD::WrapperSmall, dl, getPointerTy(),
- DAG.getTargetJumpTable(JT->getIndex(), getPointerTy()),
- DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(),
- AArch64II::MO_LO12),
- DAG.getConstant(1, MVT::i32));
+ switch (getTargetMachine().getCodeModel()) {
+ case CodeModel::Small:
+ return DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT,
+ DAG.getTargetJumpTable(JT->getIndex(), PtrVT),
+ DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
+ AArch64II::MO_LO12),
+ DAG.getConstant(1, MVT::i32));
+ case CodeModel::Large:
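+ // As for globals and block addresses, the large code model builds the
+ // jump table address from four MOVZ/MOVK fragments rather than an
+ // ADRP/ADD pair.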
+ return DAG.getNode(
+ AArch64ISD::WrapperLarge, dl, PtrVT,
+ DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G3),
+ DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G2_NC),
+ DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G1_NC),
+ DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G0_NC));
+ default:
+ llvm_unreachable("Only small and large code models supported now");
+ }
}
// (SELECT_CC lhs, rhs, iftrue, iffalse, condcode)
SDValue
AArch64TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
SDValue IfTrue = Op.getOperand(2);
// (SELECT testbit, iftrue, iffalse)
SDValue
AArch64TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
SDValue TheBit = Op.getOperand(0);
SDValue IfTrue = Op.getOperand(1);
SDValue IfFalse = Op.getOperand(2);
// (SETCC lhs, rhs, condcode)
SDValue
AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
- DebugLoc dl = Op.getDebugLoc();
+ SDLoc dl(Op);
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
// We have to make sure we copy the entire structure: 8+8+8+4+4 = 32 bytes
// rather than just 8.
- return DAG.getMemcpy(Op.getOperand(0), Op.getDebugLoc(),
+ return DAG.getMemcpy(Op.getOperand(0), SDLoc(Op),
Op.getOperand(1), Op.getOperand(2),
DAG.getConstant(32, MVT::i32), 8, false, false,
MachinePointerInfo(DestSV), MachinePointerInfo(SrcSV));
MachineFunction &MF = DAG.getMachineFunction();
AArch64MachineFunctionInfo *FuncInfo
= MF.getInfo<AArch64MachineFunctionInfo>();
- DebugLoc DL = Op.getDebugLoc();
+ SDLoc DL(Op);
SDValue Chain = Op.getOperand(0);
SDValue VAList = Op.getOperand(1);
TargetLowering::DAGCombinerInfo &DCI) {
SelectionDAG &DAG = DCI.DAG;
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
EVT VT = N->getValueType(0);
// We're looking for an SRA/SHL pair which form an SBFX.
/// a compatible SHL operation (unless they're already low). This function
/// checks that condition and returns the least-significant bit that's
/// intended. If the operation is not a field preparation, -1 is returned.
-static int32_t getLSBForBFI(SelectionDAG &DAG, DebugLoc DL, EVT VT,
+static int32_t getLSBForBFI(SelectionDAG &DAG, SDLoc DL, EVT VT,
SDValue &MaskedVal, uint64_t Mask) {
if (!isShiftedMask_64(Mask))
return -1;
// cases (e.g. bitfield to bitfield copy) may still need a real shift before
// the BFI.
- uint64_t LSB = CountTrailingZeros_64(Mask);
+ uint64_t LSB = countTrailingZeros(Mask);
int64_t ShiftRightRequired = LSB;
if (MaskedVal.getOpcode() == ISD::SHL &&
isa<ConstantSDNode>(MaskedVal.getOperand(1))) {
TargetLowering::DAGCombinerInfo &DCI,
const AArch64Subtarget *Subtarget) {
SelectionDAG &DAG = DCI.DAG;
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
EVT VT = N->getValueType(0);
assert(N->getOpcode() == ISD::OR && "Unexpected root");
TargetLowering::DAGCombinerInfo &DCI,
const AArch64Subtarget *Subtarget) {
SelectionDAG &DAG = DCI.DAG;
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
EVT VT = N->getValueType(0);
// First job is to hunt for a MaskedBFI on either the left or right. Swap
static SDValue tryCombineToEXTR(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI) {
SelectionDAG &DAG = DCI.DAG;
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
EVT VT = N->getValueType(0);
assert(N->getOpcode() == ISD::OR && "Unexpected root");
TargetLowering::DAGCombinerInfo &DCI) {
SelectionDAG &DAG = DCI.DAG;
- DebugLoc DL = N->getDebugLoc();
+ SDLoc DL(N);
EVT VT = N->getValueType(0);
// We're looking for an SRA/SHL pair which form an SBFX.
switch (N->getOpcode()) {
default: break;
case ISD::AND: return PerformANDCombine(N, DCI);
- case ISD::OR: return PerformORCombine(N, DCI, Subtarget);
+ case ISD::OR: return PerformORCombine(N, DCI, getSubtarget());
case ISD::SRA: return PerformSRACombine(N, DCI);
}
return SDValue();
case 'S': {
// An absolute symbolic address or label reference.
if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
- Result = DAG.getTargetGlobalAddress(GA->getGlobal(), Op.getDebugLoc(),
+ Result = DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
GA->getValueType(0));
} else if (const BlockAddressSDNode *BA
= dyn_cast<BlockAddressSDNode>(Op)) {
std::pair<unsigned, const TargetRegisterClass*>
AArch64TargetLowering::getRegForInlineAsmConstraint(
const std::string &Constraint,
- EVT VT) const {
+ MVT VT) const {
if (Constraint.size() == 1) {
switch (Constraint[0]) {
case 'r':