ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
SDLoc dl) {
- SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
+ SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
/*isVolatile=*/false, /*AlwaysInline=*/false,
/*isTailCall=*/false,
bool HexagonTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
// If the call is not marked as a tail call, or if tail calls have been
// disabled via the "disable-tail-calls" function attribute, don't emit one.
- if (!CI->isTailCall() || HTM.Options.DisableTailCalls)
+ auto Attr =
+ CI->getParent()->getParent()->getFnAttribute("disable-tail-calls");
+ if (!CI->isTailCall() || Attr.getValueAsString() == "true")
return false;
return true;
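
// For reference, "disable-tail-calls" is a string function attribute in the
// IR; in textual form (the function name @f is illustrative):
//   define void @f() #0 { ... }
//   attributes #0 = { "disable-tail-calls"="true" }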
bool IsStructRet = !Outs.empty() && Outs[0].Flags.isSRet();
MachineFunction &MF = DAG.getMachineFunction();
+ auto PtrVT = getPointerTy(MF.getDataLayout());
// Check for varargs.
int NumNamedVarArgParams = -1;
else
CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon);
- if (DAG.getTarget().Options.DisableTailCalls)
+ auto Attr = MF.getFunction()->getFnAttribute("disable-tail-calls");
+ if (Attr.getValueAsString() == "true")
isTailCall = false;
if (isTailCall) {
SmallVector<SDValue, 8> MemOpChains;
auto &HRI = *Subtarget.getRegisterInfo();
- SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, HRI.getStackRegister(),
- getPointerTy());
+ SDValue StackPtr =
+ DAG.getCopyFromReg(Chain, dl, HRI.getStackRegister(), PtrVT);
// Walk the register/memloc assignments, inserting copies/loads.
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
if (VA.isMemLoc()) {
unsigned LocMemOffset = VA.getLocMemOffset();
- SDValue MemAddr = DAG.getConstant(LocMemOffset, StackPtr.getValueType());
+ SDValue MemAddr = DAG.getConstant(LocMemOffset, dl,
+ StackPtr.getValueType());
MemAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, MemAddr);
if (Flags.isByVal()) {
// The argument is a struct passed by value. According to LLVM, "Arg"
// is a pointer.
MemOpChains.push_back(CreateCopyOfByValArgument(Arg, MemAddr, Chain,
Flags, DAG, dl));
} else {
- MachinePointerInfo LocPI = MachinePointerInfo::getStack(LocMemOffset);
+ MachinePointerInfo LocPI = MachinePointerInfo::getStack(
+ DAG.getMachineFunction(), LocMemOffset);
SDValue S = DAG.getStore(Chain, dl, Arg, MemAddr, LocPI, false,
false, 0);
MemOpChains.push_back(S);
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
if (!isTailCall) {
- SDValue C = DAG.getConstant(NumBytes, getPointerTy(), true);
+ SDValue C = DAG.getConstant(NumBytes, dl, PtrVT, true);
Chain = DAG.getCALLSEQ_START(Chain, C, dl);
}
if (flag_aligned_memcpy) {
const char *MemcpyName =
"__hexagon_memcpy_likely_aligned_min32bytes_mult8bytes";
- Callee = DAG.getTargetExternalSymbol(MemcpyName, getPointerTy());
+ Callee = DAG.getTargetExternalSymbol(MemcpyName, PtrVT);
flag_aligned_memcpy = false;
} else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
- Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, getPointerTy());
+ Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, PtrVT);
} else if (ExternalSymbolSDNode *S =
dyn_cast<ExternalSymbolSDNode>(Callee)) {
- Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
+ Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT);
}
// Returns a chain & a flag for retval copy to use.
if (InFlag.getNode())
Ops.push_back(InFlag);
- if (isTailCall)
+ if (isTailCall) {
+ MF.getFrameInfo()->setHasTailCall();
return DAG.getNode(HexagonISD::TC_RETURN, dl, NodeTys, Ops);
+ }
int OpCode = doesNotReturn ? HexagonISD::CALLv3nr : HexagonISD::CALLv3;
Chain = DAG.getNode(OpCode, dl, NodeTys, Ops);
InFlag = Chain.getValue(1);
// Create the CALLSEQ_END node.
- Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
- DAG.getIntPtrConstant(0, true), InFlag, dl);
+ Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
+ DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
InFlag = Chain.getValue(1);
// Handle result values, copying them out of physregs into vregs that we
// return.
BlockAddress::get(const_cast<BasicBlock *>(MBB->getBasicBlock()));
}
- SDValue JumpTableBase = DAG.getNode(HexagonISD::JT, dl,
- getPointerTy(), TargetJT);
+ SDValue JumpTableBase = DAG.getNode(
+ HexagonISD::JT, dl, getPointerTy(DAG.getDataLayout()), TargetJT);
SDValue ShiftIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
- DAG.getConstant(2, MVT::i32));
+ DAG.getConstant(2, dl, MVT::i32));
SDValue JTAddress = DAG.getNode(ISD::ADD, dl, MVT::i32, JumpTableBase,
ShiftIndex);
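// Each jump-table entry is a 4-byte target address, so the index computed
// above is scaled by 4 (the SHL by 2) before being added to the table base.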
SDValue LoadTarget = DAG.getLoad(MVT::i32, dl, Chain, JTAddress,
dbgs() << "\n";
});
- SDValue AC = DAG.getConstant(A, MVT::i32);
+ SDValue AC = DAG.getConstant(A, dl, MVT::i32);
SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
- return DAG.getNode(HexagonISD::ALLOCA, dl, VTs, Chain, Size, AC);
+ SDValue AA = DAG.getNode(HexagonISD::ALLOCA, dl, VTs, Chain, Size, AC);
+ if (Op.getNode()->getHasDebugValue())
+ DAG.TransferDbgValues(Op, AA);
+ return AA;
}
SDValue
SDValue InpVal = Op.getOperand(0);
if (isa<ConstantSDNode>(InpVal)) {
uint64_t V = cast<ConstantSDNode>(InpVal)->getZExtValue();
- return DAG.getTargetConstant(countPopulation(V), MVT::i64);
+ return DAG.getTargetConstant(countPopulation(V), dl, MVT::i64);
}
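// Non-constant case: POPCOUNT computes a 32-bit count of the 64-bit input;
// widen it back to i64 to preserve the original result type.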
SDValue PopOut = DAG.getNode(HexagonISD::POPCOUNT, dl, MVT::i32, InpVal);
return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, PopOut);
LoadNode->isInvariant(),
Alignment);
// Base+2 load.
- SDValue Increment = DAG.getConstant(2, MVT::i32);
+ SDValue Increment = DAG.getConstant(2, DL, MVT::i32);
Ptr = DAG.getNode(ISD::ADD, DL, Base.getValueType(), Base, Increment);
Loads[1] = DAG.getExtLoad(Ext, DL, MVT::i32, Chain, Ptr,
LoadNode->getPointerInfo(), MVT::i16,
LoadNode->isInvariant(),
Alignment);
// SHL 16, then OR base and base+2.
- SDValue ShiftAmount = DAG.getConstant(16, MVT::i32);
+ SDValue ShiftAmount = DAG.getConstant(16, DL, MVT::i32);
SDValue Tmp1 = DAG.getNode(ISD::SHL, DL, MVT::i32, Loads[1], ShiftAmount);
SDValue Tmp2 = DAG.getNode(ISD::OR, DL, MVT::i32, Tmp1, Loads[0]);
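// Tmp2 is now the low 32-bit word: (base+2 halfword << 16) | base halfword.
// The same shift-and-OR pattern below assembles the high word from the
// halfwords at base+4 and base+6.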
// Base + 4.
- Increment = DAG.getConstant(4, MVT::i32);
+ Increment = DAG.getConstant(4, DL, MVT::i32);
Ptr = DAG.getNode(ISD::ADD, DL, Base.getValueType(), Base, Increment);
Loads[2] = DAG.getExtLoad(Ext, DL, MVT::i32, Chain, Ptr,
LoadNode->getPointerInfo(), MVT::i16,
LoadNode->isInvariant(),
Alignment);
// Base + 6.
- Increment = DAG.getConstant(6, MVT::i32);
+ Increment = DAG.getConstant(6, DL, MVT::i32);
Ptr = DAG.getNode(ISD::ADD, DL, Base.getValueType(), Base, Increment);
Loads[3] = DAG.getExtLoad(Ext, DL, MVT::i32, Chain, Ptr,
LoadNode->getPointerInfo(), MVT::i16,
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
if (Depth) {
SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
- SDValue Offset = DAG.getConstant(4, MVT::i32);
+ SDValue Offset = DAG.getConstant(4, dl, MVT::i32);
return DAG.getLoad(VT, dl, DAG.getEntryNode(),
DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
MachinePointerInfo(), false, false, false, 0);
const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
SDLoc dl(Op);
- Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);
+ auto PtrVT = getPointerTy(DAG.getDataLayout());
+ Result = DAG.getTargetGlobalAddress(GV, dl, PtrVT, Offset);
const HexagonTargetObjectFile *TLOF =
static_cast<const HexagonTargetObjectFile *>(
getTargetMachine().getObjFileLowering());
if (TLOF->IsGlobalInSmallSection(GV, getTargetMachine())) {
- return DAG.getNode(HexagonISD::CONST32_GP, dl, getPointerTy(), Result);
+ return DAG.getNode(HexagonISD::CONST32_GP, dl, PtrVT, Result);
}
- return DAG.getNode(HexagonISD::CONST32, dl, getPointerTy(), Result);
+ return DAG.getNode(HexagonISD::CONST32, dl, PtrVT, Result);
}
// Specifies that for loads and stores VT can be promoted to PromotedLdStVT.
const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
SDValue BA_SD = DAG.getTargetBlockAddress(BA, MVT::i32);
SDLoc dl(Op);
- return DAG.getNode(HexagonISD::CONST32_GP, dl, getPointerTy(), BA_SD);
+ return DAG.getNode(HexagonISD::CONST32_GP, dl,
+ getPointerTy(DAG.getDataLayout()), BA_SD);
}
//===----------------------------------------------------------------------===//
setPrefFunctionAlignment(4);
setMinFunctionAlignment(2);
setInsertFencesForAtomic(false);
- setExceptionPointerRegister(Hexagon::R0);
- setExceptionSelectorRegister(Hexagon::R1);
setStackPointerRegisterToSaveRestore(HRI.getStackRegister());
if (EnableHexSDNodeSched)
addRegisterClass(MVT::f64, &Hexagon::DoubleRegsRegClass);
}
- // Generic action function (for use in std::for_each).
- auto ExpandOp = [this] (MVT VT) -> std::function<void(unsigned)> {
- HexagonTargetLowering *T = this;
- return [T, VT] (unsigned Op) { T->setOperationAction(Op, VT, Expand); };
- };
-
//
// Handling of scalar operations.
//
setOperationAction(ISD::MUL, MVT::i64, Expand);
setOperationAction(ISD::MULHS, MVT::i64, Expand);
- static unsigned IntExpOps[] = {
- ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM,
- ISD::SDIVREM, ISD::UDIVREM, ISD::ROTL, ISD::ROTR,
- ISD::BSWAP, ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS,
- ISD::SMUL_LOHI, ISD::UMUL_LOHI
- };
- static unsigned IntExpOpsLen = array_lengthof(IntExpOps);
- std::for_each(IntExpOps, IntExpOps+IntExpOpsLen, ExpandOp(MVT::i32));
- std::for_each(IntExpOps, IntExpOps+IntExpOpsLen, ExpandOp(MVT::i64));
+ for (unsigned IntExpOp :
+ {ISD::SDIV, ISD::UDIV, ISD::SREM, ISD::UREM, ISD::SDIVREM, ISD::UDIVREM,
+ ISD::ROTL, ISD::ROTR, ISD::BSWAP, ISD::SHL_PARTS, ISD::SRA_PARTS,
+ ISD::SRL_PARTS, ISD::SMUL_LOHI, ISD::UMUL_LOHI}) {
+ setOperationAction(IntExpOp, MVT::i32, Expand);
+ setOperationAction(IntExpOp, MVT::i64, Expand);
+ }
- static unsigned FPExpOps[] = {
- ISD::FDIV, ISD::FREM, ISD::FSQRT, ISD::FSIN, ISD::FCOS, ISD::FSINCOS,
- ISD::FPOW, ISD::FCOPYSIGN
- };
- static unsigned FPExpOpsLen = array_lengthof(FPExpOps);
- std::for_each(FPExpOps, FPExpOps+FPExpOpsLen, ExpandOp(MVT::f32));
- std::for_each(FPExpOps, FPExpOps+FPExpOpsLen, ExpandOp(MVT::f64));
+ for (unsigned FPExpOp :
+ {ISD::FDIV, ISD::FREM, ISD::FSQRT, ISD::FSIN, ISD::FCOS, ISD::FSINCOS,
+ ISD::FPOW, ISD::FCOPYSIGN}) {
+ setOperationAction(FPExpOp, MVT::f32, Expand);
+ setOperationAction(FPExpOp, MVT::f64, Expand);
+ }
// No extending loads from i32.
for (MVT VT : MVT::integer_valuetypes()) {
// Set the action for vector operations to "expand", then override it with
// either "custom" or "legal" for specific cases.
- static unsigned VectExpOps[] = {
+ static const unsigned VectExpOps[] = {
// Integer arithmetic:
ISD::ADD, ISD::SUB, ISD::MUL, ISD::SDIV, ISD::UDIV,
ISD::SREM, ISD::UREM, ISD::SDIVREM, ISD::UDIVREM, ISD::ADDC,
ISD::EXTRACT_SUBVECTOR, ISD::INSERT_SUBVECTOR,
ISD::CONCAT_VECTORS, ISD::VECTOR_SHUFFLE
};
- static unsigned VectExpOpsLen = array_lengthof(VectExpOps);
for (MVT VT : MVT::vector_valuetypes()) {
- std::for_each(VectExpOps, VectExpOps+VectExpOpsLen, ExpandOp(VT));
+ for (unsigned VectExpOp : VectExpOps)
+ setOperationAction(VectExpOp, VT, Expand);
// Expand all extended loads and truncating stores:
for (MVT TargetVT : MVT::vector_valuetypes()) {
}
// Types natively supported:
- static MVT NativeVT[] = {
- MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v32i1, MVT::v64i1,
- MVT::v4i8, MVT::v8i8,
- MVT::v2i16, MVT::v4i16,
- MVT::v1i32, MVT::v2i32,
- MVT::v1i64
- };
- static unsigned NativeVTLen = array_lengthof(NativeVT);
- for (auto I = NativeVT, E = NativeVT+NativeVTLen; I != E; ++I) {
- setOperationAction(ISD::BUILD_VECTOR, *I, Custom);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, *I, Custom);
- setOperationAction(ISD::INSERT_VECTOR_ELT, *I, Custom);
- setOperationAction(ISD::EXTRACT_SUBVECTOR, *I, Custom);
- setOperationAction(ISD::INSERT_SUBVECTOR, *I, Custom);
- setOperationAction(ISD::CONCAT_VECTORS, *I, Custom);
-
- setOperationAction(ISD::ADD, *I, Legal);
- setOperationAction(ISD::SUB, *I, Legal);
- setOperationAction(ISD::MUL, *I, Legal);
- setOperationAction(ISD::AND, *I, Legal);
- setOperationAction(ISD::OR, *I, Legal);
- setOperationAction(ISD::XOR, *I, Legal);
+ for (MVT NativeVT : {MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v32i1, MVT::v64i1,
+ MVT::v4i8, MVT::v8i8, MVT::v2i16, MVT::v4i16, MVT::v1i32,
+ MVT::v2i32, MVT::v1i64}) {
+ setOperationAction(ISD::BUILD_VECTOR, NativeVT, Custom);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, NativeVT, Custom);
+ setOperationAction(ISD::INSERT_VECTOR_ELT, NativeVT, Custom);
+ setOperationAction(ISD::EXTRACT_SUBVECTOR, NativeVT, Custom);
+ setOperationAction(ISD::INSERT_SUBVECTOR, NativeVT, Custom);
+ setOperationAction(ISD::CONCAT_VECTORS, NativeVT, Custom);
+
+ setOperationAction(ISD::ADD, NativeVT, Legal);
+ setOperationAction(ISD::SUB, NativeVT, Legal);
+ setOperationAction(ISD::MUL, NativeVT, Legal);
+ setOperationAction(ISD::AND, NativeVT, Legal);
+ setOperationAction(ISD::OR, NativeVT, Legal);
+ setOperationAction(ISD::XOR, NativeVT, Legal);
}
setOperationAction(ISD::SETCC, MVT::v2i16, Custom);
setOperationAction(ISD::CTPOP, MVT::i64, Expand);
// Expand these operations for both f32 and f64:
- static unsigned FPExpOpsV4[] = {
- ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FABS, ISD::FNEG, ISD::FMA
- };
- static unsigned FPExpOpsV4Len = array_lengthof(FPExpOpsV4);
- std::for_each(FPExpOpsV4, FPExpOpsV4+FPExpOpsV4Len, ExpandOp(MVT::f32));
- std::for_each(FPExpOpsV4, FPExpOpsV4+FPExpOpsV4Len, ExpandOp(MVT::f64));
-
- static ISD::CondCode FPExpCCV4[] = {
- ISD::SETOEQ, ISD::SETOGT, ISD::SETOLT, ISD::SETOGE, ISD::SETOLE,
- ISD::SETUO, ISD::SETO
- };
- static unsigned FPExpCCV4Len = array_lengthof(FPExpCCV4);
- for (auto I = FPExpCCV4, E = FPExpCCV4+FPExpCCV4Len; I != E; ++I) {
- setCondCodeAction(*I, MVT::f32, Expand);
- setCondCodeAction(*I, MVT::f64, Expand);
+ for (unsigned FPExpOpV4 :
+ {ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FABS, ISD::FNEG, ISD::FMA}) {
+ setOperationAction(FPExpOpV4, MVT::f32, Expand);
+ setOperationAction(FPExpOpV4, MVT::f64, Expand);
+ }
+
+ for (ISD::CondCode FPExpCCV4 :
+ {ISD::SETOEQ, ISD::SETOGT, ISD::SETOLT, ISD::SETOGE, ISD::SETOLE,
+ ISD::SETUO, ISD::SETO}) {
+ setCondCodeAction(FPExpCCV4, MVT::f32, Expand);
+ setCondCodeAction(FPExpCCV4, MVT::f64, Expand);
}
}
// Handling of indexed loads/stores: default is "expand".
//
- static MVT LSXTys[] = {
- MVT::i8, MVT::i16, MVT::i32, MVT::i64,
- };
- static unsigned LSXTysLen = array_lengthof(LSXTys);
-
- for (auto I = LSXTys, E = LSXTys+LSXTysLen; I != E; ++I) {
- setIndexedLoadAction(ISD::POST_INC, *I, Legal);
- setIndexedStoreAction(ISD::POST_INC, *I, Legal);
+ for (MVT LSXTy : {MVT::i8, MVT::i16, MVT::i32, MVT::i64}) {
+ setIndexedLoadAction(ISD::POST_INC, LSXTy, Legal);
+ setIndexedStoreAction(ISD::POST_INC, LSXTy, Legal);
}
computeRegisterProperties(&HRI);
const char* HexagonTargetLowering::getTargetNodeName(unsigned Opcode) const {
- switch (Opcode) {
- default: return nullptr;
+ switch ((HexagonISD::NodeType)Opcode) {
case HexagonISD::ALLOCA: return "HexagonISD::ALLOCA";
case HexagonISD::ARGEXTEND: return "HexagonISD::ARGEXTEND";
case HexagonISD::AT_GOT: return "HexagonISD::AT_GOT";
case HexagonISD::VSRLW: return "HexagonISD::VSRLW";
case HexagonISD::VSXTBH: return "HexagonISD::VSXTBH";
case HexagonISD::VSXTBW: return "HexagonISD::VSXTBW";
+ case HexagonISD::OP_END: break;
}
+ return nullptr;
}
bool HexagonTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
if (IsScalarToVector)
return createSplat(DAG, dl, VT, V1.getOperand(0));
}
- return createSplat(DAG, dl, VT, DAG.getConstant(Lane, MVT::i32));
+ return createSplat(DAG, dl, VT, DAG.getConstant(Lane, dl, MVT::i32));
}
// FIXME: We need to support more general vector shuffles. See
unsigned SplatBits = APSplatBits.getZExtValue();
int32_t SextVal = ((int32_t) (SplatBits << (32 - SplatBitSize)) >>
(32 - SplatBitSize));
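// Shifting left and then arithmetic-shifting right by (32 - SplatBitSize)
// sign-extends the splat constant; e.g. an 8-bit splat value 0x80 becomes
// the i32 value -128.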
- return createSplat(DAG, dl, VT, DAG.getConstant(SextVal, MVT::i32));
+ return createSplat(DAG, dl, VT, DAG.getConstant(SextVal, dl, MVT::i32));
}
// Try to generate COMBINE to build v2i32 vectors.
SDValue V1 = BVN->getOperand(1);
if (V0.getOpcode() == ISD::UNDEF)
- V0 = DAG.getConstant(0, MVT::i32);
+ V0 = DAG.getConstant(0, dl, MVT::i32);
if (V1.getOpcode() == ISD::UNDEF)
- V1 = DAG.getConstant(0, MVT::i32);
+ V1 = DAG.getConstant(0, dl, MVT::i32);
ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(V0);
ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(V1);
}
if (Size == 64)
- ConstVal = DAG.getConstant(Res, MVT::i64);
+ ConstVal = DAG.getConstant(Res, dl, MVT::i64);
else
- ConstVal = DAG.getConstant(Res, MVT::i32);
+ ConstVal = DAG.getConstant(Res, dl, MVT::i32);
// When there are non constant operands, add them with INSERT_VECTOR_ELT to
// ConstVal, the constant part of the vector.
if (HasNonConstantElements) {
EVT EltVT = VT.getVectorElementType();
- SDValue Width = DAG.getConstant(EltVT.getSizeInBits(), MVT::i64);
+ SDValue Width = DAG.getConstant(EltVT.getSizeInBits(), dl, MVT::i64);
SDValue Shifted = DAG.getNode(ISD::SHL, dl, MVT::i64, Width,
- DAG.getConstant(32, MVT::i64));
+ DAG.getConstant(32, dl, MVT::i64));
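// The insert/extract nodes built below take a single i64 control operand
// that packs the field width into the upper 32 bits and the bit offset into
// the lower 32 bits: (Width << 32) | Offset.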
for (unsigned i = 0, e = NElts; i != e; ++i) {
// LLVM's BUILD_VECTOR operands are in Little Endian mode, whereas Hexagon
if (VT.getSizeInBits() == 64 &&
Operand.getValueType().getSizeInBits() == 32) {
- SDValue C = DAG.getConstant(0, MVT::i32);
+ SDValue C = DAG.getConstant(0, dl, MVT::i32);
Operand = DAG.getNode(HexagonISD::COMBINE, dl, VT, C, Operand);
}
- SDValue Idx = DAG.getConstant(OpIdx, MVT::i64);
+ SDValue Idx = DAG.getConstant(OpIdx, dl, MVT::i64);
SDValue Offset = DAG.getNode(ISD::MUL, dl, MVT::i64, Idx, Width);
SDValue Combined = DAG.getNode(ISD::OR, dl, MVT::i64, Shifted, Offset);
const SDValue Ops[] = {ConstVal, Operand, Combined};
unsigned NElts = Op.getNumOperands();
SDValue Vec = Op.getOperand(0);
EVT VecVT = Vec.getValueType();
- SDValue Width = DAG.getConstant(VecVT.getSizeInBits(), MVT::i64);
+ SDValue Width = DAG.getConstant(VecVT.getSizeInBits(), dl, MVT::i64);
SDValue Shifted = DAG.getNode(ISD::SHL, dl, MVT::i64, Width,
- DAG.getConstant(32, MVT::i64));
- SDValue ConstVal = DAG.getConstant(0, MVT::i64);
+ DAG.getConstant(32, dl, MVT::i64));
+ SDValue ConstVal = DAG.getConstant(0, dl, MVT::i64);
ConstantSDNode *W = dyn_cast<ConstantSDNode>(Width);
ConstantSDNode *S = dyn_cast<ConstantSDNode>(Shifted);
if (VT.getSizeInBits() == 64 &&
Operand.getValueType().getSizeInBits() == 32) {
- SDValue C = DAG.getConstant(0, MVT::i32);
+ SDValue C = DAG.getConstant(0, dl, MVT::i32);
Operand = DAG.getNode(HexagonISD::COMBINE, dl, VT, C, Operand);
}
- SDValue Idx = DAG.getConstant(OpIdx, MVT::i64);
+ SDValue Idx = DAG.getConstant(OpIdx, dl, MVT::i64);
SDValue Offset = DAG.getNode(ISD::MUL, dl, MVT::i64, Idx, Width);
SDValue Combined = DAG.getNode(ISD::OR, dl, MVT::i64, Shifted, Offset);
const SDValue Ops[] = {ConstVal, Operand, Combined};
EVT EltVT = VecVT.getVectorElementType();
int EltSize = EltVT.getSizeInBits();
SDValue Width = DAG.getConstant(Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT ?
- EltSize : VTN * EltSize, MVT::i64);
+ EltSize : VTN * EltSize, dl, MVT::i64);
// Constant element number.
if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Idx)) {
uint64_t X = CI->getZExtValue();
- SDValue Offset = DAG.getConstant(X * EltSize, MVT::i32);
+ SDValue Offset = DAG.getConstant(X * EltSize, dl, MVT::i32);
const SDValue Ops[] = {Vec, Width, Offset};
ConstantSDNode *CW = dyn_cast<ConstantSDNode>(Width);
// Variable element number.
SDValue Offset = DAG.getNode(ISD::MUL, dl, MVT::i32, Idx,
- DAG.getConstant(EltSize, MVT::i32));
+ DAG.getConstant(EltSize, dl, MVT::i32));
SDValue Shifted = DAG.getNode(ISD::SHL, dl, MVT::i64, Width,
- DAG.getConstant(32, MVT::i64));
+ DAG.getConstant(32, dl, MVT::i64));
SDValue Combined = DAG.getNode(ISD::OR, dl, MVT::i64, Shifted, Offset);
const SDValue Ops[] = {Vec, Combined};
EVT EltVT = VecVT.getVectorElementType();
int EltSize = EltVT.getSizeInBits();
SDValue Width = DAG.getConstant(Op.getOpcode() == ISD::INSERT_VECTOR_ELT ?
- EltSize : VTN * EltSize, MVT::i64);
+ EltSize : VTN * EltSize, dl, MVT::i64);
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Idx)) {
- SDValue Offset = DAG.getConstant(C->getSExtValue() * EltSize, MVT::i32);
+ SDValue Offset = DAG.getConstant(C->getSExtValue() * EltSize, dl, MVT::i32);
const SDValue Ops[] = {Vec, Val, Width, Offset};
SDValue N;
// Variable element number.
SDValue Offset = DAG.getNode(ISD::MUL, dl, MVT::i32, Idx,
- DAG.getConstant(EltSize, MVT::i32));
+ DAG.getConstant(EltSize, dl, MVT::i32));
SDValue Shifted = DAG.getNode(ISD::SHL, dl, MVT::i64, Width,
- DAG.getConstant(32, MVT::i64));
+ DAG.getConstant(32, dl, MVT::i64));
SDValue Combined = DAG.getNode(ISD::OR, dl, MVT::i64, Shifted, Offset);
if (VT.getSizeInBits() == 64 &&
Val.getValueType().getSizeInBits() == 32) {
- SDValue C = DAG.getConstant(0, MVT::i32);
+ SDValue C = DAG.getConstant(0, dl, MVT::i32);
Val = DAG.getNode(HexagonISD::COMBINE, dl, VT, C, Val);
}
SDValue Offset = Op.getOperand(1);
SDValue Handler = Op.getOperand(2);
SDLoc dl(Op);
+ auto PtrVT = getPointerTy(DAG.getDataLayout());
// Mark function as containing a call to EH_RETURN.
HexagonMachineFunctionInfo *FuncInfo =
unsigned OffsetReg = Hexagon::R28;
- SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(),
- DAG.getRegister(Hexagon::R30, getPointerTy()),
- DAG.getIntPtrConstant(4));
+ SDValue StoreAddr =
+ DAG.getNode(ISD::ADD, dl, PtrVT, DAG.getRegister(Hexagon::R30, PtrVT),
+ DAG.getIntPtrConstant(4, dl));
Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
false, false, 0);
Chain = DAG.getCopyToReg(Chain, dl, OffsetReg, Offset);
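// EH_RETURN lowering stores the handler address at FP+4 (R30 is the frame
// pointer) and moves the return offset into R28.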
std::pair<unsigned, const TargetRegisterClass *>
HexagonTargetLowering::getRegForInlineAsmConstraint(
- const TargetRegisterInfo *TRI, const std::string &Constraint,
- MVT VT) const {
+ const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
if (Constraint.size() == 1) {
switch (Constraint[0]) {
case 'r': // R0-R31
/// isLegalAddressingMode - Return true if the addressing mode represented by
/// AM is legal for this target, for a load/store of the specified type.
-bool HexagonTargetLowering::isLegalAddressingMode(const AddrMode &AM,
- Type *Ty) const {
+bool HexagonTargetLowering::isLegalAddressingMode(const DataLayout &DL,
+ const AddrMode &AM, Type *Ty,
+ unsigned AS) const {
// Allows a sign-extended 11-bit immediate field.
if (AM.BaseOffs <= -(1LL << 13) || AM.BaseOffs >= (1LL << 13)-1)
return false;
// ***************************************************************************
// If this is a tail call via a function pointer, then don't do it!
- if (!(dyn_cast<GlobalAddressSDNode>(Callee))
- && !(dyn_cast<ExternalSymbolSDNode>(Callee))) {
+ if (!isa<GlobalAddressSDNode>(Callee) &&
+     !isa<ExternalSymbolSDNode>(Callee)) {
return false;
}
return true;
}
}
+
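+/// Emit a load-linked (locked load) of Addr, using the 32- or 64-bit
+/// Hexagon locked-load intrinsic that matches the pointee type.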
+Value *HexagonTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
+ AtomicOrdering Ord) const {
+ BasicBlock *BB = Builder.GetInsertBlock();
+ Module *M = BB->getParent()->getParent();
+ Type *Ty = cast<PointerType>(Addr->getType())->getElementType();
+ unsigned SZ = Ty->getPrimitiveSizeInBits();
+ assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic loads supported");
+ Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_L2_loadw_locked
+ : Intrinsic::hexagon_L4_loadd_locked;
+ Value *Fn = Intrinsic::getDeclaration(M, IntID);
+ return Builder.CreateCall(Fn, Addr, "larx");
+}
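+
+// A rough sketch (illustrative, not part of this patch) of the loop that the
+// generic AtomicExpand pass builds around these two hooks for a 32-bit
+// atomic operation:
+//   loop:
+//     %old    = <emitLoadLinked %addr>        ; L2_loadw_locked
+//     %new    = ... computed from %old ...
+//     %status = <emitStoreConditional %new>   ; 0 iff the store succeeded
+//     br i1 (%status != 0), %loop, %done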
+
+/// Perform a store-conditional operation to Addr. Return the status of the
+/// store. This should be 0 if the store succeeded, non-zero otherwise.
+Value *HexagonTargetLowering::emitStoreConditional(IRBuilder<> &Builder,
+ Value *Val, Value *Addr, AtomicOrdering Ord) const {
+ BasicBlock *BB = Builder.GetInsertBlock();
+ Module *M = BB->getParent()->getParent();
+ Type *Ty = Val->getType();
+ unsigned SZ = Ty->getPrimitiveSizeInBits();
+ assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic stores supported");
+ Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_S2_storew_locked
+ : Intrinsic::hexagon_S4_stored_locked;
+ Value *Fn = Intrinsic::getDeclaration(M, IntID);
+ Value *Call = Builder.CreateCall(Fn, {Addr, Val}, "stcx");
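+ // The contract above requires 0 on success, while the intrinsic returns
+ // non-zero on success, so invert the result with the compare below.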
+ Value *Cmp = Builder.CreateICmpEQ(Call, Builder.getInt32(0), "");
+ Value *Ext = Builder.CreateZExt(Cmp, Type::getInt32Ty(M->getContext()));
+ return Ext;
+}
+
+TargetLowering::AtomicExpansionKind
+HexagonTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
+ // Do not expand loads and stores that don't exceed 64 bits.
+ return LI->getType()->getPrimitiveSizeInBits() > 64
+ ? AtomicExpansionKind::LLSC
+ : AtomicExpansionKind::None;
+}
+
+bool HexagonTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
+ // Do not expand loads and stores that don't exceed 64 bits.
+ return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() > 64;
+}
+