SDOperand getMemcpy(SDOperand Chain, SDOperand Dst, SDOperand Src,
SDOperand Size, unsigned Align,
bool AlwaysInline,
- const Value *DstSV, uint64_t DstOff,
- const Value *SrcSV, uint64_t SrcOff);
+ const Value *DstSV, uint64_t DstSVOff,
+ const Value *SrcSV, uint64_t SrcSVOff);
SDOperand getMemmove(SDOperand Chain, SDOperand Dst, SDOperand Src,
SDOperand Size, unsigned Align,
- const Value *DstSV, uint64_t DstOff,
- const Value *SrcSV, uint64_t SrcOff);
+ const Value *DstSV, uint64_t DstSVOff,
+ const Value *SrcSV, uint64_t SrcSVOff);
SDOperand getMemset(SDOperand Chain, SDOperand Dst, SDOperand Src,
SDOperand Size, unsigned Align,
- const Value *DstSV, uint64_t DstOff);
+ const Value *DstSV, uint64_t DstSVOff);
/// getSetCC - Helper function to make it easier to build SetCC's if you just
/// have an ISD::CondCode instead of an SDOperand.
SDOperand Src, uint64_t Size,
unsigned Align,
bool AlwaysInline,
- const Value *DstSV, uint64_t DstOff,
- const Value *SrcSV, uint64_t SrcOff) {
+ const Value *DstSV, uint64_t DstSVOff,
+ const Value *SrcSV, uint64_t SrcSVOff) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
// Expand memcpy to a series of store ops if the size operand falls below
// a certain threshold.
GlobalAddressSDNode *G = NULL;
std::string Str;
bool CopyFromStr = false;
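+ // Running byte offsets of the current load/store within this expansion.
+ // These are kept separate from SrcSVOff/DstSVOff, the fixed offsets of
+ // Src/Dst from their SrcValue bases, and the two are summed below to
+ // form the SrcValue offsets passed to getLoad/getStore.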
+ uint64_t SrcOff = 0, DstOff = 0;
if (Src.getOpcode() == ISD::GlobalAddress)
G = cast<GlobalAddressSDNode>(Src);
Store =
DAG.getStore(Chain, Value,
getMemBasePlusOffset(Dst, DstOff, DAG),
- DstSV, DstOff);
+ DstSV, DstSVOff + DstOff);
} else {
Value = DAG.getLoad(VT, Chain,
getMemBasePlusOffset(Src, SrcOff, DAG),
- SrcSV, SrcOff, false, Align);
+ SrcSV, SrcSVOff + SrcOff, false, Align);
Store =
DAG.getStore(Chain, Value,
getMemBasePlusOffset(Dst, DstOff, DAG),
- DstSV, DstOff, false, Align);
+ DstSV, DstSVOff + DstOff, false, Align);
}
OutChains.push_back(Store);
SrcOff += VTSize;
SDOperand Chain, SDOperand Dst,
SDOperand Src, uint64_t Size,
unsigned Align,
- const Value *DstSV, uint64_t DstOff) {
+ const Value *DstSV, uint64_t DstSVOff) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
// Expand memset to a series of load/store ops if the size operand
// falls below a certain threshold.
return SDOperand();
SmallVector<SDOperand, 8> OutChains;
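+ // Running byte offset of the current store; summed with DstSVOff below
+ // to form the SrcValue offset passed to getStore.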
+ uint64_t DstOff = 0;
unsigned NumMemOps = MemOps.size();
for (unsigned i = 0; i < NumMemOps; i++) {
SDOperand Value = getMemsetValue(Src, VT, DAG);
SDOperand Store = DAG.getStore(Chain, Value,
getMemBasePlusOffset(Dst, DstOff, DAG),
- DstSV, DstOff);
+ DstSV, DstSVOff + DstOff);
OutChains.push_back(Store);
DstOff += VTSize;
}
SDOperand SelectionDAG::getMemcpy(SDOperand Chain, SDOperand Dst,
SDOperand Src, SDOperand Size,
unsigned Align, bool AlwaysInline,
- const Value *DstSV, uint64_t DstOff,
- const Value *SrcSV, uint64_t SrcOff) {
+ const Value *DstSV, uint64_t DstSVOff,
+ const Value *SrcSV, uint64_t SrcSVOff) {
// Check to see if we should lower the memcpy to loads and stores first.
// For cases within the target-specified limits, this is the best choice.
SDOperand Result =
getMemcpyLoadsAndStores(*this, Chain, Dst, Src, ConstantSize->getValue(),
- Align, false, DstSV, DstOff, SrcSV, SrcOff);
+ Align, false, DstSV, DstSVOff, SrcSV, SrcSVOff);
if (Result.Val)
return Result;
}
SDOperand Result =
TLI.EmitTargetCodeForMemcpy(*this, Chain, Dst, Src, Size, Align,
AlwaysInline,
- DstSV, DstOff, SrcSV, SrcOff);
+ DstSV, DstSVOff, SrcSV, SrcSVOff);
if (Result.Val)
return Result;
assert(ConstantSize && "AlwaysInline requires a constant size!");
return getMemcpyLoadsAndStores(*this, Chain, Dst, Src,
ConstantSize->getValue(), Align, true,
- DstSV, DstOff, SrcSV, SrcOff);
+ DstSV, DstSVOff, SrcSV, SrcSVOff);
}
// Emit a library call.
SDOperand SelectionDAG::getMemmove(SDOperand Chain, SDOperand Dst,
SDOperand Src, SDOperand Size,
unsigned Align,
- const Value *DstSV, uint64_t DstOff,
- const Value *SrcSV, uint64_t SrcOff) {
+ const Value *DstSV, uint64_t DstSVOff,
+ const Value *SrcSV, uint64_t SrcSVOff) {
// TODO: Optimize small memmove cases with simple loads and stores,
// ensuring that all loads precede all stores. This can cause severe
// register pressure.
// Then check to see if we should lower the memmove with target-specific
// code. If the target chooses to do this, this is the next best.
SDOperand Result =
TLI.EmitTargetCodeForMemmove(*this, Chain, Dst, Src, Size, Align,
- DstSV, DstOff, SrcSV, SrcOff);
+ DstSV, DstSVOff, SrcSV, SrcSVOff);
if (Result.Val)
return Result;
SDOperand SelectionDAG::getMemset(SDOperand Chain, SDOperand Dst,
SDOperand Src, SDOperand Size,
unsigned Align,
- const Value *DstSV, uint64_t DstOff) {
+ const Value *DstSV, uint64_t DstSVOff) {
// Check to see if we should lower the memset to stores first.
// For cases within the target-specified limits, this is the best choice.
SDOperand Result =
getMemsetStores(*this, Chain, Dst, Src, ConstantSize->getValue(), Align,
- DstSV, DstOff);
+ DstSV, DstSVOff);
if (Result.Val)
return Result;
}
// code. If the target chooses to do this, this is the next best.
SDOperand Result =
TLI.EmitTargetCodeForMemset(*this, Chain, Dst, Src, Size, Align,
- DstSV, DstOff);
+ DstSV, DstSVOff);
if (Result.Val)
return Result;
SDOperand Dst, SDOperand Src,
SDOperand Size, unsigned Align,
bool AlwaysInline,
- const Value *DstSV, uint64_t DstOff,
- const Value *SrcSV, uint64_t SrcOff){
+ const Value *DstSV, uint64_t DstSVOff,
+ const Value *SrcSV, uint64_t SrcSVOff){
// Do repeated 4-byte loads and stores. To be improved.
// This requires 4-byte alignment.
if ((Align & 3) != 0)
const unsigned MAX_LOADS_IN_LDM = 6;
SDOperand TFOps[MAX_LOADS_IN_LDM];
SDOperand Loads[MAX_LOADS_IN_LDM];
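+ // Running byte offsets of the current load/store, kept separate from the
+ // SrcValue base offsets SrcSVOff/DstSVOff supplied by the caller.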
+ uint64_t SrcOff = 0, DstOff = 0;
// Emit up to MAX_LOADS_IN_LDM loads, then a TokenFactor barrier, then the
// same number of stores. The loads and stores will get combined into
// ldm/stm later on.
Loads[i] = DAG.getLoad(VT, Chain,
DAG.getNode(ISD::ADD, MVT::i32, Src,
DAG.getConstant(SrcOff, MVT::i32)),
- SrcSV, SrcOff);
+ SrcSV, SrcSVOff + SrcOff);
TFOps[i] = Loads[i].getValue(1);
SrcOff += VTSize;
}
TFOps[i] = DAG.getStore(Chain, Loads[i],
DAG.getNode(ISD::ADD, MVT::i32, Dst,
DAG.getConstant(DstOff, MVT::i32)),
- DstSV, DstOff);
+ DstSV, DstSVOff + DstOff);
DstOff += VTSize;
}
Chain = DAG.getNode(ISD::TokenFactor, MVT::Other, &TFOps[0], i);
Loads[i] = DAG.getLoad(VT, Chain,
DAG.getNode(ISD::ADD, MVT::i32, Src,
DAG.getConstant(SrcOff, MVT::i32)),
- SrcSV, SrcOff);
+ SrcSV, SrcSVOff + SrcOff);
TFOps[i] = Loads[i].getValue(1);
++i;
SrcOff += VTSize;
TFOps[i] = DAG.getStore(Chain, Loads[i],
DAG.getNode(ISD::ADD, MVT::i32, Dst,
DAG.getConstant(DstOff, MVT::i32)),
- DstSV, DstOff);
+ DstSV, DstSVOff + DstOff);
++i;
DstOff += VTSize;
BytesLeft -= VTSize;
SDOperand Chain,
SDOperand Dst, SDOperand Src,
SDOperand Size, unsigned Align,
- const Value *DstSV, uint64_t DstOff) {
+ const Value *DstSV, uint64_t DstSVOff) {
ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
/// If not DWORD aligned or size is more than the threshold, call the library.
DAG.getConstant(Offset, AddrVT)),
Src,
DAG.getConstant(BytesLeft, SizeVT),
- Align, DstSV, 0);
+ Align, DstSV, DstSVOff + Offset);
}
// TODO: Use a TokenFactor, as in memcpy, instead of a single chain.
SDOperand Dst, SDOperand Src,
SDOperand Size, unsigned Align,
bool AlwaysInline,
- const Value *DstSV, uint64_t DstOff,
- const Value *SrcSV, uint64_t SrcOff){
+ const Value *DstSV, uint64_t DstSVOff,
+ const Value *SrcSV, uint64_t SrcSVOff){
// This requires the copy size to be a constant, preferably
// within a subtarget-specific limit.
DAG.getConstant(Offset, SrcVT)),
DAG.getConstant(BytesLeft, SizeVT),
Align, AlwaysInline,
- DstSV, 0, SrcSV, 0));
+ DstSV, DstSVOff + Offset,
+ SrcSV, SrcSVOff + Offset));
}
return DAG.getNode(ISD::TokenFactor, MVT::Other, &Results[0], Results.size());