#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/TargetTransformInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
unsigned NumVTBytes = VT.getSizeInBits() / 8;
unsigned NumBytes = std::min(NumVTBytes, unsigned(Str.size()));
- uint64_t Val = 0;
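+ // Use an APInt rather than a uint64_t so the value can be handed to the
+ // target's integer-immediate cost hook below, and so value types wider
+ // than 64 bits remain representable.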
+ APInt Val(NumBytes*8, 0);
if (TLI.isLittleEndian()) {
for (unsigned i = 0; i != NumBytes; ++i)
Val |= (uint64_t)(unsigned char)Str[i] << i*8;
} else {
for (unsigned i = 0; i != NumBytes; ++i)
Val |= (uint64_t)(unsigned char)Str[i] << (NumVTBytes-i-1)*8;
}
- return DAG.getConstant(Val, VT);
+ // If the "cost" of materializing the integer immediate is 1 or free, then
+ // it is cost effective to turn the load into the immediate.
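+ // (Targets typically return 0 or 1 here for immediates they can
+ // materialize with a single instruction; anything costlier would likely
+ // need a constant-pool load, defeating the point of the transform.)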
+ if (DAG.getTarget().getScalarTargetTransformInfo()->
+ getIntImmCost(Val, VT.getTypeForEVT(*DAG.getContext())) < 2)
+ return DAG.getConstant(Val, VT);
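+ // Otherwise return a null SDValue; the caller falls back to emitting a
+ // load from the constant string instead.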
+ return SDValue(0, 0);
}
/// getMemBasePlusOffset - Returns base and offset node for the given
/// base address plus a constant offset.
unsigned DstAlign, unsigned SrcAlign,
bool IsZeroVal,
bool MemcpyStrSrc,
+ bool AllowOverlap,
SelectionDAG &DAG,
const TargetLowering &TLI) {
assert((SrcAlign == 0 || SrcAlign >= DstAlign) &&
"Expecting memcpy / memset source to meet alignment requirement!");
unsigned NumMemOps = 0;
while (Size != 0) {
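+ // Give up once the number of memory operations exceeds the target's
+ // limit; past that point the libcall is the better expansion.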
+ if (++NumMemOps > Limit)
+ return false;
+
unsigned VTSize = VT.getSizeInBits() / 8;
while (VTSize > Size) {
// For now, only use non-vector loads / stores for the left-over pieces.
+ EVT NewVT = VT;
+ unsigned NewVTSize;
+
+ bool Found = false;
if (VT.isVector() || VT.isFloatingPoint()) {
- VT = MVT::i64;
- while (!TLI.isTypeLegal(VT))
- VT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1);
- VTSize = VT.getSizeInBits() / 8;
- } else {
- // This can result in a type that is not legal on the target, e.g.
- // 1 or 2 bytes on PPC.
- VT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1);
- VTSize >>= 1;
+ NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32;
+ if (TLI.isOperationLegalOrCustom(ISD::STORE, NewVT) &&
+ TLI.isLegalMemOpType(NewVT.getSimpleVT()))
+ Found = true;
+ else if (NewVT == MVT::i64 &&
+ TLI.isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
+ TLI.isLegalMemOpType(MVT::f64)) {
+ // i64 is usually not legal on 32-bit targets, but f64 may be.
+ NewVT = MVT::f64;
+ Found = true;
+ }
+ }
+
+ if (!Found) {
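+ // Walk down the list of simple value types until one is usable for a
+ // memory operation on this target, stopping at i8, the smallest
+ // candidate, even if the target never declared it legal.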
+ do {
+ NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
+ if (NewVT == MVT::i8)
+ break;
+ } while (!TLI.isLegalMemOpType(NewVT.getSimpleVT()));
+ }
+ NewVTSize = NewVT.getSizeInBits() / 8;
+
+ // If the new VT cannot cover all of the remaining bits, then consider
+ // issuing an unaligned and overlapping load / store (or a pair of them).
+ // FIXME: Only do this for accesses of 64 bits or more, since we lack a
+ // proper cost model for unaligned load / store.
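+ // E.g. a 15-byte copy can become an i64 load / store at offset 0 plus
+ // an unaligned i64 load / store at offset 7 that overlaps the first by
+ // one byte, instead of an i64 + i32 + i16 + i8 sequence.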
+ bool Fast;
+ if (AllowOverlap && VTSize >= 8 && NewVTSize < Size &&
+ TLI.allowsUnalignedMemoryAccesses(VT, &Fast) && Fast)
+ VTSize = Size;
+ else {
+ VT = NewVT;
+ VTSize = NewVTSize;
}
}
- if (++NumMemOps > Limit)
- return false;
MemOps.push_back(VT);
Size -= VTSize;
}
if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
(DstAlignCanChange ? 0 : Align),
(isZeroStr ? 0 : SrcAlign),
- true, CopyFromStr, DAG, TLI))
+ true, CopyFromStr, true, DAG, TLI))
return SDValue();
if (DstAlignCanChange) {
unsigned VTSize = VT.getSizeInBits() / 8;
SDValue Value, Store;
+ if (VTSize > Size) {
+ // We're issuing an unaligned load / store pair that overlaps the
+ // previous pair; adjust the offsets accordingly.
+ assert(i == NumMemOps-1 && i != 0);
+ SrcOff -= VTSize - Size;
+ DstOff -= VTSize - Size;
+ }
+
if (CopyFromStr &&
(isZeroStr || (VT.isInteger() && !VT.isVector()))) {
// It's unlikely a store of a vector immediate can be done in a single
// instruction. It would require a load from a constant pool first.
// FIXME: Handle other cases where store of vector immediate is done in
// a single instruction.
Value = getMemsetStringVal(VT, dl, DAG, TLI, Str.substr(SrcOff));
- Store = DAG.getStore(Chain, dl, Value,
- getMemBasePlusOffset(Dst, DstOff, DAG),
- DstPtrInfo.getWithOffset(DstOff), isVol,
- false, Align);
- } else {
+ if (Value.getNode())
+ Store = DAG.getStore(Chain, dl, Value,
+ getMemBasePlusOffset(Dst, DstOff, DAG),
+ DstPtrInfo.getWithOffset(DstOff), isVol,
+ false, Align);
+ }
+
+ if (!Store.getNode()) {
// The type might not be legal for the target. This should only happen
// if the type is smaller than a legal type, as on PPC, so the right
// thing to do is generate a LoadExt/StoreTrunc pair. These simplify
// to Load/Store if NVT == VT.
OutChains.push_back(Store);
SrcOff += VTSize;
DstOff += VTSize;
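+ // Track the bytes still to be copied so the final, possibly overlapping
+ // operation can back up SrcOff / DstOff (see the VTSize > Size case
+ // above).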
+ Size -= VTSize;
}
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
&OutChains[0], OutChains.size());
if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
(DstAlignCanChange ? 0 : Align),
- SrcAlign, true, false, DAG, TLI))
+ SrcAlign, true, false, false, DAG, TLI))
return SDValue();
if (DstAlignCanChange) {
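+ // A constant-zero memset can use wider (including vector and FP) store
+ // types on many targets, so record whether the value is zero before
+ // picking the operation types.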
bool IsZeroVal =
isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize),
Size, (DstAlignCanChange ? 0 : Align), 0,
- IsZeroVal, false, DAG, TLI))
+ IsZeroVal, false, true, DAG, TLI))
return SDValue();
if (DstAlignCanChange) {
for (unsigned i = 0; i < NumMemOps; i++) {
EVT VT = MemOps[i];
+ unsigned VTSize = VT.getSizeInBits() / 8;
+ if (VTSize > Size) {
+ // We're issuing an unaligned load / store pair that overlaps the
+ // previous pair; adjust the offset accordingly.
+ assert(i == NumMemOps-1 && i != 0);
+ DstOff -= VTSize - Size;
+ }
// If this store is smaller than the largest store, see whether we can get
// the smaller value for free with a truncate.
isVol, false, Align);
OutChains.push_back(Store);
DstOff += VT.getSizeInBits() / 8;
+ Size -= VTSize;
}
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
&OutChains[0], OutChains.size());