// void foo(_Complex float *P)
// for (i) { __real__(*P) = 0; __imag__(*P) = 0; }
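+// (each iteration zeroes the 8 contiguous bytes of one _Complex float, so
+// the loop as a whole is expressible as a memset).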
//
-// We should enhance this to handle negative strides through memory.
-// Alternatively (and perhaps better) we could rely on an earlier pass to force
-// forward iteration through memory, which is generally better for cache
-// behavior. Negative strides *do* happen for memset/memcpy loops.
-//
// This could recognize common matrix multiplies and dot product idioms and
// replace them with calls to BLAS (if linked in??).
//
ScalarEvolution *SE;
TargetLibraryInfo *TLI;
const TargetTransformInfo *TTI;
+ const DataLayout *DL;
public:
static char ID;
bool processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
unsigned StoreAlignment, Value *SplatValue,
Instruction *TheStore, const SCEVAddRecExpr *Ev,
- const SCEV *BECount);
+ const SCEV *BECount, bool NegStride);
bool processLoopStoreOfLoopLoad(StoreInst *SI, unsigned StoreSize,
const SCEVAddRecExpr *StoreEv,
const SCEVAddRecExpr *LoadEv,
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
*CurLoop->getHeader()->getParent());
+ DL = &CurLoop->getHeader()->getModule()->getDataLayout();
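+ // Only countable loops, i.e. those whose backedge-taken count SCEV can
+ // compute as a loop-invariant expression, can be transformed: the
+ // memset/memcpy size must be materialized in the preheader.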
if (SE->hasLoopInvariantBackedgeTakenCount(L))
return runOnCountableLoop();
Value *StorePtr = SI->getPointerOperand();
// Reject stores that are so large that they overflow an unsigned.
- auto &DL = CurLoop->getHeader()->getModule()->getDataLayout();
- uint64_t SizeInBits = DL.getTypeSizeInBits(StoredVal->getType());
+ uint64_t SizeInBits = DL->getTypeSizeInBits(StoredVal->getType());
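+ // (SizeInBits & 7) rejects types that aren't a whole number of bytes, and
+ // (SizeInBits >> 32) rejects sizes that don't fit in 32 bits, so the byte
+ // count below can't overflow an unsigned.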
if ((SizeInBits & 7) || (SizeInBits >> 32) != 0)
return false;
// Check to see if the stride matches the size of the store. If so, then we
// know that every byte is touched in the loop.
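+ // For example, "for (i) A[i] = 0" over an i32 array stores 4 bytes per
+ // iteration with a 4-byte stride, so the stored-to region has no gaps.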
unsigned StoreSize = (unsigned)SizeInBits >> 3;
- const SCEVConstant *Stride = dyn_cast<SCEVConstant>(StoreEv->getOperand(1));
-
- if (!Stride || StoreSize != Stride->getValue()->getValue()) {
- // TODO: Could also handle negative stride here someday, that will require
- // the validity check in mayLoopAccessLocation to be updated though.
- // Enable this to print exact negative strides.
- if (0 && Stride && StoreSize == -Stride->getValue()->getValue()) {
- dbgs() << "NEGATIVE STRIDE: " << *SI << "\n";
- dbgs() << "BB: " << *SI->getParent();
- }
+ const SCEVConstant *ConstStride =
+ dyn_cast<SCEVConstant>(StoreEv->getOperand(1));
+ if (!ConstStride)
+ return false;
+
+ APInt Stride = ConstStride->getValue()->getValue();
+ if (StoreSize != Stride && StoreSize != -Stride)
return false;
- }
+
+ bool NegStride = StoreSize == -Stride;
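+ // Negative strides come from descending loops such as
+ // "for (i = n - 1; i >= 0; --i) A[i] = 0".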
// See if we can optimize just this store in isolation.
if (processLoopStridedStore(StorePtr, StoreSize, SI->getAlignment(),
- StoredVal, SI, StoreEv, BECount))
+ StoredVal, SI, StoreEv, BECount, NegStride))
return true;
+ // TODO: We don't handle negative stride memcpys.
+ if (NegStride)
+ return false;
+
// If the stored value is a strided load in the same loop with the same stride
- // this this may be transformable into a memcpy. This kicks in for stuff like
+ // then this may be transformable into a memcpy. This kicks in for stuff like
// for (i) A[i] = B[i];
if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
const SCEVAddRecExpr *LoadEv =
return processLoopStridedStore(Pointer, (unsigned)SizeInBytes,
MSI->getAlignment(), MSI->getValue(), MSI, Ev,
- BECount);
+ BECount, /*NegStride=*/false);
}
/// mayLoopAccessLocation - Return true if the specified loop might access the
///
/// Note that we don't ever attempt to use memset_pattern8 or 4, because these
/// just replicate their input array and then pass on to memset_pattern16.
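+/// memset_pattern16 is a Darwin libc extension with the signature
+///   void memset_pattern16(void *b, const void *pattern16, size_t len);
+/// it fills b with len bytes of a repeating 16-byte pattern.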
-static Constant *getMemSetPatternValue(Value *V, const DataLayout &DL) {
+static Constant *getMemSetPatternValue(Value *V, const DataLayout *DL) {
// If the value isn't a constant, we can't promote it to being in a constant
// array. We could theoretically do a store to an alloca or something, but
// that doesn't seem worthwhile.
Constant *C = dyn_cast<Constant>(V);
if (!C)
return nullptr;
// Only handle simple values that are a power of two bytes in size.
- uint64_t Size = DL.getTypeSizeInBits(V->getType());
+ uint64_t Size = DL->getTypeSizeInBits(V->getType());
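+ // (Size & (Size - 1)) == 0 is the standard power-of-two test; combined with
+ // (Size & 7) == 0 it accepts only values of 1, 2, 4, 8, ... bytes.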
if (Size == 0 || (Size & 7) || (Size & (Size - 1)))
return nullptr;
// Don't care enough about darwin/ppc to implement this.
- if (DL.isBigEndian())
+ if (DL->isBigEndian())
return nullptr;
// Convert to size in bytes.
bool LoopIdiomRecognize::processLoopStridedStore(
Value *DestPtr, unsigned StoreSize, unsigned StoreAlignment,
Value *StoredVal, Instruction *TheStore, const SCEVAddRecExpr *Ev,
- const SCEV *BECount) {
+ const SCEV *BECount, bool NegStride) {
// If the stored value is a byte-wise value (like i32 -1), then it may be
// turned into a memset of i8 -1, assuming that all the consecutive bytes
// are set to the same value. Otherwise, we can't promote this to a memset,
// but it can be turned into memset_pattern if the target supports it.
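+ // For example, i32 0x01010101 is byte-wise (a memset of i8 0x01), while
+ // i32 0x01020304 is not, but can still be stored as a repeating 16-byte
+ // pattern via memset_pattern16.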
Value *SplatValue = isBytewiseValue(StoredVal);
Constant *PatternValue = nullptr;
- auto &DL = CurLoop->getHeader()->getModule()->getDataLayout();
unsigned DestAS = DestPtr->getType()->getPointerAddressSpace();
// If we're allowed to form a memset, and the stored value would be acceptable
// header. This allows us to insert code for it in the preheader.
BasicBlock *Preheader = CurLoop->getLoopPreheader();
IRBuilder<> Builder(Preheader->getTerminator());
- SCEVExpander Expander(*SE, DL, "loop-idiom");
+ SCEVExpander Expander(*SE, *DL, "loop-idiom");
Type *DestInt8PtrTy = Builder.getInt8PtrTy(DestAS);
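+ // IntPtr is computed up front because it is needed both for the
+ // negative-stride start adjustment below and for expanding the trip count.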
+ Type *IntPtr = Builder.getIntPtrTy(*DL, DestAS);
+
+ const SCEV *Start = Ev->getStart();
+ // If we have a negative stride, Start refers to the end of the memory
+ // location we're trying to memset. Therefore, we need to recompute the start
+ // point, which is just Start - BECount*Size.
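+ // For example, "for (i = n - 1; i >= 0; --i) A[i] = 0" over an i32 array
+ // has Start == &A[n-1] and BECount == n-1, so the memset must begin at
+ // &A[n-1] - (n-1)*4 == &A[0].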
+ if (NegStride) {
+ const SCEV *Index = SE->getTruncateOrZeroExtend(BECount, IntPtr);
+ if (StoreSize != 1)
+ Index = SE->getMulExpr(Index, SE->getConstant(IntPtr, StoreSize),
+ SCEV::FlagNUW);
+ Start = SE->getMinusSCEV(Ev->getStart(), Index);
+ }
// Okay, we have a strided store "p[i]" of a splattable value. We can turn
// this into a memset in the loop preheader now if we want. However, this
// would be unsafe to do if there is anything else in the loop that may read
// or write to the aliased location. Check for any overlap by generating the
// base pointer and checking the region.
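+ // For instance, in "for (i) { A[i] = 0; sum += A[n-1]; }" early iterations
+ // read A[n-1] before it is stored to, so hoisting a memset into the
+ // preheader would clobber the value those reads observe.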
- Value *BasePtr = Expander.expandCodeFor(Ev->getStart(), DestInt8PtrTy,
- Preheader->getTerminator());
-
+ Value *BasePtr =
+ Expander.expandCodeFor(Start, DestInt8PtrTy, Preheader->getTerminator());
if (mayLoopAccessLocation(BasePtr, MRI_ModRef, CurLoop, BECount, StoreSize,
*AA, TheStore)) {
Expander.clear();
// The # stored bytes is (BECount+1)*Size. Expand the trip count out to
// pointer size if it isn't already.
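+ // Continuing the i32 example: BECount == n-1 and StoreSize == 4 give
+ // (n-1+1)*4 == 4*n stored bytes.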
- Type *IntPtr = Builder.getIntPtrTy(DL, DestAS);
BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);
const SCEV *NumBytesS =
// header. This allows us to insert code for it in the preheader.
BasicBlock *Preheader = CurLoop->getLoopPreheader();
IRBuilder<> Builder(Preheader->getTerminator());
- const DataLayout &DL = Preheader->getModule()->getDataLayout();
- SCEVExpander Expander(*SE, DL, "loop-idiom");
+ SCEVExpander Expander(*SE, *DL, "loop-idiom");
// Okay, we have a strided store "p[i]" of a loaded value. We can turn
// this into a memcpy in the loop preheader now if we want. However, this
// The # stored bytes is (BECount+1)*Size. Expand the trip count out to
// pointer size if it isn't already.
- Type *IntPtrTy = Builder.getIntPtrTy(DL, SI->getPointerAddressSpace());
+ Type *IntPtrTy = Builder.getIntPtrTy(*DL, SI->getPointerAddressSpace());
BECount = SE->getTruncateOrZeroExtend(BECount, IntPtrTy);
const SCEV *NumBytesS =