diff --git a/lib/Transforms/Scalar/SROA.cpp b/lib/Transforms/Scalar/SROA.cpp
index 46f3ab5c362..859e3a79b7d 100644
--- a/lib/Transforms/Scalar/SROA.cpp
+++ b/lib/Transforms/Scalar/SROA.cpp
@@ -85,6 +85,11 @@ ForceSSAUpdater("force-ssa-updater", cl::init(false), cl::Hidden);
 static cl::opt<bool> SROARandomShuffleSlices("sroa-random-shuffle-slices",
                                              cl::init(false), cl::Hidden);
 
+/// Hidden option to experiment with completely strict handling of inbounds
+/// GEPs.
+static cl::opt<bool> SROAStrictInbounds("sroa-strict-inbounds",
+                                        cl::init(false), cl::Hidden);
+
 namespace {
 /// \brief A custom IRBuilder inserter which prefixes all names if they are
 /// preserved.
@@ -351,7 +356,7 @@ private:
                  bool IsSplittable = false) {
     // Completely skip uses which have a zero size or start either before or
     // past the end of the allocation.
-    if (Size == 0 || Offset.isNegative() || Offset.uge(AllocSize)) {
+    if (Size == 0 || Offset.uge(AllocSize)) {
       DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @" << Offset
                    << " which has zero size or starts outside of the "
                    << AllocSize << " byte alloca:\n"
@@ -392,6 +397,43 @@ private:
     if (GEPI.use_empty())
       return markAsDead(GEPI);
 
+    if (SROAStrictInbounds && GEPI.isInBounds()) {
+      // FIXME: This is a manually un-factored variant of the basic code inside
+      // of GEPs with checking of the inbounds invariant specified in the
+      // langref in a very strict sense. If we ever want to enable
+      // SROAStrictInbounds, this code should be factored cleanly into
+      // PtrUseVisitor, but it is easier to experiment with SROAStrictInbounds
+      // by writing out the code here where we have the underlying allocation
+      // size readily available.
+      APInt GEPOffset = Offset;
+      for (gep_type_iterator GTI = gep_type_begin(GEPI),
+                             GTE = gep_type_end(GEPI);
+           GTI != GTE; ++GTI) {
+        ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
+        if (!OpC)
+          break;
+
+        // Handle a struct index, which adds its field offset to the pointer.
+        if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+          unsigned ElementIdx = OpC->getZExtValue();
+          const StructLayout *SL = DL.getStructLayout(STy);
+          GEPOffset +=
+              APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx));
+        } else {
+          // For array or vector indices, scale the index by the size of the type.
+          APInt Index = OpC->getValue().sextOrTrunc(Offset.getBitWidth());
+          GEPOffset += Index * APInt(Offset.getBitWidth(),
+                                     DL.getTypeAllocSize(GTI.getIndexedType()));
+        }
+
+        // If this index has computed an intermediate pointer which is not
+        // inbounds, then the result of the GEP is a poison value and we can
+        // delete it and all uses.
+        if (GEPOffset.ugt(AllocSize))
+          return markAsDead(GEPI);
+      }
+    }
+
     return Base::visitGetElementPtrInst(GEPI);
   }
 
@@ -438,8 +480,7 @@ private:
     // risk of overflow.
     // FIXME: We should instead consider the pointer to have escaped if this
     // function is being instrumented for addressing bugs or race conditions.
-    if (Offset.isNegative() || Size > AllocSize ||
-        Offset.ugt(AllocSize - Size)) {
+    if (Size > AllocSize || Offset.ugt(AllocSize - Size)) {
       DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte store @" << Offset
                    << " which extends past the end of the " << AllocSize
                    << " byte alloca:\n"
@@ -458,7 +499,7 @@ private:
     assert(II.getRawDest() == *U && "Pointer use is not the destination?");
     ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
     if ((Length && Length->getValue() == 0) ||
-        (IsOffsetKnown && !Offset.isNegative() && Offset.uge(AllocSize)))
+        (IsOffsetKnown && Offset.uge(AllocSize)))
       // Zero-length mem transfer intrinsics can be ignored entirely.
       return markAsDead(II);
 
@@ -490,7 +531,7 @@ private:
     // if already added to our partitions.
     // FIXME: Yet another place we really should bypass this when
     // instrumenting for ASan.
-    if (!Offset.isNegative() && Offset.uge(AllocSize)) {
+    if (Offset.uge(AllocSize)) {
       SmallDenseMap<Instruction *, unsigned>::iterator MTPI =
           MemTransferSliceMap.find(&II);
       if (MTPI != MemTransferSliceMap.end())
         S.Slices[MTPI->second].kill();
@@ -625,8 +666,7 @@ private:
     // themselves which should be replaced with undef.
     // FIXME: This should instead be escaped in the event we're instrumenting
     // for address sanitization.
-    if ((Offset.isNegative() && (-Offset).uge(PHISize)) ||
-        (!Offset.isNegative() && Offset.uge(AllocSize))) {
+    if (Offset.uge(AllocSize)) {
       S.DeadOperands.push_back(U);
       return;
     }
@@ -666,8 +706,7 @@ private:
     // themselves which should be replaced with undef.
     // FIXME: This should instead be escaped in the event we're instrumenting
     // for address sanitization.
-    if ((Offset.isNegative() && Offset.uge(SelectSize)) ||
-        (!Offset.isNegative() && Offset.uge(AllocSize))) {
+    if (Offset.uge(AllocSize)) {
       S.DeadOperands.push_back(U);
       return;
     }
@@ -1223,7 +1262,7 @@ static void speculateSelectInstLoads(SelectInst &SI) {
 /// This will return the BasePtr if that is valid, or build a new GEP
 /// instruction using the IRBuilder if GEP-ing is needed.
 static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr,
-                       SmallVectorImpl<Value *> &Indices) {
+                       SmallVectorImpl<Value *> &Indices, Twine NamePrefix) {
   if (Indices.empty())
     return BasePtr;
 
@@ -1232,7 +1271,7 @@ static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr,
   if (Indices.size() == 1 && cast<ConstantInt>(Indices.back())->isZero())
     return BasePtr;
 
-  return IRB.CreateInBoundsGEP(BasePtr, Indices, "idx");
+  return IRB.CreateInBoundsGEP(BasePtr, Indices, NamePrefix + "sroa_idx");
 }
 
 /// \brief Get a natural GEP off of the BasePtr walking through Ty toward
@@ -1246,9 +1285,10 @@ static Value *buildGEP(IRBuilderTy &IRB, Value *BasePtr,
 /// indicated by Indices to have the correct offset.
 static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL,
                                     Value *BasePtr, Type *Ty, Type *TargetTy,
-                                    SmallVectorImpl<Value *> &Indices) {
+                                    SmallVectorImpl<Value *> &Indices,
+                                    Twine NamePrefix) {
   if (Ty == TargetTy)
-    return buildGEP(IRB, BasePtr, Indices);
+    return buildGEP(IRB, BasePtr, Indices, NamePrefix);
 
   // See if we can descend into a struct and locate a field with the correct
   // type.
@@ -1275,7 +1315,7 @@ static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL,
   if (ElementTy != TargetTy)
     Indices.erase(Indices.end() - NumLayers, Indices.end());
 
-  return buildGEP(IRB, BasePtr, Indices);
+  return buildGEP(IRB, BasePtr, Indices, NamePrefix);
 }
 
 /// \brief Recursively compute indices for a natural GEP.
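
The helpers above, together with getNaturalGEPRecursively in the next hunk, decompose a flat byte offset into a chain of GEP indices using the type layout that DataLayout provides. Here is a minimal standalone C++ sketch of that decomposition, as a reading aid only; it is not LLVM code, and the element size and field offsets are hypothetical stand-ins for DataLayout::getTypeAllocSize and StructLayout::getElementOffset:

#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  // Model an alloca of type [8 x { i32, i32 }]: 8-byte elements with
  // fields at byte offsets 0 and 4 inside each element.
  const uint64_t ElementSize = 8;
  const std::vector<uint64_t> FieldOffsets = {0, 4};

  uint64_t Offset = 20; // byte offset to reach from the base pointer

  // Array step: skip whole elements (mirrors the NumSkippedElements logic).
  uint64_t ArrayIdx = Offset / ElementSize; // 2
  Offset -= ArrayIdx * ElementSize;         // 4 bytes remain inside element 2

  // Struct step: pick the field containing the remaining offset (mirrors
  // what StructLayout::getElementContainingOffset computes).
  unsigned FieldIdx = 0;
  for (unsigned i = 0; i != FieldOffsets.size(); ++i)
    if (FieldOffsets[i] <= Offset)
      FieldIdx = i;
  Offset -= FieldOffsets[FieldIdx]; // 0: the offset lands exactly on a field

  // A zero remainder means a "natural" GEP exists:
  //   getelementptr inbounds [8 x { i32, i32 }]* %p, i64 0, i64 2, i32 1
  // A nonzero remainder would force getAdjustedPtr's i8* fallback path.
  std::cout << "indices: 0, " << ArrayIdx << ", " << FieldIdx
            << ", remainder: " << Offset << "\n";
  return 0;
}
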
@@ -1285,9 +1325,10 @@ static Value *getNaturalGEPWithType(IRBuilderTy &IRB, const DataLayout &DL,
 static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL,
                                        Value *Ptr, Type *Ty, APInt &Offset,
                                        Type *TargetTy,
-                                       SmallVectorImpl<Value *> &Indices) {
+                                       SmallVectorImpl<Value *> &Indices,
+                                       Twine NamePrefix) {
   if (Offset == 0)
-    return getNaturalGEPWithType(IRB, DL, Ptr, Ty, TargetTy, Indices);
+    return getNaturalGEPWithType(IRB, DL, Ptr, Ty, TargetTy, Indices, NamePrefix);
 
   // We can't recurse through pointer types.
   if (Ty->isPointerTy())
@@ -1307,7 +1348,7 @@ static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL,
     Offset -= NumSkippedElements * ElementSize;
     Indices.push_back(IRB.getInt(NumSkippedElements));
     return getNaturalGEPRecursively(IRB, DL, Ptr, VecTy->getElementType(),
-                                    Offset, TargetTy, Indices);
+                                    Offset, TargetTy, Indices, NamePrefix);
   }
 
   if (ArrayType *ArrTy = dyn_cast<ArrayType>(Ty)) {
@@ -1320,7 +1361,7 @@ static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL,
     Offset -= NumSkippedElements * ElementSize;
     Indices.push_back(IRB.getInt(NumSkippedElements));
     return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
-                                    Indices);
+                                    Indices, NamePrefix);
   }
 
   StructType *STy = dyn_cast<StructType>(Ty);
@@ -1339,7 +1380,7 @@ static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL,
 
   Indices.push_back(IRB.getInt32(Index));
   return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
-                                  Indices);
+                                  Indices, NamePrefix);
 }
 
 /// \brief Get a natural GEP from a base pointer to a particular offset and
@@ -1354,7 +1395,8 @@ static Value *getNaturalGEPRecursively(IRBuilderTy &IRB, const DataLayout &DL,
 /// If no natural GEP can be constructed, this function returns null.
 static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &DL,
                                       Value *Ptr, APInt Offset, Type *TargetTy,
-                                      SmallVectorImpl<Value *> &Indices) {
+                                      SmallVectorImpl<Value *> &Indices,
+                                      Twine NamePrefix) {
   PointerType *Ty = cast<PointerType>(Ptr->getType());
 
   // Don't consider any GEPs through an i8* as natural unless the TargetTy is
@@ -1373,7 +1415,7 @@ static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &DL,
   Offset -= NumSkippedElements * ElementSize;
   Indices.push_back(IRB.getInt(NumSkippedElements));
   return getNaturalGEPRecursively(IRB, DL, Ptr, ElementTy, Offset, TargetTy,
-                                  Indices);
+                                  Indices, NamePrefix);
 }
 
 /// \brief Compute an adjusted pointer from Ptr by Offset bytes where the
@@ -1391,8 +1433,9 @@ static Value *getNaturalGEPWithOffset(IRBuilderTy &IRB, const DataLayout &DL,
 /// properties. The algorithm tries to fold as many constant indices into
 /// a single GEP as possible, thus making each GEP more independent of the
 /// surrounding code.
-static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL,
-                             Value *Ptr, APInt Offset, Type *PointerTy) {
+static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL, Value *Ptr,
+                             APInt Offset, Type *PointerTy,
+                             Twine NamePrefix) {
   // Even though we don't look through PHI nodes, we could be called on an
   // instruction in an unreachable block, which may be on a cycle.
   SmallPtrSet<Value *, 4> Visited;
@@ -1426,7 +1469,7 @@ static Value *getAdjustedPtr(IRBuilderTy &IRB, const DataLayout &DL,
     // See if we can perform a natural GEP here.
     Indices.clear();
     if (Value *P = getNaturalGEPWithOffset(IRB, DL, Ptr, Offset, TargetTy,
                                            Indices, NamePrefix)) {
      if (P->getType() == PointerTy) {
        // Zap any offset pointer that we ended up computing in previous rounds.
        if (OffsetPtr && OffsetPtr->use_empty())
@@ -1461,19 +1504,19 @@
     if (!OffsetPtr) {
       if (!Int8Ptr) {
         Int8Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy(),
-                                    "raw_cast");
+                                    NamePrefix + "sroa_raw_cast");
         Int8PtrOffset = Offset;
       }
 
       OffsetPtr = Int8PtrOffset == 0 ? Int8Ptr :
           IRB.CreateInBoundsGEP(Int8Ptr, IRB.getInt(Int8PtrOffset),
-                                "raw_idx");
+                                NamePrefix + "sroa_raw_idx");
     }
     Ptr = OffsetPtr;
 
     // On the off chance we were targeting i8*, guard the bitcast here.
     if (Ptr->getType() != PointerTy)
-      Ptr = IRB.CreateBitCast(Ptr, PointerTy, "cast");
+      Ptr = IRB.CreateBitCast(Ptr, PointerTy, NamePrefix + "sroa_cast");
 
     return Ptr;
   }
@@ -1966,8 +2009,14 @@ class AllocaSliceRewriter : public InstVisitor<AllocaSliceRewriter, bool> {
   // integer type will be stored here for easy access during rewriting.
   IntegerType *IntTy;
 
-  // The offset of the slice currently being rewritten.
+  // The original offset of the slice currently being rewritten relative to
+  // the original alloca.
   uint64_t BeginOffset, EndOffset;
+  // The new offsets of the slice currently being rewritten relative to the
+  // original alloca.
+  uint64_t NewBeginOffset, NewEndOffset;
+
+  uint64_t SliceSize;
   bool IsSplittable;
   bool IsSplit;
   Use *OldUse;
@@ -1984,12 +2033,14 @@ class AllocaSliceRewriter : public InstVisitor<AllocaSliceRewriter, bool> {
 public:
   AllocaSliceRewriter(const DataLayout &DL, AllocaSlices &S, SROA &Pass,
                       AllocaInst &OldAI, AllocaInst &NewAI,
-                      uint64_t NewBeginOffset, uint64_t NewEndOffset,
-                      bool IsVectorPromotable, bool IsIntegerPromotable,
+                      uint64_t NewAllocaBeginOffset,
+                      uint64_t NewAllocaEndOffset, bool IsVectorPromotable,
+                      bool IsIntegerPromotable,
                       SmallPtrSetImpl<PHINode *> &PHIUsers,
                       SmallPtrSetImpl<SelectInst *> &SelectUsers)
       : DL(DL), S(S), Pass(Pass), OldAI(OldAI), NewAI(NewAI),
-        NewAllocaBeginOffset(NewBeginOffset), NewAllocaEndOffset(NewEndOffset),
+        NewAllocaBeginOffset(NewAllocaBeginOffset),
+        NewAllocaEndOffset(NewAllocaEndOffset),
         NewAllocaTy(NewAI.getAllocatedType()),
         VecTy(IsVectorPromotable ? cast<VectorType>(NewAllocaTy) : 0),
         ElementTy(VecTy ? VecTy->getElementType() : 0),
@@ -2019,6 +2070,14 @@ public:
     IsSplit =
         BeginOffset < NewAllocaBeginOffset || EndOffset > NewAllocaEndOffset;
 
+    // Compute the intersecting offset range.
+    assert(BeginOffset < NewAllocaEndOffset);
+    assert(EndOffset > NewAllocaBeginOffset);
+    NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
+    NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
+
+    SliceSize = NewEndOffset - NewBeginOffset;
+
     OldUse = I->getUse();
     OldPtr = cast<Instruction>(OldUse->get());
 
@@ -2043,30 +2102,53 @@ private:
     llvm_unreachable("No rewrite rule for this instruction!");
   }
 
-  Value *getAdjustedAllocaPtr(IRBuilderTy &IRB, uint64_t Offset,
-                              Type *PointerTy) {
-    assert(Offset >= NewAllocaBeginOffset);
-    return getAdjustedPtr(IRB, DL, &NewAI, APInt(DL.getPointerSizeInBits(),
-                                                 Offset - NewAllocaBeginOffset),
-                          PointerTy);
+  Value *getNewAllocaSlicePtr(IRBuilderTy &IRB, Type *PointerTy) {
+    // Note that the offset computation can use BeginOffset or NewBeginOffset
+    // interchangeably for unsplit slices.
+    assert(IsSplit || BeginOffset == NewBeginOffset);
+    uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
+
+#ifndef NDEBUG
+    StringRef OldName = OldPtr->getName();
+    // Skip through the last '.sroa.' component of the name.
+    size_t LastSROAPrefix = OldName.rfind(".sroa.");
+    if (LastSROAPrefix != StringRef::npos) {
+      OldName = OldName.substr(LastSROAPrefix + strlen(".sroa."));
+      // Look for an SROA slice index.
+      size_t IndexEnd = OldName.find_first_not_of("0123456789");
+      if (IndexEnd != StringRef::npos && OldName[IndexEnd] == '.') {
+        // Strip the index and look for the offset.
+        OldName = OldName.substr(IndexEnd + 1);
+        size_t OffsetEnd = OldName.find_first_not_of("0123456789");
+        if (OffsetEnd != StringRef::npos && OldName[OffsetEnd] == '.')
+          // Strip the offset.
+          OldName = OldName.substr(OffsetEnd + 1);
+      }
+    }
+    // Strip any SROA suffixes as well.
+    OldName = OldName.substr(0, OldName.find(".sroa_"));
+#endif
+
+    return getAdjustedPtr(IRB, DL, &NewAI,
+                          APInt(DL.getPointerSizeInBits(), Offset), PointerTy,
+#ifndef NDEBUG
+                          Twine(OldName) + "."
+#else
+                          Twine()
+#endif
+                          );
   }
 
-  /// \brief Compute suitable alignment to access an offset into the new alloca.
-  unsigned getOffsetAlign(uint64_t Offset) {
+  /// \brief Compute suitable alignment to access this slice of the *new* alloca.
+  ///
+  /// You can optionally pass a type to this routine and if that type's ABI
+  /// alignment is itself suitable, this will return zero.
+  unsigned getSliceAlign(Type *Ty = 0) {
     unsigned NewAIAlign = NewAI.getAlignment();
     if (!NewAIAlign)
       NewAIAlign = DL.getABITypeAlignment(NewAI.getAllocatedType());
-    return MinAlign(NewAIAlign, Offset);
-  }
-
-  /// \brief Compute suitable alignment to access a type at an offset of the
-  /// new alloca.
-  ///
-  /// \returns zero if the type's ABI alignment is a suitable alignment,
-  /// otherwise returns the maximal suitable alignment.
-  unsigned getOffsetTypeAlign(Type *Ty, uint64_t Offset) {
-    unsigned Align = getOffsetAlign(Offset);
-    return Align == DL.getABITypeAlignment(Ty) ? 0 : Align;
+    unsigned Align = MinAlign(NewAIAlign, NewBeginOffset - NewAllocaBeginOffset);
+    return (Ty && Align == DL.getABITypeAlignment(Ty)) ? 0 : Align;
   }
 
   unsigned getIndex(uint64_t Offset) {
@@ -2084,8 +2166,7 @@ private:
     Pass.DeadInsts.insert(I);
   }
 
-  Value *rewriteVectorizedLoadInst(uint64_t NewBeginOffset,
-                                   uint64_t NewEndOffset) {
+  Value *rewriteVectorizedLoadInst() {
     unsigned BeginIndex = getIndex(NewBeginOffset);
     unsigned EndIndex = getIndex(NewEndOffset);
     assert(EndIndex > BeginIndex && "Empty vector!");
@@ -2095,8 +2176,7 @@ private:
     return extractVector(IRB, V, BeginIndex, EndIndex, "vec");
   }
 
-  Value *rewriteIntegerLoad(LoadInst &LI, uint64_t NewBeginOffset,
-                            uint64_t NewEndOffset) {
+  Value *rewriteIntegerLoad(LoadInst &LI) {
    assert(IntTy && "We cannot insert an integer to the alloca");
    assert(!LI.isVolatile());
    Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
@@ -2115,32 +2195,23 @@ private:
     Value *OldOp = LI.getOperand(0);
     assert(OldOp == OldPtr);
 
-    // Compute the intersecting offset range.
-    assert(BeginOffset < NewAllocaEndOffset);
-    assert(EndOffset > NewAllocaBeginOffset);
-    uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
-    uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
-
-    uint64_t Size = NewEndOffset - NewBeginOffset;
-
-    Type *TargetTy = IsSplit ? Type::getIntNTy(LI.getContext(), Size * 8)
+    Type *TargetTy = IsSplit ? Type::getIntNTy(LI.getContext(), SliceSize * 8)
                              : LI.getType();
     bool IsPtrAdjusted = false;
     Value *V;
     if (VecTy) {
-      V = rewriteVectorizedLoadInst(NewBeginOffset, NewEndOffset);
+      V = rewriteVectorizedLoadInst();
     } else if (IntTy && LI.getType()->isIntegerTy()) {
-      V = rewriteIntegerLoad(LI, NewBeginOffset, NewEndOffset);
+      V = rewriteIntegerLoad(LI);
     } else if (NewBeginOffset == NewAllocaBeginOffset &&
                canConvertValue(DL, NewAllocaTy, LI.getType())) {
       V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
-                                LI.isVolatile(), "load");
+                                LI.isVolatile(), LI.getName());
     } else {
       Type *LTy = TargetTy->getPointerTo();
-      V = IRB.CreateAlignedLoad(
-          getAdjustedAllocaPtr(IRB, NewBeginOffset, LTy),
-          getOffsetTypeAlign(TargetTy, NewBeginOffset - NewAllocaBeginOffset),
-          LI.isVolatile(), "load");
+      V = IRB.CreateAlignedLoad(getNewAllocaSlicePtr(IRB, LTy),
+                                getSliceAlign(TargetTy), LI.isVolatile(),
+                                LI.getName());
       IsPtrAdjusted = true;
     }
     V = convertValue(DL, IRB, V, TargetTy);
@@ -2149,7 +2220,7 @@ private:
       assert(!LI.isVolatile());
       assert(LI.getType()->isIntegerTy() &&
              "Only integer type loads and stores are split");
-      assert(Size < DL.getTypeStoreSize(LI.getType()) &&
+      assert(SliceSize < DL.getTypeStoreSize(LI.getType()) &&
              "Split load isn't smaller than original load");
       assert(LI.getType()->getIntegerBitWidth() ==
              DL.getTypeStoreSizeInBits(LI.getType()) &&
@@ -2177,9 +2248,7 @@ private:
     return !LI.isVolatile() && !IsPtrAdjusted;
   }
 
-  bool rewriteVectorizedStoreInst(Value *V, StoreInst &SI, Value *OldOp,
-                                  uint64_t NewBeginOffset,
-                                  uint64_t NewEndOffset) {
+  bool rewriteVectorizedStoreInst(Value *V, StoreInst &SI, Value *OldOp) {
     if (V->getType() != VecTy) {
       unsigned BeginIndex = getIndex(NewBeginOffset);
       unsigned EndIndex = getIndex(NewEndOffset);
@@ -2205,8 +2274,7 @@ private:
     return true;
   }
 
-  bool rewriteIntegerStore(Value *V, StoreInst &SI,
-                           uint64_t NewBeginOffset, uint64_t NewEndOffset) {
+  bool rewriteIntegerStore(Value *V, StoreInst &SI) {
     assert(IntTy && "We cannot extract an integer from the alloca");
     assert(!SI.isVolatile());
     if (DL.getTypeSizeInBits(V->getType()) != IntTy->getBitWidth()) {
@@ -2239,30 +2307,22 @@ private:
     if (AllocaInst *AI = dyn_cast<AllocaInst>(V->stripInBoundsOffsets()))
       Pass.PostPromotionWorklist.insert(AI);
 
-    // Compute the intersecting offset range.
-    assert(BeginOffset < NewAllocaEndOffset);
-    assert(EndOffset > NewAllocaBeginOffset);
-    uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
-    uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
-
-    uint64_t Size = NewEndOffset - NewBeginOffset;
-    if (Size < DL.getTypeStoreSize(V->getType())) {
+    if (SliceSize < DL.getTypeStoreSize(V->getType())) {
       assert(!SI.isVolatile());
       assert(V->getType()->isIntegerTy() &&
              "Only integer type loads and stores are split");
       assert(V->getType()->getIntegerBitWidth() ==
              DL.getTypeStoreSizeInBits(V->getType()) &&
             "Non-byte-multiple bit width");
-      IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), Size * 8);
+      IntegerType *NarrowTy = Type::getIntNTy(SI.getContext(), SliceSize * 8);
       V = extractInteger(DL, IRB, V, NarrowTy, NewBeginOffset, "extract");
     }
 
     if (VecTy)
-      return rewriteVectorizedStoreInst(V, SI, OldOp, NewBeginOffset,
-                                        NewEndOffset);
+      return rewriteVectorizedStoreInst(V, SI, OldOp);
     if (IntTy && V->getType()->isIntegerTy())
-      return rewriteIntegerStore(V, SI, NewBeginOffset, NewEndOffset);
+      return rewriteIntegerStore(V, SI);
 
     StoreInst *NewSI;
     if (NewBeginOffset == NewAllocaBeginOffset &&
@@ -2272,12 +2332,9 @@ private:
       NewSI = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
                                      SI.isVolatile());
     } else {
-      Value *NewPtr = getAdjustedAllocaPtr(IRB, NewBeginOffset,
-                                           V->getType()->getPointerTo());
-      NewSI = IRB.CreateAlignedStore(
-          V, NewPtr, getOffsetTypeAlign(V->getType(),
-                                        NewBeginOffset - NewAllocaBeginOffset),
-          SI.isVolatile());
+      Value *NewPtr = getNewAllocaSlicePtr(IRB, V->getType()->getPointerTo());
+      NewSI = IRB.CreateAlignedStore(V, NewPtr, getSliceAlign(V->getType()),
+                                     SI.isVolatile());
     }
     (void)NewSI;
     Pass.DeadInsts.insert(&SI);
@@ -2329,11 +2386,10 @@ private:
     // pointer to the new alloca.
     if (!isa<ConstantInt>(II.getLength())) {
       assert(!IsSplit);
-      assert(BeginOffset >= NewAllocaBeginOffset);
-      II.setDest(
-          getAdjustedAllocaPtr(IRB, BeginOffset, II.getRawDest()->getType()));
+      assert(NewBeginOffset == BeginOffset);
+      II.setDest(getNewAllocaSlicePtr(IRB, OldPtr->getType()));
       Type *CstTy = II.getAlignmentCst()->getType();
-      II.setAlignment(ConstantInt::get(CstTy, getOffsetAlign(BeginOffset)));
+      II.setAlignment(ConstantInt::get(CstTy, getSliceAlign()));
 
       deleteIfTriviallyDead(OldPtr);
       return false;
@@ -2345,13 +2401,6 @@ private:
     Type *AllocaTy = NewAI.getAllocatedType();
     Type *ScalarTy = AllocaTy->getScalarType();
 
-    // Compute the intersecting offset range.
-    assert(BeginOffset < NewAllocaEndOffset);
-    assert(EndOffset > NewAllocaBeginOffset);
-    uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
-    uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
-    uint64_t SliceOffset = NewBeginOffset - NewAllocaBeginOffset;
-
     // If this doesn't map cleanly onto the alloca type, and that type isn't
     // a single value type, just emit a memset.
     if (!VecTy && !IntTy &&
@@ -2363,8 +2412,8 @@ private:
       Type *SizeTy = II.getLength()->getType();
       Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset);
       CallInst *New = IRB.CreateMemSet(
-          getAdjustedAllocaPtr(IRB, NewBeginOffset, II.getRawDest()->getType()),
-          II.getValue(), Size, getOffsetAlign(SliceOffset), II.isVolatile());
+          getNewAllocaSlicePtr(IRB, OldPtr->getType()), II.getValue(), Size,
+          getSliceAlign(), II.isVolatile());
       (void)New;
       DEBUG(dbgs() << "          to: " << *New << "\n");
       return false;
@@ -2441,26 +2490,11 @@ private:
 
     DEBUG(dbgs() << "    original: " << II << "\n");
 
-    // Compute the intersecting offset range.
-    assert(BeginOffset < NewAllocaEndOffset);
-    assert(EndOffset > NewAllocaBeginOffset);
-    uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
-    uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
-
     bool IsDest = &II.getRawDestUse() == OldUse;
     assert((IsDest && II.getRawDest() == OldPtr) ||
            (!IsDest && II.getRawSource() == OldPtr));
 
-    // Compute the relative offset within the transfer.
-    unsigned IntPtrWidth = DL.getPointerSizeInBits();
-    APInt RelOffset(IntPtrWidth, NewBeginOffset - BeginOffset);
-
-    unsigned Align = II.getAlignment();
-    uint64_t SliceOffset = NewBeginOffset - NewAllocaBeginOffset;
-    if (Align > 1)
-      Align =
-          MinAlign(RelOffset.zextOrTrunc(64).getZExtValue(),
-                   MinAlign(II.getAlignment(), getOffsetAlign(SliceOffset)));
+    unsigned SliceAlign = getSliceAlign();
 
     // For unsplit intrinsics, we simply modify the source and destination
     // pointers in place. This isn't just an optimization, it is a matter of
@@ -2470,19 +2504,20 @@ private:
     // memcpy, and so simply updating the pointers is necessary for us to
     // update both source and dest of a single call.
     if (!IsSplittable) {
-      Value *OldOp = IsDest ? II.getRawDest() : II.getRawSource();
+      Value *AdjustedPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
       if (IsDest)
-        II.setDest(
-            getAdjustedAllocaPtr(IRB, BeginOffset, II.getRawDest()->getType()));
+        II.setDest(AdjustedPtr);
       else
-        II.setSource(getAdjustedAllocaPtr(IRB, BeginOffset,
-                                          II.getRawSource()->getType()));
+        II.setSource(AdjustedPtr);
 
-      Type *CstTy = II.getAlignmentCst()->getType();
-      II.setAlignment(ConstantInt::get(CstTy, Align));
+      if (II.getAlignment() > SliceAlign) {
+        Type *CstTy = II.getAlignmentCst()->getType();
+        II.setAlignment(
+            ConstantInt::get(CstTy, MinAlign(II.getAlignment(), SliceAlign)));
+      }
 
       DEBUG(dbgs() << "          to: " << II << "\n");
-      deleteIfTriviallyDead(OldOp);
+      deleteIfTriviallyDead(OldPtr);
       return false;
     }
     // For split transfer intrinsics we have an incredibly useful assurance:
@@ -2524,33 +2559,32 @@ private:
       Pass.Worklist.insert(AI);
     }
 
+    // Compute the relative offset for the other pointer within the transfer.
+    unsigned IntPtrWidth = DL.getPointerSizeInBits();
+    APInt OtherOffset(IntPtrWidth, NewBeginOffset - BeginOffset);
+    unsigned OtherAlign = MinAlign(II.getAlignment() ? II.getAlignment() : 1,
+                                   OtherOffset.zextOrTrunc(64).getZExtValue());
+
     if (EmitMemCpy) {
       Type *OtherPtrTy = OtherPtr->getType();
 
       // Compute the other pointer, folding as much as possible to produce
       // a single, simple GEP in most cases.
-      OtherPtr = getAdjustedPtr(IRB, DL, OtherPtr, RelOffset, OtherPtrTy);
+      OtherPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy,
+                                OtherPtr->getName() + ".");
 
-      Value *OurPtr = getAdjustedAllocaPtr(
-          IRB, NewBeginOffset,
-          IsDest ? II.getRawDest()->getType() : II.getRawSource()->getType());
+      Value *OurPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
       Type *SizeTy = II.getLength()->getType();
       Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset);
 
-      CallInst *New = IRB.CreateMemCpy(IsDest ? OurPtr : OtherPtr,
-                                       IsDest ? OtherPtr : OurPtr,
-                                       Size, Align, II.isVolatile());
+      CallInst *New = IRB.CreateMemCpy(
+          IsDest ? OurPtr : OtherPtr, IsDest ? OtherPtr : OurPtr, Size,
+          MinAlign(SliceAlign, OtherAlign), II.isVolatile());
       (void)New;
       DEBUG(dbgs() << "          to: " << *New << "\n");
       return false;
     }
 
-    // Note that we clamp the alignment to 1 here as a 0 alignment for a memcpy
-    // is equivalent to 1, but that isn't true if we end up rewriting this as
-    // a load or store.
-    if (!Align)
-      Align = 1;
-
     bool IsWholeAlloca = NewBeginOffset == NewAllocaBeginOffset &&
                          NewEndOffset == NewAllocaEndOffset;
     uint64_t Size = NewEndOffset - NewBeginOffset;
@@ -2572,10 +2606,15 @@ private:
       OtherPtrTy = SubIntTy->getPointerTo();
     }
 
-    Value *SrcPtr = getAdjustedPtr(IRB, DL, OtherPtr, RelOffset, OtherPtrTy);
+    Value *SrcPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy,
+                                   OtherPtr->getName() + ".");
+    unsigned SrcAlign = OtherAlign;
     Value *DstPtr = &NewAI;
-    if (!IsDest)
+    unsigned DstAlign = SliceAlign;
+    if (!IsDest) {
       std::swap(SrcPtr, DstPtr);
+      std::swap(SrcAlign, DstAlign);
+    }
 
     Value *Src;
     if (VecTy && !IsWholeAlloca && !IsDest) {
@@ -2589,7 +2628,7 @@ private:
       uint64_t Offset = NewBeginOffset - NewAllocaBeginOffset;
       Src = extractInteger(DL, IRB, Src, SubIntTy, Offset, "extract");
     } else {
-      Src = IRB.CreateAlignedLoad(SrcPtr, Align, II.isVolatile(),
+      Src = IRB.CreateAlignedLoad(SrcPtr, SrcAlign, II.isVolatile(),
                                   "copyload");
     }
 
@@ -2607,7 +2646,7 @@ private:
     }
 
     StoreInst *Store = cast<StoreInst>(
-        IRB.CreateAlignedStore(Src, DstPtr, Align, II.isVolatile()));
+        IRB.CreateAlignedStore(Src, DstPtr, DstAlign, II.isVolatile()));
     (void)Store;
     DEBUG(dbgs() << "          to: " << *Store << "\n");
     return !II.isVolatile();
@@ -2619,20 +2658,13 @@ private:
     DEBUG(dbgs() << "    original: " << II << "\n");
     assert(II.getArgOperand(1) == OldPtr);
 
-    // Compute the intersecting offset range.
-    assert(BeginOffset < NewAllocaEndOffset);
-    assert(EndOffset > NewAllocaBeginOffset);
-    uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset);
-    uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);
-
     // Record this instruction for deletion.
     Pass.DeadInsts.insert(&II);
 
     ConstantInt *Size =
         ConstantInt::get(cast<IntegerType>(II.getArgOperand(0)->getType()),
                          NewEndOffset - NewBeginOffset);
-    Value *Ptr =
-        getAdjustedAllocaPtr(IRB, NewBeginOffset, II.getArgOperand(1)->getType());
+    Value *Ptr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
     Value *New;
     if (II.getIntrinsicID() == Intrinsic::lifetime_start)
       New = IRB.CreateLifetimeStart(Ptr, Size);
@@ -2653,12 +2685,11 @@ private:
     // as local as possible to the PHI. To do that, we re-use the location of
     // the old pointer, which necessarily must be in the right position to
     // dominate the PHI.
-    IRBuilderTy PtrBuilder(OldPtr);
-    PtrBuilder.SetNamePrefix(Twine(NewAI.getName()) + "." + Twine(BeginOffset) +
-                             ".");
+    IRBuilderTy PtrBuilder(IRB);
+    PtrBuilder.SetInsertPoint(OldPtr);
+    PtrBuilder.SetCurrentDebugLocation(OldPtr->getDebugLoc());
 
-    Value *NewPtr =
-        getAdjustedAllocaPtr(PtrBuilder, BeginOffset, OldPtr->getType());
+    Value *NewPtr = getNewAllocaSlicePtr(PtrBuilder, OldPtr->getType());
     // Replace the operands which were using the old pointer.
     std::replace(PN.op_begin(), PN.op_end(), cast<Value>(OldPtr), NewPtr);
@@ -2679,7 +2710,7 @@ private:
     assert(BeginOffset >= NewAllocaBeginOffset && "Selects are unsplittable");
     assert(EndOffset <= NewAllocaEndOffset && "Selects are unsplittable");
 
-    Value *NewPtr = getAdjustedAllocaPtr(IRB, BeginOffset, OldPtr->getType());
+    Value *NewPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
     // Replace the operands which were using the old pointer.
     if (SI.getOperand(1) == OldPtr)
       SI.setOperand(1, NewPtr);
@@ -3595,11 +3626,12 @@ bool SROA::runOnFunction(Function &F) {
   DEBUG(dbgs() << "SROA function: " << F.getName() << "\n");
   C = &F.getContext();
-  DL = getAnalysisIfAvailable<DataLayout>();
-  if (!DL) {
+  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+  if (!DLP) {
     DEBUG(dbgs() << "  Skipping SROA -- no target data!\n");
     return false;
   }
+  DL = &DLP->getDataLayout();
   DominatorTreeWrapperPass *DTWP =
       getAnalysisIfAvailable<DominatorTreeWrapperPass>();
   DT = DTWP ? &DTWP->getDomTree() : 0;
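
The net effect of the rewriter changes in this diff is that each slice's intersection with the new alloca, and the alignment usable at that offset, are computed once in visit() (as NewBeginOffset, NewEndOffset, SliceSize, and getSliceAlign) and reused by every visitor. A standalone C++ sketch of that arithmetic with hypothetical numbers; this is illustrative only, not LLVM code, though MinAlign below mirrors the computation llvm::MinAlign performs:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iostream>

// Largest power of two dividing both A and B (the lowest set bit of A|B),
// the same computation as llvm::MinAlign in MathExtras.h.
static uint64_t MinAlign(uint64_t A, uint64_t B) {
  return (A | B) & (1 + ~(A | B));
}

int main() {
  // Hypothetical slice [12, 20) of the original alloca, rewritten against a
  // new alloca covering [8, 24) of the original with 8-byte alignment.
  uint64_t BeginOffset = 12, EndOffset = 20;
  uint64_t NewAllocaBeginOffset = 8, NewAllocaEndOffset = 24;
  uint64_t NewAIAlign = 8;

  // The intersection, computed once per slice in visit().
  assert(BeginOffset < NewAllocaEndOffset && EndOffset > NewAllocaBeginOffset);
  uint64_t NewBeginOffset = std::max(BeginOffset, NewAllocaBeginOffset); // 12
  uint64_t NewEndOffset = std::min(EndOffset, NewAllocaEndOffset);       // 20
  uint64_t SliceSize = NewEndOffset - NewBeginOffset;                    // 8

  // getSliceAlign(): the alignment guaranteed at the slice's start, i.e. the
  // new alloca's alignment weakened by the slice's byte offset within it.
  uint64_t SliceAlign =
      MinAlign(NewAIAlign, NewBeginOffset - NewAllocaBeginOffset); // 4

  std::cout << "slice [" << NewBeginOffset << ", " << NewEndOffset
            << "), size " << SliceSize << ", align " << SliceAlign << "\n";
  return 0;
}
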