#include "llvm/DebugInfo.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
-#include "llvm/GlobalVariable.h"
#include "llvm/IRBuilder.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Support/CallSite.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstVisitor.h"
#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/DataLayout.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
STATISTIC(NumDeleted, "Number of instructions deleted");
STATISTIC(NumVectorized, "Number of vectorized aggregates");
+/// Hidden option to force the pass to not use DomTree and mem2reg, instead
+/// forming SSA values through the SSAUpdater infrastructure.
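+/// The flag can be exercised from the command line, e.g.
+/// "opt -sroa -force-ssa-updater".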
+static cl::opt<bool>
+ForceSSAUpdater("force-ssa-updater", cl::init(false), cl::Hidden);
+
namespace {
/// \brief Alloca partitioning representation.
///
/// information about the nature of uses of each slice of the alloca. The goal
/// is that this information is sufficient to decide if and how to split the
/// alloca apart and replace slices with scalars. It is also intended that this
-/// structure can capture the relevant information needed both due decide about
+/// structure can capture the relevant information needed both to decide about
/// and to enact these transformations.
class AllocaPartitioning {
public:
///
/// This provides an ordering over ranges such that start offsets are
/// always increasing, and within equal start offsets, the end offsets are
- /// decreasing. Thus the spanning range comes first in in cluster with the
+ /// decreasing. Thus the spanning range comes first in a cluster with the
/// same start position.
bool operator<(const ByteRange &RHS) const {
if (BeginOffset < RHS.BeginOffset) return true;
}
/// \brief Support comparison with a single offset to allow binary searches.
- bool operator<(uint64_t RHSOffset) const {
- return BeginOffset < RHSOffset;
+ friend bool operator<(const ByteRange &LHS, uint64_t RHSOffset) {
+ return LHS.BeginOffset < RHSOffset;
+ }
+
+ friend LLVM_ATTRIBUTE_UNUSED bool operator<(uint64_t LHSOffset,
+ const ByteRange &RHS) {
+ return LHSOffset < RHS.BeginOffset;
}
bool operator==(const ByteRange &RHS) const {
/// \brief Whether this partition is splittable into smaller partitions.
///
/// We flag partitions as splittable when they are formed entirely due to
- /// accesses by trivially split operations such as memset and memcpy.
+ /// accesses by trivially splittable operations such as memset and memcpy.
///
/// FIXME: At some point we should consider loads and stores of FCAs to be
/// splittable and eagerly split them into scalar values.
bool IsSplittable;
+ /// \brief Test whether a partition has been marked as dead.
+ bool isDead() const {
+ if (BeginOffset == UINT64_MAX) {
+ assert(EndOffset == UINT64_MAX);
+ return true;
+ }
+ return false;
+ }
+
+ /// \brief Kill a partition.
+ /// This is accomplished by setting both its beginning and end offset to
+ /// the maximum possible value.
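+  /// Killed partitions sort past all live partitions and are erased from the
+  /// back of the partitions vector once it has been sorted.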
+ void kill() {
+ assert(!isDead() && "He's Dead, Jim!");
+ BeginOffset = EndOffset = UINT64_MAX;
+ }
+
Partition() : ByteRange(), IsSplittable() {}
Partition(uint64_t BeginOffset, uint64_t EndOffset, bool IsSplittable)
: ByteRange(BeginOffset, EndOffset), IsSplittable(IsSplittable) {}
/// and includes a handle to the user itself and the pointer value in use.
/// The bounds of these uses are determined by intersecting the bounds of the
/// memory use itself with a particular partition. As a consequence there is
- /// intentionally overlap between various usues of the same partition.
+  /// intentional overlap between various uses of the same partition.
struct PartitionUse : public ByteRange {
- /// \brief The user of this range of the alloca.
- AssertingVH<Instruction> User;
-
- /// \brief The particular pointer value derived from this alloca in use.
- AssertingVH<Instruction> Ptr;
+ /// \brief The use in question. Provides access to both user and used value.
+ ///
+ /// Note that this may be null if the partition use is *dead*, that is, it
+ /// should be ignored.
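+    /// When non-null, U->get() is the pointer value derived from the alloca
+    /// and U->getUser() is the instruction using that pointer.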
+ Use *U;
- PartitionUse() : ByteRange(), User(), Ptr() {}
- PartitionUse(uint64_t BeginOffset, uint64_t EndOffset,
- Instruction *User, Instruction *Ptr)
- : ByteRange(BeginOffset, EndOffset), User(User), Ptr(Ptr) {}
+ PartitionUse() : ByteRange(), U() {}
+ PartitionUse(uint64_t BeginOffset, uint64_t EndOffset, Use *U)
+ : ByteRange(BeginOffset, EndOffset), U(U) {}
};
/// \brief Construct a partitioning of a particular alloca.
///
/// Construction does most of the work for partitioning the alloca. This
/// performs the necessary walks of users and builds a partitioning from it.
- AllocaPartitioning(const TargetData &TD, AllocaInst &AI);
+ AllocaPartitioning(const DataLayout &TD, AllocaInst &AI);
/// \brief Test whether a pointer to the allocation escapes our analysis.
///
use_iterator use_begin(const_iterator I) { return Uses[I - begin()].begin(); }
use_iterator use_end(unsigned Idx) { return Uses[Idx].end(); }
use_iterator use_end(const_iterator I) { return Uses[I - begin()].end(); }
- void use_insert(unsigned Idx, use_iterator UI, const PartitionUse &U) {
- Uses[Idx].insert(UI, U);
- }
- void use_insert(const_iterator I, use_iterator UI, const PartitionUse &U) {
- Uses[I - begin()].insert(UI, U);
- }
- void use_erase(unsigned Idx, use_iterator UI) { Uses[Idx].erase(UI); }
- void use_erase(const_iterator I, use_iterator UI) {
- Uses[I - begin()].erase(UI);
- }
typedef SmallVectorImpl<PartitionUse>::const_iterator const_use_iterator;
const_use_iterator use_begin(unsigned Idx) const { return Uses[Idx].begin(); }
const_use_iterator use_end(const_iterator I) const {
return Uses[I - begin()].end();
}
+
+ unsigned use_size(unsigned Idx) const { return Uses[Idx].size(); }
+ unsigned use_size(const_iterator I) const { return Uses[I - begin()].size(); }
+ const PartitionUse &getUse(unsigned PIdx, unsigned UIdx) const {
+ return Uses[PIdx][UIdx];
+ }
+ const PartitionUse &getUse(const_iterator I, unsigned UIdx) const {
+ return Uses[I - begin()][UIdx];
+ }
+
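+  /// Note that uses are only ever appended; a dead use is marked by nulling
+  /// out its Use pointer rather than erasing it so that indices into a
+  /// partition's use list remain stable.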
+ void use_push_back(unsigned Idx, const PartitionUse &PU) {
+ Uses[Idx].push_back(PU);
+ }
+ void use_push_back(const_iterator I, const PartitionUse &PU) {
+ Uses[I - begin()].push_back(PU);
+ }
/// @}
/// \brief Allow iterating the dead users for this alloca.
dead_user_iterator dead_user_end() const { return DeadUsers.end(); }
/// @}
- /// \brief Allow iterating the dead operands referring to this alloca.
+ /// \brief Allow iterating the dead expressions referring to this alloca.
///
/// These are operands which cannot actually be used to refer to the
/// alloca as they are outside its range and the user doesn't correct for
/// correctly represent. We stash extra data to help us untangle this
/// after the partitioning is complete.
struct MemTransferOffsets {
+ /// The destination begin and end offsets when the destination is within
+ /// this alloca. If the end offset is zero the destination is not within
+ /// this alloca.
uint64_t DestBegin, DestEnd;
+
+ /// The source begin and end offsets when the source is within this alloca.
+ /// If the end offset is zero, the source is not within this alloca.
uint64_t SourceBegin, SourceEnd;
+
+    /// Flag for whether the memory transfer is splittable.
bool IsSplittable;
};
MemTransferOffsets getMemTransferOffsets(MemTransferInst &II) const {
/// When manipulating PHI nodes or selects, they can use more than one
/// partition of an alloca. We store a special mapping to allow finding the
/// partition referenced by each of these operands, if any.
- iterator findPartitionForPHIOrSelectOperand(Instruction &I, Value *Op) {
- SmallDenseMap<std::pair<Instruction *, Value *>,
- std::pair<unsigned, unsigned> >::const_iterator MapIt
- = PHIOrSelectOpMap.find(std::make_pair(&I, Op));
+ iterator findPartitionForPHIOrSelectOperand(Use *U) {
+ SmallDenseMap<Use *, std::pair<unsigned, unsigned> >::const_iterator MapIt
+ = PHIOrSelectOpMap.find(U);
if (MapIt == PHIOrSelectOpMap.end())
return end();
///
/// Similar to mapping these operands back to the partitions, this maps
/// directly to the use structure of that partition.
- use_iterator findPartitionUseForPHIOrSelectOperand(Instruction &I,
- Value *Op) {
- SmallDenseMap<std::pair<Instruction *, Value *>,
- std::pair<unsigned, unsigned> >::const_iterator MapIt
- = PHIOrSelectOpMap.find(std::make_pair(&I, Op));
+ use_iterator findPartitionUseForPHIOrSelectOperand(Use *U) {
+ SmallDenseMap<Use *, std::pair<unsigned, unsigned> >::const_iterator MapIt
+ = PHIOrSelectOpMap.find(U);
assert(MapIt != PHIOrSelectOpMap.end());
return Uses[MapIt->second.first].begin() + MapIt->second.second;
}
/// memcpy are ignored.
Type *getCommonType(iterator I) const;
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void print(raw_ostream &OS, const_iterator I, StringRef Indent = " ") const;
void printUsers(raw_ostream &OS, const_iterator I,
StringRef Indent = " ") const;
void print(raw_ostream &OS) const;
void LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED dump(const_iterator I) const;
void LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED dump() const;
+#endif
private:
template <typename DerivedT, typename RetT = void> class BuilderBase;
class UseBuilder;
friend class AllocaPartitioning::UseBuilder;
+#ifndef NDEBUG
/// \brief Handle to alloca instruction to simplify method interfaces.
AllocaInst &AI;
+#endif
/// \brief The instruction responsible for this alloca having no partitioning.
///
///
/// We store a vector of the partitions over the alloca here. This vector is
/// sorted by increasing begin offset, and then by decreasing end offset. See
- /// the Partition inner class for more details. Initially there are overlaps,
- /// be during construction we form a disjoint sequence toward the end.
+ /// the Partition inner class for more details. Initially (during
+ /// construction) there are overlaps, but we form a disjoint sequence of
+ /// partitions while finishing construction and a fully constructed object is
+ /// expected to always have this as a disjoint space.
SmallVector<Partition, 8> Partitions;
/// \brief The uses of the partitions.
SmallDenseMap<Instruction *, std::pair<uint64_t, bool> > PHIOrSelectSizes;
/// \brief Auxiliary information for particular PHI or select operands.
- SmallDenseMap<std::pair<Instruction *, Value *>,
- std::pair<unsigned, unsigned>, 4> PHIOrSelectOpMap;
+ SmallDenseMap<Use *, std::pair<unsigned, unsigned>, 4> PHIOrSelectOpMap;
/// \brief A utility routine called from the constructor.
///
class AllocaPartitioning::BuilderBase
: public InstVisitor<DerivedT, RetT> {
public:
- BuilderBase(const TargetData &TD, AllocaInst &AI, AllocaPartitioning &P)
+ BuilderBase(const DataLayout &TD, AllocaInst &AI, AllocaPartitioning &P)
: TD(TD),
AllocSize(TD.getTypeAllocSize(AI.getAllocatedType())),
P(P) {
}
protected:
- const TargetData &TD;
+ const DataLayout &TD;
const uint64_t AllocSize;
AllocaPartitioning &P;
+ SmallPtrSet<Use *, 8> VisitedUses;
+
struct OffsetUse {
Use *U;
- uint64_t Offset;
+ int64_t Offset;
};
SmallVector<OffsetUse, 8> Queue;
// The active offset and use while visiting.
Use *U;
- uint64_t Offset;
+ int64_t Offset;
- void enqueueUsers(Instruction &I, uint64_t UserOffset) {
- SmallPtrSet<User *, 8> UserSet;
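+  // Enqueue every not-yet-visited use of I for visiting, recording UserOffset
+  // as the offset of I's pointer value within the alloca.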
+ void enqueueUsers(Instruction &I, int64_t UserOffset) {
for (Value::use_iterator UI = I.use_begin(), UE = I.use_end();
UI != UE; ++UI) {
- if (!UserSet.insert(*UI))
- continue;
-
- OffsetUse OU = { &UI.getUse(), UserOffset };
- Queue.push_back(OU);
+ if (VisitedUses.insert(&UI.getUse())) {
+ OffsetUse OU = { &UI.getUse(), UserOffset };
+ Queue.push_back(OU);
+ }
}
}
- bool computeConstantGEPOffset(GetElementPtrInst &GEPI, uint64_t &GEPOffset) {
+ bool computeConstantGEPOffset(GetElementPtrInst &GEPI, int64_t &GEPOffset) {
GEPOffset = Offset;
for (gep_type_iterator GTI = gep_type_begin(GEPI), GTE = gep_type_end(GEPI);
GTI != GTE; ++GTI) {
if (StructType *STy = dyn_cast<StructType>(*GTI)) {
unsigned ElementIdx = OpC->getZExtValue();
const StructLayout *SL = TD.getStructLayout(STy);
- GEPOffset += SL->getElementOffset(ElementIdx);
+ uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
+ // Check that we can continue to model this GEP in a signed 64-bit offset.
+ if (ElementOffset > INT64_MAX ||
+ (GEPOffset >= 0 &&
+ ((uint64_t)GEPOffset + ElementOffset) > INT64_MAX)) {
+ DEBUG(dbgs() << "WARNING: Encountered a cumulative offset exceeding "
+ << "what can be represented in an int64_t!\n"
+ << " alloca: " << P.AI << "\n");
+ return false;
+ }
+ if (GEPOffset < 0)
+ GEPOffset = ElementOffset + (uint64_t)-GEPOffset;
+ else
+ GEPOffset += ElementOffset;
continue;
}
- GEPOffset
- += OpC->getZExtValue() * TD.getTypeAllocSize(GTI.getIndexedType());
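+      // For array, vector, and pointer indices, sign-extend (or truncate) the
+      // index to pointer width, scale it by the element's allocation size, and
+      // fold it into the running offset using APInt arithmetic.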
+ APInt Index = OpC->getValue().sextOrTrunc(TD.getPointerSizeInBits());
+ Index *= APInt(Index.getBitWidth(),
+ TD.getTypeAllocSize(GTI.getIndexedType()));
+ Index += APInt(Index.getBitWidth(), (uint64_t)GEPOffset,
+ /*isSigned*/true);
+ // Check if the result can be stored in our int64_t offset.
+ if (!Index.isSignedIntN(sizeof(GEPOffset) * 8)) {
+ DEBUG(dbgs() << "WARNING: Encountered a cumulative offset exceeding "
+ << "what can be represented in an int64_t!\n"
+ << " alloca: " << P.AI << "\n");
+ return false;
+ }
+
+ GEPOffset = Index.getSExtValue();
}
return true;
}
SmallDenseMap<Instruction *, unsigned> MemTransferPartitionMap;
public:
- PartitionBuilder(const TargetData &TD, AllocaInst &AI, AllocaPartitioning &P)
+ PartitionBuilder(const DataLayout &TD, AllocaInst &AI, AllocaPartitioning &P)
: BuilderBase<PartitionBuilder, bool>(TD, AI, P) {}
/// \brief Run the builder over the allocation.
return false;
}
- void insertUse(Instruction &I, uint64_t Size, bool IsSplittable = false) {
- uint64_t BeginOffset = Offset, EndOffset = Offset + Size;
-
- // Completely skip uses which start outside of the allocation.
- if (BeginOffset >= AllocSize) {
+ void insertUse(Instruction &I, int64_t Offset, uint64_t Size,
+ bool IsSplittable = false) {
+ // Completely skip uses which have a zero size or don't overlap the
+ // allocation.
+ if (Size == 0 ||
+ (Offset >= 0 && (uint64_t)Offset >= AllocSize) ||
+ (Offset < 0 && (uint64_t)-Offset >= Size)) {
DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte use @" << Offset
<< " which starts past the end of the " << AllocSize
<< " byte alloca:\n"
return;
}
- // Clamp the size to the allocation.
- if (EndOffset > AllocSize) {
+ // Clamp the start to the beginning of the allocation.
+ if (Offset < 0) {
DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @" << Offset
- << " to remain within the " << AllocSize << " byte alloca:\n"
+ << " to start at the beginning of the alloca:\n"
<< " alloca: " << P.AI << "\n"
<< " use: " << I << "\n");
- EndOffset = AllocSize;
+ Size -= (uint64_t)-Offset;
+ Offset = 0;
}
- // See if we can just add a user onto the last slot currently occupied.
- if (!P.Partitions.empty() &&
- P.Partitions.back().BeginOffset == BeginOffset &&
- P.Partitions.back().EndOffset == EndOffset) {
- P.Partitions.back().IsSplittable &= IsSplittable;
- return;
+ uint64_t BeginOffset = Offset, EndOffset = BeginOffset + Size;
+
+ // Clamp the end offset to the end of the allocation. Note that this is
+ // formulated to handle even the case where "BeginOffset + Size" overflows.
+ assert(AllocSize >= BeginOffset); // Established above.
+ if (Size > AllocSize - BeginOffset) {
+ DEBUG(dbgs() << "WARNING: Clamping a " << Size << " byte use @" << Offset
+ << " to remain within the " << AllocSize << " byte alloca:\n"
+ << " alloca: " << P.AI << "\n"
+ << " use: " << I << "\n");
+ EndOffset = AllocSize;
}
Partition New(BeginOffset, EndOffset, IsSplittable);
P.Partitions.push_back(New);
}
- bool handleLoadOrStore(Type *Ty, Instruction &I) {
+ bool handleLoadOrStore(Type *Ty, Instruction &I, int64_t Offset) {
uint64_t Size = TD.getTypeStoreSize(Ty);
// If this memory access can be shown to *statically* extend outside the
// risk of overflow.
// FIXME: We should instead consider the pointer to have escaped if this
// function is being instrumented for addressing bugs or race conditions.
- if (Offset >= AllocSize || Size > AllocSize || Offset + Size > AllocSize) {
+ if (Offset < 0 || (uint64_t)Offset >= AllocSize ||
+ Size > (AllocSize - (uint64_t)Offset)) {
DEBUG(dbgs() << "WARNING: Ignoring " << Size << " byte "
<< (isa<LoadInst>(I) ? "load" : "store") << " @" << Offset
<< " which extends past the end of the " << AllocSize
return true;
}
- insertUse(I, Size);
+ insertUse(I, Offset, Size);
return true;
}
}
bool visitGetElementPtrInst(GetElementPtrInst &GEPI) {
- //unsigned IntPtrWidth = TD->getPointerSizeInBits();
- //assert(IntPtrWidth == Offset.getBitWidth());
- uint64_t GEPOffset;
+ int64_t GEPOffset;
if (!computeConstantGEPOffset(GEPI, GEPOffset))
return markAsEscaping(GEPI);
}
bool visitLoadInst(LoadInst &LI) {
- return handleLoadOrStore(LI.getType(), LI);
+ assert((!LI.isSimple() || LI.getType()->isSingleValueType()) &&
+ "All simple FCA loads should have been pre-split");
+ return handleLoadOrStore(LI.getType(), LI, Offset);
}
bool visitStoreInst(StoreInst &SI) {
- if (SI.getOperand(0) == *U)
+ Value *ValOp = SI.getValueOperand();
+ if (ValOp == *U)
return markAsEscaping(SI);
- return handleLoadOrStore(SI.getOperand(0)->getType(), SI);
+ assert((!SI.isSimple() || ValOp->getType()->isSingleValueType()) &&
+ "All simple FCA stores should have been pre-split");
+ return handleLoadOrStore(ValOp->getType(), SI, Offset);
}
bool visitMemSetInst(MemSetInst &II) {
+ assert(II.getRawDest() == *U && "Pointer use is not the destination?");
ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
- insertUse(II, Length ? Length->getZExtValue() : AllocSize - Offset, Length);
+ uint64_t Size = Length ? Length->getZExtValue() : AllocSize - Offset;
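+    // The resulting use is only splittable when the memset length is a known
+    // constant.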
+ insertUse(II, Offset, Size, Length);
return true;
}
// Only intrinsics with a constant length can be split.
Offsets.IsSplittable = Length;
- if (*U != II.getRawDest()) {
- assert(*U == II.getRawSource());
- Offsets.SourceBegin = Offset;
- Offsets.SourceEnd = Offset + Size;
- } else {
+ if (*U == II.getRawDest()) {
Offsets.DestBegin = Offset;
Offsets.DestEnd = Offset + Size;
}
+ if (*U == II.getRawSource()) {
+ Offsets.SourceBegin = Offset;
+ Offsets.SourceEnd = Offset + Size;
+ }
+
+ // If we have set up end offsets for both the source and the destination,
+ // we have found both sides of this transfer pointing at the same alloca.
+ bool SeenBothEnds = Offsets.SourceEnd && Offsets.DestEnd;
+ if (SeenBothEnds && II.getRawDest() != II.getRawSource()) {
+ unsigned PrevIdx = MemTransferPartitionMap[&II];
+
+ // Check if the begin offsets match and this is a non-volatile transfer.
+ // In that case, we can completely elide the transfer.
+ if (!II.isVolatile() && Offsets.SourceBegin == Offsets.DestBegin) {
+ P.Partitions[PrevIdx].kill();
+ return true;
+ }
+
+ // Otherwise we have an offset transfer within the same alloca. We can't
+ // split those.
+ P.Partitions[PrevIdx].IsSplittable = Offsets.IsSplittable = false;
+ } else if (SeenBothEnds) {
+ // Handle the case where this exact use provides both ends of the
+ // operation.
+ assert(II.getRawDest() == II.getRawSource());
- insertUse(II, Size, Offsets.IsSplittable);
- unsigned NewIdx = P.Partitions.size() - 1;
+ // For non-volatile transfers this is a no-op.
+ if (!II.isVolatile())
+ return true;
- SmallDenseMap<Instruction *, unsigned>::const_iterator PMI;
- bool Inserted = false;
- llvm::tie(PMI, Inserted)
- = MemTransferPartitionMap.insert(std::make_pair(&II, NewIdx));
- if (!Inserted && Offsets.IsSplittable) {
- // We've found a memory transfer intrinsic which refers to the alloca as
- // both a source and dest. We refuse to split these to simplify splitting
- // logic. If possible, SROA will still split them into separate allocas
- // and then re-analyze.
+ // Otherwise just suppress splitting.
Offsets.IsSplittable = false;
- P.Partitions[PMI->second].IsSplittable = false;
- P.Partitions[NewIdx].IsSplittable = false;
+ }
+
+ // Insert the use now that we've fixed up the splittable nature.
+ insertUse(II, Offset, Size, Offsets.IsSplittable);
+
+    // Set up the mapping from intrinsic to partition if we've not seen both
+ // ends of this transfer.
+ if (!SeenBothEnds) {
+ unsigned NewIdx = P.Partitions.size() - 1;
+ bool Inserted
+ = MemTransferPartitionMap.insert(std::make_pair(&II, NewIdx)).second;
+ assert(Inserted &&
+ "Already have intrinsic in map but haven't seen both ends");
+ (void)Inserted;
}
return true;
}
// Disable SRoA for any intrinsics except for lifetime invariants.
+  // FIXME: What about debug intrinsics? This matches old behavior, but
+ // doesn't make sense.
bool visitIntrinsicInst(IntrinsicInst &II) {
if (II.getIntrinsicID() == Intrinsic::lifetime_start ||
II.getIntrinsicID() == Intrinsic::lifetime_end) {
ConstantInt *Length = cast<ConstantInt>(II.getArgOperand(0));
uint64_t Size = std::min(AllocSize - Offset, Length->getLimitedValue());
- insertUse(II, Size, true);
+ insertUse(II, Offset, Size, true);
return true;
}
SmallVector<std::pair<Instruction *, Instruction *>, 4> Uses;
Visited.insert(Root);
Uses.push_back(std::make_pair(cast<Instruction>(*U), Root));
+ // If there are no loads or stores, the access is dead. We mark that as
+ // a size zero access.
+ Size = 0;
do {
Instruction *I, *UsedI;
llvm::tie(UsedI, I) = Uses.pop_back_val();
std::pair<uint64_t, bool> &PHIInfo = P.PHIOrSelectSizes[&PN];
if (PHIInfo.first) {
PHIInfo.second = true;
- insertUse(PN, PHIInfo.first);
+ insertUse(PN, Offset, PHIInfo.first);
return true;
}
if (Instruction *EscapingI = hasUnsafePHIOrSelectUse(&PN, PHIInfo.first))
return markAsEscaping(*EscapingI);
- insertUse(PN, PHIInfo.first);
+ insertUse(PN, Offset, PHIInfo.first);
return true;
}
std::pair<uint64_t, bool> &SelectInfo = P.PHIOrSelectSizes[&SI];
if (SelectInfo.first) {
SelectInfo.second = true;
- insertUse(SI, SelectInfo.first);
+ insertUse(SI, Offset, SelectInfo.first);
return true;
}
if (Instruction *EscapingI = hasUnsafePHIOrSelectUse(&SI, SelectInfo.first))
return markAsEscaping(*EscapingI);
- insertUse(SI, SelectInfo.first);
+ insertUse(SI, Offset, SelectInfo.first);
return true;
}
/// \brief Use adder for the alloca partitioning.
///
-/// This class adds the uses of an alloca to all of the partitions which it
-/// uses. For splittable partitions, this can end up doing essentially a linear
+/// This class adds the uses of an alloca to all of the partitions which they
+/// use. For splittable partitions, this can end up doing essentially a linear
/// walk of the partitions, but the number of steps remains bounded by the
/// total result instruction size:
///   - The number of partitions is a result of the number of unsplittable
SmallPtrSet<Instruction *, 4> VisitedDeadInsts;
public:
- UseBuilder(const TargetData &TD, AllocaInst &AI, AllocaPartitioning &P)
+ UseBuilder(const DataLayout &TD, AllocaInst &AI, AllocaPartitioning &P)
: BuilderBase<UseBuilder>(TD, AI, P) {}
/// \brief Run the builder over the allocation.
P.DeadUsers.push_back(&I);
}
- void insertUse(uint64_t Size, Instruction &User) {
- uint64_t BeginOffset = Offset, EndOffset = Offset + Size;
-
- // If the use extends outside of the allocation, record it as a dead use
- // for elimination later.
- if (BeginOffset >= AllocSize || Size == 0)
+ void insertUse(Instruction &User, int64_t Offset, uint64_t Size) {
+ // If the use has a zero size or extends outside of the allocation, record
+ // it as a dead use for elimination later.
+ if (Size == 0 || (uint64_t)Offset >= AllocSize ||
+ (Offset < 0 && (uint64_t)-Offset >= Size))
return markAsDead(User);
- // Bound the use by the size of the allocation.
- if (EndOffset > AllocSize)
+ // Clamp the start to the beginning of the allocation.
+ if (Offset < 0) {
+ Size -= (uint64_t)-Offset;
+ Offset = 0;
+ }
+
+ uint64_t BeginOffset = Offset, EndOffset = BeginOffset + Size;
+
+ // Clamp the end offset to the end of the allocation. Note that this is
+ // formulated to handle even the case where "BeginOffset + Size" overflows.
+ assert(AllocSize >= BeginOffset); // Established above.
+ if (Size > AllocSize - BeginOffset)
EndOffset = AllocSize;
// NB: This only works if we have zero overlapping partitions.
B = llvm::prior(B);
for (iterator I = B, E = P.end(); I != E && I->BeginOffset < EndOffset;
++I) {
- PartitionUse NewUse(std::max(I->BeginOffset, BeginOffset),
- std::min(I->EndOffset, EndOffset),
- &User, cast<Instruction>(*U));
- P.Uses[I - P.begin()].push_back(NewUse);
+ PartitionUse NewPU(std::max(I->BeginOffset, BeginOffset),
+ std::min(I->EndOffset, EndOffset), U);
+ P.use_push_back(I, NewPU);
if (isa<PHINode>(U->getUser()) || isa<SelectInst>(U->getUser()))
- P.PHIOrSelectOpMap[std::make_pair(&User, U->get())]
+ P.PHIOrSelectOpMap[U]
= std::make_pair(I - P.begin(), P.Uses[I - P.begin()].size() - 1);
}
}
- void handleLoadOrStore(Type *Ty, Instruction &I) {
+ void handleLoadOrStore(Type *Ty, Instruction &I, int64_t Offset) {
uint64_t Size = TD.getTypeStoreSize(Ty);
// If this memory access can be shown to *statically* extend outside the
// bounds of the allocation, its behavior is undefined, so simply
// ignore it. Note that this is more strict than the generic clamping
// behavior of insertUse.
- if (Offset >= AllocSize || Size > AllocSize || Offset + Size > AllocSize)
+ if (Offset < 0 || (uint64_t)Offset >= AllocSize ||
+ Size > (AllocSize - (uint64_t)Offset))
return markAsDead(I);
- insertUse(Size, I);
+ insertUse(I, Offset, Size);
}
void visitBitCastInst(BitCastInst &BC) {
if (GEPI.use_empty())
return markAsDead(GEPI);
- //unsigned IntPtrWidth = TD->getPointerSizeInBits();
- //assert(IntPtrWidth == Offset.getBitWidth());
- uint64_t GEPOffset;
+ int64_t GEPOffset;
if (!computeConstantGEPOffset(GEPI, GEPOffset))
llvm_unreachable("Unable to compute constant offset for use");
}
void visitLoadInst(LoadInst &LI) {
- handleLoadOrStore(LI.getType(), LI);
+ handleLoadOrStore(LI.getType(), LI, Offset);
}
void visitStoreInst(StoreInst &SI) {
- handleLoadOrStore(SI.getOperand(0)->getType(), SI);
+ handleLoadOrStore(SI.getOperand(0)->getType(), SI, Offset);
}
void visitMemSetInst(MemSetInst &II) {
ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
- insertUse(Length ? Length->getZExtValue() : AllocSize - Offset, II);
+ uint64_t Size = Length ? Length->getZExtValue() : AllocSize - Offset;
+ insertUse(II, Offset, Size);
}
void visitMemTransferInst(MemTransferInst &II) {
ConstantInt *Length = dyn_cast<ConstantInt>(II.getLength());
- insertUse(Length ? Length->getZExtValue() : AllocSize - Offset, II);
+ uint64_t Size = Length ? Length->getZExtValue() : AllocSize - Offset;
+ if (!Size)
+ return markAsDead(II);
+
+ MemTransferOffsets &Offsets = P.MemTransferInstData[&II];
+ if (!II.isVolatile() && Offsets.DestEnd && Offsets.SourceEnd &&
+ Offsets.DestBegin == Offsets.SourceBegin)
+ return markAsDead(II); // Skip identity transfers without side-effects.
+
+ insertUse(II, Offset, Size);
}
void visitIntrinsicInst(IntrinsicInst &II) {
II.getIntrinsicID() == Intrinsic::lifetime_end);
ConstantInt *Length = cast<ConstantInt>(II.getArgOperand(0));
- insertUse(std::min(AllocSize - Offset, Length->getLimitedValue()), II);
+ insertUse(II, Offset,
+ std::min(AllocSize - Offset, Length->getLimitedValue()));
}
- void insertPHIOrSelect(Instruction &User) {
+ void insertPHIOrSelect(Instruction &User, uint64_t Offset) {
uint64_t Size = P.PHIOrSelectSizes.lookup(&User).first;
// For PHI and select operands outside the alloca, we can't nuke the entire
return;
}
- insertUse(Size, User);
+ insertUse(User, Offset, Size);
}
void visitPHINode(PHINode &PN) {
if (PN.use_empty())
return markAsDead(PN);
- insertPHIOrSelect(PN);
+ insertPHIOrSelect(PN, Offset);
}
void visitSelectInst(SelectInst &SI) {
if (SI.use_empty())
// If the result of the constant fold will be the pointer, recurse
// through the select as if we had RAUW'ed it.
enqueueUsers(SI, Offset);
+ else
+ // Otherwise the operand to the select is dead, and we can replace it
+ // with undef.
+ P.DeadOperands.push_back(U);
return;
}
- insertPHIOrSelect(SI);
+ insertPHIOrSelect(SI, Offset);
}
/// \brief Unreachable, we've already visited the alloca once.
SplitEndOffset = std::max(SplitEndOffset, Partitions[j].EndOffset);
}
- Partitions[j].BeginOffset = Partitions[j].EndOffset = UINT64_MAX;
+ Partitions[j].kill();
++NumDeadPartitions;
++j;
}
if (New.BeginOffset != New.EndOffset)
Partitions.push_back(New);
// Mark the old one for removal.
- Partitions[i].BeginOffset = Partitions[i].EndOffset = UINT64_MAX;
+ Partitions[i].kill();
++NumDeadPartitions;
}
// replaced in the process.
std::sort(Partitions.begin(), Partitions.end());
if (NumDeadPartitions) {
- assert(Partitions.back().BeginOffset == UINT64_MAX);
- assert(Partitions.back().EndOffset == UINT64_MAX);
+ assert(Partitions.back().isDead());
assert((ptrdiff_t)NumDeadPartitions ==
std::count(Partitions.begin(), Partitions.end(), Partitions.back()));
}
Partitions.erase(Partitions.end() - NumDeadPartitions, Partitions.end());
}
-AllocaPartitioning::AllocaPartitioning(const TargetData &TD, AllocaInst &AI)
- : AI(AI), PointerEscapingInstr(0) {
+AllocaPartitioning::AllocaPartitioning(const DataLayout &TD, AllocaInst &AI)
+ :
+#ifndef NDEBUG
+ AI(AI),
+#endif
+ PointerEscapingInstr(0) {
PartitionBuilder PB(TD, AI, *this);
if (!PB())
return;
- if (Partitions.size() > 1) {
- // Sort the uses. This arranges for the offsets to be in ascending order,
- // and the sizes to be in descending order.
- std::sort(Partitions.begin(), Partitions.end());
+ // Sort the uses. This arranges for the offsets to be in ascending order,
+ // and the sizes to be in descending order.
+ std::sort(Partitions.begin(), Partitions.end());
+
+ // Remove any partitions from the back which are marked as dead.
+ while (!Partitions.empty() && Partitions.back().isDead())
+ Partitions.pop_back();
+ if (Partitions.size() > 1) {
// Intersect splittability for all partitions with equal offsets and sizes.
// Then remove all but the first so that we have a sequence of non-equal but
// potentially overlapping partitions.
Uses.resize(Partitions.size());
UseBuilder UB(TD, AI, *this);
UB();
- for (iterator I = Partitions.begin(), E = Partitions.end(); I != E; ++I)
- std::stable_sort(use_begin(I), use_end(I));
}
Type *AllocaPartitioning::getCommonType(iterator I) const {
Type *Ty = 0;
for (const_use_iterator UI = use_begin(I), UE = use_end(I); UI != UE; ++UI) {
- if (isa<MemIntrinsic>(*UI->User))
+ if (!UI->U)
+ continue; // Skip dead uses.
+ if (isa<IntrinsicInst>(*UI->U->getUser()))
continue;
if (UI->BeginOffset != I->BeginOffset || UI->EndOffset != I->EndOffset)
- break;
+ continue;
Type *UserTy = 0;
- if (LoadInst *LI = dyn_cast<LoadInst>(&*UI->User)) {
+ if (LoadInst *LI = dyn_cast<LoadInst>(UI->U->getUser())) {
UserTy = LI->getType();
- } else if (StoreInst *SI = dyn_cast<StoreInst>(&*UI->User)) {
+ } else if (StoreInst *SI = dyn_cast<StoreInst>(UI->U->getUser())) {
UserTy = SI->getValueOperand()->getType();
- } else if (SelectInst *SI = dyn_cast<SelectInst>(&*UI->User)) {
- if (PointerType *PtrTy = dyn_cast<PointerType>(SI->getType()))
- UserTy = PtrTy->getElementType();
- } else if (PHINode *PN = dyn_cast<PHINode>(&*UI->User)) {
- if (PointerType *PtrTy = dyn_cast<PointerType>(PN->getType()))
- UserTy = PtrTy->getElementType();
}
if (Ty && Ty != UserTy)
return Ty;
}
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+
void AllocaPartitioning::print(raw_ostream &OS, const_iterator I,
StringRef Indent) const {
OS << Indent << "partition #" << (I - begin())
StringRef Indent) const {
for (const_use_iterator UI = use_begin(I), UE = use_end(I);
UI != UE; ++UI) {
+ if (!UI->U)
+ continue; // Skip dead uses.
OS << Indent << " [" << UI->BeginOffset << "," << UI->EndOffset << ") "
- << "used by: " << *UI->User << "\n";
- if (MemTransferInst *II = dyn_cast<MemTransferInst>(&*UI->User)) {
+ << "used by: " << *UI->U->getUser() << "\n";
+ if (MemTransferInst *II = dyn_cast<MemTransferInst>(UI->U->getUser())) {
const MemTransferOffsets &MTO = MemTransferInstData.lookup(II);
bool IsDest;
if (!MTO.IsSplittable)
void AllocaPartitioning::dump(const_iterator I) const { print(dbgs(), I); }
void AllocaPartitioning::dump() const { print(dbgs()); }
+#endif // !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+
+
+namespace {
+/// \brief Implementation of LoadAndStorePromoter for promoting allocas.
+///
+/// This subclass of LoadAndStorePromoter adds overrides to handle promoting
+/// the loads and stores of an alloca instruction, as well as updating its
+/// debug information. This is used when a domtree is unavailable and thus
+/// mem2reg in its full form can't be used to handle promotion of allocas to
+/// scalar values.
+class AllocaPromoter : public LoadAndStorePromoter {
+ AllocaInst &AI;
+ DIBuilder &DIB;
+
+ SmallVector<DbgDeclareInst *, 4> DDIs;
+ SmallVector<DbgValueInst *, 4> DVIs;
+
+public:
+ AllocaPromoter(const SmallVectorImpl<Instruction*> &Insts, SSAUpdater &S,
+ AllocaInst &AI, DIBuilder &DIB)
+ : LoadAndStorePromoter(Insts, S), AI(AI), DIB(DIB) {}
+
+ void run(const SmallVectorImpl<Instruction*> &Insts) {
+ // Remember which alloca we're promoting (for isInstInList).
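+    // Any dbg.declare or dbg.value intrinsics describing the alloca refer to
+    // it through a metadata node wrapping the value, so walk the users of
+    // that node (if it exists) to collect them.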
+ if (MDNode *DebugNode = MDNode::getIfExists(AI.getContext(), &AI)) {
+ for (Value::use_iterator UI = DebugNode->use_begin(),
+ UE = DebugNode->use_end();
+ UI != UE; ++UI)
+ if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(*UI))
+ DDIs.push_back(DDI);
+ else if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(*UI))
+ DVIs.push_back(DVI);
+ }
+
+ LoadAndStorePromoter::run(Insts);
+ AI.eraseFromParent();
+ while (!DDIs.empty())
+ DDIs.pop_back_val()->eraseFromParent();
+ while (!DVIs.empty())
+ DVIs.pop_back_val()->eraseFromParent();
+ }
+
+ virtual bool isInstInList(Instruction *I,
+ const SmallVectorImpl<Instruction*> &Insts) const {
+ if (LoadInst *LI = dyn_cast<LoadInst>(I))
+ return LI->getOperand(0) == &AI;
+ return cast<StoreInst>(I)->getPointerOperand() == &AI;
+ }
+
+ virtual void updateDebugInfo(Instruction *Inst) const {
+ for (SmallVector<DbgDeclareInst *, 4>::const_iterator I = DDIs.begin(),
+ E = DDIs.end(); I != E; ++I) {
+ DbgDeclareInst *DDI = *I;
+ if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
+ ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
+ else if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
+ ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
+ }
+ for (SmallVector<DbgValueInst *, 4>::const_iterator I = DVIs.begin(),
+ E = DVIs.end(); I != E; ++I) {
+ DbgValueInst *DVI = *I;
+ Value *Arg = NULL;
+ if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
+ // If an argument is zero extended then use argument directly. The ZExt
+ // may be zapped by an optimization pass in future.
+ if (ZExtInst *ZExt = dyn_cast<ZExtInst>(SI->getOperand(0)))
+ Arg = dyn_cast<Argument>(ZExt->getOperand(0));
+ if (SExtInst *SExt = dyn_cast<SExtInst>(SI->getOperand(0)))
+ Arg = dyn_cast<Argument>(SExt->getOperand(0));
+ if (!Arg)
+ Arg = SI->getOperand(0);
+ } else if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
+ Arg = LI->getOperand(0);
+ } else {
+ continue;
+ }
+ Instruction *DbgVal =
+ DIB.insertDbgValueIntrinsic(Arg, 0, DIVariable(DVI->getVariable()),
+ Inst);
+ DbgVal->setDebugLoc(DVI->getDebugLoc());
+ }
+ }
+};
+} // end anon namespace
+
namespace {
/// \brief An optimization pass providing Scalar Replacement of Aggregates.
/// this form. By doing so, it will enable promotion of vector aggregates to
/// SSA vector values.
class SROA : public FunctionPass {
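+  /// Whether the pass requires a DominatorTree. When a DomTree is not
+  /// available, SSA values are formed through the SSAUpdater infrastructure
+  /// rather than mem2reg (see the -force-ssa-updater option above).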
+ const bool RequiresDomTree;
+
LLVMContext *C;
- const TargetData *TD;
+ const DataLayout *TD;
DominatorTree *DT;
/// \brief Worklist of alloca instructions to simplify.
/// uses as dead. Only used to guard insertion into DeadInsts.
SmallPtrSet<Instruction *, 4> DeadSplitInsts;
- /// \brief A set of deleted alloca instructions.
+ /// \brief Post-promotion worklist.
///
- /// These pointers are *no longer valid* as they have been deleted. They are
- /// used to remove deleted allocas from the list of promotable allocas.
- SmallPtrSet<AllocaInst *, 4> DeletedAllocas;
+ /// Sometimes we discover an alloca which has a high probability of becoming
+ /// viable for SROA after a round of promotion takes place. In those cases,
+ /// the alloca is enqueued here for re-processing.
+ ///
+ /// Note that we have to be very careful to clear allocas out of this list in
+ /// the event they are deleted.
+ SetVector<AllocaInst *, SmallVector<AllocaInst *, 16> > PostPromotionWorklist;
/// \brief A collection of alloca instructions we can directly promote.
std::vector<AllocaInst *> PromotableAllocas;
public:
- SROA() : FunctionPass(ID), C(0), TD(0), DT(0) {
+ SROA(bool RequiresDomTree = true)
+ : FunctionPass(ID), RequiresDomTree(RequiresDomTree),
+ C(0), TD(0), DT(0) {
initializeSROAPass(*PassRegistry::getPassRegistry());
}
bool runOnFunction(Function &F);
static char ID;
private:
+ friend class PHIOrSelectSpeculator;
friend class AllocaPartitionRewriter;
friend class AllocaPartitionVectorRewriter;
AllocaPartitioning::iterator PI);
bool splitAlloca(AllocaInst &AI, AllocaPartitioning &P);
bool runOnAlloca(AllocaInst &AI);
- void deleteDeadInstructions();
+ void deleteDeadInstructions(SmallPtrSet<AllocaInst *, 4> &DeletedAllocas);
+ bool promoteAllocas(Function &F);
};
}
char SROA::ID = 0;
-FunctionPass *llvm::createSROAPass() {
- return new SROA();
+FunctionPass *llvm::createSROAPass(bool RequiresDomTree) {
+ return new SROA(RequiresDomTree);
}
INITIALIZE_PASS_BEGIN(SROA, "sroa", "Scalar Replacement Of Aggregates",
INITIALIZE_PASS_END(SROA, "sroa", "Scalar Replacement Of Aggregates",
false, false)
-/// \brief Accumulate the constant offsets in a GEP into a single APInt offset.
-///
-/// If the provided GEP is all-constant, the total byte offset formed by the
-/// GEP is computed and Offset is set to it. If the GEP has any non-constant
-/// operands, the function returns false and the value of Offset is unmodified.
-static bool accumulateGEPOffsets(const TargetData &TD, GEPOperator &GEP,
- APInt &Offset) {
- APInt GEPOffset(Offset.getBitWidth(), 0);
- for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
- GTI != GTE; ++GTI) {
- ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
- if (!OpC)
- return false;
- if (OpC->isZero()) continue;
+namespace {
+/// \brief Visitor to speculate PHIs and Selects where possible.
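+///
+/// The speculation rewrites a load of a PHI or select of alloca-derived
+/// pointers into a PHI or select of loads so that the alloca itself becomes
+/// promotable; see the visitPHINode and visitSelectInst overloads below.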
+class PHIOrSelectSpeculator : public InstVisitor<PHIOrSelectSpeculator> {
+ // Befriend the base class so it can delegate to private visit methods.
+ friend class llvm::InstVisitor<PHIOrSelectSpeculator>;
- // Handle a struct index, which adds its field offset to the pointer.
- if (StructType *STy = dyn_cast<StructType>(*GTI)) {
- unsigned ElementIdx = OpC->getZExtValue();
- const StructLayout *SL = TD.getStructLayout(STy);
- GEPOffset += APInt(Offset.getBitWidth(),
- SL->getElementOffset(ElementIdx));
- continue;
- }
+ const DataLayout &TD;
+ AllocaPartitioning &P;
+ SROA &Pass;
- APInt TypeSize(Offset.getBitWidth(),
- TD.getTypeAllocSize(GTI.getIndexedType()));
- if (VectorType *VTy = dyn_cast<VectorType>(*GTI)) {
- assert((VTy->getScalarSizeInBits() % 8) == 0 &&
- "vector element size is not a multiple of 8, cannot GEP over it");
- TypeSize = VTy->getScalarSizeInBits() / 8;
+public:
+ PHIOrSelectSpeculator(const DataLayout &TD, AllocaPartitioning &P, SROA &Pass)
+ : TD(TD), P(P), Pass(Pass) {}
+
+ /// \brief Visit the users of an alloca partition and rewrite them.
+ void visitUsers(AllocaPartitioning::const_iterator PI) {
+ // Note that we need to use an index here as the underlying vector of uses
+ // may be grown during speculation. However, we never need to re-visit the
+ // new uses, and so we can use the initial size bound.
+ for (unsigned Idx = 0, Size = P.use_size(PI); Idx != Size; ++Idx) {
+ const AllocaPartitioning::PartitionUse &PU = P.getUse(PI, Idx);
+ if (!PU.U)
+ continue; // Skip dead use.
+
+ visit(cast<Instruction>(PU.U->getUser()));
}
-
- GEPOffset += OpC->getValue().sextOrTrunc(Offset.getBitWidth()) * TypeSize;
}
- Offset = GEPOffset;
- return true;
-}
-/// \brief Build a GEP out of a base pointer and indices.
-///
-/// This will return the BasePtr if that is valid, or build a new GEP
-/// instruction using the IRBuilder if GEP-ing is needed.
-static Value *buildGEP(IRBuilder<> &IRB, Value *BasePtr,
- SmallVectorImpl<Value *> &Indices,
- const Twine &Prefix) {
- if (Indices.empty())
- return BasePtr;
+private:
+ // By default, skip this instruction.
+ void visitInstruction(Instruction &I) {}
- // A single zero index is a no-op, so check for this and avoid building a GEP
- // in that case.
- if (Indices.size() == 1 && cast<ConstantInt>(Indices.back())->isZero())
- return BasePtr;
+ /// PHI instructions that use an alloca and are subsequently loaded can be
+ /// rewritten to load both input pointers in the pred blocks and then PHI the
+ /// results, allowing the load of the alloca to be promoted.
+ /// From this:
+ /// %P2 = phi [i32* %Alloca, i32* %Other]
+ /// %V = load i32* %P2
+ /// to:
+ /// %V1 = load i32* %Alloca -> will be mem2reg'd
+ /// ...
+ /// %V2 = load i32* %Other
+ /// ...
+ /// %V = phi [i32 %V1, i32 %V2]
+ ///
+ /// We can do this to a select if its only uses are loads and if the operands
+ /// to the select can be loaded unconditionally.
+ ///
+ /// FIXME: This should be hoisted into a generic utility, likely in
+ /// Transforms/Util/Local.h
+ bool isSafePHIToSpeculate(PHINode &PN, SmallVectorImpl<LoadInst *> &Loads) {
+ // For now, we can only do this promotion if the load is in the same block
+ // as the PHI, and if there are no stores between the phi and load.
+ // TODO: Allow recursive phi users.
+ // TODO: Allow stores.
+ BasicBlock *BB = PN.getParent();
+ unsigned MaxAlign = 0;
+ for (Value::use_iterator UI = PN.use_begin(), UE = PN.use_end();
+ UI != UE; ++UI) {
+ LoadInst *LI = dyn_cast<LoadInst>(*UI);
+ if (LI == 0 || !LI->isSimple()) return false;
- return IRB.CreateInBoundsGEP(BasePtr, Indices, Prefix + ".idx");
-}
+ // For now we only allow loads in the same block as the PHI. This is
+ // a common case that happens when instcombine merges two loads through
+ // a PHI.
+ if (LI->getParent() != BB) return false;
-/// \brief Get a natural GEP off of the BasePtr walking through Ty toward
-/// TargetTy without changing the offset of the pointer.
-///
-/// This routine assumes we've already established a properly offset GEP with
-/// Indices, and arrived at the Ty type. The goal is to continue to GEP with
-/// zero-indices down through type layers until we find one the same as
-/// TargetTy. If we can't find one with the same type, we at least try to use
-/// one with the same size. If none of that works, we just produce the GEP as
-/// indicated by Indices to have the correct offset.
-static Value *getNaturalGEPWithType(IRBuilder<> &IRB, const TargetData &TD,
- Value *BasePtr, Type *Ty, Type *TargetTy,
- SmallVectorImpl<Value *> &Indices,
- const Twine &Prefix) {
- if (Ty == TargetTy)
- return buildGEP(IRB, BasePtr, Indices, Prefix);
+ // Ensure that there are no instructions between the PHI and the load that
+ // could store.
+ for (BasicBlock::iterator BBI = &PN; &*BBI != LI; ++BBI)
+ if (BBI->mayWriteToMemory())
+ return false;
- // See if we can descend into a struct and locate a field with the correct
- // type.
- unsigned NumLayers = 0;
- Type *ElementTy = Ty;
- do {
- if (ElementTy->isPointerTy())
- break;
- if (SequentialType *SeqTy = dyn_cast<SequentialType>(ElementTy)) {
- ElementTy = SeqTy->getElementType();
- Indices.push_back(IRB.getInt(APInt(TD.getPointerSizeInBits(), 0)));
- } else if (StructType *STy = dyn_cast<StructType>(ElementTy)) {
- ElementTy = *STy->element_begin();
- Indices.push_back(IRB.getInt32(0));
- } else {
- break;
+ MaxAlign = std::max(MaxAlign, LI->getAlignment());
+ Loads.push_back(LI);
}
- ++NumLayers;
- } while (ElementTy != TargetTy);
- if (ElementTy != TargetTy)
- Indices.erase(Indices.end() - NumLayers, Indices.end());
- return buildGEP(IRB, BasePtr, Indices, Prefix);
-}
+ // We can only transform this if it is safe to push the loads into the
+ // predecessor blocks. The only thing to watch out for is that we can't put
+ // a possibly trapping load in the predecessor if it is a critical edge.
+ for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num;
+ ++Idx) {
+ TerminatorInst *TI = PN.getIncomingBlock(Idx)->getTerminator();
+ Value *InVal = PN.getIncomingValue(Idx);
-/// \brief Recursively compute indices for a natural GEP.
-///
-/// This is the recursive step for getNaturalGEPWithOffset that walks down the
-/// element types adding appropriate indices for the GEP.
-static Value *getNaturalGEPRecursively(IRBuilder<> &IRB, const TargetData &TD,
- Value *Ptr, Type *Ty, APInt &Offset,
- Type *TargetTy,
- SmallVectorImpl<Value *> &Indices,
- const Twine &Prefix) {
- if (Offset == 0)
- return getNaturalGEPWithType(IRB, TD, Ptr, Ty, TargetTy, Indices, Prefix);
+ // If the value is produced by the terminator of the predecessor (an
+ // invoke) or it has side-effects, there is no valid place to put a load
+ // in the predecessor.
+ if (TI == InVal || TI->mayHaveSideEffects())
+ return false;
- // We can't recurse through pointer types.
- if (Ty->isPointerTy())
- return 0;
+ // If the predecessor has a single successor, then the edge isn't
+ // critical.
+ if (TI->getNumSuccessors() == 1)
+ continue;
- if (VectorType *VecTy = dyn_cast<VectorType>(Ty)) {
- unsigned ElementSizeInBits = VecTy->getScalarSizeInBits();
- if (ElementSizeInBits % 8)
- return 0; // GEPs over multiple of 8 size vector elements are invalid.
- APInt ElementSize(Offset.getBitWidth(), ElementSizeInBits / 8);
+ // If this pointer is always safe to load, or if we can prove that there
+ // is already a load in the block, then we can move the load to the pred
+ // block.
+ if (InVal->isDereferenceablePointer() ||
+ isSafeToLoadUnconditionally(InVal, TI, MaxAlign, &TD))
+ continue;
+
+ return false;
+ }
+
+ return true;
+ }
+
+ void visitPHINode(PHINode &PN) {
+ DEBUG(dbgs() << " original: " << PN << "\n");
+
+ SmallVector<LoadInst *, 4> Loads;
+ if (!isSafePHIToSpeculate(PN, Loads))
+ return;
+
+ assert(!Loads.empty());
+
+ Type *LoadTy = cast<PointerType>(PN.getType())->getElementType();
+ IRBuilder<> PHIBuilder(&PN);
+ PHINode *NewPN = PHIBuilder.CreatePHI(LoadTy, PN.getNumIncomingValues(),
+ PN.getName() + ".sroa.speculated");
+
+ // Get the TBAA tag and alignment to use from one of the loads. It doesn't
+ // matter which one we get and if any differ, it doesn't matter.
+ LoadInst *SomeLoad = cast<LoadInst>(Loads.back());
+ MDNode *TBAATag = SomeLoad->getMetadata(LLVMContext::MD_tbaa);
+ unsigned Align = SomeLoad->getAlignment();
+
+ // Rewrite all loads of the PN to use the new PHI.
+ do {
+ LoadInst *LI = Loads.pop_back_val();
+ LI->replaceAllUsesWith(NewPN);
+ Pass.DeadInsts.push_back(LI);
+ } while (!Loads.empty());
+
+ // Inject loads into all of the pred blocks.
+ for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
+ BasicBlock *Pred = PN.getIncomingBlock(Idx);
+ TerminatorInst *TI = Pred->getTerminator();
+ Use *InUse = &PN.getOperandUse(PN.getOperandNumForIncomingValue(Idx));
+ Value *InVal = PN.getIncomingValue(Idx);
+ IRBuilder<> PredBuilder(TI);
+
+ LoadInst *Load
+ = PredBuilder.CreateLoad(InVal, (PN.getName() + ".sroa.speculate.load." +
+ Pred->getName()));
+ ++NumLoadsSpeculated;
+ Load->setAlignment(Align);
+ if (TBAATag)
+ Load->setMetadata(LLVMContext::MD_tbaa, TBAATag);
+ NewPN->addIncoming(Load, Pred);
+
+ Instruction *Ptr = dyn_cast<Instruction>(InVal);
+ if (!Ptr)
+ // No uses to rewrite.
+ continue;
+
+ // Try to lookup and rewrite any partition uses corresponding to this phi
+ // input.
+ AllocaPartitioning::iterator PI
+ = P.findPartitionForPHIOrSelectOperand(InUse);
+ if (PI == P.end())
+ continue;
+
+ // Replace the Use in the PartitionUse for this operand with the Use
+ // inside the load.
+ AllocaPartitioning::use_iterator UI
+ = P.findPartitionUseForPHIOrSelectOperand(InUse);
+ assert(isa<PHINode>(*UI->U->getUser()));
+ UI->U = &Load->getOperandUse(Load->getPointerOperandIndex());
+ }
+ DEBUG(dbgs() << " speculated to: " << *NewPN << "\n");
+ }
+
+ /// Select instructions that use an alloca and are subsequently loaded can be
+ /// rewritten to load both input pointers and then select between the result,
+ /// allowing the load of the alloca to be promoted.
+ /// From this:
+ /// %P2 = select i1 %cond, i32* %Alloca, i32* %Other
+ /// %V = load i32* %P2
+ /// to:
+ /// %V1 = load i32* %Alloca -> will be mem2reg'd
+ /// %V2 = load i32* %Other
+ /// %V = select i1 %cond, i32 %V1, i32 %V2
+ ///
+ /// We can do this to a select if its only uses are loads and if the operand
+ /// to the select can be loaded unconditionally.
+ bool isSafeSelectToSpeculate(SelectInst &SI,
+ SmallVectorImpl<LoadInst *> &Loads) {
+ Value *TValue = SI.getTrueValue();
+ Value *FValue = SI.getFalseValue();
+ bool TDerefable = TValue->isDereferenceablePointer();
+ bool FDerefable = FValue->isDereferenceablePointer();
+
+ for (Value::use_iterator UI = SI.use_begin(), UE = SI.use_end();
+ UI != UE; ++UI) {
+ LoadInst *LI = dyn_cast<LoadInst>(*UI);
+ if (LI == 0 || !LI->isSimple()) return false;
+
+ // Both operands to the select need to be dereferencable, either
+ // absolutely (e.g. allocas) or at this point because we can see other
+ // accesses to it.
+ if (!TDerefable && !isSafeToLoadUnconditionally(TValue, LI,
+ LI->getAlignment(), &TD))
+ return false;
+ if (!FDerefable && !isSafeToLoadUnconditionally(FValue, LI,
+ LI->getAlignment(), &TD))
+ return false;
+ Loads.push_back(LI);
+ }
+
+ return true;
+ }
+
+ void visitSelectInst(SelectInst &SI) {
+ DEBUG(dbgs() << " original: " << SI << "\n");
+ IRBuilder<> IRB(&SI);
+
+ // If the select isn't safe to speculate, just use simple logic to emit it.
+ SmallVector<LoadInst *, 4> Loads;
+ if (!isSafeSelectToSpeculate(SI, Loads))
+ return;
+
+ Use *Ops[2] = { &SI.getOperandUse(1), &SI.getOperandUse(2) };
+ AllocaPartitioning::iterator PIs[2];
+ AllocaPartitioning::PartitionUse PUs[2];
+ for (unsigned i = 0, e = 2; i != e; ++i) {
+ PIs[i] = P.findPartitionForPHIOrSelectOperand(Ops[i]);
+ if (PIs[i] != P.end()) {
+ // If the pointer is within the partitioning, remove the select from
+ // its uses. We'll add in the new loads below.
+ AllocaPartitioning::use_iterator UI
+ = P.findPartitionUseForPHIOrSelectOperand(Ops[i]);
+ PUs[i] = *UI;
+ // Clear out the use here so that the offsets into the use list remain
+ // stable but this use is ignored when rewriting.
+ UI->U = 0;
+ }
+ }
+
+ Value *TV = SI.getTrueValue();
+ Value *FV = SI.getFalseValue();
+ // Replace the loads of the select with a select of two loads.
+ while (!Loads.empty()) {
+ LoadInst *LI = Loads.pop_back_val();
+
+ IRB.SetInsertPoint(LI);
+ LoadInst *TL =
+ IRB.CreateLoad(TV, LI->getName() + ".sroa.speculate.load.true");
+ LoadInst *FL =
+ IRB.CreateLoad(FV, LI->getName() + ".sroa.speculate.load.false");
+ NumLoadsSpeculated += 2;
+
+ // Transfer alignment and TBAA info if present.
+ TL->setAlignment(LI->getAlignment());
+ FL->setAlignment(LI->getAlignment());
+ if (MDNode *Tag = LI->getMetadata(LLVMContext::MD_tbaa)) {
+ TL->setMetadata(LLVMContext::MD_tbaa, Tag);
+ FL->setMetadata(LLVMContext::MD_tbaa, Tag);
+ }
+
+ Value *V = IRB.CreateSelect(SI.getCondition(), TL, FL,
+ LI->getName() + ".sroa.speculated");
+
+ LoadInst *Loads[2] = { TL, FL };
+ for (unsigned i = 0, e = 2; i != e; ++i) {
+ if (PIs[i] != P.end()) {
+ Use *LoadUse = &Loads[i]->getOperandUse(0);
+ assert(PUs[i].U->get() == LoadUse->get());
+ PUs[i].U = LoadUse;
+ P.use_push_back(PIs[i], PUs[i]);
+ }
+ }
+
+ DEBUG(dbgs() << " speculated to: " << *V << "\n");
+ LI->replaceAllUsesWith(V);
+ Pass.DeadInsts.push_back(LI);
+ }
+ }
+};
+}
+
+/// \brief Accumulate the constant offsets in a GEP into a single APInt offset.
+///
+/// If the provided GEP is all-constant, the total byte offset formed by the
+/// GEP is computed and Offset is set to it. If the GEP has any non-constant
+/// operands, the function returns false and the value of Offset is unmodified.
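+///
+/// For example, on a typical 64-bit target,
+///   getelementptr inbounds {i32, [2 x i64]}* %p, i64 0, i32 1, i64 1
+/// accumulates to a constant byte offset of 16: 8 to reach the array field
+/// plus one 8-byte element.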
+static bool accumulateGEPOffsets(const DataLayout &TD, GEPOperator &GEP,
+ APInt &Offset) {
+ APInt GEPOffset(Offset.getBitWidth(), 0);
+ for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
+ GTI != GTE; ++GTI) {
+ ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
+ if (!OpC)
+ return false;
+ if (OpC->isZero()) continue;
+
+ // Handle a struct index, which adds its field offset to the pointer.
+ if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ unsigned ElementIdx = OpC->getZExtValue();
+ const StructLayout *SL = TD.getStructLayout(STy);
+ GEPOffset += APInt(Offset.getBitWidth(),
+ SL->getElementOffset(ElementIdx));
+ continue;
+ }
+
+ APInt TypeSize(Offset.getBitWidth(),
+ TD.getTypeAllocSize(GTI.getIndexedType()));
+ if (VectorType *VTy = dyn_cast<VectorType>(*GTI)) {
+ assert((VTy->getScalarSizeInBits() % 8) == 0 &&
+ "vector element size is not a multiple of 8, cannot GEP over it");
+ TypeSize = VTy->getScalarSizeInBits() / 8;
+ }
+
+ GEPOffset += OpC->getValue().sextOrTrunc(Offset.getBitWidth()) * TypeSize;
+ }
+ Offset = GEPOffset;
+ return true;
+}
+
+/// \brief Build a GEP out of a base pointer and indices.
+///
+/// This will return the BasePtr if that is valid, or build a new GEP
+/// instruction using the IRBuilder if GEP-ing is needed.
+static Value *buildGEP(IRBuilder<> &IRB, Value *BasePtr,
+ SmallVectorImpl<Value *> &Indices,
+ const Twine &Prefix) {
+ if (Indices.empty())
+ return BasePtr;
+
+ // A single zero index is a no-op, so check for this and avoid building a GEP
+ // in that case.
+ if (Indices.size() == 1 && cast<ConstantInt>(Indices.back())->isZero())
+ return BasePtr;
+
+ return IRB.CreateInBoundsGEP(BasePtr, Indices, Prefix + ".idx");
+}
+
+/// \brief Get a natural GEP off of the BasePtr walking through Ty toward
+/// TargetTy without changing the offset of the pointer.
+///
+/// This routine assumes we've already established a properly offset GEP with
+/// Indices, and arrived at the Ty type. The goal is to continue to GEP with
+/// zero-indices down through type layers until we find one the same as
+/// TargetTy. If we can't find one with the same type, we at least try to use
+/// one with the same size. If none of that works, we just produce the GEP as
+/// indicated by Indices to have the correct offset.
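+///
+/// For instance, with Ty being {i32, float} and TargetTy being i32, a single
+/// zero index is appended, descending into the struct's first element.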
+static Value *getNaturalGEPWithType(IRBuilder<> &IRB, const DataLayout &TD,
+ Value *BasePtr, Type *Ty, Type *TargetTy,
+ SmallVectorImpl<Value *> &Indices,
+ const Twine &Prefix) {
+ if (Ty == TargetTy)
+ return buildGEP(IRB, BasePtr, Indices, Prefix);
+
+ // See if we can descend into a struct and locate a field with the correct
+ // type.
+ unsigned NumLayers = 0;
+ Type *ElementTy = Ty;
+ do {
+ if (ElementTy->isPointerTy())
+ break;
+ if (SequentialType *SeqTy = dyn_cast<SequentialType>(ElementTy)) {
+ ElementTy = SeqTy->getElementType();
+ Indices.push_back(IRB.getInt(APInt(TD.getPointerSizeInBits(), 0)));
+ } else if (StructType *STy = dyn_cast<StructType>(ElementTy)) {
+ if (STy->element_begin() == STy->element_end())
+ break; // Nothing left to descend into.
+ ElementTy = *STy->element_begin();
+ Indices.push_back(IRB.getInt32(0));
+ } else {
+ break;
+ }
+ ++NumLayers;
+ } while (ElementTy != TargetTy);
+ if (ElementTy != TargetTy)
+ Indices.erase(Indices.end() - NumLayers, Indices.end());
+
+ return buildGEP(IRB, BasePtr, Indices, Prefix);
+}
+
+/// \brief Recursively compute indices for a natural GEP.
+///
+/// This is the recursive step for getNaturalGEPWithOffset that walks down the
+/// element types adding appropriate indices for the GEP.
+static Value *getNaturalGEPRecursively(IRBuilder<> &IRB, const DataLayout &TD,
+ Value *Ptr, Type *Ty, APInt &Offset,
+ Type *TargetTy,
+ SmallVectorImpl<Value *> &Indices,
+ const Twine &Prefix) {
+ if (Offset == 0)
+ return getNaturalGEPWithType(IRB, TD, Ptr, Ty, TargetTy, Indices, Prefix);
+
+ // We can't recurse through pointer types.
+ if (Ty->isPointerTy())
+ return 0;
+
+ // We try to analyze GEPs over vectors here, but note that these GEPs are
+ // extremely poorly defined currently. The long-term goal is to remove GEPing
+ // over a vector from the IR completely.
+ if (VectorType *VecTy = dyn_cast<VectorType>(Ty)) {
+ unsigned ElementSizeInBits = VecTy->getScalarSizeInBits();
+ if (ElementSizeInBits % 8)
+ return 0; // GEPs over non-multiple of 8 size vector elements are invalid.
+ APInt ElementSize(Offset.getBitWidth(), ElementSizeInBits / 8);
APInt NumSkippedElements = Offset.udiv(ElementSize);
if (NumSkippedElements.ugt(VecTy->getNumElements()))
return 0;
const StructLayout *SL = TD.getStructLayout(STy);
uint64_t StructOffset = Offset.getZExtValue();
- if (StructOffset > SL->getSizeInBytes())
+ if (StructOffset >= SL->getSizeInBytes())
return 0;
unsigned Index = SL->getElementContainingOffset(StructOffset);
Offset -= APInt(Offset.getBitWidth(), SL->getElementOffset(Index));
/// possible. We recurse by decreasing Offset, adding the appropriate index to
/// Indices, and setting Ty to the result subtype.
///
-/// If no natural GEP can be constructed, this function returns a null Value*.
-static Value *getNaturalGEPWithOffset(IRBuilder<> &IRB, const TargetData &TD,
+/// If no natural GEP can be constructed, this function returns null.
+static Value *getNaturalGEPWithOffset(IRBuilder<> &IRB, const DataLayout &TD,
Value *Ptr, APInt Offset, Type *TargetTy,
SmallVectorImpl<Value *> &Indices,
const Twine &Prefix) {
return 0;
Type *ElementTy = Ty->getElementType();
+ if (!ElementTy->isSized())
+ return 0; // We can't GEP through an unsized element.
APInt ElementSize(Offset.getBitWidth(), TD.getTypeAllocSize(ElementTy));
if (ElementSize == 0)
return 0; // Zero-length arrays can't help us build a natural GEP.
/// properties. The algorithm tries to fold as many constant indices into
/// a single GEP as possible, thus making each GEP more independent of the
/// surrounding code.
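+/// For example, adjusting a pointer to a [4 x i32] alloca by 8 bytes toward
+/// an i32* type can typically be folded into a single inbounds GEP with the
+/// constant indices 0 and 2.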
-static Value *getAdjustedPtr(IRBuilder<> &IRB, const TargetData &TD,
+static Value *getAdjustedPtr(IRBuilder<> &IRB, const DataLayout &TD,
Value *Ptr, APInt Offset, Type *PointerTy,
const Twine &Prefix) {
// Even though we don't look through PHI nodes, we could be called on an
/// SSA value. We can only ensure this for a limited set of operations, and we
/// don't want to do the rewrites unless we are confident that the result will
/// be promotable, so we have an early test here.
-static bool isVectorPromotionViable(const TargetData &TD,
+static bool isVectorPromotionViable(const DataLayout &TD,
Type *AllocaTy,
AllocaPartitioning &P,
uint64_t PartitionBeginOffset,
ElementSize /= 8;
for (; I != E; ++I) {
+ if (!I->U)
+ continue; // Skip dead use.
+
uint64_t BeginOffset = I->BeginOffset - PartitionBeginOffset;
uint64_t BeginIndex = BeginOffset / ElementSize;
if (BeginIndex * ElementSize != BeginOffset ||
(EndOffset - BeginOffset) != VecSize)
return false;
- if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&*I->User)) {
+ if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I->U->getUser())) {
if (MI->isVolatile())
return false;
- if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(&*I->User)) {
+ if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(I->U->getUser())) {
const AllocaPartitioning::MemTransferOffsets &MTO
= P.getMemTransferOffsets(*MTI);
if (!MTO.IsSplittable)
return false;
}
- } else if (I->Ptr->getType()->getPointerElementType()->isStructTy()) {
+ } else if (I->U->get()->getType()->getPointerElementType()->isStructTy()) {
// Disable vector promotion when there are loads or stores of an FCA.
return false;
- } else if (!isa<LoadInst>(*I->User) && !isa<StoreInst>(*I->User)) {
+ } else if (!isa<LoadInst>(I->U->getUser()) &&
+ !isa<StoreInst>(I->U->getUser())) {
return false;
}
}
return true;
}
+/// \brief Test whether the given alloca partition can be promoted to an int.
+///
+/// This is a quick test to check whether we can rewrite a particular alloca
+/// partition (and its newly formed alloca) into an integer alloca suitable for
+/// promotion to an SSA value. We can only ensure this for a limited set of
+/// operations, and we don't want to do the rewrites unless we are confident
+/// that the result will be promotable, so we have an early test here.
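+///
+/// For example, an i64 alloca covered by an i64 store and otherwise accessed
+/// only via i32 and i16 loads is viable; an alloca with no whole-alloca load
+/// or store is rejected so that later splitting is not inhibited.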
+static bool isIntegerPromotionViable(const DataLayout &TD,
+ Type *AllocaTy,
+ uint64_t AllocBeginOffset,
+ AllocaPartitioning &P,
+ AllocaPartitioning::const_use_iterator I,
+ AllocaPartitioning::const_use_iterator E) {
+ IntegerType *Ty = dyn_cast<IntegerType>(AllocaTy);
+ if (!Ty || 8*TD.getTypeStoreSize(Ty) != Ty->getBitWidth())
+ return false;
+
+  // Check the uses to ensure they are (likely) promotable integer uses. Also
+  // ensure that the alloca has a covering load or store. We don't want to
+  // promote because of some other unsplittable entry (which we may make
+  // splittable later) and then lose the ability to promote each element
+  // access.
+ bool WholeAllocaOp = false;
+ for (; I != E; ++I) {
+ if (!I->U)
+ continue; // Skip dead use.
+
+ // We can't reasonably handle cases where the load or store extends past
+    // the end of the alloca's type and into its padding.
+ if ((I->EndOffset - AllocBeginOffset) > TD.getTypeStoreSize(Ty))
+ return false;
+
+ if (LoadInst *LI = dyn_cast<LoadInst>(I->U->getUser())) {
+ if (LI->isVolatile() || !LI->getType()->isIntegerTy())
+ return false;
+ if (LI->getType() == Ty)
+ WholeAllocaOp = true;
+ } else if (StoreInst *SI = dyn_cast<StoreInst>(I->U->getUser())) {
+ if (SI->isVolatile() || !SI->getValueOperand()->getType()->isIntegerTy())
+ return false;
+ if (SI->getValueOperand()->getType() == Ty)
+ WholeAllocaOp = true;
+ } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I->U->getUser())) {
+ if (MI->isVolatile())
+ return false;
+ if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(I->U->getUser())) {
+ const AllocaPartitioning::MemTransferOffsets &MTO
+ = P.getMemTransferOffsets(*MTI);
+ if (!MTO.IsSplittable)
+ return false;
+ }
+ } else {
+ return false;
+ }
+ }
+ return WholeAllocaOp;
+}
+
namespace {
/// \brief Visitor to rewrite instructions using a partition of an alloca to
/// use a new alloca.
// Befriend the base class so it can delegate to private visit methods.
friend class llvm::InstVisitor<AllocaPartitionRewriter, bool>;
- const TargetData &TD;
+ const DataLayout &TD;
AllocaPartitioning &P;
SROA &Pass;
AllocaInst &OldAI, &NewAI;
Type *ElementTy;
uint64_t ElementSize;
+  // This is a convenience flag variable that will be null unless the new
+  // alloca has a promotion-targeted integer type after passing
+  // isIntegerPromotionViable above. If it is non-null, the desired integer
+  // type is stored here for easy access during rewriting.
+ IntegerType *IntPromotionTy;
+
// The offset of the partition user currently being rewritten.
uint64_t BeginOffset, EndOffset;
+ Use *OldUse;
Instruction *OldPtr;
// The name prefix to use when rewriting instructions for this alloca.
std::string NamePrefix;
public:
- AllocaPartitionRewriter(const TargetData &TD, AllocaPartitioning &P,
+ AllocaPartitionRewriter(const DataLayout &TD, AllocaPartitioning &P,
AllocaPartitioning::iterator PI,
SROA &Pass, AllocaInst &OldAI, AllocaInst &NewAI,
uint64_t NewBeginOffset, uint64_t NewEndOffset)
OldAI(OldAI), NewAI(NewAI),
NewAllocaBeginOffset(NewBeginOffset),
NewAllocaEndOffset(NewEndOffset),
- VecTy(), ElementTy(), ElementSize(),
+ VecTy(), ElementTy(), ElementSize(), IntPromotionTy(),
BeginOffset(), EndOffset() {
}
assert((VecTy->getScalarSizeInBits() % 8) == 0 &&
"Only multiple-of-8 sized vector elements are viable");
ElementSize = VecTy->getScalarSizeInBits() / 8;
+ } else if (isIntegerPromotionViable(TD, NewAI.getAllocatedType(),
+ NewAllocaBeginOffset, P, I, E)) {
+ IntPromotionTy = cast<IntegerType>(NewAI.getAllocatedType());
}
bool CanSROA = true;
for (; I != E; ++I) {
+ if (!I->U)
+ continue; // Skip dead uses.
BeginOffset = I->BeginOffset;
EndOffset = I->EndOffset;
- OldPtr = I->Ptr;
+ OldUse = I->U;
+ OldPtr = cast<Instruction>(I->U->get());
NamePrefix = (Twine(NewAI.getName()) + "." + Twine(BeginOffset)).str();
- CanSROA &= visit(I->User);
+ CanSROA &= visit(cast<Instruction>(I->U->getUser()));
}
if (VecTy) {
assert(CanSROA);
return getAdjustedPtr(IRB, TD, &NewAI, Offset, PointerTy, getName(""));
}
+ /// \brief Compute suitable alignment to access an offset into the new alloca.
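+  ///
+  /// The result is the largest alignment that both the alloca's own alignment
+  /// and the byte offset can guarantee; e.g. a 16-byte aligned alloca
+  /// accessed at offset 4 only guarantees 4-byte alignment.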
+ unsigned getOffsetAlign(uint64_t Offset) {
+ unsigned NewAIAlign = NewAI.getAlignment();
+ if (!NewAIAlign)
+ NewAIAlign = TD.getABITypeAlignment(NewAI.getAllocatedType());
+ return MinAlign(NewAIAlign, Offset);
+ }
+
+ /// \brief Compute suitable alignment to access this partition of the new
+ /// alloca.
+ unsigned getPartitionAlign() {
+ return getOffsetAlign(BeginOffset - NewAllocaBeginOffset);
+ }
+
+ /// \brief Compute suitable alignment to access a type at an offset of the
+ /// new alloca.
+ ///
+ /// \returns zero if the type's ABI alignment is a suitable alignment,
+ /// otherwise returns the maximal suitable alignment.
+ unsigned getOffsetTypeAlign(Type *Ty, uint64_t Offset) {
+ unsigned Align = getOffsetAlign(Offset);
+ return Align == TD.getABITypeAlignment(Ty) ? 0 : Align;
+ }
+
+ /// \brief Compute suitable alignment to access a type at the beginning of
+ /// this partition of the new alloca.
+ ///
+ /// See \c getOffsetTypeAlign for details; this routine delegates to it.
+ unsigned getPartitionTypeAlign(Type *Ty) {
+ return getOffsetTypeAlign(Ty, BeginOffset - NewAllocaBeginOffset);
+ }
+
ConstantInt *getIndex(IRBuilder<> &IRB, uint64_t Offset) {
assert(VecTy && "Can only call getIndex when rewriting a vector");
uint64_t RelOffset = Offset - NewAllocaBeginOffset;
return IRB.getInt32(Index);
}
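+  /// \brief Extract an integer of type TargetTy from the alloca-wide integer.
+  ///
+  /// The whole new alloca is loaded as one integer, shifted so the requested
+  /// bytes land in the low bits, and truncated to TargetTy. For example,
+  /// extracting an i16 at a relative offset of 2 from an i64 alloca on a
+  /// little-endian target shifts right by 16 bits before truncating.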
+ Value *extractInteger(IRBuilder<> &IRB, IntegerType *TargetTy,
+ uint64_t Offset) {
+ assert(IntPromotionTy && "Alloca is not an integer we can extract from");
+ Value *V = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
+ getName(".load"));
+ assert(Offset >= NewAllocaBeginOffset && "Out of bounds offset");
+ uint64_t RelOffset = Offset - NewAllocaBeginOffset;
+ assert(TD.getTypeStoreSize(TargetTy) + RelOffset <=
+ TD.getTypeStoreSize(IntPromotionTy) &&
+ "Element load outside of alloca store");
+ uint64_t ShAmt = 8*RelOffset;
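+    // On big-endian targets the bytes at lower in-memory offsets hold the more
+    // significant bits of the wide integer, so the shift amount is measured
+    // from the opposite end.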
+ if (TD.isBigEndian())
+ ShAmt = 8*(TD.getTypeStoreSize(IntPromotionTy) -
+ TD.getTypeStoreSize(TargetTy) - RelOffset);
+ if (ShAmt)
+ V = IRB.CreateLShr(V, ShAmt, getName(".shift"));
+ if (TargetTy != IntPromotionTy) {
+ assert(TargetTy->getBitWidth() < IntPromotionTy->getBitWidth() &&
+ "Cannot extract to a larger integer!");
+ V = IRB.CreateTrunc(V, TargetTy, getName(".trunc"));
+ }
+ return V;
+ }
+
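+  /// \brief Insert the integer V into the alloca-wide integer at Offset.
+  ///
+  /// V is zero-extended to the alloca-wide type, shifted into position
+  /// (mirroring the endianness handling in extractInteger), and merged into
+  /// the existing value with a mask-and-or read-modify-write of the new
+  /// alloca. A value as wide as the alloca is simply stored directly.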
+ StoreInst *insertInteger(IRBuilder<> &IRB, Value *V, uint64_t Offset) {
+ IntegerType *Ty = cast<IntegerType>(V->getType());
+ if (Ty == IntPromotionTy)
+ return IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
+
+ assert(Ty->getBitWidth() < IntPromotionTy->getBitWidth() &&
+ "Cannot insert a larger integer!");
+ V = IRB.CreateZExt(V, IntPromotionTy, getName(".ext"));
+ assert(Offset >= NewAllocaBeginOffset && "Out of bounds offset");
+ uint64_t RelOffset = Offset - NewAllocaBeginOffset;
+ assert(TD.getTypeStoreSize(Ty) + RelOffset <=
+ TD.getTypeStoreSize(IntPromotionTy) &&
+ "Element store outside of alloca store");
+ uint64_t ShAmt = 8*RelOffset;
+ if (TD.isBigEndian())
+ ShAmt = 8*(TD.getTypeStoreSize(IntPromotionTy) - TD.getTypeStoreSize(Ty)
+ - RelOffset);
+ if (ShAmt)
+ V = IRB.CreateShl(V, ShAmt, getName(".shift"));
+
+ APInt Mask = ~Ty->getMask().zext(IntPromotionTy->getBitWidth()).shl(ShAmt);
+ Value *Old = IRB.CreateAnd(IRB.CreateAlignedLoad(&NewAI,
+ NewAI.getAlignment(),
+ getName(".oldload")),
+ Mask, getName(".mask"));
+ return IRB.CreateAlignedStore(IRB.CreateOr(Old, V, getName(".insert")),
+ &NewAI, NewAI.getAlignment());
+ }
+
void deleteIfTriviallyDead(Value *V) {
Instruction *I = cast<Instruction>(V);
if (isInstructionTriviallyDead(I))
Value *Result;
if (LI.getType() == VecTy->getElementType() ||
BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset) {
- Result
- = IRB.CreateExtractElement(IRB.CreateLoad(&NewAI, getName(".load")),
- getIndex(IRB, BeginOffset),
- getName(".extract"));
+ Result = IRB.CreateExtractElement(
+ IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), getName(".load")),
+ getIndex(IRB, BeginOffset), getName(".extract"));
} else {
- Result = IRB.CreateLoad(&NewAI, getName(".load"));
+ Result = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
+ getName(".load"));
}
if (Result->getType() != LI.getType())
Result = getValueCast(IRB, Result, LI.getType());
return true;
}
+ bool rewriteIntegerLoad(IRBuilder<> &IRB, LoadInst &LI) {
+ assert(!LI.isVolatile());
+ Value *Result = extractInteger(IRB, cast<IntegerType>(LI.getType()),
+ BeginOffset);
+ LI.replaceAllUsesWith(Result);
+ Pass.DeadInsts.push_back(&LI);
+ DEBUG(dbgs() << " to: " << *Result << "\n");
+ return true;
+ }
+
bool visitLoadInst(LoadInst &LI) {
DEBUG(dbgs() << " original: " << LI << "\n");
Value *OldOp = LI.getOperand(0);
if (VecTy)
return rewriteVectorizedLoadInst(IRB, LI, OldOp);
+ if (IntPromotionTy)
+ return rewriteIntegerLoad(IRB, LI);
Value *NewPtr = getAdjustedAllocaPtr(IRB,
LI.getPointerOperand()->getType());
LI.setOperand(0, NewPtr);
+ LI.setAlignment(getPartitionTypeAlign(LI.getType()));
DEBUG(dbgs() << " to: " << LI << "\n");
deleteIfTriviallyDead(OldOp);
BeginOffset > NewAllocaBeginOffset || EndOffset < NewAllocaEndOffset) {
if (V->getType() != ElementTy)
V = getValueCast(IRB, V, ElementTy);
- V = IRB.CreateInsertElement(IRB.CreateLoad(&NewAI, getName(".load")), V,
- getIndex(IRB, BeginOffset),
+ LoadInst *LI = IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(),
+ getName(".load"));
+ V = IRB.CreateInsertElement(LI, V, getIndex(IRB, BeginOffset),
getName(".insert"));
} else if (V->getType() != VecTy) {
V = getValueCast(IRB, V, VecTy);
}
- StoreInst *Store = IRB.CreateStore(V, &NewAI);
+ StoreInst *Store = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment());
Pass.DeadInsts.push_back(&SI);
(void)Store;
return true;
}
+ bool rewriteIntegerStore(IRBuilder<> &IRB, StoreInst &SI) {
+ assert(!SI.isVolatile());
+ StoreInst *Store = insertInteger(IRB, SI.getValueOperand(), BeginOffset);
+ Pass.DeadInsts.push_back(&SI);
+ (void)Store;
+ DEBUG(dbgs() << " to: " << *Store << "\n");
+ return true;
+ }
+
bool visitStoreInst(StoreInst &SI) {
DEBUG(dbgs() << " original: " << SI << "\n");
Value *OldOp = SI.getOperand(1);
if (VecTy)
return rewriteVectorizedStoreInst(IRB, SI, OldOp);
+ if (IntPromotionTy)
+ return rewriteIntegerStore(IRB, SI);
+
+ // Strip all inbounds GEPs and pointer casts to try to dig out any root
+ // alloca that should be re-examined after promoting this alloca.
+ if (SI.getValueOperand()->getType()->isPointerTy())
+ if (AllocaInst *AI = dyn_cast<AllocaInst>(SI.getValueOperand()
+ ->stripInBoundsOffsets()))
+ Pass.PostPromotionWorklist.insert(AI);
Value *NewPtr = getAdjustedAllocaPtr(IRB,
SI.getPointerOperand()->getType());
SI.setOperand(1, NewPtr);
+ SI.setAlignment(getPartitionTypeAlign(SI.getValueOperand()->getType()));
DEBUG(dbgs() << " to: " << SI << "\n");
deleteIfTriviallyDead(OldOp);
// pointer to the new alloca.
if (!isa<Constant>(II.getLength())) {
II.setDest(getAdjustedAllocaPtr(IRB, II.getRawDest()->getType()));
+ Type *CstTy = II.getAlignmentCst()->getType();
+ II.setAlignment(ConstantInt::get(CstTy, getPartitionAlign()));
+
deleteIfTriviallyDead(OldPtr);
return false;
}
!TD.isLegalInteger(TD.getTypeSizeInBits(ScalarTy)))) {
Type *SizeTy = II.getLength()->getType();
Constant *Size = ConstantInt::get(SizeTy, EndOffset - BeginOffset);
-
CallInst *New
= IRB.CreateMemSet(getAdjustedAllocaPtr(IRB,
II.getRawDest()->getType()),
- II.getValue(), Size, II.getAlignment(),
+ II.getValue(), Size, getPartitionAlign(),
II.isVolatile());
(void)New;
DEBUG(dbgs() << " to: " << *New << "\n");
// If this is an element-wide memset of a vectorizable alloca, insert it.
if (VecTy && (BeginOffset > NewAllocaBeginOffset ||
EndOffset < NewAllocaEndOffset)) {
- StoreInst *Store = IRB.CreateStore(
- IRB.CreateInsertElement(IRB.CreateLoad(&NewAI, getName(".load")), V,
- getIndex(IRB, BeginOffset),
+ StoreInst *Store = IRB.CreateAlignedStore(
+ IRB.CreateInsertElement(IRB.CreateAlignedLoad(&NewAI,
+ NewAI.getAlignment(),
+ getName(".load")),
+ V, getIndex(IRB, BeginOffset),
getName(".insert")),
- &NewAI);
+ &NewAI, NewAI.getAlignment());
(void)Store;
DEBUG(dbgs() << " to: " << *Store << "\n");
return true;
assert(V->getType() == VecTy);
}
- Value *New = IRB.CreateStore(V, &NewAI, II.isVolatile());
+ Value *New = IRB.CreateAlignedStore(V, &NewAI, NewAI.getAlignment(),
+ II.isVolatile());
(void)New;
DEBUG(dbgs() << " to: " << *New << "\n");
return !II.isVolatile();
const AllocaPartitioning::MemTransferOffsets &MTO
= P.getMemTransferOffsets(II);
+ // Compute the relative offset within the transfer.
+ unsigned IntPtrWidth = TD.getPointerSizeInBits();
+ APInt RelOffset(IntPtrWidth, BeginOffset - (IsDest ? MTO.DestBegin
+ : MTO.SourceBegin));
+
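+    // Clamp any specified alignment to what the offset within the original
+    // transfer and the alignment of the new partition can still guarantee.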
+ unsigned Align = II.getAlignment();
+ if (Align > 1)
+ Align = MinAlign(RelOffset.zextOrTrunc(64).getZExtValue(),
+ MinAlign(II.getAlignment(), getPartitionAlign()));
+
// For unsplit intrinsics, we simply modify the source and destination
// pointers in place. This isn't just an optimization, it is a matter of
// correctness. With unsplit intrinsics we may be dealing with transfers
else
II.setSource(getAdjustedAllocaPtr(IRB, II.getRawSource()->getType()));
+ Type *CstTy = II.getAlignmentCst()->getType();
+ II.setAlignment(ConstantInt::get(CstTy, Align));
+
DEBUG(dbgs() << " to: " << II << "\n");
deleteIfTriviallyDead(OldOp);
return false;
// memmove with memcpy, and we don't need to worry about all manner of
// downsides to splitting and transforming the operations.
- // Compute the relative offset within the transfer.
- unsigned IntPtrWidth = TD.getPointerSizeInBits();
- APInt RelOffset(IntPtrWidth, BeginOffset - (IsDest ? MTO.DestBegin
- : MTO.SourceBegin));
-
// If this doesn't map cleanly onto the alloca type, and that type isn't
// a single value type, just emit a memcpy.
bool EmitMemCpy
uint64_t OrigEnd = IsDest ? MTO.DestEnd : MTO.SourceEnd;
// Ensure the start lines up.
assert(BeginOffset == OrigBegin);
+ (void)OrigBegin;
// Rewrite the size as needed.
if (EndOffset != OrigEnd)
CallInst *New = IRB.CreateMemCpy(IsDest ? OurPtr : OtherPtr,
IsDest ? OtherPtr : OurPtr,
- Size, II.getAlignment(),
- II.isVolatile());
+ Size, Align, II.isVolatile());
(void)New;
DEBUG(dbgs() << " to: " << *New << "\n");
return false;
}
+ // Note that we clamp the alignment to 1 here as a 0 alignment for a memcpy
+ // is equivalent to 1, but that isn't true if we end up rewriting this as
+ // a load or store.
+ if (!Align)
+ Align = 1;
+
Value *SrcPtr = OtherPtr;
Value *DstPtr = &NewAI;
if (!IsDest)
Value *Src;
if (IsVectorElement && !IsDest) {
// We have to extract rather than load.
- Src = IRB.CreateExtractElement(IRB.CreateLoad(SrcPtr,
- getName(".copyload")),
- getIndex(IRB, BeginOffset),
- getName(".copyextract"));
+ Src = IRB.CreateExtractElement(
+ IRB.CreateAlignedLoad(SrcPtr, Align, getName(".copyload")),
+ getIndex(IRB, BeginOffset),
+ getName(".copyextract"));
} else {
- Src = IRB.CreateLoad(SrcPtr, II.isVolatile(), getName(".copyload"));
+ Src = IRB.CreateAlignedLoad(SrcPtr, Align, II.isVolatile(),
+ getName(".copyload"));
}
if (IsVectorElement && IsDest) {
// We have to insert into a loaded copy before storing.
- Src = IRB.CreateInsertElement(IRB.CreateLoad(&NewAI, getName(".load")),
- Src, getIndex(IRB, BeginOffset),
- getName(".insert"));
+ Src = IRB.CreateInsertElement(
+ IRB.CreateAlignedLoad(&NewAI, NewAI.getAlignment(), getName(".load")),
+ Src, getIndex(IRB, BeginOffset),
+ getName(".insert"));
}
- Value *Store = IRB.CreateStore(Src, DstPtr, II.isVolatile());
+ StoreInst *Store = cast<StoreInst>(
+ IRB.CreateAlignedStore(Src, DstPtr, Align, II.isVolatile()));
(void)Store;
DEBUG(dbgs() << " to: " << *Store << "\n");
return !II.isVolatile();
II.getIntrinsicID() == Intrinsic::lifetime_end);
DEBUG(dbgs() << " original: " << II << "\n");
IRBuilder<> IRB(&II);
- assert(II.getArgOperand(1) == OldPtr);
-
- // Record this instruction for deletion.
- if (Pass.DeadSplitInsts.insert(&II))
- Pass.DeadInsts.push_back(&II);
-
- ConstantInt *Size
- = ConstantInt::get(cast<IntegerType>(II.getArgOperand(0)->getType()),
- EndOffset - BeginOffset);
- Value *Ptr = getAdjustedAllocaPtr(IRB, II.getArgOperand(1)->getType());
- Value *New;
- if (II.getIntrinsicID() == Intrinsic::lifetime_start)
- New = IRB.CreateLifetimeStart(Ptr, Size);
- else
- New = IRB.CreateLifetimeEnd(Ptr, Size);
-
- DEBUG(dbgs() << " to: " << *New << "\n");
- return true;
- }
-
- /// PHI instructions that use an alloca and are subsequently loaded can be
- /// rewritten to load both input pointers in the pred blocks and then PHI the
- /// results, allowing the load of the alloca to be promoted.
- /// From this:
- /// %P2 = phi [i32* %Alloca, i32* %Other]
- /// %V = load i32* %P2
- /// to:
- /// %V1 = load i32* %Alloca -> will be mem2reg'd
- /// ...
- /// %V2 = load i32* %Other
- /// ...
- /// %V = phi [i32 %V1, i32 %V2]
- ///
- /// We can do this to a select if its only uses are loads and if the operand
- /// to the select can be loaded unconditionally.
- ///
- /// FIXME: This should be hoisted into a generic utility, likely in
- /// Transforms/Util/Local.h
- bool isSafePHIToSpeculate(PHINode &PN, SmallVectorImpl<LoadInst *> &Loads) {
- // For now, we can only do this promotion if the load is in the same block
- // as the PHI, and if there are no stores between the phi and load.
- // TODO: Allow recursive phi users.
- // TODO: Allow stores.
- BasicBlock *BB = PN.getParent();
- unsigned MaxAlign = 0;
- for (Value::use_iterator UI = PN.use_begin(), UE = PN.use_end();
- UI != UE; ++UI) {
- LoadInst *LI = dyn_cast<LoadInst>(*UI);
- if (LI == 0 || !LI->isSimple()) return false;
-
- // For now we only allow loads in the same block as the PHI. This is
- // a common case that happens when instcombine merges two loads through
- // a PHI.
- if (LI->getParent() != BB) return false;
-
- // Ensure that there are no instructions between the PHI and the load that
- // could store.
- for (BasicBlock::iterator BBI = &PN; &*BBI != LI; ++BBI)
- if (BBI->mayWriteToMemory())
- return false;
-
- MaxAlign = std::max(MaxAlign, LI->getAlignment());
- Loads.push_back(LI);
- }
-
- // We can only transform this if it is safe to push the loads into the
- // predecessor blocks. The only thing to watch out for is that we can't put
- // a possibly trapping load in the predecessor if it is a critical edge.
- for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num;
- ++Idx) {
- TerminatorInst *TI = PN.getIncomingBlock(Idx)->getTerminator();
- Value *InVal = PN.getIncomingValue(Idx);
-
- // If the value is produced by the terminator of the predecessor (an
- // invoke) or it has side-effects, there is no valid place to put a load
- // in the predecessor.
- if (TI == InVal || TI->mayHaveSideEffects())
- return false;
-
- // If the predecessor has a single successor, then the edge isn't
- // critical.
- if (TI->getNumSuccessors() == 1)
- continue;
-
- // If this pointer is always safe to load, or if we can prove that there
- // is already a load in the block, then we can move the load to the pred
- // block.
- if (InVal->isDereferenceablePointer() ||
- isSafeToLoadUnconditionally(InVal, TI, MaxAlign, &TD))
- continue;
+ assert(II.getArgOperand(1) == OldPtr);
- return false;
- }
+ // Record this instruction for deletion.
+ if (Pass.DeadSplitInsts.insert(&II))
+ Pass.DeadInsts.push_back(&II);
+
+ ConstantInt *Size
+ = ConstantInt::get(cast<IntegerType>(II.getArgOperand(0)->getType()),
+ EndOffset - BeginOffset);
+ Value *Ptr = getAdjustedAllocaPtr(IRB, II.getArgOperand(1)->getType());
+ Value *New;
+ if (II.getIntrinsicID() == Intrinsic::lifetime_start)
+ New = IRB.CreateLifetimeStart(Ptr, Size);
+ else
+ New = IRB.CreateLifetimeEnd(Ptr, Size);
+ DEBUG(dbgs() << " to: " << *New << "\n");
return true;
}
bool visitPHINode(PHINode &PN) {
DEBUG(dbgs() << " original: " << PN << "\n");
+
// We would like to compute a new pointer in only one place, but have it be
// as local as possible to the PHI. To do that, we re-use the location of
// the old pointer, which necessarily must be in the right position to
// dominate the PHI.
IRBuilder<> PtrBuilder(cast<Instruction>(OldPtr));
- SmallVector<LoadInst *, 4> Loads;
- if (!isSafePHIToSpeculate(PN, Loads)) {
- Value *NewPtr = getAdjustedAllocaPtr(PtrBuilder, OldPtr->getType());
- // Replace the operands which were using the old pointer.
- User::op_iterator OI = PN.op_begin(), OE = PN.op_end();
- for (; OI != OE; ++OI)
- if (*OI == OldPtr)
- *OI = NewPtr;
-
- DEBUG(dbgs() << " to: " << PN << "\n");
- deleteIfTriviallyDead(OldPtr);
- return false;
- }
- assert(!Loads.empty());
-
- Type *LoadTy = cast<PointerType>(PN.getType())->getElementType();
- IRBuilder<> PHIBuilder(&PN);
- PHINode *NewPN = PHIBuilder.CreatePHI(LoadTy, PN.getNumIncomingValues());
- NewPN->takeName(&PN);
-
- // Get the TBAA tag and alignment to use from one of the loads. It doesn't
- // matter which one we get and if any differ, it doesn't matter.
- LoadInst *SomeLoad = cast<LoadInst>(Loads.back());
- MDNode *TBAATag = SomeLoad->getMetadata(LLVMContext::MD_tbaa);
- unsigned Align = SomeLoad->getAlignment();
Value *NewPtr = getAdjustedAllocaPtr(PtrBuilder, OldPtr->getType());
+ // Replace the operands which were using the old pointer.
+ User::op_iterator OI = PN.op_begin(), OE = PN.op_end();
+ for (; OI != OE; ++OI)
+ if (*OI == OldPtr)
+ *OI = NewPtr;
- // Rewrite all loads of the PN to use the new PHI.
- do {
- LoadInst *LI = Loads.pop_back_val();
- LI->replaceAllUsesWith(NewPN);
- Pass.DeadInsts.push_back(LI);
- } while (!Loads.empty());
-
- // Inject loads into all of the pred blocks.
- for (unsigned Idx = 0, Num = PN.getNumIncomingValues(); Idx != Num; ++Idx) {
- BasicBlock *Pred = PN.getIncomingBlock(Idx);
- TerminatorInst *TI = Pred->getTerminator();
- Value *InVal = PN.getIncomingValue(Idx);
- IRBuilder<> PredBuilder(TI);
-
- // Map the value to the new alloca pointer if this was the old alloca
- // pointer.
- bool ThisOperand = InVal == OldPtr;
- if (ThisOperand)
- InVal = NewPtr;
-
- LoadInst *Load
- = PredBuilder.CreateLoad(InVal, getName(".sroa.speculate." +
- Pred->getName()));
- ++NumLoadsSpeculated;
- Load->setAlignment(Align);
- if (TBAATag)
- Load->setMetadata(LLVMContext::MD_tbaa, TBAATag);
- NewPN->addIncoming(Load, Pred);
-
- if (ThisOperand)
- continue;
- Instruction *OtherPtr = dyn_cast<Instruction>(InVal);
- if (!OtherPtr)
- // No uses to rewrite.
- continue;
-
- // Try to lookup and rewrite any partition uses corresponding to this phi
- // input.
- AllocaPartitioning::iterator PI
- = P.findPartitionForPHIOrSelectOperand(PN, OtherPtr);
- if (PI != P.end()) {
- // If the other pointer is within the partitioning, replace the PHI in
- // its uses with the load we just speculated, or add another load for
- // it to rewrite if we've already replaced the PHI.
- AllocaPartitioning::use_iterator UI
- = P.findPartitionUseForPHIOrSelectOperand(PN, OtherPtr);
- if (isa<PHINode>(*UI->User))
- UI->User = Load;
- else {
- AllocaPartitioning::PartitionUse OtherUse = *UI;
- OtherUse.User = Load;
- P.use_insert(PI, std::upper_bound(UI, P.use_end(PI), OtherUse),
- OtherUse);
- }
- }
- }
- DEBUG(dbgs() << " speculated to: " << *NewPN << "\n");
- return NewPtr == &NewAI;
- }
-
- /// Select instructions that use an alloca and are subsequently loaded can be
- /// rewritten to load both input pointers and then select between the result,
- /// allowing the load of the alloca to be promoted.
- /// From this:
- /// %P2 = select i1 %cond, i32* %Alloca, i32* %Other
- /// %V = load i32* %P2
- /// to:
- /// %V1 = load i32* %Alloca -> will be mem2reg'd
- /// %V2 = load i32* %Other
- /// %V = select i1 %cond, i32 %V1, i32 %V2
- ///
- /// We can do this to a select if its only uses are loads and if the operand
- /// to the select can be loaded unconditionally.
- bool isSafeSelectToSpeculate(SelectInst &SI,
- SmallVectorImpl<LoadInst *> &Loads) {
- Value *TValue = SI.getTrueValue();
- Value *FValue = SI.getFalseValue();
- bool TDerefable = TValue->isDereferenceablePointer();
- bool FDerefable = FValue->isDereferenceablePointer();
-
- for (Value::use_iterator UI = SI.use_begin(), UE = SI.use_end();
- UI != UE; ++UI) {
- LoadInst *LI = dyn_cast<LoadInst>(*UI);
- if (LI == 0 || !LI->isSimple()) return false;
-
- // Both operands to the select need to be dereferencable, either
- // absolutely (e.g. allocas) or at this point because we can see other
- // accesses to it.
- if (!TDerefable && !isSafeToLoadUnconditionally(TValue, LI,
- LI->getAlignment(), &TD))
- return false;
- if (!FDerefable && !isSafeToLoadUnconditionally(FValue, LI,
- LI->getAlignment(), &TD))
- return false;
- Loads.push_back(LI);
- }
-
- return true;
+ DEBUG(dbgs() << " to: " << PN << "\n");
+ deleteIfTriviallyDead(OldPtr);
+ return false;
}
bool visitSelectInst(SelectInst &SI) {
assert(SI.getFalseValue() != OldPtr && "Pointer is both operands!");
else
assert(SI.getFalseValue() == OldPtr && "Pointer isn't an operand!");
+
Value *NewPtr = getAdjustedAllocaPtr(IRB, OldPtr->getType());
+ SI.setOperand(IsTrueVal ? 1 : 2, NewPtr);
+ DEBUG(dbgs() << " to: " << SI << "\n");
+ deleteIfTriviallyDead(OldPtr);
+ return false;
+ }
- // If the select isn't safe to speculate, just use simple logic to emit it.
- SmallVector<LoadInst *, 4> Loads;
- if (!isSafeSelectToSpeculate(SI, Loads)) {
- SI.setOperand(IsTrueVal ? 1 : 2, NewPtr);
- DEBUG(dbgs() << " to: " << SI << "\n");
- deleteIfTriviallyDead(OldPtr);
- return false;
- }
+};
+}
- Value *OtherPtr = IsTrueVal ? SI.getFalseValue() : SI.getTrueValue();
- AllocaPartitioning::iterator PI
- = P.findPartitionForPHIOrSelectOperand(SI, OtherPtr);
- AllocaPartitioning::PartitionUse OtherUse;
- if (PI != P.end()) {
- // If the other pointer is within the partitioning, remove the select
- // from its uses. We'll add in the new loads below.
- AllocaPartitioning::use_iterator UI
- = P.findPartitionUseForPHIOrSelectOperand(SI, OtherPtr);
- OtherUse = *UI;
- P.use_erase(PI, UI);
+namespace {
+/// \brief Visitor to rewrite aggregate loads and stores as scalar.
+///
+/// This pass aggressively rewrites all aggregate loads and stores on
+/// a particular pointer (or any pointer derived from it which we can identify)
+/// with scalar loads and stores.
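+///
+/// For example, a load of a { i32, float } first-class aggregate becomes an
+/// i32 load and a float load through element GEPs, with insertvalue
+/// instructions rebuilding the aggregate value for the original users.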
+class AggLoadStoreRewriter : public InstVisitor<AggLoadStoreRewriter, bool> {
+ // Befriend the base class so it can delegate to private visit methods.
+ friend class llvm::InstVisitor<AggLoadStoreRewriter, bool>;
+
+ const DataLayout &TD;
+
+ /// Queue of pointer uses to analyze and potentially rewrite.
+ SmallVector<Use *, 8> Queue;
+
+ /// Set to prevent us from cycling with phi nodes and loops.
+ SmallPtrSet<User *, 8> Visited;
+
+ /// The current pointer use being rewritten. This is used to dig up the used
+ /// value (as opposed to the user).
+ Use *U;
+
+public:
+ AggLoadStoreRewriter(const DataLayout &TD) : TD(TD) {}
+
+ /// Rewrite loads and stores through a pointer and all pointers derived from
+ /// it.
+ bool rewrite(Instruction &I) {
+ DEBUG(dbgs() << " Rewriting FCA loads and stores...\n");
+ enqueueUsers(I);
+ bool Changed = false;
+ while (!Queue.empty()) {
+ U = Queue.pop_back_val();
+ Changed |= visit(cast<Instruction>(U->getUser()));
}
+ return Changed;
+ }
- Value *TV = IsTrueVal ? NewPtr : SI.getTrueValue();
- Value *FV = IsTrueVal ? SI.getFalseValue() : NewPtr;
- // Replace the loads of the select with a select of two loads.
- while (!Loads.empty()) {
- LoadInst *LI = Loads.pop_back_val();
+private:
+ /// Enqueue all the users of the given instruction for further processing.
+ /// This uses a set to de-duplicate users.
+ void enqueueUsers(Instruction &I) {
+ for (Value::use_iterator UI = I.use_begin(), UE = I.use_end(); UI != UE;
+ ++UI)
+ if (Visited.insert(*UI))
+ Queue.push_back(&UI.getUse());
+ }
+
+ // Conservative default is to not rewrite anything.
+ bool visitInstruction(Instruction &I) { return false; }
+
+ /// \brief Generic recursive split emission class.
+ template <typename Derived>
+ class OpSplitter {
+ protected:
+ /// The builder used to form new instructions.
+ IRBuilder<> IRB;
+    /// The indices to be used with insertvalue or extractvalue to select the
+ /// appropriate value within the aggregate.
+ SmallVector<unsigned, 4> Indices;
+ /// The indices to a GEP instruction which will move Ptr to the correct slot
+ /// within the aggregate.
+ SmallVector<Value *, 4> GEPIndices;
+ /// The base pointer of the original op, used as a base for GEPing the
+ /// split operations.
+ Value *Ptr;
+
+    /// Initialize the splitter with an insertion point and Ptr, starting with
+    /// a single zero GEP index.
+ OpSplitter(Instruction *InsertionPoint, Value *Ptr)
+ : IRB(InsertionPoint), GEPIndices(1, IRB.getInt32(0)), Ptr(Ptr) {}
- IRB.SetInsertPoint(LI);
- LoadInst *TL =
- IRB.CreateLoad(TV, getName("." + LI->getName() + ".true"));
- LoadInst *FL =
- IRB.CreateLoad(FV, getName("." + LI->getName() + ".false"));
- NumLoadsSpeculated += 2;
- if (PI != P.end()) {
- LoadInst *OtherLoad = IsTrueVal ? FL : TL;
- assert(OtherUse.Ptr == OtherLoad->getOperand(0));
- OtherUse.User = OtherLoad;
- P.use_insert(PI, P.use_end(PI), OtherUse);
+ public:
+ /// \brief Generic recursive split emission routine.
+ ///
+ /// This method recursively splits an aggregate op (load or store) into
+ /// scalar or vector ops. It splits recursively until it hits a single value
+ /// and emits that single value operation via the template argument.
+ ///
+ /// The logic of this routine relies on GEPs and insertvalue and
+ /// extractvalue all operating with the same fundamental index list, merely
+ /// formatted differently (GEPs need actual values).
+ ///
+ /// \param Ty The type being split recursively into smaller ops.
+ /// \param Agg The aggregate value being built up or stored, depending on
+ /// whether this is splitting a load or a store respectively.
+ void emitSplitOps(Type *Ty, Value *&Agg, const Twine &Name) {
+ if (Ty->isSingleValueType())
+ return static_cast<Derived *>(this)->emitFunc(Ty, Agg, Name);
+
+ if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
+ unsigned OldSize = Indices.size();
+ (void)OldSize;
+ for (unsigned Idx = 0, Size = ATy->getNumElements(); Idx != Size;
+ ++Idx) {
+ assert(Indices.size() == OldSize && "Did not return to the old size");
+ Indices.push_back(Idx);
+ GEPIndices.push_back(IRB.getInt32(Idx));
+ emitSplitOps(ATy->getElementType(), Agg, Name + "." + Twine(Idx));
+ GEPIndices.pop_back();
+ Indices.pop_back();
+ }
+ return;
}
- // Transfer alignment and TBAA info if present.
- TL->setAlignment(LI->getAlignment());
- FL->setAlignment(LI->getAlignment());
- if (MDNode *Tag = LI->getMetadata(LLVMContext::MD_tbaa)) {
- TL->setMetadata(LLVMContext::MD_tbaa, Tag);
- FL->setMetadata(LLVMContext::MD_tbaa, Tag);
+ if (StructType *STy = dyn_cast<StructType>(Ty)) {
+ unsigned OldSize = Indices.size();
+ (void)OldSize;
+ for (unsigned Idx = 0, Size = STy->getNumElements(); Idx != Size;
+ ++Idx) {
+ assert(Indices.size() == OldSize && "Did not return to the old size");
+ Indices.push_back(Idx);
+ GEPIndices.push_back(IRB.getInt32(Idx));
+ emitSplitOps(STy->getElementType(Idx), Agg, Name + "." + Twine(Idx));
+ GEPIndices.pop_back();
+ Indices.pop_back();
+ }
+ return;
}
- Value *V = IRB.CreateSelect(SI.getCondition(), TL, FL);
- V->takeName(LI);
- DEBUG(dbgs() << " speculated to: " << *V << "\n");
- LI->replaceAllUsesWith(V);
- Pass.DeadInsts.push_back(LI);
+ llvm_unreachable("Only arrays and structs are aggregate loadable types");
+ }
+ };
+
+ struct LoadOpSplitter : public OpSplitter<LoadOpSplitter> {
+ LoadOpSplitter(Instruction *InsertionPoint, Value *Ptr)
+ : OpSplitter<LoadOpSplitter>(InsertionPoint, Ptr) {}
+
+ /// Emit a leaf load of a single value. This is called at the leaves of the
+ /// recursive emission to actually load values.
+ void emitFunc(Type *Ty, Value *&Agg, const Twine &Name) {
+ assert(Ty->isSingleValueType());
+ // Load the single value and insert it using the indices.
+ Value *Load = IRB.CreateLoad(IRB.CreateInBoundsGEP(Ptr, GEPIndices,
+ Name + ".gep"),
+ Name + ".load");
+ Agg = IRB.CreateInsertValue(Agg, Load, Indices, Name + ".insert");
+ DEBUG(dbgs() << " to: " << *Load << "\n");
}
- if (PI != P.end())
- std::stable_sort(P.use_begin(PI), P.use_end(PI));
+ };
- deleteIfTriviallyDead(OldPtr);
- return NewPtr == &NewAI;
+ bool visitLoadInst(LoadInst &LI) {
+ assert(LI.getPointerOperand() == *U);
+ if (!LI.isSimple() || LI.getType()->isSingleValueType())
+ return false;
+
+ // We have an aggregate being loaded, split it apart.
+ DEBUG(dbgs() << " original: " << LI << "\n");
+ LoadOpSplitter Splitter(&LI, *U);
+ Value *V = UndefValue::get(LI.getType());
+ Splitter.emitSplitOps(LI.getType(), V, LI.getName() + ".fca");
+ LI.replaceAllUsesWith(V);
+ LI.eraseFromParent();
+ return true;
+ }
+
+ struct StoreOpSplitter : public OpSplitter<StoreOpSplitter> {
+ StoreOpSplitter(Instruction *InsertionPoint, Value *Ptr)
+ : OpSplitter<StoreOpSplitter>(InsertionPoint, Ptr) {}
+
+ /// Emit a leaf store of a single value. This is called at the leaves of the
+ /// recursive emission to actually produce stores.
+ void emitFunc(Type *Ty, Value *&Agg, const Twine &Name) {
+ assert(Ty->isSingleValueType());
+ // Extract the single value and store it using the indices.
+ Value *Store = IRB.CreateStore(
+ IRB.CreateExtractValue(Agg, Indices, Name + ".extract"),
+ IRB.CreateInBoundsGEP(Ptr, GEPIndices, Name + ".gep"));
+ (void)Store;
+ DEBUG(dbgs() << " to: " << *Store << "\n");
+ }
+ };
+
+ bool visitStoreInst(StoreInst &SI) {
+ if (!SI.isSimple() || SI.getPointerOperand() != *U)
+ return false;
+ Value *V = SI.getValueOperand();
+ if (V->getType()->isSingleValueType())
+ return false;
+
+ // We have an aggregate being stored, split it apart.
+ DEBUG(dbgs() << " original: " << SI << "\n");
+ StoreOpSplitter Splitter(&SI, *U);
+ Splitter.emitSplitOps(V->getType(), V, V->getName() + ".fca");
+ SI.eraseFromParent();
+ return true;
+ }
+
+ bool visitBitCastInst(BitCastInst &BC) {
+ enqueueUsers(BC);
+ return false;
+ }
+
+ bool visitGetElementPtrInst(GetElementPtrInst &GEPI) {
+ enqueueUsers(GEPI);
+ return false;
+ }
+
+ bool visitPHINode(PHINode &PN) {
+ enqueueUsers(PN);
+ return false;
}
+ bool visitSelectInst(SelectInst &SI) {
+ enqueueUsers(SI);
+ return false;
+ }
};
}
///
/// This recurses through the aggregate type and tries to compute a subtype
/// based on the offset and size. When the offset and size span a sub-section
-/// of an array, it will even compute a new array type for that sub-section.
-static Type *getTypePartition(const TargetData &TD, Type *Ty,
+/// of an array, it will even compute a new array type for that sub-section,
+/// and the same for structs.
+///
+/// Note that this routine is very strict and tries to find a partition of the
+/// type which produces the *exact* right offset and size. It is not forgiving
+/// when the size or offset causes either end of the type-based partition to
+/// be off. Also, this is a best-effort routine. It is reasonable to give up
+/// and not return a type if necessary.
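+///
+/// For example, asking for the 8 bytes at offset 4 of a { i32, i32, i32, i32 }
+/// struct yields a new { i32, i32 } struct type covering exactly those bytes.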
+static Type *getTypePartition(const DataLayout &TD, Type *Ty,
uint64_t Offset, uint64_t Size) {
if (Offset == 0 && TD.getTypeAllocSize(Ty) == Size)
return Ty;
return 0;
const StructLayout *SL = TD.getStructLayout(STy);
- if (Offset > SL->getSizeInBytes())
+ if (Offset >= SL->getSizeInBytes())
return 0;
uint64_t EndOffset = Offset + Size;
if (EndOffset > SL->getSizeInBytes())
return 0;
unsigned Index = SL->getElementContainingOffset(Offset);
- if (SL->getElementOffset(Index) != Offset)
- return 0; // Inside of padding.
Offset -= SL->getElementOffset(Index);
Type *ElementTy = STy->getElementType(Index);
if (Offset > 0 || Size < ElementSize) {
if ((Offset + Size) > ElementSize)
return 0;
- // Bail if this is a poniter element, we can't recurse through them.
- if (ElementTy->isPointerTy())
- return 0;
return getTypePartition(TD, ElementTy, Offset, Size);
}
assert(Offset == 0);
unsigned EndIndex = SL->getElementContainingOffset(EndOffset);
if (Index == EndIndex)
return 0; // Within a single element and its padding.
+
+ // Don't try to form "natural" types if the elements don't line up with the
+ // expected size.
+ // FIXME: We could potentially recurse down through the last element in the
+ // sub-struct to find a natural end point.
+ if (SL->getElementOffset(EndIndex) != EndOffset)
+ return 0;
+
assert(Index < EndIndex);
- assert(Index + EndIndex <= STy->getNumElements());
EE = STy->element_begin() + EndIndex;
}
StructType *SubTy = StructType::get(STy->getContext(), ElementTys,
STy->isPacked());
const StructLayout *SubSL = TD.getStructLayout(SubTy);
- if (Size == SubSL->getSizeInBytes())
- return SubTy;
+ if (Size != SubSL->getSizeInBytes())
+ return 0; // The sub-struct doesn't have quite the size needed.
- // FIXME: We could potentially recurse down through the last element in the
- // sub-struct to find a natural end point.
- return 0;
+ return SubTy;
}
/// \brief Rewrite an alloca partition's users.
AllocaPartitioning &P,
AllocaPartitioning::iterator PI) {
uint64_t AllocaSize = PI->EndOffset - PI->BeginOffset;
- if (P.use_begin(PI) == P.use_end(PI))
+ bool IsLive = false;
+ for (AllocaPartitioning::use_iterator UI = P.use_begin(PI),
+ UE = P.use_end(PI);
+ UI != UE && !IsLive; ++UI)
+ if (UI->U)
+ IsLive = true;
+ if (!IsLive)
return false; // No live uses left of this partition.
+ DEBUG(dbgs() << "Speculating PHIs and selects in partition "
+ << "[" << PI->BeginOffset << "," << PI->EndOffset << ")\n");
+
+ PHIOrSelectSpeculator Speculator(*TD, P, *this);
+ DEBUG(dbgs() << " speculating ");
+ DEBUG(P.print(dbgs(), PI, ""));
+ Speculator.visitUsers(PI);
+
// Try to compute a friendly type for this partition of the alloca. This
// won't always succeed, in which case we fall back to a legal integer type
// or an i8 array of an appropriate size.
AllocaTy = Type::getIntNTy(*C, AllocaSize * 8);
if (!AllocaTy)
AllocaTy = ArrayType::get(Type::getInt8Ty(*C), AllocaSize);
+ assert(TD->getTypeAllocSize(AllocaTy) >= AllocaSize);
// Check for the case where we're going to rewrite to a new alloca of the
// exact same type as the original, and with the same access offsets. In that
assert(PI == P.begin() && "Begin offset is zero on later partition");
NewAI = &AI;
} else {
- // FIXME: The alignment here is overly conservative -- we could in many
- // cases get away with much weaker alignment constraints.
- NewAI = new AllocaInst(AllocaTy, 0, AI.getAlignment(),
+ unsigned Alignment = AI.getAlignment();
+ if (!Alignment) {
+ // The minimum alignment which users can rely on when the explicit
+ // alignment is omitted or zero is that required by the ABI for this
+ // type.
+ Alignment = TD->getABITypeAlignment(AI.getAllocatedType());
+ }
+ Alignment = MinAlign(Alignment, PI->BeginOffset);
+ // If we will get at least this much alignment from the type alone, leave
+ // the alloca's alignment unconstrained.
+ if (Alignment <= TD->getABITypeAlignment(AllocaTy))
+ Alignment = 0;
+ NewAI = new AllocaInst(AllocaTy, 0, Alignment,
AI.getName() + ".sroa." + Twine(PI - P.begin()),
&AI);
++NumNewAllocas;
<< "[" << PI->BeginOffset << "," << PI->EndOffset << ") to: "
<< *NewAI << "\n");
+ // Track the high watermark of the post-promotion worklist. We will reset it
+ // to this point if the alloca is not in fact scheduled for promotion.
+ unsigned PPWOldSize = PostPromotionWorklist.size();
+
AllocaPartitionRewriter Rewriter(*TD, P, PI, *this, AI, *NewAI,
PI->BeginOffset, PI->EndOffset);
DEBUG(dbgs() << " rewriting ");
DEBUG(P.print(dbgs(), PI, ""));
- if (Rewriter.visitUsers(P.use_begin(PI), P.use_end(PI))) {
+ bool Promotable = Rewriter.visitUsers(P.use_begin(PI), P.use_end(PI));
+ if (Promotable) {
DEBUG(dbgs() << " and queuing for promotion\n");
PromotableAllocas.push_back(NewAI);
} else if (NewAI != &AI) {
// alloca which didn't actually change and didn't get promoted.
Worklist.insert(NewAI);
}
+
+ // Drop any post-promotion work items if promotion didn't happen.
+ if (!Promotable)
+ while (PostPromotionWorklist.size() > PPWOldSize)
+ PostPromotionWorklist.pop_back();
+
return true;
}
TD->getTypeAllocSize(AI.getAllocatedType()) == 0)
return false;
- // First check if this is a non-aggregate type that we should simply promote.
- if (!AI.getAllocatedType()->isAggregateType() && isAllocaPromotable(&AI)) {
- DEBUG(dbgs() << " Trivially scalar type, queuing for promotion...\n");
- PromotableAllocas.push_back(&AI);
- return false;
- }
+ bool Changed = false;
+
+ // First, split any FCA loads and stores touching this alloca to promote
+ // better splitting and promotion opportunities.
+ AggLoadStoreRewriter AggRewriter(*TD);
+ Changed |= AggRewriter.rewrite(AI);
// Build the partition set using a recursive instruction-visiting builder.
AllocaPartitioning P(*TD, AI);
DEBUG(P.print(dbgs()));
if (P.isEscaped())
- return false;
-
- // No partitions to split. Leave the dead alloca for a later pass to clean up.
- if (P.begin() == P.end())
- return false;
+ return Changed;
// Delete all the dead users of this alloca before splitting and rewriting it.
- bool Changed = false;
for (AllocaPartitioning::dead_user_iterator DI = P.dead_user_begin(),
DE = P.dead_user_end();
DI != DE; ++DI) {
}
}
+ // No partitions to split. Leave the dead alloca for a later pass to clean up.
+ if (P.begin() == P.end())
+ return Changed;
+
return splitAlloca(AI, P) || Changed;
}
-void SROA::deleteDeadInstructions() {
+/// \brief Delete the dead instructions accumulated in this run.
+///
+/// Recursively deletes the dead instructions we've accumulated. This is done
+/// at the very end to maximize locality of the recursive delete and to
+/// minimize the problems of invalidated instruction pointers as such pointers
+/// are used heavily in the intermediate stages of the algorithm.
+///
+/// We also record the alloca instructions deleted here so that they aren't
+/// subsequently handed to mem2reg to promote.
+void SROA::deleteDeadInstructions(SmallPtrSet<AllocaInst*, 4> &DeletedAllocas) {
DeadSplitInsts.clear();
while (!DeadInsts.empty()) {
Instruction *I = DeadInsts.pop_back_val();
}
}
+/// \brief Promote the allocas, using the best available technique.
+///
+/// This attempts to promote whatever allocas have been identified as viable in
+/// the PromotableAllocas list. If that list is empty, there is nothing to do.
+/// If there is a domtree available, we attempt to promote using the full power
+/// of mem2reg. Otherwise, we build and use the AllocaPromoter above which is
+/// based on the SSAUpdater utilities. This function returns whether any
+/// promotion occurred.
+bool SROA::promoteAllocas(Function &F) {
+ if (PromotableAllocas.empty())
+ return false;
+
+ NumPromoted += PromotableAllocas.size();
+
+ if (DT && !ForceSSAUpdater) {
+ DEBUG(dbgs() << "Promoting allocas with mem2reg...\n");
+ PromoteMemToReg(PromotableAllocas, *DT);
+ PromotableAllocas.clear();
+ return true;
+ }
+
+ DEBUG(dbgs() << "Promoting allocas with SSAUpdater...\n");
+ SSAUpdater SSA;
+ DIBuilder DIB(*F.getParent());
+ SmallVector<Instruction*, 64> Insts;
+
+ for (unsigned Idx = 0, Size = PromotableAllocas.size(); Idx != Size; ++Idx) {
+ AllocaInst *AI = PromotableAllocas[Idx];
+ for (Value::use_iterator UI = AI->use_begin(), UE = AI->use_end();
+ UI != UE;) {
+ Instruction *I = cast<Instruction>(*UI++);
+ // FIXME: Currently the SSAUpdater infrastructure doesn't reason about
+ // lifetime intrinsics and so we strip them (and the bitcasts+GEPs
+ // leading to them) here. Eventually it should use them to optimize the
+ // scalar values produced.
+ if (isa<BitCastInst>(I) || isa<GetElementPtrInst>(I)) {
+ assert(onlyUsedByLifetimeMarkers(I) &&
+ "Found a bitcast used outside of a lifetime marker.");
+ while (!I->use_empty())
+ cast<Instruction>(*I->use_begin())->eraseFromParent();
+ I->eraseFromParent();
+ continue;
+ }
+ if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
+ assert(II->getIntrinsicID() == Intrinsic::lifetime_start ||
+ II->getIntrinsicID() == Intrinsic::lifetime_end);
+ II->eraseFromParent();
+ continue;
+ }
+
+ Insts.push_back(I);
+ }
+ AllocaPromoter(Insts, SSA, *AI, DIB).run(Insts);
+ Insts.clear();
+ }
+
+ PromotableAllocas.clear();
+ return true;
+}
+
namespace {
/// \brief A predicate to test whether an alloca belongs to a set.
class IsAllocaInSet {
const SetType &Set;
public:
+ typedef AllocaInst *argument_type;
+
IsAllocaInSet(const SetType &Set) : Set(Set) {}
- bool operator()(AllocaInst *AI) { return Set.count(AI); }
+ bool operator()(AllocaInst *AI) const { return Set.count(AI); }
};
}
bool SROA::runOnFunction(Function &F) {
DEBUG(dbgs() << "SROA function: " << F.getName() << "\n");
C = &F.getContext();
- TD = getAnalysisIfAvailable<TargetData>();
+ TD = getAnalysisIfAvailable<DataLayout>();
if (!TD) {
DEBUG(dbgs() << " Skipping SROA -- no target data!\n");
return false;
}
- DT = &getAnalysis<DominatorTree>();
+ DT = getAnalysisIfAvailable<DominatorTree>();
BasicBlock &EntryBB = F.getEntryBlock();
for (BasicBlock::iterator I = EntryBB.begin(), E = llvm::prior(EntryBB.end());
Worklist.insert(AI);
bool Changed = false;
- while (!Worklist.empty()) {
- Changed |= runOnAlloca(*Worklist.pop_back_val());
- deleteDeadInstructions();
- if (!DeletedAllocas.empty()) {
- PromotableAllocas.erase(std::remove_if(PromotableAllocas.begin(),
- PromotableAllocas.end(),
- IsAllocaInSet(DeletedAllocas)),
- PromotableAllocas.end());
- DeletedAllocas.clear();
+ // A set of deleted alloca instruction pointers which should be removed from
+ // the list of promotable allocas.
+ SmallPtrSet<AllocaInst *, 4> DeletedAllocas;
+
+ do {
+ while (!Worklist.empty()) {
+ Changed |= runOnAlloca(*Worklist.pop_back_val());
+ deleteDeadInstructions(DeletedAllocas);
+
+ // Remove the deleted allocas from various lists so that we don't try to
+ // continue processing them.
+ if (!DeletedAllocas.empty()) {
+ Worklist.remove_if(IsAllocaInSet(DeletedAllocas));
+ PostPromotionWorklist.remove_if(IsAllocaInSet(DeletedAllocas));
+ PromotableAllocas.erase(std::remove_if(PromotableAllocas.begin(),
+ PromotableAllocas.end(),
+ IsAllocaInSet(DeletedAllocas)),
+ PromotableAllocas.end());
+ DeletedAllocas.clear();
+ }
}
- }
- if (!PromotableAllocas.empty()) {
- DEBUG(dbgs() << "Promoting allocas with mem2reg...\n");
- PromoteMemToReg(PromotableAllocas, *DT);
- Changed = true;
- NumPromoted += PromotableAllocas.size();
- PromotableAllocas.clear();
- }
+ Changed |= promoteAllocas(F);
+
+ Worklist = PostPromotionWorklist;
+ PostPromotionWorklist.clear();
+ } while (!Worklist.empty());
return Changed;
}
void SROA::getAnalysisUsage(AnalysisUsage &AU) const {
- AU.addRequired<DominatorTree>();
+ if (RequiresDomTree)
+ AU.addRequired<DominatorTree>();
AU.setPreservesCFG();
}