//
// The LLVM Compiler Infrastructure
//
-// This file was developed by the LLVM research group and is distributed under
-// the University of Illinois Open Source License. See LICENSE.TXT for details.
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
+#include "llvm/LLVMContext.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
+#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
+#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/SmallVector.h"
namespace {
struct VISIBILITY_HIDDEN SROA : public FunctionPass {
static char ID; // Pass identification, replacement for typeid
- SROA(signed T = -1) : FunctionPass((intptr_t)&ID) {
+ explicit SROA(signed T = -1) : FunctionPass(&ID) {
if (T == -1)
SRThreshold = 128;
      else
        SRThreshold = T;
}
private:
+ TargetData *TD;
+
/// AllocaInfo - When analyzing uses of an alloca instruction, this captures
/// information about the uses. All these fields are initialized to false
/// and set to true when something is learned.
    struct AllocaInfo {
      /// isUnsafe - This is set to true if the alloca cannot be SROA'd.
bool isUnsafe : 1;
- /// needsCanon - This is set to true if there is some use of the alloca
- /// that requires canonicalization.
- bool needsCanon : 1;
+ /// needsCleanup - This is set to true if there is some use of the alloca
+ /// that requires cleanup.
+ bool needsCleanup : 1;
/// isMemCpySrc - This is true if this aggregate is memcpy'd from.
bool isMemCpySrc : 1;
bool isMemCpyDst : 1;
AllocaInfo()
- : isUnsafe(false), needsCanon(false),
+ : isUnsafe(false), needsCleanup(false),
isMemCpySrc(false), isMemCpyDst(false) {}
};
void DoScalarReplacement(AllocationInst *AI,
std::vector<AllocationInst*> &WorkList);
- void CanonicalizeAllocaUsers(AllocationInst *AI);
+ void CleanupGEP(GetElementPtrInst *GEP);
+ void CleanupAllocaUsers(AllocationInst *AI);
AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocationInst *Base);
void RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
SmallVector<AllocaInst*, 32> &NewElts);
- const Type *CanConvertToScalar(Value *V, bool &IsNotTrivial);
- void ConvertToScalar(AllocationInst *AI, const Type *Ty);
- void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset);
+ void RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
+ AllocationInst *AI,
+ SmallVector<AllocaInst*, 32> &NewElts);
+ void RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocationInst *AI,
+ SmallVector<AllocaInst*, 32> &NewElts);
+ void RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI,
+ SmallVector<AllocaInst*, 32> &NewElts);
+
+ bool CanConvertToScalar(Value *V, bool &IsNotTrivial, const Type *&VecTy,
+ bool &SawVec, uint64_t Offset, unsigned AllocaSize);
+ void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset);
+ Value *ConvertScalar_ExtractValue(Value *NV, const Type *ToType,
+ uint64_t Offset, IRBuilder<> &Builder);
+ Value *ConvertScalar_InsertValue(Value *StoredVal, Value *ExistingVal,
+ uint64_t Offset, IRBuilder<> &Builder);
static Instruction *isOnlyCopiedFromConstantGlobal(AllocationInst *AI);
};
-
- char SROA::ID = 0;
- RegisterPass<SROA> X("scalarrepl", "Scalar Replacement of Aggregates");
}
+char SROA::ID = 0;
+static RegisterPass<SROA> X("scalarrepl", "Scalar Replacement of Aggregates");
+
// Public interface to the ScalarReplAggregates pass
FunctionPass *llvm::createScalarReplAggregatesPass(signed int Threshold) {
  return new SROA(Threshold);
}

bool SROA::runOnFunction(Function &F) {
+ TD = &getAnalysis<TargetData>();
+
bool Changed = performPromotion(F);
while (1) {
bool LocalChange = performScalarRepl(F);
if (Allocas.empty()) break;
- PromoteMemToReg(Allocas, DT, DF);
+ PromoteMemToReg(Allocas, DT, DF, Context);
NumPromoted += Allocas.size();
Changed = true;
}
return Changed;
}
+/// getNumSAElements - Return the number of elements in the specified struct or
+/// array.
+static uint64_t getNumSAElements(const Type *T) {
+ if (const StructType *ST = dyn_cast<StructType>(T))
+ return ST->getNumElements();
+ return cast<ArrayType>(T)->getNumElements();
+}
+
// performScalarRepl - This algorithm is a simple worklist driven algorithm,
// which runs on all of the malloc/alloca instructions in the function, removing
// them if they are only used by getelementptr instructions.
if (AllocationInst *A = dyn_cast<AllocationInst>(I))
WorkList.push_back(A);
- const TargetData &TD = getAnalysis<TargetData>();
-
// Process the worklist
bool Changed = false;
while (!WorkList.empty()) {
AI->eraseFromParent();
continue;
}
-
- // If we can turn this aggregate value (potentially with casts) into a
- // simple scalar value that can be mem2reg'd into a register value.
- bool IsNotTrivial = false;
- if (const Type *ActualType = CanConvertToScalar(AI, IsNotTrivial))
- if (IsNotTrivial && ActualType != Type::VoidTy) {
- ConvertToScalar(AI, ActualType);
- Changed = true;
- continue;
- }
+ // If this alloca is impossible for us to promote, reject it early.
+ if (AI->isArrayAllocation() || !AI->getAllocatedType()->isSized())
+ continue;
+
+ // Check to see if this allocation is only modified by a memcpy/memmove from
+ // a constant global. If this is the case, we can change all users to use
+ // the constant global instead. This is commonly produced by the CFE by
+ // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
+ // is only subsequently read.
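+    // In IR, the pattern being matched is roughly (a sketch; names are
+    // hypothetical):
+    //   @init = internal constant [4 x i32] [i32 1, i32 2, i32 3, i32 4]
+    //   %A = alloca [4 x i32]
+    //   call void @llvm.memcpy.i32(i8* %A.i8, i8* %init.i8, i32 16, i32 4)
+    // after which every read of %A can be redirected to @init.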
+ if (Instruction *TheCopy = isOnlyCopiedFromConstantGlobal(AI)) {
+ DOUT << "Found alloca equal to global: " << *AI;
+ DOUT << " memcpy = " << *TheCopy;
+ Constant *TheSrc = cast<Constant>(TheCopy->getOperand(2));
+ AI->replaceAllUsesWith(
+ Context->getConstantExprBitCast(TheSrc, AI->getType()));
+ TheCopy->eraseFromParent(); // Don't mutate the global.
+ AI->eraseFromParent();
+ ++NumGlobals;
+ Changed = true;
+ continue;
+ }
+
// Check to see if we can perform the core SROA transformation. We cannot
// transform the allocation instruction if it is an array allocation
// (allocations OF arrays are ok though), and an allocation of a scalar
// value cannot be decomposed at all.
- if (!AI->isArrayAllocation() &&
- (isa<StructType>(AI->getAllocatedType()) ||
+ uint64_t AllocaSize = TD->getTypeAllocSize(AI->getAllocatedType());
+
+ // Do not promote any struct whose size is too big.
+ if (AllocaSize > SRThreshold) continue;
+
+ if ((isa<StructType>(AI->getAllocatedType()) ||
isa<ArrayType>(AI->getAllocatedType())) &&
- AI->getAllocatedType()->isSized() &&
- TD.getTypeSize(AI->getAllocatedType()) < SRThreshold) {
+ // Do not promote any struct into more than "32" separate vars.
+ getNumSAElements(AI->getAllocatedType()) <= SRThreshold/4) {
// Check that all of the users of the allocation are capable of being
// transformed.
switch (isSafeAllocaToScalarRepl(AI)) {
case 0: // Not safe to scalar replace.
break;
case 1: // Safe, but requires cleanup/canonicalizations first
- CanonicalizeAllocaUsers(AI);
+ CleanupAllocaUsers(AI);
// FALL THROUGH.
case 3: // Safe to scalar replace.
DoScalarReplacement(AI, WorkList);
continue;
}
}
-
- // Check to see if this allocation is only modified by a memcpy/memmove from
- // a constant global. If this is the case, we can change all users to use
- // the constant global instead. This is commonly produced by the CFE by
- // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
- // is only subsequently read.
- if (Instruction *TheCopy = isOnlyCopiedFromConstantGlobal(AI)) {
- DOUT << "Found alloca equal to global: " << *AI;
- DOUT << " memcpy = " << *TheCopy;
- Constant *TheSrc = cast<Constant>(TheCopy->getOperand(2));
- AI->replaceAllUsesWith(ConstantExpr::getBitCast(TheSrc, AI->getType()));
- TheCopy->eraseFromParent(); // Don't mutate the global.
+
+ // If we can turn this aggregate value (potentially with casts) into a
+ // simple scalar value that can be mem2reg'd into a register value.
+ // IsNotTrivial tracks whether this is something that mem2reg could have
+ // promoted itself. If so, we don't want to transform it needlessly. Note
+ // that we can't just check based on the type: the alloca may be of an i32
+ // but that has pointer arithmetic to set byte 3 of it or something.
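+    // For instance (a sketch; names are hypothetical):
+    //   %X = alloca i32
+    //   %P = bitcast i32* %X to i8*
+    //   %B3 = getelementptr i8* %P, i32 3
+    //   store i8 %v, i8* %B3
+    // mem2reg cannot promote %X as-is, but we can still model it as one i32.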
+ bool IsNotTrivial = false;
+ const Type *VectorTy = 0;
+ bool HadAVector = false;
+ if (CanConvertToScalar(AI, IsNotTrivial, VectorTy, HadAVector,
+ 0, unsigned(AllocaSize)) && IsNotTrivial) {
+ AllocaInst *NewAI;
+ // If we were able to find a vector type that can handle this with
+ // insert/extract elements, and if there was at least one use that had
+ // a vector type, promote this to a vector. We don't want to promote
+ // random stuff that doesn't use vectors (e.g. <9 x double>) because then
+ // we just get a lot of insert/extracts. If at least one vector is
+ // involved, then we probably really do have a union of vector/array.
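+      // For example, a union of <4 x float> and [4 x float] that is loaded
+      // as a <4 x float> but stored one float at a time becomes a single
+      // <4 x float> alloca whose element stores are insertelements.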
+ if (VectorTy && isa<VectorType>(VectorTy) && HadAVector) {
+ DOUT << "CONVERT TO VECTOR: " << *AI << " TYPE = " << *VectorTy <<"\n";
+
+ // Create and insert the vector alloca.
+ NewAI = new AllocaInst(VectorTy, 0, "", AI->getParent()->begin());
+ ConvertUsesToScalar(AI, NewAI, 0);
+ } else {
+ DOUT << "CONVERT TO SCALAR INTEGER: " << *AI << "\n";
+
+ // Create and insert the integer alloca.
+ const Type *NewTy = Context->getIntegerType(AllocaSize*8);
+ NewAI = new AllocaInst(NewTy, 0, "", AI->getParent()->begin());
+ ConvertUsesToScalar(AI, NewAI, 0);
+ }
+ NewAI->takeName(AI);
AI->eraseFromParent();
- ++NumGlobals;
+ ++NumConverted;
Changed = true;
continue;
}
-
- // Otherwise, couldn't process this.
+
+ // Otherwise, couldn't process this alloca.
}
return Changed;
continue;
}
+ // Replace:
+ // %res = load { i32, i32 }* %alloc
+ // with:
+ // %load.0 = load i32* %alloc.0
+    //  %insert.0 = insertvalue { i32, i32 } undef, i32 %load.0, 0
+ // %load.1 = load i32* %alloc.1
+ // %insert = insertvalue { i32, i32 } %insert.0, i32 %load.1, 1
+ // (Also works for arrays instead of structs)
+ if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
+ Value *Insert = Context->getUndef(LI->getType());
+ for (unsigned i = 0, e = ElementAllocas.size(); i != e; ++i) {
+ Value *Load = new LoadInst(ElementAllocas[i], "load", LI);
+ Insert = InsertValueInst::Create(Insert, Load, i, "insert", LI);
+ }
+ LI->replaceAllUsesWith(Insert);
+ LI->eraseFromParent();
+ continue;
+ }
+
+ // Replace:
+ // store { i32, i32 } %val, { i32, i32 }* %alloc
+ // with:
+ // %val.0 = extractvalue { i32, i32 } %val, 0
+ // store i32 %val.0, i32* %alloc.0
+ // %val.1 = extractvalue { i32, i32 } %val, 1
+ // store i32 %val.1, i32* %alloc.1
+ // (Also works for arrays instead of structs)
+ if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
+ Value *Val = SI->getOperand(0);
+ for (unsigned i = 0, e = ElementAllocas.size(); i != e; ++i) {
+ Value *Extract = ExtractValueInst::Create(Val, i, Val->getName(), SI);
+ new StoreInst(Extract, ElementAllocas[i], SI);
+ }
+ SI->eraseFromParent();
+ continue;
+ }
+
GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User);
// We now know that the GEP is of the form: GEP <ptr>, 0, <cst>
unsigned Idx =
// expanded itself once the worklist is rerun.
//
SmallVector<Value*, 8> NewArgs;
- NewArgs.push_back(Constant::getNullValue(Type::Int32Ty));
+ NewArgs.push_back(Context->getNullValue(Type::Int32Ty));
NewArgs.append(GEPI->op_begin()+3, GEPI->op_end());
- RepValue = new GetElementPtrInst(AllocaToUse, &NewArgs[0],
- NewArgs.size(), "", GEPI);
+ RepValue = GetElementPtrInst::Create(AllocaToUse, NewArgs.begin(),
+ NewArgs.end(), "", GEPI);
RepValue->takeName(GEPI);
}
// If this GEP is to the start of the aggregate, check for memcpys.
- if (Idx == 0) {
- bool IsStartOfAggregateGEP = true;
- for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i) {
- if (!isa<ConstantInt>(GEPI->getOperand(i))) {
- IsStartOfAggregateGEP = false;
- break;
- }
- if (!cast<ConstantInt>(GEPI->getOperand(i))->isZero()) {
- IsStartOfAggregateGEP = false;
- break;
- }
- }
-
- if (IsStartOfAggregateGEP)
- RewriteBitCastUserOfAlloca(GEPI, AI, ElementAllocas);
- }
-
+ if (Idx == 0 && GEPI->hasAllZeroIndices())
+ RewriteBitCastUserOfAlloca(GEPI, AI, ElementAllocas);
// Move all of the users over to the new GEP.
GEPI->replaceAllUsesWith(RepValue);
// Using pointer arithmetic to navigate the array.
return MarkUnsafe(Info);
- if (AreAllZeroIndices) {
- for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i) {
- if (!isa<ConstantInt>(GEP->getOperand(i)) ||
- !cast<ConstantInt>(GEP->getOperand(i))->isZero()) {
- AreAllZeroIndices = false;
- break;
- }
- }
- }
+ if (AreAllZeroIndices)
+ AreAllZeroIndices = GEP->hasAllZeroIndices();
}
isSafeElementUse(GEP, AreAllZeroIndices, AI, Info);
if (Info.isUnsafe) return;
if (BitCastInst *C = dyn_cast<BitCastInst>(User))
return isSafeUseOfBitCastedAllocation(C, AI, Info);
+ if (LoadInst *LI = dyn_cast<LoadInst>(User))
+ if (!LI->isVolatile())
+      return; // Loads (returning a first-class aggregate) are always rewritable
+
+ if (StoreInst *SI = dyn_cast<StoreInst>(User))
+ if (!SI->isVolatile() && SI->getOperand(0) != AI)
+      return; // Store is ok if storing INTO the pointer, not storing the ptr.
+
GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User);
if (GEPI == 0)
return MarkUnsafe(Info);
// The GEP is not safe to transform if not of the form "GEP <ptr>, 0, <cst>".
if (I == E ||
- I.getOperand() != Constant::getNullValue(I.getOperand()->getType())) {
+ I.getOperand() != Context->getNullValue(I.getOperand()->getType())) {
return MarkUnsafe(Info);
}
bool IsAllZeroIndices = true;
- // If this is a use of an array allocation, do a bit more checking for sanity.
+ // If the first index is a non-constant index into an array, see if we can
+ // handle it as a special case.
if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
- uint64_t NumElements = AT->getNumElements();
-
- if (ConstantInt *Idx = dyn_cast<ConstantInt>(I.getOperand())) {
- IsAllZeroIndices &= Idx->isZero();
-
- // Check to make sure that index falls within the array. If not,
- // something funny is going on, so we won't do the optimization.
- //
- if (Idx->getZExtValue() >= NumElements)
- return MarkUnsafe(Info);
-
- // We cannot scalar repl this level of the array unless any array
- // sub-indices are in-range constants. In particular, consider:
- // A[0][i]. We cannot know that the user isn't doing invalid things like
- // allowing i to index an out-of-range subscript that accesses A[1].
- //
- // Scalar replacing *just* the outer index of the array is probably not
- // going to be a win anyway, so just give up.
- for (++I; I != E && (isa<ArrayType>(*I) || isa<VectorType>(*I)); ++I) {
- uint64_t NumElements;
- if (const ArrayType *SubArrayTy = dyn_cast<ArrayType>(*I))
- NumElements = SubArrayTy->getNumElements();
- else
- NumElements = cast<VectorType>(*I)->getNumElements();
-
- ConstantInt *IdxVal = dyn_cast<ConstantInt>(I.getOperand());
- if (!IdxVal) return MarkUnsafe(Info);
- if (IdxVal->getZExtValue() >= NumElements)
- return MarkUnsafe(Info);
- IsAllZeroIndices &= IdxVal->isZero();
- }
-
- } else {
+ if (!isa<ConstantInt>(I.getOperand())) {
IsAllZeroIndices = 0;
+ uint64_t NumElements = AT->getNumElements();
      // If this is an array index and the index is not a constant, we cannot
      // promote... that is unless the array has exactly one or two elements
      // in it, in which case we CAN promote it, but we have to canonicalize
      // this out if this is the only problem.
if ((NumElements == 1 || NumElements == 2) &&
AllUsersAreLoads(GEPI)) {
- Info.needsCanon = true;
+ Info.needsCleanup = true;
return; // Canonicalization required!
}
return MarkUnsafe(Info);
}
}
+
+ // Walk through the GEP type indices, checking the types that this indexes
+ // into.
+ for (; I != E; ++I) {
+ // Ignore struct elements, no extra checking needed for these.
+ if (isa<StructType>(*I))
+ continue;
+
+ ConstantInt *IdxVal = dyn_cast<ConstantInt>(I.getOperand());
+ if (!IdxVal) return MarkUnsafe(Info);
+ // Are all indices still zero?
+ IsAllZeroIndices &= IdxVal->isZero();
+
+ if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
+ // This GEP indexes an array. Verify that this is an in-range constant
+ // integer. Specifically, consider A[0][i]. We cannot know that the user
+ // isn't doing invalid things like allowing i to index an out-of-range
+ // subscript that accesses A[1]. Because of this, we have to reject SROA
+ // of any accesses into structs where any of the components are variables.
+ if (IdxVal->getZExtValue() >= AT->getNumElements())
+ return MarkUnsafe(Info);
+ } else if (const VectorType *VT = dyn_cast<VectorType>(*I)) {
+ if (IdxVal->getZExtValue() >= VT->getNumElements())
+ return MarkUnsafe(Info);
+ }
+ }
+
// If there are any non-simple uses of this getelementptr, make sure to reject
// them.
return isSafeElementUse(GEPI, IsAllZeroIndices, AI, Info);
if (!Length) return MarkUnsafe(Info);
// If not the whole aggregate, give up.
- const TargetData &TD = getAnalysis<TargetData>();
- if (Length->getZExtValue() != TD.getTypeSize(AI->getType()->getElementType()))
+ if (Length->getZExtValue() !=
+ TD->getTypeAllocSize(AI->getType()->getElementType()))
return MarkUnsafe(Info);
// We only know about memcpy/memset/memmove.
- if (!isa<MemCpyInst>(MI) && !isa<MemSetInst>(MI) && !isa<MemMoveInst>(MI))
+ if (!isa<MemIntrinsic>(MI))
return MarkUnsafe(Info);
// Otherwise, we can transform it. Determine whether this is a memcpy/set
isSafeUseOfBitCastedAllocation(BCU, AI, Info);
} else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(UI)) {
isSafeMemIntrinsicOnAllocation(MI, AI, UI.getOperandNo(), Info);
- } else {
+ } else if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
+ if (SI->isVolatile())
+ return MarkUnsafe(Info);
+
+ // If storing the entire alloca in one chunk through a bitcasted pointer
+ // to integer, we can transform it. This happens (for example) when you
+ // cast a {i32,i32}* to i64* and store through it. This is similar to the
+ // memcpy case and occurs in various "byval" cases and emulated memcpys.
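+      // A sketch of the pattern being matched here:
+      //   %P = bitcast {i32,i32}* %A to i64*
+      //   store i64 %V, i64* %P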
+ if (isa<IntegerType>(SI->getOperand(0)->getType()) &&
+ TD->getTypeAllocSize(SI->getOperand(0)->getType()) ==
+ TD->getTypeAllocSize(AI->getType()->getElementType())) {
+ Info.isMemCpyDst = true;
+ continue;
+ }
+ return MarkUnsafe(Info);
+ } else if (LoadInst *LI = dyn_cast<LoadInst>(UI)) {
+ if (LI->isVolatile())
+ return MarkUnsafe(Info);
+
+ // If loading the entire alloca in one chunk through a bitcasted pointer
+ // to integer, we can transform it. This happens (for example) when you
+ // cast a {i32,i32}* to i64* and load through it. This is similar to the
+ // memcpy case and occurs in various "byval" cases and emulated memcpys.
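+      // A sketch of the pattern being matched here:
+      //   %P = bitcast {i32,i32}* %A to i64*
+      //   %V = load i64* %P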
+ if (isa<IntegerType>(LI->getType()) &&
+ TD->getTypeAllocSize(LI->getType()) ==
+ TD->getTypeAllocSize(AI->getType()->getElementType())) {
+ Info.isMemCpySrc = true;
+ continue;
+ }
+ return MarkUnsafe(Info);
+ } else if (isa<DbgInfoIntrinsic>(UI)) {
+      // If one user is a DbgInfoIntrinsic, check whether all users are
+      // DbgInfoIntrinsics.
+ if (OnlyUsedByDbgInfoIntrinsics(BC)) {
+ Info.needsCleanup = true;
+ return;
+      } else
+        MarkUnsafe(Info);
+    } else {
return MarkUnsafe(Info);
}
if (Info.isUnsafe) return;
/// instead.
void SROA::RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
SmallVector<AllocaInst*, 32> &NewElts) {
- Constant *Zero = Constant::getNullValue(Type::Int32Ty);
- const TargetData &TD = getAnalysis<TargetData>();
-
Value::use_iterator UI = BCInst->use_begin(), UE = BCInst->use_end();
while (UI != UE) {
- if (BitCastInst *BCU = dyn_cast<BitCastInst>(*UI)) {
+ Instruction *User = cast<Instruction>(*UI++);
+ if (BitCastInst *BCU = dyn_cast<BitCastInst>(User)) {
RewriteBitCastUserOfAlloca(BCU, AI, NewElts);
- ++UI;
- BCU->eraseFromParent();
+ if (BCU->use_empty()) BCU->eraseFromParent();
continue;
}
- // Otherwise, must be memcpy/memmove/memset of the entire aggregate. Split
- // into one per element.
- MemIntrinsic *MI = dyn_cast<MemIntrinsic>(*UI);
-
- // If it's not a mem intrinsic, it must be some other user of a gep of the
- // first pointer. Just leave these alone.
- if (!MI) {
- ++UI;
+ if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
+ // This must be memcpy/memmove/memset of the entire aggregate.
+ // Split into one per element.
+ RewriteMemIntrinUserOfAlloca(MI, BCInst, AI, NewElts);
+ continue;
+ }
+
+ if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
+ // If this is a store of the entire alloca from an integer, rewrite it.
+ RewriteStoreUserOfWholeAlloca(SI, AI, NewElts);
+ continue;
+ }
+
+ if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
+ // If this is a load of the entire alloca to an integer, rewrite it.
+ RewriteLoadUserOfWholeAlloca(LI, AI, NewElts);
continue;
}
- // If this is a memcpy/memmove, construct the other pointer as the
- // appropriate type.
- Value *OtherPtr = 0;
- if (MemCpyInst *MCI = dyn_cast<MemCpyInst>(MI)) {
- if (BCInst == MCI->getRawDest())
- OtherPtr = MCI->getRawSource();
- else {
- assert(BCInst == MCI->getRawSource());
- OtherPtr = MCI->getRawDest();
- }
- } else if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
- if (BCInst == MMI->getRawDest())
- OtherPtr = MMI->getRawSource();
- else {
- assert(BCInst == MMI->getRawSource());
- OtherPtr = MMI->getRawDest();
- }
+ // Otherwise it must be some other user of a gep of the first pointer. Just
+ // leave these alone.
+ continue;
+ }
+}
+
+/// RewriteMemIntrinUserOfAlloca - MI is a memcpy/memset/memmove from or to AI.
+/// Rewrite it to copy or set the elements of the scalarized memory.
+void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
+ AllocationInst *AI,
+ SmallVector<AllocaInst*, 32> &NewElts) {
+
+ // If this is a memcpy/memmove, construct the other pointer as the
+ // appropriate type. The "Other" pointer is the pointer that goes to memory
+ // that doesn't have anything to do with the alloca that we are promoting. For
+ // memset, this Value* stays null.
+ Value *OtherPtr = 0;
+ unsigned MemAlignment = MI->getAlignment();
+  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) { // memmove/memcpy
+ if (BCInst == MTI->getRawDest())
+ OtherPtr = MTI->getRawSource();
+ else {
+ assert(BCInst == MTI->getRawSource());
+ OtherPtr = MTI->getRawDest();
}
+ }
+
+ // If there is an other pointer, we want to convert it to the same pointer
+ // type as AI has, so we can GEP through it safely.
+ if (OtherPtr) {
+ // It is likely that OtherPtr is a bitcast, if so, remove it.
+ if (BitCastInst *BC = dyn_cast<BitCastInst>(OtherPtr))
+ OtherPtr = BC->getOperand(0);
+ // All zero GEPs are effectively bitcasts.
+ if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(OtherPtr))
+ if (GEP->hasAllZeroIndices())
+ OtherPtr = GEP->getOperand(0);
+
+ if (ConstantExpr *BCE = dyn_cast<ConstantExpr>(OtherPtr))
+ if (BCE->getOpcode() == Instruction::BitCast)
+ OtherPtr = BCE->getOperand(0);
+
+ // If the pointer is not the right type, insert a bitcast to the right
+ // type.
+ if (OtherPtr->getType() != AI->getType())
+ OtherPtr = new BitCastInst(OtherPtr, AI->getType(), OtherPtr->getName(),
+ MI);
+ }
+
+ // Process each element of the aggregate.
+ Value *TheFn = MI->getOperand(0);
+ const Type *BytePtrTy = MI->getRawDest()->getType();
+ bool SROADest = MI->getRawDest() == BCInst;
+
+ Constant *Zero = Context->getNullValue(Type::Int32Ty);
+
+ for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
+ // If this is a memcpy/memmove, emit a GEP of the other element address.
+ Value *OtherElt = 0;
+ unsigned OtherEltAlign = MemAlignment;
- // If there is an other pointer, we want to convert it to the same pointer
- // type as AI has, so we can GEP through it.
if (OtherPtr) {
- // It is likely that OtherPtr is a bitcast, if so, remove it.
- if (BitCastInst *BC = dyn_cast<BitCastInst>(OtherPtr))
- OtherPtr = BC->getOperand(0);
- if (ConstantExpr *BCE = dyn_cast<ConstantExpr>(OtherPtr))
- if (BCE->getOpcode() == Instruction::BitCast)
- OtherPtr = BCE->getOperand(0);
+ Value *Idx[2] = { Zero, Context->getConstantInt(Type::Int32Ty, i) };
+ OtherElt = GetElementPtrInst::Create(OtherPtr, Idx, Idx + 2,
+ OtherPtr->getNameStr()+"."+utostr(i),
+ MI);
+ uint64_t EltOffset;
+ const PointerType *OtherPtrTy = cast<PointerType>(OtherPtr->getType());
+ if (const StructType *ST =
+ dyn_cast<StructType>(OtherPtrTy->getElementType())) {
+ EltOffset = TD->getStructLayout(ST)->getElementOffset(i);
+ } else {
+ const Type *EltTy =
+ cast<SequentialType>(OtherPtr->getType())->getElementType();
+ EltOffset = TD->getTypeAllocSize(EltTy)*i;
+ }
- // If the pointer is not the right type, insert a bitcast to the right
- // type.
- if (OtherPtr->getType() != AI->getType())
- OtherPtr = new BitCastInst(OtherPtr, AI->getType(), OtherPtr->getName(),
- MI);
+ // The alignment of the other pointer is the guaranteed alignment of the
+ // element, which is affected by both the known alignment of the whole
+ // mem intrinsic and the alignment of the element. If the alignment of
+ // the memcpy (f.e.) is 32 but the element is at a 4-byte offset, then the
+ // known alignment is just 4 bytes.
+ OtherEltAlign = (unsigned)MinAlign(OtherEltAlign, EltOffset);
}
-
- // Process each element of the aggregate.
- Value *TheFn = MI->getOperand(0);
- const Type *BytePtrTy = MI->getRawDest()->getType();
- bool SROADest = MI->getRawDest() == BCInst;
-
- for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
- // If this is a memcpy/memmove, emit a GEP of the other element address.
- Value *OtherElt = 0;
- if (OtherPtr) {
- OtherElt = new GetElementPtrInst(OtherPtr, Zero,
- ConstantInt::get(Type::Int32Ty, i),
- OtherPtr->getNameStr()+"."+utostr(i),
- MI);
+
+ Value *EltPtr = NewElts[i];
+ const Type *EltTy = cast<PointerType>(EltPtr->getType())->getElementType();
+
+ // If we got down to a scalar, insert a load or store as appropriate.
+ if (EltTy->isSingleValueType()) {
+ if (isa<MemTransferInst>(MI)) {
+ if (SROADest) {
+ // From Other to Alloca.
+ Value *Elt = new LoadInst(OtherElt, "tmp", false, OtherEltAlign, MI);
+ new StoreInst(Elt, EltPtr, MI);
+ } else {
+ // From Alloca to Other.
+ Value *Elt = new LoadInst(EltPtr, "tmp", MI);
+ new StoreInst(Elt, OtherElt, false, OtherEltAlign, MI);
+ }
+ continue;
}
-
- Value *EltPtr = NewElts[i];
- const Type *EltTy =cast<PointerType>(EltPtr->getType())->getElementType();
+ assert(isa<MemSetInst>(MI));
- // If we got down to a scalar, insert a load or store as appropriate.
- if (EltTy->isFirstClassType()) {
- if (isa<MemCpyInst>(MI) || isa<MemMoveInst>(MI)) {
- Value *Elt = new LoadInst(SROADest ? OtherElt : EltPtr, "tmp",
- MI);
- new StoreInst(Elt, SROADest ? EltPtr : OtherElt, MI);
- continue;
+ // If the stored element is zero (common case), just store a null
+ // constant.
+ Constant *StoreVal;
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(MI->getOperand(2))) {
+ if (CI->isZero()) {
+ StoreVal = Context->getNullValue(EltTy); // 0.0, null, 0, <0,0>
} else {
- assert(isa<MemSetInst>(MI));
-
- // If the stored element is zero (common case), just store a null
- // constant.
- Constant *StoreVal;
- if (ConstantInt *CI = dyn_cast<ConstantInt>(MI->getOperand(2))) {
- if (CI->isZero()) {
- StoreVal = Constant::getNullValue(EltTy); // 0.0, null, 0, <0,0>
- } else {
- // If EltTy is a vector type, get the element type.
- const Type *ValTy = EltTy;
- if (const VectorType *VTy = dyn_cast<VectorType>(ValTy))
- ValTy = VTy->getElementType();
-
- // Construct an integer with the right value.
- unsigned EltSize = TD.getTypeSize(ValTy);
- APInt OneVal(EltSize*8, CI->getZExtValue());
- APInt TotalVal(OneVal);
- // Set each byte.
- for (unsigned i = 0; i != EltSize-1; ++i) {
- TotalVal = TotalVal.shl(8);
- TotalVal |= OneVal;
- }
-
- // Convert the integer value to the appropriate type.
- StoreVal = ConstantInt::get(TotalVal);
- if (isa<PointerType>(ValTy))
- StoreVal = ConstantExpr::getIntToPtr(StoreVal, ValTy);
- else if (ValTy->isFloatingPoint())
- StoreVal = ConstantExpr::getBitCast(StoreVal, ValTy);
- assert(StoreVal->getType() == ValTy && "Type mismatch!");
-
- // If the requested value was a vector constant, create it.
- if (EltTy != ValTy) {
- unsigned NumElts = cast<VectorType>(ValTy)->getNumElements();
- SmallVector<Constant*, 16> Elts(NumElts, StoreVal);
- StoreVal = ConstantVector::get(&Elts[0], NumElts);
- }
- }
- new StoreInst(StoreVal, EltPtr, MI);
- continue;
+ // If EltTy is a vector type, get the element type.
+ const Type *ValTy = EltTy->getScalarType();
+
+ // Construct an integer with the right value.
+ unsigned EltSize = TD->getTypeSizeInBits(ValTy);
+ APInt OneVal(EltSize, CI->getZExtValue());
+ APInt TotalVal(OneVal);
+ // Set each byte.
+ for (unsigned i = 0; 8*i < EltSize; ++i) {
+ TotalVal = TotalVal.shl(8);
+ TotalVal |= OneVal;
+ }
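+        // e.g. a memset value of 0xAB into an i32 element produces
+        // 0xABABABAB.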
+
+ // Convert the integer value to the appropriate type.
+ StoreVal = Context->getConstantInt(TotalVal);
+ if (isa<PointerType>(ValTy))
+ StoreVal = Context->getConstantExprIntToPtr(StoreVal, ValTy);
+ else if (ValTy->isFloatingPoint())
+ StoreVal = Context->getConstantExprBitCast(StoreVal, ValTy);
+ assert(StoreVal->getType() == ValTy && "Type mismatch!");
+
+ // If the requested value was a vector constant, create it.
+ if (EltTy != ValTy) {
+ unsigned NumElts = cast<VectorType>(ValTy)->getNumElements();
+ SmallVector<Constant*, 16> Elts(NumElts, StoreVal);
+ StoreVal = Context->getConstantVector(&Elts[0], NumElts);
}
- // Otherwise, if we're storing a byte variable, use a memset call for
- // this element.
}
+ new StoreInst(StoreVal, EltPtr, MI);
+ continue;
+ }
+ // Otherwise, if we're storing a byte variable, use a memset call for
+ // this element.
+ }
+
+ // Cast the element pointer to BytePtrTy.
+ if (EltPtr->getType() != BytePtrTy)
+ EltPtr = new BitCastInst(EltPtr, BytePtrTy, EltPtr->getNameStr(), MI);
+
+ // Cast the other pointer (if we have one) to BytePtrTy.
+ if (OtherElt && OtherElt->getType() != BytePtrTy)
+ OtherElt = new BitCastInst(OtherElt, BytePtrTy,OtherElt->getNameStr(),
+ MI);
+
+ unsigned EltSize = TD->getTypeAllocSize(EltTy);
+
+ // Finally, insert the meminst for this element.
+ if (isa<MemTransferInst>(MI)) {
+ Value *Ops[] = {
+ SROADest ? EltPtr : OtherElt, // Dest ptr
+ SROADest ? OtherElt : EltPtr, // Src ptr
+ Context->getConstantInt(MI->getOperand(3)->getType(), EltSize), // Size
+ Context->getConstantInt(Type::Int32Ty, OtherEltAlign) // Align
+ };
+ CallInst::Create(TheFn, Ops, Ops + 4, "", MI);
+ } else {
+ assert(isa<MemSetInst>(MI));
+ Value *Ops[] = {
+ EltPtr, MI->getOperand(2), // Dest, Value,
+ Context->getConstantInt(MI->getOperand(3)->getType(), EltSize), // Size
+ Zero // Align
+ };
+ CallInst::Create(TheFn, Ops, Ops + 4, "", MI);
+ }
+ }
+ MI->eraseFromParent();
+}
+
+/// RewriteStoreUserOfWholeAlloca - We found a store of an integer that
+/// overwrites the entire allocation. Extract out the pieces of the stored
+/// integer and store them individually.
+void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI,
+ AllocationInst *AI,
+ SmallVector<AllocaInst*, 32> &NewElts){
+ // Extract each element out of the integer according to its structure offset
+ // and store the element value to the individual alloca.
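+  // For example (a little-endian sketch), an i64 store over {i32,i32} is
+  // split as:
+  //   %f0 = trunc i64 %V to i32         ; stored to element alloca 0
+  //   %s1 = lshr i64 %V, 32
+  //   %f1 = trunc i64 %s1 to i32        ; stored to element alloca 1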
+ Value *SrcVal = SI->getOperand(0);
+ const Type *AllocaEltTy = AI->getType()->getElementType();
+ uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy);
+
+ // If this isn't a store of an integer to the whole alloca, it may be a store
+ // to the first element. Just ignore the store in this case and normal SROA
+ // will handle it.
+ if (!isa<IntegerType>(SrcVal->getType()) ||
+ TD->getTypeAllocSizeInBits(SrcVal->getType()) != AllocaSizeBits)
+ return;
+ // Handle tail padding by extending the operand
+ if (TD->getTypeSizeInBits(SrcVal->getType()) != AllocaSizeBits)
+ SrcVal = new ZExtInst(SrcVal,
+ Context->getIntegerType(AllocaSizeBits), "", SI);
+
+ DOUT << "PROMOTING STORE TO WHOLE ALLOCA: " << *AI << *SI;
+
+ // There are two forms here: AI could be an array or struct. Both cases
+ // have different ways to compute the element offset.
+ if (const StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
+ const StructLayout *Layout = TD->getStructLayout(EltSTy);
+
+ for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
+ // Get the number of bits to shift SrcVal to get the value.
+ const Type *FieldTy = EltSTy->getElementType(i);
+ uint64_t Shift = Layout->getElementOffsetInBits(i);
+
+ if (TD->isBigEndian())
+ Shift = AllocaSizeBits-Shift-TD->getTypeAllocSizeInBits(FieldTy);
+
+ Value *EltVal = SrcVal;
+ if (Shift) {
+ Value *ShiftVal = Context->getConstantInt(EltVal->getType(), Shift);
+ EltVal = BinaryOperator::CreateLShr(EltVal, ShiftVal,
+ "sroa.store.elt", SI);
}
- // Cast the element pointer to BytePtrTy.
- if (EltPtr->getType() != BytePtrTy)
- EltPtr = new BitCastInst(EltPtr, BytePtrTy, EltPtr->getNameStr(), MI);
+ // Truncate down to an integer of the right size.
+ uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);
+
+ // Ignore zero sized fields like {}, they obviously contain no data.
+ if (FieldSizeBits == 0) continue;
+
+ if (FieldSizeBits != AllocaSizeBits)
+ EltVal = new TruncInst(EltVal,
+ Context->getIntegerType(FieldSizeBits), "", SI);
+ Value *DestField = NewElts[i];
+ if (EltVal->getType() == FieldTy) {
+ // Storing to an integer field of this size, just do it.
+ } else if (FieldTy->isFloatingPoint() || isa<VectorType>(FieldTy)) {
+ // Bitcast to the right element type (for fp/vector values).
+ EltVal = new BitCastInst(EltVal, FieldTy, "", SI);
+ } else {
+ // Otherwise, bitcast the dest pointer (for aggregates).
+ DestField = new BitCastInst(DestField,
+ Context->getPointerTypeUnqual(EltVal->getType()),
+ "", SI);
+ }
+ new StoreInst(EltVal, DestField, SI);
+ }
+
+ } else {
+ const ArrayType *ATy = cast<ArrayType>(AllocaEltTy);
+ const Type *ArrayEltTy = ATy->getElementType();
+ uint64_t ElementOffset = TD->getTypeAllocSizeInBits(ArrayEltTy);
+ uint64_t ElementSizeBits = TD->getTypeSizeInBits(ArrayEltTy);
+
+ uint64_t Shift;
- // Cast the other pointer (if we have one) to BytePtrTy.
- if (OtherElt && OtherElt->getType() != BytePtrTy)
- OtherElt = new BitCastInst(OtherElt, BytePtrTy,OtherElt->getNameStr(),
- MI);
+ if (TD->isBigEndian())
+ Shift = AllocaSizeBits-ElementOffset;
+ else
+ Shift = 0;
- unsigned EltSize = TD.getTypeSize(EltTy);
-
- // Finally, insert the meminst for this element.
- if (isa<MemCpyInst>(MI) || isa<MemMoveInst>(MI)) {
- Value *Ops[] = {
- SROADest ? EltPtr : OtherElt, // Dest ptr
- SROADest ? OtherElt : EltPtr, // Src ptr
- ConstantInt::get(MI->getOperand(3)->getType(), EltSize), // Size
- Zero // Align
- };
- new CallInst(TheFn, Ops, 4, "", MI);
+ for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
+ // Ignore zero sized fields like {}, they obviously contain no data.
+ if (ElementSizeBits == 0) continue;
+
+ Value *EltVal = SrcVal;
+ if (Shift) {
+ Value *ShiftVal = Context->getConstantInt(EltVal->getType(), Shift);
+ EltVal = BinaryOperator::CreateLShr(EltVal, ShiftVal,
+ "sroa.store.elt", SI);
+ }
+
+ // Truncate down to an integer of the right size.
+ if (ElementSizeBits != AllocaSizeBits)
+ EltVal = new TruncInst(EltVal,
+ Context->getIntegerType(ElementSizeBits),"",SI);
+ Value *DestField = NewElts[i];
+ if (EltVal->getType() == ArrayEltTy) {
+ // Storing to an integer field of this size, just do it.
+ } else if (ArrayEltTy->isFloatingPoint() || isa<VectorType>(ArrayEltTy)) {
+ // Bitcast to the right element type (for fp/vector values).
+ EltVal = new BitCastInst(EltVal, ArrayEltTy, "", SI);
} else {
- assert(isa<MemSetInst>(MI));
- Value *Ops[] = {
- EltPtr, MI->getOperand(2), // Dest, Value,
- ConstantInt::get(MI->getOperand(3)->getType(), EltSize), // Size
- Zero // Align
- };
- new CallInst(TheFn, Ops, 4, "", MI);
+ // Otherwise, bitcast the dest pointer (for aggregates).
+ DestField = new BitCastInst(DestField,
+ Context->getPointerTypeUnqual(EltVal->getType()),
+ "", SI);
}
+ new StoreInst(EltVal, DestField, SI);
+
+ if (TD->isBigEndian())
+ Shift -= ElementOffset;
+ else
+ Shift += ElementOffset;
}
+ }
+
+ SI->eraseFromParent();
+}
- // Finally, MI is now dead, as we've modified its actions to occur on all of
- // the elements of the aggregate.
- ++UI;
- MI->eraseFromParent();
+/// RewriteLoadUserOfWholeAlloca - We found a load of the entire allocation to
+/// an integer. Load the individual pieces to form the aggregate value.
+void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI,
+ SmallVector<AllocaInst*, 32> &NewElts) {
+ // Extract each element out of the NewElts according to its structure offset
+ // and form the result value.
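+  // For example (a little-endian sketch), an i64 load of {i32,i32} is
+  // rebuilt as:
+  //   %v0 = zext i32 %elt0 to i64
+  //   %v1 = zext i32 %elt1 to i64
+  //   %sh = shl i64 %v1, 32
+  //   %r  = or i64 %sh, %v0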
+ const Type *AllocaEltTy = AI->getType()->getElementType();
+ uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy);
+
+ // If this isn't a load of the whole alloca to an integer, it may be a load
+ // of the first element. Just ignore the load in this case and normal SROA
+ // will handle it.
+ if (!isa<IntegerType>(LI->getType()) ||
+ TD->getTypeAllocSizeInBits(LI->getType()) != AllocaSizeBits)
+ return;
+
+ DOUT << "PROMOTING LOAD OF WHOLE ALLOCA: " << *AI << *LI;
+
+ // There are two forms here: AI could be an array or struct. Both cases
+ // have different ways to compute the element offset.
+ const StructLayout *Layout = 0;
+ uint64_t ArrayEltBitOffset = 0;
+ if (const StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
+ Layout = TD->getStructLayout(EltSTy);
+ } else {
+ const Type *ArrayEltTy = cast<ArrayType>(AllocaEltTy)->getElementType();
+ ArrayEltBitOffset = TD->getTypeAllocSizeInBits(ArrayEltTy);
+ }
+
+ Value *ResultVal =
+ Context->getNullValue(Context->getIntegerType(AllocaSizeBits));
+
+ for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
+ // Load the value from the alloca. If the NewElt is an aggregate, cast
+ // the pointer to an integer of the same size before doing the load.
+ Value *SrcField = NewElts[i];
+ const Type *FieldTy =
+ cast<PointerType>(SrcField->getType())->getElementType();
+ uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);
+
+ // Ignore zero sized fields like {}, they obviously contain no data.
+ if (FieldSizeBits == 0) continue;
+
+ const IntegerType *FieldIntTy = Context->getIntegerType(FieldSizeBits);
+ if (!isa<IntegerType>(FieldTy) && !FieldTy->isFloatingPoint() &&
+ !isa<VectorType>(FieldTy))
+ SrcField = new BitCastInst(SrcField,
+ Context->getPointerTypeUnqual(FieldIntTy),
+ "", LI);
+ SrcField = new LoadInst(SrcField, "sroa.load.elt", LI);
+
+ // If SrcField is a fp or vector of the right size but that isn't an
+ // integer type, bitcast to an integer so we can shift it.
+ if (SrcField->getType() != FieldIntTy)
+ SrcField = new BitCastInst(SrcField, FieldIntTy, "", LI);
+
+ // Zero extend the field to be the same size as the final alloca so that
+ // we can shift and insert it.
+ if (SrcField->getType() != ResultVal->getType())
+ SrcField = new ZExtInst(SrcField, ResultVal->getType(), "", LI);
+
+ // Determine the number of bits to shift SrcField.
+ uint64_t Shift;
+ if (Layout) // Struct case.
+ Shift = Layout->getElementOffsetInBits(i);
+ else // Array case.
+ Shift = i*ArrayEltBitOffset;
+
+ if (TD->isBigEndian())
+ Shift = AllocaSizeBits-Shift-FieldIntTy->getBitWidth();
+
+ if (Shift) {
+ Value *ShiftVal = Context->getConstantInt(SrcField->getType(), Shift);
+ SrcField = BinaryOperator::CreateShl(SrcField, ShiftVal, "", LI);
+ }
+
+ ResultVal = BinaryOperator::CreateOr(SrcField, ResultVal, "", LI);
}
+
+ // Handle tail padding by truncating the result
+ if (TD->getTypeSizeInBits(LI->getType()) != AllocaSizeBits)
+ ResultVal = new TruncInst(ResultVal, LI->getType(), "", LI);
+
+ LI->replaceAllUsesWith(ResultVal);
+ LI->eraseFromParent();
}
-/// HasStructPadding - Return true if the specified type has any structure
-/// padding, false otherwise.
-static bool HasStructPadding(const Type *Ty, const TargetData &TD) {
+
+/// HasPadding - Return true if the specified type has any structure or
+/// alignment padding, false otherwise.
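+/// For example, { i8, i32 } typically has three bytes of padding between its
+/// fields, and x86_fp80 has tail padding (80 bits of data in a larger alloc
+/// size); a memcpy'd aggregate containing such padding is not promoted.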
+static bool HasPadding(const Type *Ty, const TargetData &TD) {
if (const StructType *STy = dyn_cast<StructType>(Ty)) {
const StructLayout *SL = TD.getStructLayout(STy);
unsigned PrevFieldBitOffset = 0;
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- unsigned FieldBitOffset = SL->getElementOffset(i)*8;
-
+ unsigned FieldBitOffset = SL->getElementOffsetInBits(i);
+
// Padding in sub-elements?
- if (HasStructPadding(STy->getElementType(i), TD))
+ if (HasPadding(STy->getElementType(i), TD))
return true;
-
+
// Check to see if there is any padding between this element and the
// previous one.
if (i) {
- unsigned PrevFieldEnd =
+ unsigned PrevFieldEnd =
PrevFieldBitOffset+TD.getTypeSizeInBits(STy->getElementType(i-1));
if (PrevFieldEnd < FieldBitOffset)
return true;
}
-
+
PrevFieldBitOffset = FieldBitOffset;
}
-
+
// Check for tail padding.
if (unsigned EltCount = STy->getNumElements()) {
unsigned PrevFieldEnd = PrevFieldBitOffset +
TD.getTypeSizeInBits(STy->getElementType(EltCount-1));
- if (PrevFieldEnd < SL->getSizeInBytes()*8)
+ if (PrevFieldEnd < SL->getSizeInBits())
return true;
}
} else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
- return HasStructPadding(ATy->getElementType(), TD);
+ return HasPadding(ATy->getElementType(), TD);
+ } else if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
+ return HasPadding(VTy->getElementType(), TD);
}
- return false;
+ return TD.getTypeSizeInBits(Ty) != TD.getTypeAllocSizeInBits(Ty);
}
/// isSafeStructAllocaToScalarRepl - Check to see if the specified allocation of
// types, but may actually be used. In these cases, we refuse to promote the
// struct.
if (Info.isMemCpySrc && Info.isMemCpyDst &&
- HasStructPadding(AI->getType()->getElementType(),
- getAnalysis<TargetData>()))
+ HasPadding(AI->getType()->getElementType(), *TD))
return 0;
-
+
// If we require cleanup, return 1, otherwise return 3.
- return Info.needsCanon ? 1 : 3;
+ return Info.needsCleanup ? 1 : 3;
}
-/// CanonicalizeAllocaUsers - If SROA reported that it can promote the specified
+/// CleanupGEP - GEP is used by an Alloca, which can be promoted after the GEP
+/// is canonicalized here.
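+/// For the two-element array case, each load of the variable-index GEP is
+/// rewritten as (a sketch):
+///   %v0 = load %A.0                   ; GEP with constant index 0
+///   %v1 = load %A.1                   ; GEP with constant index 1
+///   %r  = select i1 %isone, %v1, %v0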
+void SROA::CleanupGEP(GetElementPtrInst *GEPI) {
+ gep_type_iterator I = gep_type_begin(GEPI);
+ ++I;
+
+ const ArrayType *AT = dyn_cast<ArrayType>(*I);
+ if (!AT)
+ return;
+
+ uint64_t NumElements = AT->getNumElements();
+
+ if (isa<ConstantInt>(I.getOperand()))
+ return;
+
+ if (NumElements == 1) {
+ GEPI->setOperand(2, Context->getNullValue(Type::Int32Ty));
+ return;
+ }
+
+ assert(NumElements == 2 && "Unhandled case!");
+ // All users of the GEP must be loads. At each use of the GEP, insert
+ // two loads of the appropriate indexed GEP and select between them.
+ Value *IsOne = new ICmpInst(GEPI, ICmpInst::ICMP_NE, I.getOperand(),
+ Context->getNullValue(I.getOperand()->getType()),
+ "isone");
+ // Insert the new GEP instructions, which are properly indexed.
+ SmallVector<Value*, 8> Indices(GEPI->op_begin()+1, GEPI->op_end());
+ Indices[1] = Context->getNullValue(Type::Int32Ty);
+ Value *ZeroIdx = GetElementPtrInst::Create(GEPI->getOperand(0),
+ Indices.begin(),
+ Indices.end(),
+ GEPI->getName()+".0", GEPI);
+ Indices[1] = Context->getConstantInt(Type::Int32Ty, 1);
+ Value *OneIdx = GetElementPtrInst::Create(GEPI->getOperand(0),
+ Indices.begin(),
+ Indices.end(),
+ GEPI->getName()+".1", GEPI);
+ // Replace all loads of the variable index GEP with loads from both
+ // indexes and a select.
+ while (!GEPI->use_empty()) {
+ LoadInst *LI = cast<LoadInst>(GEPI->use_back());
+ Value *Zero = new LoadInst(ZeroIdx, LI->getName()+".0", LI);
+ Value *One = new LoadInst(OneIdx , LI->getName()+".1", LI);
+ Value *R = SelectInst::Create(IsOne, One, Zero, LI->getName(), LI);
+ LI->replaceAllUsesWith(R);
+ LI->eraseFromParent();
+ }
+ GEPI->eraseFromParent();
+}
+
+
+/// CleanupAllocaUsers - If SROA reported that it can promote the specified
/// allocation, but only if cleaned up, perform the cleanups required.
-void SROA::CanonicalizeAllocaUsers(AllocationInst *AI) {
+void SROA::CleanupAllocaUsers(AllocationInst *AI) {
// At this point, we know that the end result will be SROA'd and promoted, so
// we can insert ugly code if required so long as sroa+mem2reg will clean it
// up.
for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
UI != E; ) {
- GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(*UI++);
- if (!GEPI) continue;
- gep_type_iterator I = gep_type_begin(GEPI);
- ++I;
-
- if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
- uint64_t NumElements = AT->getNumElements();
-
- if (!isa<ConstantInt>(I.getOperand())) {
- if (NumElements == 1) {
- GEPI->setOperand(2, Constant::getNullValue(Type::Int32Ty));
- } else {
- assert(NumElements == 2 && "Unhandled case!");
- // All users of the GEP must be loads. At each use of the GEP, insert
- // two loads of the appropriate indexed GEP and select between them.
- Value *IsOne = new ICmpInst(ICmpInst::ICMP_NE, I.getOperand(),
- Constant::getNullValue(I.getOperand()->getType()),
- "isone", GEPI);
- // Insert the new GEP instructions, which are properly indexed.
- SmallVector<Value*, 8> Indices(GEPI->op_begin()+1, GEPI->op_end());
- Indices[1] = Constant::getNullValue(Type::Int32Ty);
- Value *ZeroIdx = new GetElementPtrInst(GEPI->getOperand(0),
- &Indices[0], Indices.size(),
- GEPI->getName()+".0", GEPI);
- Indices[1] = ConstantInt::get(Type::Int32Ty, 1);
- Value *OneIdx = new GetElementPtrInst(GEPI->getOperand(0),
- &Indices[0], Indices.size(),
- GEPI->getName()+".1", GEPI);
- // Replace all loads of the variable index GEP with loads from both
- // indexes and a select.
- while (!GEPI->use_empty()) {
- LoadInst *LI = cast<LoadInst>(GEPI->use_back());
- Value *Zero = new LoadInst(ZeroIdx, LI->getName()+".0", LI);
- Value *One = new LoadInst(OneIdx , LI->getName()+".1", LI);
- Value *R = new SelectInst(IsOne, One, Zero, LI->getName(), LI);
- LI->replaceAllUsesWith(R);
- LI->eraseFromParent();
- }
- GEPI->eraseFromParent();
+ User *U = *UI++;
+ if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U))
+ CleanupGEP(GEPI);
+ else {
+ Instruction *I = cast<Instruction>(U);
+ SmallVector<DbgInfoIntrinsic *, 2> DbgInUses;
+ if (!isa<StoreInst>(I) && OnlyUsedByDbgInfoIntrinsics(I, &DbgInUses)) {
+ // Safe to remove debug info uses.
+ while (!DbgInUses.empty()) {
+ DbgInfoIntrinsic *DI = DbgInUses.back(); DbgInUses.pop_back();
+ DI->eraseFromParent();
}
+ I->eraseFromParent();
}
}
}
}
-/// MergeInType - Add the 'In' type to the accumulated type so far. If the
-/// types are incompatible, return true, otherwise update Accum and return
-/// false.
+/// MergeInType - Add the 'In' type to the accumulated type (Accum) so far at
+/// the offset specified by Offset (which is specified in bytes).
///
-/// There are three cases we handle here:
-/// 1) An effectively-integer union, where the pieces are stored into as
-/// smaller integers (common with byte swap and other idioms).
-/// 2) A union of vector types of the same size and potentially its elements.
+/// There are two cases we handle here:
+/// 1) A union of vector types of the same size and potentially its elements.
/// Here we turn element accesses into insert/extract element operations.
-/// 3) A union of scalar types, such as int/float or int/pointer. Here we
-/// merge together into integers, allowing the xform to work with #1 as
-/// well.
-static bool MergeInType(const Type *In, const Type *&Accum,
- const TargetData &TD) {
- // If this is our first type, just use it.
- const VectorType *PTy;
- if (Accum == Type::VoidTy || In == Accum) {
- Accum = In;
- } else if (In == Type::VoidTy) {
- // Noop.
- } else if (In->isInteger() && Accum->isInteger()) { // integer union.
- // Otherwise pick whichever type is larger.
- if (cast<IntegerType>(In)->getBitWidth() >
- cast<IntegerType>(Accum)->getBitWidth())
- Accum = In;
- } else if (isa<PointerType>(In) && isa<PointerType>(Accum)) {
- // Pointer unions just stay as one of the pointers.
- } else if (isa<VectorType>(In) || isa<VectorType>(Accum)) {
- if ((PTy = dyn_cast<VectorType>(Accum)) &&
- PTy->getElementType() == In) {
- // Accum is a vector, and we are accessing an element: ok.
- } else if ((PTy = dyn_cast<VectorType>(In)) &&
- PTy->getElementType() == Accum) {
- // In is a vector, and accum is an element: ok, remember In.
- Accum = In;
- } else if ((PTy = dyn_cast<VectorType>(In)) && isa<VectorType>(Accum) &&
- PTy->getBitWidth() == cast<VectorType>(Accum)->getBitWidth()) {
- // Two vectors of the same size: keep Accum.
- } else {
- // Cannot insert an short into a <4 x int> or handle
- // <2 x int> -> <4 x int>
- return true;
- }
- } else {
- // Pointer/FP/Integer unions merge together as integers.
- switch (Accum->getTypeID()) {
- case Type::PointerTyID: Accum = TD.getIntPtrType(); break;
- case Type::FloatTyID: Accum = Type::Int32Ty; break;
- case Type::DoubleTyID: Accum = Type::Int64Ty; break;
- default:
- assert(Accum->isInteger() && "Unknown FP type!");
- break;
- }
-
- switch (In->getTypeID()) {
- case Type::PointerTyID: In = TD.getIntPtrType(); break;
- case Type::FloatTyID: In = Type::Int32Ty; break;
- case Type::DoubleTyID: In = Type::Int64Ty; break;
- default:
- assert(In->isInteger() && "Unknown FP type!");
- break;
+/// This promotes a <4 x float> with a store of float to the third element
+/// into a <4 x float> that uses insert element.
+/// 2) A fully general blob of memory, which we turn into some (potentially
+/// large) integer type with extract and insert operations where the loads
+/// and stores would mutate the memory.
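+/// For example, an i32 alloca accessed with two i16 loads and stores (a
+/// common byte-swap idiom) stays one i32 whose halves are extracted and
+/// inserted with shifts, truncates, and ors.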
+static void MergeInType(const Type *In, uint64_t Offset, const Type *&VecTy,
+ unsigned AllocaSize, const TargetData &TD,
+ LLVMContext *Context) {
+ // If this could be contributing to a vector, analyze it.
+ if (VecTy != Type::VoidTy) { // either null or a vector type.
+
+ // If the In type is a vector that is the same size as the alloca, see if it
+ // matches the existing VecTy.
+ if (const VectorType *VInTy = dyn_cast<VectorType>(In)) {
+ if (VInTy->getBitWidth()/8 == AllocaSize && Offset == 0) {
+ // If we're storing/loading a vector of the right size, allow it as a
+ // vector. If this the first vector we see, remember the type so that
+ // we know the element size.
+ if (VecTy == 0)
+ VecTy = VInTy;
+ return;
+ }
+ } else if (In == Type::FloatTy || In == Type::DoubleTy ||
+ (isa<IntegerType>(In) && In->getPrimitiveSizeInBits() >= 8 &&
+ isPowerOf2_32(In->getPrimitiveSizeInBits()))) {
+ // If we're accessing something that could be an element of a vector, see
+ // if the implied vector agrees with what we already have and if Offset is
+ // compatible with it.
+ unsigned EltSize = In->getPrimitiveSizeInBits()/8;
+ if (Offset % EltSize == 0 &&
+ AllocaSize % EltSize == 0 &&
+ (VecTy == 0 ||
+ cast<VectorType>(VecTy)->getElementType()
+ ->getPrimitiveSizeInBits()/8 == EltSize)) {
+ if (VecTy == 0)
+ VecTy = Context->getVectorType(In, AllocaSize/EltSize);
+ return;
+ }
}
- return MergeInType(In, Accum, TD);
}
- return false;
-}
-
-/// getUIntAtLeastAsBitAs - Return an unsigned integer type that is at least
-/// as big as the specified type. If there is no suitable type, this returns
-/// null.
-const Type *getUIntAtLeastAsBitAs(unsigned NumBits) {
- if (NumBits > 64) return 0;
- if (NumBits > 32) return Type::Int64Ty;
- if (NumBits > 16) return Type::Int32Ty;
- if (NumBits > 8) return Type::Int16Ty;
- return Type::Int8Ty;
+
+ // Otherwise, we have a case that we can't handle with an optimized vector
+ // form. We can still turn this into a large integer.
+ VecTy = Type::VoidTy;
}
-/// CanConvertToScalar - V is a pointer. If we can convert the pointee to a
-/// single scalar integer type, return that type. Further, if the use is not
-/// a completely trivial use that mem2reg could promote, set IsNotTrivial. If
-/// there are no uses of this pointer, return Type::VoidTy to differentiate from
-/// failure.
+/// CanConvertToScalar - V is a pointer. If we can convert the pointee and all
+/// its accesses to use a single vector type, return true, and set VecTy to
+/// the new type. If we could convert the alloca into a single promotable
+/// integer, return true but set VecTy to VoidTy. Further, if the use is not a
+/// completely trivial use that mem2reg could promote, set IsNotTrivial. Offset
+/// is the current offset from the base of the alloca being analyzed.
///
-const Type *SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial) {
- const Type *UsedType = Type::VoidTy; // No uses, no forced type.
- const TargetData &TD = getAnalysis<TargetData>();
- const PointerType *PTy = cast<PointerType>(V->getType());
-
+/// If we see at least one access to the value as a vector type, set the
+/// SawVec flag.
+///
+bool SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial, const Type *&VecTy,
+ bool &SawVec, uint64_t Offset,
+ unsigned AllocaSize) {
for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
Instruction *User = cast<Instruction>(*UI);
if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
- if (MergeInType(LI->getType(), UsedType, TD))
- return 0;
-
- } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
+ // Don't break volatile loads.
+ if (LI->isVolatile())
+ return false;
+ MergeInType(LI->getType(), Offset, VecTy, AllocaSize, *TD, Context);
+ SawVec |= isa<VectorType>(LI->getType());
+ continue;
+ }
+
+ if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
// Storing the pointer, not into the value?
- if (SI->getOperand(0) == V) return 0;
-
- // NOTE: We could handle storing of FP imms into integers here!
+      if (SI->getOperand(0) == V || SI->isVolatile()) return false;
+ MergeInType(SI->getOperand(0)->getType(), Offset,
+ VecTy, AllocaSize, *TD, Context);
+ SawVec |= isa<VectorType>(SI->getOperand(0)->getType());
+ continue;
+ }
+
+ if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
+ if (!CanConvertToScalar(BCI, IsNotTrivial, VecTy, SawVec, Offset,
+ AllocaSize))
+ return false;
+ IsNotTrivial = true;
+ continue;
+ }
+
+ if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
+      // If this is a GEP with variable indices, we can't handle it.
+ if (!GEP->hasAllConstantIndices())
+ return false;
- if (MergeInType(SI->getOperand(0)->getType(), UsedType, TD))
- return 0;
- } else if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
+ // Compute the offset that this GEP adds to the pointer.
+ SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
+ uint64_t GEPOffset = TD->getIndexedOffset(GEP->getOperand(0)->getType(),
+ &Indices[0], Indices.size());
+ // See if all uses can be converted.
+ if (!CanConvertToScalar(GEP, IsNotTrivial, VecTy, SawVec,Offset+GEPOffset,
+ AllocaSize))
+ return false;
IsNotTrivial = true;
- const Type *SubTy = CanConvertToScalar(CI, IsNotTrivial);
- if (!SubTy || MergeInType(SubTy, UsedType, TD)) return 0;
- } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
- // Check to see if this is stepping over an element: GEP Ptr, int C
- if (GEP->getNumOperands() == 2 && isa<ConstantInt>(GEP->getOperand(1))) {
- unsigned Idx = cast<ConstantInt>(GEP->getOperand(1))->getZExtValue();
- unsigned ElSize = TD.getTypeSize(PTy->getElementType());
- unsigned BitOffset = Idx*ElSize*8;
- if (BitOffset > 64 || !isPowerOf2_32(ElSize)) return 0;
-
+ continue;
+ }
+
+ // If this is a constant sized memset of a constant value (e.g. 0) we can
+ // handle it.
+ if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) {
+ // Store of constant value and constant size.
+ if (isa<ConstantInt>(MSI->getValue()) &&
+ isa<ConstantInt>(MSI->getLength())) {
IsNotTrivial = true;
- const Type *SubElt = CanConvertToScalar(GEP, IsNotTrivial);
- if (SubElt == 0) return 0;
- if (SubElt != Type::VoidTy && SubElt->isInteger()) {
- const Type *NewTy =
- getUIntAtLeastAsBitAs(TD.getTypeSize(SubElt)*8+BitOffset);
- if (NewTy == 0 || MergeInType(NewTy, UsedType, TD)) return 0;
- continue;
- }
- } else if (GEP->getNumOperands() == 3 &&
- isa<ConstantInt>(GEP->getOperand(1)) &&
- isa<ConstantInt>(GEP->getOperand(2)) &&
- cast<ConstantInt>(GEP->getOperand(1))->isZero()) {
- // We are stepping into an element, e.g. a structure or an array:
- // GEP Ptr, int 0, uint C
- const Type *AggTy = PTy->getElementType();
- unsigned Idx = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();
-
- if (const ArrayType *ATy = dyn_cast<ArrayType>(AggTy)) {
- if (Idx >= ATy->getNumElements()) return 0; // Out of range.
- } else if (const VectorType *VectorTy = dyn_cast<VectorType>(AggTy)) {
- // Getting an element of the vector.
- if (Idx >= VectorTy->getNumElements()) return 0; // Out of range.
-
- // Merge in the vector type.
- if (MergeInType(VectorTy, UsedType, TD)) return 0;
-
- const Type *SubTy = CanConvertToScalar(GEP, IsNotTrivial);
- if (SubTy == 0) return 0;
-
- if (SubTy != Type::VoidTy && MergeInType(SubTy, UsedType, TD))
- return 0;
+ continue;
+ }
+ }
- // We'll need to change this to an insert/extract element operation.
+ // If this is a memcpy or memmove into or out of the whole allocation, we
+ // can handle it like a load or store of the scalar type.
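+ // For example (illustrative IR), with a 16-byte alloca and Offset == 0:
+ // call void @llvm.memcpy.i32(i8* %dst, i8* %src, i32 16, i32 4)
+ // can be handled, but a partial copy (e.g. a length of 8) cannot.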
+ if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(User)) {
+ if (ConstantInt *Len = dyn_cast<ConstantInt>(MTI->getLength()))
+ if (Len->getZExtValue() == AllocaSize && Offset == 0) {
IsNotTrivial = true;
- continue; // Everything looks ok
-
- } else if (isa<StructType>(AggTy)) {
- // Structs are always ok.
- } else {
- return 0;
+ continue;
}
- const Type *NTy = getUIntAtLeastAsBitAs(TD.getTypeSize(AggTy)*8);
- if (NTy == 0 || MergeInType(NTy, UsedType, TD)) return 0;
- const Type *SubTy = CanConvertToScalar(GEP, IsNotTrivial);
- if (SubTy == 0) return 0;
- if (SubTy != Type::VoidTy && MergeInType(SubTy, UsedType, TD))
- return 0;
- continue; // Everything looks ok
- }
- return 0;
- } else {
- // Cannot handle this!
- return 0;
}
- }
-
- return UsedType;
-}
+
+ // Ignore debug info intrinsics.
+ if (isa<DbgInfoIntrinsic>(User))
+ continue;
-/// ConvertToScalar - The specified alloca passes the CanConvertToScalar
-/// predicate and is non-trivial. Convert it to something that can be trivially
-/// promoted into a register by mem2reg.
-void SROA::ConvertToScalar(AllocationInst *AI, const Type *ActualTy) {
- DOUT << "CONVERT TO SCALAR: " << *AI << " TYPE = "
- << *ActualTy << "\n";
- ++NumConverted;
-
- BasicBlock *EntryBlock = AI->getParent();
- assert(EntryBlock == &EntryBlock->getParent()->getEntryBlock() &&
- "Not in the entry block!");
- EntryBlock->getInstList().remove(AI); // Take the alloca out of the program.
+ // Otherwise, we cannot handle this!
+ return false;
+ }
- // Create and insert the alloca.
- AllocaInst *NewAI = new AllocaInst(ActualTy, 0, AI->getName(),
- EntryBlock->begin());
- ConvertUsesToScalar(AI, NewAI, 0);
- delete AI;
+ return true;
}
/// ConvertUsesToScalar - Convert all of the users of Ptr to use the new alloca
/// directly. This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right. By the end of this, there should be no uses of Ptr.
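/// For example (illustrative): loads of the two i16 fields of a {i16, i16}
/// alloca that has been converted to a single i32 become shift-and-truncate
/// extractions from one i32 value.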
-void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset) {
- const TargetData &TD = getAnalysis<TargetData>();
+void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset) {
while (!Ptr->use_empty()) {
Instruction *User = cast<Instruction>(Ptr->use_back());
+
+ if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
+ ConvertUsesToScalar(CI, NewAI, Offset);
+ CI->eraseFromParent();
+ continue;
+ }
+
+ if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
+ // Compute the offset that this GEP adds to the pointer.
+ SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
+ uint64_t GEPOffset = TD->getIndexedOffset(GEP->getOperand(0)->getType(),
+ &Indices[0], Indices.size());
+ ConvertUsesToScalar(GEP, NewAI, Offset+GEPOffset*8);
+ GEP->eraseFromParent();
+ continue;
+ }
+
+ IRBuilder<> Builder(User->getParent(), User);
if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
// The load is a bit extract from NewAI shifted right by Offset bits.
- Value *NV = new LoadInst(NewAI, LI->getName(), LI);
- if (NV->getType() == LI->getType()) {
- // We win, no conversion needed.
- } else if (const VectorType *PTy = dyn_cast<VectorType>(NV->getType())) {
- // If the result alloca is a vector type, this is either an element
- // access or a bitcast to another vector type.
- if (isa<VectorType>(LI->getType())) {
- NV = new BitCastInst(NV, LI->getType(), LI->getName(), LI);
- } else {
- // Must be an element access.
- unsigned Elt = Offset/(TD.getTypeSize(PTy->getElementType())*8);
- NV = new ExtractElementInst(
- NV, ConstantInt::get(Type::Int32Ty, Elt), "tmp", LI);
- }
- } else if (isa<PointerType>(NV->getType())) {
- assert(isa<PointerType>(LI->getType()));
- // Must be ptr->ptr cast. Anything else would result in NV being
- // an integer.
- NV = new BitCastInst(NV, LI->getType(), LI->getName(), LI);
- } else {
- const IntegerType *NTy = cast<IntegerType>(NV->getType());
- unsigned LIBitWidth = TD.getTypeSizeInBits(LI->getType());
-
- // If this is a big-endian system and the load is narrower than the
- // full alloca type, we need to do a shift to get the right bits.
- int ShAmt = 0;
- if (TD.isBigEndian()) {
- ShAmt = NTy->getBitWidth()-LIBitWidth-Offset;
- } else {
- ShAmt = Offset;
- }
-
- // Note: we support negative bitwidths (with shl) which are not defined.
- // We do this to support (f.e.) loads off the end of a structure where
- // only some bits are used.
- if (ShAmt > 0 && (unsigned)ShAmt < NTy->getBitWidth())
- NV = BinaryOperator::createLShr(NV,
- ConstantInt::get(NV->getType(),ShAmt),
- LI->getName(), LI);
- else if (ShAmt < 0 && (unsigned)-ShAmt < NTy->getBitWidth())
- NV = BinaryOperator::createShl(NV,
- ConstantInt::get(NV->getType(),-ShAmt),
- LI->getName(), LI);
-
- // Finally, unconditionally truncate the integer to the right width.
- if (LIBitWidth < NTy->getBitWidth())
- NV = new TruncInst(NV, IntegerType::get(LIBitWidth),
- LI->getName(), LI);
-
- // If the result is an integer, this is a trunc or bitcast.
- if (isa<IntegerType>(LI->getType())) {
- assert(NV->getType() == LI->getType() && "Truncate wasn't enough?");
- } else if (LI->getType()->isFloatingPoint()) {
- // Just do a bitcast, we know the sizes match up.
- NV = new BitCastInst(NV, LI->getType(), LI->getName(), LI);
- } else {
- // Otherwise must be a pointer.
- NV = new IntToPtrInst(NV, LI->getType(), LI->getName(), LI);
- }
- }
- LI->replaceAllUsesWith(NV);
+ Value *LoadedVal = Builder.CreateLoad(NewAI, "tmp");
+ Value *NewLoadVal
+ = ConvertScalar_ExtractValue(LoadedVal, LI->getType(), Offset, Builder);
+ LI->replaceAllUsesWith(NewLoadVal);
LI->eraseFromParent();
- } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
+ continue;
+ }
+
+ if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
assert(SI->getOperand(0) != Ptr && "Consistency error!");
-
- // Convert the stored type to the actual type, shift it left to insert
- // then 'or' into place.
- Value *SV = SI->getOperand(0);
- const Type *AllocaType = NewAI->getType()->getElementType();
- if (SV->getType() == AllocaType) {
- // All is well.
- } else if (const VectorType *PTy = dyn_cast<VectorType>(AllocaType)) {
- Value *Old = new LoadInst(NewAI, NewAI->getName()+".in", SI);
-
- // If the result alloca is a vector type, this is either an element
- // access or a bitcast to another vector type.
- if (isa<VectorType>(SV->getType())) {
- SV = new BitCastInst(SV, AllocaType, SV->getName(), SI);
- } else {
- // Must be an element insertion.
- unsigned Elt = Offset/(TD.getTypeSize(PTy->getElementType())*8);
- SV = new InsertElementInst(Old, SV,
- ConstantInt::get(Type::Int32Ty, Elt),
- "tmp", SI);
- }
- } else if (isa<PointerType>(AllocaType)) {
- // If the alloca type is a pointer, then all the elements must be
- // pointers.
- if (SV->getType() != AllocaType)
- SV = new BitCastInst(SV, AllocaType, SV->getName(), SI);
- } else {
- Value *Old = new LoadInst(NewAI, NewAI->getName()+".in", SI);
-
- // If SV is a float, convert it to the appropriate integer type.
- // If it is a pointer, do the same, and also handle ptr->ptr casts
- // here.
- unsigned SrcWidth = TD.getTypeSizeInBits(SV->getType());
- unsigned DestWidth = AllocaType->getPrimitiveSizeInBits();
- if (SV->getType()->isFloatingPoint())
- SV = new BitCastInst(SV, IntegerType::get(SrcWidth),
- SV->getName(), SI);
- else if (isa<PointerType>(SV->getType()))
- SV = new PtrToIntInst(SV, TD.getIntPtrType(), SV->getName(), SI);
-
- // Always zero extend the value if needed.
- if (SV->getType() != AllocaType)
- SV = new ZExtInst(SV, AllocaType, SV->getName(), SI);
-
- // If this is a big-endian system and the store is narrower than the
- // full alloca type, we need to do a shift to get the right bits.
- int ShAmt = 0;
- if (TD.isBigEndian()) {
- ShAmt = DestWidth-SrcWidth-Offset;
- } else {
- ShAmt = Offset;
- }
+ Value *Old = Builder.CreateLoad(NewAI, (NewAI->getName()+".in").c_str());
+ Value *New = ConvertScalar_InsertValue(SI->getOperand(0), Old, Offset,
+ Builder);
+ Builder.CreateStore(New, NewAI);
+ SI->eraseFromParent();
+ continue;
+ }
+
+ // If this is a constant sized memset of a constant value (e.g. 0) we can
+ // transform it into a store of the expanded constant value.
+ if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) {
+ assert(MSI->getRawDest() == Ptr && "Consistency error!");
+ unsigned NumBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
+ if (NumBytes != 0) {
+ unsigned Val = cast<ConstantInt>(MSI->getValue())->getZExtValue();
- // Note: we support negative bitwidths (with shr) which are not defined.
- // We do this to support (f.e.) stores off the end of a structure where
- // only some bits in the structure are set.
- APInt Mask(APInt::getLowBitsSet(DestWidth, SrcWidth));
- if (ShAmt > 0 && (unsigned)ShAmt < DestWidth) {
- SV = BinaryOperator::createShl(SV,
- ConstantInt::get(SV->getType(), ShAmt),
- SV->getName(), SI);
- Mask <<= ShAmt;
- } else if (ShAmt < 0 && (unsigned)-ShAmt < DestWidth) {
- SV = BinaryOperator::createLShr(SV,
- ConstantInt::get(SV->getType(),-ShAmt),
- SV->getName(), SI);
- Mask = Mask.lshr(ShAmt);
- }
+ // Compute the value replicated the right number of times.
+ APInt APVal(NumBytes*8, Val);
+
+ // Splat the value if non-zero.
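+ // For example (illustrative): a 4-byte memset of the value 0xAB produces
+ // the i32 constant 0xABABABAB.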
+ if (Val)
+ for (unsigned i = 1; i != NumBytes; ++i)
+ APVal |= APVal << 8;
- // Mask out the bits we are about to insert from the old value, and or
- // in the new bits.
- if (SrcWidth != DestWidth) {
- assert(DestWidth > SrcWidth);
- Old = BinaryOperator::createAnd(Old, ConstantInt::get(~Mask),
- Old->getName()+".mask", SI);
- SV = BinaryOperator::createOr(Old, SV, SV->getName()+".ins", SI);
- }
+ Value *Old = Builder.CreateLoad(NewAI, (NewAI->getName()+".in").c_str());
+ Value *New = ConvertScalar_InsertValue(Context->getConstantInt(APVal),
+ Old, Offset, Builder);
+ Builder.CreateStore(New, NewAI);
}
- new StoreInst(SV, NewAI, SI);
- SI->eraseFromParent();
+ MSI->eraseFromParent();
+ continue;
+ }
+
+ // If this is a memcpy or memmove into or out of the whole allocation, we
+ // can handle it like a load or store of the scalar type.
+ if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(User)) {
+ assert(Offset == 0 && "must be store to start of alloca");
- } else if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
- ConvertUsesToScalar(CI, NewAI, Offset);
- CI->eraseFromParent();
- } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
- const PointerType *AggPtrTy =
- cast<PointerType>(GEP->getOperand(0)->getType());
- const TargetData &TD = getAnalysis<TargetData>();
- unsigned AggSizeInBits = TD.getTypeSize(AggPtrTy->getElementType())*8;
+ // If the source and destination are both to the same alloca, then this is
+ // a noop copy-to-self, just delete it. Otherwise, emit a load and store
+ // as appropriate.
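+ // For example (illustrative): copying the whole of a converted i32 alloca
+ // out through an i8* dest becomes a load of the i32 from NewAI plus a store
+ // through the dest pointer bitcast to i32*.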
+ AllocaInst *OrigAI = cast<AllocaInst>(Ptr->getUnderlyingObject());
- // Check to see if this is stepping over an element: GEP Ptr, int C
- unsigned NewOffset = Offset;
- if (GEP->getNumOperands() == 2) {
- unsigned Idx = cast<ConstantInt>(GEP->getOperand(1))->getZExtValue();
- unsigned BitOffset = Idx*AggSizeInBits;
+ if (MTI->getSource()->getUnderlyingObject() != OrigAI) {
+ // Dest must be OrigAI, change this to be a load from the original
+ // pointer (bitcasted), then a store to our new alloca.
+ assert(MTI->getRawDest() == Ptr && "Neither use is of pointer?");
+ Value *SrcPtr = MTI->getSource();
+ SrcPtr = Builder.CreateBitCast(SrcPtr, NewAI->getType());
- NewOffset += BitOffset;
- } else if (GEP->getNumOperands() == 3) {
- // We know that operand #2 is zero.
- unsigned Idx = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();
- const Type *AggTy = AggPtrTy->getElementType();
- if (const SequentialType *SeqTy = dyn_cast<SequentialType>(AggTy)) {
- unsigned ElSizeBits = TD.getTypeSize(SeqTy->getElementType())*8;
-
- NewOffset += ElSizeBits*Idx;
- } else if (const StructType *STy = dyn_cast<StructType>(AggTy)) {
- unsigned EltBitOffset =
- TD.getStructLayout(STy)->getElementOffset(Idx)*8;
-
- NewOffset += EltBitOffset;
- } else {
- assert(0 && "Unsupported operation!");
- abort();
- }
+ LoadInst *SrcVal = Builder.CreateLoad(SrcPtr, "srcval");
+ SrcVal->setAlignment(MTI->getAlignment());
+ Builder.CreateStore(SrcVal, NewAI);
+ } else if (MTI->getDest()->getUnderlyingObject() != OrigAI) {
+ // Src must be OrigAI, change this to be a load from NewAI then a store
+ // through the original dest pointer (bitcasted).
+ assert(MTI->getRawSource() == Ptr && "Neither use is of pointer?");
+ LoadInst *SrcVal = Builder.CreateLoad(NewAI, "srcval");
+
+ Value *DstPtr = Builder.CreateBitCast(MTI->getDest(), NewAI->getType());
+ StoreInst *NewStore = Builder.CreateStore(SrcVal, DstPtr);
+ NewStore->setAlignment(MTI->getAlignment());
} else {
- assert(0 && "Unsupported operation!");
- abort();
+ // Otherwise, this is a no-op transfer: Src == Dst.
}
- ConvertUsesToScalar(GEP, NewAI, NewOffset);
- GEP->eraseFromParent();
- } else {
- assert(0 && "Unsupported operation!");
- abort();
+
+ MTI->eraseFromParent();
+ continue;
+ }
+
+ // If the user is a dbg info intrinsic, it is safe to remove it.
+ if (isa<DbgInfoIntrinsic>(User)) {
+ User->eraseFromParent();
+ continue;
+ }
+
+ LLVM_UNREACHABLE("Unsupported operation!");
+ }
+}
+
+/// ConvertScalar_ExtractValue - Extract a value of type ToType from an integer
+/// or vector value FromVal, extracting the bits from the offset specified by
+/// Offset. This returns the value, which is of type ToType.
+///
+/// This happens when we are converting an "integer union" to a single
+/// integer scalar, or when we are converting a "vector union" to a vector with
+/// insert/extractelement instructions.
+///
+/// Offset is an offset from the original alloca, in bits that need to be
+/// shifted to the right.
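+///
+/// For example (illustrative): extracting an i16 at bit offset 16 from an i32
+/// "integer union" is a lshr by 16 followed by a trunc to i16 on a
+/// little-endian target.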
+Value *SROA::ConvertScalar_ExtractValue(Value *FromVal, const Type *ToType,
+ uint64_t Offset, IRBuilder<> &Builder) {
+ // If the load is of the whole new alloca, no conversion is needed.
+ if (FromVal->getType() == ToType && Offset == 0)
+ return FromVal;
+
+ // If the result alloca is a vector type, this is either an element
+ // access or a bitcast to another vector type of the same size.
+ if (const VectorType *VTy = dyn_cast<VectorType>(FromVal->getType())) {
+ if (isa<VectorType>(ToType))
+ return Builder.CreateBitCast(FromVal, ToType, "tmp");
+
+ // Otherwise it must be an element access.
+ unsigned Elt = 0;
+ if (Offset) {
+ unsigned EltSize = TD->getTypeAllocSizeInBits(VTy->getElementType());
+ Elt = Offset/EltSize;
+ assert(EltSize*Elt == Offset && "Invalid modulus in validity checking");
+ }
+ // Return the element extracted out of it.
+ Value *V = Builder.CreateExtractElement(FromVal,
+ Context->getConstantInt(Type::Int32Ty,Elt),
+ "tmp");
+ if (V->getType() != ToType)
+ V = Builder.CreateBitCast(V, ToType, "tmp");
+ return V;
+ }
+
+ // If ToType is a first class aggregate, extract out each of the pieces and
+ // use insertvalue's to form the FCA.
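+ // For example (illustrative): rebuilding a {i32, i32} from an i64 extracts
+ // each i32 with shifts/truncs and assembles the struct with insertvalue.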
+ if (const StructType *ST = dyn_cast<StructType>(ToType)) {
+ const StructLayout &Layout = *TD->getStructLayout(ST);
+ Value *Res = Context->getUndef(ST);
+ for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
+ Value *Elt = ConvertScalar_ExtractValue(FromVal, ST->getElementType(i),
+ Offset+Layout.getElementOffsetInBits(i),
+ Builder);
+ Res = Builder.CreateInsertValue(Res, Elt, i, "tmp");
+ }
+ return Res;
+ }
+
+ if (const ArrayType *AT = dyn_cast<ArrayType>(ToType)) {
+ uint64_t EltSize = TD->getTypeAllocSizeInBits(AT->getElementType());
+ Value *Res = Context->getUndef(AT);
+ for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
+ Value *Elt = ConvertScalar_ExtractValue(FromVal, AT->getElementType(),
+ Offset+i*EltSize, Builder);
+ Res = Builder.CreateInsertValue(Res, Elt, i, "tmp");
+ }
+ return Res;
+ }
+
+ // Otherwise, this must be a union that was converted to an integer value.
+ const IntegerType *NTy = cast<IntegerType>(FromVal->getType());
+
+ // If this is a big-endian system and the load is narrower than the
+ // full alloca type, we need to do a shift to get the right bits.
+ int ShAmt = 0;
+ if (TD->isBigEndian()) {
+ // On big-endian machines, the lowest bit is stored at the bit offset
+ // from the pointer given by getTypeStoreSizeInBits. This matters for
+ // integers with a bitwidth that is not a multiple of 8.
+ ShAmt = TD->getTypeStoreSizeInBits(NTy) -
+ TD->getTypeStoreSizeInBits(ToType) - Offset;
+ } else {
+ ShAmt = Offset;
+ }
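+ // For example (illustrative): loading an i8 at Offset 0 out of an i32
+ // gives ShAmt = 32 - 8 - 0 = 24 here on a big-endian target, but ShAmt = 0
+ // on a little-endian one.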
+
+ // Note: a negative ShAmt (a right shift by a negative amount, which is
+ // undefined) is implemented as a shl by -ShAmt. We do this to support
+ // (e.g.) loads off the end of a structure where only some bits are used.
+ if (ShAmt > 0 && (unsigned)ShAmt < NTy->getBitWidth())
+ FromVal = Builder.CreateLShr(FromVal,
+ Context->getConstantInt(FromVal->getType(),
+ ShAmt), "tmp");
+ else if (ShAmt < 0 && (unsigned)-ShAmt < NTy->getBitWidth())
+ FromVal = Builder.CreateShl(FromVal,
+ Context->getConstantInt(FromVal->getType(),
+ -ShAmt), "tmp");
+
+ // Finally, unconditionally truncate the integer to the right width.
+ unsigned LIBitWidth = TD->getTypeSizeInBits(ToType);
+ if (LIBitWidth < NTy->getBitWidth())
+ FromVal =
+ Builder.CreateTrunc(FromVal, Context->getIntegerType(LIBitWidth), "tmp");
+ else if (LIBitWidth > NTy->getBitWidth())
+ FromVal =
+ Builder.CreateZExt(FromVal, Context->getIntegerType(LIBitWidth), "tmp");
+
+ // If the result is an integer, this is a trunc or bitcast.
+ if (isa<IntegerType>(ToType)) {
+ // Should be done.
+ } else if (ToType->isFloatingPoint() || isa<VectorType>(ToType)) {
+ // Just do a bitcast, we know the sizes match up.
+ FromVal = Builder.CreateBitCast(FromVal, ToType, "tmp");
+ } else {
+ // Otherwise must be a pointer.
+ FromVal = Builder.CreateIntToPtr(FromVal, ToType, "tmp");
+ }
+ assert(FromVal->getType() == ToType && "Didn't convert right?");
+ return FromVal;
+}
+
+
+/// ConvertScalar_InsertValue - Insert the value "SV" into the existing integer
+/// or vector value "Old" at the offset specified by Offset.
+///
+/// This happens when we are converting an "integer union" to a
+/// single integer scalar, or when we are converting a "vector union" to a
+/// vector with insert/extractelement instructions.
+///
+/// Offset is an offset from the original alloca, in bits that need to be
+/// shifted to the right.
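+///
+/// For example (illustrative): inserting an i8 at bit offset 8 into an i32 on
+/// a little-endian target zero-extends the byte, shifts it left by 8, and
+/// merges it into the old value with an and/or mask sequence.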
+Value *SROA::ConvertScalar_InsertValue(Value *SV, Value *Old,
+ uint64_t Offset, IRBuilder<> &Builder) {
+
+ // Convert the stored type to the actual type, shift it left to insert
+ // then 'or' into place.
+ const Type *AllocaType = Old->getType();
+
+ if (const VectorType *VTy = dyn_cast<VectorType>(AllocaType)) {
+ uint64_t VecSize = TD->getTypeAllocSizeInBits(VTy);
+ uint64_t ValSize = TD->getTypeAllocSizeInBits(SV->getType());
+
+ // Changing the whole vector with memset or with an access of a different
+ // vector type?
+ if (ValSize == VecSize)
+ return Builder.CreateBitCast(SV, AllocaType, "tmp");
+
+ uint64_t EltSize = TD->getTypeAllocSizeInBits(VTy->getElementType());
+
+ // Must be an element insertion.
+ unsigned Elt = Offset/EltSize;
+
+ if (SV->getType() != VTy->getElementType())
+ SV = Builder.CreateBitCast(SV, VTy->getElementType(), "tmp");
+
+ SV = Builder.CreateInsertElement(Old, SV,
+ Context->getConstantInt(Type::Int32Ty, Elt),
+ "tmp");
+ return SV;
+ }
+
+ // If SV is a first-class aggregate value, insert each value recursively.
+ if (const StructType *ST = dyn_cast<StructType>(SV->getType())) {
+ const StructLayout &Layout = *TD->getStructLayout(ST);
+ for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
+ Value *Elt = Builder.CreateExtractValue(SV, i, "tmp");
+ Old = ConvertScalar_InsertValue(Elt, Old,
+ Offset+Layout.getElementOffsetInBits(i),
+ Builder);
+ }
+ return Old;
+ }
+
+ if (const ArrayType *AT = dyn_cast<ArrayType>(SV->getType())) {
+ uint64_t EltSize = TD->getTypeAllocSizeInBits(AT->getElementType());
+ for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
+ Value *Elt = Builder.CreateExtractValue(SV, i, "tmp");
+ Old = ConvertScalar_InsertValue(Elt, Old, Offset+i*EltSize, Builder);
+ }
+ return Old;
+ }
+
+ // If SV is a float, convert it to the appropriate integer type.
+ // If it is a pointer, do the same.
+ unsigned SrcWidth = TD->getTypeSizeInBits(SV->getType());
+ unsigned DestWidth = TD->getTypeSizeInBits(AllocaType);
+ unsigned SrcStoreWidth = TD->getTypeStoreSizeInBits(SV->getType());
+ unsigned DestStoreWidth = TD->getTypeStoreSizeInBits(AllocaType);
+ if (SV->getType()->isFloatingPoint() || isa<VectorType>(SV->getType()))
+ SV = Builder.CreateBitCast(SV, Context->getIntegerType(SrcWidth), "tmp");
+ else if (isa<PointerType>(SV->getType()))
+ SV = Builder.CreatePtrToInt(SV, TD->getIntPtrType(), "tmp");
+
+ // Zero extend or truncate the value if needed.
+ if (SV->getType() != AllocaType) {
+ if (SV->getType()->getPrimitiveSizeInBits() <
+ AllocaType->getPrimitiveSizeInBits())
+ SV = Builder.CreateZExt(SV, AllocaType, "tmp");
+ else {
+ // Truncation may be needed if storing more than the alloca can hold
+ // (undefined behavior).
+ SV = Builder.CreateTrunc(SV, AllocaType, "tmp");
+ SrcWidth = DestWidth;
+ SrcStoreWidth = DestStoreWidth;
}
}
+
+ // If this is a big-endian system and the store is narrower than the
+ // full alloca type, we need to do a shift to get the right bits.
+ int ShAmt = 0;
+ if (TD->isBigEndian()) {
+ // On big-endian machines, the lowest bit is stored at the bit offset
+ // from the pointer given by getTypeStoreSizeInBits. This matters for
+ // integers with a bitwidth that is not a multiple of 8.
+ ShAmt = DestStoreWidth - SrcStoreWidth - Offset;
+ } else {
+ ShAmt = Offset;
+ }
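+ // For example (illustrative): storing an i8 at Offset 0 into an i32 alloca
+ // gives ShAmt = 32 - 8 - 0 = 24 here on a big-endian target.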
+
+ // Note: a negative ShAmt (a left shift by a negative amount, which is
+ // undefined) is implemented as a lshr by -ShAmt. We do this to support
+ // (e.g.) stores off the end of a structure where only some bits in the
+ // structure are set.
+ APInt Mask(APInt::getLowBitsSet(DestWidth, SrcWidth));
+ if (ShAmt > 0 && (unsigned)ShAmt < DestWidth) {
+ SV = Builder.CreateShl(SV, Context->getConstantInt(SV->getType(),
+ ShAmt), "tmp");
+ Mask <<= ShAmt;
+ } else if (ShAmt < 0 && (unsigned)-ShAmt < DestWidth) {
+ SV = Builder.CreateLShr(SV, Context->getConstantInt(SV->getType(),
+ -ShAmt), "tmp");
+ Mask = Mask.lshr(-ShAmt);
+ }
+
+ // Mask out the bits we are about to insert from the old value, and or
+ // in the new bits.
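+ // For example (illustrative): an i8 shifted left by 8 within an i32 has
+ // Mask == 0xFF00, so Old & 0xFFFF00FF clears exactly the byte being
+ // replaced before the new bits are or'd in.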
+ if (SrcWidth != DestWidth) {
+ assert(DestWidth > SrcWidth);
+ Old = Builder.CreateAnd(Old, Context->getConstantInt(~Mask), "mask");
+ SV = Builder.CreateOr(Old, SV, "ins");
+ }
+ return SV;
}
+
/// PointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable. This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
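/// For example (illustrative): a getelementptr constant expression into a
/// constant global qualifies, but a pointer computed by arbitrary
/// instructions does not.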
static bool isOnlyCopiedFromConstantGlobal(Value *V, Instruction *&TheCopy,
bool isOffset) {
for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
- if (isa<LoadInst>(*UI)) {
- // Ignore loads, they are always ok.
- continue;
- }
+ if (LoadInst *LI = dyn_cast<LoadInst>(*UI))
+ // Ignore non-volatile loads; they are always ok.
+ if (!LI->isVolatile())
+ continue;
+
if (BitCastInst *BCI = dyn_cast<BitCastInst>(*UI)) {
// If uses of the bitcast are ok, we are ok.
if (!isOnlyCopiedFromConstantGlobal(BCI, TheCopy, isOffset))
// If this isn't our memcpy/memmove, reject it as something we can't
// handle.
- if (!isa<MemCpyInst>(*UI) && !isa<MemMoveInst>(*UI))
+ if (!isa<MemTransferInst>(*UI))
return false;
// If we already have seen a copy, reject the second one.