//===- ScalarReplAggregates.cpp - Scalar Replacement of Aggregates --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation implements the well-known scalar replacement of
// aggregates transformation.  This xform breaks up alloca instructions of
// aggregate type (structure or array) into individual alloca instructions for
// each member (if possible).  Then, if possible, it transforms the individual
// alloca instructions into nice clean scalar SSA form.
//
// This combines a simple SRoA algorithm with the Mem2Reg algorithm because
// they often interact, especially for C++ programs.  As such, iterating
// between SRoA and Mem2Reg until we run out of things to promote works well.
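//
// For example (illustrative IR only; exact output depends on the frontend),
// SROA turns:
//
//   %X = alloca { i32, float }
//   %P = getelementptr { i32, float }* %X, i32 0, i32 1
//   store float %V, float* %P
//
// into a pair of scalar allocas, %X.0 of type i32 and %X.1 of type float,
// after which mem2reg can promote them into SSA virtual registers.
//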
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "scalarrepl"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
using namespace llvm;
STATISTIC(NumReplaced,  "Number of allocas broken up");
STATISTIC(NumPromoted,  "Number of allocas promoted");
STATISTIC(NumConverted, "Number of aggregates converted to scalar");
STATISTIC(NumGlobals,   "Number of allocas copied from constant global");
namespace {
  struct VISIBILITY_HIDDEN SROA : public FunctionPass {
    static char ID; // Pass identification, replacement for typeid
    SROA() : FunctionPass((intptr_t)&ID) {}

    bool runOnFunction(Function &F);

    bool performScalarRepl(Function &F);
    bool performPromotion(Function &F);

    // getAnalysisUsage - We need the promotion analyses (ETForest and
    // DominanceFrontier) plus TargetData, and we know the pass will not alter
    // the CFG, so say so.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<ETForest>();
      AU.addRequired<DominanceFrontier>();
      AU.addRequired<TargetData>();
      AU.setPreservesCFG();
    }

  private:
    int isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI);
    int isSafeUseOfAllocation(Instruction *User, AllocationInst *AI);
    bool isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI);
    bool isSafeUseOfBitCastedAllocation(BitCastInst *User, AllocationInst *AI);
    int isSafeAllocaToScalarRepl(AllocationInst *AI);
    void DoScalarReplacement(AllocationInst *AI,
                             std::vector<AllocationInst*> &WorkList);
    void CanonicalizeAllocaUsers(AllocationInst *AI);
    AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocationInst *Base);

    void RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
                                    SmallVector<AllocaInst*, 32> &NewElts);

    const Type *CanConvertToScalar(Value *V, bool &IsNotTrivial);
    void ConvertToScalar(AllocationInst *AI, const Type *Ty);
    void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset);
    static Instruction *isOnlyCopiedFromConstantGlobal(AllocationInst *AI);
  };

  char SROA::ID = 0;
  RegisterPass<SROA> X("scalarrepl", "Scalar Replacement of Aggregates");
}
// Public interface to the ScalarReplAggregates pass
FunctionPass *llvm::createScalarReplAggregatesPass() { return new SROA(); }
bool SROA::runOnFunction(Function &F) {
  bool Changed = performPromotion(F);
  while (1) {
    bool LocalChange = performScalarRepl(F);
    if (!LocalChange) break;   // No need to repromote if no scalarrepl
    Changed = true;
    LocalChange = performPromotion(F);
    if (!LocalChange) break;   // No need to re-scalarrepl if no promotion
  }

  return Changed;
}
bool SROA::performPromotion(Function &F) {
  std::vector<AllocaInst*> Allocas;
  ETForest &ET = getAnalysis<ETForest>();
  DominanceFrontier &DF = getAnalysis<DominanceFrontier>();

  BasicBlock &BB = F.getEntryBlock();  // Get the entry node for the function

  bool Changed = false;

  while (1) {
    Allocas.clear();

    // Find allocas that are safe to promote, by looking at all instructions in
    // the entry node.
    for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I)
      if (AllocaInst *AI = dyn_cast<AllocaInst>(I))       // Is it an alloca?
        if (isAllocaPromotable(AI))
          Allocas.push_back(AI);

    if (Allocas.empty()) break;

    PromoteMemToReg(Allocas, ET, DF);
    NumPromoted += Allocas.size();
    Changed = true;
  }

  return Changed;
}
// performScalarRepl - This algorithm is a simple worklist driven algorithm,
// which runs on all of the malloc/alloca instructions in the function, removing
// them if they are only used by getelementptr instructions.
//
bool SROA::performScalarRepl(Function &F) {
  std::vector<AllocationInst*> WorkList;

  // Scan the entry basic block, adding any alloca's and mallocs to the worklist
  BasicBlock &BB = F.getEntryBlock();
  for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
    if (AllocationInst *A = dyn_cast<AllocationInst>(I))
      WorkList.push_back(A);

  const TargetData &TD = getAnalysis<TargetData>();

  // Process the worklist
  bool Changed = false;
  while (!WorkList.empty()) {
    AllocationInst *AI = WorkList.back();
    WorkList.pop_back();

    // Handle dead allocas trivially.  These can be formed by SROA'ing arrays
    // with unused elements.
    if (AI->use_empty()) {
      AI->eraseFromParent();
      continue;
    }

    // Check to see if we can turn this aggregate value (potentially with
    // casts) into a simple scalar value that can be mem2reg'd into a register.
    bool IsNotTrivial = false;
    if (const Type *ActualType = CanConvertToScalar(AI, IsNotTrivial))
      if (IsNotTrivial && ActualType != Type::VoidTy) {
        ConvertToScalar(AI, ActualType);
        Changed = true;
        continue;
      }

    // Check to see if we can perform the core SROA transformation.  We cannot
    // transform the allocation instruction if it is an array allocation
    // (allocations OF arrays are ok though), and an allocation of a scalar
    // value cannot be decomposed at all.
    if (!AI->isArrayAllocation() &&
        (isa<StructType>(AI->getAllocatedType()) ||
         isa<ArrayType>(AI->getAllocatedType())) &&
        AI->getAllocatedType()->isSized() &&
        TD.getTypeSize(AI->getAllocatedType()) < 128) {
      // Check that all of the users of the allocation are capable of being
      // transformed.
      switch (isSafeAllocaToScalarRepl(AI)) {
      default: assert(0 && "Unexpected value!");
      case 0:  // Not safe to scalar replace.
        break;
      case 1:  // Safe, but requires cleanup/canonicalizations first
        CanonicalizeAllocaUsers(AI);
        // FALL THROUGH.
      case 3:  // Safe to scalar replace.
        DoScalarReplacement(AI, WorkList);
        Changed = true;
        continue;
      }
    }

    // Check to see if this allocation is only modified by a memcpy/memmove from
    // a constant global.  If this is the case, we can change all users to use
    // the constant global instead.  This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
    // is only subsequently read.
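    // For instance (illustrative IR), if the only write to the alloca is
    //   call void @llvm.memcpy.i32(i8* %A.cast, i8* bitcast (... @G ...), ...)
    // then every use of the alloca can be rewritten to use @G itself.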
    if (Instruction *TheCopy = isOnlyCopiedFromConstantGlobal(AI)) {
      DOUT << "Found alloca equal to global: " << *AI;
      DOUT << "  memcpy = " << *TheCopy;
      Constant *TheSrc = cast<Constant>(TheCopy->getOperand(2));
      AI->replaceAllUsesWith(ConstantExpr::getBitCast(TheSrc, AI->getType()));
      TheCopy->eraseFromParent();  // Don't mutate the global.
      AI->eraseFromParent();
      ++NumGlobals;
      Changed = true;
      continue;
    }

    // Otherwise, couldn't process this.
  }

  return Changed;
}
/// DoScalarReplacement - This alloca satisfied the isSafeAllocaToScalarRepl
/// predicate, do SROA now.
void SROA::DoScalarReplacement(AllocationInst *AI,
                               std::vector<AllocationInst*> &WorkList) {
  DOUT << "Found inst to SROA: " << *AI;
  SmallVector<AllocaInst*, 32> ElementAllocas;
  if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
    ElementAllocas.reserve(ST->getNumContainedTypes());
    for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) {
      AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0,
                                      AI->getAlignment(),
                                      AI->getName() + "." + utostr(i), AI);
      ElementAllocas.push_back(NA);
      WorkList.push_back(NA);  // Add to worklist for recursive processing
    }
  } else {
    const ArrayType *AT = cast<ArrayType>(AI->getAllocatedType());
    ElementAllocas.reserve(AT->getNumElements());
    const Type *ElTy = AT->getElementType();
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      AllocaInst *NA = new AllocaInst(ElTy, 0, AI->getAlignment(),
                                      AI->getName() + "." + utostr(i), AI);
      ElementAllocas.push_back(NA);
      WorkList.push_back(NA);  // Add to worklist for recursive processing
    }
  }

  // Now that we have created the alloca instructions that we want to use,
  // expand the getelementptr instructions to use them.
  //
  while (!AI->use_empty()) {
    Instruction *User = cast<Instruction>(AI->use_back());
    if (BitCastInst *BCInst = dyn_cast<BitCastInst>(User)) {
      RewriteBitCastUserOfAlloca(BCInst, AI, ElementAllocas);
      BCInst->eraseFromParent();
      continue;
    }

    GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User);
    // We now know that the GEP is of the form: GEP <ptr>, 0, <cst>
    unsigned Idx =
       (unsigned)cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();

    assert(Idx < ElementAllocas.size() && "Index out of range?");
    AllocaInst *AllocaToUse = ElementAllocas[Idx];

    Value *RepValue;
    if (GEPI->getNumOperands() == 3) {
      // Do not insert a new getelementptr instruction with zero indices, only
      // to have it optimized out later.
      RepValue = AllocaToUse;
    } else {
      // We are indexing deeply into the structure, so we still need a
      // getelementptr instruction to finish the indexing.  This may be
      // expanded itself once the worklist is rerun.
      //
      SmallVector<Value*, 8> NewArgs;
      NewArgs.push_back(Constant::getNullValue(Type::Int32Ty));
      NewArgs.append(GEPI->op_begin()+3, GEPI->op_end());
      RepValue = new GetElementPtrInst(AllocaToUse, &NewArgs[0],
                                       NewArgs.size(), "", GEPI);
      RepValue->takeName(GEPI);
    }

    // If this GEP is to the start of the aggregate, check for memcpys.
    if (Idx == 0) {
      bool IsStartOfAggregateGEP = true;
      for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i) {
        if (!isa<ConstantInt>(GEPI->getOperand(i))) {
          IsStartOfAggregateGEP = false;
          break;
        }
        if (!cast<ConstantInt>(GEPI->getOperand(i))->isZero()) {
          IsStartOfAggregateGEP = false;
          break;
        }
      }

      if (IsStartOfAggregateGEP)
        RewriteBitCastUserOfAlloca(GEPI, AI, ElementAllocas);
    }

    // Move all of the users over to the new GEP.
    GEPI->replaceAllUsesWith(RepValue);
    // Delete the old GEP
    GEPI->eraseFromParent();
  }

  // Finally, delete the Alloca instruction
  AI->eraseFromParent();
  NumReplaced++;
}
/// isSafeElementUse - Check to see if this use is an allowed use for a
/// getelementptr instruction of an array aggregate allocation.  isFirstElt
/// indicates whether Ptr is known to point to the start of the aggregate.
///
int SROA::isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI) {
  for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
       I != E; ++I) {
    Instruction *User = cast<Instruction>(*I);
    switch (User->getOpcode()) {
    case Instruction::Load:  break;
    case Instruction::Store:
      // Store is ok if storing INTO the pointer, not storing the pointer
      if (User->getOperand(0) == Ptr) return 0;
      break;
    case Instruction::GetElementPtr: {
      GetElementPtrInst *GEP = cast<GetElementPtrInst>(User);
      bool AreAllZeroIndices = isFirstElt;
      if (GEP->getNumOperands() > 1) {
        if (!isa<ConstantInt>(GEP->getOperand(1)) ||
            !cast<ConstantInt>(GEP->getOperand(1))->isZero())
          return 0;  // Using pointer arithmetic to navigate the array.

        if (AreAllZeroIndices) {
          for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i) {
            if (!isa<ConstantInt>(GEP->getOperand(i)) ||
                !cast<ConstantInt>(GEP->getOperand(i))->isZero()) {
              AreAllZeroIndices = false;
              break;
            }
          }
        }
      }
      if (!isSafeElementUse(GEP, AreAllZeroIndices, AI)) return 0;
      break;
    }
    case Instruction::BitCast:
      if (isFirstElt &&
          isSafeUseOfBitCastedAllocation(cast<BitCastInst>(User), AI))
        break;
      DOUT << "  Transformation preventing inst: " << *User;
      return 0;
    case Instruction::Call:
      if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
        if (isFirstElt && isSafeMemIntrinsicOnAllocation(MI, AI))
          break;
      }
      DOUT << "  Transformation preventing inst: " << *User;
      return 0;
    default:
      DOUT << "  Transformation preventing inst: " << *User;
      return 0;
    }
  }
  return 3;  // All users look ok :)
}
/// AllUsersAreLoads - Return true if all users of this value are loads.
static bool AllUsersAreLoads(Value *Ptr) {
  for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
       I != E; ++I)
    if (cast<Instruction>(*I)->getOpcode() != Instruction::Load)
      return false;
  return true;
}
/// isSafeUseOfAllocation - Check to see if this user is an allowed use for an
/// aggregate allocation.
///
int SROA::isSafeUseOfAllocation(Instruction *User, AllocationInst *AI) {
  if (BitCastInst *C = dyn_cast<BitCastInst>(User))
    return isSafeUseOfBitCastedAllocation(C, AI) ? 3 : 0;
  if (!isa<GetElementPtrInst>(User)) return 0;

  GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User);
  gep_type_iterator I = gep_type_begin(GEPI), E = gep_type_end(GEPI);

  // The GEP is not safe to transform if not of the form "GEP <ptr>, 0, <cst>".
  if (I == E ||
      I.getOperand() != Constant::getNullValue(I.getOperand()->getType()))
    return 0;

  ++I;
  if (I == E) return 0;  // ran out of GEP indices??

  bool IsAllZeroIndices = true;

  // If this is a use of an array allocation, do a bit more checking for sanity.
  if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
    uint64_t NumElements = AT->getNumElements();

    if (ConstantInt *Idx = dyn_cast<ConstantInt>(I.getOperand())) {
      IsAllZeroIndices &= Idx->isZero();

      // Check to make sure that index falls within the array.  If not,
      // something funny is going on, so we won't do the optimization.
      //
      if (Idx->getZExtValue() >= NumElements)
        return 0;

      // We cannot scalar repl this level of the array unless any array
      // sub-indices are in-range constants.  In particular, consider:
      // A[0][i].  We cannot know that the user isn't doing invalid things like
      // allowing i to index an out-of-range subscript that accesses A[1].
      //
      // Scalar replacing *just* the outer index of the array is probably not
      // going to be a win anyway, so just give up.
      for (++I; I != E && (isa<ArrayType>(*I) || isa<VectorType>(*I)); ++I) {
        uint64_t NumElements;
        if (const ArrayType *SubArrayTy = dyn_cast<ArrayType>(*I))
          NumElements = SubArrayTy->getNumElements();
        else
          NumElements = cast<VectorType>(*I)->getNumElements();

        ConstantInt *IdxVal = dyn_cast<ConstantInt>(I.getOperand());
        if (!IdxVal) return 0;
        if (IdxVal->getZExtValue() >= NumElements)
          return 0;
        IsAllZeroIndices &= IdxVal->isZero();
      }

    } else {
      IsAllZeroIndices = 0;

      // If this is an array index and the index is not constant, we cannot
      // promote... that is unless the array has exactly one or two elements in
      // it, in which case we CAN promote it, but we have to canonicalize this
      // out if this is the only problem.
      if ((NumElements == 1 || NumElements == 2) &&
          AllUsersAreLoads(GEPI))
        return 1;  // Canonicalization required!
      return 0;
    }
  }

  // If there are any non-simple uses of this getelementptr, make sure to reject
  // them.
  return isSafeElementUse(GEPI, IsAllZeroIndices, AI);
}
/// isSafeMemIntrinsicOnAllocation - Return true if the specified memory
/// intrinsic can be promoted by SROA.  At this point, we know that the operand
/// of the memintrinsic is a pointer to the beginning of the allocation.
bool SROA::isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI){
  // If not constant length, give up.
  ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
  if (!Length) return false;

  // If not the whole aggregate, give up.
  const TargetData &TD = getAnalysis<TargetData>();
  if (Length->getZExtValue() != TD.getTypeSize(AI->getType()->getElementType()))
    return false;

  // We only know about memcpy/memset/memmove.
  if (!isa<MemCpyInst>(MI) && !isa<MemSetInst>(MI) && !isa<MemMoveInst>(MI))
    return false;

  // Otherwise, we can transform it.
  return true;
}
/// isSafeUseOfBitCastedAllocation - Return true if all users of this bitcast
/// of the allocation are safe to transform.
bool SROA::isSafeUseOfBitCastedAllocation(BitCastInst *BC, AllocationInst *AI) {
  for (Value::use_iterator UI = BC->use_begin(), E = BC->use_end();
       UI != E; ++UI) {
    if (BitCastInst *BCU = dyn_cast<BitCastInst>(UI)) {
      if (!isSafeUseOfBitCastedAllocation(BCU, AI))
        return false;
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(UI)) {
      if (!isSafeMemIntrinsicOnAllocation(MI, AI))
        return false;
    } else {
      return false;
    }
  }
  return true;
}
/// RewriteBitCastUserOfAlloca - BCInst (transitively) bitcasts AI, or indexes
/// to its first element.  Transform users of the cast to use the new values
/// instead.
void SROA::RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
                                      SmallVector<AllocaInst*, 32> &NewElts) {
  Constant *Zero = Constant::getNullValue(Type::Int32Ty);
  const TargetData &TD = getAnalysis<TargetData>();

  Value::use_iterator UI = BCInst->use_begin(), UE = BCInst->use_end();
  while (UI != UE) {
    if (BitCastInst *BCU = dyn_cast<BitCastInst>(*UI)) {
      RewriteBitCastUserOfAlloca(BCU, AI, NewElts);
      ++UI;
      BCU->eraseFromParent();
      continue;
    }

    // Otherwise, must be memcpy/memmove/memset of the entire aggregate.  Split
    // into one per element.
    MemIntrinsic *MI = dyn_cast<MemIntrinsic>(*UI);

    // If it's not a mem intrinsic, it must be some other user of a gep of the
    // first pointer.  Just leave these alone.
    if (!MI) {
      ++UI;
      continue;
    }

    // If this is a memcpy/memmove, construct the other pointer as the
    // appropriate type.
    Value *OtherPtr = 0;
    if (MemCpyInst *MCI = dyn_cast<MemCpyInst>(MI)) {
      if (BCInst == MCI->getRawDest())
        OtherPtr = MCI->getRawSource();
      else {
        assert(BCInst == MCI->getRawSource());
        OtherPtr = MCI->getRawDest();
      }
    } else if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (BCInst == MMI->getRawDest())
        OtherPtr = MMI->getRawSource();
      else {
        assert(BCInst == MMI->getRawSource());
        OtherPtr = MMI->getRawDest();
      }
    }

    // If there is an other pointer, we want to convert it to the same pointer
    // type as AI has, so we can GEP through it.
    if (OtherPtr) {
      // It is likely that OtherPtr is a bitcast, if so, remove it.
      if (BitCastInst *BC = dyn_cast<BitCastInst>(OtherPtr))
        OtherPtr = BC->getOperand(0);
      if (ConstantExpr *BCE = dyn_cast<ConstantExpr>(OtherPtr))
        if (BCE->getOpcode() == Instruction::BitCast)
          OtherPtr = BCE->getOperand(0);

      // If the pointer is not the right type, insert a bitcast to the right
      // type.
      if (OtherPtr->getType() != AI->getType())
        OtherPtr = new BitCastInst(OtherPtr, AI->getType(), OtherPtr->getName(),
                                   MI);
    }

    // Process each element of the aggregate.
    Value *TheFn = MI->getOperand(0);
    const Type *BytePtrTy = MI->getRawDest()->getType();
    bool SROADest = MI->getRawDest() == BCInst;

    for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
      // If this is a memcpy/memmove, emit a GEP of the other element address.
      Value *OtherElt = 0;
      if (OtherPtr)
        OtherElt = new GetElementPtrInst(OtherPtr, Zero,
                                         ConstantInt::get(Type::Int32Ty, i),
                                         OtherPtr->getNameStr()+"."+utostr(i),
                                         MI);

      Value *EltPtr = NewElts[i];
      const Type *EltTy = cast<PointerType>(EltPtr->getType())->getElementType();

      // If we got down to a scalar, insert a load or store as appropriate.
      if (EltTy->isFirstClassType()) {
        if (isa<MemCpyInst>(MI) || isa<MemMoveInst>(MI)) {
          Value *Elt = new LoadInst(SROADest ? OtherElt : EltPtr, "tmp", MI);
          new StoreInst(Elt, SROADest ? EltPtr : OtherElt, MI);
          continue;
        } else {
          assert(isa<MemSetInst>(MI));

          // If the stored element is zero (common case), just store a null
          // constant.
          Constant *StoreVal;
          if (ConstantInt *CI = dyn_cast<ConstantInt>(MI->getOperand(2))) {
            if (CI->isZero()) {
              StoreVal = Constant::getNullValue(EltTy);  // 0.0, null, 0, <0,0>
            } else {
              // If EltTy is a packed type, get the element type.
              const Type *ValTy = EltTy;
              if (const VectorType *VTy = dyn_cast<VectorType>(ValTy))
                ValTy = VTy->getElementType();

              // Construct an integer with the right value.
              unsigned EltSize = TD.getTypeSize(ValTy);
              APInt OneVal(EltSize*8, CI->getZExtValue());
              APInt TotalVal(OneVal);
              // Set each byte.
              for (unsigned i = 0; i != EltSize-1; ++i) {
                TotalVal = TotalVal.shl(8);
                TotalVal |= OneVal;
              }

              // Convert the integer value to the appropriate type.
              StoreVal = ConstantInt::get(TotalVal);
              if (isa<PointerType>(ValTy))
                StoreVal = ConstantExpr::getIntToPtr(StoreVal, ValTy);
              else if (ValTy->isFloatingPoint())
                StoreVal = ConstantExpr::getBitCast(StoreVal, ValTy);
              assert(StoreVal->getType() == ValTy && "Type mismatch!");

              // If the requested value was a vector constant, create it.
              if (EltTy != ValTy) {
                unsigned NumElts = cast<VectorType>(EltTy)->getNumElements();
                SmallVector<Constant*, 16> Elts(NumElts, StoreVal);
                StoreVal = ConstantVector::get(&Elts[0], NumElts);
              }
            }
            new StoreInst(StoreVal, EltPtr, MI);
            continue;
          }
          // Otherwise, if we're storing a byte variable, use a memset call for
          // this element.
        }
      }

      // Cast the element pointer to BytePtrTy.
      if (EltPtr->getType() != BytePtrTy)
        EltPtr = new BitCastInst(EltPtr, BytePtrTy, EltPtr->getNameStr(), MI);

      // Cast the other pointer (if we have one) to BytePtrTy.
      if (OtherElt && OtherElt->getType() != BytePtrTy)
        OtherElt = new BitCastInst(OtherElt, BytePtrTy, OtherElt->getNameStr(),
                                   MI);

      unsigned EltSize = TD.getTypeSize(EltTy);

      // Finally, insert the meminst for this element.
      if (isa<MemCpyInst>(MI) || isa<MemMoveInst>(MI)) {
        Value *Ops[] = {
          SROADest ? EltPtr : OtherElt,  // Dest ptr
          SROADest ? OtherElt : EltPtr,  // Src ptr
          ConstantInt::get(MI->getOperand(3)->getType(), EltSize), // Size
          Zero  // Align
        };
        new CallInst(TheFn, Ops, 4, "", MI);
      } else {
        assert(isa<MemSetInst>(MI));
        Value *Ops[] = {
          EltPtr, MI->getOperand(2),  // Dest, Value,
          ConstantInt::get(MI->getOperand(3)->getType(), EltSize), // Size
          Zero  // Align
        };
        new CallInst(TheFn, Ops, 4, "", MI);
      }
    }

    // Finally, MI is now dead, as we've modified its actions to occur on all of
    // the elements of the aggregate.
    ++UI;
    MI->eraseFromParent();
  }
}
/// isSafeAllocaToScalarRepl - Check to see if the specified allocation of an
/// aggregate can be broken down into elements.  Return 0 if not, 3 if safe,
/// or 1 if safe after canonicalization has been performed.
///
int SROA::isSafeAllocaToScalarRepl(AllocationInst *AI) {
  // Loop over the use list of the alloca.  We can only transform it if all of
  // the users are safe to transform.
  //
  int isSafe = 3;
  for (Value::use_iterator I = AI->use_begin(), E = AI->use_end();
       I != E; ++I) {
    isSafe &= isSafeUseOfAllocation(cast<Instruction>(*I), AI);
    if (isSafe == 0) {
      DOUT << "Cannot transform: " << *AI << "  due to user: " << **I;
      return 0;
    }
  }

  // If we require cleanup, isSafe is now 1, otherwise it is 3.
  return isSafe;
}
/// CanonicalizeAllocaUsers - If SROA reported that it can promote the specified
/// allocation, but only if cleaned up, perform the cleanups required.
void SROA::CanonicalizeAllocaUsers(AllocationInst *AI) {
  // At this point, we know that the end result will be SROA'd and promoted, so
  // we can insert ugly code if required so long as sroa+mem2reg will clean it
  // up.
  for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
       UI != E; ) {
    GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(*UI++);
    if (!GEPI) continue;
    gep_type_iterator I = gep_type_begin(GEPI);
    ++I;

    if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
      uint64_t NumElements = AT->getNumElements();

      if (!isa<ConstantInt>(I.getOperand())) {
        if (NumElements == 1) {
          GEPI->setOperand(2, Constant::getNullValue(Type::Int32Ty));
        } else {
          assert(NumElements == 2 && "Unhandled case!");
          // All users of the GEP must be loads.  At each use of the GEP, insert
          // two loads of the appropriate indexed GEP and select between them.
          Value *IsOne = new ICmpInst(ICmpInst::ICMP_NE, I.getOperand(),
                              Constant::getNullValue(I.getOperand()->getType()),
                              "isone", GEPI);
          // Insert the new GEP instructions, which are properly indexed.
          SmallVector<Value*, 8> Indices(GEPI->op_begin()+1, GEPI->op_end());
          Indices[1] = Constant::getNullValue(Type::Int32Ty);
          Value *ZeroIdx = new GetElementPtrInst(GEPI->getOperand(0),
                                                 &Indices[0], Indices.size(),
                                                 GEPI->getName()+".0", GEPI);
          Indices[1] = ConstantInt::get(Type::Int32Ty, 1);
          Value *OneIdx = new GetElementPtrInst(GEPI->getOperand(0),
                                                &Indices[0], Indices.size(),
                                                GEPI->getName()+".1", GEPI);
          // Replace all loads of the variable index GEP with loads from both
          // indexes and a select.
          while (!GEPI->use_empty()) {
            LoadInst *LI = cast<LoadInst>(GEPI->use_back());
            Value *Zero = new LoadInst(ZeroIdx, LI->getName()+".0", LI);
            Value *One  = new LoadInst(OneIdx , LI->getName()+".1", LI);
            Value *R = new SelectInst(IsOne, One, Zero, LI->getName(), LI);
            LI->replaceAllUsesWith(R);
            LI->eraseFromParent();
          }
          GEPI->eraseFromParent();
        }
      }
    }
  }
}
/// MergeInType - Add the 'In' type to the accumulated type so far.  If the
/// types are incompatible, return true, otherwise update Accum and return
/// false.
///
/// There are three cases we handle here:
///   1) An effectively-integer union, where the pieces are stored into as
///      smaller integers (common with byte swap and other idioms).
///   2) A union of vector types of the same size and potentially its elements.
///      Here we turn element accesses into insert/extract element operations.
///   3) A union of scalar types, such as int/float or int/pointer.  Here we
///      merge together into integers, allowing the xform to work with #1 as
///      well.
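///
/// For example (illustrative): merging i8 into i16 yields i16 (case #1);
/// merging float into i32 yields i32, since both canonicalize to i32
/// (case #3); merging float into <4 x float> keeps <4 x float> (case #2).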
static bool MergeInType(const Type *In, const Type *&Accum,
                        const TargetData &TD) {
  // If this is our first type, just use it.
  const VectorType *PTy;
  if (Accum == Type::VoidTy || In == Accum) {
    Accum = In;
  } else if (In == Type::VoidTy) {
    // Noop.
  } else if (In->isInteger() && Accum->isInteger()) {   // integer union.
    // Otherwise pick whichever type is larger.
    if (cast<IntegerType>(In)->getBitWidth() >
        cast<IntegerType>(Accum)->getBitWidth())
      Accum = In;
  } else if (isa<PointerType>(In) && isa<PointerType>(Accum)) {
    // Pointer unions just stay as one of the pointers.
  } else if (isa<VectorType>(In) || isa<VectorType>(Accum)) {
    if ((PTy = dyn_cast<VectorType>(Accum)) &&
        PTy->getElementType() == In) {
      // Accum is a vector, and we are accessing an element: ok.
    } else if ((PTy = dyn_cast<VectorType>(In)) &&
               PTy->getElementType() == Accum) {
      // In is a vector, and accum is an element: ok, remember In.
      Accum = In;
    } else if ((PTy = dyn_cast<VectorType>(In)) && isa<VectorType>(Accum) &&
               PTy->getBitWidth() == cast<VectorType>(Accum)->getBitWidth()) {
      // Two vectors of the same size: keep Accum.
    } else {
      // Cannot insert a short into a <4 x int> or handle
      // <2 x int> -> <4 x int>
      return true;
    }
  } else {
    // Pointer/FP/Integer unions merge together as integers.
    switch (Accum->getTypeID()) {
    case Type::PointerTyID: Accum = TD.getIntPtrType(); break;
    case Type::FloatTyID:   Accum = Type::Int32Ty; break;
    case Type::DoubleTyID:  Accum = Type::Int64Ty; break;
    default:
      assert(Accum->isInteger() && "Unknown FP type!");
      break;
    }

    switch (In->getTypeID()) {
    case Type::PointerTyID: In = TD.getIntPtrType(); break;
    case Type::FloatTyID:   In = Type::Int32Ty; break;
    case Type::DoubleTyID:  In = Type::Int64Ty; break;
    default:
      assert(In->isInteger() && "Unknown FP type!");
      break;
    }
    return MergeInType(In, Accum, TD);
  }
  return false;
}
/// getUIntAtLeastAsBitAs - Return an unsigned integer type that is at least
/// as big as the specified number of bits.  If there is no suitable type,
/// this returns null.
const Type *getUIntAtLeastAsBitAs(unsigned NumBits) {
  if (NumBits > 64) return 0;
  if (NumBits > 32) return Type::Int64Ty;
  if (NumBits > 16) return Type::Int32Ty;
  if (NumBits > 8) return Type::Int16Ty;
  return Type::Int8Ty;
}
/// CanConvertToScalar - V is a pointer.  If we can convert the pointee to a
/// single scalar integer type, return that type.  Further, if the use is not
/// a completely trivial use that mem2reg could promote, set IsNotTrivial.  If
/// there are no uses of this pointer, return Type::VoidTy to differentiate from
/// failure.
///
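/// For example (illustrative): an alloca of {i32, i32} whose elements are only
/// accessed through "GEP Ptr, 0, 0" and "GEP Ptr, 0, 1" as i32 loads and stores
/// can be converted to a single i64 scalar.
///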
const Type *SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial) {
  const Type *UsedType = Type::VoidTy; // No uses, no forced type.
  const TargetData &TD = getAnalysis<TargetData>();
  const PointerType *PTy = cast<PointerType>(V->getType());

  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      if (MergeInType(LI->getType(), UsedType, TD))
        return 0;

    } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      // Storing the pointer, not into the value?
      if (SI->getOperand(0) == V) return 0;

      // NOTE: We could handle storing of FP imms into integers here!

      if (MergeInType(SI->getOperand(0)->getType(), UsedType, TD))
        return 0;
    } else if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
      IsNotTrivial = true;
      const Type *SubTy = CanConvertToScalar(CI, IsNotTrivial);
      if (!SubTy || MergeInType(SubTy, UsedType, TD)) return 0;
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      // Check to see if this is stepping over an element: GEP Ptr, int C
      if (GEP->getNumOperands() == 2 && isa<ConstantInt>(GEP->getOperand(1))) {
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(1))->getZExtValue();
        unsigned ElSize = TD.getTypeSize(PTy->getElementType());
        unsigned BitOffset = Idx*ElSize*8;
        if (BitOffset > 64 || !isPowerOf2_32(ElSize)) return 0;

        IsNotTrivial = true;
        const Type *SubElt = CanConvertToScalar(GEP, IsNotTrivial);
        if (SubElt == 0) return 0;
        if (SubElt != Type::VoidTy && SubElt->isInteger()) {
          const Type *NewTy =
            getUIntAtLeastAsBitAs(TD.getTypeSize(SubElt)*8+BitOffset);
          if (NewTy == 0 || MergeInType(NewTy, UsedType, TD)) return 0;
          continue;
        }
      } else if (GEP->getNumOperands() == 3 &&
                 isa<ConstantInt>(GEP->getOperand(1)) &&
                 isa<ConstantInt>(GEP->getOperand(2)) &&
                 cast<ConstantInt>(GEP->getOperand(1))->isZero()) {
        // We are stepping into an element, e.g. a structure or an array:
        // GEP Ptr, int 0, uint C
        const Type *AggTy = PTy->getElementType();
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();

        if (const ArrayType *ATy = dyn_cast<ArrayType>(AggTy)) {
          if (Idx >= ATy->getNumElements()) return 0;  // Out of range.
        } else if (const VectorType *VectorTy = dyn_cast<VectorType>(AggTy)) {
          // Getting an element of the packed vector.
          if (Idx >= VectorTy->getNumElements()) return 0;  // Out of range.

          // Merge in the vector type.
          if (MergeInType(VectorTy, UsedType, TD)) return 0;

          const Type *SubTy = CanConvertToScalar(GEP, IsNotTrivial);
          if (SubTy == 0) return 0;

          if (SubTy != Type::VoidTy && MergeInType(SubTy, UsedType, TD))
            return 0;

          // We'll need to change this to an insert/extract element operation.
          IsNotTrivial = true;
          continue;    // Everything looks ok
        } else if (isa<StructType>(AggTy)) {
          // Structs are always ok.
        } else {
          return 0;
        }
        const Type *NTy = getUIntAtLeastAsBitAs(TD.getTypeSize(AggTy)*8);
        if (NTy == 0 || MergeInType(NTy, UsedType, TD)) return 0;
        const Type *SubTy = CanConvertToScalar(GEP, IsNotTrivial);
        if (SubTy == 0) return 0;
        if (SubTy != Type::VoidTy && MergeInType(SubTy, UsedType, TD))
          return 0;
        continue;    // Everything looks ok
      }
      return 0;
    } else {
      // Cannot handle this!
      return 0;
    }
  }

  return UsedType;
}
/// ConvertToScalar - The specified alloca passes the CanConvertToScalar
/// predicate and is non-trivial.  Convert it to something that can be trivially
/// promoted into a register by mem2reg.
void SROA::ConvertToScalar(AllocationInst *AI, const Type *ActualTy) {
  DOUT << "CONVERT TO SCALAR: " << *AI << "  TYPE = "
       << *ActualTy << "\n";
  ++NumConverted;

  BasicBlock *EntryBlock = AI->getParent();
  assert(EntryBlock == &EntryBlock->getParent()->getEntryBlock() &&
         "Not in the entry block!");
  EntryBlock->getInstList().remove(AI);  // Take the alloca out of the program.

  // Create and insert the alloca.
  AllocaInst *NewAI = new AllocaInst(ActualTy, 0, AI->getName(),
                                     EntryBlock->begin());
  ConvertUsesToScalar(AI, NewAI, 0);
  delete AI;
}
/// ConvertUsesToScalar - Convert all of the users of Ptr to use the new alloca
/// directly.  This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.  By the end of this, there should be no uses of Ptr.
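///
/// For example (illustrative): with a new alloca of type i32 and Offset == 16,
/// a 16-bit load of Ptr becomes, on a little-endian target, a load of the
/// whole i32 followed by an "lshr" by 16 and a "trunc" to i16.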
void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset) {
  const TargetData &TD = getAnalysis<TargetData>();
  while (!Ptr->use_empty()) {
    Instruction *User = cast<Instruction>(Ptr->use_back());

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // The load is a bit extract from NewAI shifted right by Offset bits.
      Value *NV = new LoadInst(NewAI, LI->getName(), LI);
      if (NV->getType() == LI->getType()) {
        // We win, no conversion needed.
      } else if (const VectorType *PTy = dyn_cast<VectorType>(NV->getType())) {
        // If the result alloca is a vector type, this is either an element
        // access or a bitcast to another vector type.
        if (isa<VectorType>(LI->getType())) {
          NV = new BitCastInst(NV, LI->getType(), LI->getName(), LI);
        } else {
          // Must be an element access.
          unsigned Elt = Offset/(TD.getTypeSize(PTy->getElementType())*8);
          NV = new ExtractElementInst(
                         NV, ConstantInt::get(Type::Int32Ty, Elt), "tmp", LI);
        }
      } else if (isa<PointerType>(NV->getType())) {
        assert(isa<PointerType>(LI->getType()));
        // Must be ptr->ptr cast.  Anything else would result in NV being
        // an integer.
        NV = new BitCastInst(NV, LI->getType(), LI->getName(), LI);
      } else {
        const IntegerType *NTy = cast<IntegerType>(NV->getType());
        unsigned LIBitWidth = TD.getTypeSizeInBits(LI->getType());

        // If this is a big-endian system and the load is narrower than the
        // full alloca type, we need to do a shift to get the right bits.
        int ShAmt = 0;
        if (TD.isBigEndian()) {
          ShAmt = NTy->getBitWidth()-LIBitWidth-Offset;
        } else {
          ShAmt = Offset;
        }

        // Note: we support negative bitwidths (with shl) which are not defined.
        // We do this to support (f.e.) loads off the end of a structure where
        // only some bits are used.
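        // Worked example (illustrative): on a big-endian target with NTy = i32,
        // a 16-bit load at Offset 24 gives ShAmt = 32-16-24 = -8, so the value
        // is shifted *left* by 8 before the truncate below.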
        if (ShAmt > 0 && (unsigned)ShAmt < NTy->getBitWidth())
          NV = BinaryOperator::createLShr(NV,
                                          ConstantInt::get(NV->getType(),ShAmt),
                                          LI->getName(), LI);
        else if (ShAmt < 0 && (unsigned)-ShAmt < NTy->getBitWidth())
          NV = BinaryOperator::createShl(NV,
                                         ConstantInt::get(NV->getType(),-ShAmt),
                                         LI->getName(), LI);

        // Finally, unconditionally truncate the integer to the right width.
        if (LIBitWidth < NTy->getBitWidth())
          NV = new TruncInst(NV, IntegerType::get(LIBitWidth),
                             LI->getName(), LI);

        // If the result is an integer, this is a trunc or bitcast.
        if (isa<IntegerType>(LI->getType())) {
          assert(NV->getType() == LI->getType() && "Truncate wasn't enough?");
        } else if (LI->getType()->isFloatingPoint()) {
          // Just do a bitcast, we know the sizes match up.
          NV = new BitCastInst(NV, LI->getType(), LI->getName(), LI);
        } else {
          // Otherwise must be a pointer.
          NV = new IntToPtrInst(NV, LI->getType(), LI->getName(), LI);
        }
      }
      LI->replaceAllUsesWith(NV);
      LI->eraseFromParent();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      assert(SI->getOperand(0) != Ptr && "Consistency error!");

      // Convert the stored type to the actual type, shift it left to insert
      // then 'or' into place.
      Value *SV = SI->getOperand(0);
      const Type *AllocaType = NewAI->getType()->getElementType();
      if (SV->getType() == AllocaType) {
        // The types match, no conversion needed.
      } else if (const VectorType *PTy = dyn_cast<VectorType>(AllocaType)) {
        Value *Old = new LoadInst(NewAI, NewAI->getName()+".in", SI);

        // If the result alloca is a vector type, this is either an element
        // access or a bitcast to another vector type.
        if (isa<VectorType>(SV->getType())) {
          SV = new BitCastInst(SV, AllocaType, SV->getName(), SI);
        } else {
          // Must be an element insertion.
          unsigned Elt = Offset/(TD.getTypeSize(PTy->getElementType())*8);
          SV = new InsertElementInst(Old, SV,
                                     ConstantInt::get(Type::Int32Ty, Elt),
                                     "tmp", SI);
        }
      } else if (isa<PointerType>(AllocaType)) {
        // If the alloca type is a pointer, then all the elements must be
        // pointers.
        if (SV->getType() != AllocaType)
          SV = new BitCastInst(SV, AllocaType, SV->getName(), SI);
      } else {
        Value *Old = new LoadInst(NewAI, NewAI->getName()+".in", SI);

        // If SV is a float, convert it to the appropriate integer type.
        // If it is a pointer, do the same, and also handle ptr->ptr casts
        // here.
        unsigned SrcWidth = TD.getTypeSizeInBits(SV->getType());
        unsigned DestWidth = AllocaType->getPrimitiveSizeInBits();
        if (SV->getType()->isFloatingPoint())
          SV = new BitCastInst(SV, IntegerType::get(SrcWidth),
                               SV->getName(), SI);
        else if (isa<PointerType>(SV->getType()))
          SV = new PtrToIntInst(SV, TD.getIntPtrType(), SV->getName(), SI);

        // Always zero extend the value if needed.
        if (SV->getType() != AllocaType)
          SV = new ZExtInst(SV, AllocaType, SV->getName(), SI);

        // If this is a big-endian system and the store is narrower than the
        // full alloca type, we need to do a shift to get the right bits.
        int ShAmt = 0;
        if (TD.isBigEndian()) {
          ShAmt = DestWidth-SrcWidth-Offset;
        } else {
          ShAmt = Offset;
        }

        // Note: we support negative bitwidths (with shr) which are not defined.
        // We do this to support (f.e.) stores off the end of a structure where
        // only some bits in the structure are set.
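        // Worked example (illustrative): storing an i8 at Offset 8 into a
        // 32-bit alloca on a little-endian target gives ShAmt = 8; SV is
        // shifted left by 8 and Mask becomes 0x0000FF00, so only that byte of
        // the old value is replaced below.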
        APInt Mask(APInt::getLowBitsSet(DestWidth, SrcWidth));
        if (ShAmt > 0 && (unsigned)ShAmt < DestWidth) {
          SV = BinaryOperator::createShl(SV,
                                         ConstantInt::get(SV->getType(), ShAmt),
                                         SV->getName(), SI);
          Mask <<= ShAmt;
        } else if (ShAmt < 0 && (unsigned)-ShAmt < DestWidth) {
          SV = BinaryOperator::createLShr(SV,
                                          ConstantInt::get(SV->getType(),-ShAmt),
                                          SV->getName(), SI);
          Mask = Mask.lshr(-ShAmt);
        }

        // Mask out the bits we are about to insert from the old value, and or
        // in the new bits.
        if (SrcWidth != DestWidth) {
          assert(DestWidth > SrcWidth);
          Old = BinaryOperator::createAnd(Old, ConstantInt::get(~Mask),
                                          Old->getName()+".mask", SI);
          SV = BinaryOperator::createOr(Old, SV, SV->getName()+".ins", SI);
        }
      }
      new StoreInst(SV, NewAI, SI);
      SI->eraseFromParent();

    } else if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
      ConvertUsesToScalar(CI, NewAI, Offset);
      CI->eraseFromParent();
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      const PointerType *AggPtrTy =
        cast<PointerType>(GEP->getOperand(0)->getType());
      unsigned AggSizeInBits = TD.getTypeSize(AggPtrTy->getElementType())*8;

      // Check to see if this is stepping over an element: GEP Ptr, int C
      unsigned NewOffset = Offset;
      if (GEP->getNumOperands() == 2) {
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(1))->getZExtValue();
        unsigned BitOffset = Idx*AggSizeInBits;

        NewOffset += BitOffset;
      } else if (GEP->getNumOperands() == 3) {
        // We know that operand #1 is zero.
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();
        const Type *AggTy = AggPtrTy->getElementType();
        if (const SequentialType *SeqTy = dyn_cast<SequentialType>(AggTy)) {
          unsigned ElSizeBits = TD.getTypeSize(SeqTy->getElementType())*8;

          NewOffset += ElSizeBits*Idx;
        } else if (const StructType *STy = dyn_cast<StructType>(AggTy)) {
          unsigned EltBitOffset =
            TD.getStructLayout(STy)->getElementOffset(Idx)*8;

          NewOffset += EltBitOffset;
        } else {
          assert(0 && "Unsupported operation!");
          abort();
        }
      } else {
        assert(0 && "Unsupported operation!");
        abort();
      }
      ConvertUsesToScalar(GEP, NewAI, NewOffset);
      GEP->eraseFromParent();
    } else {
      assert(0 && "Unsupported operation!");
      abort();
    }
  }
}
/// PointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable.  This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
static bool PointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return PointsToConstantGlobal(CE->getOperand(0));
  return false;
}
/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca.  Ignore any reads of the pointer, and return false if
/// we see any stores or other unknown uses.  If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with isOffset), but otherwise
/// traverse the uses.  If we see a memcpy/memmove that targets an unoffseted
/// pointer to the alloca, and if the source pointer is a pointer to a constant
/// global, we can optimize this.
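///
/// A typical matching pattern (illustrative IR):
///   %A = alloca %T
///   %p = bitcast %T* %A to i8*
///   call void @llvm.memcpy.i32(i8* %p, i8* bitcast (%T* @G to i8*), ...)
///   ... only loads of %A ...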
static bool isOnlyCopiedFromConstantGlobal(Value *V, Instruction *&TheCopy,
                                           bool isOffset) {
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    if (isa<LoadInst>(*UI)) {
      // Ignore loads, they are always ok.
      continue;
    }
    if (BitCastInst *BCI = dyn_cast<BitCastInst>(*UI)) {
      // If uses of the bitcast are ok, we are ok.
      if (!isOnlyCopiedFromConstantGlobal(BCI, TheCopy, isOffset))
        return false;
      continue;
    }
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(*UI)) {
      // If the GEP has all zero indices, it doesn't offset the pointer.  If it
      // doesn't, it does.
      if (!isOnlyCopiedFromConstantGlobal(GEP, TheCopy,
                                          isOffset || !GEP->hasAllZeroIndices()))
        return false;
      continue;
    }

    // If this isn't our memcpy/memmove, reject it as something we can't
    // handle.
    if (!isa<MemCpyInst>(*UI) && !isa<MemMoveInst>(*UI))
      return false;

    // If we already have seen a copy, reject the second one.
    if (TheCopy) return false;

    // If the pointer has been offset from the start of the alloca, we can't
    // safely handle this.
    if (isOffset) return false;

    // If the memintrinsic isn't using the alloca as the dest, reject it.
    if (UI.getOperandNo() != 1) return false;

    MemIntrinsic *MI = cast<MemIntrinsic>(*UI);

    // If the source of the memcpy/move is not a constant global, reject it.
    if (!PointsToConstantGlobal(MI->getOperand(2)))
      return false;

    // Otherwise, the transform is safe.  Remember the copy instruction.
    TheCopy = MI;
  }
  return true;
}
/// isOnlyCopiedFromConstantGlobal - Return the memcpy/memmove instruction if
/// the specified alloca is only modified by a copy from a constant global, or
/// null otherwise.  If we can prove this, we can replace any uses of the alloca
/// with uses of the global directly.
Instruction *SROA::isOnlyCopiedFromConstantGlobal(AllocationInst *AI) {
  Instruction *TheCopy = 0;
  if (::isOnlyCopiedFromConstantGlobal(AI, TheCopy, false))
    return TheCopy;
  return 0;
}