//===- ScalarReplAggregates.cpp - Scalar Replacement of Aggregates --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation implements the well known scalar replacement of
// aggregates transformation.  This xform breaks up alloca instructions of
// aggregate type (structure or array) into individual alloca instructions for
// each member (if possible).  Then, if possible, it transforms the individual
// alloca instructions into nice clean scalar SSA form.
//
// This combines a simple SRoA algorithm with the Mem2Reg algorithm because the
// two often interact, especially for C++ programs.  As such, iterating between
// SRoA and Mem2Reg until we run out of things to promote works well.
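//
// For example (illustrative only), an alloca of type {i32, i32} whose fields
// are only accessed via "getelementptr %p, 0, <field>" is broken up into two
// scalar i32 allocas, and Mem2Reg then promotes those into plain SSA values,
// eliminating the memory traffic entirely.
//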
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "scalarrepl"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
using namespace llvm;

STATISTIC(NumReplaced,  "Number of allocas broken up");
STATISTIC(NumPromoted,  "Number of allocas promoted");
STATISTIC(NumConverted, "Number of aggregates converted to scalar");

namespace {
  struct VISIBILITY_HIDDEN SROA : public FunctionPass {
    bool runOnFunction(Function &F);

    bool performScalarRepl(Function &F);
    bool performPromotion(Function &F);

    // getAnalysisUsage - This pass does not require any passes, but we know it
    // will not alter the CFG, so say so.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      AU.addRequired<DominanceFrontier>();
      AU.addRequired<TargetData>();
      AU.setPreservesCFG();
    }

  private:
    int isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI);
    int isSafeUseOfAllocation(Instruction *User, AllocationInst *AI);
    bool isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI);
    bool isSafeUseOfBitCastedAllocation(BitCastInst *User, AllocationInst *AI);
    int isSafeAllocaToScalarRepl(AllocationInst *AI);
    void CanonicalizeAllocaUsers(AllocationInst *AI);
    AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocationInst *Base);

    void RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
                                    SmallVector<AllocaInst*, 32> &NewElts);

    const Type *CanConvertToScalar(Value *V, bool &IsNotTrivial);
    void ConvertToScalar(AllocationInst *AI, const Type *Ty);
    void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset);
  };

  RegisterPass<SROA> X("scalarrepl", "Scalar Replacement of Aggregates");
}

// Public interface to the ScalarReplAggregates pass
FunctionPass *llvm::createScalarReplAggregatesPass() { return new SROA(); }

bool SROA::runOnFunction(Function &F) {
  bool Changed = performPromotion(F);
  while (1) {
    bool LocalChange = performScalarRepl(F);
    if (!LocalChange) break;   // No need to repromote if no scalarrepl
    Changed = true;
    LocalChange = performPromotion(F);
    if (!LocalChange) break;   // No need to re-scalarrepl if no promotion
  }

  return Changed;
}

bool SROA::performPromotion(Function &F) {
  std::vector<AllocaInst*> Allocas;
  const TargetData &TD = getAnalysis<TargetData>();
  DominatorTree     &DT = getAnalysis<DominatorTree>();
  DominanceFrontier &DF = getAnalysis<DominanceFrontier>();

  BasicBlock &BB = F.getEntryBlock();  // Get the entry node for the function

  bool Changed = false;

  while (1) {
    Allocas.clear();

    // Find allocas that are safe to promote, by looking at all instructions in
    // the entry node.
    for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I)
      if (AllocaInst *AI = dyn_cast<AllocaInst>(I))       // Is it an alloca?
        if (isAllocaPromotable(AI, TD))
          Allocas.push_back(AI);

    if (Allocas.empty()) break;

    PromoteMemToReg(Allocas, DT, DF, TD);
    NumPromoted += Allocas.size();
    Changed = true;
  }

  return Changed;
}

// performScalarRepl - This algorithm is a simple worklist driven algorithm,
// which runs on all of the malloc/alloca instructions in the function,
// removing them if they are only used by getelementptr instructions.
//
bool SROA::performScalarRepl(Function &F) {
  std::vector<AllocationInst*> WorkList;

  // Scan the entry basic block, adding any allocas and mallocs to the
  // worklist.
  BasicBlock &BB = F.getEntryBlock();
  for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
    if (AllocationInst *A = dyn_cast<AllocationInst>(I))
      WorkList.push_back(A);

  // Process the worklist.
  bool Changed = false;
  while (!WorkList.empty()) {
    AllocationInst *AI = WorkList.back();
    WorkList.pop_back();

    // Handle dead allocas trivially.  These can be formed by SROA'ing arrays
    // with unused elements.
    if (AI->use_empty()) {
      AI->eraseFromParent();
      continue;
    }

    // If we can turn this aggregate value (potentially with casts) into a
    // simple scalar value that can be mem2reg'd into a register, do so now.
    bool IsNotTrivial = false;
    if (const Type *ActualType = CanConvertToScalar(AI, IsNotTrivial))
      if (IsNotTrivial && ActualType != Type::VoidTy) {
        ConvertToScalar(AI, ActualType);
        Changed = true;
        continue;
      }

    // We cannot transform the allocation instruction if it is an array
    // allocation (allocations OF arrays are ok though), and an allocation of a
    // scalar value cannot be decomposed at all.
    //
    if (AI->isArrayAllocation() ||
        (!isa<StructType>(AI->getAllocatedType()) &&
         !isa<ArrayType>(AI->getAllocatedType()))) continue;

    // Check that all of the users of the allocation are capable of being
    // transformed.
    switch (isSafeAllocaToScalarRepl(AI)) {
    default: assert(0 && "Unexpected value!");
    case 0:  // Not safe to scalar replace.
      continue;
    case 1:  // Safe, but requires cleanup/canonicalizations first
      CanonicalizeAllocaUsers(AI);
      // FALL THROUGH.
    case 3:  // Safe to scalar replace.
      break;
    }

    DOUT << "Found inst to xform: " << *AI;
    Changed = true;

    SmallVector<AllocaInst*, 32> ElementAllocas;
    if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
      ElementAllocas.reserve(ST->getNumContainedTypes());
      for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) {
        AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0,
                                        AI->getAlignment(),
                                        AI->getName() + "." + utostr(i), AI);
        ElementAllocas.push_back(NA);
        WorkList.push_back(NA);  // Add to worklist for recursive processing
      }
    } else {
      const ArrayType *AT = cast<ArrayType>(AI->getAllocatedType());
      ElementAllocas.reserve(AT->getNumElements());
      const Type *ElTy = AT->getElementType();
      for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
        AllocaInst *NA = new AllocaInst(ElTy, 0, AI->getAlignment(),
                                        AI->getName() + "." + utostr(i), AI);
        ElementAllocas.push_back(NA);
        WorkList.push_back(NA);  // Add to worklist for recursive processing
      }
    }

    // Now that we have created the alloca instructions that we want to use,
    // expand the getelementptr instructions to use them.
    //
    while (!AI->use_empty()) {
      Instruction *User = cast<Instruction>(AI->use_back());
      if (BitCastInst *BCInst = dyn_cast<BitCastInst>(User)) {
        RewriteBitCastUserOfAlloca(BCInst, AI, ElementAllocas);
        BCInst->eraseFromParent();
        continue;
      }

      GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User);
      // We now know that the GEP is of the form: GEP <ptr>, 0, <cst>
      unsigned Idx =
         (unsigned)cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();

      assert(Idx < ElementAllocas.size() && "Index out of range?");
      AllocaInst *AllocaToUse = ElementAllocas[Idx];

      Value *RepValue;
      if (GEPI->getNumOperands() == 3) {
        // Do not insert a new getelementptr instruction with zero indices,
        // only to have it optimized out later.
        RepValue = AllocaToUse;
      } else {
        // We are indexing deeply into the structure, so we still need a
        // getelementptr instruction to finish the indexing.  This may be
        // expanded itself once the worklist is rerun.
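        // (Illustratively, a user "getelementptr %A, 0, 1, 2" becomes
        // "getelementptr %A.1, 0, 2" on the new element alloca %A.1, which a
        // later worklist iteration can break up further.)
        //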
        SmallVector<Value*, 8> NewArgs;
        NewArgs.push_back(Constant::getNullValue(Type::Int32Ty));
        NewArgs.append(GEPI->op_begin()+3, GEPI->op_end());
        RepValue = new GetElementPtrInst(AllocaToUse, &NewArgs[0],
                                         NewArgs.size(), "", GEPI);
        RepValue->takeName(GEPI);
      }

      // If this GEP is to the start of the aggregate, check for memcpys.
      if (Idx == 0) {
        bool IsStartOfAggregateGEP = true;
        for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i) {
          if (!isa<ConstantInt>(GEPI->getOperand(i))) {
            IsStartOfAggregateGEP = false;
            break;
          }
          if (!cast<ConstantInt>(GEPI->getOperand(i))->isZero()) {
            IsStartOfAggregateGEP = false;
            break;
          }
        }

        if (IsStartOfAggregateGEP)
          RewriteBitCastUserOfAlloca(GEPI, AI, ElementAllocas);
      }

      // Move all of the users over to the new GEP.
      GEPI->replaceAllUsesWith(RepValue);
      // Delete the old GEP.
      GEPI->eraseFromParent();
    }

    // Finally, delete the Alloca instruction.
    AI->eraseFromParent();
    NumReplaced++;
  }

  return Changed;
}

/// isSafeElementUse - Check to see if this use is an allowed use for a
/// getelementptr instruction of an array aggregate allocation.  isFirstElt
/// indicates whether Ptr is known to point to the start of the aggregate.
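/// Loads, stores into the element pointer, and further in-bounds constant
/// GEPs are allowed; storing the pointer itself somewhere, for example, is
/// rejected below because the element address would escape.
///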
int SROA::isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI) {
  for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
       I != E; ++I) {
    Instruction *User = cast<Instruction>(*I);
    switch (User->getOpcode()) {
    case Instruction::Load:  break;
    case Instruction::Store:
      // Store is ok if storing INTO the pointer, not storing the pointer.
      if (User->getOperand(0) == Ptr) return 0;
      break;
    case Instruction::GetElementPtr: {
      GetElementPtrInst *GEP = cast<GetElementPtrInst>(User);
      bool AreAllZeroIndices = isFirstElt;
      if (GEP->getNumOperands() > 1) {
        if (!isa<ConstantInt>(GEP->getOperand(1)) ||
            !cast<ConstantInt>(GEP->getOperand(1))->isZero())
          return 0;  // Using pointer arithmetic to navigate the array.

        if (AreAllZeroIndices) {
          for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i) {
            if (!isa<ConstantInt>(GEP->getOperand(i)) ||
                !cast<ConstantInt>(GEP->getOperand(i))->isZero()) {
              AreAllZeroIndices = false;
              break;
            }
          }
        }
      }
      if (!isSafeElementUse(GEP, AreAllZeroIndices, AI)) return 0;
      break;
    }
    case Instruction::BitCast:
      if (isFirstElt &&
          isSafeUseOfBitCastedAllocation(cast<BitCastInst>(User), AI))
        break;
      DOUT << "  Transformation preventing inst: " << *User;
      return 0;
    case Instruction::Call:
      if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
        if (isFirstElt && isSafeMemIntrinsicOnAllocation(MI, AI))
          break;
      }
      DOUT << "  Transformation preventing inst: " << *User;
      return 0;
    default:
      DOUT << "  Transformation preventing inst: " << *User;
      return 0;
    }
  }
  return 3;  // All users look ok :)
}

/// AllUsersAreLoads - Return true if all users of this value are loads.
static bool AllUsersAreLoads(Value *Ptr) {
  for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
       I != E; ++I)
    if (cast<Instruction>(*I)->getOpcode() != Instruction::Load)
      return false;
  return true;
}

/// isSafeUseOfAllocation - Check to see if this user is an allowed use for an
/// aggregate allocation.
///
int SROA::isSafeUseOfAllocation(Instruction *User, AllocationInst *AI) {
  if (BitCastInst *C = dyn_cast<BitCastInst>(User))
    return isSafeUseOfBitCastedAllocation(C, AI) ? 3 : 0;
  if (!isa<GetElementPtrInst>(User)) return 0;

  GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User);
  gep_type_iterator I = gep_type_begin(GEPI), E = gep_type_end(GEPI);

  // The GEP is not safe to transform if not of the form "GEP <ptr>, 0, <cst>".
  if (I == E ||
      I.getOperand() != Constant::getNullValue(I.getOperand()->getType()))
    return 0;

  ++I;
  if (I == E) return 0;  // ran out of GEP indices??

  bool IsAllZeroIndices = true;

  // If this is a use of an array allocation, do a bit more checking for
  // sanity.
  if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
    uint64_t NumElements = AT->getNumElements();

    if (ConstantInt *Idx = dyn_cast<ConstantInt>(I.getOperand())) {
      IsAllZeroIndices &= Idx->isZero();

      // Check to make sure that index falls within the array.  If not,
      // something funny is going on, so we won't do the optimization.
      //
      if (Idx->getZExtValue() >= NumElements)
        return 0;

      // We cannot scalar repl this level of the array unless any array
      // sub-indices are in-range constants.  In particular, consider:
      // A[0][i].  We cannot know that the user isn't doing invalid things like
      // allowing i to index an out-of-range subscript that accesses A[1].
      //
      // Scalar replacing *just* the outer index of the array is probably not
      // going to be a win anyway, so just give up.
      for (++I; I != E && (isa<ArrayType>(*I) || isa<VectorType>(*I)); ++I) {
        uint64_t NumElements;
        if (const ArrayType *SubArrayTy = dyn_cast<ArrayType>(*I))
          NumElements = SubArrayTy->getNumElements();
        else
          NumElements = cast<VectorType>(*I)->getNumElements();

        ConstantInt *IdxVal = dyn_cast<ConstantInt>(I.getOperand());
        if (!IdxVal) return 0;
        if (IdxVal->getZExtValue() >= NumElements)
          return 0;
        IsAllZeroIndices &= IdxVal->isZero();
      }

    } else {
      IsAllZeroIndices = false;

      // If this is an array index and the index is not constant, we cannot
      // promote... that is unless the array has exactly one or two elements in
      // it, in which case we CAN promote it, but we have to canonicalize this
      // out if this is the only problem.
      if ((NumElements == 1 || NumElements == 2) &&
          AllUsersAreLoads(GEPI))
        return 1;  // Canonicalization required!
      return 0;
    }
  }

  // If there are any non-simple uses of this getelementptr, make sure to
  // reject them.
  return isSafeElementUse(GEPI, IsAllZeroIndices, AI);
}

/// isSafeMemIntrinsicOnAllocation - Return true if the specified memory
/// intrinsic can be promoted by SROA.  At this point, we know that the operand
/// of the memintrinsic is a pointer to the beginning of the allocation.
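/// For example, a memset covering all 8 bytes of an alloca of type {i32, i32}
/// (on a typical 32-bit target) is safe to split into per-element stores,
/// while a memset of only the first 4 bytes is rejected below.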
bool SROA::isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI){
  // If not constant length, give up.
  ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
  if (!Length) return false;

  // If not the whole aggregate, give up.
  const TargetData &TD = getAnalysis<TargetData>();
  if (Length->getZExtValue() != TD.getTypeSize(AI->getType()->getElementType()))
    return false;

  // We only know about memcpy/memset/memmove.
  if (!isa<MemCpyInst>(MI) && !isa<MemSetInst>(MI) && !isa<MemMoveInst>(MI))
    return false;

  // Otherwise, we can transform it.
  return true;
}

/// isSafeUseOfBitCastedAllocation - Return true if all users of this bitcast
/// are safe to transform: that is, only further bitcasts and memory
/// intrinsics that cover the entire aggregate.
bool SROA::isSafeUseOfBitCastedAllocation(BitCastInst *BC, AllocationInst *AI) {
  for (Value::use_iterator UI = BC->use_begin(), E = BC->use_end();
       UI != E; ++UI) {
    if (BitCastInst *BCU = dyn_cast<BitCastInst>(UI)) {
      if (!isSafeUseOfBitCastedAllocation(BCU, AI))
        return false;
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(UI)) {
      if (!isSafeMemIntrinsicOnAllocation(MI, AI))
        return false;
    } else {
      return false;
    }
  }

  return true;
}

/// RewriteBitCastUserOfAlloca - BCInst (transitively) bitcasts AI, or indexes
/// to its first element.  Transform users of the cast to use the new values
/// instead.
void SROA::RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
                                      SmallVector<AllocaInst*, 32> &NewElts) {
  Constant *Zero = Constant::getNullValue(Type::Int32Ty);
  const TargetData &TD = getAnalysis<TargetData>();

  Value::use_iterator UI = BCInst->use_begin(), UE = BCInst->use_end();
  while (UI != UE) {
    if (BitCastInst *BCU = dyn_cast<BitCastInst>(*UI)) {
      RewriteBitCastUserOfAlloca(BCU, AI, NewElts);
      ++UI;
      BCU->eraseFromParent();
      continue;
    }

    // Otherwise, must be memcpy/memmove/memset of the entire aggregate.  Split
    // into one per element.
    MemIntrinsic *MI = dyn_cast<MemIntrinsic>(*UI);

    // If it's not a mem intrinsic, it must be some other user of a gep of the
    // first pointer.  Just leave these alone.
    if (!MI) {
      ++UI;
      continue;
    }

    // If this is a memcpy/memmove, construct the other pointer as the
    // appropriate type.
    Value *OtherPtr = 0;
    if (MemCpyInst *MCI = dyn_cast<MemCpyInst>(MI)) {
      if (BCInst == MCI->getRawDest())
        OtherPtr = MCI->getRawSource();
      else {
        assert(BCInst == MCI->getRawSource());
        OtherPtr = MCI->getRawDest();
      }
    } else if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (BCInst == MMI->getRawDest())
        OtherPtr = MMI->getRawSource();
      else {
        assert(BCInst == MMI->getRawSource());
        OtherPtr = MMI->getRawDest();
      }
    }

    // If there is an other pointer, we want to convert it to the same pointer
    // type as AI has, so we can GEP through it.
    if (OtherPtr) {
      // It is likely that OtherPtr is a bitcast; if so, strip the bitcast.
      if (BitCastInst *BC = dyn_cast<BitCastInst>(OtherPtr))
        OtherPtr = BC->getOperand(0);
      if (ConstantExpr *BCE = dyn_cast<ConstantExpr>(OtherPtr))
        if (BCE->getOpcode() == Instruction::BitCast)
          OtherPtr = BCE->getOperand(0);

      // If the pointer is not the right type, insert a bitcast to the right
      // type.
      if (OtherPtr->getType() != AI->getType())
        OtherPtr = new BitCastInst(OtherPtr, AI->getType(), OtherPtr->getName(),
                                   MI);
    }

    // Process each element of the aggregate.
    Value *TheFn = MI->getOperand(0);
    const Type *BytePtrTy = MI->getRawDest()->getType();
    bool SROADest = MI->getRawDest() == BCInst;

    for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
      // If this is a memcpy/memmove, emit a GEP of the other element address.
      Value *OtherElt = 0;
      if (OtherPtr)
        OtherElt = new GetElementPtrInst(OtherPtr, Zero,
                                         ConstantInt::get(Type::Int32Ty, i),
                                         OtherPtr->getNameStr()+"."+utostr(i),
                                         MI);

      Value *EltPtr = NewElts[i];
      const Type *EltTy = cast<PointerType>(EltPtr->getType())->getElementType();

      // If we got down to a scalar, insert a load or store as appropriate.
      if (EltTy->isFirstClassType()) {
        if (isa<MemCpyInst>(MI) || isa<MemMoveInst>(MI)) {
          Value *Elt = new LoadInst(SROADest ? OtherElt : EltPtr, "tmp",
                                    MI);
          new StoreInst(Elt, SROADest ? EltPtr : OtherElt, MI);
          continue;
        }

        assert(isa<MemSetInst>(MI));

        // If the stored element is zero (common case), just store a null
        // constant.
        Constant *StoreVal;
        if (ConstantInt *CI = dyn_cast<ConstantInt>(MI->getOperand(2))) {
          if (CI->isZero()) {
            StoreVal = Constant::getNullValue(EltTy);  // 0.0, null, 0, <0,0>
          } else {
            // If EltTy is a packed type, get the element type.
            const Type *ValTy = EltTy;
            if (const VectorType *VTy = dyn_cast<VectorType>(ValTy))
              ValTy = VTy->getElementType();

            // Construct an integer with the right value.
            unsigned EltSize = TD.getTypeSize(ValTy);
            APInt OneVal(EltSize*8, CI->getZExtValue());
            APInt TotalVal(OneVal);
            // Set each byte.
            for (unsigned i = 0; i != EltSize-1; ++i) {
              TotalVal = TotalVal.shl(8);
              TotalVal |= OneVal;
            }
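            // E.g. a memset of byte value 0xAB over an i32 element produces
            // the splat constant 0xABABABAB here (illustrative).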

            // Convert the integer value to the appropriate type.
            StoreVal = ConstantInt::get(TotalVal);
            if (isa<PointerType>(ValTy))
              StoreVal = ConstantExpr::getIntToPtr(StoreVal, ValTy);
            else if (ValTy->isFloatingPoint())
              StoreVal = ConstantExpr::getBitCast(StoreVal, ValTy);
            assert(StoreVal->getType() == ValTy && "Type mismatch!");

            // If the requested value was a vector constant, create it.
            if (EltTy != ValTy) {
              unsigned NumElts = cast<VectorType>(EltTy)->getNumElements();
              SmallVector<Constant*, 16> Elts(NumElts, StoreVal);
              StoreVal = ConstantVector::get(&Elts[0], NumElts);
            }
          }
          new StoreInst(StoreVal, EltPtr, MI);
          continue;
        }

        // Otherwise, if we're storing a byte variable, use a memset call for
        // this element.
      }

      // Cast the element pointer to BytePtrTy.
      if (EltPtr->getType() != BytePtrTy)
        EltPtr = new BitCastInst(EltPtr, BytePtrTy, EltPtr->getNameStr(), MI);

      // Cast the other pointer (if we have one) to BytePtrTy.
      if (OtherElt && OtherElt->getType() != BytePtrTy)
        OtherElt = new BitCastInst(OtherElt, BytePtrTy, OtherElt->getNameStr(),
                                   MI);

      unsigned EltSize = TD.getTypeSize(EltTy);

      // Finally, insert the meminst for this element.
      if (isa<MemCpyInst>(MI) || isa<MemMoveInst>(MI)) {
        Value *Ops[] = {
          SROADest ? EltPtr : OtherElt,  // Dest ptr
          SROADest ? OtherElt : EltPtr,  // Src ptr
          ConstantInt::get(MI->getOperand(3)->getType(), EltSize), // Size
          Zero  // Align
        };
        new CallInst(TheFn, Ops, 4, "", MI);
      } else {
        assert(isa<MemSetInst>(MI));
        Value *Ops[] = {
          EltPtr, MI->getOperand(2),  // Dest, Value,
          ConstantInt::get(MI->getOperand(3)->getType(), EltSize), // Size
          Zero  // Align
        };
        new CallInst(TheFn, Ops, 4, "", MI);
      }
    }

    // Finally, MI is now dead, as we've modified its actions to occur on all
    // of the elements of the aggregate.
    ++UI;
    MI->eraseFromParent();
  }
}

/// isSafeAllocaToScalarRepl - Check to see if the specified allocation of an
/// aggregate can be broken down into elements.  Return 0 if not, 3 if safe,
/// or 1 if safe after canonicalization has been performed.
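/// The result codes combine under bitwise AND in the loop below: any unsafe
/// user (0) forces the result to 0, and a cleanup-requiring user (1) degrades
/// an otherwise-safe 3 to 1, since 3 & 1 == 1.
///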
int SROA::isSafeAllocaToScalarRepl(AllocationInst *AI) {
  // Loop over the use list of the alloca.  We can only transform it if all of
  // the users are safe to transform.
  //
  int isSafe = 3;
  for (Value::use_iterator I = AI->use_begin(), E = AI->use_end();
       I != E; ++I) {
    isSafe &= isSafeUseOfAllocation(cast<Instruction>(*I), AI);
    if (isSafe == 0) {
      DOUT << "Cannot transform: " << *AI << "  due to user: " << **I;
      return 0;
    }
  }

  // If we require cleanup, isSafe is now 1, otherwise it is 3.
  return isSafe;
}

/// CanonicalizeAllocaUsers - If SROA reported that it can promote the
/// specified allocation, but only if cleaned up, perform the cleanups
/// required.
void SROA::CanonicalizeAllocaUsers(AllocationInst *AI) {
  // At this point, we know that the end result will be SROA'd and promoted, so
  // we can insert ugly code if required so long as sroa+mem2reg will clean it
  // up.
  for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
       UI != E; ) {
    GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(*UI++);
    if (!GEPI) continue;
    gep_type_iterator I = gep_type_begin(GEPI);
    ++I;

    if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
      uint64_t NumElements = AT->getNumElements();

      if (!isa<ConstantInt>(I.getOperand())) {
        if (NumElements == 1) {
          GEPI->setOperand(2, Constant::getNullValue(Type::Int32Ty));
        } else {
          assert(NumElements == 2 && "Unhandled case!");
          // All users of the GEP must be loads.  At each use of the GEP,
          // insert two loads of the appropriate indexed GEP and select
          // between them.
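          // Illustratively, every "load (GEP %A, 0, %i)" over [2 x i32] is
          // turned into two fixed-index loads and a select:
          //   %v0 = load (GEP %A, 0, 0)
          //   %v1 = load (GEP %A, 0, 1)
          //   %v  = select (%i != 0), %v1, %v0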
          Value *IsOne = new ICmpInst(ICmpInst::ICMP_NE, I.getOperand(),
                              Constant::getNullValue(I.getOperand()->getType()),
                                      "isone", GEPI);
          // Insert the new GEP instructions, which are properly indexed.
          SmallVector<Value*, 8> Indices(GEPI->op_begin()+1, GEPI->op_end());
          Indices[1] = Constant::getNullValue(Type::Int32Ty);
          Value *ZeroIdx = new GetElementPtrInst(GEPI->getOperand(0),
                                                 &Indices[0], Indices.size(),
                                                 GEPI->getName()+".0", GEPI);
          Indices[1] = ConstantInt::get(Type::Int32Ty, 1);
          Value *OneIdx = new GetElementPtrInst(GEPI->getOperand(0),
                                                &Indices[0], Indices.size(),
                                                GEPI->getName()+".1", GEPI);
          // Replace all loads of the variable index GEP with loads from both
          // indexes and a select.
          while (!GEPI->use_empty()) {
            LoadInst *LI = cast<LoadInst>(GEPI->use_back());
            Value *Zero = new LoadInst(ZeroIdx, LI->getName()+".0", LI);
            Value *One  = new LoadInst(OneIdx , LI->getName()+".1", LI);
            Value *R = new SelectInst(IsOne, One, Zero, LI->getName(), LI);
            LI->replaceAllUsesWith(R);
            LI->eraseFromParent();
          }
          GEPI->eraseFromParent();
        }
      }
    }
  }
}

/// MergeInType - Add the 'In' type to the accumulated type so far.  If the
/// types are incompatible, return true, otherwise update Accum and return
/// false.
///
/// There are three cases we handle here:
///   1) An effectively-integer union, where the pieces are stored into as
///      smaller integers (common with byte swap and other idioms).
///   2) A union of vector types of the same size and potentially its elements.
///      Here we turn element accesses into insert/extract element operations.
///   3) A union of scalar types, such as int/float or int/pointer.  Here we
///      merge together into integers, allowing the xform to work with #1 as
///      well.
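///
/// For example (illustrative): merging i8 into an accumulated i32 keeps i32
/// (case 1); merging float into an accumulated <4 x float> keeps the vector
/// (case 2); and merging float into i32 yields i32 (case 3).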
static bool MergeInType(const Type *In, const Type *&Accum,
                        const TargetData &TD) {
  // If this is our first type, just use it.
  const VectorType *PTy;
  if (Accum == Type::VoidTy || In == Accum) {
    Accum = In;
  } else if (In == Type::VoidTy) {
    // Noop.
  } else if (In->isInteger() && Accum->isInteger()) {   // integer union.
    // Otherwise pick whichever type is larger.
    if (cast<IntegerType>(In)->getBitWidth() >
        cast<IntegerType>(Accum)->getBitWidth())
      Accum = In;
  } else if (isa<PointerType>(In) && isa<PointerType>(Accum)) {
    // Pointer unions just stay as one of the pointers.
  } else if (isa<VectorType>(In) || isa<VectorType>(Accum)) {
    if ((PTy = dyn_cast<VectorType>(Accum)) &&
        PTy->getElementType() == In) {
      // Accum is a vector, and we are accessing an element: ok.
    } else if ((PTy = dyn_cast<VectorType>(In)) &&
               PTy->getElementType() == Accum) {
      // In is a vector, and Accum is an element: ok, remember In.
      Accum = In;
    } else if ((PTy = dyn_cast<VectorType>(In)) && isa<VectorType>(Accum) &&
               PTy->getBitWidth() == cast<VectorType>(Accum)->getBitWidth()) {
      // Two vectors of the same size: keep Accum.
    } else {
      // Cannot insert a short into a <4 x int> or handle
      // <2 x int> -> <4 x int>.
      return true;
    }
  } else {
    // Pointer/FP/Integer unions merge together as integers.
    switch (Accum->getTypeID()) {
    case Type::PointerTyID: Accum = TD.getIntPtrType(); break;
    case Type::FloatTyID:   Accum = Type::Int32Ty; break;
    case Type::DoubleTyID:  Accum = Type::Int64Ty; break;
    default:
      assert(Accum->isInteger() && "Unknown FP type!");
      break;
    }

    switch (In->getTypeID()) {
    case Type::PointerTyID: In = TD.getIntPtrType(); break;
    case Type::FloatTyID:   In = Type::Int32Ty; break;
    case Type::DoubleTyID:  In = Type::Int64Ty; break;
    default:
      assert(In->isInteger() && "Unknown FP type!");
      break;
    }
    return MergeInType(In, Accum, TD);
  }
  return false;
}

/// getUIntAtLeastAsBitAs - Return an unsigned integer type that is at least
/// as big as the specified number of bits.  If there is no suitable type,
/// this returns null.
const Type *getUIntAtLeastAsBitAs(unsigned NumBits) {
  if (NumBits > 64) return 0;
  if (NumBits > 32) return Type::Int64Ty;
  if (NumBits > 16) return Type::Int32Ty;
  if (NumBits > 8)  return Type::Int16Ty;
  return Type::Int8Ty;
}

/// CanConvertToScalar - V is a pointer.  If we can convert the pointee to a
/// single scalar integer type, return that type.  Further, if the use is not
/// a completely trivial use that mem2reg could promote, set IsNotTrivial.  If
/// there are no uses of this pointer, return Type::VoidTy to differentiate
/// from failure.
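/// For instance (illustrative), an alloca of {i16, i16} that is only ever
/// loaded and stored through a bitcast to i32* can be rewritten as a single
/// i32 alloca.
///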
const Type *SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial) {
  const Type *UsedType = Type::VoidTy; // No uses, no forced type.
  const TargetData &TD = getAnalysis<TargetData>();
  const PointerType *PTy = cast<PointerType>(V->getType());

  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;
       ++UI) {
    Instruction *User = cast<Instruction>(*UI);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      if (MergeInType(LI->getType(), UsedType, TD))
        return 0;

    } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      // Storing the pointer, not into the value?
      if (SI->getOperand(0) == V) return 0;

      // NOTE: We could handle storing of FP imms into integers here!

      if (MergeInType(SI->getOperand(0)->getType(), UsedType, TD))
        return 0;
    } else if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
      IsNotTrivial = true;
      const Type *SubTy = CanConvertToScalar(CI, IsNotTrivial);
      if (!SubTy || MergeInType(SubTy, UsedType, TD)) return 0;
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      // Check to see if this is stepping over an element: GEP Ptr, int C
      if (GEP->getNumOperands() == 2 && isa<ConstantInt>(GEP->getOperand(1))) {
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(1))->getZExtValue();
        unsigned ElSize = TD.getTypeSize(PTy->getElementType());
        unsigned BitOffset = Idx*ElSize*8;
        if (BitOffset > 64 || !isPowerOf2_32(ElSize)) return 0;

        IsNotTrivial = true;
        const Type *SubElt = CanConvertToScalar(GEP, IsNotTrivial);
        if (SubElt == 0) return 0;
        if (SubElt != Type::VoidTy && SubElt->isInteger()) {
          const Type *NewTy =
            getUIntAtLeastAsBitAs(TD.getTypeSize(SubElt)*8+BitOffset);
          if (NewTy == 0 || MergeInType(NewTy, UsedType, TD)) return 0;
          continue;
        }
      } else if (GEP->getNumOperands() == 3 &&
                 isa<ConstantInt>(GEP->getOperand(1)) &&
                 isa<ConstantInt>(GEP->getOperand(2)) &&
                 cast<Constant>(GEP->getOperand(1))->isNullValue()) {
        // We are stepping into an element, e.g. a structure or an array:
        // GEP Ptr, int 0, uint C
        const Type *AggTy = PTy->getElementType();
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();

        if (const ArrayType *ATy = dyn_cast<ArrayType>(AggTy)) {
          if (Idx >= ATy->getNumElements()) return 0;  // Out of range.
        } else if (const VectorType *VectorTy = dyn_cast<VectorType>(AggTy)) {
          // Getting an element of the packed vector.
          if (Idx >= VectorTy->getNumElements()) return 0;  // Out of range.

          // Merge in the vector type.
          if (MergeInType(VectorTy, UsedType, TD)) return 0;

          const Type *SubTy = CanConvertToScalar(GEP, IsNotTrivial);
          if (SubTy == 0) return 0;

          if (SubTy != Type::VoidTy && MergeInType(SubTy, UsedType, TD))
            return 0;

          // We'll need to change this to an insert/extract element operation.
          IsNotTrivial = true;
          continue;    // Everything looks ok

        } else if (isa<StructType>(AggTy)) {
          // Structs are always ok.
        } else {
          return 0;
        }
        const Type *NTy = getUIntAtLeastAsBitAs(TD.getTypeSize(AggTy)*8);
        if (NTy == 0 || MergeInType(NTy, UsedType, TD)) return 0;
        const Type *SubTy = CanConvertToScalar(GEP, IsNotTrivial);
        if (SubTy == 0) return 0;
        if (SubTy != Type::VoidTy && MergeInType(SubTy, UsedType, TD))
          return 0;
        continue;    // Everything looks ok
      }
      return 0;
    } else {
      // Cannot handle this!
      return 0;
    }
  }

  return UsedType;
}

/// ConvertToScalar - The specified alloca passes the CanConvertToScalar
/// predicate and is non-trivial.  Convert it to something that can be
/// trivially promoted into a register by mem2reg.
void SROA::ConvertToScalar(AllocationInst *AI, const Type *ActualTy) {
  DOUT << "CONVERT TO SCALAR: " << *AI << "  TYPE = "
       << *ActualTy << "\n";
  ++NumConverted;

  BasicBlock *EntryBlock = AI->getParent();
  assert(EntryBlock == &EntryBlock->getParent()->getEntryBlock() &&
         "Not in the entry block!");
  EntryBlock->getInstList().remove(AI);  // Take the alloca out of the program.

  // Create and insert the alloca.
  AllocaInst *NewAI = new AllocaInst(ActualTy, 0, AI->getName(),
                                     EntryBlock->begin());
  ConvertUsesToScalar(AI, NewAI, 0);
  delete AI;
}

/// ConvertUsesToScalar - Convert all of the users of Ptr to use the new alloca
/// directly.  This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.  By the end of this, there should be no uses of Ptr.
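///
/// For example (illustrative, little-endian): a load of the high i16 of an
/// i32 alloca at bit Offset 16 is rewritten below as an i32 load, an lshr by
/// 16, and a trunc to i16; stores are handled symmetrically with shl/and/or.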
void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset) {
  bool isVectorInsert = isa<VectorType>(NewAI->getType()->getElementType());
  const TargetData &TD = getAnalysis<TargetData>();
  while (!Ptr->use_empty()) {
    Instruction *User = cast<Instruction>(Ptr->use_back());

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // The load is a bit extract from NewAI shifted right by Offset bits.
      Value *NV = new LoadInst(NewAI, LI->getName(), LI);
      if (NV->getType() != LI->getType()) {
        if (const VectorType *PTy = dyn_cast<VectorType>(NV->getType())) {
          // If the result alloca is a vector type, this is either an element
          // access or a bitcast to another vector type.
          if (isa<VectorType>(LI->getType())) {
            NV = new BitCastInst(NV, LI->getType(), LI->getName(), LI);
          } else {
            // Must be an element access.
            unsigned Elt = Offset/(TD.getTypeSize(PTy->getElementType())*8);
            NV = new ExtractElementInst(
                           NV, ConstantInt::get(Type::Int32Ty, Elt), "tmp", LI);
          }
        } else if (isa<PointerType>(NV->getType())) {
          assert(isa<PointerType>(LI->getType()));
          // Must be ptr->ptr cast.  Anything else would result in NV being
          // an integer.
          NV = new BitCastInst(NV, LI->getType(), LI->getName(), LI);
        } else {
          assert(NV->getType()->isInteger() && "Unknown promotion!");
          if (Offset && Offset < TD.getTypeSize(NV->getType())*8) {
            NV = BinaryOperator::createLShr(NV,
                                        ConstantInt::get(NV->getType(), Offset),
                                            LI->getName(), LI);
          }

          // If the result is an integer, this is a trunc or bitcast.
          if (LI->getType()->isInteger()) {
            NV = CastInst::createTruncOrBitCast(NV, LI->getType(),
                                                LI->getName(), LI);
          } else if (LI->getType()->isFloatingPoint()) {
            // If needed, truncate the integer to the appropriate size.
            if (NV->getType()->getPrimitiveSizeInBits() >
                LI->getType()->getPrimitiveSizeInBits()) {
              switch (LI->getType()->getTypeID()) {
              default: assert(0 && "Unknown FP type!");
              case Type::FloatTyID:
                NV = new TruncInst(NV, Type::Int32Ty, LI->getName(), LI);
                break;
              case Type::DoubleTyID:
                NV = new TruncInst(NV, Type::Int64Ty, LI->getName(), LI);
                break;
              }
            }

            // Then do a bitcast.
            NV = new BitCastInst(NV, LI->getType(), LI->getName(), LI);
          } else {
            // Otherwise must be a pointer.
            NV = new IntToPtrInst(NV, LI->getType(), LI->getName(), LI);
          }
        }
      }
      LI->replaceAllUsesWith(NV);
      LI->eraseFromParent();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      assert(SI->getOperand(0) != Ptr && "Consistency error!");

      // Convert the stored type to the actual type, shift it left to insert,
      // then 'or' into place.
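      // E.g. (illustrative): storing i16 %x at bit Offset 16 of an i32 alloca
      // becomes roughly:
      //   %old  = load i32* %A
      //   %wide = shl (zext i16 %x to i32), 16
      //   %new  = or (and i32 %old, 0xFFFF), %wide
      //   store i32 %new, i32* %A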
      Value *SV = SI->getOperand(0);
      const Type *AllocaType = NewAI->getType()->getElementType();
      if (SV->getType() != AllocaType) {
        Value *Old = new LoadInst(NewAI, NewAI->getName()+".in", SI);

        if (const VectorType *PTy = dyn_cast<VectorType>(AllocaType)) {
          // If the result alloca is a vector type, this is either an element
          // access or a bitcast to another vector type.
          if (isa<VectorType>(SV->getType())) {
            SV = new BitCastInst(SV, AllocaType, SV->getName(), SI);
          } else {
            // Must be an element insertion.
            unsigned Elt = Offset/(TD.getTypeSize(PTy->getElementType())*8);
            SV = new InsertElementInst(Old, SV,
                                       ConstantInt::get(Type::Int32Ty, Elt),
                                       "tmp", SI);
          }
        } else {
          // If SV is a float, convert it to the appropriate integer type.
          // If it is a pointer, do the same, and also handle ptr->ptr casts
          // here.
          switch (SV->getType()->getTypeID()) {
          default:
            assert(!SV->getType()->isFloatingPoint() && "Unknown FP type!");
            break;
          case Type::FloatTyID:
            SV = new BitCastInst(SV, Type::Int32Ty, SV->getName(), SI);
            break;
          case Type::DoubleTyID:
            SV = new BitCastInst(SV, Type::Int64Ty, SV->getName(), SI);
            break;
          case Type::PointerTyID:
            if (isa<PointerType>(AllocaType))
              SV = new BitCastInst(SV, AllocaType, SV->getName(), SI);
            else
              SV = new PtrToIntInst(SV, TD.getIntPtrType(), SV->getName(), SI);
            break;
          }

          unsigned SrcSize = TD.getTypeSize(SV->getType())*8;

          // Always zero extend the value if needed.
          if (SV->getType() != AllocaType)
            SV = CastInst::createZExtOrBitCast(SV, AllocaType,
                                               SV->getName(), SI);
          if (Offset && Offset < AllocaType->getPrimitiveSizeInBits())
            SV = BinaryOperator::createShl(SV,
                                        ConstantInt::get(SV->getType(), Offset),
                                           SV->getName()+".adj", SI);

          // Mask out the bits we are about to insert from the old value, and
          // 'or' the new value into place.
          unsigned TotalBits = TD.getTypeSize(SV->getType())*8;
          if (TotalBits != SrcSize) {
            assert(TotalBits > SrcSize);
            uint64_t Mask = ~(((1ULL << SrcSize)-1) << Offset);
            Mask = Mask & cast<IntegerType>(SV->getType())->getBitMask();
            Old = BinaryOperator::createAnd(Old,
                                        ConstantInt::get(Old->getType(), Mask),
                                            Old->getName()+".mask", SI);
            SV = BinaryOperator::createOr(Old, SV, SV->getName()+".ins", SI);
          }
        }
      }
      new StoreInst(SV, NewAI, SI);
      SI->eraseFromParent();

    } else if (CastInst *CI = dyn_cast<CastInst>(User)) {
      unsigned NewOff = Offset;
      if (TD.isBigEndian() && !isVectorInsert) {
        // Adjust the pointer.  For example, storing 16-bits into a 32-bit
        // alloca with just a cast makes it modify the top 16-bits.
        const Type *SrcTy = cast<PointerType>(Ptr->getType())->getElementType();
        const Type *DstTy = cast<PointerType>(CI->getType())->getElementType();
        int PtrDiffBits = TD.getTypeSize(SrcTy)*8 - TD.getTypeSize(DstTy)*8;
        NewOff += PtrDiffBits;
      }
      ConvertUsesToScalar(CI, NewAI, NewOff);
      CI->eraseFromParent();
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      const PointerType *AggPtrTy =
        cast<PointerType>(GEP->getOperand(0)->getType());
      unsigned AggSizeInBits = TD.getTypeSize(AggPtrTy->getElementType())*8;

      // Check to see if this is stepping over an element: GEP Ptr, int C
      unsigned NewOffset = Offset;
      if (GEP->getNumOperands() == 2) {
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(1))->getZExtValue();
        unsigned BitOffset = Idx*AggSizeInBits;

        if (TD.isLittleEndian() || isVectorInsert)
          NewOffset += BitOffset;
        else
          NewOffset -= BitOffset;

      } else if (GEP->getNumOperands() == 3) {
        // We know that operand #1 is zero.
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();
        const Type *AggTy = AggPtrTy->getElementType();
        if (const SequentialType *SeqTy = dyn_cast<SequentialType>(AggTy)) {
          unsigned ElSizeBits = TD.getTypeSize(SeqTy->getElementType())*8;

          if (TD.isLittleEndian() || isVectorInsert)
            NewOffset += ElSizeBits*Idx;
          else
            NewOffset += AggSizeInBits-ElSizeBits*(Idx+1);
        } else if (const StructType *STy = dyn_cast<StructType>(AggTy)) {
          unsigned EltBitOffset =
            TD.getStructLayout(STy)->getElementOffset(Idx)*8;

          if (TD.isLittleEndian() || isVectorInsert)
            NewOffset += EltBitOffset;
          else {
            const PointerType *ElPtrTy = cast<PointerType>(GEP->getType());
            unsigned ElSizeBits = TD.getTypeSize(ElPtrTy->getElementType())*8;
            NewOffset += AggSizeInBits-(EltBitOffset+ElSizeBits);
          }

        } else {
          assert(0 && "Unsupported operation!");
          abort();
        }
      } else {
        assert(0 && "Unsupported operation!");
        abort();
      }
      ConvertUsesToScalar(GEP, NewAI, NewOffset);
      GEP->eraseFromParent();
    } else {
      assert(0 && "Unsupported operation!");
      abort();
    }
  }
}