//===- ScalarReplAggregates.cpp - Scalar Replacement of Aggregates --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation implements the well-known scalar replacement of
// aggregates transformation.  This xform breaks up alloca instructions of
// aggregate type (structure or array) into individual alloca instructions for
// each member (if possible).  Then, if possible, it transforms the individual
// alloca instructions into nice clean scalar SSA form.
//
// This combines a simple SRoA algorithm with the Mem2Reg algorithm because
// the two often interact, especially for C++ programs.  As such, iterating
// between SRoA and Mem2Reg until we run out of things to promote works well.
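//
// As an illustrative sketch (IR invented for this comment, not taken from a
// test case), SROA rewrites:
//
//   %p = alloca { i32, float }
//   %f = getelementptr { i32, float }* %p, i32 0, i32 1
//   store float %v, float* %f
//
// into a pair of scalar allocas:
//
//   %p.0 = alloca i32
//   %p.1 = alloca float
//   store float %v, float* %p.1
//
// after which mem2reg can promote %p.0 and %p.1 into SSA values.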
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "scalarrepl"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
using namespace llvm;

STATISTIC(NumReplaced,  "Number of allocas broken up");
STATISTIC(NumPromoted,  "Number of allocas promoted");
STATISTIC(NumConverted, "Number of aggregates converted to scalar");
STATISTIC(NumGlobals,   "Number of allocas copied from constant global");
namespace {
  struct VISIBILITY_HIDDEN SROA : public FunctionPass {
    static char ID; // Pass identification, replacement for typeid
    explicit SROA(signed T = -1) : FunctionPass(&ID) {
      if (T == -1)
        SRThreshold = 128;
      else
        SRThreshold = T;
    }

    bool runOnFunction(Function &F);

    bool performScalarRepl(Function &F);
    bool performPromotion(Function &F);

    // getAnalysisUsage - We need dominance information and TargetData; we do
    // not alter the CFG, so say so.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      AU.addRequired<DominanceFrontier>();
      AU.addRequired<TargetData>();
      AU.setPreservesCFG();
    }

    TargetData *TD;
    unsigned SRThreshold;
    /// AllocaInfo - When analyzing uses of an alloca instruction, this
    /// captures information about the uses.  All these fields are initialized
    /// to false and set to true when something is learned.
    struct AllocaInfo {
      /// isUnsafe - This is set to true if the alloca cannot be SROA'd.
      bool isUnsafe;

      /// needsCanon - This is set to true if there is some use of the alloca
      /// that requires canonicalization.
      bool needsCanon;

      /// isMemCpySrc - This is true if this aggregate is memcpy'd from.
      bool isMemCpySrc;

      /// isMemCpyDst - This is true if this aggregate is memcpy'd into.
      bool isMemCpyDst;

      AllocaInfo()
        : isUnsafe(false), needsCanon(false),
          isMemCpySrc(false), isMemCpyDst(false) {}
    };

    void MarkUnsafe(AllocaInfo &I) { I.isUnsafe = true; }

    int isSafeAllocaToScalarRepl(AllocationInst *AI);

    void isSafeUseOfAllocation(Instruction *User, AllocationInst *AI,
                               AllocaInfo &Info);
    void isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI,
                          AllocaInfo &Info);
    void isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI,
                                        unsigned OpNo, AllocaInfo &Info);
    void isSafeUseOfBitCastedAllocation(BitCastInst *User, AllocationInst *AI,
                                        AllocaInfo &Info);

    void DoScalarReplacement(AllocationInst *AI,
                             std::vector<AllocationInst*> &WorkList);
    void CanonicalizeAllocaUsers(AllocationInst *AI);
    AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocationInst *Base);

    void RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
                                    SmallVector<AllocaInst*, 32> &NewElts);

    void RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
                                      AllocationInst *AI,
                                      SmallVector<AllocaInst*, 32> &NewElts);
    void RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocationInst *AI,
                                       SmallVector<AllocaInst*, 32> &NewElts);
    void RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI,
                                      SmallVector<AllocaInst*, 32> &NewElts);

    bool CanConvertToScalar(Value *V, bool &IsNotTrivial, const Type *&ResTy,
                            uint64_t Offset);
    void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset);
    Value *ConvertUsesOfLoadToScalar(LoadInst *LI, AllocaInst *NewAI,
                                     uint64_t Offset);
    Value *ConvertUsesOfStoreToScalar(StoreInst *SI, AllocaInst *NewAI,
                                      uint64_t Offset);
    static Instruction *isOnlyCopiedFromConstantGlobal(AllocationInst *AI);
  };
}

char SROA::ID = 0;
static RegisterPass<SROA> X("scalarrepl", "Scalar Replacement of Aggregates");

// Public interface to the ScalarReplAggregates pass
FunctionPass *llvm::createScalarReplAggregatesPass(signed int Threshold) {
  return new SROA(Threshold);
}
bool SROA::runOnFunction(Function &F) {
  TD = &getAnalysis<TargetData>();

  bool Changed = performPromotion(F);
  while (1) {
    bool LocalChange = performScalarRepl(F);
    if (!LocalChange) break;   // No need to repromote if no scalarrepl
    Changed = true;
    LocalChange = performPromotion(F);
    if (!LocalChange) break;   // No need to re-scalarrepl if no promotion
  }

  return Changed;
}
bool SROA::performPromotion(Function &F) {
  std::vector<AllocaInst*> Allocas;
  DominatorTree     &DT = getAnalysis<DominatorTree>();
  DominanceFrontier &DF = getAnalysis<DominanceFrontier>();

  BasicBlock &BB = F.getEntryBlock();  // Get the entry node for the function

  bool Changed = false;

  while (1) {
    Allocas.clear();

    // Find allocas that are safe to promote, by looking at all instructions
    // in the entry node.
    for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I)
      if (AllocaInst *AI = dyn_cast<AllocaInst>(I))       // Is it an alloca?
        if (isAllocaPromotable(AI))
          Allocas.push_back(AI);

    if (Allocas.empty()) break;

    PromoteMemToReg(Allocas, DT, DF);
    NumPromoted += Allocas.size();
    Changed = true;
  }

  return Changed;
}
/// getNumSAElements - Return the number of elements in the specific struct or
/// array.
static uint64_t getNumSAElements(const Type *T) {
  if (const StructType *ST = dyn_cast<StructType>(T))
    return ST->getNumElements();
  return cast<ArrayType>(T)->getNumElements();
}
// performScalarRepl - This algorithm is a simple worklist driven algorithm,
// which runs on all of the malloc/alloca instructions in the function,
// removing them if they are only used by getelementptr instructions.
//
bool SROA::performScalarRepl(Function &F) {
  std::vector<AllocationInst*> WorkList;

  // Scan the entry basic block, adding any allocas and mallocs to the
  // worklist.
  BasicBlock &BB = F.getEntryBlock();
  for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
    if (AllocationInst *A = dyn_cast<AllocationInst>(I))
      WorkList.push_back(A);

  // Process the worklist
  bool Changed = false;
  while (!WorkList.empty()) {
    AllocationInst *AI = WorkList.back();
    WorkList.pop_back();

    // Handle dead allocas trivially.  These can be formed by SROA'ing arrays
    // with unused elements.
    if (AI->use_empty()) {
      AI->eraseFromParent();
      continue;
    }

    // Check to see if we can perform the core SROA transformation.  We cannot
    // transform the allocation instruction if it is an array allocation
    // (allocations OF arrays are ok though), and an allocation of a scalar
    // value cannot be decomposed at all.
    if (!AI->isArrayAllocation() &&
        (isa<StructType>(AI->getAllocatedType()) ||
         isa<ArrayType>(AI->getAllocatedType())) &&
        AI->getAllocatedType()->isSized() &&
        // Do not promote any struct whose size is larger than "128" bytes.
        TD->getTypePaddedSize(AI->getAllocatedType()) < SRThreshold &&
        // Do not promote any struct into more than "32" separate vars.
        getNumSAElements(AI->getAllocatedType()) < SRThreshold/4) {
      // Check that all of the users of the allocation are capable of being
      // transformed.
      switch (isSafeAllocaToScalarRepl(AI)) {
      default: assert(0 && "Unexpected value!");
      case 0:  // Not safe to scalar replace.
        break;
      case 1:  // Safe, but requires cleanup/canonicalizations first
        CanonicalizeAllocaUsers(AI);
        // FALL THROUGH.
      case 3:  // Safe to scalar replace.
        DoScalarReplacement(AI, WorkList);
        Changed = true;
        continue;
      }
    }

    // Check to see if this allocation is only modified by a memcpy/memmove
    // from a constant global.  If this is the case, we can change all users
    // to use the constant global instead.  This is commonly produced by the
    // CFE by constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }"
    // if 'A' is only subsequently read.
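    //
    // For instance (a sketch with invented names), given a constant global
    // @C of type [3 x i32]:
    //
    //   %A = alloca [3 x i32]
    //   %p = bitcast [3 x i32]* %A to i8*
    //   call void @llvm.memcpy.i32(i8* %p,
    //                              i8* bitcast ([3 x i32]* @C to i8*),
    //                              i32 12, i32 4)
    //
    // every read of %A can be redirected to @C, and both the memcpy and the
    // alloca go away.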
    if (Instruction *TheCopy = isOnlyCopiedFromConstantGlobal(AI)) {
      DOUT << "Found alloca equal to global: " << *AI;
      DOUT << "  memcpy = " << *TheCopy;
      Constant *TheSrc = cast<Constant>(TheCopy->getOperand(2));
      AI->replaceAllUsesWith(ConstantExpr::getBitCast(TheSrc, AI->getType()));
      TheCopy->eraseFromParent();  // Don't mutate the global.
      AI->eraseFromParent();
      ++NumGlobals;
      Changed = true;
      continue;
    }

    // If we can turn this aggregate value (potentially with casts) into a
    // simple scalar value that can be mem2reg'd into a register value, do so.
    // IsNotTrivial tracks whether this is something that mem2reg could have
    // promoted itself.  If so, we don't want to transform it needlessly.  Note
    // that we can't just check based on the type: the alloca may be of an i32
    // but that has pointer arithmetic to set byte 3 of it or something.
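    //
    // For example (illustrative only), an i32 alloca poked one byte at a time
    // through i8* arithmetic:
    //
    //   %X = alloca i32
    //   %B = bitcast i32* %X to i8*
    //   %b3 = getelementptr i8* %B, i32 3
    //   store i8 %v, i8* %b3
    //
    // is something mem2reg cannot handle directly, but the conversion below
    // turns it into shift/mask operations on a single i32 SSA value.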
    bool IsNotTrivial = false;
    const Type *ActualTy = 0;
    if (CanConvertToScalar(AI, IsNotTrivial, ActualTy, 0))
      if (IsNotTrivial && ActualTy &&
          TD->getTypeSizeInBits(ActualTy) < SRThreshold*8) {
        DOUT << "CONVERT TO SCALAR: " << *AI << "  TYPE = " << *ActualTy << "\n";
        ++NumConverted;

        // Create and insert the alloca.
        AllocaInst *NewAI = new AllocaInst(ActualTy, 0, AI->getName(),
                                           AI->getParent()->begin());
        ConvertUsesToScalar(AI, NewAI, 0);
        AI->eraseFromParent();
        Changed = true;
        continue;
      }

    // Otherwise, couldn't process this.
  }

  return Changed;
}
/// DoScalarReplacement - This alloca satisfied the isSafeAllocaToScalarRepl
/// predicate, do SROA now.
void SROA::DoScalarReplacement(AllocationInst *AI,
                               std::vector<AllocationInst*> &WorkList) {
  DOUT << "Found inst to SROA: " << *AI;
  SmallVector<AllocaInst*, 32> ElementAllocas;
  if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
    ElementAllocas.reserve(ST->getNumContainedTypes());
    for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) {
      AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0,
                                      AI->getAlignment(),
                                      AI->getName() + "." + utostr(i), AI);
      ElementAllocas.push_back(NA);
      WorkList.push_back(NA);  // Add to worklist for recursive processing
    }
  } else {
    const ArrayType *AT = cast<ArrayType>(AI->getAllocatedType());
    ElementAllocas.reserve(AT->getNumElements());
    const Type *ElTy = AT->getElementType();
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      AllocaInst *NA = new AllocaInst(ElTy, 0, AI->getAlignment(),
                                      AI->getName() + "." + utostr(i), AI);
      ElementAllocas.push_back(NA);
      WorkList.push_back(NA);  // Add to worklist for recursive processing
    }
  }

  // Now that we have created the alloca instructions that we want to use,
  // expand the getelementptr instructions to use them.
  //
  while (!AI->use_empty()) {
    Instruction *User = cast<Instruction>(AI->use_back());
    if (BitCastInst *BCInst = dyn_cast<BitCastInst>(User)) {
      RewriteBitCastUserOfAlloca(BCInst, AI, ElementAllocas);
      BCInst->eraseFromParent();
      continue;
    }

    // Replace:
    //   %res = load { i32, i32 }* %alloc
    // with:
    //   %load.0 = load i32* %alloc.0
    //   %insert.0 = insertvalue { i32, i32 } zeroinitializer, i32 %load.0, 0
    //   %load.1 = load i32* %alloc.1
    //   %insert = insertvalue { i32, i32 } %insert.0, i32 %load.1, 1
    // (Also works for arrays instead of structs)
    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      Value *Insert = UndefValue::get(LI->getType());
      for (unsigned i = 0, e = ElementAllocas.size(); i != e; ++i) {
        Value *Load = new LoadInst(ElementAllocas[i], "load", LI);
        Insert = InsertValueInst::Create(Insert, Load, i, "insert", LI);
      }
      LI->replaceAllUsesWith(Insert);
      LI->eraseFromParent();
      continue;
    }

    // Replace:
    //   store { i32, i32 } %val, { i32, i32 }* %alloc
    // with:
    //   %val.0 = extractvalue { i32, i32 } %val, 0
    //   store i32 %val.0, i32* %alloc.0
    //   %val.1 = extractvalue { i32, i32 } %val, 1
    //   store i32 %val.1, i32* %alloc.1
    // (Also works for arrays instead of structs)
    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      Value *Val = SI->getOperand(0);
      for (unsigned i = 0, e = ElementAllocas.size(); i != e; ++i) {
        Value *Extract = ExtractValueInst::Create(Val, i, Val->getName(), SI);
        new StoreInst(Extract, ElementAllocas[i], SI);
      }
      SI->eraseFromParent();
      continue;
    }

    GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User);
    // We now know that the GEP is of the form: GEP <ptr>, 0, <cst>
    unsigned Idx =
      (unsigned)cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();

    assert(Idx < ElementAllocas.size() && "Index out of range?");
    AllocaInst *AllocaToUse = ElementAllocas[Idx];

    Value *RepValue;
    if (GEPI->getNumOperands() == 3) {
      // Do not insert a new getelementptr instruction with zero indices, only
      // to have it optimized out later.
      RepValue = AllocaToUse;
    } else {
      // We are indexing deeply into the structure, so we still need a
      // getelementptr instruction to finish the indexing.  This may be
      // expanded itself once the worklist is rerun.
      //
      SmallVector<Value*, 8> NewArgs;
      NewArgs.push_back(Constant::getNullValue(Type::Int32Ty));
      NewArgs.append(GEPI->op_begin()+3, GEPI->op_end());
      RepValue = GetElementPtrInst::Create(AllocaToUse, NewArgs.begin(),
                                           NewArgs.end(), "", GEPI);
      RepValue->takeName(GEPI);
    }

    // If this GEP is to the start of the aggregate, check for memcpys.
    if (Idx == 0 && GEPI->hasAllZeroIndices())
      RewriteBitCastUserOfAlloca(GEPI, AI, ElementAllocas);

    // Move all of the users over to the new GEP.
    GEPI->replaceAllUsesWith(RepValue);
    // Delete the old GEP
    GEPI->eraseFromParent();
  }

  // Finally, delete the Alloca instruction
  AI->eraseFromParent();
  NumReplaced++;
}
/// isSafeElementUse - Check to see if this use is an allowed use for a
/// getelementptr instruction of an array aggregate allocation.  isFirstElt
/// indicates whether Ptr is known to point to the start of the aggregate.
void SROA::isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI,
                            AllocaInfo &Info) {
  for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
       I != E; ++I) {
    Instruction *User = cast<Instruction>(*I);
    switch (User->getOpcode()) {
    case Instruction::Load:  break;
    case Instruction::Store:
      // Store is ok if storing INTO the pointer, not storing the pointer
      if (User->getOperand(0) == Ptr) return MarkUnsafe(Info);
      break;
    case Instruction::GetElementPtr: {
      GetElementPtrInst *GEP = cast<GetElementPtrInst>(User);
      bool AreAllZeroIndices = isFirstElt;
      if (GEP->getNumOperands() > 1) {
        if (!isa<ConstantInt>(GEP->getOperand(1)) ||
            !cast<ConstantInt>(GEP->getOperand(1))->isZero())
          // Using pointer arithmetic to navigate the array.
          return MarkUnsafe(Info);

        if (AreAllZeroIndices)
          AreAllZeroIndices = GEP->hasAllZeroIndices();
      }
      isSafeElementUse(GEP, AreAllZeroIndices, AI, Info);
      if (Info.isUnsafe) return;
      break;
    }
    case Instruction::BitCast:
      if (isFirstElt) {
        isSafeUseOfBitCastedAllocation(cast<BitCastInst>(User), AI, Info);
        if (Info.isUnsafe) return;
        break;
      }
      DOUT << "  Transformation preventing inst: " << *User;
      return MarkUnsafe(Info);
    case Instruction::Call:
      if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
        if (isFirstElt) {
          isSafeMemIntrinsicOnAllocation(MI, AI, I.getOperandNo(), Info);
          if (Info.isUnsafe) return;
          break;
        }
      }
      DOUT << "  Transformation preventing inst: " << *User;
      return MarkUnsafe(Info);
    default:
      DOUT << "  Transformation preventing inst: " << *User;
      return MarkUnsafe(Info);
    }
  }
  return;  // All users look ok :)
}
/// AllUsersAreLoads - Return true if all users of this value are loads.
static bool AllUsersAreLoads(Value *Ptr) {
  for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
       I != E; ++I)
    if (cast<Instruction>(*I)->getOpcode() != Instruction::Load)
      return false;
  return true;
}
/// isSafeUseOfAllocation - Check to see if this user is an allowed use for an
/// aggregate allocation.
void SROA::isSafeUseOfAllocation(Instruction *User, AllocationInst *AI,
                                 AllocaInfo &Info) {
  if (BitCastInst *C = dyn_cast<BitCastInst>(User))
    return isSafeUseOfBitCastedAllocation(C, AI, Info);

  if (LoadInst *LI = dyn_cast<LoadInst>(User))
    if (!LI->isVolatile())
      return;  // Loads (returning a first class aggregate) are always rewritable

  if (StoreInst *SI = dyn_cast<StoreInst>(User))
    if (!SI->isVolatile() && SI->getOperand(0) != AI)
      return;  // Store is ok if storing INTO the pointer, not storing the pointer

  GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User);
  if (GEPI == 0)
    return MarkUnsafe(Info);

  gep_type_iterator I = gep_type_begin(GEPI), E = gep_type_end(GEPI);

  // The GEP is not safe to transform if not of the form "GEP <ptr>, 0, <cst>".
  if (I == E ||
      I.getOperand() != Constant::getNullValue(I.getOperand()->getType())) {
    return MarkUnsafe(Info);
  }

  ++I;
  if (I == E) return MarkUnsafe(Info);  // ran out of GEP indices??

  bool IsAllZeroIndices = true;

  // If the first index is a non-constant index into an array, see if we can
  // handle it as a special case.
  if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
    if (!isa<ConstantInt>(I.getOperand())) {
      IsAllZeroIndices = 0;
      uint64_t NumElements = AT->getNumElements();

      // If this is an array index and the index is not constant, we cannot
      // promote... that is unless the array has exactly one or two elements
      // in it, in which case we CAN promote it, but we have to canonicalize
      // this out if this is the only problem.
      if ((NumElements == 1 || NumElements == 2) &&
          AllUsersAreLoads(GEPI)) {
        Info.needsCanon = true;
        return;  // Canonicalization required!
      }
      return MarkUnsafe(Info);
    }
  }

  // Walk through the GEP type indices, checking the types that this indexes
  // into.
  for (; I != E; ++I) {
    // Ignore struct elements, no extra checking needed for these.
    if (isa<StructType>(*I))
      continue;

    ConstantInt *IdxVal = dyn_cast<ConstantInt>(I.getOperand());
    if (!IdxVal) return MarkUnsafe(Info);

    // Are all indices still zero?
    IsAllZeroIndices &= IdxVal->isZero();

    if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
      // This GEP indexes an array.  Verify that this is an in-range constant
      // integer.  Specifically, consider A[0][i].  We cannot know that the
      // user isn't doing invalid things like allowing i to index an
      // out-of-range subscript that accesses A[1].  Because of this, we have
      // to reject SROA of any accesses into structs where any of the
      // components are variables.
      if (IdxVal->getZExtValue() >= AT->getNumElements())
        return MarkUnsafe(Info);
    } else if (const VectorType *VT = dyn_cast<VectorType>(*I)) {
      if (IdxVal->getZExtValue() >= VT->getNumElements())
        return MarkUnsafe(Info);
    }
  }

  // If there are any non-simple uses of this getelementptr, make sure to
  // reject them.
  return isSafeElementUse(GEPI, IsAllZeroIndices, AI, Info);
}
/// isSafeMemIntrinsicOnAllocation - Check whether the specified memory
/// intrinsic can be promoted by SROA.  At this point, we know that the operand
/// of the memintrinsic is a pointer to the beginning of the allocation.
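///
/// For example (illustrative IR): for an 8-byte {i32,i32} alloca, a copy of
/// the whole aggregate can be promoted, while a partial copy cannot:
///
///   call void @llvm.memcpy.i32(i8* %dst, i8* %src, i32 8, i32 4)  ; ok
///   call void @llvm.memcpy.i32(i8* %dst, i8* %src, i32 4, i32 4)  ; unsafe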
void SROA::isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI,
                                          unsigned OpNo, AllocaInfo &Info) {
  // If not constant length, give up.
  ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
  if (!Length) return MarkUnsafe(Info);

  // If not the whole aggregate, give up.
  if (Length->getZExtValue() !=
      TD->getTypePaddedSize(AI->getType()->getElementType()))
    return MarkUnsafe(Info);

  // We only know about memcpy/memset/memmove.
  if (!isa<MemCpyInst>(MI) && !isa<MemSetInst>(MI) && !isa<MemMoveInst>(MI))
    return MarkUnsafe(Info);

  // Otherwise, we can transform it.  Determine whether this is a memcpy/set
  // into or out of the aggregate.
  if (OpNo == 1)
    Info.isMemCpyDst = true;
  else {
    assert(OpNo == 2);
    Info.isMemCpySrc = true;
  }
}
/// isSafeUseOfBitCastedAllocation - Check whether all users of this bitcast
/// are safe to transform.
void SROA::isSafeUseOfBitCastedAllocation(BitCastInst *BC, AllocationInst *AI,
                                          AllocaInfo &Info) {
  for (Value::use_iterator UI = BC->use_begin(), E = BC->use_end();
       UI != E; ++UI) {
    if (BitCastInst *BCU = dyn_cast<BitCastInst>(UI)) {
      isSafeUseOfBitCastedAllocation(BCU, AI, Info);
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(UI)) {
      isSafeMemIntrinsicOnAllocation(MI, AI, UI.getOperandNo(), Info);
    } else if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
      if (SI->isVolatile())
        return MarkUnsafe(Info);

      // If storing the entire alloca in one chunk through a bitcasted pointer
      // to integer, we can transform it.  This happens (for example) when you
      // cast a {i32,i32}* to i64* and store through it.  This is similar to
      // the memcpy case and occurs in various "byval" cases and emulated
      // memcpys.
      if (isa<IntegerType>(SI->getOperand(0)->getType()) &&
          TD->getTypePaddedSize(SI->getOperand(0)->getType()) ==
          TD->getTypePaddedSize(AI->getType()->getElementType())) {
        Info.isMemCpyDst = true;
        continue;
      }
      return MarkUnsafe(Info);
    } else if (LoadInst *LI = dyn_cast<LoadInst>(UI)) {
      if (LI->isVolatile())
        return MarkUnsafe(Info);

      // If loading the entire alloca in one chunk through a bitcasted pointer
      // to integer, we can transform it.  This happens (for example) when you
      // cast a {i32,i32}* to i64* and load through it.  This is similar to the
      // memcpy case and occurs in various "byval" cases and emulated memcpys.
      if (isa<IntegerType>(LI->getType()) &&
          TD->getTypePaddedSize(LI->getType()) ==
          TD->getTypePaddedSize(AI->getType()->getElementType())) {
        Info.isMemCpySrc = true;
        continue;
      }
      return MarkUnsafe(Info);
    } else {
      return MarkUnsafe(Info);
    }
    if (Info.isUnsafe) return;
  }
}
/// RewriteBitCastUserOfAlloca - BCInst (transitively) bitcasts AI, or indexes
/// to its first element.  Transform users of the cast to use the new values
/// instead.
void SROA::RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
                                      SmallVector<AllocaInst*, 32> &NewElts) {
  Value::use_iterator UI = BCInst->use_begin(), UE = BCInst->use_end();
  while (UI != UE) {
    Instruction *User = cast<Instruction>(*UI++);
    if (BitCastInst *BCU = dyn_cast<BitCastInst>(User)) {
      RewriteBitCastUserOfAlloca(BCU, AI, NewElts);
      if (BCU->use_empty()) BCU->eraseFromParent();
      continue;
    }

    if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
      // This must be memcpy/memmove/memset of the entire aggregate.
      // Split into one per element.
      RewriteMemIntrinUserOfAlloca(MI, BCInst, AI, NewElts);
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      // If this is a store of the entire alloca from an integer, rewrite it.
      RewriteStoreUserOfWholeAlloca(SI, AI, NewElts);
      continue;
    }

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // If this is a load of the entire alloca to an integer, rewrite it.
      RewriteLoadUserOfWholeAlloca(LI, AI, NewElts);
      continue;
    }

    // Otherwise it must be some other user of a gep of the first pointer.
    // Just leave these alone.
  }
}

/// RewriteMemIntrinUserOfAlloca - MI is a memcpy/memset/memmove from or to AI.
/// Rewrite it to copy or set the elements of the scalarized memory.
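///
/// As a sketch (invented IR, assuming the {i32,i32} alloca %A was split into
/// %A.0 and %A.1), the 8-byte copy:
///
///   call void @llvm.memcpy.i32(i8* %cast, i8* %src, i32 8, i32 4)
///
/// becomes one load/store pair per element:
///
///   %s0 = getelementptr { i32, i32 }* %other, i32 0, i32 0
///   %v0 = load i32* %s0
///   store i32 %v0, i32* %A.0
///   %s1 = getelementptr { i32, i32 }* %other, i32 0, i32 1
///   %v1 = load i32* %s1
///   store i32 %v1, i32* %A.1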
void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
                                        AllocationInst *AI,
                                        SmallVector<AllocaInst*, 32> &NewElts) {

  // If this is a memcpy/memmove, construct the other pointer as the
  // appropriate type.
  Value *OtherPtr = 0;
  if (MemCpyInst *MCI = dyn_cast<MemCpyInst>(MI)) {
    if (BCInst == MCI->getRawDest())
      OtherPtr = MCI->getRawSource();
    else {
      assert(BCInst == MCI->getRawSource());
      OtherPtr = MCI->getRawDest();
    }
  } else if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
    if (BCInst == MMI->getRawDest())
      OtherPtr = MMI->getRawSource();
    else {
      assert(BCInst == MMI->getRawSource());
      OtherPtr = MMI->getRawDest();
    }
  }

  // If there is an other pointer, we want to convert it to the same pointer
  // type as AI has, so we can GEP through it safely.
  if (OtherPtr) {
    // It is likely that OtherPtr is a bitcast, if so, remove it.
    if (BitCastInst *BC = dyn_cast<BitCastInst>(OtherPtr))
      OtherPtr = BC->getOperand(0);
    // All zero GEPs are effectively bitcasts.
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(OtherPtr))
      if (GEP->hasAllZeroIndices())
        OtherPtr = GEP->getOperand(0);

    if (ConstantExpr *BCE = dyn_cast<ConstantExpr>(OtherPtr))
      if (BCE->getOpcode() == Instruction::BitCast)
        OtherPtr = BCE->getOperand(0);

    // If the pointer is not the right type, insert a bitcast to the right
    // type.
    if (OtherPtr->getType() != AI->getType())
      OtherPtr = new BitCastInst(OtherPtr, AI->getType(), OtherPtr->getName(),
                                 MI);
  }

  // Process each element of the aggregate.
  Value *TheFn = MI->getOperand(0);
  const Type *BytePtrTy = MI->getRawDest()->getType();
  bool SROADest = MI->getRawDest() == BCInst;

  Constant *Zero = Constant::getNullValue(Type::Int32Ty);

  for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
    // If this is a memcpy/memmove, emit a GEP of the other element address.
    Value *OtherElt = 0;
    if (OtherPtr) {
      Value *Idx[2] = { Zero, ConstantInt::get(Type::Int32Ty, i) };
      OtherElt = GetElementPtrInst::Create(OtherPtr, Idx, Idx + 2,
                                           OtherPtr->getNameStr()+"."+utostr(i),
                                           MI);
    }

    Value *EltPtr = NewElts[i];
    const Type *EltTy = cast<PointerType>(EltPtr->getType())->getElementType();

    // If we got down to a scalar, insert a load or store as appropriate.
    if (EltTy->isSingleValueType()) {
      if (isa<MemCpyInst>(MI) || isa<MemMoveInst>(MI)) {
        Value *Elt = new LoadInst(SROADest ? OtherElt : EltPtr, "tmp", MI);
        new StoreInst(Elt, SROADest ? EltPtr : OtherElt, MI);
        continue;
      }

      assert(isa<MemSetInst>(MI));

      // If the stored element is zero (common case), just store a null
      // constant.
      Constant *StoreVal;
      if (ConstantInt *CI = dyn_cast<ConstantInt>(MI->getOperand(2))) {
        if (CI->isZero()) {
          StoreVal = Constant::getNullValue(EltTy);  // 0.0, null, 0, <0,0>
        } else {
          // If EltTy is a vector type, get the element type.
          const Type *ValTy = EltTy;
          if (const VectorType *VTy = dyn_cast<VectorType>(ValTy))
            ValTy = VTy->getElementType();

          // Construct an integer with the right value.
          unsigned EltSize = TD->getTypeSizeInBits(ValTy);
          APInt OneVal(EltSize, CI->getZExtValue());
          APInt TotalVal(OneVal);
          // Set each byte.
          for (unsigned i = 0; 8*i < EltSize; ++i) {
            TotalVal = TotalVal.shl(8);
            TotalVal |= OneVal;
          }

          // Convert the integer value to the appropriate type.
          StoreVal = ConstantInt::get(TotalVal);
          if (isa<PointerType>(ValTy))
            StoreVal = ConstantExpr::getIntToPtr(StoreVal, ValTy);
          else if (ValTy->isFloatingPoint())
            StoreVal = ConstantExpr::getBitCast(StoreVal, ValTy);
          assert(StoreVal->getType() == ValTy && "Type mismatch!");

          // If the requested value was a vector constant, create it.
          if (EltTy != ValTy) {
            unsigned NumElts = cast<VectorType>(EltTy)->getNumElements();
            SmallVector<Constant*, 16> Elts(NumElts, StoreVal);
            StoreVal = ConstantVector::get(&Elts[0], NumElts);
          }
        }
        new StoreInst(StoreVal, EltPtr, MI);
        continue;
      }
      // Otherwise, if we're storing a byte variable, use a memset call for
      // this element.
    }

    // Cast the element pointer to BytePtrTy.
    if (EltPtr->getType() != BytePtrTy)
      EltPtr = new BitCastInst(EltPtr, BytePtrTy, EltPtr->getNameStr(), MI);

    // Cast the other pointer (if we have one) to BytePtrTy.
    if (OtherElt && OtherElt->getType() != BytePtrTy)
      OtherElt = new BitCastInst(OtherElt, BytePtrTy, OtherElt->getNameStr(),
                                 MI);

    unsigned EltSize = TD->getTypePaddedSize(EltTy);

    // Finally, insert the meminst for this element.
    if (isa<MemCpyInst>(MI) || isa<MemMoveInst>(MI)) {
      Value *Ops[] = {
        SROADest ? EltPtr : OtherElt,  // Dest ptr
        SROADest ? OtherElt : EltPtr,  // Src ptr
        ConstantInt::get(MI->getOperand(3)->getType(), EltSize),  // Size
        Zero  // Align
      };
      CallInst::Create(TheFn, Ops, Ops + 4, "", MI);
    } else {
      assert(isa<MemSetInst>(MI));
      Value *Ops[] = {
        EltPtr, MI->getOperand(2),  // Dest, Value,
        ConstantInt::get(MI->getOperand(3)->getType(), EltSize),  // Size
        Zero  // Align
      };
      CallInst::Create(TheFn, Ops, Ops + 4, "", MI);
    }
  }
  MI->eraseFromParent();
}
/// RewriteStoreUserOfWholeAlloca - We found a store of an integer that
/// overwrites the entire allocation.  Extract out the pieces of the stored
/// integer and store them individually.
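///
/// As an illustrative sketch for { i32, i32 } on a little-endian target,
///
///   store i64 %V, i64* %cast     ; %cast bitcasts the alloca
///
/// becomes roughly:
///
///   %lo = trunc i64 %V to i32
///   store i32 %lo, i32* %A.0
///   %hi.sh = lshr i64 %V, 32
///   %hi = trunc i64 %hi.sh to i32
///   store i32 %hi, i32* %A.1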
void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI,
                                         AllocationInst *AI,
                                         SmallVector<AllocaInst*, 32> &NewElts){
  // Extract each element out of the integer according to its structure offset
  // and store the element value to the individual alloca.
  Value *SrcVal = SI->getOperand(0);
  const Type *AllocaEltTy = AI->getType()->getElementType();
  uint64_t AllocaSizeBits = TD->getTypePaddedSizeInBits(AllocaEltTy);

  // If this isn't a store of an integer to the whole alloca, it may be a store
  // to the first element.  Just ignore the store in this case and normal SROA
  // will handle it.
  if (!isa<IntegerType>(SrcVal->getType()) ||
      TD->getTypePaddedSizeInBits(SrcVal->getType()) != AllocaSizeBits)
    return;

  DOUT << "PROMOTING STORE TO WHOLE ALLOCA: " << *AI << *SI;

  // There are two forms here: AI could be an array or struct.  Both cases
  // have different ways to compute the element offset.
  if (const StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
    const StructLayout *Layout = TD->getStructLayout(EltSTy);

    for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
      // Get the number of bits to shift SrcVal to get the value.
      const Type *FieldTy = EltSTy->getElementType(i);
      uint64_t Shift = Layout->getElementOffsetInBits(i);

      if (TD->isBigEndian())
        Shift = AllocaSizeBits-Shift-TD->getTypePaddedSizeInBits(FieldTy);

      Value *EltVal = SrcVal;
      if (Shift) {
        Value *ShiftVal = ConstantInt::get(EltVal->getType(), Shift);
        EltVal = BinaryOperator::CreateLShr(EltVal, ShiftVal,
                                            "sroa.store.elt", SI);
      }

      // Truncate down to an integer of the right size.
      uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);

      // Ignore zero sized fields like {}, they obviously contain no data.
      if (FieldSizeBits == 0) continue;

      if (FieldSizeBits != AllocaSizeBits)
        EltVal = new TruncInst(EltVal, IntegerType::get(FieldSizeBits), "", SI);
      Value *DestField = NewElts[i];
      if (EltVal->getType() == FieldTy) {
        // Storing to an integer field of this size, just do it.
      } else if (FieldTy->isFloatingPoint() || isa<VectorType>(FieldTy)) {
        // Bitcast to the right element type (for fp/vector values).
        EltVal = new BitCastInst(EltVal, FieldTy, "", SI);
      } else {
        // Otherwise, bitcast the dest pointer (for aggregates).
        DestField = new BitCastInst(DestField,
                                    PointerType::getUnqual(EltVal->getType()),
                                    "", SI);
      }
      new StoreInst(EltVal, DestField, SI);
    }

  } else {
    const ArrayType *ATy = cast<ArrayType>(AllocaEltTy);
    const Type *ArrayEltTy = ATy->getElementType();
    uint64_t ElementOffset = TD->getTypePaddedSizeInBits(ArrayEltTy);
    uint64_t ElementSizeBits = TD->getTypeSizeInBits(ArrayEltTy);

    uint64_t Shift;

    if (TD->isBigEndian())
      Shift = AllocaSizeBits-ElementOffset;
    else
      Shift = 0;

    for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
      // Ignore zero sized fields like {}, they obviously contain no data.
      if (ElementSizeBits == 0) continue;

      Value *EltVal = SrcVal;
      if (Shift) {
        Value *ShiftVal = ConstantInt::get(EltVal->getType(), Shift);
        EltVal = BinaryOperator::CreateLShr(EltVal, ShiftVal,
                                            "sroa.store.elt", SI);
      }

      // Truncate down to an integer of the right size.
      if (ElementSizeBits != AllocaSizeBits)
        EltVal = new TruncInst(EltVal, IntegerType::get(ElementSizeBits),
                               "", SI);
      Value *DestField = NewElts[i];
      if (EltVal->getType() == ArrayEltTy) {
        // Storing to an integer field of this size, just do it.
      } else if (ArrayEltTy->isFloatingPoint() || isa<VectorType>(ArrayEltTy)) {
        // Bitcast to the right element type (for fp/vector values).
        EltVal = new BitCastInst(EltVal, ArrayEltTy, "", SI);
      } else {
        // Otherwise, bitcast the dest pointer (for aggregates).
        DestField = new BitCastInst(DestField,
                                    PointerType::getUnqual(EltVal->getType()),
                                    "", SI);
      }
      new StoreInst(EltVal, DestField, SI);

      if (TD->isBigEndian())
        Shift -= ElementOffset;
      else
        Shift += ElementOffset;
    }
  }

  SI->eraseFromParent();
}
/// RewriteLoadUserOfWholeAlloca - We found a load of the entire allocation to
/// an integer.  Load the individual pieces to form the aggregate value.
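///
/// As a sketch, this is the inverse of the store case above: on a
/// little-endian target, "%V = load i64* %cast" becomes roughly:
///
///   %lo = load i32* %A.0
///   %lo.z = zext i32 %lo to i64
///   %hi = load i32* %A.1
///   %hi.z = zext i32 %hi to i64
///   %hi.sh = shl i64 %hi.z, 32
///   %V = or i64 %hi.sh, %lo.z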
void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI,
                                        SmallVector<AllocaInst*, 32> &NewElts) {
  // Extract each element out of the NewElts according to its structure offset
  // and form the result value.
  const Type *AllocaEltTy = AI->getType()->getElementType();
  uint64_t AllocaSizeBits = TD->getTypePaddedSizeInBits(AllocaEltTy);

  // If this isn't a load of the whole alloca to an integer, it may be a load
  // of the first element.  Just ignore the load in this case and normal SROA
  // will handle it.
  if (!isa<IntegerType>(LI->getType()) ||
      TD->getTypePaddedSizeInBits(LI->getType()) != AllocaSizeBits)
    return;

  DOUT << "PROMOTING LOAD OF WHOLE ALLOCA: " << *AI << *LI;

  // There are two forms here: AI could be an array or struct.  Both cases
  // have different ways to compute the element offset.
  const StructLayout *Layout = 0;
  uint64_t ArrayEltBitOffset = 0;
  if (const StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
    Layout = TD->getStructLayout(EltSTy);
  } else {
    const Type *ArrayEltTy = cast<ArrayType>(AllocaEltTy)->getElementType();
    ArrayEltBitOffset = TD->getTypePaddedSizeInBits(ArrayEltTy);
  }

  Value *ResultVal = Constant::getNullValue(LI->getType());

  for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
    // Load the value from the alloca.  If the NewElt is an aggregate, cast
    // the pointer to an integer of the same size before doing the load.
    Value *SrcField = NewElts[i];
    const Type *FieldTy =
      cast<PointerType>(SrcField->getType())->getElementType();
    uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);

    // Ignore zero sized fields like {}, they obviously contain no data.
    if (FieldSizeBits == 0) continue;

    const IntegerType *FieldIntTy = IntegerType::get(FieldSizeBits);
    if (!isa<IntegerType>(FieldTy) && !FieldTy->isFloatingPoint() &&
        !isa<VectorType>(FieldTy))
      SrcField = new BitCastInst(SrcField, PointerType::getUnqual(FieldIntTy),
                                 "", LI);
    SrcField = new LoadInst(SrcField, "sroa.load.elt", LI);

    // If SrcField is a fp or vector of the right size but that isn't an
    // integer type, bitcast to an integer so we can shift it.
    if (SrcField->getType() != FieldIntTy)
      SrcField = new BitCastInst(SrcField, FieldIntTy, "", LI);

    // Zero extend the field to be the same size as the final alloca so that
    // we can shift and insert it.
    if (SrcField->getType() != ResultVal->getType())
      SrcField = new ZExtInst(SrcField, ResultVal->getType(), "", LI);

    // Determine the number of bits to shift SrcField.
    uint64_t Shift;
    if (Layout)  // Struct case.
      Shift = Layout->getElementOffsetInBits(i);
    else  // Array case.
      Shift = i*ArrayEltBitOffset;

    if (TD->isBigEndian())
      Shift = AllocaSizeBits-Shift-FieldIntTy->getBitWidth();

    if (Shift) {
      Value *ShiftVal = ConstantInt::get(SrcField->getType(), Shift);
      SrcField = BinaryOperator::CreateShl(SrcField, ShiftVal, "", LI);
    }

    ResultVal = BinaryOperator::CreateOr(SrcField, ResultVal, "", LI);
  }

  LI->replaceAllUsesWith(ResultVal);
  LI->eraseFromParent();
}
/// HasPadding - Return true if the specified type has any structure or
/// alignment padding, false otherwise.
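///
/// For example, with a typical TargetData, { i8, i32 } has 24 bits of padding
/// between its fields and { i32, i8 } has 24 bits of tail padding, so both
/// report true; { i32, i32 } reports false.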
static bool HasPadding(const Type *Ty, const TargetData &TD) {
  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = TD.getStructLayout(STy);
    unsigned PrevFieldBitOffset = 0;
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned FieldBitOffset = SL->getElementOffsetInBits(i);

      // Padding in sub-elements?
      if (HasPadding(STy->getElementType(i), TD))
        return true;

      // Check to see if there is any padding between this element and the
      // previous one.
      if (i) {
        unsigned PrevFieldEnd =
          PrevFieldBitOffset+TD.getTypeSizeInBits(STy->getElementType(i-1));
        if (PrevFieldEnd < FieldBitOffset)
          return true;
      }

      PrevFieldBitOffset = FieldBitOffset;
    }

    // Check for tail padding.
    if (unsigned EltCount = STy->getNumElements()) {
      unsigned PrevFieldEnd = PrevFieldBitOffset +
        TD.getTypeSizeInBits(STy->getElementType(EltCount-1));
      if (PrevFieldEnd < SL->getSizeInBits())
        return true;
    }

  } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    return HasPadding(ATy->getElementType(), TD);
  } else if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    return HasPadding(VTy->getElementType(), TD);
  }
  return TD.getTypeSizeInBits(Ty) != TD.getTypePaddedSizeInBits(Ty);
}
/// isSafeAllocaToScalarRepl - Check to see if the specified allocation of an
/// aggregate can be broken down into elements.  Return 0 if not, 3 if safe,
/// or 1 if safe after canonicalization has been performed.
///
int SROA::isSafeAllocaToScalarRepl(AllocationInst *AI) {
  // Loop over the use list of the alloca.  We can only transform it if all of
  // the users are safe to transform.
  AllocaInfo Info;

  for (Value::use_iterator I = AI->use_begin(), E = AI->use_end();
       I != E; ++I) {
    isSafeUseOfAllocation(cast<Instruction>(*I), AI, Info);
    if (Info.isUnsafe) {
      DOUT << "Cannot transform: " << *AI << "  due to user: " << **I;
      return 0;
    }
  }

  // Okay, we know all the users are promotable.  If the aggregate is a memcpy
  // source and destination, we have to be careful.  In particular, the memcpy
  // could be moving around elements that live in structure padding of the LLVM
  // types, but may actually be used.  In these cases, we refuse to promote the
  // alloca.
  if (Info.isMemCpySrc && Info.isMemCpyDst &&
      HasPadding(AI->getType()->getElementType(), *TD))
    return 0;

  // If we require cleanup, return 1, otherwise return 3.
  return Info.needsCanon ? 1 : 3;
}
/// CanonicalizeAllocaUsers - If SROA reported that it can promote the specified
/// allocation, but only if cleaned up, perform the cleanups required.
void SROA::CanonicalizeAllocaUsers(AllocationInst *AI) {
  // At this point, we know that the end result will be SROA'd and promoted, so
  // we can insert ugly code if required so long as sroa+mem2reg will clean it
  // up.
  for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
       UI != E; ) {
    GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(*UI++);
    if (!GEPI) continue;
    gep_type_iterator I = gep_type_begin(GEPI);
    ++I;

    if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
      uint64_t NumElements = AT->getNumElements();

      if (!isa<ConstantInt>(I.getOperand())) {
        if (NumElements == 1) {
          GEPI->setOperand(2, Constant::getNullValue(Type::Int32Ty));
        } else {
          assert(NumElements == 2 && "Unhandled case!");
          // All users of the GEP must be loads.  At each use of the GEP,
          // insert two loads of the appropriate indexed GEP and select
          // between them.
          Value *IsOne = new ICmpInst(ICmpInst::ICMP_NE, I.getOperand(),
                              Constant::getNullValue(I.getOperand()->getType()),
                              "isone", GEPI);
          // Insert the new GEP instructions, which are properly indexed.
          SmallVector<Value*, 8> Indices(GEPI->op_begin()+1, GEPI->op_end());
          Indices[1] = Constant::getNullValue(Type::Int32Ty);
          Value *ZeroIdx = GetElementPtrInst::Create(GEPI->getOperand(0),
                                                     Indices.begin(),
                                                     Indices.end(),
                                                     GEPI->getName()+".0", GEPI);
          Indices[1] = ConstantInt::get(Type::Int32Ty, 1);
          Value *OneIdx = GetElementPtrInst::Create(GEPI->getOperand(0),
                                                    Indices.begin(),
                                                    Indices.end(),
                                                    GEPI->getName()+".1", GEPI);
          // Replace all loads of the variable index GEP with loads from both
          // indexes and a select.
          while (!GEPI->use_empty()) {
            LoadInst *LI = cast<LoadInst>(GEPI->use_back());
            Value *Zero = new LoadInst(ZeroIdx, LI->getName()+".0", LI);
            Value *One  = new LoadInst(OneIdx , LI->getName()+".1", LI);
            Value *R = SelectInst::Create(IsOne, One, Zero, LI->getName(), LI);
            LI->replaceAllUsesWith(R);
            LI->eraseFromParent();
          }
          GEPI->eraseFromParent();
        }
      }
    }
  }
}
/// MergeInType - Add the 'In' type to the accumulated type (Accum) so far at
/// the offset specified by Offset (which is specified in bytes).
///
/// There are two cases we handle here:
///   1) A union of vector types of the same size and potentially its elements.
///      Here we turn element accesses into insert/extract element operations.
///      This promotes a <4 x float> with a store of float to the third element
///      into a <4 x float> that uses insert element.
///   2) A fully general blob of memory, which we turn into some (potentially
///      large) integer type with extract and insert operations where the loads
///      and stores would mutate the memory.
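///
/// As a sketch of the intent: merging a float access at byte offset 8 into an
/// existing <4 x float> accumulator leaves the accumulator unchanged (case 1),
/// while merging an i16 access at byte offset 2 into an i32 accumulator falls
/// through to case 2 and yields an integer wide enough to cover every byte
/// touched (still i32 here).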
static void MergeInType(const Type *In, uint64_t Offset, const Type *&Accum,
                        const TargetData &TD) {
  // If this is our first type, just use it.
  if ((Accum == 0 && Offset == 0) || In == Type::VoidTy ||
      // Or if this is a same type, keep it.
      (In == Accum && Offset == 0)) {
    Accum = In;
    return;
  }

  // Merging something like i32 into offset 8 means that a "field" is merged in
  // before the basic type is.  Make sure to consider the offset below.
  if (Accum == 0)
    Accum = Type::Int8Ty;

  if (const VectorType *VATy = dyn_cast<VectorType>(Accum)) {
    if (VATy->getElementType() == In &&
        Offset % TD.getTypePaddedSize(In) == 0 &&
        Offset < TD.getTypePaddedSize(VATy))
      return;  // Accum is a vector, and we are accessing an element: ok.
    if (const VectorType *VInTy = dyn_cast<VectorType>(In))
      if (VInTy->getBitWidth() == VATy->getBitWidth() && Offset == 0)
        return;  // Two vectors of the same size: keep either one of them.
  }

  if (const VectorType *VInTy = dyn_cast<VectorType>(In)) {
    // In is a vector, and we are accessing an element: keep V.
    if (VInTy->getElementType() == Accum &&
        Offset % TD.getTypePaddedSize(Accum) == 0 &&
        Offset < TD.getTypePaddedSize(VInTy)) {
      Accum = In;
      return;
    }
  }

  // Otherwise, we have a case that we can't handle with an optimized form.
  // Convert the alloca to an integer that is as large as the largest store
  // size of the values.
  uint64_t InSize = TD.getTypeStoreSizeInBits(In)+8*Offset;
  uint64_t ASize  = TD.getTypeStoreSizeInBits(Accum);
  if (InSize > ASize) ASize = InSize;
  Accum = IntegerType::get(ASize);
}
/// CanConvertToScalar - V is a pointer.  If we can convert the pointee and all
/// its accesses to use a single scalar type, return true and set ResTy to the
/// new type.  Further, if the use is not a completely trivial use that mem2reg
/// could promote, set IsNotTrivial.  Offset is the current offset from the
/// base of the alloca being analyzed.
///
bool SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial,
                              const Type *&ResTy, uint64_t Offset) {
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // Don't break volatile loads.
      if (LI->isVolatile())
        return false;
      MergeInType(LI->getType(), Offset, ResTy, *TD);
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      // Storing the pointer, not into the value?
      if (SI->getOperand(0) == V || SI->isVolatile()) return false;
      MergeInType(SI->getOperand(0)->getType(), Offset, ResTy, *TD);
      continue;
    }

    if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
      if (!CanConvertToScalar(BCI, IsNotTrivial, ResTy, Offset))
        return false;
      IsNotTrivial = true;
      continue;
    }

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      // If this is a GEP with variable indices, we can't handle it.
      if (!GEP->hasAllConstantIndices())
        return false;

      // Compute the offset that this GEP adds to the pointer.
      SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
      uint64_t GEPOffset = TD->getIndexedOffset(GEP->getOperand(0)->getType(),
                                                &Indices[0], Indices.size());
      // See if all uses can be converted.
      if (!CanConvertToScalar(GEP, IsNotTrivial, ResTy, Offset+GEPOffset))
        return false;
      IsNotTrivial = true;
      continue;
    }

    // Otherwise, we cannot handle this!
    return false;
  }

  return true;
}
/// ConvertUsesToScalar - Convert all of the users of Ptr to use the new alloca
/// directly.  This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.  By the end of this, there should be no uses of Ptr.
void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset) {
  while (!Ptr->use_empty()) {
    Instruction *User = cast<Instruction>(Ptr->use_back());

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      LI->replaceAllUsesWith(ConvertUsesOfLoadToScalar(LI, NewAI, Offset));
      LI->eraseFromParent();
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      assert(SI->getOperand(0) != Ptr && "Consistency error!");
      new StoreInst(ConvertUsesOfStoreToScalar(SI, NewAI, Offset), NewAI, SI);
      SI->eraseFromParent();
      continue;
    }

    if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
      ConvertUsesToScalar(CI, NewAI, Offset);
      CI->eraseFromParent();
      continue;
    }

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      // Compute the offset that this GEP adds to the pointer.
      SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
      uint64_t GEPOffset = TD->getIndexedOffset(GEP->getOperand(0)->getType(),
                                                &Indices[0], Indices.size());
      ConvertUsesToScalar(GEP, NewAI, Offset+GEPOffset*8);
      GEP->eraseFromParent();
      continue;
    }

    assert(0 && "Unsupported operation!");
    abort();
  }
}
/// ConvertUsesOfLoadToScalar - Convert all of the users of the specified load
/// to use the new alloca directly, returning the value that should replace the
/// load.  This happens when we are converting an "integer union" to a single
/// integer scalar, or when we are converting a "vector union" to a vector with
/// insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.  By the end of this, there should be no uses of Ptr.
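///
/// For instance (illustrative, little-endian): if the alloca was converted to
/// i32 and this load reads the byte at bit offset 8, the replacement value is
/// built as:
///
///   %tmp = load i32* %NewAI
///   %sh = lshr i32 %tmp, 8
///   %byte = trunc i32 %sh to i8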
Value *SROA::ConvertUsesOfLoadToScalar(LoadInst *LI, AllocaInst *NewAI,
                                       uint64_t Offset) {
  // The load is a bit extract from NewAI shifted right by Offset bits.
  Value *NV = new LoadInst(NewAI, LI->getName(), LI);

  // If the load is of the whole new alloca, no conversion is needed.
  if (NV->getType() == LI->getType() && Offset == 0)
    return NV;

  // If the result alloca is a vector type, this is either an element
  // access or a bitcast to another vector type of the same size.
  if (const VectorType *VTy = dyn_cast<VectorType>(NV->getType())) {
    if (isa<VectorType>(LI->getType()))
      return new BitCastInst(NV, LI->getType(), LI->getName(), LI);

    // Otherwise it must be an element access.
    unsigned Elt = 0;
    if (Offset) {
      unsigned EltSize = TD->getTypePaddedSizeInBits(VTy->getElementType());
      Elt = Offset/EltSize;
      assert(EltSize*Elt == Offset && "Invalid modulus in validity checking");
    }
    // Return the element extracted out of it.
    return new ExtractElementInst(NV, ConstantInt::get(Type::Int32Ty, Elt),
                                  "tmp", LI);
  }

  // Otherwise, this must be a union that was converted to an integer value.
  const IntegerType *NTy = cast<IntegerType>(NV->getType());

  // If this is a big-endian system and the load is narrower than the
  // full alloca type, we need to do a shift to get the right bits.
  int ShAmt = 0;
  if (TD->isBigEndian()) {
    // On big-endian machines, the lowest bit is stored at the bit offset
    // from the pointer given by getTypeStoreSizeInBits.  This matters for
    // integers with a bitwidth that is not a multiple of 8.
    ShAmt = TD->getTypeStoreSizeInBits(NTy) -
            TD->getTypeStoreSizeInBits(LI->getType()) - Offset;
  } else {
    ShAmt = Offset;
  }

  // Note: we support negative bitwidths (with shl) which are not defined.
  // We do this to support (f.e.) loads off the end of a structure where
  // only some bits are used.
  if (ShAmt > 0 && (unsigned)ShAmt < NTy->getBitWidth())
    NV = BinaryOperator::CreateLShr(NV,
                                    ConstantInt::get(NV->getType(), ShAmt),
                                    LI->getName(), LI);
  else if (ShAmt < 0 && (unsigned)-ShAmt < NTy->getBitWidth())
    NV = BinaryOperator::CreateShl(NV,
                                   ConstantInt::get(NV->getType(), -ShAmt),
                                   LI->getName(), LI);

  // Finally, unconditionally truncate the integer to the right width.
  unsigned LIBitWidth = TD->getTypeSizeInBits(LI->getType());
  if (LIBitWidth < NTy->getBitWidth())
    NV = new TruncInst(NV, IntegerType::get(LIBitWidth),
                       LI->getName(), LI);

  // If the result is an integer, this is a trunc or bitcast.
  if (isa<IntegerType>(LI->getType())) {
    // Should be done.
  } else if (LI->getType()->isFloatingPoint() ||
             isa<VectorType>(LI->getType())) {
    // Just do a bitcast, we know the sizes match up.
    NV = new BitCastInst(NV, LI->getType(), LI->getName(), LI);
  } else {
    // Otherwise must be a pointer.
    NV = new IntToPtrInst(NV, LI->getType(), LI->getName(), LI);
  }
  assert(NV->getType() == LI->getType() && "Didn't convert right?");
  return NV;
}
/// ConvertUsesOfStoreToScalar - Convert the specified store to a load+store
/// pair of the new alloca directly, returning the value that should be stored
/// to the alloca.  This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.  By the end of this, there should be no uses of Ptr.
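///
/// For instance (illustrative, little-endian): storing an i8 %v at bit offset
/// 8 of an alloca converted to i32 produces a read-modify-write sequence:
///
///   %old = load i32* %NewAI
///   %v.z = zext i8 %v to i32
///   %v.sh = shl i32 %v.z, 8
///   %old.mask = and i32 %old, -65281   ; clear bits 8..15
///   %ins = or i32 %old.mask, %v.sh
///
/// and %ins is what this function returns for the caller to store back.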
Value *SROA::ConvertUsesOfStoreToScalar(StoreInst *SI, AllocaInst *NewAI,
                                        uint64_t Offset) {

  // Convert the stored type to the actual type, shift it left to insert, then
  // 'or' it into place.
  Value *SV = SI->getOperand(0);
  const Type *AllocaType = NewAI->getType()->getElementType();
  if (SV->getType() == AllocaType && Offset == 0) {
    return SV;
  }

  if (const VectorType *VTy = dyn_cast<VectorType>(AllocaType)) {
    Value *Old = new LoadInst(NewAI, NewAI->getName()+".in", SI);

    // If the result alloca is a vector type, this is either an element
    // access or a bitcast to another vector type.
    if (isa<VectorType>(SV->getType())) {
      SV = new BitCastInst(SV, AllocaType, SV->getName(), SI);
    } else {
      // Must be an element insertion.
      unsigned Elt = Offset/TD->getTypePaddedSizeInBits(VTy->getElementType());
      SV = InsertElementInst::Create(Old, SV,
                                     ConstantInt::get(Type::Int32Ty, Elt),
                                     "tmp", SI);
    }
    return SV;
  }

  Value *Old = new LoadInst(NewAI, NewAI->getName()+".in", SI);

  // If SV is a float, convert it to the appropriate integer type.
  // If it is a pointer, do the same, and also handle ptr->ptr casts
  // here.
  unsigned SrcWidth = TD->getTypeSizeInBits(SV->getType());
  unsigned DestWidth = TD->getTypeSizeInBits(AllocaType);
  unsigned SrcStoreWidth = TD->getTypeStoreSizeInBits(SV->getType());
  unsigned DestStoreWidth = TD->getTypeStoreSizeInBits(AllocaType);
  if (SV->getType()->isFloatingPoint() || isa<VectorType>(SV->getType()))
    SV = new BitCastInst(SV, IntegerType::get(SrcWidth), SV->getName(), SI);
  else if (isa<PointerType>(SV->getType()))
    SV = new PtrToIntInst(SV, TD->getIntPtrType(), SV->getName(), SI);

  // Always zero extend the value if needed.
  if (SV->getType() != AllocaType)
    SV = new ZExtInst(SV, AllocaType, SV->getName(), SI);

  // If this is a big-endian system and the store is narrower than the
  // full alloca type, we need to do a shift to get the right bits.
  int ShAmt = 0;
  if (TD->isBigEndian()) {
    // On big-endian machines, the lowest bit is stored at the bit offset
    // from the pointer given by getTypeStoreSizeInBits.  This matters for
    // integers with a bitwidth that is not a multiple of 8.
    ShAmt = DestStoreWidth - SrcStoreWidth - Offset;
  } else {
    ShAmt = Offset;
  }

  // Note: we support negative bitwidths (with shr) which are not defined.
  // We do this to support (f.e.) stores off the end of a structure where
  // only some bits in the structure are set.
  APInt Mask(APInt::getLowBitsSet(DestWidth, SrcWidth));
  if (ShAmt > 0 && (unsigned)ShAmt < DestWidth) {
    SV = BinaryOperator::CreateShl(SV,
                                   ConstantInt::get(SV->getType(), ShAmt),
                                   SV->getName(), SI);
    Mask <<= ShAmt;
  } else if (ShAmt < 0 && (unsigned)-ShAmt < DestWidth) {
    SV = BinaryOperator::CreateLShr(SV,
                                    ConstantInt::get(SV->getType(), -ShAmt),
                                    SV->getName(), SI);
    Mask = Mask.lshr(-ShAmt);
  }

  // Mask out the bits we are about to insert from the old value, and or
  // in the new bits.
  if (SrcWidth != DestWidth) {
    assert(DestWidth > SrcWidth);
    Old = BinaryOperator::CreateAnd(Old, ConstantInt::get(~Mask),
                                    Old->getName()+".mask", SI);
    SV = BinaryOperator::CreateOr(Old, SV, SV->getName()+".ins", SI);
  }
  return SV;
}
/// PointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable.  This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
static bool PointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return PointsToConstantGlobal(CE->getOperand(0));
  return false;
}

/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca.  Ignore any reads of the pointer, return false if we
/// see any stores or other unknown uses.  If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with isOffset) but otherwise
/// traverse the uses.  If we see a memcpy/memmove that targets an unoffset
/// pointer to the alloca, and if the source pointer is a pointer to a constant
/// global, we can optimize this.
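///
/// For example (sketch): for "%A = alloca %T" whose only uses are loads,
/// all-zero-index GEPs/bitcasts of %A, and a single memcpy from a constant
/// global into %A, this returns true and records that memcpy in TheCopy.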
static bool isOnlyCopiedFromConstantGlobal(Value *V, Instruction *&TheCopy,
                                           bool isOffset) {
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    if (LoadInst *LI = dyn_cast<LoadInst>(*UI))
      // Ignore non-volatile loads, they are always ok.
      if (!LI->isVolatile())
        continue;

    if (BitCastInst *BCI = dyn_cast<BitCastInst>(*UI)) {
      // If uses of the bitcast are ok, we are ok.
      if (!isOnlyCopiedFromConstantGlobal(BCI, TheCopy, isOffset))
        return false;
      continue;
    }
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(*UI)) {
      // If the GEP has all zero indices, it doesn't offset the pointer.  If it
      // doesn't, it does.
      if (!isOnlyCopiedFromConstantGlobal(GEP, TheCopy,
                                          isOffset || !GEP->hasAllZeroIndices()))
        return false;
      continue;
    }

    // If this isn't our memcpy/memmove, reject it as something we can't
    // handle.
    if (!isa<MemCpyInst>(*UI) && !isa<MemMoveInst>(*UI))
      return false;

    // If we already have seen a copy, reject the second one.
    if (TheCopy) return false;

    // If the pointer has been offset from the start of the alloca, we can't
    // safely handle this.
    if (isOffset) return false;

    // If the memintrinsic isn't using the alloca as the dest, reject it.
    if (UI.getOperandNo() != 1) return false;

    MemIntrinsic *MI = cast<MemIntrinsic>(*UI);

    // If the source of the memcpy/move is not a constant global, reject it.
    if (!PointsToConstantGlobal(MI->getOperand(2)))
      return false;

    // Otherwise, the transform is safe.  Remember the copy instruction.
    TheCopy = MI;
  }
  return true;
}

/// isOnlyCopiedFromConstantGlobal - Return true if the specified alloca is
/// only modified by a copy from a constant global.  If we can prove this, we
/// can replace any uses of the alloca with uses of the global directly.
Instruction *SROA::isOnlyCopiedFromConstantGlobal(AllocationInst *AI) {
  Instruction *TheCopy = 0;
  if (::isOnlyCopiedFromConstantGlobal(AI, TheCopy, false))
    return TheCopy;
  return 0;
}