//===- ScalarReplAggregates.cpp - Scalar Replacement of Aggregates --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation implements the well known scalar replacement of
// aggregates transformation.  This xform breaks up alloca instructions of
// aggregate type (structure or array) into individual alloca instructions for
// each member (if possible).  Then, if possible, it transforms the individual
// alloca instructions into nice clean scalar SSA form.
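//
// For example (an illustrative sketch of the effect, using made-up names):
//   %X = alloca { i32, float }          ; aggregate alloca
// is broken up into
//   %X.0 = alloca i32                   ; one alloca per member
//   %X.1 = alloca float
// and the pieces can then be promoted to SSA values by mem2reg.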
//
// This combines a simple SRoA algorithm with the Mem2Reg algorithm because the
// two often interact, especially for C++ programs.  As such, iterating between
// SRoA and Mem2Reg until we run out of things to promote works well.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "scalarrepl"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
using namespace llvm;

STATISTIC(NumReplaced,  "Number of allocas broken up");
STATISTIC(NumPromoted,  "Number of allocas promoted");
STATISTIC(NumConverted, "Number of aggregates converted to scalar");
STATISTIC(NumGlobals,   "Number of allocas copied from constant global");

namespace {
  struct VISIBILITY_HIDDEN SROA : public FunctionPass {
    bool runOnFunction(Function &F);

    bool performScalarRepl(Function &F);
    bool performPromotion(Function &F);

    // getAnalysisUsage - This pass requires dominance information (for
    // mem2reg), and it does not alter the CFG, so say so.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<ETForest>();
      AU.addRequired<DominanceFrontier>();
      AU.addRequired<TargetData>();
      AU.setPreservesCFG();
    }

  private:
    int isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI);
    int isSafeUseOfAllocation(Instruction *User, AllocationInst *AI);
    bool isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI);
    bool isSafeUseOfBitCastedAllocation(BitCastInst *User, AllocationInst *AI);
    int isSafeAllocaToScalarRepl(AllocationInst *AI);
    void DoScalarReplacement(AllocationInst *AI,
                             std::vector<AllocationInst*> &WorkList);
    void CanonicalizeAllocaUsers(AllocationInst *AI);
    AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocationInst *Base);

    void RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
                                    SmallVector<AllocaInst*, 32> &NewElts);

    const Type *CanConvertToScalar(Value *V, bool &IsNotTrivial);
    void ConvertToScalar(AllocationInst *AI, const Type *Ty);
    void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset);
    static Instruction *isOnlyCopiedFromConstantGlobal(AllocationInst *AI);
  };

  RegisterPass<SROA> X("scalarrepl", "Scalar Replacement of Aggregates");
}

// Public interface to the ScalarReplAggregates pass
FunctionPass *llvm::createScalarReplAggregatesPass() { return new SROA(); }
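// (Usage sketch: a pass-manager client would add this pass after TargetData,
//  e.g. "PM.add(createScalarReplAggregatesPass());".  The surrounding setup is
//  driver-specific and only illustrative here.)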

bool SROA::runOnFunction(Function &F) {
  bool Changed = performPromotion(F);
  while (1) {
    bool LocalChange = performScalarRepl(F);
    if (!LocalChange) break;   // No need to repromote if no scalarrepl
    Changed = true;
    LocalChange = performPromotion(F);
    if (!LocalChange) break;   // No need to re-scalarrepl if no promotion
  }

  return Changed;
}

bool SROA::performPromotion(Function &F) {
  std::vector<AllocaInst*> Allocas;
  const TargetData &TD = getAnalysis<TargetData>();
  ETForest &ET = getAnalysis<ETForest>();
  DominanceFrontier &DF = getAnalysis<DominanceFrontier>();

  BasicBlock &BB = F.getEntryBlock();  // Get the entry node for the function

  bool Changed = false;

  while (1) {
    Allocas.clear();

    // Find allocas that are safe to promote, by looking at all instructions in
    // the entry node.
    for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I)
      if (AllocaInst *AI = dyn_cast<AllocaInst>(I))       // Is it an alloca?
        if (isAllocaPromotable(AI, TD))
          Allocas.push_back(AI);

    if (Allocas.empty()) break;

    PromoteMemToReg(Allocas, ET, DF, TD);
    NumPromoted += Allocas.size();
    Changed = true;
  }

  return Changed;
}

// performScalarRepl - This algorithm is a simple worklist driven algorithm,
// which runs on all of the malloc/alloca instructions in the function, removing
// them if they are only used by getelementptr instructions.
//
bool SROA::performScalarRepl(Function &F) {
  std::vector<AllocationInst*> WorkList;

  // Scan the entry basic block, adding any alloca's and mallocs to the worklist
  BasicBlock &BB = F.getEntryBlock();
  for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
    if (AllocationInst *A = dyn_cast<AllocationInst>(I))
      WorkList.push_back(A);

  // Process the worklist
  bool Changed = false;
  while (!WorkList.empty()) {
    AllocationInst *AI = WorkList.back();
    WorkList.pop_back();

    // Handle dead allocas trivially.  These can be formed by SROA'ing arrays
    // with unused elements.
    if (AI->use_empty()) {
      AI->eraseFromParent();
      continue;
    }

    // If we can turn this aggregate value (potentially with casts) into a
    // simple scalar value that can be mem2reg'd into a register value.
    bool IsNotTrivial = false;
    if (const Type *ActualType = CanConvertToScalar(AI, IsNotTrivial))
      if (IsNotTrivial && ActualType != Type::VoidTy) {
        ConvertToScalar(AI, ActualType);
        Changed = true;
        continue;
      }

    // Check to see if we can perform the core SROA transformation.  We cannot
    // transform the allocation instruction if it is an array allocation
    // (allocations OF arrays are ok though), and an allocation of a scalar
    // value cannot be decomposed at all.
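    // (E.g. "%A = alloca i32, i32 4" is an array allocation we must skip,
    // while "%A = alloca [4 x i32]" allocates an array type and can be split.)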
    if (!AI->isArrayAllocation() &&
        (isa<StructType>(AI->getAllocatedType()) ||
         isa<ArrayType>(AI->getAllocatedType()))) {
      // Check that all of the users of the allocation are capable of being
      // transformed.
      switch (isSafeAllocaToScalarRepl(AI)) {
      default: assert(0 && "Unexpected value!");
      case 0:  // Not safe to scalar replace.
        break;
      case 1:  // Safe, but requires cleanup/canonicalizations first
        CanonicalizeAllocaUsers(AI);
        // FALL THROUGH to scalar replace the canonicalized users.
      case 3:  // Safe to scalar replace.
        DoScalarReplacement(AI, WorkList);
        Changed = true;
        continue;
      }
    }

    // Check to see if this allocation is only modified by a memcpy/memmove from
    // a constant global.  If this is the case, we can change all users to use
    // the constant global instead.  This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
    // is only subsequently read.
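    // (Illustrative sketch: for "%A = alloca [9 x i32]" whose only write is
    // "memcpy(%A, @init, 36)" with @init a constant global, every use of %A
    // can be replaced by @init bitcast to %A's type, and the memcpy deleted.)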
    if (Instruction *TheCopy = isOnlyCopiedFromConstantGlobal(AI)) {
      DOUT << "Found alloca equal to global: " << *AI;
      DOUT << "  memcpy = " << *TheCopy;
      Constant *TheSrc = cast<Constant>(TheCopy->getOperand(2));
      AI->replaceAllUsesWith(ConstantExpr::getBitCast(TheSrc, AI->getType()));
      TheCopy->eraseFromParent();  // Don't mutate the global.
      AI->eraseFromParent();
      ++NumGlobals;
      Changed = true;
      continue;
    }

    // Otherwise, couldn't process this.
  }

  return Changed;
}

/// DoScalarReplacement - This alloca satisfied the isSafeAllocaToScalarRepl
/// predicate; do SROA on it now.
void SROA::DoScalarReplacement(AllocationInst *AI,
                               std::vector<AllocationInst*> &WorkList) {
  DOUT << "Found inst to SROA: " << *AI;
  SmallVector<AllocaInst*, 32> ElementAllocas;
  if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
    ElementAllocas.reserve(ST->getNumContainedTypes());
    for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) {
      AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0,
                                      AI->getAlignment(),
                                      AI->getName() + "." + utostr(i), AI);
      ElementAllocas.push_back(NA);
      WorkList.push_back(NA);  // Add to worklist for recursive processing
    }
  } else {
    const ArrayType *AT = cast<ArrayType>(AI->getAllocatedType());
    ElementAllocas.reserve(AT->getNumElements());
    const Type *ElTy = AT->getElementType();
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      AllocaInst *NA = new AllocaInst(ElTy, 0, AI->getAlignment(),
                                      AI->getName() + "." + utostr(i), AI);
      ElementAllocas.push_back(NA);
      WorkList.push_back(NA);  // Add to worklist for recursive processing
    }
  }

  // Now that we have created the alloca instructions that we want to use,
  // expand the getelementptr instructions to use them.
  while (!AI->use_empty()) {
    Instruction *User = cast<Instruction>(AI->use_back());
    if (BitCastInst *BCInst = dyn_cast<BitCastInst>(User)) {
      RewriteBitCastUserOfAlloca(BCInst, AI, ElementAllocas);
      BCInst->eraseFromParent();
      continue;
    }

    GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User);
    // We now know that the GEP is of the form: GEP <ptr>, 0, <cst>
    unsigned Idx =
      (unsigned)cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();

    assert(Idx < ElementAllocas.size() && "Index out of range?");
    AllocaInst *AllocaToUse = ElementAllocas[Idx];

    Value *RepValue;
    if (GEPI->getNumOperands() == 3) {
      // Do not insert a new getelementptr instruction with zero indices, only
      // to have it optimized out later.
      RepValue = AllocaToUse;
    } else {
      // We are indexing deeply into the structure, so we still need a
      // getelementptr instruction to finish the indexing.  This may be
      // expanded itself once the worklist is rerun.
      SmallVector<Value*, 8> NewArgs;
      NewArgs.push_back(Constant::getNullValue(Type::Int32Ty));
      NewArgs.append(GEPI->op_begin()+3, GEPI->op_end());
      RepValue = new GetElementPtrInst(AllocaToUse, &NewArgs[0],
                                       NewArgs.size(), "", GEPI);
      RepValue->takeName(GEPI);
    }

    // If this GEP is to the start of the aggregate, check for memcpys.
    if (Idx == 0) {
      bool IsStartOfAggregateGEP = true;
      for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i) {
        if (!isa<ConstantInt>(GEPI->getOperand(i))) {
          IsStartOfAggregateGEP = false;
          break;
        }
        if (!cast<ConstantInt>(GEPI->getOperand(i))->isZero()) {
          IsStartOfAggregateGEP = false;
          break;
        }
      }

      if (IsStartOfAggregateGEP)
        RewriteBitCastUserOfAlloca(GEPI, AI, ElementAllocas);
    }

    // Move all of the users over to the new GEP.
    GEPI->replaceAllUsesWith(RepValue);
    // Delete the old GEP
    GEPI->eraseFromParent();
  }

  // Finally, delete the Alloca instruction
  AI->eraseFromParent();
  NumReplaced++;
}

/// isSafeElementUse - Check to see if this use is an allowed use for a
/// getelementptr instruction of an array aggregate allocation.  isFirstElt
/// indicates whether Ptr is known to point to the start of the aggregate.
///
int SROA::isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI) {
  for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
       I != E; ++I) {
    Instruction *User = cast<Instruction>(*I);
    switch (User->getOpcode()) {
    case Instruction::Load:  break;
    case Instruction::Store:
      // Store is ok if storing INTO the pointer, not storing the pointer
      if (User->getOperand(0) == Ptr) return 0;
      break;
    case Instruction::GetElementPtr: {
      GetElementPtrInst *GEP = cast<GetElementPtrInst>(User);
      bool AreAllZeroIndices = isFirstElt;
      if (GEP->getNumOperands() > 1) {
        if (!isa<ConstantInt>(GEP->getOperand(1)) ||
            !cast<ConstantInt>(GEP->getOperand(1))->isZero())
          return 0;  // Using pointer arithmetic to navigate the array.

        if (AreAllZeroIndices) {
          for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i) {
            if (!isa<ConstantInt>(GEP->getOperand(i)) ||
                !cast<ConstantInt>(GEP->getOperand(i))->isZero()) {
              AreAllZeroIndices = false;
              break;
            }
          }
        }
      }
      if (!isSafeElementUse(GEP, AreAllZeroIndices, AI)) return 0;
      break;
    }
    case Instruction::BitCast:
      if (isFirstElt &&
          isSafeUseOfBitCastedAllocation(cast<BitCastInst>(User), AI))
        break;
      DOUT << "  Transformation preventing inst: " << *User;
      return 0;
    case Instruction::Call:
      if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
        if (isFirstElt && isSafeMemIntrinsicOnAllocation(MI, AI))
          break;
      }
      DOUT << "  Transformation preventing inst: " << *User;
      return 0;
    default:
      DOUT << "  Transformation preventing inst: " << *User;
      return 0;
    }
  }
  return 3;  // All users look ok :)
}

/// AllUsersAreLoads - Return true if all users of this value are loads.
static bool AllUsersAreLoads(Value *Ptr) {
  for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
       I != E; ++I)
    if (cast<Instruction>(*I)->getOpcode() != Instruction::Load)
      return false;
  return true;
}

/// isSafeUseOfAllocation - Check to see if this user is an allowed use for an
/// aggregate allocation.
///
int SROA::isSafeUseOfAllocation(Instruction *User, AllocationInst *AI) {
  if (BitCastInst *C = dyn_cast<BitCastInst>(User))
    return isSafeUseOfBitCastedAllocation(C, AI) ? 3 : 0;
  if (!isa<GetElementPtrInst>(User)) return 0;

  GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User);
  gep_type_iterator I = gep_type_begin(GEPI), E = gep_type_end(GEPI);

  // The GEP is not safe to transform if not of the form "GEP <ptr>, 0, <cst>".
  if (I == E ||
      I.getOperand() != Constant::getNullValue(I.getOperand()->getType()))
    return 0;

  ++I;
  if (I == E) return 0;  // ran out of GEP indices??

  bool IsAllZeroIndices = true;

  // If this is a use of an array allocation, do a bit more checking for sanity.
  if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
    uint64_t NumElements = AT->getNumElements();

    if (ConstantInt *Idx = dyn_cast<ConstantInt>(I.getOperand())) {
      IsAllZeroIndices &= Idx->isZero();

      // Check to make sure that index falls within the array.  If not,
      // something funny is going on, so we won't do the optimization.
      if (Idx->getZExtValue() >= NumElements)
        return 0;

      // We cannot scalar repl this level of the array unless any array
      // sub-indices are in-range constants.  In particular, consider:
      // A[0][i].  We cannot know that the user isn't doing invalid things like
      // allowing i to index an out-of-range subscript that accesses A[1].
      //
      // Scalar replacing *just* the outer index of the array is probably not
      // going to be a win anyway, so just give up.
      for (++I; I != E && (isa<ArrayType>(*I) || isa<VectorType>(*I)); ++I) {
        uint64_t NumElements;
        if (const ArrayType *SubArrayTy = dyn_cast<ArrayType>(*I))
          NumElements = SubArrayTy->getNumElements();
        else
          NumElements = cast<VectorType>(*I)->getNumElements();

        ConstantInt *IdxVal = dyn_cast<ConstantInt>(I.getOperand());
        if (!IdxVal) return 0;
        if (IdxVal->getZExtValue() >= NumElements)
          return 0;
        IsAllZeroIndices &= IdxVal->isZero();
      }
    } else {
      IsAllZeroIndices = false;

      // If this is an array index and the index is not constant, we cannot
      // promote... that is unless the array has exactly one or two elements in
      // it, in which case we CAN promote it, but we have to canonicalize this
      // out if this is the only problem.
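      // (Illustrative: for a two-element array, CanonicalizeAllocaUsers turns
      // each load of A[i] into loads of A[0] and A[1] plus a select on i != 0.)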
      if ((NumElements == 1 || NumElements == 2) &&
          AllUsersAreLoads(GEPI))
        return 1;  // Canonicalization required!
      return 0;
    }
  }

  // If there are any non-simple uses of this getelementptr, make sure to reject
  // them.
  return isSafeElementUse(GEPI, IsAllZeroIndices, AI);
}

/// isSafeMemIntrinsicOnAllocation - Return true if the specified memory
/// intrinsic can be promoted by SROA.  At this point, we know that the operand
/// of the memintrinsic is a pointer to the beginning of the allocation.
bool SROA::isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI){
  // If not constant length, give up.
  ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
  if (!Length) return false;

  // If not the whole aggregate, give up.
  const TargetData &TD = getAnalysis<TargetData>();
  if (Length->getZExtValue() != TD.getTypeSize(AI->getType()->getElementType()))
    return false;

  // We only know about memcpy/memset/memmove.
  if (!isa<MemCpyInst>(MI) && !isa<MemSetInst>(MI) && !isa<MemMoveInst>(MI))
    return false;

  // Otherwise, we can transform it.
  return true;
}

/// isSafeUseOfBitCastedAllocation - Return true if all users of this bitcast
/// are safe to transform: either further bitcasts that are themselves safe, or
/// memory intrinsics that cover the whole allocation.
bool SROA::isSafeUseOfBitCastedAllocation(BitCastInst *BC, AllocationInst *AI) {
  for (Value::use_iterator UI = BC->use_begin(), E = BC->use_end();
       UI != E; ++UI) {
    if (BitCastInst *BCU = dyn_cast<BitCastInst>(UI)) {
      if (!isSafeUseOfBitCastedAllocation(BCU, AI))
        return false;
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(UI)) {
      if (!isSafeMemIntrinsicOnAllocation(MI, AI))
        return false;
    } else {
      return false;
    }
  }
  return true;
}

/// RewriteBitCastUserOfAlloca - BCInst (transitively) bitcasts AI, or indexes
/// to its first element.  Transform users of the cast to use the new values
/// instead.
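///
/// (E.g. a memcpy covering a whole {i32, float} alloca becomes a load and
/// store of the i32 element plus a load and store of the float element.)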
void SROA::RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
                                      SmallVector<AllocaInst*, 32> &NewElts) {
  Constant *Zero = Constant::getNullValue(Type::Int32Ty);
  const TargetData &TD = getAnalysis<TargetData>();

  // Walk the users of the cast.  Rewriting a use destroys it, so advance the
  // iterator past each use before transforming it.
  Value::use_iterator UI = BCInst->use_begin(), UE = BCInst->use_end();
  while (UI != UE) {
    if (BitCastInst *BCU = dyn_cast<BitCastInst>(*UI)) {
      ++UI;
      RewriteBitCastUserOfAlloca(BCU, AI, NewElts);
      BCU->eraseFromParent();
      continue;
    }

    // Otherwise, must be memcpy/memmove/memset of the entire aggregate.  Split
    // into one per element.
    MemIntrinsic *MI = dyn_cast<MemIntrinsic>(*UI);
    if (MI == 0) {
      // If it's not a mem intrinsic, it must be some other user of a gep of the
      // first pointer.  Just leave these alone.
      ++UI;
      continue;
    }
    ++UI;  // The mem intrinsic is erased at the bottom of this loop.

    // If this is a memcpy/memmove, construct the other pointer as the
    // appropriate type.
    Value *OtherPtr = 0;
    if (MemCpyInst *MCI = dyn_cast<MemCpyInst>(MI)) {
      if (BCInst == MCI->getRawDest())
        OtherPtr = MCI->getRawSource();
      else {
        assert(BCInst == MCI->getRawSource());
        OtherPtr = MCI->getRawDest();
      }
    } else if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (BCInst == MMI->getRawDest())
        OtherPtr = MMI->getRawSource();
      else {
        assert(BCInst == MMI->getRawSource());
        OtherPtr = MMI->getRawDest();
      }
    }

    // If there is an other pointer, we want to convert it to the same pointer
    // type as AI has, so we can GEP through it.
    if (OtherPtr) {
      // It is likely that OtherPtr is a bitcast, if so, remove it.
      if (BitCastInst *BC = dyn_cast<BitCastInst>(OtherPtr))
        OtherPtr = BC->getOperand(0);
      if (ConstantExpr *BCE = dyn_cast<ConstantExpr>(OtherPtr))
        if (BCE->getOpcode() == Instruction::BitCast)
          OtherPtr = BCE->getOperand(0);

      // If the pointer is not the right type, insert a bitcast to the right
      // type.
      if (OtherPtr->getType() != AI->getType())
        OtherPtr = new BitCastInst(OtherPtr, AI->getType(), OtherPtr->getName(),
                                   MI);
    }

    // Process each element of the aggregate.
    Value *TheFn = MI->getOperand(0);
    const Type *BytePtrTy = MI->getRawDest()->getType();
    bool SROADest = MI->getRawDest() == BCInst;

    for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
      // If this is a memcpy/memmove, emit a GEP of the other element address.
      Value *OtherElt = 0;
      if (OtherPtr)
        OtherElt = new GetElementPtrInst(OtherPtr, Zero,
                                         ConstantInt::get(Type::Int32Ty, i),
                                         OtherPtr->getNameStr()+"."+utostr(i),
                                         MI);

      Value *EltPtr = NewElts[i];
      const Type *EltTy = cast<PointerType>(EltPtr->getType())->getElementType();

      // If we got down to a scalar, insert a load or store as appropriate.
      if (EltTy->isFirstClassType()) {
        if (isa<MemCpyInst>(MI) || isa<MemMoveInst>(MI)) {
          Value *Elt = new LoadInst(SROADest ? OtherElt : EltPtr, "tmp",
                                    MI);
          new StoreInst(Elt, SROADest ? EltPtr : OtherElt, MI);
          continue;
        } else {
          assert(isa<MemSetInst>(MI));

          // If the stored element is zero (common case), just store a null
          // constant.
          Constant *StoreVal = 0;
          if (ConstantInt *CI = dyn_cast<ConstantInt>(MI->getOperand(2))) {
            if (CI->isZero()) {
              StoreVal = Constant::getNullValue(EltTy);  // 0.0, null, 0, <0,0>
            } else {
              // If EltTy is a packed type, get the element type.
              const Type *ValTy = EltTy;
              if (const VectorType *VTy = dyn_cast<VectorType>(ValTy))
                ValTy = VTy->getElementType();

              // Construct an integer with the right value.
              unsigned EltSize = TD.getTypeSize(ValTy);
              APInt OneVal(EltSize*8, CI->getZExtValue());
              APInt TotalVal(OneVal);
              // Splat the byte value over every byte of the element.
              for (unsigned i = 0; i != EltSize-1; ++i) {
                TotalVal = TotalVal.shl(8);
                TotalVal |= OneVal;
              }

              // Convert the integer value to the appropriate type.
              StoreVal = ConstantInt::get(TotalVal);
              if (isa<PointerType>(ValTy))
                StoreVal = ConstantExpr::getIntToPtr(StoreVal, ValTy);
              else if (ValTy->isFloatingPoint())
                StoreVal = ConstantExpr::getBitCast(StoreVal, ValTy);
              assert(StoreVal->getType() == ValTy && "Type mismatch!");

              // If the requested value was a vector constant, create it.
              if (EltTy != ValTy) {
                unsigned NumElts = cast<VectorType>(EltTy)->getNumElements();
                SmallVector<Constant*, 16> Elts(NumElts, StoreVal);
                StoreVal = ConstantVector::get(&Elts[0], NumElts);
              }
            }
            new StoreInst(StoreVal, EltPtr, MI);
            continue;
          }
          // Otherwise, if we're storing a byte variable, use a memset call for
          // this element.
        }
      }

      // Cast the element pointer to BytePtrTy.
      if (EltPtr->getType() != BytePtrTy)
        EltPtr = new BitCastInst(EltPtr, BytePtrTy, EltPtr->getNameStr(), MI);

      // Cast the other pointer (if we have one) to BytePtrTy.
      if (OtherElt && OtherElt->getType() != BytePtrTy)
        OtherElt = new BitCastInst(OtherElt, BytePtrTy, OtherElt->getNameStr(),
                                   MI);

      unsigned EltSize = TD.getTypeSize(EltTy);

      // Finally, insert the meminst for this element.
      if (isa<MemCpyInst>(MI) || isa<MemMoveInst>(MI)) {
        Value *Ops[] = {
          SROADest ? EltPtr : OtherElt,  // Dest ptr
          SROADest ? OtherElt : EltPtr,  // Src ptr
          ConstantInt::get(MI->getOperand(3)->getType(), EltSize),  // Size
          MI->getOperand(4)  // Align
        };
        new CallInst(TheFn, Ops, 4, "", MI);
      } else {
        assert(isa<MemSetInst>(MI));
        Value *Ops[] = {
          EltPtr, MI->getOperand(2),  // Dest, Value,
          ConstantInt::get(MI->getOperand(3)->getType(), EltSize),  // Size
          MI->getOperand(4)  // Align
        };
        new CallInst(TheFn, Ops, 4, "", MI);
      }
    }

    // Finally, MI is now dead, as we've modified its actions to occur on all of
    // the elements of the aggregate.
    MI->eraseFromParent();
  }
}

/// isSafeAllocaToScalarRepl - Check to see if the specified allocation of
/// an aggregate can be broken down into elements.  Return 0 if not, 3 if safe,
/// or 1 if safe after canonicalization has been performed.
///
int SROA::isSafeAllocaToScalarRepl(AllocationInst *AI) {
  // Loop over the use list of the alloca.  We can only transform it if all of
  // the users are safe to transform.
  int isSafe = 3;
  for (Value::use_iterator I = AI->use_begin(), E = AI->use_end();
       I != E; ++I) {
    isSafe &= isSafeUseOfAllocation(cast<Instruction>(*I), AI);
    if (isSafe == 0) {
      DOUT << "Cannot transform: " << *AI << "  due to user: " << **I;
      return 0;
    }
  }

  // If we require cleanup, isSafe is now 1, otherwise it is 3.
  return isSafe;
}

/// CanonicalizeAllocaUsers - If SROA reported that it can promote the specified
/// allocation, but only if cleaned up, perform the cleanups required.
void SROA::CanonicalizeAllocaUsers(AllocationInst *AI) {
  // At this point, we know that the end result will be SROA'd and promoted, so
  // we can insert ugly code if required so long as sroa+mem2reg will clean it
  // up.
  for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
       UI != E; ) {
    GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(*UI++);
    if (!GEPI) continue;
    gep_type_iterator I = gep_type_begin(GEPI);
    ++I;

    if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
      uint64_t NumElements = AT->getNumElements();

      if (!isa<ConstantInt>(I.getOperand())) {
        if (NumElements == 1) {
          GEPI->setOperand(2, Constant::getNullValue(Type::Int32Ty));
        } else {
          assert(NumElements == 2 && "Unhandled case!");
          // All users of the GEP must be loads.  At each use of the GEP, insert
          // two loads of the appropriate indexed GEP and select between them.
          Value *IsOne = new ICmpInst(ICmpInst::ICMP_NE, I.getOperand(),
                              Constant::getNullValue(I.getOperand()->getType()),
                              "isone", GEPI);
          // Insert the new GEP instructions, which are properly indexed.
          SmallVector<Value*, 8> Indices(GEPI->op_begin()+1, GEPI->op_end());
          Indices[1] = Constant::getNullValue(Type::Int32Ty);
          Value *ZeroIdx = new GetElementPtrInst(GEPI->getOperand(0),
                                                 &Indices[0], Indices.size(),
                                                 GEPI->getName()+".0", GEPI);
          Indices[1] = ConstantInt::get(Type::Int32Ty, 1);
          Value *OneIdx = new GetElementPtrInst(GEPI->getOperand(0),
                                                &Indices[0], Indices.size(),
                                                GEPI->getName()+".1", GEPI);
          // Replace all loads of the variable index GEP with loads from both
          // indexes and a select.
          while (!GEPI->use_empty()) {
            LoadInst *LI = cast<LoadInst>(GEPI->use_back());
            Value *Zero = new LoadInst(ZeroIdx, LI->getName()+".0", LI);
            Value *One  = new LoadInst(OneIdx , LI->getName()+".1", LI);
            Value *R = new SelectInst(IsOne, One, Zero, LI->getName(), LI);
            LI->replaceAllUsesWith(R);
            LI->eraseFromParent();
          }
          GEPI->eraseFromParent();
        }
      }
    }
  }
}

/// MergeInType - Add the 'In' type to the accumulated type so far.  If the
/// types are incompatible, return true, otherwise update Accum and return
/// false.
///
/// There are three cases we handle here:
///   1) An effectively-integer union, where the pieces are stored into as
///      smaller integers (common with byte swap and other idioms).
///   2) A union of vector types of the same size and potentially its elements.
///      Here we turn element accesses into insert/extract element operations.
///   3) A union of scalar types, such as int/float or int/pointer.  Here we
///      merge together into integers, allowing the xform to work with #1 as
///      well.
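///
/// (E.g. merging float into an i16 accumulator yields i32 via case #3, while
/// merging a float element into a <4 x float> accumulator keeps the vector
/// type via case #2.)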
static bool MergeInType(const Type *In, const Type *&Accum,
                        const TargetData &TD) {
  // If this is our first type, just use it.
  const VectorType *PTy;
  if (Accum == Type::VoidTy || In == Accum) {
    Accum = In;
  } else if (In == Type::VoidTy) {
    // Noop.
  } else if (In->isInteger() && Accum->isInteger()) {   // integer union.
    // Otherwise pick whichever type is larger.
    if (cast<IntegerType>(In)->getBitWidth() >
        cast<IntegerType>(Accum)->getBitWidth())
      Accum = In;
  } else if (isa<PointerType>(In) && isa<PointerType>(Accum)) {
    // Pointer unions just stay as one of the pointers.
  } else if (isa<VectorType>(In) || isa<VectorType>(Accum)) {
    if ((PTy = dyn_cast<VectorType>(Accum)) &&
        PTy->getElementType() == In) {
      // Accum is a vector, and we are accessing an element: ok.
    } else if ((PTy = dyn_cast<VectorType>(In)) &&
               PTy->getElementType() == Accum) {
      // In is a vector, and accum is an element: ok, remember In.
      Accum = In;
    } else if ((PTy = dyn_cast<VectorType>(In)) && isa<VectorType>(Accum) &&
               PTy->getBitWidth() == cast<VectorType>(Accum)->getBitWidth()) {
      // Two vectors of the same size: keep Accum.
    } else {
      // Cannot insert a short into a <4 x int> or handle
      // <2 x int> -> <4 x int>.
      return true;
    }
  } else {
    // Pointer/FP/Integer unions merge together as integers.
    switch (Accum->getTypeID()) {
    case Type::PointerTyID: Accum = TD.getIntPtrType(); break;
    case Type::FloatTyID:   Accum = Type::Int32Ty; break;
    case Type::DoubleTyID:  Accum = Type::Int64Ty; break;
    default:
      assert(Accum->isInteger() && "Unknown FP type!");
      break;
    }

    switch (In->getTypeID()) {
    case Type::PointerTyID: In = TD.getIntPtrType(); break;
    case Type::FloatTyID:   In = Type::Int32Ty; break;
    case Type::DoubleTyID:  In = Type::Int64Ty; break;
    default:
      assert(In->isInteger() && "Unknown FP type!");
      break;
    }
    return MergeInType(In, Accum, TD);
  }
  return false;
}

/// getUIntAtLeastAsBitAs - Return an unsigned integer type with at least the
/// specified number of bits.  If there is no suitable type, this returns
/// null.
const Type *getUIntAtLeastAsBitAs(unsigned NumBits) {
  if (NumBits > 64) return 0;
  if (NumBits > 32) return Type::Int64Ty;
  if (NumBits > 16) return Type::Int32Ty;
  if (NumBits > 8) return Type::Int16Ty;
  return Type::Int8Ty;
}

/// CanConvertToScalar - V is a pointer.  If we can convert the pointee to a
/// single scalar integer type, return that type.  Further, if the use is not
/// a completely trivial use that mem2reg could promote, set IsNotTrivial.  If
/// there are no uses of this pointer, return Type::VoidTy to differentiate from
/// 0 (which means the pointee cannot be converted).
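///
/// (E.g. an alloca accessed both as an i32 and, through a bitcast, as a float
/// can be converted to a single i32 alloca; the FP accesses become bitcasts.)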
const Type *SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial) {
  const Type *UsedType = Type::VoidTy; // No uses, no forced type.
  const TargetData &TD = getAnalysis<TargetData>();
  const PointerType *PTy = cast<PointerType>(V->getType());

  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      if (MergeInType(LI->getType(), UsedType, TD))
        return 0;

    } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      // Storing the pointer, not into the value?
      if (SI->getOperand(0) == V) return 0;

      // NOTE: We could handle storing of FP imms into integers here!

      if (MergeInType(SI->getOperand(0)->getType(), UsedType, TD))
        return 0;
    } else if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
      IsNotTrivial = true;
      const Type *SubTy = CanConvertToScalar(CI, IsNotTrivial);
      if (!SubTy || MergeInType(SubTy, UsedType, TD)) return 0;
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      // Check to see if this is stepping over an element: GEP Ptr, int C
      if (GEP->getNumOperands() == 2 && isa<ConstantInt>(GEP->getOperand(1))) {
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(1))->getZExtValue();
        unsigned ElSize = TD.getTypeSize(PTy->getElementType());
        unsigned BitOffset = Idx*ElSize*8;
        if (BitOffset > 64 || !isPowerOf2_32(ElSize)) return 0;

        IsNotTrivial = true;
        const Type *SubElt = CanConvertToScalar(GEP, IsNotTrivial);
        if (SubElt == 0) return 0;
        if (SubElt != Type::VoidTy && SubElt->isInteger()) {
          const Type *NewTy =
            getUIntAtLeastAsBitAs(TD.getTypeSize(SubElt)*8+BitOffset);
          if (NewTy == 0 || MergeInType(NewTy, UsedType, TD)) return 0;
          continue;
        }
      } else if (GEP->getNumOperands() == 3 &&
                 isa<ConstantInt>(GEP->getOperand(1)) &&
                 isa<ConstantInt>(GEP->getOperand(2)) &&
                 cast<ConstantInt>(GEP->getOperand(1))->isZero()) {
        // We are stepping into an element, e.g. a structure or an array:
        // GEP Ptr, int 0, uint C
        const Type *AggTy = PTy->getElementType();
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();

        if (const ArrayType *ATy = dyn_cast<ArrayType>(AggTy)) {
          if (Idx >= ATy->getNumElements()) return 0;  // Out of range.
        } else if (const VectorType *VectorTy = dyn_cast<VectorType>(AggTy)) {
          // Getting an element of the packed vector.
          if (Idx >= VectorTy->getNumElements()) return 0;  // Out of range.

          // Merge in the vector type.
          if (MergeInType(VectorTy, UsedType, TD)) return 0;

          const Type *SubTy = CanConvertToScalar(GEP, IsNotTrivial);
          if (SubTy == 0) return 0;

          if (SubTy != Type::VoidTy && MergeInType(SubTy, UsedType, TD))
            return 0;

          // We'll need to change this to an insert/extract element operation.
          IsNotTrivial = true;
          continue;    // Everything looks ok
        } else if (isa<StructType>(AggTy)) {
          // Structs are always ok.
        } else {
          return 0;
        }

        const Type *NTy = getUIntAtLeastAsBitAs(TD.getTypeSize(AggTy)*8);
        if (NTy == 0 || MergeInType(NTy, UsedType, TD)) return 0;
        const Type *SubTy = CanConvertToScalar(GEP, IsNotTrivial);
        if (SubTy == 0) return 0;
        if (SubTy != Type::VoidTy && MergeInType(SubTy, UsedType, TD))
          return 0;
        continue;    // Everything looks ok
      }
      return 0;
    } else {
      // Cannot handle this!
      return 0;
    }
  }

  return UsedType;
}

/// ConvertToScalar - The specified alloca passes the CanConvertToScalar
/// predicate and is non-trivial.  Convert it to something that can be trivially
/// promoted into a register by mem2reg.
void SROA::ConvertToScalar(AllocationInst *AI, const Type *ActualTy) {
  DOUT << "CONVERT TO SCALAR: " << *AI << "  TYPE = "
       << *ActualTy << "\n";
  ++NumConverted;

  BasicBlock *EntryBlock = AI->getParent();
  assert(EntryBlock == &EntryBlock->getParent()->getEntryBlock() &&
         "Not in the entry block!");
  EntryBlock->getInstList().remove(AI);  // Take the alloca out of the program.

  // Create and insert the alloca.
  AllocaInst *NewAI = new AllocaInst(ActualTy, 0, AI->getName(),
                                     EntryBlock->begin());
  ConvertUsesToScalar(AI, NewAI, 0);
  delete AI;
}

/// ConvertUsesToScalar - Convert all of the users of Ptr to use the new alloca
/// directly.  This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.  By the end of this, there should be no uses of Ptr.
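///
/// (E.g. for an i32 alloca holding two i16 fields, a load of the field at
/// Offset 16 becomes, on a little-endian target, a load of the whole i32, a
/// lshr by 16, and a trunc to i16.)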
void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset) {
  const TargetData &TD = getAnalysis<TargetData>();
  while (!Ptr->use_empty()) {
    Instruction *User = cast<Instruction>(Ptr->use_back());

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // The load is a bit extract from NewAI shifted right by Offset bits.
      Value *NV = new LoadInst(NewAI, LI->getName(), LI);
      if (NV->getType() == LI->getType()) {
        // We win, no conversion needed.
      } else if (const VectorType *PTy = dyn_cast<VectorType>(NV->getType())) {
        // If the result alloca is a vector type, this is either an element
        // access or a bitcast to another vector type.
        if (isa<VectorType>(LI->getType())) {
          NV = new BitCastInst(NV, LI->getType(), LI->getName(), LI);
        } else {
          // Must be an element access.
          unsigned Elt = Offset/(TD.getTypeSize(PTy->getElementType())*8);
          NV = new ExtractElementInst(
                         NV, ConstantInt::get(Type::Int32Ty, Elt), "tmp", LI);
        }
      } else if (isa<PointerType>(NV->getType())) {
        assert(isa<PointerType>(LI->getType()));
        // Must be ptr->ptr cast.  Anything else would result in NV being
        // an integer.
        NV = new BitCastInst(NV, LI->getType(), LI->getName(), LI);
      } else {
        const IntegerType *NTy = cast<IntegerType>(NV->getType());
        unsigned LIBitWidth = TD.getTypeSizeInBits(LI->getType());

        // If this is a big-endian system and the load is narrower than the
        // full alloca type, we need to do a shift to get the right bits.
        int ShAmt = 0;
        if (TD.isBigEndian()) {
          ShAmt = NTy->getBitWidth()-LIBitWidth-Offset;
        } else {
          ShAmt = Offset;
        }

        // Note: we support negative shift amounts (by emitting shl instead),
        // even though such shifts are not defined.  We do this to support
        // (f.e.) loads off the end of a structure where only some bits are
        // used.
        if (ShAmt > 0 && (unsigned)ShAmt < NTy->getBitWidth())
          NV = BinaryOperator::createLShr(NV,
                                          ConstantInt::get(NV->getType(),ShAmt),
                                          LI->getName(), LI);
        else if (ShAmt < 0 && (unsigned)-ShAmt < NTy->getBitWidth())
          NV = BinaryOperator::createShl(NV,
                                         ConstantInt::get(NV->getType(),-ShAmt),
                                         LI->getName(), LI);

        // Finally, unconditionally truncate the integer to the right width.
        if (LIBitWidth < NTy->getBitWidth())
          NV = new TruncInst(NV, IntegerType::get(LIBitWidth),
                             LI->getName(), LI);

        // If the result is an integer, this is a trunc or bitcast.
        if (isa<IntegerType>(LI->getType())) {
          assert(NV->getType() == LI->getType() && "Truncate wasn't enough?");
        } else if (LI->getType()->isFloatingPoint()) {
          // Just do a bitcast, we know the sizes match up.
          NV = new BitCastInst(NV, LI->getType(), LI->getName(), LI);
        } else {
          // Otherwise must be a pointer.
          NV = new IntToPtrInst(NV, LI->getType(), LI->getName(), LI);
        }
      }
      LI->replaceAllUsesWith(NV);
      LI->eraseFromParent();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      assert(SI->getOperand(0) != Ptr && "Consistency error!");

      // Convert the stored type to the actual type, shift it left to insert
      // then 'or' into place.
      Value *SV = SI->getOperand(0);
      const Type *AllocaType = NewAI->getType()->getElementType();
      if (SV->getType() == AllocaType) {
        // All is well.
      } else if (const VectorType *PTy = dyn_cast<VectorType>(AllocaType)) {
        Value *Old = new LoadInst(NewAI, NewAI->getName()+".in", SI);

        // If the result alloca is a vector type, this is either an element
        // access or a bitcast to another vector type.
        if (isa<VectorType>(SV->getType())) {
          SV = new BitCastInst(SV, AllocaType, SV->getName(), SI);
        } else {
          // Must be an element insertion.
          unsigned Elt = Offset/(TD.getTypeSize(PTy->getElementType())*8);
          SV = new InsertElementInst(Old, SV,
                                     ConstantInt::get(Type::Int32Ty, Elt),
                                     "tmp", SI);
        }
      } else if (isa<PointerType>(AllocaType)) {
        // If the alloca type is a pointer, then all the elements must be
        // pointers.
        if (SV->getType() != AllocaType)
          SV = new BitCastInst(SV, AllocaType, SV->getName(), SI);
      } else {
        Value *Old = new LoadInst(NewAI, NewAI->getName()+".in", SI);

        // If SV is a float, convert it to the appropriate integer type.
        // If it is a pointer, do the same, and also handle ptr->ptr casts
        // here.
        unsigned SrcWidth = TD.getTypeSizeInBits(SV->getType());
        unsigned DestWidth = AllocaType->getPrimitiveSizeInBits();
        if (SV->getType()->isFloatingPoint())
          SV = new BitCastInst(SV, IntegerType::get(SrcWidth),
                               SV->getName(), SI);
        else if (isa<PointerType>(SV->getType()))
          SV = new PtrToIntInst(SV, TD.getIntPtrType(), SV->getName(), SI);

        // Always zero extend the value if needed.
        if (SV->getType() != AllocaType)
          SV = new ZExtInst(SV, AllocaType, SV->getName(), SI);

        // If this is a big-endian system and the store is narrower than the
        // full alloca type, we need to do a shift to get the right bits.
        int ShAmt = 0;
        if (TD.isBigEndian()) {
          ShAmt = DestWidth-SrcWidth-Offset;
        } else {
          ShAmt = Offset;
        }

        // Note: we support negative shift amounts (by emitting shr instead),
        // even though such shifts are not defined.  We do this to support
        // (f.e.) stores off the end of a structure where only some bits in the
        // structure are set.
        APInt Mask(APInt::getLowBitsSet(DestWidth, SrcWidth));
        if (ShAmt > 0 && (unsigned)ShAmt < DestWidth) {
          SV = BinaryOperator::createShl(SV,
                                         ConstantInt::get(SV->getType(), ShAmt),
                                         SV->getName(), SI);
          Mask <<= ShAmt;
        } else if (ShAmt < 0 && (unsigned)-ShAmt < DestWidth) {
          SV = BinaryOperator::createLShr(SV,
                                          ConstantInt::get(SV->getType(),-ShAmt),
                                          SV->getName(), SI);
          Mask = Mask.lshr(-ShAmt);
        }

        // Mask out the bits we are about to insert from the old value, and or
        // in the new bits.
        if (SrcWidth != DestWidth) {
          assert(DestWidth > SrcWidth);
          Old = BinaryOperator::createAnd(Old, ConstantInt::get(~Mask),
                                          Old->getName()+".mask", SI);
          SV = BinaryOperator::createOr(Old, SV, SV->getName()+".ins", SI);
        }
      }
      new StoreInst(SV, NewAI, SI);
      SI->eraseFromParent();

    } else if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
      ConvertUsesToScalar(CI, NewAI, Offset);
      CI->eraseFromParent();
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      const PointerType *AggPtrTy =
        cast<PointerType>(GEP->getOperand(0)->getType());
      const TargetData &TD = getAnalysis<TargetData>();
      unsigned AggSizeInBits = TD.getTypeSize(AggPtrTy->getElementType())*8;

      // Check to see if this is stepping over an element: GEP Ptr, int C
      unsigned NewOffset = Offset;
      if (GEP->getNumOperands() == 2) {
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(1))->getZExtValue();
        unsigned BitOffset = Idx*AggSizeInBits;

        NewOffset += BitOffset;
      } else if (GEP->getNumOperands() == 3) {
        // We know that the first index (operand #1) is zero.
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();
        const Type *AggTy = AggPtrTy->getElementType();
        if (const SequentialType *SeqTy = dyn_cast<SequentialType>(AggTy)) {
          unsigned ElSizeBits = TD.getTypeSize(SeqTy->getElementType())*8;

          NewOffset += ElSizeBits*Idx;
        } else if (const StructType *STy = dyn_cast<StructType>(AggTy)) {
          unsigned EltBitOffset =
            TD.getStructLayout(STy)->getElementOffset(Idx)*8;

          NewOffset += EltBitOffset;
        } else {
          assert(0 && "Unsupported operation!");
        }
      } else {
        assert(0 && "Unsupported operation!");
      }
      ConvertUsesToScalar(GEP, NewAI, NewOffset);
      GEP->eraseFromParent();
    } else {
      assert(0 && "Unsupported operation!");
    }
  }
}

/// PointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable.  This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
static bool PointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return PointsToConstantGlobal(CE->getOperand(0));
  return false;
}

/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca.  Ignore any reads of the pointer, return false if we
/// see any stores or other unknown uses.  If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with isOffset) but otherwise traverse
/// the uses.  If we see a memcpy/memmove that targets an unoffseted pointer to
/// the alloca, and if the source pointer is a pointer to a constant global, we
/// can optimize this.
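///
/// (Note the isOffset rule: a memcpy into a GEP of the alloca with any
/// non-zero index is rejected, since the copy would not cover the alloca from
/// its start.)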
static bool isOnlyCopiedFromConstantGlobal(Value *V, Instruction *&TheCopy,
                                           bool isOffset) {
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    if (isa<LoadInst>(*UI)) {
      // Ignore loads, they are always ok.
      continue;
    }
    if (BitCastInst *BCI = dyn_cast<BitCastInst>(*UI)) {
      // If uses of the bitcast are ok, we are ok.
      if (!isOnlyCopiedFromConstantGlobal(BCI, TheCopy, isOffset))
        return false;
      continue;
    }
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(*UI)) {
      // If the GEP has all zero indices, it doesn't offset the pointer.  If it
      // doesn't, it does.
      if (!isOnlyCopiedFromConstantGlobal(GEP, TheCopy,
                                          isOffset || !GEP->hasAllZeroIndices()))
        return false;
      continue;
    }

    // If this isn't our memcpy/memmove, reject it as something we can't
    // handle.
    if (!isa<MemCpyInst>(*UI) && !isa<MemMoveInst>(*UI))
      return false;

    // If we already have seen a copy, reject the second one.
    if (TheCopy) return false;

    // If the pointer has been offset from the start of the alloca, we can't
    // safely handle this.
    if (isOffset) return false;

    // If the memintrinsic isn't using the alloca as the dest, reject it.
    if (UI.getOperandNo() != 1) return false;

    MemIntrinsic *MI = cast<MemIntrinsic>(*UI);

    // If the source of the memcpy/move is not a constant global, reject it.
    if (!PointsToConstantGlobal(MI->getOperand(2)))
      return false;

    // Otherwise, the transform is safe.  Remember the copy instruction.
    TheCopy = MI;
  }
  return true;
}

/// isOnlyCopiedFromConstantGlobal - Return the memcpy/memmove instruction if
/// the specified alloca is only modified by a copy from a constant global, or
/// null otherwise.  If we can prove this, we can replace any uses of the
/// alloca with uses of the global directly.
Instruction *SROA::isOnlyCopiedFromConstantGlobal(AllocationInst *AI) {
  Instruction *TheCopy = 0;
  if (::isOnlyCopiedFromConstantGlobal(AI, TheCopy, false))
    return TheCopy;
  return 0;
}