//===- GlobalOpt.cpp - Optimize Global Variables --------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass transforms simple global variables that never have their address
// taken.  If obviously true, it marks read/write globals as constant, deletes
// variables only stored to, etc.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "globalopt"
#include "llvm/Transforms/IPO.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/Pass.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
using namespace llvm;

STATISTIC(NumMarked    , "Number of globals marked constant");
STATISTIC(NumUnnamed   , "Number of globals marked unnamed_addr");
STATISTIC(NumSRA       , "Number of aggregate globals broken into scalars");
STATISTIC(NumHeapSRA   , "Number of heap objects SRA'd");
STATISTIC(NumSubstitute, "Number of globals with initializers stored into them");
STATISTIC(NumDeleted   , "Number of globals deleted");
STATISTIC(NumFnDeleted , "Number of functions deleted");
STATISTIC(NumGlobUses  , "Number of global uses devirtualized");
STATISTIC(NumLocalized , "Number of globals localized");
STATISTIC(NumShrunkToBool  , "Number of global vars shrunk to booleans");
STATISTIC(NumFastCallFns   , "Number of functions converted to fastcc");
STATISTIC(NumCtorsEvaluated, "Number of static ctors evaluated");
STATISTIC(NumNestRemoved   , "Number of nest attributes removed");
STATISTIC(NumAliasesResolved, "Number of global aliases resolved");
STATISTIC(NumAliasesRemoved, "Number of global aliases eliminated");
STATISTIC(NumCXXDtorsRemoved, "Number of global C++ destructors removed");
namespace {
struct GlobalOpt : public ModulePass {
  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    AU.addRequired<TargetLibraryInfo>();
  }
  static char ID; // Pass identification, replacement for typeid
  GlobalOpt() : ModulePass(ID) {
    initializeGlobalOptPass(*PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M);

private:
  GlobalVariable *FindGlobalCtors(Module &M);
  bool OptimizeFunctions(Module &M);
  bool OptimizeGlobalVars(Module &M);
  bool OptimizeGlobalAliases(Module &M);
  bool OptimizeGlobalCtorsList(GlobalVariable *&GCL);
  bool ProcessGlobal(GlobalVariable *GV, Module::global_iterator &GVI);
  bool ProcessInternalGlobal(GlobalVariable *GV, Module::global_iterator &GVI,
                             const GlobalStatus &GS);
  bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn);

  TargetLibraryInfo *TLI;
};
}
char GlobalOpt::ID = 0;
INITIALIZE_PASS_BEGIN(GlobalOpt, "globalopt",
                      "Global Variable Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_PASS_END(GlobalOpt, "globalopt",
                    "Global Variable Optimizer", false, false)

ModulePass *llvm::createGlobalOptimizerPass() { return new GlobalOpt(); }
/// isLeakCheckerRoot - Is this global variable possibly used by a leak checker
/// as a root?  If so, we might not really want to eliminate the stores to it.
static bool isLeakCheckerRoot(GlobalVariable *GV) {
  // A global variable is a root if it is a pointer, or could plausibly contain
  // a pointer.  There are two challenges; one is that we could have a struct
  // that has an inner member which is a pointer.  We recurse through the type
  // to detect these (up to a point).  The other is that we may actually be a
  // union of a pointer and another type, and so our LLVM type is an integer
  // which gets converted into a pointer, or our type is an [i8 x #] with a
  // pointer potentially contained here.
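  //
  // For example (illustrative only), a global lowered from
  //   union { long key; void *ptr; };
  // may have LLVM type i64 yet still hold a pointer at run time, so it must
  // be treated as a possible root.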

  if (GV->hasPrivateLinkage())
    return false;

  SmallVector<Type *, 4> Types;
  Types.push_back(cast<PointerType>(GV->getType())->getElementType());

  unsigned Limit = 20;
  do {
    Type *Ty = Types.pop_back_val();
    switch (Ty->getTypeID()) {
    default: break;
    case Type::PointerTyID: return true;
    case Type::ArrayTyID:
    case Type::VectorTyID: {
      SequentialType *STy = cast<SequentialType>(Ty);
      Types.push_back(STy->getElementType());
      break;
    }
    case Type::StructTyID: {
      StructType *STy = cast<StructType>(Ty);
      if (STy->isOpaque()) return true;
      for (StructType::element_iterator I = STy->element_begin(),
           E = STy->element_end(); I != E; ++I) {
        Type *InnerTy = *I;
        if (isa<PointerType>(InnerTy)) return true;
        if (isa<CompositeType>(InnerTy))
          Types.push_back(InnerTy);
      }
      break;
    }
    }
    if (--Limit == 0) return true;
  } while (!Types.empty());
  return false;
}
/// Given a value that is stored to a global but never read, determine whether
/// it's safe to remove the store and the chain of computation that feeds the
/// pointer.
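///
/// For example (a rough sketch), in
///   %0 = tail call i8* @malloc(i64 16)
///   %1 = getelementptr inbounds i8* %0, i64 8
///   store i8* %1, i8** @g
/// the GEP and the malloc form a chain that may be removed with the store.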
static bool IsSafeComputationToRemove(Value *V, const TargetLibraryInfo *TLI) {
  do {
    if (isa<Constant>(V))
      return true;
    if (!V->hasOneUse())
      return false;
    if (isa<LoadInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V) ||
        isa<GlobalValue>(V))
      return false;
    if (isAllocationFn(V, TLI))
      return true;

    Instruction *I = cast<Instruction>(V);
    if (I->mayHaveSideEffects())
      return false;
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
      if (!GEP->hasAllConstantIndices())
        return false;
    } else if (I->getNumOperands() != 1) {
      return false;
    }

    V = I->getOperand(0);
  } while (1);
}
/// CleanupPointerRootUsers - This GV is a pointer root.  Loop over all users
/// of the global and clean up any that obviously do not assign the global a
/// dynamically allocated value.
static bool CleanupPointerRootUsers(GlobalVariable *GV,
                                    const TargetLibraryInfo *TLI) {
  // A brief explanation of leak checkers.  The goal is to find bugs where
  // pointers are forgotten, causing an accumulating growth in memory
  // usage over time.  The common strategy for leak checkers is to whitelist the
  // memory pointed to by globals at exit.  This is popular because it also
  // solves another problem where the main thread of a C++ program may shut down
  // before other threads that are still expecting to use those globals.  To
  // handle that case, we expect the program may create a singleton and never
  // destroy it.

  bool Changed = false;

  // If Dead[n].first is the only use of a malloc result, we can delete its
  // chain of computation and the store to the global in Dead[n].second.
  SmallVector<std::pair<Instruction *, Instruction *>, 32> Dead;

  // Constants can't be pointers to dynamically allocated memory.
  for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end();
       UI != E;) {
    User *U = *UI++;
    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      Value *V = SI->getValueOperand();
      if (isa<Constant>(V)) {
        Changed = true;
        SI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(V)) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, SI));
      }
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(U)) {
      if (isa<Constant>(MSI->getValue())) {
        Changed = true;
        MSI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(MSI->getValue())) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, MSI));
      }
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(U)) {
      GlobalVariable *MemSrc = dyn_cast<GlobalVariable>(MTI->getSource());
      if (MemSrc && MemSrc->isConstant()) {
        Changed = true;
        MTI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(MemSrc)) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, MTI));
      }
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
      if (CE->use_empty()) {
        CE->destroyConstant();
        Changed = true;
      }
    } else if (Constant *C = dyn_cast<Constant>(U)) {
      if (isSafeToDestroyConstant(C)) {
        C->destroyConstant();
        // This could have invalidated UI, start over from scratch.
        Dead.clear();
        CleanupPointerRootUsers(GV, TLI);
        return true;
      }
    }
  }

  for (int i = 0, e = Dead.size(); i != e; ++i) {
    if (IsSafeComputationToRemove(Dead[i].first, TLI)) {
      Dead[i].second->eraseFromParent();
      Instruction *I = Dead[i].first;
      do {
        if (isAllocationFn(I, TLI))
          break;
        Instruction *J = dyn_cast<Instruction>(I->getOperand(0));
        if (!J)
          break;
        I->eraseFromParent();
        I = J;
      } while (1);
      I->eraseFromParent();
    }
  }

  return Changed;
}

/// CleanupConstantGlobalUsers - We just marked GV constant.  Loop over all
/// users of the global, cleaning up the obvious ones.  This is largely just a
/// quick scan over the use list to clean up the easy and obvious cruft.  This
/// returns true if it made a change.
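///
/// For instance (illustrative), once @g is known constant with initializer
/// i32 42, a "%v = load i32* @g" is replaced by i32 42, and any store to @g
/// must be unreachable and is simply deleted.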
static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
                                       const DataLayout *DL,
                                       TargetLibraryInfo *TLI) {
  bool Changed = false;
  // Note that we need to use a weak value handle for the worklist items. When
  // we delete a constant array, we may also be holding pointer to one of its
  // elements (or an element of one of its elements if we're dealing with an
  // array of arrays) in the worklist.
  SmallVector<WeakVH, 8> WorkList(V->use_begin(), V->use_end());
  while (!WorkList.empty()) {
    Value *UV = WorkList.pop_back_val();
    if (!UV)
      continue;

    User *U = cast<User>(UV);

    if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
      if (Init) {
        // Replace the load with the initializer.
        LI->replaceAllUsesWith(Init);
        LI->eraseFromParent();
        Changed = true;
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // Store must be unreachable or storing Init into the global.
      SI->eraseFromParent();
      Changed = true;
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
      if (CE->getOpcode() == Instruction::GetElementPtr) {
        Constant *SubInit = 0;
        if (Init)
          SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
        Changed |= CleanupConstantGlobalUsers(CE, SubInit, DL, TLI);
      } else if ((CE->getOpcode() == Instruction::BitCast &&
                  CE->getType()->isPointerTy()) ||
                 CE->getOpcode() == Instruction::AddrSpaceCast) {
        // Pointer cast, delete any stores and memsets to the global.
        Changed |= CleanupConstantGlobalUsers(CE, 0, DL, TLI);
      }

      if (CE->use_empty()) {
        CE->destroyConstant();
        Changed = true;
      }
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
      // Do not transform "gepinst (gep constexpr (GV))" here, because forming
      // "gepconstexpr (gep constexpr (GV))" will cause the two gep's to fold
      // and will invalidate our notion of what Init is.
      Constant *SubInit = 0;
      if (!isa<ConstantExpr>(GEP->getOperand(0))) {
        ConstantExpr *CE =
          dyn_cast_or_null<ConstantExpr>(ConstantFoldInstruction(GEP, DL, TLI));
        if (Init && CE && CE->getOpcode() == Instruction::GetElementPtr)
          SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);

        // If the initializer is an all-null value and we have an inbounds GEP,
        // we already know what the result of any load from that GEP is.
        // TODO: Handle splats.
        if (Init && isa<ConstantAggregateZero>(Init) && GEP->isInBounds())
          SubInit = Constant::getNullValue(GEP->getType()->getElementType());
      }
      Changed |= CleanupConstantGlobalUsers(GEP, SubInit, DL, TLI);

      if (GEP->use_empty()) {
        GEP->eraseFromParent();
        Changed = true;
      }
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U)) { // memset/cpy/mv
      if (MI->getRawDest() == V) {
        MI->eraseFromParent();
        Changed = true;
      }

    } else if (Constant *C = dyn_cast<Constant>(U)) {
      // If we have a chain of dead constantexprs or other things dangling from
      // us, and if they are all dead, nuke them without remorse.
      if (isSafeToDestroyConstant(C)) {
        C->destroyConstant();
        CleanupConstantGlobalUsers(V, Init, DL, TLI);
        return true;
      }
    }
  }
  return Changed;
}

/// isSafeSROAElementUse - Return true if the specified instruction is a safe
/// user of a derived expression from a global that we want to SROA.
static bool isSafeSROAElementUse(Value *V) {
  // We might have a dead and dangling constant hanging off of here.
  if (Constant *C = dyn_cast<Constant>(V))
    return isSafeToDestroyConstant(C);

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;

  // Loads are ok.
  if (isa<LoadInst>(I)) return true;

  // Stores *to* the pointer are ok.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getOperand(0) != V;

  // Otherwise, it must be a GEP.
  GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I);
  if (GEPI == 0) return false;

  if (GEPI->getNumOperands() < 3 || !isa<Constant>(GEPI->getOperand(1)) ||
      !cast<Constant>(GEPI->getOperand(1))->isNullValue())
    return false;

  for (Value::use_iterator I = GEPI->use_begin(), E = GEPI->use_end();
       I != E; ++I)
    if (!isSafeSROAElementUse(*I))
      return false;
  return true;
}

/// IsUserOfGlobalSafeForSRA - U is a direct user of the specified global value.
/// Look at it and its uses and decide whether it is safe to SROA this global.
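///
/// e.g. the only users we accept look roughly like
///   getelementptr ([4 x i32]* @GV, i32 0, i32 2)
/// i.e. a zero first index followed by a constant element index.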
static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) {
  // The user of the global must be a GEP Inst or a ConstantExpr GEP.
  if (!isa<GetElementPtrInst>(U) &&
      (!isa<ConstantExpr>(U) ||
       cast<ConstantExpr>(U)->getOpcode() != Instruction::GetElementPtr))
    return false;

  // Check to see if this ConstantExpr GEP is SRA'able.  In particular, we
  // don't like < 3 operand CE's, and we don't like non-constant integer
  // indices.  This enforces that all uses are 'gep GV, 0, C, ...' for some
  // value of C.
  if (U->getNumOperands() < 3 || !isa<Constant>(U->getOperand(1)) ||
      !cast<Constant>(U->getOperand(1))->isNullValue() ||
      !isa<ConstantInt>(U->getOperand(2)))
    return false;

  gep_type_iterator GEPI = gep_type_begin(U), E = gep_type_end(U);
  ++GEPI;  // Skip over the pointer index.

  // If this is a use of an array allocation, do a bit more checking for sanity.
  if (ArrayType *AT = dyn_cast<ArrayType>(*GEPI)) {
    uint64_t NumElements = AT->getNumElements();
    ConstantInt *Idx = cast<ConstantInt>(U->getOperand(2));

    // Check to make sure that index falls within the array.  If not,
    // something funny is going on, so we won't do the optimization.
    if (Idx->getZExtValue() >= NumElements)
      return false;

    // We cannot scalar repl this level of the array unless any array
    // sub-indices are in-range constants.  In particular, consider:
    // A[0][i].  We cannot know that the user isn't doing invalid things like
    // allowing i to index an out-of-range subscript that accesses A[1].
    //
    // Scalar replacing *just* the outer index of the array is probably not
    // going to be a win anyway, so just give up.
    for (++GEPI;  // Skip array index.
         GEPI != E;
         ++GEPI) {
      uint64_t NumElements;
      if (ArrayType *SubArrayTy = dyn_cast<ArrayType>(*GEPI))
        NumElements = SubArrayTy->getNumElements();
      else if (VectorType *SubVectorTy = dyn_cast<VectorType>(*GEPI))
        NumElements = SubVectorTy->getNumElements();
      else {
        assert((*GEPI)->isStructTy() &&
               "Indexed GEP type is not array, vector, or struct!");
        continue;
      }

      ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPI.getOperand());
      if (!IdxVal || IdxVal->getZExtValue() >= NumElements)
        return false;
    }
  }

  for (Value::use_iterator I = U->use_begin(), E = U->use_end(); I != E; ++I)
    if (!isSafeSROAElementUse(*I))
      return false;

  return true;
}

/// GlobalUsersSafeToSRA - Look at all uses of the global and decide whether it
/// is safe for us to perform this transformation.
static bool GlobalUsersSafeToSRA(GlobalValue *GV) {
  for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end();
       UI != E; ++UI)
    if (!IsUserOfGlobalSafeForSRA(*UI, GV))
      return false;

  return true;
}

/// SRAGlobal - Perform scalar replacement of aggregates on the specified global
/// variable.  This opens the door for other optimizations by exposing the
/// behavior of the program in a more fine-grained way.  We have determined that
/// this transformation is safe already.  We return the first global variable we
/// insert so that the caller can reprocess it.
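///
/// For example (a rough sketch):
///   @S = internal global { i32, double } { i32 1, double 2.0 }
/// becomes
///   @S.0 = internal global i32 1
///   @S.1 = internal global double 2.0
/// with each 'gep @S, 0, N, ...' rewritten to refer to @S.N.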
static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) {
  // Make sure this global only has simple uses that we can SRA.
  if (!GlobalUsersSafeToSRA(GV))
    return 0;

  assert(GV->hasLocalLinkage() && !GV->isConstant());
  Constant *Init = GV->getInitializer();
  Type *Ty = Init->getType();

  std::vector<GlobalVariable*> NewGlobals;
  Module::GlobalListType &Globals = GV->getParent()->getGlobalList();

  // Get the alignment of the global, either explicit or target-specific.
  unsigned StartAlignment = GV->getAlignment();
  if (StartAlignment == 0)
    StartAlignment = DL.getABITypeAlignment(GV->getType());

  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    NewGlobals.reserve(STy->getNumElements());
    const StructLayout &Layout = *DL.getStructLayout(STy);
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      Constant *In = Init->getAggregateElement(i);
      assert(In && "Couldn't get element of initializer?");
      GlobalVariable *NGV = new GlobalVariable(STy->getElementType(i), false,
                                               GlobalVariable::InternalLinkage,
                                               In, GV->getName()+"."+Twine(i),
                                               GV->getThreadLocalMode(),
                                              GV->getType()->getAddressSpace());
      Globals.insert(GV, NGV);
      NewGlobals.push_back(NGV);

      // Calculate the known alignment of the field.  If the original aggregate
      // had 256 byte alignment for example, something might depend on that:
      // propagate info to each field.
      uint64_t FieldOffset = Layout.getElementOffset(i);
      unsigned NewAlign = (unsigned)MinAlign(StartAlignment, FieldOffset);
      if (NewAlign > DL.getABITypeAlignment(STy->getElementType(i)))
        NGV->setAlignment(NewAlign);
    }
  } else if (SequentialType *STy = dyn_cast<SequentialType>(Ty)) {
    unsigned NumElements = 0;
    if (ArrayType *ATy = dyn_cast<ArrayType>(STy))
      NumElements = ATy->getNumElements();
    else
      NumElements = cast<VectorType>(STy)->getNumElements();

    if (NumElements > 16 && GV->hasNUsesOrMore(16))
      return 0; // It's not worth it.
    NewGlobals.reserve(NumElements);

    uint64_t EltSize = DL.getTypeAllocSize(STy->getElementType());
    unsigned EltAlign = DL.getABITypeAlignment(STy->getElementType());
    for (unsigned i = 0, e = NumElements; i != e; ++i) {
      Constant *In = Init->getAggregateElement(i);
      assert(In && "Couldn't get element of initializer?");

      GlobalVariable *NGV = new GlobalVariable(STy->getElementType(), false,
                                               GlobalVariable::InternalLinkage,
                                               In, GV->getName()+"."+Twine(i),
                                               GV->getThreadLocalMode(),
                                              GV->getType()->getAddressSpace());
      Globals.insert(GV, NGV);
      NewGlobals.push_back(NGV);

      // Calculate the known alignment of the field.  If the original aggregate
      // had 256 byte alignment for example, something might depend on that:
      // propagate info to each field.
      unsigned NewAlign = (unsigned)MinAlign(StartAlignment, EltSize*i);
      if (NewAlign > EltAlign)
        NGV->setAlignment(NewAlign);
    }
  }

  if (NewGlobals.empty())
    return 0;

  DEBUG(dbgs() << "PERFORMING GLOBAL SRA ON: " << *GV);

  Constant *NullInt = Constant::getNullValue(Type::getInt32Ty(GV->getContext()));

  // Loop over all of the uses of the global, replacing the constantexpr geps,
  // with smaller constantexpr geps or direct references.
  while (!GV->use_empty()) {
    User *GEP = GV->use_back();
    assert(((isa<ConstantExpr>(GEP) &&
             cast<ConstantExpr>(GEP)->getOpcode()==Instruction::GetElementPtr)||
            isa<GetElementPtrInst>(GEP)) && "NonGEP CE's are not SRAable!");

    // Ignore the first index operand, which has to be zero or else the program
    // is quite broken (undefined).  Get the second operand, which is the
    // structure or array index.
    unsigned Val = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();
    if (Val >= NewGlobals.size()) Val = 0; // Out of bound array access.

    Value *NewPtr = NewGlobals[Val];

    // Form a shorter GEP if needed.
    if (GEP->getNumOperands() > 3) {
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GEP)) {
        SmallVector<Constant*, 8> Idxs;
        Idxs.push_back(NullInt);
        for (unsigned i = 3, e = CE->getNumOperands(); i != e; ++i)
          Idxs.push_back(CE->getOperand(i));
        NewPtr = ConstantExpr::getGetElementPtr(cast<Constant>(NewPtr), Idxs);
      } else {
        GetElementPtrInst *GEPI = cast<GetElementPtrInst>(GEP);
        SmallVector<Value*, 8> Idxs;
        Idxs.push_back(NullInt);
        for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i)
          Idxs.push_back(GEPI->getOperand(i));
        NewPtr = GetElementPtrInst::Create(NewPtr, Idxs,
                                           GEPI->getName()+"."+Twine(Val),GEPI);
      }
    }
    GEP->replaceAllUsesWith(NewPtr);

    if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(GEP))
      GEPI->eraseFromParent();
    else
      cast<ConstantExpr>(GEP)->destroyConstant();
  }

  // Delete the old global, now that it is dead.
  Globals.erase(GV);
  ++NumSRA;

  // Loop over the new globals array deleting any globals that are obviously
  // dead.  This can arise due to scalarization of a structure or an array that
  // has elements that are dead.
  unsigned FirstGlobal = 0;
  for (unsigned i = 0, e = NewGlobals.size(); i != e; ++i)
    if (NewGlobals[i]->use_empty()) {
      Globals.erase(NewGlobals[i]);
      if (FirstGlobal == i) ++FirstGlobal;
    }

  return FirstGlobal != NewGlobals.size() ? NewGlobals[FirstGlobal] : 0;
}

/// AllUsesOfValueWillTrapIfNull - Return true if all users of the specified
/// value will trap if the value is dynamically null.  PHIs keeps track of any
/// phi nodes we've seen to avoid reprocessing them.
static bool AllUsesOfValueWillTrapIfNull(const Value *V,
                                         SmallPtrSet<const PHINode*, 8> &PHIs) {
  for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;
       ++UI) {
    const User *U = *UI;

    if (isa<LoadInst>(U)) {
      // Will trap.
    } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
      if (SI->getOperand(0) == V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Storing the value.
      }
    } else if (const CallInst *CI = dyn_cast<CallInst>(U)) {
      if (CI->getCalledValue() != V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Not calling the ptr
      }
    } else if (const InvokeInst *II = dyn_cast<InvokeInst>(U)) {
      if (II->getCalledValue() != V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Not calling the ptr
      }
    } else if (const BitCastInst *CI = dyn_cast<BitCastInst>(U)) {
      if (!AllUsesOfValueWillTrapIfNull(CI, PHIs)) return false;
    } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      if (!AllUsesOfValueWillTrapIfNull(GEPI, PHIs)) return false;
    } else if (const PHINode *PN = dyn_cast<PHINode>(U)) {
      // If we've already seen this phi node, ignore it, it has already been
      // checked.
      if (PHIs.insert(PN) && !AllUsesOfValueWillTrapIfNull(PN, PHIs))
        return false;
    } else if (isa<ICmpInst>(U) &&
               isa<ConstantPointerNull>(UI->getOperand(1))) {
      // Ignore icmp X, null
    } else {
      //cerr << "NONTRAPPING USE: " << *U;
      return false;
    }
  }
  return true;
}

/// AllUsesOfLoadedValueWillTrapIfNull - Return true if all uses of any loads
/// from GV will trap if the loaded value is null.  Note that this also permits
/// comparisons of the loaded value against null, as a special case.
static bool AllUsesOfLoadedValueWillTrapIfNull(const GlobalVariable *GV) {
  for (Value::const_use_iterator UI = GV->use_begin(), E = GV->use_end();
       UI != E; ++UI) {
    const User *U = *UI;
    if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
      SmallPtrSet<const PHINode*, 8> PHIs;
      if (!AllUsesOfValueWillTrapIfNull(LI, PHIs))
        return false;
    } else if (isa<StoreInst>(U)) {
      // Ignore stores to the global.
    } else {
      // We don't know or understand this user, bail out.
      //cerr << "UNKNOWN USER OF GLOBAL!: " << *U;
      return false;
    }
  }
  return true;
}

static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
  bool Changed = false;
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ) {
    Instruction *I = cast<Instruction>(*UI++);
    if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      LI->setOperand(0, NewV);
      Changed = true;
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      if (SI->getOperand(1) == V) {
        SI->setOperand(1, NewV);
        Changed = true;
      }
    } else if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
      CallSite CS(I);
      if (CS.getCalledValue() == V) {
        // Calling through the pointer!  Turn into a direct call, but be careful
        // that the pointer is not also being passed as an argument.
        CS.setCalledFunction(NewV);
        Changed = true;
        bool PassedAsArg = false;
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.getArgument(i) == V) {
            PassedAsArg = true;
            CS.setArgument(i, NewV);
          }

        if (PassedAsArg) {
          // Being passed as an argument also.  Be careful to not invalidate UI!
          UI = V->use_begin();
        }
      }
    } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
      Changed |= OptimizeAwayTrappingUsesOfValue(CI,
                                ConstantExpr::getCast(CI->getOpcode(),
                                                      NewV, CI->getType()));
      if (CI->use_empty()) {
        Changed = true;
        CI->eraseFromParent();
      }
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
      // Should handle GEP here.
      SmallVector<Constant*, 8> Idxs;
      Idxs.reserve(GEPI->getNumOperands()-1);
      for (User::op_iterator i = GEPI->op_begin() + 1, e = GEPI->op_end();
           i != e; ++i)
        if (Constant *C = dyn_cast<Constant>(*i))
          Idxs.push_back(C);
        else
          break;
      if (Idxs.size() == GEPI->getNumOperands()-1)
        Changed |= OptimizeAwayTrappingUsesOfValue(GEPI,
                          ConstantExpr::getGetElementPtr(NewV, Idxs));
      if (GEPI->use_empty()) {
        Changed = true;
        GEPI->eraseFromParent();
      }
    }
  }

  return Changed;
}

/// OptimizeAwayTrappingUsesOfLoads - The specified global has only one non-null
/// value stored into it.  If there are uses of the loaded value that would trap
/// if the loaded value is dynamically null, then we know that they cannot be
/// reachable with a null value; optimize away the load.
static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
                                            const DataLayout *DL,
                                            TargetLibraryInfo *TLI) {
  bool Changed = false;

  // Keep track of whether we are able to remove all the uses of the global
  // other than the store that defines it.
  bool AllNonStoreUsesGone = true;

  // Replace all uses of loads with uses of uses of the stored value.
  for (Value::use_iterator GUI = GV->use_begin(), E = GV->use_end(); GUI != E;){
    User *GlobalUser = *GUI++;
    if (LoadInst *LI = dyn_cast<LoadInst>(GlobalUser)) {
      Changed |= OptimizeAwayTrappingUsesOfValue(LI, LV);
      // If we were able to delete all uses of the loads
      if (LI->use_empty()) {
        LI->eraseFromParent();
        Changed = true;
      } else {
        AllNonStoreUsesGone = false;
      }
    } else if (isa<StoreInst>(GlobalUser)) {
      // Ignore the store that stores "LV" to the global.
      assert(GlobalUser->getOperand(1) == GV &&
             "Must be storing *to* the global");
    } else {
      AllNonStoreUsesGone = false;

      // If we get here we could have other crazy uses that are transitively
      // loaded.
      assert((isa<PHINode>(GlobalUser) || isa<SelectInst>(GlobalUser) ||
              isa<ConstantExpr>(GlobalUser) || isa<CmpInst>(GlobalUser) ||
              isa<BitCastInst>(GlobalUser) ||
              isa<GetElementPtrInst>(GlobalUser)) &&
             "Only expect load and stores!");
    }
  }

  if (Changed) {
    DEBUG(dbgs() << "OPTIMIZED LOADS FROM STORED ONCE POINTER: " << *GV);
    ++NumGlobUses;
  }

  // If we nuked all of the loads, then none of the stores are needed either,
  // nor is the global.
  if (AllNonStoreUsesGone) {
    if (isLeakCheckerRoot(GV)) {
      Changed |= CleanupPointerRootUsers(GV, TLI);
    } else {
      Changed = true;
      CleanupConstantGlobalUsers(GV, 0, DL, TLI);
    }
    if (GV->use_empty()) {
      DEBUG(dbgs() << "  *** GLOBAL NOW DEAD!\n");
      Changed = true;
      GV->eraseFromParent();
      ++NumDeleted;
    }
  }
  return Changed;
}

/// ConstantPropUsersOf - Walk the use list of V, constant folding all of the
/// instructions that are foldable.
static void ConstantPropUsersOf(Value *V, const DataLayout *DL,
                                TargetLibraryInfo *TLI) {
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; )
    if (Instruction *I = dyn_cast<Instruction>(*UI++))
      if (Constant *NewC = ConstantFoldInstruction(I, DL, TLI)) {
        I->replaceAllUsesWith(NewC);

        // Advance UI to the next non-I use to avoid invalidating it!
        // Instructions could multiply use V.
        while (UI != E && *UI == I)
          ++UI;
        I->eraseFromParent();
      }
}

/// OptimizeGlobalAddressOfMalloc - This function takes the specified global
/// variable, and transforms the program as if it always contained the result of
/// the specified malloc.  Because it is always the result of the specified
/// malloc, there is no reason to actually DO the malloc.  Instead, turn the
/// malloc into a global, and rewrite any loads of GV as uses of the new global.
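///
/// For example (a rough sketch):
///   @g = internal global i32* null
///   store i32* %m, i32** @g        ; %m is the malloc result
/// becomes
///   @g.body = internal global i32 undef
/// with loads of @g rewritten to use @g.body directly.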
static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
                                                     CallInst *CI,
                                                     Type *AllocTy,
                                                     ConstantInt *NElements,
                                                     const DataLayout *DL,
                                                     TargetLibraryInfo *TLI) {
  DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << "  CALL = " << *CI << '\n');

  Type *GlobalType;
  if (NElements->getZExtValue() == 1)
    GlobalType = AllocTy;
  else
    // If we have an array allocation, the global variable is of an array.
    GlobalType = ArrayType::get(AllocTy, NElements->getZExtValue());

  // Create the new global variable.  The contents of the malloc'd memory is
  // undefined, so initialize with an undef value.
  GlobalVariable *NewGV = new GlobalVariable(*GV->getParent(),
                                             GlobalType, false,
                                             GlobalValue::InternalLinkage,
                                             UndefValue::get(GlobalType),
                                             GV->getName()+".body",
                                             GV,
                                             GV->getThreadLocalMode());

  // If there are bitcast users of the malloc (which is typical, usually we have
  // a malloc + bitcast) then replace them with uses of the new global.  Update
  // other users to use the global as well.
  BitCastInst *TheBC = 0;
  while (!CI->use_empty()) {
    Instruction *User = cast<Instruction>(CI->use_back());
    if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
      if (BCI->getType() == NewGV->getType()) {
        BCI->replaceAllUsesWith(NewGV);
        BCI->eraseFromParent();
      } else {
        BCI->setOperand(0, NewGV);
      }
    } else {
      if (TheBC == 0)
        TheBC = new BitCastInst(NewGV, CI->getType(), "newgv", CI);
      User->replaceUsesOfWith(CI, TheBC);
    }
  }

  Constant *RepValue = NewGV;
  if (NewGV->getType() != GV->getType()->getElementType())
    RepValue = ConstantExpr::getBitCast(RepValue,
                                        GV->getType()->getElementType());

  // If there is a comparison against null, we will insert a global bool to
  // keep track of whether the global was initialized yet or not.
  GlobalVariable *InitBool =
    new GlobalVariable(Type::getInt1Ty(GV->getContext()), false,
                       GlobalValue::InternalLinkage,
                       ConstantInt::getFalse(GV->getContext()),
                       GV->getName()+".init", GV->getThreadLocalMode());
  bool InitBoolUsed = false;

  // Loop over all uses of GV, processing them in turn.
  while (!GV->use_empty()) {
    if (StoreInst *SI = dyn_cast<StoreInst>(GV->use_back())) {
      // The global is initialized when the store to it occurs.
      new StoreInst(ConstantInt::getTrue(GV->getContext()), InitBool, false, 0,
                    SI->getOrdering(), SI->getSynchScope(), SI);
      SI->eraseFromParent();
      continue;
    }

    LoadInst *LI = cast<LoadInst>(GV->use_back());
    while (!LI->use_empty()) {
      Use &LoadUse = LI->use_begin().getUse();
      if (!isa<ICmpInst>(LoadUse.getUser())) {
        LoadUse = RepValue;
        continue;
      }

      ICmpInst *ICI = cast<ICmpInst>(LoadUse.getUser());
      // Replace the cmp X, 0 with a use of the bool value.
      // Sink the load to where the compare was, if atomic rules allow us to.
      Value *LV = new LoadInst(InitBool, InitBool->getName()+".val", false, 0,
                               LI->getOrdering(), LI->getSynchScope(),
                               LI->isUnordered() ? (Instruction*)ICI : LI);
      InitBoolUsed = true;
      switch (ICI->getPredicate()) {
      default: llvm_unreachable("Unknown ICmp Predicate!");
      case ICmpInst::ICMP_ULT:
      case ICmpInst::ICMP_SLT:   // X < null -> always false
        LV = ConstantInt::getFalse(GV->getContext());
        break;
      case ICmpInst::ICMP_ULE:
      case ICmpInst::ICMP_SLE:
      case ICmpInst::ICMP_EQ:
        LV = BinaryOperator::CreateNot(LV, "notinit", ICI);
        break;
      case ICmpInst::ICMP_NE:
      case ICmpInst::ICMP_UGE:
      case ICmpInst::ICMP_SGE:
      case ICmpInst::ICMP_UGT:
      case ICmpInst::ICMP_SGT:
        break;  // no change.
      }
      ICI->replaceAllUsesWith(LV);
      ICI->eraseFromParent();
    }
    LI->eraseFromParent();
  }

  // If the initialization boolean was used, insert it, otherwise delete it.
  if (!InitBoolUsed) {
    while (!InitBool->use_empty())  // Delete initializations
      cast<StoreInst>(InitBool->use_back())->eraseFromParent();
    delete InitBool;
  } else
    GV->getParent()->getGlobalList().insert(GV, InitBool);

  // Now the GV is dead, nuke it and the malloc..
  GV->eraseFromParent();
  CI->eraseFromParent();

  // To further other optimizations, loop over all users of NewGV and try to
  // constant prop them.  This will promote GEP instructions with constant
  // indices into GEP constant-exprs, which will allow global-opt to hack on it.
  ConstantPropUsersOf(NewGV, DL, TLI);
  if (RepValue != NewGV)
    ConstantPropUsersOf(RepValue, DL, TLI);

  return NewGV;
}

/// ValueIsOnlyUsedLocallyOrStoredToOneGlobal - Scan the use-list of V checking
/// to make sure that there are no complex uses of V.  We permit simple things
/// like dereferencing the pointer, but not storing through the address, unless
/// it is to the specified global.
static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(const Instruction *V,
                                                      const GlobalVariable *GV,
                                         SmallPtrSet<const PHINode*, 8> &PHIs) {
  for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end();
       UI != E; ++UI) {
    const Instruction *Inst = cast<Instruction>(*UI);

    if (isa<LoadInst>(Inst) || isa<CmpInst>(Inst)) {
      continue; // Fine, ignore.
    }

    if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      if (SI->getOperand(0) == V && SI->getOperand(1) != GV)
        return false;  // Storing the pointer itself... bad.
      continue; // Otherwise, storing through it, or storing into GV... fine.
    }

    // Must index into the array and into the struct.
    if (isa<GetElementPtrInst>(Inst) && Inst->getNumOperands() >= 3) {
      if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(Inst, GV, PHIs))
        return false;
      continue;
    }

    if (const PHINode *PN = dyn_cast<PHINode>(Inst)) {
      // PHIs are ok if all uses are ok.  Don't infinitely recurse through PHI
      // cycles.
      if (PHIs.insert(PN))
        if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(PN, GV, PHIs))
          return false;
      continue;
    }

    if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Inst)) {
      if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(BCI, GV, PHIs))
        return false;
      continue;
    }

    return false;
  }
  return true;
}

/// ReplaceUsesOfMallocWithGlobal - The Alloc pointer is stored into GV
/// somewhere.  Transform all uses of the allocation into loads from the
/// global and uses of the resultant pointer.  Further, delete the store into
/// GV.  This assumes that these values pass the
/// 'ValueIsOnlyUsedLocallyOrStoredToOneGlobal' predicate.
static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc,
                                          GlobalVariable *GV) {
  while (!Alloc->use_empty()) {
    Instruction *U = cast<Instruction>(*Alloc->use_begin());
    Instruction *InsertPt = U;
    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // If this is the store of the allocation into the global, remove it.
      if (SI->getOperand(1) == GV) {
        SI->eraseFromParent();
        continue;
      }
    } else if (PHINode *PN = dyn_cast<PHINode>(U)) {
      // Insert the load in the corresponding predecessor, not right before the
      // PHI.
      InsertPt = PN->getIncomingBlock(Alloc->use_begin())->getTerminator();
    } else if (isa<BitCastInst>(U)) {
      // Must be bitcast between the malloc and store to initialize the global.
      ReplaceUsesOfMallocWithGlobal(U, GV);
      U->eraseFromParent();
      continue;
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      // If this is a "GEP bitcast" and the user is a store to the global, then
      // just process it as a bitcast.
      if (GEPI->hasAllZeroIndices() && GEPI->hasOneUse())
        if (StoreInst *SI = dyn_cast<StoreInst>(GEPI->use_back()))
          if (SI->getOperand(1) == GV) {
            // Must be bitcast GEP between the malloc and store to initialize
            // the global.
            ReplaceUsesOfMallocWithGlobal(GEPI, GV);
            GEPI->eraseFromParent();
            continue;
          }
    }

    // Insert a load from the global, and use it instead of the malloc.
    Value *NL = new LoadInst(GV, GV->getName()+".val", InsertPt);
    U->replaceUsesOfWith(Alloc, NL);
  }
}

/// LoadUsesSimpleEnoughForHeapSRA - Verify that all uses of V (a load, or a phi
/// of a load) are simple enough to perform heap SRA on.  This permits GEP's
/// that index through the array and struct field, icmps of null, and PHIs.
static bool LoadUsesSimpleEnoughForHeapSRA(const Value *V,
                         SmallPtrSet<const PHINode*, 32> &LoadUsingPHIs,
                         SmallPtrSet<const PHINode*, 32> &LoadUsingPHIsPerLoad) {
  // We permit two users of the load: setcc comparing against the null
  // pointer, and a getelementptr of a specific form.
  for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;
       ++UI) {
    const Instruction *User = cast<Instruction>(*UI);

    // Comparison against null is ok.
    if (const ICmpInst *ICI = dyn_cast<ICmpInst>(User)) {
      if (!isa<ConstantPointerNull>(ICI->getOperand(1)))
        return false;
      continue;
    }

    // getelementptr is also ok, but only a simple form.
    if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
      // Must index into the array and into the struct.
      if (GEPI->getNumOperands() < 3)
        return false;

      // Otherwise the GEP is ok.
      continue;
    }

    if (const PHINode *PN = dyn_cast<PHINode>(User)) {
      if (!LoadUsingPHIsPerLoad.insert(PN))
        // This means some phi nodes are dependent on each other.
        // Avoid infinite looping!
        return false;
      if (!LoadUsingPHIs.insert(PN))
        // If we have already analyzed this PHI, then it is safe.
        continue;

      // Make sure all uses of the PHI are simple enough to transform.
      if (!LoadUsesSimpleEnoughForHeapSRA(PN,
                                          LoadUsingPHIs, LoadUsingPHIsPerLoad))
        return false;

      continue;
    }

    // Otherwise we don't know what this is, not ok.
    return false;
  }

  return true;
}

/// AllGlobalLoadUsesSimpleEnoughForHeapSRA - If all users of values loaded from
/// GV are simple enough to perform HeapSRA, return true.
static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(const GlobalVariable *GV,
                                                    Instruction *StoredVal) {
  SmallPtrSet<const PHINode*, 32> LoadUsingPHIs;
  SmallPtrSet<const PHINode*, 32> LoadUsingPHIsPerLoad;
  for (Value::const_use_iterator UI = GV->use_begin(), E = GV->use_end();
       UI != E; ++UI)
    if (const LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
      if (!LoadUsesSimpleEnoughForHeapSRA(LI, LoadUsingPHIs,
                                          LoadUsingPHIsPerLoad))
        return false;
      LoadUsingPHIsPerLoad.clear();
    }

  // If we reach here, we know that all uses of the loads and transitive uses
  // (through PHI nodes) are simple enough to transform.  However, we don't know
  // that all inputs to the PHI nodes are in the same equivalence sets.
  // Check to verify that all operands of the PHIs are either PHIS that can be
  // transformed, loads from GV, or MI itself.
  for (SmallPtrSet<const PHINode*, 32>::const_iterator I = LoadUsingPHIs.begin()
       , E = LoadUsingPHIs.end(); I != E; ++I) {
    const PHINode *PN = *I;
    for (unsigned op = 0, e = PN->getNumIncomingValues(); op != e; ++op) {
      Value *InVal = PN->getIncomingValue(op);

      // PHI of the stored value itself is ok.
      if (InVal == StoredVal) continue;

      if (const PHINode *InPN = dyn_cast<PHINode>(InVal)) {
        // One of the PHIs in our set is (optimistically) ok.
        if (LoadUsingPHIs.count(InPN))
          continue;
        return false;
      }

      // Load from GV is ok.
      if (const LoadInst *LI = dyn_cast<LoadInst>(InVal))
        if (LI->getOperand(0) == GV)
          continue;

      // Anything else is rejected.
      return false;
    }
  }

  return true;
}

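/// GetHeapSROAValue - Return (creating it lazily if necessary) the scalarized
/// value corresponding to field FieldNo of V, where V is a load of the
/// original heap pointer or a PHI of such loads.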
static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
               DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
                   std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
  std::vector<Value*> &FieldVals = InsertedScalarizedValues[V];

  if (FieldNo >= FieldVals.size())
    FieldVals.resize(FieldNo+1);

  // If we already have this value, just reuse the previously scalarized
  // version.
  if (Value *FieldVal = FieldVals[FieldNo])
    return FieldVal;

  // Depending on what instruction this is, we have several cases.
  Value *Result;
  if (LoadInst *LI = dyn_cast<LoadInst>(V)) {
    // This is a scalarized version of the load from the global.  Just create
    // a new Load of the scalarized global.
    Result = new LoadInst(GetHeapSROAValue(LI->getOperand(0), FieldNo,
                                           InsertedScalarizedValues,
                                           PHIsToRewrite),
                          LI->getName()+".f"+Twine(FieldNo), LI);
  } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
    // PN's type is pointer to struct.  Make a new PHI of pointer to struct
    // field.
    StructType *ST = cast<StructType>(PN->getType()->getPointerElementType());

    PHINode *NewPN =
      PHINode::Create(PointerType::getUnqual(ST->getElementType(FieldNo)),
                      PN->getNumIncomingValues(),
                      PN->getName()+".f"+Twine(FieldNo), PN);
    Result = NewPN;
    PHIsToRewrite.push_back(std::make_pair(PN, FieldNo));
  } else {
    llvm_unreachable("Unknown usable value");
  }

  return FieldVals[FieldNo] = Result;
}

/// RewriteHeapSROALoadUser - Given a load instruction and a value derived from
/// the load, rewrite the derived value to use the HeapSRoA'd load.
static void RewriteHeapSROALoadUser(Instruction *LoadUser,
             DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
                   std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
  // If this is a comparison against null, handle it.
  if (ICmpInst *SCI = dyn_cast<ICmpInst>(LoadUser)) {
    assert(isa<ConstantPointerNull>(SCI->getOperand(1)));
    // If we have a setcc of the loaded pointer, we can use a setcc of any
    // field.
    Value *NPtr = GetHeapSROAValue(SCI->getOperand(0), 0,
                                   InsertedScalarizedValues, PHIsToRewrite);

    Value *New = new ICmpInst(SCI, SCI->getPredicate(), NPtr,
                              Constant::getNullValue(NPtr->getType()),
                              SCI->getName());
    SCI->replaceAllUsesWith(New);
    SCI->eraseFromParent();
    return;
  }

  // Handle 'getelementptr Ptr, Idx, i32 FieldNo ...'
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(LoadUser)) {
    assert(GEPI->getNumOperands() >= 3 && isa<ConstantInt>(GEPI->getOperand(2))
           && "Unexpected GEPI!");

    // Load the pointer for this field.
    unsigned FieldNo = cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();
    Value *NewPtr = GetHeapSROAValue(GEPI->getOperand(0), FieldNo,
                                     InsertedScalarizedValues, PHIsToRewrite);

    // Create the new GEP idx vector.
    SmallVector<Value*, 8> GEPIdx;
    GEPIdx.push_back(GEPI->getOperand(1));
    GEPIdx.append(GEPI->op_begin()+3, GEPI->op_end());

    Value *NGEPI = GetElementPtrInst::Create(NewPtr, GEPIdx,
                                             GEPI->getName(), GEPI);
    GEPI->replaceAllUsesWith(NGEPI);
    GEPI->eraseFromParent();
    return;
  }

  // Recursively transform the users of PHI nodes.  This will lazily create the
  // PHIs that are needed for individual elements.  Keep track of what PHIs we
  // see in InsertedScalarizedValues so that we don't get infinite loops (very
  // antisocial).  If the PHI is already in InsertedScalarizedValues, it has
  // already been seen first by another load, so its uses have already been
  // processed.
  PHINode *PN = cast<PHINode>(LoadUser);
  if (!InsertedScalarizedValues.insert(std::make_pair(PN,
                                              std::vector<Value*>())).second)
    return;

  // If this is the first time we've seen this PHI, recursively process all
  // users.
  for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end(); UI != E; ) {
    Instruction *User = cast<Instruction>(*UI++);
    RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
  }
}

/// RewriteUsesOfLoadForHeapSRoA - We are performing Heap SRoA on a global.  Ptr
/// is a value loaded from the global.  Eliminate all uses of Ptr, making them
/// use FieldGlobals instead.  All uses of loaded values satisfy
/// AllGlobalLoadUsesSimpleEnoughForHeapSRA.
static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
               DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
                   std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
  for (Value::use_iterator UI = Load->use_begin(), E = Load->use_end();
       UI != E; ) {
    Instruction *User = cast<Instruction>(*UI++);
    RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
  }

  if (Load->use_empty()) {
    Load->eraseFromParent();
    InsertedScalarizedValues.erase(Load);
  }
}

/// PerformHeapAllocSRoA - CI is an allocation of an array of structures.  Break
/// it up into multiple allocations of arrays of the fields.
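///
/// For instance (roughly), a single malloc of "N x { i32, i8* }" stored to @g
/// is rewritten as one malloc of "N x i32" stored to @g.f0 and one malloc of
/// "N x i8*" stored to @g.f1, with all GEP/load users split per field.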
static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
                                            Value *NElems, const DataLayout *DL,
                                            const TargetLibraryInfo *TLI) {
  DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << "  MALLOC = " << *CI << '\n');
  Type *MAT = getMallocAllocatedType(CI, TLI);
  StructType *STy = cast<StructType>(MAT);

  // There is guaranteed to be at least one use of the malloc (storing
  // it into GV).  If there are other uses, change them to be uses of
  // the global to simplify later code.  This also deletes the store
  // into GV.
  ReplaceUsesOfMallocWithGlobal(CI, GV);

  // Okay, at this point, there are no users of the malloc.  Insert N
  // new mallocs at the same place as CI, and N globals.
  std::vector<Value*> FieldGlobals;
  std::vector<Value*> FieldMallocs;

  for (unsigned FieldNo = 0, e = STy->getNumElements(); FieldNo != e;++FieldNo){
    Type *FieldTy = STy->getElementType(FieldNo);
    PointerType *PFieldTy = PointerType::getUnqual(FieldTy);

    GlobalVariable *NGV =
      new GlobalVariable(*GV->getParent(),
                         PFieldTy, false, GlobalValue::InternalLinkage,
                         Constant::getNullValue(PFieldTy),
                         GV->getName() + ".f" + Twine(FieldNo), GV,
                         GV->getThreadLocalMode());
    FieldGlobals.push_back(NGV);

    unsigned TypeSize = DL->getTypeAllocSize(FieldTy);
    if (StructType *ST = dyn_cast<StructType>(FieldTy))
      TypeSize = DL->getStructLayout(ST)->getSizeInBytes();
    Type *IntPtrTy = DL->getIntPtrType(CI->getType());
    Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy,
                                        ConstantInt::get(IntPtrTy, TypeSize),
                                        NElems, 0,
                                        CI->getName() + ".f" + Twine(FieldNo));
    FieldMallocs.push_back(NMI);
    new StoreInst(NMI, NGV, CI);
  }

  // The tricky aspect of this transformation is handling the case when malloc
  // fails.  In the original code, malloc failing would set the result pointer
  // of malloc to null.  In this case, some mallocs could succeed and others
  // could fail.  As such, we emit code that looks like this:
  //    F0 = malloc(field0)
  //    F1 = malloc(field1)
  //    F2 = malloc(field2)
  //    if (F0 == 0 || F1 == 0 || F2 == 0) {
  //      if (F0) { free(F0); F0 = 0; }
  //      if (F1) { free(F1); F1 = 0; }
  //      if (F2) { free(F2); F2 = 0; }
  //    }
  // The malloc can also fail if its argument is too large.
  Constant *ConstantZero = ConstantInt::get(CI->getArgOperand(0)->getType(), 0);
  Value *RunningOr = new ICmpInst(CI, ICmpInst::ICMP_SLT, CI->getArgOperand(0),
                                  ConstantZero, "isneg");
  for (unsigned i = 0, e = FieldMallocs.size(); i != e; ++i) {
    Value *Cond = new ICmpInst(CI, ICmpInst::ICMP_EQ, FieldMallocs[i],
                             Constant::getNullValue(FieldMallocs[i]->getType()),
                               "isnull");
    RunningOr = BinaryOperator::CreateOr(RunningOr, Cond, "tmp", CI);
  }

  // Split the basic block at the old malloc.
  BasicBlock *OrigBB = CI->getParent();
  BasicBlock *ContBB = OrigBB->splitBasicBlock(CI, "malloc_cont");

  // Create the block to check the first condition.  Put all these blocks at the
  // end of the function as they are unlikely to be executed.
  BasicBlock *NullPtrBlock = BasicBlock::Create(OrigBB->getContext(),
                                                "malloc_ret_null",
                                                OrigBB->getParent());

  // Remove the uncond branch from OrigBB to ContBB, turning it into a cond
  // branch on RunningOr.
  OrigBB->getTerminator()->eraseFromParent();
  BranchInst::Create(NullPtrBlock, ContBB, RunningOr, OrigBB);

  // Within the NullPtrBlock, we need to emit a comparison and branch for each
  // pointer, because some may be null while others are not.
  for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
    Value *GVVal = new LoadInst(FieldGlobals[i], "tmp", NullPtrBlock);
    Value *Cmp = new ICmpInst(*NullPtrBlock, ICmpInst::ICMP_NE, GVVal,
                              Constant::getNullValue(GVVal->getType()));
    BasicBlock *FreeBlock = BasicBlock::Create(Cmp->getContext(), "free_it",
                                               OrigBB->getParent());
    BasicBlock *NextBlock = BasicBlock::Create(Cmp->getContext(), "next",
                                               OrigBB->getParent());
    Instruction *BI = BranchInst::Create(FreeBlock, NextBlock,
                                         Cmp, NullPtrBlock);

    // Fill in FreeBlock.
    CallInst::CreateFree(GVVal, BI);
    new StoreInst(Constant::getNullValue(GVVal->getType()), FieldGlobals[i],
                  BI);
    BranchInst::Create(NextBlock, FreeBlock);

    NullPtrBlock = NextBlock;
  }

  BranchInst::Create(ContBB, NullPtrBlock);

  // CI is no longer needed, remove it.
  CI->eraseFromParent();

  /// InsertedScalarizedLoads - As we process loads, if we can't immediately
  /// update all uses of the load, keep track of what scalarized loads are
  /// inserted for a given load.
  DenseMap<Value*, std::vector<Value*> > InsertedScalarizedValues;
  InsertedScalarizedValues[GV] = FieldGlobals;

  std::vector<std::pair<PHINode*, unsigned> > PHIsToRewrite;

  // Okay, the malloc site is completely handled.  All of the uses of GV are now
  // loads, and all uses of those loads are simple.  Rewrite them to use loads
  // of the per-field globals instead.
  for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end(); UI != E;) {
    Instruction *User = cast<Instruction>(*UI++);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      RewriteUsesOfLoadForHeapSRoA(LI, InsertedScalarizedValues, PHIsToRewrite);
      continue;
    }

    // Must be a store of null.
    StoreInst *SI = cast<StoreInst>(User);
    assert(isa<ConstantPointerNull>(SI->getOperand(0)) &&
           "Unexpected heap-sra user!");

    // Insert a store of null into each global.
    for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
      PointerType *PT = cast<PointerType>(FieldGlobals[i]->getType());
      Constant *Null = Constant::getNullValue(PT->getElementType());
      new StoreInst(Null, FieldGlobals[i], SI);
    }
    // Erase the original store.
    SI->eraseFromParent();
  }

  // While we have PHIs that are interesting to rewrite, do it.
  while (!PHIsToRewrite.empty()) {
    PHINode *PN = PHIsToRewrite.back().first;
    unsigned FieldNo = PHIsToRewrite.back().second;
    PHIsToRewrite.pop_back();
    PHINode *FieldPN = cast<PHINode>(InsertedScalarizedValues[PN][FieldNo]);
    assert(FieldPN->getNumIncomingValues() == 0 &&"Already processed this phi");

    // Add all the incoming values.  This can materialize more phis.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *InVal = PN->getIncomingValue(i);
      InVal = GetHeapSROAValue(InVal, FieldNo, InsertedScalarizedValues,
                               PHIsToRewrite);
      FieldPN->addIncoming(InVal, PN->getIncomingBlock(i));
    }
  }

  // Drop all inter-phi links and any loads that made it this far.
  for (DenseMap<Value*, std::vector<Value*> >::iterator
       I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
       I != E; ++I) {
    if (PHINode *PN = dyn_cast<PHINode>(I->first))
      PN->dropAllReferences();
    else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
      LI->dropAllReferences();
  }

  // Delete all the phis and loads now that inter-references are dead.
  for (DenseMap<Value*, std::vector<Value*> >::iterator
       I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
       I != E; ++I) {
    if (PHINode *PN = dyn_cast<PHINode>(I->first))
      PN->eraseFromParent();
    else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
      LI->eraseFromParent();
  }

  // The old global is now dead, remove it.
  GV->eraseFromParent();

  ++NumHeapSRA;
  return cast<GlobalVariable>(FieldGlobals[0]);
}

/// TryToOptimizeStoreOfMallocToGlobal - This function is called when we see a
/// pointer global variable with a single value stored into it that is a malloc
/// or cast of malloc.
static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
                                               CallInst *CI,
                                               Type *AllocTy,
                                               AtomicOrdering Ordering,
                                               Module::global_iterator &GVI,
                                               const DataLayout *DL,
                                               TargetLibraryInfo *TLI) {
  if (!DL)
    return false;

  // If this is a malloc of an abstract type, don't touch it.
  if (!AllocTy->isSized())
    return false;

  // We can't optimize this global unless all uses of it are *known* to be
  // of the malloc value, not of the null initializer value (consider a use
  // that compares the global's value against zero to see if the malloc has
  // been reached).  To do this, we check to see if all uses of the global
  // would trap if the global were null: this proves that they must all
  // happen after the malloc.
  if (!AllUsesOfLoadedValueWillTrapIfNull(GV))
    return false;

  // We can't optimize this if the malloc itself is used in a complex way,
  // for example, being stored into multiple globals.  This allows the
  // malloc to be stored into the specified global, loaded icmp'd, and
  // GEP'd.  These are all things we could transform to using the global
  // for.
  SmallPtrSet<const PHINode*, 8> PHIs;
  if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(CI, GV, PHIs))
    return false;

  // If we have a global that is only initialized with a fixed size malloc,
  // transform the program to use global memory instead of malloc'd memory.
  // This eliminates dynamic allocation, avoids an indirection accessing the
  // data, and exposes the resultant global to further GlobalOpt.
  // We cannot optimize the malloc if we cannot determine malloc array size.
  Value *NElems = getMallocArraySize(CI, DL, TLI, true);
  if (!NElems)
    return false;

  if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
    // Restrict this transformation to only working on small allocations
    // (2048 bytes currently), as we don't want to introduce a 16M global or
    // something.
    if (NElements->getZExtValue() * DL->getTypeAllocSize(AllocTy) < 2048) {
      GVI = OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, DL, TLI);
      return true;
    }

  // If the allocation is an array of structures, consider transforming this
  // into multiple malloc'd arrays, one for each field.  This is basically
  // SRoA for malloc'd memory.

  if (Ordering != NotAtomic)
    return false;

  // If this is an allocation of a fixed size array of structs, analyze as a
  // variable size array.  malloc [100 x struct],1 -> malloc struct, 100
  if (NElems == ConstantInt::get(CI->getArgOperand(0)->getType(), 1))
    if (ArrayType *AT = dyn_cast<ArrayType>(AllocTy))
      AllocTy = AT->getElementType();

  StructType *AllocSTy = dyn_cast<StructType>(AllocTy);
  if (!AllocSTy)
    return false;

  // If the structure has an unreasonable number of fields, leave it
  // alone.
  if (AllocSTy->getNumElements() <= 16 && AllocSTy->getNumElements() != 0 &&
      AllGlobalLoadUsesSimpleEnoughForHeapSRA(GV, CI)) {

    // If this is a fixed size array, transform the Malloc to be an alloc of
    // structs.  malloc [100 x struct],1 -> malloc struct, 100
    if (ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI, TLI))) {
      Type *IntPtrTy = DL->getIntPtrType(CI->getType());
      unsigned TypeSize = DL->getStructLayout(AllocSTy)->getSizeInBytes();
      Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize);
      Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements());
      Instruction *Malloc = CallInst::CreateMalloc(CI, IntPtrTy, AllocSTy,
                                                   AllocSize, NumElements,
                                                   0, CI->getName());
      Instruction *Cast = new BitCastInst(Malloc, CI->getType(), "tmp", CI);
      CI->replaceAllUsesWith(Cast);
      CI->eraseFromParent();
      if (BitCastInst *BCI = dyn_cast<BitCastInst>(Malloc))
        CI = cast<CallInst>(BCI->getOperand(0));
      else
        CI = cast<CallInst>(Malloc);
    }

    GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, DL, TLI, true),
                               DL, TLI);
    return true;
  }

  return false;
}

// OptimizeOnceStoredGlobal - Try to optimize globals based on the knowledge
// that only one value (besides its initializer) is ever stored to the global.
static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
                                     AtomicOrdering Ordering,
                                     Module::global_iterator &GVI,
                                     const DataLayout *DL,
                                     TargetLibraryInfo *TLI) {
  // Ignore no-op GEPs and bitcasts.
  StoredOnceVal = StoredOnceVal->stripPointerCasts();

  // If we are dealing with a pointer global that is initialized to null and
  // only has one (non-null) value stored into it, then we can optimize any
  // users of the loaded value (often calls and loads) that would trap if the
  // value was null.
  if (GV->getInitializer()->getType()->isPointerTy() &&
      GV->getInitializer()->isNullValue()) {
    if (Constant *SOVC = dyn_cast<Constant>(StoredOnceVal)) {
      if (GV->getInitializer()->getType() != SOVC->getType())
        SOVC = ConstantExpr::getBitCast(SOVC, GV->getInitializer()->getType());

      // Optimize away any trapping uses of the loaded value.
      if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, DL, TLI))
        return true;
    } else if (CallInst *CI = extractMallocCall(StoredOnceVal, TLI)) {
      Type *MallocType = getMallocAllocatedType(CI, TLI);
      if (MallocType &&
          TryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType, Ordering, GVI,
                                             DL, TLI))
        return true;
    }
  }

  return false;
}
1603 /// TryToShrinkGlobalToBoolean - At this point, we have learned that the only
1604 /// two values ever stored into GV are its initializer and OtherVal. See if we
1605 /// can shrink the global into a boolean and select between the two values
1606 /// whenever it is used. This exposes the values to other scalar optimizations.
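/// For example (editorial addition; hypothetical IR): a global
///   @state = internal global i32 0
/// whose only other stored value is i32 42 becomes
///   @state.b = internal global i1 false
/// where stores write i1 true/false and each load is rewritten to
///   select i1 %b, i32 42, i32 0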
1607 static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
1608 Type *GVElType = GV->getType()->getElementType();
1610 // If GVElType is already i1, it is already shrunk. If the type of the GV is
1611 // an FP value, pointer or vector, don't do this optimization because a select
1612 // between them is very expensive and unlikely to lead to later
1613 // simplification. In these cases, we typically end up with "cond ? v1 : v2"
1614 // where v1 and v2 both require constant pool loads, a big loss.
1615 if (GVElType == Type::getInt1Ty(GV->getContext()) ||
1616 GVElType->isFloatingPointTy() ||
1617 GVElType->isPointerTy() || GVElType->isVectorTy())
1618 return false;
1620 // Walk the use list of the global seeing if all the uses are load or store.
1621 // If there is anything else, bail out.
1622 for (Value::use_iterator I = GV->use_begin(), E = GV->use_end(); I != E; ++I){
1623 const User *U = *I;
1624 if (!isa<LoadInst>(U) && !isa<StoreInst>(U))
1625 return false;
1626 }
1628 DEBUG(dbgs() << " *** SHRINKING TO BOOL: " << *GV);
1630 // Create the new global, initializing it to false.
1631 GlobalVariable *NewGV = new GlobalVariable(Type::getInt1Ty(GV->getContext()),
1632 false,
1633 GlobalValue::InternalLinkage,
1634 ConstantInt::getFalse(GV->getContext()),
1635 GV->getName()+".b",
1636 GV->getThreadLocalMode(),
1637 GV->getType()->getAddressSpace());
1638 GV->getParent()->getGlobalList().insert(GV, NewGV);
1640 Constant *InitVal = GV->getInitializer();
1641 assert(InitVal->getType() != Type::getInt1Ty(GV->getContext()) &&
1642 "No reason to shrink to bool!");
1644 // If initialized to zero and storing one into the global, we can use a cast
1645 // instead of a select to synthesize the desired value.
1646 bool IsOneZero = false;
1647 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal))
1648 IsOneZero = InitVal->isNullValue() && CI->isOne();
1650 while (!GV->use_empty()) {
1651 Instruction *UI = cast<Instruction>(GV->use_back());
1652 if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
1653 // Change the store into a boolean store.
1654 bool StoringOther = SI->getOperand(0) == OtherVal;
1655 // Only do this if we weren't storing a loaded value.
1656 Value *StoreVal;
1657 if (StoringOther || SI->getOperand(0) == InitVal) {
1658 StoreVal = ConstantInt::get(Type::getInt1Ty(GV->getContext()),
1659 StoringOther);
1660 } else {
1661 // Otherwise, we are storing a previously loaded copy. To do this,
1662 // change the copy from copying the original value to just copying the
1663 // bool.
1664 Instruction *StoredVal = cast<Instruction>(SI->getOperand(0));
1666 // If we've already replaced the input, StoredVal will be a cast or
1667 // select instruction. If not, it will be a load of the original
1668 // global.
1669 if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
1670 assert(LI->getOperand(0) == GV && "Not a copy!");
1671 // Insert a new load, to preserve the saved value.
1672 StoreVal = new LoadInst(NewGV, LI->getName()+".b", false, 0,
1673 LI->getOrdering(), LI->getSynchScope(), LI);
1674 } else {
1675 assert((isa<CastInst>(StoredVal) || isa<SelectInst>(StoredVal)) &&
1676 "This is not a form that we understand!");
1677 StoreVal = StoredVal->getOperand(0);
1678 assert(isa<LoadInst>(StoreVal) && "Not a load of NewGV!");
1679 }
1680 }
1681 new StoreInst(StoreVal, NewGV, false, 0,
1682 SI->getOrdering(), SI->getSynchScope(), SI);
1683 } else {
1684 // Change the load into a load of bool then a select.
1685 LoadInst *LI = cast<LoadInst>(UI);
1686 LoadInst *NLI = new LoadInst(NewGV, LI->getName()+".b", false, 0,
1687 LI->getOrdering(), LI->getSynchScope(), LI);
1688 Value *NSI;
1689 if (IsOneZero)
1690 NSI = new ZExtInst(NLI, LI->getType(), "", LI);
1691 else
1692 NSI = SelectInst::Create(NLI, OtherVal, InitVal, "", LI);
1693 NSI->takeName(LI);
1694 LI->replaceAllUsesWith(NSI);
1695 }
1696 UI->eraseFromParent();
1697 }
1699 // Retain the name of the old global variable. People who are debugging their
1700 // programs may expect these variables to be named the same.
1701 NewGV->takeName(GV);
1702 GV->eraseFromParent();
1703 return true;
1704 }
1707 /// ProcessGlobal - Analyze the specified global variable and optimize it if
1708 /// possible. If we make a change, return true.
1709 bool GlobalOpt::ProcessGlobal(GlobalVariable *GV,
1710 Module::global_iterator &GVI) {
1711 if (!GV->isDiscardableIfUnused())
1712 return false;
1714 // Do more involved optimizations if the global is internal.
1715 GV->removeDeadConstantUsers();
1717 if (GV->use_empty()) {
1718 DEBUG(dbgs() << "GLOBAL DEAD: " << *GV);
1719 GV->eraseFromParent();
1720 ++NumDeleted;
1721 return true;
1722 }
1724 if (!GV->hasLocalLinkage())
1725 return false;
1727 GlobalStatus GS;
1729 if (GlobalStatus::analyzeGlobal(GV, GS))
1730 return false;
1732 if (!GS.IsCompared && !GV->hasUnnamedAddr()) {
1733 GV->setUnnamedAddr(true);
1734 NumUnnamed++;
1735 }
1737 if (GV->isConstant() || !GV->hasInitializer())
1738 return false;
1740 return ProcessInternalGlobal(GV, GVI, GS);
1741 }
1743 /// ProcessInternalGlobal - Analyze the specified global variable and optimize
1744 /// it if possible. If we make a change, return true.
1745 bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
1746 Module::global_iterator &GVI,
1747 const GlobalStatus &GS) {
1748 // If this is a first class global and has only one accessing function
1749 // and this function is main (which we know is not recursive), we replace
1750 // the global with a local alloca in this function.
1752 // NOTE: It doesn't make sense to promote non-single-value types since we
1753 // are just replacing static memory with stack memory.
1755 // If the global is in different address space, don't bring it to stack.
1756 if (!GS.HasMultipleAccessingFunctions &&
1757 GS.AccessingFunction && !GS.HasNonInstructionUser &&
1758 GV->getType()->getElementType()->isSingleValueType() &&
1759 GS.AccessingFunction->getName() == "main" &&
1760 GS.AccessingFunction->hasExternalLinkage() &&
1761 GV->getType()->getAddressSpace() == 0) {
1762 DEBUG(dbgs() << "LOCALIZING GLOBAL: " << *GV);
1763 Instruction &FirstI = const_cast<Instruction&>(*GS.AccessingFunction
1764 ->getEntryBlock().begin());
1765 Type *ElemTy = GV->getType()->getElementType();
1766 // FIXME: Pass Global's alignment when globals have alignment
1767 AllocaInst *Alloca = new AllocaInst(ElemTy, NULL, GV->getName(), &FirstI);
1768 if (!isa<UndefValue>(GV->getInitializer()))
1769 new StoreInst(GV->getInitializer(), Alloca, &FirstI);
1771 GV->replaceAllUsesWith(Alloca);
1772 GV->eraseFromParent();
1773 ++NumLocalized;
1774 return true;
1775 }
1777 // If the global is never loaded (but may be stored to), it is dead.
1778 // Delete it now.
1779 if (!GS.IsLoaded) {
1780 DEBUG(dbgs() << "GLOBAL NEVER LOADED: " << *GV);
1782 bool Changed;
1783 if (isLeakCheckerRoot(GV)) {
1784 // Delete any constant stores to the global.
1785 Changed = CleanupPointerRootUsers(GV, TLI);
1786 } else {
1787 // Delete any stores we can find to the global. We may not be able to
1788 // make it completely dead though.
1789 Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI);
1790 }
1792 // If the global is dead now, delete it.
1793 if (GV->use_empty()) {
1794 GV->eraseFromParent();
1795 NumDeleted++;
1796 Changed = true;
1797 }
1798 return Changed;
1800 } else if (GS.StoredType <= GlobalStatus::InitializerStored) {
1801 DEBUG(dbgs() << "MARKING CONSTANT: " << *GV << "\n");
1802 GV->setConstant(true);
1804 // Clean up any obviously simplifiable users now.
1805 CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI);
1807 // If the global is dead now, just nuke it.
1808 if (GV->use_empty()) {
1809 DEBUG(dbgs() << " *** Marking constant allowed us to simplify "
1810 << "all users and delete global!\n");
1811 GV->eraseFromParent();
1812 ++NumDeleted;
1813 }
1814 return true;
1817 } else if (!GV->getInitializer()->getType()->isSingleValueType()) {
1818 if (DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>()) {
1819 const DataLayout &DL = DLP->getDataLayout();
1820 if (GlobalVariable *FirstNewGV = SRAGlobal(GV, DL)) {
1821 GVI = FirstNewGV; // Don't skip the newly produced globals!
1822 return true;
1823 }
1824 }
1825 } else if (GS.StoredType == GlobalStatus::StoredOnce) {
1826 // If the initial value for the global was an undef value, and if only
1827 // one other value was stored into it, we can just change the
1828 // initializer to be the stored value, then delete all stores to the
1829 // global. This allows us to mark it constant.
1830 if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue))
1831 if (isa<UndefValue>(GV->getInitializer())) {
1832 // Change the initial value here.
1833 GV->setInitializer(SOVConstant);
1835 // Clean up any obviously simplifiable users now.
1836 CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI);
1838 if (GV->use_empty()) {
1839 DEBUG(dbgs() << " *** Substituting initializer allowed us to "
1840 << "simplify all users and delete global!\n");
1841 GV->eraseFromParent();
1842 ++NumDeleted;
1843 } else {
1844 GVI = GV;
1845 }
1846 ++NumSubstitute;
1847 return true;
1848 }
1850 // Try to optimize globals based on the knowledge that only one value
1851 // (besides its initializer) is ever stored to the global.
1852 if (OptimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GS.Ordering, GVI,
1853 DL, TLI))
1854 return true;
1856 // Otherwise, if the global was not a boolean, we can shrink it to be a
1857 // boolean.
1858 if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue)) {
1859 if (GS.Ordering == NotAtomic) {
1860 if (TryToShrinkGlobalToBoolean(GV, SOVConstant)) {
1861 ++NumShrunkToBool;
1862 return true;
1863 }
1864 }
1865 }
1866 }
1868 return false;
1869 }
1871 /// ChangeCalleesToFastCall - Walk all of the direct calls of the specified
1872 /// function, changing them to FastCC.
1873 static void ChangeCalleesToFastCall(Function *F) {
1874 for (Value::use_iterator UI = F->use_begin(), E = F->use_end(); UI != E;++UI){
1875 if (isa<BlockAddress>(*UI))
1876 continue;
1877 CallSite User(cast<Instruction>(*UI));
1878 User.setCallingConv(CallingConv::Fast);
1879 }
1880 }
1882 static AttributeSet StripNest(LLVMContext &C, const AttributeSet &Attrs) {
1883 for (unsigned i = 0, e = Attrs.getNumSlots(); i != e; ++i) {
1884 unsigned Index = Attrs.getSlotIndex(i);
1885 if (!Attrs.getSlotAttributes(i).hasAttribute(Index, Attribute::Nest))
1886 continue;
1888 // There can be only one.
1889 return Attrs.removeAttribute(C, Index, Attribute::Nest);
1890 }
1892 return Attrs;
1893 }
1895 static void RemoveNestAttribute(Function *F) {
1896 F->setAttributes(StripNest(F->getContext(), F->getAttributes()));
1897 for (Value::use_iterator UI = F->use_begin(), E = F->use_end(); UI != E;++UI){
1898 if (isa<BlockAddress>(*UI))
1899 continue;
1900 CallSite User(cast<Instruction>(*UI));
1901 User.setAttributes(StripNest(F->getContext(), User.getAttributes()));
1902 }
1903 }
1905 /// Return true if this is a calling convention that we'd like to change. The
1906 /// idea here is that we don't want to mess with the convention if the user
1907 /// explicitly requested something with performance implications like coldcc,
1908 /// GHC, or anyregcc.
1909 static bool isProfitableToMakeFastCC(Function *F) {
1910 CallingConv::ID CC = F->getCallingConv();
1911 // FIXME: Is it worth transforming x86_stdcallcc and x86_fastcallcc?
1912 return CC == CallingConv::C || CC == CallingConv::X86_ThisCall;
1913 }
1915 bool GlobalOpt::OptimizeFunctions(Module &M) {
1916 bool Changed = false;
1917 // Optimize functions.
1918 for (Module::iterator FI = M.begin(), E = M.end(); FI != E; ) {
1919 Function *F = FI++;
1920 // Functions without names cannot be referenced outside this module.
1921 if (!F->hasName() && !F->isDeclaration())
1922 F->setLinkage(GlobalValue::InternalLinkage);
1923 F->removeDeadConstantUsers();
1924 if (F->isDefTriviallyDead()) {
1925 F->eraseFromParent();
1926 Changed = true;
1927 ++NumFnDeleted;
1928 } else if (F->hasLocalLinkage()) {
1929 if (isProfitableToMakeFastCC(F) && !F->isVarArg() &&
1930 !F->hasAddressTaken()) {
1931 // If this function has a calling convention worth changing, is not a
1932 // varargs function, and is only called directly, promote it to use the
1933 // Fast calling convention.
1934 F->setCallingConv(CallingConv::Fast);
1935 ChangeCalleesToFastCall(F);
1936 ++NumFastCallFns;
1937 Changed = true;
1938 }
1940 if (F->getAttributes().hasAttrSomewhere(Attribute::Nest) &&
1941 !F->hasAddressTaken()) {
1942 // The function is not used by a trampoline intrinsic, so it is safe
1943 // to remove the 'nest' attribute.
1944 RemoveNestAttribute(F);
1945 ++NumNestRemoved;
1946 Changed = true;
1947 }
1948 }
1949 }
1950 return Changed;
1951 }
1953 bool GlobalOpt::OptimizeGlobalVars(Module &M) {
1954 bool Changed = false;
1955 for (Module::global_iterator GVI = M.global_begin(), E = M.global_end();
1956 GVI != E; ) {
1957 GlobalVariable *GV = GVI++;
1958 // Global variables without names cannot be referenced outside this module.
1959 if (!GV->hasName() && !GV->isDeclaration())
1960 GV->setLinkage(GlobalValue::InternalLinkage);
1961 // Simplify the initializer.
1962 if (GV->hasInitializer())
1963 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GV->getInitializer())) {
1964 Constant *New = ConstantFoldConstantExpression(CE, DL, TLI);
1965 if (New && New != CE)
1966 GV->setInitializer(New);
1967 }
1969 Changed |= ProcessGlobal(GV, GVI);
1970 }
1971 return Changed;
1972 }
1974 /// FindGlobalCtors - Find the llvm.global_ctors list, verifying that all
1975 /// initializers have an init priority of 65535.
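/// An acceptable entry looks like this (editorial illustration):
///   @llvm.global_ctors = appending global [1 x { i32, void ()* }]
///                        [{ i32, void ()* } { i32 65535, void ()* @ctor }]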
1976 GlobalVariable *GlobalOpt::FindGlobalCtors(Module &M) {
1977 GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");
1978 if (GV == 0) return 0;
1980 // Verify that the initializer is simple enough for us to handle. We are
1981 // only allowed to optimize the initializer if it is unique.
1982 if (!GV->hasUniqueInitializer()) return 0;
1984 if (isa<ConstantAggregateZero>(GV->getInitializer()))
1985 return GV;
1986 ConstantArray *CA = cast<ConstantArray>(GV->getInitializer());
1988 for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i) {
1989 if (isa<ConstantAggregateZero>(*i))
1990 continue;
1991 ConstantStruct *CS = cast<ConstantStruct>(*i);
1992 if (isa<ConstantPointerNull>(CS->getOperand(1)))
1993 continue;
1995 // Must have a function or null ptr.
1996 if (!isa<Function>(CS->getOperand(1)))
1997 return 0;
1999 // Init priority must be standard.
2000 ConstantInt *CI = cast<ConstantInt>(CS->getOperand(0));
2001 if (CI->getZExtValue() != 65535)
2002 return 0;
2003 }
2005 return GV;
2006 }
2008 /// ParseGlobalCtors - Given a llvm.global_ctors list that we can understand,
2009 /// return a list of the functions and null terminator as a vector.
2010 static std::vector<Function*> ParseGlobalCtors(GlobalVariable *GV) {
2011 if (GV->getInitializer()->isNullValue())
2012 return std::vector<Function*>();
2013 ConstantArray *CA = cast<ConstantArray>(GV->getInitializer());
2014 std::vector<Function*> Result;
2015 Result.reserve(CA->getNumOperands());
2016 for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i) {
2017 ConstantStruct *CS = cast<ConstantStruct>(*i);
2018 Result.push_back(dyn_cast<Function>(CS->getOperand(1)));
2019 }
2020 return Result;
2021 }
2023 /// InstallGlobalCtors - Given a specified llvm.global_ctors list, install the
2024 /// specified array, returning the new global to use.
2025 static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL,
2026 const std::vector<Function*> &Ctors) {
2027 // If we made a change, reassemble the initializer list.
2028 Constant *CSVals[2];
2029 CSVals[0] = ConstantInt::get(Type::getInt32Ty(GCL->getContext()), 65535);
2030 CSVals[1] = 0;
2032 StructType *StructTy =
2033 cast<StructType>(GCL->getType()->getElementType()->getArrayElementType());
2035 // Create the new init list.
2036 std::vector<Constant*> CAList;
2037 for (unsigned i = 0, e = Ctors.size(); i != e; ++i) {
2038 if (Ctors[i]) {
2039 CSVals[1] = Ctors[i];
2040 } else {
2041 Type *FTy = FunctionType::get(Type::getVoidTy(GCL->getContext()),
2042 false);
2043 PointerType *PFTy = PointerType::getUnqual(FTy);
2044 CSVals[1] = Constant::getNullValue(PFTy);
2045 CSVals[0] = ConstantInt::get(Type::getInt32Ty(GCL->getContext()),
2046 0x7fffffff);
2047 }
2048 CAList.push_back(ConstantStruct::get(StructTy, CSVals));
2049 }
2051 // Create the array initializer.
2052 Constant *CA = ConstantArray::get(ArrayType::get(StructTy,
2053 CAList.size()), CAList);
2055 // If we didn't change the number of elements, don't create a new GV.
2056 if (CA->getType() == GCL->getInitializer()->getType()) {
2057 GCL->setInitializer(CA);
2058 return GCL;
2059 }
2061 // Create the new global and insert it next to the existing list.
2062 GlobalVariable *NGV = new GlobalVariable(CA->getType(), GCL->isConstant(),
2063 GCL->getLinkage(), CA, "",
2064 GCL->getThreadLocalMode());
2065 GCL->getParent()->getGlobalList().insert(GCL, NGV);
2066 NGV->takeName(GCL);
2068 // Nuke the old list, replacing any uses with the new one.
2069 if (!GCL->use_empty()) {
2070 Constant *V = NGV;
2071 if (V->getType() != GCL->getType())
2072 V = ConstantExpr::getBitCast(V, GCL->getType());
2073 GCL->replaceAllUsesWith(V);
2074 }
2075 GCL->eraseFromParent();
2077 if (Ctors.size())
2078 return NGV;
2079 else
2080 return 0;
2081 }
2084 static bool
2085 isSimpleEnoughValueToCommit(Constant *C,
2086 SmallPtrSet<Constant*, 8> &SimpleConstants,
2087 const DataLayout *DL);
2090 /// isSimpleEnoughValueToCommit - Return true if the specified constant can be
2091 /// handled by the code generator. We don't want to generate something like:
2092 /// void *X = &X/42;
2093 /// because the code generator doesn't have a relocation that can handle that.
2095 /// This function should be called if C was not found (but just got inserted)
2096 /// in SimpleConstants to avoid having to rescan the same constants all the
2097 /// time.
2098 static bool isSimpleEnoughValueToCommitHelper(Constant *C,
2099 SmallPtrSet<Constant*, 8> &SimpleConstants,
2100 const DataLayout *DL) {
2101 // Simple integer, undef, constant aggregate zero, global addresses, etc are
2102 // all supported.
2103 if (C->getNumOperands() == 0 || isa<BlockAddress>(C) ||
2104 isa<GlobalValue>(C))
2105 return true;
2107 // Aggregate values are safe if all their elements are.
2108 if (isa<ConstantArray>(C) || isa<ConstantStruct>(C) ||
2109 isa<ConstantVector>(C)) {
2110 for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i) {
2111 Constant *Op = cast<Constant>(C->getOperand(i));
2112 if (!isSimpleEnoughValueToCommit(Op, SimpleConstants, DL))
2113 return false;
2114 }
2115 return true;
2116 }
2118 // We don't know exactly what relocations are allowed in constant expressions,
2119 // so we allow &global+constantoffset, which is safe and uniformly supported
2120 // across targets.
2121 ConstantExpr *CE = cast<ConstantExpr>(C);
2122 switch (CE->getOpcode()) {
2123 case Instruction::BitCast:
2124 // Bitcast is fine if the casted value is fine.
2125 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);
2127 case Instruction::IntToPtr:
2128 case Instruction::PtrToInt:
2129 // int <=> ptr is fine if the int type is the same size as the
2130 // pointer type.
2131 if (!DL || DL->getTypeSizeInBits(CE->getType()) !=
2132 DL->getTypeSizeInBits(CE->getOperand(0)->getType()))
2133 return false;
2134 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);
2136 // GEP is fine if it is simple + constant offset.
2137 case Instruction::GetElementPtr:
2138 for (unsigned i = 1, e = CE->getNumOperands(); i != e; ++i)
2139 if (!isa<ConstantInt>(CE->getOperand(i)))
2140 return false;
2141 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);
2143 case Instruction::Add:
2144 // We allow simple+cst.
2145 if (!isa<ConstantInt>(CE->getOperand(1)))
2146 return false;
2147 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);
2148 }
2149 return false;
2150 }
2152 static bool
2153 isSimpleEnoughValueToCommit(Constant *C,
2154 SmallPtrSet<Constant*, 8> &SimpleConstants,
2155 const DataLayout *DL) {
2156 // If we already checked this constant, we win.
2157 if (!SimpleConstants.insert(C)) return true;
2158 // Check the constant.
2159 return isSimpleEnoughValueToCommitHelper(C, SimpleConstants, DL);
2160 }
2163 /// isSimpleEnoughPointerToCommit - Return true if this constant is simple
2164 /// enough for us to understand. In particular, if it is a cast to anything
2165 /// other than from one pointer type to another pointer type, we punt.
2166 /// We basically just support direct accesses to globals and GEP's of
2167 /// globals. This should be kept up to date with CommitValueTo.
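/// For example (editorial illustration): "@g" itself or
///   getelementptr inbounds ([4 x i32]* @g, i32 0, i32 2)
/// is simple enough, while pointers built from ptrtoint or arithmetic on
/// unrelated values are rejected.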
2168 static bool isSimpleEnoughPointerToCommit(Constant *C) {
2169 // Conservatively, avoid aggregate types. This is because we don't
2170 // want to worry about them partially overlapping other stores.
2171 if (!cast<PointerType>(C->getType())->getElementType()->isSingleValueType())
2172 return false;
2174 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C))
2175 // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or
2176 // external globals.
2177 return GV->hasUniqueInitializer();
2179 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
2180 // Handle a constantexpr gep.
2181 if (CE->getOpcode() == Instruction::GetElementPtr &&
2182 isa<GlobalVariable>(CE->getOperand(0)) &&
2183 cast<GEPOperator>(CE)->isInBounds()) {
2184 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));
2185 // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or
2186 // external globals.
2187 if (!GV->hasUniqueInitializer())
2188 return false;
2190 // The first index must be zero.
2191 ConstantInt *CI = dyn_cast<ConstantInt>(*std::next(CE->op_begin()));
2192 if (!CI || !CI->isZero()) return false;
2194 // The remaining indices must be compile-time known integers within the
2195 // notional bounds of the corresponding static array types.
2196 if (!CE->isGEPWithNoNotionalOverIndexing())
2197 return false;
2199 return ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE);
2201 // A constantexpr bitcast from a pointer to another pointer is a no-op,
2202 // and we know how to evaluate it by moving the bitcast from the pointer
2203 // operand to the value operand.
2204 } else if (CE->getOpcode() == Instruction::BitCast &&
2205 isa<GlobalVariable>(CE->getOperand(0))) {
2206 // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or
2207 // external globals.
2208 return cast<GlobalVariable>(CE->getOperand(0))->hasUniqueInitializer();
2209 }
2210 }
2212 return false;
2213 }
2215 /// EvaluateStoreInto - Evaluate a piece of a constantexpr store into a global
2216 /// initializer. This returns 'Init' modified to reflect 'Val' stored into it.
2217 /// At this point, the GEP operands of Addr [0, OpNo) have been stepped into.
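/// For example (editorial illustration): storing i32 1 through
///   getelementptr ({ i32, [2 x i32] }* @g, i32 0, i32 1, i32 0)
/// rebuilds @g's initializer with the nested element [1][0] replaced by i32 1,
/// leaving all other elements untouched.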
2218 static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
2219 ConstantExpr *Addr, unsigned OpNo) {
2220 // Base case of the recursion.
2221 if (OpNo == Addr->getNumOperands()) {
2222 assert(Val->getType() == Init->getType() && "Type mismatch!");
2223 return Val;
2224 }
2226 SmallVector<Constant*, 32> Elts;
2227 if (StructType *STy = dyn_cast<StructType>(Init->getType())) {
2228 // Break up the constant into its elements.
2229 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
2230 Elts.push_back(Init->getAggregateElement(i));
2232 // Replace the element that we are supposed to.
2233 ConstantInt *CU = cast<ConstantInt>(Addr->getOperand(OpNo));
2234 unsigned Idx = CU->getZExtValue();
2235 assert(Idx < STy->getNumElements() && "Struct index out of range!");
2236 Elts[Idx] = EvaluateStoreInto(Elts[Idx], Val, Addr, OpNo+1);
2238 // Return the modified struct.
2239 return ConstantStruct::get(STy, Elts);
2240 }
2242 ConstantInt *CI = cast<ConstantInt>(Addr->getOperand(OpNo));
2243 SequentialType *InitTy = cast<SequentialType>(Init->getType());
2245 uint64_t NumElts;
2246 if (ArrayType *ATy = dyn_cast<ArrayType>(InitTy))
2247 NumElts = ATy->getNumElements();
2248 else
2249 NumElts = InitTy->getVectorNumElements();
2251 // Break up the array into elements.
2252 for (uint64_t i = 0, e = NumElts; i != e; ++i)
2253 Elts.push_back(Init->getAggregateElement(i));
2255 assert(CI->getZExtValue() < NumElts);
2256 Elts[CI->getZExtValue()] =
2257 EvaluateStoreInto(Elts[CI->getZExtValue()], Val, Addr, OpNo+1);
2259 if (Init->getType()->isArrayTy())
2260 return ConstantArray::get(cast<ArrayType>(InitTy), Elts);
2261 return ConstantVector::get(Elts);
2262 }
2264 /// CommitValueTo - We have decided that Addr (which satisfies the predicate
2265 /// isSimpleEnoughPointerToCommit) should get Val as its value. Make it happen.
2266 static void CommitValueTo(Constant *Val, Constant *Addr) {
2267 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
2268 assert(GV->hasInitializer());
2269 GV->setInitializer(Val);
2270 return;
2271 }
2273 ConstantExpr *CE = cast<ConstantExpr>(Addr);
2274 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));
2275 GV->setInitializer(EvaluateStoreInto(GV->getInitializer(), Val, CE, 2));
2276 }
2278 namespace {
2280 /// Evaluator - This class evaluates LLVM IR, producing the Constant
2281 /// representing each SSA instruction. Changes to global variables are stored
2282 /// in a mapping that can be iterated over after the evaluation is complete.
2283 /// Once an evaluation call fails, the evaluation object should not be reused.
2284 class Evaluator {
2285 public:
2286 Evaluator(const DataLayout *DL, const TargetLibraryInfo *TLI)
2287 : DL(DL), TLI(TLI) {
2288 ValueStack.push_back(new DenseMap<Value*, Constant*>);
2289 }
2291 ~Evaluator() {
2292 DeleteContainerPointers(ValueStack);
2293 while (!AllocaTmps.empty()) {
2294 GlobalVariable *Tmp = AllocaTmps.back();
2295 AllocaTmps.pop_back();
2297 // If there are still users of the alloca, the program is doing something
2298 // silly, e.g. storing the address of the alloca somewhere and using it
2299 // later. Since this is undefined, we'll just make it be null.
2300 if (!Tmp->use_empty())
2301 Tmp->replaceAllUsesWith(Constant::getNullValue(Tmp->getType()));
2302 delete Tmp;
2303 }
2304 }
2306 /// EvaluateFunction - Evaluate a call to function F, returning true if
2307 /// successful, false if we can't evaluate it. ActualArgs contains the actual
2308 /// argument values for the call.
2309 bool EvaluateFunction(Function *F, Constant *&RetVal,
2310 const SmallVectorImpl<Constant*> &ActualArgs);
2312 /// EvaluateBlock - Evaluate all instructions in block BB, returning true if
2313 /// successful, false if we can't evaluate it. NextBB returns the next BB that
2314 /// control flows into, or null upon return.
2315 bool EvaluateBlock(BasicBlock::iterator CurInst, BasicBlock *&NextBB);
2317 Constant *getVal(Value *V) {
2318 if (Constant *CV = dyn_cast<Constant>(V)) return CV;
2319 Constant *R = ValueStack.back()->lookup(V);
2320 assert(R && "Reference to an uncomputed value!");
2321 return R;
2322 }
2324 void setVal(Value *V, Constant *C) {
2325 ValueStack.back()->operator[](V) = C;
2326 }
2328 const DenseMap<Constant*, Constant*> &getMutatedMemory() const {
2329 return MutatedMemory;
2330 }
2332 const SmallPtrSet<GlobalVariable*, 8> &getInvariants() const {
2333 return Invariants;
2334 }
2336 private:
2337 Constant *ComputeLoadResult(Constant *P);
2339 /// ValueStack - As we compute SSA register values, we store their contents
2340 /// here. The back of the vector contains the current function and the stack
2341 /// contains the values in the calling frames.
2342 SmallVector<DenseMap<Value*, Constant*>*, 4> ValueStack;
2344 /// CallStack - This is used to detect recursion. In pathological situations
2345 /// we could hit exponential behavior, but at least there is nothing
2346 /// unbounded.
2347 SmallVector<Function*, 4> CallStack;
2349 /// MutatedMemory - For each store we execute, we update this map. Loads
2350 /// check this to get the most up-to-date value. If evaluation is successful,
2351 /// this state is committed to the process.
2352 DenseMap<Constant*, Constant*> MutatedMemory;
2354 /// AllocaTmps - To 'execute' an alloca, we create a temporary global variable
2355 /// to represent its body. This vector is needed so we can delete the
2356 /// temporary globals when we are done.
2357 SmallVector<GlobalVariable*, 32> AllocaTmps;
2359 /// Invariants - These global variables have been marked invariant by the
2360 /// static constructor.
2361 SmallPtrSet<GlobalVariable*, 8> Invariants;
2363 /// SimpleConstants - These are constants we have checked and know to be
2364 /// simple enough to live in a static initializer of a global.
2365 SmallPtrSet<Constant*, 8> SimpleConstants;
2367 const DataLayout *DL;
2368 const TargetLibraryInfo *TLI;
2369 };
2371 } // anonymous namespace
2373 /// ComputeLoadResult - Return the value that would be computed by a load from
2374 /// P after the stores reflected by 'memory' have been performed. If we can't
2375 /// decide, return null.
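/// For example (editorial illustration): once "store i32 7, i32* @g" has been
/// recorded in MutatedMemory, a subsequent load of @g yields i32 7 rather
/// than @g's original initializer.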
2376 Constant *Evaluator::ComputeLoadResult(Constant *P) {
2377 // If this memory location has been recently stored, use the stored value: it
2378 // is the most up-to-date.
2379 DenseMap<Constant*, Constant*>::const_iterator I = MutatedMemory.find(P);
2380 if (I != MutatedMemory.end()) return I->second;
2382 // Access it.
2383 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
2384 if (GV->hasDefinitiveInitializer())
2385 return GV->getInitializer();
2386 }
2389 // Handle a constantexpr getelementptr.
2390 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(P))
2391 if (CE->getOpcode() == Instruction::GetElementPtr &&
2392 isa<GlobalVariable>(CE->getOperand(0))) {
2393 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));
2394 if (GV->hasDefinitiveInitializer())
2395 return ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE);
2396 }
2398 return 0; // don't know how to evaluate.
2399 }
2401 /// EvaluateBlock - Evaluate all instructions in block BB, returning true if
2402 /// successful, false if we can't evaluate it. NextBB returns the next BB that
2403 /// control flows into, or null upon return.
2404 bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
2405 BasicBlock *&NextBB) {
2406 // This is the main evaluation loop.
2407 while (1) {
2408 Constant *InstResult = 0;
2410 DEBUG(dbgs() << "Evaluating Instruction: " << *CurInst << "\n");
2412 if (StoreInst *SI = dyn_cast<StoreInst>(CurInst)) {
2413 if (!SI->isSimple()) {
2414 DEBUG(dbgs() << "Store is not simple! Can not evaluate.\n");
2415 return false; // no volatile/atomic accesses.
2416 }
2417 Constant *Ptr = getVal(SI->getOperand(1));
2418 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) {
2419 DEBUG(dbgs() << "Folding constant ptr expression: " << *Ptr);
2420 Ptr = ConstantFoldConstantExpression(CE, DL, TLI);
2421 DEBUG(dbgs() << "; To: " << *Ptr << "\n");
2422 }
2423 if (!isSimpleEnoughPointerToCommit(Ptr)) {
2424 // If this is too complex for us to commit, reject it.
2425 DEBUG(dbgs() << "Pointer is too complex for us to evaluate store.");
2426 return false;
2427 }
2429 Constant *Val = getVal(SI->getOperand(0));
2431 // If this might be too difficult for the backend to handle (e.g. the addr
2432 // of one global variable divided by another) then we can't commit it.
2433 if (!isSimpleEnoughValueToCommit(Val, SimpleConstants, DL)) {
2434 DEBUG(dbgs() << "Store value is too complex to evaluate store. " << *Val
2435 << "\n");
2436 return false;
2437 }
2439 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) {
2440 if (CE->getOpcode() == Instruction::BitCast) {
2441 DEBUG(dbgs() << "Attempting to resolve bitcast on constant ptr.\n");
2442 // If we're evaluating a store through a bitcast, then we need
2443 // to pull the bitcast off the pointer type and push it onto the
2444 // stored value.
2445 Ptr = CE->getOperand(0);
2447 Type *NewTy = cast<PointerType>(Ptr->getType())->getElementType();
2449 // In order to push the bitcast onto the stored value, a bitcast
2450 // from NewTy to Val's type must be legal. If it's not, we can try
2451 // introspecting NewTy to find a legal conversion.
2452 while (!Val->getType()->canLosslesslyBitCastTo(NewTy)) {
2453 // If NewTy is a struct, we can convert the pointer to the struct
2454 // into a pointer to its first member.
2455 // FIXME: This could be extended to support arrays as well.
2456 if (StructType *STy = dyn_cast<StructType>(NewTy)) {
2457 NewTy = STy->getTypeAtIndex(0U);
2459 IntegerType *IdxTy = IntegerType::get(NewTy->getContext(), 32);
2460 Constant *IdxZero = ConstantInt::get(IdxTy, 0, false);
2461 Constant * const IdxList[] = {IdxZero, IdxZero};
2463 Ptr = ConstantExpr::getGetElementPtr(Ptr, IdxList);
2464 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
2465 Ptr = ConstantFoldConstantExpression(CE, DL, TLI);
2467 // If we can't improve the situation by introspecting NewTy,
2468 // we have to give up.
2469 } else {
2470 DEBUG(dbgs() << "Failed to bitcast constant ptr, can not "
2471 "evaluate.\n");
2472 return false;
2473 }
2474 }
2476 // If we found compatible types, go ahead and push the bitcast
2477 // onto the stored value.
2478 Val = ConstantExpr::getBitCast(Val, NewTy);
2480 DEBUG(dbgs() << "Evaluated bitcast: " << *Val << "\n");
2481 }
2482 }
2484 MutatedMemory[Ptr] = Val;
2485 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CurInst)) {
2486 InstResult = ConstantExpr::get(BO->getOpcode(),
2487 getVal(BO->getOperand(0)),
2488 getVal(BO->getOperand(1)));
2489 DEBUG(dbgs() << "Found a BinaryOperator! Simplifying: " << *InstResult
2490 << "\n");
2491 } else if (CmpInst *CI = dyn_cast<CmpInst>(CurInst)) {
2492 InstResult = ConstantExpr::getCompare(CI->getPredicate(),
2493 getVal(CI->getOperand(0)),
2494 getVal(CI->getOperand(1)));
2495 DEBUG(dbgs() << "Found a CmpInst! Simplifying: " << *InstResult
2496 << "\n");
2497 } else if (CastInst *CI = dyn_cast<CastInst>(CurInst)) {
2498 InstResult = ConstantExpr::getCast(CI->getOpcode(),
2499 getVal(CI->getOperand(0)),
2500 CI->getType());
2501 DEBUG(dbgs() << "Found a Cast! Simplifying: " << *InstResult
2502 << "\n");
2503 } else if (SelectInst *SI = dyn_cast<SelectInst>(CurInst)) {
2504 InstResult = ConstantExpr::getSelect(getVal(SI->getOperand(0)),
2505 getVal(SI->getOperand(1)),
2506 getVal(SI->getOperand(2)));
2507 DEBUG(dbgs() << "Found a Select! Simplifying: " << *InstResult
2508 << "\n");
2509 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurInst)) {
2510 Constant *P = getVal(GEP->getOperand(0));
2511 SmallVector<Constant*, 8> GEPOps;
2512 for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end();
2513 i != e; ++i)
2514 GEPOps.push_back(getVal(*i));
2515 InstResult =
2516 ConstantExpr::getGetElementPtr(P, GEPOps,
2517 cast<GEPOperator>(GEP)->isInBounds());
2518 DEBUG(dbgs() << "Found a GEP! Simplifying: " << *InstResult
2519 << "\n");
2520 } else if (LoadInst *LI = dyn_cast<LoadInst>(CurInst)) {
2522 if (!LI->isSimple()) {
2523 DEBUG(dbgs() << "Found a Load! Not a simple load, can not evaluate.\n");
2524 return false; // no volatile/atomic accesses.
2525 }
2527 Constant *Ptr = getVal(LI->getOperand(0));
2528 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) {
2529 Ptr = ConstantFoldConstantExpression(CE, DL, TLI);
2530 DEBUG(dbgs() << "Found a constant pointer expression, constant "
2531 "folding: " << *Ptr << "\n");
2532 }
2533 InstResult = ComputeLoadResult(Ptr);
2534 if (InstResult == 0) {
2535 DEBUG(dbgs() << "Failed to compute load result. Can not evaluate load."
2536 << "\n");
2537 return false; // Could not evaluate load.
2538 }
2540 DEBUG(dbgs() << "Evaluated load: " << *InstResult << "\n");
2541 } else if (AllocaInst *AI = dyn_cast<AllocaInst>(CurInst)) {
2542 if (AI->isArrayAllocation()) {
2543 DEBUG(dbgs() << "Found an array alloca. Can not evaluate.\n");
2544 return false; // Cannot handle array allocs.
2545 }
2546 Type *Ty = AI->getType()->getElementType();
2547 AllocaTmps.push_back(new GlobalVariable(Ty, false,
2548 GlobalValue::InternalLinkage,
2549 UndefValue::get(Ty),
2550 AI->getName()));
2551 InstResult = AllocaTmps.back();
2552 DEBUG(dbgs() << "Found an alloca. Result: " << *InstResult << "\n");
2553 } else if (isa<CallInst>(CurInst) || isa<InvokeInst>(CurInst)) {
2554 CallSite CS(CurInst);
2556 // Debug info can safely be ignored here.
2557 if (isa<DbgInfoIntrinsic>(CS.getInstruction())) {
2558 DEBUG(dbgs() << "Ignoring debug info.\n");
2559 ++CurInst;
2560 continue;
2561 }
2563 // Cannot handle inline asm.
2564 if (isa<InlineAsm>(CS.getCalledValue())) {
2565 DEBUG(dbgs() << "Found inline asm, can not evaluate.\n");
2566 return false;
2567 }
2569 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
2570 if (MemSetInst *MSI = dyn_cast<MemSetInst>(II)) {
2571 if (MSI->isVolatile()) {
2572 DEBUG(dbgs() << "Can not optimize a volatile memset " <<
2573 "intrinsic.\n");
2574 return false;
2575 }
2576 Constant *Ptr = getVal(MSI->getDest());
2577 Constant *Val = getVal(MSI->getValue());
2578 Constant *DestVal = ComputeLoadResult(getVal(Ptr));
2579 if (Val->isNullValue() && DestVal && DestVal->isNullValue()) {
2580 // This memset is a no-op.
2581 DEBUG(dbgs() << "Ignoring no-op memset.\n");
2582 ++CurInst;
2583 continue;
2584 }
2585 }
2587 if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
2588 II->getIntrinsicID() == Intrinsic::lifetime_end) {
2589 DEBUG(dbgs() << "Ignoring lifetime intrinsic.\n");
2590 ++CurInst;
2591 continue;
2592 }
2594 if (II->getIntrinsicID() == Intrinsic::invariant_start) {
2595 // We don't insert an entry into Values, as it doesn't have a
2596 // meaningful return value.
2597 if (!II->use_empty()) {
2598 DEBUG(dbgs() << "Found unused invariant_start. Can't evaluate.\n");
2599 return false;
2600 }
2601 ConstantInt *Size = cast<ConstantInt>(II->getArgOperand(0));
2602 Value *PtrArg = getVal(II->getArgOperand(1));
2603 Value *Ptr = PtrArg->stripPointerCasts();
2604 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) {
2605 Type *ElemTy = cast<PointerType>(GV->getType())->getElementType();
2606 if (DL && !Size->isAllOnesValue() &&
2607 Size->getValue().getLimitedValue() >=
2608 DL->getTypeStoreSize(ElemTy)) {
2609 Invariants.insert(GV);
2610 DEBUG(dbgs() << "Found a global var that is an invariant: " << *GV
2611 << "\n");
2612 } else {
2613 DEBUG(dbgs() << "Found a global var, but can not treat it as an "
2614 "invariant.\n");
2615 }
2616 }
2617 // Continue even if we do nothing.
2618 ++CurInst;
2619 continue;
2620 }
2622 DEBUG(dbgs() << "Unknown intrinsic. Can not evaluate.\n");
2623 return false;
2624 }
2626 // Resolve function pointers.
2627 Function *Callee = dyn_cast<Function>(getVal(CS.getCalledValue()));
2628 if (!Callee || Callee->mayBeOverridden()) {
2629 DEBUG(dbgs() << "Can not resolve function pointer.\n");
2630 return false; // Cannot resolve.
2631 }
2633 SmallVector<Constant*, 8> Formals;
2634 for (User::op_iterator i = CS.arg_begin(), e = CS.arg_end(); i != e; ++i)
2635 Formals.push_back(getVal(*i));
2637 if (Callee->isDeclaration()) {
2638 // If this is a function we can constant fold, do it.
2639 if (Constant *C = ConstantFoldCall(Callee, Formals, TLI)) {
2640 InstResult = C;
2641 DEBUG(dbgs() << "Constant folded function call. Result: " <<
2642 *InstResult << "\n");
2643 } else {
2644 DEBUG(dbgs() << "Can not constant fold function call.\n");
2645 return false;
2646 }
2647 } else {
2648 if (Callee->getFunctionType()->isVarArg()) {
2649 DEBUG(dbgs() << "Can not constant fold vararg function call.\n");
2650 return false;
2651 }
2653 Constant *RetVal = 0;
2654 // Execute the call, if successful, use the return value.
2655 ValueStack.push_back(new DenseMap<Value*, Constant*>);
2656 if (!EvaluateFunction(Callee, RetVal, Formals)) {
2657 DEBUG(dbgs() << "Failed to evaluate function.\n");
2658 return false;
2659 }
2660 delete ValueStack.pop_back_val();
2661 InstResult = RetVal;
2663 if (InstResult != NULL) {
2664 DEBUG(dbgs() << "Successfully evaluated function. Result: " <<
2665 InstResult << "\n\n");
2666 } else {
2667 DEBUG(dbgs() << "Successfully evaluated function. Result: 0\n\n");
2668 }
2669 }
2670 } else if (isa<TerminatorInst>(CurInst)) {
2671 DEBUG(dbgs() << "Found a terminator instruction.\n");
2673 if (BranchInst *BI = dyn_cast<BranchInst>(CurInst)) {
2674 if (BI->isUnconditional()) {
2675 NextBB = BI->getSuccessor(0);
2676 } else {
2677 ConstantInt *Cond =
2678 dyn_cast<ConstantInt>(getVal(BI->getCondition()));
2679 if (!Cond) return false; // Cannot determine.
2681 NextBB = BI->getSuccessor(!Cond->getZExtValue());
2682 }
2683 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(CurInst)) {
2684 ConstantInt *Val =
2685 dyn_cast<ConstantInt>(getVal(SI->getCondition()));
2686 if (!Val) return false; // Cannot determine.
2687 NextBB = SI->findCaseValue(Val).getCaseSuccessor();
2688 } else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(CurInst)) {
2689 Value *Val = getVal(IBI->getAddress())->stripPointerCasts();
2690 if (BlockAddress *BA = dyn_cast<BlockAddress>(Val))
2691 NextBB = BA->getBasicBlock();
2692 else
2693 return false; // Cannot determine.
2694 } else if (isa<ReturnInst>(CurInst)) {
2695 NextBB = 0;
2696 } else {
2697 // invoke, unwind, resume, unreachable.
2698 DEBUG(dbgs() << "Can not handle terminator.");
2699 return false; // Cannot handle this terminator.
2700 }
2702 // We succeeded at evaluating this block!
2703 DEBUG(dbgs() << "Successfully evaluated block.\n");
2704 return true;
2705 } else {
2706 // Did not know how to evaluate this!
2707 DEBUG(dbgs() << "Failed to evaluate block due to unhandled instruction."
2708 "\n");
2709 return false;
2710 }
2712 if (!CurInst->use_empty()) {
2713 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(InstResult))
2714 InstResult = ConstantFoldConstantExpression(CE, DL, TLI);
2716 setVal(CurInst, InstResult);
2717 }
2719 // If we just processed an invoke, we finished evaluating the block.
2720 if (InvokeInst *II = dyn_cast<InvokeInst>(CurInst)) {
2721 NextBB = II->getNormalDest();
2722 DEBUG(dbgs() << "Found an invoke instruction. Finished Block.\n\n");
2723 return true;
2724 }
2726 // Advance program counter.
2727 ++CurInst;
2728 }
2729 }
2731 /// EvaluateFunction - Evaluate a call to function F, returning true if
2732 /// successful, false if we can't evaluate it. ActualArgs contains the actual
2733 /// argument values for the call.
2734 bool Evaluator::EvaluateFunction(Function *F, Constant *&RetVal,
2735 const SmallVectorImpl<Constant*> &ActualArgs) {
2736 // Check to see if this function is already executing (recursion). If so,
2737 // bail out. TODO: we might want to accept limited recursion.
2738 if (std::find(CallStack.begin(), CallStack.end(), F) != CallStack.end())
2739 return false;
2741 CallStack.push_back(F);
2743 // Initialize arguments to the incoming values specified.
2744 unsigned ArgNo = 0;
2745 for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end(); AI != E;
2746 ++AI, ++ArgNo)
2747 setVal(AI, ActualArgs[ArgNo]);
2749 // ExecutedBlocks - We only handle non-looping, non-recursive code. As such,
2750 // we can only evaluate any one basic block at most once. This set keeps
2751 // track of what we have executed so we can detect recursive cases etc.
2752 SmallPtrSet<BasicBlock*, 32> ExecutedBlocks;
2754 // CurBB - The current basic block we're evaluating.
2755 BasicBlock *CurBB = F->begin();
2757 BasicBlock::iterator CurInst = CurBB->begin();
2759 while (1) {
2760 BasicBlock *NextBB = 0; // Initialized to avoid compiler warnings.
2761 DEBUG(dbgs() << "Trying to evaluate BB: " << *CurBB << "\n");
2763 if (!EvaluateBlock(CurInst, NextBB))
2764 return false;
2766 if (NextBB == 0) {
2767 // Successfully running until there's no next block means that we found
2768 // the return. Fill in the return value and pop the call stack.
2769 ReturnInst *RI = cast<ReturnInst>(CurBB->getTerminator());
2770 if (RI->getNumOperands())
2771 RetVal = getVal(RI->getOperand(0));
2772 CallStack.pop_back();
2773 return true;
2774 }
2776 // Okay, we succeeded in evaluating this control flow. See if we have
2777 // executed the new block before. If so, we have a looping function,
2778 // which we cannot evaluate in reasonable time.
2779 if (!ExecutedBlocks.insert(NextBB))
2780 return false; // looped!
2782 // Okay, we have never been in this block before. Check to see if there
2783 // are any PHI nodes. If so, evaluate them with information about where
2784 // we came from.
2785 PHINode *PN = 0;
2786 for (CurInst = NextBB->begin();
2787 (PN = dyn_cast<PHINode>(CurInst)); ++CurInst)
2788 setVal(PN, getVal(PN->getIncomingValueForBlock(CurBB)));
2790 // Advance to the next block.
2791 CurBB = NextBB;
2792 }
2793 }
2795 /// EvaluateStaticConstructor - Evaluate static constructors in the function, if
2796 /// we can. Return true if we can, false otherwise.
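/// For example (editorial addition; hypothetical IR): a constructor whose body
/// is only
///   store i32 42, i32* @x
///   ret void
/// evaluates successfully; the store is then committed by rewriting the
/// initializer of @x to i32 42, allowing the ctor to be removed from the list.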
2797 static bool EvaluateStaticConstructor(Function *F, const DataLayout *DL,
2798 const TargetLibraryInfo *TLI) {
2799 // Call the function.
2800 Evaluator Eval(DL, TLI);
2801 Constant *RetValDummy;
2802 bool EvalSuccess = Eval.EvaluateFunction(F, RetValDummy,
2803 SmallVector<Constant*, 0>());
2805 if (EvalSuccess) {
2806 // We succeeded at evaluation: commit the result.
2807 DEBUG(dbgs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '"
2808 << F->getName() << "' to " << Eval.getMutatedMemory().size()
2809 << " stores.\n");
2810 for (DenseMap<Constant*, Constant*>::const_iterator I =
2811 Eval.getMutatedMemory().begin(), E = Eval.getMutatedMemory().end();
2812 I != E; ++I)
2813 CommitValueTo(I->second, I->first);
2814 for (SmallPtrSet<GlobalVariable*, 8>::const_iterator I =
2815 Eval.getInvariants().begin(), E = Eval.getInvariants().end();
2816 I != E; ++I)
2817 (*I)->setConstant(true);
2818 }
2820 return EvalSuccess;
2821 }
2823 /// OptimizeGlobalCtorsList - Simplify and evaluate global ctors if possible.
2824 /// Return true if anything changed.
2825 bool GlobalOpt::OptimizeGlobalCtorsList(GlobalVariable *&GCL) {
2826 std::vector<Function*> Ctors = ParseGlobalCtors(GCL);
2827 bool MadeChange = false;
2828 if (Ctors.empty()) return false;
2830 // Loop over global ctors, optimizing them when we can.
2831 for (unsigned i = 0; i != Ctors.size(); ++i) {
2832 Function *F = Ctors[i];
2833 // Found a null terminator in the middle of the list, prune off the rest of
2834 // the list.
2835 if (F == 0) {
2836 if (i != Ctors.size()-1) {
2837 Ctors.resize(i+1);
2838 MadeChange = true;
2839 }
2840 break;
2841 }
2842 DEBUG(dbgs() << "Optimizing Global Constructor: " << *F << "\n");
2844 // We cannot simplify external ctor functions.
2845 if (F->empty()) continue;
2847 // If we can evaluate the ctor at compile time, do.
2848 if (EvaluateStaticConstructor(F, DL, TLI)) {
2849 Ctors.erase(Ctors.begin()+i);
2850 MadeChange = true;
2851 --i;
2852 ++NumCtorsEvaluated;
2853 continue;
2854 }
2855 }
2857 if (!MadeChange) return false;
2859 GCL = InstallGlobalCtors(GCL, Ctors);
2860 return true;
2861 }
2863 static int compareNames(Constant *const *A, Constant *const *B) {
2864 return (*A)->getName().compare((*B)->getName());
2865 }
2867 static void setUsedInitializer(GlobalVariable &V,
2868 SmallPtrSet<GlobalValue *, 8> Init) {
2869 if (Init.empty()) {
2870 V.eraseFromParent();
2871 return;
2872 }
2874 // Type of pointer to the array of pointers.
2875 PointerType *Int8PtrTy = Type::getInt8PtrTy(V.getContext(), 0);
2877 SmallVector<llvm::Constant *, 8> UsedArray;
2878 for (SmallPtrSet<GlobalValue *, 8>::iterator I = Init.begin(), E = Init.end();
2879 I != E; ++I) {
2880 Constant *Cast
2881 = ConstantExpr::getPointerBitCastOrAddrSpaceCast(*I, Int8PtrTy);
2882 UsedArray.push_back(Cast);
2883 }
2884 // Sort to get deterministic order.
2885 array_pod_sort(UsedArray.begin(), UsedArray.end(), compareNames);
2886 ArrayType *ATy = ArrayType::get(Int8PtrTy, UsedArray.size());
2888 Module *M = V.getParent();
2889 V.removeFromParent();
2890 GlobalVariable *NV =
2891 new GlobalVariable(*M, ATy, false, llvm::GlobalValue::AppendingLinkage,
2892 llvm::ConstantArray::get(ATy, UsedArray), "");
2893 NV->takeName(&V);
2894 NV->setSection("llvm.metadata");
2895 delete &V;
2896 }
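// For reference (editorial illustration), the rebuilt array has the shape
//   @llvm.used = appending global [1 x i8*]
//                [i8* bitcast (void ()* @f to i8*)], section "llvm.metadata"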
2898 namespace {
2899 /// \brief An easy to access representation of llvm.used and llvm.compiler.used.
2900 class LLVMUsed {
2901 SmallPtrSet<GlobalValue *, 8> Used;
2902 SmallPtrSet<GlobalValue *, 8> CompilerUsed;
2903 GlobalVariable *UsedV;
2904 GlobalVariable *CompilerUsedV;
2906 public:
2907 LLVMUsed(Module &M) {
2908 UsedV = collectUsedGlobalVariables(M, Used, false);
2909 CompilerUsedV = collectUsedGlobalVariables(M, CompilerUsed, true);
2910 }
2911 typedef SmallPtrSet<GlobalValue *, 8>::iterator iterator;
2912 iterator usedBegin() { return Used.begin(); }
2913 iterator usedEnd() { return Used.end(); }
2914 iterator compilerUsedBegin() { return CompilerUsed.begin(); }
2915 iterator compilerUsedEnd() { return CompilerUsed.end(); }
2916 bool usedCount(GlobalValue *GV) const { return Used.count(GV); }
2917 bool compilerUsedCount(GlobalValue *GV) const {
2918 return CompilerUsed.count(GV);
2919 }
2920 bool usedErase(GlobalValue *GV) { return Used.erase(GV); }
2921 bool compilerUsedErase(GlobalValue *GV) { return CompilerUsed.erase(GV); }
2922 bool usedInsert(GlobalValue *GV) { return Used.insert(GV); }
2923 bool compilerUsedInsert(GlobalValue *GV) { return CompilerUsed.insert(GV); }
2925 void syncVariablesAndSets() {
2926 if (UsedV)
2927 setUsedInitializer(*UsedV, Used);
2928 if (CompilerUsedV)
2929 setUsedInitializer(*CompilerUsedV, CompilerUsed);
2930 }
2931 };
2932 }
2934 static bool hasUseOtherThanLLVMUsed(GlobalAlias &GA, const LLVMUsed &U) {
2935 if (GA.use_empty()) // No use at all.
2936 return false;
2938 assert((!U.usedCount(&GA) || !U.compilerUsedCount(&GA)) &&
2939 "We should have removed the duplicated "
2940 "element from llvm.compiler.used");
2941 if (!GA.hasOneUse())
2942 // Strictly more than one use. So at least one is not in llvm.used and
2943 // llvm.compiler.used.
2944 return true;
2946 // Exactly one use. Check if it is in llvm.used or llvm.compiler.used.
2947 return !U.usedCount(&GA) && !U.compilerUsedCount(&GA);
2948 }
2950 static bool hasMoreThanOneUseOtherThanLLVMUsed(GlobalValue &V,
2951 const LLVMUsed &U) {
2952 unsigned N = 2;
2953 assert((!U.usedCount(&V) || !U.compilerUsedCount(&V)) &&
2954 "We should have removed the duplicated "
2955 "element from llvm.compiler.used");
2956 if (U.usedCount(&V) || U.compilerUsedCount(&V))
2957 ++N;
2958 return V.hasNUsesOrMore(N);
2959 }
2961 static bool mayHaveOtherReferences(GlobalAlias &GA, const LLVMUsed &U) {
2962 if (!GA.hasLocalLinkage())
2963 return true;
2965 return U.usedCount(&GA) || U.compilerUsedCount(&GA);
2966 }
2968 static bool hasUsesToReplace(GlobalAlias &GA, LLVMUsed &U, bool &RenameTarget) {
2969 RenameTarget = false;
2971 if (hasUseOtherThanLLVMUsed(GA, U))
2972 return false;
2974 // If the alias is externally visible, we may still be able to simplify it.
2975 if (!mayHaveOtherReferences(GA, U))
2976 return true;
2978 // If the aliasee has internal linkage, give it the name and linkage
2979 // of the alias, and delete the alias. This turns:
2980 // define internal ... @f(...)
2981 // @a = alias ... @f
2982 // into:
2983 //   define ... @a(...)
2984 Constant *Aliasee = GA.getAliasee();
2985 GlobalValue *Target = cast<GlobalValue>(Aliasee->stripPointerCasts());
2986 if (!Target->hasLocalLinkage())
2987 return false;
2989 // Do not perform the transform if multiple aliases potentially target the
2990 // aliasee. This check also ensures that it is safe to replace the section
2991 // and other attributes of the aliasee with those of the alias.
2992 if (hasMoreThanOneUseOtherThanLLVMUsed(*Target, U))
2993 return false;
2995 RenameTarget = true;
2996 return true;
2997 }
2999 bool GlobalOpt::OptimizeGlobalAliases(Module &M) {
3000 bool Changed = false;
3001 LLVMUsed Used(M);
3003 for (SmallPtrSet<GlobalValue *, 8>::iterator I = Used.usedBegin(),
3004 E = Used.usedEnd();
3005 I != E; ++I)
3006 Used.compilerUsedErase(*I);
3008 for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end();
3009 I != E;) {
3010 Module::alias_iterator J = I++;
3011 // Aliases without names cannot be referenced outside this module.
3012 if (!J->hasName() && !J->isDeclaration())
3013 J->setLinkage(GlobalValue::InternalLinkage);
3014 // If the aliasee may change at link time, nothing can be done - bail out.
3015 if (J->mayBeOverridden())
3016 continue;
3018 Constant *Aliasee = J->getAliasee();
3019 GlobalValue *Target = cast<GlobalValue>(Aliasee->stripPointerCasts());
3020 Target->removeDeadConstantUsers();
3022 // Make all users of the alias use the aliasee instead.
3023 bool RenameTarget;
3024 if (!hasUsesToReplace(*J, Used, RenameTarget))
3025 continue;
3027 J->replaceAllUsesWith(Aliasee);
3028 ++NumAliasesResolved;
3029 Changed = true;
3031 if (RenameTarget) {
3032 // Give the aliasee the name, linkage and other attributes of the alias.
3033 Target->takeName(J);
3034 Target->setLinkage(J->getLinkage());
3035 Target->setVisibility(J->getVisibility());
3036 Target->setDLLStorageClass(J->getDLLStorageClass());
3038 if (Used.usedErase(J))
3039 Used.usedInsert(Target);
3041 if (Used.compilerUsedErase(J))
3042 Used.compilerUsedInsert(Target);
3043 } else if (mayHaveOtherReferences(*J, Used))
3044 continue;
3046 // Delete the alias.
3047 M.getAliasList().erase(J);
3048 ++NumAliasesRemoved;
3049 Changed = true;
3050 }
3052 Used.syncVariablesAndSets();
3054 return Changed;
3055 }
3057 static Function *FindCXAAtExit(Module &M, TargetLibraryInfo *TLI) {
3058 if (!TLI->has(LibFunc::cxa_atexit))
3059 return 0;
3061 Function *Fn = M.getFunction(TLI->getName(LibFunc::cxa_atexit));
3063 if (!Fn)
3064 return 0;
3066 FunctionType *FTy = Fn->getFunctionType();
3068 // Checking that the function has the right return type, the right number of
3069 // parameters and that they all have pointer types should be enough.
3070 if (!FTy->getReturnType()->isIntegerTy() ||
3071 FTy->getNumParams() != 3 ||
3072 !FTy->getParamType(0)->isPointerTy() ||
3073 !FTy->getParamType(1)->isPointerTy() ||
3074 !FTy->getParamType(2)->isPointerTy())
3075 return 0;
3077 return Fn;
3078 }
3080 /// cxxDtorIsEmpty - Returns whether the given function is an empty C++
3081 /// destructor and can therefore be eliminated.
3082 /// Note that we assume that other optimization passes have already simplified
3083 /// the code so we only look for a function with a single basic block, where
3084 /// the only allowed instructions are 'ret', 'call' to an empty C++ dtor and
3085 /// other side-effect free instructions.
3086 static bool cxxDtorIsEmpty(const Function &Fn,
3087 SmallPtrSet<const Function *, 8> &CalledFunctions) {
3088 // FIXME: We could eliminate C++ destructors if they're readonly/readnone and
3089 // nounwind, but that doesn't seem worth doing.
3090 if (Fn.isDeclaration())
3091 return false;
3093 if (++Fn.begin() != Fn.end())
3094 return false;
3096 const BasicBlock &EntryBlock = Fn.getEntryBlock();
3097 for (BasicBlock::const_iterator I = EntryBlock.begin(), E = EntryBlock.end();
3098 I != E; ++I) {
3099 if (const CallInst *CI = dyn_cast<CallInst>(I)) {
3100 // Ignore debug intrinsics.
3101 if (isa<DbgInfoIntrinsic>(CI))
3102 continue;
3104 const Function *CalledFn = CI->getCalledFunction();
3106 if (!CalledFn)
3107 return false;
3109 SmallPtrSet<const Function *, 8> NewCalledFunctions(CalledFunctions);
3111 // Don't treat recursive functions as empty.
3112 if (!NewCalledFunctions.insert(CalledFn))
3113 return false;
3115 if (!cxxDtorIsEmpty(*CalledFn, NewCalledFunctions))
3116 return false;
3117 } else if (isa<ReturnInst>(*I))
3118 return true; // We're done.
3119 else if (I->mayHaveSideEffects())
3120 return false; // Destructor with side effects, bail.
3121 }
3123 return false;
3124 }
3126 bool GlobalOpt::OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn) {
3127 /// Itanium C++ ABI p3.3.5:
3129 /// After constructing a global (or local static) object, that will require
3130 /// destruction on exit, a termination function is registered as follows:
3132 /// extern "C" int __cxa_atexit ( void (*f)(void *), void *p, void *d );
3134 /// This registration, e.g. __cxa_atexit(f,p,d), is intended to cause the
3135 /// call f(p) when DSO d is unloaded, before all such termination calls
3136 /// registered before this one. It returns zero if registration is
3137 /// successful, nonzero on failure.
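/// For example (editorial addition; hypothetical IR): a registration such as
///   %0 = call i32 @__cxa_atexit(void (i8*)* @_ZN3FooD1Ev, i8* @f,
///                               i8* @__dso_handle)
/// can be deleted when @_ZN3FooD1Ev is an empty destructor.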
3139 // This pass will look for calls to __cxa_atexit where the function is trivial
3140 // and remove them.
3141 bool Changed = false;
3143 for (Function::use_iterator I = CXAAtExitFn->use_begin(),
3144 E = CXAAtExitFn->use_end(); I != E;) {
3145 // We're only interested in calls. Theoretically, we could handle invoke
3146 // instructions as well, but neither llvm-gcc nor clang generate invokes
3147 // to __cxa_atexit.
3148 CallInst *CI = dyn_cast<CallInst>(*I++);
3149 if (!CI)
3150 continue;
3152 Function *DtorFn =
3153 dyn_cast<Function>(CI->getArgOperand(0)->stripPointerCasts());
3154 if (!DtorFn)
3155 continue;
3157 SmallPtrSet<const Function *, 8> CalledFunctions;
3158 if (!cxxDtorIsEmpty(*DtorFn, CalledFunctions))
3159 continue;
3161 // Just remove the call.
3162 CI->replaceAllUsesWith(Constant::getNullValue(CI->getType()));
3163 CI->eraseFromParent();
3165 ++NumCXXDtorsRemoved;
3167 Changed |= true;
3168 }
3170 return Changed;
3171 }
3173 bool GlobalOpt::runOnModule(Module &M) {
3174 bool Changed = false;
3176 DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
3177 DL = DLP ? &DLP->getDataLayout() : 0;
3178 TLI = &getAnalysis<TargetLibraryInfo>();
3180 // Try to find the llvm.global_ctors list.
3181 GlobalVariable *GlobalCtors = FindGlobalCtors(M);
3183 bool LocalChange = true;
3184 while (LocalChange) {
3185 LocalChange = false;
3187 // Delete functions that are trivially dead, ccc -> fastcc
3188 LocalChange |= OptimizeFunctions(M);
3190 // Optimize global_ctors list.
3191 if (GlobalCtors)
3192 LocalChange |= OptimizeGlobalCtorsList(GlobalCtors);
3194 // Optimize non-address-taken globals.
3195 LocalChange |= OptimizeGlobalVars(M);
3197 // Resolve aliases, when possible.
3198 LocalChange |= OptimizeGlobalAliases(M);
3200 // Try to remove trivial global destructors if they are not removed
3201 // already.
3202 Function *CXAAtExitFn = FindCXAAtExit(M, TLI);
3203 if (CXAAtExitFn)
3204 LocalChange |= OptimizeEmptyGlobalCXXDtors(CXAAtExitFn);
3206 Changed |= LocalChange;
3207 }
3209 // TODO: Move all global ctors functions to the end of the module for code
3210 // size reduction.
3212 return Changed;
3213 }