//===- GlobalOpt.cpp - Optimize Global Variables --------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass transforms simple global variables that never have their address
// taken. If obviously true, it marks read/write globals as constant, deletes
// variables only stored to, etc.
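//
// Illustrative example (not from the original source): for a module such as
//
//   @g = internal global i32 0
//   ...loads of @g, plus a single store of a constant...
//
// the pass may be able to mark @g unnamed_addr, substitute the stored
// constant into the initializer, or delete @g if it is only ever stored to.
//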
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/CtorUtils.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"

using namespace llvm;

#define DEBUG_TYPE "globalopt"

STATISTIC(NumMarked    , "Number of globals marked constant");
STATISTIC(NumUnnamed   , "Number of globals marked unnamed_addr");
STATISTIC(NumSRA       , "Number of aggregate globals broken into scalars");
STATISTIC(NumHeapSRA   , "Number of heap objects SRA'd");
STATISTIC(NumSubstitute,"Number of globals with initializers stored into them");
STATISTIC(NumDeleted   , "Number of globals deleted");
STATISTIC(NumFnDeleted , "Number of functions deleted");
STATISTIC(NumGlobUses  , "Number of global uses devirtualized");
STATISTIC(NumLocalized , "Number of globals localized");
STATISTIC(NumShrunkToBool  , "Number of global vars shrunk to booleans");
STATISTIC(NumFastCallFns   , "Number of functions converted to fastcc");
STATISTIC(NumCtorsEvaluated, "Number of static ctors evaluated");
STATISTIC(NumNestRemoved   , "Number of nest attributes removed");
STATISTIC(NumAliasesResolved, "Number of global aliases resolved");
STATISTIC(NumAliasesRemoved, "Number of global aliases eliminated");
STATISTIC(NumCXXDtorsRemoved, "Number of global C++ destructors removed");

namespace {

struct GlobalOpt : public ModulePass {
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetLibraryInfoWrapperPass>();
  }
  static char ID; // Pass identification, replacement for typeid
  GlobalOpt() : ModulePass(ID) {
    initializeGlobalOptPass(*PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override;

private:
  bool OptimizeFunctions(Module &M);
  bool OptimizeGlobalVars(Module &M);
  bool OptimizeGlobalAliases(Module &M);
  bool ProcessGlobal(GlobalVariable *GV, Module::global_iterator &GVI);
  bool ProcessInternalGlobal(GlobalVariable *GV, Module::global_iterator &GVI,
                             const GlobalStatus &GS);
  bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn);

  TargetLibraryInfo *TLI;
  SmallSet<const Comdat *, 8> NotDiscardableComdats;
};

} // end anonymous namespace

char GlobalOpt::ID = 0;
INITIALIZE_PASS_BEGIN(GlobalOpt, "globalopt",
                      "Global Variable Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(GlobalOpt, "globalopt",
                    "Global Variable Optimizer", false, false)

ModulePass *llvm::createGlobalOptimizerPass() { return new GlobalOpt(); }

/// isLeakCheckerRoot - Is this global variable possibly used by a leak checker
/// as a root?  If so, we might not really want to eliminate the stores to it.
static bool isLeakCheckerRoot(GlobalVariable *GV) {
  // A global variable is a root if it is a pointer, or could plausibly contain
  // a pointer.  There are two challenges; one is that we could have a struct
  // that has an inner member which is a pointer.  We recurse through the type
  // to detect these (up to a point).  The other is that we may actually be a
  // union of a pointer and another type, and so our LLVM type is an integer
  // which gets converted into a pointer, or our type is an [i8 x #] with a
  // pointer potentially contained here.
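  //
  // Illustrative example (not from the original source): a C global such as
  //   union { void *p; long bits; } u;
  // is typically lowered to an integer-typed LLVM global that is later
  // inttoptr'd, so it must still be treated as a possible pointer root.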

  if (GV->hasPrivateLinkage())
    return false;

  SmallVector<Type *, 4> Types;
  Types.push_back(cast<PointerType>(GV->getType())->getElementType());

  unsigned Limit = 20; // Bound the recursion through nested aggregate types.
  do {
    Type *Ty = Types.pop_back_val();
    switch (Ty->getTypeID()) {
    default: break;
    case Type::PointerTyID: return true;
    case Type::ArrayTyID:
    case Type::VectorTyID: {
      SequentialType *STy = cast<SequentialType>(Ty);
      Types.push_back(STy->getElementType());
      break;
    }
    case Type::StructTyID: {
      StructType *STy = cast<StructType>(Ty);
      if (STy->isOpaque()) return true;
      for (StructType::element_iterator I = STy->element_begin(),
           E = STy->element_end(); I != E; ++I) {
        Type *InnerTy = *I;
        if (isa<PointerType>(InnerTy)) return true;
        if (isa<CompositeType>(InnerTy))
          Types.push_back(InnerTy);
      }
      break;
    }
    }
    if (--Limit == 0) return true;
  } while (!Types.empty());
  return false;
}

/// Given a value that is stored to a global but never read, determine whether
/// it's safe to remove the store and the chain of computation that feeds the
/// store.
static bool IsSafeComputationToRemove(Value *V, const TargetLibraryInfo *TLI) {
  do {
    if (isa<Constant>(V))
      return true;
    if (!V->hasOneUse())
      return false;
    if (isa<LoadInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V) ||
        isa<GlobalValue>(V))
      return false;
    if (isAllocationFn(V, TLI))
      return true;
    Instruction *I = cast<Instruction>(V);
    if (I->mayHaveSideEffects())
      return false;
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
      if (!GEP->hasAllConstantIndices())
        return false;
    } else if (I->getNumOperands() != 1) {
      return false;
    }
    V = I->getOperand(0);
  } while (1);
}

/// CleanupPointerRootUsers - This GV is a pointer root.  Loop over all users
/// of the global and clean up any that obviously don't assign the global a
/// value that isn't dynamically allocated.
///
static bool CleanupPointerRootUsers(GlobalVariable *GV,
                                    const TargetLibraryInfo *TLI) {
  // A brief explanation of leak checkers.  The goal is to find bugs where
  // pointers are forgotten, causing an accumulating growth in memory
  // usage over time.  The common strategy for leak checkers is to whitelist the
  // memory pointed to by globals at exit.  This is popular because it also
  // solves another problem where the main thread of a C++ program may shut down
  // before other threads that are still expecting to use those globals.  To
  // handle that case, we expect the program may create a singleton and never
  // destroy it.

  bool Changed = false;

  // If Dead[n].first is the only use of a malloc result, we can delete its
  // chain of computation and the store to the global in Dead[n].second.
  SmallVector<std::pair<Instruction *, Instruction *>, 32> Dead;

  // Constants can't be pointers to dynamically allocated memory.
  for (Value::user_iterator UI = GV->user_begin(), E = GV->user_end();
       UI != E;) {
    User *U = *UI++;
    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      Value *V = SI->getValueOperand();
      if (isa<Constant>(V)) {
        Changed = true;
        SI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(V)) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, SI));
      }
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(U)) {
      if (isa<Constant>(MSI->getValue())) {
        Changed = true;
        MSI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(MSI->getValue())) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, MSI));
      }
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(U)) {
      GlobalVariable *MemSrc = dyn_cast<GlobalVariable>(MTI->getSource());
      if (MemSrc && MemSrc->isConstant()) {
        Changed = true;
        MTI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(MemSrc)) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, MTI));
      }
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
      if (CE->use_empty()) {
        CE->destroyConstant();
        Changed = true;
      }
    } else if (Constant *C = dyn_cast<Constant>(U)) {
      if (isSafeToDestroyConstant(C)) {
        C->destroyConstant();
        // This could have invalidated UI, start over from scratch.
        Dead.clear();
        CleanupPointerRootUsers(GV, TLI);
        return true;
      }
    }
  }

  for (int i = 0, e = Dead.size(); i != e; ++i) {
    if (IsSafeComputationToRemove(Dead[i].first, TLI)) {
      Dead[i].second->eraseFromParent();
      Instruction *I = Dead[i].first;
      do {
        if (isAllocationFn(I, TLI))
          break;
        Instruction *J = dyn_cast<Instruction>(I->getOperand(0));
        if (!J)
          break;
        I->eraseFromParent();
        I = J;
      } while (1);
      I->eraseFromParent();
      Changed = true;
    }
  }

  return Changed;
}

/// CleanupConstantGlobalUsers - We just marked GV constant.  Loop over all
/// users of the global, cleaning up the obvious ones.  This is largely just a
/// quick scan over the use list to clean up the easy and obvious cruft.  This
/// returns true if it made a change.
static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
                                       const DataLayout &DL,
                                       TargetLibraryInfo *TLI) {
  bool Changed = false;
  // Note that we need to use a weak value handle for the worklist items. When
  // we delete a constant array, we may also be holding pointer to one of its
  // elements (or an element of one of its elements if we're dealing with an
  // array of arrays) in the worklist.
  SmallVector<WeakVH, 8> WorkList(V->user_begin(), V->user_end());
  while (!WorkList.empty()) {
    Value *UV = WorkList.pop_back_val();
    if (!UV)
      continue;

    User *U = cast<User>(UV);

    if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
      if (Init) {
        // Replace the load with the initializer.
        LI->replaceAllUsesWith(Init);
        LI->eraseFromParent();
        Changed = true;
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // Store must be unreachable or storing Init into the global.
      SI->eraseFromParent();
      Changed = true;
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
      if (CE->getOpcode() == Instruction::GetElementPtr) {
        Constant *SubInit = nullptr;
        if (Init)
          SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
        Changed |= CleanupConstantGlobalUsers(CE, SubInit, DL, TLI);
      } else if ((CE->getOpcode() == Instruction::BitCast &&
                  CE->getType()->isPointerTy()) ||
                 CE->getOpcode() == Instruction::AddrSpaceCast) {
        // Pointer cast, delete any stores and memsets to the global.
        Changed |= CleanupConstantGlobalUsers(CE, nullptr, DL, TLI);
      }

      if (CE->use_empty()) {
        CE->destroyConstant();
        Changed = true;
      }
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
      // Do not transform "gepinst (gep constexpr (GV))" here, because forming
      // "gepconstexpr (gep constexpr (GV))" will cause the two gep's to fold
      // and will invalidate our notion of what Init is.
      Constant *SubInit = nullptr;
      if (!isa<ConstantExpr>(GEP->getOperand(0))) {
        ConstantExpr *CE = dyn_cast_or_null<ConstantExpr>(
            ConstantFoldInstruction(GEP, DL, TLI));
        if (Init && CE && CE->getOpcode() == Instruction::GetElementPtr)
          SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);

        // If the initializer is an all-null value and we have an inbounds GEP,
        // we already know what the result of any load from that GEP is.
        // TODO: Handle splats.
        if (Init && isa<ConstantAggregateZero>(Init) && GEP->isInBounds())
          SubInit = Constant::getNullValue(GEP->getType()->getElementType());
      }
      Changed |= CleanupConstantGlobalUsers(GEP, SubInit, DL, TLI);

      if (GEP->use_empty()) {
        GEP->eraseFromParent();
        Changed = true;
      }
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U)) { // memset/cpy/mv
      if (MI->getRawDest() == V) {
        MI->eraseFromParent();
        Changed = true;
      }

    } else if (Constant *C = dyn_cast<Constant>(U)) {
      // If we have a chain of dead constantexprs or other things dangling from
      // us, and if they are all dead, nuke them without remorse.
      if (isSafeToDestroyConstant(C)) {
        C->destroyConstant();
        // This could have invalidated UI, start over from scratch.
        CleanupConstantGlobalUsers(V, Init, DL, TLI);
        return true;
      }
    }
  }
  return Changed;
}

/// isSafeSROAElementUse - Return true if the specified instruction is a safe
/// user of a derived expression from a global that we want to SROA.
static bool isSafeSROAElementUse(Value *V) {
  // We might have a dead and dangling constant hanging off of here.
  if (Constant *C = dyn_cast<Constant>(V))
    return isSafeToDestroyConstant(C);

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;

  // Loads are ok.
  if (isa<LoadInst>(I)) return true;

  // Stores *to* the pointer are ok.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getOperand(0) != V;

  // Otherwise, it must be a GEP.
  GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I);
  if (!GEPI) return false;

  if (GEPI->getNumOperands() < 3 || !isa<Constant>(GEPI->getOperand(1)) ||
      !cast<Constant>(GEPI->getOperand(1))->isNullValue())
    return false;

  for (User *U : GEPI->users())
    if (!isSafeSROAElementUse(U))
      return false;

  return true;
}

/// IsUserOfGlobalSafeForSRA - U is a direct user of the specified global value.
/// Look at it and its uses and decide whether it is safe to SROA this global.
///
static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) {
  // The user of the global must be a GEP Inst or a ConstantExpr GEP.
  if (!isa<GetElementPtrInst>(U) &&
      (!isa<ConstantExpr>(U) ||
       cast<ConstantExpr>(U)->getOpcode() != Instruction::GetElementPtr))
    return false;

  // Check to see if this ConstantExpr GEP is SRA'able.  In particular, we
  // don't like < 3 operand CE's, and we don't like non-constant integer
  // indices.  This enforces that all uses are 'gep GV, 0, C, ...' for some
  // constant C.
  if (U->getNumOperands() < 3 || !isa<Constant>(U->getOperand(1)) ||
      !cast<Constant>(U->getOperand(1))->isNullValue() ||
      !isa<ConstantInt>(U->getOperand(2)))
    return false;

  gep_type_iterator GEPI = gep_type_begin(U), E = gep_type_end(U);
  ++GEPI;  // Skip over the pointer index.

  // If this is a use of an array allocation, do a bit more checking for sanity.
  if (ArrayType *AT = dyn_cast<ArrayType>(*GEPI)) {
    uint64_t NumElements = AT->getNumElements();
    ConstantInt *Idx = cast<ConstantInt>(U->getOperand(2));

    // Check to make sure that index falls within the array.  If not,
    // something funny is going on, so we won't do the optimization.
    if (Idx->getZExtValue() >= NumElements)
      return false;

    // We cannot scalar repl this level of the array unless any array
    // sub-indices are in-range constants.  In particular, consider:
    // A[0][i].  We cannot know that the user isn't doing invalid things like
    // allowing i to index an out-of-range subscript that accesses A[1].
    //
    // Scalar replacing *just* the outer index of the array is probably not
    // going to be a win anyway, so just give up.
    for (++GEPI; // Skip array index.
         GEPI != E;
         ++GEPI) {
      uint64_t NumElements;
      if (ArrayType *SubArrayTy = dyn_cast<ArrayType>(*GEPI))
        NumElements = SubArrayTy->getNumElements();
      else if (VectorType *SubVectorTy = dyn_cast<VectorType>(*GEPI))
        NumElements = SubVectorTy->getNumElements();
      else {
        assert((*GEPI)->isStructTy() &&
               "Indexed GEP type is not array, vector, or struct!");
        continue;
      }

      ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPI.getOperand());
      if (!IdxVal || IdxVal->getZExtValue() >= NumElements)
        return false;
    }
  }

  for (User *UU : U->users())
    if (!isSafeSROAElementUse(UU))
      return false;

  return true;
}

/// GlobalUsersSafeToSRA - Look at all uses of the global and decide whether it
/// is safe for us to perform this transformation.
///
static bool GlobalUsersSafeToSRA(GlobalValue *GV) {
  for (User *U : GV->users())
    if (!IsUserOfGlobalSafeForSRA(U, GV))
      return false;

  return true;
}

/// SRAGlobal - Perform scalar replacement of aggregates on the specified global
/// variable.  This opens the door for other optimizations by exposing the
/// behavior of the program in a more fine-grained way.  We have determined that
/// this transformation is safe already.  We return the first global variable we
/// insert so that the caller can reprocess it.
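///
/// Illustrative example (not from the original source): a global such as
///   @pair = internal global { i32, i32 } zeroinitializer
/// whose only uses are 'gep @pair, 0, C' accesses can be split into two
/// scalar globals, roughly @pair.0 and @pair.1, one per field.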
static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) {
  // Make sure this global only has simple uses that we can SRA.
  if (!GlobalUsersSafeToSRA(GV))
    return nullptr;

  assert(GV->hasLocalLinkage() && !GV->isConstant());
  Constant *Init = GV->getInitializer();
  Type *Ty = Init->getType();

  std::vector<GlobalVariable*> NewGlobals;
  Module::GlobalListType &Globals = GV->getParent()->getGlobalList();

  // Get the alignment of the global, either explicit or target-specific.
  unsigned StartAlignment = GV->getAlignment();
  if (StartAlignment == 0)
    StartAlignment = DL.getABITypeAlignment(GV->getType());

  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    NewGlobals.reserve(STy->getNumElements());
    const StructLayout &Layout = *DL.getStructLayout(STy);
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      Constant *In = Init->getAggregateElement(i);
      assert(In && "Couldn't get element of initializer?");
      GlobalVariable *NGV = new GlobalVariable(STy->getElementType(i), false,
                                               GlobalVariable::InternalLinkage,
                                               In, GV->getName()+"."+Twine(i),
                                               GV->getThreadLocalMode(),
                                              GV->getType()->getAddressSpace());
      Globals.insert(GV, NGV);
      NewGlobals.push_back(NGV);

      // Calculate the known alignment of the field.  If the original aggregate
      // had 256 byte alignment for example, something might depend on that:
      // propagate info to each field.
      uint64_t FieldOffset = Layout.getElementOffset(i);
      unsigned NewAlign = (unsigned)MinAlign(StartAlignment, FieldOffset);
      if (NewAlign > DL.getABITypeAlignment(STy->getElementType(i)))
        NGV->setAlignment(NewAlign);
    }
  } else if (SequentialType *STy = dyn_cast<SequentialType>(Ty)) {
    unsigned NumElements = 0;
    if (ArrayType *ATy = dyn_cast<ArrayType>(STy))
      NumElements = ATy->getNumElements();
    else
      NumElements = cast<VectorType>(STy)->getNumElements();

    if (NumElements > 16 && GV->hasNUsesOrMore(16))
      return nullptr; // It's not worth it.
    NewGlobals.reserve(NumElements);

    uint64_t EltSize = DL.getTypeAllocSize(STy->getElementType());
    unsigned EltAlign = DL.getABITypeAlignment(STy->getElementType());
    for (unsigned i = 0, e = NumElements; i != e; ++i) {
      Constant *In = Init->getAggregateElement(i);
      assert(In && "Couldn't get element of initializer?");

      GlobalVariable *NGV = new GlobalVariable(STy->getElementType(), false,
                                               GlobalVariable::InternalLinkage,
                                               In, GV->getName()+"."+Twine(i),
                                               GV->getThreadLocalMode(),
                                              GV->getType()->getAddressSpace());
      Globals.insert(GV, NGV);
      NewGlobals.push_back(NGV);

      // Calculate the known alignment of the field.  If the original aggregate
      // had 256 byte alignment for example, something might depend on that:
      // propagate info to each field.
      unsigned NewAlign = (unsigned)MinAlign(StartAlignment, EltSize*i);
      if (NewAlign > EltAlign)
        NGV->setAlignment(NewAlign);
    }
  }

  if (NewGlobals.empty())
    return nullptr;

  DEBUG(dbgs() << "PERFORMING GLOBAL SRA ON: " << *GV);

  Constant *NullInt =Constant::getNullValue(Type::getInt32Ty(GV->getContext()));

  // Loop over all of the uses of the global, replacing the constantexpr geps,
  // with smaller constantexpr geps or direct references.
  while (!GV->use_empty()) {
    User *GEP = GV->user_back();
    assert(((isa<ConstantExpr>(GEP) &&
             cast<ConstantExpr>(GEP)->getOpcode()==Instruction::GetElementPtr)||
            isa<GetElementPtrInst>(GEP)) && "NonGEP CE's are not SRAable!");

    // Ignore the 1th operand, which has to be zero or else the program is quite
    // broken (undefined).  Get the 2nd operand, which is the structure or array
    // index.
    unsigned Val = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();
    if (Val >= NewGlobals.size()) Val = 0; // Out of bound array access.

    Value *NewPtr = NewGlobals[Val];
    Type *NewTy = NewGlobals[Val]->getValueType();

    // Form a shorter GEP if needed.
    if (GEP->getNumOperands() > 3) {
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GEP)) {
        SmallVector<Constant*, 8> Idxs;
        Idxs.push_back(NullInt);
        for (unsigned i = 3, e = CE->getNumOperands(); i != e; ++i)
          Idxs.push_back(CE->getOperand(i));
        NewPtr =
            ConstantExpr::getGetElementPtr(NewTy, cast<Constant>(NewPtr), Idxs);
      } else {
        GetElementPtrInst *GEPI = cast<GetElementPtrInst>(GEP);
        SmallVector<Value*, 8> Idxs;
        Idxs.push_back(NullInt);
        for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i)
          Idxs.push_back(GEPI->getOperand(i));
        NewPtr = GetElementPtrInst::Create(
            NewTy, NewPtr, Idxs, GEPI->getName() + "." + Twine(Val), GEPI);
      }
    }
    GEP->replaceAllUsesWith(NewPtr);

    if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(GEP))
      GEPI->eraseFromParent();
    else
      cast<ConstantExpr>(GEP)->destroyConstant();
  }

  // Delete the old global, now that it is dead.
  Globals.erase(GV);
  ++NumSRA;

  // Loop over the new globals array deleting any globals that are obviously
  // dead.  This can arise due to scalarization of a structure or an array that
  // has elements that are dead.
  unsigned FirstGlobal = 0;
  for (unsigned i = 0, e = NewGlobals.size(); i != e; ++i)
    if (NewGlobals[i]->use_empty()) {
      Globals.erase(NewGlobals[i]);
      if (FirstGlobal == i) ++FirstGlobal;
    }

  return FirstGlobal != NewGlobals.size() ? NewGlobals[FirstGlobal] : nullptr;
}

/// AllUsesOfValueWillTrapIfNull - Return true if all users of the specified
/// value will trap if the value is dynamically null.  PHIs keeps track of any
/// phi nodes we've seen to avoid reprocessing them.
static bool AllUsesOfValueWillTrapIfNull(const Value *V,
                                        SmallPtrSetImpl<const PHINode*> &PHIs) {
  for (const User *U : V->users())
    if (isa<LoadInst>(U)) {
      // Will trap.
    } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
      if (SI->getOperand(0) == V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Storing the value.
      }
    } else if (const CallInst *CI = dyn_cast<CallInst>(U)) {
      if (CI->getCalledValue() != V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Not calling the ptr
      }
    } else if (const InvokeInst *II = dyn_cast<InvokeInst>(U)) {
      if (II->getCalledValue() != V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Not calling the ptr
      }
    } else if (const BitCastInst *CI = dyn_cast<BitCastInst>(U)) {
      if (!AllUsesOfValueWillTrapIfNull(CI, PHIs)) return false;
    } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      if (!AllUsesOfValueWillTrapIfNull(GEPI, PHIs)) return false;
    } else if (const PHINode *PN = dyn_cast<PHINode>(U)) {
      // If we've already seen this phi node, ignore it, it has already been
      // checked.
      if (PHIs.insert(PN).second && !AllUsesOfValueWillTrapIfNull(PN, PHIs))
        return false;
    } else if (isa<ICmpInst>(U) &&
               isa<ConstantPointerNull>(U->getOperand(1))) {
      // Ignore icmp X, null
    } else {
      //cerr << "NONTRAPPING USE: " << *U;
      return false;
    }

  return true;
}

/// AllUsesOfLoadedValueWillTrapIfNull - Return true if all uses of any loads
/// from GV will trap if the loaded value is null.  Note that this also permits
/// comparisons of the loaded value against null, as a special case.
static bool AllUsesOfLoadedValueWillTrapIfNull(const GlobalVariable *GV) {
  for (const User *U : GV->users())
    if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
      SmallPtrSet<const PHINode*, 8> PHIs;
      if (!AllUsesOfValueWillTrapIfNull(LI, PHIs))
        return false;
    } else if (isa<StoreInst>(U)) {
      // Ignore stores to the global.
    } else {
      // We don't know or understand this user, bail out.
      //cerr << "UNKNOWN USER OF GLOBAL!: " << *U;
      return false;
    }
  return true;
}

static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
  bool Changed = false;
  for (auto UI = V->user_begin(), E = V->user_end(); UI != E; ) {
    Instruction *I = cast<Instruction>(*UI++);
    if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      LI->setOperand(0, NewV);
      Changed = true;
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      if (SI->getOperand(1) == V) {
        SI->setOperand(1, NewV);
        Changed = true;
      }
    } else if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
      CallSite CS(I);
      if (CS.getCalledValue() == V) {
        // Calling through the pointer!  Turn into a direct call, but be careful
        // that the pointer is not also being passed as an argument.
        CS.setCalledFunction(NewV);
        Changed = true;
        bool PassedAsArg = false;
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.getArgument(i) == V) {
            PassedAsArg = true;
            CS.setArgument(i, NewV);
          }

        if (PassedAsArg) {
          // Being passed as an argument also.  Be careful to not invalidate UI!
          UI = V->user_begin();
        }
      }
    } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
      Changed |= OptimizeAwayTrappingUsesOfValue(CI,
                                ConstantExpr::getCast(CI->getOpcode(),
                                                      NewV, CI->getType()));
      if (CI->use_empty()) {
        Changed = true;
        CI->eraseFromParent();
      }
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
      // Should handle GEP here.
      SmallVector<Constant*, 8> Idxs;
      Idxs.reserve(GEPI->getNumOperands()-1);
      for (User::op_iterator i = GEPI->op_begin() + 1, e = GEPI->op_end();
           i != e; ++i)
        if (Constant *C = dyn_cast<Constant>(*i))
          Idxs.push_back(C);
        else
          break;
      if (Idxs.size() == GEPI->getNumOperands()-1)
        Changed |= OptimizeAwayTrappingUsesOfValue(
            GEPI, ConstantExpr::getGetElementPtr(nullptr, NewV, Idxs));
      if (GEPI->use_empty()) {
        Changed = true;
        GEPI->eraseFromParent();
      }
    }
  }

  return Changed;
}

/// OptimizeAwayTrappingUsesOfLoads - The specified global has only one non-null
/// value stored into it.  If there are uses of the loaded value that would trap
/// if the loaded value is dynamically null, then we know that they can never be
/// reached with a null loaded value, and we can optimize away the load.
static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
                                            const DataLayout &DL,
                                            TargetLibraryInfo *TLI) {
  bool Changed = false;

  // Keep track of whether we are able to remove all the uses of the global
  // other than the store that defines it.
  bool AllNonStoreUsesGone = true;

  // Replace all uses of loads with uses of uses of the stored value.
  for (Value::user_iterator GUI = GV->user_begin(), E = GV->user_end(); GUI != E;){
    User *GlobalUser = *GUI++;
    if (LoadInst *LI = dyn_cast<LoadInst>(GlobalUser)) {
      Changed |= OptimizeAwayTrappingUsesOfValue(LI, LV);
      // If we were able to delete all uses of the loads
      if (LI->use_empty()) {
        LI->eraseFromParent();
        Changed = true;
      } else {
        AllNonStoreUsesGone = false;
      }
    } else if (isa<StoreInst>(GlobalUser)) {
      // Ignore the store that stores "LV" to the global.
      assert(GlobalUser->getOperand(1) == GV &&
             "Must be storing *to* the global");
    } else {
      AllNonStoreUsesGone = false;

      // If we get here we could have other crazy uses that are transitively
      // loaded.
      assert((isa<PHINode>(GlobalUser) || isa<SelectInst>(GlobalUser) ||
              isa<ConstantExpr>(GlobalUser) || isa<CmpInst>(GlobalUser) ||
              isa<BitCastInst>(GlobalUser) ||
              isa<GetElementPtrInst>(GlobalUser)) &&
             "Only expect load and stores!");
    }
  }

  if (Changed) {
    DEBUG(dbgs() << "OPTIMIZED LOADS FROM STORED ONCE POINTER: " << *GV);
    ++NumGlobUses;
  }

  // If we nuked all of the loads, then none of the stores are needed either,
  // nor is the global.
  if (AllNonStoreUsesGone) {
    if (isLeakCheckerRoot(GV)) {
      Changed |= CleanupPointerRootUsers(GV, TLI);
    } else {
      Changed = true;
      CleanupConstantGlobalUsers(GV, nullptr, DL, TLI);
    }
    if (GV->use_empty()) {
      DEBUG(dbgs() << "  *** GLOBAL NOW DEAD!\n");
      Changed = true;
      GV->eraseFromParent();
      ++NumDeleted;
    }
  }
  return Changed;
}

/// ConstantPropUsersOf - Walk the use list of V, constant folding all of the
/// instructions that are foldable.
static void ConstantPropUsersOf(Value *V, const DataLayout &DL,
                                TargetLibraryInfo *TLI) {
  for (Value::user_iterator UI = V->user_begin(), E = V->user_end(); UI != E; )
    if (Instruction *I = dyn_cast<Instruction>(*UI++))
      if (Constant *NewC = ConstantFoldInstruction(I, DL, TLI)) {
        I->replaceAllUsesWith(NewC);

        // Advance UI to the next non-I use to avoid invalidating it!
        // Instructions could multiply use V.
        while (UI != E && *UI == I)
          ++UI;
        I->eraseFromParent();
      }
}

/// OptimizeGlobalAddressOfMalloc - This function takes the specified global
/// variable, and transforms the program as if it always contained the result of
/// the specified malloc.  Because it is always the result of the specified
/// malloc, there is no reason to actually DO the malloc.  Instead, turn the
/// malloc into a global, and any loads of GV as uses of the new global.
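///
/// Illustrative example (not from the original source): for
///   @p = internal global i32* null
///   ...@p is only ever assigned the result of a small malloc, then loaded...
/// the allocation can be replaced by a synthetic global (roughly @p.body),
/// with a companion i1 flag (@p.init) if the code still compares the loaded
/// pointer against null to test whether the allocation has happened yet.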
static GlobalVariable *
OptimizeGlobalAddressOfMalloc(GlobalVariable *GV, CallInst *CI, Type *AllocTy,
                              ConstantInt *NElements, const DataLayout &DL,
                              TargetLibraryInfo *TLI) {
  DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << "  CALL = " << *CI << '\n');

  Type *GlobalType;
  if (NElements->getZExtValue() == 1)
    GlobalType = AllocTy;
  else
    // If we have an array allocation, the global variable is of an array.
    GlobalType = ArrayType::get(AllocTy, NElements->getZExtValue());

  // Create the new global variable.  The contents of the malloc'd memory is
  // undefined, so initialize with an undef value.
  GlobalVariable *NewGV = new GlobalVariable(*GV->getParent(),
                                             GlobalType, false,
                                             GlobalValue::InternalLinkage,
                                             UndefValue::get(GlobalType),
                                             GV->getName()+".body",
                                             GV,
                                             GV->getThreadLocalMode());

  // If there are bitcast users of the malloc (which is typical, usually we have
  // a malloc + bitcast) then replace them with uses of the new global.  Update
  // other users to use the global as well.
  BitCastInst *TheBC = nullptr;
  while (!CI->use_empty()) {
    Instruction *User = cast<Instruction>(CI->user_back());
    if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
      if (BCI->getType() == NewGV->getType()) {
        BCI->replaceAllUsesWith(NewGV);
        BCI->eraseFromParent();
      } else {
        BCI->setOperand(0, NewGV);
      }
    } else {
      if (!TheBC)
        TheBC = new BitCastInst(NewGV, CI->getType(), "newgv", CI);
      User->replaceUsesOfWith(CI, TheBC);
    }
  }

  Constant *RepValue = NewGV;
  if (NewGV->getType() != GV->getType()->getElementType())
    RepValue = ConstantExpr::getBitCast(RepValue,
                                        GV->getType()->getElementType());

  // If there is a comparison against null, we will insert a global bool to
  // keep track of whether the global was initialized yet or not.
  GlobalVariable *InitBool =
    new GlobalVariable(Type::getInt1Ty(GV->getContext()), false,
                       GlobalValue::InternalLinkage,
                       ConstantInt::getFalse(GV->getContext()),
                       GV->getName()+".init", GV->getThreadLocalMode());
  bool InitBoolUsed = false;

  // Loop over all uses of GV, processing them in turn.
  while (!GV->use_empty()) {
    if (StoreInst *SI = dyn_cast<StoreInst>(GV->user_back())) {
      // The global is initialized when the store to it occurs.
      new StoreInst(ConstantInt::getTrue(GV->getContext()), InitBool, false, 0,
                    SI->getOrdering(), SI->getSynchScope(), SI);
      SI->eraseFromParent();
      continue;
    }

    LoadInst *LI = cast<LoadInst>(GV->user_back());
    while (!LI->use_empty()) {
      Use &LoadUse = *LI->use_begin();
      ICmpInst *ICI = dyn_cast<ICmpInst>(LoadUse.getUser());
      if (!ICI) {
        LoadUse = RepValue;
        continue;
      }

      // Replace the cmp X, 0 with a use of the bool value.
      // Sink the load to where the compare was, if atomic rules allow us to.
      Value *LV = new LoadInst(InitBool, InitBool->getName()+".val", false, 0,
                               LI->getOrdering(), LI->getSynchScope(),
                               LI->isUnordered() ? (Instruction*)ICI : LI);
      InitBoolUsed = true;
      switch (ICI->getPredicate()) {
      default: llvm_unreachable("Unknown ICmp Predicate!");
      case ICmpInst::ICMP_ULT:
      case ICmpInst::ICMP_SLT:   // X < null -> always false
        LV = ConstantInt::getFalse(GV->getContext());
        break;
      case ICmpInst::ICMP_ULE:
      case ICmpInst::ICMP_SLE:
      case ICmpInst::ICMP_EQ:
        LV = BinaryOperator::CreateNot(LV, "notinit", ICI);
        break;
      case ICmpInst::ICMP_NE:
      case ICmpInst::ICMP_UGE:
      case ICmpInst::ICMP_SGE:
      case ICmpInst::ICMP_UGT:
      case ICmpInst::ICMP_SGT:
        break;  // no change.
      }
      ICI->replaceAllUsesWith(LV);
      ICI->eraseFromParent();
    }
    LI->eraseFromParent();
  }

  // If the initialization boolean was used, insert it, otherwise delete it.
  if (!InitBoolUsed) {
    while (!InitBool->use_empty())  // Delete initializations
      cast<StoreInst>(InitBool->user_back())->eraseFromParent();
    delete InitBool;
  } else
    GV->getParent()->getGlobalList().insert(GV, InitBool);

  // Now the GV is dead, nuke it and the malloc..
  GV->eraseFromParent();
  CI->eraseFromParent();

  // To further other optimizations, loop over all users of NewGV and try to
  // constant prop them.  This will promote GEP instructions with constant
  // indices into GEP constant-exprs, which will allow global-opt to hack on it.
  ConstantPropUsersOf(NewGV, DL, TLI);
  if (RepValue != NewGV)
    ConstantPropUsersOf(RepValue, DL, TLI);

  return NewGV;
}

/// ValueIsOnlyUsedLocallyOrStoredToOneGlobal - Scan the use-list of V checking
/// to make sure that there are no complex uses of V.  We permit simple things
/// like dereferencing the pointer, but not storing through the address, unless
/// it is to the specified global.
static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(const Instruction *V,
                                                      const GlobalVariable *GV,
                                        SmallPtrSetImpl<const PHINode*> &PHIs) {
  for (const User *U : V->users()) {
    const Instruction *Inst = cast<Instruction>(U);

    if (isa<LoadInst>(Inst) || isa<CmpInst>(Inst)) {
      continue; // Fine, ignore.
    }

    if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      if (SI->getOperand(0) == V && SI->getOperand(1) != GV)
        return false;  // Storing the pointer itself... bad.
      continue; // Otherwise, storing through it, or storing into GV... fine.
    }

    // Must index into the array and into the struct.
    if (isa<GetElementPtrInst>(Inst) && Inst->getNumOperands() >= 3) {
      if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(Inst, GV, PHIs))
        return false;
      continue;
    }

    if (const PHINode *PN = dyn_cast<PHINode>(Inst)) {
      // PHIs are ok if all uses are ok.  Don't infinitely recurse through PHI
      // cycles.
      if (PHIs.insert(PN).second)
        if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(PN, GV, PHIs))
          return false;
      continue;
    }

    if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Inst)) {
      if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(BCI, GV, PHIs))
        return false;
      continue;
    }

    return false;
  }
  return true;
}

/// ReplaceUsesOfMallocWithGlobal - The Alloc pointer is stored into GV
/// somewhere.  Transform all uses of the allocation into loads from the
/// global and uses of the resultant pointer.  Further, delete the store into
/// GV.  This assumes that these values pass the
/// 'ValueIsOnlyUsedLocallyOrStoredToOneGlobal' predicate.
static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc,
                                          GlobalVariable *GV) {
  while (!Alloc->use_empty()) {
    Instruction *U = cast<Instruction>(*Alloc->user_begin());
    Instruction *InsertPt = U;
    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // If this is the store of the allocation into the global, remove it.
      if (SI->getOperand(1) == GV) {
        SI->eraseFromParent();
        continue;
      }
    } else if (PHINode *PN = dyn_cast<PHINode>(U)) {
      // Insert the load in the corresponding predecessor, not right before the
      // PHI.
      InsertPt = PN->getIncomingBlock(*Alloc->use_begin())->getTerminator();
    } else if (isa<BitCastInst>(U)) {
      // Must be bitcast between the malloc and store to initialize the global.
      ReplaceUsesOfMallocWithGlobal(U, GV);
      U->eraseFromParent();
      continue;
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      // If this is a "GEP bitcast" and the user is a store to the global, then
      // just process it as a bitcast.
      if (GEPI->hasAllZeroIndices() && GEPI->hasOneUse())
        if (StoreInst *SI = dyn_cast<StoreInst>(GEPI->user_back()))
          if (SI->getOperand(1) == GV) {
            // Must be bitcast GEP between the malloc and store to initialize
            // the global.
            ReplaceUsesOfMallocWithGlobal(GEPI, GV);
            GEPI->eraseFromParent();
            continue;
          }
    }

    // Insert a load from the global, and use it instead of the malloc.
    Value *NL = new LoadInst(GV, GV->getName()+".val", InsertPt);
    U->replaceUsesOfWith(Alloc, NL);
  }
}

/// LoadUsesSimpleEnoughForHeapSRA - Verify that all uses of V (a load, or a phi
/// of a load) are simple enough to perform heap SRA on.  This permits GEP's
/// that index through the array and struct field, icmps of null, and PHIs.
static bool LoadUsesSimpleEnoughForHeapSRA(const Value *V,
                              SmallPtrSetImpl<const PHINode*> &LoadUsingPHIs,
                              SmallPtrSetImpl<const PHINode*> &LoadUsingPHIsPerLoad) {
  // We permit two users of the load: setcc comparing against the null
  // pointer, and a getelementptr of a specific form.
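  //
  // Illustrative forms (not from the original source) that are accepted for a
  // loaded pointer %p:
  //   %c = icmp eq %struct.T* %p, null
  //   %f = getelementptr %struct.T, %struct.T* %p, i64 %idx, i32 1
  // plus PHIs whose uses are themselves of these forms.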
  for (const User *U : V->users()) {
    const Instruction *UI = cast<Instruction>(U);

    // Comparison against null is ok.
    if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UI)) {
      if (!isa<ConstantPointerNull>(ICI->getOperand(1)))
        return false;
      continue;
    }

    // getelementptr is also ok, but only a simple form.
    if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(UI)) {
      // Must index into the array and into the struct.
      if (GEPI->getNumOperands() < 3)
        return false;

      // Otherwise the GEP is ok.
      continue;
    }

    if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
      if (!LoadUsingPHIsPerLoad.insert(PN).second)
        // This means some phi nodes are dependent on each other.
        // Avoid infinite looping!
        return false;
      if (!LoadUsingPHIs.insert(PN).second)
        // If we have already analyzed this PHI, then it is safe.
        continue;

      // Make sure all uses of the PHI are simple enough to transform.
      if (!LoadUsesSimpleEnoughForHeapSRA(PN,
                                          LoadUsingPHIs, LoadUsingPHIsPerLoad))
        return false;

      continue;
    }

    // Otherwise we don't know what this is, not ok.
    return false;
  }

  return true;
}

/// AllGlobalLoadUsesSimpleEnoughForHeapSRA - If all users of values loaded from
/// GV are simple enough to perform HeapSRA, return true.
static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(const GlobalVariable *GV,
                                                    Instruction *StoredVal) {
  SmallPtrSet<const PHINode*, 32> LoadUsingPHIs;
  SmallPtrSet<const PHINode*, 32> LoadUsingPHIsPerLoad;
  for (const User *U : GV->users())
    if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
      if (!LoadUsesSimpleEnoughForHeapSRA(LI, LoadUsingPHIs,
                                          LoadUsingPHIsPerLoad))
        return false;
      LoadUsingPHIsPerLoad.clear();
    }

  // If we reach here, we know that all uses of the loads and transitive uses
  // (through PHI nodes) are simple enough to transform.  However, we don't know
  // that all inputs to the PHI nodes are in the same equivalence sets.
  // Check to verify that all operands of the PHIs are either PHIs that can be
  // transformed, loads from GV, or MI itself.
  for (const PHINode *PN : LoadUsingPHIs) {
    for (unsigned op = 0, e = PN->getNumIncomingValues(); op != e; ++op) {
      Value *InVal = PN->getIncomingValue(op);

      // PHI of the stored value itself is ok.
      if (InVal == StoredVal) continue;

      if (const PHINode *InPN = dyn_cast<PHINode>(InVal)) {
        // One of the PHIs in our set is (optimistically) ok.
        if (LoadUsingPHIs.count(InPN))
          continue;
        return false;
      }

      // Load from GV is ok.
      if (const LoadInst *LI = dyn_cast<LoadInst>(InVal))
        if (LI->getOperand(0) == GV)
          continue;

      // Anything else is rejected.
      return false;
    }
  }

  return true;
}

static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
              DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
                  std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
  std::vector<Value*> &FieldVals = InsertedScalarizedValues[V];

  if (FieldNo >= FieldVals.size())
    FieldVals.resize(FieldNo+1);

  // If we already have this value, just reuse the previously scalarized
  // version.
  if (Value *FieldVal = FieldVals[FieldNo])
    return FieldVal;

  // Depending on what instruction this is, we have several cases.
  Value *Result;
  if (LoadInst *LI = dyn_cast<LoadInst>(V)) {
    // This is a scalarized version of the load from the global.  Just create
    // a new Load of the scalarized global.
    Result = new LoadInst(GetHeapSROAValue(LI->getOperand(0), FieldNo,
                                           InsertedScalarizedValues,
                                           PHIsToRewrite),
                          LI->getName()+".f"+Twine(FieldNo), LI);
  } else {
    PHINode *PN = cast<PHINode>(V);
    // PN's type is pointer to struct.  Make a new PHI of pointer to struct
    // field.

    PointerType *PTy = cast<PointerType>(PN->getType());
    StructType *ST = cast<StructType>(PTy->getElementType());

    unsigned AS = PTy->getAddressSpace();
    PHINode *NewPN =
      PHINode::Create(PointerType::get(ST->getElementType(FieldNo), AS),
                      PN->getNumIncomingValues(),
                      PN->getName()+".f"+Twine(FieldNo), PN);
    Result = NewPN;
    PHIsToRewrite.push_back(std::make_pair(PN, FieldNo));
  }

  return FieldVals[FieldNo] = Result;
}

/// RewriteHeapSROALoadUser - Given a load instruction and a value derived from
/// the load, rewrite the derived value to use the HeapSRoA'd load.
static void RewriteHeapSROALoadUser(Instruction *LoadUser,
              DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
                  std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
  // If this is a comparison against null, handle it.
  if (ICmpInst *SCI = dyn_cast<ICmpInst>(LoadUser)) {
    assert(isa<ConstantPointerNull>(SCI->getOperand(1)));
    // If we have a setcc of the loaded pointer, we can use a setcc of any
    // field.
    Value *NPtr = GetHeapSROAValue(SCI->getOperand(0), 0,
                                   InsertedScalarizedValues, PHIsToRewrite);

    Value *New = new ICmpInst(SCI, SCI->getPredicate(), NPtr,
                              Constant::getNullValue(NPtr->getType()),
                              SCI->getName());
    SCI->replaceAllUsesWith(New);
    SCI->eraseFromParent();
    return;
  }

  // Handle 'getelementptr Ptr, Idx, i32 FieldNo ...'
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(LoadUser)) {
    assert(GEPI->getNumOperands() >= 3 && isa<ConstantInt>(GEPI->getOperand(2))
           && "Unexpected GEPI!");

    // Load the pointer for this field.
    unsigned FieldNo = cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();
    Value *NewPtr = GetHeapSROAValue(GEPI->getOperand(0), FieldNo,
                                     InsertedScalarizedValues, PHIsToRewrite);

    // Create the new GEP idx vector.
    SmallVector<Value*, 8> GEPIdx;
    GEPIdx.push_back(GEPI->getOperand(1));
    GEPIdx.append(GEPI->op_begin()+3, GEPI->op_end());

    Value *NGEPI = GetElementPtrInst::Create(GEPI->getResultElementType(),
                                             NewPtr, GEPIdx,
                                             GEPI->getName(), GEPI);
    GEPI->replaceAllUsesWith(NGEPI);
    GEPI->eraseFromParent();
    return;
  }

  // Recursively transform the users of PHI nodes.  This will lazily create the
  // PHIs that are needed for individual elements.  Keep track of what PHIs we
  // see in InsertedScalarizedValues so that we don't get infinite loops (very
  // antisocial).  If the PHI is already in InsertedScalarizedValues, it has
  // already been seen first by another load, so its uses have already been
  // processed.
  PHINode *PN = cast<PHINode>(LoadUser);
  if (!InsertedScalarizedValues.insert(std::make_pair(PN,
                                              std::vector<Value*>())).second)
    return;

  // If this is the first time we've seen this PHI, recursively process all
  // users.
  for (auto UI = PN->user_begin(), E = PN->user_end(); UI != E;) {
    Instruction *User = cast<Instruction>(*UI++);
    RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
  }
}

/// RewriteUsesOfLoadForHeapSRoA - We are performing Heap SRoA on a global.  Ptr
/// is a value loaded from the global.  Eliminate all uses of Ptr, making them
/// use FieldGlobals instead.  All uses of loaded values satisfy
/// AllGlobalLoadUsesSimpleEnoughForHeapSRA.
static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
              DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
                  std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
  for (auto UI = Load->user_begin(), E = Load->user_end(); UI != E;) {
    Instruction *User = cast<Instruction>(*UI++);
    RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
  }

  if (Load->use_empty()) {
    Load->eraseFromParent();
    InsertedScalarizedValues.erase(Load);
  }
}

/// PerformHeapAllocSRoA - CI is an allocation of an array of structures.  Break
/// it up into multiple allocations of arrays of the fields.
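///
/// Illustrative example (not from the original source): a single
///   %p = malloc(N * sizeof(struct { i32, i32* }))
/// stored into GV becomes two mallocs, one of N i32s and one of N i32*s,
/// stored into two new globals (roughly GV.f0 and GV.f1), and every
/// 'gep %p, %i, FieldNo' is rewritten to index the matching per-field array.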
static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
                                            Value *NElems, const DataLayout &DL,
                                            const TargetLibraryInfo *TLI) {
  DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << "  MALLOC = " << *CI << '\n');
  Type *MAT = getMallocAllocatedType(CI, TLI);
  StructType *STy = cast<StructType>(MAT);

  // There is guaranteed to be at least one use of the malloc (storing
  // it into GV).  If there are other uses, change them to be uses of
  // the global to simplify later code.  This also deletes the store
  // into GV.
  ReplaceUsesOfMallocWithGlobal(CI, GV);

  // Okay, at this point, there are no users of the malloc.  Insert N
  // new mallocs at the same place as CI, and N globals.
  std::vector<Value*> FieldGlobals;
  std::vector<Value*> FieldMallocs;

  unsigned AS = GV->getType()->getPointerAddressSpace();
  for (unsigned FieldNo = 0, e = STy->getNumElements(); FieldNo != e;++FieldNo){
    Type *FieldTy = STy->getElementType(FieldNo);
    PointerType *PFieldTy = PointerType::get(FieldTy, AS);

    GlobalVariable *NGV =
      new GlobalVariable(*GV->getParent(),
                         PFieldTy, false, GlobalValue::InternalLinkage,
                         Constant::getNullValue(PFieldTy),
                         GV->getName() + ".f" + Twine(FieldNo), GV,
                         GV->getThreadLocalMode());
    FieldGlobals.push_back(NGV);

    unsigned TypeSize = DL.getTypeAllocSize(FieldTy);
    if (StructType *ST = dyn_cast<StructType>(FieldTy))
      TypeSize = DL.getStructLayout(ST)->getSizeInBytes();
    Type *IntPtrTy = DL.getIntPtrType(CI->getType());
    Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy,
                                        ConstantInt::get(IntPtrTy, TypeSize),
                                        NElems, nullptr,
                                        CI->getName() + ".f" + Twine(FieldNo));
    FieldMallocs.push_back(NMI);
    new StoreInst(NMI, NGV, CI);
  }

  // The tricky aspect of this transformation is handling the case when malloc
  // fails.  In the original code, malloc failing would set the result pointer
  // of malloc to null.  In this case, some mallocs could succeed and others
  // could fail.  As such, we emit code that looks like this:
  //    F0 = malloc(field0)
  //    F1 = malloc(field1)
  //    F2 = malloc(field2)
  //    if (F0 == 0 || F1 == 0 || F2 == 0) {
  //      if (F0) { free(F0); F0 = 0; }
  //      if (F1) { free(F1); F1 = 0; }
  //      if (F2) { free(F2); F2 = 0; }
  //    }
  // The malloc can also fail if its argument is too large.
  Constant *ConstantZero = ConstantInt::get(CI->getArgOperand(0)->getType(), 0);
  Value *RunningOr = new ICmpInst(CI, ICmpInst::ICMP_SLT, CI->getArgOperand(0),
                                  ConstantZero, "isneg");
  for (unsigned i = 0, e = FieldMallocs.size(); i != e; ++i) {
    Value *Cond = new ICmpInst(CI, ICmpInst::ICMP_EQ, FieldMallocs[i],
                             Constant::getNullValue(FieldMallocs[i]->getType()),
                               "isnull");
    RunningOr = BinaryOperator::CreateOr(RunningOr, Cond, "tmp", CI);
  }

  // Split the basic block at the old malloc.
  BasicBlock *OrigBB = CI->getParent();
  BasicBlock *ContBB = OrigBB->splitBasicBlock(CI, "malloc_cont");

  // Create the block to check the first condition.  Put all these blocks at the
  // end of the function as they are unlikely to be executed.
  BasicBlock *NullPtrBlock = BasicBlock::Create(OrigBB->getContext(),
                                                "malloc_ret_null",
                                                OrigBB->getParent());

  // Remove the uncond branch from OrigBB to ContBB, turning it into a cond
  // branch on RunningOr.
  OrigBB->getTerminator()->eraseFromParent();
  BranchInst::Create(NullPtrBlock, ContBB, RunningOr, OrigBB);

  // Within the NullPtrBlock, we need to emit a comparison and branch for each
  // pointer, because some may be null while others are not.
  for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
    Value *GVVal = new LoadInst(FieldGlobals[i], "tmp", NullPtrBlock);
    Value *Cmp = new ICmpInst(*NullPtrBlock, ICmpInst::ICMP_NE, GVVal,
                              Constant::getNullValue(GVVal->getType()));
    BasicBlock *FreeBlock = BasicBlock::Create(Cmp->getContext(), "free_it",
                                               OrigBB->getParent());
    BasicBlock *NextBlock = BasicBlock::Create(Cmp->getContext(), "next",
                                               OrigBB->getParent());
    Instruction *BI = BranchInst::Create(FreeBlock, NextBlock,
                                         Cmp, NullPtrBlock);

    // Fill in FreeBlock.
    CallInst::CreateFree(GVVal, BI);
    new StoreInst(Constant::getNullValue(GVVal->getType()), FieldGlobals[i],
                  FreeBlock);
    BranchInst::Create(NextBlock, FreeBlock);

    NullPtrBlock = NextBlock;
  }

  BranchInst::Create(ContBB, NullPtrBlock);

  // CI is no longer needed, remove it.
  CI->eraseFromParent();

  /// InsertedScalarizedLoads - As we process loads, if we can't immediately
  /// update all uses of the load, keep track of what scalarized loads are
  /// inserted for a given load.
  DenseMap<Value*, std::vector<Value*> > InsertedScalarizedValues;
  InsertedScalarizedValues[GV] = FieldGlobals;

  std::vector<std::pair<PHINode*, unsigned> > PHIsToRewrite;

  // Okay, the malloc site is completely handled.  All of the uses of GV are now
  // loads, and all uses of those loads are simple.  Rewrite them to use loads
  // of the per-field globals instead.
  for (auto UI = GV->user_begin(), E = GV->user_end(); UI != E;) {
    Instruction *User = cast<Instruction>(*UI++);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      RewriteUsesOfLoadForHeapSRoA(LI, InsertedScalarizedValues, PHIsToRewrite);
      continue;
    }

    // Must be a store of null.
    StoreInst *SI = cast<StoreInst>(User);
    assert(isa<ConstantPointerNull>(SI->getOperand(0)) &&
           "Unexpected heap-sra user!");

    // Insert a store of null into each global.
    for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
      PointerType *PT = cast<PointerType>(FieldGlobals[i]->getType());
      Constant *Null = Constant::getNullValue(PT->getElementType());
      new StoreInst(Null, FieldGlobals[i], SI);
    }
    // Erase the original store.
    SI->eraseFromParent();
  }

  // While we have PHIs that are interesting to rewrite, do it.
  while (!PHIsToRewrite.empty()) {
    PHINode *PN = PHIsToRewrite.back().first;
    unsigned FieldNo = PHIsToRewrite.back().second;
    PHIsToRewrite.pop_back();
    PHINode *FieldPN = cast<PHINode>(InsertedScalarizedValues[PN][FieldNo]);
    assert(FieldPN->getNumIncomingValues() == 0 &&"Already processed this phi");

    // Add all the incoming values.  This can materialize more phis.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *InVal = PN->getIncomingValue(i);
      InVal = GetHeapSROAValue(InVal, FieldNo, InsertedScalarizedValues,
                               PHIsToRewrite);
      FieldPN->addIncoming(InVal, PN->getIncomingBlock(i));
    }
  }

  // Drop all inter-phi links and any loads that made it this far.
  for (DenseMap<Value*, std::vector<Value*> >::iterator
       I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
       I != E; ++I) {
    if (PHINode *PN = dyn_cast<PHINode>(I->first))
      PN->dropAllReferences();
    else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
      LI->dropAllReferences();
  }

  // Delete all the phis and loads now that inter-references are dead.
  for (DenseMap<Value*, std::vector<Value*> >::iterator
       I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
       I != E; ++I) {
    if (PHINode *PN = dyn_cast<PHINode>(I->first))
      PN->eraseFromParent();
    else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
      LI->eraseFromParent();
  }

  // The old global is now dead, remove it.
  GV->eraseFromParent();

  return cast<GlobalVariable>(FieldGlobals[0]);
}

/// TryToOptimizeStoreOfMallocToGlobal - This function is called when we see a
/// pointer global variable with a single value stored into it that is a malloc
/// or cast of malloc.
static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV, CallInst *CI,
                                               Type *AllocTy,
                                               AtomicOrdering Ordering,
                                               Module::global_iterator &GVI,
                                               const DataLayout &DL,
                                               TargetLibraryInfo *TLI) {
  // If this is a malloc of an abstract type, don't touch it.
  if (!AllocTy->isSized())
    return false;

  // We can't optimize this global unless all uses of it are *known* to be
  // of the malloc value, not of the null initializer value (consider a use
  // that compares the global's value against zero to see if the malloc has
  // been reached).  To do this, we check to see if all uses of the global
  // would trap if the global were null: this proves that they must all
  // happen after the malloc.
  if (!AllUsesOfLoadedValueWillTrapIfNull(GV))
    return false;

  // We can't optimize this if the malloc itself is used in a complex way,
  // for example, being stored into multiple globals.  This allows the
  // malloc to be stored into the specified global, loaded icmp'd, and
  // GEP'd.  These are all things we could transform to using the global
  // for.
  SmallPtrSet<const PHINode*, 8> PHIs;
  if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(CI, GV, PHIs))
    return false;

  // If we have a global that is only initialized with a fixed size malloc,
  // transform the program to use global memory instead of malloc'd memory.
  // This eliminates dynamic allocation, avoids an indirection accessing the
  // data, and exposes the resultant global to further GlobalOpt.
  // We cannot optimize the malloc if we cannot determine malloc array size.
  Value *NElems = getMallocArraySize(CI, DL, TLI, true);
  if (!NElems)
    return false;

  if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
    // Restrict this transformation to only working on small allocations
    // (2048 bytes currently), as we don't want to introduce a 16M global or
    // something.
    if (NElements->getZExtValue() * DL.getTypeAllocSize(AllocTy) < 2048) {
      GVI = OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, DL, TLI);
      return true;
    }

  // If the allocation is an array of structures, consider transforming this
  // into multiple malloc'd arrays, one for each field.  This is basically
  // SRoA for malloc'd memory.

  if (Ordering != NotAtomic)
    return false;

  // If this is an allocation of a fixed size array of structs, analyze as a
  // variable size array.  malloc [100 x struct],1 -> malloc struct, 100
  if (NElems == ConstantInt::get(CI->getArgOperand(0)->getType(), 1))
    if (ArrayType *AT = dyn_cast<ArrayType>(AllocTy))
      AllocTy = AT->getElementType();

  StructType *AllocSTy = dyn_cast<StructType>(AllocTy);
  if (!AllocSTy)
    return false;

  // If this structure has an unreasonable number of fields, leave it
  // alone.
  if (AllocSTy->getNumElements() <= 16 && AllocSTy->getNumElements() != 0 &&
      AllGlobalLoadUsesSimpleEnoughForHeapSRA(GV, CI)) {

    // If this is a fixed size array, transform the Malloc to be an alloc of
    // structs.  malloc [100 x struct],1 -> malloc struct, 100
    if (ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI, TLI))) {
      Type *IntPtrTy = DL.getIntPtrType(CI->getType());
      unsigned TypeSize = DL.getStructLayout(AllocSTy)->getSizeInBytes();
      Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize);
      Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements());
      Instruction *Malloc = CallInst::CreateMalloc(CI, IntPtrTy, AllocSTy,
                                                   AllocSize, NumElements,
                                                   nullptr, CI->getName());
      Instruction *Cast = new BitCastInst(Malloc, CI->getType(), "tmp", CI);
      CI->replaceAllUsesWith(Cast);
      CI->eraseFromParent();
      if (BitCastInst *BCI = dyn_cast<BitCastInst>(Malloc))
        CI = cast<CallInst>(BCI->getOperand(0));
      else
        CI = cast<CallInst>(Malloc);
    }

    GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, DL, TLI, true),
                               DL, TLI);
    return true;
  }

  return false;
}

// OptimizeOnceStoredGlobal - Try to optimize globals based on the knowledge
// that only one value (besides its initializer) is ever stored to the global.
static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
                                     AtomicOrdering Ordering,
                                     Module::global_iterator &GVI,
                                     const DataLayout &DL,
                                     TargetLibraryInfo *TLI) {
  // Ignore no-op GEPs and bitcasts.
  StoredOnceVal = StoredOnceVal->stripPointerCasts();

  // If we are dealing with a pointer global that is initialized to null and
  // only has one (non-null) value stored into it, then we can optimize any
  // users of the loaded value (often calls and loads) that would trap if the
  // loaded value is dynamically null.
  if (GV->getInitializer()->getType()->isPointerTy() &&
      GV->getInitializer()->isNullValue()) {
    if (Constant *SOVC = dyn_cast<Constant>(StoredOnceVal)) {
      if (GV->getInitializer()->getType() != SOVC->getType())
        SOVC = ConstantExpr::getBitCast(SOVC, GV->getInitializer()->getType());

      // Optimize away any trapping uses of the loaded value.
      if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, DL, TLI))
        return true;
    } else if (CallInst *CI = extractMallocCall(StoredOnceVal, TLI)) {
      Type *MallocType = getMallocAllocatedType(CI, TLI);
      if (MallocType &&
          TryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType, Ordering, GVI,
                                             DL, TLI))
        return true;
    }
  }

  return false;
}
1590 /// TryToShrinkGlobalToBoolean - At this point, we have learned that the only
1591 /// two values ever stored into GV are its initializer and OtherVal. See if we
1592 /// can shrink the global into a boolean and select between the two values
1593 /// whenever it is used. This exposes the values to other scalar optimizations.
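// Illustrative example (assuming an i32 global with initializer 0 and
// OtherVal 42):
//   @G = internal global i32 0    ==>   @G = internal global i1 false
//   store i32 42, i32* @G                store i1 true, i1* @G
//   %v = load i32, i32* @G               %b = load i1, i1* @G
//                                        %v = select i1 %b, i32 42, i32 0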
1594 static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
1595 Type *GVElType = GV->getType()->getElementType();
1597 // If GVElType is already i1, it is already shrunk. If the type of the GV is
1598 // an FP value, pointer or vector, don't do this optimization because a select
1599 // between them is very expensive and unlikely to lead to later
1600 // simplification. In these cases, we typically end up with "cond ? v1 : v2"
1601 // where v1 and v2 both require constant pool loads, a big loss.
1602 if (GVElType == Type::getInt1Ty(GV->getContext()) ||
1603 GVElType->isFloatingPointTy() ||
1604 GVElType->isPointerTy() || GVElType->isVectorTy())
1605 return false;
1607 // Walk the use list of the global seeing if all the uses are load or store.
1608 // If there is anything else, bail out.
1609 for (User *U : GV->users())
1610 if (!isa<LoadInst>(U) && !isa<StoreInst>(U))
1611 return false;
1613 DEBUG(dbgs() << " *** SHRINKING TO BOOL: " << *GV);
1615 // Create the new global, initializing it to false.
1616 GlobalVariable *NewGV = new GlobalVariable(Type::getInt1Ty(GV->getContext()),
1618 GlobalValue::InternalLinkage,
1619 ConstantInt::getFalse(GV->getContext()),
1621 GV->getThreadLocalMode(),
1622 GV->getType()->getAddressSpace());
1623 GV->getParent()->getGlobalList().insert(GV, NewGV);
1625 Constant *InitVal = GV->getInitializer();
1626 assert(InitVal->getType() != Type::getInt1Ty(GV->getContext()) &&
1627 "No reason to shrink to bool!");
1629 // If initialized to zero and storing one into the global, we can use a cast
1630 // instead of a select to synthesize the desired value.
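// For example, an i32 global initialized to 0 whose only other stored value is
// i32 1 reloads as "zext i1 %b to i32" rather than a select.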
1631 bool IsOneZero = false;
1632 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal))
1633 IsOneZero = InitVal->isNullValue() && CI->isOne();
1635 while (!GV->use_empty()) {
1636 Instruction *UI = cast<Instruction>(GV->user_back());
1637 if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
1638 // Change the store into a boolean store.
1639 bool StoringOther = SI->getOperand(0) == OtherVal;
1640 // Only do this if we weren't storing a loaded value.
1641 Value *StoreVal;
1642 if (StoringOther || SI->getOperand(0) == InitVal) {
1643 StoreVal = ConstantInt::get(Type::getInt1Ty(GV->getContext()),
1644 StoringOther);
1645 } else {
1646 // Otherwise, we are storing a previously loaded copy. To do this,
1647 // change the copy from copying the original value to just copying the
1649 Instruction *StoredVal = cast<Instruction>(SI->getOperand(0));
1651 // If we've already replaced the input, StoredVal will be a cast or
1652 // select instruction. If not, it will be a load of the original
1654 if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
1655 assert(LI->getOperand(0) == GV && "Not a copy!");
1656 // Insert a new load, to preserve the saved value.
1657 StoreVal = new LoadInst(NewGV, LI->getName()+".b", false, 0,
1658 LI->getOrdering(), LI->getSynchScope(), LI);
1660 assert((isa<CastInst>(StoredVal) || isa<SelectInst>(StoredVal)) &&
1661 "This is not a form that we understand!");
1662 StoreVal = StoredVal->getOperand(0);
1663 assert(isa<LoadInst>(StoreVal) && "Not a load of NewGV!");
1666 new StoreInst(StoreVal, NewGV, false, 0,
1667 SI->getOrdering(), SI->getSynchScope(), SI);
1669 // Change the load into a load of bool then a select.
1670 LoadInst *LI = cast<LoadInst>(UI);
1671 LoadInst *NLI = new LoadInst(NewGV, LI->getName()+".b", false, 0,
1672 LI->getOrdering(), LI->getSynchScope(), LI);
1673 Value *NSI;
1674 if (IsOneZero)
1675 NSI = new ZExtInst(NLI, LI->getType(), "", LI);
1676 else
1677 NSI = SelectInst::Create(NLI, OtherVal, InitVal, "", LI);
1679 LI->replaceAllUsesWith(NSI);
1681 UI->eraseFromParent();
1684 // Retain the name of the old global variable. People who are debugging their
1685 // programs may expect these variables to be named the same.
1686 NewGV->takeName(GV);
1687 GV->eraseFromParent();
1688 return true;
1692 /// ProcessGlobal - Analyze the specified global variable and optimize it if
1693 /// possible. If we make a change, return true.
1694 bool GlobalOpt::ProcessGlobal(GlobalVariable *GV,
1695 Module::global_iterator &GVI) {
1696 // Do more involved optimizations if the global is internal.
1697 GV->removeDeadConstantUsers();
1699 if (GV->use_empty()) {
1700 DEBUG(dbgs() << "GLOBAL DEAD: " << *GV);
1701 GV->eraseFromParent();
1706 if (!GV->hasLocalLinkage())
1707 return false;
1709 GlobalStatus GS;
1711 if (GlobalStatus::analyzeGlobal(GV, GS))
1712 return false;
1714 if (!GS.IsCompared && !GV->hasUnnamedAddr()) {
1715 GV->setUnnamedAddr(true);
1716 NumUnnamed++;
1719 if (GV->isConstant() || !GV->hasInitializer())
1720 return false;
1722 return ProcessInternalGlobal(GV, GVI, GS);
1725 /// ProcessInternalGlobal - Analyze the specified global variable and optimize
1726 /// it if possible. If we make a change, return true.
1727 bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
1728 Module::global_iterator &GVI,
1729 const GlobalStatus &GS) {
1730 auto &DL = GV->getParent()->getDataLayout();
1731 // If this is a first class global and has only one accessing function
1732 // and this function is main (which we know is not recursive), we replace
1733 // the global with a local alloca in this function.
1735 // NOTE: It doesn't make sense to promote non-single-value types since we
1736 // are just replacing static memory to stack memory.
1738 // If the global is in different address space, don't bring it to stack.
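// For example, an internal scalar global that is only read and written from
// within main() becomes an alloca in main's entry block, seeded with a store
// of the old initializer.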
1739 if (!GS.HasMultipleAccessingFunctions &&
1740 GS.AccessingFunction && !GS.HasNonInstructionUser &&
1741 GV->getType()->getElementType()->isSingleValueType() &&
1742 GS.AccessingFunction->getName() == "main" &&
1743 GS.AccessingFunction->hasExternalLinkage() &&
1744 GV->getType()->getAddressSpace() == 0) {
1745 DEBUG(dbgs() << "LOCALIZING GLOBAL: " << *GV);
1746 Instruction &FirstI = const_cast<Instruction&>(*GS.AccessingFunction
1747 ->getEntryBlock().begin());
1748 Type *ElemTy = GV->getType()->getElementType();
1749 // FIXME: Pass Global's alignment when globals have alignment
1750 AllocaInst *Alloca = new AllocaInst(ElemTy, nullptr,
1751 GV->getName(), &FirstI);
1752 if (!isa<UndefValue>(GV->getInitializer()))
1753 new StoreInst(GV->getInitializer(), Alloca, &FirstI);
1755 GV->replaceAllUsesWith(Alloca);
1756 GV->eraseFromParent();
1757 ++NumLocalized;
1758 return true;
1761 // If the global is never loaded (but may be stored to), it is dead.
1762 // Delete it now.
1763 if (!GS.IsLoaded) {
1764 DEBUG(dbgs() << "GLOBAL NEVER LOADED: " << *GV);
1766 bool Changed;
1767 if (isLeakCheckerRoot(GV)) {
1768 // Delete any constant stores to the global.
1769 Changed = CleanupPointerRootUsers(GV, TLI);
1770 } else {
1771 // Delete any stores we can find to the global. We may not be able to
1772 // make it completely dead though.
1773 Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI);
1776 // If the global is dead now, delete it.
1777 if (GV->use_empty()) {
1778 GV->eraseFromParent();
1779 ++NumDeleted;
1780 Changed = true;
1782 return Changed;
1784 } else if (GS.StoredType <= GlobalStatus::InitializerStored) {
1785 DEBUG(dbgs() << "MARKING CONSTANT: " << *GV << "\n");
1786 GV->setConstant(true);
1787 ++NumMarked;
1788 // Clean up any obviously simplifiable users now.
1789 CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI);
1791 // If the global is dead now, just nuke it.
1792 if (GV->use_empty()) {
1793 DEBUG(dbgs() << " *** Marking constant allowed us to simplify "
1794 << "all users and delete global!\n");
1795 GV->eraseFromParent();
1801 } else if (!GV->getInitializer()->getType()->isSingleValueType()) {
1802 const DataLayout &DL = GV->getParent()->getDataLayout();
1803 if (GlobalVariable *FirstNewGV = SRAGlobal(GV, DL)) {
1804 GVI = FirstNewGV; // Don't skip the newly produced globals!
1805 return true;
1807 } else if (GS.StoredType == GlobalStatus::StoredOnce) {
1808 // If the initial value for the global was an undef value, and if only
1809 // one other value was stored into it, we can just change the
1810 // initializer to be the stored value, then delete all stores to the
1811 // global. This allows us to mark it constant.
1812 if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue))
1813 if (isa<UndefValue>(GV->getInitializer())) {
1814 // Change the initial value here.
1815 GV->setInitializer(SOVConstant);
1817 // Clean up any obviously simplifiable users now.
1818 CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI);
1820 if (GV->use_empty()) {
1821 DEBUG(dbgs() << " *** Substituting initializer allowed us to "
1822 << "simplify all users and delete global!\n");
1823 GV->eraseFromParent();
1832 // Try to optimize globals based on the knowledge that only one value
1833 // (besides its initializer) is ever stored to the global.
1834 if (OptimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GS.Ordering, GVI,
1835 DL, TLI))
1836 return true;
1838 // Otherwise, if the global was not a boolean, we can shrink it to be a
1839 // boolean.
1840 if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue)) {
1841 if (GS.Ordering == NotAtomic) {
1842 if (TryToShrinkGlobalToBoolean(GV, SOVConstant)) {
1853 /// ChangeCalleesToFastCall - Walk all of the direct calls of the specified
1854 /// function, changing them to FastCC.
1855 static void ChangeCalleesToFastCall(Function *F) {
1856 for (User *U : F->users()) {
1857 if (isa<BlockAddress>(U))
1858 continue;
1859 CallSite CS(cast<Instruction>(U));
1860 CS.setCallingConv(CallingConv::Fast);
1864 static AttributeSet StripNest(LLVMContext &C, const AttributeSet &Attrs) {
1865 for (unsigned i = 0, e = Attrs.getNumSlots(); i != e; ++i) {
1866 unsigned Index = Attrs.getSlotIndex(i);
1867 if (!Attrs.getSlotAttributes(i).hasAttribute(Index, Attribute::Nest))
1868 continue;
1870 // There can be only one.
1871 return Attrs.removeAttribute(C, Index, Attribute::Nest);
1877 static void RemoveNestAttribute(Function *F) {
1878 F->setAttributes(StripNest(F->getContext(), F->getAttributes()));
1879 for (User *U : F->users()) {
1880 if (isa<BlockAddress>(U))
1881 continue;
1882 CallSite CS(cast<Instruction>(U));
1883 CS.setAttributes(StripNest(F->getContext(), CS.getAttributes()));
1887 /// Return true if this is a calling convention that we'd like to change. The
1888 /// idea here is that we don't want to mess with the convention if the user
1889 /// explicitly requested something with performance implications like coldcc,
1890 /// GHC, or anyregcc.
1891 static bool isProfitableToMakeFastCC(Function *F) {
1892 CallingConv::ID CC = F->getCallingConv();
1893 // FIXME: Is it worth transforming x86_stdcallcc and x86_fastcallcc?
1894 return CC == CallingConv::C || CC == CallingConv::X86_ThisCall;
1897 bool GlobalOpt::OptimizeFunctions(Module &M) {
1898 bool Changed = false;
1899 // Optimize functions.
1900 for (Module::iterator FI = M.begin(), E = M.end(); FI != E; ) {
1901 Function *F = FI++;
1902 // Functions without names cannot be referenced outside this module.
1903 if (!F->hasName() && !F->isDeclaration() && !F->hasLocalLinkage())
1904 F->setLinkage(GlobalValue::InternalLinkage);
1906 const Comdat *C = F->getComdat();
1907 bool inComdat = C && NotDiscardableComdats.count(C);
1908 F->removeDeadConstantUsers();
1909 if ((!inComdat || F->hasLocalLinkage()) && F->isDefTriviallyDead()) {
1910 F->eraseFromParent();
1913 } else if (F->hasLocalLinkage()) {
1914 if (isProfitableToMakeFastCC(F) && !F->isVarArg() &&
1915 !F->hasAddressTaken()) {
1916 // If this function has a calling convention worth changing, is not a
1917 // varargs function, and is only called directly, promote it to use the
1918 // Fast calling convention.
1919 F->setCallingConv(CallingConv::Fast);
1920 ChangeCalleesToFastCall(F);
1925 if (F->getAttributes().hasAttrSomewhere(Attribute::Nest) &&
1926 !F->hasAddressTaken()) {
1927 // The function is not used by a trampoline intrinsic, so it is safe
1928 // to remove the 'nest' attribute.
1929 RemoveNestAttribute(F);
1938 bool GlobalOpt::OptimizeGlobalVars(Module &M) {
1939 bool Changed = false;
1941 for (Module::global_iterator GVI = M.global_begin(), E = M.global_end();
1942 GVI != E; ) {
1943 GlobalVariable *GV = GVI++;
1944 // Global variables without names cannot be referenced outside this module.
1945 if (!GV->hasName() && !GV->isDeclaration() && !GV->hasLocalLinkage())
1946 GV->setLinkage(GlobalValue::InternalLinkage);
1947 // Simplify the initializer.
1948 if (GV->hasInitializer())
1949 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GV->getInitializer())) {
1950 auto &DL = M.getDataLayout();
1951 Constant *New = ConstantFoldConstantExpression(CE, DL, TLI);
1952 if (New && New != CE)
1953 GV->setInitializer(New);
1956 if (GV->isDiscardableIfUnused()) {
1957 if (const Comdat *C = GV->getComdat())
1958 if (NotDiscardableComdats.count(C) && !GV->hasLocalLinkage())
1959 continue;
1960 Changed |= ProcessGlobal(GV, GVI);
1968 /// Sorts GEP expressions in ascending order by their indexes.
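// For example, GEP constant expressions into the same global compare as
//   (@G, 0, 1)  <  (@G, 0, 1, 2)  <  (@G, 0, 2)
// so a store to an element sorts before stores into its sub-elements.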
1969 struct GEPComparator {
1970 bool operator()(GEPOperator *A, GEPOperator *B) const {
1971 int NumOpA = A->getNumOperands();
1972 int NumOpB = B->getNumOperands();
1974 // Globals are always pointers, the first index should be 0.
1975 assert(cast<ConstantInt>(A->getOperand(1))->isZero() &&
1976 "GEP A steps over object");
1977 assert(cast<ConstantInt>(B->getOperand(1))->isZero() &&
1978 "GEP B steps over object");
1980 for (int i = 2; i < NumOpA && i < NumOpB; i++) {
1981 ConstantInt *IndexA = cast<ConstantInt>(A->getOperand(i));
1982 ConstantInt *IndexB = cast<ConstantInt>(B->getOperand(i));
1984 if (IndexA->getZExtValue() < IndexB->getZExtValue()) {
1989 return NumOpA < NumOpB;
1993 typedef std::map<GEPOperator *, Constant *, GEPComparator> StoreMap;
1995 /// MutatedGlobal - Holds mutations for a global. If a store overwrites
1996 /// the entire global, Initializer is updated with the new value. If a store
1997 /// writes to a GEP of a global, the store is instead added to the Pending
1998 /// map to be merged later during MergePendingStores.
1999 struct MutatedGlobal {
2001 Constant *Initializer;
2005 MutatedGlobal(GlobalVariable *GV) : GV(GV), Initializer(nullptr) {}
2008 /// MutatedGlobals - This class tracks and commits stores to globals as basic
2009 /// blocks are evaluated.
2010 class MutatedGlobals {
2011 DenseMap<GlobalVariable *, MutatedGlobal> Globals;
2012 typedef DenseMap<GlobalVariable *, MutatedGlobal>::const_iterator
2015 GlobalVariable *GetGlobalForPointer(Constant *Ptr) {
2016 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) {
2020 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) {
2021 if (CE->getOpcode() == Instruction::GetElementPtr) {
2022 return cast<GlobalVariable>(CE->getOperand(0));
2029 Constant *MergePendingStores(Constant *Init, StoreMap &Pending,
2030 uint64_t CurrentIdx, unsigned OpNum);
2033 const_iterator begin() const { return Globals.begin(); }
2034 const_iterator end() const { return Globals.end(); }
2035 size_t size() const { return Globals.size(); }
2037 void AddStore(Constant *Ptr, Constant *Value);
2038 Constant *LookupStore(Constant *Ptr);
2040 void Commit(MutatedGlobal &MG);
2044 /// AddStore - Add store for the global variable referenced by Ptr.
2045 /// Currently, it's assumed that the incoming pointer is either the global
2046 /// variable itself, or a GEP expression referencing the global.
2047 void MutatedGlobals::AddStore(Constant *Ptr, Constant *Value) {
2048 GlobalVariable *GV = GetGlobalForPointer(Ptr);
2049 assert(GV && "Failed to resolve global for pointer");
2051 auto I = Globals.find(GV);
2052 if (I == Globals.end()) {
2053 auto R = Globals.insert(std::make_pair(GV, MutatedGlobal(GV)));
2054 assert(R.second && "Global value already in the map?");
2058 MutatedGlobal &MG = I->second;
2061 MG.Initializer = Value;
2062 // Pending stores are no longer valid.
2064 } else if (GEPOperator *GEPOp = dyn_cast<GEPOperator>(Ptr)) {
2065 MG.Pending[GEPOp] = Value;
2067 llvm_unreachable("Unexpected address type");
2071 Constant *MutatedGlobals::LookupStore(Constant *Ptr) {
2072 GlobalVariable *GV = GetGlobalForPointer(Ptr);
2077 auto I = Globals.find(GV);
2078 if (I == Globals.end()) {
2082 MutatedGlobal &MG = I->second;
2085 if (MG.Initializer) {
2086 // If there are any pending stores, Initializer isn't valid, it would
2087 // need them merged in first. This situation currently doesn't occur
2088 // due to isSimpleEnoughPointerToCommit / isSimpleEnoughValueToCommit
2089 // not letting stores for aggregate types pass through. If this needs
2090 // to be supported, calling Commit() at this point should do the trick.
2091 assert(MG.Pending.empty() &&
2092 "Can't use pending initializer without merging pending stores.");
2093 return MG.Initializer;
2095 } else if (GEPOperator *GEPOp = dyn_cast<GEPOperator>(Ptr)) {
2096 auto SI = MG.Pending.find(GEPOp);
2097 if (SI != MG.Pending.end()) {
2105 /// MergePendingStores - Recursively merge stores to a global variable into its
2106 /// initializer. Merging any number of stores into the initializer requires
2107 /// cloning the entire initializer, so stores are batched up during evaluation
2108 /// and processed all at once.
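// For example, a pending store of i32 7 to getelementptr (@G, 0, 1, 2) causes
// element 1 of the cloned initializer to be rebuilt with its own element 2
// replaced by i32 7, while all sibling elements are copied unchanged.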
2109 Constant *MutatedGlobals::MergePendingStores(Constant *Init, StoreMap &Pending,
2110 uint64_t CurrentIdx,
2112 if (Pending.empty()) {
2113 // Nothing left to merge.
2117 // If the GEP expression has been traversed completely, terminate.
2118 auto It = Pending.begin();
2119 GEPOperator *GEP = It->first;
2121 if (OpNum >= GEP->getNumOperands()) {
2122 Constant *Val = It->second;
2123 assert(Val->getType() == Init->getType() && "Type mismatch!");
2125 // Move on to the next expression.
2126 Pending.erase(It++);
2131 // Clone the existing initializer so it can be merged into.
2132 Type *InitTy = Init->getType();
2133 ArrayType *ATy = dyn_cast<ArrayType>(InitTy);
2134 StructType *STy = dyn_cast<StructType>(InitTy);
2135 VectorType *VTy = dyn_cast<VectorType>(InitTy);
2139 NumElts = ATy->getNumElements();
2141 NumElts = STy->getNumElements();
2143 NumElts = VTy->getNumElements();
2145 llvm_unreachable("Unexpected initializer type");
2148 SmallVector<Constant *, 32> Elts;
2149 for (unsigned i = 0; i < NumElts; ++i) {
2150 Elts.push_back(Init->getAggregateElement(i));
2153 // Iterate over the sorted stores, merging all stores for the current GEP
2155 while (!Pending.empty()) {
2156 It = Pending.begin();
2159 // If the store doesn't belong to the current index, we're done.
2160 ConstantInt *CI = cast<ConstantInt>(GEP->getOperand(OpNum - 1));
2161 uint64_t Idx = CI->getZExtValue();
2162 if (Idx != CurrentIdx) {
2166 // Recurse into the next index.
2167 CI = cast<ConstantInt>(GEP->getOperand(OpNum));
2168 Idx = CI->getZExtValue();
2169 assert(Idx < NumElts && "GEP index out of range!");
2170 Elts[Idx] = MergePendingStores(Elts[Idx], Pending, Idx, OpNum + 1);
2174 return ConstantArray::get(ATy, Elts);
2176 return ConstantStruct::get(STy, Elts);
2178 return ConstantVector::get(Elts);
2180 llvm_unreachable("Unexpected initializer type");
2186 /// Commit - We have decided that stores to the global (which satisfy the
2187 /// predicate isSimpleEnoughPointerToCommit) should be committed.
2188 void MutatedGlobals::Commit(MutatedGlobal &MG) {
2189 Constant *Init = MG.Initializer ? MG.Initializer : MG.GV->getInitializer();
2191 // Globals are always pointers, skip first GEP index assuming it's 0.
2192 Init = MergePendingStores(Init, MG.Pending, 0, 2);
2194 // Reset pending state.
2195 MG.Initializer = nullptr;
2196 assert(MG.Pending.empty() &&
2197 "Expected pending stores to be empty after merging");
2199 MG.GV->setInitializer(Init);
2203 static bool
2204 isSimpleEnoughValueToCommit(Constant *C,
2205 SmallPtrSetImpl<Constant *> &SimpleConstants,
2206 const DataLayout &DL);
2208 /// isSimpleEnoughValueToCommit - Return true if the specified constant can be
2209 /// handled by the code generator. We don't want to generate something like:
2210 /// void *X = &X/42;
2211 /// because the code generator doesn't have a relocation that can handle that.
2213 /// This function should be called if C was not found (but just got inserted)
2214 /// in SimpleConstants to avoid having to rescan the same constants all the
2215 /// time.
2216 static bool
2217 isSimpleEnoughValueToCommitHelper(Constant *C,
2218 SmallPtrSetImpl<Constant *> &SimpleConstants,
2219 const DataLayout &DL) {
2220 // Simple global addresses are supported, do not allow dllimport or
2221 // thread-local globals.
2222 if (auto *GV = dyn_cast<GlobalValue>(C))
2223 return !GV->hasDLLImportStorageClass() && !GV->isThreadLocal();
2225 // Simple integer, undef, constant aggregate zero, etc are all supported.
2226 if (C->getNumOperands() == 0 || isa<BlockAddress>(C))
2227 return true;
2229 // Aggregate values are safe if all their elements are.
2230 if (isa<ConstantArray>(C) || isa<ConstantStruct>(C) ||
2231 isa<ConstantVector>(C)) {
2232 for (Value *Op : C->operands())
2233 if (!isSimpleEnoughValueToCommit(cast<Constant>(Op), SimpleConstants, DL))
2234 return false;
2235 return true;
2238 // We don't know exactly what relocations are allowed in constant expressions,
2239 // so we allow &global+constantoffset, which is safe and uniformly supported
2240 // across targets.
2241 ConstantExpr *CE = cast<ConstantExpr>(C);
2242 switch (CE->getOpcode()) {
2243 case Instruction::BitCast:
2244 // Bitcast is fine if the casted value is fine.
2245 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);
2247 case Instruction::IntToPtr:
2248 case Instruction::PtrToInt:
2249 // int <=> ptr is fine if the int type is the same size as the
2250 // pointer type.
2251 if (DL.getTypeSizeInBits(CE->getType()) !=
2252 DL.getTypeSizeInBits(CE->getOperand(0)->getType()))
2254 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);
2256 // GEP is fine if it is simple + constant offset.
2257 case Instruction::GetElementPtr:
2258 for (unsigned i = 1, e = CE->getNumOperands(); i != e; ++i)
2259 if (!isa<ConstantInt>(CE->getOperand(i)))
2260 return false;
2261 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);
2263 case Instruction::Add:
2264 // We allow simple+cst.
2265 if (!isa<ConstantInt>(CE->getOperand(1)))
2266 return false;
2267 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, DL);
2272 static bool
2273 isSimpleEnoughValueToCommit(Constant *C,
2274 SmallPtrSetImpl<Constant *> &SimpleConstants,
2275 const DataLayout &DL) {
2276 // If we already checked this constant, we win.
2277 if (!SimpleConstants.insert(C).second)
2278 return true;
2279 // Check the constant.
2280 return isSimpleEnoughValueToCommitHelper(C, SimpleConstants, DL);
2284 /// isSimpleEnoughPointerToCommit - Return true if this constant is simple
2285 /// enough for us to understand. In particular, if it is a cast to anything
2286 /// other than from one pointer type to another pointer type, we punt.
2287 /// We basically just support direct accesses to globals and GEP's of
2288 /// globals. This should be kept up to date with MutatedGlobals::Commit.
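// For example, @G itself or getelementptr inbounds (@G, 0, 2, 1) with
// all-constant, in-bounds indices is simple enough, while a pointer produced
// by arbitrary pointer arithmetic is not.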
2289 static bool isSimpleEnoughPointerToCommit(Constant *C) {
2290 // Conservatively, avoid aggregate types. This is because we don't
2291 // want to worry about them partially overlapping other stores.
2292 if (!cast<PointerType>(C->getType())->getElementType()->isSingleValueType())
2295 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C))
2296 // Do not allow weak/*_odr/linkonce linkage or external globals.
2297 return GV->hasUniqueInitializer();
2299 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
2300 // Handle a constantexpr gep.
2301 if (CE->getOpcode() == Instruction::GetElementPtr &&
2302 isa<GlobalVariable>(CE->getOperand(0)) &&
2303 cast<GEPOperator>(CE)->isInBounds()) {
2304 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));
2305 // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or
2306 // external globals.
2307 if (!GV->hasUniqueInitializer())
2310 // The first index must be zero.
2311 ConstantInt *CI = dyn_cast<ConstantInt>(*std::next(CE->op_begin()));
2312 if (!CI || !CI->isZero()) return false;
2314 // The remaining indices must be compile-time known integers within the
2315 // notional bounds of the corresponding static array types.
2316 if (!CE->isGEPWithNoNotionalOverIndexing())
2319 return ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE);
2321 // A constantexpr bitcast from a pointer to another pointer is a no-op,
2322 // and we know how to evaluate it by moving the bitcast from the pointer
2323 // operand to the value operand.
2324 } else if (CE->getOpcode() == Instruction::BitCast &&
2325 isa<GlobalVariable>(CE->getOperand(0))) {
2326 // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or
2327 // external globals.
2328 return cast<GlobalVariable>(CE->getOperand(0))->hasUniqueInitializer();
2337 /// Evaluator - This class evaluates LLVM IR, producing the Constant
2338 /// representing each SSA instruction. Changes to global variables are stored
2339 /// in a mapping that can be iterated over after the evaluation is complete.
2340 /// Once an evaluation call fails, the evaluation object should not be reused.
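// Typical usage (see EvaluateStaticConstructor below): construct an Evaluator,
// call EvaluateFunction on a static constructor, and on success commit every
// entry of getMutated() and mark the getInvariants() globals constant.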
2341 class Evaluator {
2342 public:
2343 Evaluator(const DataLayout &DL, const TargetLibraryInfo *TLI)
2344 : DL(DL), TLI(TLI) {
2345 ValueStack.emplace_back();
2348 ~Evaluator() {
2349 for (auto &Tmp : AllocaTmps)
2350 // If there are still users of the alloca, the program is doing something
2351 // silly, e.g. storing the address of the alloca somewhere and using it
2352 // later. Since this is undefined, we'll just make it be null.
2353 if (!Tmp->use_empty())
2354 Tmp->replaceAllUsesWith(Constant::getNullValue(Tmp->getType()));
2357 /// EvaluateFunction - Evaluate a call to function F, returning true if
2358 /// successful, false if we can't evaluate it. ActualArgs contains the actual
2359 /// arguments passed to the function.
2360 bool EvaluateFunction(Function *F, Constant *&RetVal,
2361 const SmallVectorImpl<Constant*> &ActualArgs);
2363 /// EvaluateBlock - Evaluate all instructions in block BB, returning true if
2364 /// successful, false if we can't evaluate it. NewBB returns the next BB that
2365 /// control flows into, or null upon return.
2366 bool EvaluateBlock(BasicBlock::iterator CurInst, BasicBlock *&NextBB);
2368 Constant *getVal(Value *V) {
2369 if (Constant *CV = dyn_cast<Constant>(V)) return CV;
2370 Constant *R = ValueStack.back().lookup(V);
2371 assert(R && "Reference to an uncomputed value!");
2375 void setVal(Value *V, Constant *C) {
2376 ValueStack.back()[V] = C;
2379 MutatedGlobals &getMutated() {
2380 return Mutated;
2383 const SmallPtrSetImpl<GlobalVariable*> &getInvariants() const {
2384 return Invariants;
2387 private:
2388 Constant *ComputeLoadResult(Constant *P);
2390 /// ValueStack - As we compute SSA register values, we store their contents
2391 /// here. The back of the deque contains the current function and the stack
2392 /// contains the values in the calling frames.
2393 std::deque<DenseMap<Value*, Constant*>> ValueStack;
2395 /// CallStack - This is used to detect recursion. In pathological situations
2396 /// we could hit exponential behavior, but at least there is nothing
2398 SmallVector<Function*, 4> CallStack;
2400 /// Mutated - For each store we execute, we update this map. Loads check
2401 /// this to get the most up-to-date value. If evaluation is successful,
2402 /// this state is committed to the process.
2403 MutatedGlobals Mutated;
2405 /// AllocaTmps - To 'execute' an alloca, we create a temporary global variable
2406 /// to represent its body. This vector is needed so we can delete the
2407 /// temporary globals when we are done.
2408 SmallVector<std::unique_ptr<GlobalVariable>, 32> AllocaTmps;
2410 /// Invariants - These global variables have been marked invariant by the
2411 /// static constructor.
2412 SmallPtrSet<GlobalVariable*, 8> Invariants;
2414 /// SimpleConstants - These are constants we have checked and know to be
2415 /// simple enough to live in a static initializer of a global.
2416 SmallPtrSet<Constant*, 8> SimpleConstants;
2418 const DataLayout &DL;
2419 const TargetLibraryInfo *TLI;
2420 };
2422 } // anonymous namespace
2424 /// ComputeLoadResult - Return the value that would be computed by a load from
2425 /// P after the stores recorded in Mutated have been performed. If we can't
2426 /// decide, return null.
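// A value recorded by an earlier evaluated store (in Mutated) takes precedence
// over the global's own definitive initializer; anything else is unknown and
// reported as null.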
2427 Constant *Evaluator::ComputeLoadResult(Constant *P) {
2428 // If this memory location has been recently stored, use the stored value: it
2429 // is the most up-to-date.
2430 Constant *Val = Mutated.LookupStore(P);
2431 if (Val) return Val;
2434 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
2435 if (GV->hasDefinitiveInitializer())
2436 return GV->getInitializer();
2440 // Handle a constantexpr getelementptr.
2441 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(P))
2442 if (CE->getOpcode() == Instruction::GetElementPtr &&
2443 isa<GlobalVariable>(CE->getOperand(0))) {
2444 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));
2445 if (GV->hasDefinitiveInitializer())
2446 return ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE);
2449 return nullptr; // don't know how to evaluate.
2452 /// EvaluateBlock - Evaluate all instructions in block BB, returning true if
2453 /// successful, false if we can't evaluate it. NewBB returns the next BB that
2454 /// control flows into, or null upon return.
2455 bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst,
2456 BasicBlock *&NextBB) {
2457 // This is the main evaluation loop.
2459 Constant *InstResult = nullptr;
2461 DEBUG(dbgs() << "Evaluating Instruction: " << *CurInst << "\n");
2463 if (StoreInst *SI = dyn_cast<StoreInst>(CurInst)) {
2464 if (!SI->isSimple()) {
2465 DEBUG(dbgs() << "Store is not simple! Can not evaluate.\n");
2466 return false; // no volatile/atomic accesses.
2468 Constant *Ptr = getVal(SI->getOperand(1));
2469 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) {
2470 DEBUG(dbgs() << "Folding constant ptr expression: " << *Ptr);
2471 Ptr = ConstantFoldConstantExpression(CE, DL, TLI);
2472 DEBUG(dbgs() << "; To: " << *Ptr << "\n");
2474 if (!isSimpleEnoughPointerToCommit(Ptr)) {
2475 // If this is too complex for us to commit, reject it.
2476 DEBUG(dbgs() << "Pointer is too complex for us to evaluate store.");
2477 return false;
2480 Constant *Val = getVal(SI->getOperand(0));
2482 // If this might be too difficult for the backend to handle (e.g. the addr
2483 // of one global variable divided by another) then we can't commit it.
2484 if (!isSimpleEnoughValueToCommit(Val, SimpleConstants, DL)) {
2485 DEBUG(dbgs() << "Store value is too complex to evaluate store. " << *Val
2486 << "\n");
2487 return false;
2490 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) {
2491 if (CE->getOpcode() == Instruction::BitCast) {
2492 DEBUG(dbgs() << "Attempting to resolve bitcast on constant ptr.\n");
2493 // If we're evaluating a store through a bitcast, then we need
2494 // to pull the bitcast off the pointer type and push it onto the
2495 // stored value.
2496 Ptr = CE->getOperand(0);
2498 Type *NewTy = cast<PointerType>(Ptr->getType())->getElementType();
2500 // In order to push the bitcast onto the stored value, a bitcast
2501 // from NewTy to Val's type must be legal. If it's not, we can try
2502 // introspecting NewTy to find a legal conversion.
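// For example, a store of i32 through an i32* that was bitcast from a
// { i32, i8* }* global is redirected to element 0 of the struct by appending a
// getelementptr with indices 0, 0.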
2503 while (!Val->getType()->canLosslesslyBitCastTo(NewTy)) {
2504 // If NewTy is a struct, we can convert the pointer to the struct
2505 // into a pointer to its first member.
2506 // FIXME: This could be extended to support arrays as well.
2507 if (StructType *STy = dyn_cast<StructType>(NewTy)) {
2508 NewTy = STy->getTypeAtIndex(0U);
2510 IntegerType *IdxTy = IntegerType::get(NewTy->getContext(), 32);
2511 Constant *IdxZero = ConstantInt::get(IdxTy, 0, false);
2512 Constant * const IdxList[] = {IdxZero, IdxZero};
2514 Ptr = ConstantExpr::getGetElementPtr(nullptr, Ptr, IdxList);
2515 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
2516 Ptr = ConstantFoldConstantExpression(CE, DL, TLI);
2518 // If we can't improve the situation by introspecting NewTy,
2519 // we have to give up.
2520 } else {
2521 DEBUG(dbgs() << "Failed to bitcast constant ptr, can not "
2522 "evaluate.\n");
2523 return false;
2527 // If we found compatible types, go ahead and push the bitcast
2528 // onto the stored value.
2529 Val = ConstantExpr::getBitCast(Val, NewTy);
2531 DEBUG(dbgs() << "Evaluated bitcast: " << *Val << "\n");
2535 Mutated.AddStore(Ptr, Val);
2536 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CurInst)) {
2537 InstResult = ConstantExpr::get(BO->getOpcode(),
2538 getVal(BO->getOperand(0)),
2539 getVal(BO->getOperand(1)));
2540 DEBUG(dbgs() << "Found a BinaryOperator! Simplifying: " << *InstResult
2541 << "\n");
2542 } else if (CmpInst *CI = dyn_cast<CmpInst>(CurInst)) {
2543 InstResult = ConstantExpr::getCompare(CI->getPredicate(),
2544 getVal(CI->getOperand(0)),
2545 getVal(CI->getOperand(1)));
2546 DEBUG(dbgs() << "Found a CmpInst! Simplifying: " << *InstResult
2547 << "\n");
2548 } else if (CastInst *CI = dyn_cast<CastInst>(CurInst)) {
2549 InstResult = ConstantExpr::getCast(CI->getOpcode(),
2550 getVal(CI->getOperand(0)),
2551 CI->getType());
2552 DEBUG(dbgs() << "Found a Cast! Simplifying: " << *InstResult
2553 << "\n");
2554 } else if (SelectInst *SI = dyn_cast<SelectInst>(CurInst)) {
2555 InstResult = ConstantExpr::getSelect(getVal(SI->getOperand(0)),
2556 getVal(SI->getOperand(1)),
2557 getVal(SI->getOperand(2)));
2558 DEBUG(dbgs() << "Found a Select! Simplifying: " << *InstResult
2559 << "\n");
2560 } else if (auto *EVI = dyn_cast<ExtractValueInst>(CurInst)) {
2561 InstResult = ConstantExpr::getExtractValue(
2562 getVal(EVI->getAggregateOperand()), EVI->getIndices());
2563 DEBUG(dbgs() << "Found an ExtractValueInst! Simplifying: " << *InstResult
2564 << "\n");
2565 } else if (auto *IVI = dyn_cast<InsertValueInst>(CurInst)) {
2566 InstResult = ConstantExpr::getInsertValue(
2567 getVal(IVI->getAggregateOperand()),
2568 getVal(IVI->getInsertedValueOperand()), IVI->getIndices());
2569 DEBUG(dbgs() << "Found an InsertValueInst! Simplifying: " << *InstResult
2570 << "\n");
2571 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurInst)) {
2572 Constant *P = getVal(GEP->getOperand(0));
2573 SmallVector<Constant*, 8> GEPOps;
2574 for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end();
2575 i != e; ++i)
2576 GEPOps.push_back(getVal(*i));
2577 InstResult =
2578 ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), P, GEPOps,
2579 cast<GEPOperator>(GEP)->isInBounds());
2580 DEBUG(dbgs() << "Found a GEP! Simplifying: " << *InstResult
2581 << "\n");
2582 } else if (LoadInst *LI = dyn_cast<LoadInst>(CurInst)) {
2584 if (!LI->isSimple()) {
2585 DEBUG(dbgs() << "Found a Load! Not a simple load, can not evaluate.\n");
2586 return false; // no volatile/atomic accesses.
2589 Constant *Ptr = getVal(LI->getOperand(0));
2590 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) {
2591 Ptr = ConstantFoldConstantExpression(CE, DL, TLI);
2592 DEBUG(dbgs() << "Found a constant pointer expression, constant "
2593 "folding: " << *Ptr << "\n");
2595 InstResult = ComputeLoadResult(Ptr);
2596 if (!InstResult) {
2597 DEBUG(dbgs() << "Failed to compute load result. Can not evaluate load."
2598 "\n");
2599 return false; // Could not evaluate load.
2602 DEBUG(dbgs() << "Evaluated load: " << *InstResult << "\n");
2603 } else if (AllocaInst *AI = dyn_cast<AllocaInst>(CurInst)) {
2604 if (AI->isArrayAllocation()) {
2605 DEBUG(dbgs() << "Found an array alloca. Can not evaluate.\n");
2606 return false; // Cannot handle array allocs.
2608 Type *Ty = AI->getType()->getElementType();
2609 AllocaTmps.push_back(
2610 make_unique<GlobalVariable>(Ty, false, GlobalValue::InternalLinkage,
2611 UndefValue::get(Ty), AI->getName()));
2612 InstResult = AllocaTmps.back().get();
2613 DEBUG(dbgs() << "Found an alloca. Result: " << *InstResult << "\n");
2614 } else if (isa<CallInst>(CurInst) || isa<InvokeInst>(CurInst)) {
2615 CallSite CS(CurInst);
2617 // Debug info can safely be ignored here.
2618 if (isa<DbgInfoIntrinsic>(CS.getInstruction())) {
2619 DEBUG(dbgs() << "Ignoring debug info.\n");
2624 // Cannot handle inline asm.
2625 if (isa<InlineAsm>(CS.getCalledValue())) {
2626 DEBUG(dbgs() << "Found inline asm, can not evaluate.\n");
2627 return false;
2630 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
2631 if (MemSetInst *MSI = dyn_cast<MemSetInst>(II)) {
2632 if (MSI->isVolatile()) {
2633 DEBUG(dbgs() << "Can not optimize a volatile memset " <<
2634 "intrinsic.\n");
2635 return false;
2637 Constant *Ptr = getVal(MSI->getDest());
2638 Constant *Val = getVal(MSI->getValue());
2639 Constant *DestVal = ComputeLoadResult(getVal(Ptr));
2640 if (Val->isNullValue() && DestVal && DestVal->isNullValue()) {
2641 // This memset is a no-op.
2642 DEBUG(dbgs() << "Ignoring no-op memset.\n");
2648 if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
2649 II->getIntrinsicID() == Intrinsic::lifetime_end) {
2650 DEBUG(dbgs() << "Ignoring lifetime intrinsic.\n");
2655 if (II->getIntrinsicID() == Intrinsic::invariant_start) {
2656 // We don't insert an entry into Values, as it doesn't have a
2657 // meaningful return value.
2658 if (!II->use_empty()) {
2659 DEBUG(dbgs() << "Found unused invariant_start. Can't evaluate.\n");
2660 return false;
2662 ConstantInt *Size = cast<ConstantInt>(II->getArgOperand(0));
2663 Value *PtrArg = getVal(II->getArgOperand(1));
2664 Value *Ptr = PtrArg->stripPointerCasts();
2665 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) {
2666 Type *ElemTy = cast<PointerType>(GV->getType())->getElementType();
2667 if (!Size->isAllOnesValue() &&
2668 Size->getValue().getLimitedValue() >=
2669 DL.getTypeStoreSize(ElemTy)) {
2670 Invariants.insert(GV);
2671 DEBUG(dbgs() << "Found a global var that is an invariant: " << *GV
2672 << "\n");
2673 } else {
2674 DEBUG(dbgs() << "Found a global var, but can not treat it as an "
2675 "invariant.\n");
2678 // Continue even if we do nothing.
2683 DEBUG(dbgs() << "Unknown intrinsic. Can not evaluate.\n");
2684 return false;
2687 // Resolve function pointers.
2688 Function *Callee = dyn_cast<Function>(getVal(CS.getCalledValue()));
2689 if (!Callee || Callee->mayBeOverridden()) {
2690 DEBUG(dbgs() << "Can not resolve function pointer.\n");
2691 return false; // Cannot resolve.
2694 SmallVector<Constant*, 8> Formals;
2695 for (User::op_iterator i = CS.arg_begin(), e = CS.arg_end(); i != e; ++i)
2696 Formals.push_back(getVal(*i));
2698 if (Callee->isDeclaration()) {
2699 // If this is a function we can constant fold, do it.
2700 if (Constant *C = ConstantFoldCall(Callee, Formals, TLI)) {
2702 DEBUG(dbgs() << "Constant folded function call. Result: " <<
2703 *InstResult << "\n");
2705 DEBUG(dbgs() << "Can not constant fold function call.\n");
2709 if (Callee->getFunctionType()->isVarArg()) {
2710 DEBUG(dbgs() << "Can not constant fold vararg function call.\n");
2711 return false;
2714 Constant *RetVal = nullptr;
2715 // Execute the call, if successful, use the return value.
2716 ValueStack.emplace_back();
2717 if (!EvaluateFunction(Callee, RetVal, Formals)) {
2718 DEBUG(dbgs() << "Failed to evaluate function.\n");
2719 return false;
2721 ValueStack.pop_back();
2722 InstResult = RetVal;
2725 DEBUG(dbgs() << "Successfully evaluated function. Result: " <<
2726 InstResult << "\n\n");
2728 DEBUG(dbgs() << "Successfully evaluated function. Result: 0\n\n");
2731 } else if (isa<TerminatorInst>(CurInst)) {
2732 DEBUG(dbgs() << "Found a terminator instruction.\n");
2734 if (BranchInst *BI = dyn_cast<BranchInst>(CurInst)) {
2735 if (BI->isUnconditional()) {
2736 NextBB = BI->getSuccessor(0);
2737 } else {
2738 ConstantInt *Cond =
2739 dyn_cast<ConstantInt>(getVal(BI->getCondition()));
2740 if (!Cond) return false; // Cannot determine.
2742 NextBB = BI->getSuccessor(!Cond->getZExtValue());
2744 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(CurInst)) {
2745 ConstantInt *Val =
2746 dyn_cast<ConstantInt>(getVal(SI->getCondition()));
2747 if (!Val) return false; // Cannot determine.
2748 NextBB = SI->findCaseValue(Val).getCaseSuccessor();
2749 } else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(CurInst)) {
2750 Value *Val = getVal(IBI->getAddress())->stripPointerCasts();
2751 if (BlockAddress *BA = dyn_cast<BlockAddress>(Val))
2752 NextBB = BA->getBasicBlock();
2754 return false; // Cannot determine.
2755 } else if (isa<ReturnInst>(CurInst)) {
2758 // invoke, unwind, resume, unreachable.
2759 DEBUG(dbgs() << "Can not handle terminator.");
2760 return false; // Cannot handle this terminator.
2763 // We succeeded at evaluating this block!
2764 DEBUG(dbgs() << "Successfully evaluated block.\n");
2767 // Did not know how to evaluate this!
2768 DEBUG(dbgs() << "Failed to evaluate block due to unhandled instruction."
2769 "\n");
2770 return false;
2773 if (!CurInst->use_empty()) {
2774 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(InstResult))
2775 InstResult = ConstantFoldConstantExpression(CE, DL, TLI);
2777 setVal(CurInst, InstResult);
2780 // If we just processed an invoke, we finished evaluating the block.
2781 if (InvokeInst *II = dyn_cast<InvokeInst>(CurInst)) {
2782 NextBB = II->getNormalDest();
2783 DEBUG(dbgs() << "Found an invoke instruction. Finished Block.\n\n");
2784 return true;
2787 // Advance program counter.
2788 ++CurInst;
2792 /// EvaluateFunction - Evaluate a call to function F, returning true if
2793 /// successful, false if we can't evaluate it. ActualArgs contains the actual
2794 /// arguments passed to the function.
2795 bool Evaluator::EvaluateFunction(Function *F, Constant *&RetVal,
2796 const SmallVectorImpl<Constant*> &ActualArgs) {
2797 // Check to see if this function is already executing (recursion). If so,
2798 // bail out. TODO: we might want to accept limited recursion.
2799 if (std::find(CallStack.begin(), CallStack.end(), F) != CallStack.end())
2800 return false;
2802 CallStack.push_back(F);
2804 // Initialize arguments to the incoming values specified.
2805 unsigned ArgNo = 0;
2806 for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end(); AI != E;
2807 ++AI, ++ArgNo)
2808 setVal(AI, ActualArgs[ArgNo]);
2810 // ExecutedBlocks - We only handle non-looping, non-recursive code. As such,
2811 // we can only evaluate any one basic block at most once. This set keeps
2812 // track of what we have executed so we can detect recursive cases etc.
2813 SmallPtrSet<BasicBlock*, 32> ExecutedBlocks;
2815 // CurBB - The current basic block we're evaluating.
2816 BasicBlock *CurBB = F->begin();
2818 BasicBlock::iterator CurInst = CurBB->begin();
2821 BasicBlock *NextBB = nullptr; // Initialized to avoid compiler warnings.
2822 DEBUG(dbgs() << "Trying to evaluate BB: " << *CurBB << "\n");
2824 if (!EvaluateBlock(CurInst, NextBB))
2825 return false;
2827 if (!NextBB) {
2828 // Successfully running until there's no next block means that we found
2829 // the return. Fill in the return value and pop the call stack.
2830 ReturnInst *RI = cast<ReturnInst>(CurBB->getTerminator());
2831 if (RI->getNumOperands())
2832 RetVal = getVal(RI->getOperand(0));
2833 CallStack.pop_back();
2834 return true;
2837 // Okay, we succeeded in evaluating this control flow. See if we have
2838 // executed the new block before. If so, we have a looping function,
2839 // which we cannot evaluate in reasonable time.
2840 if (!ExecutedBlocks.insert(NextBB).second)
2841 return false; // looped!
2843 // Okay, we have never been in this block before. Check to see if there
2844 // are any PHI nodes. If so, evaluate them with information about where
2845 // we came from.
2846 PHINode *PN = nullptr;
2847 for (CurInst = NextBB->begin();
2848 (PN = dyn_cast<PHINode>(CurInst)); ++CurInst)
2849 setVal(PN, getVal(PN->getIncomingValueForBlock(CurBB)));
2851 // Advance to the next block.
2852 CurBB = NextBB;
2856 /// EvaluateStaticConstructor - Evaluate static constructors in the function, if
2857 /// we can. Return true if we can, false otherwise.
2858 static bool EvaluateStaticConstructor(Function *F, const DataLayout &DL,
2859 const TargetLibraryInfo *TLI) {
2860 // Call the function.
2861 Evaluator Eval(DL, TLI);
2862 Constant *RetValDummy;
2863 bool EvalSuccess = Eval.EvaluateFunction(F, RetValDummy,
2864 SmallVector<Constant*, 0>());
2866 if (EvalSuccess) {
2867 ++NumCtorsEvaluated;
2869 // We succeeded at evaluation: commit the result.
2870 DEBUG(dbgs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '"
2871 << F->getName() << "' to " << Eval.getMutated().size()
2872 << " mutated globals.\n");
2874 MutatedGlobals &Mutated = Eval.getMutated();
2875 for (auto I : Mutated)
2876 Mutated.Commit(I.second);
2878 for (GlobalVariable *GV : Eval.getInvariants())
2879 GV->setConstant(true);
2882 return EvalSuccess;
2885 static int compareNames(Constant *const *A, Constant *const *B) {
2886 return (*A)->getName().compare((*B)->getName());
2889 static void setUsedInitializer(GlobalVariable &V,
2890 const SmallPtrSet<GlobalValue *, 8> &Init) {
2892 V.eraseFromParent();
2896 // Type of pointer to the array of pointers.
2897 PointerType *Int8PtrTy = Type::getInt8PtrTy(V.getContext(), 0);
2899 SmallVector<llvm::Constant *, 8> UsedArray;
2900 for (GlobalValue *GV : Init) {
2902 = ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV, Int8PtrTy);
2903 UsedArray.push_back(Cast);
2905 // Sort to get deterministic order.
2906 array_pod_sort(UsedArray.begin(), UsedArray.end(), compareNames);
2907 ArrayType *ATy = ArrayType::get(Int8PtrTy, UsedArray.size());
2909 Module *M = V.getParent();
2910 V.removeFromParent();
2911 GlobalVariable *NV =
2912 new GlobalVariable(*M, ATy, false, llvm::GlobalValue::AppendingLinkage,
2913 llvm::ConstantArray::get(ATy, UsedArray), "");
2915 NV->setSection("llvm.metadata");
2920 /// \brief An easy to access representation of llvm.used and llvm.compiler.used.
2921 class LLVMUsed {
2922 SmallPtrSet<GlobalValue *, 8> Used;
2923 SmallPtrSet<GlobalValue *, 8> CompilerUsed;
2924 GlobalVariable *UsedV;
2925 GlobalVariable *CompilerUsedV;
2927 public:
2928 LLVMUsed(Module &M) {
2929 UsedV = collectUsedGlobalVariables(M, Used, false);
2930 CompilerUsedV = collectUsedGlobalVariables(M, CompilerUsed, true);
2932 typedef SmallPtrSet<GlobalValue *, 8>::iterator iterator;
2933 typedef iterator_range<iterator> used_iterator_range;
2934 iterator usedBegin() { return Used.begin(); }
2935 iterator usedEnd() { return Used.end(); }
2936 used_iterator_range used() {
2937 return used_iterator_range(usedBegin(), usedEnd());
2939 iterator compilerUsedBegin() { return CompilerUsed.begin(); }
2940 iterator compilerUsedEnd() { return CompilerUsed.end(); }
2941 used_iterator_range compilerUsed() {
2942 return used_iterator_range(compilerUsedBegin(), compilerUsedEnd());
2944 bool usedCount(GlobalValue *GV) const { return Used.count(GV); }
2945 bool compilerUsedCount(GlobalValue *GV) const {
2946 return CompilerUsed.count(GV);
2948 bool usedErase(GlobalValue *GV) { return Used.erase(GV); }
2949 bool compilerUsedErase(GlobalValue *GV) { return CompilerUsed.erase(GV); }
2950 bool usedInsert(GlobalValue *GV) { return Used.insert(GV).second; }
2951 bool compilerUsedInsert(GlobalValue *GV) {
2952 return CompilerUsed.insert(GV).second;
2955 void syncVariablesAndSets() {
2956 if (UsedV)
2957 setUsedInitializer(*UsedV, Used);
2958 if (CompilerUsedV)
2959 setUsedInitializer(*CompilerUsedV, CompilerUsed);
2961 };
2964 static bool hasUseOtherThanLLVMUsed(GlobalAlias &GA, const LLVMUsed &U) {
2965 if (GA.use_empty()) // No use at all.
2966 return false;
2968 assert((!U.usedCount(&GA) || !U.compilerUsedCount(&GA)) &&
2969 "We should have removed the duplicated "
2970 "element from llvm.compiler.used");
2971 if (!GA.hasOneUse())
2972 // Strictly more than one use. So at least one is not in llvm.used and
2973 // llvm.compiler.used.
2974 return true;
2976 // Exactly one use. Check if it is in llvm.used or llvm.compiler.used.
2977 return !U.usedCount(&GA) && !U.compilerUsedCount(&GA);
2980 static bool hasMoreThanOneUseOtherThanLLVMUsed(GlobalValue &V,
2981 const LLVMUsed &U) {
2982 unsigned N = 2;
2983 assert((!U.usedCount(&V) || !U.compilerUsedCount(&V)) &&
2984 "We should have removed the duplicated "
2985 "element from llvm.compiler.used");
2986 if (U.usedCount(&V) || U.compilerUsedCount(&V))
2987 ++N;
2988 return V.hasNUsesOrMore(N);
2991 static bool mayHaveOtherReferences(GlobalAlias &GA, const LLVMUsed &U) {
2992 if (!GA.hasLocalLinkage())
2993 return true;
2995 return U.usedCount(&GA) || U.compilerUsedCount(&GA);
2998 static bool hasUsesToReplace(GlobalAlias &GA, const LLVMUsed &U,
2999 bool &RenameTarget) {
3000 RenameTarget = false;
3002 if (hasUseOtherThanLLVMUsed(GA, U))
3005 // If the alias is externally visible, we may still be able to simplify it.
3006 if (!mayHaveOtherReferences(GA, U))
3009 // If the aliasee has internal linkage, give it the name and linkage
3010 // of the alias, and delete the alias. This turns:
3011 // define internal ... @f(...)
3012 // @a = alias ... @f
3014 // define ... @a(...)
3015 Constant *Aliasee = GA.getAliasee();
3016 GlobalValue *Target = cast<GlobalValue>(Aliasee->stripPointerCasts());
3017 if (!Target->hasLocalLinkage())
3020 // Do not perform the transform if multiple aliases potentially target the
3021 // aliasee. This check also ensures that it is safe to replace the section
3022 // and other attributes of the aliasee with those of the alias.
3023 if (hasMoreThanOneUseOtherThanLLVMUsed(*Target, U))
3024 return false;
3026 RenameTarget = true;
3030 bool GlobalOpt::OptimizeGlobalAliases(Module &M) {
3031 bool Changed = false;
3032 LLVMUsed Used(M);
3034 for (GlobalValue *GV : Used.used())
3035 Used.compilerUsedErase(GV);
3037 for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end();
3039 Module::alias_iterator J = I++;
3040 // Aliases without names cannot be referenced outside this module.
3041 if (!J->hasName() && !J->isDeclaration() && !J->hasLocalLinkage())
3042 J->setLinkage(GlobalValue::InternalLinkage);
3043 // If the aliasee may change at link time, nothing can be done - bail out.
3044 if (J->mayBeOverridden())
3045 continue;
3047 Constant *Aliasee = J->getAliasee();
3048 GlobalValue *Target = dyn_cast<GlobalValue>(Aliasee->stripPointerCasts());
3049 // We can't trivially replace the alias with the aliasee if the aliasee is
3050 // non-trivial in some way.
3051 // TODO: Try to handle non-zero GEPs of local aliasees.
3052 if (!Target)
3053 continue;
3054 Target->removeDeadConstantUsers();
3056 // Make all users of the alias use the aliasee instead.
3057 bool RenameTarget;
3058 if (!hasUsesToReplace(*J, Used, RenameTarget))
3059 continue;
3061 J->replaceAllUsesWith(ConstantExpr::getBitCast(Aliasee, J->getType()));
3062 ++NumAliasesResolved;
3063 Changed = true;
3065 if (RenameTarget) {
3066 // Give the aliasee the name, linkage and other attributes of the alias.
3067 Target->takeName(J);
3068 Target->setLinkage(J->getLinkage());
3069 Target->setVisibility(J->getVisibility());
3070 Target->setDLLStorageClass(J->getDLLStorageClass());
3072 if (Used.usedErase(J))
3073 Used.usedInsert(Target);
3075 if (Used.compilerUsedErase(J))
3076 Used.compilerUsedInsert(Target);
3077 } else if (mayHaveOtherReferences(*J, Used))
3078 continue;
3080 // Delete the alias.
3081 M.getAliasList().erase(J);
3082 ++NumAliasesRemoved;
3083 Changed = true;
3086 Used.syncVariablesAndSets();
3091 static Function *FindCXAAtExit(Module &M, TargetLibraryInfo *TLI) {
3092 if (!TLI->has(LibFunc::cxa_atexit))
3093 return nullptr;
3095 Function *Fn = M.getFunction(TLI->getName(LibFunc::cxa_atexit));
3097 if (!Fn)
3098 return nullptr;
3100 FunctionType *FTy = Fn->getFunctionType();
3102 // Checking that the function has the right return type, the right number of
3103 // parameters and that they all have pointer types should be enough.
3104 if (!FTy->getReturnType()->isIntegerTy() ||
3105 FTy->getNumParams() != 3 ||
3106 !FTy->getParamType(0)->isPointerTy() ||
3107 !FTy->getParamType(1)->isPointerTy() ||
3108 !FTy->getParamType(2)->isPointerTy())
3109 return nullptr;
3111 return Fn;
3114 /// cxxDtorIsEmpty - Returns whether the given function is an empty C++
3115 /// destructor and can therefore be eliminated.
3116 /// Note that we assume that other optimization passes have already simplified
3117 /// the code so we only look for a function with a single basic block, where
3118 /// the only allowed instructions are 'ret', 'call' to an empty C++ dtor and
3119 /// other side-effect free instructions.
3120 static bool cxxDtorIsEmpty(const Function &Fn,
3121 SmallPtrSet<const Function *, 8> &CalledFunctions) {
3122 // FIXME: We could eliminate C++ destructors if they're readonly/readnone and
3123 // nounwind, but that doesn't seem worth doing.
3124 if (Fn.isDeclaration())
3125 return false;
3127 if (++Fn.begin() != Fn.end())
3128 return false;
3130 const BasicBlock &EntryBlock = Fn.getEntryBlock();
3131 for (BasicBlock::const_iterator I = EntryBlock.begin(), E = EntryBlock.end();
3133 if (const CallInst *CI = dyn_cast<CallInst>(I)) {
3134 // Ignore debug intrinsics.
3135 if (isa<DbgInfoIntrinsic>(CI))
3138 const Function *CalledFn = CI->getCalledFunction();
3140 if (!CalledFn)
3141 return false;
3143 SmallPtrSet<const Function *, 8> NewCalledFunctions(CalledFunctions);
3145 // Don't treat recursive functions as empty.
3146 if (!NewCalledFunctions.insert(CalledFn).second)
3147 return false;
3149 if (!cxxDtorIsEmpty(*CalledFn, NewCalledFunctions))
3150 return false;
3151 } else if (isa<ReturnInst>(*I))
3152 return true; // We're done.
3153 else if (I->mayHaveSideEffects())
3154 return false; // Destructor with side effects, bail.
3160 bool GlobalOpt::OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn) {
3161 /// Itanium C++ ABI p3.3.5:
3163 /// After constructing a global (or local static) object, that will require
3164 /// destruction on exit, a termination function is registered as follows:
3166 /// extern "C" int __cxa_atexit ( void (*f)(void *), void *p, void *d );
3168 /// This registration, e.g. __cxa_atexit(f,p,d), is intended to cause the
3169 /// call f(p) when DSO d is unloaded, before all such termination calls
3170 /// registered before this one. It returns zero if registration is
3171 /// successful, nonzero on failure.
3173 // This pass will look for calls to __cxa_atexit where the function is trivial
3174 // and remove them.
3175 bool Changed = false;
3177 for (auto I = CXAAtExitFn->user_begin(), E = CXAAtExitFn->user_end();
3179 // We're only interested in calls. Theoretically, we could handle invoke
3180 // instructions as well, but neither llvm-gcc nor clang generate invokes
3181 // to __cxa_atexit.
3182 CallInst *CI = dyn_cast<CallInst>(*I++);
3186 Function *DtorFn =
3187 dyn_cast<Function>(CI->getArgOperand(0)->stripPointerCasts());
3191 SmallPtrSet<const Function *, 8> CalledFunctions;
3192 if (!cxxDtorIsEmpty(*DtorFn, CalledFunctions))
3193 continue;
3195 // Just remove the call.
3196 CI->replaceAllUsesWith(Constant::getNullValue(CI->getType()));
3197 CI->eraseFromParent();
3199 ++NumCXXDtorsRemoved;
3207 bool GlobalOpt::runOnModule(Module &M) {
3208 bool Changed = false;
3210 auto &DL = M.getDataLayout();
3211 TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
3213 bool LocalChange = true;
3214 while (LocalChange) {
3215 LocalChange = false;
3217 NotDiscardableComdats.clear();
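// Collect the comdats whose members are still referenced or otherwise not
// discardable; the optimizations below leave members of these comdats alone
// unless they have local linkage.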
3218 for (const GlobalVariable &GV : M.globals())
3219 if (const Comdat *C = GV.getComdat())
3220 if (!GV.isDiscardableIfUnused() || !GV.use_empty())
3221 NotDiscardableComdats.insert(C);
3222 for (Function &F : M)
3223 if (const Comdat *C = F.getComdat())
3224 if (!F.isDefTriviallyDead())
3225 NotDiscardableComdats.insert(C);
3226 for (GlobalAlias &GA : M.aliases())
3227 if (const Comdat *C = GA.getComdat())
3228 if (!GA.isDiscardableIfUnused() || !GA.use_empty())
3229 NotDiscardableComdats.insert(C);
3231 // Delete functions that are trivially dead, ccc -> fastcc
3232 LocalChange |= OptimizeFunctions(M);
3234 // Optimize global_ctors list.
3235 LocalChange |= optimizeGlobalCtorsList(M, [&](Function *F) {
3236 return EvaluateStaticConstructor(F, DL, TLI);
3239 // Optimize non-address-taken globals.
3240 LocalChange |= OptimizeGlobalVars(M);
3242 // Resolve aliases, when possible.
3243 LocalChange |= OptimizeGlobalAliases(M);
3245 // Try to remove trivial global destructors if they are not removed
3247 Function *CXAAtExitFn = FindCXAAtExit(M, TLI);
3248 if (CXAAtExitFn)
3249 LocalChange |= OptimizeEmptyGlobalCXXDtors(CXAAtExitFn);
3251 Changed |= LocalChange;
3254 // TODO: Move all global ctors functions to the end of the module for code