1 //===- MergeFunctions.cpp - Merge identical functions ---------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This pass looks for equivalent functions that are mergable and folds them.
12 // An order relation is defined on the set of functions. It is built from a
13 // special function comparison procedure that returns
14 // 0 when functions are equal,
15 // -1 when Left function is less than right function, and
16 // 1 for opposite case. We need total-ordering, so we need to maintain
17 // four properties on the functions set:
18 // a <= a (reflexivity)
19 // if a <= b and b <= a then a = b (antisymmetry)
20 // if a <= b and b <= c then a <= c (transitivity).
21 // for all a and b: a <= b or b <= a (totality).
23 // Comparison iterates through each instruction in each basic block.
24 // Functions are kept on binary tree. For each new function F we perform
25 // lookup in binary tree.
26 // In practice it works the following way:
27 // -- We define Function* container class with custom "operator<" (FunctionPtr).
28 // -- "FunctionPtr" instances are stored in std::set collection, so every
29 // std::set::insert operation will give you result in log(N) time.
31 // When a match is found the functions are folded. If both functions are
32 // overridable, we move the functionality into a new internal function and
33 // leave two overridable thunks to it.
35 //===----------------------------------------------------------------------===//
39 // * virtual functions.
41 // Many functions have their address taken by the virtual function table for
42 // the object they belong to. However, as long as it's only used for a lookup
43 // and call, this is irrelevant, and we'd like to fold such functions.
45 // * be smarter about bitcasts.
47 // In order to fold functions, we will sometimes add either bitcast instructions
48 // or bitcast constant expressions. Unfortunately, this can confound further
49 // analysis since the two functions differ where one has a bitcast and the
50 // other doesn't. We should learn to look through bitcasts.
52 // * Compare complex types with pointer types inside.
53 // * Compare cross-reference cases.
54 // * Compare complex expressions.
56 // All the three issues above could be described as ability to prove that
57 // fA == fB == fC == fE == fF == fG in example below:
76 // The simplest cross-reference case (fA <--> fB) was implemented in previous
77 // versions of MergeFunctions, though it appeared in only two function pairs
78 // in the test-suite (which contains >50k functions).
79 // The ability to detect complex cross-referencing (e.g.: A->B->C->D->A),
80 // however, could cover many more cases.
82 //===----------------------------------------------------------------------===//
84 #include "llvm/Transforms/IPO.h"
85 #include "llvm/ADT/DenseSet.h"
86 #include "llvm/ADT/FoldingSet.h"
87 #include "llvm/ADT/STLExtras.h"
88 #include "llvm/ADT/SmallSet.h"
89 #include "llvm/ADT/Statistic.h"
90 #include "llvm/IR/CallSite.h"
91 #include "llvm/IR/Constants.h"
92 #include "llvm/IR/DataLayout.h"
93 #include "llvm/IR/IRBuilder.h"
94 #include "llvm/IR/InlineAsm.h"
95 #include "llvm/IR/Instructions.h"
96 #include "llvm/IR/LLVMContext.h"
97 #include "llvm/IR/Module.h"
98 #include "llvm/IR/Operator.h"
99 #include "llvm/IR/ValueHandle.h"
100 #include "llvm/Pass.h"
101 #include "llvm/Support/CommandLine.h"
102 #include "llvm/Support/Debug.h"
103 #include "llvm/Support/ErrorHandling.h"
104 #include "llvm/Support/raw_ostream.h"
106 using namespace llvm;
108 #define DEBUG_TYPE "mergefunc"
// Pass-wide statistics, reported under -stats.
110 STATISTIC(NumFunctionsMerged, "Number of functions merged");
111 STATISTIC(NumThunksWritten, "Number of thunks generated");
112 STATISTIC(NumAliasesWritten, "Number of aliases generated");
113 STATISTIC(NumDoubleWeak, "Number of new functions created");
// Debug-only knob bounding how many functions the sanity check may touch;
// cl::init(0) disables the check by default.
// NOTE(review): the option's name argument (orig. line 116) is not visible in
// this extract -- confirm against upstream before relying on the spelling.
115 static cl::opt<unsigned> NumFunctionsForSanityCheck(
117 cl::desc("How many functions in module could be used for "
118 "MergeFunctions pass sanity check. "
119 "'0' disables this check. Works only with '-debug' key."),
120 cl::init(0), cl::Hidden);
122 /// Returns the type id for a type to be hashed. We turn pointer types into
123 /// integers here because the actual compare logic below considers pointers and
124 /// integers of the same size as equal.
125 static Type::TypeID getTypeIDForHash(Type *Ty) {
// Collapse every pointer type onto IntegerTyID so pointers and same-sized
// integers hash identically, matching the compare logic's equivalence.
126 if (Ty->isPointerTy())
127 return Type::IntegerTyID;
128 return Ty->getTypeID();
// NOTE(review): closing brace (orig. line 129) missing from this extract.
131 /// Creates a hash-code for the function which is the same for any two
132 /// functions that will compare equal, without looking at the instructions
133 /// inside the function.
134 static unsigned profileFunction(const Function *F) {
135 FunctionType *FTy = F->getFunctionType();
// NOTE(review): the FoldingSetNodeID declaration (orig. lines 136-137) is
// missing from this extract; "ID" below is that accumulator.
// Fold in signature-level facts only -- never instruction contents -- so any
// two functions that compare equal always land in the same hash bucket.
138 ID.AddInteger(F->size());
139 ID.AddInteger(F->getCallingConv());
140 ID.AddBoolean(F->hasGC());
141 ID.AddBoolean(FTy->isVarArg());
142 ID.AddInteger(getTypeIDForHash(FTy->getReturnType()));
143 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
144 ID.AddInteger(getTypeIDForHash(FTy->getParamType(i)));
145 return ID.ComputeHash();
150 /// ComparableFunction - A struct that pairs together functions with a
151 /// DataLayout so that we can keep them together as elements in the DenseSet.
152 class ComparableFunction {
// Sentinel keys required by DenseMapInfo<ComparableFunction> below.
154 static const ComparableFunction EmptyKey;
155 static const ComparableFunction TombstoneKey;
// Marker DataLayout value used to tag lookup-only instances (see its
// definition below: an intentionally invalid, never-dereferenced pointer).
156 static DataLayout * const LookupOnly;
158 ComparableFunction(Function *Func, const DataLayout *DL)
159 : Func(Func), Hash(profileFunction(Func)), DL(DL) {}
161 Function *getFunc() const { return Func; }
162 unsigned getHash() const { return Hash; }
163 const DataLayout *getDataLayout() const { return DL; }
165 // Drops AssertingVH reference to the function. Outside of debug mode, this
169 "Attempted to release function twice, or release empty/tombstone!");
// Hash-only constructor used to build the EmptyKey/TombstoneKey sentinels.
174 explicit ComparableFunction(unsigned Hash)
175 : Func(nullptr), Hash(Hash), DL(nullptr) {}
177 AssertingVH<Function> Func;
179 const DataLayout *DL;
// NOTE(review): access specifiers, the release() body, the Hash member, and
// the class's closing "};" are not visible in this extract -- confirm
// against upstream.
182 const ComparableFunction ComparableFunction::EmptyKey = ComparableFunction(0);
183 const ComparableFunction ComparableFunction::TombstoneKey =
184 ComparableFunction(1);
// Compared only by address; never dereferenced.
185 DataLayout *const ComparableFunction::LookupOnly = (DataLayout*)(-1);
191 struct DenseMapInfo<ComparableFunction> {
// DenseMap traits: empty/tombstone sentinels plus hashing and equality.
192 static ComparableFunction getEmptyKey() {
193 return ComparableFunction::EmptyKey;
195 static ComparableFunction getTombstoneKey() {
196 return ComparableFunction::TombstoneKey;
198 static unsigned getHashValue(const ComparableFunction &CF) {
// isEqual is defined out-of-line because it needs FunctionComparator,
// which is declared later in the file.
201 static bool isEqual(const ComparableFunction &LHS,
202 const ComparableFunction &RHS);
// NOTE(review): the enclosing "namespace llvm { template <>" header, the
// getHashValue body, method closing braces, and the trailing "};" are
// missing from this extract.
208 /// FunctionComparator - Compares two functions to determine whether or not
209 /// they will generate machine code with the same behaviour. DataLayout is
210 /// used if available. The comparator always fails conservatively (erring on the
211 /// side of claiming that two functions are different).
212 class FunctionComparator {
// NOTE(review): public/private access specifiers, the top-level compare()
// declaration (orig. lines 219-221), and the class's closing "};" are not
// visible in this extract -- confirm against upstream.
214 FunctionComparator(const DataLayout *DL, const Function *F1,
216 : FnL(F1), FnR(F2), DL(DL) {}
218 /// Test whether the two functions have equivalent behaviour.
222 /// Test whether two basic blocks have equivalent behaviour.
223 int compare(const BasicBlock *BBL, const BasicBlock *BBR);
225 /// Constants comparison.
226 /// It's analogous to lexicographical comparison between hypothetical numbers
228 /// <bitcastability-trait><raw-bit-contents>
230 /// 1. Bitcastability.
231 /// Check whether L's type could be losslessly bitcasted to R's type.
232 /// On this stage method, in case when lossless bitcast is not possible
233 /// method returns -1 or 1, thus also defining which type is greater in
234 /// context of bitcastability.
235 /// Stage 0: If types are equal in terms of cmpTypes, then we can go straight
236 /// to the contents comparison.
237 /// If types differ, remember types comparison result and check
238 /// whether we still can bitcast types.
239 /// Stage 1: Types that satisfy isFirstClassType conditions are always
240 /// greater than others.
241 /// Stage 2: Vector is greater than non-vector.
242 /// If both types are vectors, then vector with greater bitwidth is
244 /// If both types are vectors with the same bitwidth, then types
245 /// are bitcastable, and we can skip other stages, and go to contents
247 /// Stage 3: Pointer types are greater than non-pointers. If both types are
248 /// pointers of the same address space - go to contents comparison.
249 /// Different address spaces: pointer with greater address space is
251 /// Stage 4: Types are neither vectors, nor pointers. And they differ.
252 /// We don't know how to bitcast them. So, we better don't do it,
253 /// and return types comparison result (so it determines the
254 /// relationship among constants we don't know how to bitcast).
256 /// Just for clearance, let's see how the set of constants could look
257 /// on single dimension axis:
259 /// [NFCT], [FCT, "others"], [FCT, pointers], [FCT, vectors]
260 /// Where: NFCT - Not a FirstClassType
261 /// FCT - FirstClassTyp:
263 /// 2. Compare raw contents.
264 /// It ignores types on this stage and only compares bits from L and R.
265 /// Returns 0, if L and R has equivalent contents.
266 /// -1 or 1 if values are different.
268 /// 2.1. If contents are numbers, compare numbers.
269 /// Ints with greater bitwidth are greater. Ints with same bitwidths
270 /// compared by their contents.
271 /// 2.2. "And so on". Just to avoid discrepancies with comments
272 /// perhaps it would be better to read the implementation itself.
273 /// 3. And again about overall picture. Let's look back at how the ordered set
274 /// of constants will look like:
275 /// [NFCT], [FCT, "others"], [FCT, pointers], [FCT, vectors]
277 /// Now look, what could be inside [FCT, "others"], for example:
278 /// [FCT, "others"] =
280 /// [double 0.1], [double 1.23],
281 /// [i32 1], [i32 2],
282 /// { double 1.0 }, ; StructTyID, NumElements = 1
283 /// { i32 1 }, ; StructTyID, NumElements = 1
284 /// { double 1, i32 1 }, ; StructTyID, NumElements = 2
285 /// { i32 1, double 1 } ; StructTyID, NumElements = 2
288 /// Let's explain the order. Float numbers will be less than integers, just
289 /// because of cmpType terms: FloatTyID < IntegerTyID.
290 /// Floats (with same fltSemantics) are sorted according to their value.
291 /// Then you can see integers, and they are, like a floats,
292 /// could be easy sorted among each others.
293 /// The structures. Structures are grouped at the tail, again because of their
294 /// TypeID: StructTyID > IntegerTyID > FloatTyID.
295 /// Structures with greater number of elements are greater. Structures with
296 /// greater elements going first are greater.
297 /// The same logic with vectors, arrays and other possible complex types.
299 /// Bitcastable constants.
300 /// Let's assume, that some constant, belongs to some group of
301 /// "so-called-equal" values with different types, and at the same time
302 /// belongs to another group of constants with equal types
303 /// and "really" equal values.
305 /// Now, prove that this is impossible:
307 /// If constant A with type TyA is bitcastable to B with type TyB, then:
308 /// 1. All constants with equal types to TyA, are bitcastable to B. Since
309 /// those should be vectors (if TyA is vector), pointers
310 /// (if TyA is pointer), or else (if TyA equal to TyB), those types should
312 /// 2. All constants with non-equal, but bitcastable types to TyA, are
313 /// bitcastable to B.
314 /// Once again, just because we allow it to vectors and pointers only.
315 /// This statement could be expanded as below:
316 /// 2.1. All vectors with equal bitwidth to vector A, has equal bitwidth to
317 /// vector B, and thus bitcastable to B as well.
318 /// 2.2. All pointers of the same address space, no matter what they point to,
319 /// bitcastable. So if C is pointer, it could be bitcasted to A and to B.
320 /// So any constant equal or bitcastable to A is equal or bitcastable to B.
323 /// In other words, for pointers and vectors, we ignore the top-level type and
324 /// look at their particular properties (bit-width for vectors, and
325 /// address space for pointers).
326 /// If these properties are equal - compare their contents.
327 int cmpConstants(const Constant *L, const Constant *R);
329 /// Assign or look up previously assigned numbers for the two values, and
330 /// return whether the numbers are equal. Numbers are assigned in the order
332 /// Comparison order:
333 /// Stage 0: Value that is function itself is always greater than others.
334 /// If left and right values are references to their functions, then
336 /// Stage 1: Constants are greater than non-constants.
337 /// If both left and right are constants, then the result of
338 /// cmpConstants is used as cmpValues result.
339 /// Stage 2: InlineAsm instances are greater than others. If both left and
340 /// right are InlineAsm instances, InlineAsm* pointers casted to
341 /// integers and compared as numbers.
342 /// Stage 3: For all other cases we compare order we meet these values in
343 /// their functions. If right value was met first during scanning,
344 /// then left value is greater.
345 /// In other words, we compare serial numbers, for more details
346 /// see comments for sn_mapL and sn_mapR.
347 int cmpValues(const Value *L, const Value *R);
349 /// Compare two Instructions for equivalence, similar to
350 /// Instruction::isSameOperationAs but with modifications to the type
352 /// Stages are listed in "most significant stage first" order:
353 /// On each stage below, we do comparison between some left and right
354 /// operation parts. If parts are non-equal, we assign parts comparison
355 /// result to the operation comparison result and exit from method.
356 /// Otherwise we proceed to the next stage.
358 /// 1. Operations opcodes. Compared as numbers.
359 /// 2. Number of operands.
360 /// 3. Operation types. Compared with cmpType method.
361 /// 4. Compare operation subclass optional data as stream of bytes:
362 /// just convert it to integers and call cmpNumbers.
363 /// 5. Compare in operation operand types with cmpType in
364 /// most significant operand first order.
365 /// 6. Last stage. Check operations for some specific attributes.
366 /// For example, for Load it would be:
367 /// 6.1.Load: volatile (as boolean flag)
368 /// 6.2.Load: alignment (as integer numbers)
369 /// 6.3.Load: synch-scope (as integer numbers)
370 /// 6.4.Load: range metadata (as integer numbers)
371 /// On this stage it's better to see the code, since it's not more than 10-15
372 /// strings for particular instruction, and could change sometimes.
373 int cmpOperation(const Instruction *L, const Instruction *R) const;
375 /// Compare two GEPs for equivalent pointer arithmetic.
376 /// Parts to be compared for each comparison stage,
377 /// most significant stage first:
378 /// 1. Address space. As numbers.
379 /// 2. Constant offset, (if "DataLayout *DL" field is not NULL,
380 /// using GEPOperator::accumulateConstantOffset method).
381 /// 3. Pointer operand type (using cmpType method).
382 /// 4. Number of operands.
383 /// 5. Compare operands, using cmpValues method.
384 int cmpGEP(const GEPOperator *GEPL, const GEPOperator *GEPR);
// Instruction-level overload: delegates to the GEPOperator overload above.
385 int cmpGEP(const GetElementPtrInst *GEPL, const GetElementPtrInst *GEPR) {
386 return cmpGEP(cast<GEPOperator>(GEPL), cast<GEPOperator>(GEPR));
389 /// cmpType - compares two types,
390 /// defines total ordering among the types set.
393 /// 0 if types are equal,
394 /// -1 if Left is less than Right,
395 /// +1 if Left is greater than Right.
398 /// Comparison is broken onto stages. Like in lexicographical comparison
399 /// stage coming first has higher priority.
400 /// On each explanation stage keep in mind total ordering properties.
402 /// 0. Before comparison we coerce pointer types of 0 address space to
404 /// We also don't bother with same type at left and right, so
405 /// just return 0 in this case.
407 /// 1. If types are of different kind (different type IDs).
408 /// Return result of type IDs comparison, treating them as numbers.
409 /// 2. If types are vectors or integers, compare Type* values as numbers.
410 /// 3. Types has same ID, so check whether they belongs to the next group:
419 /// If so - return 0, yes - we can treat these types as equal only because
420 /// their IDs are same.
421 /// 4. If Left and Right are pointers, return result of address space
422 /// comparison (numbers comparison). We can treat pointer types of same
423 /// address space as equal.
424 /// 5. If types are complex.
425 /// Then both Left and Right are to be expanded and their element types will
426 /// be checked with the same way. If we get Res != 0 on some stage, return it.
427 /// Otherwise return 0.
428 /// 6. For all other cases put llvm_unreachable.
429 int cmpType(Type *TyL, Type *TyR) const;
// Low-level three-way comparison helpers used by every stage above.
431 int cmpNumbers(uint64_t L, uint64_t R) const;
433 int cmpAPInt(const APInt &L, const APInt &R) const;
434 int cmpAPFloat(const APFloat &L, const APFloat &R) const;
435 int cmpStrings(StringRef L, StringRef R) const;
436 int cmpAttrs(const AttributeSet L, const AttributeSet R) const;
438 // The two functions undergoing comparison.
439 const Function *FnL, *FnR;
441 const DataLayout *DL;
443 /// Assign serial numbers to values from left function, and values from
446 /// When comparing functions we need to compare values we meet at left and
448 /// It's easy to sort things out for external values. It just should be
449 /// the same value at left and right.
450 /// But for local values (those were introduced inside function body)
451 /// we have to ensure they were introduced at exactly the same place,
452 /// and plays the same role.
453 /// Let's assign serial number to each value when we meet it first time.
454 /// Values that were met at same place will be with same serial numbers.
455 /// In this case it would be good to explain few points about values assigned
456 /// to BBs and other ways of implementation (see below).
458 /// 1. Safety of BB reordering.
459 /// It's safe to change the order of BasicBlocks in function.
460 /// Relationship with other functions and serial numbering will not be
461 /// changed in this case.
462 /// As follows from FunctionComparator::compare(), we do CFG walk: we start
463 /// from the entry, and then take each terminator. So it doesn't matter how in
464 /// fact BBs are ordered in function. And since cmpValues are called during
465 /// this walk, the numbering depends only on how BBs located inside the CFG.
466 /// So the answer is - yes. We will get the same numbering.
468 /// 2. Impossibility to use dominance properties of values.
469 /// If we compare two instruction operands: first is usage of local
470 /// variable AL from function FL, and second is usage of local variable AR
471 /// from FR, we could compare their origins and check whether they are
472 /// defined at the same place.
473 /// But, we are still not able to compare operands of PHI nodes, since those
474 /// could be operands from further BBs we didn't scan yet.
475 /// So it's impossible to use dominance properties in general.
476 DenseMap<const Value*, int> sn_mapL, sn_mapR;
// NOTE(review): this is the interior of the FunctionPtr wrapper class; the
// class header, access specifiers, and closing braces are not visible in
// this extract -- confirm against upstream.
480 AssertingVH<Function> F;
481 const DataLayout *DL;
484 FunctionPtr(Function *F, const DataLayout *DL) : F(F), DL(DL) {}
485 Function *getFunc() const { return F; }
// Drops the AssertingVH reference so the function may be erased safely.
486 void release() { F = 0; }
// Strict weak ordering for std::set: delegates to a full function
// comparison; "less" means compare() returned -1.
487 bool operator<(const FunctionPtr &RHS) const {
488 return (FunctionComparator(DL, F, RHS.getFunc()).compare()) == -1;
// Three-way comparison of two unsigned numbers: -1 / 0 / 1.
493 int FunctionComparator::cmpNumbers(uint64_t L, uint64_t R) const {
494 if (L < R) return -1;
// NOTE(review): the "L > R" return and the trailing "return 0;" (orig.
// lines 495-497) are missing from this extract.
499 int FunctionComparator::cmpAPInt(const APInt &L, const APInt &R) const {
// Wider integers order after narrower ones; equal-width values compare by
// unsigned magnitude.
500 if (int Res = cmpNumbers(L.getBitWidth(), R.getBitWidth()))
502 if (L.ugt(R)) return 1;
503 if (R.ugt(L)) return -1;
// NOTE(review): "return Res;", the final "return 0;", and the closing brace
// are missing from this extract.
507 int FunctionComparator::cmpAPFloat(const APFloat &L, const APFloat &R) const {
// Order first by float semantics -- identity of the fltSemantics object,
// compared by address -- then by the value's raw bit pattern.
508 if (int Res = cmpNumbers((uint64_t)&L.getSemantics(),
509 (uint64_t)&R.getSemantics()))
511 return cmpAPInt(L.bitcastToAPInt(), R.bitcastToAPInt());
// NOTE(review): "return Res;" and the closing brace are missing from this
// extract.
514 int FunctionComparator::cmpStrings(StringRef L, StringRef R) const {
515 // Prevent heavy comparison, compare sizes first.
516 if (int Res = cmpNumbers(L.size(), R.size()))
519 // Compare strings lexicographically only when it is necessary: only when
520 // strings are equal in size.
// NOTE(review): the lexicographic comparison itself and the closing brace
// are missing from this extract.
// Three-way comparison of two attribute sets: slot count first, then a
// pairwise walk of each slot's attributes.
524 int FunctionComparator::cmpAttrs(const AttributeSet L,
525 const AttributeSet R) const {
526 if (int Res = cmpNumbers(L.getNumSlots(), R.getNumSlots()))
529 for (unsigned i = 0, e = L.getNumSlots(); i != e; ++i) {
530 AttributeSet::iterator LI = L.begin(i), LE = L.end(i), RI = R.begin(i),
532 for (; LI != LE && RI != RE; ++LI, ++RI) {
// NOTE(review): the per-attribute comparison body, the RE initializer, and
// the function tail are missing from this extract.
548 /// Constants comparison:
549 /// 1. Check whether type of L constant could be losslessly bitcasted to R
551 /// 2. Compare constant contents.
552 /// For more details see declaration comments.
// NOTE(review): this extract is sampled -- numerous "return Res;" lines,
// braces, and intermediate statements are missing throughout the body.
// The visible code is kept byte-identical; confirm against upstream.
553 int FunctionComparator::cmpConstants(const Constant *L, const Constant *R) {
555 Type *TyL = L->getType();
556 Type *TyR = R->getType();
558 // Check whether types are bitcastable. This part is just re-factored
559 // Type::canLosslesslyBitCastTo method, but instead of returning true/false,
560 // we also pack into result which type is "less" for us.
561 int TypesRes = cmpType(TyL, TyR);
563 // Types are different, but check whether we can bitcast them.
564 if (!TyL->isFirstClassType()) {
565 if (TyR->isFirstClassType())
567 // Neither TyL nor TyR are values of first class type. Return the result
568 // of comparing the types
571 if (!TyR->isFirstClassType()) {
572 if (TyL->isFirstClassType())
577 // Vector -> Vector conversions are always lossless if the two vector types
578 // have the same size, otherwise not.
579 unsigned TyLWidth = 0;
580 unsigned TyRWidth = 0;
582 if (const VectorType *VecTyL = dyn_cast<VectorType>(TyL))
583 TyLWidth = VecTyL->getBitWidth();
584 if (const VectorType *VecTyR = dyn_cast<VectorType>(TyR))
585 TyRWidth = VecTyR->getBitWidth();
587 if (TyLWidth != TyRWidth)
588 return cmpNumbers(TyLWidth, TyRWidth);
590 // Zero bit-width means neither TyL nor TyR are vectors.
592 PointerType *PTyL = dyn_cast<PointerType>(TyL);
593 PointerType *PTyR = dyn_cast<PointerType>(TyR);
595 unsigned AddrSpaceL = PTyL->getAddressSpace();
596 unsigned AddrSpaceR = PTyR->getAddressSpace();
597 if (int Res = cmpNumbers(AddrSpaceL, AddrSpaceR))
605 // TyL and TyR aren't vectors, nor pointers. We don't know how to
611 // OK, types are bitcastable, now check constant contents.
// Null values order before non-null; two nulls are equal regardless of kind.
613 if (L->isNullValue() && R->isNullValue())
615 if (L->isNullValue() && !R->isNullValue())
617 if (!L->isNullValue() && R->isNullValue())
// Different constant kinds order by their ValueID.
620 if (int Res = cmpNumbers(L->getValueID(), R->getValueID()))
// Same kind: dispatch on kind and compare contents element-wise.
623 switch (L->getValueID()) {
624 case Value::UndefValueVal: return TypesRes;
625 case Value::ConstantIntVal: {
626 const APInt &LInt = cast<ConstantInt>(L)->getValue();
627 const APInt &RInt = cast<ConstantInt>(R)->getValue();
628 return cmpAPInt(LInt, RInt);
630 case Value::ConstantFPVal: {
631 const APFloat &LAPF = cast<ConstantFP>(L)->getValueAPF();
632 const APFloat &RAPF = cast<ConstantFP>(R)->getValueAPF();
633 return cmpAPFloat(LAPF, RAPF);
635 case Value::ConstantArrayVal: {
636 const ConstantArray *LA = cast<ConstantArray>(L);
637 const ConstantArray *RA = cast<ConstantArray>(R);
638 uint64_t NumElementsL = cast<ArrayType>(TyL)->getNumElements();
639 uint64_t NumElementsR = cast<ArrayType>(TyR)->getNumElements();
640 if (int Res = cmpNumbers(NumElementsL, NumElementsR))
642 for (uint64_t i = 0; i < NumElementsL; ++i) {
643 if (int Res = cmpConstants(cast<Constant>(LA->getOperand(i)),
644 cast<Constant>(RA->getOperand(i))))
649 case Value::ConstantStructVal: {
650 const ConstantStruct *LS = cast<ConstantStruct>(L);
651 const ConstantStruct *RS = cast<ConstantStruct>(R);
652 unsigned NumElementsL = cast<StructType>(TyL)->getNumElements();
653 unsigned NumElementsR = cast<StructType>(TyR)->getNumElements();
654 if (int Res = cmpNumbers(NumElementsL, NumElementsR))
656 for (unsigned i = 0; i != NumElementsL; ++i) {
657 if (int Res = cmpConstants(cast<Constant>(LS->getOperand(i)),
658 cast<Constant>(RS->getOperand(i))))
663 case Value::ConstantVectorVal: {
664 const ConstantVector *LV = cast<ConstantVector>(L);
665 const ConstantVector *RV = cast<ConstantVector>(R);
666 unsigned NumElementsL = cast<VectorType>(TyL)->getNumElements();
667 unsigned NumElementsR = cast<VectorType>(TyR)->getNumElements();
668 if (int Res = cmpNumbers(NumElementsL, NumElementsR))
670 for (uint64_t i = 0; i < NumElementsL; ++i) {
671 if (int Res = cmpConstants(cast<Constant>(LV->getOperand(i)),
672 cast<Constant>(RV->getOperand(i))))
677 case Value::ConstantExprVal: {
678 const ConstantExpr *LE = cast<ConstantExpr>(L);
679 const ConstantExpr *RE = cast<ConstantExpr>(R);
680 unsigned NumOperandsL = LE->getNumOperands();
681 unsigned NumOperandsR = RE->getNumOperands();
682 if (int Res = cmpNumbers(NumOperandsL, NumOperandsR))
684 for (unsigned i = 0; i < NumOperandsL; ++i) {
685 if (int Res = cmpConstants(cast<Constant>(LE->getOperand(i)),
686 cast<Constant>(RE->getOperand(i))))
// Globals (and any unknown kind): fall back to ordering by pointer identity.
691 case Value::FunctionVal:
692 case Value::GlobalVariableVal:
693 case Value::GlobalAliasVal:
694 default: // Unknown constant, cast L and R pointers to numbers and compare.
695 return cmpNumbers((uint64_t)L, (uint64_t)R);
699 /// cmpType - compares two types,
700 /// defines total ordering among the types set.
701 /// See method declaration comments for more details.
// NOTE(review): sampled extract -- several "return Res;"/"return 0;" lines,
// case braces, and the function's closing brace are missing. Visible code
// is kept byte-identical; confirm against upstream.
702 int FunctionComparator::cmpType(Type *TyL, Type *TyR) const {
// Coerce address-space-0 pointers to the DataLayout's intptr type so that
// pointers and same-sized integers compare as equal (see getTypeIDForHash).
704 PointerType *PTyL = dyn_cast<PointerType>(TyL);
705 PointerType *PTyR = dyn_cast<PointerType>(TyR);
708 if (PTyL && PTyL->getAddressSpace() == 0) TyL = DL->getIntPtrType(TyL);
709 if (PTyR && PTyR->getAddressSpace() == 0) TyR = DL->getIntPtrType(TyR);
715 if (int Res = cmpNumbers(TyL->getTypeID(), TyR->getTypeID()))
718 switch (TyL->getTypeID()) {
720 llvm_unreachable("Unknown type!");
721 // Fall through in Release mode.
722 case Type::IntegerTyID:
723 case Type::VectorTyID:
724 // TyL == TyR would have returned true earlier.
725 return cmpNumbers((uint64_t)TyL, (uint64_t)TyR);
// Singleton types: equal type IDs alone imply equality.
728 case Type::FloatTyID:
729 case Type::DoubleTyID:
730 case Type::X86_FP80TyID:
731 case Type::FP128TyID:
732 case Type::PPC_FP128TyID:
733 case Type::LabelTyID:
734 case Type::MetadataTyID:
737 case Type::PointerTyID: {
738 assert(PTyL && PTyR && "Both types must be pointers here.");
739 return cmpNumbers(PTyL->getAddressSpace(), PTyR->getAddressSpace());
742 case Type::StructTyID: {
743 StructType *STyL = cast<StructType>(TyL);
744 StructType *STyR = cast<StructType>(TyR);
745 if (STyL->getNumElements() != STyR->getNumElements())
746 return cmpNumbers(STyL->getNumElements(), STyR->getNumElements());
748 if (STyL->isPacked() != STyR->isPacked())
749 return cmpNumbers(STyL->isPacked(), STyR->isPacked());
751 for (unsigned i = 0, e = STyL->getNumElements(); i != e; ++i) {
752 if (int Res = cmpType(STyL->getElementType(i),
753 STyR->getElementType(i)))
759 case Type::FunctionTyID: {
760 FunctionType *FTyL = cast<FunctionType>(TyL);
761 FunctionType *FTyR = cast<FunctionType>(TyR);
762 if (FTyL->getNumParams() != FTyR->getNumParams())
763 return cmpNumbers(FTyL->getNumParams(), FTyR->getNumParams());
765 if (FTyL->isVarArg() != FTyR->isVarArg())
766 return cmpNumbers(FTyL->isVarArg(), FTyR->isVarArg());
768 if (int Res = cmpType(FTyL->getReturnType(), FTyR->getReturnType()))
771 for (unsigned i = 0, e = FTyL->getNumParams(); i != e; ++i) {
772 if (int Res = cmpType(FTyL->getParamType(i), FTyR->getParamType(i)))
778 case Type::ArrayTyID: {
779 ArrayType *ATyL = cast<ArrayType>(TyL);
780 ArrayType *ATyR = cast<ArrayType>(TyR);
781 if (ATyL->getNumElements() != ATyR->getNumElements())
782 return cmpNumbers(ATyL->getNumElements(), ATyR->getNumElements());
783 return cmpType(ATyL->getElementType(), ATyR->getElementType());
788 // Determine whether the two operations are the same except that pointer-to-A
789 // and pointer-to-B are equivalent. This should be kept in sync with
790 // Instruction::isSameOperationAs.
791 // Read method declaration comments for more details.
// NOTE(review): sampled extract -- many "return Res;" lines, "if (int Res ="
// prefixes, and braces are missing throughout. Visible code is kept
// byte-identical; confirm against upstream.
792 int FunctionComparator::cmpOperation(const Instruction *L,
793 const Instruction *R) const {
794 // Differences from Instruction::isSameOperationAs:
795 // * replace type comparison with calls to isEquivalentType.
796 // * we test for I->hasSameSubclassOptionalData (nuw/nsw/tail) at the top
797 // * because of the above, we don't test for the tail bit on calls later on
798 if (int Res = cmpNumbers(L->getOpcode(), R->getOpcode()))
801 if (int Res = cmpNumbers(L->getNumOperands(), R->getNumOperands()))
804 if (int Res = cmpType(L->getType(), R->getType()))
807 if (int Res = cmpNumbers(L->getRawSubclassOptionalData(),
808 R->getRawSubclassOptionalData()))
811 // We have two instructions of identical opcode and #operands. Check to see
812 // if all operands are the same type
813 for (unsigned i = 0, e = L->getNumOperands(); i != e; ++i) {
815 cmpType(L->getOperand(i)->getType(), R->getOperand(i)->getType()))
819 // Check special state that is a part of some instructions.
820 if (const LoadInst *LI = dyn_cast<LoadInst>(L)) {
821 if (int Res = cmpNumbers(LI->isVolatile(), cast<LoadInst>(R)->isVolatile()))
824 cmpNumbers(LI->getAlignment(), cast<LoadInst>(R)->getAlignment()))
827 cmpNumbers(LI->getOrdering(), cast<LoadInst>(R)->getOrdering()))
830 cmpNumbers(LI->getSynchScope(), cast<LoadInst>(R)->getSynchScope()))
832 return cmpNumbers((uint64_t)LI->getMetadata(LLVMContext::MD_range),
833 (uint64_t)cast<LoadInst>(R)->getMetadata(LLVMContext::MD_range));
835 if (const StoreInst *SI = dyn_cast<StoreInst>(L)) {
837 cmpNumbers(SI->isVolatile(), cast<StoreInst>(R)->isVolatile()))
840 cmpNumbers(SI->getAlignment(), cast<StoreInst>(R)->getAlignment()))
843 cmpNumbers(SI->getOrdering(), cast<StoreInst>(R)->getOrdering()))
845 return cmpNumbers(SI->getSynchScope(), cast<StoreInst>(R)->getSynchScope());
847 if (const CmpInst *CI = dyn_cast<CmpInst>(L))
848 return cmpNumbers(CI->getPredicate(), cast<CmpInst>(R)->getPredicate());
// Calls/invokes: calling convention first, then the full attribute sets.
849 if (const CallInst *CI = dyn_cast<CallInst>(L)) {
850 if (int Res = cmpNumbers(CI->getCallingConv(),
851 cast<CallInst>(R)->getCallingConv()))
853 return cmpAttrs(CI->getAttributes(), cast<CallInst>(R)->getAttributes());
855 if (const InvokeInst *CI = dyn_cast<InvokeInst>(L)) {
856 if (int Res = cmpNumbers(CI->getCallingConv(),
857 cast<InvokeInst>(R)->getCallingConv()))
859 return cmpAttrs(CI->getAttributes(), cast<InvokeInst>(R)->getAttributes());
// Aggregate insert/extract: compare index lists element-wise.
861 if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(L)) {
862 ArrayRef<unsigned> LIndices = IVI->getIndices();
863 ArrayRef<unsigned> RIndices = cast<InsertValueInst>(R)->getIndices();
864 if (int Res = cmpNumbers(LIndices.size(), RIndices.size()))
866 for (size_t i = 0, e = LIndices.size(); i != e; ++i) {
867 if (int Res = cmpNumbers(LIndices[i], RIndices[i]))
871 if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(L)) {
872 ArrayRef<unsigned> LIndices = EVI->getIndices();
873 ArrayRef<unsigned> RIndices = cast<ExtractValueInst>(R)->getIndices();
874 if (int Res = cmpNumbers(LIndices.size(), RIndices.size()))
876 for (size_t i = 0, e = LIndices.size(); i != e; ++i) {
877 if (int Res = cmpNumbers(LIndices[i], RIndices[i]))
881 if (const FenceInst *FI = dyn_cast<FenceInst>(L)) {
883 cmpNumbers(FI->getOrdering(), cast<FenceInst>(R)->getOrdering()))
885 return cmpNumbers(FI->getSynchScope(), cast<FenceInst>(R)->getSynchScope());
888 if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(L)) {
889 if (int Res = cmpNumbers(CXI->isVolatile(),
890 cast<AtomicCmpXchgInst>(R)->isVolatile()))
892 if (int Res = cmpNumbers(CXI->isWeak(),
893 cast<AtomicCmpXchgInst>(R)->isWeak()))
895 if (int Res = cmpNumbers(CXI->getSuccessOrdering(),
896 cast<AtomicCmpXchgInst>(R)->getSuccessOrdering()))
898 if (int Res = cmpNumbers(CXI->getFailureOrdering(),
899 cast<AtomicCmpXchgInst>(R)->getFailureOrdering()))
901 return cmpNumbers(CXI->getSynchScope(),
902 cast<AtomicCmpXchgInst>(R)->getSynchScope());
904 if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(L)) {
905 if (int Res = cmpNumbers(RMWI->getOperation(),
906 cast<AtomicRMWInst>(R)->getOperation()))
908 if (int Res = cmpNumbers(RMWI->isVolatile(),
909 cast<AtomicRMWInst>(R)->isVolatile()))
911 if (int Res = cmpNumbers(RMWI->getOrdering(),
912 cast<AtomicRMWInst>(R)->getOrdering()))
914 return cmpNumbers(RMWI->getSynchScope(),
915 cast<AtomicRMWInst>(R)->getSynchScope());
920 // Determine whether two GEP operations perform the same underlying arithmetic.
921 // Read method declaration comments for more details.
922 int FunctionComparator::cmpGEP(const GEPOperator *GEPL,
923 const GEPOperator *GEPR) {
925 unsigned int ASL = GEPL->getPointerAddressSpace();
926 unsigned int ASR = GEPR->getPointerAddressSpace();
928 if (int Res = cmpNumbers(ASL, ASR))
931 // When we have target data, we can reduce the GEP down to the value in bytes
932 // added to the address.
934 unsigned BitWidth = DL->getPointerSizeInBits(ASL);
935 APInt OffsetL(BitWidth, 0), OffsetR(BitWidth, 0);
936 if (GEPL->accumulateConstantOffset(*DL, OffsetL) &&
937 GEPR->accumulateConstantOffset(*DL, OffsetR))
938 return cmpAPInt(OffsetL, OffsetR);
941 if (int Res = cmpNumbers((uint64_t)GEPL->getPointerOperand()->getType(),
942 (uint64_t)GEPR->getPointerOperand()->getType()))
945 if (int Res = cmpNumbers(GEPL->getNumOperands(), GEPR->getNumOperands()))
948 for (unsigned i = 0, e = GEPL->getNumOperands(); i != e; ++i) {
949 if (int Res = cmpValues(GEPL->getOperand(i), GEPR->getOperand(i)))
956 /// Compare two values used by the two functions under pair-wise comparison. If
957 /// this is the first time the values are seen, they're added to the mapping so
958 /// that we will detect mismatches on next use.
959 /// See comments in declaration for more details.
960 int FunctionComparator::cmpValues(const Value *L, const Value *R) {
961 // Catch self-reference case.
973 const Constant *ConstL = dyn_cast<Constant>(L);
974 const Constant *ConstR = dyn_cast<Constant>(R);
975 if (ConstL && ConstR) {
978 return cmpConstants(ConstL, ConstR);
986 const InlineAsm *InlineAsmL = dyn_cast<InlineAsm>(L);
987 const InlineAsm *InlineAsmR = dyn_cast<InlineAsm>(R);
989 if (InlineAsmL && InlineAsmR)
990 return cmpNumbers((uint64_t)L, (uint64_t)R);
996 auto LeftSN = sn_mapL.insert(std::make_pair(L, sn_mapL.size())),
997 RightSN = sn_mapR.insert(std::make_pair(R, sn_mapR.size()));
999 return cmpNumbers(LeftSN.first->second, RightSN.first->second);
1001 // Test whether two basic blocks have equivalent behaviour.
1002 int FunctionComparator::compare(const BasicBlock *BBL, const BasicBlock *BBR) {
1003 BasicBlock::const_iterator InstL = BBL->begin(), InstLE = BBL->end();
1004 BasicBlock::const_iterator InstR = BBR->begin(), InstRE = BBR->end();
1007 if (int Res = cmpValues(InstL, InstR))
1010 const GetElementPtrInst *GEPL = dyn_cast<GetElementPtrInst>(InstL);
1011 const GetElementPtrInst *GEPR = dyn_cast<GetElementPtrInst>(InstR);
1020 cmpValues(GEPL->getPointerOperand(), GEPR->getPointerOperand()))
1022 if (int Res = cmpGEP(GEPL, GEPR))
1025 if (int Res = cmpOperation(InstL, InstR))
1027 assert(InstL->getNumOperands() == InstR->getNumOperands());
1029 for (unsigned i = 0, e = InstL->getNumOperands(); i != e; ++i) {
1030 Value *OpL = InstL->getOperand(i);
1031 Value *OpR = InstR->getOperand(i);
1032 if (int Res = cmpValues(OpL, OpR))
1034 if (int Res = cmpNumbers(OpL->getValueID(), OpR->getValueID()))
1036 // TODO: Already checked in cmpOperation
1037 if (int Res = cmpType(OpL->getType(), OpR->getType()))
1043 } while (InstL != InstLE && InstR != InstRE);
1045 if (InstL != InstLE && InstR == InstRE)
1047 if (InstL == InstLE && InstR != InstRE)
1052 // Test whether the two functions have equivalent behaviour.
1053 int FunctionComparator::compare() {
1058 if (int Res = cmpAttrs(FnL->getAttributes(), FnR->getAttributes()))
1061 if (int Res = cmpNumbers(FnL->hasGC(), FnR->hasGC()))
1065 if (int Res = cmpNumbers((uint64_t)FnL->getGC(), (uint64_t)FnR->getGC()))
1069 if (int Res = cmpNumbers(FnL->hasSection(), FnR->hasSection()))
1072 if (FnL->hasSection()) {
1073 if (int Res = cmpStrings(FnL->getSection(), FnR->getSection()))
1077 if (int Res = cmpNumbers(FnL->isVarArg(), FnR->isVarArg()))
1080 // TODO: if it's internal and only used in direct calls, we could handle this
1082 if (int Res = cmpNumbers(FnL->getCallingConv(), FnR->getCallingConv()))
1085 if (int Res = cmpType(FnL->getFunctionType(), FnR->getFunctionType()))
1088 assert(FnL->arg_size() == FnR->arg_size() &&
1089 "Identically typed functions have different numbers of args!");
1091 // Visit the arguments so that they get enumerated in the order they're
1093 for (Function::const_arg_iterator ArgLI = FnL->arg_begin(),
1094 ArgRI = FnR->arg_begin(),
1095 ArgLE = FnL->arg_end();
1096 ArgLI != ArgLE; ++ArgLI, ++ArgRI) {
1097 if (cmpValues(ArgLI, ArgRI) != 0)
1098 llvm_unreachable("Arguments repeat!");
1101 // We do a CFG-ordered walk since the actual ordering of the blocks in the
1102 // linked list is immaterial. Our walk starts at the entry block for both
1103 // functions, then takes each block from each terminator in order. As an
1104 // artifact, this also means that unreachable blocks are ignored.
1105 SmallVector<const BasicBlock *, 8> FnLBBs, FnRBBs;
1106 SmallSet<const BasicBlock *, 128> VisitedBBs; // in terms of F1.
1108 FnLBBs.push_back(&FnL->getEntryBlock());
1109 FnRBBs.push_back(&FnR->getEntryBlock());
1111 VisitedBBs.insert(FnLBBs[0]);
1112 while (!FnLBBs.empty()) {
1113 const BasicBlock *BBL = FnLBBs.pop_back_val();
1114 const BasicBlock *BBR = FnRBBs.pop_back_val();
1116 if (int Res = cmpValues(BBL, BBR))
1119 if (int Res = compare(BBL, BBR))
1122 const TerminatorInst *TermL = BBL->getTerminator();
1123 const TerminatorInst *TermR = BBR->getTerminator();
1125 assert(TermL->getNumSuccessors() == TermR->getNumSuccessors());
1126 for (unsigned i = 0, e = TermL->getNumSuccessors(); i != e; ++i) {
1127 if (!VisitedBBs.insert(TermL->getSuccessor(i)))
1130 FnLBBs.push_back(TermL->getSuccessor(i));
1131 FnRBBs.push_back(TermR->getSuccessor(i));
1139 /// MergeFunctions finds functions which will generate identical machine code,
1140 /// by considering all pointer types to be equivalent. Once identified,
1141 /// MergeFunctions will fold them by replacing a call to one to a call to a
1142 /// bitcast of the other.
1144 class MergeFunctions : public ModulePass {
1148 : ModulePass(ID), HasGlobalAliases(false) {
1149 initializeMergeFunctionsPass(*PassRegistry::getPassRegistry());
1152 bool runOnModule(Module &M) override;
1155 typedef std::set<FunctionPtr> FnTreeType;
1157 /// A work queue of functions that may have been modified and should be
1159 std::vector<WeakVH> Deferred;
1161 /// Checks the rules of order relation introduced among functions set.
1162 /// Returns true, if sanity check has been passed, and false if failed.
1163 bool doSanityCheck(std::vector<WeakVH> &Worklist);
1165 /// Insert a ComparableFunction into the FnTree, or merge it away if it's
1166 /// equal to one that's already present.
1167 bool insert(Function *NewFunction);
1169 /// Remove a Function from the FnTree and queue it up for a second sweep of
1171 void remove(Function *F);
1173 /// Find the functions that use this Value and remove them from FnTree and
1174 /// queue the functions.
1175 void removeUsers(Value *V);
1177 /// Replace all direct calls of Old with calls of New. Will bitcast New if
1178 /// necessary to make types match.
1179 void replaceDirectCallers(Function *Old, Function *New);
1181 /// Merge two equivalent functions. Upon completion, G may be deleted, or may
1182 /// be converted into a thunk. In either case, it should never be visited
1184 void mergeTwoFunctions(Function *F, Function *G);
1186 /// Replace G with a thunk or an alias to F. Deletes G.
1187 void writeThunkOrAlias(Function *F, Function *G);
1189 /// Replace G with a simple tail call to bitcast(F). Also replace direct uses
1190 /// of G with bitcast(F). Deletes G.
1191 void writeThunk(Function *F, Function *G);
1193 /// Replace G with an alias to F. Deletes G.
1194 void writeAlias(Function *F, Function *G);
1196 /// The set of all distinct functions. Use the insert() and remove() methods
1200 /// DataLayout for more accurate GEP comparisons. May be NULL.
1201 const DataLayout *DL;
1203 /// Whether or not the target supports global aliases.
1204 bool HasGlobalAliases;
1207 } // end anonymous namespace
1209 char MergeFunctions::ID = 0;
1210 INITIALIZE_PASS(MergeFunctions, "mergefunc", "Merge Functions", false, false)
1212 ModulePass *llvm::createMergeFunctionsPass() {
1213 return new MergeFunctions();
1216 bool MergeFunctions::doSanityCheck(std::vector<WeakVH> &Worklist) {
1217 if (const unsigned Max = NumFunctionsForSanityCheck) {
1218 unsigned TripleNumber = 0;
1221 dbgs() << "MERGEFUNC-SANITY: Started for first " << Max << " functions.\n";
1224 for (std::vector<WeakVH>::iterator I = Worklist.begin(), E = Worklist.end();
1225 I != E && i < Max; ++I, ++i) {
1227 for (std::vector<WeakVH>::iterator J = I; J != E && j < Max; ++J, ++j) {
1228 Function *F1 = cast<Function>(*I);
1229 Function *F2 = cast<Function>(*J);
1230 int Res1 = FunctionComparator(DL, F1, F2).compare();
1231 int Res2 = FunctionComparator(DL, F2, F1).compare();
1233 // If F1 <= F2, then F2 >= F1, otherwise report failure.
1234 if (Res1 != -Res2) {
1235 dbgs() << "MERGEFUNC-SANITY: Non-symmetric; triple: " << TripleNumber
1246 for (std::vector<WeakVH>::iterator K = J; K != E && k < Max;
1247 ++k, ++K, ++TripleNumber) {
1251 Function *F3 = cast<Function>(*K);
1252 int Res3 = FunctionComparator(DL, F1, F3).compare();
1253 int Res4 = FunctionComparator(DL, F2, F3).compare();
1255 bool Transitive = true;
1257 if (Res1 != 0 && Res1 == Res4) {
1258 // F1 > F2, F2 > F3 => F1 > F3
1259 Transitive = Res3 == Res1;
1260 } else if (Res3 != 0 && Res3 == -Res4) {
1261 // F1 > F3, F3 > F2 => F1 > F2
1262 Transitive = Res3 == Res1;
1263 } else if (Res4 != 0 && -Res3 == Res4) {
1264 // F2 > F3, F3 > F1 => F2 > F1
1265 Transitive = Res4 == -Res1;
1269 dbgs() << "MERGEFUNC-SANITY: Non-transitive; triple: "
1270 << TripleNumber << "\n";
1271 dbgs() << "Res1, Res3, Res4: " << Res1 << ", " << Res3 << ", "
1282 dbgs() << "MERGEFUNC-SANITY: " << (Valid ? "Passed." : "Failed.") << "\n";
1288 bool MergeFunctions::runOnModule(Module &M) {
1289 bool Changed = false;
1290 DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
1291 DL = DLP ? &DLP->getDataLayout() : nullptr;
1293 for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {
1294 if (!I->isDeclaration() && !I->hasAvailableExternallyLinkage())
1295 Deferred.push_back(WeakVH(I));
1299 std::vector<WeakVH> Worklist;
1300 Deferred.swap(Worklist);
1302 DEBUG(doSanityCheck(Worklist));
1304 DEBUG(dbgs() << "size of module: " << M.size() << '\n');
1305 DEBUG(dbgs() << "size of worklist: " << Worklist.size() << '\n');
1307 // Insert only strong functions and merge them. Strong function merging
1308 // always deletes one of them.
1309 for (std::vector<WeakVH>::iterator I = Worklist.begin(),
1310 E = Worklist.end(); I != E; ++I) {
1312 Function *F = cast<Function>(*I);
1313 if (!F->isDeclaration() && !F->hasAvailableExternallyLinkage() &&
1314 !F->mayBeOverridden()) {
1315 Changed |= insert(F);
1319 // Insert only weak functions and merge them. By doing these second we
1320 // create thunks to the strong function when possible. When two weak
1321 // functions are identical, we create a new strong function with two weak
1322 // weak thunks to it which are identical but not mergable.
1323 for (std::vector<WeakVH>::iterator I = Worklist.begin(),
1324 E = Worklist.end(); I != E; ++I) {
1326 Function *F = cast<Function>(*I);
1327 if (!F->isDeclaration() && !F->hasAvailableExternallyLinkage() &&
1328 F->mayBeOverridden()) {
1329 Changed |= insert(F);
1332 DEBUG(dbgs() << "size of FnTree: " << FnTree.size() << '\n');
1333 } while (!Deferred.empty());
1340 bool DenseMapInfo<ComparableFunction>::isEqual(const ComparableFunction &LHS,
1341 const ComparableFunction &RHS) {
1342 if (LHS.getFunc() == RHS.getFunc() &&
1343 LHS.getHash() == RHS.getHash())
1345 if (!LHS.getFunc() || !RHS.getFunc())
1348 // One of these is a special "underlying pointer comparison only" object.
1349 if (LHS.getDataLayout() == ComparableFunction::LookupOnly ||
1350 RHS.getDataLayout() == ComparableFunction::LookupOnly)
1353 assert(LHS.getDataLayout() == RHS.getDataLayout() &&
1354 "Comparing functions for different targets");
1356 return FunctionComparator(LHS.getDataLayout(), LHS.getFunc(), RHS.getFunc())
1360 // Replace direct callers of Old with New.
1361 void MergeFunctions::replaceDirectCallers(Function *Old, Function *New) {
1362 Constant *BitcastNew = ConstantExpr::getBitCast(New, Old->getType());
1363 for (auto UI = Old->use_begin(), UE = Old->use_end(); UI != UE;) {
1366 CallSite CS(U->getUser());
1367 if (CS && CS.isCallee(U)) {
1368 remove(CS.getInstruction()->getParent()->getParent());
1374 // Replace G with an alias to F if possible, or else a thunk to F. Deletes G.
1375 void MergeFunctions::writeThunkOrAlias(Function *F, Function *G) {
1376 if (HasGlobalAliases && G->hasUnnamedAddr()) {
1377 if (G->hasExternalLinkage() || G->hasLocalLinkage() ||
1378 G->hasWeakLinkage()) {
1387 // Helper for writeThunk,
1388 // Selects proper bitcast operation,
1389 // but a bit simpler then CastInst::getCastOpcode.
1390 static Value *createCast(IRBuilder<false> &Builder, Value *V, Type *DestTy) {
1391 Type *SrcTy = V->getType();
1392 if (SrcTy->isStructTy()) {
1393 assert(DestTy->isStructTy());
1394 assert(SrcTy->getStructNumElements() == DestTy->getStructNumElements());
1395 Value *Result = UndefValue::get(DestTy);
1396 for (unsigned int I = 0, E = SrcTy->getStructNumElements(); I < E; ++I) {
1397 Value *Element = createCast(
1398 Builder, Builder.CreateExtractValue(V, ArrayRef<unsigned int>(I)),
1399 DestTy->getStructElementType(I));
1402 Builder.CreateInsertValue(Result, Element, ArrayRef<unsigned int>(I));
1406 assert(!DestTy->isStructTy());
1407 if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
1408 return Builder.CreateIntToPtr(V, DestTy);
1409 else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
1410 return Builder.CreatePtrToInt(V, DestTy);
1412 return Builder.CreateBitCast(V, DestTy);
1415 // Replace G with a simple tail call to bitcast(F). Also replace direct uses
1416 // of G with bitcast(F). Deletes G.
1417 void MergeFunctions::writeThunk(Function *F, Function *G) {
1418 if (!G->mayBeOverridden()) {
1419 // Redirect direct callers of G to F.
1420 replaceDirectCallers(G, F);
1423 // If G was internal then we may have replaced all uses of G with F. If so,
1424 // stop here and delete G. There's no need for a thunk.
1425 if (G->hasLocalLinkage() && G->use_empty()) {
1426 G->eraseFromParent();
1430 Function *NewG = Function::Create(G->getFunctionType(), G->getLinkage(), "",
1432 BasicBlock *BB = BasicBlock::Create(F->getContext(), "", NewG);
1433 IRBuilder<false> Builder(BB);
1435 SmallVector<Value *, 16> Args;
1437 FunctionType *FFTy = F->getFunctionType();
1438 for (Function::arg_iterator AI = NewG->arg_begin(), AE = NewG->arg_end();
1440 Args.push_back(createCast(Builder, (Value*)AI, FFTy->getParamType(i)));
1444 CallInst *CI = Builder.CreateCall(F, Args);
1446 CI->setCallingConv(F->getCallingConv());
1447 if (NewG->getReturnType()->isVoidTy()) {
1448 Builder.CreateRetVoid();
1450 Builder.CreateRet(createCast(Builder, CI, NewG->getReturnType()));
1453 NewG->copyAttributesFrom(G);
1456 G->replaceAllUsesWith(NewG);
1457 G->eraseFromParent();
1459 DEBUG(dbgs() << "writeThunk: " << NewG->getName() << '\n');
1463 // Replace G with an alias to F and delete G.
1464 void MergeFunctions::writeAlias(Function *F, Function *G) {
1465 PointerType *PTy = G->getType();
1466 auto *GA = GlobalAlias::create(PTy->getElementType(), PTy->getAddressSpace(),
1467 G->getLinkage(), "", F);
1468 F->setAlignment(std::max(F->getAlignment(), G->getAlignment()));
1470 GA->setVisibility(G->getVisibility());
1472 G->replaceAllUsesWith(GA);
1473 G->eraseFromParent();
1475 DEBUG(dbgs() << "writeAlias: " << GA->getName() << '\n');
1476 ++NumAliasesWritten;
1479 // Merge two equivalent functions. Upon completion, Function G is deleted.
1480 void MergeFunctions::mergeTwoFunctions(Function *F, Function *G) {
1481 if (F->mayBeOverridden()) {
1482 assert(G->mayBeOverridden());
1484 if (HasGlobalAliases) {
1485 // Make them both thunks to the same internal function.
1486 Function *H = Function::Create(F->getFunctionType(), F->getLinkage(), "",
1488 H->copyAttributesFrom(F);
1491 F->replaceAllUsesWith(H);
1493 unsigned MaxAlignment = std::max(G->getAlignment(), H->getAlignment());
1498 F->setAlignment(MaxAlignment);
1499 F->setLinkage(GlobalValue::PrivateLinkage);
1501 // We can't merge them. Instead, pick one and update all direct callers
1502 // to call it and hope that we improve the instruction cache hit rate.
1503 replaceDirectCallers(G, F);
1508 writeThunkOrAlias(F, G);
1511 ++NumFunctionsMerged;
1514 // Insert a ComparableFunction into the FnTree, or merge it away if equal to one
1515 // that was already inserted.
1516 bool MergeFunctions::insert(Function *NewFunction) {
1517 std::pair<FnTreeType::iterator, bool> Result =
1518 FnTree.insert(FunctionPtr(NewFunction, DL));
1520 if (Result.second) {
1521 DEBUG(dbgs() << "Inserting as unique: " << NewFunction->getName() << '\n');
1525 const FunctionPtr &OldF = *Result.first;
1527 // Don't merge tiny functions, since it can just end up making the function
1529 // FIXME: Should still merge them if they are unnamed_addr and produce an
1531 if (NewFunction->size() == 1) {
1532 if (NewFunction->front().size() <= 2) {
1533 DEBUG(dbgs() << NewFunction->getName()
1534 << " is to small to bother merging\n");
1539 // Never thunk a strong function to a weak function.
1540 assert(!OldF.getFunc()->mayBeOverridden() || NewFunction->mayBeOverridden());
1542 DEBUG(dbgs() << " " << OldF.getFunc()->getName()
1543 << " == " << NewFunction->getName() << '\n');
1545 Function *DeleteF = NewFunction;
1546 mergeTwoFunctions(OldF.getFunc(), DeleteF);
1550 // Remove a function from FnTree. If it was already in FnTree, add
1551 // it to Deferred so that we'll look at it in the next round.
1552 void MergeFunctions::remove(Function *F) {
1553 // We need to make sure we remove F, not a function "equal" to F per the
1554 // function equality comparator.
1555 FnTreeType::iterator found = FnTree.find(FunctionPtr(F, DL));
1557 if (found != FnTree.end() && found->getFunc() == F) {
1559 FnTree.erase(found);
1563 DEBUG(dbgs() << "Removed " << F->getName()
1564 << " from set and deferred it.\n");
1565 Deferred.push_back(F);
1569 // For each instruction used by the value, remove() the function that contains
1570 // the instruction. This should happen right before a call to RAUW.
1571 void MergeFunctions::removeUsers(Value *V) {
1572 std::vector<Value *> Worklist;
1573 Worklist.push_back(V);
1574 while (!Worklist.empty()) {
1575 Value *V = Worklist.back();
1576 Worklist.pop_back();
1578 for (User *U : V->users()) {
1579 if (Instruction *I = dyn_cast<Instruction>(U)) {
1580 remove(I->getParent()->getParent());
1581 } else if (isa<GlobalValue>(U)) {
1583 } else if (Constant *C = dyn_cast<Constant>(U)) {
1584 for (User *UU : C->users())
1585 Worklist.push_back(UU);