//===- MergeFunctions.cpp - Merge identical functions --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass looks for equivalent functions that are mergeable and folds them.
//
// An order relation is defined on the set of functions. It is implemented by a
// special function comparison procedure that returns
// 0 when the functions are equal,
// -1 when the Left function is less than the Right function, and
// 1 in the opposite case. We need a total ordering, so we have to maintain
// four properties on the function set:
// a <= a (reflexivity)
// if a <= b and b <= a then a = b (antisymmetry)
// if a <= b and b <= c then a <= c (transitivity).
// for all a and b: a <= b or b <= a (totality).
//
// Comparison iterates through each instruction in each basic block.
// Functions are kept in a binary tree. For each new function F we perform a
// lookup in that tree.
// In practice it works the following way:
// -- We define a Function* container class with a custom "operator<"
//    (FunctionPtr).
// -- "FunctionPtr" instances are stored in an std::set collection, so every
//    std::set::insert operation gives a result in log(N) time.
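//
// An illustrative sketch of that container (hypothetical names, not the exact
// classes defined below): the wrapper reduces the three-way comparison to the
// strict weak ordering std::set expects.
//
//   struct FunctionPtr {
//     Function *F;
//     bool operator<(const FunctionPtr &RHS) const {
//       return compareFunctions(F, RHS.F) == -1; // 3-way compare -> "<"
//     }
//   };
//   std::set<FunctionPtr> FnSet; // insert/lookup in O(log N)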
//
// As an optimization, a hash of the function structure is calculated first,
// and two functions are only compared if they have the same hash. This hash is
// cheap to compute, and has the property that if function F == G according to
// the comparison function, then hash(F) == hash(G). This consistency property
// is critical to ensuring all possible merging opportunities are exploited.
// Collisions in the hash affect the speed of the pass but not the correctness
// or determinism of the resulting transformation.
//
// When a match is found the functions are folded. If both functions are
// overridable, we move the functionality into a new internal function and
// leave two overridable thunks to it.
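//
// For example (illustrative, C-like view; all names are hypothetical):
//
//   // before: two identical overridable (weak) definitions
//   __attribute__((weak)) int fA(int x) { return x + 1; }
//   __attribute__((weak)) int fB(int x) { return x + 1; }
//
//   // after: one internal body, two overridable thunks
//   static int merged(int x) { return x + 1; }
//   __attribute__((weak)) int fA(int x) { return merged(x); }
//   __attribute__((weak)) int fB(int x) { return merged(x); }
//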
//===----------------------------------------------------------------------===//
//
// Future work:
//
// * virtual functions.
//
// Many functions have their address taken by the virtual function table for
// the object they belong to. However, as long as it's only used for a lookup
// and call, this is irrelevant, and we'd like to fold such functions.
//
// * be smarter about bitcasts.
//
// In order to fold functions, we will sometimes add either bitcast
// instructions or bitcast constant expressions. Unfortunately, this can
// confound further analysis since the two functions differ where one has a
// bitcast and the other doesn't. We should learn to look through bitcasts.
//
// * Compare complex types with pointer types inside.
// * Compare cross-reference cases.
// * Compare complex expressions.
//
// All three issues above could be described as the ability to prove that
// fA == fB == fC == fE == fF == fG for mutually cross-referencing functions
// (e.g. fA calls fB while fB calls fA).
//
// The simplest cross-reference case (fA <--> fB) was implemented in previous
// versions of MergeFunctions, though it appeared in only two function pairs
// in the test-suite (which counts >50k functions).
// The ability to detect complex cross-referencing (e.g.: A->B->C->D->A)
// could cover many more cases.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "mergefunc"

STATISTIC(NumFunctionsMerged, "Number of functions merged");
STATISTIC(NumThunksWritten, "Number of thunks generated");
STATISTIC(NumAliasesWritten, "Number of aliases generated");
STATISTIC(NumDoubleWeak, "Number of new functions created");

static cl::opt<unsigned> NumFunctionsForSanityCheck(
    "mergefunc-sanity",
    cl::desc("How many functions in the module could be used for "
             "MergeFunctions pass sanity check. "
             "'0' disables this check. Works only with '-debug' key."),
    cl::init(0), cl::Hidden);

namespace {

/// GlobalNumberState assigns an integer to each global value in the program,
/// which is used by the comparison routine to order references to globals.
/// This state must be preserved throughout the pass, because Functions and
/// other globals need to maintain their relative order. Globals are assigned a
/// number when they are first visited. This order is deterministic, and so the
/// assigned numbers are as well. When two functions are merged, neither number
/// is updated. If the symbols are weak, this would be incorrect. If they are
/// strong, then one will be replaced at all references to the other, and so
/// direct callsites will now see one or the other symbol, and no update is
/// necessary. Note that if we were guaranteed unique names, we could just
/// compare those, but this would not work for stripped bitcodes or for those
/// few symbols without a name.
class GlobalNumberState {
  struct Config : ValueMapConfig<GlobalValue *> {
    enum { FollowRAUW = false };
  };
  // Each GlobalValue is mapped to an identifier. The Config ensures when RAUW
  // occurs, the mapping does not change. Tracking changes is unnecessary, and
  // also problematic for weak symbols (which may be overwritten).
  typedef ValueMap<GlobalValue *, uint64_t, Config> ValueNumberMap;
  ValueNumberMap GlobalNumbers;
  // The next unused serial number to assign to a global.
  uint64_t NextNumber;

public:
  GlobalNumberState() : GlobalNumbers(), NextNumber(0) {}

  uint64_t getNumber(GlobalValue *Global) {
    ValueNumberMap::iterator MapIter;
    bool Inserted;
    std::tie(MapIter, Inserted) = GlobalNumbers.insert({Global, NextNumber});
    if (Inserted)
      NextNumber++;
    return MapIter->second;
  }

  void clear() {
    GlobalNumbers.clear();
  }
};
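
// A minimal usage sketch (hypothetical globals GA and GB, visited in that
// order): numbers are handed out on first visit and stay stable afterwards,
// which is what makes comparisons against them deterministic.
//
//   GlobalNumberState GN;
//   uint64_t A = GN.getNumber(GA); // 0 on first visit
//   uint64_t B = GN.getNumber(GB); // 1 on first visit
//   assert(GN.getNumber(GA) == A); // revisits return the same number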

/// FunctionComparator - Compares two functions to determine whether or not
/// they will generate machine code with the same behaviour. DataLayout is
/// used if available. The comparator always fails conservatively (erring on
/// the side of claiming that two functions are different).
class FunctionComparator {
public:
  FunctionComparator(const Function *F1, const Function *F2,
                     GlobalNumberState* GN)
      : FnL(F1), FnR(F2), GlobalNumbers(GN) {}

  /// Test whether the two functions have equivalent behaviour.
  int compare();

  /// Hash a function. Equivalent functions will have the same hash, and
  /// unequal functions will have different hashes with high probability.
  typedef uint64_t FunctionHash;
  static FunctionHash functionHash(Function &);

private:
  /// Test whether two basic blocks have equivalent behaviour.
  int cmpBasicBlocks(const BasicBlock *BBL, const BasicBlock *BBR);

  /// Constants comparison.
  /// It is analogous to lexicographical comparison between hypothetical
  /// numbers of the following format:
  /// <bitcastability-trait><raw-bit-contents>
  ///
  /// 1. Bitcastability.
  /// Check whether L's type could be losslessly bitcasted to R's type.
  /// If a lossless bitcast is not possible, the method returns -1 or 1 at this
  /// stage, thus also defining which type is greater in the context of
  /// bitcastability.
  /// Stage 0: If types are equal in terms of cmpTypes, then we can go straight
  ///          to the contents comparison.
  ///          If types differ, remember the types comparison result and check
  ///          whether we can still bitcast the types.
  /// Stage 1: Types that satisfy the isFirstClassType condition are always
  ///          greater than the others.
  /// Stage 2: A vector is greater than a non-vector.
  ///          If both types are vectors, then the vector with the greater
  ///          bitwidth is greater.
  ///          If both types are vectors with the same bitwidth, then the types
  ///          are bitcastable, and we can skip the other stages and go to the
  ///          contents comparison.
  /// Stage 3: Pointer types are greater than non-pointers. If both types are
  ///          pointers of the same address space - go to the contents
  ///          comparison. Different address spaces: the pointer with the
  ///          greater address space is greater.
  /// Stage 4: Types are neither vectors, nor pointers. And they differ.
  ///          We don't know how to bitcast them. So, we better don't do it,
  ///          and return the types comparison result (so it determines the
  ///          relationship among constants we don't know how to bitcast).
  ///
  /// Just for clarity, let's see how the set of constants could look
  /// on a single dimension axis:
  ///
  /// [NFCT], [FCT, "others"], [FCT, pointers], [FCT, vectors]
  /// Where: NFCT - Not a FirstClassType
  ///        FCT - FirstClassType
  ///
  /// 2. Compare raw contents.
  /// This stage ignores types and only compares bits from L and R.
  /// It returns 0 if L and R have equivalent contents, and
  /// -1 or 1 if the values differ.
  ///
  /// 2.1. If contents are numbers, compare numbers.
  ///      Ints with greater bitwidth are greater. Ints with the same bitwidth
  ///      are compared by their contents.
  /// 2.2. "And so on". To avoid discrepancies with these comments, it is
  ///      perhaps better to read the implementation itself.
  /// 3. And again about the overall picture. Let's look back at how the
  ///    ordered set of constants will look:
  ///
  /// [NFCT], [FCT, "others"], [FCT, pointers], [FCT, vectors]
  ///
  /// Now look at what could be inside [FCT, "others"], for example:
  /// [FCT, "others"] =
  /// [
  ///   [double 0.1], [double 1.23],
  ///   [i32 1], [i32 2],
  ///   { double 1.0 },       ; StructTyID, NumElements = 1
  ///   { i32 1 },            ; StructTyID, NumElements = 1
  ///   { double 1, i32 1 },  ; StructTyID, NumElements = 2
  ///   { i32 1, double 1 }   ; StructTyID, NumElements = 2
  /// ]
  ///
  /// Let's explain the order. Floating-point numbers will be less than
  /// integers, just because of cmpType terms: FloatTyID < IntegerTyID.
  /// Floats (with the same fltSemantics) are sorted according to their value.
  /// Then come the integers, which, like the floats, are easily sorted among
  /// each other.
  /// Then the structures. Structures are grouped at the tail, again because of
  /// their TypeID: StructTyID > IntegerTyID > FloatTyID.
  /// Structures with a greater number of elements are greater. Structures with
  /// greater elements going first are greater.
  /// The same logic applies to vectors, arrays and other possible complex
  /// types.
  ///
  /// Bitcastable constants.
  /// Let's assume that some constant belongs to some group of
  /// "so-called-equal" values with different types, and at the same time
  /// belongs to another group of constants with equal types
  /// and "really" equal values.
  ///
  /// Now, prove that this is impossible:
  ///
  /// If constant A with type TyA is bitcastable to B with type TyB, then:
  /// 1. All constants with types equal to TyA are bitcastable to B. Since
  ///    those should be vectors (if TyA is a vector), pointers
  ///    (if TyA is a pointer), or else (if TyA equals TyB), those types should
  ///    be equal to TyB as well.
  /// 2. All constants with non-equal, but bitcastable types to TyA, are
  ///    bitcastable to B.
  ///    Once again, just because we allow it for vectors and pointers only.
  ///    This statement could be expanded as below:
  /// 2.1. All vectors with bitwidth equal to vector A have bitwidth equal to
  ///      vector B, and are thus bitcastable to B as well.
  /// 2.2. All pointers of the same address space, no matter what they point
  ///      to, are bitcastable. So if C is a pointer, it could be bitcasted to
  ///      A and to B.
  /// So any constant equal or bitcastable to A is equal or bitcastable to B.
  ///
  /// In other words, for pointers and vectors, we ignore the top-level type
  /// and look at their particular properties (bit-width for vectors, and
  /// address space for pointers).
  /// If these properties are equal - compare their contents.
  int cmpConstants(const Constant *L, const Constant *R);

  /// Compares two global values by number. Uses the GlobalNumberState to
  /// identify the same globals across function calls.
  int cmpGlobalValues(GlobalValue *L, GlobalValue *R);

  /// Assign or look up previously assigned numbers for the two values, and
  /// return whether the numbers are equal. Numbers are assigned in the order
  /// visited.
  /// Comparison order:
  /// Stage 0: A value that is the function itself is always greater than
  ///          others. If the left and right values are references to their
  ///          respective functions, then they are equal.
  /// Stage 1: Constants are greater than non-constants.
  ///          If both left and right are constants, then the result of
  ///          cmpConstants is used as the cmpValues result.
  /// Stage 2: InlineAsm instances are greater than others. If both left and
  ///          right are InlineAsm instances, the InlineAsm* pointers are cast
  ///          to integers and compared as numbers.
  /// Stage 3: For all other cases we compare the order in which we met these
  ///          values in their functions. If the right value was met first
  ///          during scanning, then the left value is greater.
  ///          In other words, we compare serial numbers; for more details
  ///          see the comments for sn_mapL and sn_mapR.
  int cmpValues(const Value *L, const Value *R);
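
  // Illustrative sketch of stage 3 (hypothetical, standalone): each side
  // numbers values in the order they are first met, so two values that occupy
  // the same position on both sides receive equal serial numbers.
  //
  //   DenseMap<const Value *, int> SN;
  //   int Serial = SN.insert({V, (int)SN.size()}).first->second;
  //   // First visits hand out 0, 1, ...; revisits return the old number.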

  /// Compare two Instructions for equivalence, similar to
  /// Instruction::isSameOperationAs, but with modifications to the type
  /// comparison.
  /// Stages are listed in "most significant stage first" order:
  /// At each stage below, we do a comparison between some left and right
  /// operation parts. If the parts are non-equal, we assign the parts'
  /// comparison result to the operation comparison result and exit from the
  /// method. Otherwise we proceed to the next stage.
  ///
  /// 1. Operation opcodes. Compared as numbers.
  /// 2. Number of operands.
  /// 3. Operation types. Compared with the cmpType method.
  /// 4. Compare operation subclass optional data as a stream of bytes:
  ///    just convert it to integers and call cmpNumbers.
  /// 5. Compare operand types with cmpType in
  ///    "most significant operand first" order.
  /// 6. Last stage. Check operations for some specific attributes.
  ///    For example, for Load it would be:
  ///    6.1. Load: volatile (as boolean flag)
  ///    6.2. Load: alignment (as integer numbers)
  ///    6.3. Load: synch-scope (as integer numbers)
  ///    6.4. Load: range metadata (as integer numbers)
  ///    At this stage it is better to read the code, since it is no more than
  ///    10-15 lines per particular instruction, and could change from time to
  ///    time.
  int cmpOperations(const Instruction *L, const Instruction *R) const;

  /// Compare two GEPs for equivalent pointer arithmetic.
  /// Parts to be compared for each comparison stage,
  /// most significant stage first:
  /// 1. Address space. As numbers.
  /// 2. Constant offset (using the GEPOperator::accumulateConstantOffset
  ///    method).
  /// 3. Pointer operand type (using the cmpType method).
  /// 4. Number of operands.
  /// 5. Compare operands, using the cmpValues method.
  int cmpGEPs(const GEPOperator *GEPL, const GEPOperator *GEPR);
  int cmpGEPs(const GetElementPtrInst *GEPL, const GetElementPtrInst *GEPR) {
    return cmpGEPs(cast<GEPOperator>(GEPL), cast<GEPOperator>(GEPR));
  }

  /// cmpType - compares two types,
  /// defines total ordering among the types set.
  ///
  /// Return values:
  /// 0 if types are equal,
  /// -1 if Left is less than Right,
  /// +1 if Left is greater than Right.
  ///
  /// Description:
  /// Comparison is broken into stages. Like in a lexicographical comparison,
  /// the stage coming first has higher priority.
  /// At each stage of the explanation, keep the total ordering properties in
  /// mind.
  ///
  /// 0. Before comparison we coerce pointer types of address space 0 to
  ///    integers.
  ///    We also don't bother with the same type on the left and right, so
  ///    we just return 0 in this case.
  /// 1. If types are of different kind (different type IDs),
  ///    return the result of the type IDs comparison, treating them as
  ///    numbers.
  /// 2. If types are integers, check that they have the same width. If they
  ///    are vectors, check that they have the same count and subtype.
  /// 3. Types have the same ID, so check whether they are one of:
  ///    * Void
  ///    * Float
  ///    * Double
  ///    * X86_FP80
  ///    * FP128
  ///    * PPC_FP128
  ///    * Label
  ///    * Metadata
  ///    We can treat these types as equal whenever their IDs are the same.
  /// 4. If Left and Right are pointers, return the result of the address space
  ///    comparison (numbers comparison). We can treat pointer types of the
  ///    same address space as equal.
  /// 5. If types are complex,
  ///    then both Left and Right are expanded and their element types are
  ///    checked in the same way. If we get Res != 0 at some stage, return it.
  ///    Otherwise return 0.
  /// 6. For all other cases put llvm_unreachable.
  int cmpTypes(Type *TyL, Type *TyR) const;
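
  // Worked examples for the stages above (illustrative):
  //   cmpTypes(float, i32) == -1  // stage 1: FloatTyID < IntegerTyID
  //   cmpTypes(i32, i64)   == -1  // stage 2: bitwidth 32 < 64
  //   cmpTypes(i8* AS0, i8* AS0) == 0 // stage 0: both coerce to intptr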

  int cmpNumbers(uint64_t L, uint64_t R) const;
  int cmpAPInts(const APInt &L, const APInt &R) const;
  int cmpAPFloats(const APFloat &L, const APFloat &R) const;
  int cmpInlineAsm(const InlineAsm *L, const InlineAsm *R) const;
  int cmpMem(StringRef L, StringRef R) const;
  int cmpAttrs(const AttributeSet L, const AttributeSet R) const;
  int cmpRangeMetadata(const MDNode *L, const MDNode *R) const;

  // The two functions undergoing comparison.
  const Function *FnL, *FnR;

  /// Assign serial numbers to values from the left function, and values from
  /// the right function.
  /// Explanation:
  /// While comparing functions, we need to compare the values we meet on the
  /// left and right sides.
  /// It's easy to sort things out for external values. They just should be
  /// the same value on the left and right.
  /// But for local values (those introduced inside the function body)
  /// we have to ensure they were introduced at exactly the same place,
  /// and play the same role.
  /// So let's assign a serial number to each value when we meet it for the
  /// first time. Values met at the same place will get the same serial
  /// numbers.
  /// In this case it would be good to explain a few points about values
  /// assigned to BBs and other ways of implementation (see below).
  ///
  /// 1. Safety of BB reordering.
  ///    It's safe to change the order of BasicBlocks in a function.
  ///    The relationship with other functions and the serial numbering will
  ///    not be changed in this case.
  ///    As follows from FunctionComparator::compare(), we do a CFG walk: we
  ///    start from the entry, and then take each terminator. So it doesn't
  ///    matter how the BBs happen to be ordered in the function. And since
  ///    cmpValues is called during this walk, the numbering depends only on
  ///    how the BBs are located inside the CFG.
  ///    So the answer is - yes. We will get the same numbering.
  ///
  /// 2. Impossibility to use dominance properties of values.
  ///    If we compare two instruction operands: the first is the usage of a
  ///    local variable AL from function FL, and the second is the usage of a
  ///    local variable AR from FR, we could compare their origins and check
  ///    whether they are defined at the same place.
  ///    But we are still not able to compare operands of PHI nodes, since
  ///    those could be operands from further BBs we didn't scan yet.
  ///    So it's impossible to use dominance properties in general.
  DenseMap<const Value*, int> sn_mapL, sn_mapR;

  // The global state we will use for numbering globals consistently.
  GlobalNumberState* GlobalNumbers;
};

class FunctionNode {
  mutable AssertingVH<Function> F;
  FunctionComparator::FunctionHash Hash;

public:
  // Note the hash is recalculated potentially multiple times, but it is cheap.
  FunctionNode(Function *F)
      : F(F), Hash(FunctionComparator::functionHash(*F)) {}

  Function *getFunc() const { return F; }
  FunctionComparator::FunctionHash getHash() const { return Hash; }

  /// Replace the reference to the function F by the function G, assuming their
  /// implementations are equal.
  void replaceBy(Function *G) const {
    F = G;
  }

  void release() { F = nullptr; }
};

} // end anonymous namespace

int FunctionComparator::cmpNumbers(uint64_t L, uint64_t R) const {
  if (L < R) return -1;
  if (L > R) return 1;
  return 0;
}

int FunctionComparator::cmpAPInts(const APInt &L, const APInt &R) const {
  if (int Res = cmpNumbers(L.getBitWidth(), R.getBitWidth()))
    return Res;
  if (L.ugt(R)) return 1;
  if (R.ugt(L)) return -1;
  return 0;
}

int FunctionComparator::cmpAPFloats(const APFloat &L, const APFloat &R) const {
  // Floats are ordered first by semantics (i.e. float, double, half, etc.),
  // then by value interpreted as a bitstring (aka APInt).
  const fltSemantics &SL = L.getSemantics(), &SR = R.getSemantics();
  if (int Res = cmpNumbers(APFloat::semanticsPrecision(SL),
                           APFloat::semanticsPrecision(SR)))
    return Res;
  if (int Res = cmpNumbers(APFloat::semanticsMaxExponent(SL),
                           APFloat::semanticsMaxExponent(SR)))
    return Res;
  if (int Res = cmpNumbers(APFloat::semanticsMinExponent(SL),
                           APFloat::semanticsMinExponent(SR)))
    return Res;
  if (int Res = cmpNumbers(APFloat::semanticsSizeInBits(SL),
                           APFloat::semanticsSizeInBits(SR)))
    return Res;
  return cmpAPInts(L.bitcastToAPInt(), R.bitcastToAPInt());
}
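
// Illustrative expectations for the ordering above (assuming IEEE semantics):
//   cmpAPFloats(APFloat(1.0f), APFloat(1.0))  == -1 // single precision (24)
//                                                   // < double precision (53)
//   cmpAPFloats(APFloat(1.0f), APFloat(2.0f)) == -1 // same semantics: bit
//                                                   // patterns as APInts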

int FunctionComparator::cmpMem(StringRef L, StringRef R) const {
  // Prevent heavy comparison, compare sizes first.
  if (int Res = cmpNumbers(L.size(), R.size()))
    return Res;

  // Compare strings lexicographically only when it is necessary: only when
  // strings are equal in size.
  return L.compare(R);
}

int FunctionComparator::cmpAttrs(const AttributeSet L,
                                 const AttributeSet R) const {
  if (int Res = cmpNumbers(L.getNumSlots(), R.getNumSlots()))
    return Res;

  for (unsigned i = 0, e = L.getNumSlots(); i != e; ++i) {
    AttributeSet::iterator LI = L.begin(i), LE = L.end(i), RI = R.begin(i),
                           RE = R.end(i);
    for (; LI != LE && RI != RE; ++LI, ++RI) {
      Attribute LA = *LI;
      Attribute RA = *RI;
      if (LA < RA)
        return -1;
      if (RA < LA)
        return 1;
    }
    if (LI != LE)
      return 1;
    if (RI != RE)
      return -1;
  }
  return 0;
}

int FunctionComparator::cmpRangeMetadata(const MDNode *L,
                                         const MDNode *R) const {
  if (L == R)
    return 0;
  if (!L)
    return -1;
  if (!R)
    return 1;
  // Range metadata is a sequence of numbers. Make sure they are the same
  // sequence.
  // TODO: Note that as this is metadata, it is possible to drop and/or merge
  // this data when considering functions to merge. Thus this comparison would
  // return 0 (i.e. equivalent), but merging would become more complicated
  // because the ranges would need to be unioned. It is not likely that
  // functions differ ONLY in this metadata if they are actually the same
  // function semantically.
  if (int Res = cmpNumbers(L->getNumOperands(), R->getNumOperands()))
    return Res;
  for (size_t I = 0; I < L->getNumOperands(); ++I) {
    ConstantInt *LLow = mdconst::extract<ConstantInt>(L->getOperand(I));
    ConstantInt *RLow = mdconst::extract<ConstantInt>(R->getOperand(I));
    if (int Res = cmpAPInts(LLow->getValue(), RLow->getValue()))
      return Res;
  }
  return 0;
}

/// Constants comparison:
/// 1. Check whether the type of the L constant could be losslessly bitcasted
///    to the type of the R constant.
/// 2. Compare constant contents.
/// For more details see declaration comments.
int FunctionComparator::cmpConstants(const Constant *L, const Constant *R) {

  Type *TyL = L->getType();
  Type *TyR = R->getType();

  // Check whether types are bitcastable. This part is just re-factored
  // Type::canLosslesslyBitCastTo method, but instead of returning true/false,
  // we also pack into the result which type is "less" for us.
  int TypesRes = cmpTypes(TyL, TyR);
  if (TypesRes != 0) {
    // Types are different, but check whether we can bitcast them.
    if (!TyL->isFirstClassType()) {
      if (TyR->isFirstClassType())
        return -1;
      // Neither TyL nor TyR are values of first class type. Return the result
      // of comparing the types.
      return TypesRes;
    }
    if (!TyR->isFirstClassType()) {
      if (TyL->isFirstClassType())
        return 1;
      return TypesRes;
    }

    // Vector -> Vector conversions are always lossless if the two vector types
    // have the same size, otherwise not.
    unsigned TyLWidth = 0;
    unsigned TyRWidth = 0;

    if (auto *VecTyL = dyn_cast<VectorType>(TyL))
      TyLWidth = VecTyL->getBitWidth();
    if (auto *VecTyR = dyn_cast<VectorType>(TyR))
      TyRWidth = VecTyR->getBitWidth();

    if (TyLWidth != TyRWidth)
      return cmpNumbers(TyLWidth, TyRWidth);

    // Zero bit-width means neither TyL nor TyR are vectors.
    if (!TyLWidth) {
      PointerType *PTyL = dyn_cast<PointerType>(TyL);
      PointerType *PTyR = dyn_cast<PointerType>(TyR);
      if (PTyL && PTyR) {
        unsigned AddrSpaceL = PTyL->getAddressSpace();
        unsigned AddrSpaceR = PTyR->getAddressSpace();
        if (int Res = cmpNumbers(AddrSpaceL, AddrSpaceR))
          return Res;
      }
      if (PTyL)
        return 1;
      if (PTyR)
        return -1;

      // TyL and TyR aren't vectors, nor pointers. We don't know how to
      // bitcast them.
      return TypesRes;
    }
  }

  // OK, types are bitcastable, now check constant contents.

  if (L->isNullValue() && R->isNullValue())
    return TypesRes;
  if (L->isNullValue() && !R->isNullValue())
    return 1;
  if (!L->isNullValue() && R->isNullValue())
    return -1;

  auto GlobalValueL = const_cast<GlobalValue *>(dyn_cast<GlobalValue>(L));
  auto GlobalValueR = const_cast<GlobalValue *>(dyn_cast<GlobalValue>(R));
  if (GlobalValueL && GlobalValueR) {
    return cmpGlobalValues(GlobalValueL, GlobalValueR);
  }

  if (int Res = cmpNumbers(L->getValueID(), R->getValueID()))
    return Res;

  if (const auto *SeqL = dyn_cast<ConstantDataSequential>(L)) {
    const auto *SeqR = cast<ConstantDataSequential>(R);
    // This handles ConstantDataArray and ConstantDataVector. Note that we
    // compare the two raw data arrays, which might differ depending on the
    // host endianness. This isn't a problem though, because the endianness of
    // a module will affect the order of the constants, but this order is the
    // same for a given input module and host platform.
    return cmpMem(SeqL->getRawDataValues(), SeqR->getRawDataValues());
  }

  switch (L->getValueID()) {
  case Value::UndefValueVal: return TypesRes;
  case Value::ConstantIntVal: {
    const APInt &LInt = cast<ConstantInt>(L)->getValue();
    const APInt &RInt = cast<ConstantInt>(R)->getValue();
    return cmpAPInts(LInt, RInt);
  }
  case Value::ConstantFPVal: {
    const APFloat &LAPF = cast<ConstantFP>(L)->getValueAPF();
    const APFloat &RAPF = cast<ConstantFP>(R)->getValueAPF();
    return cmpAPFloats(LAPF, RAPF);
  }
  case Value::ConstantArrayVal: {
    const ConstantArray *LA = cast<ConstantArray>(L);
    const ConstantArray *RA = cast<ConstantArray>(R);
    uint64_t NumElementsL = cast<ArrayType>(TyL)->getNumElements();
    uint64_t NumElementsR = cast<ArrayType>(TyR)->getNumElements();
    if (int Res = cmpNumbers(NumElementsL, NumElementsR))
      return Res;
    for (uint64_t i = 0; i < NumElementsL; ++i) {
      if (int Res = cmpConstants(cast<Constant>(LA->getOperand(i)),
                                 cast<Constant>(RA->getOperand(i))))
        return Res;
    }
    return 0;
  }
  case Value::ConstantStructVal: {
    const ConstantStruct *LS = cast<ConstantStruct>(L);
    const ConstantStruct *RS = cast<ConstantStruct>(R);
    unsigned NumElementsL = cast<StructType>(TyL)->getNumElements();
    unsigned NumElementsR = cast<StructType>(TyR)->getNumElements();
    if (int Res = cmpNumbers(NumElementsL, NumElementsR))
      return Res;
    for (unsigned i = 0; i != NumElementsL; ++i) {
      if (int Res = cmpConstants(cast<Constant>(LS->getOperand(i)),
                                 cast<Constant>(RS->getOperand(i))))
        return Res;
    }
    return 0;
  }
  case Value::ConstantVectorVal: {
    const ConstantVector *LV = cast<ConstantVector>(L);
    const ConstantVector *RV = cast<ConstantVector>(R);
    unsigned NumElementsL = cast<VectorType>(TyL)->getNumElements();
    unsigned NumElementsR = cast<VectorType>(TyR)->getNumElements();
    if (int Res = cmpNumbers(NumElementsL, NumElementsR))
      return Res;
    for (uint64_t i = 0; i < NumElementsL; ++i) {
      if (int Res = cmpConstants(cast<Constant>(LV->getOperand(i)),
                                 cast<Constant>(RV->getOperand(i))))
        return Res;
    }
    return 0;
  }
  case Value::ConstantExprVal: {
    const ConstantExpr *LE = cast<ConstantExpr>(L);
    const ConstantExpr *RE = cast<ConstantExpr>(R);
    unsigned NumOperandsL = LE->getNumOperands();
    unsigned NumOperandsR = RE->getNumOperands();
    if (int Res = cmpNumbers(NumOperandsL, NumOperandsR))
      return Res;
    for (unsigned i = 0; i < NumOperandsL; ++i) {
      if (int Res = cmpConstants(cast<Constant>(LE->getOperand(i)),
                                 cast<Constant>(RE->getOperand(i))))
        return Res;
    }
    return 0;
  }
  case Value::BlockAddressVal: {
    const BlockAddress *LBA = cast<BlockAddress>(L);
    const BlockAddress *RBA = cast<BlockAddress>(R);
    if (int Res = cmpValues(LBA->getFunction(), RBA->getFunction()))
      return Res;
    if (LBA->getFunction() == RBA->getFunction()) {
      // They are BBs in the same function. Order by which comes first in the
      // BB order of the function. This order is deterministic.
      Function* F = LBA->getFunction();
      BasicBlock *LBB = LBA->getBasicBlock();
      BasicBlock *RBB = RBA->getBasicBlock();
      if (LBB == RBB)
        return 0;
      for (BasicBlock &BB : F->getBasicBlockList()) {
        if (&BB == LBB) {
          assert(&BB != RBB);
          return -1;
        }
        if (&BB == RBB)
          return 1;
      }
      llvm_unreachable("Basic Block Address does not point to a basic block in "
                       "its function.");
      return -1;
    } else {
      // cmpValues said the functions are the same. So because they aren't
      // literally the same pointer, they must respectively be the left and
      // right functions.
      assert(LBA->getFunction() == FnL && RBA->getFunction() == FnR);
      // cmpValues will tell us if these are equivalent BasicBlocks, in the
      // context of their respective functions.
      return cmpValues(LBA->getBasicBlock(), RBA->getBasicBlock());
    }
  }
  default: // Unknown constant, abort.
    DEBUG(dbgs() << "Looking at valueID " << L->getValueID() << "\n");
    llvm_unreachable("Constant ValueID not recognized.");
    return -1;
  }
}

int FunctionComparator::cmpGlobalValues(GlobalValue *L, GlobalValue *R) {
  return cmpNumbers(GlobalNumbers->getNumber(L), GlobalNumbers->getNumber(R));
}

/// cmpType - compares two types,
/// defines total ordering among the types set.
/// See method declaration comments for more details.
int FunctionComparator::cmpTypes(Type *TyL, Type *TyR) const {
  PointerType *PTyL = dyn_cast<PointerType>(TyL);
  PointerType *PTyR = dyn_cast<PointerType>(TyR);

  const DataLayout &DL = FnL->getParent()->getDataLayout();
  if (PTyL && PTyL->getAddressSpace() == 0)
    TyL = DL.getIntPtrType(TyL);
  if (PTyR && PTyR->getAddressSpace() == 0)
    TyR = DL.getIntPtrType(TyR);

  if (TyL == TyR)
    return 0;

  if (int Res = cmpNumbers(TyL->getTypeID(), TyR->getTypeID()))
    return Res;

  switch (TyL->getTypeID()) {
  default:
    llvm_unreachable("Unknown type!");
    // Fall through in Release mode.
  case Type::IntegerTyID:
    return cmpNumbers(cast<IntegerType>(TyL)->getBitWidth(),
                      cast<IntegerType>(TyR)->getBitWidth());
  case Type::VectorTyID: {
    VectorType *VTyL = cast<VectorType>(TyL), *VTyR = cast<VectorType>(TyR);
    if (int Res = cmpNumbers(VTyL->getNumElements(), VTyR->getNumElements()))
      return Res;
    return cmpTypes(VTyL->getElementType(), VTyR->getElementType());
  }
  // TyL == TyR would have returned 0 earlier, because types are uniqued.
  case Type::VoidTyID:
  case Type::FloatTyID:
  case Type::DoubleTyID:
  case Type::X86_FP80TyID:
  case Type::FP128TyID:
  case Type::PPC_FP128TyID:
  case Type::LabelTyID:
  case Type::MetadataTyID:
  case Type::TokenTyID:
    return 0;

  case Type::PointerTyID: {
    assert(PTyL && PTyR && "Both types must be pointers here.");
    return cmpNumbers(PTyL->getAddressSpace(), PTyR->getAddressSpace());
  }

  case Type::StructTyID: {
    StructType *STyL = cast<StructType>(TyL);
    StructType *STyR = cast<StructType>(TyR);
    if (STyL->getNumElements() != STyR->getNumElements())
      return cmpNumbers(STyL->getNumElements(), STyR->getNumElements());

    if (STyL->isPacked() != STyR->isPacked())
      return cmpNumbers(STyL->isPacked(), STyR->isPacked());

    for (unsigned i = 0, e = STyL->getNumElements(); i != e; ++i) {
      if (int Res = cmpTypes(STyL->getElementType(i), STyR->getElementType(i)))
        return Res;
    }
    return 0;
  }

  case Type::FunctionTyID: {
    FunctionType *FTyL = cast<FunctionType>(TyL);
    FunctionType *FTyR = cast<FunctionType>(TyR);
    if (FTyL->getNumParams() != FTyR->getNumParams())
      return cmpNumbers(FTyL->getNumParams(), FTyR->getNumParams());

    if (FTyL->isVarArg() != FTyR->isVarArg())
      return cmpNumbers(FTyL->isVarArg(), FTyR->isVarArg());

    if (int Res = cmpTypes(FTyL->getReturnType(), FTyR->getReturnType()))
      return Res;

    for (unsigned i = 0, e = FTyL->getNumParams(); i != e; ++i) {
      if (int Res = cmpTypes(FTyL->getParamType(i), FTyR->getParamType(i)))
        return Res;
    }
    return 0;
  }

  case Type::ArrayTyID: {
    ArrayType *ATyL = cast<ArrayType>(TyL);
    ArrayType *ATyR = cast<ArrayType>(TyR);
    if (ATyL->getNumElements() != ATyR->getNumElements())
      return cmpNumbers(ATyL->getNumElements(), ATyR->getNumElements());
    return cmpTypes(ATyL->getElementType(), ATyR->getElementType());
  }
  }
}

// Determine whether the two operations are the same except that pointer-to-A
// and pointer-to-B are equivalent. This should be kept in sync with
// Instruction::isSameOperationAs.
// Read method declaration comments for more details.
int FunctionComparator::cmpOperations(const Instruction *L,
                                      const Instruction *R) const {
  // Differences from Instruction::isSameOperationAs:
  // * replace type comparison with calls to isEquivalentType.
  // * we test for I->hasSameSubclassOptionalData (nuw/nsw/tail) at the top.
  // * because of the above, we don't test for the tail bit on calls later on.
  if (int Res = cmpNumbers(L->getOpcode(), R->getOpcode()))
    return Res;

  if (int Res = cmpNumbers(L->getNumOperands(), R->getNumOperands()))
    return Res;

  if (int Res = cmpTypes(L->getType(), R->getType()))
    return Res;

  if (int Res = cmpNumbers(L->getRawSubclassOptionalData(),
                           R->getRawSubclassOptionalData()))
    return Res;

  if (const AllocaInst *AI = dyn_cast<AllocaInst>(L)) {
    if (int Res = cmpTypes(AI->getAllocatedType(),
                           cast<AllocaInst>(R)->getAllocatedType()))
      return Res;
    if (int Res =
            cmpNumbers(AI->getAlignment(), cast<AllocaInst>(R)->getAlignment()))
      return Res;
  }

  // We have two instructions of identical opcode and #operands. Check to see
  // if all operands are the same type.
  for (unsigned i = 0, e = L->getNumOperands(); i != e; ++i) {
    if (int Res =
            cmpTypes(L->getOperand(i)->getType(), R->getOperand(i)->getType()))
      return Res;
  }

  // Check special state that is a part of some instructions.
  if (const LoadInst *LI = dyn_cast<LoadInst>(L)) {
    if (int Res = cmpNumbers(LI->isVolatile(), cast<LoadInst>(R)->isVolatile()))
      return Res;
    if (int Res =
            cmpNumbers(LI->getAlignment(), cast<LoadInst>(R)->getAlignment()))
      return Res;
    if (int Res =
            cmpNumbers(LI->getOrdering(), cast<LoadInst>(R)->getOrdering()))
      return Res;
    if (int Res =
            cmpNumbers(LI->getSynchScope(), cast<LoadInst>(R)->getSynchScope()))
      return Res;
    return cmpRangeMetadata(LI->getMetadata(LLVMContext::MD_range),
                            cast<LoadInst>(R)->getMetadata(LLVMContext::MD_range));
  }
  if (const StoreInst *SI = dyn_cast<StoreInst>(L)) {
    if (int Res =
            cmpNumbers(SI->isVolatile(), cast<StoreInst>(R)->isVolatile()))
      return Res;
    if (int Res =
            cmpNumbers(SI->getAlignment(), cast<StoreInst>(R)->getAlignment()))
      return Res;
    if (int Res =
            cmpNumbers(SI->getOrdering(), cast<StoreInst>(R)->getOrdering()))
      return Res;
    return cmpNumbers(SI->getSynchScope(), cast<StoreInst>(R)->getSynchScope());
  }
  if (const CmpInst *CI = dyn_cast<CmpInst>(L))
    return cmpNumbers(CI->getPredicate(), cast<CmpInst>(R)->getPredicate());
  if (const CallInst *CI = dyn_cast<CallInst>(L)) {
    if (int Res = cmpNumbers(CI->getCallingConv(),
                             cast<CallInst>(R)->getCallingConv()))
      return Res;
    if (int Res =
            cmpAttrs(CI->getAttributes(), cast<CallInst>(R)->getAttributes()))
      return Res;
    return cmpRangeMetadata(
        CI->getMetadata(LLVMContext::MD_range),
        cast<CallInst>(R)->getMetadata(LLVMContext::MD_range));
  }
  if (const InvokeInst *CI = dyn_cast<InvokeInst>(L)) {
    if (int Res = cmpNumbers(CI->getCallingConv(),
                             cast<InvokeInst>(R)->getCallingConv()))
      return Res;
    if (int Res =
            cmpAttrs(CI->getAttributes(), cast<InvokeInst>(R)->getAttributes()))
      return Res;
    return cmpRangeMetadata(
        CI->getMetadata(LLVMContext::MD_range),
        cast<InvokeInst>(R)->getMetadata(LLVMContext::MD_range));
  }
  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(L)) {
    ArrayRef<unsigned> LIndices = IVI->getIndices();
    ArrayRef<unsigned> RIndices = cast<InsertValueInst>(R)->getIndices();
    if (int Res = cmpNumbers(LIndices.size(), RIndices.size()))
      return Res;
    for (size_t i = 0, e = LIndices.size(); i != e; ++i) {
      if (int Res = cmpNumbers(LIndices[i], RIndices[i]))
        return Res;
    }
  }
  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(L)) {
    ArrayRef<unsigned> LIndices = EVI->getIndices();
    ArrayRef<unsigned> RIndices = cast<ExtractValueInst>(R)->getIndices();
    if (int Res = cmpNumbers(LIndices.size(), RIndices.size()))
      return Res;
    for (size_t i = 0, e = LIndices.size(); i != e; ++i) {
      if (int Res = cmpNumbers(LIndices[i], RIndices[i]))
        return Res;
    }
  }
  if (const FenceInst *FI = dyn_cast<FenceInst>(L)) {
    if (int Res =
            cmpNumbers(FI->getOrdering(), cast<FenceInst>(R)->getOrdering()))
      return Res;
    return cmpNumbers(FI->getSynchScope(), cast<FenceInst>(R)->getSynchScope());
  }

  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(L)) {
    if (int Res = cmpNumbers(CXI->isVolatile(),
                             cast<AtomicCmpXchgInst>(R)->isVolatile()))
      return Res;
    if (int Res = cmpNumbers(CXI->isWeak(),
                             cast<AtomicCmpXchgInst>(R)->isWeak()))
      return Res;
    if (int Res = cmpNumbers(CXI->getSuccessOrdering(),
                             cast<AtomicCmpXchgInst>(R)->getSuccessOrdering()))
      return Res;
    if (int Res = cmpNumbers(CXI->getFailureOrdering(),
                             cast<AtomicCmpXchgInst>(R)->getFailureOrdering()))
      return Res;
    return cmpNumbers(CXI->getSynchScope(),
                      cast<AtomicCmpXchgInst>(R)->getSynchScope());
  }
  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(L)) {
    if (int Res = cmpNumbers(RMWI->getOperation(),
                             cast<AtomicRMWInst>(R)->getOperation()))
      return Res;
    if (int Res = cmpNumbers(RMWI->isVolatile(),
                             cast<AtomicRMWInst>(R)->isVolatile()))
      return Res;
    if (int Res = cmpNumbers(RMWI->getOrdering(),
                             cast<AtomicRMWInst>(R)->getOrdering()))
      return Res;
    return cmpNumbers(RMWI->getSynchScope(),
                      cast<AtomicRMWInst>(R)->getSynchScope());
  }
  return 0;
}
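
// For example (illustrative): two loads identical except for alignment are
// ordered at stage 6.2 by cmpNumbers on the alignments, so
//   cmpOperations(load align 4, load align 8) == cmpNumbers(4, 8) == -1.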

// Determine whether two GEP operations perform the same underlying arithmetic.
// Read method declaration comments for more details.
int FunctionComparator::cmpGEPs(const GEPOperator *GEPL,
                                const GEPOperator *GEPR) {
  unsigned int ASL = GEPL->getPointerAddressSpace();
  unsigned int ASR = GEPR->getPointerAddressSpace();

  if (int Res = cmpNumbers(ASL, ASR))
    return Res;

  // When we have target data, we can reduce the GEP down to the value in bytes
  // added to the address.
  const DataLayout &DL = FnL->getParent()->getDataLayout();
  unsigned BitWidth = DL.getPointerSizeInBits(ASL);
  APInt OffsetL(BitWidth, 0), OffsetR(BitWidth, 0);
  if (GEPL->accumulateConstantOffset(DL, OffsetL) &&
      GEPR->accumulateConstantOffset(DL, OffsetR))
    return cmpAPInts(OffsetL, OffsetR);
  if (int Res = cmpTypes(GEPL->getSourceElementType(),
                         GEPR->getSourceElementType()))
    return Res;

  if (int Res = cmpNumbers(GEPL->getNumOperands(), GEPR->getNumOperands()))
    return Res;

  for (unsigned i = 0, e = GEPL->getNumOperands(); i != e; ++i) {
    if (int Res = cmpValues(GEPL->getOperand(i), GEPR->getOperand(i)))
      return Res;
  }

  return 0;
}

int FunctionComparator::cmpInlineAsm(const InlineAsm *L,
                                     const InlineAsm *R) const {
  // InlineAsm's are uniqued. If they are the same pointer, obviously they are
  // the same, otherwise compare the fields.
  if (L == R)
    return 0;
  if (int Res = cmpTypes(L->getFunctionType(), R->getFunctionType()))
    return Res;
  if (int Res = cmpMem(L->getAsmString(), R->getAsmString()))
    return Res;
  if (int Res = cmpMem(L->getConstraintString(), R->getConstraintString()))
    return Res;
  if (int Res = cmpNumbers(L->hasSideEffects(), R->hasSideEffects()))
    return Res;
  if (int Res = cmpNumbers(L->isAlignStack(), R->isAlignStack()))
    return Res;
  if (int Res = cmpNumbers(L->getDialect(), R->getDialect()))
    return Res;
  llvm_unreachable("InlineAsm blocks were not uniqued.");
  return 0;
}

/// Compare two values used by the two functions under pair-wise comparison. If
/// this is the first time the values are seen, they're added to the mapping so
/// that we will detect mismatches on next use.
/// See comments in declaration for more details.
int FunctionComparator::cmpValues(const Value *L, const Value *R) {
  // Catch self-reference case.
  if (L == FnL) {
    if (R == FnR)
      return 0;
    return -1;
  }
  if (R == FnR) {
    if (L == FnL)
      return 0;
    return 1;
  }

  const Constant *ConstL = dyn_cast<Constant>(L);
  const Constant *ConstR = dyn_cast<Constant>(R);
  if (ConstL && ConstR) {
    if (L == R)
      return 0;
    return cmpConstants(ConstL, ConstR);
  }

  if (ConstL)
    return 1;
  if (ConstR)
    return -1;

  const InlineAsm *InlineAsmL = dyn_cast<InlineAsm>(L);
  const InlineAsm *InlineAsmR = dyn_cast<InlineAsm>(R);

  if (InlineAsmL && InlineAsmR)
    return cmpInlineAsm(InlineAsmL, InlineAsmR);
  if (InlineAsmL)
    return 1;
  if (InlineAsmR)
    return -1;

  auto LeftSN = sn_mapL.insert(std::make_pair(L, sn_mapL.size())),
       RightSN = sn_mapR.insert(std::make_pair(R, sn_mapR.size()));

  return cmpNumbers(LeftSN.first->second, RightSN.first->second);
}

// Test whether two basic blocks have equivalent behaviour.
int FunctionComparator::cmpBasicBlocks(const BasicBlock *BBL,
                                       const BasicBlock *BBR) {
  BasicBlock::const_iterator InstL = BBL->begin(), InstLE = BBL->end();
  BasicBlock::const_iterator InstR = BBR->begin(), InstRE = BBR->end();

  do {
    if (int Res = cmpValues(InstL, InstR))
      return Res;

    const GetElementPtrInst *GEPL = dyn_cast<GetElementPtrInst>(InstL);
    const GetElementPtrInst *GEPR = dyn_cast<GetElementPtrInst>(InstR);

    if (GEPL && !GEPR)
      return 1;
    if (GEPR && !GEPL)
      return -1;

    if (GEPL && GEPR) {
      if (int Res =
              cmpValues(GEPL->getPointerOperand(), GEPR->getPointerOperand()))
        return Res;
      if (int Res = cmpGEPs(GEPL, GEPR))
        return Res;
    } else {
      if (int Res = cmpOperations(InstL, InstR))
        return Res;
      assert(InstL->getNumOperands() == InstR->getNumOperands());

      for (unsigned i = 0, e = InstL->getNumOperands(); i != e; ++i) {
        Value *OpL = InstL->getOperand(i);
        Value *OpR = InstR->getOperand(i);
        if (int Res = cmpValues(OpL, OpR))
          return Res;
        // cmpValues should ensure this is true.
        assert(cmpTypes(OpL->getType(), OpR->getType()) == 0);
      }
    }

    ++InstL, ++InstR;
  } while (InstL != InstLE && InstR != InstRE);

  if (InstL != InstLE && InstR == InstRE)
    return 1;
  if (InstL == InstLE && InstR != InstRE)
    return -1;
  return 0;
}

// Test whether the two functions have equivalent behaviour.
int FunctionComparator::compare() {
  sn_mapL.clear();
  sn_mapR.clear();

  if (int Res = cmpAttrs(FnL->getAttributes(), FnR->getAttributes()))
    return Res;

  if (int Res = cmpNumbers(FnL->hasGC(), FnR->hasGC()))
    return Res;

  if (FnL->hasGC()) {
    if (int Res = cmpMem(FnL->getGC(), FnR->getGC()))
      return Res;
  }

  if (int Res = cmpNumbers(FnL->hasSection(), FnR->hasSection()))
    return Res;

  if (FnL->hasSection()) {
    if (int Res = cmpMem(FnL->getSection(), FnR->getSection()))
      return Res;
  }

  if (int Res = cmpNumbers(FnL->isVarArg(), FnR->isVarArg()))
    return Res;

  // TODO: if it's internal and only used in direct calls, we could handle this
  // case too.
  if (int Res = cmpNumbers(FnL->getCallingConv(), FnR->getCallingConv()))
    return Res;

  if (int Res = cmpTypes(FnL->getFunctionType(), FnR->getFunctionType()))
    return Res;

  assert(FnL->arg_size() == FnR->arg_size() &&
         "Identically typed functions have different numbers of args!");

  // Visit the arguments so that they get enumerated in the order they're
  // passed in.
  for (Function::const_arg_iterator ArgLI = FnL->arg_begin(),
                                    ArgRI = FnR->arg_begin(),
                                    ArgLE = FnL->arg_end();
       ArgLI != ArgLE; ++ArgLI, ++ArgRI) {
    if (cmpValues(ArgLI, ArgRI) != 0)
      llvm_unreachable("Arguments repeat!");
  }

  // We do a CFG-ordered walk since the actual ordering of the blocks in the
  // linked list is immaterial. Our walk starts at the entry block for both
  // functions, then takes each block from each terminator in order. As an
  // artifact, this also means that unreachable blocks are ignored.
  SmallVector<const BasicBlock *, 8> FnLBBs, FnRBBs;
  SmallSet<const BasicBlock *, 128> VisitedBBs; // in terms of F1.

  FnLBBs.push_back(&FnL->getEntryBlock());
  FnRBBs.push_back(&FnR->getEntryBlock());

  VisitedBBs.insert(FnLBBs[0]);
  while (!FnLBBs.empty()) {
    const BasicBlock *BBL = FnLBBs.pop_back_val();
    const BasicBlock *BBR = FnRBBs.pop_back_val();

    if (int Res = cmpValues(BBL, BBR))
      return Res;

    if (int Res = cmpBasicBlocks(BBL, BBR))
      return Res;

    const TerminatorInst *TermL = BBL->getTerminator();
    const TerminatorInst *TermR = BBR->getTerminator();

    assert(TermL->getNumSuccessors() == TermR->getNumSuccessors());
    for (unsigned i = 0, e = TermL->getNumSuccessors(); i != e; ++i) {
      if (!VisitedBBs.insert(TermL->getSuccessor(i)).second)
        continue;

      FnLBBs.push_back(TermL->getSuccessor(i));
      FnRBBs.push_back(TermR->getSuccessor(i));
    }
  }
  return 0;
}

namespace {
// Accumulate the hash of a sequence of 64-bit integers. This is similar to a
// hash of a sequence of 64bit ints, but the entire input does not need to be
// available at once. This interface is necessary for functionHash because it
// needs to accumulate the hash as the structure of the function is traversed
// without saving these values to an intermediate buffer. This form of hashing
// is not often needed, as usually the object to hash is just read from a
// buffer.
class HashAccumulator64 {
  uint64_t Hash;
public:
  // Initialize to random constant, so the state isn't zero.
  HashAccumulator64() { Hash = 0x6acaa36bef8325c5ULL; }
  void add(uint64_t V) {
    Hash = llvm::hashing::detail::hash_16_bytes(Hash, V);
  }
  // No finishing is required, because the entire hash value is used.
  uint64_t getHash() { return Hash; }
};
} // end anonymous namespace
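
// A minimal driving sketch (hypothetical standalone use): feed 64-bit pieces
// as they are discovered, then read the final value.
//
//   HashAccumulator64 Acc;
//   Acc.add(F.isVarArg());
//   Acc.add(F.arg_size());
//   uint64_t H = Acc.getHash();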

// A function hash is calculated by considering only the number of arguments
// and whether a function is varargs, the order of basic blocks (given by the
// successors of each basic block in depth first order), and the order of
// opcodes of each instruction within each of these basic blocks. This mirrors
// the strategy compare() uses to compare functions by walking the BBs in depth
// first order and comparing each instruction in sequence. Because this hash
// does not look at the operands, it is insensitive to things such as the
// target of calls and the constants used in the function, which makes it
// useful when possibly merging functions which are the same modulo constants
// and call targets.
FunctionComparator::FunctionHash FunctionComparator::functionHash(Function &F) {
  HashAccumulator64 H;
  H.add(F.isVarArg());
  H.add(F.arg_size());

  SmallVector<const BasicBlock *, 8> BBs;
  SmallSet<const BasicBlock *, 16> VisitedBBs;

  // Walk the blocks in the same order as FunctionComparator::cmpBasicBlocks(),
  // accumulating the hash of the function "structure" (BB and opcode
  // sequence).
  BBs.push_back(&F.getEntryBlock());
  VisitedBBs.insert(BBs[0]);
  while (!BBs.empty()) {
    const BasicBlock *BB = BBs.pop_back_val();
    // This random value acts as a block header, as otherwise the partition of
    // opcodes into BBs wouldn't affect the hash, only the order of the
    // opcodes would.
    H.add(45798);
    for (auto &Inst : *BB) {
      H.add(Inst.getOpcode());
    }
    const TerminatorInst *Term = BB->getTerminator();
    for (unsigned i = 0, e = Term->getNumSuccessors(); i != e; ++i) {
      if (!VisitedBBs.insert(Term->getSuccessor(i)).second)
        continue;
      BBs.push_back(Term->getSuccessor(i));
    }
  }
  return H.getHash();
}
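
// Note the consequence of hashing only structure: e.g. (illustrative)
//   int f(int x) { return x + 1; }
//   int g(int x) { return x + 2; }
// hash identically; the differing constants are only seen later by the full
// FunctionComparator::compare().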

namespace {

/// MergeFunctions finds functions which will generate identical machine code,
/// by considering all pointer types to be equivalent. Once identified,
/// MergeFunctions will fold them by replacing a call to one with a call to a
/// bitcast of the other.
class MergeFunctions : public ModulePass {
public:
  static char ID;
  MergeFunctions()
      : ModulePass(ID), FnTree(FunctionNodeCmp(&GlobalNumbers)),
        FNodesInTree(), HasGlobalAliases(false) {
    initializeMergeFunctionsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override;

private:
  // The function comparison operator is provided here so that FunctionNodes do
  // not need to become larger with another pointer.
  class FunctionNodeCmp {
    GlobalNumberState* GlobalNumbers;
  public:
    FunctionNodeCmp(GlobalNumberState* GN) : GlobalNumbers(GN) {}
    bool operator()(const FunctionNode &LHS, const FunctionNode &RHS) const {
      // Order first by hashes, then full function comparison.
      if (LHS.getHash() != RHS.getHash())
        return LHS.getHash() < RHS.getHash();
      FunctionComparator FCmp(LHS.getFunc(), RHS.getFunc(), GlobalNumbers);
      return FCmp.compare() == -1;
    }
  };
  typedef std::set<FunctionNode, FunctionNodeCmp> FnTreeType;

  GlobalNumberState GlobalNumbers;

  /// A work queue of functions that may have been modified and should be
  /// looked at again.
  std::vector<WeakVH> Deferred;

  /// Checks the rules of the order relation introduced among the functions
  /// set. Returns true if the sanity check has been passed, and false if it
  /// failed.
  bool doSanityCheck(std::vector<WeakVH> &Worklist);

  /// Insert a ComparableFunction into the FnTree, or merge it away if it's
  /// equal to one that's already present.
  bool insert(Function *NewFunction);

  /// Remove a Function from the FnTree and queue it up for a second sweep of
  /// the module.
  void remove(Function *F);

  /// Find the functions that use this Value and remove them from FnTree and
  /// queue the functions.
  void removeUsers(Value *V);

  /// Replace all direct calls of Old with calls of New. Will bitcast New if
  /// necessary to make types match.
  void replaceDirectCallers(Function *Old, Function *New);

  /// Merge two equivalent functions. Upon completion, G may be deleted, or may
  /// be converted into a thunk. In either case, it should never be visited
  /// again.
  void mergeTwoFunctions(Function *F, Function *G);

  /// Replace G with a thunk or an alias to F. Deletes G.
  void writeThunkOrAlias(Function *F, Function *G);

  /// Replace G with a simple tail call to bitcast(F). Also replace direct uses
  /// of G with bitcast(F). Deletes G.
  void writeThunk(Function *F, Function *G);

  /// Replace G with an alias to F. Deletes G.
  void writeAlias(Function *F, Function *G);

  /// Replace function F with function G in the function tree.
  void replaceFunctionInTree(const FunctionNode &FN, Function *G);

  /// The set of all distinct functions. Use the insert() and remove() methods
  /// to modify it. The map allows efficient lookup and deferring of Functions.
  FnTreeType FnTree;

  // Map functions to the iterators of the FunctionNode which contains them
  // in the FnTree. This must be updated carefully whenever the FnTree is
  // modified, i.e. in insert(), remove(), and replaceFunctionInTree(), to
  // avoid dangling iterators into FnTree. The invariant that preserves this
  // is that there is exactly one mapping F -> FN for each FunctionNode FN in
  // FnTree.
  ValueMap<Function*, FnTreeType::iterator> FNodesInTree;

  /// Whether or not the target supports global aliases.
  bool HasGlobalAliases;
};

} // end anonymous namespace

char MergeFunctions::ID = 0;
INITIALIZE_PASS(MergeFunctions, "mergefunc", "Merge Functions", false, false)

ModulePass *llvm::createMergeFunctionsPass() {
  return new MergeFunctions();
}

bool MergeFunctions::doSanityCheck(std::vector<WeakVH> &Worklist) {
  if (const unsigned Max = NumFunctionsForSanityCheck) {
    unsigned TripleNumber = 0;
    bool Valid = true;

    dbgs() << "MERGEFUNC-SANITY: Started for first " << Max << " functions.\n";

    unsigned i = 0;
    for (std::vector<WeakVH>::iterator I = Worklist.begin(), E = Worklist.end();
         I != E && i < Max; ++I, ++i) {
      unsigned j = i;
      for (std::vector<WeakVH>::iterator J = I; J != E && j < Max; ++J, ++j) {
        Function *F1 = cast<Function>(*I);
        Function *F2 = cast<Function>(*J);
        int Res1 = FunctionComparator(F1, F2, &GlobalNumbers).compare();
        int Res2 = FunctionComparator(F2, F1, &GlobalNumbers).compare();

        // If F1 <= F2, then F2 >= F1, otherwise report failure.
        if (Res1 != -Res2) {
          dbgs() << "MERGEFUNC-SANITY: Non-symmetric; triple: " << TripleNumber
                 << "\n";
          F1->dump();
          F2->dump();
          Valid = false;
        }

        if (Res1 == 0)
          continue;

        unsigned k = j;
        for (std::vector<WeakVH>::iterator K = J; K != E && k < Max;
             ++k, ++K, ++TripleNumber) {
          if (K == J)
            continue;

          Function *F3 = cast<Function>(*K);
          int Res3 = FunctionComparator(F1, F3, &GlobalNumbers).compare();
          int Res4 = FunctionComparator(F2, F3, &GlobalNumbers).compare();

          bool Transitive = true;

          if (Res1 != 0 && Res1 == Res4) {
            // F1 > F2, F2 > F3 => F1 > F3
            Transitive = Res3 == Res1;
          } else if (Res3 != 0 && Res3 == -Res4) {
            // F1 > F3, F3 > F2 => F1 > F2
            Transitive = Res3 == Res1;
          } else if (Res4 != 0 && -Res3 == Res4) {
            // F2 > F3, F3 > F1 => F2 > F1
            Transitive = Res4 == -Res1;
          }

          if (!Transitive) {
            dbgs() << "MERGEFUNC-SANITY: Non-transitive; triple: "
                   << TripleNumber << "\n";
            dbgs() << "Res1, Res3, Res4: " << Res1 << ", " << Res3 << ", "
                   << Res4 << "\n";
            F1->dump();
            F2->dump();
            F3->dump();
            Valid = false;
          }
        }
      }
    }

    dbgs() << "MERGEFUNC-SANITY: " << (Valid ? "Passed." : "Failed.") << "\n";
    return Valid;
  }
  return true;
}

bool MergeFunctions::runOnModule(Module &M) {
  bool Changed = false;

  // All functions in the module, ordered by hash. Functions with a unique
  // hash value are easily eliminated.
  std::vector<std::pair<FunctionComparator::FunctionHash, Function *>>
      HashedFuncs;
  for (Function &Func : M) {
    if (!Func.isDeclaration() && !Func.hasAvailableExternallyLinkage()) {
      HashedFuncs.push_back({FunctionComparator::functionHash(Func), &Func});
    }
  }

  std::stable_sort(
      HashedFuncs.begin(), HashedFuncs.end(),
      [](const std::pair<FunctionComparator::FunctionHash, Function *> &a,
         const std::pair<FunctionComparator::FunctionHash, Function *> &b) {
        return a.first < b.first;
      });

  auto S = HashedFuncs.begin();
  for (auto I = HashedFuncs.begin(), IE = HashedFuncs.end(); I != IE; ++I) {
    // If the hash value matches the previous value or the next one, we must
    // consider merging it. Otherwise it is dropped and never considered again.
    if ((I != S && std::prev(I)->first == I->first) ||
        (std::next(I) != IE && std::next(I)->first == I->first)) {
      Deferred.push_back(WeakVH(I->second));
    }
  }

  do {
    std::vector<WeakVH> Worklist;
    Deferred.swap(Worklist);

    DEBUG(doSanityCheck(Worklist));

    DEBUG(dbgs() << "size of module: " << M.size() << '\n');
    DEBUG(dbgs() << "size of worklist: " << Worklist.size() << '\n');

    // Insert only strong functions and merge them. Strong function merging
    // always deletes one of them.
    for (std::vector<WeakVH>::iterator I = Worklist.begin(),
           E = Worklist.end(); I != E; ++I) {
      if (!*I) continue;
      Function *F = cast<Function>(*I);
      if (!F->isDeclaration() && !F->hasAvailableExternallyLinkage() &&
          !F->mayBeOverridden()) {
        Changed |= insert(F);
      }
    }

    // Insert only weak functions and merge them. By doing these second we
    // create thunks to the strong function when possible. When two weak
    // functions are identical, we create a new strong function with two weak
    // thunks to it which are identical but not mergable.
    for (std::vector<WeakVH>::iterator I = Worklist.begin(),
           E = Worklist.end(); I != E; ++I) {
      if (!*I) continue;
      Function *F = cast<Function>(*I);
      if (!F->isDeclaration() && !F->hasAvailableExternallyLinkage() &&
          F->mayBeOverridden()) {
        Changed |= insert(F);
      }
    }
    DEBUG(dbgs() << "size of FnTree: " << FnTree.size() << '\n');
  } while (!Deferred.empty());

  FnTree.clear();
  GlobalNumbers.clear();

  return Changed;
}

// Replace direct callers of Old with New.
void MergeFunctions::replaceDirectCallers(Function *Old, Function *New) {
  Constant *BitcastNew = ConstantExpr::getBitCast(New, Old->getType());
  for (auto UI = Old->use_begin(), UE = Old->use_end(); UI != UE;) {
    Use *U = &*UI;
    ++UI;
    CallSite CS(U->getUser());
    if (CS && CS.isCallee(U)) {
      // Transfer the called function's attributes to the call site. Due to the
      // bitcast we will 'lose' ABI changing attributes because the 'called
      // function' is no longer a Function* but the bitcast. Code that looks up
      // the attributes from the called function will fail.

      // FIXME: This is not actually true, at least not anymore. The callsite
      // will always have the same ABI affecting attributes as the callee,
      // because otherwise the original input has UB. Note that Old and New
      // always have matching ABI, so no attributes need to be changed.
      // Transferring other attributes may help other optimizations, but that
      // should be done uniformly and not in this ad-hoc way.
      auto &Context = New->getContext();
      auto NewFuncAttrs = New->getAttributes();
      auto CallSiteAttrs = CS.getAttributes();

      CallSiteAttrs = CallSiteAttrs.addAttributes(
          Context, AttributeSet::ReturnIndex, NewFuncAttrs.getRetAttributes());

      for (unsigned argIdx = 0; argIdx < CS.arg_size(); argIdx++) {
        AttributeSet Attrs = NewFuncAttrs.getParamAttributes(argIdx);
        if (Attrs.getNumSlots())
          CallSiteAttrs = CallSiteAttrs.addAttributes(Context, argIdx, Attrs);
      }

      CS.setAttributes(CallSiteAttrs);

      remove(CS.getInstruction()->getParent()->getParent());
      U->set(BitcastNew);
    }
  }
}

// Replace G with an alias to F if possible, or else a thunk to F. Deletes G.
void MergeFunctions::writeThunkOrAlias(Function *F, Function *G) {
  if (HasGlobalAliases && G->hasUnnamedAddr()) {
    if (G->hasExternalLinkage() || G->hasLocalLinkage() ||
        G->hasWeakLinkage()) {
      writeAlias(F, G);
      return;
    }
  }

  writeThunk(F, G);
}
// Helper for writeThunk.
// Selects the proper cast operation, but is a bit simpler than
// CastInst::getCastOpcode.
static Value *createCast(IRBuilder<false> &Builder, Value *V, Type *DestTy) {
  Type *SrcTy = V->getType();
  if (SrcTy->isStructTy()) {
    assert(DestTy->isStructTy());
    assert(SrcTy->getStructNumElements() == DestTy->getStructNumElements());
    Value *Result = UndefValue::get(DestTy);
    for (unsigned int I = 0, E = SrcTy->getStructNumElements(); I < E; ++I) {
      Value *Element = createCast(
          Builder, Builder.CreateExtractValue(V, makeArrayRef(I)),
          DestTy->getStructElementType(I));

      Result =
          Builder.CreateInsertValue(Result, Element, makeArrayRef(I));
    }
    return Result;
  }
  assert(!DestTy->isStructTy());
  if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
    return Builder.CreateIntToPtr(V, DestTy);
  else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
    return Builder.CreatePtrToInt(V, DestTy);
  else
    return Builder.CreateBitCast(V, DestTy);
}
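// For example (illustrative IR): casting %v of type { i32, i8* } to
// { i32, i64 } proceeds element-wise, roughly:
//   %e0 = extractvalue { i32, i8* } %v, 0
//   %e1 = extractvalue { i32, i8* } %v, 1
//   %c1 = ptrtoint i8* %e1 to i64
//   %r0 = insertvalue { i32, i64 } undef, i32 %e0, 0
//   %r1 = insertvalue { i32, i64 } %r0, i64 %c1, 1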
// Replace G with a simple tail call to bitcast(F). Also replace direct uses
// of G with bitcast(F). Deletes G.
void MergeFunctions::writeThunk(Function *F, Function *G) {
  if (!G->mayBeOverridden()) {
    // Redirect direct callers of G to F.
    replaceDirectCallers(G, F);
  }

  // If G was internal then we may have replaced all uses of G with F. If so,
  // stop here and delete G. There's no need for a thunk.
  if (G->hasLocalLinkage() && G->use_empty()) {
    G->eraseFromParent();
    return;
  }

  Function *NewG = Function::Create(G->getFunctionType(), G->getLinkage(), "",
                                    G->getParent());
  BasicBlock *BB = BasicBlock::Create(F->getContext(), "", NewG);
  IRBuilder<false> Builder(BB);

  SmallVector<Value *, 16> Args;
  unsigned i = 0;
  FunctionType *FFTy = F->getFunctionType();
  for (Function::arg_iterator AI = NewG->arg_begin(), AE = NewG->arg_end();
       AI != AE; ++AI) {
    Args.push_back(createCast(Builder, (Value *)AI, FFTy->getParamType(i)));
    ++i;
  }

  CallInst *CI = Builder.CreateCall(F, Args);
  CI->setTailCall();
  CI->setCallingConv(F->getCallingConv());
  CI->setAttributes(F->getAttributes());
  if (NewG->getReturnType()->isVoidTy()) {
    Builder.CreateRetVoid();
  } else {
    Builder.CreateRet(createCast(Builder, CI, NewG->getReturnType()));
  }

  NewG->copyAttributesFrom(G);
  NewG->takeName(G);
  removeUsers(G);
  G->replaceAllUsesWith(NewG);
  G->eraseFromParent();

  DEBUG(dbgs() << "writeThunk: " << NewG->getName() << '\n');
  ++NumThunksWritten;
}
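// For example (illustrative IR): after merging @G into an identical @F of the
// same type, the thunk left in @G's place is just
//   define i32 @G(i32 %x) {
//     %r = tail call i32 @F(i32 %x)
//     ret i32 %r
//   }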
// Replace G with an alias to F and delete G.
void MergeFunctions::writeAlias(Function *F, Function *G) {
  auto *GA = GlobalAlias::create(G->getLinkage(), "", F);
  F->setAlignment(std::max(F->getAlignment(), G->getAlignment()));
  GA->takeName(G);
  GA->setVisibility(G->getVisibility());
  removeUsers(G);
  G->replaceAllUsesWith(GA);
  G->eraseFromParent();

  DEBUG(dbgs() << "writeAlias: " << GA->getName() << '\n');
  ++NumAliasesWritten;
}
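// For example (illustrative IR, syntax varies by IR version): with aliases
// available, @G collapses to roughly
//   @G = alias i32 (i32), i32 (i32)* @F
// which, unlike a thunk, costs no extra code at run time.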
// Merge two equivalent functions. Upon completion, Function G is deleted.
void MergeFunctions::mergeTwoFunctions(Function *F, Function *G) {
  if (F->mayBeOverridden()) {
    assert(G->mayBeOverridden());

    // Make them both thunks to the same internal function.
    Function *H = Function::Create(F->getFunctionType(), F->getLinkage(), "",
                                   F->getParent());
    H->copyAttributesFrom(F);
    H->takeName(F);
    removeUsers(F);
    F->replaceAllUsesWith(H);

    unsigned MaxAlignment = std::max(G->getAlignment(), H->getAlignment());

    if (HasGlobalAliases) {
      writeAlias(F, G);
      writeAlias(F, H);
    } else {
      writeThunk(F, G);
      writeThunk(F, H);
    }

    F->setAlignment(MaxAlignment);
    F->setLinkage(GlobalValue::PrivateLinkage);
    ++NumDoubleWeak;
  } else {
    writeThunkOrAlias(F, G);
  }

  ++NumFunctionsMerged;
}
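// In the double-weak case above, the result looks roughly like (illustrative
// IR):
//   define private i32 @0(i32 %x) { ... }          ; original body of @F
//   define weak i32 @F(i32 %x) { ... tail call @0 ... }
//   define weak i32 @G(i32 %x) { ... tail call @0 ... }
// Either weak symbol can still be overridden at link time, yet the body is
// shared.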
/// Replace function F by function G.
void MergeFunctions::replaceFunctionInTree(const FunctionNode &FN,
                                           Function *G) {
  Function *F = FN.getFunc();
  assert(FunctionComparator(F, G, &GlobalNumbers).compare() == 0 &&
         "The two functions must be equal");

  auto I = FNodesInTree.find(F);
  assert(I != FNodesInTree.end() && "F should be in FNodesInTree");
  assert(FNodesInTree.count(G) == 0 && "FNodesInTree should not contain G");

  FnTreeType::iterator IterToFNInFnTree = I->second;
  assert(&(*IterToFNInFnTree) == &FN && "F should map to FN in FNodesInTree.");
  // Remove F -> FN and insert G -> FN.
  FNodesInTree.erase(I);
  FNodesInTree.insert({G, IterToFNInFnTree});
  // Replace F with G in FN, which is stored inside the FnTree.
  FN.replaceBy(G);
}
// Insert a ComparableFunction into the FnTree, or merge it away if equal to one
// that was already inserted.
bool MergeFunctions::insert(Function *NewFunction) {
  std::pair<FnTreeType::iterator, bool> Result =
      FnTree.insert(FunctionNode(NewFunction));

  if (Result.second) {
    assert(FNodesInTree.count(NewFunction) == 0);
    FNodesInTree.insert({NewFunction, Result.first});
    DEBUG(dbgs() << "Inserting as unique: " << NewFunction->getName() << '\n');
    return false;
  }

  const FunctionNode &OldF = *Result.first;

  // Don't merge tiny functions, since it can just end up making the function
  // larger.
  // FIXME: Should still merge them if they are unnamed_addr and produce an
  // alias.
  if (NewFunction->size() == 1) {
    if (NewFunction->front().size() <= 2) {
      DEBUG(dbgs() << NewFunction->getName()
                   << " is too small to bother merging\n");
      return false;
    }
  }
  // Impose a total order (by name) on the replacement of functions. This is
  // important when operating on more than one module independently to prevent
  // cycles of thunks calling each other when the modules are linked together.
  //
  // When one function is weak and the other is strong there is an order
  // imposed already. We process strong functions before weak functions.
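  //
  // For example (illustrative): if module A merges @f into a thunk calling @g
  // while module B merges @g into a thunk calling @f, linking A and B yields
  // two thunks that bounce to each other forever. Keeping the
  // lexicographically smaller name as the shared body breaks the tie the same
  // way in every module.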
  if ((OldF.getFunc()->mayBeOverridden() && NewFunction->mayBeOverridden()) ||
      (!OldF.getFunc()->mayBeOverridden() && !NewFunction->mayBeOverridden()))
    if (OldF.getFunc()->getName() > NewFunction->getName()) {
      // Swap the two functions.
      Function *F = OldF.getFunc();
      replaceFunctionInTree(*Result.first, NewFunction);
      NewFunction = F;
      assert(OldF.getFunc() != F && "Must have swapped the functions.");
    }

  // Never thunk a strong function to a weak function.
  assert(!OldF.getFunc()->mayBeOverridden() || NewFunction->mayBeOverridden());

  DEBUG(dbgs() << "  " << OldF.getFunc()->getName()
               << " == " << NewFunction->getName() << '\n');

  Function *DeleteF = NewFunction;
  mergeTwoFunctions(OldF.getFunc(), DeleteF);
  return true;
}
// Remove a function from FnTree. If it was already in FnTree, add
// it to Deferred so that we'll look at it in the next round.
void MergeFunctions::remove(Function *F) {
  auto I = FNodesInTree.find(F);
  if (I != FNodesInTree.end()) {
    DEBUG(dbgs() << "Deferred " << F->getName() << ".\n");
    FnTree.erase(I->second);
    // I->second has been invalidated, remove it from the FNodesInTree map to
    // preserve the invariant.
    FNodesInTree.erase(I);
    Deferred.emplace_back(F);
  }
}
// For each instruction used by the value, remove() the function that contains
// the instruction. This should happen right before a call to RAUW.
void MergeFunctions::removeUsers(Value *V) {
  std::vector<Value *> Worklist;
  Worklist.push_back(V);
  SmallSet<Value *, 8> Visited;
  Visited.insert(V);
  while (!Worklist.empty()) {
    Value *V = Worklist.back();
    Worklist.pop_back();

    for (User *U : V->users()) {
      if (Instruction *I = dyn_cast<Instruction>(U)) {
        remove(I->getParent()->getParent());
      } else if (isa<GlobalValue>(U)) {
        // do nothing
      } else if (Constant *C = dyn_cast<Constant>(U)) {
        // Look through the constant: any instruction that transitively uses V
        // through a constant expression must also be deferred. Visit each
        // constant only once so shared subexpressions aren't re-walked.
        if (Visited.insert(C).second)
          Worklist.push_back(C);
      }
    }
  }
}
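// For example (illustrative IR): if @F is only referenced through a constant
// expression such as
//   call void bitcast (void (i32*)* @F to void (i8*)*)(i8* %p)
// the bitcast is a Constant user of @F; the walk above looks through it so
// that the calling function is still deferred before @F is RAUW'd.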