//===- MergeFunctions.cpp - Merge identical functions --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass looks for equivalent functions that are mergeable and folds them.
//
// A total order is defined on the set of functions. It is built from a
// special function comparison procedure that returns
// 0 when the functions are equal,
// -1 when the Left function is less than the Right function, and
// 1 in the opposite case. Since we need a total ordering, we have to maintain
// four properties on the function set:
// a <= a (reflexivity)
// if a <= b and b <= a then a = b (antisymmetry)
// if a <= b and b <= c then a <= c (transitivity).
// for all a and b: a <= b or b <= a (totality).
//
// The comparison iterates through each instruction in each basic block.
// Functions are kept in a binary tree, and for each new function F we perform
// a lookup in that tree.
// In practice it works the following way (see the sketch below the list):
// -- We define a Function* container class with a custom "operator<"
//    (FunctionPtr).
// -- "FunctionPtr" instances are stored in a std::set collection, so every
//    std::set::insert operation completes in O(log N) time.
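//
// A minimal sketch of that arrangement (illustrative only; the exact names
// below are assumptions, not this file's API):
//
//   struct FunctionPtr {
//     const Function *F;
//     bool operator<(const FunctionPtr &RHS) const {
//       return compare(F, RHS.F) == -1; // the total order described above
//     }
//   };
//   std::set<FunctionPtr> FnSet; // insert/lookup in O(log N)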
//
// As an optimization, a hash of the function structure is calculated first,
// and two functions are only compared if they have the same hash. This hash is
// cheap to compute, and has the property that if function F == G according to
// the comparison function, then hash(F) == hash(G). This consistency property
// is critical to ensuring all possible merging opportunities are exploited.
// Collisions in the hash affect the speed of the pass but not the correctness
// or determinism of the resulting transformation.
//
// When a match is found, the functions are folded. If both functions are
// overridable, we move the functionality into a new internal function and
// leave two overridable thunks to it.
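//
// For example (an illustrative IR sketch, not output captured from this
// pass), two identical overridable functions @f and @g may become:
//
//   define internal i32 @merged(i32 %x) { ... }
//   define weak i32 @f(i32 %x) {
//     %r = tail call i32 @merged(i32 %x)
//     ret i32 %r
//   }
//   define weak i32 @g(i32 %x) {
//     %r = tail call i32 @merged(i32 %x)
//     ret i32 %r
//   }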
//
//===----------------------------------------------------------------------===//
//
// Future work:
//
// * virtual functions.
//
// Many functions have their address taken by the virtual function table for
// the object they belong to. However, as long as it's only used for a lookup
// and call, this is irrelevant, and we'd like to fold such functions.
//
// * be smarter about bitcasts.
//
// In order to fold functions, we will sometimes add either bitcast
// instructions or bitcast constant expressions. Unfortunately, this can
// confound further analysis since the two functions differ where one has a
// bitcast and the other doesn't. We should learn to look through bitcasts.
//
// * Compare complex types with pointer types inside.
// * Compare cross-reference cases.
// * Compare complex expressions.
//
// All three issues above can be described as the ability to prove that
// fA == fB == fC == fE == fF == fG in the example below:
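//
// (An illustrative sketch of such mutually cross-referencing functions; the
// bodies here are an assumption for illustration, not the pass's own test
// case:)
//
//  void fA() { fB(); }
//  void fB() { fA(); }
//
//  void fE() { fF(); }
//  void fF() { fG(); }
//  void fG() { fE(); }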
//
// The simplest cross-reference case (fA <--> fB) was implemented in previous
// versions of MergeFunctions, though it appeared in only two function pairs
// in the test-suite (which counts >50k functions).
// The ability to detect complex cross-referencing (e.g.: A->B->C->D->A)
// could cover many more cases.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "mergefunc"

STATISTIC(NumFunctionsMerged, "Number of functions merged");
STATISTIC(NumThunksWritten, "Number of thunks generated");
STATISTIC(NumAliasesWritten, "Number of aliases generated");
STATISTIC(NumDoubleWeak, "Number of new functions created");

static cl::opt<unsigned> NumFunctionsForSanityCheck(
    "mergefunc-sanity",
    cl::desc("How many functions in the module could be used for "
             "the MergeFunctions pass sanity check. "
             "'0' disables this check. Works only with the '-debug' key."),
    cl::init(0), cl::Hidden);

namespace {

/// GlobalNumberState assigns an integer to each global value in the program,
/// which is used by the comparison routine to order references to globals.
/// This state must be preserved throughout the pass, because Functions and
/// other globals need to maintain their relative order. Globals are assigned a
/// number when they are first visited. This order is deterministic, and so the
/// assigned numbers are as well. When two functions are merged, neither number
/// is updated. If the symbols are weak, this would be incorrect. If they are
/// strong, then one will be replaced at all references to the other, and so
/// direct callsites will now see one or the other symbol, and no update is
/// necessary. Note that if we were guaranteed unique names, we could just
/// compare those, but this would not work for stripped bitcodes or for those
/// few symbols without a name.
class GlobalNumberState {
  struct Config : ValueMapConfig<GlobalValue *> {
    enum { FollowRAUW = false };
  };

  // Each GlobalValue is mapped to an identifier. The Config ensures that when
  // RAUW occurs, the mapping does not change. Tracking changes is unnecessary,
  // and also problematic for weak symbols (which may be overwritten).
  typedef ValueMap<GlobalValue *, uint64_t, Config> ValueNumberMap;
  ValueNumberMap GlobalNumbers;

  // The next unused serial number to assign to a global.
  uint64_t NextNumber;

public:
  GlobalNumberState() : GlobalNumbers(), NextNumber(0) {}

  uint64_t getNumber(GlobalValue *Global) {
    ValueNumberMap::iterator MapIter;
    bool Inserted;
    std::tie(MapIter, Inserted) = GlobalNumbers.insert({Global, NextNumber});
    if (Inserted)
      NextNumber++;
    return MapIter->second;
  }
};

/// FunctionComparator - Compares two functions to determine whether or not
/// they will generate machine code with the same behaviour. DataLayout is
/// used if available. The comparator always fails conservatively (erring on
/// the side of claiming that two functions are different).
class FunctionComparator {
public:
  FunctionComparator(const Function *F1, const Function *F2,
                     GlobalNumberState *GN)
      : FnL(F1), FnR(F2), GlobalNumbers(GN) {}

  /// Test whether the two functions have equivalent behaviour.
  int compare();

  /// Hash a function. Equivalent functions will have the same hash, and
  /// unequal functions will have different hashes with high probability.
  typedef uint64_t FunctionHash;
  static FunctionHash functionHash(Function &);

private:
  /// Test whether two basic blocks have equivalent behaviour.
  int cmpBasicBlocks(const BasicBlock *BBL, const BasicBlock *BBR);

  /// Constants comparison.
  /// It is analogous to a lexicographical comparison between hypothetical
  /// numbers of the format:
  /// <bitcastability-trait><raw-bit-contents>
  ///
  /// 1. Bitcastability.
  /// Check whether L's type can be losslessly bitcasted to R's type.
  /// At this stage, if a lossless bitcast is not possible, the method returns
  /// -1 or 1, thus also defining which type is greater in the context of
  /// bitcastability.
  /// Stage 0: If the types are equal in terms of cmpTypes, then we can go
  ///          straight to the contents comparison.
  ///          If the types differ, remember the types comparison result and
  ///          check whether we can still bitcast the types.
  /// Stage 1: Types that satisfy the isFirstClassType condition are always
  ///          greater than the others.
  /// Stage 2: A vector is greater than a non-vector.
  ///          If both types are vectors, then the vector with the greater
  ///          bitwidth is greater.
  ///          If both types are vectors with the same bitwidth, then the
  ///          types are bitcastable, and we can skip the other stages and go
  ///          to the contents comparison.
  /// Stage 3: Pointer types are greater than non-pointers. If both types are
  ///          pointers of the same address space - go to the contents
  ///          comparison. For different address spaces: the pointer with the
  ///          greater address space is greater.
  /// Stage 4: The types are neither vectors nor pointers, and they differ.
  ///          We don't know how to bitcast them, so it is better not to do
  ///          it, and to return the types comparison result instead (so it
  ///          determines the relationship among constants we don't know how
  ///          to bitcast).
  ///
  /// Just for clarity, here is how the set of constants could look on a
  /// single dimension axis:
  /// [NFCT], [FCT, "others"], [FCT, pointers], [FCT, vectors]
  /// Where: NFCT - Not a FirstClassType
  ///        FCT  - FirstClassType
  ///
  /// 2. Compare raw contents.
  /// This stage ignores the types and only compares bits from L and R.
  /// Returns 0 if L and R have equivalent contents, and
  /// -1 or 1 if the values differ.
  /// 2.1. If the contents are numbers, compare the numbers.
  ///      Ints with a greater bitwidth are greater. Ints with the same
  ///      bitwidth are compared by their contents.
  /// 2.2. "And so on". To avoid discrepancies with these comments, it is
  ///      perhaps better to read the implementation itself.
  /// 3. And again about the overall picture. Let's look back at how the
  /// ordered set of constants will look:
  /// [NFCT], [FCT, "others"], [FCT, pointers], [FCT, vectors]
  ///
  /// Now look at what could be inside [FCT, "others"], for example:
  /// [FCT, "others"] =
  /// [
  ///   [double 0.1], [double 1.23],
  ///   [i32 1], [i32 2],
  ///   { double 1.0 },       ; StructTyID, NumElements = 1
  ///   { i32 1 },            ; StructTyID, NumElements = 1
  ///   { double 1, i32 1 },  ; StructTyID, NumElements = 2
  ///   { i32 1, double 1 }   ; StructTyID, NumElements = 2
  /// ]
  ///
  /// Let's explain the order. Float numbers will be less than integers,
  /// simply because of the cmpTypes terms: FloatTyID < IntegerTyID.
  /// Floats (with the same fltSemantics) are sorted according to their value.
  /// Then come the integers, which, like the floats, are easily sorted among
  /// each other.
  /// Then the structures. Structures are grouped at the tail, again because
  /// of their TypeID: StructTyID > IntegerTyID > FloatTyID.
  /// Structures with a greater number of elements are greater. Structures
  /// whose greater elements come first are greater.
  /// The same logic applies to vectors, arrays and other possible complex
  /// types.
  ///
  /// Bitcastable constants.
  /// Let's assume that some constant belongs to some group of
  /// "so-called-equal" values with different types, and at the same time
  /// belongs to another group of constants with equal types
  /// and "really" equal values.
  ///
  /// Now, prove that this is impossible:
  ///
  /// If constant A with type TyA is bitcastable to B with type TyB, then:
  /// 1. All constants with types equal to TyA are bitcastable to B. Since
  ///    those should be vectors (if TyA is a vector), pointers
  ///    (if TyA is a pointer), or else (if TyA is equal to TyB), those types
  ///    should be equal to TyB as well.
  /// 2. All constants with non-equal, but bitcastable, types to TyA are
  ///    bitcastable to B.
  ///    Once again, simply because we allow this for vectors and pointers
  ///    only. This statement can be expanded as below:
  /// 2.1. All vectors with a bitwidth equal to vector A have a bitwidth
  ///      equal to vector B, and are thus bitcastable to B as well.
  /// 2.2. All pointers of the same address space, no matter what they point
  ///      to, are bitcastable. So if C is a pointer, it can be bitcasted to
  ///      A and to B.
  /// So any constant equal or bitcastable to A is equal or bitcastable to B.
  ///
  /// In other words, for pointers and vectors, we ignore the top-level type
  /// and look at their particular properties (bit-width for vectors, and
  /// address space for pointers).
  /// If these properties are equal - compare their contents.
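  ///
  /// For example (an illustrative consequence of the rules above, not a
  /// table from the implementation):
  ///   double 1.0 < i32 1 < { i32 1 }
  /// since DoubleTyID < IntegerTyID < StructTyID in the type ID ordering.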
  int cmpConstants(const Constant *L, const Constant *R);

  /// Compares two global values by number. Uses the GlobalNumberState to
  /// identify the same globals across function calls.
  int cmpGlobalValues(GlobalValue *L, GlobalValue *R);

  /// Assign or look up previously assigned numbers for the two values, and
  /// return whether the numbers are equal. Numbers are assigned in the order
  /// visited.
  /// Comparison order:
  /// Stage 0: A value that is the function itself is always greater than
  ///          others. If the left and right values are references to their
  ///          own functions, then they are equal.
  /// Stage 1: Constants are greater than non-constants.
  ///          If both left and right are constants, then the result of
  ///          cmpConstants is used as the cmpValues result.
  /// Stage 2: InlineAsm instances are greater than others. If both left and
  ///          right are InlineAsm instances, the InlineAsm* pointers are
  ///          cast to integers and compared as numbers.
  /// Stage 3: For all other cases we compare the order in which we met these
  ///          values in their functions. If the right value was met first
  ///          during scanning, then the left value is greater.
  ///          In other words, we compare serial numbers; for more details
  ///          see the comments for sn_mapL and sn_mapR.
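  ///
  /// For example (an illustrative sketch): when the walk reaches
  ///   %a = add i32 %x, 1     ; in the left function
  ///   %b = add i32 %y, 1     ; in the right function
  /// %a and %b are assigned the same serial number on first sight, so any
  /// later use of %a on the left compares equal to the matching use of %b.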
  int cmpValues(const Value *L, const Value *R);

  /// Compare two Instructions for equivalence, similar to
  /// Instruction::isSameOperationAs but with modifications to the type
  /// comparison.
  /// Stages are listed in "most significant stage first" order:
  /// On each stage below, we do a comparison between some left and right
  /// parts of the operations. If the parts are non-equal, we assign the
  /// parts' comparison result to the operation comparison result and exit
  /// from the method.
  /// Otherwise we proceed to the next stage.
  /// Stages:
  /// 1. Operation opcodes. Compared as numbers.
  /// 2. Number of operands.
  /// 3. Operation types. Compared with the cmpTypes method.
  /// 4. Compare operation subclass optional data as a stream of bytes:
  ///    just convert it to integers and call cmpNumbers.
  /// 5. Compare operand types with cmpTypes in
  ///    "most significant operand first" order.
  /// 6. Last stage. Check the operations for some specific attributes.
  ///    For example, for Load it would be:
  ///    6.1. Load: volatile (as boolean flag)
  ///    6.2. Load: alignment (as integer numbers)
  ///    6.3. Load: synch-scope (as integer numbers)
  ///    6.4. Load: range metadata (as integer numbers)
  ///    At this stage it is better to see the code, since it is no more than
  ///    10-15 lines for a particular instruction, and could change sometimes.
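  ///
  /// For example (illustrative): two loads that agree on opcode, operand
  /// count and types but differ in alignment,
  ///   %v = load i32, i32* %p, align 4    ; left
  ///   %w = load i32, i32* %q, align 8    ; right
  /// compare as non-equal at stage 6.2, via cmpNumbers(4, 8) == -1.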
  int cmpOperations(const Instruction *L, const Instruction *R) const;

  /// Compare two GEPs for equivalent pointer arithmetic.
  /// Parts to be compared for each comparison stage,
  /// most significant stage first:
  /// 1. Address space. As numbers.
  /// 2. Constant offset (using the GEPOperator::accumulateConstantOffset
  ///    method).
  /// 3. Pointer operand type (using the cmpTypes method).
  /// 4. Number of operands.
  /// 5. Compare operands, using the cmpValues method.
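  ///
  /// For example (illustrative): when target data is available,
  ///   getelementptr i8, i8* %p, i64 4
  ///   getelementptr i32, i32* %q, i64 1
  /// both accumulate to a constant offset of 4 bytes, so they compare equal
  /// at stage 2 even though their pointer operand types differ.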
  int cmpGEPs(const GEPOperator *GEPL, const GEPOperator *GEPR);
  int cmpGEPs(const GetElementPtrInst *GEPL, const GetElementPtrInst *GEPR) {
    return cmpGEPs(cast<GEPOperator>(GEPL), cast<GEPOperator>(GEPR));
  }

  /// cmpTypes - compares two types,
  /// defines total ordering among the types set.
  ///
  /// Return values:
  /// 0 if types are equal,
  /// -1 if Left is less than Right,
  /// +1 if Left is greater than Right.
  ///
  /// Description:
  /// Comparison is broken into stages. Like in a lexicographical comparison,
  /// the stage coming first has the higher priority.
  /// At each stage of the explanation, keep the total ordering properties in
  /// mind.
  ///
  /// 0. Before comparison we coerce pointer types of address space 0 to
  /// integer. We also don't bother with the same type on the left and right,
  /// and just return 0 in this case.
  /// 1. If the types are of different kind (different type IDs),
  ///    return the result of the type IDs comparison, treating them as
  ///    numbers.
  /// 2. If the types are integers, check that they have the same width. If
  /// they are vectors, check that they have the same count and subtype.
  /// 3. The types have the same ID, so check whether they are one of:
  /// * Void
  /// * Float
  /// * Double
  /// * X86_FP80
  /// * FP128
  /// * PPC_FP128
  /// * Label
  /// * Metadata
  /// We can treat these types as equal whenever their IDs are the same.
  /// 4. If Left and Right are pointers, return the result of the address
  /// space comparison (numbers comparison). We can treat pointer types of
  /// the same address space as equal.
  /// 5. If the types are complex,
  /// then both Left and Right are to be expanded, and their element types
  /// are checked in the same way. If we get Res != 0 at some stage, return
  /// it. Otherwise return 0.
  /// 6. For all other cases put llvm_unreachable.
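  ///
  /// For example (illustrative): i32 and i64 share IntegerTyID, so stage 2
  /// orders them by width, cmpTypes(i32, i64) == -1; float vs. i32 is
  /// decided at stage 1 by the type IDs alone (FloatTyID < IntegerTyID).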
  int cmpTypes(Type *TyL, Type *TyR) const;

  int cmpNumbers(uint64_t L, uint64_t R) const;
  int cmpAPInts(const APInt &L, const APInt &R) const;
  int cmpAPFloats(const APFloat &L, const APFloat &R) const;
  int cmpInlineAsm(const InlineAsm *L, const InlineAsm *R) const;
  int cmpMem(StringRef L, StringRef R) const;
  int cmpAttrs(const AttributeSet L, const AttributeSet R) const;
  int cmpRangeMetadata(const MDNode *L, const MDNode *R) const;

  // The two functions undergoing comparison.
  const Function *FnL, *FnR;

  /// Assign serial numbers to values from the left function, and values from
  /// the right function.
  /// Explanation:
  /// While comparing the functions, we need to compare the values we meet on
  /// the left and right sides.
  /// It is easy to sort things out for external values. They just have to be
  /// the same value on the left and on the right.
  /// But for local values (those introduced inside the function body)
  /// we have to ensure that they were introduced at exactly the same place,
  /// and play the same role.
  /// So let's assign a serial number to each value when we meet it for the
  /// first time. Values that were met at the same place will get the same
  /// serial numbers.
  /// In this context it is useful to explain a few points about the values
  /// assigned to BBs and other ways of implementation (see below).
  ///
  /// 1. Safety of BB reordering.
  /// It's safe to change the order of BasicBlocks in a function.
  /// The relationship with other functions and the serial numbering will not
  /// be changed in this case.
  /// As follows from FunctionComparator::compare(), we do a CFG walk: we
  /// start from the entry, and then take each terminator. So it doesn't
  /// matter how the BBs are actually ordered in the function. And since
  /// cmpValues is called during this walk, the numbering depends only on how
  /// the BBs are located inside the CFG.
  /// So the answer is - yes. We will get the same numbering.
  ///
  /// 2. Impossibility to use dominance properties of values.
  /// If we compare two instruction operands: the first is the use of a local
  /// variable AL from function FL, and the second is the use of a local
  /// variable AR from FR, we could compare their origins and check whether
  /// they are defined at the same place.
  /// But we are still not able to compare operands of PHI nodes, since those
  /// could be operands from further BBs we haven't scanned yet.
  /// So it's impossible to use dominance properties in general.
  DenseMap<const Value *, int> sn_mapL, sn_mapR;

  // The global state we will use.
  GlobalNumberState *GlobalNumbers;
};

class FunctionNode {
  mutable AssertingVH<Function> F;
  FunctionComparator::FunctionHash Hash;

public:
  // Note the hash is recalculated potentially multiple times, but it is
  // cheap.
  FunctionNode(Function *F)
      : F(F), Hash(FunctionComparator::functionHash(*F)) {}

  Function *getFunc() const { return F; }
  FunctionComparator::FunctionHash getHash() const { return Hash; }

  /// Replace the reference to the function F by the function G, assuming
  /// their implementations are equal.
  void replaceBy(Function *G) const {
    F = G;
  }

  void release() { F = nullptr; }
};

} // end anonymous namespace

int FunctionComparator::cmpNumbers(uint64_t L, uint64_t R) const {
  if (L < R) return -1;
  if (L > R) return 1;
  return 0;
}

int FunctionComparator::cmpAPInts(const APInt &L, const APInt &R) const {
  if (int Res = cmpNumbers(L.getBitWidth(), R.getBitWidth()))
    return Res;
  if (L.ugt(R)) return 1;
  if (R.ugt(L)) return -1;
  return 0;
}

int FunctionComparator::cmpAPFloats(const APFloat &L, const APFloat &R) const {
  // Floats are ordered first by semantics (i.e. float, double, half, etc.),
  // then by value interpreted as a bitstring (aka APInt).
  const fltSemantics &SL = L.getSemantics(), &SR = R.getSemantics();
  if (int Res = cmpNumbers(APFloat::semanticsPrecision(SL),
                           APFloat::semanticsPrecision(SR)))
    return Res;
  if (int Res = cmpNumbers(APFloat::semanticsMaxExponent(SL),
                           APFloat::semanticsMaxExponent(SR)))
    return Res;
  if (int Res = cmpNumbers(APFloat::semanticsMinExponent(SL),
                           APFloat::semanticsMinExponent(SR)))
    return Res;
  if (int Res = cmpNumbers(APFloat::semanticsSizeInBits(SL),
                           APFloat::semanticsSizeInBits(SR)))
    return Res;
  return cmpAPInts(L.bitcastToAPInt(), R.bitcastToAPInt());
}

int FunctionComparator::cmpMem(StringRef L, StringRef R) const {
  // Prevent a heavy comparison; compare sizes first.
  if (int Res = cmpNumbers(L.size(), R.size()))
    return Res;

  // Compare strings lexicographically only when it is necessary: only when
  // strings are equal in size.
  return L.compare(R);
}

int FunctionComparator::cmpAttrs(const AttributeSet L,
                                 const AttributeSet R) const {
  if (int Res = cmpNumbers(L.getNumSlots(), R.getNumSlots()))
    return Res;

  for (unsigned i = 0, e = L.getNumSlots(); i != e; ++i) {
    AttributeSet::iterator LI = L.begin(i), LE = L.end(i), RI = R.begin(i),
                           RE = R.end(i);
    for (; LI != LE && RI != RE; ++LI, ++RI) {
      Attribute LA = *LI;
      Attribute RA = *RI;
      if (LA < RA)
        return -1;
      if (RA < LA)
        return 1;
    }
    if (LI != LE)
      return 1;
    if (RI != RE)
      return -1;
  }
  return 0;
}

int FunctionComparator::cmpRangeMetadata(const MDNode *L,
                                         const MDNode *R) const {
  if (L == R)
    return 0;
  if (!L)
    return -1;
  if (!R)
    return 1;
  // Range metadata is a sequence of numbers. Make sure they are the same
  // sequence.
  // TODO: Note that as this is metadata, it is possible to drop and/or merge
  // this data when considering functions to merge. Thus this comparison would
  // return 0 (i.e. equivalent), but merging would become more complicated
  // because the ranges would need to be unioned. It is not likely that
  // functions differ ONLY in this metadata if they are actually the same
  // function semantically.
  if (int Res = cmpNumbers(L->getNumOperands(), R->getNumOperands()))
    return Res;
  for (size_t I = 0; I < L->getNumOperands(); ++I) {
    ConstantInt *LLow = mdconst::extract<ConstantInt>(L->getOperand(I));
    ConstantInt *RLow = mdconst::extract<ConstantInt>(R->getOperand(I));
    if (int Res = cmpAPInts(LLow->getValue(), RLow->getValue()))
      return Res;
  }
  return 0;
}

/// Constants comparison:
/// 1. Check whether the type of the L constant could be losslessly bitcasted
///    to the type of R.
/// 2. Compare the constant contents.
/// For more details see the declaration comments.
int FunctionComparator::cmpConstants(const Constant *L, const Constant *R) {

  Type *TyL = L->getType();
  Type *TyR = R->getType();

  // Check whether types are bitcastable. This part is just re-factored
  // Type::canLosslesslyBitCastTo method, but instead of returning true/false,
  // we also pack into the result which type is "less" for us.
  int TypesRes = cmpTypes(TyL, TyR);
  if (TypesRes != 0) {
    // Types are different, but check whether we can bitcast them.
    if (!TyL->isFirstClassType()) {
      if (TyR->isFirstClassType())
        return -1;
      // Neither TyL nor TyR are values of first class type. Return the result
      // of comparing the types.
      return TypesRes;
    }
    if (!TyR->isFirstClassType()) {
      if (TyL->isFirstClassType())
        return 1;
      return TypesRes;
    }

    // Vector -> Vector conversions are always lossless if the two vector types
    // have the same size, otherwise not.
    unsigned TyLWidth = 0;
    unsigned TyRWidth = 0;

    if (auto *VecTyL = dyn_cast<VectorType>(TyL))
      TyLWidth = VecTyL->getBitWidth();
    if (auto *VecTyR = dyn_cast<VectorType>(TyR))
      TyRWidth = VecTyR->getBitWidth();

    if (TyLWidth != TyRWidth)
      return cmpNumbers(TyLWidth, TyRWidth);

    // Zero bit-width means neither TyL nor TyR are vectors.
    if (!TyLWidth) {
      PointerType *PTyL = dyn_cast<PointerType>(TyL);
      PointerType *PTyR = dyn_cast<PointerType>(TyR);
      if (PTyL && PTyR) {
        unsigned AddrSpaceL = PTyL->getAddressSpace();
        unsigned AddrSpaceR = PTyR->getAddressSpace();
        if (int Res = cmpNumbers(AddrSpaceL, AddrSpaceR))
          return Res;
      } else {
        // TyL and TyR aren't vectors, nor pointers. We don't know how to
        // bitcast them.
        return TypesRes;
      }
    }
  }

  // OK, types are bitcastable, now check constant contents.

  if (L->isNullValue() && R->isNullValue())
    return TypesRes;
  if (L->isNullValue() && !R->isNullValue())
    return 1;
  if (!L->isNullValue() && R->isNullValue())
    return -1;

  auto GlobalValueL = const_cast<GlobalValue *>(dyn_cast<GlobalValue>(L));
  auto GlobalValueR = const_cast<GlobalValue *>(dyn_cast<GlobalValue>(R));
  if (GlobalValueL && GlobalValueR) {
    return cmpGlobalValues(GlobalValueL, GlobalValueR);
  }

  if (int Res = cmpNumbers(L->getValueID(), R->getValueID()))
    return Res;

  if (const auto *SeqL = dyn_cast<ConstantDataSequential>(L)) {
    const auto *SeqR = cast<ConstantDataSequential>(R);
    // This handles ConstantDataArray and ConstantDataVector. Note that we
    // compare the two raw data arrays, which might differ depending on the
    // host endianness. This isn't a problem though, because the endianness of
    // a module will affect the order of the constants, but this order is the
    // same for a given input module and host platform.
    return cmpMem(SeqL->getRawDataValues(), SeqR->getRawDataValues());
  }

  switch (L->getValueID()) {
  case Value::UndefValueVal: return TypesRes;
  case Value::ConstantIntVal: {
    const APInt &LInt = cast<ConstantInt>(L)->getValue();
    const APInt &RInt = cast<ConstantInt>(R)->getValue();
    return cmpAPInts(LInt, RInt);
  }
  case Value::ConstantFPVal: {
    const APFloat &LAPF = cast<ConstantFP>(L)->getValueAPF();
    const APFloat &RAPF = cast<ConstantFP>(R)->getValueAPF();
    return cmpAPFloats(LAPF, RAPF);
  }
  case Value::ConstantArrayVal: {
    const ConstantArray *LA = cast<ConstantArray>(L);
    const ConstantArray *RA = cast<ConstantArray>(R);
    uint64_t NumElementsL = cast<ArrayType>(TyL)->getNumElements();
    uint64_t NumElementsR = cast<ArrayType>(TyR)->getNumElements();
    if (int Res = cmpNumbers(NumElementsL, NumElementsR))
      return Res;
    for (uint64_t i = 0; i < NumElementsL; ++i) {
      if (int Res = cmpConstants(cast<Constant>(LA->getOperand(i)),
                                 cast<Constant>(RA->getOperand(i))))
        return Res;
    }
    return 0;
  }
  case Value::ConstantStructVal: {
    const ConstantStruct *LS = cast<ConstantStruct>(L);
    const ConstantStruct *RS = cast<ConstantStruct>(R);
    unsigned NumElementsL = cast<StructType>(TyL)->getNumElements();
    unsigned NumElementsR = cast<StructType>(TyR)->getNumElements();
    if (int Res = cmpNumbers(NumElementsL, NumElementsR))
      return Res;
    for (unsigned i = 0; i != NumElementsL; ++i) {
      if (int Res = cmpConstants(cast<Constant>(LS->getOperand(i)),
                                 cast<Constant>(RS->getOperand(i))))
        return Res;
    }
    return 0;
  }
  case Value::ConstantVectorVal: {
    const ConstantVector *LV = cast<ConstantVector>(L);
    const ConstantVector *RV = cast<ConstantVector>(R);
    unsigned NumElementsL = cast<VectorType>(TyL)->getNumElements();
    unsigned NumElementsR = cast<VectorType>(TyR)->getNumElements();
    if (int Res = cmpNumbers(NumElementsL, NumElementsR))
      return Res;
    for (uint64_t i = 0; i < NumElementsL; ++i) {
      if (int Res = cmpConstants(cast<Constant>(LV->getOperand(i)),
                                 cast<Constant>(RV->getOperand(i))))
        return Res;
    }
    return 0;
  }
  case Value::ConstantExprVal: {
    const ConstantExpr *LE = cast<ConstantExpr>(L);
    const ConstantExpr *RE = cast<ConstantExpr>(R);
    unsigned NumOperandsL = LE->getNumOperands();
    unsigned NumOperandsR = RE->getNumOperands();
    if (int Res = cmpNumbers(NumOperandsL, NumOperandsR))
      return Res;
    for (unsigned i = 0; i < NumOperandsL; ++i) {
      if (int Res = cmpConstants(cast<Constant>(LE->getOperand(i)),
                                 cast<Constant>(RE->getOperand(i))))
        return Res;
    }
    return 0;
  }
  case Value::BlockAddressVal: {
    const BlockAddress *LBA = cast<BlockAddress>(L);
    const BlockAddress *RBA = cast<BlockAddress>(R);
    if (int Res = cmpValues(LBA->getFunction(), RBA->getFunction()))
      return Res;
    if (LBA->getFunction() == RBA->getFunction()) {
      // They are BBs in the same function. Order by which comes first in the
      // BB order of the function. This order is deterministic.
      Function *F = LBA->getFunction();
      BasicBlock *LBB = LBA->getBasicBlock();
      BasicBlock *RBB = RBA->getBasicBlock();
      if (LBB == RBB)
        return 0;
      for (BasicBlock &BB : F->getBasicBlockList()) {
        if (&BB == LBB) {
          assert(&BB != RBB);
          return -1;
        }
        if (&BB == RBB)
          return 1;
      }
      llvm_unreachable("Basic Block Address does not point to a basic block in "
                       "its function.");
      return -1;
    } else {
      // cmpValues said the functions are the same. So because they aren't
      // literally the same pointer, they must respectively be the left and
      // right functions.
      assert(LBA->getFunction() == FnL && RBA->getFunction() == FnR);
      // cmpValues will tell us if these are equivalent BasicBlocks, in the
      // context of their respective functions.
      return cmpValues(LBA->getBasicBlock(), RBA->getBasicBlock());
    }
  }
  default: // Unknown constant, abort.
    DEBUG(dbgs() << "Looking at valueID " << L->getValueID() << "\n");
    llvm_unreachable("Constant ValueID not recognized.");
    return -1;
  }
}

int FunctionComparator::cmpGlobalValues(GlobalValue *L, GlobalValue *R) {
  return cmpNumbers(GlobalNumbers->getNumber(L), GlobalNumbers->getNumber(R));
}

/// cmpTypes - compares two types,
/// defines total ordering among the types set.
/// See the method declaration comments for more details.
int FunctionComparator::cmpTypes(Type *TyL, Type *TyR) const {
  PointerType *PTyL = dyn_cast<PointerType>(TyL);
  PointerType *PTyR = dyn_cast<PointerType>(TyR);

  const DataLayout &DL = FnL->getParent()->getDataLayout();
  if (PTyL && PTyL->getAddressSpace() == 0)
    TyL = DL.getIntPtrType(TyL);
  if (PTyR && PTyR->getAddressSpace() == 0)
    TyR = DL.getIntPtrType(TyR);

  if (TyL == TyR)
    return 0;

  if (int Res = cmpNumbers(TyL->getTypeID(), TyR->getTypeID()))
    return Res;

  switch (TyL->getTypeID()) {
  default:
    llvm_unreachable("Unknown type!");
    // Fall through in Release mode.
  case Type::IntegerTyID:
    return cmpNumbers(cast<IntegerType>(TyL)->getBitWidth(),
                      cast<IntegerType>(TyR)->getBitWidth());
  case Type::VectorTyID: {
    VectorType *VTyL = cast<VectorType>(TyL), *VTyR = cast<VectorType>(TyR);
    if (int Res = cmpNumbers(VTyL->getNumElements(), VTyR->getNumElements()))
      return Res;
    return cmpTypes(VTyL->getElementType(), VTyR->getElementType());
  }
  // TyL == TyR would have been caught earlier (returning 0), because types
  // are uniqued.
  case Type::VoidTyID:
  case Type::FloatTyID:
  case Type::DoubleTyID:
  case Type::X86_FP80TyID:
  case Type::FP128TyID:
  case Type::PPC_FP128TyID:
  case Type::LabelTyID:
  case Type::MetadataTyID:
  case Type::TokenTyID:
    return 0;

  case Type::PointerTyID: {
    assert(PTyL && PTyR && "Both types must be pointers here.");
    return cmpNumbers(PTyL->getAddressSpace(), PTyR->getAddressSpace());
  }

  case Type::StructTyID: {
    StructType *STyL = cast<StructType>(TyL);
    StructType *STyR = cast<StructType>(TyR);
    if (STyL->getNumElements() != STyR->getNumElements())
      return cmpNumbers(STyL->getNumElements(), STyR->getNumElements());

    if (STyL->isPacked() != STyR->isPacked())
      return cmpNumbers(STyL->isPacked(), STyR->isPacked());

    for (unsigned i = 0, e = STyL->getNumElements(); i != e; ++i) {
      if (int Res = cmpTypes(STyL->getElementType(i), STyR->getElementType(i)))
        return Res;
    }
    return 0;
  }

  case Type::FunctionTyID: {
    FunctionType *FTyL = cast<FunctionType>(TyL);
    FunctionType *FTyR = cast<FunctionType>(TyR);
    if (FTyL->getNumParams() != FTyR->getNumParams())
      return cmpNumbers(FTyL->getNumParams(), FTyR->getNumParams());

    if (FTyL->isVarArg() != FTyR->isVarArg())
      return cmpNumbers(FTyL->isVarArg(), FTyR->isVarArg());

    if (int Res = cmpTypes(FTyL->getReturnType(), FTyR->getReturnType()))
      return Res;

    for (unsigned i = 0, e = FTyL->getNumParams(); i != e; ++i) {
      if (int Res = cmpTypes(FTyL->getParamType(i), FTyR->getParamType(i)))
        return Res;
    }
    return 0;
  }

  case Type::ArrayTyID: {
    ArrayType *ATyL = cast<ArrayType>(TyL);
    ArrayType *ATyR = cast<ArrayType>(TyR);
    if (ATyL->getNumElements() != ATyR->getNumElements())
      return cmpNumbers(ATyL->getNumElements(), ATyR->getNumElements());
    return cmpTypes(ATyL->getElementType(), ATyR->getElementType());
  }
  }
}

// Determine whether the two operations are the same except that pointer-to-A
// and pointer-to-B are equivalent. This should be kept in sync with
// Instruction::isSameOperationAs.
// Read the method declaration comments for more details.
int FunctionComparator::cmpOperations(const Instruction *L,
                                      const Instruction *R) const {
  // Differences from Instruction::isSameOperationAs:
  //  * replace type comparison with calls to cmpTypes.
  //  * we test for I->hasSameSubclassOptionalData (nuw/nsw/tail) at the top.
  //  * because of the above, we don't test for the tail bit on calls later on.
  if (int Res = cmpNumbers(L->getOpcode(), R->getOpcode()))
    return Res;

  if (int Res = cmpNumbers(L->getNumOperands(), R->getNumOperands()))
    return Res;

  if (int Res = cmpTypes(L->getType(), R->getType()))
    return Res;

  if (int Res = cmpNumbers(L->getRawSubclassOptionalData(),
                           R->getRawSubclassOptionalData()))
    return Res;

  if (const AllocaInst *AI = dyn_cast<AllocaInst>(L)) {
    if (int Res = cmpTypes(AI->getAllocatedType(),
                           cast<AllocaInst>(R)->getAllocatedType()))
      return Res;
    if (int Res =
            cmpNumbers(AI->getAlignment(), cast<AllocaInst>(R)->getAlignment()))
      return Res;
  }

  // We have two instructions of identical opcode and #operands. Check to see
  // if all operands are the same type.
  for (unsigned i = 0, e = L->getNumOperands(); i != e; ++i) {
    if (int Res =
            cmpTypes(L->getOperand(i)->getType(), R->getOperand(i)->getType()))
      return Res;
  }

  // Check special state that is a part of some instructions.
  if (const LoadInst *LI = dyn_cast<LoadInst>(L)) {
    if (int Res = cmpNumbers(LI->isVolatile(), cast<LoadInst>(R)->isVolatile()))
      return Res;
    if (int Res =
            cmpNumbers(LI->getAlignment(), cast<LoadInst>(R)->getAlignment()))
      return Res;
    if (int Res =
            cmpNumbers(LI->getOrdering(), cast<LoadInst>(R)->getOrdering()))
      return Res;
    if (int Res =
            cmpNumbers(LI->getSynchScope(), cast<LoadInst>(R)->getSynchScope()))
      return Res;
    return cmpRangeMetadata(
        LI->getMetadata(LLVMContext::MD_range),
        cast<LoadInst>(R)->getMetadata(LLVMContext::MD_range));
  }
  if (const StoreInst *SI = dyn_cast<StoreInst>(L)) {
    if (int Res =
            cmpNumbers(SI->isVolatile(), cast<StoreInst>(R)->isVolatile()))
      return Res;
    if (int Res =
            cmpNumbers(SI->getAlignment(), cast<StoreInst>(R)->getAlignment()))
      return Res;
    if (int Res =
            cmpNumbers(SI->getOrdering(), cast<StoreInst>(R)->getOrdering()))
      return Res;
    return cmpNumbers(SI->getSynchScope(), cast<StoreInst>(R)->getSynchScope());
  }
  if (const CmpInst *CI = dyn_cast<CmpInst>(L))
    return cmpNumbers(CI->getPredicate(), cast<CmpInst>(R)->getPredicate());
  if (const CallInst *CI = dyn_cast<CallInst>(L)) {
    if (int Res = cmpNumbers(CI->getCallingConv(),
                             cast<CallInst>(R)->getCallingConv()))
      return Res;
    if (int Res =
            cmpAttrs(CI->getAttributes(), cast<CallInst>(R)->getAttributes()))
      return Res;
    return cmpRangeMetadata(
        CI->getMetadata(LLVMContext::MD_range),
        cast<CallInst>(R)->getMetadata(LLVMContext::MD_range));
  }
  if (const InvokeInst *II = dyn_cast<InvokeInst>(L)) {
    if (int Res = cmpNumbers(II->getCallingConv(),
                             cast<InvokeInst>(R)->getCallingConv()))
      return Res;
    if (int Res =
            cmpAttrs(II->getAttributes(), cast<InvokeInst>(R)->getAttributes()))
      return Res;
    return cmpRangeMetadata(
        II->getMetadata(LLVMContext::MD_range),
        cast<InvokeInst>(R)->getMetadata(LLVMContext::MD_range));
  }
  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(L)) {
    ArrayRef<unsigned> LIndices = IVI->getIndices();
    ArrayRef<unsigned> RIndices = cast<InsertValueInst>(R)->getIndices();
    if (int Res = cmpNumbers(LIndices.size(), RIndices.size()))
      return Res;
    for (size_t i = 0, e = LIndices.size(); i != e; ++i) {
      if (int Res = cmpNumbers(LIndices[i], RIndices[i]))
        return Res;
    }
  }
  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(L)) {
    ArrayRef<unsigned> LIndices = EVI->getIndices();
    ArrayRef<unsigned> RIndices = cast<ExtractValueInst>(R)->getIndices();
    if (int Res = cmpNumbers(LIndices.size(), RIndices.size()))
      return Res;
    for (size_t i = 0, e = LIndices.size(); i != e; ++i) {
      if (int Res = cmpNumbers(LIndices[i], RIndices[i]))
        return Res;
    }
  }
  if (const FenceInst *FI = dyn_cast<FenceInst>(L)) {
    if (int Res =
            cmpNumbers(FI->getOrdering(), cast<FenceInst>(R)->getOrdering()))
      return Res;
    return cmpNumbers(FI->getSynchScope(), cast<FenceInst>(R)->getSynchScope());
  }

  if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(L)) {
    if (int Res = cmpNumbers(CXI->isVolatile(),
                             cast<AtomicCmpXchgInst>(R)->isVolatile()))
      return Res;
    if (int Res = cmpNumbers(CXI->isWeak(),
                             cast<AtomicCmpXchgInst>(R)->isWeak()))
      return Res;
    if (int Res = cmpNumbers(CXI->getSuccessOrdering(),
                             cast<AtomicCmpXchgInst>(R)->getSuccessOrdering()))
      return Res;
    if (int Res = cmpNumbers(CXI->getFailureOrdering(),
                             cast<AtomicCmpXchgInst>(R)->getFailureOrdering()))
      return Res;
    return cmpNumbers(CXI->getSynchScope(),
                      cast<AtomicCmpXchgInst>(R)->getSynchScope());
  }
  if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(L)) {
    if (int Res = cmpNumbers(RMWI->getOperation(),
                             cast<AtomicRMWInst>(R)->getOperation()))
      return Res;
    if (int Res = cmpNumbers(RMWI->isVolatile(),
                             cast<AtomicRMWInst>(R)->isVolatile()))
      return Res;
    if (int Res = cmpNumbers(RMWI->getOrdering(),
                             cast<AtomicRMWInst>(R)->getOrdering()))
      return Res;
    return cmpNumbers(RMWI->getSynchScope(),
                      cast<AtomicRMWInst>(R)->getSynchScope());
  }
  return 0;
}

// Determine whether two GEP operations perform the same underlying arithmetic.
// Read the method declaration comments for more details.
int FunctionComparator::cmpGEPs(const GEPOperator *GEPL,
                                const GEPOperator *GEPR) {
  unsigned int ASL = GEPL->getPointerAddressSpace();
  unsigned int ASR = GEPR->getPointerAddressSpace();

  if (int Res = cmpNumbers(ASL, ASR))
    return Res;

  // When we have target data, we can reduce the GEP down to the value in bytes
  // added to the address.
  const DataLayout &DL = FnL->getParent()->getDataLayout();
  unsigned BitWidth = DL.getPointerSizeInBits(ASL);
  APInt OffsetL(BitWidth, 0), OffsetR(BitWidth, 0);
  if (GEPL->accumulateConstantOffset(DL, OffsetL) &&
      GEPR->accumulateConstantOffset(DL, OffsetR))
    return cmpAPInts(OffsetL, OffsetR);

  if (int Res = cmpTypes(GEPL->getPointerOperand()->getType(),
                         GEPR->getPointerOperand()->getType()))
    return Res;

  if (int Res = cmpNumbers(GEPL->getNumOperands(), GEPR->getNumOperands()))
    return Res;

  for (unsigned i = 0, e = GEPL->getNumOperands(); i != e; ++i) {
    if (int Res = cmpValues(GEPL->getOperand(i), GEPR->getOperand(i)))
      return Res;
  }

  return 0;
}

int FunctionComparator::cmpInlineAsm(const InlineAsm *L,
                                     const InlineAsm *R) const {
  // InlineAsm's are uniqued. If they are the same pointer, obviously they are
  // the same; otherwise compare the fields.
  if (L == R)
    return 0;
  if (int Res = cmpTypes(L->getFunctionType(), R->getFunctionType()))
    return Res;
  if (int Res = cmpMem(L->getAsmString(), R->getAsmString()))
    return Res;
  if (int Res = cmpMem(L->getConstraintString(), R->getConstraintString()))
    return Res;
  if (int Res = cmpNumbers(L->hasSideEffects(), R->hasSideEffects()))
    return Res;
  if (int Res = cmpNumbers(L->isAlignStack(), R->isAlignStack()))
    return Res;
  if (int Res = cmpNumbers(L->getDialect(), R->getDialect()))
    return Res;
  llvm_unreachable("InlineAsm blocks were not uniqued.");
  return 0;
}

/// Compare two values used by the two functions under pair-wise comparison. If
/// this is the first time the values are seen, they're added to the mapping so
/// that we will detect mismatches on next use.
/// See comments in the declaration for more details.
int FunctionComparator::cmpValues(const Value *L, const Value *R) {
  // Catch the self-reference case.
  if (L == FnL) {
    if (R == FnR)
      return 0;
    return -1;
  }
  if (R == FnR) {
    if (L == FnL)
      return 0;
    return 1;
  }

  const Constant *ConstL = dyn_cast<Constant>(L);
  const Constant *ConstR = dyn_cast<Constant>(R);
  if (ConstL && ConstR) {
    if (L == R)
      return 0;
    return cmpConstants(ConstL, ConstR);
  }

  if (ConstL)
    return 1;
  if (ConstR)
    return -1;

  const InlineAsm *InlineAsmL = dyn_cast<InlineAsm>(L);
  const InlineAsm *InlineAsmR = dyn_cast<InlineAsm>(R);

  if (InlineAsmL && InlineAsmR)
    return cmpInlineAsm(InlineAsmL, InlineAsmR);
  if (InlineAsmL)
    return 1;
  if (InlineAsmR)
    return -1;

  auto LeftSN = sn_mapL.insert(std::make_pair(L, sn_mapL.size())),
       RightSN = sn_mapR.insert(std::make_pair(R, sn_mapR.size()));

  return cmpNumbers(LeftSN.first->second, RightSN.first->second);
}

// Test whether two basic blocks have equivalent behaviour.
int FunctionComparator::cmpBasicBlocks(const BasicBlock *BBL,
                                       const BasicBlock *BBR) {
  BasicBlock::const_iterator InstL = BBL->begin(), InstLE = BBL->end();
  BasicBlock::const_iterator InstR = BBR->begin(), InstRE = BBR->end();

  do {
    if (int Res = cmpValues(InstL, InstR))
      return Res;

    const GetElementPtrInst *GEPL = dyn_cast<GetElementPtrInst>(InstL);
    const GetElementPtrInst *GEPR = dyn_cast<GetElementPtrInst>(InstR);

    if (GEPL && !GEPR)
      return 1;
    if (GEPR && !GEPL)
      return -1;

    if (GEPL && GEPR) {
      if (int Res =
              cmpValues(GEPL->getPointerOperand(), GEPR->getPointerOperand()))
        return Res;
      if (int Res = cmpGEPs(GEPL, GEPR))
        return Res;
    } else {
      if (int Res = cmpOperations(InstL, InstR))
        return Res;
      assert(InstL->getNumOperands() == InstR->getNumOperands());

      for (unsigned i = 0, e = InstL->getNumOperands(); i != e; ++i) {
        Value *OpL = InstL->getOperand(i);
        Value *OpR = InstR->getOperand(i);
        if (int Res = cmpValues(OpL, OpR))
          return Res;
        // cmpValues should ensure this is true.
        assert(cmpTypes(OpL->getType(), OpR->getType()) == 0);
      }
    }

    ++InstL, ++InstR;
  } while (InstL != InstLE && InstR != InstRE);

  if (InstL != InstLE && InstR == InstRE)
    return 1;
  if (InstL == InstLE && InstR != InstRE)
    return -1;
  return 0;
}

// Test whether the two functions have equivalent behaviour.
int FunctionComparator::compare() {
  sn_mapL.clear();
  sn_mapR.clear();

  if (int Res = cmpAttrs(FnL->getAttributes(), FnR->getAttributes()))
    return Res;

  if (int Res = cmpNumbers(FnL->hasGC(), FnR->hasGC()))
    return Res;

  if (FnL->hasGC()) {
    if (int Res = cmpMem(FnL->getGC(), FnR->getGC()))
      return Res;
  }

  if (int Res = cmpNumbers(FnL->hasSection(), FnR->hasSection()))
    return Res;

  if (FnL->hasSection()) {
    if (int Res = cmpMem(FnL->getSection(), FnR->getSection()))
      return Res;
  }

  if (int Res = cmpNumbers(FnL->isVarArg(), FnR->isVarArg()))
    return Res;

  // TODO: if it's internal and only used in direct calls, we could handle this
  // case too.
  if (int Res = cmpNumbers(FnL->getCallingConv(), FnR->getCallingConv()))
    return Res;

  if (int Res = cmpTypes(FnL->getFunctionType(), FnR->getFunctionType()))
    return Res;

  assert(FnL->arg_size() == FnR->arg_size() &&
         "Identically typed functions have different numbers of args!");

  // Visit the arguments so that they get enumerated in the order they're
  // passed in.
  for (Function::const_arg_iterator ArgLI = FnL->arg_begin(),
                                    ArgRI = FnR->arg_begin(),
                                    ArgLE = FnL->arg_end();
       ArgLI != ArgLE; ++ArgLI, ++ArgRI) {
    if (cmpValues(ArgLI, ArgRI) != 0)
      llvm_unreachable("Arguments repeat!");
  }

  // We do a CFG-ordered walk since the actual ordering of the blocks in the
  // linked list is immaterial. Our walk starts at the entry block for both
  // functions, then takes each block from each terminator in order. As an
  // artifact, this also means that unreachable blocks are ignored.
  SmallVector<const BasicBlock *, 8> FnLBBs, FnRBBs;
  SmallSet<const BasicBlock *, 128> VisitedBBs; // in terms of F1.

  FnLBBs.push_back(&FnL->getEntryBlock());
  FnRBBs.push_back(&FnR->getEntryBlock());

  VisitedBBs.insert(FnLBBs[0]);
  while (!FnLBBs.empty()) {
    const BasicBlock *BBL = FnLBBs.pop_back_val();
    const BasicBlock *BBR = FnRBBs.pop_back_val();

    if (int Res = cmpValues(BBL, BBR))
      return Res;

    if (int Res = cmpBasicBlocks(BBL, BBR))
      return Res;

    const TerminatorInst *TermL = BBL->getTerminator();
    const TerminatorInst *TermR = BBR->getTerminator();

    assert(TermL->getNumSuccessors() == TermR->getNumSuccessors());
    for (unsigned i = 0, e = TermL->getNumSuccessors(); i != e; ++i) {
      if (!VisitedBBs.insert(TermL->getSuccessor(i)).second)
        continue;

      FnLBBs.push_back(TermL->getSuccessor(i));
      FnRBBs.push_back(TermR->getSuccessor(i));
    }
  }
  return 0;
}

// Accumulate the hash of a sequence of 64-bit integers. This is similar to a
// hash of a sequence of 64-bit ints, but the entire input does not need to be
// available at once. This interface is necessary for functionHash because it
// needs to accumulate the hash as the structure of the function is traversed
// without saving these values to an intermediate buffer. This form of hashing
// is not often needed, as usually the object to hash is just read from a
// buffer.
class HashAccumulator64 {
  uint64_t Hash;

public:
  // Initialize to a random constant, so the state isn't zero.
  HashAccumulator64() { Hash = 0x6acaa36bef8325c5ULL; }

  void add(uint64_t V) {
    Hash = llvm::hashing::detail::hash_16_bytes(Hash, V);
  }

  // No finishing is required, because the entire hash value is used.
  uint64_t getHash() { return Hash; }
};
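
// Usage sketch for the accumulator above (illustrative only):
//   HashAccumulator64 Acc;
//   Acc.add(F.isVarArg());
//   Acc.add(F.arg_size());
//   uint64_t H = Acc.getHash(); // deterministic for the same input sequence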

// A function hash is calculated by considering only the number of arguments
// and whether a function is varargs, the order of basic blocks (given by the
// successors of each basic block in depth first order), and the order of
// opcodes of each instruction within each of these basic blocks. This mirrors
// the strategy compare() uses to compare functions by walking the BBs in depth
// first order and comparing each instruction in sequence. Because this hash
// does not look at the operands, it is insensitive to things such as the
// target of calls and the constants used in the function, which makes it
// useful when possibly merging functions which are the same modulo constants
// and call targets.
FunctionComparator::FunctionHash FunctionComparator::functionHash(Function &F) {
  HashAccumulator64 H;
  H.add(F.isVarArg());
  H.add(F.arg_size());

  SmallVector<const BasicBlock *, 8> BBs;
  SmallSet<const BasicBlock *, 16> VisitedBBs;

  // Walk the blocks in the same order as FunctionComparator::cmpBasicBlocks(),
  // accumulating the hash of the function "structure" (BB and opcode
  // sequence).
  BBs.push_back(&F.getEntryBlock());
  VisitedBBs.insert(BBs[0]);
  while (!BBs.empty()) {
    const BasicBlock *BB = BBs.pop_back_val();
    // This random value acts as a block header, as otherwise the partition of
    // opcodes into BBs wouldn't affect the hash, only the order of the
    // opcodes.
    H.add(45798);
    for (auto &Inst : *BB) {
      H.add(Inst.getOpcode());
    }
    const TerminatorInst *Term = BB->getTerminator();
    for (unsigned i = 0, e = Term->getNumSuccessors(); i != e; ++i) {
      if (!VisitedBBs.insert(Term->getSuccessor(i)).second)
        continue;
      BBs.push_back(Term->getSuccessor(i));
    }
  }
  return H.getHash();
}

namespace {

/// MergeFunctions finds functions which will generate identical machine code,
/// by considering all pointer types to be equivalent. Once identified,
/// MergeFunctions will fold them by replacing a call to one with a call to a
/// bitcast of the other.
class MergeFunctions : public ModulePass {
public:
  static char ID;

  MergeFunctions()
      : ModulePass(ID), FnTree(FunctionNodeCmp(&GlobalNumbers)),
        FNodesInTree(), HasGlobalAliases(false) {
    initializeMergeFunctionsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override;

private:
  // The function comparison operator is provided here so that FunctionNodes do
  // not need to become larger with another pointer.
  class FunctionNodeCmp {
    GlobalNumberState *GlobalNumbers;

  public:
    FunctionNodeCmp(GlobalNumberState *GN) : GlobalNumbers(GN) {}

    bool operator()(const FunctionNode &LHS, const FunctionNode &RHS) const {
      // Order first by hashes, then by full function comparison.
      if (LHS.getHash() != RHS.getHash())
        return LHS.getHash() < RHS.getHash();
      FunctionComparator FCmp(LHS.getFunc(), RHS.getFunc(), GlobalNumbers);
      return FCmp.compare() == -1;
    }
  };
  typedef std::set<FunctionNode, FunctionNodeCmp> FnTreeType;

  GlobalNumberState GlobalNumbers;

  /// A work queue of functions that may have been modified and should be
  /// analyzed again.
  std::vector<WeakVH> Deferred;

  /// Checks the rules of the order relation introduced among the functions
  /// set. Returns true if the sanity check has been passed, and false if
  /// failed.
  bool doSanityCheck(std::vector<WeakVH> &Worklist);

  /// Insert a function into the FnTree, or merge it away if it's equal to one
  /// that's already present.
  bool insert(Function *NewFunction);

  /// Remove a Function from the FnTree and queue it up for a second sweep of
  /// analysis.
  void remove(Function *F);

  /// Find the functions that use this Value, remove them from the FnTree, and
  /// queue the functions.
  void removeUsers(Value *V);

  /// Replace all direct calls of Old with calls of New. Will bitcast New if
  /// necessary to make types match.
  void replaceDirectCallers(Function *Old, Function *New);

  /// Merge two equivalent functions. Upon completion, G may be deleted, or may
  /// be converted into a thunk. In either case, it should never be visited
  /// again.
  void mergeTwoFunctions(Function *F, Function *G);

  /// Replace G with a thunk or an alias to F. Deletes G.
  void writeThunkOrAlias(Function *F, Function *G);

  /// Replace G with a simple tail call to bitcast(F). Also replace direct uses
  /// of G with bitcast(F). Deletes G.
  void writeThunk(Function *F, Function *G);

  /// Replace G with an alias to F. Deletes G.
  void writeAlias(Function *F, Function *G);

  /// Replace function F with function G in the function tree.
  void replaceFunctionInTree(const FunctionNode &FN, Function *G);

  /// The set of all distinct functions. Use the insert() and remove() methods
  /// to modify it. The map allows efficient lookup and deferring of Functions.
  FnTreeType FnTree;

  // Map functions to the iterators of the FunctionNode which contains them
  // in the FnTree. This must be updated carefully whenever the FnTree is
  // modified, i.e. in insert(), remove(), and replaceFunctionInTree(), to
  // avoid dangling iterators into FnTree. The invariant that preserves this
  // is that there is exactly one mapping F -> FN for each FunctionNode FN in
  // FnTree.
  ValueMap<Function *, FnTreeType::iterator> FNodesInTree;

  /// Whether or not the target supports global aliases.
  bool HasGlobalAliases;
};

} // end anonymous namespace

char MergeFunctions::ID = 0;
INITIALIZE_PASS(MergeFunctions, "mergefunc", "Merge Functions", false, false)

ModulePass *llvm::createMergeFunctionsPass() {
  return new MergeFunctions();
}

bool MergeFunctions::doSanityCheck(std::vector<WeakVH> &Worklist) {
  if (const unsigned Max = NumFunctionsForSanityCheck) {
    unsigned TripleNumber = 0;
    bool Valid = true;

    dbgs() << "MERGEFUNC-SANITY: Started for first " << Max << " functions.\n";

    unsigned i = 0;
    for (std::vector<WeakVH>::iterator I = Worklist.begin(), E = Worklist.end();
         I != E && i < Max; ++I, ++i) {
      unsigned j = i;
      for (std::vector<WeakVH>::iterator J = I; J != E && j < Max; ++J, ++j) {
        Function *F1 = cast<Function>(*I);
        Function *F2 = cast<Function>(*J);
        int Res1 = FunctionComparator(F1, F2, &GlobalNumbers).compare();
        int Res2 = FunctionComparator(F2, F1, &GlobalNumbers).compare();

        // If F1 <= F2, then F2 >= F1, otherwise report failure.
        if (Res1 != -Res2) {
          dbgs() << "MERGEFUNC-SANITY: Non-symmetric; triple: " << TripleNumber
                 << "\n";
          F1->dump();
          F2->dump();
          Valid = false;
        }

        if (Res1 == 0)
          continue;

        unsigned k = j;
        for (std::vector<WeakVH>::iterator K = J; K != E && k < Max;
             ++k, ++K, ++TripleNumber) {
          if (K == J)
            continue;

          Function *F3 = cast<Function>(*K);
          int Res3 = FunctionComparator(F1, F3, &GlobalNumbers).compare();
          int Res4 = FunctionComparator(F2, F3, &GlobalNumbers).compare();

          bool Transitive = true;

          if (Res1 != 0 && Res1 == Res4) {
            // F1 > F2, F2 > F3 => F1 > F3
            Transitive = Res3 == Res1;
          } else if (Res3 != 0 && Res3 == -Res4) {
            // F1 > F3, F3 > F2 => F1 > F2
            Transitive = Res3 == Res1;
          } else if (Res4 != 0 && -Res3 == Res4) {
            // F2 > F3, F3 > F1 => F2 > F1
            Transitive = Res4 == -Res1;
          }

          if (!Transitive) {
            dbgs() << "MERGEFUNC-SANITY: Non-transitive; triple: "
                   << TripleNumber << "\n";
            dbgs() << "Res1, Res3, Res4: " << Res1 << ", " << Res3 << ", "
                   << Res4 << "\n";
            F1->dump();
            F2->dump();
            F3->dump();
            Valid = false;
          }
        }
      }
    }

    dbgs() << "MERGEFUNC-SANITY: " << (Valid ? "Passed." : "Failed.") << "\n";
    return Valid;
  }
  return true;
}

bool MergeFunctions::runOnModule(Module &M) {
  bool Changed = false;

  // All functions in the module, ordered by hash. Functions with a unique
  // hash value are easily eliminated.
  std::vector<std::pair<FunctionComparator::FunctionHash, Function *>>
      HashedFuncs;
  for (Function &Func : M) {
    if (!Func.isDeclaration() && !Func.hasAvailableExternallyLinkage()) {
      HashedFuncs.push_back({FunctionComparator::functionHash(Func), &Func});
    }
  }

  std::stable_sort(
      HashedFuncs.begin(), HashedFuncs.end(),
      [](const std::pair<FunctionComparator::FunctionHash, Function *> &a,
         const std::pair<FunctionComparator::FunctionHash, Function *> &b) {
        return a.first < b.first;
      });

  auto S = HashedFuncs.begin();
  for (auto I = HashedFuncs.begin(), IE = HashedFuncs.end(); I != IE; ++I) {
    // If the hash value matches the previous value or the next one, we must
    // consider merging it. Otherwise it is dropped and never considered again.
    if ((I != S && std::prev(I)->first == I->first) ||
        (std::next(I) != IE && std::next(I)->first == I->first)) {
      Deferred.push_back(WeakVH(I->second));
    }
  }

  do {
    std::vector<WeakVH> Worklist;
    Deferred.swap(Worklist);

    DEBUG(doSanityCheck(Worklist));

    DEBUG(dbgs() << "size of module: " << M.size() << '\n');
    DEBUG(dbgs() << "size of worklist: " << Worklist.size() << '\n');

    // Insert only strong functions and merge them. Strong function merging
    // always deletes one of them.
    for (std::vector<WeakVH>::iterator I = Worklist.begin(),
                                       E = Worklist.end();
         I != E; ++I) {
      if (!*I) continue;
      Function *F = cast<Function>(*I);
      if (!F->isDeclaration() && !F->hasAvailableExternallyLinkage() &&
          !F->mayBeOverridden()) {
        Changed |= insert(F);
      }
    }

    // Insert only weak functions and merge them. By doing these second we
    // create thunks to the strong function when possible. When two weak
    // functions are identical, we create a new strong function with two
    // weak thunks to it which are identical but not mergeable.
    for (std::vector<WeakVH>::iterator I = Worklist.begin(),
                                       E = Worklist.end();
         I != E; ++I) {
      if (!*I) continue;
      Function *F = cast<Function>(*I);
      if (!F->isDeclaration() && !F->hasAvailableExternallyLinkage() &&
          F->mayBeOverridden()) {
        Changed |= insert(F);
      }
    }
    DEBUG(dbgs() << "size of FnTree: " << FnTree.size() << '\n');
  } while (!Deferred.empty());

  FnTree.clear();

  return Changed;
}

// Replace direct callers of Old with New.
void MergeFunctions::replaceDirectCallers(Function *Old, Function *New) {
  Constant *BitcastNew = ConstantExpr::getBitCast(New, Old->getType());
  for (auto UI = Old->use_begin(), UE = Old->use_end(); UI != UE;) {
    Use *U = &*UI;
    ++UI;
    CallSite CS(U->getUser());
    if (CS && CS.isCallee(U)) {
      // Transfer the called function's attributes to the call site. Due to the
      // bitcast we will 'lose' ABI changing attributes because the 'called
      // function' is no longer a Function* but the bitcast. Code that looks up
      // the attributes from the called function will fail.

      // FIXME: This is not actually true, at least not anymore. The call site
      // will always have the same ABI affecting attributes as the callee,
      // because otherwise the original input has UB. Note that Old and New
      // always have matching ABI, so no attributes need to be changed.
      // Transferring other attributes may help other optimizations, but that
      // should be done uniformly and not in this ad-hoc way.
      auto &Context = New->getContext();
      auto NewFuncAttrs = New->getAttributes();
      auto CallSiteAttrs = CS.getAttributes();

      CallSiteAttrs = CallSiteAttrs.addAttributes(
          Context, AttributeSet::ReturnIndex, NewFuncAttrs.getRetAttributes());

      for (unsigned argIdx = 0; argIdx < CS.arg_size(); argIdx++) {
        AttributeSet Attrs = NewFuncAttrs.getParamAttributes(argIdx);
        if (Attrs.getNumSlots())
          CallSiteAttrs = CallSiteAttrs.addAttributes(Context, argIdx, Attrs);
      }

      CS.setAttributes(CallSiteAttrs);

      remove(CS.getInstruction()->getParent()->getParent());
      U->set(BitcastNew);
    }
  }
}

// Replace G with an alias to F if possible, or else a thunk to F. Deletes G.
void MergeFunctions::writeThunkOrAlias(Function *F, Function *G) {
  if (HasGlobalAliases && G->hasUnnamedAddr()) {
    if (G->hasExternalLinkage() || G->hasLocalLinkage() ||
        G->hasWeakLinkage()) {
      writeAlias(F, G);
      return;
    }
  }

  writeThunk(F, G);
}

// Helper for writeThunk.
// Selects the proper bitcast operation, but is a bit simpler than
// CastInst::getCastOpcode.
static Value *createCast(IRBuilder<false> &Builder, Value *V, Type *DestTy) {
  Type *SrcTy = V->getType();
  if (SrcTy->isStructTy()) {
    assert(DestTy->isStructTy());
    assert(SrcTy->getStructNumElements() == DestTy->getStructNumElements());
    Value *Result = UndefValue::get(DestTy);
    for (unsigned int I = 0, E = SrcTy->getStructNumElements(); I < E; ++I) {
      Value *Element = createCast(
          Builder, Builder.CreateExtractValue(V, makeArrayRef(I)),
          DestTy->getStructElementType(I));

      Result =
          Builder.CreateInsertValue(Result, Element, makeArrayRef(I));
    }
    return Result;
  }
  assert(!DestTy->isStructTy());
  if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
    return Builder.CreateIntToPtr(V, DestTy);
  else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
    return Builder.CreatePtrToInt(V, DestTy);
  else
    return Builder.CreateBitCast(V, DestTy);
}
// Replace G with a simple tail call to bitcast(F). Also replace direct uses
// of G with bitcast(F). Deletes G.
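// A sketch of the emitted thunk, with hypothetical names: if i32 @g(i32)
// was merged into the identical i32 @f(i32), @g is replaced by roughly
//
//   define i32 @g(i32 %x) {
//     %1 = tail call i32 @f(i32 %x)
//     ret i32 %1
//   }
//
// with createCast supplying conversions when the two signatures differ only
// in representation (e.g. pointer vs. integer arguments).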
void MergeFunctions::writeThunk(Function *F, Function *G) {
  if (!G->mayBeOverridden()) {
    // Redirect direct callers of G to F.
    replaceDirectCallers(G, F);
  }

  // If G was internal then we may have replaced all uses of G with F. If so,
  // stop here and delete G. There's no need for a thunk.
  if (G->hasLocalLinkage() && G->use_empty()) {
    G->eraseFromParent();
    return;
  }

  Function *NewG = Function::Create(G->getFunctionType(), G->getLinkage(), "",
                                    G->getParent());
  BasicBlock *BB = BasicBlock::Create(F->getContext(), "", NewG);
  IRBuilder<false> Builder(BB);

  SmallVector<Value *, 16> Args;
  unsigned i = 0;
  FunctionType *FFTy = F->getFunctionType();
  for (Function::arg_iterator AI = NewG->arg_begin(), AE = NewG->arg_end();
       AI != AE; ++AI) {
    Args.push_back(createCast(Builder, (Value *)AI, FFTy->getParamType(i)));
    ++i;
  }

  CallInst *CI = Builder.CreateCall(F, Args);
  CI->setTailCall();
  CI->setCallingConv(F->getCallingConv());
  CI->setAttributes(F->getAttributes());
  if (NewG->getReturnType()->isVoidTy()) {
    Builder.CreateRetVoid();
  } else {
    Builder.CreateRet(createCast(Builder, CI, NewG->getReturnType()));
  }

  NewG->copyAttributesFrom(G);
  NewG->takeName(G);
  removeUsers(G);
  G->replaceAllUsesWith(NewG);
  G->eraseFromParent();

  DEBUG(dbgs() << "writeThunk: " << NewG->getName() << '\n');
  ++NumThunksWritten;
}
// Replace G with an alias to F and delete G.
void MergeFunctions::writeAlias(Function *F, Function *G) {
  PointerType *PTy = G->getType();
  auto *GA = GlobalAlias::create(PTy, G->getLinkage(), "", F);
  F->setAlignment(std::max(F->getAlignment(), G->getAlignment()));
  GA->takeName(G);
  GA->setVisibility(G->getVisibility());
  removeUsers(G);
  G->replaceAllUsesWith(GA);
  G->eraseFromParent();

  DEBUG(dbgs() << "writeAlias: " << GA->getName() << '\n');
  ++NumAliasesWritten;
}
// Merge two equivalent functions. Upon completion, Function G is deleted.
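// Two cases, sketched below. If F cannot be overridden, G simply becomes a
// thunk (or alias) to F. If both are weak, neither symbol can be relied on
// to keep the body, so the body is moved into a fresh private function and
// both F and G become thunks (or aliases) to it.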
void MergeFunctions::mergeTwoFunctions(Function *F, Function *G) {
  if (F->mayBeOverridden()) {
    assert(G->mayBeOverridden());

    // Make them both thunks to the same internal function.
    Function *H = Function::Create(F->getFunctionType(), F->getLinkage(), "",
                                   F->getParent());
    H->copyAttributesFrom(F);
    H->takeName(F);
    removeUsers(F);
    F->replaceAllUsesWith(H);

    unsigned MaxAlignment = std::max(G->getAlignment(), H->getAlignment());

    if (HasGlobalAliases) {
      writeAlias(F, G);
      writeAlias(F, H);
    } else {
      writeThunk(F, G);
      writeThunk(F, H);
    }

    F->setAlignment(MaxAlignment);
    F->setLinkage(GlobalValue::PrivateLinkage);
    ++NumDoubleWeak;
  } else {
    writeThunkOrAlias(F, G);
  }

  ++NumFunctionsMerged;
}
/// Replace function F with function G in the function tree.
void MergeFunctions::replaceFunctionInTree(const FunctionNode &FN,
                                           Function *G) {
  Function *F = FN.getFunc();
  assert(FunctionComparator(F, G, &GlobalNumbers).compare() == 0 &&
         "The two functions must be equal");

  auto I = FNodesInTree.find(F);
  assert(I != FNodesInTree.end() && "F should be in FNodesInTree");
  assert(FNodesInTree.count(G) == 0 && "FNodesInTree should not contain G");

  FnTreeType::iterator IterToFNInFnTree = I->second;
  assert(&(*IterToFNInFnTree) == &FN && "F should map to FN in FNodesInTree.");
  // Remove F -> FN and insert G -> FN.
  FNodesInTree.erase(I);
  FNodesInTree.insert({G, IterToFNInFnTree});
  // Replace F with G in FN, which is stored inside the FnTree.
  FN.replaceBy(G);
}
// Insert a function into FnTree, or merge it away if equal to one that was
// already inserted.
bool MergeFunctions::insert(Function *NewFunction) {
  std::pair<FnTreeType::iterator, bool> Result =
      FnTree.insert(FunctionNode(NewFunction));

  if (Result.second) {
    assert(FNodesInTree.count(NewFunction) == 0);
    FNodesInTree.insert({NewFunction, Result.first});
    DEBUG(dbgs() << "Inserting as unique: " << NewFunction->getName() << '\n');
    return false;
  }

  const FunctionNode &OldF = *Result.first;

  // Don't merge tiny functions, since it can just end up making the function
  // larger.
  // FIXME: Should still merge them if they are unnamed_addr and produce an
  // alias.
  if (NewFunction->size() == 1) {
    if (NewFunction->front().size() <= 2) {
      DEBUG(dbgs() << NewFunction->getName()
                   << " is too small to bother merging\n");
      return false;
    }
  }
  // Impose a total order (by name) on the replacement of functions. This is
  // important when operating on more than one module independently, to
  // prevent cycles of thunks calling each other when the modules are linked
  // together.
  //
  // When one function is weak and the other is strong there is an order
  // imposed already: we process strong functions before weak functions.
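  //
  // Illustration (hypothetical functions @a and @b, identical in two
  // modules): without a fixed order, one module might merge @a into @b
  // (leaving a thunk @a -> @b) while the other merges @b into @a (leaving
  // @b -> @a); after linking, the two thunks would call each other forever.
  // Ordering by name makes both modules pick the same survivor.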
  if ((OldF.getFunc()->mayBeOverridden() && NewFunction->mayBeOverridden()) ||
      (!OldF.getFunc()->mayBeOverridden() && !NewFunction->mayBeOverridden()))
    if (OldF.getFunc()->getName() > NewFunction->getName()) {
      // Swap the two functions.
      Function *F = OldF.getFunc();
      replaceFunctionInTree(*Result.first, NewFunction);
      NewFunction = F;
      assert(OldF.getFunc() != F && "Must have swapped the functions.");
    }

  // Never thunk a strong function to a weak function.
  assert(!OldF.getFunc()->mayBeOverridden() || NewFunction->mayBeOverridden());

  DEBUG(dbgs() << "  " << OldF.getFunc()->getName()
               << " == " << NewFunction->getName() << '\n');

  Function *DeleteF = NewFunction;
  mergeTwoFunctions(OldF.getFunc(), DeleteF);
  return true;
}
// Remove a function from FnTree. If it was already in FnTree, add
// it to Deferred so that we'll look at it in the next round.
void MergeFunctions::remove(Function *F) {
  auto I = FNodesInTree.find(F);
  if (I != FNodesInTree.end()) {
    DEBUG(dbgs() << "Deferred " << F->getName() << ".\n");
    FnTree.erase(I->second);
    // I->second has been invalidated, remove it from the FNodesInTree map to
    // preserve the invariant.
    FNodesInTree.erase(I);
    Deferred.emplace_back(F);
  }
}
// For each instruction used by the value, remove() the function that contains
// the instruction. This should happen right before a call to RAUW.
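// For example, if V is about to be RAUW'd and it appears in a constant
// expression that is itself used by an instruction in some function H, then
// H's body is about to change, so H must be removed from FnTree (and
// deferred) before the replacement happens.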
void MergeFunctions::removeUsers(Value *V) {
  std::vector<Value *> Worklist;
  Worklist.push_back(V);
  SmallSet<Value *, 8> Visited;
  Visited.insert(V);
  while (!Worklist.empty()) {
    Value *V = Worklist.back();
    Worklist.pop_back();

    for (User *U : V->users()) {
      if (Instruction *I = dyn_cast<Instruction>(U)) {
        remove(I->getParent()->getParent());
      } else if (isa<GlobalValue>(U)) {
        // Do nothing.
      } else if (Constant *C = dyn_cast<Constant>(U)) {
        // Look through constants (e.g. constant expressions) to reach the
        // instructions that ultimately refer to V, visiting each user once.
        for (User *UU : C->users()) {
          if (Visited.insert(UU).second)
            Worklist.push_back(UU);
        }
      }
    }
  }
}