//===- GVN.cpp - Eliminate redundant values and loads --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License.  See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs global value numbering to eliminate fully redundant
// instructions.  It also performs simple dead load elimination.
//
// Note that this pass does the value numbering itself; it does not use the
// ValueNumbering analysis passes.
//
//===----------------------------------------------------------------------===//
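//
// As a brief, hedged illustration (hypothetical IR, not taken from the test
// suite), GVN gives syntactically distinct instructions that compute the same
// value the same value number and folds the later ones into the earlier one:
//
//     %a = add i32 %x, %y
//     %b = add i32 %x, %y        ; same opcode/operands -> same value number
//     %c = mul i32 %a, %b        ; becomes:  %c = mul i32 %a, %a
//
// Dead load elimination works similarly: a load whose value is already
// available from a dominating store or load of the same pointer is replaced
// and deleted.
//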
#define DEBUG_TYPE "gvn"
#include "llvm/Transforms/Scalar.h"
#include "llvm/BasicBlock.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Value.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MallocHelper.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

STATISTIC(NumGVNInstr, "Number of instructions deleted");
STATISTIC(NumGVNLoad, "Number of loads deleted");
STATISTIC(NumGVNPRE, "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
STATISTIC(NumPRELoad, "Number of loads PRE'd");

static cl::opt<bool> EnablePRE("enable-pre",
                               cl::init(true), cl::Hidden);
static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true));
//===----------------------------------------------------------------------===//
//                         ValueTable Class
//===----------------------------------------------------------------------===//

/// This class holds the mapping between values and value numbers.  It is used
/// as an efficient mechanism to determine the expression-wise equivalence of
/// two values.
namespace {
  struct Expression {
    enum ExpressionOpcode { ADD, FADD, SUB, FSUB, MUL, FMUL,
                            UDIV, SDIV, FDIV, UREM, SREM,
                            FREM, SHL, LSHR, ASHR, AND, OR, XOR, ICMPEQ,
                            ICMPNE, ICMPUGT, ICMPUGE, ICMPULT, ICMPULE,
                            ICMPSGT, ICMPSGE, ICMPSLT, ICMPSLE, FCMPOEQ,
                            FCMPOGT, FCMPOGE, FCMPOLT, FCMPOLE, FCMPONE,
                            FCMPORD, FCMPUNO, FCMPUEQ, FCMPUGT, FCMPUGE,
                            FCMPULT, FCMPULE, FCMPUNE, EXTRACT, INSERT,
                            SHUFFLE, SELECT, TRUNC, ZEXT, SEXT, FPTOUI,
                            FPTOSI, UITOFP, SITOFP, FPTRUNC, FPEXT,
                            PTRTOINT, INTTOPTR, BITCAST, GEP, CALL, CONSTANT,
                            EMPTY, TOMBSTONE };

    ExpressionOpcode opcode;
    const Type* type;
    uint32_t firstVN;
    uint32_t secondVN;
    uint32_t thirdVN;
    SmallVector<uint32_t, 4> varargs;
    Function* function;

    Expression() { }
    Expression(ExpressionOpcode o) : opcode(o) { }
    bool operator==(const Expression &other) const {
      if (opcode != other.opcode)
        return false;
      else if (opcode == EMPTY || opcode == TOMBSTONE)
        return true;
      else if (type != other.type)
        return false;
      else if (function != other.function)
        return false;
      else if (firstVN != other.firstVN)
        return false;
      else if (secondVN != other.secondVN)
        return false;
      else if (thirdVN != other.thirdVN)
        return false;
      if (varargs.size() != other.varargs.size())
        return false;
      for (size_t i = 0; i < varargs.size(); ++i)
        if (varargs[i] != other.varargs[i])
          return false;
      return true;
    }

    bool operator!=(const Expression &other) const {
      return !(*this == other);
    }
125 DenseMap<Value*, uint32_t> valueNumbering;
126 DenseMap<Expression, uint32_t> expressionNumbering;
128 MemoryDependenceAnalysis* MD;
131 uint32_t nextValueNumber;
133 Expression::ExpressionOpcode getOpcode(BinaryOperator* BO);
134 Expression::ExpressionOpcode getOpcode(CmpInst* C);
135 Expression::ExpressionOpcode getOpcode(CastInst* C);
136 Expression create_expression(BinaryOperator* BO);
137 Expression create_expression(CmpInst* C);
138 Expression create_expression(ShuffleVectorInst* V);
139 Expression create_expression(ExtractElementInst* C);
140 Expression create_expression(InsertElementInst* V);
141 Expression create_expression(SelectInst* V);
142 Expression create_expression(CastInst* C);
143 Expression create_expression(GetElementPtrInst* G);
144 Expression create_expression(CallInst* C);
145 Expression create_expression(Constant* C);
147 ValueTable() : nextValueNumber(1) { }
148 uint32_t lookup_or_add(Value* V);
149 uint32_t lookup(Value* V) const;
150 void add(Value* V, uint32_t num);
152 void erase(Value* v);
154 void setAliasAnalysis(AliasAnalysis* A) { AA = A; }
155 AliasAnalysis *getAliasAnalysis() const { return AA; }
156 void setMemDep(MemoryDependenceAnalysis* M) { MD = M; }
157 void setDomTree(DominatorTree* D) { DT = D; }
158 uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
159 void verifyRemoved(const Value *) const;
164 template <> struct DenseMapInfo<Expression> {
165 static inline Expression getEmptyKey() {
166 return Expression(Expression::EMPTY);
169 static inline Expression getTombstoneKey() {
170 return Expression(Expression::TOMBSTONE);
173 static unsigned getHashValue(const Expression e) {
174 unsigned hash = e.opcode;
176 hash = e.firstVN + hash * 37;
177 hash = e.secondVN + hash * 37;
178 hash = e.thirdVN + hash * 37;
180 hash = ((unsigned)((uintptr_t)e.type >> 4) ^
181 (unsigned)((uintptr_t)e.type >> 9)) +
184 for (SmallVector<uint32_t, 4>::const_iterator I = e.varargs.begin(),
185 E = e.varargs.end(); I != E; ++I)
186 hash = *I + hash * 37;
188 hash = ((unsigned)((uintptr_t)e.function >> 4) ^
189 (unsigned)((uintptr_t)e.function >> 9)) +
194 static bool isEqual(const Expression &LHS, const Expression &RHS) {
197 static bool isPod() { return true; }
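  // (Illustrative note, not part of the original interface: the hash above
  //  folds each field in turn, so an ADD expression with operand value numbers
  //  3 and 4 hashes roughly as ((ADD * 37 + 3) * 37 + 4) * 37 + ..., with the
  //  type and function pointers mixed in via their shifted addresses.)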
201 //===----------------------------------------------------------------------===//
202 // ValueTable Internal Functions
203 //===----------------------------------------------------------------------===//
204 Expression::ExpressionOpcode ValueTable::getOpcode(BinaryOperator* BO) {
205 switch(BO->getOpcode()) {
206 default: // THIS SHOULD NEVER HAPPEN
207 llvm_unreachable("Binary operator with unknown opcode?");
208 case Instruction::Add: return Expression::ADD;
209 case Instruction::FAdd: return Expression::FADD;
210 case Instruction::Sub: return Expression::SUB;
211 case Instruction::FSub: return Expression::FSUB;
212 case Instruction::Mul: return Expression::MUL;
213 case Instruction::FMul: return Expression::FMUL;
214 case Instruction::UDiv: return Expression::UDIV;
215 case Instruction::SDiv: return Expression::SDIV;
216 case Instruction::FDiv: return Expression::FDIV;
217 case Instruction::URem: return Expression::UREM;
218 case Instruction::SRem: return Expression::SREM;
219 case Instruction::FRem: return Expression::FREM;
220 case Instruction::Shl: return Expression::SHL;
221 case Instruction::LShr: return Expression::LSHR;
222 case Instruction::AShr: return Expression::ASHR;
223 case Instruction::And: return Expression::AND;
224 case Instruction::Or: return Expression::OR;
225 case Instruction::Xor: return Expression::XOR;
229 Expression::ExpressionOpcode ValueTable::getOpcode(CmpInst* C) {
230 if (isa<ICmpInst>(C)) {
231 switch (C->getPredicate()) {
232 default: // THIS SHOULD NEVER HAPPEN
233 llvm_unreachable("Comparison with unknown predicate?");
234 case ICmpInst::ICMP_EQ: return Expression::ICMPEQ;
235 case ICmpInst::ICMP_NE: return Expression::ICMPNE;
236 case ICmpInst::ICMP_UGT: return Expression::ICMPUGT;
237 case ICmpInst::ICMP_UGE: return Expression::ICMPUGE;
238 case ICmpInst::ICMP_ULT: return Expression::ICMPULT;
239 case ICmpInst::ICMP_ULE: return Expression::ICMPULE;
240 case ICmpInst::ICMP_SGT: return Expression::ICMPSGT;
241 case ICmpInst::ICMP_SGE: return Expression::ICMPSGE;
242 case ICmpInst::ICMP_SLT: return Expression::ICMPSLT;
243 case ICmpInst::ICMP_SLE: return Expression::ICMPSLE;
246 switch (C->getPredicate()) {
247 default: // THIS SHOULD NEVER HAPPEN
248 llvm_unreachable("Comparison with unknown predicate?");
249 case FCmpInst::FCMP_OEQ: return Expression::FCMPOEQ;
250 case FCmpInst::FCMP_OGT: return Expression::FCMPOGT;
251 case FCmpInst::FCMP_OGE: return Expression::FCMPOGE;
252 case FCmpInst::FCMP_OLT: return Expression::FCMPOLT;
253 case FCmpInst::FCMP_OLE: return Expression::FCMPOLE;
254 case FCmpInst::FCMP_ONE: return Expression::FCMPONE;
255 case FCmpInst::FCMP_ORD: return Expression::FCMPORD;
256 case FCmpInst::FCMP_UNO: return Expression::FCMPUNO;
257 case FCmpInst::FCMP_UEQ: return Expression::FCMPUEQ;
258 case FCmpInst::FCMP_UGT: return Expression::FCMPUGT;
259 case FCmpInst::FCMP_UGE: return Expression::FCMPUGE;
260 case FCmpInst::FCMP_ULT: return Expression::FCMPULT;
261 case FCmpInst::FCMP_ULE: return Expression::FCMPULE;
262 case FCmpInst::FCMP_UNE: return Expression::FCMPUNE;
267 Expression::ExpressionOpcode ValueTable::getOpcode(CastInst* C) {
268 switch(C->getOpcode()) {
269 default: // THIS SHOULD NEVER HAPPEN
270 llvm_unreachable("Cast operator with unknown opcode?");
271 case Instruction::Trunc: return Expression::TRUNC;
272 case Instruction::ZExt: return Expression::ZEXT;
273 case Instruction::SExt: return Expression::SEXT;
274 case Instruction::FPToUI: return Expression::FPTOUI;
275 case Instruction::FPToSI: return Expression::FPTOSI;
276 case Instruction::UIToFP: return Expression::UITOFP;
277 case Instruction::SIToFP: return Expression::SITOFP;
278 case Instruction::FPTrunc: return Expression::FPTRUNC;
279 case Instruction::FPExt: return Expression::FPEXT;
280 case Instruction::PtrToInt: return Expression::PTRTOINT;
281 case Instruction::IntToPtr: return Expression::INTTOPTR;
282 case Instruction::BitCast: return Expression::BITCAST;
286 Expression ValueTable::create_expression(CallInst* C) {
289 e.type = C->getType();
293 e.function = C->getCalledFunction();
294 e.opcode = Expression::CALL;
296 for (CallInst::op_iterator I = C->op_begin()+1, E = C->op_end();
298 e.varargs.push_back(lookup_or_add(*I));
303 Expression ValueTable::create_expression(BinaryOperator* BO) {
306 e.firstVN = lookup_or_add(BO->getOperand(0));
307 e.secondVN = lookup_or_add(BO->getOperand(1));
310 e.type = BO->getType();
311 e.opcode = getOpcode(BO);
316 Expression ValueTable::create_expression(CmpInst* C) {
319 e.firstVN = lookup_or_add(C->getOperand(0));
320 e.secondVN = lookup_or_add(C->getOperand(1));
323 e.type = C->getType();
324 e.opcode = getOpcode(C);
329 Expression ValueTable::create_expression(CastInst* C) {
332 e.firstVN = lookup_or_add(C->getOperand(0));
336 e.type = C->getType();
337 e.opcode = getOpcode(C);
342 Expression ValueTable::create_expression(ShuffleVectorInst* S) {
345 e.firstVN = lookup_or_add(S->getOperand(0));
346 e.secondVN = lookup_or_add(S->getOperand(1));
347 e.thirdVN = lookup_or_add(S->getOperand(2));
349 e.type = S->getType();
350 e.opcode = Expression::SHUFFLE;
355 Expression ValueTable::create_expression(ExtractElementInst* E) {
358 e.firstVN = lookup_or_add(E->getOperand(0));
359 e.secondVN = lookup_or_add(E->getOperand(1));
362 e.type = E->getType();
363 e.opcode = Expression::EXTRACT;
368 Expression ValueTable::create_expression(InsertElementInst* I) {
371 e.firstVN = lookup_or_add(I->getOperand(0));
372 e.secondVN = lookup_or_add(I->getOperand(1));
373 e.thirdVN = lookup_or_add(I->getOperand(2));
375 e.type = I->getType();
376 e.opcode = Expression::INSERT;
381 Expression ValueTable::create_expression(SelectInst* I) {
384 e.firstVN = lookup_or_add(I->getCondition());
385 e.secondVN = lookup_or_add(I->getTrueValue());
386 e.thirdVN = lookup_or_add(I->getFalseValue());
388 e.type = I->getType();
389 e.opcode = Expression::SELECT;
394 Expression ValueTable::create_expression(GetElementPtrInst* G) {
397 e.firstVN = lookup_or_add(G->getPointerOperand());
401 e.type = G->getType();
402 e.opcode = Expression::GEP;
404 for (GetElementPtrInst::op_iterator I = G->idx_begin(), E = G->idx_end();
406 e.varargs.push_back(lookup_or_add(*I));
411 //===----------------------------------------------------------------------===//
412 // ValueTable External Functions
413 //===----------------------------------------------------------------------===//
415 /// add - Insert a value into the table with a specified value number.
416 void ValueTable::add(Value* V, uint32_t num) {
417 valueNumbering.insert(std::make_pair(V, num));
420 /// lookup_or_add - Returns the value number for the specified value, assigning
421 /// it a new number if it did not have one before.
422 uint32_t ValueTable::lookup_or_add(Value* V) {
423 DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
424 if (VI != valueNumbering.end())
427 if (CallInst* C = dyn_cast<CallInst>(V)) {
428 if (AA->doesNotAccessMemory(C)) {
429 Expression e = create_expression(C);
431 DenseMap<Expression, uint32_t>::iterator EI = expressionNumbering.find(e);
432 if (EI != expressionNumbering.end()) {
433 valueNumbering.insert(std::make_pair(V, EI->second));
436 expressionNumbering.insert(std::make_pair(e, nextValueNumber));
437 valueNumbering.insert(std::make_pair(V, nextValueNumber));
439 return nextValueNumber++;
441 } else if (AA->onlyReadsMemory(C)) {
442 Expression e = create_expression(C);
444 if (expressionNumbering.find(e) == expressionNumbering.end()) {
445 expressionNumbering.insert(std::make_pair(e, nextValueNumber));
446 valueNumbering.insert(std::make_pair(V, nextValueNumber));
447 return nextValueNumber++;
450 MemDepResult local_dep = MD->getDependency(C);
452 if (!local_dep.isDef() && !local_dep.isNonLocal()) {
453 valueNumbering.insert(std::make_pair(V, nextValueNumber));
454 return nextValueNumber++;
457 if (local_dep.isDef()) {
458 CallInst* local_cdep = cast<CallInst>(local_dep.getInst());
460 if (local_cdep->getNumOperands() != C->getNumOperands()) {
461 valueNumbering.insert(std::make_pair(V, nextValueNumber));
462 return nextValueNumber++;
465 for (unsigned i = 1; i < C->getNumOperands(); ++i) {
466 uint32_t c_vn = lookup_or_add(C->getOperand(i));
467 uint32_t cd_vn = lookup_or_add(local_cdep->getOperand(i));
469 valueNumbering.insert(std::make_pair(V, nextValueNumber));
470 return nextValueNumber++;
474 uint32_t v = lookup_or_add(local_cdep);
475 valueNumbering.insert(std::make_pair(V, v));
480 const MemoryDependenceAnalysis::NonLocalDepInfo &deps =
481 MD->getNonLocalCallDependency(CallSite(C));
    // FIXME: call/call dependencies for readonly calls should return def, not
    // clobber!  Move the checking logic to MemDep!
    CallInst* cdep = 0;

    // Check to see if we have a single dominating call instruction that is
    // identical to C.
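    // (Illustrative, hypothetical IR: two "call i32 @f(i32 %x)" sites to a
    //  readonly @f, where every recorded dependency turns out to be the same
    //  dominating call, can share one value number, making the later call
    //  redundant.)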
488 for (unsigned i = 0, e = deps.size(); i != e; ++i) {
489 const MemoryDependenceAnalysis::NonLocalDepEntry *I = &deps[i];
490 // Ignore non-local dependencies.
491 if (I->second.isNonLocal())
      // We don't handle non-dependencies.  If we already have a call, reject
      // instruction dependencies.
496 if (I->second.isClobber() || cdep != 0) {
501 CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->second.getInst());
502 // FIXME: All duplicated with non-local case.
503 if (NonLocalDepCall && DT->properlyDominates(I->first, C->getParent())){
504 cdep = NonLocalDepCall;
513 valueNumbering.insert(std::make_pair(V, nextValueNumber));
514 return nextValueNumber++;
517 if (cdep->getNumOperands() != C->getNumOperands()) {
518 valueNumbering.insert(std::make_pair(V, nextValueNumber));
519 return nextValueNumber++;
521 for (unsigned i = 1; i < C->getNumOperands(); ++i) {
522 uint32_t c_vn = lookup_or_add(C->getOperand(i));
523 uint32_t cd_vn = lookup_or_add(cdep->getOperand(i));
525 valueNumbering.insert(std::make_pair(V, nextValueNumber));
526 return nextValueNumber++;
530 uint32_t v = lookup_or_add(cdep);
531 valueNumbering.insert(std::make_pair(V, v));
535 valueNumbering.insert(std::make_pair(V, nextValueNumber));
536 return nextValueNumber++;
538 } else if (BinaryOperator* BO = dyn_cast<BinaryOperator>(V)) {
539 Expression e = create_expression(BO);
541 DenseMap<Expression, uint32_t>::iterator EI = expressionNumbering.find(e);
542 if (EI != expressionNumbering.end()) {
543 valueNumbering.insert(std::make_pair(V, EI->second));
546 expressionNumbering.insert(std::make_pair(e, nextValueNumber));
547 valueNumbering.insert(std::make_pair(V, nextValueNumber));
549 return nextValueNumber++;
551 } else if (CmpInst* C = dyn_cast<CmpInst>(V)) {
552 Expression e = create_expression(C);
554 DenseMap<Expression, uint32_t>::iterator EI = expressionNumbering.find(e);
555 if (EI != expressionNumbering.end()) {
556 valueNumbering.insert(std::make_pair(V, EI->second));
559 expressionNumbering.insert(std::make_pair(e, nextValueNumber));
560 valueNumbering.insert(std::make_pair(V, nextValueNumber));
562 return nextValueNumber++;
564 } else if (ShuffleVectorInst* U = dyn_cast<ShuffleVectorInst>(V)) {
565 Expression e = create_expression(U);
567 DenseMap<Expression, uint32_t>::iterator EI = expressionNumbering.find(e);
568 if (EI != expressionNumbering.end()) {
569 valueNumbering.insert(std::make_pair(V, EI->second));
572 expressionNumbering.insert(std::make_pair(e, nextValueNumber));
573 valueNumbering.insert(std::make_pair(V, nextValueNumber));
575 return nextValueNumber++;
577 } else if (ExtractElementInst* U = dyn_cast<ExtractElementInst>(V)) {
578 Expression e = create_expression(U);
580 DenseMap<Expression, uint32_t>::iterator EI = expressionNumbering.find(e);
581 if (EI != expressionNumbering.end()) {
582 valueNumbering.insert(std::make_pair(V, EI->second));
585 expressionNumbering.insert(std::make_pair(e, nextValueNumber));
586 valueNumbering.insert(std::make_pair(V, nextValueNumber));
588 return nextValueNumber++;
590 } else if (InsertElementInst* U = dyn_cast<InsertElementInst>(V)) {
591 Expression e = create_expression(U);
593 DenseMap<Expression, uint32_t>::iterator EI = expressionNumbering.find(e);
594 if (EI != expressionNumbering.end()) {
595 valueNumbering.insert(std::make_pair(V, EI->second));
598 expressionNumbering.insert(std::make_pair(e, nextValueNumber));
599 valueNumbering.insert(std::make_pair(V, nextValueNumber));
601 return nextValueNumber++;
603 } else if (SelectInst* U = dyn_cast<SelectInst>(V)) {
604 Expression e = create_expression(U);
606 DenseMap<Expression, uint32_t>::iterator EI = expressionNumbering.find(e);
607 if (EI != expressionNumbering.end()) {
608 valueNumbering.insert(std::make_pair(V, EI->second));
611 expressionNumbering.insert(std::make_pair(e, nextValueNumber));
612 valueNumbering.insert(std::make_pair(V, nextValueNumber));
614 return nextValueNumber++;
616 } else if (CastInst* U = dyn_cast<CastInst>(V)) {
617 Expression e = create_expression(U);
619 DenseMap<Expression, uint32_t>::iterator EI = expressionNumbering.find(e);
620 if (EI != expressionNumbering.end()) {
621 valueNumbering.insert(std::make_pair(V, EI->second));
624 expressionNumbering.insert(std::make_pair(e, nextValueNumber));
625 valueNumbering.insert(std::make_pair(V, nextValueNumber));
627 return nextValueNumber++;
629 } else if (GetElementPtrInst* U = dyn_cast<GetElementPtrInst>(V)) {
630 Expression e = create_expression(U);
632 DenseMap<Expression, uint32_t>::iterator EI = expressionNumbering.find(e);
633 if (EI != expressionNumbering.end()) {
634 valueNumbering.insert(std::make_pair(V, EI->second));
637 expressionNumbering.insert(std::make_pair(e, nextValueNumber));
638 valueNumbering.insert(std::make_pair(V, nextValueNumber));
640 return nextValueNumber++;
643 valueNumbering.insert(std::make_pair(V, nextValueNumber));
644 return nextValueNumber++;
648 /// lookup - Returns the value number of the specified value. Fails if
649 /// the value has not yet been numbered.
650 uint32_t ValueTable::lookup(Value* V) const {
651 DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
652 assert(VI != valueNumbering.end() && "Value not numbered?");
656 /// clear - Remove all entries from the ValueTable
657 void ValueTable::clear() {
658 valueNumbering.clear();
659 expressionNumbering.clear();
663 /// erase - Remove a value from the value numbering
664 void ValueTable::erase(Value* V) {
665 valueNumbering.erase(V);
668 /// verifyRemoved - Verify that the value is removed from all internal data
670 void ValueTable::verifyRemoved(const Value *V) const {
671 for (DenseMap<Value*, uint32_t>::iterator
672 I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
673 assert(I->first != V && "Inst still occurs in value numbering map!");
677 //===----------------------------------------------------------------------===//
679 //===----------------------------------------------------------------------===//
682 struct ValueNumberScope {
683 ValueNumberScope* parent;
684 DenseMap<uint32_t, Value*> table;
686 ValueNumberScope(ValueNumberScope* p) : parent(p) { }
692 class GVN : public FunctionPass {
693 bool runOnFunction(Function &F);
695 static char ID; // Pass identification, replacement for typeid
696 GVN() : FunctionPass(&ID) { }
699 MemoryDependenceAnalysis *MD;
703 DenseMap<BasicBlock*, ValueNumberScope*> localAvail;
705 typedef DenseMap<Value*, SmallPtrSet<Instruction*, 4> > PhiMapType;
709 // This transformation requires dominator postdominator info
710 virtual void getAnalysisUsage(AnalysisUsage &AU) const {
711 AU.addRequired<DominatorTree>();
712 AU.addRequired<MemoryDependenceAnalysis>();
713 AU.addRequired<AliasAnalysis>();
715 AU.addPreserved<DominatorTree>();
716 AU.addPreserved<AliasAnalysis>();
720 // FIXME: eliminate or document these better
721 bool processLoad(LoadInst* L,
722 SmallVectorImpl<Instruction*> &toErase);
723 bool processInstruction(Instruction* I,
724 SmallVectorImpl<Instruction*> &toErase);
725 bool processNonLocalLoad(LoadInst* L,
726 SmallVectorImpl<Instruction*> &toErase);
727 bool processBlock(BasicBlock* BB);
728 Value *GetValueForBlock(BasicBlock *BB, Instruction* orig,
729 DenseMap<BasicBlock*, Value*> &Phis,
730 bool top_level = false);
731 void dump(DenseMap<uint32_t, Value*>& d);
732 bool iterateOnFunction(Function &F);
733 Value* CollapsePhi(PHINode* p);
734 bool performPRE(Function& F);
735 Value* lookupNumber(BasicBlock* BB, uint32_t num);
736 Value* AttemptRedundancyElimination(Instruction* orig, unsigned valno);
737 void cleanupGlobalSets();
738 void verifyRemoved(const Instruction *I) const;
744 // createGVNPass - The public interface to this file...
745 FunctionPass *llvm::createGVNPass() { return new GVN(); }
747 static RegisterPass<GVN> X("gvn",
748 "Global Value Numbering");
750 void GVN::dump(DenseMap<uint32_t, Value*>& d) {
752 for (DenseMap<uint32_t, Value*>::iterator I = d.begin(),
753 E = d.end(); I != E; ++I) {
754 printf("%d\n", I->first);
760 static bool isSafeReplacement(PHINode* p, Instruction* inst) {
761 if (!isa<PHINode>(inst))
764 for (Instruction::use_iterator UI = p->use_begin(), E = p->use_end();
766 if (PHINode* use_phi = dyn_cast<PHINode>(UI))
767 if (use_phi->getParent() == inst->getParent())
773 Value* GVN::CollapsePhi(PHINode* p) {
774 Value* constVal = p->hasConstantValue(DT);
775 if (!constVal) return 0;
777 Instruction* inst = dyn_cast<Instruction>(constVal);
781 if (DT->dominates(inst, p))
782 if (isSafeReplacement(p, inst))
787 /// GetValueForBlock - Get the value to use within the specified basic block.
/// Available values are in Phis.
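///
/// A hedged sketch (hypothetical CFG, not from the source tree): if the value
/// is available as %v1 in predecessor %BB1 and as %v2 in predecessor %BB2, the
/// value to use in their common successor is a freshly built
///   %v3 = phi i32 [ %v1, %BB1 ], [ %v2, %BB2 ]
/// A block with a single predecessor just inherits the predecessor's value,
/// and an unreachable block gets undef.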
789 Value *GVN::GetValueForBlock(BasicBlock *BB, Instruction* orig,
790 DenseMap<BasicBlock*, Value*> &Phis,
793 // If we have already computed this value, return the previously computed val.
794 DenseMap<BasicBlock*, Value*>::iterator V = Phis.find(BB);
795 if (V != Phis.end() && !top_level) return V->second;
797 // If the block is unreachable, just return undef, since this path
798 // can't actually occur at runtime.
799 if (!DT->isReachableFromEntry(BB))
800 return Phis[BB] = UndefValue::get(orig->getType());
802 if (BasicBlock *Pred = BB->getSinglePredecessor()) {
803 Value *ret = GetValueForBlock(Pred, orig, Phis);
808 // Get the number of predecessors of this block so we can reserve space later.
809 // If there is already a PHI in it, use the #preds from it, otherwise count.
810 // Getting it from the PHI is constant time.
812 if (PHINode *ExistingPN = dyn_cast<PHINode>(BB->begin()))
813 NumPreds = ExistingPN->getNumIncomingValues();
815 NumPreds = std::distance(pred_begin(BB), pred_end(BB));
817 // Otherwise, the idom is the loop, so we need to insert a PHI node. Do so
818 // now, then get values to fill in the incoming values for the PHI.
819 PHINode *PN = PHINode::Create(orig->getType(), orig->getName()+".rle",
821 PN->reserveOperandSpace(NumPreds);
823 Phis.insert(std::make_pair(BB, PN));
825 // Fill in the incoming values for the block.
826 for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
827 Value* val = GetValueForBlock(*PI, orig, Phis);
828 PN->addIncoming(val, *PI);
831 VN.getAliasAnalysis()->copyValue(orig, PN);
833 // Attempt to collapse PHI nodes that are trivially redundant
834 Value* v = CollapsePhi(PN);
836 // Cache our phi construction results
  if (LoadInst* L = dyn_cast<LoadInst>(orig))
    phiMap[L->getPointerOperand()].insert(PN);
  else
    phiMap[orig].insert(PN);
845 PN->replaceAllUsesWith(v);
846 if (isa<PointerType>(v->getType()))
847 MD->invalidateCachedPointerInfo(v);
849 for (DenseMap<BasicBlock*, Value*>::iterator I = Phis.begin(),
850 E = Phis.end(); I != E; ++I)
854 DEBUG(errs() << "GVN removed: " << *PN << '\n');
855 MD->removeInstruction(PN);
856 PN->eraseFromParent();
857 DEBUG(verifyRemoved(PN));
863 /// IsValueFullyAvailableInBlock - Return true if we can prove that the value
864 /// we're analyzing is fully available in the specified block. As we go, keep
865 /// track of which blocks we know are fully alive in FullyAvailableBlocks. This
866 /// map is actually a tri-state map with the following values:
867 /// 0) we know the block *is not* fully available.
868 /// 1) we know the block *is* fully available.
869 /// 2) we do not know whether the block is fully available or not, but we are
870 /// currently speculating that it will be.
///   3) we are speculating for this block and have used that to speculate for
///      other blocks.
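///
/// Why the speculation matters, as a rough example (hypothetical CFG): for a
/// loop header H with a preheader P where the value is available and a latch
/// L, the query optimistically marks H as "2", recurses into L, sees H marked,
/// and so treats L (and then H) as fully available.  If some predecessor
/// instead turns out to lack the value, the speculative marks are undone on
/// the SpeculationFailure path below.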
873 static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
874 DenseMap<BasicBlock*, char> &FullyAvailableBlocks) {
875 // Optimistically assume that the block is fully available and check to see
876 // if we already know about this block in one lookup.
877 std::pair<DenseMap<BasicBlock*, char>::iterator, char> IV =
878 FullyAvailableBlocks.insert(std::make_pair(BB, 2));
880 // If the entry already existed for this block, return the precomputed value.
882 // If this is a speculative "available" value, mark it as being used for
883 // speculation of other blocks.
884 if (IV.first->second == 2)
885 IV.first->second = 3;
886 return IV.first->second != 0;
889 // Otherwise, see if it is fully available in all predecessors.
890 pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
  // If this block has no predecessors, it isn't live-in here.
  if (PI == PE)
    goto SpeculationFailure;
896 for (; PI != PE; ++PI)
897 // If the value isn't fully available in one of our predecessors, then it
898 // isn't fully available in this block either. Undo our previous
899 // optimistic assumption and bail out.
900 if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
901 goto SpeculationFailure;
905 // SpeculationFailure - If we get here, we found out that this is not, after
906 // all, a fully-available block. We have a problem if we speculated on this and
907 // used the speculation to mark other blocks as available.
909 char &BBVal = FullyAvailableBlocks[BB];
911 // If we didn't speculate on this, just return with it set to false.
917 // If we did speculate on this value, we could have blocks set to 1 that are
  // incorrect.  Walk the (transitive) successors of this block and mark them as
  // 0 if set to one.
920 SmallVector<BasicBlock*, 32> BBWorklist;
921 BBWorklist.push_back(BB);
923 while (!BBWorklist.empty()) {
924 BasicBlock *Entry = BBWorklist.pop_back_val();
925 // Note that this sets blocks to 0 (unavailable) if they happen to not
926 // already be in FullyAvailableBlocks. This is safe.
927 char &EntryVal = FullyAvailableBlocks[Entry];
928 if (EntryVal == 0) continue; // Already unavailable.
930 // Mark as unavailable.
933 for (succ_iterator I = succ_begin(Entry), E = succ_end(Entry); I != E; ++I)
934 BBWorklist.push_back(*I);
940 /// processNonLocalLoad - Attempt to eliminate a load whose dependencies are
941 /// non-local by performing PHI construction.
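///
/// A hedged illustration (hypothetical IR, not from a test case):
///
///   BB1:    store i32 %x, i32* %P       BB2:    %y = load i32* %P
///           br label %Merge                     br label %Merge
///   Merge:  %z = load i32* %P
///
/// Both predecessors make the loaded value available (%x via the store, %y via
/// the earlier load), so %z can be rebuilt as
///   %z = phi i32 [ %x, %BB1 ], [ %y, %BB2 ]
/// and the original load deleted.  Blocks where the value is unknown are
/// collected in UnavailableBlocks below and may instead trigger load PRE.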
942 bool GVN::processNonLocalLoad(LoadInst *LI,
943 SmallVectorImpl<Instruction*> &toErase) {
944 // Find the non-local dependencies of the load.
945 SmallVector<MemoryDependenceAnalysis::NonLocalDepEntry, 64> Deps;
946 MD->getNonLocalPointerDependency(LI->getOperand(0), true, LI->getParent(),
948 //DEBUG(errs() << "INVESTIGATING NONLOCAL LOAD: "
949 // << Deps.size() << *LI << '\n');
951 // If we had to process more than one hundred blocks to find the
952 // dependencies, this load isn't worth worrying about. Optimizing
953 // it will be too expensive.
954 if (Deps.size() > 100)
957 // If we had a phi translation failure, we'll have a single entry which is a
958 // clobber in the current block. Reject this early.
959 if (Deps.size() == 1 && Deps[0].second.isClobber()) {
961 errs() << "GVN: non-local load ";
962 WriteAsOperand(errs(), LI);
963 errs() << " is clobbered by " << *Deps[0].second.getInst() << '\n';
968 // Filter out useless results (non-locals, etc). Keep track of the blocks
969 // where we have a value available in repl, also keep track of whether we see
970 // dependencies that produce an unknown value for the load (such as a call
971 // that could potentially clobber the load).
972 SmallVector<std::pair<BasicBlock*, Value*>, 16> ValuesPerBlock;
973 SmallVector<BasicBlock*, 16> UnavailableBlocks;
975 for (unsigned i = 0, e = Deps.size(); i != e; ++i) {
976 BasicBlock *DepBB = Deps[i].first;
977 MemDepResult DepInfo = Deps[i].second;
979 if (DepInfo.isClobber()) {
980 UnavailableBlocks.push_back(DepBB);
984 Instruction *DepInst = DepInfo.getInst();
986 // Loading the allocation -> undef.
987 if (isa<AllocationInst>(DepInst) || isMalloc(DepInst)) {
988 ValuesPerBlock.push_back(std::make_pair(DepBB,
989 UndefValue::get(LI->getType())));
993 if (StoreInst* S = dyn_cast<StoreInst>(DepInst)) {
      // Reject loads and stores that are to the same address but are of
      // different types.
      // NOTE: 403.gcc does have this case (e.g. in readonly_fields_p) because
      // of bitfield access, it would be interesting to optimize for it at some
      // point.
      if (S->getOperand(0)->getType() != LI->getType()) {
1000 UnavailableBlocks.push_back(DepBB);
1004 ValuesPerBlock.push_back(std::make_pair(DepBB, S->getOperand(0)));
1006 } else if (LoadInst* LD = dyn_cast<LoadInst>(DepInst)) {
1007 if (LD->getType() != LI->getType()) {
1008 UnavailableBlocks.push_back(DepBB);
1011 ValuesPerBlock.push_back(std::make_pair(DepBB, LD));
1013 UnavailableBlocks.push_back(DepBB);
  // If we have no predecessors that produce a known value for this load, exit
  // early.
1020 if (ValuesPerBlock.empty()) return false;
1022 // If all of the instructions we depend on produce a known value for this
1023 // load, then it is fully redundant and we can use PHI insertion to compute
1024 // its value. Insert PHIs and remove the fully redundant value now.
1025 if (UnavailableBlocks.empty()) {
1026 // Use cached PHI construction information from previous runs
1027 SmallPtrSet<Instruction*, 4> &p = phiMap[LI->getPointerOperand()];
1028 // FIXME: What does phiMap do? Are we positive it isn't getting invalidated?
1029 for (SmallPtrSet<Instruction*, 4>::iterator I = p.begin(), E = p.end();
1031 if ((*I)->getParent() == LI->getParent()) {
1032 DEBUG(errs() << "GVN REMOVING NONLOCAL LOAD #1: " << *LI << '\n');
1033 LI->replaceAllUsesWith(*I);
1034 if (isa<PointerType>((*I)->getType()))
1035 MD->invalidateCachedPointerInfo(*I);
1036 toErase.push_back(LI);
1041 ValuesPerBlock.push_back(std::make_pair((*I)->getParent(), *I));
1044 DEBUG(errs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');
1046 DenseMap<BasicBlock*, Value*> BlockReplValues;
1047 BlockReplValues.insert(ValuesPerBlock.begin(), ValuesPerBlock.end());
1048 // Perform PHI construction.
1049 Value* v = GetValueForBlock(LI->getParent(), LI, BlockReplValues, true);
1050 LI->replaceAllUsesWith(v);
1052 if (isa<PHINode>(v))
1054 if (isa<PointerType>(v->getType()))
1055 MD->invalidateCachedPointerInfo(v);
1056 toErase.push_back(LI);
1061 if (!EnablePRE || !EnableLoadPRE)
1064 // Okay, we have *some* definitions of the value. This means that the value
  // is available in some of our (transitive) predecessors.  Let's think about
1066 // doing PRE of this load. This will involve inserting a new load into the
1067 // predecessor when it's not available. We could do this in general, but
1068 // prefer to not increase code size. As such, we only do this when we know
1069 // that we only have to insert *one* load (which means we're basically moving
1070 // the load, not inserting a new one).
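  //
  // A rough picture of the transformation (illustrative only):
  //
  //   Pred1:  %v = load i32* %P          Pred2:  (%P not loaded or stored)
  //   LoadBB: %x = load i32* %P
  //
  // A single reload of %P is inserted at the end of Pred2, and %x is then
  // rebuilt as a PHI of the Pred1 value and the new reload, so the original
  // load goes away without growing the total number of loads.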
1072 SmallPtrSet<BasicBlock *, 4> Blockers;
1073 for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
1074 Blockers.insert(UnavailableBlocks[i]);
  // Let's find the first basic block with more than one predecessor.  Walk
  // backwards through predecessors if needed.
1078 BasicBlock *LoadBB = LI->getParent();
1079 BasicBlock *TmpBB = LoadBB;
1081 bool isSinglePred = false;
1082 bool allSingleSucc = true;
1083 while (TmpBB->getSinglePredecessor()) {
1084 isSinglePred = true;
1085 TmpBB = TmpBB->getSinglePredecessor();
1086 if (!TmpBB) // If haven't found any, bail now.
1088 if (TmpBB == LoadBB) // Infinite (unreachable) loop.
1090 if (Blockers.count(TmpBB))
1092 if (TmpBB->getTerminator()->getNumSuccessors() != 1)
1093 allSingleSucc = false;
1099 // If we have a repl set with LI itself in it, this means we have a loop where
1100 // at least one of the values is LI. Since this means that we won't be able
1101 // to eliminate LI even if we insert uses in the other predecessors, we will
1102 // end up increasing code size. Reject this by scanning for LI.
1103 for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
1104 if (ValuesPerBlock[i].second == LI)
1109 for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
1110 if (Instruction *I = dyn_cast<Instruction>(ValuesPerBlock[i].second))
1111 // "Hot" Instruction is in some loop (because it dominates its dep.
1113 if (DT->dominates(LI, I)) {
1118 // We are interested only in "hot" instructions. We don't want to do any
1119 // mis-optimizations here.
1124 // Okay, we have some hope :). Check to see if the loaded value is fully
1125 // available in all but one predecessor.
1126 // FIXME: If we could restructure the CFG, we could make a common pred with
1127 // all the preds that don't have an available LI and insert a new load into
1129 BasicBlock *UnavailablePred = 0;
1131 DenseMap<BasicBlock*, char> FullyAvailableBlocks;
1132 for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
1133 FullyAvailableBlocks[ValuesPerBlock[i].first] = true;
1134 for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
1135 FullyAvailableBlocks[UnavailableBlocks[i]] = false;
1137 for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB);
1139 if (IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
1142 // If this load is not available in multiple predecessors, reject it.
1143 if (UnavailablePred && UnavailablePred != *PI)
1145 UnavailablePred = *PI;
1148 assert(UnavailablePred != 0 &&
1149 "Fully available value should be eliminated above!");
  // If the loaded pointer is a PHI node defined in this block, do PHI translation
1152 // to get its value in the predecessor.
1153 Value *LoadPtr = LI->getOperand(0)->DoPHITranslation(LoadBB, UnavailablePred);
1155 // Make sure the value is live in the predecessor. If it was defined by a
1156 // non-PHI instruction in this block, we don't know how to recompute it above.
1157 if (Instruction *LPInst = dyn_cast<Instruction>(LoadPtr))
1158 if (!DT->dominates(LPInst->getParent(), UnavailablePred)) {
1159 DEBUG(errs() << "COULDN'T PRE LOAD BECAUSE PTR IS UNAVAILABLE IN PRED: "
1160 << *LPInst << '\n' << *LI << "\n");
1164 // We don't currently handle critical edges :(
1165 if (UnavailablePred->getTerminator()->getNumSuccessors() != 1) {
1166 DEBUG(errs() << "COULD NOT PRE LOAD BECAUSE OF CRITICAL EDGE '"
1167 << UnavailablePred->getName() << "': " << *LI << '\n');
1171 // Make sure it is valid to move this load here. We have to watch out for:
1172 // @1 = getelementptr (i8* p, ...
1173 // test p and branch if == 0
1175 // It is valid to have the getelementptr before the test, even if p can be 0,
1176 // as getelementptr only does address arithmetic.
1177 // If we are not pushing the value through any multiple-successor blocks
1178 // we do not have this case. Otherwise, check that the load is safe to
1179 // put anywhere; this can be improved, but should be conservatively safe.
1180 if (!allSingleSucc &&
1181 !isSafeToLoadUnconditionally(LoadPtr, UnavailablePred->getTerminator()))
1184 // Okay, we can eliminate this load by inserting a reload in the predecessor
1185 // and using PHI construction to get the value in the other predecessors, do
1187 DEBUG(errs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
1189 Value *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
1191 UnavailablePred->getTerminator());
1193 SmallPtrSet<Instruction*, 4> &p = phiMap[LI->getPointerOperand()];
1194 for (SmallPtrSet<Instruction*, 4>::iterator I = p.begin(), E = p.end();
1196 ValuesPerBlock.push_back(std::make_pair((*I)->getParent(), *I));
1198 DenseMap<BasicBlock*, Value*> BlockReplValues;
1199 BlockReplValues.insert(ValuesPerBlock.begin(), ValuesPerBlock.end());
1200 BlockReplValues[UnavailablePred] = NewLoad;
1202 // Perform PHI construction.
1203 Value* v = GetValueForBlock(LI->getParent(), LI, BlockReplValues, true);
1204 LI->replaceAllUsesWith(v);
1205 if (isa<PHINode>(v))
1207 if (isa<PointerType>(v->getType()))
1208 MD->invalidateCachedPointerInfo(v);
1209 toErase.push_back(LI);
1214 /// CoerceAvailableValueToLoadType - If we saw a store of a value to memory, and
1215 /// then a load from a must-aliased pointer of a different type, try to coerce
1216 /// the stored value. If we can't do it, return null.
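///
/// Two hedged examples of what this produces (assumed layouts):
///   - store float %f, then load i32 from the same pointer: same size, so the
///     result is simply "bitcast float %f to i32" inserted before the load.
///   - store i64 %v, then load i32: the stored value is wider, so it is
///     truncated ("trunc i64 %v to i32"); on a big-endian target it is first
///     shifted right so the bytes the load would read land in the low bits.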
1217 static Value *CoerceAvailableValueToLoadType(Value *StoredVal, LoadInst *L,
1218 const TargetData &TD) {
1219 const Type *StoredValTy = StoredVal->getType();
1220 const Type *LoadedTy = L->getType();
1222 uint64_t StoreSize = TD.getTypeSizeInBits(StoredValTy);
1223 uint64_t LoadSize = TD.getTypeSizeInBits(LoadedTy);
1225 // If the store and reload are the same size, we can always reuse it.
1226 if (StoreSize == LoadSize) {
1227 if (isa<PointerType>(StoredValTy) && isa<PointerType>(LoadedTy)) {
1228 // Pointer to Pointer -> use bitcast.
1229 return new BitCastInst(StoredVal, LoadedTy, "", L);
1232 // Convert source pointers to integers, which can be bitcast.
1233 if (isa<PointerType>(StoredValTy)) {
1234 StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
1235 StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", L);
1238 const Type *TypeToCastTo = LoadedTy;
1239 if (isa<PointerType>(TypeToCastTo))
1240 TypeToCastTo = TD.getIntPtrType(StoredValTy->getContext());
1242 if (StoredValTy != TypeToCastTo)
1243 StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", L);
1245 // Cast to pointer if the load needs a pointer type.
1246 if (isa<PointerType>(LoadedTy))
1247 StoredVal = new IntToPtrInst(StoredVal, LoadedTy, "", L);
1252 // If the loaded value is smaller than the available value, then we can
1253 // extract out a piece from it. If the available value is too small, then we
1254 // can't do anything.
1255 if (StoreSize < LoadSize)
1258 // Convert source pointers to integers, which can be manipulated.
1259 if (isa<PointerType>(StoredValTy)) {
1260 StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
1261 StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", L);
1264 // Convert vectors and fp to integer, which can be manipulated.
1265 if (!isa<IntegerType>(StoredValTy)) {
1266 StoredValTy = IntegerType::get(StoredValTy->getContext(), StoreSize);
1267 StoredVal = new BitCastInst(StoredVal, StoredValTy, "", L);
1270 // If this is a big-endian system, we need to shift the value down to the low
1271 // bits so that a truncate will work.
1272 if (TD.isBigEndian()) {
1273 Constant *Val = ConstantInt::get(StoredVal->getType(), StoreSize-LoadSize);
1274 StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", L);
1277 // Truncate the integer to the right size now.
1278 const Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize);
1279 StoredVal = new TruncInst(StoredVal, NewIntTy, "trunc", L);
1281 if (LoadedTy == NewIntTy)
1284 // If the result is a pointer, inttoptr.
1285 if (isa<PointerType>(LoadedTy))
1286 return new IntToPtrInst(StoredVal, LoadedTy, "inttoptr", L);
1288 // Otherwise, bitcast.
1289 return new BitCastInst(StoredVal, LoadedTy, "bitcast", L);
1293 /// processLoad - Attempt to eliminate a load, first by eliminating it
1294 /// locally, and then attempting non-local elimination if that fails.
1295 bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
1296 if (L->isVolatile())
1299 // ... to a pointer that has been loaded from before...
1300 MemDepResult dep = MD->getDependency(L);
1302 // If the value isn't available, don't do anything!
1303 if (dep.isClobber()) {
1304 // FIXME: In the future, we should handle things like:
1305 // store i32 123, i32* %P
1306 // %A = bitcast i32* %P to i8*
    //   %B = gep i8* %A, i32 1
    //   %C = load i8* %B
1310 // We could do that by recognizing if the clobber instructions are obviously
1311 // a common base + constant offset, and if the previous store (or memset)
1312 // completely covers this load. This sort of thing can happen in bitfield
1315 // fast print dep, using operator<< on instruction would be too slow
1316 errs() << "GVN: load ";
1317 WriteAsOperand(errs(), L);
1318 Instruction *I = dep.getInst();
1319 errs() << " is clobbered by " << *I << '\n';
1324 // If it is defined in another block, try harder.
1325 if (dep.isNonLocal())
1326 return processNonLocalLoad(L, toErase);
1328 Instruction *DepInst = dep.getInst();
1329 if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
1330 Value *StoredVal = DepSI->getOperand(0);
1332 // The store and load are to a must-aliased pointer, but they may not
1333 // actually have the same type. See if we know how to reuse the stored
1334 // value (depending on its type).
1335 const TargetData *TD = 0;
1336 if (StoredVal->getType() != L->getType() &&
1337 (TD = getAnalysisIfAvailable<TargetData>())) {
1338 StoredVal = CoerceAvailableValueToLoadType(StoredVal, L, *TD);
1342 DEBUG(errs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
1343 << '\n' << *L << "\n\n\n");
1347 L->replaceAllUsesWith(StoredVal);
1348 if (isa<PointerType>(StoredVal->getType()))
1349 MD->invalidateCachedPointerInfo(StoredVal);
1350 toErase.push_back(L);
1355 if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
1356 Value *AvailableVal = DepLI;
1358 // The loads are of a must-aliased pointer, but they may not actually have
1359 // the same type. See if we know how to reuse the previously loaded value
1360 // (depending on its type).
1361 const TargetData *TD = 0;
1362 if (DepLI->getType() != L->getType() &&
1363 (TD = getAnalysisIfAvailable<TargetData>())) {
1364 AvailableVal = CoerceAvailableValueToLoadType(DepLI, L, *TD);
1365 if (AvailableVal == 0)
1368 DEBUG(errs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal
1369 << "\n" << *L << "\n\n\n");
1373 L->replaceAllUsesWith(AvailableVal);
1374 if (isa<PointerType>(DepLI->getType()))
1375 MD->invalidateCachedPointerInfo(DepLI);
1376 toErase.push_back(L);
1381 // FIXME: We should handle memset/memcpy/memmove as dependent instructions to
1382 // forward the value if available.
1385 // If this load really doesn't depend on anything, then we must be loading an
1386 // undef value. This can happen when loading for a fresh allocation with no
1387 // intervening stores, for example.
1388 if (isa<AllocationInst>(DepInst) || isMalloc(DepInst)) {
1389 L->replaceAllUsesWith(UndefValue::get(L->getType()));
1390 toErase.push_back(L);
1398 Value* GVN::lookupNumber(BasicBlock* BB, uint32_t num) {
1399 DenseMap<BasicBlock*, ValueNumberScope*>::iterator I = localAvail.find(BB);
1400 if (I == localAvail.end())
1403 ValueNumberScope* locals = I->second;
1406 DenseMap<uint32_t, Value*>::iterator I = locals->table.find(num);
1407 if (I != locals->table.end())
1410 locals = locals->parent;
1416 /// AttemptRedundancyElimination - If the "fast path" of redundancy elimination
1417 /// by inheritance from the dominator fails, see if we can perform phi
1418 /// construction to eliminate the redundancy.
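///
/// Roughly (hypothetical example): if "%a = add i32 %x, %y" was given value
/// number 7 and a value numbered 7 is available in every predecessor of %a's
/// block, but not in a common dominator, this walks the predecessors, records
/// the available copies per block, and lets GetValueForBlock stitch them
/// together with PHIs so that %a itself becomes redundant.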
1419 Value* GVN::AttemptRedundancyElimination(Instruction* orig, unsigned valno) {
1420 BasicBlock* BaseBlock = orig->getParent();
1422 SmallPtrSet<BasicBlock*, 4> Visited;
1423 SmallVector<BasicBlock*, 8> Stack;
1424 Stack.push_back(BaseBlock);
1426 DenseMap<BasicBlock*, Value*> Results;
1428 // Walk backwards through our predecessors, looking for instances of the
1429 // value number we're looking for. Instances are recorded in the Results
1430 // map, which is then used to perform phi construction.
1431 while (!Stack.empty()) {
    BasicBlock* Current = Stack.back();
    Stack.pop_back();
1435 // If we've walked all the way to a proper dominator, then give up. Cases
1436 // where the instance is in the dominator will have been caught by the fast
1437 // path, and any cases that require phi construction further than this are
1438 // probably not worth it anyways. Note that this is a SIGNIFICANT compile
1439 // time improvement.
1440 if (DT->properlyDominates(Current, orig->getParent())) return 0;
1442 DenseMap<BasicBlock*, ValueNumberScope*>::iterator LA =
1443 localAvail.find(Current);
1444 if (LA == localAvail.end()) return 0;
1445 DenseMap<uint32_t, Value*>::iterator V = LA->second->table.find(valno);
1447 if (V != LA->second->table.end()) {
1448 // Found an instance, record it.
1449 Results.insert(std::make_pair(Current, V->second));
1453 // If we reach the beginning of the function, then give up.
1454 if (pred_begin(Current) == pred_end(Current))
1457 for (pred_iterator PI = pred_begin(Current), PE = pred_end(Current);
1459 if (Visited.insert(*PI))
1460 Stack.push_back(*PI);
1463 // If we didn't find instances, give up. Otherwise, perform phi construction.
1464 if (Results.size() == 0)
1467 return GetValueForBlock(BaseBlock, orig, Results, true);
1470 /// processInstruction - When calculating availability, handle an instruction
1471 /// by inserting it into the appropriate sets
1472 bool GVN::processInstruction(Instruction *I,
1473 SmallVectorImpl<Instruction*> &toErase) {
1474 if (LoadInst* L = dyn_cast<LoadInst>(I)) {
1475 bool changed = processLoad(L, toErase);
1478 unsigned num = VN.lookup_or_add(L);
1479 localAvail[I->getParent()]->table.insert(std::make_pair(num, L));
1485 uint32_t nextNum = VN.getNextUnusedValueNumber();
1486 unsigned num = VN.lookup_or_add(I);
1488 if (BranchInst* BI = dyn_cast<BranchInst>(I)) {
1489 localAvail[I->getParent()]->table.insert(std::make_pair(num, I));
1491 if (!BI->isConditional() || isa<Constant>(BI->getCondition()))
1494 Value* branchCond = BI->getCondition();
1495 uint32_t condVN = VN.lookup_or_add(branchCond);
1497 BasicBlock* trueSucc = BI->getSuccessor(0);
1498 BasicBlock* falseSucc = BI->getSuccessor(1);
1500 if (trueSucc->getSinglePredecessor())
1501 localAvail[trueSucc]->table[condVN] =
1502 ConstantInt::getTrue(trueSucc->getContext());
1503 if (falseSucc->getSinglePredecessor())
1504 localAvail[falseSucc]->table[condVN] =
1505 ConstantInt::getFalse(trueSucc->getContext());
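    // Net effect of the above (illustrative): after "br i1 %cmp, label %T,
    // label %F", %cmp is recorded as true on entry to %T and false on entry to
    // %F, provided each successor has no other predecessors, so later uses of
    // %cmp in those blocks can be replaced by constants.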
1509 // Allocations are always uniquely numbered, so we can save time and memory
1510 // by fast failing them.
1511 } else if (isa<AllocationInst>(I) || isMalloc(I) || isa<TerminatorInst>(I)) {
1512 localAvail[I->getParent()]->table.insert(std::make_pair(num, I));
1516 // Collapse PHI nodes
1517 if (PHINode* p = dyn_cast<PHINode>(I)) {
1518 Value* constVal = CollapsePhi(p);
1521 for (PhiMapType::iterator PI = phiMap.begin(), PE = phiMap.end();
1523 PI->second.erase(p);
1525 p->replaceAllUsesWith(constVal);
1526 if (isa<PointerType>(constVal->getType()))
1527 MD->invalidateCachedPointerInfo(constVal);
1530 toErase.push_back(p);
1532 localAvail[I->getParent()]->table.insert(std::make_pair(num, I));
1535 // If the number we were assigned was a brand new VN, then we don't
1536 // need to do a lookup to see if the number already exists
1537 // somewhere in the domtree: it can't!
1538 } else if (num == nextNum) {
1539 localAvail[I->getParent()]->table.insert(std::make_pair(num, I));
  // Perform fast-path value-number based elimination of values inherited from
  // dominators.
1543 } else if (Value* repl = lookupNumber(I->getParent(), num)) {
1546 I->replaceAllUsesWith(repl);
1547 if (isa<PointerType>(repl->getType()))
1548 MD->invalidateCachedPointerInfo(repl);
1549 toErase.push_back(I);
  // Perform slow-path value-number based elimination with phi construction.
1554 } else if (Value* repl = AttemptRedundancyElimination(I, num)) {
1557 I->replaceAllUsesWith(repl);
1558 if (isa<PointerType>(repl->getType()))
1559 MD->invalidateCachedPointerInfo(repl);
1560 toErase.push_back(I);
1564 localAvail[I->getParent()]->table.insert(std::make_pair(num, I));
1570 /// runOnFunction - This is the main transformation entry point for a function.
1571 bool GVN::runOnFunction(Function& F) {
1572 MD = &getAnalysis<MemoryDependenceAnalysis>();
1573 DT = &getAnalysis<DominatorTree>();
1574 VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
1578 bool changed = false;
1579 bool shouldContinue = true;
1581 // Merge unconditional branches, allowing PRE to catch more
1582 // optimization opportunities.
1583 for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
1584 BasicBlock* BB = FI;
1586 bool removedBlock = MergeBlockIntoPredecessor(BB, this);
1587 if (removedBlock) NumGVNBlocks++;
1589 changed |= removedBlock;
1592 unsigned Iteration = 0;
1594 while (shouldContinue) {
1595 DEBUG(errs() << "GVN iteration: " << Iteration << "\n");
1596 shouldContinue = iterateOnFunction(F);
1597 changed |= shouldContinue;
1602 bool PREChanged = true;
1603 while (PREChanged) {
1604 PREChanged = performPRE(F);
1605 changed |= PREChanged;
1608 // FIXME: Should perform GVN again after PRE does something. PRE can move
1609 // computations into blocks where they become fully redundant. Note that
1610 // we can't do this until PRE's critical edge splitting updates memdep.
1611 // Actually, when this happens, we should just fully integrate PRE into GVN.
1613 cleanupGlobalSets();
1619 bool GVN::processBlock(BasicBlock* BB) {
1620 // FIXME: Kill off toErase by doing erasing eagerly in a helper function (and
1621 // incrementing BI before processing an instruction).
1622 SmallVector<Instruction*, 8> toErase;
1623 bool changed_function = false;
1625 for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
1627 changed_function |= processInstruction(BI, toErase);
1628 if (toErase.empty()) {
1633 // If we need some instructions deleted, do it now.
1634 NumGVNInstr += toErase.size();
1636 // Avoid iterator invalidation.
1637 bool AtStart = BI == BB->begin();
1641 for (SmallVector<Instruction*, 4>::iterator I = toErase.begin(),
1642 E = toErase.end(); I != E; ++I) {
1643 DEBUG(errs() << "GVN removed: " << **I << '\n');
1644 MD->removeInstruction(*I);
1645 (*I)->eraseFromParent();
1646 DEBUG(verifyRemoved(*I));
1656 return changed_function;
1659 /// performPRE - Perform a purely local form of PRE that looks for diamond
1660 /// control flow patterns and attempts to perform simple PRE at the join point.
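///
/// An illustrative diamond (hypothetical IR): if "%t = add i32 %a, %b" is
/// computed in block %Then and again at the join block %Merge, but not in
/// %Else, PRE clones the add into %Else, builds
///   %t.pre-phi = phi i32 [ %t, %Then ], [ %t.pre, %Else ]
/// at the top of %Merge, and deletes the now-redundant add in %Merge.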
1661 bool GVN::performPRE(Function& F) {
1662 bool Changed = false;
1663 SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit;
1664 DenseMap<BasicBlock*, Value*> predMap;
1665 for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
1666 DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
1667 BasicBlock* CurrentBlock = *DI;
1669 // Nothing to PRE in the entry block.
1670 if (CurrentBlock == &F.getEntryBlock()) continue;
1672 for (BasicBlock::iterator BI = CurrentBlock->begin(),
1673 BE = CurrentBlock->end(); BI != BE; ) {
1674 Instruction *CurInst = BI++;
1676 if (isa<AllocationInst>(CurInst) || isMalloc(CurInst) ||
1677 isa<TerminatorInst>(CurInst) || isa<PHINode>(CurInst) ||
1678 (CurInst->getType() == Type::getVoidTy(F.getContext())) ||
1679 CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
1680 isa<DbgInfoIntrinsic>(CurInst))
1683 uint32_t valno = VN.lookup(CurInst);
1685 // Look for the predecessors for PRE opportunities. We're
1686 // only trying to solve the basic diamond case, where
1687 // a value is computed in the successor and one predecessor,
1688 // but not the other. We also explicitly disallow cases
1689 // where the successor is its own predecessor, because they're
1690 // more complicated to get right.
1691 unsigned numWith = 0;
1692 unsigned numWithout = 0;
1693 BasicBlock* PREPred = 0;
1696 for (pred_iterator PI = pred_begin(CurrentBlock),
1697 PE = pred_end(CurrentBlock); PI != PE; ++PI) {
1698 // We're not interested in PRE where the block is its
        // own predecessor, or in blocks with predecessors
1700 // that are not reachable.
1701 if (*PI == CurrentBlock) {
1704 } else if (!localAvail.count(*PI)) {
1709 DenseMap<uint32_t, Value*>::iterator predV =
1710 localAvail[*PI]->table.find(valno);
1711 if (predV == localAvail[*PI]->table.end()) {
1714 } else if (predV->second == CurInst) {
1717 predMap[*PI] = predV->second;
1722 // Don't do PRE when it might increase code size, i.e. when
1723 // we would need to insert instructions in more than one pred.
1724 if (numWithout != 1 || numWith == 0)
1727 // We can't do PRE safely on a critical edge, so instead we schedule
      // the edge to be split and perform the PRE the next time we iterate
      // on the function.
1730 unsigned succNum = 0;
1731 for (unsigned i = 0, e = PREPred->getTerminator()->getNumSuccessors();
1733 if (PREPred->getTerminator()->getSuccessor(i) == CurrentBlock) {
1738 if (isCriticalEdge(PREPred->getTerminator(), succNum)) {
1739 toSplit.push_back(std::make_pair(PREPred->getTerminator(), succNum));
      // Instantiate the expression in the predecessor that lacked it.
      // Because we are going top-down through the block, all value numbers
      // will be available in the predecessor by the time we need them.  Any
      // that weren't originally present will have been instantiated earlier
      // in this block.
1748 Instruction* PREInstr = CurInst->clone(CurInst->getContext());
1749 bool success = true;
1750 for (unsigned i = 0, e = CurInst->getNumOperands(); i != e; ++i) {
1751 Value *Op = PREInstr->getOperand(i);
1752 if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
1755 if (Value *V = lookupNumber(PREPred, VN.lookup(Op))) {
1756 PREInstr->setOperand(i, V);
1763 // Fail out if we encounter an operand that is not available in
1764 // the PRE predecessor. This is typically because of loads which
1765 // are not value numbered precisely.
1768 DEBUG(verifyRemoved(PREInstr));
1772 PREInstr->insertBefore(PREPred->getTerminator());
1773 PREInstr->setName(CurInst->getName() + ".pre");
1774 predMap[PREPred] = PREInstr;
1775 VN.add(PREInstr, valno);
1778 // Update the availability map to include the new instruction.
1779 localAvail[PREPred]->table.insert(std::make_pair(valno, PREInstr));
1781 // Create a PHI to make the value available in this block.
1782 PHINode* Phi = PHINode::Create(CurInst->getType(),
1783 CurInst->getName() + ".pre-phi",
1784 CurrentBlock->begin());
1785 for (pred_iterator PI = pred_begin(CurrentBlock),
1786 PE = pred_end(CurrentBlock); PI != PE; ++PI)
1787 Phi->addIncoming(predMap[*PI], *PI);
1790 localAvail[CurrentBlock]->table[valno] = Phi;
1792 CurInst->replaceAllUsesWith(Phi);
1793 if (isa<PointerType>(Phi->getType()))
1794 MD->invalidateCachedPointerInfo(Phi);
1797 DEBUG(errs() << "GVN PRE removed: " << *CurInst << '\n');
1798 MD->removeInstruction(CurInst);
1799 CurInst->eraseFromParent();
1800 DEBUG(verifyRemoved(CurInst));
1805 for (SmallVector<std::pair<TerminatorInst*, unsigned>, 4>::iterator
1806 I = toSplit.begin(), E = toSplit.end(); I != E; ++I)
1807 SplitCriticalEdge(I->first, I->second, this);
1809 return Changed || toSplit.size();
1812 /// iterateOnFunction - Executes one iteration of GVN
1813 bool GVN::iterateOnFunction(Function &F) {
1814 cleanupGlobalSets();
1816 for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
1817 DE = df_end(DT->getRootNode()); DI != DE; ++DI) {
1819 localAvail[DI->getBlock()] =
1820 new ValueNumberScope(localAvail[DI->getIDom()->getBlock()]);
1822 localAvail[DI->getBlock()] = new ValueNumberScope(0);
1825 // Top-down walk of the dominator tree
1826 bool changed = false;
1828 // Needed for value numbering with phi construction to work.
1829 ReversePostOrderTraversal<Function*> RPOT(&F);
1830 for (ReversePostOrderTraversal<Function*>::rpo_iterator RI = RPOT.begin(),
1831 RE = RPOT.end(); RI != RE; ++RI)
1832 changed |= processBlock(*RI);
1834 for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
1835 DE = df_end(DT->getRootNode()); DI != DE; ++DI)
1836 changed |= processBlock(DI->getBlock());
1842 void GVN::cleanupGlobalSets() {
1846 for (DenseMap<BasicBlock*, ValueNumberScope*>::iterator
1847 I = localAvail.begin(), E = localAvail.end(); I != E; ++I)
1852 /// verifyRemoved - Verify that the specified instruction does not occur in our
1853 /// internal data structures.
1854 void GVN::verifyRemoved(const Instruction *Inst) const {
1855 VN.verifyRemoved(Inst);
1857 // Walk through the PHI map to make sure the instruction isn't hiding in there
1859 for (PhiMapType::iterator
1860 I = phiMap.begin(), E = phiMap.end(); I != E; ++I) {
1861 assert(I->first != Inst && "Inst is still a key in PHI map!");
1863 for (SmallPtrSet<Instruction*, 4>::iterator
1864 II = I->second.begin(), IE = I->second.end(); II != IE; ++II) {
1865 assert(*II != Inst && "Inst is still a value in PHI map!");
1869 // Walk through the value number scope to make sure the instruction isn't
1870 // ferreted away in it.
1871 for (DenseMap<BasicBlock*, ValueNumberScope*>::iterator
1872 I = localAvail.begin(), E = localAvail.end(); I != E; ++I) {
1873 const ValueNumberScope *VNS = I->second;
1876 for (DenseMap<uint32_t, Value*>::iterator
1877 II = VNS->table.begin(), IE = VNS->table.end(); II != IE; ++II) {
1878 assert(II->second != Inst && "Inst still in value numbering scope!");