//===- GVN.cpp - Eliminate redundant values and loads --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs global value numbering to eliminate fully redundant
// instructions. It also performs simple dead load elimination.
//
// Note that this pass does the value numbering itself; it does not use the
// ValueNumbering analysis passes.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "gvn"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/IRBuilder.h"
using namespace llvm;

STATISTIC(NumGVNInstr,  "Number of instructions deleted");
STATISTIC(NumGVNLoad,   "Number of loads deleted");
STATISTIC(NumGVNPRE,    "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
STATISTIC(NumPRELoad,   "Number of loads PRE'd");

static cl::opt<bool> EnablePRE("enable-pre",
                               cl::init(true), cl::Hidden);
static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true));

//===----------------------------------------------------------------------===//
//                         ValueTable Class
//===----------------------------------------------------------------------===//

/// This class holds the mapping between values and value numbers. It is used
/// as an efficient mechanism to determine the expression-wise equivalence of
/// two values.
namespace {
  struct Expression {
    enum ExpressionOpcode {
      ADD = Instruction::Add,
      FADD = Instruction::FAdd,
      SUB = Instruction::Sub,
      FSUB = Instruction::FSub,
      MUL = Instruction::Mul,
      FMUL = Instruction::FMul,
      UDIV = Instruction::UDiv,
      SDIV = Instruction::SDiv,
      FDIV = Instruction::FDiv,
      UREM = Instruction::URem,
      SREM = Instruction::SRem,
      FREM = Instruction::FRem,
      SHL = Instruction::Shl,
      LSHR = Instruction::LShr,
      ASHR = Instruction::AShr,
      AND = Instruction::And,
      OR = Instruction::Or,
      XOR = Instruction::Xor,
      TRUNC = Instruction::Trunc,
      ZEXT = Instruction::ZExt,
      SEXT = Instruction::SExt,
      FPTOUI = Instruction::FPToUI,
      FPTOSI = Instruction::FPToSI,
      UITOFP = Instruction::UIToFP,
      SITOFP = Instruction::SIToFP,
      FPTRUNC = Instruction::FPTrunc,
      FPEXT = Instruction::FPExt,
      PTRTOINT = Instruction::PtrToInt,
      INTTOPTR = Instruction::IntToPtr,
      BITCAST = Instruction::BitCast,
      ICMPEQ, ICMPNE, ICMPUGT, ICMPUGE, ICMPULT, ICMPULE,
      ICMPSGT, ICMPSGE, ICMPSLT, ICMPSLE, FCMPOEQ,
      FCMPOGT, FCMPOGE, FCMPOLT, FCMPOLE, FCMPONE,
      FCMPORD, FCMPUNO, FCMPUEQ, FCMPUGT, FCMPUGE,
      FCMPULT, FCMPULE, FCMPUNE, EXTRACT, INSERT,
      SHUFFLE, SELECT, GEP, CALL, CONSTANT,
      INSERTVALUE, EXTRACTVALUE, EMPTY, TOMBSTONE };
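
    // Note: EMPTY and TOMBSTONE are not real instruction opcodes; they exist
    // only so Expression can be used as a DenseMap key, which requires
    // distinguished empty and tombstone sentinel values (see the
    // DenseMapInfo<Expression> specialization below).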

    ExpressionOpcode opcode;
    const Type *type;
    SmallVector<uint32_t, 4> varargs;
    Value *function;

    Expression() { }
    Expression(ExpressionOpcode o) : opcode(o) { }

    bool operator==(const Expression &other) const {
      if (opcode != other.opcode)
        return false;
      else if (opcode == EMPTY || opcode == TOMBSTONE)
        return true;
      else if (type != other.type)
        return false;
      else if (function != other.function)
        return false;
      else if (varargs != other.varargs)
        return false;
      else
        return true;
    }

    /*bool operator!=(const Expression &other) const {
      return !(*this == other);
    }*/
  };

  class ValueTable {
    private:
      DenseMap<Value*, uint32_t> valueNumbering;
      DenseMap<Expression, uint32_t> expressionNumbering;
      AliasAnalysis* AA;
      MemoryDependenceAnalysis* MD;
      DominatorTree* DT;

      uint32_t nextValueNumber;

      Expression::ExpressionOpcode getOpcode(CmpInst* C);
      Expression create_expression(BinaryOperator* BO);
      Expression create_expression(CmpInst* C);
      Expression create_expression(ShuffleVectorInst* V);
      Expression create_expression(ExtractElementInst* C);
      Expression create_expression(InsertElementInst* V);
      Expression create_expression(SelectInst* V);
      Expression create_expression(CastInst* C);
      Expression create_expression(GetElementPtrInst* G);
      Expression create_expression(CallInst* C);
      Expression create_expression(ExtractValueInst* C);
      Expression create_expression(InsertValueInst* C);

      uint32_t lookup_or_add_call(CallInst* C);
    public:
      ValueTable() : nextValueNumber(1) { }
      uint32_t lookup_or_add(Value *V);
      uint32_t lookup(Value *V) const;
      void add(Value *V, uint32_t num);
      void clear();
      void erase(Value *v);
      void setAliasAnalysis(AliasAnalysis* A) { AA = A; }
      AliasAnalysis *getAliasAnalysis() const { return AA; }
      void setMemDep(MemoryDependenceAnalysis* M) { MD = M; }
      void setDomTree(DominatorTree* D) { DT = D; }
      uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
      void verifyRemoved(const Value *) const;
  };
}

namespace llvm {
template <> struct DenseMapInfo<Expression> {
  static inline Expression getEmptyKey() {
    return Expression(Expression::EMPTY);
  }

  static inline Expression getTombstoneKey() {
    return Expression(Expression::TOMBSTONE);
  }

  static unsigned getHashValue(const Expression e) {
    unsigned hash = e.opcode;

    hash = ((unsigned)((uintptr_t)e.type >> 4) ^
            (unsigned)((uintptr_t)e.type >> 9)) +
           hash * 37;

    for (SmallVector<uint32_t, 4>::const_iterator I = e.varargs.begin(),
         E = e.varargs.end(); I != E; ++I)
      hash = *I + hash * 37;

    hash = ((unsigned)((uintptr_t)e.function >> 4) ^
            (unsigned)((uintptr_t)e.function >> 9)) +
           hash * 37;

    return hash;
  }
  static bool isEqual(const Expression &LHS, const Expression &RHS) {
    return LHS == RHS;
  }
};
}

//===----------------------------------------------------------------------===//
//                     ValueTable Internal Functions
//===----------------------------------------------------------------------===//

Expression::ExpressionOpcode ValueTable::getOpcode(CmpInst* C) {
  if (isa<ICmpInst>(C)) {
    switch (C->getPredicate()) {
    default:  // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case ICmpInst::ICMP_EQ:  return Expression::ICMPEQ;
    case ICmpInst::ICMP_NE:  return Expression::ICMPNE;
    case ICmpInst::ICMP_UGT: return Expression::ICMPUGT;
    case ICmpInst::ICMP_UGE: return Expression::ICMPUGE;
    case ICmpInst::ICMP_ULT: return Expression::ICMPULT;
    case ICmpInst::ICMP_ULE: return Expression::ICMPULE;
    case ICmpInst::ICMP_SGT: return Expression::ICMPSGT;
    case ICmpInst::ICMP_SGE: return Expression::ICMPSGE;
    case ICmpInst::ICMP_SLT: return Expression::ICMPSLT;
    case ICmpInst::ICMP_SLE: return Expression::ICMPSLE;
    }
  } else {
    switch (C->getPredicate()) {
    default: // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case FCmpInst::FCMP_OEQ: return Expression::FCMPOEQ;
    case FCmpInst::FCMP_OGT: return Expression::FCMPOGT;
    case FCmpInst::FCMP_OGE: return Expression::FCMPOGE;
    case FCmpInst::FCMP_OLT: return Expression::FCMPOLT;
    case FCmpInst::FCMP_OLE: return Expression::FCMPOLE;
    case FCmpInst::FCMP_ONE: return Expression::FCMPONE;
    case FCmpInst::FCMP_ORD: return Expression::FCMPORD;
    case FCmpInst::FCMP_UNO: return Expression::FCMPUNO;
    case FCmpInst::FCMP_UEQ: return Expression::FCMPUEQ;
    case FCmpInst::FCMP_UGT: return Expression::FCMPUGT;
    case FCmpInst::FCMP_UGE: return Expression::FCMPUGE;
    case FCmpInst::FCMP_ULT: return Expression::FCMPULT;
    case FCmpInst::FCMP_ULE: return Expression::FCMPULE;
    case FCmpInst::FCMP_UNE: return Expression::FCMPUNE;
    }
  }
}

Expression ValueTable::create_expression(CallInst* C) {
  Expression e;

  e.type = C->getType();
  e.function = C->getCalledFunction();
  e.opcode = Expression::CALL;

  CallSite CS(C);
  for (CallInst::op_iterator I = CS.arg_begin(), E = CS.arg_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(BinaryOperator* BO) {
  Expression e;
  e.varargs.push_back(lookup_or_add(BO->getOperand(0)));
  e.varargs.push_back(lookup_or_add(BO->getOperand(1)));
  e.function = 0;
  e.type = BO->getType();
  e.opcode = static_cast<Expression::ExpressionOpcode>(BO->getOpcode());

  return e;
}

Expression ValueTable::create_expression(CmpInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.varargs.push_back(lookup_or_add(C->getOperand(1)));
  e.function = 0;
  e.type = C->getType();
  e.opcode = getOpcode(C);

  return e;
}

Expression ValueTable::create_expression(CastInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.function = 0;
  e.type = C->getType();
  e.opcode = static_cast<Expression::ExpressionOpcode>(C->getOpcode());

  return e;
}

Expression ValueTable::create_expression(ShuffleVectorInst* S) {
  Expression e;

  e.varargs.push_back(lookup_or_add(S->getOperand(0)));
  e.varargs.push_back(lookup_or_add(S->getOperand(1)));
  e.varargs.push_back(lookup_or_add(S->getOperand(2)));
  e.function = 0;
  e.type = S->getType();
  e.opcode = Expression::SHUFFLE;

  return e;
}

Expression ValueTable::create_expression(ExtractElementInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getOperand(0)));
  e.varargs.push_back(lookup_or_add(E->getOperand(1)));
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::EXTRACT;

  return e;
}

Expression ValueTable::create_expression(InsertElementInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getOperand(0)));
  e.varargs.push_back(lookup_or_add(I->getOperand(1)));
  e.varargs.push_back(lookup_or_add(I->getOperand(2)));
  e.function = 0;
  e.type = I->getType();
  e.opcode = Expression::INSERT;

  return e;
}

Expression ValueTable::create_expression(SelectInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getCondition()));
  e.varargs.push_back(lookup_or_add(I->getTrueValue()));
  e.varargs.push_back(lookup_or_add(I->getFalseValue()));
  e.function = 0;
  e.type = I->getType();
  e.opcode = Expression::SELECT;

  return e;
}

Expression ValueTable::create_expression(GetElementPtrInst* G) {
  Expression e;

  e.varargs.push_back(lookup_or_add(G->getPointerOperand()));
  e.function = 0;
  e.type = G->getType();
  e.opcode = Expression::GEP;

  for (GetElementPtrInst::op_iterator I = G->idx_begin(), E = G->idx_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}
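
// For illustration (not part of the original sources): a GEP such as
//   %a = getelementptr [4 x i32]* %p, i64 0, i64 2
// becomes an Expression with opcode GEP whose varargs are
//   [ VN(%p), VN(0), VN(2) ]
// so two structurally identical GEPs map to the same Expression and hence
// receive the same value number.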

Expression ValueTable::create_expression(ExtractValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  for (ExtractValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::EXTRACTVALUE;

  return e;
}

Expression ValueTable::create_expression(InsertValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  e.varargs.push_back(lookup_or_add(E->getInsertedValueOperand()));
  for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::INSERTVALUE;

  return e;
}

//===----------------------------------------------------------------------===//
//                     ValueTable External Functions
//===----------------------------------------------------------------------===//

/// add - Insert a value into the table with a specified value number.
void ValueTable::add(Value *V, uint32_t num) {
  valueNumbering.insert(std::make_pair(V, num));
}

uint32_t ValueTable::lookup_or_add_call(CallInst* C) {
  if (AA->doesNotAccessMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) e = nextValueNumber++;
    valueNumbering[C] = e;
    return e;
  } else if (AA->onlyReadsMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }
    if (!MD) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }

    MemDepResult local_dep = MD->getDependency(C);

    if (!local_dep.isDef() && !local_dep.isNonLocal()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (local_dep.isDef()) {
      CallInst* local_cdep = cast<CallInst>(local_dep.getInst());

      if (local_cdep->getNumArgOperands() != C->getNumArgOperands()) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }

      for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
        uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
        uint32_t cd_vn = lookup_or_add(local_cdep->getArgOperand(i));
        if (c_vn != cd_vn) {
          valueNumbering[C] = nextValueNumber;
          return nextValueNumber++;
        }
      }

      uint32_t v = lookup_or_add(local_cdep);
      valueNumbering[C] = v;
      return v;
    }

    // Non-local case.
    const MemoryDependenceAnalysis::NonLocalDepInfo &deps =
      MD->getNonLocalCallDependency(CallSite(C));
    // FIXME: call/call dependencies for readonly calls should return def, not
    // clobber! Move the checking logic to MemDep!
    CallInst* cdep = 0;

    // Check to see if we have a single dominating call instruction that is
    // identical to C.
    for (unsigned i = 0, e = deps.size(); i != e; ++i) {
      const NonLocalDepEntry *I = &deps[i];
      // Ignore non-local dependencies.
      if (I->getResult().isNonLocal())
        continue;

      // We don't handle non-dependencies. If we already have a call, reject
      // instruction dependencies.
      if (I->getResult().isClobber() || cdep != 0) {
        cdep = 0;
        break;
      }

      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst());
      // FIXME: All duplicated with non-local case.
      if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())){
        cdep = NonLocalDepCall;
        continue;
      }

      cdep = 0;
      break;
    }

    if (!cdep) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (cdep->getNumArgOperands() != C->getNumArgOperands()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }
    for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
      uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
      uint32_t cd_vn = lookup_or_add(cdep->getArgOperand(i));
      if (c_vn != cd_vn) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }
    }

    uint32_t v = lookup_or_add(cdep);
    valueNumbering[C] = v;
    return v;
  } else {
    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;
  }
}
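
// Illustrative scenario for the readonly-call path above: given
//   %x = call i32 @f(i32 %a)    ; @f only reads memory
//   %y = call i32 @f(i32 %a)
// with no write in between that memdep reports as a clobber, the second call
// is a Def dependency on the first; the argument value numbers match, so %y
// is assigned the same value number as %x and becomes a candidate for
// elimination.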

/// lookup_or_add - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t ValueTable::lookup_or_add(Value *V) {
  DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
  if (VI != valueNumbering.end())
    return VI->second;

  if (!isa<Instruction>(V)) {
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  Instruction* I = cast<Instruction>(V);
  Expression exp;
  switch (I->getOpcode()) {
    case Instruction::Call:
      return lookup_or_add_call(cast<CallInst>(I));
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or :
    case Instruction::Xor:
      exp = create_expression(cast<BinaryOperator>(I));
      break;
    case Instruction::ICmp:
    case Instruction::FCmp:
      exp = create_expression(cast<CmpInst>(I));
      break;
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::BitCast:
      exp = create_expression(cast<CastInst>(I));
      break;
    case Instruction::Select:
      exp = create_expression(cast<SelectInst>(I));
      break;
    case Instruction::ExtractElement:
      exp = create_expression(cast<ExtractElementInst>(I));
      break;
    case Instruction::InsertElement:
      exp = create_expression(cast<InsertElementInst>(I));
      break;
    case Instruction::ShuffleVector:
      exp = create_expression(cast<ShuffleVectorInst>(I));
      break;
    case Instruction::ExtractValue:
      exp = create_expression(cast<ExtractValueInst>(I));
      break;
    case Instruction::InsertValue:
      exp = create_expression(cast<InsertValueInst>(I));
      break;
    case Instruction::GetElementPtr:
      exp = create_expression(cast<GetElementPtrInst>(I));
      break;
    default:
      valueNumbering[V] = nextValueNumber;
      return nextValueNumber++;
  }

  uint32_t& e = expressionNumbering[exp];
  if (!e) e = nextValueNumber++;
  valueNumbering[V] = e;
  return e;
}

/// lookup - Returns the value number of the specified value. Fails if
/// the value has not yet been numbered.
uint32_t ValueTable::lookup(Value *V) const {
  DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
  assert(VI != valueNumbering.end() && "Value not numbered?");
  return VI->second;
}

/// clear - Remove all entries from the ValueTable.
void ValueTable::clear() {
  valueNumbering.clear();
  expressionNumbering.clear();
  nextValueNumber = 1;
}

/// erase - Remove a value from the value numbering.
void ValueTable::erase(Value *V) {
  valueNumbering.erase(V);
}

/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void ValueTable::verifyRemoved(const Value *V) const {
  for (DenseMap<Value*, uint32_t>::const_iterator
         I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
    assert(I->first != V && "Inst still occurs in value numbering map!");
  }
}

//===----------------------------------------------------------------------===//
//                                GVN Pass
//===----------------------------------------------------------------------===//

namespace {
  struct ValueNumberScope {
    ValueNumberScope* parent;
    DenseMap<uint32_t, Value*> table;

    ValueNumberScope(ValueNumberScope* p) : parent(p) { }
  };

  class GVN : public FunctionPass {
    bool runOnFunction(Function &F);
  public:
    static char ID; // Pass identification, replacement for typeid
    explicit GVN(bool noloads = false)
        : FunctionPass(ID), NoLoads(noloads), MD(0) {
      initializeGVNPass(*PassRegistry::getPassRegistry());
    }

  private:
    bool NoLoads;
    MemoryDependenceAnalysis *MD;
    DominatorTree *DT;
    const TargetData* TD;

    ValueTable VN;

    /// NumberTable - A mapping from value numbers to lists of Value*'s that
    /// have that value number. Use lookupNumber to query it.
    DenseMap<uint32_t, std::pair<Value*, void*> > NumberTable;
    BumpPtrAllocator TableAllocator;

    /// insert_table - Push a new Value to the NumberTable onto the list for
    /// its value number.
    void insert_table(uint32_t N, Value *V) {
      std::pair<Value*, void*>& Curr = NumberTable[N];
      if (!Curr.first) {
        Curr.first = V;
        return;
      }
      std::pair<Value*, void*>* Node =
        TableAllocator.Allocate<std::pair<Value*, void*> >();
      Node->first = Curr.first;
      Node->second = Curr.second;
      Curr.first = V;
      Curr.second = Node;
    }

    /// erase_table - Scan the list of values corresponding to a given value
    /// number, and remove the given value if encountered.
    void erase_table(uint32_t N, Value *V) {
      std::pair<Value*, void*>* Prev = 0;
      std::pair<Value*, void*>* Curr = &NumberTable[N];
      while (Curr->first != V) {
        Prev = Curr;
        Curr = static_cast<std::pair<Value*, void*>*>(Curr->second);
      }
      if (Prev) {
        Prev->second = Curr->second;
      } else if (!Curr->second) {
        Curr->first = 0;
      } else {
        std::pair<Value*, void*>* Next =
          static_cast<std::pair<Value*, void*>*>(Curr->second);
        Curr->first = Next->first;
        Curr->second = Next->second;
      }
    }
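
    // The two helpers above maintain a hand-rolled singly-linked list: each
    // node is a (Value*, next) pair whose head lives inline in the DenseMap
    // and whose tail nodes come from TableAllocator. For example, after
    // insert_table(5, A) and insert_table(5, B), NumberTable[5] holds
    // (B, node) where node is (A, null); erase_table(5, B) then splices the
    // head back to (A, null).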

    // List of critical edges to be split between iterations.
    SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit;

    // This transformation requires dominator and memory dependence info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      if (!NoLoads)
        AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();

      AU.addPreserved<DominatorTree>();
      AU.addPreserved<AliasAnalysis>();
    }

    // FIXME: eliminate or document these better
    bool processLoad(LoadInst* L,
                     SmallVectorImpl<Instruction*> &toErase);
    bool processInstruction(Instruction *I,
                            SmallVectorImpl<Instruction*> &toErase);
    bool processNonLocalLoad(LoadInst* L,
                             SmallVectorImpl<Instruction*> &toErase);
    bool processBlock(BasicBlock *BB);
    void dump(DenseMap<uint32_t, Value*>& d);
    bool iterateOnFunction(Function &F);
    bool performPRE(Function& F);
    Value *lookupNumber(BasicBlock *BB, uint32_t num);
    void cleanupGlobalSets();
    void verifyRemoved(const Instruction *I) const;
    bool splitCriticalEdges();
  };

  char GVN::ID = 0;
}

// createGVNPass - The public interface to this file...
FunctionPass *llvm::createGVNPass(bool NoLoads) {
  return new GVN(NoLoads);
}

INITIALIZE_PASS_BEGIN(GVN, "gvn", "Global Value Numbering", false, false)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(GVN, "gvn", "Global Value Numbering", false, false)

void GVN::dump(DenseMap<uint32_t, Value*>& d) {
  errs() << "{\n";
  for (DenseMap<uint32_t, Value*>::iterator I = d.begin(),
       E = d.end(); I != E; ++I) {
    errs() << I->first << "\n";
    I->second->dump();
  }
  errs() << "}\n";
}

/// IsValueFullyAvailableInBlock - Return true if we can prove that the value
/// we're analyzing is fully available in the specified block. As we go, keep
/// track of which blocks we know are fully alive in FullyAvailableBlocks. This
/// map is actually a four-state map with the following values:
///   0) we know the block *is not* fully available.
///   1) we know the block *is* fully available.
///   2) we do not know whether the block is fully available or not, but we are
///      currently speculating that it will be.
///   3) we are speculating for this block and have used that to speculate for
///      other blocks.
static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
                            DenseMap<BasicBlock*, char> &FullyAvailableBlocks) {
  // Optimistically assume that the block is fully available and check to see
  // if we already know about this block in one lookup.
  std::pair<DenseMap<BasicBlock*, char>::iterator, char> IV =
    FullyAvailableBlocks.insert(std::make_pair(BB, 2));

  // If the entry already existed for this block, return the precomputed value.
  if (!IV.second) {
    // If this is a speculative "available" value, mark it as being used for
    // speculation of other blocks.
    if (IV.first->second == 2)
      IV.first->second = 3;
    return IV.first->second != 0;
  }

  // Otherwise, see if it is fully available in all predecessors.
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);

  // If this block has no predecessors, it isn't live-in here.
  if (PI == PE)
    goto SpeculationFailure;

  for (; PI != PE; ++PI)
    // If the value isn't fully available in one of our predecessors, then it
    // isn't fully available in this block either. Undo our previous
    // optimistic assumption and bail out.
    if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
      goto SpeculationFailure;

  return true;

// SpeculationFailure - If we get here, we found out that this is not, after
// all, a fully-available block. We have a problem if we speculated on this and
// used the speculation to mark other blocks as available.
SpeculationFailure:
  char &BBVal = FullyAvailableBlocks[BB];

  // If we didn't speculate on this, just return with it set to false.
  if (BBVal == 2) {
    BBVal = 0;
    return false;
  }

  // If we did speculate on this value, we could have blocks set to 1 that are
  // incorrect. Walk the (transitive) successors of this block and mark them as
  // 0 if set to one.
  SmallVector<BasicBlock*, 32> BBWorklist;
  BBWorklist.push_back(BB);

  do {
    BasicBlock *Entry = BBWorklist.pop_back_val();
    // Note that this sets blocks to 0 (unavailable) if they happen to not
    // already be in FullyAvailableBlocks. This is safe.
    char &EntryVal = FullyAvailableBlocks[Entry];
    if (EntryVal == 0) continue; // Already unavailable.

    // Mark as unavailable.
    EntryVal = 0;

    for (succ_iterator I = succ_begin(Entry), E = succ_end(Entry); I != E; ++I)
      BBWorklist.push_back(*I);
  } while (!BBWorklist.empty());

  return false;
}
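
// Roughly why the failure path must walk successors: a query on some block A
// optimistically marks A as state 2, and other blocks' answers may then be
// derived from that speculation (A becomes state 3). If A later proves
// unavailable, the worklist walk flips everything the speculation reached
// back to 0, so no stale "available" answers survive in the map.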

/// CanCoerceMustAliasedValueToLoad - Return true if
/// CoerceAvailableValueToLoadType will succeed.
static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
                                            const Type *LoadTy,
                                            const TargetData &TD) {
  // If the loaded or stored value is a first-class array or struct, don't try
  // to transform them. We need to be able to bitcast to integer.
  if (LoadTy->isStructTy() || LoadTy->isArrayTy() ||
      StoredVal->getType()->isStructTy() ||
      StoredVal->getType()->isArrayTy())
    return false;

  // The store has to be at least as big as the load.
  if (TD.getTypeSizeInBits(StoredVal->getType()) <
      TD.getTypeSizeInBits(LoadTy))
    return false;

  return true;
}

/// CoerceAvailableValueToLoadType - If we saw a store of a value to memory, and
/// then a load from a must-aliased pointer of a different type, try to coerce
/// the stored value. LoadedTy is the type of the load we want to replace and
/// InsertPt is the place to insert new instructions.
///
/// If we can't do it, return null.
static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
                                             const Type *LoadedTy,
                                             Instruction *InsertPt,
                                             const TargetData &TD) {
  if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD))
    return 0;

  const Type *StoredValTy = StoredVal->getType();

  uint64_t StoreSize = TD.getTypeStoreSizeInBits(StoredValTy);
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadedTy);

  // If the store and reload are the same size, we can always reuse it.
  if (StoreSize == LoadSize) {
    if (StoredValTy->isPointerTy() && LoadedTy->isPointerTy()) {
      // Pointer to Pointer -> use bitcast.
      return new BitCastInst(StoredVal, LoadedTy, "", InsertPt);
    }

    // Convert source pointers to integers, which can be bitcast.
    if (StoredValTy->isPointerTy()) {
      StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
      StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
    }

    const Type *TypeToCastTo = LoadedTy;
    if (TypeToCastTo->isPointerTy())
      TypeToCastTo = TD.getIntPtrType(StoredValTy->getContext());

    if (StoredValTy != TypeToCastTo)
      StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);

    // Cast to pointer if the load needs a pointer type.
    if (LoadedTy->isPointerTy())
      StoredVal = new IntToPtrInst(StoredVal, LoadedTy, "", InsertPt);

    return StoredVal;
  }

  // If the loaded value is smaller than the available value, then we can
  // extract out a piece from it. If the available value is too small, then we
  // can't do anything.
  assert(StoreSize >= LoadSize && "CanCoerceMustAliasedValueToLoad fail");

  // Convert source pointers to integers, which can be manipulated.
  if (StoredValTy->isPointerTy()) {
    StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
    StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // Convert vectors and fp to integer, which can be manipulated.
  if (!StoredValTy->isIntegerTy()) {
    StoredValTy = IntegerType::get(StoredValTy->getContext(), StoreSize);
    StoredVal = new BitCastInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // If this is a big-endian system, we need to shift the value down to the low
  // bits so that a truncate will work.
  if (TD.isBigEndian()) {
    Constant *Val = ConstantInt::get(StoredVal->getType(), StoreSize-LoadSize);
    StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", InsertPt);
  }

  // Truncate the integer to the right size now.
  const Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize);
  StoredVal = new TruncInst(StoredVal, NewIntTy, "trunc", InsertPt);

  if (LoadedTy == NewIntTy)
    return StoredVal;

  // If the result is a pointer, inttoptr.
  if (LoadedTy->isPointerTy())
    return new IntToPtrInst(StoredVal, LoadedTy, "inttoptr", InsertPt);

  // Otherwise, bitcast.
  return new BitCastInst(StoredVal, LoadedTy, "bitcast", InsertPt);
}
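
// A few coercions this routine can produce, assuming TargetData describes a
// 64-bit little-endian target (illustrative, not exhaustive):
//   i32 stored, float loaded  ->  bitcast i32 to float  (same size)
//   i8* stored, i64 loaded    ->  ptrtoint i8* to i64   (same size)
//   i64 stored, i32 loaded    ->  trunc i64 to i32      (load smaller)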

/// AnalyzeLoadFromClobberingWrite - This function is called when we have a
/// memdep query of a load that ends up being a clobbering memory write (store,
/// memset, memcpy, memmove). This means that the write *may* provide bits used
/// by the load but we can't be sure because the pointers don't must-alias.
///
/// Check this case to see if there is anything more we can do before we give
/// up. This returns -1 if we have to give up, or a byte number in the stored
/// value of the piece that feeds the load.
static int AnalyzeLoadFromClobberingWrite(const Type *LoadTy, Value *LoadPtr,
                                          Value *WritePtr,
                                          uint64_t WriteSizeInBits,
                                          const TargetData &TD) {
  // If the loaded or stored value is a first-class array or struct, don't try
  // to transform them. We need to be able to bitcast to integer.
  if (LoadTy->isStructTy() || LoadTy->isArrayTy())
    return -1;

  int64_t StoreOffset = 0, LoadOffset = 0;
  Value *StoreBase = GetPointerBaseWithConstantOffset(WritePtr, StoreOffset,TD);
  Value *LoadBase = GetPointerBaseWithConstantOffset(LoadPtr, LoadOffset, TD);
  if (StoreBase != LoadBase)
    return -1;

  // If the load and store are to the exact same address, they should have been
  // a must alias. AA must have gotten confused.
  // FIXME: Study to see if/when this happens. One case is forwarding a memset
  // to a load from the base of the memset.
#if 0
  if (LoadOffset == StoreOffset) {
    dbgs() << "STORE/LOAD DEP WITH COMMON POINTER MISSED:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
  }
#endif

  // If the load and store don't overlap at all, the store doesn't provide
  // anything to the load. In this case, they really don't alias at all, AA
  // must have gotten confused.
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy);

  if ((WriteSizeInBits & 7) | (LoadSize & 7))
    return -1;
  uint64_t StoreSize = WriteSizeInBits >> 3; // Convert to bytes.
  LoadSize >>= 3;

  bool isAAFailure = false;
  if (StoreOffset < LoadOffset)
    isAAFailure = StoreOffset+int64_t(StoreSize) <= LoadOffset;
  else
    isAAFailure = LoadOffset+int64_t(LoadSize) <= StoreOffset;

  if (isAAFailure) {
#if 0
    dbgs() << "STORE LOAD DEP WITH COMMON BASE:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *LoadPtr << "\n";
#endif
    return -1;
  }

  // If the Load isn't completely contained within the stored bits, we don't
  // have all the bits to feed it. We could do something crazy in the future
  // (issue a smaller load then merge the bits in) but this seems unlikely to be
  // valuable.
  if (StoreOffset > LoadOffset ||
      StoreOffset+StoreSize < LoadOffset+LoadSize)
    return -1;

  // Okay, we can do this transformation. Return the number of bytes into the
  // store that the load is.
  return LoadOffset-StoreOffset;
}
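
// Worked example: a store of an i64 at byte offset 0 from some base clobbers
// an i32 load at byte offset 4 from the same base. The load lies entirely
// within the stored bits, so this returns 4; GetStoreValueForLoad can then
// materialize the value with a shift and truncate instead of reloading.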

/// AnalyzeLoadFromClobberingStore - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.
static int AnalyzeLoadFromClobberingStore(const Type *LoadTy, Value *LoadPtr,
                                          StoreInst *DepSI,
                                          const TargetData &TD) {
  // Cannot handle reading from store of first-class aggregate yet.
  if (DepSI->getValueOperand()->getType()->isStructTy() ||
      DepSI->getValueOperand()->getType()->isArrayTy())
    return -1;

  Value *StorePtr = DepSI->getPointerOperand();
  uint64_t StoreSize =TD.getTypeSizeInBits(DepSI->getValueOperand()->getType());
  return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                        StorePtr, StoreSize, TD);
}

static int AnalyzeLoadFromClobberingMemInst(const Type *LoadTy, Value *LoadPtr,
                                            MemIntrinsic *MI,
                                            const TargetData &TD) {
  // If the mem operation is a non-constant size, we can't handle it.
  ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
  if (SizeCst == 0) return -1;
  uint64_t MemSizeInBits = SizeCst->getZExtValue()*8;

  // If this is memset, we just need to see if the offset is valid in the size
  // of the memset.
  if (MI->getIntrinsicID() == Intrinsic::memset)
    return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
                                          MemSizeInBits, TD);

  // If we have a memcpy/memmove, the only case we can handle is if this is a
  // copy from constant memory. In that case, we can read directly from the
  // constant memory.
  MemTransferInst *MTI = cast<MemTransferInst>(MI);

  Constant *Src = dyn_cast<Constant>(MTI->getSource());
  if (Src == 0) return -1;

  GlobalVariable *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(Src));
  if (GV == 0 || !GV->isConstant()) return -1;

  // See if the access is within the bounds of the transfer.
  int Offset = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                              MI->getDest(), MemSizeInBits, TD);
  if (Offset == -1)
    return Offset;

  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  if (ConstantFoldLoadFromConstPtr(Src, &TD))
    return Offset;
  return -1;
}
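
// Illustrative case for the memcpy path: with
//   @g = constant [8 x i8] c"\01\02\03\04\05\06\07\08"
// and a memcpy of @g into %buf, a later i32 load from inside %buf can be
// satisfied by constant-folding a load at the equivalent offset into @g, so
// the computed Offset is returned and no forwarding code needs to be emitted.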

/// GetStoreValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store. This means
/// that the store *may* provide bits used by the load but we can't be sure
/// because the pointers don't must-alias. Check this case to see if there is
/// anything more we can do before we give up.
static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
                                   const Type *LoadTy,
                                   Instruction *InsertPt, const TargetData &TD){
  LLVMContext &Ctx = SrcVal->getType()->getContext();

  uint64_t StoreSize = (TD.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;
  uint64_t LoadSize = (TD.getTypeSizeInBits(LoadTy) + 7) / 8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // Compute which bits of the stored value are being used by the load. Convert
  // to an integer type to start with.
  if (SrcVal->getType()->isPointerTy())
    SrcVal = Builder.CreatePtrToInt(SrcVal, TD.getIntPtrType(Ctx), "tmp");
  if (!SrcVal->getType()->isIntegerTy())
    SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize*8),
                                   "tmp");

  // Shift the bits to the least significant depending on endianness.
  unsigned ShiftAmt;
  if (TD.isLittleEndian())
    ShiftAmt = Offset*8;
  else
    ShiftAmt = (StoreSize-LoadSize-Offset)*8;

  if (ShiftAmt)
    SrcVal = Builder.CreateLShr(SrcVal, ShiftAmt, "tmp");

  if (LoadSize != StoreSize)
    SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx, LoadSize*8),
                                 "tmp");

  return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD);
}

/// GetMemInstValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering mem intrinsic.
static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
                                     const Type *LoadTy, Instruction *InsertPt,
                                     const TargetData &TD){
  LLVMContext &Ctx = LoadTy->getContext();
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // We know that this method is only called when the mem transfer fully
  // provides the bits for the load.
  if (MemSetInst *MSI = dyn_cast<MemSetInst>(SrcInst)) {
    // memset(P, 'x', 1234) -> splat('x'), even if x is a variable, and
    // independently of what the offset is.
    Value *Val = MSI->getValue();
    if (LoadSize != 1)
      Val = Builder.CreateZExt(Val, IntegerType::get(Ctx, LoadSize*8));

    Value *OneElt = Val;

    // Splat the value out to the right number of bits.
    for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize; ) {
      // If we can double the number of bytes set, do it.
      if (NumBytesSet*2 <= LoadSize) {
        Value *ShVal = Builder.CreateShl(Val, NumBytesSet*8);
        Val = Builder.CreateOr(Val, ShVal);
        NumBytesSet <<= 1;
        continue;
      }

      // Otherwise insert one byte at a time.
      Value *ShVal = Builder.CreateShl(Val, 1*8);
      Val = Builder.CreateOr(OneElt, ShVal);
      ++NumBytesSet;
    }

    return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, TD);
  }
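
  // Example for the path above: forwarding from "memset(P, 0xAB, 1234)" to an
  // i32 load of P zero-extends the byte and splats it, 0xAB -> 0xABAB ->
  // 0xABABABAB, then hands the result to CoerceAvailableValueToLoadType.
  // This works even when the memset value is a variable, because the splat is
  // built out of shifts and ors rather than constant folding.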

  // Otherwise, this is a memcpy/memmove from a constant global.
  MemTransferInst *MTI = cast<MemTransferInst>(SrcInst);
  Constant *Src = cast<Constant>(MTI->getSource());

  // See if we can constant fold a load from the constant with the offset
  // applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  return ConstantFoldLoadFromConstPtr(Src, &TD);
}

namespace {

struct AvailableValueInBlock {
  /// BB - The basic block in question.
  BasicBlock *BB;

  enum ValType {
    SimpleVal, // A simple offsetted value that is accessed.
    MemIntrin  // A memory intrinsic which is loaded from.
  };

  /// V - The value that is live out of the block.
  PointerIntPair<Value *, 1, ValType> Val;

  /// Offset - The byte offset in Val that is interesting for the load query.
  unsigned Offset;

  static AvailableValueInBlock get(BasicBlock *BB, Value *V,
                                   unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(V);
    Res.Val.setInt(SimpleVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValueInBlock getMI(BasicBlock *BB, MemIntrinsic *MI,
                                     unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(MI);
    Res.Val.setInt(MemIntrin);
    Res.Offset = Offset;
    return Res;
  }

  bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
  Value *getSimpleValue() const {
    assert(isSimpleValue() && "Wrong accessor");
    return Val.getPointer();
  }

  MemIntrinsic *getMemIntrinValue() const {
    assert(!isSimpleValue() && "Wrong accessor");
    return cast<MemIntrinsic>(Val.getPointer());
  }

  /// MaterializeAdjustedValue - Emit code into this block to adjust the value
  /// defined here to the specified type. This handles various coercion cases.
  Value *MaterializeAdjustedValue(const Type *LoadTy,
                                  const TargetData *TD) const {
    Value *Res;
    if (isSimpleValue()) {
      Res = getSimpleValue();
      if (Res->getType() != LoadTy) {
        assert(TD && "Need target data to handle type mismatch case");
        Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(),
                                   *TD);

        DEBUG(errs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << " "
                     << *getSimpleValue() << '\n'
                     << *Res << '\n' << "\n\n\n");
      }
    } else {
      Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset,
                                   LoadTy, BB->getTerminator(), *TD);
      DEBUG(errs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
                   << " " << *getMemIntrinValue() << '\n'
                   << *Res << '\n' << "\n\n\n");
    }
    return Res;
  }
};

}

/// ConstructSSAForLoadSet - Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate LI. This returns the value
/// that should be used at LI's definition site.
static Value *ConstructSSAForLoadSet(LoadInst *LI,
                         SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
                                     const TargetData *TD,
                                     const DominatorTree &DT,
                                     AliasAnalysis *AA) {
  // Check for the fully redundant, dominating load case. In this case, we can
  // just use the dominating value directly.
  if (ValuesPerBlock.size() == 1 &&
      DT.properlyDominates(ValuesPerBlock[0].BB, LI->getParent()))
    return ValuesPerBlock[0].MaterializeAdjustedValue(LI->getType(), TD);

  // Otherwise, we have to construct SSA form.
  SmallVector<PHINode*, 8> NewPHIs;
  SSAUpdater SSAUpdate(&NewPHIs);
  SSAUpdate.Initialize(LI->getType(), LI->getName());

  const Type *LoadTy = LI->getType();

  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
    const AvailableValueInBlock &AV = ValuesPerBlock[i];
    BasicBlock *BB = AV.BB;

    if (SSAUpdate.HasValueForBlock(BB))
      continue;

    SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LoadTy, TD));
  }

  // Perform PHI construction.
  Value *V = SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());

  // If new PHI nodes were created, notify alias analysis.
  if (V->getType()->isPointerTy())
    for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
      AA->copyValue(LI, NewPHIs[i]);

  return V;
}
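
// Example: if the loaded block has two predecessors, one reached through
// "store i32 %a, i32* %p" and the other through an earlier "%b = load i32* %p",
// SSAUpdater inserts "phi i32 [ %a, ... ], [ %b, ... ]" at the join, and the
// caller replaces the original load with that PHI.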

static bool isLifetimeStart(const Instruction *Inst) {
  if (const IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
    return II->getIntrinsicID() == Intrinsic::lifetime_start;
  return false;
}

/// processNonLocalLoad - Attempt to eliminate a load whose dependencies are
/// non-local by performing PHI construction.
bool GVN::processNonLocalLoad(LoadInst *LI,
                              SmallVectorImpl<Instruction*> &toErase) {
  // Find the non-local dependencies of the load.
  SmallVector<NonLocalDepResult, 64> Deps;
  AliasAnalysis::Location Loc = VN.getAliasAnalysis()->getLocation(LI);
  MD->getNonLocalPointerDependency(Loc, true, LI->getParent(), Deps);
  //DEBUG(dbgs() << "INVESTIGATING NONLOCAL LOAD: "
  //             << Deps.size() << *LI << '\n');

  // If we had to process more than one hundred blocks to find the
  // dependencies, this load isn't worth worrying about. Optimizing
  // it will be too expensive.
  if (Deps.size() > 100)
    return false;

  // If we had a phi translation failure, we'll have a single entry which is a
  // clobber in the current block. Reject this early.
  if (Deps.size() == 1 && Deps[0].getResult().isClobber()) {
    DEBUG(
      dbgs() << "GVN: non-local load ";
      WriteAsOperand(dbgs(), LI);
      dbgs() << " is clobbered by " << *Deps[0].getResult().getInst() << '\n';
    );
    return false;
  }

  // Filter out useless results (non-locals, etc). Keep track of the blocks
  // where we have a value available in repl, also keep track of whether we see
  // dependencies that produce an unknown value for the load (such as a call
  // that could potentially clobber the load).
  SmallVector<AvailableValueInBlock, 16> ValuesPerBlock;
  SmallVector<BasicBlock*, 16> UnavailableBlocks;

  for (unsigned i = 0, e = Deps.size(); i != e; ++i) {
    BasicBlock *DepBB = Deps[i].getBB();
    MemDepResult DepInfo = Deps[i].getResult();

    if (DepInfo.isClobber()) {
      // The address being loaded in this non-local block may not be the same as
      // the pointer operand of the load if PHI translation occurs. Make sure
      // to consider the right address.
      Value *Address = Deps[i].getAddress();

      // If the dependence is to a store that writes to a superset of the bits
      // read by the load, we can extract the bits we need for the load from the
      // stored value.
      if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
        if (TD && Address) {
          int Offset = AnalyzeLoadFromClobberingStore(LI->getType(), Address,
                                                      DepSI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                      DepSI->getValueOperand(),
                                                                Offset));
            continue;
          }
        }
      }

      // If the clobbering value is a memset/memcpy/memmove, see if we can
      // forward a value on from it.
      if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
        if (TD && Address) {
          int Offset = AnalyzeLoadFromClobberingMemInst(LI->getType(), Address,
                                                        DepMI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI,
                                                                  Offset));
            continue;
          }
        }
      }

      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    Instruction *DepInst = DepInfo.getInst();

    // Loading the allocation -> undef.
    if (isa<AllocaInst>(DepInst) || isMalloc(DepInst) ||
        // Loading immediately after lifetime begin -> undef.
        isLifetimeStart(DepInst)) {
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                             UndefValue::get(LI->getType())));
      continue;
    }

    if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
      // Reject loads and stores that are to the same address but are of
      // different types if we have to.
      if (S->getValueOperand()->getType() != LI->getType()) {
        // If the stored value is larger or equal to the loaded value, we can
        // reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(S->getValueOperand(),
                                                        LI->getType(), *TD)) {
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }

      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                        S->getValueOperand()));
      continue;
    }

    if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
      // If the types mismatch and we can't handle it, reject reuse of the load.
      if (LD->getType() != LI->getType()) {
        // If the stored value is larger or equal to the loaded value, we can
        // reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),*TD)){
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, LD));
      continue;
    }

    UnavailableBlocks.push_back(DepBB);
    continue;
  }

  // If we have no predecessors that produce a known value for this load, exit
  // early.
  if (ValuesPerBlock.empty()) return false;

  // If all of the instructions we depend on produce a known value for this
  // load, then it is fully redundant and we can use PHI insertion to compute
  // its value. Insert PHIs and remove the fully redundant value now.
  if (UnavailableBlocks.empty()) {
    DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');

    // Perform PHI construction.
    Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT,
                                      VN.getAliasAnalysis());
    LI->replaceAllUsesWith(V);

    if (isa<PHINode>(V))
      V->takeName(LI);
    if (V->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(V);
    VN.erase(LI);
    toErase.push_back(LI);
    ++NumGVNLoad;
    return true;
  }

  if (!EnablePRE || !EnableLoadPRE)
    return false;

  // Okay, we have *some* definitions of the value. This means that the value
  // is available in some of our (transitive) predecessors. Let's think about
  // doing PRE of this load. This will involve inserting a new load into the
  // predecessor when it's not available. We could do this in general, but
  // prefer to not increase code size. As such, we only do this when we know
  // that we only have to insert *one* load (which means we're basically moving
  // the load, not inserting a new one).

  SmallPtrSet<BasicBlock *, 4> Blockers;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    Blockers.insert(UnavailableBlocks[i]);

  // Let's find the first basic block with more than one predecessor. Walk
  // backwards through predecessors if needed.
  BasicBlock *LoadBB = LI->getParent();
  BasicBlock *TmpBB = LoadBB;

  bool isSinglePred = false;
  bool allSingleSucc = true;
  while (TmpBB->getSinglePredecessor()) {
    isSinglePred = true;
    TmpBB = TmpBB->getSinglePredecessor();
    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
      return false;
    if (Blockers.count(TmpBB))
      return false;

    // If any of these blocks has more than one successor (i.e. if the edge we
    // just traversed was critical), then there are other paths through this
    // block along which the load may not be anticipated. Hoisting the load
    // above this block would be adding the load to execution paths along
    // which it was not previously executed.
    if (TmpBB->getTerminator()->getNumSuccessors() != 1)
      return false;
  }

  assert(TmpBB);
  LoadBB = TmpBB;

  // FIXME: It is extremely unclear what this loop is doing, other than
  // artificially restricting loadpre.
  if (isSinglePred) {
    bool isHot = false;
    for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
      const AvailableValueInBlock &AV = ValuesPerBlock[i];
      if (AV.isSimpleValue())
        // "Hot" Instruction is in some loop (because it dominates its dep.
        // instruction).
        if (Instruction *I = dyn_cast<Instruction>(AV.getSimpleValue()))
          if (DT->dominates(LI, I)) {
            isHot = true;
            break;
          }
    }

    // We are interested only in "hot" instructions. We don't want to do any
    // mis-optimizations here.
    if (!isHot)
      return false;
  }

  // Check to see how many predecessors have the loaded value fully
  // available.
  DenseMap<BasicBlock*, Value*> PredLoads;
  DenseMap<BasicBlock*, char> FullyAvailableBlocks;
  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
    FullyAvailableBlocks[ValuesPerBlock[i].BB] = true;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    FullyAvailableBlocks[UnavailableBlocks[i]] = false;

  SmallVector<std::pair<TerminatorInst*, unsigned>, 4> NeedToSplit;
  for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB);
       PI != E; ++PI) {
    BasicBlock *Pred = *PI;
    if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks))
      continue;
    PredLoads[Pred] = 0;

    if (Pred->getTerminator()->getNumSuccessors() != 1) {
      if (isa<IndirectBrInst>(Pred->getTerminator())) {
        DEBUG(dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
              << Pred->getName() << "': " << *LI << '\n');
        return false;
      }
      unsigned SuccNum = GetSuccessorNumber(Pred, LoadBB);
      NeedToSplit.push_back(std::make_pair(Pred->getTerminator(), SuccNum));
    }
  }
  if (!NeedToSplit.empty()) {
    toSplit.append(NeedToSplit.begin(), NeedToSplit.end());
    return false;
  }

  // Decide whether PRE is profitable for this load.
  unsigned NumUnavailablePreds = PredLoads.size();
  assert(NumUnavailablePreds != 0 &&
         "Fully available value should be eliminated above!");

  // If this load is unavailable in multiple predecessors, reject it.
  // FIXME: If we could restructure the CFG, we could make a common pred with
  // all the preds that don't have an available LI and insert a new load into
  // that one block.
  if (NumUnavailablePreds != 1)
    return false;

  // Check if the load can safely be moved to all the unavailable predecessors.
  bool CanDoPRE = true;
  SmallVector<Instruction*, 8> NewInsts;
  for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
         E = PredLoads.end(); I != E; ++I) {
    BasicBlock *UnavailablePred = I->first;

    // Do PHI translation to get its value in the predecessor if necessary. The
    // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.

    // If all preds have a single successor, then we know it is safe to insert
    // the load on the pred (?!?), so we can insert code to materialize the
    // pointer if it is not available.
    PHITransAddr Address(LI->getPointerOperand(), TD);
    Value *LoadPtr = 0;
    if (allSingleSucc) {
      LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
                                                  *DT, NewInsts);
    } else {
      Address.PHITranslateValue(LoadBB, UnavailablePred, DT);
      LoadPtr = Address.getAddr();
    }

    // If we couldn't find or insert a computation of this phi translated value,
    // we fail PRE.
    if (LoadPtr == 0) {
      DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
            << *LI->getPointerOperand() << "\n");
      CanDoPRE = false;
      break;
    }

    // Make sure it is valid to move this load here. We have to watch out for:
    //  @1 = getelementptr (i8* p, ...
    //  test p and branch if == 0
    //  load @1
    // It is valid to have the getelementptr before the test, even if p can be 0,
    // as getelementptr only does address arithmetic.
    // If we are not pushing the value through any multiple-successor blocks
    // we do not have this case. Otherwise, check that the load is safe to
    // put anywhere; this can be improved, but should be conservatively safe.
    if (!allSingleSucc &&
        // FIXME: REEVALUATE THIS.
        !isSafeToLoadUnconditionally(LoadPtr,
                                     UnavailablePred->getTerminator(),
                                     LI->getAlignment(), TD)) {
      CanDoPRE = false;
      break;
    }

    I->second = LoadPtr;
  }

  if (!CanDoPRE) {
    while (!NewInsts.empty())
      NewInsts.pop_back_val()->eraseFromParent();
    return false;
  }

  // Okay, we can eliminate this load by inserting a reload in the predecessor
  // and using PHI construction to get the value in the other predecessors, do
  // it.
  DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
  DEBUG(if (!NewInsts.empty())
          dbgs() << "INSERTED " << NewInsts.size() << " INSTS: "
                 << *NewInsts.back() << '\n');

  // Assign value numbers to the new instructions.
  for (unsigned i = 0, e = NewInsts.size(); i != e; ++i) {
    // FIXME: We really _ought_ to insert these value numbers into their
    // parent's availability map. However, in doing so, we risk getting into
    // ordering issues. If a block hasn't been processed yet, we would be
    // marking a value as AVAIL-IN, which isn't what we intend.
    VN.lookup_or_add(NewInsts[i]);
  }

  for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
         E = PredLoads.end(); I != E; ++I) {
    BasicBlock *UnavailablePred = I->first;
    Value *LoadPtr = I->second;

    Instruction *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
                                        LI->getAlignment(),
                                        UnavailablePred->getTerminator());

    // Transfer the old load's TBAA tag to the new load.
    if (MDNode *Tag = LI->getMetadata(LLVMContext::MD_tbaa))
      NewLoad->setMetadata(LLVMContext::MD_tbaa, Tag);

    // Add the newly created load.
    ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,
                                                        NewLoad));
    MD->invalidateCachedPointerInfo(LoadPtr);
    DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
  }

  // Perform PHI construction.
  Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT,
                                    VN.getAliasAnalysis());
  LI->replaceAllUsesWith(V);
  if (isa<PHINode>(V))
    V->takeName(LI);
  if (V->getType()->isPointerTy())
    MD->invalidateCachedPointerInfo(V);
  VN.erase(LI);
  toErase.push_back(LI);
  ++NumPRELoad;
  return true;
}

/// processLoad - Attempt to eliminate a load, first by eliminating it
/// locally, and then attempting non-local elimination if that fails.
bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
  if (!MD)
    return false;

  if (L->isVolatile())
    return false;

  // ... to a pointer that has been loaded from before...
  MemDepResult Dep = MD->getDependency(L);

  // If the value isn't available, don't do anything!
  if (Dep.isClobber()) {
    // Check to see if we have something like this:
    //   store i32 123, i32* %P
    //   %A = bitcast i32* %P to i8*
    //   %B = gep i8* %A, i32 1
    //   %C = load i8* %B
    //
    // We could do that by recognizing if the clobber instructions are obviously
    // a common base + constant offset, and if the previous store (or memset)
    // completely covers this load. This sort of thing can happen in bitfield
    // access code.
    Value *AvailVal = 0;
    if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst()))
      if (TD) {
        int Offset = AnalyzeLoadFromClobberingStore(L->getType(),
                                                    L->getPointerOperand(),
                                                    DepSI, *TD);
        if (Offset != -1)
          AvailVal = GetStoreValueForLoad(DepSI->getValueOperand(), Offset,
                                          L->getType(), L, *TD);
      }

    // If the clobbering value is a memset/memcpy/memmove, see if we can
    // forward a value on from it.
    if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) {
      if (TD) {
        int Offset = AnalyzeLoadFromClobberingMemInst(L->getType(),
                                                      L->getPointerOperand(),
                                                      DepMI, *TD);
        if (Offset != -1)
          AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(),
                                            L, *TD);
      }
    }

    if (AvailVal) {
      DEBUG(dbgs() << "GVN COERCED INST:\n" << *Dep.getInst() << '\n'
                   << *AvailVal << '\n' << *L << "\n\n\n");

      // Replace the load!
      L->replaceAllUsesWith(AvailVal);
      if (AvailVal->getType()->isPointerTy())
        MD->invalidateCachedPointerInfo(AvailVal);
      VN.erase(L);
      toErase.push_back(L);
      ++NumGVNLoad;
      return true;
    }

    DEBUG(
      // fast print dep, using operator<< on instruction would be too slow
      dbgs() << "GVN: load ";
      WriteAsOperand(dbgs(), L);
      Instruction *I = Dep.getInst();
      dbgs() << " is clobbered by " << *I << '\n';
    );
    return false;
  }

  // If it is defined in another block, try harder.
  if (Dep.isNonLocal())
    return processNonLocalLoad(L, toErase);

  Instruction *DepInst = Dep.getInst();
  if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
    Value *StoredVal = DepSI->getValueOperand();

    // The store and load are to a must-aliased pointer, but they may not
    // actually have the same type. See if we know how to reuse the stored
    // value (depending on its type).
    if (StoredVal->getType() != L->getType()) {
      if (TD) {
        StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(),
                                                   L, *TD);
        if (StoredVal == 0)
          return false;

        DEBUG(dbgs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
                     << '\n' << *L << "\n\n\n");
      }
      else
        return false;
    }

    // Remove it!
    L->replaceAllUsesWith(StoredVal);
    if (StoredVal->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(StoredVal);
    VN.erase(L);
    toErase.push_back(L);
    ++NumGVNLoad;
    return true;
  }

  if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
    Value *AvailableVal = DepLI;

    // The loads are of a must-aliased pointer, but they may not actually have
    // the same type. See if we know how to reuse the previously loaded value
    // (depending on its type).
    if (DepLI->getType() != L->getType()) {
      if (TD) {
        AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(),
                                                      L, *TD);
        if (AvailableVal == 0)
          return false;

        DEBUG(dbgs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal
                     << "\n" << *L << "\n\n\n");
      }
      else
        return false;
    }

    // Remove it!
    L->replaceAllUsesWith(AvailableVal);
    if (DepLI->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(DepLI);
    VN.erase(L);
    toErase.push_back(L);
    ++NumGVNLoad;
    return true;
  }
  // If this load really doesn't depend on anything, then we must be loading an
  // undef value.  This can happen when loading from a fresh allocation with no
  // intervening stores, for example.
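  //
  // For illustration only (assumed example, not from the original source):
  //   %p = alloca i32
  //   %v = load i32* %p        ; no intervening store
  // %v reads uninitialized memory, so it is replaced with undef below.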
  if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) {
    L->replaceAllUsesWith(UndefValue::get(L->getType()));
    VN.erase(L);
    toErase.push_back(L);
    ++NumGVNLoad;
    return true;
  }

  // If this load occurs right after a lifetime begin, the loaded value is
  // undefined as well.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(DepInst)) {
    if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
      L->replaceAllUsesWith(UndefValue::get(L->getType()));
      VN.erase(L);
      toErase.push_back(L);
      ++NumGVNLoad;
      return true;
    }
  }

  return false;
}
// lookupNumber - In order to find a leader for a given value number at a
// specific basic block, we first obtain the list of all Values for that number,
// and then scan the list to find one whose block dominates the block in
// question.  This is fast because dominator tree queries consist of only
// a few comparisons of DFS numbers.
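//
// For illustration only (a reading of the code below, not original text): each
// NumberTable entry heads an intrusive singly linked list allocated from
// TableAllocator:
//   NumberTable[num] = (V1, next) -> (V2, next) -> (V3, null)
// and the scan returns the first entry whose defining block dominates BB.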
Value *GVN::lookupNumber(BasicBlock *BB, uint32_t num) {
  std::pair<Value*, void*> Vals = NumberTable[num];
  if (!Vals.first) return 0;
  Instruction *Inst = dyn_cast<Instruction>(Vals.first);
  if (!Inst) return Vals.first;
  BasicBlock *Parent = Inst->getParent();
  if (DT->dominates(Parent, BB))
    return Inst;

  std::pair<Value*, void*>* Next =
    static_cast<std::pair<Value*, void*>*>(Vals.second);
  while (Next) {
    Instruction *CurrInst = dyn_cast<Instruction>(Next->first);
    if (!CurrInst) return Next->first;

    BasicBlock *Parent = CurrInst->getParent();
    if (DT->dominates(Parent, BB))
      return CurrInst;

    Next = static_cast<std::pair<Value*, void*>*>(Next->second);
  }

  return 0;
}
/// processInstruction - When calculating availability, handle an instruction
/// by inserting it into the appropriate sets.
bool GVN::processInstruction(Instruction *I,
                             SmallVectorImpl<Instruction*> &toErase) {
  // Ignore dbg info intrinsics.
  if (isa<DbgInfoIntrinsic>(I))
    return false;

  // If the instruction can be easily simplified then do so now in preference
  // to value numbering it.  Value numbering often exposes redundancies, for
  // example if it determines that %y is equal to %x then the instruction
  // "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify.
  if (Value *V = SimplifyInstruction(I, TD, DT)) {
    I->replaceAllUsesWith(V);
    if (MD && V->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(V);
    VN.erase(I);
    toErase.push_back(I);
    return true;
  }

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    bool Changed = processLoad(LI, toErase);

    if (!Changed) {
      unsigned Num = VN.lookup_or_add(LI);
      insert_table(Num, LI);
    }

    return Changed;
  }

  uint32_t NextNum = VN.getNextUnusedValueNumber();
  unsigned Num = VN.lookup_or_add(I);

  // Allocations are always uniquely numbered, so we can save time and memory
  // by fast failing them.
  if (isa<AllocaInst>(I) || isa<TerminatorInst>(I) || isa<PHINode>(I)) {
    insert_table(Num, I);
    return false;
  }

  // If the number we were assigned was a brand new VN, then we don't
  // need to do a lookup to see if the number already exists
  // somewhere in the domtree: it can't!
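  //
  // For illustration only (not original text): if this is the first time an
  // expression such as (add %a, %b) has been numbered, lookup_or_add returns
  // NextNum itself, so no instruction elsewhere in the function can already
  // carry this number and the leader lookup below would be wasted work.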
  if (Num == NextNum) {
    insert_table(Num, I);
    return false;
  }

  // Perform fast-path value-number based elimination of values inherited from
  // dominators.
  Value *repl = lookupNumber(I->getParent(), Num);
  if (repl == 0) {
    // Failure, just remember this instance for future use.
    insert_table(Num, I);
    return false;
  }

  // Remove it!
  I->replaceAllUsesWith(repl);
  if (MD && repl->getType()->isPointerTy())
    MD->invalidateCachedPointerInfo(repl);
  toErase.push_back(I);
  return true;
}
/// runOnFunction - This is the main transformation entry point for a function.
bool GVN::runOnFunction(Function& F) {
  if (!NoLoads)
    MD = &getAnalysis<MemoryDependenceAnalysis>();
  DT = &getAnalysis<DominatorTree>();
  TD = getAnalysisIfAvailable<TargetData>();
  VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
  VN.setMemDep(MD);
  VN.setDomTree(DT);

  bool Changed = false;
  bool ShouldContinue = true;

  // Merge unconditional branches, allowing PRE to catch more
  // optimization opportunities.
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
    BasicBlock *BB = FI;
    ++FI;
    bool removedBlock = MergeBlockIntoPredecessor(BB, this);
    if (removedBlock) ++NumGVNBlocks;

    Changed |= removedBlock;
  }

  unsigned Iteration = 0;

  while (ShouldContinue) {
    DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
    ShouldContinue = iterateOnFunction(F);
    if (splitCriticalEdges())
      ShouldContinue = true;
    Changed |= ShouldContinue;
    ++Iteration;
  }

  if (EnablePRE) {
    bool PREChanged = true;
    while (PREChanged) {
      PREChanged = performPRE(F);
      Changed |= PREChanged;
    }
  }

  // FIXME: Should perform GVN again after PRE does something.  PRE can move
  // computations into blocks where they become fully redundant.  Note that
  // we can't do this until PRE's critical edge splitting updates memdep.
  // Actually, when this happens, we should just fully integrate PRE into GVN.

  cleanupGlobalSets();

  return Changed;
}
bool GVN::processBlock(BasicBlock *BB) {
  // FIXME: Kill off toErase by doing erasing eagerly in a helper function (and
  // incrementing BI before processing an instruction).
  SmallVector<Instruction*, 8> toErase;
  bool ChangedFunction = false;

  for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
       BI != BE;) {
    ChangedFunction |= processInstruction(BI, toErase);
    if (toErase.empty()) {
      ++BI;
      continue;
    }

    // If we need some instructions deleted, do it now.
    NumGVNInstr += toErase.size();

    // Avoid iterator invalidation.
    bool AtStart = BI == BB->begin();
    if (!AtStart)
      --BI;

    for (SmallVectorImpl<Instruction*>::iterator I = toErase.begin(),
         E = toErase.end(); I != E; ++I) {
      DEBUG(dbgs() << "GVN removed: " << **I << '\n');
      if (MD) MD->removeInstruction(*I);
      (*I)->eraseFromParent();
      DEBUG(verifyRemoved(*I));
    }
    toErase.clear();

    if (AtStart)
      BI = BB->begin();
    else
      ++BI;
  }

  return ChangedFunction;
}
/// performPRE - Perform a purely local form of PRE that looks for diamond
/// control flow patterns and attempts to perform simple PRE at the join point.
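///
/// For illustration only (assumed example, not from the original source): in
/// the diamond
///          [ BB1 ]
///          /     \
///   [BB2: a+b]  [ BB3 ]
///          \     /
///       [BB4: a+b]
/// the computation in BB4 is redundant along the BB2 path only; inserting a+b
/// into BB3 and merging the two with a phi in BB4 makes it fully redundant.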
bool GVN::performPRE(Function &F) {
  bool Changed = false;
  DenseMap<BasicBlock*, Value*> predMap;
  for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
       DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
    BasicBlock *CurrentBlock = *DI;

    // Nothing to PRE in the entry block.
    if (CurrentBlock == &F.getEntryBlock()) continue;

    for (BasicBlock::iterator BI = CurrentBlock->begin(),
         BE = CurrentBlock->end(); BI != BE; ) {
      Instruction *CurInst = BI++;

      if (isa<AllocaInst>(CurInst) ||
          isa<TerminatorInst>(CurInst) || isa<PHINode>(CurInst) ||
          CurInst->getType()->isVoidTy() ||
          CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
          isa<DbgInfoIntrinsic>(CurInst))
        continue;

      // We don't currently value number ANY inline asm calls.
      if (CallInst *CallI = dyn_cast<CallInst>(CurInst))
        if (CallI->isInlineAsm())
          continue;

      uint32_t ValNo = VN.lookup(CurInst);

      // Look for the predecessors for PRE opportunities.  We're
      // only trying to solve the basic diamond case, where
      // a value is computed in the successor and one predecessor,
      // but not the other.  We also explicitly disallow cases
      // where the successor is its own predecessor, because they're
      // more complicated to get right.
      unsigned NumWith = 0;
      unsigned NumWithout = 0;
      BasicBlock *PREPred = 0;
      predMap.clear();
      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI) {
        BasicBlock *P = *PI;
        // We're not interested in PRE where the block is its
        // own predecessor, or in blocks with predecessors
        // that are not reachable.
        if (P == CurrentBlock) {
          NumWithout = 2;
          break;
        } else if (!DT->dominates(&F.getEntryBlock(), P)) {
          NumWithout = 2;
          break;
        }

        Value* predV = lookupNumber(P, ValNo);
        if (predV == 0) {
          PREPred = P;
          ++NumWithout;
        } else if (predV == CurInst) {
          // The predecessor's leader is this very instruction (a value live
          // around a back-edge); give up on this one.
          NumWithout = 2;
        } else {
          predMap[P] = predV;
          ++NumWith;
        }
      }
      // Don't do PRE when it might increase code size, i.e. when
      // we would need to insert instructions in more than one pred.
      if (NumWithout != 1 || NumWith == 0)
        continue;

      // Don't do PRE across indirect branch.
      if (isa<IndirectBrInst>(PREPred->getTerminator()))
        continue;

      // We can't do PRE safely on a critical edge, so instead we schedule
      // the edge to be split and perform the PRE the next time we iterate
      // on the function.
      unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock);
      if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
        toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
        continue;
      }

      // Instantiate the expression in the predecessor that lacked it.
      // Because we are going top-down through the block, all value numbers
      // will be available in the predecessor by the time we need them.  Any
      // that weren't originally present will have been instantiated earlier
      // in this loop.
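      //
      // For illustration only (not original text): if CurInst is
      // "%t = add i32 %x, %y" and %y's value was itself PRE'd into PREPred by
      // an earlier iteration of this walk, the lookup below rewrites the
      // clone's %y operand to the predecessor's leader before insertion.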
      Instruction *PREInstr = CurInst->clone();
      bool success = true;
      for (unsigned i = 0, e = CurInst->getNumOperands(); i != e; ++i) {
        Value *Op = PREInstr->getOperand(i);
        if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
          continue;

        if (Value *V = lookupNumber(PREPred, VN.lookup(Op))) {
          PREInstr->setOperand(i, V);
        } else {
          success = false;
          break;
        }
      }

      // Fail out if we encounter an operand that is not available in
      // the PRE predecessor.  This is typically because of loads which
      // are not value numbered precisely.
      if (!success) {
        DEBUG(verifyRemoved(PREInstr));
        delete PREInstr;
        continue;
      }

      PREInstr->insertBefore(PREPred->getTerminator());
      PREInstr->setName(CurInst->getName() + ".pre");
      predMap[PREPred] = PREInstr;
      VN.add(PREInstr, ValNo);
      ++NumGVNPRE;
      // Update the availability map to include the new instruction.
      insert_table(ValNo, PREInstr);

      // Create a PHI to make the value available in this block.
      PHINode* Phi = PHINode::Create(CurInst->getType(),
                                     CurInst->getName() + ".pre-phi",
                                     CurrentBlock->begin());
      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI) {
        BasicBlock *P = *PI;
        Phi->addIncoming(predMap[P], P);
      }

      VN.add(Phi, ValNo);
      insert_table(ValNo, Phi);

      CurInst->replaceAllUsesWith(Phi);
      if (MD && Phi->getType()->isPointerTy())
        MD->invalidateCachedPointerInfo(Phi);
      VN.erase(CurInst);
      erase_table(ValNo, CurInst);

      DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
      if (MD) MD->removeInstruction(CurInst);
      CurInst->eraseFromParent();
      DEBUG(verifyRemoved(CurInst));
      Changed = true;
    }
  }

  if (splitCriticalEdges())
    Changed = true;

  return Changed;
}
/// splitCriticalEdges - Split critical edges found during the previous
/// iteration that may enable further optimization.
bool GVN::splitCriticalEdges() {
  if (toSplit.empty())
    return false;
  do {
    std::pair<TerminatorInst*, unsigned> Edge = toSplit.pop_back_val();
    SplitCriticalEdge(Edge.first, Edge.second, this);
  } while (!toSplit.empty());
  if (MD) MD->invalidateCachedPredecessors();
  return true;
}
/// iterateOnFunction - Executes one iteration of GVN.
bool GVN::iterateOnFunction(Function &F) {
  cleanupGlobalSets();

  // Top-down walk of the dominator tree.
  bool Changed = false;
#if 0
  // Needed for value numbering with phi construction to work.
  ReversePostOrderTraversal<Function*> RPOT(&F);
  for (ReversePostOrderTraversal<Function*>::rpo_iterator RI = RPOT.begin(),
       RE = RPOT.end(); RI != RE; ++RI)
    Changed |= processBlock(*RI);
#else
  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI)
    Changed |= processBlock(DI->getBlock());
#endif

  return Changed;
}
void GVN::cleanupGlobalSets() {
  VN.clear();
  NumberTable.clear();
  TableAllocator.Reset();
}
/// verifyRemoved - Verify that the specified instruction does not occur in our
/// internal data structures.
void GVN::verifyRemoved(const Instruction *Inst) const {
  VN.verifyRemoved(Inst);

  // Walk through the value number scope to make sure the instruction isn't
  // ferreted away in it.
  for (DenseMap<uint32_t, std::pair<Value*, void*> >::const_iterator
       I = NumberTable.begin(), E = NumberTable.end(); I != E; ++I) {
    const std::pair<Value*, void*>* Node = &I->second;
    assert(Node->first != Inst && "Inst still in value numbering scope!");

    while (Node->second) {
      Node = static_cast<std::pair<Value*, void*>*>(Node->second);
      assert(Node->first != Inst && "Inst still in value numbering scope!");
    }
  }
}