//===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs global value numbering to eliminate fully redundant
// instructions. It also performs simple dead load elimination.
//
// Note that this pass does the value numbering itself; it does not use the
// ValueNumbering analysis passes.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "gvn"
#include "llvm/Transforms/Scalar.h"
#include "llvm/BasicBlock.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Value.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
using namespace llvm;

STATISTIC(NumGVNInstr,  "Number of instructions deleted");
STATISTIC(NumGVNLoad,   "Number of loads deleted");
STATISTIC(NumGVNPRE,    "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
STATISTIC(NumPRELoad,   "Number of loads PRE'd");

static cl::opt<bool> EnablePRE("enable-pre",
                               cl::init(true), cl::Hidden);
static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true));

//===----------------------------------------------------------------------===//
//                         ValueTable Class
//===----------------------------------------------------------------------===//

/// This class holds the mapping between values and value numbers. It is used
/// as an efficient mechanism to determine the expression-wise equivalence of
/// two values.
namespace {
  struct Expression {
    enum ExpressionOpcode {
      ADD = Instruction::Add,
      FADD = Instruction::FAdd,
      SUB = Instruction::Sub,
      FSUB = Instruction::FSub,
      MUL = Instruction::Mul,
      FMUL = Instruction::FMul,
      UDIV = Instruction::UDiv,
      SDIV = Instruction::SDiv,
      FDIV = Instruction::FDiv,
      UREM = Instruction::URem,
      SREM = Instruction::SRem,
      FREM = Instruction::FRem,
      SHL = Instruction::Shl,
      LSHR = Instruction::LShr,
      ASHR = Instruction::AShr,
      AND = Instruction::And,
      OR = Instruction::Or,
      XOR = Instruction::Xor,
      TRUNC = Instruction::Trunc,
      ZEXT = Instruction::ZExt,
      SEXT = Instruction::SExt,
      FPTOUI = Instruction::FPToUI,
      FPTOSI = Instruction::FPToSI,
      UITOFP = Instruction::UIToFP,
      SITOFP = Instruction::SIToFP,
      FPTRUNC = Instruction::FPTrunc,
      FPEXT = Instruction::FPExt,
      PTRTOINT = Instruction::PtrToInt,
      INTTOPTR = Instruction::IntToPtr,
      BITCAST = Instruction::BitCast,
      ICMPEQ, ICMPNE, ICMPUGT, ICMPUGE, ICMPULT, ICMPULE,
      ICMPSGT, ICMPSGE, ICMPSLT, ICMPSLE, FCMPOEQ,
      FCMPOGT, FCMPOGE, FCMPOLT, FCMPOLE, FCMPONE,
      FCMPORD, FCMPUNO, FCMPUEQ, FCMPUGT, FCMPUGE,
      FCMPULT, FCMPULE, FCMPUNE, EXTRACT, INSERT,
      SHUFFLE, SELECT, GEP, CALL, CONSTANT,
      INSERTVALUE, EXTRACTVALUE, EMPTY, TOMBSTONE };

    ExpressionOpcode opcode;
    const Type* type;
    SmallVector<uint32_t, 4> varargs;
    Function *function;

    Expression() { }
    Expression(ExpressionOpcode o) : opcode(o) { }

    bool operator==(const Expression &other) const {
      if (opcode != other.opcode)
        return false;
      else if (opcode == EMPTY || opcode == TOMBSTONE)
        return true;
      else if (type != other.type)
        return false;
      else if (function != other.function)
        return false;
      else {
        if (varargs.size() != other.varargs.size())
          return false;

        for (size_t i = 0; i < varargs.size(); ++i)
          if (varargs[i] != other.varargs[i])
            return false;

        return true;
      }
    }

    /*bool operator!=(const Expression &other) const {
      return !(*this == other);
    }*/
  };
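
  // Illustrative example (not from the original source): two syntactically
  // distinct instructions such as
  //   %a = add i32 %x, %y
  //   %b = add i32 %x, %y
  // build identical Expression records (same opcode, type, and operand value
  // numbers), so the ValueTable below assigns both the same value number and
  // GVN can replace %b with %a.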

  class ValueTable {
    private:
      DenseMap<Value*, uint32_t> valueNumbering;
      DenseMap<Expression, uint32_t> expressionNumbering;
      AliasAnalysis* AA;
      MemoryDependenceAnalysis* MD;
      DominatorTree* DT;

      uint32_t nextValueNumber;

      Expression::ExpressionOpcode getOpcode(CmpInst* C);
      Expression create_expression(BinaryOperator* BO);
      Expression create_expression(CmpInst* C);
      Expression create_expression(ShuffleVectorInst* V);
      Expression create_expression(ExtractElementInst* C);
      Expression create_expression(InsertElementInst* V);
      Expression create_expression(SelectInst* V);
      Expression create_expression(CastInst* C);
      Expression create_expression(GetElementPtrInst* G);
      Expression create_expression(CallInst* C);
      Expression create_expression(ExtractValueInst* C);
      Expression create_expression(InsertValueInst* C);

      uint32_t lookup_or_add_call(CallInst* C);
    public:
      ValueTable() : nextValueNumber(1) { }
      uint32_t lookup_or_add(Value *V);
      uint32_t lookup(Value *V) const;
      void add(Value *V, uint32_t num);
      void clear();
      void erase(Value *v);
      void setAliasAnalysis(AliasAnalysis* A) { AA = A; }
      AliasAnalysis *getAliasAnalysis() const { return AA; }
      void setMemDep(MemoryDependenceAnalysis* M) { MD = M; }
      void setDomTree(DominatorTree* D) { DT = D; }
      uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
      void verifyRemoved(const Value *) const;
  };
}

namespace llvm {
template <> struct DenseMapInfo<Expression> {
  static inline Expression getEmptyKey() {
    return Expression(Expression::EMPTY);
  }

  static inline Expression getTombstoneKey() {
    return Expression(Expression::TOMBSTONE);
  }

  static unsigned getHashValue(const Expression e) {
    unsigned hash = e.opcode;

    hash = ((unsigned)((uintptr_t)e.type >> 4) ^
            (unsigned)((uintptr_t)e.type >> 9));

    for (SmallVector<uint32_t, 4>::const_iterator I = e.varargs.begin(),
         E = e.varargs.end(); I != E; ++I)
      hash = *I + hash * 37;

    hash = ((unsigned)((uintptr_t)e.function >> 4) ^
            (unsigned)((uintptr_t)e.function >> 9)) +
           hash * 37;

    return hash;
  }

  static bool isEqual(const Expression &LHS, const Expression &RHS) {
    return LHS == RHS;
  }
};

template <>
struct isPodLike<Expression> { static const bool value = true; };

}

//===----------------------------------------------------------------------===//
//                     ValueTable Internal Functions
//===----------------------------------------------------------------------===//

Expression::ExpressionOpcode ValueTable::getOpcode(CmpInst* C) {
  if (isa<ICmpInst>(C)) {
    switch (C->getPredicate()) {
    default:  // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case ICmpInst::ICMP_EQ:  return Expression::ICMPEQ;
    case ICmpInst::ICMP_NE:  return Expression::ICMPNE;
    case ICmpInst::ICMP_UGT: return Expression::ICMPUGT;
    case ICmpInst::ICMP_UGE: return Expression::ICMPUGE;
    case ICmpInst::ICMP_ULT: return Expression::ICMPULT;
    case ICmpInst::ICMP_ULE: return Expression::ICMPULE;
    case ICmpInst::ICMP_SGT: return Expression::ICMPSGT;
    case ICmpInst::ICMP_SGE: return Expression::ICMPSGE;
    case ICmpInst::ICMP_SLT: return Expression::ICMPSLT;
    case ICmpInst::ICMP_SLE: return Expression::ICMPSLE;
    }
  } else {
    switch (C->getPredicate()) {
    default: // THIS SHOULD NEVER HAPPEN
      llvm_unreachable("Comparison with unknown predicate?");
    case FCmpInst::FCMP_OEQ: return Expression::FCMPOEQ;
    case FCmpInst::FCMP_OGT: return Expression::FCMPOGT;
    case FCmpInst::FCMP_OGE: return Expression::FCMPOGE;
    case FCmpInst::FCMP_OLT: return Expression::FCMPOLT;
    case FCmpInst::FCMP_OLE: return Expression::FCMPOLE;
    case FCmpInst::FCMP_ONE: return Expression::FCMPONE;
    case FCmpInst::FCMP_ORD: return Expression::FCMPORD;
    case FCmpInst::FCMP_UNO: return Expression::FCMPUNO;
    case FCmpInst::FCMP_UEQ: return Expression::FCMPUEQ;
    case FCmpInst::FCMP_UGT: return Expression::FCMPUGT;
    case FCmpInst::FCMP_UGE: return Expression::FCMPUGE;
    case FCmpInst::FCMP_ULT: return Expression::FCMPULT;
    case FCmpInst::FCMP_ULE: return Expression::FCMPULE;
    case FCmpInst::FCMP_UNE: return Expression::FCMPUNE;
    }
  }
}

Expression ValueTable::create_expression(CallInst* C) {
  Expression e;

  e.type = C->getType();
  e.function = C->getCalledFunction();
  e.opcode = Expression::CALL;

  CallSite CS(C);
  for (CallInst::op_iterator I = CS.arg_begin(), E = CS.arg_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(BinaryOperator* BO) {
  Expression e;
  e.varargs.push_back(lookup_or_add(BO->getOperand(0)));
  e.varargs.push_back(lookup_or_add(BO->getOperand(1)));
  e.function = 0;
  e.type = BO->getType();
  e.opcode = static_cast<Expression::ExpressionOpcode>(BO->getOpcode());

  return e;
}

Expression ValueTable::create_expression(CmpInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.varargs.push_back(lookup_or_add(C->getOperand(1)));
  e.function = 0;
  e.type = C->getType();
  e.opcode = getOpcode(C);

  return e;
}

Expression ValueTable::create_expression(CastInst* C) {
  Expression e;

  e.varargs.push_back(lookup_or_add(C->getOperand(0)));
  e.function = 0;
  e.type = C->getType();
  e.opcode = static_cast<Expression::ExpressionOpcode>(C->getOpcode());

  return e;
}

Expression ValueTable::create_expression(ShuffleVectorInst* S) {
  Expression e;

  e.varargs.push_back(lookup_or_add(S->getOperand(0)));
  e.varargs.push_back(lookup_or_add(S->getOperand(1)));
  e.varargs.push_back(lookup_or_add(S->getOperand(2)));
  e.function = 0;
  e.type = S->getType();
  e.opcode = Expression::SHUFFLE;

  return e;
}

Expression ValueTable::create_expression(ExtractElementInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getOperand(0)));
  e.varargs.push_back(lookup_or_add(E->getOperand(1)));
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::EXTRACT;

  return e;
}

Expression ValueTable::create_expression(InsertElementInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getOperand(0)));
  e.varargs.push_back(lookup_or_add(I->getOperand(1)));
  e.varargs.push_back(lookup_or_add(I->getOperand(2)));
  e.function = 0;
  e.type = I->getType();
  e.opcode = Expression::INSERT;

  return e;
}

Expression ValueTable::create_expression(SelectInst* I) {
  Expression e;

  e.varargs.push_back(lookup_or_add(I->getCondition()));
  e.varargs.push_back(lookup_or_add(I->getTrueValue()));
  e.varargs.push_back(lookup_or_add(I->getFalseValue()));
  e.function = 0;
  e.type = I->getType();
  e.opcode = Expression::SELECT;

  return e;
}

Expression ValueTable::create_expression(GetElementPtrInst* G) {
  Expression e;

  e.varargs.push_back(lookup_or_add(G->getPointerOperand()));
  e.function = 0;
  e.type = G->getType();
  e.opcode = Expression::GEP;

  for (GetElementPtrInst::op_iterator I = G->idx_begin(), E = G->idx_end();
       I != E; ++I)
    e.varargs.push_back(lookup_or_add(*I));

  return e;
}

Expression ValueTable::create_expression(ExtractValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  for (ExtractValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::EXTRACTVALUE;

  return e;
}

Expression ValueTable::create_expression(InsertValueInst* E) {
  Expression e;

  e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
  e.varargs.push_back(lookup_or_add(E->getInsertedValueOperand()));
  for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);
  e.function = 0;
  e.type = E->getType();
  e.opcode = Expression::INSERTVALUE;

  return e;
}

//===----------------------------------------------------------------------===//
//                     ValueTable External Functions
//===----------------------------------------------------------------------===//

/// add - Insert a value into the table with a specified value number.
void ValueTable::add(Value *V, uint32_t num) {
  valueNumbering.insert(std::make_pair(V, num));
}

uint32_t ValueTable::lookup_or_add_call(CallInst* C) {
  if (AA->doesNotAccessMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) e = nextValueNumber++;
    valueNumbering[C] = e;
    return e;
  } else if (AA->onlyReadsMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t& e = expressionNumbering[exp];
    if (!e) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }
    if (!MD) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }

    MemDepResult local_dep = MD->getDependency(C);

    if (!local_dep.isDef() && !local_dep.isNonLocal()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (local_dep.isDef()) {
      CallInst* local_cdep = cast<CallInst>(local_dep.getInst());

      if (local_cdep->getNumArgOperands() != C->getNumArgOperands()) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }

      for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
        uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
        uint32_t cd_vn = lookup_or_add(local_cdep->getArgOperand(i));
        if (c_vn != cd_vn) {
          valueNumbering[C] = nextValueNumber;
          return nextValueNumber++;
        }
      }

      uint32_t v = lookup_or_add(local_cdep);
      valueNumbering[C] = v;
      return v;
    }

    // Non-local case.
    const MemoryDependenceAnalysis::NonLocalDepInfo &deps =
      MD->getNonLocalCallDependency(CallSite(C));
    // FIXME: call/call dependencies for readonly calls should return def, not
    // clobber! Move the checking logic to MemDep!
    CallInst* cdep = 0;

    // Check to see if we have a single dominating call instruction that is
    // identical to C.
    for (unsigned i = 0, e = deps.size(); i != e; ++i) {
      const NonLocalDepEntry *I = &deps[i];
      // Ignore non-local dependencies.
      if (I->getResult().isNonLocal())
        continue;

      // We don't handle non-dependencies. If we already have a call, reject
      // instruction dependencies.
      if (I->getResult().isClobber() || cdep != 0) {
        cdep = 0;
        break;
      }

      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst());
      // FIXME: All duplicated with non-local case.
      if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())){
        cdep = NonLocalDepCall;
        continue;
      }

      cdep = 0;
      break;
    }

    if (!cdep) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (cdep->getNumArgOperands() != C->getNumArgOperands()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }
    for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
      uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
      uint32_t cd_vn = lookup_or_add(cdep->getArgOperand(i));
      if (c_vn != cd_vn) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }
    }

    uint32_t v = lookup_or_add(cdep);
    valueNumbering[C] = v;
    return v;

  } else {
    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;
  }
}

/// lookup_or_add - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t ValueTable::lookup_or_add(Value *V) {
  DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
  if (VI != valueNumbering.end())
    return VI->second;

  if (!isa<Instruction>(V)) {
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  Instruction* I = cast<Instruction>(V);
  Expression exp;
  switch (I->getOpcode()) {
    case Instruction::Call:
      return lookup_or_add_call(cast<CallInst>(I));
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or :
    case Instruction::Xor:
      exp = create_expression(cast<BinaryOperator>(I));
      break;
    case Instruction::ICmp:
    case Instruction::FCmp:
      exp = create_expression(cast<CmpInst>(I));
      break;
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::BitCast:
      exp = create_expression(cast<CastInst>(I));
      break;
    case Instruction::Select:
      exp = create_expression(cast<SelectInst>(I));
      break;
    case Instruction::ExtractElement:
      exp = create_expression(cast<ExtractElementInst>(I));
      break;
    case Instruction::InsertElement:
      exp = create_expression(cast<InsertElementInst>(I));
      break;
    case Instruction::ShuffleVector:
      exp = create_expression(cast<ShuffleVectorInst>(I));
      break;
    case Instruction::ExtractValue:
      exp = create_expression(cast<ExtractValueInst>(I));
      break;
    case Instruction::InsertValue:
      exp = create_expression(cast<InsertValueInst>(I));
      break;
    case Instruction::GetElementPtr:
      exp = create_expression(cast<GetElementPtrInst>(I));
      break;
    default:
      valueNumbering[V] = nextValueNumber;
      return nextValueNumber++;
  }

  uint32_t& e = expressionNumbering[exp];
  if (!e) e = nextValueNumber++;
  valueNumbering[V] = e;
  return e;
}

/// lookup - Returns the value number of the specified value. Fails if
/// the value has not yet been numbered.
uint32_t ValueTable::lookup(Value *V) const {
  DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
  assert(VI != valueNumbering.end() && "Value not numbered?");
  return VI->second;
}

/// clear - Remove all entries from the ValueTable.
void ValueTable::clear() {
  valueNumbering.clear();
  expressionNumbering.clear();
  nextValueNumber = 1;
}

/// erase - Remove a value from the value numbering.
void ValueTable::erase(Value *V) {
  valueNumbering.erase(V);
}

/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void ValueTable::verifyRemoved(const Value *V) const {
  for (DenseMap<Value*, uint32_t>::const_iterator
         I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
    assert(I->first != V && "Inst still occurs in value numbering map!");
  }
}

//===----------------------------------------------------------------------===//
//                                GVN Pass
//===----------------------------------------------------------------------===//

namespace {
  struct ValueNumberScope {
    ValueNumberScope* parent;
    DenseMap<uint32_t, Value*> table;

    ValueNumberScope(ValueNumberScope* p) : parent(p) { }
  };
}

namespace {

  class GVN : public FunctionPass {
    bool runOnFunction(Function &F);
  public:
    static char ID; // Pass identification, replacement for typeid
    explicit GVN(bool noloads = false)
        : FunctionPass(ID), NoLoads(noloads), MD(0) { }

  private:
    bool NoLoads;
    MemoryDependenceAnalysis *MD;
    DominatorTree *DT;

    ValueTable VN;
    DenseMap<BasicBlock*, ValueNumberScope*> localAvail;

    // List of critical edges to be split between iterations.
    SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit;

    // This transformation requires dominator and postdominator info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      if (!NoLoads)
        AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();

      AU.addPreserved<DominatorTree>();
      AU.addPreserved<AliasAnalysis>();
    }

    // Helper functions.
    // FIXME: eliminate or document these better
    bool processLoad(LoadInst* L,
                     SmallVectorImpl<Instruction*> &toErase);
    bool processInstruction(Instruction *I,
                            SmallVectorImpl<Instruction*> &toErase);
    bool processNonLocalLoad(LoadInst* L,
                             SmallVectorImpl<Instruction*> &toErase);
    bool processBlock(BasicBlock *BB);
    void dump(DenseMap<uint32_t, Value*>& d);
    bool iterateOnFunction(Function &F);
    Value *CollapsePhi(PHINode* p);
    bool performPRE(Function& F);
    Value *lookupNumber(BasicBlock *BB, uint32_t num);
    void cleanupGlobalSets();
    void verifyRemoved(const Instruction *I) const;
    bool splitCriticalEdges();
  };

  char GVN::ID = 0;
}

// createGVNPass - The public interface to this file...
FunctionPass *llvm::createGVNPass(bool NoLoads) {
  return new GVN(NoLoads);
}

INITIALIZE_PASS_BEGIN(GVN, "gvn", "Global Value Numbering", false, false)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(GVN, "gvn", "Global Value Numbering", false, false)

void GVN::dump(DenseMap<uint32_t, Value*>& d) {
  errs() << "{\n";
  for (DenseMap<uint32_t, Value*>::iterator I = d.begin(),
       E = d.end(); I != E; ++I) {
    errs() << I->first << "\n";
    I->second->dump();
  }
  errs() << "}\n";
}

static bool isSafeReplacement(PHINode* p, Instruction *inst) {
  if (!isa<PHINode>(inst))
    return true;

  for (Instruction::use_iterator UI = p->use_begin(), E = p->use_end();
       UI != E; ++UI)
    if (PHINode* use_phi = dyn_cast<PHINode>(*UI))
      if (use_phi->getParent() == inst->getParent())
        return false;

  return true;
}

Value *GVN::CollapsePhi(PHINode *PN) {
  Value *ConstVal = PN->hasConstantValue(DT);
  if (!ConstVal) return 0;

  Instruction *Inst = dyn_cast<Instruction>(ConstVal);
  if (!Inst)
    return ConstVal;

  if (DT->dominates(Inst, PN))
    if (isSafeReplacement(PN, Inst))
      return Inst;
  return 0;
}

/// IsValueFullyAvailableInBlock - Return true if we can prove that the value
/// we're analyzing is fully available in the specified block. As we go, keep
/// track of which blocks we know are fully alive in FullyAvailableBlocks. This
/// map is actually a tri-state map with the following values:
///   0) we know the block *is not* fully available.
///   1) we know the block *is* fully available.
///   2) we do not know whether the block is fully available or not, but we are
///      currently speculating that it will be.
///   3) we are speculating for this block and have used that to speculate for
///      other blocks.
static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
                            DenseMap<BasicBlock*, char> &FullyAvailableBlocks) {
  // Optimistically assume that the block is fully available and check to see
  // if we already know about this block in one lookup.
  std::pair<DenseMap<BasicBlock*, char>::iterator, char> IV =
    FullyAvailableBlocks.insert(std::make_pair(BB, 2));

  // If the entry already existed for this block, return the precomputed value.
  if (!IV.second) {
    // If this is a speculative "available" value, mark it as being used for
    // speculation of other blocks.
    if (IV.first->second == 2)
      IV.first->second = 3;
    return IV.first->second != 0;
  }

  // Otherwise, see if it is fully available in all predecessors.
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);

  // If this block has no predecessors, it isn't live-in here.
  if (PI == PE)
    goto SpeculationFailure;

  for (; PI != PE; ++PI)
    // If the value isn't fully available in one of our predecessors, then it
    // isn't fully available in this block either. Undo our previous
    // optimistic assumption and bail out.
    if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
      goto SpeculationFailure;

  return true;

// SpeculationFailure - If we get here, we found out that this is not, after
// all, a fully-available block. We have a problem if we speculated on this and
// used the speculation to mark other blocks as available.
SpeculationFailure:
  char &BBVal = FullyAvailableBlocks[BB];

  // If we didn't speculate on this, just return with it set to false.
  if (BBVal == 2) {
    BBVal = 0;
    return false;
  }

  // If we did speculate on this value, we could have blocks set to 1 that are
  // incorrect. Walk the (transitive) successors of this block and mark them as
  // 0 if set to one.
  SmallVector<BasicBlock*, 32> BBWorklist;
  BBWorklist.push_back(BB);

  do {
    BasicBlock *Entry = BBWorklist.pop_back_val();
    // Note that this sets blocks to 0 (unavailable) if they happen to not
    // already be in FullyAvailableBlocks. This is safe.
    char &EntryVal = FullyAvailableBlocks[Entry];
    if (EntryVal == 0) continue; // Already unavailable.

    // Mark as unavailable.
    EntryVal = 0;

    for (succ_iterator I = succ_begin(Entry), E = succ_end(Entry); I != E; ++I)
      BBWorklist.push_back(*I);
  } while (!BBWorklist.empty());

  return false;
}

/// CanCoerceMustAliasedValueToLoad - Return true if
/// CoerceAvailableValueToLoadType will succeed.
static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
                                            const Type *LoadTy,
                                            const TargetData &TD) {
  // If the loaded or stored value is a first-class array or struct, don't try
  // to transform them. We need to be able to bitcast to integer.
  if (LoadTy->isStructTy() || LoadTy->isArrayTy() ||
      StoredVal->getType()->isStructTy() ||
      StoredVal->getType()->isArrayTy())
    return false;

  // The store has to be at least as big as the load.
  if (TD.getTypeSizeInBits(StoredVal->getType()) <
        TD.getTypeSizeInBits(LoadTy))
    return false;

  return true;
}
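
// Illustrative example (not from the original source): an i64 store can feed
// an i32 load of the same address, because the 64 stored bits cover all 32
// loaded bits:
//   store i64 %v, i64* %p
//   %q = bitcast i64* %p to i32*
//   %x = load i32* %q
// The reverse case (an i32 store feeding an i64 load) is rejected because the
// store is smaller than the load.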

/// CoerceAvailableValueToLoadType - If we saw a store of a value to memory, and
/// then a load from a must-aliased pointer of a different type, try to coerce
/// the stored value. LoadedTy is the type of the load we want to replace and
/// InsertPt is the place to insert new instructions.
///
/// If we can't do it, return null.
static Value *CoerceAvailableValueToLoadType(Value *StoredVal,
                                             const Type *LoadedTy,
                                             Instruction *InsertPt,
                                             const TargetData &TD) {
  if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, TD))
    return 0;

  const Type *StoredValTy = StoredVal->getType();

  uint64_t StoreSize = TD.getTypeStoreSizeInBits(StoredValTy);
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadedTy);

  // If the store and reload are the same size, we can always reuse it.
  if (StoreSize == LoadSize) {
    if (StoredValTy->isPointerTy() && LoadedTy->isPointerTy()) {
      // Pointer to Pointer -> use bitcast.
      return new BitCastInst(StoredVal, LoadedTy, "", InsertPt);
    }

    // Convert source pointers to integers, which can be bitcast.
    if (StoredValTy->isPointerTy()) {
      StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
      StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
    }

    const Type *TypeToCastTo = LoadedTy;
    if (TypeToCastTo->isPointerTy())
      TypeToCastTo = TD.getIntPtrType(StoredValTy->getContext());

    if (StoredValTy != TypeToCastTo)
      StoredVal = new BitCastInst(StoredVal, TypeToCastTo, "", InsertPt);

    // Cast to pointer if the load needs a pointer type.
    if (LoadedTy->isPointerTy())
      StoredVal = new IntToPtrInst(StoredVal, LoadedTy, "", InsertPt);

    return StoredVal;
  }

  // If the loaded value is smaller than the available value, then we can
  // extract out a piece from it. If the available value is too small, then we
  // can't do anything.
  assert(StoreSize >= LoadSize && "CanCoerceMustAliasedValueToLoad fail");

  // Convert source pointers to integers, which can be manipulated.
  if (StoredValTy->isPointerTy()) {
    StoredValTy = TD.getIntPtrType(StoredValTy->getContext());
    StoredVal = new PtrToIntInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // Convert vectors and fp to integer, which can be manipulated.
  if (!StoredValTy->isIntegerTy()) {
    StoredValTy = IntegerType::get(StoredValTy->getContext(), StoreSize);
    StoredVal = new BitCastInst(StoredVal, StoredValTy, "", InsertPt);
  }

  // If this is a big-endian system, we need to shift the value down to the low
  // bits so that a truncate will work.
  if (TD.isBigEndian()) {
    Constant *Val = ConstantInt::get(StoredVal->getType(), StoreSize-LoadSize);
    StoredVal = BinaryOperator::CreateLShr(StoredVal, Val, "tmp", InsertPt);
  }

  // Truncate the integer to the right size now.
  const Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize);
  StoredVal = new TruncInst(StoredVal, NewIntTy, "trunc", InsertPt);

  if (LoadedTy == NewIntTy)
    return StoredVal;

  // If the result is a pointer, inttoptr.
  if (LoadedTy->isPointerTy())
    return new IntToPtrInst(StoredVal, LoadedTy, "inttoptr", InsertPt);

  // Otherwise, bitcast.
  return new BitCastInst(StoredVal, LoadedTy, "bitcast", InsertPt);
}
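
// Illustrative example (not from the original source): coercing the i64 value
// %v above to satisfy an i32 load emits roughly "trunc i64 %v to i32" on a
// little-endian target; on a big-endian target an lshr by 32 bits comes
// first, so the truncate keeps the bytes the load would actually read.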

/// GetBaseWithConstantOffset - Analyze the specified pointer to see if it can
/// be expressed as a base pointer plus a constant offset. Return the base and
/// offset to the caller.
static Value *GetBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
                                        const TargetData &TD) {
  Operator *PtrOp = dyn_cast<Operator>(Ptr);
  if (PtrOp == 0) return Ptr;

  // Just look through bitcasts.
  if (PtrOp->getOpcode() == Instruction::BitCast)
    return GetBaseWithConstantOffset(PtrOp->getOperand(0), Offset, TD);

  // If this is a GEP with constant indices, we can look through it.
  GEPOperator *GEP = dyn_cast<GEPOperator>(PtrOp);
  if (GEP == 0 || !GEP->hasAllConstantIndices()) return Ptr;

  gep_type_iterator GTI = gep_type_begin(GEP);
  for (User::op_iterator I = GEP->idx_begin(), E = GEP->idx_end(); I != E;
       ++I, ++GTI) {
    ConstantInt *OpC = cast<ConstantInt>(*I);
    if (OpC->isZero()) continue;

    // Handle struct and array indices, which add their offset to the pointer.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
    } else {
      uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
      Offset += OpC->getSExtValue()*Size;
    }
  }

  // Re-sign extend from the pointer size if needed to get overflow edge cases
  // right.
  unsigned PtrSize = TD.getPointerSizeInBits();
  if (PtrSize < 64)
    Offset = (Offset << (64-PtrSize)) >> (64-PtrSize);

  return GetBaseWithConstantOffset(GEP->getPointerOperand(), Offset, TD);
}
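
// Illustrative example (not from the original source): given
//   %f = getelementptr {i32, i32}* %base, i32 0, i32 1
// this returns %base and adds 4 to Offset (assuming a 4-byte i32), so the
// clobber analysis below can compare pointers by base and constant offset.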

/// AnalyzeLoadFromClobberingWrite - This function is called when we have a
/// memdep query of a load that ends up being a clobbering memory write (store,
/// memset, memcpy, memmove). This means that the write *may* provide bits used
/// by the load but we can't be sure because the pointers don't mustalias.
///
/// Check this case to see if there is anything more we can do before we give
/// up. This returns -1 if we have to give up, or a byte number in the stored
/// value of the piece that feeds the load.
static int AnalyzeLoadFromClobberingWrite(const Type *LoadTy, Value *LoadPtr,
                                          Value *WritePtr,
                                          uint64_t WriteSizeInBits,
                                          const TargetData &TD) {
  // If the loaded or stored value is a first-class array or struct, don't try
  // to transform them. We need to be able to bitcast to integer.
  if (LoadTy->isStructTy() || LoadTy->isArrayTy())
    return -1;

  int64_t StoreOffset = 0, LoadOffset = 0;
  Value *StoreBase = GetBaseWithConstantOffset(WritePtr, StoreOffset, TD);
  Value *LoadBase =
    GetBaseWithConstantOffset(LoadPtr, LoadOffset, TD);
  if (StoreBase != LoadBase)
    return -1;

  // If the load and store are to the exact same address, they should have been
  // a must alias. AA must have gotten confused.
  // FIXME: Study to see if/when this happens. One case is forwarding a memset
  // to a load from the base of the memset.
#if 0
  if (LoadOffset == StoreOffset) {
    dbgs() << "STORE/LOAD DEP WITH COMMON POINTER MISSED:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
  }
#endif

  // If the load and store don't overlap at all, the store doesn't provide
  // anything to the load. In this case, they really don't alias at all, AA
  // must have gotten confused.
  // FIXME: Investigate cases where this bails out, e.g. rdar://7238614. Then
  // remove this check, as it is duplicated with what we have below.
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy);

  if ((WriteSizeInBits & 7) | (LoadSize & 7))
    return -1;
  uint64_t StoreSize = WriteSizeInBits >> 3; // Convert to bytes.
  LoadSize >>= 3;

  bool isAAFailure = false;
  if (StoreOffset < LoadOffset)
    isAAFailure = StoreOffset+int64_t(StoreSize) <= LoadOffset;
  else
    isAAFailure = LoadOffset+int64_t(LoadSize) <= StoreOffset;

  if (isAAFailure) {
#if 0
    dbgs() << "STORE LOAD DEP WITH COMMON BASE:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *LoadPtr << "\n";
#endif
    return -1;
  }

  // If the load isn't completely contained within the stored bits, we don't
  // have all the bits to feed it. We could do something crazy in the future
  // (issue a smaller load then merge the bits in) but this seems unlikely to be
  // valuable.
  if (StoreOffset > LoadOffset ||
      StoreOffset+StoreSize < LoadOffset+LoadSize)
    return -1;

  // Okay, we can do this transformation. Return the number of bytes into the
  // store that the load is.
  return LoadOffset-StoreOffset;
}
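
// Illustrative example (not from the original source): an i16 load at base+2
// that is clobbered by an i64 store at base+0 returns 2, meaning the load
// reads bytes 2 and 3 of the stored value. An i16 load at base+7 is rejected
// (-1) because its second byte lies outside the 8-byte store.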

/// AnalyzeLoadFromClobberingStore - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.
static int AnalyzeLoadFromClobberingStore(const Type *LoadTy, Value *LoadPtr,
                                          StoreInst *DepSI,
                                          const TargetData &TD) {
  // Cannot handle reading from store of first-class aggregate yet.
  if (DepSI->getOperand(0)->getType()->isStructTy() ||
      DepSI->getOperand(0)->getType()->isArrayTy())
    return -1;

  Value *StorePtr = DepSI->getPointerOperand();
  uint64_t StoreSize = TD.getTypeSizeInBits(DepSI->getOperand(0)->getType());
  return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                        StorePtr, StoreSize, TD);
}

static int AnalyzeLoadFromClobberingMemInst(const Type *LoadTy, Value *LoadPtr,
                                            MemIntrinsic *MI,
                                            const TargetData &TD) {
  // If the mem operation is a non-constant size, we can't handle it.
  ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
  if (SizeCst == 0) return -1;
  uint64_t MemSizeInBits = SizeCst->getZExtValue()*8;

  // If this is memset, we just need to see if the offset is valid in the size
  // of the memset.
  if (MI->getIntrinsicID() == Intrinsic::memset)
    return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
                                          MemSizeInBits, TD);

  // If we have a memcpy/memmove, the only case we can handle is if this is a
  // copy from constant memory. In that case, we can read directly from the
  // constant memory.
  MemTransferInst *MTI = cast<MemTransferInst>(MI);

  Constant *Src = dyn_cast<Constant>(MTI->getSource());
  if (Src == 0) return -1;

  GlobalVariable *GV = dyn_cast<GlobalVariable>(Src->getUnderlyingObject());
  if (GV == 0 || !GV->isConstant()) return -1;

  // See if the access is within the bounds of the transfer.
  int Offset = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                              MI->getDest(), MemSizeInBits, TD);
  if (Offset == -1)
    return Offset;

  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  if (ConstantFoldLoadFromConstPtr(Src, &TD))
    return Offset;
  return -1;
}
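
// Illustrative example (not from the original source): an i32 load at offset
// 4 into "memset(%p, 0, 16)" is answered from the memset's fill value, and a
// load out of "memcpy(%p, @G, 16)" where @G is a constant global folds to a
// constant read from @G's initializer at the matching offset.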

/// GetStoreValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store. This means
/// that the store *may* provide bits used by the load but we can't be sure
/// because the pointers don't mustalias. Check this case to see if there is
/// anything more we can do before we give up.
static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
                                   const Type *LoadTy,
                                   Instruction *InsertPt, const TargetData &TD){
  LLVMContext &Ctx = SrcVal->getType()->getContext();

  uint64_t StoreSize = (TD.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;
  uint64_t LoadSize = (TD.getTypeSizeInBits(LoadTy) + 7) / 8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // Compute which bits of the stored value are being used by the load. Convert
  // to an integer type to start with.
  if (SrcVal->getType()->isPointerTy())
    SrcVal = Builder.CreatePtrToInt(SrcVal, TD.getIntPtrType(Ctx), "tmp");
  if (!SrcVal->getType()->isIntegerTy())
    SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize*8),
                                   "tmp");

  // Shift the bits to the least significant depending on endianness.
  unsigned ShiftAmt;
  if (TD.isLittleEndian())
    ShiftAmt = Offset*8;
  else
    ShiftAmt = (StoreSize-LoadSize-Offset)*8;

  if (ShiftAmt)
    SrcVal = Builder.CreateLShr(SrcVal, ShiftAmt, "tmp");

  if (LoadSize != StoreSize)
    SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx, LoadSize*8),
                                 "tmp");

  return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD);
}

/// GetMemInstValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering mem intrinsic.
static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
                                     const Type *LoadTy, Instruction *InsertPt,
                                     const TargetData &TD){
  LLVMContext &Ctx = LoadTy->getContext();
  uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;

  IRBuilder<> Builder(InsertPt->getParent(), InsertPt);

  // We know that this method is only called when the mem transfer fully
  // provides the bits for the load.
  if (MemSetInst *MSI = dyn_cast<MemSetInst>(SrcInst)) {
    // memset(P, 'x', 1234) -> splat('x'), even if x is a variable, and
    // independently of what the offset is.
    Value *Val = MSI->getValue();
    if (LoadSize != 1)
      Val = Builder.CreateZExt(Val, IntegerType::get(Ctx, LoadSize*8));

    Value *OneElt = Val;

    // Splat the value out to the right number of bits.
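    // Illustrative example (not from the original source): for a memset byte
    // of 0xAB feeding an i32 load, the shift-and-or doubling below builds
    // 0xAB -> 0xABAB -> 0xABABABAB in two iterations.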
    for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize; ) {
      // If we can double the number of bytes set, do it.
      if (NumBytesSet*2 <= LoadSize) {
        Value *ShVal = Builder.CreateShl(Val, NumBytesSet*8);
        Val = Builder.CreateOr(Val, ShVal);
        NumBytesSet <<= 1;
        continue;
      }

      // Otherwise insert one byte at a time.
      Value *ShVal = Builder.CreateShl(Val, 1*8);
      Val = Builder.CreateOr(OneElt, ShVal);
      ++NumBytesSet;
    }

    return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, TD);
  }

  // Otherwise, this is a memcpy/memmove from a constant global.
  MemTransferInst *MTI = cast<MemTransferInst>(SrcInst);
  Constant *Src = cast<Constant>(MTI->getSource());

  // See if we can constant fold a load from the constant with the offset
  // applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 llvm::Type::getInt8PtrTy(Src->getContext()));
  Constant *OffsetCst =
    ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Src, &OffsetCst, 1);
  Src = ConstantExpr::getBitCast(Src, PointerType::getUnqual(LoadTy));
  return ConstantFoldLoadFromConstPtr(Src, &TD);
}

namespace {

struct AvailableValueInBlock {
  /// BB - The basic block in question.
  BasicBlock *BB;

  enum ValType {
    SimpleVal,  // A simple offsetted value that is accessed.
    MemIntrin   // A memory intrinsic which is loaded from.
  };

  /// V - The value that is live out of the block.
  PointerIntPair<Value *, 1, ValType> Val;

  /// Offset - The byte offset in Val that is interesting for the load query.
  unsigned Offset;

  static AvailableValueInBlock get(BasicBlock *BB, Value *V,
                                   unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(V);
    Res.Val.setInt(SimpleVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValueInBlock getMI(BasicBlock *BB, MemIntrinsic *MI,
                                     unsigned Offset = 0) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.Val.setPointer(MI);
    Res.Val.setInt(MemIntrin);
    Res.Offset = Offset;
    return Res;
  }

  bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
  Value *getSimpleValue() const {
    assert(isSimpleValue() && "Wrong accessor");
    return Val.getPointer();
  }

  MemIntrinsic *getMemIntrinValue() const {
    assert(!isSimpleValue() && "Wrong accessor");
    return cast<MemIntrinsic>(Val.getPointer());
  }

  /// MaterializeAdjustedValue - Emit code into this block to adjust the value
  /// defined here to the specified type. This handles various coercion cases.
  Value *MaterializeAdjustedValue(const Type *LoadTy,
                                  const TargetData *TD) const {
    Value *Res;
    if (isSimpleValue()) {
      Res = getSimpleValue();
      if (Res->getType() != LoadTy) {
        assert(TD && "Need target data to handle type mismatch case");
        Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(),
                                   *TD);

        DEBUG(errs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << "  "
                     << *getSimpleValue() << '\n'
                     << *Res << '\n' << "\n\n\n");
      }
    } else {
      Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset,
                                   LoadTy, BB->getTerminator(), *TD);
      DEBUG(errs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
                   << "  " << *getMemIntrinValue() << '\n'
                   << *Res << '\n' << "\n\n\n");
    }
    return Res;
  }
};

} // end anonymous namespace

/// ConstructSSAForLoadSet - Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate LI. This returns the value
/// that should be used at LI's definition site.
static Value *ConstructSSAForLoadSet(LoadInst *LI,
                         SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
                                     const TargetData *TD,
                                     const DominatorTree &DT,
                                     AliasAnalysis *AA) {
  // Check for the fully redundant, dominating load case. In this case, we can
  // just use the dominating value directly.
  if (ValuesPerBlock.size() == 1 &&
      DT.properlyDominates(ValuesPerBlock[0].BB, LI->getParent()))
    return ValuesPerBlock[0].MaterializeAdjustedValue(LI->getType(), TD);

  // Otherwise, we have to construct SSA form.
  SmallVector<PHINode*, 8> NewPHIs;
  SSAUpdater SSAUpdate(&NewPHIs);
  SSAUpdate.Initialize(LI->getType(), LI->getName());

  const Type *LoadTy = LI->getType();

  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
    const AvailableValueInBlock &AV = ValuesPerBlock[i];
    BasicBlock *BB = AV.BB;

    if (SSAUpdate.HasValueForBlock(BB))
      continue;

    SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LoadTy, TD));
  }

  // Perform PHI construction.
  Value *V = SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());

  // If new PHI nodes were created, notify alias analysis.
  if (V->getType()->isPointerTy())
    for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
      AA->copyValue(LI, NewPHIs[i]);

  return V;
}
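
// Illustrative example (not from the original source): if each predecessor of
// the load's block stores a value to the loaded pointer, both stored values
// are registered as available and GetValueInMiddleOfBlock produces a new PHI
// of them, which then replaces the load.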

static bool isLifetimeStart(const Instruction *Inst) {
  if (const IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
    return II->getIntrinsicID() == Intrinsic::lifetime_start;
  return false;
}

/// processNonLocalLoad - Attempt to eliminate a load whose dependencies are
/// non-local by performing PHI construction.
bool GVN::processNonLocalLoad(LoadInst *LI,
                              SmallVectorImpl<Instruction*> &toErase) {
  // Find the non-local dependencies of the load.
  SmallVector<NonLocalDepResult, 64> Deps;
  MD->getNonLocalPointerDependency(LI->getOperand(0), true, LI->getParent(),
                                   Deps);
  //DEBUG(dbgs() << "INVESTIGATING NONLOCAL LOAD: "
  //             << Deps.size() << *LI << '\n');

  // If we had to process more than one hundred blocks to find the
  // dependencies, this load isn't worth worrying about. Optimizing
  // it will be too expensive.
  if (Deps.size() > 100)
    return false;

  // If we had a phi translation failure, we'll have a single entry which is a
  // clobber in the current block. Reject this early.
  if (Deps.size() == 1 && Deps[0].getResult().isClobber()) {
    DEBUG(
      dbgs() << "GVN: non-local load ";
      WriteAsOperand(dbgs(), LI);
      dbgs() << " is clobbered by " << *Deps[0].getResult().getInst() << '\n';
    );
    return false;
  }

  // Filter out useless results (non-locals, etc). Keep track of the blocks
  // where we have a value available in repl, also keep track of whether we see
  // dependencies that produce an unknown value for the load (such as a call
  // that could potentially clobber the load).
  SmallVector<AvailableValueInBlock, 16> ValuesPerBlock;
  SmallVector<BasicBlock*, 16> UnavailableBlocks;

  const TargetData *TD = 0;

  for (unsigned i = 0, e = Deps.size(); i != e; ++i) {
    BasicBlock *DepBB = Deps[i].getBB();
    MemDepResult DepInfo = Deps[i].getResult();

    if (DepInfo.isClobber()) {
      // The address being loaded in this non-local block may not be the same
      // as the pointer operand of the load if PHI translation occurs. Make
      // sure to consider the right address.
      Value *Address = Deps[i].getAddress();

      // If the dependence is to a store that writes to a superset of the bits
      // read by the load, we can extract the bits we need for the load from
      // the stored value.
      if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();
        if (TD && Address) {
          int Offset = AnalyzeLoadFromClobberingStore(LI->getType(), Address,
                                                      DepSI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                          DepSI->getOperand(0),
                                                                Offset));
            continue;
          }
        }
      }

      // If the clobbering value is a memset/memcpy/memmove, see if we can
      // forward a value on from it.
      if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();
        if (TD && Address) {
          int Offset = AnalyzeLoadFromClobberingMemInst(LI->getType(), Address,
                                                        DepMI, *TD);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI,
                                                                  Offset));
            continue;
          }
        }
      }

      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    Instruction *DepInst = DepInfo.getInst();

    // Loading the allocation -> undef.
    if (isa<AllocaInst>(DepInst) || isMalloc(DepInst) ||
        // Loading immediately after lifetime begin -> undef.
        isLifetimeStart(DepInst)) {
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                             UndefValue::get(LI->getType())));
      continue;
    }

    if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
      // Reject loads and stores that are to the same address but are of
      // different types if we have to.
      if (S->getOperand(0)->getType() != LI->getType()) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();

        // If the stored value is larger or equal to the loaded value, we can
        // reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(S->getOperand(0),
                                                        LI->getType(), *TD)) {
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }

      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                          S->getOperand(0)));
      continue;
    }

    if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
      // If the types mismatch and we can't handle it, reject reuse of the
      // load.
      if (LD->getType() != LI->getType()) {
        if (TD == 0)
          TD = getAnalysisIfAvailable<TargetData>();

        // If the previously loaded value is larger or equal to the value we
        // want, we can reuse it.
        if (TD == 0 || !CanCoerceMustAliasedValueToLoad(LD, LI->getType(),
                                                        *TD)) {
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, LD));
      continue;
    }

    UnavailableBlocks.push_back(DepBB);
    continue;
  }

  // If we have no predecessors that produce a known value for this load, exit
  // early.
  if (ValuesPerBlock.empty()) return false;

  // If all of the instructions we depend on produce a known value for this
  // load, then it is fully redundant and we can use PHI insertion to compute
  // its value. Insert PHIs and remove the fully redundant value now.
  if (UnavailableBlocks.empty()) {
    DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');

    // Perform PHI construction.
    Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT,
                                      VN.getAliasAnalysis());
    LI->replaceAllUsesWith(V);

    if (isa<PHINode>(V))
      V->takeName(LI);
    if (V->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(V);
    VN.erase(LI);
    toErase.push_back(LI);
    ++NumGVNLoad;
    return true;
  }

  if (!EnablePRE || !EnableLoadPRE)
    return false;

  // Okay, we have *some* definitions of the value. This means that the value
  // is available in some of our (transitive) predecessors. Let's think about
  // doing PRE of this load. This will involve inserting a new load into the
  // predecessor when it's not available. We could do this in general, but
  // prefer to not increase code size. As such, we only do this when we know
  // that we only have to insert *one* load (which means we're basically moving
  // the load, not inserting a new one).
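
  // Illustrative example (not from the original source):
  //   BB1: store i32 %v, i32* %p     BB2: (no store or load of %p)
  //       \                           /
  //           BB3: %x = load i32* %p
  // The load is fully available from BB1 but not from BB2; load PRE inserts
  // a copy of the load at the end of BB2 and merges it with %v via a PHI in
  // BB3.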

  SmallPtrSet<BasicBlock *, 4> Blockers;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    Blockers.insert(UnavailableBlocks[i]);

  // Let's find the first basic block with more than one predecessor. Walk
  // backwards through predecessors if needed.
  BasicBlock *LoadBB = LI->getParent();
  BasicBlock *TmpBB = LoadBB;

  bool isSinglePred = false;
  bool allSingleSucc = true;
  while (TmpBB->getSinglePredecessor()) {
    isSinglePred = true;
    TmpBB = TmpBB->getSinglePredecessor();
    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
      return false;
    if (Blockers.count(TmpBB))
      return false;

    // If any of these blocks has more than one successor (i.e. if the edge we
    // just traversed was critical), then there are other paths through this
    // block along which the load may not be anticipated. Hoisting the load
    // above this block would be adding the load to execution paths along
    // which it was not previously executed.
    if (TmpBB->getTerminator()->getNumSuccessors() != 1)
      allSingleSucc = false;
  }

  assert(TmpBB);
  LoadBB = TmpBB;

  // FIXME: It is extremely unclear what this loop is doing, other than
  // artificially restricting loadpre.
  if (isSinglePred) {
    bool isHot = false;
    for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
      const AvailableValueInBlock &AV = ValuesPerBlock[i];
      if (AV.isSimpleValue())
        // "Hot" Instruction is in some loop (because it dominates its dep.
        // instruction).
        if (Instruction *I = dyn_cast<Instruction>(AV.getSimpleValue()))
          if (DT->dominates(LI, I)) {
            isHot = true;
            break;
          }
    }

    // We are interested only in "hot" instructions. We don't want to do any
    // mis-optimizations here.
    if (!isHot)
      return false;
  }

  // Check to see how many predecessors have the loaded value fully
  // available.
  DenseMap<BasicBlock*, Value*> PredLoads;
  DenseMap<BasicBlock*, char> FullyAvailableBlocks;
  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
    FullyAvailableBlocks[ValuesPerBlock[i].BB] = true;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    FullyAvailableBlocks[UnavailableBlocks[i]] = false;

  SmallVector<std::pair<TerminatorInst*, unsigned>, 4> NeedToSplit;
  for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB);
       PI != E; ++PI) {
    BasicBlock *Pred = *PI;
    if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks)) {
      continue;
    }
    PredLoads[Pred] = 0;

    if (Pred->getTerminator()->getNumSuccessors() != 1) {
      if (isa<IndirectBrInst>(Pred->getTerminator())) {
        DEBUG(dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
              << Pred->getName() << "': " << *LI << '\n');
        return false;
      }
      unsigned SuccNum = GetSuccessorNumber(Pred, LoadBB);
      NeedToSplit.push_back(std::make_pair(Pred->getTerminator(), SuccNum));
    }
  }
  if (!NeedToSplit.empty()) {
    toSplit.append(NeedToSplit.begin(), NeedToSplit.end());
    return false;
  }

  // Decide whether PRE is profitable for this load.
  unsigned NumUnavailablePreds = PredLoads.size();
  assert(NumUnavailablePreds != 0 &&
         "Fully available value should be eliminated above!");

  // If this load is unavailable in multiple predecessors, reject it.
  // FIXME: If we could restructure the CFG, we could make a common pred with
  // all the preds that don't have an available LI and insert a new load into
  // that one block.
  if (NumUnavailablePreds != 1)
    return false;

  // Check if the load can safely be moved to all the unavailable predecessors.
  bool CanDoPRE = true;
  SmallVector<Instruction*, 8> NewInsts;
  for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
         E = PredLoads.end(); I != E; ++I) {
    BasicBlock *UnavailablePred = I->first;

    // Do PHI translation to get its value in the predecessor if necessary. The
    // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.
    //
    // If all preds have a single successor, then we know it is safe to insert
    // the load on the pred (?!?), so we can insert code to materialize the
    // pointer if it is not available.
    PHITransAddr Address(LI->getOperand(0), TD);
    Value *LoadPtr = 0;
    if (allSingleSucc) {
      LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
                                                  *DT, NewInsts);
    } else {
      Address.PHITranslateValue(LoadBB, UnavailablePred, DT);
      LoadPtr = Address.getAddr();
    }

    // If we couldn't find or insert a computation of this phi translated
    // value, we fail PRE.
    if (LoadPtr == 0) {
      DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
            << *LI->getOperand(0) << "\n");
      CanDoPRE = false;
      break;
    }

    // Make sure it is valid to move this load here. We have to watch out for:
    //  @1 = getelementptr (i8* p, ...
    //  test p and branch if == 0
    //  load @1
    // It is valid to have the getelementptr before the test, even if p can be
    // 0, as getelementptr only does address arithmetic.
    // If we are not pushing the value through any multiple-successor blocks
    // we do not have this case. Otherwise, check that the load is safe to
    // put anywhere; this can be improved, but should be conservatively safe.
    if (!allSingleSucc &&
        // FIXME: REEVALUATE THIS.
        !isSafeToLoadUnconditionally(LoadPtr,
                                     UnavailablePred->getTerminator(),
                                     LI->getAlignment(), TD)) {
      CanDoPRE = false;
      break;
    }

    I->second = LoadPtr;
  }

  if (!CanDoPRE) {
    while (!NewInsts.empty())
      NewInsts.pop_back_val()->eraseFromParent();
    return false;
  }

  // Okay, we can eliminate this load by inserting a reload in the predecessor
  // and using PHI construction to get the value in the other predecessors, do
  // it.
  DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
  DEBUG(if (!NewInsts.empty())
          dbgs() << "INSERTED " << NewInsts.size() << " INSTS: "
                 << *NewInsts.back() << '\n');

  // Assign value numbers to the new instructions.
  for (unsigned i = 0, e = NewInsts.size(); i != e; ++i) {
    // FIXME: We really _ought_ to insert these value numbers into their
    // parent's availability map. However, in doing so, we risk getting into
    // ordering issues. If a block hasn't been processed yet, we would be
    // marking a value as AVAIL-IN, which isn't what we intend.
    VN.lookup_or_add(NewInsts[i]);
  }

  for (DenseMap<BasicBlock*, Value*>::iterator I = PredLoads.begin(),
         E = PredLoads.end(); I != E; ++I) {
    BasicBlock *UnavailablePred = I->first;
    Value *LoadPtr = I->second;

    Value *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
                                  LI->getAlignment(),
                                  UnavailablePred->getTerminator());

    // Add the newly created load.
    ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,
                                                        NewLoad));
    MD->invalidateCachedPointerInfo(LoadPtr);
    DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
  }

  // Perform PHI construction.
  Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, TD, *DT,
                                    VN.getAliasAnalysis());
  LI->replaceAllUsesWith(V);
  if (isa<PHINode>(V))
    V->takeName(LI);
  if (V->getType()->isPointerTy())
    MD->invalidateCachedPointerInfo(V);
  VN.erase(LI);
  toErase.push_back(LI);
  ++NumPRELoad;
  return true;
}

/// processLoad - Attempt to eliminate a load, first by eliminating it
/// locally, and then attempting non-local elimination if that fails.
bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
  if (!MD)
    return false;

  if (L->isVolatile())
    return false;

  // ... to a pointer that has been loaded from before...
  MemDepResult Dep = MD->getDependency(L);

  // If the value isn't available, don't do anything!
  if (Dep.isClobber()) {
    // Check to see if we have something like this:
    //   store i32 123, i32* %P
    //   %A = bitcast i32* %P to i8*
    //   %B = gep i8* %A, i32 1
    //   %C = load i8* %B
    //
    // We could do that by recognizing if the clobber instructions are obviously
    // a common base + constant offset, and if the previous store (or memset)
    // completely covers this load. This sort of thing can happen in bitfield
    // access code.
    Value *AvailVal = 0;
    if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst()))
      if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
        int Offset = AnalyzeLoadFromClobberingStore(L->getType(),
                                                    L->getPointerOperand(),
                                                    DepSI, *TD);
        if (Offset != -1)
          AvailVal = GetStoreValueForLoad(DepSI->getOperand(0), Offset,
                                          L->getType(), L, *TD);
      }

    // If the clobbering value is a memset/memcpy/memmove, see if we can forward
    // a value on from it.
    if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) {
      if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
        int Offset = AnalyzeLoadFromClobberingMemInst(L->getType(),
                                                      L->getPointerOperand(),
                                                      DepMI, *TD);
        if (Offset != -1)
          AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L,*TD);
      }
    }

    if (AvailVal) {
      DEBUG(dbgs() << "GVN COERCED INST:\n" << *Dep.getInst() << '\n'
                   << *AvailVal << '\n' << *L << "\n\n\n");

      // Replace the load!
      L->replaceAllUsesWith(AvailVal);
      if (AvailVal->getType()->isPointerTy())
        MD->invalidateCachedPointerInfo(AvailVal);
      VN.erase(L);
      toErase.push_back(L);
      ++NumGVNLoad;
      return true;
    }

    DEBUG(
      // fast print dep, using operator<< on instruction would be too slow
      dbgs() << "GVN: load ";
      WriteAsOperand(dbgs(), L);
      Instruction *I = Dep.getInst();
      dbgs() << " is clobbered by " << *I << '\n';
    );
    return false;
  }
1795 // If it is defined in another block, try harder.
1796 if (Dep.isNonLocal())
1797 return processNonLocalLoad(L, toErase);
  Instruction *DepInst = Dep.getInst();
  if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
    Value *StoredVal = DepSI->getOperand(0);

    // The store and load are to a must-aliased pointer, but they may not
    // actually have the same type.  See if we know how to reuse the stored
    // value (depending on its type).
    const TargetData *TD = 0;
    if (StoredVal->getType() != L->getType()) {
      if ((TD = getAnalysisIfAvailable<TargetData>())) {
        StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(),
                                                   L, *TD);
        if (StoredVal == 0)
          return false;

        DEBUG(dbgs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
                     << '\n' << *L << "\n\n\n");
      } else {
        return false;
      }
    }
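
    // Coercion sketch (hypothetical example): if an i64 was stored but an
    // i32 is loaded from the same pointer, the forwarded value is rebuilt as
    // roughly "trunc i64 %StoredVal to i32" (with an additional shift on
    // big-endian targets) rather than giving up on the forwarding.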

    // Remove it!
    L->replaceAllUsesWith(StoredVal);
    if (StoredVal->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(StoredVal);
    VN.erase(L);
    toErase.push_back(L);
    ++NumGVNLoad;
    return true;
  }
  if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
    Value *AvailableVal = DepLI;

    // The loads are of a must-aliased pointer, but they may not actually have
    // the same type.  See if we know how to reuse the previously loaded value
    // (depending on its type).
    const TargetData *TD = 0;
    if (DepLI->getType() != L->getType()) {
      if ((TD = getAnalysisIfAvailable<TargetData>())) {
        AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(),
                                                      L, *TD);
        if (AvailableVal == 0)
          return false;

        DEBUG(dbgs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal
                     << "\n" << *L << "\n\n\n");
      } else {
        return false;
      }
    }
    // Remove it!
    L->replaceAllUsesWith(AvailableVal);
    if (DepLI->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(DepLI);
    VN.erase(L);
    toErase.push_back(L);
    ++NumGVNLoad;
    return true;
  }

  // If this load really doesn't depend on anything, then we must be loading
  // an undef value.  This can happen when loading from a fresh allocation
  // with no intervening stores, for example.
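  // For instance (hypothetical IR):
  //   %p = alloca i32
  //   %v = load i32* %p     ; no intervening store, so %v folds to undef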
  if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) {
    L->replaceAllUsesWith(UndefValue::get(L->getType()));
    VN.erase(L);
    toErase.push_back(L);
    ++NumGVNLoad;
    return true;
  }

  // If this load occurs right after a lifetime begin, the loaded value is
  // undefined.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(DepInst)) {
    if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
      L->replaceAllUsesWith(UndefValue::get(L->getType()));
      VN.erase(L);
      toErase.push_back(L);
      ++NumGVNLoad;
      return true;
    }
  }

  return false;
}
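
/// lookupNumber - Look up the leader for value number 'num' in the scope of
/// BB, walking up through the scopes of dominating blocks until a binding is
/// found.  Returns the available value, or null if there is none.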
Value *GVN::lookupNumber(BasicBlock *BB, uint32_t num) {
  DenseMap<BasicBlock*, ValueNumberScope*>::iterator I = localAvail.find(BB);
  if (I == localAvail.end())
    return 0;

  ValueNumberScope *Locals = I->second;
  while (Locals) {
    DenseMap<uint32_t, Value*>::iterator TI = Locals->table.find(num);
    if (TI != Locals->table.end())
      return TI->second;
    Locals = Locals->parent;
  }

  return 0;
}
/// processInstruction - When calculating availability, handle an instruction
/// by inserting it into the appropriate sets.
bool GVN::processInstruction(Instruction *I,
                             SmallVectorImpl<Instruction*> &toErase) {
  // Ignore dbg info intrinsics.
  if (isa<DbgInfoIntrinsic>(I))
    return false;

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    bool Changed = processLoad(LI, toErase);

    if (!Changed) {
      unsigned Num = VN.lookup_or_add(LI);
      localAvail[I->getParent()]->table.insert(std::make_pair(Num, LI));
    }

    return Changed;
  }
  uint32_t NextNum = VN.getNextUnusedValueNumber();
  unsigned Num = VN.lookup_or_add(I);

  if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));

    if (!BI->isConditional() || isa<Constant>(BI->getCondition()))
      return false;

    Value *BranchCond = BI->getCondition();
    uint32_t CondVN = VN.lookup_or_add(BranchCond);

    BasicBlock *TrueSucc = BI->getSuccessor(0);
    BasicBlock *FalseSucc = BI->getSuccessor(1);

    if (TrueSucc->getSinglePredecessor())
      localAvail[TrueSucc]->table[CondVN] =
        ConstantInt::getTrue(TrueSucc->getContext());
    if (FalseSucc->getSinglePredecessor())
      localAvail[FalseSucc]->table[CondVN] =
        ConstantInt::getFalse(FalseSucc->getContext());
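
    // For illustration (hypothetical IR): after "br i1 %cmp, label %T,
    // label %F", looking up %cmp's value number inside %T yields "i1 true"
    // and inside %F yields "i1 false", provided each successor is reached
    // only through this branch.
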
    return false;

  // Allocations are always uniquely numbered, so we can save time and memory
  // by fast failing them.
  } else if (isa<AllocaInst>(I) || isa<TerminatorInst>(I)) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
    return false;
  }

  // Collapse PHI nodes.
  if (PHINode *p = dyn_cast<PHINode>(I)) {
    Value *constVal = CollapsePhi(p);
    if (constVal) {
      p->replaceAllUsesWith(constVal);
      if (MD && constVal->getType()->isPointerTy())
        MD->invalidateCachedPointerInfo(constVal);
      VN.erase(p);

      toErase.push_back(p);
    } else {
      localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
    }

  // If the number we were assigned was a brand new VN, then we don't
  // need to do a lookup to see if the number already exists
  // somewhere in the domtree: it can't!
  } else if (Num == NextNum) {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));

  // Perform fast-path value-number based elimination of values inherited from
  // dominators.
  } else if (Value *repl = lookupNumber(I->getParent(), Num)) {
    // Remove it!
    VN.erase(I);
    I->replaceAllUsesWith(repl);
    if (MD && repl->getType()->isPointerTy())
      MD->invalidateCachedPointerInfo(repl);
    toErase.push_back(I);
    return true;

  } else {
    localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
  }

  return false;
}
/// runOnFunction - This is the main transformation entry point for a function.
bool GVN::runOnFunction(Function &F) {
  if (!NoLoads)
    MD = &getAnalysis<MemoryDependenceAnalysis>();
  DT = &getAnalysis<DominatorTree>();
  VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
  VN.setMemDep(MD);
  VN.setDomTree(DT);

  bool Changed = false;
  bool ShouldContinue = true;

  // Merge unconditional branches, allowing PRE to catch more
  // optimization opportunities.
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
    BasicBlock *BB = FI;
    ++FI;
    bool removedBlock = MergeBlockIntoPredecessor(BB, this);
    if (removedBlock) ++NumGVNBlocks;

    Changed |= removedBlock;
  }

  unsigned Iteration = 0;

  while (ShouldContinue) {
    DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
    ShouldContinue = iterateOnFunction(F);
    if (splitCriticalEdges())
      ShouldContinue = true;
    Changed |= ShouldContinue;
    ++Iteration;
  }

  if (EnablePRE) {
    bool PREChanged = true;
    while (PREChanged) {
      PREChanged = performPRE(F);
      Changed |= PREChanged;
    }
  }
  // FIXME: Should perform GVN again after PRE does something.  PRE can move
  // computations into blocks where they become fully redundant.  Note that
  // we can't do this until PRE's critical edge splitting updates memdep.
  // Actually, when this happens, we should just fully integrate PRE into GVN.

  cleanupGlobalSets();

  return Changed;
}
bool GVN::processBlock(BasicBlock *BB) {
  // FIXME: Kill off toErase by doing erasing eagerly in a helper function (and
  // incrementing BI before processing an instruction).
  SmallVector<Instruction*, 8> toErase;
  bool ChangedFunction = false;

  for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
       BI != BE;) {
    ChangedFunction |= processInstruction(BI, toErase);
    if (toErase.empty()) {
      ++BI;
      continue;
    }

    // If we need some instructions deleted, do it now.
    NumGVNInstr += toErase.size();

    // Avoid iterator invalidation.
    bool AtStart = BI == BB->begin();
    if (!AtStart)
      --BI;

    for (SmallVectorImpl<Instruction*>::iterator I = toErase.begin(),
         E = toErase.end(); I != E; ++I) {
      DEBUG(dbgs() << "GVN removed: " << **I << '\n');
      if (MD) MD->removeInstruction(*I);
      (*I)->eraseFromParent();
      DEBUG(verifyRemoved(*I));
    }
    toErase.clear();

    if (AtStart)
      BI = BB->begin();
    else
      ++BI;
  }

  return ChangedFunction;
}
/// performPRE - Perform a purely local form of PRE that looks for diamond
/// control flow patterns and attempts to perform simple PRE at the join point.
bool GVN::performPRE(Function &F) {
  bool Changed = false;
  DenseMap<BasicBlock*, Value*> predMap;
  for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
       DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
    BasicBlock *CurrentBlock = *DI;

    // Nothing to PRE in the entry block.
    if (CurrentBlock == &F.getEntryBlock()) continue;

    for (BasicBlock::iterator BI = CurrentBlock->begin(),
         BE = CurrentBlock->end(); BI != BE; ) {
      Instruction *CurInst = BI++;

      if (isa<AllocaInst>(CurInst) ||
          isa<TerminatorInst>(CurInst) || isa<PHINode>(CurInst) ||
          CurInst->getType()->isVoidTy() ||
          CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
          isa<DbgInfoIntrinsic>(CurInst))
        continue;

      // We don't currently value number ANY inline asm calls.
      if (CallInst *CallI = dyn_cast<CallInst>(CurInst))
        if (CallI->isInlineAsm())
          continue;

      uint32_t ValNo = VN.lookup(CurInst);

      // Look for the predecessors for PRE opportunities.  We're
      // only trying to solve the basic diamond case, where
      // a value is computed in the successor and one predecessor,
      // but not the other.  We also explicitly disallow cases
      // where the successor is its own predecessor, because they're
      // more complicated to get right.
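      //
      // Illustrative diamond (hypothetical IR, names invented):
      //
      //   entry: br i1 %c, label %then, label %else
      //   then:  %a = add i32 %x, %y    ; expression available here
      //          br label %join
      //   else:  br label %join         ; ...but not here
      //   join:  %b = add i32 %x, %y    ; partially redundant
      //
      // PRE inserts a copy of the add into %else and replaces %b with a PHI
      // of the two available values.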
      unsigned NumWith = 0;
      unsigned NumWithout = 0;
      BasicBlock *PREPred = 0;
      predMap.clear();

      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI) {
        BasicBlock *P = *PI;
        // We're not interested in PRE where the block is its
        // own predecessor, or in blocks with predecessors
        // that are not reachable.
        if (P == CurrentBlock) {
          NumWithout = 2;
          break;
        } else if (!localAvail.count(P)) {
          NumWithout = 2;
          break;
        }

        DenseMap<uint32_t, Value*>::iterator predV =
                                            localAvail[P]->table.find(ValNo);
        if (predV == localAvail[P]->table.end()) {
          PREPred = P;
          ++NumWithout;
        } else if (predV->second == CurInst) {
          NumWithout = 2;
        } else {
          predMap[P] = predV->second;
          ++NumWith;
        }
      }

      // Don't do PRE when it might increase code size, i.e. when
      // we would need to insert instructions in more than one pred.
      if (NumWithout != 1 || NumWith == 0)
        continue;

      // Don't do PRE across an indirect branch.
      if (isa<IndirectBrInst>(PREPred->getTerminator()))
        continue;

      // We can't do PRE safely on a critical edge, so instead we schedule
      // the edge to be split and perform the PRE the next time we iterate
      // on the function.
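      //
      // (Inserting directly into PREPred would evaluate the expression on
      // every path leaving PREPred, not just the path into CurrentBlock;
      // splitting the edge gives us a block where the insertion is safe.)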
      unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock);
      if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
        toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
        continue;
      }

      // Instantiate the expression in the predecessor that lacked it.
      // Because we are going top-down through the block, all value numbers
      // will be available in the predecessor by the time we need them.  Any
      // that weren't originally present will have been instantiated earlier
      // in this loop.
      Instruction *PREInstr = CurInst->clone();
      bool success = true;
      for (unsigned i = 0, e = CurInst->getNumOperands(); i != e; ++i) {
        Value *Op = PREInstr->getOperand(i);
        if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
          continue;

        if (Value *V = lookupNumber(PREPred, VN.lookup(Op))) {
          PREInstr->setOperand(i, V);
        } else {
          success = false;
          break;
        }
      }

      // Fail out if we encounter an operand that is not available in
      // the PRE predecessor.  This is typically because of loads which
      // are not value numbered precisely.
      if (!success) {
        DEBUG(verifyRemoved(PREInstr));
        delete PREInstr;
        continue;
      }

      PREInstr->insertBefore(PREPred->getTerminator());
      PREInstr->setName(CurInst->getName() + ".pre");
      predMap[PREPred] = PREInstr;
      VN.add(PREInstr, ValNo);
      ++NumGVNPRE;

      // Update the availability map to include the new instruction.
      localAvail[PREPred]->table.insert(std::make_pair(ValNo, PREInstr));

      // Create a PHI to make the value available in this block.
      PHINode *Phi = PHINode::Create(CurInst->getType(),
                                     CurInst->getName() + ".pre-phi",
                                     CurrentBlock->begin());
      for (pred_iterator PI = pred_begin(CurrentBlock),
           PE = pred_end(CurrentBlock); PI != PE; ++PI) {
        BasicBlock *P = *PI;
        Phi->addIncoming(predMap[P], P);
      }
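
      // The join now looks like (hypothetical names):
      //   %b.pre-phi = phi i32 [ %b.pre, %pred.without ], [ %b, %pred.with ]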
      VN.add(Phi, ValNo);
      localAvail[CurrentBlock]->table[ValNo] = Phi;

      CurInst->replaceAllUsesWith(Phi);
      if (MD && Phi->getType()->isPointerTy())
        MD->invalidateCachedPointerInfo(Phi);
      VN.erase(CurInst);

      DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
      if (MD) MD->removeInstruction(CurInst);
      CurInst->eraseFromParent();
      DEBUG(verifyRemoved(CurInst));
      Changed = true;
    }
  }

  if (splitCriticalEdges())
    Changed = true;

  return Changed;
}
/// splitCriticalEdges - Split critical edges found during the previous
/// iteration that may enable further optimization.
bool GVN::splitCriticalEdges() {
  if (toSplit.empty())
    return false;

  do {
    std::pair<TerminatorInst*, unsigned> Edge = toSplit.pop_back_val();
    SplitCriticalEdge(Edge.first, Edge.second, this);
  } while (!toSplit.empty());
  if (MD) MD->invalidateCachedPredecessors();
  return true;
}
/// iterateOnFunction - Executes one iteration of GVN.
bool GVN::iterateOnFunction(Function &F) {
  cleanupGlobalSets();

  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI) {
    if (DI->getIDom())
      localAvail[DI->getBlock()] =
                   new ValueNumberScope(localAvail[DI->getIDom()->getBlock()]);
    else
      localAvail[DI->getBlock()] = new ValueNumberScope(0);
  }
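
  // Each block's scope chains to its immediate dominator's scope, so a
  // lookupNumber from any block sees exactly the leaders established by its
  // dominators; in effect, a scoped hash table laid over the dominator tree.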

  // Top-down walk of the dominator tree.
  bool Changed = false;
#if 0
  // Needed for value numbering with phi construction to work.
  ReversePostOrderTraversal<Function*> RPOT(&F);
  for (ReversePostOrderTraversal<Function*>::rpo_iterator RI = RPOT.begin(),
       RE = RPOT.end(); RI != RE; ++RI)
    Changed |= processBlock(*RI);
#else
  for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
       DE = df_end(DT->getRootNode()); DI != DE; ++DI)
    Changed |= processBlock(DI->getBlock());
#endif

  return Changed;
}
void GVN::cleanupGlobalSets() {
  VN.clear();

  for (DenseMap<BasicBlock*, ValueNumberScope*>::iterator
       I = localAvail.begin(), E = localAvail.end(); I != E; ++I)
    delete I->second;
  localAvail.clear();
}
/// verifyRemoved - Verify that the specified instruction does not occur in our
/// internal data structures.
void GVN::verifyRemoved(const Instruction *Inst) const {
  VN.verifyRemoved(Inst);

  // Walk through the value number scope to make sure the instruction isn't
  // ferreted away in it.
  for (DenseMap<BasicBlock*, ValueNumberScope*>::const_iterator
         I = localAvail.begin(), E = localAvail.end(); I != E; ++I) {
    const ValueNumberScope *VNS = I->second;

    while (VNS) {
      for (DenseMap<uint32_t, Value*>::const_iterator
             II = VNS->table.begin(), IE = VNS->table.end(); II != IE; ++II) {
        assert(II->second != Inst && "Inst still in value numbering scope!");
      }

      VNS = VNS->parent;
    }
  }
}