-//===- GVN.cpp - Eliminate redundant values and loads ------------===//
+//===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This pass performs global value numbering to eliminate fully redundant
// instructions. It also performs simple dead load elimination.
//
+// Note that this pass does the value numbering itself; it does not use the
+// ValueNumbering analysis passes.
+//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "gvn"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/IntrinsicInst.h"
-#include "llvm/Instructions.h"
-#include "llvm/ParameterAttributes.h"
+#include "llvm/LLVMContext.h"
#include "llvm/Value.h"
-#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
-#include "llvm/Target/TargetData.h"
-#include <list>
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/Local.h"
+#include <cstdio>
using namespace llvm;
-STATISTIC(NumGVNInstr, "Number of instructions deleted");
-STATISTIC(NumGVNLoad, "Number of loads deleted");
-STATISTIC(NumMemSetInfer, "Number of memsets inferred");
+STATISTIC(NumGVNInstr, "Number of instructions deleted");
+STATISTIC(NumGVNLoad, "Number of loads deleted");
+STATISTIC(NumGVNPRE, "Number of instructions PRE'd");
+STATISTIC(NumGVNBlocks, "Number of blocks merged");
+STATISTIC(NumPRELoad, "Number of loads PRE'd");
-namespace {
- cl::opt<bool>
- FormMemSet("form-memset-from-stores",
- cl::desc("Transform straight-line stores to memsets"),
- cl::init(true), cl::Hidden);
-}
+static cl::opt<bool> EnablePRE("enable-pre",
+ cl::init(true), cl::Hidden);
+static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true));
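+// Both knobs go through the usual cl::opt machinery, so they can be flipped
+// when running the pass, e.g. "opt -gvn -enable-pre=false foo.bc" (the input
+// file name here is only illustrative).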
//===----------------------------------------------------------------------===//
// ValueTable Class
//===----------------------------------------------------------------------===//

/// This class holds the mapping between values and value numbers.  It is used
/// as an efficient mechanism to determine the expression-wise equivalence of
/// two values.
namespace {
struct VISIBILITY_HIDDEN Expression {
- enum ExpressionOpcode { ADD, SUB, MUL, UDIV, SDIV, FDIV, UREM, SREM,
+ enum ExpressionOpcode { ADD, FADD, SUB, FSUB, MUL, FMUL,
+ UDIV, SDIV, FDIV, UREM, SREM,
FREM, SHL, LSHR, ASHR, AND, OR, XOR, ICMPEQ,
ICMPNE, ICMPUGT, ICMPUGE, ICMPULT, ICMPULE,
ICMPSGT, ICMPSGE, ICMPSLT, ICMPSLE, FCMPOEQ,
FCMPULT, FCMPULE, FCMPUNE, EXTRACT, INSERT,
SHUFFLE, SELECT, TRUNC, ZEXT, SEXT, FPTOUI,
FPTOSI, UITOFP, SITOFP, FPTRUNC, FPEXT,
- PTRTOINT, INTTOPTR, BITCAST, GEP, CALL, EMPTY,
- TOMBSTONE };
+ PTRTOINT, INTTOPTR, BITCAST, GEP, CALL, CONSTANT,
+ EMPTY, TOMBSTONE };
ExpressionOpcode opcode;
const Type* type;
}
bool operator!=(const Expression &other) const {
- if (opcode != other.opcode)
- return true;
- else if (opcode == EMPTY || opcode == TOMBSTONE)
- return false;
- else if (type != other.type)
- return true;
- else if (function != other.function)
- return true;
- else if (firstVN != other.firstVN)
- return true;
- else if (secondVN != other.secondVN)
- return true;
- else if (thirdVN != other.thirdVN)
- return true;
- else {
- if (varargs.size() != other.varargs.size())
- return true;
-
- for (size_t i = 0; i < varargs.size(); ++i)
- if (varargs[i] != other.varargs[i])
- return true;
-
- return false;
- }
+ return !(*this == other);
}
};
DenseMap<Value*, uint32_t> valueNumbering;
DenseMap<Expression, uint32_t> expressionNumbering;
AliasAnalysis* AA;
+ MemoryDependenceAnalysis* MD;
+ DominatorTree* DT;
uint32_t nextValueNumber;
Expression create_expression(CastInst* C);
Expression create_expression(GetElementPtrInst* G);
Expression create_expression(CallInst* C);
+ Expression create_expression(Constant* C);
public:
ValueTable() : nextValueNumber(1) { }
uint32_t lookup_or_add(Value* V);
void erase(Value* v);
unsigned size();
void setAliasAnalysis(AliasAnalysis* A) { AA = A; }
- uint32_t hash_operand(Value* v);
+ AliasAnalysis *getAliasAnalysis() const { return AA; }
+ void setMemDep(MemoryDependenceAnalysis* M) { MD = M; }
+ void setDomTree(DominatorTree* D) { DT = D; }
+ uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
+ void verifyRemoved(const Value *) const;
};
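+  // Intended use (sketch, not part of this patch): once setAliasAnalysis,
+  // setMemDep, and setDomTree are wired up, two values are treated as equal
+  // whenever lookup_or_add returns the same number for both.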
}
Expression::ExpressionOpcode ValueTable::getOpcode(BinaryOperator* BO) {
switch(BO->getOpcode()) {
default: // THIS SHOULD NEVER HAPPEN
- assert(0 && "Binary operator with unknown opcode?");
+ llvm_unreachable("Binary operator with unknown opcode?");
case Instruction::Add: return Expression::ADD;
+ case Instruction::FAdd: return Expression::FADD;
case Instruction::Sub: return Expression::SUB;
+ case Instruction::FSub: return Expression::FSUB;
case Instruction::Mul: return Expression::MUL;
+ case Instruction::FMul: return Expression::FMUL;
case Instruction::UDiv: return Expression::UDIV;
case Instruction::SDiv: return Expression::SDIV;
case Instruction::FDiv: return Expression::FDIV;
if (isa<ICmpInst>(C)) {
switch (C->getPredicate()) {
default: // THIS SHOULD NEVER HAPPEN
- assert(0 && "Comparison with unknown predicate?");
+ llvm_unreachable("Comparison with unknown predicate?");
case ICmpInst::ICMP_EQ: return Expression::ICMPEQ;
case ICmpInst::ICMP_NE: return Expression::ICMPNE;
case ICmpInst::ICMP_UGT: return Expression::ICMPUGT;
case ICmpInst::ICMP_SLT: return Expression::ICMPSLT;
case ICmpInst::ICMP_SLE: return Expression::ICMPSLE;
}
- }
- assert(isa<FCmpInst>(C) && "Unknown compare");
- switch (C->getPredicate()) {
- default: // THIS SHOULD NEVER HAPPEN
- assert(0 && "Comparison with unknown predicate?");
- case FCmpInst::FCMP_OEQ: return Expression::FCMPOEQ;
- case FCmpInst::FCMP_OGT: return Expression::FCMPOGT;
- case FCmpInst::FCMP_OGE: return Expression::FCMPOGE;
- case FCmpInst::FCMP_OLT: return Expression::FCMPOLT;
- case FCmpInst::FCMP_OLE: return Expression::FCMPOLE;
- case FCmpInst::FCMP_ONE: return Expression::FCMPONE;
- case FCmpInst::FCMP_ORD: return Expression::FCMPORD;
- case FCmpInst::FCMP_UNO: return Expression::FCMPUNO;
- case FCmpInst::FCMP_UEQ: return Expression::FCMPUEQ;
- case FCmpInst::FCMP_UGT: return Expression::FCMPUGT;
- case FCmpInst::FCMP_UGE: return Expression::FCMPUGE;
- case FCmpInst::FCMP_ULT: return Expression::FCMPULT;
- case FCmpInst::FCMP_ULE: return Expression::FCMPULE;
- case FCmpInst::FCMP_UNE: return Expression::FCMPUNE;
+ } else {
+ switch (C->getPredicate()) {
+ default: // THIS SHOULD NEVER HAPPEN
+ llvm_unreachable("Comparison with unknown predicate?");
+ case FCmpInst::FCMP_OEQ: return Expression::FCMPOEQ;
+ case FCmpInst::FCMP_OGT: return Expression::FCMPOGT;
+ case FCmpInst::FCMP_OGE: return Expression::FCMPOGE;
+ case FCmpInst::FCMP_OLT: return Expression::FCMPOLT;
+ case FCmpInst::FCMP_OLE: return Expression::FCMPOLE;
+ case FCmpInst::FCMP_ONE: return Expression::FCMPONE;
+ case FCmpInst::FCMP_ORD: return Expression::FCMPORD;
+ case FCmpInst::FCMP_UNO: return Expression::FCMPUNO;
+ case FCmpInst::FCMP_UEQ: return Expression::FCMPUEQ;
+ case FCmpInst::FCMP_UGT: return Expression::FCMPUGT;
+ case FCmpInst::FCMP_UGE: return Expression::FCMPUGE;
+ case FCmpInst::FCMP_ULT: return Expression::FCMPULT;
+ case FCmpInst::FCMP_ULE: return Expression::FCMPULE;
+ case FCmpInst::FCMP_UNE: return Expression::FCMPUNE;
+ }
}
}
Expression::ExpressionOpcode ValueTable::getOpcode(CastInst* C) {
switch(C->getOpcode()) {
default: // THIS SHOULD NEVER HAPPEN
- assert(0 && "Cast operator with unknown opcode?");
+ llvm_unreachable("Cast operator with unknown opcode?");
case Instruction::Trunc: return Expression::TRUNC;
case Instruction::ZExt: return Expression::ZEXT;
case Instruction::SExt: return Expression::SEXT;
}
}
-uint32_t ValueTable::hash_operand(Value* v) {
- if (CallInst* CI = dyn_cast<CallInst>(v))
- if (!AA->doesNotAccessMemory(CI))
- return nextValueNumber++;
-
- return lookup_or_add(v);
-}
-
Expression ValueTable::create_expression(CallInst* C) {
Expression e;
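+  // In this IR, operand 0 of a CallInst is the callee, so numbering starts at
+  // op_begin()+1 and covers only the actual arguments.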
for (CallInst::op_iterator I = C->op_begin()+1, E = C->op_end();
I != E; ++I)
- e.varargs.push_back(hash_operand(*I));
+ e.varargs.push_back(lookup_or_add(*I));
return e;
}
Expression ValueTable::create_expression(BinaryOperator* BO) {
Expression e;
- e.firstVN = hash_operand(BO->getOperand(0));
- e.secondVN = hash_operand(BO->getOperand(1));
+ e.firstVN = lookup_or_add(BO->getOperand(0));
+ e.secondVN = lookup_or_add(BO->getOperand(1));
e.thirdVN = 0;
e.function = 0;
e.type = BO->getType();
Expression ValueTable::create_expression(CmpInst* C) {
Expression e;
- e.firstVN = hash_operand(C->getOperand(0));
- e.secondVN = hash_operand(C->getOperand(1));
+ e.firstVN = lookup_or_add(C->getOperand(0));
+ e.secondVN = lookup_or_add(C->getOperand(1));
e.thirdVN = 0;
e.function = 0;
e.type = C->getType();
Expression ValueTable::create_expression(CastInst* C) {
Expression e;
- e.firstVN = hash_operand(C->getOperand(0));
+ e.firstVN = lookup_or_add(C->getOperand(0));
e.secondVN = 0;
e.thirdVN = 0;
e.function = 0;
Expression ValueTable::create_expression(ShuffleVectorInst* S) {
Expression e;
- e.firstVN = hash_operand(S->getOperand(0));
- e.secondVN = hash_operand(S->getOperand(1));
- e.thirdVN = hash_operand(S->getOperand(2));
+ e.firstVN = lookup_or_add(S->getOperand(0));
+ e.secondVN = lookup_or_add(S->getOperand(1));
+ e.thirdVN = lookup_or_add(S->getOperand(2));
e.function = 0;
e.type = S->getType();
e.opcode = Expression::SHUFFLE;
Expression ValueTable::create_expression(ExtractElementInst* E) {
Expression e;
- e.firstVN = hash_operand(E->getOperand(0));
- e.secondVN = hash_operand(E->getOperand(1));
+ e.firstVN = lookup_or_add(E->getOperand(0));
+ e.secondVN = lookup_or_add(E->getOperand(1));
e.thirdVN = 0;
e.function = 0;
e.type = E->getType();
Expression ValueTable::create_expression(InsertElementInst* I) {
Expression e;
- e.firstVN = hash_operand(I->getOperand(0));
- e.secondVN = hash_operand(I->getOperand(1));
- e.thirdVN = hash_operand(I->getOperand(2));
+ e.firstVN = lookup_or_add(I->getOperand(0));
+ e.secondVN = lookup_or_add(I->getOperand(1));
+ e.thirdVN = lookup_or_add(I->getOperand(2));
e.function = 0;
e.type = I->getType();
e.opcode = Expression::INSERT;
Expression ValueTable::create_expression(SelectInst* I) {
Expression e;
- e.firstVN = hash_operand(I->getCondition());
- e.secondVN = hash_operand(I->getTrueValue());
- e.thirdVN = hash_operand(I->getFalseValue());
+ e.firstVN = lookup_or_add(I->getCondition());
+ e.secondVN = lookup_or_add(I->getTrueValue());
+ e.thirdVN = lookup_or_add(I->getFalseValue());
e.function = 0;
e.type = I->getType();
e.opcode = Expression::SELECT;
Expression ValueTable::create_expression(GetElementPtrInst* G) {
Expression e;
-
- e.firstVN = hash_operand(G->getPointerOperand());
+
+ e.firstVN = lookup_or_add(G->getPointerOperand());
e.secondVN = 0;
e.thirdVN = 0;
e.function = 0;
for (GetElementPtrInst::op_iterator I = G->idx_begin(), E = G->idx_end();
I != E; ++I)
- e.varargs.push_back(hash_operand(*I));
+ e.varargs.push_back(lookup_or_add(*I));
return e;
}
// ValueTable External Functions
//===----------------------------------------------------------------------===//
+/// add - Insert a value into the table with a specified value number.
+void ValueTable::add(Value* V, uint32_t num) {
+ valueNumbering.insert(std::make_pair(V, num));
+}
+
/// lookup_or_add - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t ValueTable::lookup_or_add(Value* V) {
return VI->second;
if (CallInst* C = dyn_cast<CallInst>(V)) {
- if (AA->onlyReadsMemory(C)) { // includes doesNotAccessMemory
+ if (AA->doesNotAccessMemory(C)) {
Expression e = create_expression(C);
DenseMap<Expression, uint32_t>::iterator EI = expressionNumbering.find(e);
return nextValueNumber++;
}
+ } else if (AA->onlyReadsMemory(C)) {
+ Expression e = create_expression(C);
+
+ if (expressionNumbering.find(e) == expressionNumbering.end()) {
+ expressionNumbering.insert(std::make_pair(e, nextValueNumber));
+ valueNumbering.insert(std::make_pair(V, nextValueNumber));
+ return nextValueNumber++;
+ }
+
+ MemDepResult local_dep = MD->getDependency(C);
+
+ if (!local_dep.isDef() && !local_dep.isNonLocal()) {
+ valueNumbering.insert(std::make_pair(V, nextValueNumber));
+ return nextValueNumber++;
+ }
+
+ if (local_dep.isDef()) {
+ CallInst* local_cdep = cast<CallInst>(local_dep.getInst());
+
+ if (local_cdep->getNumOperands() != C->getNumOperands()) {
+ valueNumbering.insert(std::make_pair(V, nextValueNumber));
+ return nextValueNumber++;
+ }
+
+ for (unsigned i = 1; i < C->getNumOperands(); ++i) {
+ uint32_t c_vn = lookup_or_add(C->getOperand(i));
+ uint32_t cd_vn = lookup_or_add(local_cdep->getOperand(i));
+ if (c_vn != cd_vn) {
+ valueNumbering.insert(std::make_pair(V, nextValueNumber));
+ return nextValueNumber++;
+ }
+ }
+
+ uint32_t v = lookup_or_add(local_cdep);
+ valueNumbering.insert(std::make_pair(V, v));
+ return v;
+ }
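+    // Illustration (hypothetical IR, not from this patch): given a readonly
+    // function @f,
+    //   %a = call i32 @f(i32 %x)
+    //   %b = call i32 @f(i32 %x)   ; Def dependency on %a, matching operands
+    // the second call receives %a's value number, letting GVN delete it later.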
+
+ // Non-local case.
+ const MemoryDependenceAnalysis::NonLocalDepInfo &deps =
+ MD->getNonLocalCallDependency(CallSite(C));
+ // FIXME: call/call dependencies for readonly calls should return def, not
+ // clobber! Move the checking logic to MemDep!
+ CallInst* cdep = 0;
+
+ // Check to see if we have a single dominating call instruction that is
+ // identical to C.
+ for (unsigned i = 0, e = deps.size(); i != e; ++i) {
+ const MemoryDependenceAnalysis::NonLocalDepEntry *I = &deps[i];
+ // Ignore non-local dependencies.
+ if (I->second.isNonLocal())
+ continue;
+
+      // We don't handle non-dependencies.  If we already have a call, reject
+      // instruction dependencies.
+ if (I->second.isClobber() || cdep != 0) {
+ cdep = 0;
+ break;
+ }
+
+ CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->second.getInst());
+ // FIXME: All duplicated with non-local case.
+ if (NonLocalDepCall && DT->properlyDominates(I->first, C->getParent())){
+ cdep = NonLocalDepCall;
+ continue;
+ }
+
+ cdep = 0;
+ break;
+ }
+
+ if (!cdep) {
+ valueNumbering.insert(std::make_pair(V, nextValueNumber));
+ return nextValueNumber++;
+ }
+
+ if (cdep->getNumOperands() != C->getNumOperands()) {
+ valueNumbering.insert(std::make_pair(V, nextValueNumber));
+ return nextValueNumber++;
+ }
+ for (unsigned i = 1; i < C->getNumOperands(); ++i) {
+ uint32_t c_vn = lookup_or_add(C->getOperand(i));
+ uint32_t cd_vn = lookup_or_add(cdep->getOperand(i));
+ if (c_vn != cd_vn) {
+ valueNumbering.insert(std::make_pair(V, nextValueNumber));
+ return nextValueNumber++;
+ }
+ }
+
+ uint32_t v = lookup_or_add(cdep);
+ valueNumbering.insert(std::make_pair(V, v));
+ return v;
+
} else {
valueNumbering.insert(std::make_pair(V, nextValueNumber));
return nextValueNumber++;
valueNumbering.erase(V);
}
+/// verifyRemoved - Verify that the value is removed from all internal data
+/// structures.
+void ValueTable::verifyRemoved(const Value *V) const {
+  for (DenseMap<Value*, uint32_t>::const_iterator
+       I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
+ assert(I->first != V && "Inst still occurs in value numbering map!");
+ }
+}
+
//===----------------------------------------------------------------------===//
-// ValueNumberedSet Class
+// GVN Pass
//===----------------------------------------------------------------------===//
+
namespace {
-class VISIBILITY_HIDDEN ValueNumberedSet {
- private:
- SmallPtrSet<Value*, 8> contents;
- BitVector numbers;
- public:
- ValueNumberedSet() { numbers.resize(1); }
- ValueNumberedSet(const ValueNumberedSet& other) {
- numbers = other.numbers;
- contents = other.contents;
- }
-
- typedef SmallPtrSet<Value*, 8>::iterator iterator;
-
- iterator begin() { return contents.begin(); }
- iterator end() { return contents.end(); }
-
- bool insert(Value* v) { return contents.insert(v); }
- void insert(iterator I, iterator E) { contents.insert(I, E); }
- void erase(Value* v) { contents.erase(v); }
- unsigned count(Value* v) { return contents.count(v); }
- size_t size() { return contents.size(); }
-
- void set(unsigned i) {
- if (i >= numbers.size())
- numbers.resize(i+1);
-
- numbers.set(i);
- }
-
- void operator=(const ValueNumberedSet& other) {
- contents = other.contents;
- numbers = other.numbers;
- }
+ struct VISIBILITY_HIDDEN ValueNumberScope {
+ ValueNumberScope* parent;
+ DenseMap<uint32_t, Value*> table;
- void reset(unsigned i) {
- if (i < numbers.size())
- numbers.reset(i);
- }
-
- bool test(unsigned i) {
- if (i >= numbers.size())
- return false;
-
- return numbers.test(i);
- }
-
- void clear() {
- contents.clear();
- numbers.clear();
- }
-};
+ ValueNumberScope(ValueNumberScope* p) : parent(p) { }
+ };
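+  // Scopes chain along the dominator tree: each block's scope points at its
+  // immediate dominator's scope, so GVN::lookupNumber can search every
+  // dominating block by walking parent links.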
}
-//===----------------------------------------------------------------------===//
-// GVN Pass
-//===----------------------------------------------------------------------===//
-
namespace {
class VISIBILITY_HIDDEN GVN : public FunctionPass {
bool runOnFunction(Function &F);
public:
static char ID; // Pass identification, replacement for typeid
- GVN() : FunctionPass((intptr_t)&ID) { }
+ GVN() : FunctionPass(&ID) { }
private:
+ MemoryDependenceAnalysis *MD;
+ DominatorTree *DT;
+
ValueTable VN;
-
- DenseMap<BasicBlock*, ValueNumberedSet> availableOut;
+ DenseMap<BasicBlock*, ValueNumberScope*> localAvail;
typedef DenseMap<Value*, SmallPtrSet<Instruction*, 4> > PhiMapType;
PhiMapType phiMap;
    // This transformation requires dominator info
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
- AU.setPreservesCFG();
AU.addRequired<DominatorTree>();
AU.addRequired<MemoryDependenceAnalysis>();
AU.addRequired<AliasAnalysis>();
- AU.addRequired<TargetData>();
+
+ AU.addPreserved<DominatorTree>();
AU.addPreserved<AliasAnalysis>();
- AU.addPreserved<MemoryDependenceAnalysis>();
- AU.addPreserved<TargetData>();
}
    // Helper functions
// FIXME: eliminate or document these better
- Value* find_leader(ValueNumberedSet& vals, uint32_t v) ;
- void val_insert(ValueNumberedSet& s, Value* v);
bool processLoad(LoadInst* L,
- DenseMap<Value*, LoadInst*> &lastLoad,
SmallVectorImpl<Instruction*> &toErase);
- bool processStore(StoreInst *SI, SmallVectorImpl<Instruction*> &toErase);
bool processInstruction(Instruction* I,
- ValueNumberedSet& currAvail,
- DenseMap<Value*, LoadInst*>& lastSeenLoad,
SmallVectorImpl<Instruction*> &toErase);
bool processNonLocalLoad(LoadInst* L,
SmallVectorImpl<Instruction*> &toErase);
- bool processMemCpy(MemCpyInst* M, MemCpyInst* MDep,
- SmallVectorImpl<Instruction*> &toErase);
- bool performCallSlotOptzn(MemCpyInst* cpy, CallInst* C,
- SmallVectorImpl<Instruction*> &toErase);
- Value *GetValueForBlock(BasicBlock *BB, LoadInst* orig,
+ bool processBlock(BasicBlock* BB);
+ Value *GetValueForBlock(BasicBlock *BB, Instruction* orig,
DenseMap<BasicBlock*, Value*> &Phis,
bool top_level = false);
- void dump(DenseMap<BasicBlock*, Value*>& d);
+ void dump(DenseMap<uint32_t, Value*>& d);
bool iterateOnFunction(Function &F);
Value* CollapsePhi(PHINode* p);
bool isSafeReplacement(PHINode* p, Instruction* inst);
+ bool performPRE(Function& F);
+ Value* lookupNumber(BasicBlock* BB, uint32_t num);
+ bool mergeBlockIntoPredecessor(BasicBlock* BB);
+ Value* AttemptRedundancyElimination(Instruction* orig, unsigned valno);
+ void cleanupGlobalSets();
+ void verifyRemoved(const Instruction *I) const;
};
char GVN::ID = 0;
static RegisterPass<GVN> X("gvn",
"Global Value Numbering");
-/// find_leader - Given a set and a value number, return the first
-/// element of the set with that value number, or 0 if no such element
-/// is present
-Value* GVN::find_leader(ValueNumberedSet& vals, uint32_t v) {
- if (!vals.test(v))
- return 0;
-
- for (ValueNumberedSet::iterator I = vals.begin(), E = vals.end();
- I != E; ++I)
- if (v == VN.lookup(*I))
- return *I;
-
- assert(0 && "No leader found, but present bit is set?");
- return 0;
-}
-
-/// val_insert - Insert a value into a set only if there is not a value
-/// with the same value number already in the set
-void GVN::val_insert(ValueNumberedSet& s, Value* v) {
- uint32_t num = VN.lookup(v);
- if (!s.test(num))
- s.insert(v);
-}
-
-void GVN::dump(DenseMap<BasicBlock*, Value*>& d) {
+void GVN::dump(DenseMap<uint32_t, Value*>& d) {
printf("{\n");
- for (DenseMap<BasicBlock*, Value*>::iterator I = d.begin(),
+ for (DenseMap<uint32_t, Value*>::iterator I = d.begin(),
E = d.end(); I != E; ++I) {
- if (I->second == MemoryDependenceAnalysis::None)
- printf("None\n");
- else
+ printf("%d\n", I->first);
I->second->dump();
}
printf("}\n");
}
Value* GVN::CollapsePhi(PHINode* p) {
- DominatorTree &DT = getAnalysis<DominatorTree>();
Value* constVal = p->hasConstantValue();
-
if (!constVal) return 0;
Instruction* inst = dyn_cast<Instruction>(constVal);
if (!inst)
return constVal;
- if (DT.dominates(inst, p))
+ if (DT->dominates(inst, p))
if (isSafeReplacement(p, inst))
return inst;
return 0;
/// GetValueForBlock - Get the value to use within the specified basic block.
/// available values are in Phis.
-Value *GVN::GetValueForBlock(BasicBlock *BB, LoadInst* orig,
+Value *GVN::GetValueForBlock(BasicBlock *BB, Instruction* orig,
DenseMap<BasicBlock*, Value*> &Phis,
bool top_level) {
DenseMap<BasicBlock*, Value*>::iterator V = Phis.find(BB);
if (V != Phis.end() && !top_level) return V->second;
- BasicBlock* singlePred = BB->getSinglePredecessor();
- if (singlePred) {
- Value *ret = GetValueForBlock(singlePred, orig, Phis);
+ // If the block is unreachable, just return undef, since this path
+ // can't actually occur at runtime.
+ if (!DT->isReachableFromEntry(BB))
+ return Phis[BB] = Context->getUndef(orig->getType());
+
+ if (BasicBlock *Pred = BB->getSinglePredecessor()) {
+ Value *ret = GetValueForBlock(Pred, orig, Phis);
Phis[BB] = ret;
return ret;
}
+
+ // Get the number of predecessors of this block so we can reserve space later.
+ // If there is already a PHI in it, use the #preds from it, otherwise count.
+ // Getting it from the PHI is constant time.
+ unsigned NumPreds;
+ if (PHINode *ExistingPN = dyn_cast<PHINode>(BB->begin()))
+ NumPreds = ExistingPN->getNumIncomingValues();
+ else
+ NumPreds = std::distance(pred_begin(BB), pred_end(BB));
// Otherwise, the idom is the loop, so we need to insert a PHI node. Do so
// now, then get values to fill in the incoming values for the PHI.
- PHINode *PN = new PHINode(orig->getType(), orig->getName()+".rle",
- BB->begin());
- PN->reserveOperandSpace(std::distance(pred_begin(BB), pred_end(BB)));
+ PHINode *PN = PHINode::Create(orig->getType(), orig->getName()+".rle",
+ BB->begin());
+ PN->reserveOperandSpace(NumPreds);
- if (Phis.count(BB) == 0)
- Phis.insert(std::make_pair(BB, PN));
+ Phis.insert(std::make_pair(BB, PN));
// Fill in the incoming values for the block.
for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
PN->addIncoming(val, *PI);
}
- AliasAnalysis& AA = getAnalysis<AliasAnalysis>();
- AA.copyValue(orig, PN);
+ VN.getAliasAnalysis()->copyValue(orig, PN);
// Attempt to collapse PHI nodes that are trivially redundant
Value* v = CollapsePhi(PN);
if (!v) {
// Cache our phi construction results
- phiMap[orig->getPointerOperand()].insert(PN);
+ if (LoadInst* L = dyn_cast<LoadInst>(orig))
+ phiMap[L->getPointerOperand()].insert(PN);
+ else
+ phiMap[orig].insert(PN);
+
return PN;
}
- MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
-
- MD.removeInstruction(PN);
PN->replaceAllUsesWith(v);
+ if (isa<PointerType>(v->getType()))
+ MD->invalidateCachedPointerInfo(v);
for (DenseMap<BasicBlock*, Value*>::iterator I = Phis.begin(),
E = Phis.end(); I != E; ++I)
if (I->second == PN)
I->second = v;
+ DEBUG(cerr << "GVN removed: " << *PN);
+ MD->removeInstruction(PN);
PN->eraseFromParent();
+ DEBUG(verifyRemoved(PN));
Phis[BB] = v;
return v;
}
-/// processNonLocalLoad - Attempt to eliminate a load whose dependencies are
-/// non-local by performing PHI construction.
-bool GVN::processNonLocalLoad(LoadInst* L,
- SmallVectorImpl<Instruction*> &toErase) {
- MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
+/// IsValueFullyAvailableInBlock - Return true if we can prove that the value
+/// we're analyzing is fully available in the specified block. As we go, keep
+/// track of which blocks we know are fully available in FullyAvailableBlocks.
+/// This map is actually a four-state map with the following values:
+/// 0) we know the block *is not* fully available.
+/// 1) we know the block *is* fully available.
+/// 2) we do not know whether the block is fully available or not, but we are
+/// currently speculating that it will be.
+/// 3) we are speculating for this block and have used that to speculate for
+/// other blocks.
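+///
+/// For example, a value available in both predecessors of a diamond's merge
+/// block makes the merge block itself fully available (state 1), even though
+/// nothing in the merge block recomputes it.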
+static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
+ DenseMap<BasicBlock*, char> &FullyAvailableBlocks) {
+ // Optimistically assume that the block is fully available and check to see
+ // if we already know about this block in one lookup.
+ std::pair<DenseMap<BasicBlock*, char>::iterator, char> IV =
+ FullyAvailableBlocks.insert(std::make_pair(BB, 2));
+
+ // If the entry already existed for this block, return the precomputed value.
+ if (!IV.second) {
+ // If this is a speculative "available" value, mark it as being used for
+ // speculation of other blocks.
+ if (IV.first->second == 2)
+ IV.first->second = 3;
+ return IV.first->second != 0;
+ }
- // Find the non-local dependencies of the load
- DenseMap<BasicBlock*, Value*> deps;
- MD.getNonLocalDependency(L, deps);
+ // Otherwise, see if it is fully available in all predecessors.
+ pred_iterator PI = pred_begin(BB), PE = pred_end(BB);
- DenseMap<BasicBlock*, Value*> repl;
+ // If this block has no predecessors, it isn't live-in here.
+ if (PI == PE)
+ goto SpeculationFailure;
- // Filter out useless results (non-locals, etc)
- for (DenseMap<BasicBlock*, Value*>::iterator I = deps.begin(), E = deps.end();
- I != E; ++I) {
- if (I->second == MemoryDependenceAnalysis::None)
- return false;
+ for (; PI != PE; ++PI)
+ // If the value isn't fully available in one of our predecessors, then it
+ // isn't fully available in this block either. Undo our previous
+ // optimistic assumption and bail out.
+ if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
+ goto SpeculationFailure;
- if (I->second == MemoryDependenceAnalysis::NonLocal)
- continue;
+ return true;
- if (StoreInst* S = dyn_cast<StoreInst>(I->second)) {
- if (S->getPointerOperand() != L->getPointerOperand())
- return false;
- repl[I->first] = S->getOperand(0);
- } else if (LoadInst* LD = dyn_cast<LoadInst>(I->second)) {
- if (LD->getPointerOperand() != L->getPointerOperand())
- return false;
- repl[I->first] = LD;
- } else {
- return false;
- }
- }
+// SpeculationFailure - If we get here, we found out that this is not, after
+// all, a fully-available block. We have a problem if we speculated on this and
+// used the speculation to mark other blocks as available.
+SpeculationFailure:
+ char &BBVal = FullyAvailableBlocks[BB];
- // Use cached PHI construction information from previous runs
- SmallPtrSet<Instruction*, 4>& p = phiMap[L->getPointerOperand()];
- for (SmallPtrSet<Instruction*, 4>::iterator I = p.begin(), E = p.end();
- I != E; ++I) {
- if ((*I)->getParent() == L->getParent()) {
- MD.removeInstruction(L);
- L->replaceAllUsesWith(*I);
- toErase.push_back(L);
- NumGVNLoad++;
- return true;
- }
+ // If we didn't speculate on this, just return with it set to false.
+ if (BBVal == 2) {
+ BBVal = 0;
+ return false;
+ }
+
+ // If we did speculate on this value, we could have blocks set to 1 that are
+ // incorrect. Walk the (transitive) successors of this block and mark them as
+ // 0 if set to one.
+ SmallVector<BasicBlock*, 32> BBWorklist;
+ BBWorklist.push_back(BB);
+
+ while (!BBWorklist.empty()) {
+ BasicBlock *Entry = BBWorklist.pop_back_val();
+ // Note that this sets blocks to 0 (unavailable) if they happen to not
+ // already be in FullyAvailableBlocks. This is safe.
+ char &EntryVal = FullyAvailableBlocks[Entry];
+ if (EntryVal == 0) continue; // Already unavailable.
+
+ // Mark as unavailable.
+ EntryVal = 0;
- repl.insert(std::make_pair((*I)->getParent(), *I));
+ for (succ_iterator I = succ_begin(Entry), E = succ_end(Entry); I != E; ++I)
+ BBWorklist.push_back(*I);
}
- // Perform PHI construction
- SmallPtrSet<BasicBlock*, 4> visited;
- Value* v = GetValueForBlock(L->getParent(), L, repl, true);
-
- MD.removeInstruction(L);
- L->replaceAllUsesWith(v);
- toErase.push_back(L);
- NumGVNLoad++;
-
- return true;
+ return false;
}
-/// processLoad - Attempt to eliminate a load, first by eliminating it
-/// locally, and then attempting non-local elimination if that fails.
-bool GVN::processLoad(LoadInst *L, DenseMap<Value*, LoadInst*> &lastLoad,
- SmallVectorImpl<Instruction*> &toErase) {
- if (L->isVolatile()) {
- lastLoad[L->getPointerOperand()] = L;
+/// processNonLocalLoad - Attempt to eliminate a load whose dependencies are
+/// non-local by performing PHI construction.
+bool GVN::processNonLocalLoad(LoadInst *LI,
+ SmallVectorImpl<Instruction*> &toErase) {
+ // Find the non-local dependencies of the load.
+ SmallVector<MemoryDependenceAnalysis::NonLocalDepEntry, 64> Deps;
+ MD->getNonLocalPointerDependency(LI->getOperand(0), true, LI->getParent(),
+ Deps);
+ //DEBUG(cerr << "INVESTIGATING NONLOCAL LOAD: " << Deps.size() << *LI);
+
+ // If we had to process more than one hundred blocks to find the
+ // dependencies, this load isn't worth worrying about. Optimizing
+ // it will be too expensive.
+ if (Deps.size() > 100)
+ return false;
+
+ // If we had a phi translation failure, we'll have a single entry which is a
+ // clobber in the current block. Reject this early.
+ if (Deps.size() == 1 && Deps[0].second.isClobber()) {
+ DEBUG(
+ DOUT << "GVN: non-local load ";
+ WriteAsOperand(*DOUT.stream(), LI);
+ DOUT << " is clobbered by " << *Deps[0].second.getInst();
+ );
return false;
}
- Value* pointer = L->getPointerOperand();
- LoadInst*& last = lastLoad[pointer];
+  // Filter out useless results (non-locals, etc).  Keep track of the blocks
+  // where we have a value available in ValuesPerBlock, and also keep track of
+  // whether we see dependencies that produce an unknown value for the load
+  // (such as a call that could potentially clobber the load).
+ SmallVector<std::pair<BasicBlock*, Value*>, 16> ValuesPerBlock;
+ SmallVector<BasicBlock*, 16> UnavailableBlocks;
- // ... to a pointer that has been loaded from before...
- MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
- bool removedNonLocal = false;
- Instruction* dep = MD.getDependency(L);
- if (dep == MemoryDependenceAnalysis::NonLocal &&
- L->getParent() != &L->getParent()->getParent()->getEntryBlock()) {
- removedNonLocal = processNonLocalLoad(L, toErase);
+ for (unsigned i = 0, e = Deps.size(); i != e; ++i) {
+ BasicBlock *DepBB = Deps[i].first;
+ MemDepResult DepInfo = Deps[i].second;
- if (!removedNonLocal)
- last = L;
+ if (DepInfo.isClobber()) {
+ UnavailableBlocks.push_back(DepBB);
+ continue;
+ }
- return removedNonLocal;
- }
-
-
- bool deletedLoad = false;
+ Instruction *DepInst = DepInfo.getInst();
+
+    // Loading directly from the allocation -> undef (nothing is stored yet).
+ if (isa<AllocationInst>(DepInst)) {
+ ValuesPerBlock.push_back(std::make_pair(DepBB,
+ Context->getUndef(LI->getType())));
+ continue;
+ }
- // Walk up the dependency chain until we either find
- // a dependency we can use, or we can't walk any further
- while (dep != MemoryDependenceAnalysis::None &&
- dep != MemoryDependenceAnalysis::NonLocal &&
- (isa<LoadInst>(dep) || isa<StoreInst>(dep))) {
- // ... that depends on a store ...
- if (StoreInst* S = dyn_cast<StoreInst>(dep)) {
- if (S->getPointerOperand() == pointer) {
- // Remove it!
- MD.removeInstruction(L);
-
- L->replaceAllUsesWith(S->getOperand(0));
- toErase.push_back(L);
- deletedLoad = true;
- NumGVNLoad++;
+ if (StoreInst* S = dyn_cast<StoreInst>(DepInst)) {
+ // Reject loads and stores that are to the same address but are of
+ // different types.
+      // NOTE: 403.gcc does have this case (e.g. in readonly_fields_p) because
+      // of bitfield access; it would be interesting to optimize for it at some
+      // point.
+ if (S->getOperand(0)->getType() != LI->getType()) {
+ UnavailableBlocks.push_back(DepBB);
+ continue;
}
- // Whether we removed it or not, we can't
- // go any further
- break;
- } else if (!last) {
- // If we don't depend on a store, and we haven't
- // been loaded before, bail.
- break;
- } else if (dep == last) {
- // Remove it!
- MD.removeInstruction(L);
+ ValuesPerBlock.push_back(std::make_pair(DepBB, S->getOperand(0)));
- L->replaceAllUsesWith(last);
- toErase.push_back(L);
- deletedLoad = true;
- NumGVNLoad++;
-
- break;
+ } else if (LoadInst* LD = dyn_cast<LoadInst>(DepInst)) {
+ if (LD->getType() != LI->getType()) {
+ UnavailableBlocks.push_back(DepBB);
+ continue;
+ }
+ ValuesPerBlock.push_back(std::make_pair(DepBB, LD));
} else {
- dep = MD.getDependency(L, dep);
- }
- }
-
- if (dep != MemoryDependenceAnalysis::None &&
- dep != MemoryDependenceAnalysis::NonLocal &&
- isa<AllocationInst>(dep)) {
- // Check that this load is actually from the
- // allocation we found
- Value* v = L->getOperand(0);
- while (true) {
- if (BitCastInst *BC = dyn_cast<BitCastInst>(v))
- v = BC->getOperand(0);
- else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(v))
- v = GEP->getOperand(0);
- else
- break;
- }
- if (v == dep) {
- // If this load depends directly on an allocation, there isn't
- // anything stored there; therefore, we can optimize this load
- // to undef.
- MD.removeInstruction(L);
-
- L->replaceAllUsesWith(UndefValue::get(L->getType()));
- toErase.push_back(L);
- deletedLoad = true;
- NumGVNLoad++;
+ UnavailableBlocks.push_back(DepBB);
+ continue;
}
}
-
- if (!deletedLoad)
- last = L;
- return deletedLoad;
-}
-
-/// isBytewiseValue - If the specified value can be set by repeating the same
-/// byte in memory, return the i8 value that it is represented with. This is
-/// true for all i8 values obviously, but is also true for i32 0, i32 -1,
-/// i16 0xF0F0, double 0.0 etc. If the value can't be handled with a repeated
-/// byte store (e.g. i16 0x1234), return null.
-static Value *isBytewiseValue(Value *V) {
- // All byte-wide stores are splatable, even of arbitrary variables.
- if (V->getType() == Type::Int8Ty) return V;
-
- // Constant float and double values can be handled as integer values if the
- // corresponding integer value is "byteable". An important case is 0.0.
- if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
- if (CFP->getType() == Type::FloatTy)
- V = ConstantExpr::getBitCast(CFP, Type::Int32Ty);
- if (CFP->getType() == Type::DoubleTy)
- V = ConstantExpr::getBitCast(CFP, Type::Int64Ty);
- // Don't handle long double formats, which have strange constraints.
- }
-
- // We can handle constant integers that are power of two in size and a
- // multiple of 8 bits.
- if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
- unsigned Width = CI->getBitWidth();
- if (isPowerOf2_32(Width) && Width > 8) {
- // We can handle this value if the recursive binary decomposition is the
- // same at all levels.
- APInt Val = CI->getValue();
- APInt Val2;
- while (Val.getBitWidth() != 8) {
- unsigned NextWidth = Val.getBitWidth()/2;
- Val2 = Val.lshr(NextWidth);
- Val2.trunc(Val.getBitWidth()/2);
- Val.trunc(Val.getBitWidth()/2);
-
- // If the top/bottom halves aren't the same, reject it.
- if (Val != Val2)
- return 0;
+ // If we have no predecessors that produce a known value for this load, exit
+ // early.
+ if (ValuesPerBlock.empty()) return false;
+
+ // If all of the instructions we depend on produce a known value for this
+ // load, then it is fully redundant and we can use PHI insertion to compute
+ // its value. Insert PHIs and remove the fully redundant value now.
+ if (UnavailableBlocks.empty()) {
+ // Use cached PHI construction information from previous runs
+ SmallPtrSet<Instruction*, 4> &p = phiMap[LI->getPointerOperand()];
+ // FIXME: What does phiMap do? Are we positive it isn't getting invalidated?
+ for (SmallPtrSet<Instruction*, 4>::iterator I = p.begin(), E = p.end();
+ I != E; ++I) {
+ if ((*I)->getParent() == LI->getParent()) {
+ DEBUG(cerr << "GVN REMOVING NONLOCAL LOAD #1: " << *LI);
+ LI->replaceAllUsesWith(*I);
+ if (isa<PointerType>((*I)->getType()))
+ MD->invalidateCachedPointerInfo(*I);
+ toErase.push_back(LI);
+ NumGVNLoad++;
+ return true;
}
- return ConstantInt::get(Val);
- }
- }
-
- // Conceptually, we could handle things like:
- // %a = zext i8 %X to i16
- // %b = shl i16 %a, 8
- // %c = or i16 %a, %b
- // but until there is an example that actually needs this, it doesn't seem
- // worth worrying about.
- return 0;
-}
-
-static int64_t GetOffsetFromIndex(const GetElementPtrInst *GEP, unsigned Idx,
- bool &VariableIdxFound, TargetData &TD) {
- // Skip over the first indices.
- gep_type_iterator GTI = gep_type_begin(GEP);
- for (unsigned i = 1; i != Idx; ++i, ++GTI)
- /*skip along*/;
-
- // Compute the offset implied by the rest of the indices.
- int64_t Offset = 0;
- for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
- ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
- if (OpC == 0)
- return VariableIdxFound = true;
- if (OpC->isZero()) continue; // No offset.
-
- // Handle struct indices, which add their field offset to the pointer.
- if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
- Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
- continue;
+
+ ValuesPerBlock.push_back(std::make_pair((*I)->getParent(), *I));
}
- // Otherwise, we have a sequential type like an array or vector. Multiply
- // the index by the ElementSize.
- uint64_t Size = TD.getABITypeSize(GTI.getIndexedType());
- Offset += Size*OpC->getSExtValue();
+ DEBUG(cerr << "GVN REMOVING NONLOCAL LOAD: " << *LI);
+
+ DenseMap<BasicBlock*, Value*> BlockReplValues;
+ BlockReplValues.insert(ValuesPerBlock.begin(), ValuesPerBlock.end());
+ // Perform PHI construction.
+ Value* v = GetValueForBlock(LI->getParent(), LI, BlockReplValues, true);
+ LI->replaceAllUsesWith(v);
+
+ if (isa<PHINode>(v))
+ v->takeName(LI);
+ if (isa<PointerType>(v->getType()))
+ MD->invalidateCachedPointerInfo(v);
+ toErase.push_back(LI);
+ NumGVNLoad++;
+ return true;
}
-
- return Offset;
-}
-
-/// IsPointerOffset - Return true if Ptr1 is provably equal to Ptr2 plus a
-/// constant offset, and return that constant offset. For example, Ptr1 might
-/// be &A[42], and Ptr2 might be &A[40]. In this case offset would be -8.
-static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
- TargetData &TD) {
- // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
- // base. After that base, they may have some number of common (and
- // potentially variable) indices. After that they handle some constant
- // offset, which determines their offset from each other. At this point, we
- // handle no other case.
- GetElementPtrInst *GEP1 = dyn_cast<GetElementPtrInst>(Ptr1);
- GetElementPtrInst *GEP2 = dyn_cast<GetElementPtrInst>(Ptr2);
- if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
- return false;
-
- // Skip any common indices and track the GEP types.
- unsigned Idx = 1;
- for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
- if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
- break;
-
- bool VariableIdxFound = false;
- int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, TD);
- int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, TD);
- if (VariableIdxFound) return false;
-
- Offset = Offset2-Offset1;
- return true;
-}
-
-
-/// MemsetRange - Represents a range of memset'd bytes with the ByteVal value.
-/// This allows us to analyze stores like:
-/// store 0 -> P+1
-/// store 0 -> P+0
-/// store 0 -> P+3
-/// store 0 -> P+2
-/// which sometimes happens with stores to arrays of structs etc. When we see
-/// the first store, we make a range [1, 2). The second store extends the range
-/// to [0, 2). The third makes a new range [2, 3). The fourth store joins the
-/// two ranges into [0, 3) which is memset'able.
-namespace {
-struct MemsetRange {
- // Start/End - A semi range that describes the span that this range covers.
- // The range is closed at the start and open at the end: [Start, End).
- int64_t Start, End;
-
- /// StartPtr - The getelementptr instruction that points to the start of the
- /// range.
- Value *StartPtr;
-
- /// Alignment - The known alignment of the first store.
- unsigned Alignment;
-
- /// TheStores - The actual stores that make up this range.
- SmallVector<StoreInst*, 16> TheStores;
- bool isProfitableToUseMemset(const TargetData &TD) const;
-
-};
-} // end anon namespace
-
-bool MemsetRange::isProfitableToUseMemset(const TargetData &TD) const {
- // If we found more than 8 stores to merge or 64 bytes, use memset.
- if (TheStores.size() >= 8 || End-Start >= 64) return true;
-
- // Assume that the code generator is capable of merging pairs of stores
- // together if it wants to.
- if (TheStores.size() <= 2) return false;
-
- // If we have fewer than 8 stores, it can still be worthwhile to do this.
- // For example, merging 4 i8 stores into an i32 store is useful almost always.
- // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
- // memset will be split into 2 32-bit stores anyway) and doing so can
- // pessimize the llvm optimizer.
- //
- // Since we don't have perfect knowledge here, make some assumptions: assume
- // the maximum GPR width is the same size as the pointer size and assume that
- // this width can be stored. If so, check to see whether we will end up
- // actually reducing the number of stores used.
- unsigned Bytes = unsigned(End-Start);
- unsigned NumPointerStores = Bytes/TD.getPointerSize();
-
- // Assume the remaining bytes if any are done a byte at a time.
- unsigned NumByteStores = Bytes - NumPointerStores*TD.getPointerSize();
-
- // If we will reduce the # stores (according to this heuristic), do the
- // transformation. This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
- // etc.
- return TheStores.size() > NumPointerStores+NumByteStores;
-}
-
+ if (!EnablePRE || !EnableLoadPRE)
+ return false;
-namespace {
-class MemsetRanges {
- /// Ranges - A sorted list of the memset ranges. We use std::list here
- /// because each element is relatively large and expensive to copy.
- std::list<MemsetRange> Ranges;
- typedef std::list<MemsetRange>::iterator range_iterator;
- TargetData &TD;
-public:
- MemsetRanges(TargetData &td) : TD(td) {}
-
- typedef std::list<MemsetRange>::const_iterator const_iterator;
- const_iterator begin() const { return Ranges.begin(); }
- const_iterator end() const { return Ranges.end(); }
+ // Okay, we have *some* definitions of the value. This means that the value
+ // is available in some of our (transitive) predecessors. Lets think about
+ // doing PRE of this load. This will involve inserting a new load into the
+ // predecessor when it's not available. We could do this in general, but
+ // prefer to not increase code size. As such, we only do this when we know
+ // that we only have to insert *one* load (which means we're basically moving
+ // the load, not inserting a new one).
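+  //
+  // Shape targeted here (hypothetical IR sketch):
+  //     BB1: store i32 0, i32* %P       BB2: (%P's value not available)
+  //           \                          /
+  //          LoadBB: %v = load i32* %P
+  // Load PRE inserts a reload of %P at the end of BB2; PHI construction in
+  // LoadBB then merges the stored value with the new load, making %v redundant.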
+
+ SmallPtrSet<BasicBlock *, 4> Blockers;
+ for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
+ Blockers.insert(UnavailableBlocks[i]);
+
+  // Let's find the first basic block with more than one predecessor.  Walk
+  // backwards through predecessors if needed.
+ BasicBlock *LoadBB = LI->getParent();
+ BasicBlock *TmpBB = LoadBB;
+
+ bool isSinglePred = false;
+ bool allSingleSucc = true;
+ while (TmpBB->getSinglePredecessor()) {
+ isSinglePred = true;
+ TmpBB = TmpBB->getSinglePredecessor();
+    if (!TmpBB) // If we haven't found one, bail now.
+ return false;
+ if (TmpBB == LoadBB) // Infinite (unreachable) loop.
+ return false;
+ if (Blockers.count(TmpBB))
+ return false;
+ if (TmpBB->getTerminator()->getNumSuccessors() != 1)
+ allSingleSucc = false;
+ }
+ assert(TmpBB);
+ LoadBB = TmpBB;
- void addStore(int64_t OffsetFromFirst, StoreInst *SI);
-};
+  // If ValuesPerBlock contains LI itself, we have a loop where at least one of
+  // the available values is LI.  Since this means that we won't be able to
+  // eliminate LI even if we insert uses in the other predecessors, we will end
+  // up increasing code size.  Reject this by scanning for LI.
+ for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
+ if (ValuesPerBlock[i].second == LI)
+ return false;
-} // end anon namespace
-
-
-/// addStore - Add a new store to the MemsetRanges data structure. This adds a
-/// new range for the specified store at the specified offset, merging into
-/// existing ranges as appropriate.
-void MemsetRanges::addStore(int64_t Start, StoreInst *SI) {
- int64_t End = Start+TD.getTypeStoreSize(SI->getOperand(0)->getType());
-
- // Do a linear search of the ranges to see if this can be joined and/or to
- // find the insertion point in the list. We keep the ranges sorted for
- // simplicity here. This is a linear search of a linked list, which is ugly,
- // however the number of ranges is limited, so this won't get crazy slow.
- range_iterator I = Ranges.begin(), E = Ranges.end();
-
- while (I != E && Start > I->End)
- ++I;
-
- // We now know that I == E, in which case we didn't find anything to merge
- // with, or that Start <= I->End. If End < I->Start or I == E, then we need
- // to insert a new range. Handle this now.
- if (I == E || End < I->Start) {
- MemsetRange &R = *Ranges.insert(I, MemsetRange());
- R.Start = Start;
- R.End = End;
- R.StartPtr = SI->getPointerOperand();
- R.Alignment = SI->getAlignment();
- R.TheStores.push_back(SI);
- return;
+ if (isSinglePred) {
+ bool isHot = false;
+ for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
+ if (Instruction *I = dyn_cast<Instruction>(ValuesPerBlock[i].second))
+        // The available value is "hot", i.e. it lives in some loop, which
+        // follows from the load dominating the instruction it depends on.
+ if (DT->dominates(LI, I)) {
+ isHot = true;
+ break;
+ }
+
+ // We are interested only in "hot" instructions. We don't want to do any
+ // mis-optimizations here.
+ if (!isHot)
+ return false;
}
- // This store overlaps with I, add it.
- I->TheStores.push_back(SI);
+ // Okay, we have some hope :). Check to see if the loaded value is fully
+ // available in all but one predecessor.
+ // FIXME: If we could restructure the CFG, we could make a common pred with
+ // all the preds that don't have an available LI and insert a new load into
+ // that one block.
+ BasicBlock *UnavailablePred = 0;
+
+ DenseMap<BasicBlock*, char> FullyAvailableBlocks;
+ for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
+ FullyAvailableBlocks[ValuesPerBlock[i].first] = true;
+ for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
+ FullyAvailableBlocks[UnavailableBlocks[i]] = false;
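+  // (The true/false written here are states 1/0 of the four-state encoding
+  // that IsValueFullyAvailableInBlock documents above.)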
+
+ for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB);
+ PI != E; ++PI) {
+ if (IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks))
+ continue;
+
+ // If this load is not available in multiple predecessors, reject it.
+ if (UnavailablePred && UnavailablePred != *PI)
+ return false;
+ UnavailablePred = *PI;
+ }
- // At this point, we may have an interval that completely contains our store.
- // If so, just add it to the interval and return.
- if (I->Start <= Start && I->End >= End)
- return;
+ assert(UnavailablePred != 0 &&
+ "Fully available value should be eliminated above!");
- // Now we know that Start <= I->End and End >= I->Start so the range overlaps
- // but is not entirely contained within the range.
+  // If the loaded pointer is a PHI node defined in this block, do PHI
+  // translation to get its value in the predecessor.
+ Value *LoadPtr = LI->getOperand(0)->DoPHITranslation(LoadBB, UnavailablePred);
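+  // E.g. if %P is "phi [%a, %Pred1], [%b, %Pred2]" and UnavailablePred is
+  // %Pred2, the translated pointer is %b (all names here are illustrative).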
- // See if the range extends the start of the range. In this case, it couldn't
- // possibly cause it to join the prior range, because otherwise we would have
- // stopped on *it*.
- if (Start < I->Start)
- I->Start = Start;
-
- // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
- // is in or right at the end of I), and that End >= I->Start. Extend I out to
- // End.
- if (End > I->End) {
- I->End = End;
- range_iterator NextI = I;;
- while (++NextI != E && End >= NextI->Start) {
- // Merge the range in.
- I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
- if (NextI->End > I->End)
- I->End = NextI->End;
- Ranges.erase(NextI);
- NextI = I;
+ // Make sure the value is live in the predecessor. If it was defined by a
+ // non-PHI instruction in this block, we don't know how to recompute it above.
+ if (Instruction *LPInst = dyn_cast<Instruction>(LoadPtr))
+ if (!DT->dominates(LPInst->getParent(), UnavailablePred)) {
+ DEBUG(cerr << "COULDN'T PRE LOAD BECAUSE PTR IS UNAVAILABLE IN PRED: "
+ << *LPInst << *LI << "\n");
+ return false;
}
- }
-}
-
-
-
-/// processStore - When GVN is scanning forward over instructions, we look for
-/// some other patterns to fold away. In particular, this looks for stores to
-/// neighboring locations of memory. If it sees enough consequtive ones
-/// (currently 4) it attempts to merge them together into a memcpy/memset.
-bool GVN::processStore(StoreInst *SI, SmallVectorImpl<Instruction*> &toErase) {
- if (!FormMemSet) return false;
- if (SI->isVolatile()) return false;
-
- // There are two cases that are interesting for this code to handle: memcpy
- // and memset. Right now we only handle memset.
- // Ensure that the value being stored is something that can be memset'able a
- // byte at a time like "0" or "-1" or any width, as well as things like
- // 0xA0A0A0A0 and 0.0.
- Value *ByteVal = isBytewiseValue(SI->getOperand(0));
- if (!ByteVal)
+ // We don't currently handle critical edges :(
+ if (UnavailablePred->getTerminator()->getNumSuccessors() != 1) {
+ DEBUG(cerr << "COULD NOT PRE LOAD BECAUSE OF CRITICAL EDGE '"
+ << UnavailablePred->getName() << "': " << *LI);
return false;
+ }
- TargetData &TD = getAnalysis<TargetData>();
- AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
-
- // Okay, so we now have a single store that can be splatable. Scan to find
- // all subsequent stores of the same value to offset from the same pointer.
- // Join these together into ranges, so we can decide whether contiguous blocks
- // are stored.
- MemsetRanges Ranges(TD);
-
- // Add our first pointer.
- Ranges.addStore(0, SI);
- Value *StartPtr = SI->getPointerOperand();
-
- BasicBlock::iterator BI = SI;
- for (++BI; !isa<TerminatorInst>(BI); ++BI) {
- if (isa<CallInst>(BI) || isa<InvokeInst>(BI)) {
- // If the call is readnone, ignore it, otherwise bail out. We don't even
- // allow readonly here because we don't want something like:
- // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
- if (AA.getModRefBehavior(CallSite::get(BI)) ==
- AliasAnalysis::DoesNotAccessMemory)
- continue;
-
- // TODO: If this is a memset, try to join it in.
-
- break;
- } else if (isa<VAArgInst>(BI) || isa<LoadInst>(BI))
- break;
-
- // If this is a non-store instruction it is fine, ignore it.
- StoreInst *NextStore = dyn_cast<StoreInst>(BI);
- if (NextStore == 0) continue;
-
- // If this is a store, see if we can merge it in.
- if (NextStore->isVolatile()) break;
-
- // Check to see if this stored value is of the same byte-splattable value.
- if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
- break;
-
- // Check to see if this store is to a constant offset from the start ptr.
- int64_t Offset;
- if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset, TD))
- break;
+ // Make sure it is valid to move this load here. We have to watch out for:
+ // @1 = getelementptr (i8* p, ...
+ // test p and branch if == 0
+ // load @1
+ // It is valid to have the getelementptr before the test, even if p can be 0,
+ // as getelementptr only does address arithmetic.
+ // If we are not pushing the value through any multiple-successor blocks
+ // we do not have this case. Otherwise, check that the load is safe to
+ // put anywhere; this can be improved, but should be conservatively safe.
+ if (!allSingleSucc &&
+ !isSafeToLoadUnconditionally(LoadPtr, UnavailablePred->getTerminator()))
+ return false;
- Ranges.addStore(Offset, NextStore);
- }
-
- Function *MemSetF = 0;
+ // Okay, we can eliminate this load by inserting a reload in the predecessor
+ // and using PHI construction to get the value in the other predecessors, do
+ // it.
+ DEBUG(cerr << "GVN REMOVING PRE LOAD: " << *LI);
- // Now that we have full information about ranges, loop over the ranges and
- // emit memset's for anything big enough to be worthwhile.
- bool MadeChange = false;
- for (MemsetRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
- I != E; ++I) {
- const MemsetRange &Range = *I;
-
- if (Range.TheStores.size() == 1) continue;
-
- // If it is profitable to lower this range to memset, do so now.
- if (!Range.isProfitableToUseMemset(TD))
- continue;
-
- // Otherwise, we do want to transform this! Create a new memset. We put
- // the memset right after the first store that we found in this block. This
- // ensures that the caller will increment the iterator to the memset before
- // it deletes all the stores.
- BasicBlock::iterator InsertPt = SI; ++InsertPt;
-
- if (MemSetF == 0)
- MemSetF = Intrinsic::getDeclaration(SI->getParent()->getParent()
- ->getParent(), Intrinsic::memset_i64);
-
- // StartPtr may not dominate the starting point. Instead of using it, base
- // the destination pointer off the input to the first store in the block.
- StartPtr = SI->getPointerOperand();
-
- // Cast the start ptr to be i8* as memset requires.
- const Type *i8Ptr = PointerType::getUnqual(Type::Int8Ty);
- if (StartPtr->getType() != i8Ptr)
- StartPtr = new BitCastInst(StartPtr, i8Ptr, StartPtr->getNameStart(),
- InsertPt);
-
- // Offset the pointer if needed.
- if (Range.Start)
- StartPtr = new GetElementPtrInst(StartPtr, ConstantInt::get(Type::Int64Ty,
- Range.Start),
- "ptroffset", InsertPt);
-
- Value *Ops[] = {
- StartPtr, ByteVal, // Start, value
- ConstantInt::get(Type::Int64Ty, Range.End-Range.Start), // size
- ConstantInt::get(Type::Int32Ty, Range.Alignment) // align
- };
- Value *C = new CallInst(MemSetF, Ops, Ops+4, "", InsertPt);
- DEBUG(cerr << "Replace stores:\n";
- for (unsigned i = 0, e = Range.TheStores.size(); i != e; ++i)
- cerr << *Range.TheStores[i];
- cerr << "With: " << *C); C=C;
-
- // Zap all the stores.
- toErase.append(Range.TheStores.begin(), Range.TheStores.end());
- ++NumMemSetInfer;
- MadeChange = true;
- }
+ Value *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
+ LI->getAlignment(),
+ UnavailablePred->getTerminator());
- return MadeChange;
+ SmallPtrSet<Instruction*, 4> &p = phiMap[LI->getPointerOperand()];
+ for (SmallPtrSet<Instruction*, 4>::iterator I = p.begin(), E = p.end();
+ I != E; ++I)
+ ValuesPerBlock.push_back(std::make_pair((*I)->getParent(), *I));
+
+ DenseMap<BasicBlock*, Value*> BlockReplValues;
+ BlockReplValues.insert(ValuesPerBlock.begin(), ValuesPerBlock.end());
+ BlockReplValues[UnavailablePred] = NewLoad;
+
+ // Perform PHI construction.
+ Value* v = GetValueForBlock(LI->getParent(), LI, BlockReplValues, true);
+ LI->replaceAllUsesWith(v);
+ if (isa<PHINode>(v))
+ v->takeName(LI);
+ if (isa<PointerType>(v->getType()))
+ MD->invalidateCachedPointerInfo(v);
+ toErase.push_back(LI);
+ NumPRELoad++;
+ return true;
}
-
-/// performCallSlotOptzn - takes a memcpy and a call that it depends on,
-/// and checks for the possibility of a call slot optimization by having
-/// the call write its result directly into the destination of the memcpy.
-bool GVN::performCallSlotOptzn(MemCpyInst *cpy, CallInst *C,
- SmallVectorImpl<Instruction*> &toErase) {
- // The general transformation to keep in mind is
- //
- // call @func(..., src, ...)
- // memcpy(dest, src, ...)
- //
- // ->
- //
- // memcpy(dest, src, ...)
- // call @func(..., dest, ...)
- //
- // Since moving the memcpy is technically awkward, we additionally check that
- // src only holds uninitialized values at the moment of the call, meaning that
- // the memcpy can be discarded rather than moved.
-
- // Deliberately get the source and destination with bitcasts stripped away,
- // because we'll need to do type comparisons based on the underlying type.
- Value* cpyDest = cpy->getDest();
- Value* cpySrc = cpy->getSource();
- CallSite CS = CallSite::get(C);
-
- // We need to be able to reason about the size of the memcpy, so we require
- // that it be a constant.
- ConstantInt* cpyLength = dyn_cast<ConstantInt>(cpy->getLength());
- if (!cpyLength)
- return false;
-
- // Require that src be an alloca. This simplifies the reasoning considerably.
- AllocaInst* srcAlloca = dyn_cast<AllocaInst>(cpySrc);
- if (!srcAlloca)
- return false;
-
- // Check that all of src is copied to dest.
- TargetData& TD = getAnalysis<TargetData>();
-
- ConstantInt* srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
- if (!srcArraySize)
+/// processLoad - Attempt to eliminate a load: first locally, then via
+/// non-local elimination if that fails.
+bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
+ if (L->isVolatile())
return false;
+
+ Value* pointer = L->getPointerOperand();
- uint64_t srcSize = TD.getABITypeSize(srcAlloca->getAllocatedType()) *
- srcArraySize->getZExtValue();
-
- if (cpyLength->getZExtValue() < srcSize)
+  // Query MemDep for the instruction this load depends on.
+ MemDepResult dep = MD->getDependency(L);
+
+ // If the value isn't available, don't do anything!
+ if (dep.isClobber()) {
+ DEBUG(
+      // Fast-print the dep; operator<< on the whole instruction is too slow.
+ DOUT << "GVN: load ";
+ WriteAsOperand(*DOUT.stream(), L);
+ Instruction *I = dep.getInst();
+ DOUT << " is clobbered by " << *I;
+ );
return false;
+ }
- // Check that accessing the first srcSize bytes of dest will not cause a
- // trap. Otherwise the transform is invalid since it might cause a trap
- // to occur earlier than it otherwise would.
- if (AllocaInst* A = dyn_cast<AllocaInst>(cpyDest)) {
- // The destination is an alloca. Check it is larger than srcSize.
- ConstantInt* destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
- if (!destArraySize)
- return false;
-
- uint64_t destSize = TD.getABITypeSize(A->getAllocatedType()) *
- destArraySize->getZExtValue();
-
- if (destSize < srcSize)
- return false;
- } else if (Argument* A = dyn_cast<Argument>(cpyDest)) {
- // If the destination is an sret parameter then only accesses that are
- // outside of the returned struct type can trap.
- if (!A->hasStructRetAttr())
- return false;
-
- const Type* StructTy = cast<PointerType>(A->getType())->getElementType();
- uint64_t destSize = TD.getABITypeSize(StructTy);
+ // If it is defined in another block, try harder.
+ if (dep.isNonLocal())
+ return processNonLocalLoad(L, toErase);
- if (destSize < srcSize)
+ Instruction *DepInst = dep.getInst();
+ if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
+ // Only forward substitute stores to loads of the same type.
+ // FIXME: Could do better!
+ if (DepSI->getPointerOperand()->getType() != pointer->getType())
return false;
- } else {
- return false;
+
+ // Remove it!
+ L->replaceAllUsesWith(DepSI->getOperand(0));
+ if (isa<PointerType>(DepSI->getOperand(0)->getType()))
+ MD->invalidateCachedPointerInfo(DepSI->getOperand(0));
+ toErase.push_back(L);
+ NumGVNLoad++;
+ return true;
}
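+
+  // An illustrative sketch of the forwarding above (hypothetical IR):
+  //   store i32 %v, i32* %P
+  //   %x = load i32* %P            ; %x is replaced by %v; the load is erased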
- // Check that src is not accessed except via the call and the memcpy. This
- // guarantees that it holds only undefined values when passed in (so the final
- // memcpy can be dropped), that it is not read or written between the call and
- // the memcpy, and that writing beyond the end of it is undefined.
- SmallVector<User*, 8> srcUseList(srcAlloca->use_begin(),
- srcAlloca->use_end());
- while (!srcUseList.empty()) {
- User* UI = srcUseList.back();
- srcUseList.pop_back();
-
- if (isa<GetElementPtrInst>(UI) || isa<BitCastInst>(UI)) {
- for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
- I != E; ++I)
- srcUseList.push_back(*I);
- } else if (UI != C && UI != cpy) {
+ if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
+    // Only forward substitute loads to loads of the same type.
+ // FIXME: Could do better! load i32 -> load i8 -> truncate on little endian.
+ if (DepLI->getType() != L->getType())
return false;
- }
+
+ // Remove it!
+ L->replaceAllUsesWith(DepLI);
+ if (isa<PointerType>(DepLI->getType()))
+ MD->invalidateCachedPointerInfo(DepLI);
+ toErase.push_back(L);
+ NumGVNLoad++;
+ return true;
+ }
+
+  // If this load really doesn't depend on anything, then we must be loading
+  // an undef value. This can happen when loading from a fresh allocation
+  // with no intervening stores, for example.
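+  // An illustrative sketch (hypothetical IR):
+  //   %P = alloca i32
+  //   %x = load i32* %P            ; no intervening store, so %x is undef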
+ if (isa<AllocationInst>(DepInst)) {
+ L->replaceAllUsesWith(Context->getUndef(L->getType()));
+ toErase.push_back(L);
+ NumGVNLoad++;
+ return true;
}
- // Since we're changing the parameter to the callsite, we need to make sure
- // that what would be the new parameter dominates the callsite.
- DominatorTree& DT = getAnalysis<DominatorTree>();
- if (Instruction* cpyDestInst = dyn_cast<Instruction>(cpyDest))
- if (!DT.dominates(cpyDestInst, C))
- return false;
-
- // In addition to knowing that the call does not access src in some
- // unexpected manner, for example via a global, which we deduce from
- // the use analysis, we also need to know that it does not sneakily
- // access dest. We rely on AA to figure this out for us.
- AliasAnalysis& AA = getAnalysis<AliasAnalysis>();
- if (AA.getModRefInfo(C, cpy->getRawDest(), srcSize) !=
- AliasAnalysis::NoModRef)
- return false;
-
- // All the checks have passed, so do the transformation.
- for (unsigned i = 0; i < CS.arg_size(); ++i)
- if (CS.getArgument(i) == cpySrc) {
- if (cpySrc->getType() != cpyDest->getType())
- cpyDest = CastInst::createPointerCast(cpyDest, cpySrc->getType(),
- cpyDest->getName(), C);
- CS.setArgument(i, cpyDest);
- }
-
- // Drop any cached information about the call, because we may have changed
- // its dependence information by changing its parameter.
- MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
- MD.dropInstruction(C);
-
- // Remove the memcpy
- MD.removeInstruction(cpy);
- toErase.push_back(cpy);
-
- return true;
+ return false;
}
-/// processMemCpy - perform simplication of memcpy's. If we have memcpy A which
-/// copies X to Y, and memcpy B which copies Y to Z, then we can rewrite B to be
-/// a memcpy from X to Z (or potentially a memmove, depending on circumstances).
-/// This allows later passes to remove the first memcpy altogether.
-bool GVN::processMemCpy(MemCpyInst* M, MemCpyInst* MDep,
- SmallVectorImpl<Instruction*> &toErase) {
- // We can only transforms memcpy's where the dest of one is the source of the
- // other
- if (M->getSource() != MDep->getDest())
- return false;
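+/// lookupNumber - Look up the value with the given value number in the scope
+/// of BB, walking the chain of parent scopes (which mirrors the dominator
+/// tree) until a match is found. Returns null if the number is unavailable.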
+Value* GVN::lookupNumber(BasicBlock* BB, uint32_t num) {
+ DenseMap<BasicBlock*, ValueNumberScope*>::iterator I = localAvail.find(BB);
+ if (I == localAvail.end())
+ return 0;
- // Second, the length of the memcpy's must be the same, or the preceeding one
- // must be larger than the following one.
- ConstantInt* C1 = dyn_cast<ConstantInt>(MDep->getLength());
- ConstantInt* C2 = dyn_cast<ConstantInt>(M->getLength());
- if (!C1 || !C2)
- return false;
+ ValueNumberScope* locals = I->second;
- uint64_t DepSize = C1->getValue().getZExtValue();
- uint64_t CpySize = C2->getValue().getZExtValue();
+ while (locals) {
+ DenseMap<uint32_t, Value*>::iterator I = locals->table.find(num);
+ if (I != locals->table.end())
+ return I->second;
+ else
+ locals = locals->parent;
+ }
- if (DepSize < CpySize)
- return false;
+ return 0;
+}
+
+/// AttemptRedundancyElimination - If the "fast path" of redundancy elimination
+/// by inheritance from the dominator fails, see if we can perform phi
+/// construction to eliminate the redundancy.
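+/// For example (a sketch, not a real testcase): if each predecessor of the
+/// block holds an instance of the value number but no single instance
+/// dominates the block, a phi of the per-predecessor instances can stand in
+/// for the redundant instruction.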
+Value* GVN::AttemptRedundancyElimination(Instruction* orig, unsigned valno) {
+ BasicBlock* BaseBlock = orig->getParent();
- // Finally, we have to make sure that the dest of the second does not
- // alias the source of the first
- AliasAnalysis& AA = getAnalysis<AliasAnalysis>();
- if (AA.alias(M->getRawDest(), CpySize, MDep->getRawSource(), DepSize) !=
- AliasAnalysis::NoAlias)
- return false;
- else if (AA.alias(M->getRawDest(), CpySize, M->getRawSource(), CpySize) !=
- AliasAnalysis::NoAlias)
- return false;
- else if (AA.alias(MDep->getRawDest(), DepSize, MDep->getRawSource(), DepSize)
- != AliasAnalysis::NoAlias)
- return false;
+ SmallPtrSet<BasicBlock*, 4> Visited;
+ SmallVector<BasicBlock*, 8> Stack;
+ Stack.push_back(BaseBlock);
+
+ DenseMap<BasicBlock*, Value*> Results;
- // If all checks passed, then we can transform these memcpy's
- Function* MemCpyFun = Intrinsic::getDeclaration(
- M->getParent()->getParent()->getParent(),
- M->getIntrinsicID());
+ // Walk backwards through our predecessors, looking for instances of the
+ // value number we're looking for. Instances are recorded in the Results
+ // map, which is then used to perform phi construction.
+ while (!Stack.empty()) {
+ BasicBlock* Current = Stack.back();
+ Stack.pop_back();
- std::vector<Value*> args;
- args.push_back(M->getRawDest());
- args.push_back(MDep->getRawSource());
- args.push_back(M->getLength());
- args.push_back(M->getAlignment());
-
- CallInst* C = new CallInst(MemCpyFun, args.begin(), args.end(), "", M);
-
- MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
- if (MD.getDependency(C) == MDep) {
- MD.dropInstruction(M);
- toErase.push_back(M);
- return true;
+    // If we've walked all the way to a proper dominator, then give up. Cases
+    // where the instance is in the dominator will have been caught by the
+    // fast path, and any cases that require phi construction further than
+    // this are probably not worth it anyway. Note that this check is a
+    // SIGNIFICANT compile-time improvement.
+ if (DT->properlyDominates(Current, orig->getParent())) return 0;
+
+ DenseMap<BasicBlock*, ValueNumberScope*>::iterator LA =
+ localAvail.find(Current);
+ if (LA == localAvail.end()) return 0;
+ DenseMap<uint32_t, Value*>::iterator V = LA->second->table.find(valno);
+
+ if (V != LA->second->table.end()) {
+ // Found an instance, record it.
+ Results.insert(std::make_pair(Current, V->second));
+ continue;
+ }
+
+ // If we reach the beginning of the function, then give up.
+ if (pred_begin(Current) == pred_end(Current))
+ return 0;
+
+ for (pred_iterator PI = pred_begin(Current), PE = pred_end(Current);
+ PI != PE; ++PI)
+ if (Visited.insert(*PI))
+ Stack.push_back(*PI);
}
- MD.removeInstruction(C);
- toErase.push_back(C);
- return false;
+ // If we didn't find instances, give up. Otherwise, perform phi construction.
+  if (Results.empty())
+    return 0;
+
+  return GetValueForBlock(BaseBlock, orig, Results, true);
}
/// processInstruction - When calculating availability, handle an instruction
/// by inserting it into the appropriate sets
-bool GVN::processInstruction(Instruction *I, ValueNumberedSet &currAvail,
- DenseMap<Value*, LoadInst*> &lastSeenLoad,
+bool GVN::processInstruction(Instruction *I,
SmallVectorImpl<Instruction*> &toErase) {
- if (LoadInst* L = dyn_cast<LoadInst>(I))
- return processLoad(L, lastSeenLoad, toErase);
+ if (LoadInst* L = dyn_cast<LoadInst>(I)) {
+ bool changed = processLoad(L, toErase);
+
+ if (!changed) {
+ unsigned num = VN.lookup_or_add(L);
+ localAvail[I->getParent()]->table.insert(std::make_pair(num, L));
+ }
+
+ return changed;
+ }
- if (StoreInst *SI = dyn_cast<StoreInst>(I))
- return processStore(SI, toErase);
+ uint32_t nextNum = VN.getNextUnusedValueNumber();
+ unsigned num = VN.lookup_or_add(I);
- if (MemCpyInst* M = dyn_cast<MemCpyInst>(I)) {
- MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
-
- // The are two possible optimizations we can do for memcpy:
- // a) memcpy-memcpy xform which exposes redundance for DSE
- // b) call-memcpy xform for return slot optimization
- Instruction* dep = MD.getDependency(M);
- if (dep == MemoryDependenceAnalysis::None ||
- dep == MemoryDependenceAnalysis::NonLocal)
+ if (BranchInst* BI = dyn_cast<BranchInst>(I)) {
+ localAvail[I->getParent()]->table.insert(std::make_pair(num, I));
+
+ if (!BI->isConditional() || isa<Constant>(BI->getCondition()))
return false;
- if (MemCpyInst *MemCpy = dyn_cast<MemCpyInst>(dep))
- return processMemCpy(M, MemCpy, toErase);
- if (CallInst* C = dyn_cast<CallInst>(dep))
- return performCallSlotOptzn(M, C, toErase);
+
+ Value* branchCond = BI->getCondition();
+ uint32_t condVN = VN.lookup_or_add(branchCond);
+
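+    // A successor reachable only over this edge sees a known value for the
+    // condition. An illustrative sketch (hypothetical IR):
+    //   br i1 %c, label %T, label %F
+    // In %T (given no other predecessors) %c is known true; in %F, false.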
+ BasicBlock* trueSucc = BI->getSuccessor(0);
+ BasicBlock* falseSucc = BI->getSuccessor(1);
+
+ if (trueSucc->getSinglePredecessor())
+ localAvail[trueSucc]->table[condVN] = Context->getConstantIntTrue();
+ if (falseSucc->getSinglePredecessor())
+ localAvail[falseSucc]->table[condVN] = Context->getConstantIntFalse();
+
+ return false;
+
+  // Allocations are always uniquely numbered, so we can save time and memory
+  // by fast-failing them.
+ } else if (isa<AllocationInst>(I) || isa<TerminatorInst>(I)) {
+ localAvail[I->getParent()]->table.insert(std::make_pair(num, I));
return false;
}
- unsigned num = VN.lookup_or_add(I);
-
// Collapse PHI nodes
if (PHINode* p = dyn_cast<PHINode>(I)) {
Value* constVal = CollapsePhi(p);
if (constVal) {
for (PhiMapType::iterator PI = phiMap.begin(), PE = phiMap.end();
PI != PE; ++PI)
- if (PI->second.count(p))
- PI->second.erase(p);
+ PI->second.erase(p);
p->replaceAllUsesWith(constVal);
+ if (isa<PointerType>(constVal->getType()))
+ MD->invalidateCachedPointerInfo(constVal);
+ VN.erase(p);
+
toErase.push_back(p);
+ } else {
+ localAvail[I->getParent()]->table.insert(std::make_pair(num, I));
}
- // Perform value-number based elimination
- } else if (currAvail.test(num)) {
- Value* repl = find_leader(currAvail, num);
-
- if (CallInst* CI = dyn_cast<CallInst>(I)) {
- AliasAnalysis& AA = getAnalysis<AliasAnalysis>();
- if (!AA.doesNotAccessMemory(CI)) {
- MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
- if (cast<Instruction>(repl)->getParent() != CI->getParent() ||
- MD.getDependency(CI) != MD.getDependency(cast<CallInst>(repl))) {
- // There must be an intervening may-alias store, so nothing from
- // this point on will be able to be replaced with the preceding call
- currAvail.erase(repl);
- currAvail.insert(I);
-
- return false;
- }
- }
- }
+
+ // If the number we were assigned was a brand new VN, then we don't
+ // need to do a lookup to see if the number already exists
+ // somewhere in the domtree: it can't!
+ } else if (num == nextNum) {
+ localAvail[I->getParent()]->table.insert(std::make_pair(num, I));
+ // Perform fast-path value-number based elimination of values inherited from
+ // dominators.
+ } else if (Value* repl = lookupNumber(I->getParent(), num)) {
// Remove it!
- MemoryDependenceAnalysis& MD = getAnalysis<MemoryDependenceAnalysis>();
- MD.removeInstruction(I);
-
VN.erase(I);
I->replaceAllUsesWith(repl);
+ if (isa<PointerType>(repl->getType()))
+ MD->invalidateCachedPointerInfo(repl);
toErase.push_back(I);
return true;
- } else if (!I->isTerminator()) {
- currAvail.set(num);
- currAvail.insert(I);
+
+#if 0
+  // Perform slow-path value-number based elimination with phi construction.
+ } else if (Value* repl = AttemptRedundancyElimination(I, num)) {
+ // Remove it!
+ VN.erase(I);
+ I->replaceAllUsesWith(repl);
+ if (isa<PointerType>(repl->getType()))
+ MD->invalidateCachedPointerInfo(repl);
+ toErase.push_back(I);
+ return true;
+#endif
+ } else {
+ localAvail[I->getParent()]->table.insert(std::make_pair(num, I));
}
return false;
}
-// GVN::runOnFunction - This is the main transformation entry point for a
-// function.
-//
+/// runOnFunction - This is the main transformation entry point for a function.
bool GVN::runOnFunction(Function& F) {
+ MD = &getAnalysis<MemoryDependenceAnalysis>();
+ DT = &getAnalysis<DominatorTree>();
VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
+ VN.setMemDep(MD);
+ VN.setDomTree(DT);
bool changed = false;
bool shouldContinue = true;
+ // Merge unconditional branches, allowing PRE to catch more
+ // optimization opportunities.
+ for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
+ BasicBlock* BB = FI;
+ ++FI;
+ bool removedBlock = MergeBlockIntoPredecessor(BB, this);
+ if (removedBlock) NumGVNBlocks++;
+
+ changed |= removedBlock;
+ }
+
+ unsigned Iteration = 0;
+
while (shouldContinue) {
+ DEBUG(cerr << "GVN iteration: " << Iteration << "\n");
shouldContinue = iterateOnFunction(F);
changed |= shouldContinue;
+ ++Iteration;
}
+ if (EnablePRE) {
+ bool PREChanged = true;
+ while (PREChanged) {
+ PREChanged = performPRE(F);
+ changed |= PREChanged;
+ }
+ }
+ // FIXME: Should perform GVN again after PRE does something. PRE can move
+ // computations into blocks where they become fully redundant. Note that
+ // we can't do this until PRE's critical edge splitting updates memdep.
+ // Actually, when this happens, we should just fully integrate PRE into GVN.
+
+ cleanupGlobalSets();
+
return changed;
}
-// GVN::iterateOnFunction - Executes one iteration of GVN
-bool GVN::iterateOnFunction(Function &F) {
- // Clean out global sets from any previous functions
- VN.clear();
- availableOut.clear();
- phiMap.clear();
-
+bool GVN::processBlock(BasicBlock* BB) {
+ // FIXME: Kill off toErase by doing erasing eagerly in a helper function (and
+ // incrementing BI before processing an instruction).
+ SmallVector<Instruction*, 8> toErase;
bool changed_function = false;
- DominatorTree &DT = getAnalysis<DominatorTree>();
-
- SmallVector<Instruction*, 4> toErase;
- DenseMap<Value*, LoadInst*> lastSeenLoad;
-
- // Top-down walk of the dominator tree
- for (df_iterator<DomTreeNode*> DI = df_begin(DT.getRootNode()),
- E = df_end(DT.getRootNode()); DI != E; ++DI) {
+ for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
+ BI != BE;) {
+ changed_function |= processInstruction(BI, toErase);
+ if (toErase.empty()) {
+ ++BI;
+ continue;
+ }
+
+ // If we need some instructions deleted, do it now.
+ NumGVNInstr += toErase.size();
- // Get the set to update for this block
- ValueNumberedSet& currAvail = availableOut[DI->getBlock()];
- lastSeenLoad.clear();
+ // Avoid iterator invalidation.
+ bool AtStart = BI == BB->begin();
+ if (!AtStart)
+ --BI;
+
+    for (SmallVector<Instruction*, 8>::iterator I = toErase.begin(),
+ E = toErase.end(); I != E; ++I) {
+ DEBUG(cerr << "GVN removed: " << **I);
+ MD->removeInstruction(*I);
+      DEBUG(verifyRemoved(*I));
+      (*I)->eraseFromParent();
+ }
+ toErase.clear();
- BasicBlock* BB = DI->getBlock();
+ if (AtStart)
+ BI = BB->begin();
+ else
+ ++BI;
+ }
- // A block inherits AVAIL_OUT from its dominator
- if (DI->getIDom() != 0)
- currAvail = availableOut[DI->getIDom()->getBlock()];
+ return changed_function;
+}
+
+/// performPRE - Perform a purely local form of PRE that looks for diamond
+/// control flow patterns and attempts to perform simple PRE at the join point.
+bool GVN::performPRE(Function& F) {
+ bool Changed = false;
+ SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit;
+ DenseMap<BasicBlock*, Value*> predMap;
+ for (df_iterator<BasicBlock*> DI = df_begin(&F.getEntryBlock()),
+ DE = df_end(&F.getEntryBlock()); DI != DE; ++DI) {
+ BasicBlock* CurrentBlock = *DI;
+
+ // Nothing to PRE in the entry block.
+ if (CurrentBlock == &F.getEntryBlock()) continue;
+
+ for (BasicBlock::iterator BI = CurrentBlock->begin(),
+ BE = CurrentBlock->end(); BI != BE; ) {
+ Instruction *CurInst = BI++;
+
+ if (isa<AllocationInst>(CurInst) || isa<TerminatorInst>(CurInst) ||
+ isa<PHINode>(CurInst) || (CurInst->getType() == Type::VoidTy) ||
+ CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
+ isa<DbgInfoIntrinsic>(CurInst))
+ continue;
- for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
- BI != BE; ) {
- changed_function |= processInstruction(BI, currAvail,
- lastSeenLoad, toErase);
+ uint32_t valno = VN.lookup(CurInst);
- NumGVNInstr += toErase.size();
+      // Examine the predecessors for PRE opportunities. We're
+ // only trying to solve the basic diamond case, where
+ // a value is computed in the successor and one predecessor,
+ // but not the other. We also explicitly disallow cases
+ // where the successor is its own predecessor, because they're
+ // more complicated to get right.
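+      //
+      // An illustrative sketch of the diamond (hypothetical CFG):
+      //
+      //              entry
+      //              /   \
+      //     computes %x   lacks %x    <- PRE inserts a copy here
+      //              \   /
+      //          CurrentBlock         <- the recomputation becomes a phi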
+ unsigned numWith = 0;
+ unsigned numWithout = 0;
+ BasicBlock* PREPred = 0;
+ predMap.clear();
+
+ for (pred_iterator PI = pred_begin(CurrentBlock),
+ PE = pred_end(CurrentBlock); PI != PE; ++PI) {
+        // We're not interested in PRE where the block is its
+        // own predecessor, or in blocks with predecessors
+        // that are not reachable.
+        if (*PI == CurrentBlock || !localAvail.count(*PI)) {
+          numWithout = 2;
+          break;
+        }
+
+ DenseMap<uint32_t, Value*>::iterator predV =
+ localAvail[*PI]->table.find(valno);
+ if (predV == localAvail[*PI]->table.end()) {
+ PREPred = *PI;
+ numWithout++;
+ } else if (predV->second == CurInst) {
+ numWithout = 2;
+ } else {
+ predMap[*PI] = predV->second;
+ numWith++;
+ }
+ }
- // Avoid iterator invalidation
- ++BI;
+ // Don't do PRE when it might increase code size, i.e. when
+ // we would need to insert instructions in more than one pred.
+ if (numWithout != 1 || numWith == 0)
+ continue;
+
+ // We can't do PRE safely on a critical edge, so instead we schedule
+ // the edge to be split and perform the PRE the next time we iterate
+ // on the function.
+ unsigned succNum = 0;
+ for (unsigned i = 0, e = PREPred->getTerminator()->getNumSuccessors();
+ i != e; ++i)
+ if (PREPred->getTerminator()->getSuccessor(i) == CurrentBlock) {
+ succNum = i;
+ break;
+ }
+
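+      // Splitting is needed because inserting into PREPred itself would
+      // compute the value on paths through PREPred's other successors that
+      // never reach CurrentBlock.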
+ if (isCriticalEdge(PREPred->getTerminator(), succNum)) {
+ toSplit.push_back(std::make_pair(PREPred->getTerminator(), succNum));
+ continue;
+ }
+
+      // Instantiate the expression in the predecessor that lacked it.
+      // Because we are going top-down through the block, all value numbers
+      // will be available in the predecessor by the time we need them. Any
+      // that weren't originally present will have been instantiated earlier
+      // in this loop.
+ Instruction* PREInstr = CurInst->clone(*Context);
+ bool success = true;
+ for (unsigned i = 0, e = CurInst->getNumOperands(); i != e; ++i) {
+ Value *Op = PREInstr->getOperand(i);
+ if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
+ continue;
+
+ if (Value *V = lookupNumber(PREPred, VN.lookup(Op))) {
+ PREInstr->setOperand(i, V);
+ } else {
+ success = false;
+ break;
+ }
+ }
+
+ // Fail out if we encounter an operand that is not available in
+ // the PRE predecessor. This is typically because of loads which
+ // are not value numbered precisely.
+ if (!success) {
+        DEBUG(verifyRemoved(PREInstr));
+        delete PREInstr;
+ continue;
+ }
+
+ PREInstr->insertBefore(PREPred->getTerminator());
+ PREInstr->setName(CurInst->getName() + ".pre");
+ predMap[PREPred] = PREInstr;
+ VN.add(PREInstr, valno);
+ NumGVNPRE++;
+
+ // Update the availability map to include the new instruction.
+ localAvail[PREPred]->table.insert(std::make_pair(valno, PREInstr));
+
+ // Create a PHI to make the value available in this block.
+ PHINode* Phi = PHINode::Create(CurInst->getType(),
+ CurInst->getName() + ".pre-phi",
+ CurrentBlock->begin());
+ for (pred_iterator PI = pred_begin(CurrentBlock),
+ PE = pred_end(CurrentBlock); PI != PE; ++PI)
+ Phi->addIncoming(predMap[*PI], *PI);
+
+ VN.add(Phi, valno);
+ localAvail[CurrentBlock]->table[valno] = Phi;
+
+ CurInst->replaceAllUsesWith(Phi);
+ if (isa<PointerType>(Phi->getType()))
+ MD->invalidateCachedPointerInfo(Phi);
+ VN.erase(CurInst);
+
+ DEBUG(cerr << "GVN PRE removed: " << *CurInst);
+ MD->removeInstruction(CurInst);
+      DEBUG(verifyRemoved(CurInst));
+      CurInst->eraseFromParent();
+ Changed = true;
+ }
+ }
+
+ for (SmallVector<std::pair<TerminatorInst*, unsigned>, 4>::iterator
+ I = toSplit.begin(), E = toSplit.end(); I != E; ++I)
+ SplitCriticalEdge(I->first, I->second, this);
+
+  return Changed || !toSplit.empty();
+}
+
+/// iterateOnFunction - Executes one iteration of GVN
+bool GVN::iterateOnFunction(Function &F) {
+ cleanupGlobalSets();
+
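+  // Build the per-block value-number scopes. Each block's scope chains to
+  // its immediate dominator's scope, which is what lets lookupNumber walk up
+  // the dominator tree.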
+ for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
+ DE = df_end(DT->getRootNode()); DI != DE; ++DI) {
+ if (DI->getIDom())
+ localAvail[DI->getBlock()] =
+ new ValueNumberScope(localAvail[DI->getIDom()->getBlock()]);
+ else
+ localAvail[DI->getBlock()] = new ValueNumberScope(0);
+ }
+
+ // Top-down walk of the dominator tree
+ bool changed = false;
+#if 0
+ // Needed for value numbering with phi construction to work.
+ ReversePostOrderTraversal<Function*> RPOT(&F);
+ for (ReversePostOrderTraversal<Function*>::rpo_iterator RI = RPOT.begin(),
+ RE = RPOT.end(); RI != RE; ++RI)
+ changed |= processBlock(*RI);
+#else
+ for (df_iterator<DomTreeNode*> DI = df_begin(DT->getRootNode()),
+ DE = df_end(DT->getRootNode()); DI != DE; ++DI)
+ changed |= processBlock(DI->getBlock());
+#endif
+
+ return changed;
+}
+
+void GVN::cleanupGlobalSets() {
+ VN.clear();
+ phiMap.clear();
+
+ for (DenseMap<BasicBlock*, ValueNumberScope*>::iterator
+ I = localAvail.begin(), E = localAvail.end(); I != E; ++I)
+ delete I->second;
+ localAvail.clear();
+}
- for (SmallVector<Instruction*, 4>::iterator I = toErase.begin(),
- E = toErase.end(); I != E; ++I)
- (*I)->eraseFromParent();
+/// verifyRemoved - Verify that the specified instruction does not occur in our
+/// internal data structures.
+void GVN::verifyRemoved(const Instruction *Inst) const {
+ VN.verifyRemoved(Inst);
- toErase.clear();
+ // Walk through the PHI map to make sure the instruction isn't hiding in there
+ // somewhere.
+  for (PhiMapType::const_iterator
+         I = phiMap.begin(), E = phiMap.end(); I != E; ++I) {
+ assert(I->first != Inst && "Inst is still a key in PHI map!");
+
+ for (SmallPtrSet<Instruction*, 4>::iterator
+ II = I->second.begin(), IE = I->second.end(); II != IE; ++II) {
+ assert(*II != Inst && "Inst is still a value in PHI map!");
+ }
+ }
+
+ // Walk through the value number scope to make sure the instruction isn't
+ // ferreted away in it.
+  for (DenseMap<BasicBlock*, ValueNumberScope*>::const_iterator
+         I = localAvail.begin(), E = localAvail.end(); I != E; ++I) {
+ const ValueNumberScope *VNS = I->second;
+
+ while (VNS) {
+      for (DenseMap<uint32_t, Value*>::const_iterator
+             II = VNS->table.begin(), IE = VNS->table.end(); II != IE; ++II) {
+ assert(II->second != Inst && "Inst still in value numbering scope!");
+ }
+
+ VNS = VNS->parent;
}
}
-
- return changed_function;
}