#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Analysis/MallocHelper.h"
+#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
+#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
SHUFFLE, SELECT, TRUNC, ZEXT, SEXT, FPTOUI,
FPTOSI, UITOFP, SITOFP, FPTRUNC, FPEXT,
PTRTOINT, INTTOPTR, BITCAST, GEP, CALL, CONSTANT,
- EMPTY, TOMBSTONE };
+ INSERTVALUE, EXTRACTVALUE, EMPTY, TOMBSTONE };
ExpressionOpcode opcode;
const Type* type;
- uint32_t firstVN;
- uint32_t secondVN;
- uint32_t thirdVN;
SmallVector<uint32_t, 4> varargs;
Value *function;
return false;
else if (function != other.function)
return false;
- else if (firstVN != other.firstVN)
- return false;
- else if (secondVN != other.secondVN)
- return false;
- else if (thirdVN != other.thirdVN)
- return false;
else {
if (varargs.size() != other.varargs.size())
return false;
Expression create_expression(GetElementPtrInst* G);
Expression create_expression(CallInst* C);
Expression create_expression(Constant* C);
+ Expression create_expression(ExtractValueInst* C);
+ Expression create_expression(InsertValueInst* C);
+
+ uint32_t lookup_or_add_call(CallInst* C);
public:
ValueTable() : nextValueNumber(1) { }
uint32_t lookup_or_add(Value *V);
static unsigned getHashValue(const Expression e) {
unsigned hash = e.opcode;
- hash = e.firstVN + hash * 37;
- hash = e.secondVN + hash * 37;
- hash = e.thirdVN + hash * 37;
-
hash = ((unsigned)((uintptr_t)e.type >> 4) ^
- (unsigned)((uintptr_t)e.type >> 9)) +
- hash * 37;
+ (unsigned)((uintptr_t)e.type >> 9));
for (SmallVector<uint32_t, 4>::const_iterator I = e.varargs.begin(),
E = e.varargs.end(); I != E; ++I)
Expression e;
e.type = C->getType();
- e.firstVN = 0;
- e.secondVN = 0;
- e.thirdVN = 0;
e.function = C->getCalledFunction();
e.opcode = Expression::CALL;
Expression ValueTable::create_expression(BinaryOperator* BO) {
Expression e;
-
- e.firstVN = lookup_or_add(BO->getOperand(0));
- e.secondVN = lookup_or_add(BO->getOperand(1));
- e.thirdVN = 0;
+ e.varargs.push_back(lookup_or_add(BO->getOperand(0)));
+ e.varargs.push_back(lookup_or_add(BO->getOperand(1)));
e.function = 0;
e.type = BO->getType();
e.opcode = getOpcode(BO);
Expression ValueTable::create_expression(CmpInst* C) {
Expression e;
- e.firstVN = lookup_or_add(C->getOperand(0));
- e.secondVN = lookup_or_add(C->getOperand(1));
- e.thirdVN = 0;
+ e.varargs.push_back(lookup_or_add(C->getOperand(0)));
+ e.varargs.push_back(lookup_or_add(C->getOperand(1)));
e.function = 0;
e.type = C->getType();
e.opcode = getOpcode(C);
Expression ValueTable::create_expression(CastInst* C) {
Expression e;
- e.firstVN = lookup_or_add(C->getOperand(0));
- e.secondVN = 0;
- e.thirdVN = 0;
+ e.varargs.push_back(lookup_or_add(C->getOperand(0)));
e.function = 0;
e.type = C->getType();
e.opcode = getOpcode(C);
Expression ValueTable::create_expression(ShuffleVectorInst* S) {
Expression e;
- e.firstVN = lookup_or_add(S->getOperand(0));
- e.secondVN = lookup_or_add(S->getOperand(1));
- e.thirdVN = lookup_or_add(S->getOperand(2));
+ e.varargs.push_back(lookup_or_add(S->getOperand(0)));
+ e.varargs.push_back(lookup_or_add(S->getOperand(1)));
+ e.varargs.push_back(lookup_or_add(S->getOperand(2)));
e.function = 0;
e.type = S->getType();
e.opcode = Expression::SHUFFLE;
Expression ValueTable::create_expression(ExtractElementInst* E) {
Expression e;
- e.firstVN = lookup_or_add(E->getOperand(0));
- e.secondVN = lookup_or_add(E->getOperand(1));
- e.thirdVN = 0;
+ e.varargs.push_back(lookup_or_add(E->getOperand(0)));
+ e.varargs.push_back(lookup_or_add(E->getOperand(1)));
e.function = 0;
e.type = E->getType();
e.opcode = Expression::EXTRACT;
Expression ValueTable::create_expression(InsertElementInst* I) {
Expression e;
- e.firstVN = lookup_or_add(I->getOperand(0));
- e.secondVN = lookup_or_add(I->getOperand(1));
- e.thirdVN = lookup_or_add(I->getOperand(2));
+ e.varargs.push_back(lookup_or_add(I->getOperand(0)));
+ e.varargs.push_back(lookup_or_add(I->getOperand(1)));
+ e.varargs.push_back(lookup_or_add(I->getOperand(2)));
e.function = 0;
e.type = I->getType();
e.opcode = Expression::INSERT;
Expression ValueTable::create_expression(SelectInst* I) {
Expression e;
- e.firstVN = lookup_or_add(I->getCondition());
- e.secondVN = lookup_or_add(I->getTrueValue());
- e.thirdVN = lookup_or_add(I->getFalseValue());
+ e.varargs.push_back(lookup_or_add(I->getCondition()));
+ e.varargs.push_back(lookup_or_add(I->getTrueValue()));
+ e.varargs.push_back(lookup_or_add(I->getFalseValue()));
e.function = 0;
e.type = I->getType();
e.opcode = Expression::SELECT;
Expression ValueTable::create_expression(GetElementPtrInst* G) {
Expression e;
- e.firstVN = lookup_or_add(G->getPointerOperand());
- e.secondVN = 0;
- e.thirdVN = 0;
+ e.varargs.push_back(lookup_or_add(G->getPointerOperand()));
e.function = 0;
e.type = G->getType();
e.opcode = Expression::GEP;
return e;
}
+Expression ValueTable::create_expression(ExtractValueInst* E) {
+ Expression e;
+
+ e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
+ for (ExtractValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
+ II != IE; ++II)
+ e.varargs.push_back(*II);
+ e.function = 0;
+ e.type = E->getType();
+ e.opcode = Expression::EXTRACTVALUE;
+
+ return e;
+}
+
+Expression ValueTable::create_expression(InsertValueInst* E) {
+ Expression e;
+
+ e.varargs.push_back(lookup_or_add(E->getAggregateOperand()));
+ e.varargs.push_back(lookup_or_add(E->getInsertedValueOperand()));
+ for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
+ II != IE; ++II)
+ e.varargs.push_back(*II);
+ e.function = 0;
+ e.type = E->getType();
+ e.opcode = Expression::INSERTVALUE;
+
+ return e;
+}
+
//===----------------------------------------------------------------------===//
// ValueTable External Functions
//===----------------------------------------------------------------------===//
valueNumbering.insert(std::make_pair(V, num));
}
-/// lookup_or_add - Returns the value number for the specified value, assigning
-/// it a new number if it did not have one before.
-uint32_t ValueTable::lookup_or_add(Value *V) {
- DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
- if (VI != valueNumbering.end())
- return VI->second;
-
- if (CallInst* C = dyn_cast<CallInst>(V)) {
- if (AA->doesNotAccessMemory(C)) {
- Expression exp = create_expression(C);
- uint32_t& e = expressionNumbering[exp];
- if (!e) e = nextValueNumber++;
- valueNumbering[V] = e;
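+/// lookup_or_add_call - Value-number a call instruction.  Calls that do not
+/// access memory are numbered like other expressions; readonly calls are also
+/// checked against memdep for an identical dominating call; all other calls
+/// get a fresh value number.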
+uint32_t ValueTable::lookup_or_add_call(CallInst* C) {
+ if (AA->doesNotAccessMemory(C)) {
+ Expression exp = create_expression(C);
+ uint32_t& e = expressionNumbering[exp];
+ if (!e) e = nextValueNumber++;
+ valueNumbering[C] = e;
+ return e;
+ } else if (AA->onlyReadsMemory(C)) {
+ Expression exp = create_expression(C);
+ uint32_t& e = expressionNumbering[exp];
+ if (!e) {
+ e = nextValueNumber++;
+ valueNumbering[C] = e;
return e;
- } else if (AA->onlyReadsMemory(C)) {
- Expression exp = create_expression(C);
- uint32_t& e = expressionNumbering[exp];
- if (!e) {
- e = nextValueNumber++;
- valueNumbering[V] = e;
- return e;
- }
+ }
+ if (!MD) {
+ e = nextValueNumber++;
+ valueNumbering[C] = e;
+ return e;
+ }
+
+ MemDepResult local_dep = MD->getDependency(C);
+
+ if (!local_dep.isDef() && !local_dep.isNonLocal()) {
+ valueNumbering[C] = nextValueNumber;
+ return nextValueNumber++;
+ }
- MemDepResult local_dep = MD->getDependency(C);
+ if (local_dep.isDef()) {
+ CallInst* local_cdep = cast<CallInst>(local_dep.getInst());
- if (!local_dep.isDef() && !local_dep.isNonLocal()) {
- valueNumbering[V] = nextValueNumber;
+ if (local_cdep->getNumOperands() != C->getNumOperands()) {
+ valueNumbering[C] = nextValueNumber;
return nextValueNumber++;
}
- if (local_dep.isDef()) {
- CallInst* local_cdep = cast<CallInst>(local_dep.getInst());
-
- if (local_cdep->getNumOperands() != C->getNumOperands()) {
- valueNumbering[V] = nextValueNumber;
+ for (unsigned i = 1; i < C->getNumOperands(); ++i) {
+ uint32_t c_vn = lookup_or_add(C->getOperand(i));
+ uint32_t cd_vn = lookup_or_add(local_cdep->getOperand(i));
+ if (c_vn != cd_vn) {
+ valueNumbering[C] = nextValueNumber;
return nextValueNumber++;
}
-
- for (unsigned i = 1; i < C->getNumOperands(); ++i) {
- uint32_t c_vn = lookup_or_add(C->getOperand(i));
- uint32_t cd_vn = lookup_or_add(local_cdep->getOperand(i));
- if (c_vn != cd_vn) {
- valueNumbering[V] = nextValueNumber;
- return nextValueNumber++;
- }
- }
-
- uint32_t v = lookup_or_add(local_cdep);
- valueNumbering[V] = v;
- return v;
}
- // Non-local case.
- const MemoryDependenceAnalysis::NonLocalDepInfo &deps =
- MD->getNonLocalCallDependency(CallSite(C));
- // FIXME: call/call dependencies for readonly calls should return def, not
- // clobber! Move the checking logic to MemDep!
- CallInst* cdep = 0;
-
- // Check to see if we have a single dominating call instruction that is
- // identical to C.
- for (unsigned i = 0, e = deps.size(); i != e; ++i) {
- const MemoryDependenceAnalysis::NonLocalDepEntry *I = &deps[i];
- // Ignore non-local dependencies.
- if (I->second.isNonLocal())
- continue;
-
- // We don't handle non-depedencies. If we already have a call, reject
- // instruction dependencies.
- if (I->second.isClobber() || cdep != 0) {
- cdep = 0;
- break;
- }
+ uint32_t v = lookup_or_add(local_cdep);
+ valueNumbering[C] = v;
+ return v;
+ }
- CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->second.getInst());
- // FIXME: All duplicated with non-local case.
- if (NonLocalDepCall && DT->properlyDominates(I->first, C->getParent())){
- cdep = NonLocalDepCall;
- continue;
- }
+ // Non-local case.
+ const MemoryDependenceAnalysis::NonLocalDepInfo &deps =
+ MD->getNonLocalCallDependency(CallSite(C));
+ // FIXME: call/call dependencies for readonly calls should return def, not
+ // clobber! Move the checking logic to MemDep!
+ CallInst* cdep = 0;
+
+ // Check to see if we have a single dominating call instruction that is
+ // identical to C.
+ for (unsigned i = 0, e = deps.size(); i != e; ++i) {
+ const MemoryDependenceAnalysis::NonLocalDepEntry *I = &deps[i];
+ // Ignore non-local dependencies.
+ if (I->second.isNonLocal())
+ continue;
+ // We don't handle non-dependencies. If we already have a call, reject
+ // instruction dependencies.
+ if (I->second.isClobber() || cdep != 0) {
cdep = 0;
break;
}
- if (!cdep) {
- valueNumbering[V] = nextValueNumber;
- return nextValueNumber++;
+ CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->second.getInst());
+ // FIXME: All duplicated with non-local case.
+ if (NonLocalDepCall && DT->properlyDominates(I->first, C->getParent())){
+ cdep = NonLocalDepCall;
+ continue;
}
- if (cdep->getNumOperands() != C->getNumOperands()) {
- valueNumbering[V] = nextValueNumber;
- return nextValueNumber++;
- }
- for (unsigned i = 1; i < C->getNumOperands(); ++i) {
- uint32_t c_vn = lookup_or_add(C->getOperand(i));
- uint32_t cd_vn = lookup_or_add(cdep->getOperand(i));
- if (c_vn != cd_vn) {
- valueNumbering[V] = nextValueNumber;
- return nextValueNumber++;
- }
- }
+ cdep = 0;
+ break;
+ }
- uint32_t v = lookup_or_add(cdep);
- valueNumbering[V] = v;
- return v;
+ if (!cdep) {
+ valueNumbering[C] = nextValueNumber;
+ return nextValueNumber++;
+ }
- } else {
- valueNumbering[V] = nextValueNumber;
+ if (cdep->getNumOperands() != C->getNumOperands()) {
+ valueNumbering[C] = nextValueNumber;
return nextValueNumber++;
}
- } else if (BinaryOperator* BO = dyn_cast<BinaryOperator>(V)) {
- Expression exp = create_expression(BO);
- uint32_t& e = expressionNumbering[exp];
- if (!e) e = nextValueNumber++;
- valueNumbering[V] = e;
- return e;
- } else if (CmpInst* C = dyn_cast<CmpInst>(V)) {
- Expression exp = create_expression(C);
- uint32_t& e = expressionNumbering[exp];
- if (!e) e = nextValueNumber++;
- valueNumbering[V] = e;
- return e;
- } else if (ShuffleVectorInst* U = dyn_cast<ShuffleVectorInst>(V)) {
- Expression exp = create_expression(U);
- uint32_t& e = expressionNumbering[exp];
- if (!e) e = nextValueNumber++;
- valueNumbering[V] = e;
- return e;
- } else if (ExtractElementInst* U = dyn_cast<ExtractElementInst>(V)) {
- Expression exp = create_expression(U);
- uint32_t& e = expressionNumbering[exp];
- if (!e) e = nextValueNumber++;
- valueNumbering[V] = e;
- return e;
- } else if (InsertElementInst* U = dyn_cast<InsertElementInst>(V)) {
- Expression exp = create_expression(U);
- uint32_t& e = expressionNumbering[exp];
- if (!e) e = nextValueNumber++;
- valueNumbering[V] = e;
- return e;
- } else if (SelectInst* U = dyn_cast<SelectInst>(V)) {
- Expression exp = create_expression(U);
- uint32_t& e = expressionNumbering[exp];
- if (!e) e = nextValueNumber++;
- valueNumbering[V] = e;
- return e;
- } else if (CastInst* U = dyn_cast<CastInst>(V)) {
- Expression exp = create_expression(U);
- uint32_t& e = expressionNumbering[exp];
- if (!e) e = nextValueNumber++;
- valueNumbering[V] = e;
- return e;
- } else if (GetElementPtrInst* U = dyn_cast<GetElementPtrInst>(V)) {
- Expression exp = create_expression(U);
- uint32_t& e = expressionNumbering[exp];
- if (!e) e = nextValueNumber++;
- valueNumbering[V] = e;
- return e;
+ for (unsigned i = 1; i < C->getNumOperands(); ++i) {
+ uint32_t c_vn = lookup_or_add(C->getOperand(i));
+ uint32_t cd_vn = lookup_or_add(cdep->getOperand(i));
+ if (c_vn != cd_vn) {
+ valueNumbering[C] = nextValueNumber;
+ return nextValueNumber++;
+ }
+ }
+
+ uint32_t v = lookup_or_add(cdep);
+ valueNumbering[C] = v;
+ return v;
+
} else {
+ valueNumbering[C] = nextValueNumber;
+ return nextValueNumber++;
+ }
+}
+
+/// lookup_or_add - Returns the value number for the specified value, assigning
+/// it a new number if it did not have one before.
+uint32_t ValueTable::lookup_or_add(Value *V) {
+ DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
+ if (VI != valueNumbering.end())
+ return VI->second;
+
+ if (!isa<Instruction>(V)) {
valueNumbering[V] = nextValueNumber;
return nextValueNumber++;
}
+
+ Instruction* I = cast<Instruction>(V);
+ Expression exp;
+ switch (I->getOpcode()) {
+ case Instruction::Call:
+ return lookup_or_add_call(cast<CallInst>(I));
+ case Instruction::Add:
+ case Instruction::FAdd:
+ case Instruction::Sub:
+ case Instruction::FSub:
+ case Instruction::Mul:
+ case Instruction::FMul:
+ case Instruction::UDiv:
+ case Instruction::SDiv:
+ case Instruction::FDiv:
+ case Instruction::URem:
+ case Instruction::SRem:
+ case Instruction::FRem:
+ case Instruction::Shl:
+ case Instruction::LShr:
+ case Instruction::AShr:
+ case Instruction::And:
+ case Instruction::Or :
+ case Instruction::Xor:
+ exp = create_expression(cast<BinaryOperator>(I));
+ break;
+ case Instruction::ICmp:
+ case Instruction::FCmp:
+ exp = create_expression(cast<CmpInst>(I));
+ break;
+ case Instruction::Trunc:
+ case Instruction::ZExt:
+ case Instruction::SExt:
+ case Instruction::FPToUI:
+ case Instruction::FPToSI:
+ case Instruction::UIToFP:
+ case Instruction::SIToFP:
+ case Instruction::FPTrunc:
+ case Instruction::FPExt:
+ case Instruction::PtrToInt:
+ case Instruction::IntToPtr:
+ case Instruction::BitCast:
+ exp = create_expression(cast<CastInst>(I));
+ break;
+ case Instruction::Select:
+ exp = create_expression(cast<SelectInst>(I));
+ break;
+ case Instruction::ExtractElement:
+ exp = create_expression(cast<ExtractElementInst>(I));
+ break;
+ case Instruction::InsertElement:
+ exp = create_expression(cast<InsertElementInst>(I));
+ break;
+ case Instruction::ShuffleVector:
+ exp = create_expression(cast<ShuffleVectorInst>(I));
+ break;
+ case Instruction::ExtractValue:
+ exp = create_expression(cast<ExtractValueInst>(I));
+ break;
+ case Instruction::InsertValue:
+ exp = create_expression(cast<InsertValueInst>(I));
+ break;
+ case Instruction::GetElementPtr:
+ exp = create_expression(cast<GetElementPtrInst>(I));
+ break;
+ default:
+ valueNumbering[V] = nextValueNumber;
+ return nextValueNumber++;
+ }
+
+ uint32_t& e = expressionNumbering[exp];
+ if (!e) e = nextValueNumber++;
+ valueNumbering[V] = e;
+ return e;
}
/// lookup - Returns the value number of the specified value. Fails if
/// the value has not yet been numbered.
uint32_t ValueTable::lookup(Value *V) const {
- DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
+ DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
assert(VI != valueNumbering.end() && "Value not numbered?");
return VI->second;
}
/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void ValueTable::verifyRemoved(const Value *V) const {
- for (DenseMap<Value*, uint32_t>::iterator
+ for (DenseMap<Value*, uint32_t>::const_iterator
I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
assert(I->first != V && "Inst still occurs in value numbering map!");
}
bool runOnFunction(Function &F);
public:
static char ID; // Pass identification, replacement for typeid
- GVN() : FunctionPass(&ID) { }
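+ // noloads disables all load elimination, so MemoryDependenceAnalysis is
+ // not required when it is set.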
+ explicit GVN(bool nopre = false, bool noloads = false)
+ : FunctionPass(&ID), NoPRE(nopre), NoLoads(noloads), MD(0) { }
private:
+ bool NoPRE;
+ bool NoLoads;
MemoryDependenceAnalysis *MD;
DominatorTree *DT;
// This transformation requires dominator postdominator info
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<DominatorTree>();
- AU.addRequired<MemoryDependenceAnalysis>();
+ if (!NoLoads)
+ AU.addRequired<MemoryDependenceAnalysis>();
AU.addRequired<AliasAnalysis>();
AU.addPreserved<DominatorTree>();
}
// createGVNPass - The public interface to this file...
-FunctionPass *llvm::createGVNPass() { return new GVN(); }
+FunctionPass *llvm::createGVNPass(bool NoPRE, bool NoLoads) {
+ return new GVN(NoPRE, NoLoads);
+}
static RegisterPass<GVN> X("gvn",
"Global Value Numbering");
}
-/// AnalyzeLoadFromClobberingStore - This function is called when we have a
-/// memdep query of a load that ends up being a clobbering store. This means
-/// that the store *may* provide bits used by the load but we can't be sure
-/// because the pointers don't mustalias. Check this case to see if there is
-/// anything more we can do before we give up. This returns -1 if we have to
-/// give up, or a byte number in the stored value of the piece that feeds the
-/// load.
-static int AnalyzeLoadFromClobberingStore(LoadInst *L, StoreInst *DepSI,
+/// AnalyzeLoadFromClobberingWrite - This function is called when we have a
+/// memdep query of a load that ends up being a clobbering memory write (store,
+/// memset, memcpy, memmove). This means that the write *may* provide bits used
+/// by the load but we can't be sure because the pointers don't mustalias.
+///
+/// Check this case to see if there is anything more we can do before we give
+/// up. This returns -1 if we have to give up, or a byte number in the stored
+/// value of the piece that feeds the load.
+static int AnalyzeLoadFromClobberingWrite(LoadInst *L, Value *WritePtr,
+ uint64_t WriteSizeInBits,
const TargetData &TD) {
// If the loaded or stored value is a first class array or struct, don't try
// to transform them. We need to be able to bitcast to integer.
- if (isa<StructType>(L->getType()) || isa<ArrayType>(L->getType()) ||
- isa<StructType>(DepSI->getOperand(0)->getType()) ||
- isa<ArrayType>(DepSI->getOperand(0)->getType()))
+ if (isa<StructType>(L->getType()) || isa<ArrayType>(L->getType()))
return -1;
int64_t StoreOffset = 0, LoadOffset = 0;
- Value *StoreBase =
- GetBaseWithConstantOffset(DepSI->getPointerOperand(), StoreOffset, TD);
+ Value *StoreBase = GetBaseWithConstantOffset(WritePtr, StoreOffset, TD);
Value *LoadBase =
GetBaseWithConstantOffset(L->getPointerOperand(), LoadOffset, TD);
if (StoreBase != LoadBase)
#if 0
errs() << "STORE/LOAD DEP WITH COMMON POINTER MISSED:\n"
<< "Base = " << *StoreBase << "\n"
- << "Store Ptr = " << *DepSI->getPointerOperand() << "\n"
- << "Store Offs = " << StoreOffset << " - " << *DepSI << "\n"
+ << "Store Ptr = " << *WritePtr << "\n"
+ << "Store Offs = " << StoreOffset << "\n"
<< "Load Ptr = " << *L->getPointerOperand() << "\n"
<< "Load Offs = " << LoadOffset << " - " << *L << "\n\n";
errs() << "'" << L->getParent()->getParent()->getName() << "'"
// must have gotten confused.
// FIXME: Investigate cases where this bails out, e.g. rdar://7238614. Then
// remove this check, as it is duplicated with what we have below.
- uint64_t StoreSize = TD.getTypeSizeInBits(DepSI->getOperand(0)->getType());
uint64_t LoadSize = TD.getTypeSizeInBits(L->getType());
- if ((StoreSize & 7) | (LoadSize & 7))
+ if ((WriteSizeInBits & 7) | (LoadSize & 7))
return -1;
- StoreSize >>= 3; // Convert to bytes.
+ uint64_t StoreSize = WriteSizeInBits >> 3; // Convert to bytes.
LoadSize >>= 3;
#if 0
errs() << "STORE LOAD DEP WITH COMMON BASE:\n"
<< "Base = " << *StoreBase << "\n"
- << "Store Ptr = " << *DepSI->getPointerOperand() << "\n"
- << "Store Offs = " << StoreOffset << " - " << *DepSI << "\n"
+ << "Store Ptr = " << *WritePtr << "\n"
+ << "Store Offs = " << StoreOffset << "\n"
<< "Load Ptr = " << *L->getPointerOperand() << "\n"
<< "Load Offs = " << LoadOffset << " - " << *L << "\n\n";
errs() << "'" << L->getParent()->getParent()->getName() << "'"
return LoadOffset-StoreOffset;
}
+/// AnalyzeLoadFromClobberingStore - This function is called when we have a
+/// memdep query of a load that ends up being a clobbering store.
+static int AnalyzeLoadFromClobberingStore(LoadInst *L, StoreInst *DepSI,
+ const TargetData &TD) {
+ // Cannot handle reading from store of first-class aggregate yet.
+ if (isa<StructType>(DepSI->getOperand(0)->getType()) ||
+ isa<ArrayType>(DepSI->getOperand(0)->getType()))
+ return -1;
+
+ Value *StorePtr = DepSI->getPointerOperand();
+ uint64_t StoreSize = TD.getTypeSizeInBits(DepSI->getOperand(0)->getType());
+ return AnalyzeLoadFromClobberingWrite(L, StorePtr, StoreSize, TD);
+}
+
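+/// AnalyzeLoadFromClobberingMemInst - This function is called when we have a
+/// memdep query of a load that ends up being a clobbering mem intrinsic.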
+static int AnalyzeLoadFromClobberingMemInst(LoadInst *L, MemIntrinsic *MI,
+ const TargetData &TD) {
+ // If the mem operation is a non-constant size, we can't handle it.
+ ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
+ if (SizeCst == 0) return -1;
+ uint64_t MemSizeInBits = SizeCst->getZExtValue()*8;
+
+ if (MI->getIntrinsicID() == Intrinsic::memset)
+ return AnalyzeLoadFromClobberingWrite(L, MI->getDest(), MemSizeInBits, TD);
+
+ // Unhandled memcpy/memmove.
+ return -1;
+}
+
/// GetStoreValueForLoad - This function is called when we have a
/// memdep query of a load that ends up being a clobbering store. This means
// Shift the bits to the least significant depending on endianness.
unsigned ShiftAmt;
- if (TD.isLittleEndian()) {
+ if (TD.isLittleEndian())
ShiftAmt = Offset*8;
- } else {
+ else
ShiftAmt = (StoreSize-LoadSize-Offset)*8;
- }
if (ShiftAmt)
SrcVal = BinaryOperator::CreateLShr(SrcVal,
return CoerceAvailableValueToLoadType(SrcVal, LoadTy, InsertPt, TD);
}
+/// GetMemInstValueForLoad - This function is called when we have a
+/// memdep query of a load that ends up being a clobbering mem intrinsic.
+static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
+ const Type *LoadTy, Instruction *InsertPt,
+ const TargetData &TD){
+ LLVMContext &Ctx = LoadTy->getContext();
+ uint64_t LoadSize = TD.getTypeSizeInBits(LoadTy)/8;
+
+ IRBuilder<> Builder(InsertPt->getParent(), InsertPt);
+
+ // We know that this method is only called when the mem transfer fully
+ // provides the bits for the load.
+ if (MemSetInst *MSI = dyn_cast<MemSetInst>(SrcInst)) {
+ // memset(P, 'x', 1234) -> splat('x'), even if x is a variable, and
+ // independently of what the offset is.
+ Value *Val = MSI->getValue();
+ if (LoadSize != 1)
+ Val = Builder.CreateZExt(Val, IntegerType::get(Ctx, LoadSize*8));
+
+ Value *OneElt = Val;
+
+ // Splat the value out to the right number of bits.
+ for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize; ) {
+ // If we can double the number of bytes set, do it.
+ if (NumBytesSet*2 <= LoadSize) {
+ Value *ShVal = Builder.CreateShl(Val, NumBytesSet*8);
+ Val = Builder.CreateOr(Val, ShVal);
+ NumBytesSet <<= 1;
+ continue;
+ }
+
+ // Otherwise insert one byte at a time.
+ Value *ShVal = Builder.CreateShl(Val, 1*8);
+ Val = Builder.CreateOr(OneElt, ShVal);
+ ++NumBytesSet;
+ }
+
+ return CoerceAvailableValueToLoadType(Val, LoadTy, InsertPt, TD);
+ }
+
+ // Only memset is handled above; AnalyzeLoadFromClobberingMemInst rejects
+ // memcpy/memmove, so we should not get here for those.
+ return 0;
+}
+
+
+
struct AvailableValueInBlock {
/// BB - The basic block in question.
BasicBlock *BB;
+ enum ValType {
+ SimpleVal, // A simple offsetted value that is accessed.
+ MemIntrin // A memory intrinsic which is loaded from.
+ };
+
/// V - The value that is live out of the block.
- Value *V;
- /// Offset - The byte offset in V that is interesting for the load query.
+ PointerIntPair<Value *, 1, ValType> Val;
+
+ /// Offset - The byte offset in Val that is interesting for the load query.
unsigned Offset;
static AvailableValueInBlock get(BasicBlock *BB, Value *V,
unsigned Offset = 0) {
AvailableValueInBlock Res;
Res.BB = BB;
- Res.V = V;
+ Res.Val.setPointer(V);
+ Res.Val.setInt(SimpleVal);
+ Res.Offset = Offset;
+ return Res;
+ }
+
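+ /// getMI - Get an entry whose value must be reconstructed from a clobbering
+ /// memory intrinsic (e.g. a memset that covers the load).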
+ static AvailableValueInBlock getMI(BasicBlock *BB, MemIntrinsic *MI,
+ unsigned Offset = 0) {
+ AvailableValueInBlock Res;
+ Res.BB = BB;
+ Res.Val.setPointer(MI);
+ Res.Val.setInt(MemIntrin);
Res.Offset = Offset;
return Res;
}
+
+ bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
+ Value *getSimpleValue() const {
+ assert(isSimpleValue() && "Wrong accessor");
+ return Val.getPointer();
+ }
+
+ MemIntrinsic *getMemIntrinValue() const {
+ assert(!isSimpleValue() && "Wrong accessor");
+ return cast<MemIntrinsic>(Val.getPointer());
+ }
};
/// ConstructSSAForLoadSet - Given a set of loads specified by ValuesPerBlock,
const Type *LoadTy = LI->getType();
for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
- BasicBlock *BB = ValuesPerBlock[i].BB;
- Value *AvailableVal = ValuesPerBlock[i].V;
- unsigned Offset = ValuesPerBlock[i].Offset;
+ const AvailableValueInBlock &AV = ValuesPerBlock[i];
+ BasicBlock *BB = AV.BB;
if (SSAUpdate.HasValueForBlock(BB))
continue;
-
- if (AvailableVal->getType() != LoadTy) {
- assert(TD && "Need target data to handle type mismatch case");
- AvailableVal = GetStoreValueForLoad(AvailableVal, Offset, LoadTy,
- BB->getTerminator(), *TD);
-
- if (Offset) {
- DEBUG(errs() << "GVN COERCED NONLOCAL VAL:\n"
- << *ValuesPerBlock[i].V << '\n'
+
+ unsigned Offset = AV.Offset;
+
+ Value *AvailableVal;
+ if (AV.isSimpleValue()) {
+ AvailableVal = AV.getSimpleValue();
+ if (AvailableVal->getType() != LoadTy) {
+ assert(TD && "Need target data to handle type mismatch case");
+ AvailableVal = GetStoreValueForLoad(AvailableVal, Offset, LoadTy,
+ BB->getTerminator(), *TD);
+
+ DEBUG(errs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << " "
+ << *AV.getSimpleValue() << '\n'
<< *AvailableVal << '\n' << "\n\n\n");
}
-
-
- DEBUG(errs() << "GVN COERCED NONLOCAL VAL:\n"
- << *ValuesPerBlock[i].V << '\n'
+ } else {
+ AvailableVal = GetMemInstValueForLoad(AV.getMemIntrinValue(), Offset,
+ LoadTy, BB->getTerminator(), *TD);
+ DEBUG(errs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
+ << " " << *AV.getMemIntrinValue() << '\n'
<< *AvailableVal << '\n' << "\n\n\n");
}
-
SSAUpdate.AddAvailableValue(BB, AvailableVal);
}
return V;
}
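+/// isLifetimeStart - Returns true if the instruction is an
+/// llvm.lifetime.start intrinsic.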
+static bool isLifetimeStart(Instruction *Inst) {
+ if (IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
+ return II->getIntrinsicID() == Intrinsic::lifetime_start;
+ return false;
+}
+
/// processNonLocalLoad - Attempt to eliminate a load whose dependencies are
/// non-local by performing PHI construction.
bool GVN::processNonLocalLoad(LoadInst *LI,
}
}
}
+
+ // If the clobbering value is a memset/memcpy/memmove, see if we can
+ // forward a value on from it.
+ if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
+ if (TD == 0)
+ TD = getAnalysisIfAvailable<TargetData>();
+ if (TD) {
+ int Offset = AnalyzeLoadFromClobberingMemInst(LI, DepMI, *TD);
+ if (Offset != -1) {
+ ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI,
+ Offset));
+ continue;
+ }
+ }
+ }
- // FIXME: Handle memset/memcpy.
UnavailableBlocks.push_back(DepBB);
continue;
}
Instruction *DepInst = DepInfo.getInst();
// Loading the allocation -> undef.
- if (isa<AllocationInst>(DepInst) || isMalloc(DepInst)) {
+ if (isa<AllocaInst>(DepInst) || isMalloc(DepInst) ||
+ // Loading immediately after lifetime begin -> undef.
+ isLifetimeStart(DepInst)) {
ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
UndefValue::get(LI->getType())));
continue;
}
-
+
if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
// Reject loads and stores that are to the same address but are of
// different types if we have to.
// to eliminate LI even if we insert uses in the other predecessors, we will
// end up increasing code size. Reject this by scanning for LI.
for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
- if (ValuesPerBlock[i].V == LI)
+ if (ValuesPerBlock[i].isSimpleValue() &&
+ ValuesPerBlock[i].getSimpleValue() == LI)
return false;
+ // FIXME: It is extremely unclear what this loop is doing, other than
+ // artificially restricting loadpre.
if (isSinglePred) {
bool isHot = false;
- for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
- if (Instruction *I = dyn_cast<Instruction>(ValuesPerBlock[i].V))
+ for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
+ const AvailableValueInBlock &AV = ValuesPerBlock[i];
+ if (AV.isSimpleValue())
// "Hot" Instruction is in some loop (because it dominates its dep.
// instruction).
- if (DT->dominates(LI, I)) {
- isHot = true;
- break;
- }
+ if (Instruction *I = dyn_cast<Instruction>(AV.getSimpleValue()))
+ if (DT->dominates(LI, I)) {
+ isHot = true;
+ break;
+ }
+ }
// We are interested only in "hot" instructions. We don't want to do any
// mis-optimizations here.
assert(UnavailablePred != 0 &&
"Fully available value should be eliminated above!");
- // If the loaded pointer is PHI node defined in this block, do PHI translation
- // to get its value in the predecessor.
- Value *LoadPtr = LI->getOperand(0)->DoPHITranslation(LoadBB, UnavailablePred);
-
- // Make sure the value is live in the predecessor. If it was defined by a
- // non-PHI instruction in this block, we don't know how to recompute it above.
- if (Instruction *LPInst = dyn_cast<Instruction>(LoadPtr))
- if (!DT->dominates(LPInst->getParent(), UnavailablePred)) {
- DEBUG(errs() << "COULDN'T PRE LOAD BECAUSE PTR IS UNAVAILABLE IN PRED: "
- << *LPInst << '\n' << *LI << "\n");
- return false;
- }
-
// We don't currently handle critical edges :(
if (UnavailablePred->getTerminator()->getNumSuccessors() != 1) {
DEBUG(errs() << "COULD NOT PRE LOAD BECAUSE OF CRITICAL EDGE '"
<< UnavailablePred->getName() << "': " << *LI << '\n');
return false;
}
+
+ // Do PHI translation to get its value in the predecessor if necessary. The
+ // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.
+ //
+ // FIXME: This may insert a computation, but we don't tell scalar GVN
+ // optimization stuff about it. How do we do this?
+ SmallVector<Instruction*, 8> NewInsts;
+ Value *LoadPtr = 0;
+
+ // If all preds have a single successor, then we know it is safe to insert the
+ // load on the pred (?!?), so we can insert code to materialize the pointer if
+ // it is not available.
+ if (allSingleSucc) {
+ LoadPtr = MD->InsertPHITranslatedPointer(LI->getOperand(0), LoadBB,
+ UnavailablePred, TD, *DT,NewInsts);
+ } else {
+ LoadPtr = MD->GetAvailablePHITranslatedValue(LI->getOperand(0), LoadBB,
+ UnavailablePred, TD, *DT);
+ }
+ // Assign value numbers to these new instructions.
+ for (SmallVector<Instruction*, 8>::iterator NI = NewInsts.begin(),
+ NE = NewInsts.end(); NI != NE; ++NI) {
+ // FIXME: We really _ought_ to insert these value numbers into their
+ // parent's availability map. However, in doing so, we risk getting into
+ // ordering issues. If a block hasn't been processed yet, we would be
+ // marking a value as AVAIL-IN, which isn't what we intend.
+ VN.lookup_or_add(*NI);
+ }
+
+ // If we couldn't find or insert a computation of this phi translated value,
+ // we fail PRE.
+ if (LoadPtr == 0) {
+ DEBUG(errs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
+ << *LI->getOperand(0) << "\n");
+ return false;
+ }
+
// Make sure it is valid to move this load here. We have to watch out for:
// @1 = getelementptr (i8* p, ...
// test p and branch if == 0
// we do not have this case. Otherwise, check that the load is safe to
// put anywhere; this can be improved, but should be conservatively safe.
if (!allSingleSucc &&
- !isSafeToLoadUnconditionally(LoadPtr, UnavailablePred->getTerminator()))
+ // FIXME: REEVALUATE THIS.
+ !isSafeToLoadUnconditionally(LoadPtr, UnavailablePred->getTerminator())) {
+ assert(NewInsts.empty() && "Should not have inserted instructions");
return false;
+ }
// Okay, we can eliminate this load by inserting a reload in the predecessor
// and using PHI construction to get the value in the other predecessors, do
// it.
DEBUG(errs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
-
+ DEBUG(if (!NewInsts.empty())
+ errs() << "INSERTED " << NewInsts.size() << " INSTS: "
+ << *NewInsts.back() << '\n');
+
Value *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
LI->getAlignment(),
UnavailablePred->getTerminator());
/// processLoad - Attempt to eliminate a load, first by eliminating it
/// locally, and then attempting non-local elimination if that fails.
bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
+ if (!MD)
+ return false;
+
if (L->isVolatile())
return false;
// If the value isn't available, don't do anything!
if (Dep.isClobber()) {
- // FIXME: We should handle memset/memcpy/memmove as dependent instructions
- // to forward the value if available.
- //if (isa<MemIntrinsic>(Dep.getInst()))
- //errs() << "LOAD DEPENDS ON MEM: " << *L << "\n" << *Dep.getInst()<<"\n\n";
-
// Check to see if we have something like this:
// store i32 123, i32* %P
// %A = bitcast i32* %P to i8*
// a common base + constant offset, and if the previous store (or memset)
// completely covers this load. This sort of thing can happen in bitfield
// access code.
+ Value *AvailVal = 0;
if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst()))
if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
int Offset = AnalyzeLoadFromClobberingStore(L, DepSI, *TD);
- if (Offset != -1) {
- Value *AvailVal = GetStoreValueForLoad(DepSI->getOperand(0), Offset,
- L->getType(), L, *TD);
- DEBUG(errs() << "GVN COERCED STORE BITS:\n" << *DepSI << '\n'
- << *AvailVal << '\n' << *L << "\n\n\n");
-
- // Replace the load!
- L->replaceAllUsesWith(AvailVal);
- if (isa<PointerType>(AvailVal->getType()))
- MD->invalidateCachedPointerInfo(AvailVal);
- toErase.push_back(L);
- NumGVNLoad++;
- return true;
- }
+ if (Offset != -1)
+ AvailVal = GetStoreValueForLoad(DepSI->getOperand(0), Offset,
+ L->getType(), L, *TD);
}
+ // If the clobbering value is a memset/memcpy/memmove, see if we can forward
+ // a value on from it.
+ if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) {
+ if (const TargetData *TD = getAnalysisIfAvailable<TargetData>()) {
+ int Offset = AnalyzeLoadFromClobberingMemInst(L, DepMI, *TD);
+ if (Offset != -1)
+ AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L,*TD);
+ }
+ }
+
+ if (AvailVal) {
+ DEBUG(errs() << "GVN COERCED INST:\n" << *Dep.getInst() << '\n'
+ << *AvailVal << '\n' << *L << "\n\n\n");
+
+ // Replace the load!
+ L->replaceAllUsesWith(AvailVal);
+ if (isa<PointerType>(AvailVal->getType()))
+ MD->invalidateCachedPointerInfo(AvailVal);
+ toErase.push_back(L);
+ NumGVNLoad++;
+ return true;
+ }
+
DEBUG(
// fast print dep, using operator<< on instruction would be too slow
errs() << "GVN: load ";
// actually have the same type. See if we know how to reuse the stored
// value (depending on its type).
const TargetData *TD = 0;
- if (StoredVal->getType() != L->getType() &&
- (TD = getAnalysisIfAvailable<TargetData>())) {
- StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(),
- L, *TD);
- if (StoredVal == 0)
+ if (StoredVal->getType() != L->getType()) {
+ if ((TD = getAnalysisIfAvailable<TargetData>())) {
+ StoredVal = CoerceAvailableValueToLoadType(StoredVal, L->getType(),
+ L, *TD);
+ if (StoredVal == 0)
+ return false;
+
+ DEBUG(errs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
+ << '\n' << *L << "\n\n\n");
+ }
+ else
return false;
-
- DEBUG(errs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal
- << '\n' << *L << "\n\n\n");
}
// Remove it!
// the same type. See if we know how to reuse the previously loaded value
// (depending on its type).
const TargetData *TD = 0;
- if (DepLI->getType() != L->getType() &&
- (TD = getAnalysisIfAvailable<TargetData>())) {
- AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(), L,*TD);
- if (AvailableVal == 0)
- return false;
+ if (DepLI->getType() != L->getType()) {
+ if ((TD = getAnalysisIfAvailable<TargetData>())) {
+ AvailableVal = CoerceAvailableValueToLoadType(DepLI, L->getType(), L,*TD);
+ if (AvailableVal == 0)
+ return false;
- DEBUG(errs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal
- << "\n" << *L << "\n\n\n");
+ DEBUG(errs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal
+ << "\n" << *L << "\n\n\n");
+ }
+ else
+ return false;
}
// Remove it!
// If this load really doesn't depend on anything, then we must be loading an
// undef value. This can happen when loading for a fresh allocation with no
// intervening stores, for example.
- if (isa<AllocationInst>(DepInst) || isMalloc(DepInst)) {
+ if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) {
L->replaceAllUsesWith(UndefValue::get(L->getType()));
toErase.push_back(L);
NumGVNLoad++;
return true;
}
+
+ // If this load occurs right after a lifetime begin, then the loaded
+ // value is undefined.
+ if (IntrinsicInst* II = dyn_cast<IntrinsicInst>(DepInst)) {
+ if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
+ L->replaceAllUsesWith(UndefValue::get(L->getType()));
+ toErase.push_back(L);
+ NumGVNLoad++;
+ return true;
+ }
+ }
return false;
}
// Allocations are always uniquely numbered, so we can save time and memory
// by fast failing them.
- } else if (isa<AllocationInst>(I) || isa<TerminatorInst>(I)) {
+ } else if (isa<AllocaInst>(I) || isa<TerminatorInst>(I)) {
localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
return false;
}
if (constVal) {
p->replaceAllUsesWith(constVal);
- if (isa<PointerType>(constVal->getType()))
+ if (MD && isa<PointerType>(constVal->getType()))
MD->invalidateCachedPointerInfo(constVal);
VN.erase(p);
// Remove it!
VN.erase(I);
I->replaceAllUsesWith(repl);
- if (isa<PointerType>(repl->getType()))
+ if (MD && isa<PointerType>(repl->getType()))
MD->invalidateCachedPointerInfo(repl);
toErase.push_back(I);
return true;
/// runOnFunction - This is the main transformation entry point for a function.
bool GVN::runOnFunction(Function& F) {
- MD = &getAnalysis<MemoryDependenceAnalysis>();
+ if (!NoLoads)
+ MD = &getAnalysis<MemoryDependenceAnalysis>();
DT = &getAnalysis<DominatorTree>();
VN.setAliasAnalysis(&getAnalysis<AliasAnalysis>());
VN.setMemDep(MD);
for (SmallVector<Instruction*, 4>::iterator I = toErase.begin(),
E = toErase.end(); I != E; ++I) {
DEBUG(errs() << "GVN removed: " << **I << '\n');
- MD->removeInstruction(*I);
+ if (MD) MD->removeInstruction(*I);
(*I)->eraseFromParent();
DEBUG(verifyRemoved(*I));
}
/// performPRE - Perform a purely local form of PRE that looks for diamond
/// control flow patterns and attempts to perform simple PRE at the join point.
-bool GVN::performPRE(Function& F) {
+bool GVN::performPRE(Function &F) {
bool Changed = false;
SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit;
DenseMap<BasicBlock*, Value*> predMap;
BE = CurrentBlock->end(); BI != BE; ) {
Instruction *CurInst = BI++;
- if (isa<AllocationInst>(CurInst) ||
+ if (isa<AllocaInst>(CurInst) ||
isa<TerminatorInst>(CurInst) || isa<PHINode>(CurInst) ||
CurInst->getType()->isVoidTy() ||
CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
// we would need to insert instructions in more than one pred.
if (NumWithout != 1 || NumWith == 0)
continue;
+
+ // Don't do PRE across indirect branch.
+ if (isa<IndirectBrInst>(PREPred->getTerminator()))
+ continue;
// We can't do PRE safely on a critical edge, so instead we schedule
// the edge to be split and perform the PRE the next time we iterate
localAvail[CurrentBlock]->table[ValNo] = Phi;
CurInst->replaceAllUsesWith(Phi);
- if (isa<PointerType>(Phi->getType()))
+ if (MD && isa<PointerType>(Phi->getType()))
MD->invalidateCachedPointerInfo(Phi);
VN.erase(CurInst);
DEBUG(errs() << "GVN PRE removed: " << *CurInst << '\n');
- MD->removeInstruction(CurInst);
+ if (MD) MD->removeInstruction(CurInst);
CurInst->eraseFromParent();
DEBUG(verifyRemoved(CurInst));
Changed = true;
// Walk through the value number scope to make sure the instruction isn't
// ferreted away in it.
- for (DenseMap<BasicBlock*, ValueNumberScope*>::iterator
+ for (DenseMap<BasicBlock*, ValueNumberScope*>::const_iterator
I = localAvail.begin(), E = localAvail.end(); I != E; ++I) {
const ValueNumberScope *VNS = I->second;
while (VNS) {
- for (DenseMap<uint32_t, Value*>::iterator
+ for (DenseMap<uint32_t, Value*>::const_iterator
II = VNS->table.begin(), IE = VNS->table.end(); II != IE; ++II) {
assert(II->second != Inst && "Inst still in value numbering scope!");
}