X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FAnalysis%2FLint.cpp;h=8ee9b8af51ea79d82f438c3d7243ecc553d074e5;hb=94c465cf108e1e5c764fb82f7eb4d26c714f8747;hp=751c41ce39ccf1c82c345dc29f95709bad604e4e;hpb=17d95965cbf529f96277130b75c018f5f3873b66;p=oota-llvm.git

diff --git a/lib/Analysis/Lint.cpp b/lib/Analysis/Lint.cpp
index 751c41ce39c..8ee9b8af51e 100644
--- a/lib/Analysis/Lint.cpp
+++ b/lib/Analysis/Lint.cpp
@@ -16,42 +16,44 @@
 // those aren't comprehensive either. Second, many conditions cannot be
 // checked statically. This pass does no dynamic instrumentation, so it
 // can't check for all possible problems.
-// 
+//
 // Another limitation is that it assumes all code will be executed. A store
 // through a null pointer in a basic block which is never reached is harmless,
-// but this pass will warn about it anyway.
+// but this pass will warn about it anyway. This is the main reason why most
+// of these checks live here instead of in the Verifier pass.
 //
 // Optimization passes may make conditions that this pass checks for more or
 // less obvious. If an optimization pass appears to be introducing a warning,
 // it may be that the optimization pass is merely exposing an existing
 // condition in the code.
-// 
+//
 // This code may be run before instcombine. In many cases, instcombine checks
 // for the same kinds of things and turns instructions with undefined behavior
 // into unreachable (or equivalent). Because of this, this pass makes some
 // effort to look through bitcasts and so on.
-// 
+//
 //===----------------------------------------------------------------------===//
 
-#include "llvm/Analysis/Passes.h"
+#include "llvm/Analysis/Lint.h"
+#include "llvm/ADT/STLExtras.h"
 #include "llvm/Analysis/AliasAnalysis.h"
-#include "llvm/Analysis/InstructionSimplify.h"
+#include "llvm/Analysis/AssumptionTracker.h"
 #include "llvm/Analysis/ConstantFolding.h"
-#include "llvm/Analysis/Dominators.h"
-#include "llvm/Analysis/Lint.h"
+#include "llvm/Analysis/InstructionSimplify.h"
 #include "llvm/Analysis/Loads.h"
+#include "llvm/Analysis/Passes.h"
 #include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Assembly/Writer.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/InstVisitor.h"
+#include "llvm/IR/IntrinsicInst.h"
 #include "llvm/Pass.h"
 #include "llvm/PassManager.h"
-#include "llvm/IntrinsicInst.h"
-#include "llvm/Function.h"
-#include "llvm/Support/CallSite.h"
 #include "llvm/Support/Debug.h"
-#include "llvm/Support/InstVisitor.h"
 #include "llvm/Support/raw_ostream.h"
-#include "llvm/ADT/STLExtras.h"
+#include "llvm/Target/TargetLibraryInfo.h"
 using namespace llvm;
 
 namespace {
@@ -68,8 +70,9 @@ namespace {
     void visitFunction(Function &F);
 
     void visitCallSite(CallSite CS);
-    void visitMemoryReference(Instruction &I, Value *Ptr, unsigned Align,
-                              const Type *Ty, unsigned Flags);
+    void visitMemoryReference(Instruction &I, Value *Ptr,
+                              uint64_t Size, unsigned Align,
+                              Type *Ty, unsigned Flags);
     void visitCallInst(CallInst &I);
     void visitInvokeInst(InvokeInst &I);
@@ -94,79 +97,69 @@ namespace {
     Value *findValue(Value *V, bool OffsetOk) const;
     Value *findValueImpl(Value *V, bool OffsetOk,
-                         SmallPtrSet<Value *, 4> &Visited) const;
+                         SmallPtrSetImpl<Value *> &Visited) const;
 
   public:
     Module *Mod;
     AliasAnalysis *AA;
+    AssumptionTracker *AT;
     DominatorTree *DT;
-    TargetData *TD;
+    const DataLayout *DL;
+    TargetLibraryInfo *TLI;
 
     std::string Messages;
     raw_string_ostream MessagesStr;
 
     static char ID; // Pass identification, replacement for typeid
-    Lint() : FunctionPass(&ID), MessagesStr(Messages) {}
+    Lint() : FunctionPass(ID), MessagesStr(Messages) {
+      initializeLintPass(*PassRegistry::getPassRegistry());
+    }
 
-    virtual bool runOnFunction(Function &F);
+    bool runOnFunction(Function &F) override;
 
-    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+    void getAnalysisUsage(AnalysisUsage &AU) const override {
       AU.setPreservesAll();
       AU.addRequired<AliasAnalysis>();
-      AU.addRequired<DominatorTree>();
+      AU.addRequired<AssumptionTracker>();
+      AU.addRequired<TargetLibraryInfo>();
+      AU.addRequired<DominatorTreeWrapperPass>();
     }
-    virtual void print(raw_ostream &O, const Module *M) const {}
+    void print(raw_ostream &O, const Module *M) const override {}
 
     void WriteValue(const Value *V) {
       if (!V) return;
       if (isa<Instruction>(V)) {
         MessagesStr << *V << '\n';
       } else {
-        WriteAsOperand(MessagesStr, V, true, Mod);
+        V->printAsOperand(MessagesStr, true, Mod);
         MessagesStr << '\n';
       }
     }
 
-    void WriteType(const Type *T) {
-      if (!T) return;
-      MessagesStr << ' ';
-      WriteTypeSymbolic(MessagesStr, T, Mod);
-    }
-
     // CheckFailed - A check failed, so print out the condition and the message
     // that failed.  This provides a nice place to put a breakpoint if you want
     // to see why something is not correct.
     void CheckFailed(const Twine &Message,
-                     const Value *V1 = 0, const Value *V2 = 0,
-                     const Value *V3 = 0, const Value *V4 = 0) {
+                     const Value *V1 = nullptr, const Value *V2 = nullptr,
+                     const Value *V3 = nullptr, const Value *V4 = nullptr) {
       MessagesStr << Message.str() << "\n";
       WriteValue(V1);
       WriteValue(V2);
       WriteValue(V3);
       WriteValue(V4);
     }
-
-    void CheckFailed(const Twine &Message, const Value *V1,
-                     const Type *T2, const Value *V3 = 0) {
-      MessagesStr << Message.str() << "\n";
-      WriteValue(V1);
-      WriteType(T2);
-      WriteValue(V3);
-    }
-
-    void CheckFailed(const Twine &Message, const Type *T1,
-                     const Type *T2 = 0, const Type *T3 = 0) {
-      MessagesStr << Message.str() << "\n";
-      WriteType(T1);
-      WriteType(T2);
-      WriteType(T3);
-    }
   };
 }
 
 char Lint::ID = 0;
-static RegisterPass<Lint>
-X("lint", "Statically lint-checks LLVM IR", false, true);
+INITIALIZE_PASS_BEGIN(Lint, "lint", "Statically lint-checks LLVM IR",
+                      false, true)
+INITIALIZE_PASS_DEPENDENCY(AssumptionTracker)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
+INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_END(Lint, "lint", "Statically lint-checks LLVM IR",
+                    false, true)
 
 // Assert - We know that cond should be true, if not print an error message.
 #define Assert(C, M) \
@@ -186,8 +179,11 @@ X("lint", "Statically lint-checks LLVM IR", false, true);
 bool Lint::runOnFunction(Function &F) {
   Mod = F.getParent();
   AA = &getAnalysis<AliasAnalysis>();
-  DT = &getAnalysis<DominatorTree>();
-  TD = getAnalysisIfAvailable<TargetData>();
+  AT = &getAnalysis<AssumptionTracker>();
+  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
+  DL = DLP ? &DLP->getDataLayout() : nullptr;
+  TLI = &getAnalysis<TargetLibraryInfo>();
   visit(F);
   dbgs() << MessagesStr.str();
   Messages.clear();
@@ -199,35 +195,69 @@ void Lint::visitFunction(Function &F) {
   // fairly common mistake to neglect to name a function.
   Assert1(F.hasName() || F.hasLocalLinkage(),
           "Unusual: Unnamed function with non-local linkage", &F);
+
+  // TODO: Check for irreducible control flow.
 }
 
 void Lint::visitCallSite(CallSite CS) {
   Instruction &I = *CS.getInstruction();
   Value *Callee = CS.getCalledValue();
 
-  visitMemoryReference(I, Callee, 0, 0, MemRef::Callee);
+  visitMemoryReference(I, Callee, AliasAnalysis::UnknownSize,
+                       0, nullptr, MemRef::Callee);
 
   if (Function *F = dyn_cast<Function>(findValue(Callee, /*OffsetOk=*/false))) {
     Assert1(CS.getCallingConv() == F->getCallingConv(),
             "Undefined behavior: Caller and callee calling convention differ",
             &I);
 
-    const FunctionType *FT = F->getFunctionType();
-    unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
+    FunctionType *FT = F->getFunctionType();
+    unsigned NumActualArgs = CS.arg_size();
 
     Assert1(FT->isVarArg() ?
               FT->getNumParams() <= NumActualArgs :
               FT->getNumParams() == NumActualArgs,
             "Undefined behavior: Call argument count mismatches callee "
             "argument count", &I);
-
-    // TODO: Check argument types (in case the callee was casted)
-    // TODO: Check ABI-significant attributes.
-
-    // TODO: Check noalias attribute.
-
-    // TODO: Check sret attribute.
+    Assert1(FT->getReturnType() == I.getType(),
+            "Undefined behavior: Call return type mismatches "
+            "callee return type", &I);
+
+    // Check argument types (in case the callee was casted) and attributes.
+    // TODO: Verify that caller and callee attributes are compatible.
+    Function::arg_iterator PI = F->arg_begin(), PE = F->arg_end();
+    CallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
+    for (; AI != AE; ++AI) {
+      Value *Actual = *AI;
+      if (PI != PE) {
+        Argument *Formal = PI++;
+        Assert1(Formal->getType() == Actual->getType(),
+                "Undefined behavior: Call argument type mismatches "
+                "callee parameter type", &I);
+
+        // Check that noalias arguments don't alias other arguments. This is
+        // not fully precise because we don't know the sizes of the dereferenced
+        // memory regions.
+        if (Formal->hasNoAliasAttr() && Actual->getType()->isPointerTy())
+          for (CallSite::arg_iterator BI = CS.arg_begin(); BI != AE; ++BI)
+            if (AI != BI && (*BI)->getType()->isPointerTy()) {
+              AliasAnalysis::AliasResult Result = AA->alias(*AI, *BI);
+              Assert1(Result != AliasAnalysis::MustAlias &&
+                      Result != AliasAnalysis::PartialAlias,
+                      "Unusual: noalias argument aliases another argument", &I);
+            }
+
+        // Check that an sret argument points to valid memory.
+        if (Formal->hasStructRetAttr() && Actual->getType()->isPointerTy()) {
+          Type *Ty =
+            cast<PointerType>(Formal->getType())->getElementType();
+          visitMemoryReference(I, Actual, AA->getTypeStoreSize(Ty),
+                               DL ? DL->getABITypeAlignment(Ty) : 0,
+                               Ty, MemRef::Read | MemRef::Write);
+        }
+      }
+    }
   }
 
   if (CS.isCall() && cast<CallInst>(CS.getInstruction())->isTailCall())
@@ -248,15 +278,18 @@ void Lint::visitCallSite(CallSite CS) {
 
     case Intrinsic::memcpy: {
       MemCpyInst *MCI = cast<MemCpyInst>(&I);
-      visitMemoryReference(I, MCI->getSource(), MCI->getAlignment(), 0,
+      // TODO: If the size is known, use it.
+      visitMemoryReference(I, MCI->getDest(), AliasAnalysis::UnknownSize,
+                           MCI->getAlignment(), nullptr,
                            MemRef::Write);
-      visitMemoryReference(I, MCI->getDest(), MCI->getAlignment(), 0,
+      visitMemoryReference(I, MCI->getSource(), AliasAnalysis::UnknownSize,
+                           MCI->getAlignment(), nullptr,
                            MemRef::Read);
 
       // Check that the memcpy arguments don't overlap. The AliasAnalysis API
       // isn't expressive enough for what we really want to do. Known partial
       // overlap is not distinguished from the case where nothing is known.
-      unsigned Size = 0;
+      uint64_t Size = 0;
      if (const ConstantInt *Len =
            dyn_cast<ConstantInt>(findValue(MCI->getLength(),
                                            /*OffsetOk=*/false)))
@@ -269,15 +302,20 @@ void Lint::visitCallSite(CallSite CS) {
    }
    case Intrinsic::memmove: {
      MemMoveInst *MMI = cast<MemMoveInst>(&I);
-      visitMemoryReference(I, MMI->getSource(), MMI->getAlignment(), 0,
+      // TODO: If the size is known, use it.
+      visitMemoryReference(I, MMI->getDest(), AliasAnalysis::UnknownSize,
+                           MMI->getAlignment(), nullptr,
                           MemRef::Write);
-      visitMemoryReference(I, MMI->getDest(), MMI->getAlignment(), 0,
+      visitMemoryReference(I, MMI->getSource(), AliasAnalysis::UnknownSize,
+                           MMI->getAlignment(), nullptr,
                           MemRef::Read);
      break;
    }
    case Intrinsic::memset: {
      MemSetInst *MSI = cast<MemSetInst>(&I);
-      visitMemoryReference(I, MSI->getDest(), MSI->getAlignment(), 0,
+      // TODO: If the size is known, use it.
+      visitMemoryReference(I, MSI->getDest(), AliasAnalysis::UnknownSize,
+                           MSI->getAlignment(), nullptr,
                           MemRef::Write);
      break;
    }
@@ -287,24 +325,26 @@ void Lint::visitCallSite(CallSite CS) {
             "Undefined behavior: va_start called in a non-varargs function",
             &I);
 
-      visitMemoryReference(I, CS.getArgument(0), 0, 0,
-                           MemRef::Read | MemRef::Write);
+      visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
+                           0, nullptr, MemRef::Read | MemRef::Write);
      break;
    case Intrinsic::vacopy:
-      visitMemoryReference(I, CS.getArgument(0), 0, 0, MemRef::Write);
-      visitMemoryReference(I, CS.getArgument(1), 0, 0, MemRef::Read);
+      visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
+                           0, nullptr, MemRef::Write);
+      visitMemoryReference(I, CS.getArgument(1), AliasAnalysis::UnknownSize,
+                           0, nullptr, MemRef::Read);
      break;
    case Intrinsic::vaend:
-      visitMemoryReference(I, CS.getArgument(0), 0, 0,
-                           MemRef::Read | MemRef::Write);
+      visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
+                           0, nullptr, MemRef::Read | MemRef::Write);
      break;
    case Intrinsic::stackrestore:
      // Stackrestore doesn't read or write memory, but it sets the
      // stack pointer, which the compiler may read from or write to
      // at any time, so check it for both readability and writeability.
-      visitMemoryReference(I, CS.getArgument(0), 0, 0,
-                           MemRef::Read | MemRef::Write);
+      visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
+                           0, nullptr, MemRef::Read | MemRef::Write);
      break;
    }
 }
@@ -330,15 +370,27 @@ void Lint::visitReturnInst(ReturnInst &I) {
   }
 }
 
-// TODO: Add a length argument and check that the reference is in bounds
+// TODO: Check that the reference is in bounds.
+// TODO: Check readnone/readonly function attributes.
 void Lint::visitMemoryReference(Instruction &I,
-                                Value *Ptr, unsigned Align, const Type *Ty,
-                                unsigned Flags) {
+                                Value *Ptr, uint64_t Size, unsigned Align,
+                                Type *Ty, unsigned Flags) {
+  // If no memory is being referenced, it doesn't matter if the pointer
+  // is valid.
+  if (Size == 0)
+    return;
+
   Value *UnderlyingObject = findValue(Ptr, /*OffsetOk=*/true);
   Assert1(!isa<ConstantPointerNull>(UnderlyingObject),
           "Undefined behavior: Null pointer dereference", &I);
   Assert1(!isa<UndefValue>(UnderlyingObject),
           "Undefined behavior: Undef pointer dereference", &I);
+  Assert1(!isa<ConstantInt>(UnderlyingObject) ||
+          !cast<ConstantInt>(UnderlyingObject)->isAllOnesValue(),
+          "Unusual: All-ones pointer dereference", &I);
+  Assert1(!isa<ConstantInt>(UnderlyingObject) ||
+          !cast<ConstantInt>(UnderlyingObject)->isOne(),
+          "Unusual: Address one pointer dereference", &I);
 
   if (Flags & MemRef::Write) {
     if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(UnderlyingObject))
@@ -364,28 +416,64 @@ void Lint::visitMemoryReference(Instruction &I,
             "Undefined behavior: Branch to non-blockaddress", &I);
   }
 
-  if (TD) {
-    if (Align == 0 && Ty) Align = TD->getABITypeAlignment(Ty);
-
-    if (Align != 0) {
-      unsigned BitWidth = TD->getTypeSizeInBits(Ptr->getType());
-      APInt Mask = APInt::getAllOnesValue(BitWidth),
-            KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
-      ComputeMaskedBits(Ptr, Mask, KnownZero, KnownOne, TD);
-      Assert1(!(KnownOne & APInt::getLowBitsSet(BitWidth, Log2_32(Align))),
-              "Undefined behavior: Memory reference address is misaligned", &I);
+  // Check for buffer overflows and misalignment.
+  // Only handles memory references that read/write something simple like an
+  // alloca instruction or a global variable.
+  int64_t Offset = 0;
+  if (Value *Base = GetPointerBaseWithConstantOffset(Ptr, Offset, DL)) {
+    // OK, so the access is to a constant offset from Ptr.  Check that Ptr is
+    // something we can handle and if so extract the size of this base object
+    // along with its alignment.
+    uint64_t BaseSize = AliasAnalysis::UnknownSize;
+    unsigned BaseAlign = 0;
+
+    if (AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
+      Type *ATy = AI->getAllocatedType();
+      if (DL && !AI->isArrayAllocation() && ATy->isSized())
+        BaseSize = DL->getTypeAllocSize(ATy);
+      BaseAlign = AI->getAlignment();
+      if (DL && BaseAlign == 0 && ATy->isSized())
+        BaseAlign = DL->getABITypeAlignment(ATy);
+    } else if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Base)) {
+      // If the global may be defined differently in another compilation unit
+      // then don't warn about funky memory accesses.
+      if (GV->hasDefinitiveInitializer()) {
+        Type *GTy = GV->getType()->getElementType();
+        if (DL && GTy->isSized())
+          BaseSize = DL->getTypeAllocSize(GTy);
+        BaseAlign = GV->getAlignment();
+        if (DL && BaseAlign == 0 && GTy->isSized())
+          BaseAlign = DL->getABITypeAlignment(GTy);
+      }
     }
+
+    // Accesses from before the start or after the end of the object are not
+    // defined.
+    Assert1(Size == AliasAnalysis::UnknownSize ||
+            BaseSize == AliasAnalysis::UnknownSize ||
+            (Offset >= 0 && Offset + Size <= BaseSize),
+            "Undefined behavior: Buffer overflow", &I);
+
+    // Accesses that say that the memory is more aligned than it is are not
+    // defined.
+    if (DL && Align == 0 && Ty && Ty->isSized())
+      Align = DL->getABITypeAlignment(Ty);
+    Assert1(!BaseAlign || Align <= MinAlign(BaseAlign, Offset),
+            "Undefined behavior: Memory reference address is misaligned", &I);
   }
 }
 
 void Lint::visitLoadInst(LoadInst &I) {
-  visitMemoryReference(I, I.getPointerOperand(), I.getAlignment(), I.getType(),
-                       MemRef::Read);
+  visitMemoryReference(I, I.getPointerOperand(),
+                       AA->getTypeStoreSize(I.getType()), I.getAlignment(),
+                       I.getType(), MemRef::Read);
 }
 
 void Lint::visitStoreInst(StoreInst &I) {
-  visitMemoryReference(I, I.getPointerOperand(), I.getAlignment(),
-                       I.getOperand(0)->getType(), MemRef::Write);
+  visitMemoryReference(I, I.getPointerOperand(),
+                       AA->getTypeStoreSize(I.getOperand(0)->getType()),
+                       I.getAlignment(),
+                       I.getOperand(0)->getType(), MemRef::Write);
 }
 
 void Lint::visitXor(BinaryOperator &I) {
@@ -421,34 +509,63 @@ void Lint::visitShl(BinaryOperator &I) {
           "Undefined result: Shift count out of range", &I);
 }
 
-static bool isZero(Value *V, TargetData *TD) {
+static bool isZero(Value *V, const DataLayout *DL, DominatorTree *DT,
+                   AssumptionTracker *AT) {
   // Assume undef could be zero.
-  if (isa<UndefValue>(V)) return true;
+  if (isa<UndefValue>(V))
+    return true;
+
+  VectorType *VecTy = dyn_cast<VectorType>(V->getType());
+  if (!VecTy) {
+    unsigned BitWidth = V->getType()->getIntegerBitWidth();
+    APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
+    computeKnownBits(V, KnownZero, KnownOne, DL,
+                     0, AT, dyn_cast<Instruction>(V), DT);
+    return KnownZero.isAllOnesValue();
+  }
 
-  unsigned BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
-  APInt Mask = APInt::getAllOnesValue(BitWidth),
-        KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
-  ComputeMaskedBits(V, Mask, KnownZero, KnownOne, TD);
-  return KnownZero.isAllOnesValue();
+  // Per-component check doesn't work with zeroinitializer
+  Constant *C = dyn_cast<Constant>(V);
+  if (!C)
+    return false;
+
+  if (C->isZeroValue())
+    return true;
+
+  // For a vector, KnownZero will only be true if all values are zero, so check
+  // this per component
+  unsigned BitWidth = VecTy->getElementType()->getIntegerBitWidth();
+  for (unsigned I = 0, N = VecTy->getNumElements(); I != N; ++I) {
+    Constant *Elem = C->getAggregateElement(I);
+    if (isa<UndefValue>(Elem))
+      return true;
+
+    APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
+    computeKnownBits(Elem, KnownZero, KnownOne, DL);
+    if (KnownZero.isAllOnesValue())
+      return true;
+  }
+
+  return false;
 }
 
 void Lint::visitSDiv(BinaryOperator &I) {
-  Assert1(!isZero(I.getOperand(1), TD),
+  Assert1(!isZero(I.getOperand(1), DL, DT, AT),
           "Undefined behavior: Division by zero", &I);
 }
 
 void Lint::visitUDiv(BinaryOperator &I) {
-  Assert1(!isZero(I.getOperand(1), TD),
+  Assert1(!isZero(I.getOperand(1), DL, DT, AT),
           "Undefined behavior: Division by zero", &I);
 }
 
 void Lint::visitSRem(BinaryOperator &I) {
-  Assert1(!isZero(I.getOperand(1), TD),
+  Assert1(!isZero(I.getOperand(1), DL, DT, AT),
           "Undefined behavior: Division by zero", &I);
 }
 
 void Lint::visitURem(BinaryOperator &I) {
-  Assert1(!isZero(I.getOperand(1), TD),
+  Assert1(!isZero(I.getOperand(1), DL, DT, AT),
           "Undefined behavior: Division by zero", &I);
 }
 
@@ -457,15 +574,21 @@ void Lint::visitAllocaInst(AllocaInst &I) {
   // This isn't undefined behavior, it's just an obvious pessimization.
   Assert1(&I.getParent()->getParent()->getEntryBlock() == I.getParent(),
           "Pessimization: Static alloca outside of entry block", &I);
+
+  // TODO: Check for an unusual size (MSB set?)
 }
 
 void Lint::visitVAArgInst(VAArgInst &I) {
-  visitMemoryReference(I, I.getOperand(0), 0, 0,
-                       MemRef::Read | MemRef::Write);
+  visitMemoryReference(I, I.getOperand(0), AliasAnalysis::UnknownSize, 0,
+                       nullptr, MemRef::Read | MemRef::Write);
 }
 
 void Lint::visitIndirectBrInst(IndirectBrInst &I) {
-  visitMemoryReference(I, I.getAddress(), 0, 0, MemRef::Branchee);
+  visitMemoryReference(I, I.getAddress(), AliasAnalysis::UnknownSize, 0,
+                       nullptr, MemRef::Branchee);
+
+  Assert1(I.getNumDestinations() != 0,
+          "Undefined behavior: indirectbr with no destinations", &I);
 }
 
 void Lint::visitExtractElementInst(ExtractElementInst &I) {
@@ -487,7 +610,7 @@ void Lint::visitInsertElementInst(InsertElementInst &I) {
 
 void Lint::visitUnreachableInst(UnreachableInst &I) {
   // This isn't undefined behavior, it's merely suspicious.
   Assert1(&I == I.getParent()->begin() ||
-          prior(BasicBlock::iterator(&I))->mayHaveSideEffects(),
+          std::prev(BasicBlock::iterator(&I))->mayHaveSideEffects(),
           "Unusual: unreachable immediately preceded by instruction without "
           "side effects", &I);
 }
@@ -506,49 +629,67 @@ Value *Lint::findValue(Value *V, bool OffsetOk) const {
 
 /// findValueImpl - Implementation helper for findValue.
 Value *Lint::findValueImpl(Value *V, bool OffsetOk,
-                           SmallPtrSet<Value *, 4> &Visited) const {
+                           SmallPtrSetImpl<Value *> &Visited) const {
   // Detect self-referential values.
-  if (!Visited.insert(V))
+  if (!Visited.insert(V).second)
     return UndefValue::get(V->getType());
 
   // TODO: Look through sext or zext cast, when the result is known to
   // be interpreted as signed or unsigned, respectively.
+  // TODO: Look through eliminable cast pairs.
   // TODO: Look through calls with unique return values.
   // TODO: Look through vector insert/extract/shuffle.
-  V = OffsetOk ? V->getUnderlyingObject() : V->stripPointerCasts();
+  V = OffsetOk ? GetUnderlyingObject(V, DL) : V->stripPointerCasts();
   if (LoadInst *L = dyn_cast<LoadInst>(V)) {
     BasicBlock::iterator BBI = L;
     BasicBlock *BB = L->getParent();
+    SmallPtrSet<BasicBlock *, 4> VisitedBlocks;
     for (;;) {
+      if (!VisitedBlocks.insert(BB).second)
+        break;
      if (Value *U = FindAvailableLoadedValue(L->getPointerOperand(),
                                              BB, BBI, 6, AA))
        return findValueImpl(U, OffsetOk, Visited);
-      BB = L->getParent()->getUniquePredecessor();
+      if (BBI != BB->begin()) break;
+      BB = BB->getUniquePredecessor();
      if (!BB) break;
      BBI = BB->end();
    }
+  } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
+    if (Value *W = PN->hasConstantValue())
+      if (W != V)
+        return findValueImpl(W, OffsetOk, Visited);
  } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
-    if (CI->isNoopCast(TD ? TD->getIntPtrType(V->getContext()) :
-                       Type::getInt64Ty(V->getContext())))
+    if (CI->isNoopCast(DL))
      return findValueImpl(CI->getOperand(0), OffsetOk, Visited);
-  } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
-    if (Value *W = PN->hasConstantValue(DT))
-      return findValueImpl(W, OffsetOk, Visited);
  } else if (ExtractValueInst *Ex = dyn_cast<ExtractValueInst>(V)) {
    if (Value *W = FindInsertedValue(Ex->getAggregateOperand(),
-                                     Ex->idx_begin(),
-                                     Ex->idx_end()))
+                                     Ex->getIndices()))
      if (W != V)
        return findValueImpl(W, OffsetOk, Visited);
+  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
+    // Same as above, but for ConstantExpr instead of Instruction.
+    if (Instruction::isCast(CE->getOpcode())) {
+      if (CastInst::isNoopCast(Instruction::CastOps(CE->getOpcode()),
+                               CE->getOperand(0)->getType(),
+                               CE->getType(),
+                               DL ? DL->getIntPtrType(V->getType()) :
+                                    Type::getInt64Ty(V->getContext())))
+        return findValueImpl(CE->getOperand(0), OffsetOk, Visited);
+    } else if (CE->getOpcode() == Instruction::ExtractValue) {
+      ArrayRef<unsigned> Indices = CE->getIndices();
+      if (Value *W = FindInsertedValue(CE->getOperand(0), Indices))
+        if (W != V)
+          return findValueImpl(W, OffsetOk, Visited);
+    }
  }
 
  // As a last resort, try SimplifyInstruction or constant folding.
  if (Instruction *Inst = dyn_cast<Instruction>(V)) {
-    if (Value *W = SimplifyInstruction(Inst, TD))
-      if (W != Inst)
-        return findValueImpl(W, OffsetOk, Visited);
+    if (Value *W = SimplifyInstruction(Inst, DL, TLI, DT, AT))
+      return findValueImpl(W, OffsetOk, Visited);
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
-    if (Value *W = ConstantFoldConstantExpression(CE, TD))
+    if (Value *W = ConstantFoldConstantExpression(CE, DL, TLI))
      if (W != V)
        return findValueImpl(W, OffsetOk, Visited);
  }
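For a sense of what the checks in this file report in practice, here is a small, purely illustrative IR fragment (the function and value names are made up, not taken from the LLVM test suite). Running something like it through `opt -lint -disable-output` should print the "Null pointer dereference" and "Division by zero" messages that appear in the code above, since the store's pointer operand is a ConstantPointerNull and the sdiv's second operand is known to be zero:

define i32 @lint_example(i32 %x) {
entry:
  ; store through a literal null pointer -> "Undefined behavior: Null pointer dereference"
  store i32 %x, i32* null
  ; divisor is a known zero -> "Undefined behavior: Division by zero"
  %q = sdiv i32 %x, 0
  ret i32 %q
}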