X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FAnalysis%2FBasicAliasAnalysis.cpp;h=d49898a401cc33bd28a923d5fce35dab6546349b;hb=579dca12c2cfd60bc18aaadbd5331897d48fec29;hp=e928a265ff091da20634fd413205ee51f10996a3;hpb=7a82ba0510aece59f59fe988ab273c3542b4d559;p=oota-llvm.git diff --git a/lib/Analysis/BasicAliasAnalysis.cpp b/lib/Analysis/BasicAliasAnalysis.cpp index e928a265ff0..d49898a401c 100644 --- a/lib/Analysis/BasicAliasAnalysis.cpp +++ b/lib/Analysis/BasicAliasAnalysis.cpp @@ -1,58 +1,111 @@ //===- BasicAliasAnalysis.cpp - Local Alias Analysis Impl -----------------===// -// +// // The LLVM Compiler Infrastructure // // This file was developed by the LLVM research group and is distributed under // the University of Illinois Open Source License. See LICENSE.TXT for details. -// +// //===----------------------------------------------------------------------===// // // This file defines the default implementation of the Alias Analysis interface // that simply implements a few identities (two different globals cannot alias, // etc), but otherwise does no analysis. // -// FIXME: This could be extended for a very simple form of mod/ref information. -// If a pointer is locally allocated (either malloc or alloca) and never passed -// into a call or stored to memory, then we know that calls will not mod/ref the -// memory. This can be important for tailcallelim, and can support CSE of loads -// and dead store elimination across calls. This is particularly important for -// stack allocated arrays. -// //===----------------------------------------------------------------------===// #include "llvm/Analysis/AliasAnalysis.h" -#include "llvm/Pass.h" -#include "llvm/Argument.h" -#include "llvm/iOther.h" -#include "llvm/iMemory.h" +#include "llvm/Analysis/Passes.h" #include "llvm/Constants.h" -#include "llvm/GlobalVariable.h" #include "llvm/DerivedTypes.h" +#include "llvm/Function.h" +#include "llvm/GlobalVariable.h" +#include "llvm/Instructions.h" +#include "llvm/Pass.h" #include "llvm/Target/TargetData.h" +#include "llvm/Support/Compiler.h" #include "llvm/Support/GetElementPtrTypeIterator.h" +#include "llvm/Support/ManagedStatic.h" +#include using namespace llvm; -// Make sure that anything that uses AliasAnalysis pulls in this file... -void llvm::BasicAAStub() {} - namespace { - struct BasicAliasAnalysis : public ImmutablePass, public AliasAnalysis { - + /// NoAA - This class implements the -no-aa pass, which always returns "I + /// don't know" for alias queries. NoAA is unlike other alias analysis + /// implementations, in that it does not chain to a previous analysis. As + /// such it doesn't follow many of the rules that other alias analyses must. 
+ /// + struct VISIBILITY_HIDDEN NoAA : public ImmutablePass, public AliasAnalysis { virtual void getAnalysisUsage(AnalysisUsage &AU) const { - AliasAnalysis::getAnalysisUsage(AU); + AU.addRequired(); + } + + virtual void initializePass() { + TD = &getAnalysis(); + } + + virtual AliasResult alias(const Value *V1, unsigned V1Size, + const Value *V2, unsigned V2Size) { + return MayAlias; + } + + virtual ModRefBehavior getModRefBehavior(Function *F, CallSite CS, + std::vector *Info) { + return UnknownModRefBehavior; } - - virtual void initializePass(); + virtual void getArgumentAccesses(Function *F, CallSite CS, + std::vector &Info) { + assert(0 && "This method may not be called on this function!"); + } + + virtual void getMustAliases(Value *P, std::vector &RetVals) { } + virtual bool pointsToConstantMemory(const Value *P) { return false; } + virtual ModRefResult getModRefInfo(CallSite CS, Value *P, unsigned Size) { + return ModRef; + } + virtual ModRefResult getModRefInfo(CallSite CS1, CallSite CS2) { + return ModRef; + } + virtual bool hasNoModRefInfoForCalls() const { return true; } + + virtual void deleteValue(Value *V) {} + virtual void copyValue(Value *From, Value *To) {} + }; + + // Register this pass... + RegisterPass + U("no-aa", "No Alias Analysis (always returns 'may' alias)"); + + // Declare that we implement the AliasAnalysis interface + RegisterAnalysisGroup V(U); +} // End of anonymous namespace + +ImmutablePass *llvm::createNoAAPass() { return new NoAA(); } + +namespace { + /// BasicAliasAnalysis - This is the default alias analysis implementation. + /// Because it doesn't chain to a previous alias analysis (like -no-aa), it + /// derives from the NoAA class. + struct VISIBILITY_HIDDEN BasicAliasAnalysis : public NoAA { AliasResult alias(const Value *V1, unsigned V1Size, const Value *V2, unsigned V2Size); ModRefResult getModRefInfo(CallSite CS, Value *P, unsigned Size); + ModRefResult getModRefInfo(CallSite CS1, CallSite CS2) { + return NoAA::getModRefInfo(CS1,CS2); + } + + /// hasNoModRefInfoForCalls - We can provide mod/ref information against + /// non-escaping allocations. + virtual bool hasNoModRefInfoForCalls() const { return false; } /// pointsToConstantMemory - Chase pointers until we find a (constant /// global) or not. bool pointsToConstantMemory(const Value *P); + virtual ModRefBehavior getModRefBehavior(Function *F, CallSite CS, + std::vector *Info); + private: // CheckGEPInstructions - Check two GEP instructions with known // must-aliasing base pointers. This checks to see if the index expressions @@ -63,23 +116,17 @@ namespace { const Type *BasePtr2Ty, std::vector &GEP2Ops, unsigned G2Size); }; - + // Register this pass... - RegisterOpt + RegisterPass X("basicaa", "Basic Alias Analysis (default AA impl)"); // Declare that we implement the AliasAnalysis interface - RegisterAnalysisGroup Y; + RegisterAnalysisGroup Y(X); } // End of anonymous namespace -void BasicAliasAnalysis::initializePass() { - InitializeAliasAnalysis(this); -} - -// hasUniqueAddress - Return true if the specified value points to something -// with a unique, discernable, address. 
-static inline bool hasUniqueAddress(const Value *V) { - return isa(V) || isa(V); +ImmutablePass *llvm::createBasicAliasAnalysisPass() { + return new BasicAliasAnalysis(); } // getUnderlyingObject - This traverses the use chain to figure out what object @@ -88,19 +135,19 @@ static inline bool hasUniqueAddress(const Value *V) { static const Value *getUnderlyingObject(const Value *V) { if (!isa(V->getType())) return 0; - // If we are at some type of object... return it. - if (hasUniqueAddress(V) || isa(V)) return V; - + // If we are at some type of object, return it. GlobalValues and Allocations + // have unique addresses. + if (isa(V) || isa(V) || isa(V)) + return V; + // Traverse through different addressing mechanisms... if (const Instruction *I = dyn_cast(V)) { - if (isa(I) || isa(I)) + if (isa(I) || isa(I)) return getUnderlyingObject(I->getOperand(0)); } else if (const ConstantExpr *CE = dyn_cast(V)) { - if (CE->getOpcode() == Instruction::Cast || + if (CE->getOpcode() == Instruction::BitCast || CE->getOpcode() == Instruction::GetElementPtr) return getUnderlyingObject(CE->getOperand(0)); - } else if (const ConstantPointerRef *CPR = dyn_cast(V)) { - return CPR->getValue(); } return 0; } @@ -122,7 +169,7 @@ static const Value *GetGEPOperands(const Value *V, std::vector &GEPOps){ V = cast(V)->getOperand(0); while (const User *G = isGEP(V)) { - if (!isa(GEPOps[0]) || + if (!isa(GEPOps[0]) || isa(GEPOps[0]) || !cast(GEPOps[0])->isNullValue()) break; // Don't handle folding arbitrary pointer offsets yet... GEPOps.erase(GEPOps.begin()); // Drop the zero index @@ -141,24 +188,34 @@ bool BasicAliasAnalysis::pointsToConstantMemory(const Value *P) { return false; } +// Determine if an AllocationInst instruction escapes from the function it is +// contained in. If it does not escape, there is no way for another function to +// mod/ref it. We do this by looking at its uses and determining if the uses +// can escape (recursively). static bool AddressMightEscape(const Value *V) { for (Value::use_const_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ++UI) { const Instruction *I = cast(*UI); switch (I->getOpcode()) { - case Instruction::Load: break; + case Instruction::Load: + break; //next use. case Instruction::Store: if (I->getOperand(0) == V) return true; // Escapes if the pointer is stored. - break; + break; // next use. case Instruction::GetElementPtr: - if (AddressMightEscape(I)) return true; - break; - case Instruction::Cast: + if (AddressMightEscape(I)) + return true; + case Instruction::BitCast: if (!isa(I->getType())) return true; - if (AddressMightEscape(I)) return true; - break; + if (AddressMightEscape(I)) + return true; + break; // next use + case Instruction::Ret: + // If returned, the address will escape to calling functions, but no + // callees could modify it. + break; // next use default: return true; } @@ -173,7 +230,7 @@ static bool AddressMightEscape(const Value *V) { // AliasAnalysis::ModRefResult BasicAliasAnalysis::getModRefInfo(CallSite CS, Value *P, unsigned Size) { - if (!isa(P) && !isa(P)) + if (!isa(P)) if (const AllocationInst *AI = dyn_cast_or_null(getUnderlyingObject(P))) { // Okay, the pointer is to a stack allocated object. If we can prove that @@ -181,11 +238,16 @@ BasicAliasAnalysis::getModRefInfo(CallSite CS, Value *P, unsigned Size) { // because it simply can't get its address. 
if (!AddressMightEscape(AI)) return NoModRef; + + // If this is a tail call and P points to a stack location, we know that + // the tail call cannot access or modify the local stack. + if (CallInst *CI = dyn_cast(CS.getInstruction())) + if (CI->isTailCall() && isa(AI)) + return NoModRef; } - // If P points to a constant memory location, the call definitely could not - // modify the memory location. - return pointsToConstantMemory(P) ? Ref : ModRef; + // The AliasAnalysis base class has some smarts, lets use them. + return AliasAnalysis::getModRefInfo(CS, P, Size); } // alias - Provide a bunch of ad-hoc rules to disambiguate in common cases, such @@ -197,58 +259,87 @@ BasicAliasAnalysis::alias(const Value *V1, unsigned V1Size, const Value *V2, unsigned V2Size) { // Strip off any constant expression casts if they exist if (const ConstantExpr *CE = dyn_cast(V1)) - if (CE->getOpcode() == Instruction::Cast) + if (CE->isCast() && isa(CE->getOperand(0)->getType())) V1 = CE->getOperand(0); if (const ConstantExpr *CE = dyn_cast(V2)) - if (CE->getOpcode() == Instruction::Cast) + if (CE->isCast() && isa(CE->getOperand(0)->getType())) V2 = CE->getOperand(0); - // Strip off constant pointer refs if they exist - if (const ConstantPointerRef *CPR = dyn_cast(V1)) - V1 = CPR->getValue(); - if (const ConstantPointerRef *CPR = dyn_cast(V2)) - V2 = CPR->getValue(); - // Are we checking for alias of the same value? if (V1 == V2) return MustAlias; if ((!isa(V1->getType()) || !isa(V2->getType())) && - V1->getType() != Type::LongTy && V2->getType() != Type::LongTy) + V1->getType() != Type::Int64Ty && V2->getType() != Type::Int64Ty) return NoAlias; // Scalars cannot alias each other // Strip off cast instructions... - if (const Instruction *I = dyn_cast(V1)) - return alias(I->getOperand(0), V1Size, V2, V2Size); - if (const Instruction *I = dyn_cast(V2)) - return alias(V1, V1Size, I->getOperand(0), V2Size); + if (const BitCastInst *I = dyn_cast(V1)) + if (isa(I->getOperand(0)->getType())) + return alias(I->getOperand(0), V1Size, V2, V2Size); + if (const BitCastInst *I = dyn_cast(V2)) + if (isa(I->getOperand(0)->getType())) + return alias(V1, V1Size, I->getOperand(0), V2Size); // Figure out what objects these things are pointing to if we can... const Value *O1 = getUnderlyingObject(V1); const Value *O2 = getUnderlyingObject(V2); // Pointing at a discernible object? - if (O1 && O2) { - if (isa(O1)) { - // Incoming argument cannot alias locally allocated object! - if (isa(O2)) return NoAlias; - // Otherwise, nothing is known... - } else if (isa(O2)) { - // Incoming argument cannot alias locally allocated object! - if (isa(O1)) return NoAlias; - // Otherwise, nothing is known... - } else { - // If they are two different objects, we know that we have no alias... - if (O1 != O2) return NoAlias; + if (O1) { + if (O2) { + if (isa(O1)) { + // Incoming argument cannot alias locally allocated object! + if (isa(O2)) return NoAlias; + // Otherwise, nothing is known... + } else if (isa(O2)) { + // Incoming argument cannot alias locally allocated object! + if (isa(O1)) return NoAlias; + // Otherwise, nothing is known... + } else if (O1 != O2) { + // If they are two different objects, we know that we have no alias... + return NoAlias; + } + + // If they are the same object, they we can look at the indexes. If they + // index off of the object is the same for both pointers, they must alias. + // If they are provably different, they must not alias. Otherwise, we + // can't tell anything. 
} - // If they are the same object, they we can look at the indexes. If they - // index off of the object is the same for both pointers, they must alias. - // If they are provably different, they must not alias. Otherwise, we can't - // tell anything. - } else if (O1 && !isa(O1) && isa(V2)) { - return NoAlias; // Unique values don't alias null - } else if (O2 && !isa(O2) && isa(V1)) { - return NoAlias; // Unique values don't alias null + + if (!isa(O1) && isa(V2)) + return NoAlias; // Unique values don't alias null + + if (isa(O1) || + (isa(O1) && + !cast(O1)->isArrayAllocation())) + if (cast(O1->getType())->getElementType()->isSized()) { + // If the size of the other access is larger than the total size of the + // global/alloca/malloc, it cannot be accessing the global (it's + // undefined to load or store bytes before or after an object). + const Type *ElTy = cast(O1->getType())->getElementType(); + unsigned GlobalSize = getTargetData().getTypeSize(ElTy); + if (GlobalSize < V2Size && V2Size != ~0U) + return NoAlias; + } + } + + if (O2) { + if (!isa(O2) && isa(V1)) + return NoAlias; // Unique values don't alias null + + if (isa(O2) || + (isa(O2) && + !cast(O2)->isArrayAllocation())) + if (cast(O2->getType())->getElementType()->isSized()) { + // If the size of the other access is larger than the total size of the + // global/alloca/malloc, it cannot be accessing the object (it's + // undefined to load or store bytes before or after an object). + const Type *ElTy = cast(O2->getType())->getElementType(); + unsigned GlobalSize = getTargetData().getTypeSize(ElTy); + if (GlobalSize < V1Size && V1Size != ~0U) + return NoAlias; + } } // If we have two gep instructions with must-alias'ing base pointers, figure @@ -263,12 +354,12 @@ BasicAliasAnalysis::alias(const Value *V1, unsigned V1Size, do { BasePtr1 = cast(BasePtr1)->getOperand(0); } while (isGEP(BasePtr1) && - cast(BasePtr1)->getOperand(1) == + cast(BasePtr1)->getOperand(1) == Constant::getNullValue(cast(BasePtr1)->getOperand(1)->getType())); do { BasePtr2 = cast(BasePtr2)->getOperand(0); } while (isGEP(BasePtr2) && - cast(BasePtr2)->getOperand(1) == + cast(BasePtr2)->getOperand(1) == Constant::getNullValue(cast(BasePtr2)->getOperand(1)->getType())); // Do the base pointers alias? @@ -284,11 +375,15 @@ BasicAliasAnalysis::alias(const Value *V1, unsigned V1Size, BasePtr1 = GetGEPOperands(V1, GEP1Ops); BasePtr2 = GetGEPOperands(V2, GEP2Ops); - AliasResult GAlias = - CheckGEPInstructions(BasePtr1->getType(), GEP1Ops, V1Size, - BasePtr2->getType(), GEP2Ops, V2Size); - if (GAlias != MayAlias) - return GAlias; + // If GetGEPOperands were able to fold to the same must-aliased pointer, + // do the comparison. + if (BasePtr1 == BasePtr2) { + AliasResult GAlias = + CheckGEPInstructions(BasePtr1->getType(), GEP1Ops, V1Size, + BasePtr2->getType(), GEP2Ops, V2Size); + if (GAlias != MayAlias) + return GAlias; + } } } @@ -302,7 +397,7 @@ BasicAliasAnalysis::alias(const Value *V1, unsigned V1Size, } if (V1Size != ~0U && V2Size != ~0U) - if (const User *GEP = isGEP(V1)) { + if (isGEP(V1)) { std::vector GEPOperands; const Value *BasePtr = GetGEPOperands(V1, GEPOperands); @@ -331,41 +426,62 @@ BasicAliasAnalysis::alias(const Value *V1, unsigned V1Size, if (ConstantFound) { if (V2Size <= 1 && V1Size <= 1) // Just pointer check? return NoAlias; - + // Otherwise we have to check to see that the distance is more than // the size of the argument... 
build an index vector that is equal to // the arguments provided, except substitute 0's for any variable // indexes we find... - for (unsigned i = 0; i != GEPOperands.size(); ++i) - if (!isa(GEPOperands[i]) || - isa(GEPOperands[i])) - GEPOperands[i] =Constant::getNullValue(GEPOperands[i]->getType()); - int64_t Offset = getTargetData().getIndexedOffset(BasePtr->getType(), - GEPOperands); - if (Offset >= (int64_t)V2Size || Offset <= -(int64_t)V1Size) - return NoAlias; + if (cast( + BasePtr->getType())->getElementType()->isSized()) { + for (unsigned i = 0; i != GEPOperands.size(); ++i) + if (!isa(GEPOperands[i]) || + GEPOperands[i]->getType() == Type::Int1Ty) + GEPOperands[i] = + Constant::getNullValue(GEPOperands[i]->getType()); + int64_t Offset = + getTargetData().getIndexedOffset(BasePtr->getType(), GEPOperands); + + if (Offset >= (int64_t)V2Size || Offset <= -(int64_t)V1Size) + return NoAlias; + } } } } - + return MayAlias; } +// This function is used to determin if the indices of two GEP instructions are +// equal. V1 and V2 are the indices. +static bool IndexOperandsEqual(Value *V1, Value *V2) { + if (V1->getType() == V2->getType()) + return V1 == V2; + if (Constant *C1 = dyn_cast(V1)) + if (Constant *C2 = dyn_cast(V2)) { + // Sign extend the constants to long types, if necessary + if (C1->getType() != Type::Int64Ty) + C1 = ConstantExpr::getSExt(C1, Type::Int64Ty); + if (C2->getType() != Type::Int64Ty) + C2 = ConstantExpr::getSExt(C2, Type::Int64Ty); + return C1 == C2; + } + return false; +} + /// CheckGEPInstructions - Check two GEP instructions with known must-aliasing /// base pointers. This checks to see if the index expressions preclude the /// pointers from aliasing... -AliasAnalysis::AliasResult BasicAliasAnalysis:: -CheckGEPInstructions(const Type* BasePtr1Ty, std::vector &GEP1Ops, - unsigned G1S, - const Type *BasePtr2Ty, std::vector &GEP2Ops, - unsigned G2S) { +AliasAnalysis::AliasResult +BasicAliasAnalysis::CheckGEPInstructions( + const Type* BasePtr1Ty, std::vector &GEP1Ops, unsigned G1S, + const Type *BasePtr2Ty, std::vector &GEP2Ops, unsigned G2S) { // We currently can't handle the case when the base pointers have different // primitive types. Since this is uncommon anyway, we are happy being // extremely conservative. if (BasePtr1Ty != BasePtr2Ty) return MayAlias; - const Type *GEPPointerTy = BasePtr1Ty; + const PointerType *GEPPointerTy = cast(BasePtr1Ty); // Find the (possibly empty) initial sequence of equal values... which are not // necessarily constants. @@ -374,7 +490,7 @@ CheckGEPInstructions(const Type* BasePtr1Ty, std::vector &GEP1Ops, unsigned MaxOperands = std::max(NumGEP1Operands, NumGEP2Operands); unsigned UnequalOper = 0; while (UnequalOper != MinOperands && - GEP1Ops[UnequalOper] == GEP2Ops[UnequalOper]) { + IndexOperandsEqual(GEP1Ops[UnequalOper], GEP2Ops[UnequalOper])) { // Advance through the type as we go... ++UnequalOper; if (const CompositeType *CT = dyn_cast(BasePtr1Ty)) @@ -394,7 +510,7 @@ CheckGEPInstructions(const Type* BasePtr1Ty, std::vector &GEP1Ops, // If so, return mustalias. if (UnequalOper == MinOperands) { if (GEP1Ops.size() < GEP2Ops.size()) std::swap(GEP1Ops, GEP2Ops); - + bool AllAreZeros = true; for (unsigned i = UnequalOper; i != MaxOperands; ++i) if (!isa(GEP1Ops[i]) || @@ -405,43 +521,86 @@ CheckGEPInstructions(const Type* BasePtr1Ty, std::vector &GEP1Ops, if (AllAreZeros) return MustAlias; } - + // So now we know that the indexes derived from the base pointers, // which are known to alias, are different. 
We can still determine a // no-alias result if there are differing constant pairs in the index // chain. For example: // A[i][0] != A[j][1] iff (&A[0][1]-&A[0][0] >= std::max(G1S, G2S)) // + // We have to be careful here about array accesses. In particular, consider: + // A[1][0] vs A[0][i] + // In this case, we don't *know* that the array will be accessed in bounds: + // the index could even be negative. Because of this, we have to + // conservatively *give up* and return may alias. We disregard differing + // array subscripts that are followed by a variable index without going + // through a struct. + // unsigned SizeMax = std::max(G1S, G2S); - if (SizeMax == ~0U) return MayAlias; // Avoid frivolous work... + if (SizeMax == ~0U) return MayAlias; // Avoid frivolous work. // Scan for the first operand that is constant and unequal in the - // two getelemenptrs... + // two getelementptrs... unsigned FirstConstantOper = UnequalOper; for (; FirstConstantOper != MinOperands; ++FirstConstantOper) { const Value *G1Oper = GEP1Ops[FirstConstantOper]; const Value *G2Oper = GEP2Ops[FirstConstantOper]; - + if (G1Oper != G2Oper) // Found non-equal constant indexes... - if (Constant *G1OC = dyn_cast(const_cast(G1Oper))) - if (Constant *G2OC = dyn_cast(const_cast(G2Oper))) { - // Make sure they are comparable (ie, not constant expressions)... - // and make sure the GEP with the smaller leading constant is GEP1. - Constant *Compare = ConstantExpr::get(Instruction::SetGT, G1OC, G2OC); - if (ConstantBool *CV = dyn_cast(Compare)) { - if (CV->getValue()) // If they are comparable and G2 > G1 - std::swap(GEP1Ops, GEP2Ops); // Make GEP1 < GEP2 - break; + if (Constant *G1OC = dyn_cast(const_cast(G1Oper))) + if (Constant *G2OC = dyn_cast(const_cast(G2Oper))){ + if (G1OC->getType() != G2OC->getType()) { + // Sign extend both operands to long. + if (G1OC->getType() != Type::Int64Ty) + G1OC = ConstantExpr::getSExt(G1OC, Type::Int64Ty); + if (G2OC->getType() != Type::Int64Ty) + G2OC = ConstantExpr::getSExt(G2OC, Type::Int64Ty); + GEP1Ops[FirstConstantOper] = G1OC; + GEP2Ops[FirstConstantOper] = G2OC; + } + + if (G1OC != G2OC) { + // Handle the "be careful" case above: if this is an array/packed + // subscript, scan for a subsequent variable array index. + if (isa(BasePtr1Ty)) { + const Type *NextTy = + cast(BasePtr1Ty)->getElementType(); + bool isBadCase = false; + + for (unsigned Idx = FirstConstantOper+1; + Idx != MinOperands && isa(NextTy); ++Idx) { + const Value *V1 = GEP1Ops[Idx], *V2 = GEP2Ops[Idx]; + if (!isa(V1) || !isa(V2)) { + isBadCase = true; + break; + } + NextTy = cast(NextTy)->getElementType(); + } + + if (isBadCase) G1OC = 0; + } + + // Make sure they are comparable (ie, not constant expressions), and + // make sure the GEP with the smaller leading constant is GEP1. + if (G1OC) { + Constant *Compare = ConstantExpr::getICmp(ICmpInst::ICMP_SGT, + G1OC, G2OC); + if (ConstantInt *CV = dyn_cast(Compare)) { + if (CV->getZExtValue()) // If they are comparable and G2 > G1 + std::swap(GEP1Ops, GEP2Ops); // Make GEP1 < GEP2 + break; + } + } } } BasePtr1Ty = cast(BasePtr1Ty)->getTypeAtIndex(G1Oper); } - + // No shared constant operands, and we ran out of common operands. At this // point, the GEP instructions have run through all of their operands, and we // haven't found evidence that there are any deltas between the GEP's. // However, one GEP may have more operands than the other. If this is the - // case, there may still be hope. This this now. + // case, there may still be hope. Check this now. 
if (FirstConstantOper == MinOperands) { // Make GEP1Ops be the longer one if there is a longer one. if (GEP1Ops.size() < GEP2Ops.size()) @@ -450,13 +609,15 @@ CheckGEPInstructions(const Type* BasePtr1Ty, std::vector &GEP1Ops, // Is there anything to check? if (GEP1Ops.size() > MinOperands) { for (unsigned i = FirstConstantOper; i != MaxOperands; ++i) - if (isa(GEP1Ops[i]) && !isa(GEP1Ops[i]) && + if (isa(GEP1Ops[i]) && + GEP1Ops[i]->getType() != Type::Int1Ty && !cast(GEP1Ops[i])->isNullValue()) { // Yup, there's a constant in the tail. Set all variables to // constants in the GEP instruction to make it suiteable for // TargetData::getIndexedOffset. for (i = 0; i != MaxOperands; ++i) - if (!isa(GEP1Ops[i]) || isa(GEP1Ops[i])) + if (!isa(GEP1Ops[i]) || + GEP1Ops[i]->getType() == Type::Int1Ty) GEP1Ops[i] = Constant::getNullValue(GEP1Ops[i]->getType()); // Okay, now get the offset. This is the relative offset for the full // instruction. @@ -466,13 +627,13 @@ CheckGEPInstructions(const Type* BasePtr1Ty, std::vector &GEP1Ops, // Now crop off any constants from the end... GEP1Ops.resize(MinOperands); int64_t Offset2 = TD.getIndexedOffset(GEPPointerTy, GEP1Ops); - + // If the tail provided a bit enough offset, return noalias! if ((uint64_t)(Offset2-Offset1) >= SizeMax) return NoAlias; } } - + // Couldn't find anything useful. return MayAlias; } @@ -483,39 +644,46 @@ CheckGEPInstructions(const Type* BasePtr1Ty, std::vector &GEP1Ops, // than the first constant index of GEP2. // Advance BasePtr[12]Ty over this first differing constant operand. - BasePtr2Ty = cast(BasePtr1Ty)->getTypeAtIndex(GEP2Ops[FirstConstantOper]); - BasePtr1Ty = cast(BasePtr1Ty)->getTypeAtIndex(GEP1Ops[FirstConstantOper]); - + BasePtr2Ty = cast(BasePtr1Ty)-> + getTypeAtIndex(GEP2Ops[FirstConstantOper]); + BasePtr1Ty = cast(BasePtr1Ty)-> + getTypeAtIndex(GEP1Ops[FirstConstantOper]); + // We are going to be using TargetData::getIndexedOffset to determine the // offset that each of the GEP's is reaching. To do this, we have to convert // all variable references to constant references. To do this, we convert the - // initial equal sequence of variables into constant zeros to start with. + // initial sequence of array subscripts into constant zeros to start with. + const Type *ZeroIdxTy = GEPPointerTy; for (unsigned i = 0; i != FirstConstantOper; ++i) { - if (!isa(GEP1Ops[i]) || isa(GEP1Ops[i]) || - !isa(GEP2Ops[i]) || isa(GEP2Ops[i])) { - GEP1Ops[i] = Constant::getNullValue(GEP1Ops[i]->getType()); - GEP2Ops[i] = Constant::getNullValue(GEP2Ops[i]->getType()); - } + if (!isa(ZeroIdxTy)) + GEP1Ops[i] = GEP2Ops[i] = Constant::getNullValue(Type::Int32Ty); + + if (const CompositeType *CT = dyn_cast(ZeroIdxTy)) + ZeroIdxTy = CT->getTypeAtIndex(GEP1Ops[i]); } // We know that GEP1Ops[FirstConstantOper] & GEP2Ops[FirstConstantOper] are ok - + // Loop over the rest of the operands... for (unsigned i = FirstConstantOper+1; i != MaxOperands; ++i) { const Value *Op1 = i < GEP1Ops.size() ? GEP1Ops[i] : 0; const Value *Op2 = i < GEP2Ops.size() ? GEP2Ops[i] : 0; // If they are equal, use a zero index... if (Op1 == Op2 && BasePtr1Ty == BasePtr2Ty) { - if (!isa(Op1) || isa(Op1)) + if (!isa(Op1) || Op1->getType() == Type::Int1Ty) GEP1Ops[i] = GEP2Ops[i] = Constant::getNullValue(Op1->getType()); // Otherwise, just keep the constants we have. } else { if (Op1) { if (const ConstantInt *Op1C = dyn_cast(Op1)) { // If this is an array index, make sure the array element is in range. 
- if (const ArrayType *AT = dyn_cast(BasePtr1Ty)) - if (Op1C->getRawValue() >= AT->getNumElements()) + if (const ArrayType *AT = dyn_cast(BasePtr1Ty)) { + if (Op1C->getZExtValue() >= AT->getNumElements()) return MayAlias; // Be conservative with out-of-range accesses + } else if (const PackedType *PT = dyn_cast(BasePtr1Ty)) { + if (Op1C->getZExtValue() >= PT->getNumElements()) + return MayAlias; // Be conservative with out-of-range accesses + } } else { // GEP1 is known to produce a value less than GEP2. To be @@ -529,16 +697,23 @@ CheckGEPInstructions(const Type* BasePtr1Ty, std::vector &GEP1Ops, // value possible. // if (const ArrayType *AT = dyn_cast(BasePtr1Ty)) - GEP1Ops[i] = ConstantSInt::get(Type::LongTy,AT->getNumElements()-1); + GEP1Ops[i] = ConstantInt::get(Type::Int64Ty, AT->getNumElements()-1); + else if (const PackedType *PT = dyn_cast(BasePtr1Ty)) + GEP1Ops[i] = ConstantInt::get(Type::Int64Ty, PT->getNumElements()-1); + } } - + if (Op2) { if (const ConstantInt *Op2C = dyn_cast(Op2)) { // If this is an array index, make sure the array element is in range. - if (const ArrayType *AT = dyn_cast(BasePtr1Ty)) - if (Op2C->getRawValue() >= AT->getNumElements()) + if (const ArrayType *AT = dyn_cast(BasePtr1Ty)) { + if (Op2C->getZExtValue() >= AT->getNumElements()) + return MayAlias; // Be conservative with out-of-range accesses + } else if (const PackedType *PT = dyn_cast(BasePtr1Ty)) { + if (Op2C->getZExtValue() >= PT->getNumElements()) return MayAlias; // Be conservative with out-of-range accesses + } } else { // Conservatively assume the minimum value for this index GEP2Ops[i] = Constant::getNullValue(Op2->getType()); } @@ -559,16 +734,138 @@ CheckGEPInstructions(const Type* BasePtr1Ty, std::vector &GEP1Ops, BasePtr2Ty = 0; } } - - int64_t Offset1 = getTargetData().getIndexedOffset(GEPPointerTy, GEP1Ops); - int64_t Offset2 = getTargetData().getIndexedOffset(GEPPointerTy, GEP2Ops); - assert(Offset1 < Offset2 &&"There is at least one different constant here!"); - - if ((uint64_t)(Offset2-Offset1) >= SizeMax) { - //std::cerr << "Determined that these two GEP's don't alias [" - // << SizeMax << " bytes]: \n" << *GEP1 << *GEP2; - return NoAlias; + + if (GEPPointerTy->getElementType()->isSized()) { + int64_t Offset1 = getTargetData().getIndexedOffset(GEPPointerTy, GEP1Ops); + int64_t Offset2 = getTargetData().getIndexedOffset(GEPPointerTy, GEP2Ops); + assert(Offset1= SizeMax) { + //cerr << "Determined that these two GEP's don't alias [" + // << SizeMax << " bytes]: \n" << *GEP1 << *GEP2; + return NoAlias; + } } return MayAlias; } +namespace { + struct StringCompare { + bool operator()(const char *LHS, const char *RHS) { + return strcmp(LHS, RHS) < 0; + } + }; +} + +// Note that this list cannot contain libm functions (such as acos and sqrt) +// that set errno on a domain or other error. 
+static const char *DoesntAccessMemoryFns[] = {
+  "abs", "labs", "llabs", "imaxabs", "fabs", "fabsf", "fabsl",
+  "trunc", "truncf", "truncl", "ldexp",
+
+  "atan", "atanf", "atanl",   "atan2", "atan2f", "atan2l",
+  "cbrt",
+  "cos", "cosf", "cosl",
+  "exp", "expf", "expl",
+  "hypot",
+  "sin", "sinf", "sinl",
+  "tan", "tanf", "tanl",      "tanh", "tanhf", "tanhl",
+
+  "floor", "floorf", "floorl", "ceil", "ceilf", "ceill",
+
+  // ctype.h
+  "isalnum", "isalpha", "iscntrl", "isdigit", "isgraph", "islower", "isprint",
+  "ispunct", "isspace", "isupper", "isxdigit", "tolower", "toupper",
+
+  // wctype.h
+  "iswalnum", "iswalpha", "iswcntrl", "iswdigit", "iswgraph", "iswlower",
+  "iswprint", "iswpunct", "iswspace", "iswupper", "iswxdigit",
+
+  "iswctype", "towctrans", "towlower", "towupper",
+
+  "btowc", "wctob",
+
+  "isinf", "isnan", "finite",
+
+  // C99 math functions
+  "copysign", "copysignf", "copysignd",
+  "nexttoward", "nexttowardf", "nexttowardd",
+  "nextafter", "nextafterf", "nextafterd",
+
+  // ISO C99:
+  "__signbit", "__signbitf", "__signbitl",
+};
+
+
+static const char *OnlyReadsMemoryFns[] = {
+  "atoi", "atol", "atof", "atoll", "atoq", "a64l",
+  "bcmp", "memcmp", "memchr", "memrchr", "wmemcmp", "wmemchr",
+
+  // Strings
+  "strcmp", "strcasecmp", "strcoll", "strncmp", "strncasecmp",
+  "strchr", "strcspn", "strlen", "strpbrk", "strrchr", "strspn", "strstr",
+  "index", "rindex",
+
+  // Wide char strings
+  "wcschr", "wcscmp", "wcscoll", "wcscspn", "wcslen", "wcsncmp", "wcspbrk",
+  "wcsrchr", "wcsspn", "wcsstr",
+
+  // glibc
+  "alphasort", "alphasort64", "versionsort", "versionsort64",
+
+  // C99
+  "nan", "nanf", "nand",
+
+  // File I/O
+  "feof", "ferror", "fileno",
+  "feof_unlocked", "ferror_unlocked", "fileno_unlocked"
+};
+
+static ManagedStatic<std::vector<const char*> > NoMemoryTable;
+static ManagedStatic<std::vector<const char*> > OnlyReadsMemoryTable;
+
+
+AliasAnalysis::ModRefBehavior
+BasicAliasAnalysis::getModRefBehavior(Function *F, CallSite CS,
+                                      std::vector<PointerAccessInfo> *Info) {
+  if (!F->isExternal()) return UnknownModRefBehavior;
+
+  static bool Initialized = false;
+  if (!Initialized) {
+    NoMemoryTable->insert(NoMemoryTable->end(),
+                          DoesntAccessMemoryFns,
+                          DoesntAccessMemoryFns+
+                sizeof(DoesntAccessMemoryFns)/sizeof(DoesntAccessMemoryFns[0]));
+
+    OnlyReadsMemoryTable->insert(OnlyReadsMemoryTable->end(),
+                                 OnlyReadsMemoryFns,
+                                 OnlyReadsMemoryFns+
+                      sizeof(OnlyReadsMemoryFns)/sizeof(OnlyReadsMemoryFns[0]));
+#define GET_MODREF_BEHAVIOR
+#include "llvm/Intrinsics.gen"
+#undef GET_MODREF_BEHAVIOR
+
+    // Sort the tables the first time through.
+    std::sort(NoMemoryTable->begin(), NoMemoryTable->end(), StringCompare());
+    std::sort(OnlyReadsMemoryTable->begin(), OnlyReadsMemoryTable->end(),
+              StringCompare());
+    Initialized = true;
+  }
+
+  std::vector<const char*>::iterator Ptr =
+    std::lower_bound(NoMemoryTable->begin(), NoMemoryTable->end(),
+                     F->getName().c_str(), StringCompare());
+  if (Ptr != NoMemoryTable->end() && *Ptr == F->getName())
+    return DoesNotAccessMemory;
+
+  Ptr = std::lower_bound(OnlyReadsMemoryTable->begin(),
+                         OnlyReadsMemoryTable->end(),
+                         F->getName().c_str(), StringCompare());
+  if (Ptr != OnlyReadsMemoryTable->end() && *Ptr == F->getName())
+    return OnlyReadsMemory;
+
+  return UnknownModRefBehavior;
+}
+
+// Make sure that anything that uses AliasAnalysis pulls in this file...
+DEFINING_FILE_FOR(BasicAliasAnalysis)
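
The alias() rewrite leans on getUnderlyingObject, which strips GEPs and pointer casts until it reaches a global, an allocation, or an incoming argument. A minimal standalone model of that walk is sketched below; the Node type and its fields are illustrative stand-ins, not LLVM's Value hierarchy.

#include <cstddef>

// Illustrative stand-in for "a pointer-producing value".  Not an LLVM class.
struct Node {
  enum Kind { Global, Alloca, Malloc, Argument, GEP, Cast, Other } kind;
  const Node *pointerOperand;       // GEP/Cast: the pointer it was derived from
};

// Walk back through address arithmetic and casts until an object with its own
// address is found, the same chase the patch's getUnderlyingObject performs.
static const Node *getUnderlyingObject(const Node *V) {
  while (V) {
    switch (V->kind) {
    case Node::Global:
    case Node::Alloca:
    case Node::Malloc:
    case Node::Argument:
      return V;                     // a discernible object: stop here
    case Node::GEP:
    case Node::Cast:
      V = V->pointerOperand;        // keep stripping indexing and casts
      break;
    default:
      return NULL;                  // loads, phis, etc.: no conclusion
    }
  }
  return NULL;
}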
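
The new mod/ref logic for calls rests on AddressMightEscape: a stack allocation whose address never leaks out of the function cannot be read or written by a callee, so getModRefInfo can answer NoModRef. The sketch below models that recursion with simplified, hypothetical types (they are not LLVM classes); the case analysis mirrors the patch: loads are harmless, a store escapes only when the pointer itself is the stored value, pointers derived through casts and GEPs are scanned recursively, and returning the address does not let a callee touch the memory.

#include <vector>

// Illustrative stand-in for "one use of the allocation's address".
struct Use {
  enum Kind { Load, Store, GEP, Cast, Ret, Other } kind;
  bool pointerIsStoredValue;        // Store only: is the tracked pointer the value stored?
  bool resultIsPointer;             // Cast only: does the cast still produce a pointer?
  std::vector<const Use*> users;    // GEP/Cast: uses of the derived pointer
};

// Same shape of recursion as the patch's AddressMightEscape.
static bool addressMightEscape(const Use *U) {
  switch (U->kind) {
  case Use::Load:
    return false;                   // reading through the pointer is harmless
  case Use::Store:
    return U->pointerIsStoredValue; // escapes only if the pointer itself is stored
  case Use::Cast:
    if (!U->resultIsPointer)
      return true;                  // punned to a non-pointer: assume it escapes
    // otherwise treat it like a GEP and scan the derived pointer's users
  case Use::GEP:
    for (size_t i = 0, e = U->users.size(); i != e; ++i)
      if (addressMightEscape(U->users[i]))
        return true;
    return false;
  case Use::Ret:
    return false;                   // callers may see the address, but no callee
                                    // reached from this function can modify it
  default:
    return true;                    // passed to a call, compared, etc.: assume escape
  }
}

// If no use escapes, calls in this function get NoModRef for the allocation.
bool callsCannotTouchAllocation(const std::vector<const Use*> &usesOfAlloca) {
  for (size_t i = 0, e = usesOfAlloca.size(); i != e; ++i)
    if (addressMightEscape(usesOfAlloca[i]))
      return false;
  return true;
}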
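
alias() now also exploits object sizes: if one access is known to be wider than the entire global or non-array allocation it would have to land in, the two pointers cannot alias, because loading or storing bytes outside an object is undefined. A tiny self-contained version of that test, with made-up sizes:

#include <cassert>

// The patch encodes "size unknown" as ~0U.
static const unsigned UnknownSize = ~0U;

// An access larger than the whole object cannot be an access of that object.
static bool sizeRulesOutAlias(unsigned objectSize, unsigned accessSize) {
  return accessSize != UnknownSize && objectSize < accessSize;
}

int main() {
  assert(sizeRulesOutAlias(4, 8));            // 4-byte global vs. 8-byte access: no alias
  assert(!sizeRulesOutAlias(4, 4));           // a 4-byte access could be the global itself
  assert(!sizeRulesOutAlias(4, UnknownSize)); // unknown access size: no conclusion
  return 0;
}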
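
For GEPs with must-aliased bases, CheckGEPInstructions (and the single-GEP path in alias()) replaces every variable index with zero, asks TargetData::getIndexedOffset for the byte offset of what remains, and reports NoAlias when the constant parts alone keep the two accesses at least max(G1S, G2S) bytes apart. The standalone example below hand-computes the offsets for a hypothetical int A[10][20] with 4-byte ints instead of calling TargetData:

#include <cassert>
#include <cstdint>

// Byte offset of A[row][col] for a hypothetical "int A[10][20]" (4-byte int),
// after variable indices have been replaced by 0, as the patch does before
// handing the operand list to TargetData::getIndexedOffset.
static int64_t offsetWithVariablesZeroed(int64_t constRow, int64_t constCol) {
  const int64_t RowStride = 20 * 4;   // sizeof(int[20])
  const int64_t ColStride = 4;        // sizeof(int)
  return constRow * RowStride + constCol * ColStride;
}

int main() {
  // A[i][0] vs A[j][1]: zeroing the variable rows leaves offsets 0 and 4.
  int64_t Off1 = offsetWithVariablesZeroed(0, 0);   // stands for A[i][0]
  int64_t Off2 = offsetWithVariablesZeroed(0, 1);   // stands for A[j][1]
  uint64_t SizeMax = 4;                             // both accesses read one int

  // The patch's final test: constant parts at least SizeMax bytes apart => NoAlias.
  uint64_t Gap = Off2 > Off1 ? uint64_t(Off2 - Off1) : uint64_t(Off1 - Off2);
  assert(Gap >= SizeMax);
  return 0;
}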
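
The "be careful" comment about A[1][0] versus A[0][i] is easy to make concrete: once a differing constant array subscript is followed by a variable index, the variable can walk past the inner array and land exactly on the element the constants appeared to rule out, so the patch conservatively answers MayAlias in that situation. A small illustration (assumes a typical 4-byte int):

#include <cassert>

int main() {
  int A[2][10] = {};

  // With constant inner indices, A[1][0] and A[0][1] are provably 36 bytes
  // apart.  With a variable inner index nothing keeps the access inside the
  // inner array: A[0][i] with i == 10 is exactly the address of A[1][0], so
  // comparing the outer constants 0 and 1 proves nothing.
  int i = 10;
  assert(&A[0][i] == &A[1][0]);
  return 0;
}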
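
IndexOperandsEqual compares two constant indices by sign-extending both to 64 bits first, so that, for example, an i32 1 and an i64 1 count as the same index. The same widening in plain C++ (the bit widths here are illustrative):

#include <cassert>
#include <cstdint>

// Sign-extend a constant of width fromBits (1..64) to 64 bits, the way the
// patch widens both operands to i64 before comparing them.
static int64_t signExtendTo64(uint64_t value, unsigned fromBits) {
  uint64_t signBit = 1ULL << (fromBits - 1);
  uint64_t mask = (fromBits < 64) ? (1ULL << fromBits) - 1 : ~0ULL;
  value &= mask;
  return (int64_t)((value ^ signBit) - signBit);   // classic sign-extension identity
}

static bool indexOperandsEqual(uint64_t v1, unsigned bits1,
                               uint64_t v2, unsigned bits2) {
  return signExtendTo64(v1, bits1) == signExtendTo64(v2, bits2);
}

int main() {
  assert(indexOperandsEqual(1, 32, 1, 64));              // i32 1 == i64 1
  assert(indexOperandsEqual(0xFFFFFFFFULL, 32,           // i32 -1 ...
                            ~0ULL, 64));                 // ... == i64 -1
  assert(!indexOperandsEqual(2, 32, 1, 64));
  return 0;
}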
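
Finally, the new getModRefBehavior classifies well-known external functions by name: the tables above are sorted once with a strcmp-based comparator and then probed with std::lower_bound. The self-contained sketch below reproduces that lookup scheme over a small illustrative subset of the table:

#include <algorithm>
#include <cstdio>
#include <cstring>
#include <vector>

struct StringCompare {
  bool operator()(const char *LHS, const char *RHS) const {
    return std::strcmp(LHS, RHS) < 0;
  }
};

enum ModRefBehavior { DoesNotAccessMemory, OnlyReadsMemory, UnknownModRefBehavior };

// Illustrative subset of the "does not access memory" table.
static const char *NoMemoryFns[] = { "abs", "cos", "fabs", "labs", "sin" };

static ModRefBehavior classifyByName(const char *Name) {
  static std::vector<const char*> Table(NoMemoryFns,
                     NoMemoryFns + sizeof(NoMemoryFns)/sizeof(NoMemoryFns[0]));
  static bool Sorted = false;
  if (!Sorted) {                       // sort once, on first use, as the patch does
    std::sort(Table.begin(), Table.end(), StringCompare());
    Sorted = true;
  }
  std::vector<const char*>::iterator It =
      std::lower_bound(Table.begin(), Table.end(), Name, StringCompare());
  if (It != Table.end() && std::strcmp(*It, Name) == 0)
    return DoesNotAccessMemory;
  return UnknownModRefBehavior;
}

int main() {
  std::printf("cos    -> %d\n", classifyByName("cos"));     // DoesNotAccessMemory
  std::printf("printf -> %d\n", classifyByName("printf"));  // UnknownModRefBehavior
  return 0;
}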