//===- BasicAliasAnalysis.cpp - Local Alias Analysis Impl ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the default implementation of the Alias Analysis interface
// that simply implements a few identities (two different globals cannot alias,
// etc), but otherwise does no analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Operator.h"
#include "llvm/Pass.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include <algorithm>
using namespace llvm;

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

static const Value *GetGEPOperands(const Value *V,
                                   SmallVector<Value*, 16> &GEPOps) {
  assert(GEPOps.empty() && "Expect empty list to populate!");
  GEPOps.insert(GEPOps.end(), cast<User>(V)->op_begin()+1,
                cast<User>(V)->op_end());

  // Accumulate all of the chained indexes into the operand array.
  V = cast<User>(V)->getOperand(0);

  while (const GEPOperator *G = dyn_cast<GEPOperator>(V)) {
    if (!isa<Constant>(GEPOps[0]) || isa<GlobalValue>(GEPOps[0]) ||
        !cast<Constant>(GEPOps[0])->isNullValue())
      break;  // Don't handle folding arbitrary pointer offsets yet...
    GEPOps.erase(GEPOps.begin());   // Drop the zero index
    GEPOps.insert(GEPOps.begin(), G->op_begin()+1, G->op_end());
    V = G->getOperand(0);
  }
  return V;
}
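
// For example (illustrative IR), given the chain
//   %a = getelementptr [10 x i32]* %base, i32 0, i32 %i
//   %b = getelementptr i32* %a, i32 0
// GetGEPOperands(%b, Ops) walks through the zero-offset outer GEP and returns
// %base, with Ops holding the inner indexes {0, %i}.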

/// isKnownNonNull - Return true if we know that the specified value is never
/// null.
static bool isKnownNonNull(const Value *V) {
  // Alloca never returns null, malloc might.
  if (isa<AllocaInst>(V)) return true;

  // A byval argument is never null.
  if (const Argument *A = dyn_cast<Argument>(V))
    return A->hasByValAttr();

  // Global values are not null unless extern weak.
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
    return !GV->hasExternalWeakLinkage();
  return false;
}

/// isNonEscapingLocalObject - Return true if the pointer is to a function-local
/// object that never escapes from the function.
static bool isNonEscapingLocalObject(const Value *V) {
  // If this is a local allocation, check to see if it escapes.
  if (isa<AllocaInst>(V) || isNoAliasCall(V))
    // Set StoreCaptures to True so that we can assume in our callers that the
    // pointer is not the result of a load instruction. Currently
    // PointerMayBeCaptured doesn't have any special analysis for the
    // StoreCaptures=false case; if it did, our callers could be refined to be
    // more precise.
    return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  // If this is an argument that corresponds to a byval or noalias argument,
  // then it has not escaped before entering the function. Check if it escapes
  // inside the function.
  if (const Argument *A = dyn_cast<Argument>(V))
    if (A->hasByValAttr() || A->hasNoAliasAttr()) {
      // Don't bother analyzing arguments already known not to escape.
      if (A->hasNoCaptureAttr())
        return true;
      return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);
    }
  return false;
}
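
// For example (illustrative IR), %x below is a non-escaping local object,
// while %y may be captured because its address is passed to an unknown call:
//   %x = alloca i32
//   %y = alloca i32
//   call void @use(i32* %y)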

/// isObjectSmallerThan - Return true if we can prove that the object specified
/// by V is smaller than Size.
static bool isObjectSmallerThan(const Value *V, unsigned Size,
                                const TargetData &TD) {
  const Type *AccessTy;
  if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
    AccessTy = GV->getType()->getElementType();
  } else if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    if (!AI->isArrayAllocation())
      AccessTy = AI->getType()->getElementType();
    else
      return false;
  } else if (const CallInst* CI = extractMallocCall(V)) {
    if (!isArrayMalloc(V, &TD))
      // The size is the argument to the malloc call.
      if (const ConstantInt* C = dyn_cast<ConstantInt>(CI->getOperand(1)))
        return (C->getZExtValue() < Size);
    return false;
  } else if (const Argument *A = dyn_cast<Argument>(V)) {
    if (A->hasByValAttr())
      AccessTy = cast<PointerType>(A->getType())->getElementType();
    else
      return false;
  } else {
    return false;
  }

  if (AccessTy->isSized())
    return TD.getTypeAllocSize(AccessTy) < Size;
  return false;
}
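
// For example (illustrative), an 'alloca i32' occupies 4 bytes on a typical
// target, so isObjectSmallerThan(AI, 8, TD) returns true: an 8-byte access
// cannot lie entirely within that object.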

//===----------------------------------------------------------------------===//
// NoAA Pass
//===----------------------------------------------------------------------===//

namespace {
  /// NoAA - This class implements the -no-aa pass, which always returns "I
  /// don't know" for alias queries. NoAA is unlike other alias analysis
  /// implementations, in that it does not chain to a previous analysis. As
  /// such it doesn't follow many of the rules that other alias analyses must.
  struct NoAA : public ImmutablePass, public AliasAnalysis {
    static char ID; // Class identification, replacement for typeinfo
    NoAA() : ImmutablePass(&ID) {}
    explicit NoAA(void *PID) : ImmutablePass(PID) { }

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    }

    virtual void initializePass() {
      TD = getAnalysisIfAvailable<TargetData>();
    }

    virtual AliasResult alias(const Value *V1, unsigned V1Size,
                              const Value *V2, unsigned V2Size) {
      return MayAlias;
    }

    virtual void getArgumentAccesses(Function *F, CallSite CS,
                                     std::vector<PointerAccessInfo> &Info) {
      llvm_unreachable("This method may not be called on this function!");
    }

    virtual bool pointsToConstantMemory(const Value *P) { return false; }
    virtual ModRefResult getModRefInfo(CallSite CS, Value *P, unsigned Size) {
      return ModRef;
    }
    virtual ModRefResult getModRefInfo(CallSite CS1, CallSite CS2) {
      return ModRef;
    }

    virtual void deleteValue(Value *V) {}
    virtual void copyValue(Value *From, Value *To) {}
  };
}  // End of anonymous namespace

// Register this pass...
char NoAA::ID = 0;
static RegisterPass<NoAA>
U("no-aa", "No Alias Analysis (always returns 'may' alias)", true, true);

// Declare that we implement the AliasAnalysis interface
static RegisterAnalysisGroup<AliasAnalysis> V(U);

ImmutablePass *llvm::createNoAAPass() { return new NoAA(); }

//===----------------------------------------------------------------------===//
// BasicAA Pass
//===----------------------------------------------------------------------===//

namespace {
  /// BasicAliasAnalysis - This is the default alias analysis implementation.
  /// Because it doesn't chain to a previous alias analysis (like -no-aa), it
  /// derives from the NoAA class.
  struct BasicAliasAnalysis : public NoAA {
    static char ID; // Class identification, replacement for typeinfo
    BasicAliasAnalysis() : NoAA(&ID) {}
    AliasResult alias(const Value *V1, unsigned V1Size,
                      const Value *V2, unsigned V2Size) {
      assert(VisitedPHIs.empty() && "VisitedPHIs must be cleared after use!");
      AliasResult Alias = aliasCheck(V1, V1Size, V2, V2Size);
      VisitedPHIs.clear();
      return Alias;
    }

    ModRefResult getModRefInfo(CallSite CS, Value *P, unsigned Size);
    ModRefResult getModRefInfo(CallSite CS1, CallSite CS2);

    /// pointsToConstantMemory - Chase pointers until we find a (constant
    /// global) or not.
    bool pointsToConstantMemory(const Value *P);

  private:
    // VisitedPHIs - Track PHI nodes visited by an aliasCheck() call.
    SmallPtrSet<const Value*, 16> VisitedPHIs;

    // aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP
    // instruction against another.
    AliasResult aliasGEP(const Value *V1, unsigned V1Size,
                         const Value *V2, unsigned V2Size);

    // aliasPHI - Provide a bunch of ad-hoc rules to disambiguate a PHI
    // instruction against another.
    AliasResult aliasPHI(const PHINode *PN, unsigned PNSize,
                         const Value *V2, unsigned V2Size);

    /// aliasSelect - Disambiguate a Select instruction against another value.
    AliasResult aliasSelect(const SelectInst *SI, unsigned SISize,
                            const Value *V2, unsigned V2Size);

    AliasResult aliasCheck(const Value *V1, unsigned V1Size,
                           const Value *V2, unsigned V2Size);

    // CheckGEPInstructions - Check two GEP instructions with known
    // must-aliasing base pointers. This checks to see if the index expressions
    // preclude the pointers from aliasing.
    AliasResult
    CheckGEPInstructions(const Type* BasePtr1Ty,
                         Value **GEP1Ops, unsigned NumGEP1Ops, unsigned G1Size,
                         const Type *BasePtr2Ty,
                         Value **GEP2Ops, unsigned NumGEP2Ops, unsigned G2Size);
  };
}  // End of anonymous namespace

// Register this pass...
char BasicAliasAnalysis::ID = 0;
static RegisterPass<BasicAliasAnalysis>
X("basicaa", "Basic Alias Analysis (default AA impl)", false, true);

// Declare that we implement the AliasAnalysis interface
static RegisterAnalysisGroup<AliasAnalysis, true> Y(X);

ImmutablePass *llvm::createBasicAliasAnalysisPass() {
  return new BasicAliasAnalysis();
}

/// pointsToConstantMemory - Chase pointers until we find a (constant
/// global) or not.
bool BasicAliasAnalysis::pointsToConstantMemory(const Value *P) {
  if (const GlobalVariable *GV =
        dyn_cast<GlobalVariable>(P->getUnderlyingObject()))
    // Note: this doesn't require GV to be "ODR" because it isn't legal for a
    // global to be marked constant in some modules and non-constant in others.
    // GV may even be a declaration, not a definition.
    return GV->isConstant();
  return false;
}
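
// For example (illustrative IR), any pointer whose underlying object is
//   @str = constant [4 x i8] c"abc\00"
// points to constant memory, so loads through it can never be clobbered.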

/// getModRefInfo - Check to see if the specified callsite can clobber the
/// specified memory object. Since we only look at local properties of this
/// function, we really can't say much about this query. We do, however, use
/// simple "address taken" analysis on local objects.
AliasAnalysis::ModRefResult
BasicAliasAnalysis::getModRefInfo(CallSite CS, Value *P, unsigned Size) {
  const Value *Object = P->getUnderlyingObject();

  // If this is a tail call and P points to a stack location, we know that
  // the tail call cannot access or modify the local stack.
  // We cannot exclude byval arguments here; these belong to the caller of
  // the current function, not to the current function, and a tail callee
  // may reference them.
  if (isa<AllocaInst>(Object))
    if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
      if (CI->isTailCall())
        return NoModRef;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call cannot mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && CS.getInstruction() != Object &&
      isNonEscapingLocalObject(Object)) {
    bool PassedAsArg = false;
    unsigned ArgNo = 0;
    for (CallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
         CI != CE; ++CI, ++ArgNo) {
      // Only look at the no-capture pointer arguments.
      if (!isa<PointerType>((*CI)->getType()) ||
          !CS.paramHasAttr(ArgNo+1, Attribute::NoCapture))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking. If not, we have to
      // assume that the call could touch the pointer, even though it doesn't
      // escape.
      if (!isNoAlias(cast<Value>(CI), ~0U, P, ~0U)) {
        PassedAsArg = true;
        break;
      }
    }

    if (!PassedAsArg)
      return NoModRef;
  }
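
  // For example (illustrative IR), given a non-escaping alloca %a and a call
  //   call void @f(i32* nocapture %p)
  // the call can only touch %a through %p; if every nocapture pointer argument
  // is NoAlias with %a, the call is NoModRef with respect to it.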

  // Finally, handle specific knowledge of intrinsics.
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
  if (II == 0)
    return AliasAnalysis::getModRefInfo(CS, P, Size);

  switch (II->getIntrinsicID()) {
  default: break;
  case Intrinsic::memcpy:
  case Intrinsic::memmove: {
    unsigned Len = ~0U;
    if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getOperand(3)))
      Len = LenCI->getZExtValue();
    Value *Dest = II->getOperand(1);
    Value *Src = II->getOperand(2);
    if (isNoAlias(Dest, Len, P, Size)) {
      if (isNoAlias(Src, Len, P, Size))
        return NoModRef;
      return Ref;
    }
    break;
  }
  case Intrinsic::memset:
    // Since memset is 'accesses arguments' only, the AliasAnalysis base class
    // will handle it for the variable length case.
    if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getOperand(3))) {
      unsigned Len = LenCI->getZExtValue();
      Value *Dest = II->getOperand(1);
      if (isNoAlias(Dest, Len, P, Size))
        return NoModRef;
    }
    break;
  case Intrinsic::atomic_cmp_swap:
  case Intrinsic::atomic_swap:
  case Intrinsic::atomic_load_add:
  case Intrinsic::atomic_load_sub:
  case Intrinsic::atomic_load_and:
  case Intrinsic::atomic_load_nand:
  case Intrinsic::atomic_load_or:
  case Intrinsic::atomic_load_xor:
  case Intrinsic::atomic_load_max:
  case Intrinsic::atomic_load_min:
  case Intrinsic::atomic_load_umax:
  case Intrinsic::atomic_load_umin:
    if (TD) {
      Value *Op1 = II->getOperand(1);
      unsigned Op1Size = TD->getTypeStoreSize(Op1->getType());
      if (isNoAlias(Op1, Op1Size, P, Size))
        return NoModRef;
    }
    break;
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  case Intrinsic::invariant_start: {
    unsigned PtrSize = cast<ConstantInt>(II->getOperand(1))->getZExtValue();
    if (isNoAlias(II->getOperand(2), PtrSize, P, Size))
      return NoModRef;
    break;
  }
  case Intrinsic::invariant_end: {
    unsigned PtrSize = cast<ConstantInt>(II->getOperand(2))->getZExtValue();
    if (isNoAlias(II->getOperand(3), PtrSize, P, Size))
      return NoModRef;
    break;
  }
  }

  // The AliasAnalysis base class has some smarts, let's use them.
  return AliasAnalysis::getModRefInfo(CS, P, Size);
}
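
// For example (illustrative IR), for a memcpy call of the form
//   call void @llvm.memcpy(i8* %d, i8* %s, i32 16, i32 4)
// a query against a pointer P that is NoAlias with both %d and %s over those
// 16 bytes yields NoModRef; if P only aliases %s, the call at most reads P
// and the result is Ref.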

AliasAnalysis::ModRefResult
BasicAliasAnalysis::getModRefInfo(CallSite CS1, CallSite CS2) {
  // If CS1 or CS2 are readnone, they don't interact.
  ModRefBehavior CS1B = AliasAnalysis::getModRefBehavior(CS1);
  if (CS1B == DoesNotAccessMemory) return NoModRef;

  ModRefBehavior CS2B = AliasAnalysis::getModRefBehavior(CS2);
  if (CS2B == DoesNotAccessMemory) return NoModRef;

  // If they both only read from memory, just return ref.
  if (CS1B == OnlyReadsMemory && CS2B == OnlyReadsMemory)
    return Ref;

  // Otherwise, fall back to NoAA (mod+ref).
  return NoAA::getModRefInfo(CS1, CS2);
}

// aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP instruction
// against another.
//
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasGEP(const Value *V1, unsigned V1Size,
                             const Value *V2, unsigned V2Size) {
  // If we have two gep instructions with must-alias'ing base pointers, figure
  // out if the indexes to the GEP tell us anything about the derived pointer.
  // Note that we also handle chains of getelementptr instructions as well as
  // constant expression getelementptrs here.
  //
  if (isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
    const User *GEP1 = cast<User>(V1);
    const User *GEP2 = cast<User>(V2);

    // If V1 and V2 are identical GEPs, just recurse down on both of them.
    // This allows us to analyze things like:
    //   P = gep A, 0, i, 1
    //   Q = gep B, 0, i, 1
    // by just analyzing A and B. This is even safe for variable indices.
    if (GEP1->getType() == GEP2->getType() &&
        GEP1->getNumOperands() == GEP2->getNumOperands() &&
        GEP1->getOperand(0)->getType() == GEP2->getOperand(0)->getType() &&
        // All operands are the same, ignoring the base.
        std::equal(GEP1->op_begin()+1, GEP1->op_end(), GEP2->op_begin()+1))
      return aliasCheck(GEP1->getOperand(0), V1Size,
                        GEP2->getOperand(0), V2Size);

    // Drill down into the first non-gep value, to test for must-aliasing of
    // the base pointers.
    while (isa<GEPOperator>(GEP1->getOperand(0)) &&
           GEP1->getOperand(1) ==
           Constant::getNullValue(GEP1->getOperand(1)->getType()))
      GEP1 = cast<User>(GEP1->getOperand(0));
    const Value *BasePtr1 = GEP1->getOperand(0);

    while (isa<GEPOperator>(GEP2->getOperand(0)) &&
           GEP2->getOperand(1) ==
           Constant::getNullValue(GEP2->getOperand(1)->getType()))
      GEP2 = cast<User>(GEP2->getOperand(0));
    const Value *BasePtr2 = GEP2->getOperand(0);

    // Do the base pointers alias?
    AliasResult BaseAlias = aliasCheck(BasePtr1, ~0U, BasePtr2, ~0U);
    if (BaseAlias == NoAlias) return NoAlias;
    if (BaseAlias == MustAlias) {
      // If the base pointers alias each other exactly, check to see if we can
      // figure out anything about the resultant pointers, to try to prove
      // non-aliasing.

      // Collect all of the chained GEP operands together into one simple place
      SmallVector<Value*, 16> GEP1Ops, GEP2Ops;
      BasePtr1 = GetGEPOperands(V1, GEP1Ops);
      BasePtr2 = GetGEPOperands(V2, GEP2Ops);

      // If GetGEPOperands were able to fold to the same must-aliased pointer,
      // do the comparison.
      if (BasePtr1 == BasePtr2) {
        AliasResult GAlias =
          CheckGEPInstructions(BasePtr1->getType(),
                               &GEP1Ops[0], GEP1Ops.size(), V1Size,
                               BasePtr2->getType(),
                               &GEP2Ops[0], GEP2Ops.size(), V2Size);
        if (GAlias != MayAlias)
          return GAlias;
      }
    }
  }

  // Check to see if these two pointers are related by a getelementptr
  // instruction. If one pointer is a GEP with a non-zero index of the other
  // pointer, we know they cannot alias.
  //
  if (V1Size == ~0U || V2Size == ~0U)
    return MayAlias;

  SmallVector<Value*, 16> GEPOperands;
  const Value *BasePtr = GetGEPOperands(V1, GEPOperands);

  AliasResult R = aliasCheck(BasePtr, ~0U, V2, V2Size);

  // If V2 may alias GEP base pointer, conservatively return MayAlias.
  // If V2 is known not to alias GEP base pointer, then the two values
  // cannot alias per GEP semantics: "A pointer value formed from a
  // getelementptr instruction is associated with the addresses associated
  // with the first operand of the getelementptr".
  if (R != MustAlias)
    return R;

  // If there is at least one non-zero constant index, we know they cannot
  // alias.
  bool ConstantFound = false;
  bool AllZerosFound = true;
  for (unsigned i = 0, e = GEPOperands.size(); i != e; ++i)
    if (const Constant *C = dyn_cast<Constant>(GEPOperands[i])) {
      if (!C->isNullValue()) {
        ConstantFound = true;
        AllZerosFound = false;
        break;
      }
    } else {
      AllZerosFound = false;
    }

  // If we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2 must-aliases
  // the ptr, the end result is a must alias also.
  if (AllZerosFound)
    return R;

  if (ConstantFound) {
    if (V2Size <= 1 && V1Size <= 1)  // Just pointer check?
      return NoAlias;

    // Otherwise we have to check to see that the distance is more than
    // the size of the argument... build an index vector that is equal to
    // the arguments provided, except substitute 0's for any variable
    // indexes we find.
    if (TD &&
        cast<PointerType>(BasePtr->getType())->getElementType()->isSized()) {
      for (unsigned i = 0; i != GEPOperands.size(); ++i)
        if (!isa<ConstantInt>(GEPOperands[i]))
          GEPOperands[i] = Constant::getNullValue(GEPOperands[i]->getType());
      int64_t Offset = TD->getIndexedOffset(BasePtr->getType(),
                                            &GEPOperands[0],
                                            GEPOperands.size());

      if (Offset >= (int64_t)V2Size || Offset <= -(int64_t)V1Size)
        return NoAlias;
    }
  }

  return MayAlias;
}
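
// For example (illustrative IR), given target data and
//   %p = getelementptr { i32, i32 }* %base, i32 0, i32 1
// a 4-byte access through %p cannot alias a 4-byte access through %base
// itself: the non-zero constant index places %p at offset 4 within the
// object, beyond the other access.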

/// aliasSelect - Provide a bunch of ad-hoc rules to disambiguate a Select
/// instruction against another.
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasSelect(const SelectInst *SI, unsigned SISize,
                                const Value *V2, unsigned V2Size) {
  // If the values are Selects with the same condition, we can do a more precise
  // check: just check for aliases between the values on corresponding arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (SI->getCondition() == SI2->getCondition()) {
      AliasResult Alias =
        aliasCheck(SI->getTrueValue(), SISize,
                   SI2->getTrueValue(), V2Size);
      if (Alias == MayAlias)
        return MayAlias;
      AliasResult ThisAlias =
        aliasCheck(SI->getFalseValue(), SISize,
                   SI2->getFalseValue(), V2Size);
      if (ThisAlias != Alias)
        return MayAlias;
      return Alias;
    }

  // If both arms of the Select node NoAlias or MustAlias V2, then return
  // NoAlias / MustAlias. Otherwise, return MayAlias.
  AliasResult Alias =
    aliasCheck(SI->getTrueValue(), SISize, V2, V2Size);
  if (Alias == MayAlias)
    return MayAlias;
  AliasResult ThisAlias =
    aliasCheck(SI->getFalseValue(), SISize, V2, V2Size);
  if (ThisAlias != Alias)
    return MayAlias;
  return Alias;
}
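
// For example (illustrative IR), the selects
//   %p = select i1 %c, i32* %a, i32* %b
//   %q = select i1 %c, i32* %a, i32* %b
// pick the same value on either arm, so comparing corresponding arms lets %p
// and %q be reported as MustAlias even though %c is unknown.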

// aliasPHI - Provide a bunch of ad-hoc rules to disambiguate a PHI instruction
// against another.
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasPHI(const PHINode *PN, unsigned PNSize,
                             const Value *V2, unsigned V2Size) {
  // The PHI node has already been visited, avoid recursing any further.
  if (!VisitedPHIs.insert(PN))
    return MayAlias;

  // If the values are PHIs in the same block, we can do a more precise
  // as well as efficient check: just check for aliases between the values
  // on corresponding edges.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      AliasResult Alias =
        aliasCheck(PN->getIncomingValue(0), PNSize,
                   PN2->getIncomingValueForBlock(PN->getIncomingBlock(0)),
                   V2Size);
      if (Alias == MayAlias)
        return MayAlias;
      for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias =
          aliasCheck(PN->getIncomingValue(i), PNSize,
                     PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
                     V2Size);
        if (ThisAlias != Alias)
          return MayAlias;
      }
      return Alias;
    }

  SmallPtrSet<Value*, 4> UniqueSrc;
  SmallVector<Value*, 4> V1Srcs;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *PV1 = PN->getIncomingValue(i);
    if (isa<PHINode>(PV1))
      // If any source of the PHI is itself a PHI, return MayAlias conservatively
      // to avoid compile time explosion. The worst possible case is if both
      // sides are PHI nodes. In which case, this is O(m x n) time where 'm'
      // and 'n' are the number of PHI sources.
      return MayAlias;
    if (UniqueSrc.insert(PV1))
      V1Srcs.push_back(PV1);
  }

  AliasResult Alias = aliasCheck(V2, V2Size, V1Srcs[0], PNSize);
  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
  if (Alias == MayAlias)
    return MayAlias;

  // If all sources of the PHI node NoAlias or MustAlias V2, then return
  // NoAlias / MustAlias. Otherwise, return MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    // If V2 is a PHI, the recursive case will have been caught in the
    // above aliasCheck call, so these subsequent calls to aliasCheck
    // don't need to assume that V2 is being visited recursively.
    VisitedPHIs.erase(V2);

    AliasResult ThisAlias = aliasCheck(V2, V2Size, V, PNSize);
    if (ThisAlias != Alias || ThisAlias == MayAlias)
      return MayAlias;
  }

  return Alias;
}
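
// For example (illustrative IR), for two PHIs in the same block
//   %p = phi i32* [ %a, %bb1 ], [ %b, %bb2 ]
//   %q = phi i32* [ %c, %bb1 ], [ %d, %bb2 ]
// it suffices to show that %a/%c and %b/%d are NoAlias on each incoming edge
// to conclude that %p and %q are NoAlias.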

// aliasCheck - Provide a bunch of ad-hoc rules to disambiguate in common cases,
// such as array references.
//
AliasAnalysis::AliasResult
BasicAliasAnalysis::aliasCheck(const Value *V1, unsigned V1Size,
                               const Value *V2, unsigned V2Size) {
  // Strip off any casts if they exist.
  V1 = V1->stripPointerCasts();
  V2 = V2->stripPointerCasts();

  // Are we checking for alias of the same value?
  if (V1 == V2) return MustAlias;

  if (!isa<PointerType>(V1->getType()) || !isa<PointerType>(V2->getType()))
    return NoAlias;  // Scalars cannot alias each other

  // Figure out what objects these things are pointing to if we can.
  const Value *O1 = V1->getUnderlyingObject();
  const Value *O2 = V2->getUnderlyingObject();

  // Null values in the default address space don't point to any object, so they
  // don't alias any other pointer.
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
    if (CPN->getType()->getAddressSpace() == 0)
      return NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
    if (CPN->getType()->getAddressSpace() == 0)
      return NoAlias;

  if (O1 != O2) {
    // If V1/V2 point to two different objects, we know that we have no alias.
    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
      return NoAlias;

    // Constant pointers can't alias with non-const isIdentifiedObject objects.
    if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
        (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
      return NoAlias;

    // Arguments can't alias with local allocations or noalias calls.
    if ((isa<Argument>(O1) && (isa<AllocaInst>(O2) || isNoAliasCall(O2))) ||
        (isa<Argument>(O2) && (isa<AllocaInst>(O1) || isNoAliasCall(O1))))
      return NoAlias;

    // Most objects can't alias null.
    if ((isa<ConstantPointerNull>(V2) && isKnownNonNull(O1)) ||
        (isa<ConstantPointerNull>(V1) && isKnownNonNull(O2)))
      return NoAlias;
  }

  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
  if (TD)
    if ((V1Size != ~0U && isObjectSmallerThan(O2, V1Size, *TD)) ||
        (V2Size != ~0U && isObjectSmallerThan(O1, V2Size, *TD)))
      return NoAlias;

  // If one pointer is the result of a call/invoke or load and the other is a
  // non-escaping local object, then we know the object couldn't escape to a
  // point where the call could return it. The load case works because
  // isNonEscapingLocalObject considers all stores to be escapes (it
  // passes true for the StoreCaptures argument to PointerMayBeCaptured).
  if (O1 != O2) {
    if ((isa<CallInst>(O1) || isa<InvokeInst>(O1) || isa<LoadInst>(O1) ||
         isa<Argument>(O1)) &&
        isNonEscapingLocalObject(O2))
      return NoAlias;
    if ((isa<CallInst>(O2) || isa<InvokeInst>(O2) || isa<LoadInst>(O2) ||
         isa<Argument>(O2)) &&
        isNonEscapingLocalObject(O1))
      return NoAlias;
  }

  if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
  }
  if (isa<GEPOperator>(V1))
    return aliasGEP(V1, V1Size, V2, V2Size);

  if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
  }
  if (const PHINode *PN = dyn_cast<PHINode>(V1))
    return aliasPHI(PN, V1Size, V2, V2Size);

  if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
  }
  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1))
    return aliasSelect(S1, V1Size, V2, V2Size);

  return MayAlias;
}
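
// For example (illustrative IR), two distinct identified objects never alias:
//   %a = alloca i32
//   @g = global i32 0
// Any pointers whose underlying objects are %a and @g are NoAlias, regardless
// of how they are indexed.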

// This function is used to determine if the indices of two GEP instructions are
// equal. V1 and V2 are the indices.
static bool IndexOperandsEqual(Value *V1, Value *V2) {
  if (V1->getType() == V2->getType())
    return V1 == V2;
  if (Constant *C1 = dyn_cast<Constant>(V1))
    if (Constant *C2 = dyn_cast<Constant>(V2)) {
      // Sign extend the constants to long types, if necessary
      if (C1->getType() != Type::getInt64Ty(C1->getContext()))
        C1 = ConstantExpr::getSExt(C1, Type::getInt64Ty(C1->getContext()));
      if (C2->getType() != Type::getInt64Ty(C1->getContext()))
        C2 = ConstantExpr::getSExt(C2, Type::getInt64Ty(C1->getContext()));
      return C1 == C2;
    }
  return false;
}

/// CheckGEPInstructions - Check two GEP instructions with known must-aliasing
/// base pointers. This checks to see if the index expressions preclude the
/// pointers from aliasing.
AliasAnalysis::AliasResult
BasicAliasAnalysis::CheckGEPInstructions(
  const Type* BasePtr1Ty, Value **GEP1Ops, unsigned NumGEP1Ops, unsigned G1S,
  const Type *BasePtr2Ty, Value **GEP2Ops, unsigned NumGEP2Ops, unsigned G2S) {
  // We currently can't handle the case when the base pointers have different
  // primitive types. Since this is uncommon anyway, we are happy being
  // extremely conservative.
  if (BasePtr1Ty != BasePtr2Ty)
    return MayAlias;

  const PointerType *GEPPointerTy = cast<PointerType>(BasePtr1Ty);

  // Find the (possibly empty) initial sequence of equal values... which are not
  // necessarily constants.
  unsigned NumGEP1Operands = NumGEP1Ops, NumGEP2Operands = NumGEP2Ops;
  unsigned MinOperands = std::min(NumGEP1Operands, NumGEP2Operands);
  unsigned MaxOperands = std::max(NumGEP1Operands, NumGEP2Operands);
  unsigned UnequalOper = 0;
  while (UnequalOper != MinOperands &&
         IndexOperandsEqual(GEP1Ops[UnequalOper], GEP2Ops[UnequalOper])) {
    // Advance through the type as we go...
    ++UnequalOper;
    if (const CompositeType *CT = dyn_cast<CompositeType>(BasePtr1Ty))
      BasePtr1Ty = CT->getTypeAtIndex(GEP1Ops[UnequalOper-1]);
    else {
      // If all operands equal each other, then the derived pointers must
      // alias each other...
      BasePtr1Ty = 0;
      assert(UnequalOper == NumGEP1Operands && UnequalOper == NumGEP2Operands &&
             "Ran out of type nesting, but not out of operands?");
      return MustAlias;
    }
  }

  // If we have seen all constant operands, and run out of indexes on one of the
  // getelementptrs, check to see if the tail of the leftover one is all zeros.
  // If so, return mustalias.
  if (UnequalOper == MinOperands) {
    if (NumGEP1Ops < NumGEP2Ops) {
      std::swap(GEP1Ops, GEP2Ops);
      std::swap(NumGEP1Ops, NumGEP2Ops);
    }

    bool AllAreZeros = true;
    for (unsigned i = UnequalOper; i != MaxOperands; ++i)
      if (!isa<Constant>(GEP1Ops[i]) ||
          !cast<Constant>(GEP1Ops[i])->isNullValue()) {
        AllAreZeros = false;
        break;
      }
    if (AllAreZeros) return MustAlias;
    return MayAlias;
  }

  // So now we know that the indexes derived from the base pointers,
  // which are known to alias, are different. We can still determine a
  // no-alias result if there are differing constant pairs in the index
  // chain. For example:
  //  A[i][0] != A[j][1] iff (&A[0][1]-&A[0][0] >= std::max(G1S, G2S))
  //
  // We have to be careful here about array accesses. In particular, consider:
  //  A[1][0] vs A[0][i]
  // In this case, we don't *know* that the array will be accessed in bounds:
  // the index could even be negative. Because of this, we have to
  // conservatively *give up* and return may alias. We disregard differing
  // array subscripts that are followed by a variable index without going
  // through them.
  //
  unsigned SizeMax = std::max(G1S, G2S);
  if (SizeMax == ~0U) return MayAlias; // Avoid frivolous work.

  // Scan for the first operand that is constant and unequal in the
  // two getelementptrs...
  unsigned FirstConstantOper = UnequalOper;
  for (; FirstConstantOper != MinOperands; ++FirstConstantOper) {
    const Value *G1Oper = GEP1Ops[FirstConstantOper];
    const Value *G2Oper = GEP2Ops[FirstConstantOper];

    if (G1Oper != G2Oper)   // Found non-equal constant indexes...
      if (Constant *G1OC = dyn_cast<ConstantInt>(const_cast<Value*>(G1Oper)))
        if (Constant *G2OC = dyn_cast<ConstantInt>(const_cast<Value*>(G2Oper))){
          if (G1OC->getType() != G2OC->getType()) {
            // Sign extend both operands to long.
            const Type *Int64Ty = Type::getInt64Ty(G1OC->getContext());
            if (G1OC->getType() != Int64Ty)
              G1OC = ConstantExpr::getSExt(G1OC, Int64Ty);
            if (G2OC->getType() != Int64Ty)
              G2OC = ConstantExpr::getSExt(G2OC, Int64Ty);
            GEP1Ops[FirstConstantOper] = G1OC;
            GEP2Ops[FirstConstantOper] = G2OC;
          }

          if (G1OC != G2OC) {
            // Handle the "be careful" case above: if this is an array/vector
            // subscript, scan for a subsequent variable array index.
            if (const SequentialType *STy =
                  dyn_cast<SequentialType>(BasePtr1Ty)) {
              const Type *NextTy = STy;
              bool isBadCase = false;

              for (unsigned Idx = FirstConstantOper;
                   Idx != MinOperands && isa<SequentialType>(NextTy); ++Idx) {
                const Value *V1 = GEP1Ops[Idx], *V2 = GEP2Ops[Idx];
                if (!isa<Constant>(V1) || !isa<Constant>(V2)) {
                  isBadCase = true;
                  break;
                }
                // If the array is indexed beyond the bounds of the static type
                // at this level, it will also fall into the "be careful" case.
                // It would theoretically be possible to analyze these cases,
                // but for now just be conservatively correct.
                if (const ArrayType *ATy = dyn_cast<ArrayType>(STy))
                  if (cast<ConstantInt>(G1OC)->getZExtValue() >=
                        ATy->getNumElements() ||
                      cast<ConstantInt>(G2OC)->getZExtValue() >=
                        ATy->getNumElements()) {
                    isBadCase = true;
                    break;
                  }
                if (const VectorType *VTy = dyn_cast<VectorType>(STy))
                  if (cast<ConstantInt>(G1OC)->getZExtValue() >=
                        VTy->getNumElements() ||
                      cast<ConstantInt>(G2OC)->getZExtValue() >=
                        VTy->getNumElements()) {
                    isBadCase = true;
                    break;
                  }
                STy = cast<SequentialType>(NextTy);
                NextTy = cast<SequentialType>(NextTy)->getElementType();
              }

              if (isBadCase) G1OC = 0;
            }

            // Make sure they are comparable (ie, not constant expressions), and
            // make sure the GEP with the smaller leading constant is GEP1.
            if (G1OC) {
              Constant *Compare = ConstantExpr::getICmp(ICmpInst::ICMP_SGT,
                                                        G1OC, G2OC);
              if (ConstantInt *CV = dyn_cast<ConstantInt>(Compare)) {
                if (CV->getZExtValue()) {  // If they are comparable and G2 > G1
                  std::swap(GEP1Ops, GEP2Ops);  // Make GEP1 < GEP2
                  std::swap(NumGEP1Ops, NumGEP2Ops);
                }
                break;
              }
            }
          }
        }
    BasePtr1Ty = cast<CompositeType>(BasePtr1Ty)->getTypeAtIndex(G1Oper);
  }

  // No shared constant operands, and we ran out of common operands. At this
  // point, the GEP instructions have run through all of their operands, and we
  // haven't found evidence that there are any deltas between the GEP's.
  // However, one GEP may have more operands than the other. If this is the
  // case, there may still be hope. Check this now.
  if (FirstConstantOper == MinOperands) {
    // Without TargetData, we won't know what the offsets are.
    if (!TD)
      return MayAlias;

    // Make GEP1Ops be the longer one if there is a longer one.
    if (NumGEP1Ops < NumGEP2Ops) {
      std::swap(GEP1Ops, GEP2Ops);
      std::swap(NumGEP1Ops, NumGEP2Ops);
    }

    // Is there anything to check?
    if (NumGEP1Ops > MinOperands) {
      for (unsigned i = FirstConstantOper; i != MaxOperands; ++i)
        if (isa<ConstantInt>(GEP1Ops[i]) &&
            !cast<ConstantInt>(GEP1Ops[i])->isZero()) {
          // Yup, there's a constant in the tail. Set all variables to
          // constants in the GEP instruction to make it suitable for
          // TargetData::getIndexedOffset.
          for (i = 0; i != MaxOperands; ++i)
            if (!isa<ConstantInt>(GEP1Ops[i]))
              GEP1Ops[i] = Constant::getNullValue(GEP1Ops[i]->getType());
          // Okay, now get the offset. This is the relative offset for the full
          // instruction.
          int64_t Offset1 = TD->getIndexedOffset(GEPPointerTy, GEP1Ops,
                                                 NumGEP1Ops);

          // Now check without any constants at the end.
          int64_t Offset2 = TD->getIndexedOffset(GEPPointerTy, GEP1Ops,
                                                 MinOperands);

          // Make sure we compare the absolute difference.
          if (Offset1 > Offset2)
            std::swap(Offset1, Offset2);

          // If the tail provided a big enough offset, return noalias!
          if ((uint64_t)(Offset2-Offset1) >= SizeMax)
            return NoAlias;
          // Otherwise break - we don't look for another constant in the tail.
          break;
        }
    }

    // Couldn't find anything useful.
    return MayAlias;
  }

  // If there are non-equal constant arguments, then we can figure
  // out a minimum known delta between the two index expressions... at
  // this point we know that the first constant index of GEP1 is less
  // than the first constant index of GEP2.

  // Advance BasePtr[12]Ty over this first differing constant operand.
  BasePtr2Ty = cast<CompositeType>(BasePtr1Ty)->
                 getTypeAtIndex(GEP2Ops[FirstConstantOper]);
  BasePtr1Ty = cast<CompositeType>(BasePtr1Ty)->
                 getTypeAtIndex(GEP1Ops[FirstConstantOper]);

  // We are going to be using TargetData::getIndexedOffset to determine the
  // offset that each of the GEP's is reaching. To do this, we have to convert
  // all variable references to constant references. To do this, we convert the
  // initial sequence of array subscripts into constant zeros to start with.
  const Type *ZeroIdxTy = GEPPointerTy;
  for (unsigned i = 0; i != FirstConstantOper; ++i) {
    if (!isa<StructType>(ZeroIdxTy))
      GEP1Ops[i] = GEP2Ops[i] =
        Constant::getNullValue(Type::getInt32Ty(ZeroIdxTy->getContext()));

    if (const CompositeType *CT = dyn_cast<CompositeType>(ZeroIdxTy))
      ZeroIdxTy = CT->getTypeAtIndex(GEP1Ops[i]);
  }

  // We know that GEP1Ops[FirstConstantOper] & GEP2Ops[FirstConstantOper] are ok

  // Loop over the rest of the operands...
  for (unsigned i = FirstConstantOper+1; i != MaxOperands; ++i) {
    const Value *Op1 = i < NumGEP1Ops ? GEP1Ops[i] : 0;
    const Value *Op2 = i < NumGEP2Ops ? GEP2Ops[i] : 0;
    // If they are equal, use a zero index...
    if (Op1 == Op2 && BasePtr1Ty == BasePtr2Ty) {
      if (!isa<ConstantInt>(Op1))
        GEP1Ops[i] = GEP2Ops[i] = Constant::getNullValue(Op1->getType());
      // Otherwise, just keep the constants we have.
    } else {
      if (Op1) {
        if (const ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
          // If this is an array index, make sure the array element is in range.
          if (const ArrayType *AT = dyn_cast<ArrayType>(BasePtr1Ty)) {
            if (Op1C->getZExtValue() >= AT->getNumElements())
              return MayAlias;  // Be conservative with out-of-range accesses
          } else if (const VectorType *VT = dyn_cast<VectorType>(BasePtr1Ty)) {
            if (Op1C->getZExtValue() >= VT->getNumElements())
              return MayAlias;  // Be conservative with out-of-range accesses
          }
        } else {
          // GEP1 is known to produce a value less than GEP2. To be
          // conservatively correct, we must assume the largest possible
          // constant is used in this position. This cannot be the initial
          // index to the GEP instructions (because we know we have at least one
          // element before this one with the different constant arguments), so
          // we know that the current index must be into either a struct or
          // array. Because we know it's not constant, this cannot be a
          // structure index. Because of this, we can calculate the maximum
          // value possible.
          //
          if (const ArrayType *AT = dyn_cast<ArrayType>(BasePtr1Ty))
            GEP1Ops[i] =
              ConstantInt::get(Type::getInt64Ty(AT->getContext()),
                               AT->getNumElements()-1);
          else if (const VectorType *VT = dyn_cast<VectorType>(BasePtr1Ty))
            GEP1Ops[i] =
              ConstantInt::get(Type::getInt64Ty(VT->getContext()),
                               VT->getNumElements()-1);
        }
      }

      if (Op2) {
        if (const ConstantInt *Op2C = dyn_cast<ConstantInt>(Op2)) {
          // If this is an array index, make sure the array element is in range.
          if (const ArrayType *AT = dyn_cast<ArrayType>(BasePtr2Ty)) {
            if (Op2C->getZExtValue() >= AT->getNumElements())
              return MayAlias;  // Be conservative with out-of-range accesses
          } else if (const VectorType *VT = dyn_cast<VectorType>(BasePtr2Ty)) {
            if (Op2C->getZExtValue() >= VT->getNumElements())
              return MayAlias;  // Be conservative with out-of-range accesses
          }
        } else {  // Conservatively assume the minimum value for this index
          GEP2Ops[i] = Constant::getNullValue(Op2->getType());
        }
      }
    }

    if (BasePtr1Ty && Op1) {
      if (const CompositeType *CT = dyn_cast<CompositeType>(BasePtr1Ty))
        BasePtr1Ty = CT->getTypeAtIndex(GEP1Ops[i]);
      else
        BasePtr1Ty = 0;
    }

    if (BasePtr2Ty && Op2) {
      if (const CompositeType *CT = dyn_cast<CompositeType>(BasePtr2Ty))
        BasePtr2Ty = CT->getTypeAtIndex(GEP2Ops[i]);
      else
        BasePtr2Ty = 0;
    }
  }

  if (TD && GEPPointerTy->getElementType()->isSized()) {
    int64_t Offset1 =
      TD->getIndexedOffset(GEPPointerTy, GEP1Ops, NumGEP1Ops);
    int64_t Offset2 =
      TD->getIndexedOffset(GEPPointerTy, GEP2Ops, NumGEP2Ops);
    assert(Offset1 != Offset2 &&
           "There is at least one different constant here!");

    // Make sure we compare the absolute difference.
    if (Offset1 > Offset2)
      std::swap(Offset1, Offset2);

    if ((uint64_t)(Offset2-Offset1) >= SizeMax) {
      //cerr << "Determined that these two GEP's don't alias ["
      //     << SizeMax << " bytes]: \n" << *GEP1 << *GEP2;
      return NoAlias;
    }
  }
  return MayAlias;
}
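
// For example (illustrative), given target data and a base of type
// [10 x { i32, i32 }]*, the index lists {0, %i, 0} and {0, %i, 1} differ by a
// constant pair in the last position; getIndexedOffset reports a 4-byte delta,
// so two 4-byte accesses through the corresponding GEPs are NoAlias.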

// Make sure that anything that uses AliasAnalysis pulls in this file.
DEFINING_FILE_FOR(BasicAliasAnalysis)