X-Git-Url: http://plrg.eecs.uci.edu/git/?p=oota-llvm.git;a=blobdiff_plain;f=include%2Fllvm%2FAnalysis%2FValueTracking.h;h=72b6417a8a6aafa9f77e275a7dae2bd39407b4f9;hp=83b5408fb1c2aff3e2a8911b37a328d941081434;hb=0dc1606c51dfa074879de08c818f3b818420a5f8;hpb=e4d0a5ec1841ac5a407c3a07b62749923dda74c2

diff --git a/include/llvm/Analysis/ValueTracking.h b/include/llvm/Analysis/ValueTracking.h
index 83b5408fb1c..72b6417a8a6 100644
--- a/include/llvm/Analysis/ValueTracking.h
+++ b/include/llvm/Analysis/ValueTracking.h
@@ -16,6 +16,7 @@
 #define LLVM_ANALYSIS_VALUETRACKING_H
 
 #include "llvm/ADT/ArrayRef.h"
+#include "llvm/IR/Instruction.h"
 #include "llvm/Support/DataTypes.h"
 
 namespace llvm {
@@ -25,55 +26,77 @@ namespace llvm {
   class DataLayout;
   class StringRef;
   class MDNode;
+  class AssumptionCache;
+  class DominatorTree;
   class TargetLibraryInfo;
+  class LoopInfo;
+  class Loop;
 
   /// Determine which bits of V are known to be either zero or one and return
   /// them in the KnownZero/KnownOne bit sets.
   ///
   /// This function is defined on values with integer type, values with pointer
-  /// type (but only if TD is non-null), and vectors of integers. In the case
+  /// type, and vectors of integers. In the case
   /// where V is a vector, the known zero and known one values are the
   /// same width as the vector element, and the bit is set only if it is true
   /// for all of the elements in the vector.
-  void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
-                        const DataLayout *TD = nullptr, unsigned Depth = 0);
+  void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
+                        const DataLayout &DL, unsigned Depth = 0,
+                        AssumptionCache *AC = nullptr,
+                        const Instruction *CxtI = nullptr,
+                        const DominatorTree *DT = nullptr);
   /// Compute known bits from the range metadata.
   /// \p KnownZero the set of bits that are known to be zero
   void computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                          APInt &KnownZero);
+  /// Return true if LHS and RHS have no common bits set.
+  bool haveNoCommonBitsSet(Value *LHS, Value *RHS, const DataLayout &DL,
+                           AssumptionCache *AC = nullptr,
+                           const Instruction *CxtI = nullptr,
+                           const DominatorTree *DT = nullptr);
 
   /// ComputeSignBit - Determine whether the sign bit is known to be zero or
   /// one. Convenience wrapper around computeKnownBits.
   void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
-                      const DataLayout *TD = nullptr, unsigned Depth = 0);
+                      const DataLayout &DL, unsigned Depth = 0,
+                      AssumptionCache *AC = nullptr,
+                      const Instruction *CxtI = nullptr,
+                      const DominatorTree *DT = nullptr);
 
   /// isKnownToBeAPowerOfTwo - Return true if the given value is known to have
   /// exactly one bit set when defined. For vectors return true if every
   /// element is known to be a power of two when defined. Supports values with
   /// integer or pointer type and vectors of integers. If 'OrZero' is set then
-  /// returns true if the given value is either a power of two or zero.
-  bool isKnownToBeAPowerOfTwo(Value *V, bool OrZero = false, unsigned Depth = 0);
+  /// return true if the given value is either a power of two or zero.
+  bool isKnownToBeAPowerOfTwo(Value *V, const DataLayout &DL,
+                              bool OrZero = false, unsigned Depth = 0,
+                              AssumptionCache *AC = nullptr,
+                              const Instruction *CxtI = nullptr,
+                              const DominatorTree *DT = nullptr);
 
   /// isKnownNonZero - Return true if the given value is known to be non-zero
   /// when defined. For vectors return true if every element is known to be
   /// non-zero when defined. Supports values with integer or pointer type and
   /// vectors of integers.
-  bool isKnownNonZero(Value *V, const DataLayout *TD = nullptr,
-                      unsigned Depth = 0);
+  bool isKnownNonZero(Value *V, const DataLayout &DL, unsigned Depth = 0,
+                      AssumptionCache *AC = nullptr,
+                      const Instruction *CxtI = nullptr,
+                      const DominatorTree *DT = nullptr);
 
   /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
   /// this predicate to simplify operations downstream. Mask is known to be
   /// zero for bits that V cannot have.
   ///
   /// This function is defined on values with integer type, values with pointer
-  /// type (but only if TD is non-null), and vectors of integers. In the case
+  /// type, and vectors of integers. In the case
   /// where V is a vector, the mask, known zero, and known one values are the
   /// same width as the vector element, and the bit is set only if it is true
   /// for all of the elements in the vector.
-  bool MaskedValueIsZero(Value *V, const APInt &Mask,
-                         const DataLayout *TD = nullptr, unsigned Depth = 0);
+  bool MaskedValueIsZero(Value *V, const APInt &Mask, const DataLayout &DL,
+                         unsigned Depth = 0, AssumptionCache *AC = nullptr,
+                         const Instruction *CxtI = nullptr,
+                         const DominatorTree *DT = nullptr);
 
-
   /// ComputeNumSignBits - Return the number of times the sign bit of the
   /// register is replicated into the other bits. We know that at least 1 bit
   /// is always equal to the sign bit (itself), but other cases can give us
   ///
@@ -82,8 +105,10 @@ namespace llvm {
   ///
   /// 'Op' must have a scalar integer type.
   ///
-  unsigned ComputeNumSignBits(Value *Op, const DataLayout *TD = nullptr,
-                              unsigned Depth = 0);
+  unsigned ComputeNumSignBits(Value *Op, const DataLayout &DL,
+                              unsigned Depth = 0, AssumptionCache *AC = nullptr,
+                              const Instruction *CxtI = nullptr,
+                              const DominatorTree *DT = nullptr);
 
   /// ComputeMultiple - This function computes the integer multiple of Base that
   /// equals V. If successful, it returns true and returns the multiple in
@@ -99,6 +124,11 @@ namespace llvm {
   ///
   bool CannotBeNegativeZero(const Value *V, unsigned Depth = 0);
 
+  /// CannotBeOrderedLessThanZero - Return true if we can prove that the
+  /// specified FP value is either a NaN or never less than 0.0.
+  ///
+  bool CannotBeOrderedLessThanZero(const Value *V, unsigned Depth = 0);
+
   /// isBytewiseValue - If the specified value can be set by repeating the same
   /// byte in memory, return the i8 value that it is represented with. This is
   /// true for all i8 values obviously, but is also true for i32 0, i32 -1,
@@ -120,11 +150,12 @@ namespace llvm {
   /// it can be expressed as a base pointer plus a constant offset. Return the
   /// base and offset to the caller.
   Value *GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
-                                          const DataLayout *TD);
+                                          const DataLayout &DL);
   static inline const Value *
   GetPointerBaseWithConstantOffset(const Value *Ptr, int64_t &Offset,
-                                   const DataLayout *TD) {
-    return GetPointerBaseWithConstantOffset(const_cast<Value*>(Ptr), Offset,TD);
+                                   const DataLayout &DL) {
+    return GetPointerBaseWithConstantOffset(const_cast<Value*>(Ptr), Offset,
+                                            DL);
   }
 
   /// getConstantStringInfo - This function computes the length of a
@@ -145,26 +176,59 @@ namespace llvm {
   /// being addressed. Note that the returned value has pointer type if the
   /// specified value does. If the MaxLookup value is non-zero, it limits the
   /// number of instructions to be stripped off.
-  Value *GetUnderlyingObject(Value *V, const DataLayout *TD = nullptr,
+  Value *GetUnderlyingObject(Value *V, const DataLayout &DL,
                              unsigned MaxLookup = 6);
-  static inline const Value *
-  GetUnderlyingObject(const Value *V, const DataLayout *TD = nullptr,
-                      unsigned MaxLookup = 6) {
-    return GetUnderlyingObject(const_cast<Value*>(V), TD, MaxLookup);
+  static inline const Value *GetUnderlyingObject(const Value *V,
+                                                 const DataLayout &DL,
+                                                 unsigned MaxLookup = 6) {
+    return GetUnderlyingObject(const_cast<Value*>(V), DL, MaxLookup);
   }
 
-  /// GetUnderlyingObjects - This method is similar to GetUnderlyingObject
-  /// except that it can look through phi and select instructions and return
-  /// multiple objects.
-  void GetUnderlyingObjects(Value *V,
-                            SmallVectorImpl<Value *> &Objects,
-                            const DataLayout *TD = nullptr,
+  /// \brief This method is similar to GetUnderlyingObject except that it can
+  /// look through phi and select instructions and return multiple objects.
+  ///
+  /// If LoopInfo is passed, loop phis are further analyzed. If a pointer
+  /// accesses different objects in each iteration, we don't look through the
+  /// phi node. E.g. consider this loop nest:
+  ///
+  ///   int **A;
+  ///   for (i)
+  ///     for (j) {
+  ///       A[i][j] = A[i-1][j] * B[j]
+  ///     }
+  ///
+  /// This is transformed by Load-PRE to stash away A[i] for the next iteration
+  /// of the outer loop:
+  ///
+  ///   Curr = A[0];          // Prev_0
+  ///   for (i: 1..N) {
+  ///     Prev = Curr;        // Prev = PHI (Prev_0, Curr)
+  ///     Curr = A[i];
+  ///     for (j: 0..N) {
+  ///       Curr[j] = Prev[j] * B[j]
+  ///     }
+  ///   }
+  ///
+  /// Since A[i] and A[i-1] are independent pointers, GetUnderlyingObjects
+  /// should not assume that Curr and Prev share the same underlying object,
+  /// and thus it shouldn't look through the phi above.
+  void GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
+                            const DataLayout &DL, LoopInfo *LI = nullptr,
                             unsigned MaxLookup = 6);
 
   /// onlyUsedByLifetimeMarkers - Return true if the only users of this pointer
   /// are lifetime markers.
   bool onlyUsedByLifetimeMarkers(const Value *V);
 
+  /// isDereferenceablePointer - Return true if this is always a dereferenceable
+  /// pointer. If the context instruction is specified, perform context-sensitive
+  /// analysis and return true if the pointer is dereferenceable at the
+  /// specified instruction.
+  bool isDereferenceablePointer(const Value *V, const DataLayout &DL,
+                                const Instruction *CtxI = nullptr,
+                                const DominatorTree *DT = nullptr,
+                                const TargetLibraryInfo *TLI = nullptr);
+
   /// isSafeToSpeculativelyExecute - Return true if the instruction does not
   /// have any effects besides calculating the result and does not have
   /// undefined behavior.
@@ -178,19 +242,158 @@ namespace llvm {
   /// memory leak. It also returns false for instructions related to control
   /// flow, specifically terminators and PHI nodes.
   ///
-  /// This method only looks at the instruction itself and its operands, so if
-  /// this method returns true, it is safe to move the instruction as long as
-  /// the correct dominance relationships for the operands and users hold.
-  /// However, this method can return true for instructions that read memory;
+  /// If the CtxI is specified, this method performs context-sensitive analysis
+  /// and returns true if it is safe to execute the instruction immediately
+  /// before the CtxI.
+  ///
+  /// If the CtxI is NOT specified, this method only looks at the instruction
+  /// itself and its operands, so if this method returns true, it is safe to
+  /// move the instruction as long as the correct dominance relationships for
+  /// the operands and users hold.
+  ///
+  /// This method can return true for instructions that read memory;
   /// for such instructions, moving them may change the resulting value.
   bool isSafeToSpeculativelyExecute(const Value *V,
-                                    const DataLayout *TD = nullptr);
+                                    const Instruction *CtxI = nullptr,
+                                    const DominatorTree *DT = nullptr,
+                                    const TargetLibraryInfo *TLI = nullptr);
+
+  /// Returns true if the result or effects of the given instruction \p I
+  /// depend on or influence global memory.
+  /// Memory dependence arises for example if the instruction reads from
+  /// memory or may produce effects or undefined behavior. Memory dependent
+  /// instructions generally cannot be reordered with respect to other memory
+  /// dependent instructions or moved into non-dominated basic blocks.
+  /// Instructions which just compute a value based on the values of their
+  /// operands are not memory dependent.
+  bool mayBeMemoryDependent(const Instruction &I);
 
   /// isKnownNonNull - Return true if this pointer couldn't possibly be null by
   /// its definition. This returns true for allocas, non-extern-weak globals
   /// and byval arguments.
   bool isKnownNonNull(const Value *V, const TargetLibraryInfo *TLI = nullptr);
 
+  /// isKnownNonNullAt - Return true if this pointer couldn't possibly be null.
+  /// If the context instruction is specified, perform context-sensitive analysis
+  /// and return true if the pointer couldn't possibly be null at the specified
+  /// instruction.
+  bool isKnownNonNullAt(const Value *V,
+                        const Instruction *CtxI = nullptr,
+                        const DominatorTree *DT = nullptr,
+                        const TargetLibraryInfo *TLI = nullptr);
+
+  /// Return true if it is valid to use the assumptions provided by an
+  /// assume intrinsic, I, at the point in the control-flow identified by the
+  /// context instruction, CxtI.
+  bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI,
+                               const DominatorTree *DT = nullptr);
+
+  enum class OverflowResult { AlwaysOverflows, MayOverflow, NeverOverflows };
+  OverflowResult computeOverflowForUnsignedMul(Value *LHS, Value *RHS,
+                                               const DataLayout &DL,
+                                               AssumptionCache *AC,
+                                               const Instruction *CxtI,
+                                               const DominatorTree *DT);
+  OverflowResult computeOverflowForUnsignedAdd(Value *LHS, Value *RHS,
+                                               const DataLayout &DL,
+                                               AssumptionCache *AC,
+                                               const Instruction *CxtI,
+                                               const DominatorTree *DT);
+
+  /// Return true if this function can prove that the instruction I will
+  /// always transfer execution to one of its successors (including the next
+  /// instruction that follows within a basic block). E.g. this is not
+  /// guaranteed for function calls that could loop infinitely.
+  ///
+  /// In other words, this function returns false for instructions that may
+  /// transfer execution or fail to transfer execution in a way that is not
+  /// captured in the CFG nor in the sequence of instructions within a basic
+  /// block.
+  ///
+  /// Undefined behavior is assumed not to happen, so e.g. division is
+  /// guaranteed to transfer execution to the following instruction even
+  /// though division by zero might cause undefined behavior.
+  bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I);
+
+  /// Return true if this function can prove that the instruction I
+  /// is executed for every iteration of the loop L.
+  ///
+  /// Note that this currently only considers the loop header.
+  bool isGuaranteedToExecuteForEveryIteration(const Instruction *I,
+                                              const Loop *L);
+
+  /// Return true if this function can prove that I is guaranteed to yield
+  /// full-poison (all bits poison) if at least one of its operands is
+  /// full-poison (all bits poison).
+  ///
+  /// The exact rules for how poison propagates through instructions have
+  /// not been settled as of 2015-07-10, so this function is conservative
+  /// and only considers poison to be propagated in uncontroversial
+  /// cases. There is no attempt to track values that may be only partially
+  /// poison.
+  bool propagatesFullPoison(const Instruction *I);
+
+  /// Return either nullptr or an operand of I such that I will trigger
+  /// undefined behavior if I is executed and that operand has a full-poison
+  /// value (all bits poison).
+  const Value *getGuaranteedNonFullPoisonOp(const Instruction *I);
+
+  /// Return true if this function can prove that if PoisonI is executed
+  /// and yields a full-poison value (all bits poison), then that will
+  /// trigger undefined behavior.
+  ///
+  /// Note that this currently only considers the basic block that is
+  /// the parent of I.
+  bool isKnownNotFullPoison(const Instruction *PoisonI);
+
+  /// \brief Specific patterns of select instructions we can match.
+  enum SelectPatternFlavor {
+    SPF_UNKNOWN = 0,
+    SPF_SMIN,                   /// Signed minimum
+    SPF_UMIN,                   /// Unsigned minimum
+    SPF_SMAX,                   /// Signed maximum
+    SPF_UMAX,                   /// Unsigned maximum
+    SPF_FMINNUM,                /// Floating point minnum
+    SPF_FMAXNUM,                /// Floating point maxnum
+    SPF_ABS,                    /// Absolute value
+    SPF_NABS                    /// Negated absolute value
+  };
+  /// \brief Behavior when a floating point min/max is given one NaN and one
+  /// non-NaN as input.
+  enum SelectPatternNaNBehavior {
+    SPNB_NA = 0,                /// NaN behavior not applicable.
+    SPNB_RETURNS_NAN,           /// Given one NaN input, returns the NaN.
+    SPNB_RETURNS_OTHER,         /// Given one NaN input, returns the non-NaN.
+    SPNB_RETURNS_ANY            /// Given one NaN input, can return either (or
+                                /// it has been determined that no operands can
+                                /// be NaN).
+  };
+  struct SelectPatternResult {
+    SelectPatternFlavor Flavor;
+    SelectPatternNaNBehavior NaNBehavior; /// Only applicable if Flavor is
+                                          /// SPF_FMINNUM or SPF_FMAXNUM.
+    bool Ordered;               /// When implementing this min/max pattern as
+                                /// fcmp; select, does the fcmp have to be
+                                /// ordered?
+  };
+  /// Pattern match integer [SU]MIN, [SU]MAX and ABS idioms, returning the kind
+  /// and providing the out parameter results if we successfully match.
+  ///
+  /// If CastOp is not nullptr, also match MIN/MAX idioms where the type does
+  /// not match that of the original select. If this is the case, the cast
+  /// operation (one of Trunc,SExt,Zext) that must be done to transform the
+  /// type of LHS and RHS into the type of V is returned in CastOp.
+  ///
+  /// For example:
+  ///   %1 = icmp slt i32 %a, i32 4
+  ///   %2 = sext i32 %a to i64
+  ///   %3 = select i1 %1, i64 %2, i64 4
+  ///
+  /// -> LHS = %a, RHS = i32 4, *CastOp = Instruction::SExt
+  ///
+  SelectPatternResult matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
+                                         Instruction::CastOps *CastOp = nullptr);
+
 } // end namespace llvm
 
 #endif
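
Usage note (editorial, not part of the patch): the sketch below illustrates the calling convention of the revised interface, where DataLayout is passed by reference and the queries accept an optional AssumptionCache, context instruction, and DominatorTree. It only uses declarations visible in the header above; the helper names are hypothetical and it assumes a tree containing the revision shown.

    #include "llvm/Analysis/ValueTracking.h"
    #include "llvm/ADT/APInt.h"
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Instructions.h"
    #include <cassert>

    using namespace llvm;

    // Hypothetical helper: are both operands of this binary operator known to
    // be non-zero at the operator itself? Passes the instruction as CxtI to
    // use the context-sensitive overload; AC and DT are omitted for brevity.
    static bool operandsKnownNonZero(BinaryOperator &BO, const DataLayout &DL) {
      return isKnownNonZero(BO.getOperand(0), DL, /*Depth=*/0,
                            /*AC=*/nullptr, /*CxtI=*/&BO) &&
             isKnownNonZero(BO.getOperand(1), DL, /*Depth=*/0,
                            /*AC=*/nullptr, /*CxtI=*/&BO);
    }

    // Hypothetical helper: bits known to be zero in an integer-typed value,
    // e.g. to reason about the alignment of an address computation.
    static APInt knownZeroBits(Value *V, const DataLayout &DL) {
      assert(V->getType()->isIntegerTy() && "sketch handles integer types only");
      unsigned BitWidth = V->getType()->getScalarSizeInBits();
      APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
      computeKnownBits(V, KnownZero, KnownOne, DL);
      return KnownZero;
    }

    // Hypothetical helper: does V compute a signed-maximum idiom, possibly
    // through a sign/zero extension of one operand (see matchSelectPattern)?
    static bool isSignedMaxIdiom(Value *V) {
      Value *LHS = nullptr, *RHS = nullptr;
      Instruction::CastOps CastOp;
      return matchSelectPattern(V, LHS, RHS, &CastOp).Flavor == SPF_SMAX;
    }

Note the contrast with the old interface, where the DataLayout pointer defaulted to null: callers now have to supply it explicitly.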