#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/InstructionSimplify.h"
+#include "llvm/Analysis/MemoryBuiltins.h"
+#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
-#include "llvm/Support/ConstantRange.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
+#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/MathExtras.h"
-#include "llvm/Support/PatternMatch.h"
#include <cstring>
using namespace llvm;
using namespace llvm::PatternMatch;
static unsigned getBitWidth(Type *Ty, const DataLayout *TD) {
if (unsigned BitWidth = Ty->getScalarSizeInBits())
return BitWidth;
- assert(isa<PointerType>(Ty) && "Expected a pointer type!");
- return TD ? TD->getPointerSizeInBits() : 0;
+
+ return TD ? TD->getPointerTypeSizeInBits(Ty) : 0;
}
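
// Editorial sketch, not part of the patch: getScalarSizeInBits() returns 0 for
// pointer and pointer-vector types, so the width must come from DataLayout.
// getPointerTypeSizeInBits(Ty) looks through vectors to the element's address
// space, which the old address-space-0-only getPointerSizeInBits() call could
// not. The layout string and types below are illustrative.
static void getBitWidthSketch(LLVMContext &Ctx) {
  DataLayout DL("p:64:64-p1:32:32"); // 64-bit AS0 pointers, 32-bit AS1
  Type *P1 = PointerType::get(Type::getInt8Ty(Ctx), /*AddrSpace=*/1);
  Type *V4P1 = VectorType::get(P1, 4);
  assert(DL.getPointerTypeSizeInBits(P1) == 32);   // uses address space 1
  assert(DL.getPointerTypeSizeInBits(V4P1) == 32); // looks through the vector
  assert(DL.getPointerSizeInBits() == 64);         // old query: AS0 only
}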
static void ComputeMaskedBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
}
if (Align > 0)
KnownZero = APInt::getLowBitsSet(BitWidth,
- CountTrailingZeros_32(Align));
+ countTrailingZeros(Align));
else
KnownZero.clearAllBits();
KnownOne.clearAllBits();
if (Argument *A = dyn_cast<Argument>(V)) {
unsigned Align = 0;
- if (A->hasByValAttr()) {
- // Get alignment information off byval arguments if specified in the IR.
+ if (A->hasByValOrInAllocaAttr()) {
+ // Get alignment information off byval/inalloca arguments if specified in
+ // the IR.
Align = A->getParamAlignment();
} else if (TD && A->hasStructRetAttr()) {
// An sret parameter has at least the ABI alignment of the return type.
}
if (Align)
- KnownZero = APInt::getLowBitsSet(BitWidth, CountTrailingZeros_32(Align));
+ KnownZero = APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
return;
}
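
// Editorial sketch, not part of the patch: the identity used in both alignment
// blocks above is that a value aligned to A has countTrailingZeros(A) low bits
// that are provably zero; countTrailingZeros() is the type-generic replacement
// for the retired CountTrailingZeros_32/_64 pair.
static void alignmentKnownZeroSketch() {
  unsigned Align = 16;
  unsigned LowZeros = countTrailingZeros(Align);          // == 4
  APInt KnownZero = APInt::getLowBitsSet(/*numBits=*/64, LowZeros);
  assert(KnownZero == APInt(64, 0xF)); // the four guaranteed-zero low bits
}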
Align = TD->getABITypeAlignment(AI->getType()->getElementType());
if (Align > 0)
- KnownZero = APInt::getLowBitsSet(BitWidth, CountTrailingZeros_32(Align));
+ KnownZero = APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
break;
}
case Instruction::GetElementPtr: {
Value *Index = I->getOperand(i);
if (StructType *STy = dyn_cast<StructType>(*GTI)) {
// Handle struct member offset arithmetic.
- if (!TD) return;
- const StructLayout *SL = TD->getStructLayout(STy);
+ if (!TD)
+ return;
+
+ // Handle the case when the index is a vector zeroinitializer.
+ Constant *CIndex = cast<Constant>(Index);
+ if (CIndex->isZeroValue())
+ continue;
+
+ if (CIndex->getType()->isVectorTy())
+ Index = CIndex->getSplatValue();
+
unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
+ const StructLayout *SL = TD->getStructLayout(STy);
uint64_t Offset = SL->getElementOffset(Idx);
- TrailZ = std::min(TrailZ,
- CountTrailingZeros_64(Offset));
+ TrailZ = std::min<unsigned>(TrailZ,
+ countTrailingZeros(Offset));
} else {
// Handle array index arithmetic.
Type *IndexedTy = GTI.getIndexedType();
LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0);
ComputeMaskedBits(Index, LocalKnownZero, LocalKnownOne, TD, Depth+1);
TrailZ = std::min(TrailZ,
- unsigned(CountTrailingZeros_64(TypeSize) +
+ unsigned(countTrailingZeros(TypeSize) +
LocalKnownZero.countTrailingOnes()));
}
}
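
// Editorial sketch, not part of the patch: the struct branch keeps
// min(TrailZ, ctz(Offset)); the array branch keeps min(TrailZ,
// ctz(ElementSize) + trailing zeros known for the index), which is sound
// because the byte offset is Index * ElementSize. Worked i32-array instance:
static void gepTrailingZerosSketch() {
  uint64_t TypeSize = 4;  // alloc size of i32: ctz == 2
  uint64_t Index = 24;    // known multiple of 8: at least 3 trailing zeros
  uint64_t Offset = Index * TypeSize;
  assert(countTrailingZeros(Offset) >= countTrailingZeros(TypeSize) + 3);
}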
KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
break;
}
- case Intrinsic::x86_sse42_crc32_64_8:
case Intrinsic::x86_sse42_crc32_64_64:
KnownZero = APInt::getHighBitsSet(64, 32);
break;
return false;
}
+ // Adding a power-of-two or zero to the same power-of-two or zero yields
+ // either the original power-of-two, a larger power-of-two, or zero.
+ if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
+ OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
+ if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) {
+ if (match(X, m_And(m_Specific(Y), m_Value())) ||
+ match(X, m_And(m_Value(), m_Specific(Y))))
+ if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth))
+ return true;
+ if (match(Y, m_And(m_Specific(X), m_Value())) ||
+ match(Y, m_And(m_Value(), m_Specific(X))))
+ if (isKnownToBeAPowerOfTwo(X, OrZero, Depth))
+ return true;
+
+ unsigned BitWidth = V->getType()->getScalarSizeInBits();
+ APInt LHSZeroBits(BitWidth, 0), LHSOneBits(BitWidth, 0);
+ ComputeMaskedBits(X, LHSZeroBits, LHSOneBits, 0, Depth);
+
+ APInt RHSZeroBits(BitWidth, 0), RHSOneBits(BitWidth, 0);
+ ComputeMaskedBits(Y, RHSZeroBits, RHSOneBits, 0, Depth);
+ // If i8 V is a power of two or zero:
+ // ZeroBits: 1 1 1 0 1 1 1 1
+ // ~ZeroBits: 0 0 0 1 0 0 0 0
+ if ((~(LHSZeroBits & RHSZeroBits)).isPowerOf2())
+ // If OrZero isn't set, we cannot give back a zero result.
+ // Make sure either the LHS or RHS has a bit set.
+ if (OrZero || RHSOneBits.getBoolValue() || LHSOneBits.getBoolValue())
+ return true;
+ }
+ }
+
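
// Editorial sketch, not part of the patch: an exhaustive i8 check of the rule
// encoded above. If Y is a power of two or zero and X = Y & M for any mask M,
// then X is 0 or Y, so X + Y is 0, Y, or 2*Y; once wrapping is excluded (the
// nuw/nsw test), every outcome is again a power of two or zero.
static void powerOfTwoAddSketch() {
  for (unsigned Y = 0; Y <= 0x80; Y = Y ? Y << 1 : 1) { // 0 and each i8 pow2
    for (unsigned M = 0; M <= 0xFF; ++M) {
      unsigned Sum = (Y & M) + Y;
      if (Sum > 0xFF)
        continue; // would wrap in i8; ruled out by the no-wrap flags
      assert(Sum == 0 || isPowerOf2_32(Sum));
    }
  }
}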
// An exact divide or right shift can only shift off zero bits, so the result
// is a power of two only if the first operand is a power of two and not
// copying a sign bit (sdiv int_min, 2).
// Check for pointer simplifications.
if (V->getType()->isPointerTy()) {
+ if (isKnownNonNull(V))
+ return true;
if (GEPOperator *GEP = dyn_cast<GEPOperator>(V))
if (isGEPKnownNonNull(GEP, TD, Depth))
return true;
return true;
// (add x, 0.0) is guaranteed to return +0.0, not -0.0.
- if (I->getOpcode() == Instruction::FAdd &&
- isa<ConstantFP>(I->getOperand(1)) &&
- cast<ConstantFP>(I->getOperand(1))->isNullValue())
- return true;
+ if (I->getOpcode() == Instruction::FAdd)
+ if (ConstantFP *CFP = dyn_cast<ConstantFP>(I->getOperand(1)))
+ if (CFP->isNullValue())
+ return true;
// sitofp and uitofp turn into +0.0 for zero.
if (isa<SIToFPInst>(I) || isa<UIToFPInst>(I))
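
// Editorial sketch, not part of the patch (needs <cmath>): why the test is
// isNullValue(), i.e. exactly +0.0. Under IEEE 754 round-to-nearest no input
// makes x + (+0.0) produce -0.0, but a -0.0 operand would preserve the sign:
static void fAddSignedZeroSketch() {
  double NegZero = -0.0;
  assert(!std::signbit(NegZero + 0.0));    // (-0.0) + (+0.0) == +0.0
  assert(std::signbit(NegZero + (-0.0)));  // (-0.0) + (-0.0) == -0.0
}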
// struct. To is the result struct built so far, new insertvalue instructions
// build on that.
static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
- SmallVector<unsigned, 10> &Idxs,
+ SmallVectorImpl<unsigned> &Idxs,
unsigned IdxSkip,
Instruction *InsertBefore) {
llvm::StructType *STy = dyn_cast<llvm::StructType>(IndexedType);
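
// Editorial sketch, not part of the patch: SmallVectorImpl<T>& is the usual
// LLVM parameter type because it accepts a caller's SmallVector of any inline
// size; the old SmallVector<unsigned, 10>& bound the interface to one size.
static void takeIdxs(SmallVectorImpl<unsigned> &Idxs) { Idxs.push_back(0); }
static void smallVectorImplSketch() {
  SmallVector<unsigned, 10> A;
  SmallVector<unsigned, 4> B;
  takeIdxs(A); // both calls compile against SmallVectorImpl<unsigned>&
  takeIdxs(B); // this one would not against SmallVector<unsigned, 10>&
}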
/// it can be expressed as a base pointer plus a constant offset. Return the
/// base and offset to the caller.
Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
- const DataLayout &TD) {
- unsigned BitWidth = TD.getPointerSizeInBits();
+ const DataLayout *DL) {
+ // Without DataLayout, conservatively assume 64-bit offsets, which is
+ // the widest we support.
+ unsigned BitWidth = DL ? DL->getPointerTypeSizeInBits(Ptr->getType()) : 64;
APInt ByteOffset(BitWidth, 0);
while (1) {
if (Ptr->getType()->isVectorTy())
break;
if (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
- APInt GEPOffset(BitWidth, 0);
- if (!GEP->accumulateConstantOffset(TD, GEPOffset))
- break;
- ByteOffset += GEPOffset;
+ if (DL) {
+ APInt GEPOffset(BitWidth, 0);
+ if (!GEP->accumulateConstantOffset(*DL, GEPOffset))
+ break;
+
+ ByteOffset += GEPOffset;
+ }
+
Ptr = GEP->getPointerOperand();
} else if (Operator::getOpcode(Ptr) == Instruction::BitCast) {
Ptr = cast<Operator>(Ptr)->getOperand(0);
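
// Editorial sketch, not part of the patch: a typical call-site shape. With a
// DataLayout the contract is Base + Offset == Ptr in bytes; when DL is null,
// GEP offsets are no longer accumulated, so callers should only lean on the
// returned base. Names are illustrative.
static bool pointersShareBase(Value *A, Value *B, const DataLayout *DL,
                              int64_t &Distance) {
  int64_t OffA = 0, OffB = 0;
  Value *BaseA = GetPointerBaseWithConstantOffset(A, OffA, DL);
  Value *BaseB = GetPointerBaseWithConstantOffset(B, OffB, DL);
  if (BaseA != BaseB)
    return false;
  Distance = OffB - OffA; // byte distance from A to B
  return true;
}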
/// are lifetime markers.
///
bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
- for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end();
- UI != UE; ++UI) {
- const IntrinsicInst *II = dyn_cast<IntrinsicInst>(*UI);
+ for (const User *U : V->users()) {
+ const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
if (!II) return false;
if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
}
case Instruction::Load: {
const LoadInst *LI = cast<LoadInst>(Inst);
- if (!LI->isUnordered())
+ if (!LI->isUnordered() ||
+ // Speculative load may create a race that did not exist in the source.
+ LI->getParent()->getParent()->hasFnAttribute(Attribute::SanitizeThread))
return false;
return LI->getPointerOperand()->isDereferenceablePointer();
}
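
// Editorial sketch, not part of the patch: the hoist the SanitizeThread check
// forbids. 'Flag' and 'Global' stand in for memory shared with other threads.
static bool Flag;
static int Global;
static int sourceForm() {
  int V = 0;
  if (Flag)
    V = Global; // the source reads Global only when Flag is true
  return V;
}
static int speculatedForm() {
  int V = Global; // speculated load: touches Global even when Flag is false,
  if (!Flag)      // a data race TSan would now (correctly) report
    V = 0;
  return V;
}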
case Intrinsic::umul_with_overflow:
case Intrinsic::usub_with_overflow:
return true;
+ // Sqrt should be OK, since the llvm sqrt intrinsic isn't defined to set
+ // errno like libm sqrt would.
+ case Intrinsic::sqrt:
+ case Intrinsic::fma:
+ case Intrinsic::fmuladd:
+ return true;
// TODO: some fp intrinsics are marked as having the same error handling
// as libm. They're safe to speculate when they won't error.
// TODO: are convert_{from,to}_fp16 safe?
return false; // Misc instructions which have effects
}
}
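
// Editorial sketch, not part of the patch (needs <cmath> and <cerrno>): the
// errno distinction behind the new sqrt case. Hoisting a libm call past a
// domain check can clobber errno on paths that never ran it in the source;
// llvm.sqrt has no such side effect, so speculating it is unobservable.
static double libmStyleSqrt(double X) {
  errno = 0;
  double R = sqrt(X); // libm: may set errno = EDOM when X < 0
  return errno ? 0.0 : R;
}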
+
+/// isKnownNonNull - Return true if we know that the specified value is never
+/// null.
+bool llvm::isKnownNonNull(const Value *V, const TargetLibraryInfo *TLI) {
+ // Alloca never returns null, malloc might.
+ if (isa<AllocaInst>(V)) return true;
+
+ // A byval or inalloca argument is never null.
+ if (const Argument *A = dyn_cast<Argument>(V))
+ return A->hasByValOrInAllocaAttr();
+
+ // Global values are not null unless extern weak.
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
+ return !GV->hasExternalWeakLinkage();
+
+ // operator new never returns null.
+ if (isOperatorNewLikeFn(V, TLI, /*LookThroughBitCast=*/true))
+ return true;
+
+ return false;
+}
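
// Editorial sketch, not part of the patch: the canonical fold isKnownNonNull
// enables, written in InstSimplify style with illustrative names.
static Value *simplifyNullCompare(ICmpInst *ICI, const TargetLibraryInfo *TLI) {
  if (ICI->getPredicate() == ICmpInst::ICMP_EQ &&
      isa<ConstantPointerNull>(ICI->getOperand(1)) &&
      isKnownNonNull(ICI->getOperand(0), TLI))
    return ConstantInt::getFalse(ICI->getContext()); // "p == null" is false
  return nullptr; // no simplification
}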