//===----------------------------------------------------------------------===//
#include "InstCombine.h"
-#include "llvm/Support/CallSite.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/MemoryBuiltins.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
+using namespace PatternMatch;
+
+#define DEBUG_TYPE "instcombine"
+
+STATISTIC(NumSimplified, "Number of library calls simplified");
/// getPromotedType - Return the specified type promoted as it would be to
/// pass through a va_arg area.
return Ty;
}
+/// reduceToSingleValueType - Given an aggregate type which ultimately holds a
+/// single scalar element, like {{{type}}} or [1 x type], return type.
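+/// For example, {{[1 x double]}} reduces to double.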
+static Type *reduceToSingleValueType(Type *T) {
+ while (!T->isSingleValueType()) {
+ if (StructType *STy = dyn_cast<StructType>(T)) {
+ if (STy->getNumElements() == 1)
+ T = STy->getElementType(0);
+ else
+ break;
+ } else if (ArrayType *ATy = dyn_cast<ArrayType>(T)) {
+ if (ATy->getNumElements() == 1)
+ T = ATy->getElementType();
+ else
+ break;
+ } else
+ break;
+ }
+
+ return T;
+}
Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
- unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), TD);
- unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), TD);
+ unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), DL);
+ unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), DL);
unsigned MinAlign = std::min(DstAlign, SrcAlign);
unsigned CopyAlign = MI->getAlignment();
  // See if the size is something we can handle with a single primitive
  // load/store.
// A single load+store correctly handles overlapping memory in the memmove
// case.
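+  // For example, an overlapping 8-byte memmove becomes a single i64 load
+  // that reads every source byte before the store writes any.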
- unsigned Size = MemOpLength->getZExtValue();
- if (Size == 0) return MI; // Delete this mem transfer.
+ uint64_t Size = MemOpLength->getLimitedValue();
+  assert(Size && "0-sized memory transfers should have been removed already.");
if (Size > 8 || (Size&(Size-1)))
return 0; // If not 1/2/4/8 bytes, exit.
// dest address will be promotable. See if we can find a better type than the
// integer datatype.
Value *StrippedDest = MI->getArgOperand(0)->stripPointerCasts();
+ MDNode *CopyMD = 0;
if (StrippedDest != MI->getArgOperand(0)) {
Type *SrcETy = cast<PointerType>(StrippedDest->getType())
->getElementType();
- if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
+ if (DL && SrcETy->isSized() && DL->getTypeStoreSize(SrcETy) == Size) {
// The SrcETy might be something like {{{double}}} or [1 x double]. Rip
// down through these levels if so.
- while (!SrcETy->isSingleValueType()) {
- if (StructType *STy = dyn_cast<StructType>(SrcETy)) {
- if (STy->getNumElements() == 1)
- SrcETy = STy->getElementType(0);
- else
- break;
- } else if (ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
- if (ATy->getNumElements() == 1)
- SrcETy = ATy->getElementType();
- else
- break;
- } else
- break;
- }
+ SrcETy = reduceToSingleValueType(SrcETy);
if (SrcETy->isSingleValueType()) {
NewSrcPtrTy = PointerType::get(SrcETy, SrcAddrSp);
NewDstPtrTy = PointerType::get(SrcETy, DstAddrSp);
+
+ // If the memcpy has metadata describing the members, see if we can
+ // get the TBAA tag describing our copy.
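+      // !tbaa.struct metadata is a list of (offset, size, tag) triples; a
+      // single triple at offset 0 whose size matches the copy describes the
+      // whole transfer.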
+ if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa_struct)) {
+ if (M->getNumOperands() == 3 &&
+ M->getOperand(0) &&
+ isa<ConstantInt>(M->getOperand(0)) &&
+ cast<ConstantInt>(M->getOperand(0))->isNullValue() &&
+ M->getOperand(1) &&
+ isa<ConstantInt>(M->getOperand(1)) &&
+ cast<ConstantInt>(M->getOperand(1))->getValue() == Size &&
+ M->getOperand(2) &&
+ isa<MDNode>(M->getOperand(2)))
+ CopyMD = cast<MDNode>(M->getOperand(2));
+ }
}
}
}
-
// If the memcpy/memmove provides better alignment info than we can
// infer, use it.
SrcAlign = std::max(SrcAlign, CopyAlign);
Value *Dest = Builder->CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
LoadInst *L = Builder->CreateLoad(Src, MI->isVolatile());
L->setAlignment(SrcAlign);
+ if (CopyMD)
+ L->setMetadata(LLVMContext::MD_tbaa, CopyMD);
StoreInst *S = Builder->CreateStore(L, Dest, MI->isVolatile());
S->setAlignment(DstAlign);
+ if (CopyMD)
+ S->setMetadata(LLVMContext::MD_tbaa, CopyMD);
// Set the size of the copy to 0, it will be deleted on the next iteration.
MI->setArgOperand(2, Constant::getNullValue(MemOpLength->getType()));
}
Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
- unsigned Alignment = getKnownAlignment(MI->getDest(), TD);
+ unsigned Alignment = getKnownAlignment(MI->getDest(), DL);
if (MI->getAlignment() < Alignment) {
MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
Alignment, false));
ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
return 0;
- uint64_t Len = LenC->getZExtValue();
+ uint64_t Len = LenC->getLimitedValue();
Alignment = MI->getAlignment();
-
- // If the length is zero, this is a no-op
- if (Len == 0) return MI; // memset(d,c,0,a) -> noop
+  assert(Len && "0-sized memory sets should have been removed already.");
// memset(s,c,n) -> store s, c (for n=1,2,4,8)
if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
return 0;
}
-/// computeAllocSize - compute the object size allocated by an allocation
-/// site. Returns 0 if the size is not constant (in SizeValue), 1 if the size
-/// is constant (in Size), and 2 if the size could not be determined within the
-/// given maximum Penalty that the computation would incurr at run-time.
-static int computeAllocSize(Value *Alloc, uint64_t &Size, Value* &SizeValue,
- uint64_t Penalty, TargetData *TD,
- InstCombiner::BuilderTy *Builder) {
- if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Alloc)) {
- if (GV->hasDefinitiveInitializer()) {
- Constant *C = GV->getInitializer();
- Size = TD->getTypeAllocSize(C->getType());
- return 1;
- }
- // Can't determine size of the GV.
- return 2;
-
- } else if (AllocaInst *AI = dyn_cast<AllocaInst>(Alloc)) {
- if (!AI->getAllocatedType()->isSized())
- return 2;
-
- Size = TD->getTypeAllocSize(AI->getAllocatedType());
- if (!AI->isArrayAllocation())
- return 1; // we are done
-
- Value *ArraySize = AI->getArraySize();
- if (const ConstantInt *C = dyn_cast<ConstantInt>(ArraySize)) {
- Size *= C->getZExtValue();
- return 1;
- }
-
- if (Penalty < 2)
- return 2;
-
- SizeValue = ConstantInt::get(ArraySize->getType(), Size);
- SizeValue = Builder->CreateMul(SizeValue, ArraySize);
- return 0;
-
- } else if (CallInst *MI = extractMallocCall(Alloc)) {
- SizeValue = MI->getArgOperand(0);
- if (ConstantInt *CI = dyn_cast<ConstantInt>(SizeValue)) {
- Size = CI->getZExtValue();
- return 1;
- }
- return Penalty >= 2 ? 0 : 2;
-
- } else if (CallInst *MI = extractCallocCall(Alloc)) {
- Value *Arg1 = MI->getArgOperand(0);
- Value *Arg2 = MI->getArgOperand(1);
- if (ConstantInt *CI1 = dyn_cast<ConstantInt>(Arg1)) {
- if (ConstantInt *CI2 = dyn_cast<ConstantInt>(Arg2)) {
- Size = (CI1->getValue() * CI2->getValue()).getZExtValue();
- return 1;
- }
- }
-
- if (Penalty < 2)
- return 2;
-
- SizeValue = Builder->CreateMul(Arg1, Arg2);
- return 0;
- }
-
- DEBUG(errs() << "computeAllocSize failed:\n");
- DEBUG(Alloc->dump());
- return 2;
-}
-
/// visitCallInst - CallInst simplification. This mostly only handles folding
/// of intrinsic instructions. For normal calls, it allows visitCallSite to do
/// the heavy lifting.
///
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
- if (isFreeCall(&CI))
+ if (isFreeCall(&CI, TLI))
return visitFree(CI);
- if (extractMallocCall(&CI) || extractCallocCall(&CI))
- return visitMalloc(CI);
// If the caller function is nounwind, mark the call as nounwind, even if the
// callee isn't.
switch (II->getIntrinsicID()) {
default: break;
case Intrinsic::objectsize: {
- // We need target data for just about everything so depend on it.
- if (!TD) return 0;
-
- Type *ReturnTy = CI.getType();
- uint64_t Penalty = cast<ConstantInt>(II->getArgOperand(2))->getZExtValue();
-
- // Get to the real allocated thing and offset as fast as possible.
- Value *Op1 = II->getArgOperand(0)->stripPointerCasts();
- GEPOperator *GEP;
-
- if ((GEP = dyn_cast<GEPOperator>(Op1))) {
- // check if we will be able to get the offset
- if (!GEP->hasAllConstantIndices() && Penalty < 2)
- return 0;
- Op1 = GEP->getPointerOperand()->stripPointerCasts();
- }
-
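+    // Fold to a constant when the object size can be computed; otherwise
+    // leave the intrinsic in place for later passes to lower.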
uint64_t Size;
- Value *SizeValue;
- int ConstAlloc = computeAllocSize(Op1, Size, SizeValue, Penalty, TD,
- Builder);
-
- // Do not return "I don't know" here. Later optimization passes could
- // make it possible to evaluate objectsize to a constant.
- if (ConstAlloc == 2)
- return 0;
-
- uint64_t Offset = 0;
- Value *OffsetValue = 0;
-
- if (GEP) {
- if (GEP->hasAllConstantIndices()) {
- SmallVector<Value*, 8> Ops(GEP->idx_begin(), GEP->idx_end());
- assert(GEP->getPointerOperandType()->isPointerTy());
- Offset = TD->getIndexedOffset(GEP->getPointerOperandType(), Ops);
- } else
- OffsetValue = EmitGEPOffset(GEP, true /*NoNUW*/);
- }
-
- if (!OffsetValue && ConstAlloc) {
- if (Size < Offset) {
- // Out of bounds
- return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, 0));
- }
- return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, Size-Offset));
- }
-
- if (!OffsetValue)
- OffsetValue = ConstantInt::get(ReturnTy, Offset);
- if (ConstAlloc)
- SizeValue = ConstantInt::get(ReturnTy, Size);
-
- Value *Val = Builder->CreateSub(SizeValue, OffsetValue);
- // return 0 if there's an overflow
- Value *Cmp = Builder->CreateICmpULT(SizeValue, OffsetValue);
- Val = Builder->CreateSelect(Cmp, ConstantInt::get(ReturnTy, 0), Val);
- return ReplaceInstUsesWith(CI, Val);
+ if (getObjectSize(II->getArgOperand(0), Size, DL, TLI))
+ return ReplaceInstUsesWith(CI, ConstantInt::get(CI.getType(), Size));
+ return 0;
}
- case Intrinsic::bswap:
+ case Intrinsic::bswap: {
+ Value *IIOperand = II->getArgOperand(0);
+ Value *X = 0;
+
// bswap(bswap(x)) -> x
- if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getArgOperand(0)))
- if (Operand->getIntrinsicID() == Intrinsic::bswap)
- return ReplaceInstUsesWith(CI, Operand->getArgOperand(0));
+ if (match(IIOperand, m_BSwap(m_Value(X))))
+ return ReplaceInstUsesWith(CI, X);
// bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
- if (TruncInst *TI = dyn_cast<TruncInst>(II->getArgOperand(0))) {
- if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(TI->getOperand(0)))
- if (Operand->getIntrinsicID() == Intrinsic::bswap) {
- unsigned C = Operand->getType()->getPrimitiveSizeInBits() -
- TI->getType()->getPrimitiveSizeInBits();
- Value *CV = ConstantInt::get(Operand->getType(), C);
- Value *V = Builder->CreateLShr(Operand->getArgOperand(0), CV);
- return new TruncInst(V, TI->getType());
- }
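+  // For example, with an i32 x truncated to i16, C is 16 and the result is
+  // trunc(lshr(x, 16)).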
+ if (match(IIOperand, m_Trunc(m_BSwap(m_Value(X))))) {
+ unsigned C = X->getType()->getPrimitiveSizeInBits() -
+ IIOperand->getType()->getPrimitiveSizeInBits();
+ Value *CV = ConstantInt::get(X->getType(), C);
+ Value *V = Builder->CreateLShr(X, CV);
+ return new TruncInst(V, IIOperand->getType());
}
-
break;
+ }
+
case Intrinsic::powi:
if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
// powi(x, 0) -> 1.0
case Intrinsic::ppc_altivec_lvx:
case Intrinsic::ppc_altivec_lvxl:
// Turn PPC lvx -> load if the pointer is known aligned.
- if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, TD) >= 16) {
+ if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL) >= 16) {
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
PointerType::getUnqual(II->getType()));
return new LoadInst(Ptr);
case Intrinsic::ppc_altivec_stvx:
case Intrinsic::ppc_altivec_stvxl:
// Turn stvx -> store if the pointer is known aligned.
- if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, TD) >= 16) {
+ if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL) >= 16) {
Type *OpPtrTy =
PointerType::getUnqual(II->getArgOperand(0)->getType());
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
case Intrinsic::x86_sse2_storeu_pd:
case Intrinsic::x86_sse2_storeu_dq:
// Turn X86 storeu -> store if the pointer is known aligned.
- if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, TD) >= 16) {
+ if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL) >= 16) {
Type *OpPtrTy =
PointerType::getUnqual(II->getArgOperand(1)->getType());
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), OpPtrTy);
break;
}
+ // Constant fold <A x Bi> << Ci.
+ // FIXME: We don't handle _dq because it's a shift of an i128, but is
+ // represented in the IR as <2 x i64>. A per element shift is wrong.
+ case Intrinsic::x86_sse2_psll_d:
+ case Intrinsic::x86_sse2_psll_q:
+ case Intrinsic::x86_sse2_psll_w:
+ case Intrinsic::x86_sse2_pslli_d:
+ case Intrinsic::x86_sse2_pslli_q:
+ case Intrinsic::x86_sse2_pslli_w:
+ case Intrinsic::x86_avx2_psll_d:
+ case Intrinsic::x86_avx2_psll_q:
+ case Intrinsic::x86_avx2_psll_w:
+ case Intrinsic::x86_avx2_pslli_d:
+ case Intrinsic::x86_avx2_pslli_q:
+ case Intrinsic::x86_avx2_pslli_w: {
+ // Simplify if count is constant. To 0 if > BitWidth, otherwise to shl.
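+    // Unlike the IR shl instruction, whose result is undefined when the count
+    // reaches the bit width, these intrinsics are defined to yield zero.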
+ auto CDV = dyn_cast<ConstantDataVector>(II->getArgOperand(1));
+ auto CInt = dyn_cast<ConstantInt>(II->getArgOperand(1));
+ if (!CDV && !CInt)
+ break;
+ ConstantInt *Count;
+ if (CDV)
+ Count = cast<ConstantInt>(CDV->getElementAsConstant(0));
+ else
+ Count = CInt;
+
+ auto Vec = II->getArgOperand(0);
+ auto VT = cast<VectorType>(Vec->getType());
+ if (Count->getZExtValue() >
+ VT->getElementType()->getPrimitiveSizeInBits() - 1)
+ return ReplaceInstUsesWith(
+ CI, ConstantAggregateZero::get(Vec->getType()));
+
+    unsigned VWidth = VT->getNumElements();
+    // Get a constant vector of the same type as the first operand.
+    auto VTCI = ConstantInt::get(VT->getElementType(),
+                                 Count->getZExtValue());
+    return BinaryOperator::CreateShl(
+        Vec, Builder->CreateVectorSplat(VWidth, VTCI));
+ }
case Intrinsic::x86_sse41_pmovsxbw:
case Intrinsic::x86_sse41_pmovsxwd:
break;
}
+ case Intrinsic::x86_sse4a_insertqi: {
+ // insertqi x, y, 64, 0 can just copy y's lower bits and leave the top
+ // ones undef
+ // TODO: eventually we should lower this intrinsic to IR
+ if (auto CIWidth = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
+ if (auto CIStart = dyn_cast<ConstantInt>(II->getArgOperand(3))) {
+ if (CIWidth->equalsInt(64) && CIStart->isZero()) {
+ Value *Vec = II->getArgOperand(1);
+ Value *Undef = UndefValue::get(Vec->getType());
+ const uint32_t Mask[] = { 0, 2 };
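+          // Mask elements 0-1 select from Vec and 2-3 from Undef, so { 0, 2 }
+          // keeps Vec's low element and leaves the high element undef.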
+ return ReplaceInstUsesWith(
+ CI,
+ Builder->CreateShuffleVector(
+ Vec, Undef, ConstantDataVector::get(
+ II->getContext(), ArrayRef<uint32_t>(Mask))));
+
+ } else if (auto Source =
+ dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
+ if (Source->hasOneUse() &&
+ Source->getArgOperand(1) == II->getArgOperand(1)) {
+ // If the source of the insert has only one use and it's another
+ // insert (and they're both inserting from the same vector), try to
+ // bundle both together.
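+            // For example, inserts covering bit ranges [0,16) and [8,24)
+            // merge into one insert covering [0,24).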
+ auto CISourceWidth =
+ dyn_cast<ConstantInt>(Source->getArgOperand(2));
+ auto CISourceStart =
+ dyn_cast<ConstantInt>(Source->getArgOperand(3));
+ if (CISourceStart && CISourceWidth) {
+ unsigned Start = CIStart->getZExtValue();
+ unsigned Width = CIWidth->getZExtValue();
+ unsigned End = Start + Width;
+ unsigned SourceStart = CISourceStart->getZExtValue();
+ unsigned SourceWidth = CISourceWidth->getZExtValue();
+ unsigned SourceEnd = SourceStart + SourceWidth;
+ unsigned NewStart, NewWidth;
+ bool ShouldReplace = false;
+ if (Start <= SourceStart && SourceStart <= End) {
+ NewStart = Start;
+ NewWidth = std::max(End, SourceEnd) - NewStart;
+ ShouldReplace = true;
+ } else if (SourceStart <= Start && Start <= SourceEnd) {
+ NewStart = SourceStart;
+ NewWidth = std::max(SourceEnd, End) - NewStart;
+ ShouldReplace = true;
+ }
+
+ if (ShouldReplace) {
+ Constant *ConstantWidth = ConstantInt::get(
+ II->getArgOperand(2)->getType(), NewWidth, false);
+ Constant *ConstantStart = ConstantInt::get(
+ II->getArgOperand(3)->getType(), NewStart, false);
+ Value *Args[4] = { Source->getArgOperand(0),
+ II->getArgOperand(1), ConstantWidth,
+ ConstantStart };
+ Module *M = CI.getParent()->getParent()->getParent();
+ Value *F =
+ Intrinsic::getDeclaration(M, Intrinsic::x86_sse4a_insertqi);
+ return ReplaceInstUsesWith(CI, Builder->CreateCall(F, Args));
+ }
+ }
+ }
+ }
+ }
+ }
+ break;
+ }
+
+ case Intrinsic::x86_avx_vpermilvar_ps:
+ case Intrinsic::x86_avx_vpermilvar_ps_256:
+ case Intrinsic::x86_avx_vpermilvar_pd:
+ case Intrinsic::x86_avx_vpermilvar_pd_256: {
+ // Convert vpermil* to shufflevector if the mask is constant.
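+    // vpermilvar only permutes elements within its first operand, so the
+    // second shufflevector operand can simply be undef.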
+ Value *V = II->getArgOperand(1);
+ if (auto C = dyn_cast<ConstantDataVector>(V)) {
+ auto V1 = II->getArgOperand(0);
+ auto V2 = UndefValue::get(V1->getType());
+ auto Shuffle = Builder->CreateShuffleVector(V1, V2, C);
+ return ReplaceInstUsesWith(CI, Shuffle);
+ }
+ break;
+ }
+
case Intrinsic::ppc_altivec_vperm:
// Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
if (Constant *Mask = dyn_cast<Constant>(II->getArgOperand(2))) {
case Intrinsic::arm_neon_vst2lane:
case Intrinsic::arm_neon_vst3lane:
case Intrinsic::arm_neon_vst4lane: {
- unsigned MemAlign = getKnownAlignment(II->getArgOperand(0), TD);
+ unsigned MemAlign = getKnownAlignment(II->getArgOperand(0), DL);
unsigned AlignArg = II->getNumArgOperands() - 1;
ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
}
case Intrinsic::arm_neon_vmulls:
- case Intrinsic::arm_neon_vmullu: {
+ case Intrinsic::arm_neon_vmullu:
+ case Intrinsic::arm64_neon_smull:
+ case Intrinsic::arm64_neon_umull: {
Value *Arg0 = II->getArgOperand(0);
Value *Arg1 = II->getArgOperand(1);
}
// Check for constant LHS & RHS - in this case we just simplify.
- bool Zext = (II->getIntrinsicID() == Intrinsic::arm_neon_vmullu);
+ bool Zext = (II->getIntrinsicID() == Intrinsic::arm_neon_vmullu ||
+ II->getIntrinsicID() == Intrinsic::arm64_neon_umull);
VectorType *NewVT = cast<VectorType>(II->getType());
- unsigned NewWidth = NewVT->getElementType()->getIntegerBitWidth();
- if (ConstantDataVector *CV0 = dyn_cast<ConstantDataVector>(Arg0)) {
- if (ConstantDataVector *CV1 = dyn_cast<ConstantDataVector>(Arg1)) {
- VectorType* VT = cast<VectorType>(CV0->getType());
- SmallVector<Constant*, 4> NewElems;
- for (unsigned i = 0; i < VT->getNumElements(); ++i) {
- APInt CV0E =
- (cast<ConstantInt>(CV0->getAggregateElement(i)))->getValue();
- CV0E = Zext ? CV0E.zext(NewWidth) : CV0E.sext(NewWidth);
- APInt CV1E =
- (cast<ConstantInt>(CV1->getAggregateElement(i)))->getValue();
- CV1E = Zext ? CV1E.zext(NewWidth) : CV1E.sext(NewWidth);
- NewElems.push_back(
- ConstantInt::get(NewVT->getElementType(), CV0E * CV1E));
- }
- return ReplaceInstUsesWith(CI, ConstantVector::get(NewElems));
+ if (Constant *CV0 = dyn_cast<Constant>(Arg0)) {
+ if (Constant *CV1 = dyn_cast<Constant>(Arg1)) {
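+        // Widening each operand to the result's element width first makes the
+        // multiply overflow-free, matching the vmull semantics.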
+ CV0 = ConstantExpr::getIntegerCast(CV0, NewVT, /*isSigned=*/!Zext);
+ CV1 = ConstantExpr::getIntegerCast(CV1, NewVT, /*isSigned=*/!Zext);
+
+ return ReplaceInstUsesWith(CI, ConstantExpr::getMul(CV0, CV1));
}
- // Couldn't simplify - cannonicalize constant to the RHS.
+ // Couldn't simplify - canonicalize constant to the RHS.
std::swap(Arg0, Arg1);
}
// Handle mul by one:
- if (ConstantDataVector *CV1 = dyn_cast<ConstantDataVector>(Arg1)) {
+ if (Constant *CV1 = dyn_cast<Constant>(Arg1))
if (ConstantInt *Splat =
- dyn_cast_or_null<ConstantInt>(CV1->getSplatValue())) {
- if (Splat->isOne()) {
- if (Zext)
- return CastInst::CreateZExtOrBitCast(Arg0, II->getType());
- // else
- return CastInst::CreateSExtOrBitCast(Arg0, II->getType());
- }
- }
- }
+ dyn_cast_or_null<ConstantInt>(CV1->getSplatValue()))
+ if (Splat->isOne())
+ return CastInst::CreateIntegerCast(Arg0, II->getType(),
+ /*isSigned=*/!Zext);
break;
}
TerminatorInst *TI = II->getParent()->getTerminator();
bool CannotRemove = false;
for (++BI; &*BI != TI; ++BI) {
- if (isa<AllocaInst>(BI) || isMalloc(BI)) {
+ if (isa<AllocaInst>(BI)) {
CannotRemove = true;
break;
}
/// passed through the varargs area, we can eliminate the use of the cast.
static bool isSafeToEliminateVarargsCast(const CallSite CS,
const CastInst * const CI,
- const TargetData * const TD,
+ const DataLayout * const DL,
const int ix) {
if (!CI->isLosslessCast())
return false;
- // The size of ByVal arguments is derived from the type, so we
+ // The size of ByVal or InAlloca arguments is derived from the type, so we
// can't change to a type with a different size. If the size were
// passed explicitly we could avoid this check.
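+  // For byval, the number of bytes copied is derived from the pointee type,
+  // so e.g. substituting an i8* for an i32* would change the copy size.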
- if (!CS.isByValArgument(ix))
+ if (!CS.isByValOrInAllocaArgument(ix))
return true;
Type* SrcTy =
Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
if (!SrcTy->isSized() || !DstTy->isSized())
return false;
- if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
+ if (!DL || DL->getTypeAllocSize(SrcTy) != DL->getTypeAllocSize(DstTy))
return false;
return true;
}
-namespace {
-class InstCombineFortifiedLibCalls : public SimplifyFortifiedLibCalls {
- InstCombiner *IC;
-protected:
- void replaceCall(Value *With) {
- NewInstruction = IC->ReplaceInstUsesWith(*CI, With);
- }
- bool isFoldable(unsigned SizeCIOp, unsigned SizeArgOp, bool isString) const {
- if (CI->getArgOperand(SizeCIOp) == CI->getArgOperand(SizeArgOp))
- return true;
- if (ConstantInt *SizeCI =
- dyn_cast<ConstantInt>(CI->getArgOperand(SizeCIOp))) {
- if (SizeCI->isAllOnesValue())
- return true;
- if (isString) {
- uint64_t Len = GetStringLength(CI->getArgOperand(SizeArgOp));
- // If the length is 0 we don't know how long it is and so we can't
- // remove the check.
- if (Len == 0) return false;
- return SizeCI->getZExtValue() >= Len;
- }
- if (ConstantInt *Arg = dyn_cast<ConstantInt>(
- CI->getArgOperand(SizeArgOp)))
- return SizeCI->getZExtValue() >= Arg->getZExtValue();
- }
- return false;
- }
-public:
- InstCombineFortifiedLibCalls(InstCombiner *IC) : IC(IC), NewInstruction(0) { }
- Instruction *NewInstruction;
-};
-} // end anonymous namespace
-
// Try to fold some different type of calls here.
// Currently we're only working with the checking functions, memcpy_chk,
// mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,
// strcat_chk and strncat_chk.
-Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const TargetData *TD) {
+Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const DataLayout *DL) {
if (CI->getCalledFunction() == 0) return 0;
- InstCombineFortifiedLibCalls Simplifier(this);
- Simplifier.fold(CI, TD);
- return Simplifier.NewInstruction;
+ if (Value *With = Simplifier->optimizeCall(CI)) {
+ ++NumSimplified;
+ return CI->use_empty() ? CI : ReplaceInstUsesWith(*CI, With);
+ }
+
+ return 0;
}
static IntrinsicInst *FindInitTrampolineFromAlloca(Value *TrampMem) {
// is good enough in practice and simpler than handling any number of casts.
Value *Underlying = TrampMem->stripPointerCasts();
if (Underlying != TrampMem &&
- (!Underlying->hasOneUse() || *Underlying->use_begin() != TrampMem))
+ (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
return 0;
if (!isa<AllocaInst>(Underlying))
return 0;
IntrinsicInst *InitTrampoline = 0;
- for (Value::use_iterator I = TrampMem->use_begin(), E = TrampMem->use_end();
- I != E; I++) {
- IntrinsicInst *II = dyn_cast<IntrinsicInst>(*I);
+ for (User *U : TrampMem->users()) {
+ IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
if (!II)
return 0;
if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
// visitCallSite - Improvements for call and invoke instructions.
//
Instruction *InstCombiner::visitCallSite(CallSite CS) {
+ if (isAllocLikeFn(CS.getInstruction(), TLI))
+ return visitAllocSite(*CS.getInstruction());
+
bool Changed = false;
// If the callee is a pointer to a function, attempt to move any casts to the
new StoreInst(ConstantInt::getTrue(Callee->getContext()),
UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
OldCall);
- // If OldCall dues not return void then replaceAllUsesWith undef.
+ // If OldCall does not return void then replaceAllUsesWith undef.
// This allows ValueHandlers and custom metadata to adjust itself.
if (!OldCall->getType()->isVoidTy())
ReplaceInstUsesWith(*OldCall, UndefValue::get(OldCall->getType()));
}
if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
- // This instruction is not reachable, just remove it. We insert a store to
- // undef so that we know that this code is not reachable, despite the fact
- // that we can't modify the CFG here.
- new StoreInst(ConstantInt::getTrue(Callee->getContext()),
- UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
- CS.getInstruction());
-
// If CS does not return void then replaceAllUsesWith undef.
// This allows ValueHandlers and custom metadata to adjust itself.
if (!CS.getInstruction()->getType()->isVoidTy())
ReplaceInstUsesWith(*CS.getInstruction(),
UndefValue::get(CS.getInstruction()->getType()));
- if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
- // Don't break the CFG, insert a dummy cond branch.
- BranchInst::Create(II->getNormalDest(), II->getUnwindDest(),
- ConstantInt::getTrue(Callee->getContext()), II);
+ if (isa<InvokeInst>(CS.getInstruction())) {
+ // Can't remove an invoke because we cannot change the CFG.
+ return 0;
}
+
+ // This instruction is not reachable, just remove it. We insert a store to
+ // undef so that we know that this code is not reachable, despite the fact
+ // that we can't modify the CFG here.
+ new StoreInst(ConstantInt::getTrue(Callee->getContext()),
+ UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
+ CS.getInstruction());
+
return EraseInstFromFunction(*CS.getInstruction());
}
int ix = FTy->getNumParams();
// See if we can optimize any arguments passed through the varargs area of
// the call.
- for (CallSite::arg_iterator I = CS.arg_begin()+FTy->getNumParams(),
+ for (CallSite::arg_iterator I = CS.arg_begin() + FTy->getNumParams(),
E = CS.arg_end(); I != E; ++I, ++ix) {
CastInst *CI = dyn_cast<CastInst>(*I);
- if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
+ if (CI && isSafeToEliminateVarargsCast(CS, CI, DL, ix)) {
*I = CI->getOperand(0);
Changed = true;
}
Changed = true;
}
- // Try to optimize the call if possible, we require TargetData for most of
+ // Try to optimize the call if possible, we require DataLayout for most of
// this. None of these calls are seen as possibly dead so go ahead and
// delete the instruction now.
if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
- Instruction *I = tryOptimizeCall(CI, TD);
+ Instruction *I = tryOptimizeCall(CI, DL);
// If we changed something return the result, etc. Otherwise let
// the fallthrough check.
if (I) return EraseInstFromFunction(*I);
if (Callee == 0)
return false;
Instruction *Caller = CS.getInstruction();
- const AttrListPtr &CallerPAL = CS.getAttributes();
+ const AttributeSet &CallerPAL = CS.getAttributes();
// Okay, this is a cast from a function to a different type. Unless doing so
// would cause a type conversion of one of our arguments, change this call to
Type *OldRetTy = Caller->getType();
Type *NewRetTy = FT->getReturnType();
- if (NewRetTy->isStructTy())
- return false; // TODO: Handle multiple return values.
-
// Check to see if we are changing the return type...
if (OldRetTy != NewRetTy) {
- if (Callee->isDeclaration() &&
- // Conversion is ok if changing from one pointer type to another or from
- // a pointer to an integer of the same size.
- !((OldRetTy->isPointerTy() || !TD ||
- OldRetTy == TD->getIntPtrType(Caller->getContext())) &&
- (NewRetTy->isPointerTy() || !TD ||
- NewRetTy == TD->getIntPtrType(Caller->getContext()))))
- return false; // Cannot transform this return value.
- if (!Caller->use_empty() &&
- // void -> non-void is handled specially
- !NewRetTy->isVoidTy() && !CastInst::isCastable(NewRetTy, OldRetTy))
+ if (NewRetTy->isStructTy())
+ return false; // TODO: Handle multiple return values.
+
+ if (!CastInst::isBitCastable(NewRetTy, OldRetTy)) {
+ if (Callee->isDeclaration())
+ return false; // Cannot transform this return value.
+
+ if (!Caller->use_empty() &&
+ // void -> non-void is handled specially
+ !NewRetTy->isVoidTy())
return false; // Cannot transform this return value.
+ }
if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
- Attributes RAttrs = CallerPAL.getRetAttributes();
- if (RAttrs & Attribute::typeIncompatible(NewRetTy))
+ AttrBuilder RAttrs(CallerPAL, AttributeSet::ReturnIndex);
+ if (RAttrs.
+ hasAttributes(AttributeFuncs::
+ typeIncompatible(NewRetTy, AttributeSet::ReturnIndex),
+ AttributeSet::ReturnIndex))
return false; // Attribute not compatible with transformed value.
}
// the critical edge). Bail out in this case.
if (!Caller->use_empty())
if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
- for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
- UI != E; ++UI)
- if (PHINode *PN = dyn_cast<PHINode>(*UI))
+ for (User *U : II->users())
+ if (PHINode *PN = dyn_cast<PHINode>(U))
if (PN->getParent() == II->getNormalDest() ||
PN->getParent() == II->getUnwindDest())
return false;
}
- unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
+ unsigned NumActualArgs = CS.arg_size();
unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);
CallSite::arg_iterator AI = CS.arg_begin();
Type *ParamTy = FT->getParamType(i);
Type *ActTy = (*AI)->getType();
- if (!CastInst::isCastable(ActTy, ParamTy))
+ if (!CastInst::isBitCastable(ActTy, ParamTy))
return false; // Cannot transform this parameter value.
- Attributes Attrs = CallerPAL.getParamAttributes(i + 1);
- if (Attrs & Attribute::typeIncompatible(ParamTy))
+ if (AttrBuilder(CallerPAL.getParamAttributes(i + 1), i + 1).
+ hasAttributes(AttributeFuncs::
+ typeIncompatible(ParamTy, i + 1), i + 1))
return false; // Attribute not compatible with transformed value.
+ if (CS.isInAllocaArgument(i))
+ return false; // Cannot transform to and from inalloca.
+
// If the parameter is passed as a byval argument, then we have to have a
// sized type and the sized type has to have the same size as the old type.
- if (ParamTy != ActTy && (Attrs & Attribute::ByVal)) {
+ if (ParamTy != ActTy &&
+ CallerPAL.getParamAttributes(i + 1).hasAttribute(i + 1,
+ Attribute::ByVal)) {
PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
- if (ParamPTy == 0 || !ParamPTy->getElementType()->isSized() || TD == 0)
+ if (ParamPTy == 0 || !ParamPTy->getElementType()->isSized() || DL == 0)
return false;
- Type *CurElTy = cast<PointerType>(ActTy)->getElementType();
- if (TD->getTypeAllocSize(CurElTy) !=
- TD->getTypeAllocSize(ParamPTy->getElementType()))
+ Type *CurElTy = ActTy->getPointerElementType();
+ if (DL->getTypeAllocSize(CurElTy) !=
+ DL->getTypeAllocSize(ParamPTy->getElementType()))
return false;
}
-
- // Converting from one pointer type to another or between a pointer and an
- // integer of the same size is safe even if we do not have a body.
- bool isConvertible = ActTy == ParamTy ||
- (TD && ((ParamTy->isPointerTy() ||
- ParamTy == TD->getIntPtrType(Caller->getContext())) &&
- (ActTy->isPointerTy() ||
- ActTy == TD->getIntPtrType(Caller->getContext()))));
- if (Callee->isDeclaration() && !isConvertible) return false;
}
if (Callee->isDeclaration()) {
// won't be dropping them. Check that these extra arguments have attributes
// that are compatible with being a vararg call argument.
for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
- if (CallerPAL.getSlot(i - 1).Index <= FT->getNumParams())
+ unsigned Index = CallerPAL.getSlotIndex(i - 1);
+ if (Index <= FT->getNumParams())
break;
- Attributes PAttrs = CallerPAL.getSlot(i - 1).Attrs;
- if (PAttrs & Attribute::VarArgsIncompatible)
+
+ // Check if it has an attribute that's incompatible with varargs.
+ AttributeSet PAttrs = CallerPAL.getSlotAttributes(i - 1);
+ if (PAttrs.hasAttribute(Index, Attribute::StructRet))
return false;
}
// inserting cast instructions as necessary.
std::vector<Value*> Args;
Args.reserve(NumActualArgs);
- SmallVector<AttributeWithIndex, 8> attrVec;
+ SmallVector<AttributeSet, 8> attrVec;
attrVec.reserve(NumCommonArgs);
// Get any return attributes.
- Attributes RAttrs = CallerPAL.getRetAttributes();
+ AttrBuilder RAttrs(CallerPAL, AttributeSet::ReturnIndex);
// If the return value is not being used, the type may not be compatible
// with the existing attributes. Wipe out any problematic attributes.
- RAttrs &= ~Attribute::typeIncompatible(NewRetTy);
+ RAttrs.
+ removeAttributes(AttributeFuncs::
+ typeIncompatible(NewRetTy, AttributeSet::ReturnIndex),
+ AttributeSet::ReturnIndex);
// Add the new return attributes.
- if (RAttrs)
- attrVec.push_back(AttributeWithIndex::get(0, RAttrs));
+ if (RAttrs.hasAttributes())
+ attrVec.push_back(AttributeSet::get(Caller->getContext(),
+ AttributeSet::ReturnIndex, RAttrs));
AI = CS.arg_begin();
for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
Type *ParamTy = FT->getParamType(i);
+
if ((*AI)->getType() == ParamTy) {
Args.push_back(*AI);
} else {
- Instruction::CastOps opcode = CastInst::getCastOpcode(*AI,
- false, ParamTy, false);
- Args.push_back(Builder->CreateCast(opcode, *AI, ParamTy));
+ Args.push_back(Builder->CreateBitCast(*AI, ParamTy));
}
// Add any parameter attributes.
- if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
- attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
+ AttrBuilder PAttrs(CallerPAL.getParamAttributes(i + 1), i + 1);
+ if (PAttrs.hasAttributes())
+ attrVec.push_back(AttributeSet::get(Caller->getContext(), i + 1,
+ PAttrs));
}
// If the function takes more arguments than the call was taking, add them
// If we are removing arguments to the function, emit an obnoxious warning.
if (FT->getNumParams() < NumActualArgs) {
- if (!FT->isVarArg()) {
- errs() << "WARNING: While resolving call to function '"
- << Callee->getName() << "' arguments were dropped!\n";
- } else {
+ // TODO: if (!FT->isVarArg()) this call may be unreachable. PR14722
+ if (FT->isVarArg()) {
// Add all of the arguments in their promoted form to the arg list.
for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
Type *PTy = getPromotedType((*AI)->getType());
}
// Add any parameter attributes.
- if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
- attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
+ AttrBuilder PAttrs(CallerPAL.getParamAttributes(i + 1), i + 1);
+ if (PAttrs.hasAttributes())
+ attrVec.push_back(AttributeSet::get(FT->getContext(), i + 1,
+ PAttrs));
}
}
}
- if (Attributes FnAttrs = CallerPAL.getFnAttributes())
- attrVec.push_back(AttributeWithIndex::get(~0, FnAttrs));
+ AttributeSet FnAttrs = CallerPAL.getFnAttributes();
+ if (CallerPAL.hasAttributes(AttributeSet::FunctionIndex))
+ attrVec.push_back(AttributeSet::get(Callee->getContext(), FnAttrs));
if (NewRetTy->isVoidTy())
Caller->setName(""); // Void type should not have a name.
- const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec.begin(),
- attrVec.end());
+ const AttributeSet &NewCallerPAL = AttributeSet::get(Callee->getContext(),
+ attrVec);
Instruction *NC;
if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
Value *NV = NC;
if (OldRetTy != NV->getType() && !Caller->use_empty()) {
if (!NV->getType()->isVoidTy()) {
- Instruction::CastOps opcode =
- CastInst::getCastOpcode(NC, false, OldRetTy, false);
- NV = NC = CastInst::Create(opcode, NC, OldRetTy);
+ NV = NC = CastInst::Create(CastInst::BitCast, NC, OldRetTy);
NC->setDebugLoc(Caller->getDebugLoc());
// If this is an invoke instruction, we should insert it after the first
if (!Caller->use_empty())
ReplaceInstUsesWith(*Caller, NV);
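+  // Even when the result is unused, notify any value handles so they track
+  // the replacement value.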
+ else if (Caller->hasValueHandle())
+ ValueHandleBase::ValueIsRAUWd(Caller, NV);
EraseInstFromFunction(*Caller);
return true;
Value *Callee = CS.getCalledValue();
PointerType *PTy = cast<PointerType>(Callee->getType());
FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
- const AttrListPtr &Attrs = CS.getAttributes();
+ const AttributeSet &Attrs = CS.getAttributes();
// If the call already has the 'nest' attribute somewhere then give up -
// otherwise 'nest' would occur twice after splicing in the chain.
PointerType *NestFPTy = cast<PointerType>(NestF->getType());
FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());
- const AttrListPtr &NestAttrs = NestF->getAttributes();
+ const AttributeSet &NestAttrs = NestF->getAttributes();
if (!NestAttrs.isEmpty()) {
unsigned NestIdx = 1;
Type *NestTy = 0;
- Attributes NestAttr = Attribute::None;
+ AttributeSet NestAttr;
// Look for a parameter marked with the 'nest' attribute.
for (FunctionType::param_iterator I = NestFTy->param_begin(),
E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
- if (NestAttrs.paramHasAttr(NestIdx, Attribute::Nest)) {
+ if (NestAttrs.hasAttribute(NestIdx, Attribute::Nest)) {
// Record the parameter type and any other attributes.
NestTy = *I;
NestAttr = NestAttrs.getParamAttributes(NestIdx);
if (NestTy) {
Instruction *Caller = CS.getInstruction();
std::vector<Value*> NewArgs;
- NewArgs.reserve(unsigned(CS.arg_end()-CS.arg_begin())+1);
+ NewArgs.reserve(CS.arg_size() + 1);
- SmallVector<AttributeWithIndex, 8> NewAttrs;
+ SmallVector<AttributeSet, 8> NewAttrs;
NewAttrs.reserve(Attrs.getNumSlots() + 1);
// Insert the nest argument into the call argument list, which may
// mean appending it. Likewise for attributes.
// Add any result attributes.
- if (Attributes Attr = Attrs.getRetAttributes())
- NewAttrs.push_back(AttributeWithIndex::get(0, Attr));
+ if (Attrs.hasAttributes(AttributeSet::ReturnIndex))
+ NewAttrs.push_back(AttributeSet::get(Caller->getContext(),
+ Attrs.getRetAttributes()));
{
unsigned Idx = 1;
if (NestVal->getType() != NestTy)
NestVal = Builder->CreateBitCast(NestVal, NestTy, "nest");
NewArgs.push_back(NestVal);
- NewAttrs.push_back(AttributeWithIndex::get(NestIdx, NestAttr));
+ NewAttrs.push_back(AttributeSet::get(Caller->getContext(),
+ NestAttr));
}
if (I == E)
// Add the original argument and attributes.
NewArgs.push_back(*I);
- if (Attributes Attr = Attrs.getParamAttributes(Idx))
- NewAttrs.push_back
- (AttributeWithIndex::get(Idx + (Idx >= NestIdx), Attr));
+ AttributeSet Attr = Attrs.getParamAttributes(Idx);
+ if (Attr.hasAttributes(Idx)) {
+ AttrBuilder B(Attr, Idx);
+ NewAttrs.push_back(AttributeSet::get(Caller->getContext(),
+ Idx + (Idx >= NestIdx), B));
+ }
++Idx, ++I;
} while (1);
}
// Add any function attributes.
- if (Attributes Attr = Attrs.getFnAttributes())
- NewAttrs.push_back(AttributeWithIndex::get(~0, Attr));
+ if (Attrs.hasAttributes(AttributeSet::FunctionIndex))
+ NewAttrs.push_back(AttributeSet::get(FTy->getContext(),
+ Attrs.getFnAttributes()));
// The trampoline may have been bitcast to a bogus type (FTy).
// Handle this by synthesizing a new function type, equal to FTy
NestF->getType() == PointerType::getUnqual(NewFTy) ?
NestF : ConstantExpr::getBitCast(NestF,
PointerType::getUnqual(NewFTy));
- const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs.begin(),
- NewAttrs.end());
+ const AttributeSet &NewPAL =
+ AttributeSet::get(FTy->getContext(), NewAttrs);
Instruction *NewCaller;
if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {