1 //===- InstCombineCalls.cpp -----------------------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the visitCall and visitInvoke functions.
12 //===----------------------------------------------------------------------===//
14 #include "InstCombineInternal.h"
15 #include "llvm/ADT/Statistic.h"
16 #include "llvm/Analysis/InstructionSimplify.h"
17 #include "llvm/Analysis/MemoryBuiltins.h"
18 #include "llvm/IR/CallSite.h"
19 #include "llvm/IR/Dominators.h"
20 #include "llvm/IR/PatternMatch.h"
21 #include "llvm/IR/Statepoint.h"
22 #include "llvm/Transforms/Utils/BuildLibCalls.h"
23 #include "llvm/Transforms/Utils/Local.h"
24 #include "llvm/Transforms/Utils/SimplifyLibCalls.h"
using namespace llvm;
using namespace PatternMatch;
28 #define DEBUG_TYPE "instcombine"
30 STATISTIC(NumSimplified, "Number of library calls simplified");
/// getPromotedType - Return the specified type promoted as it would be to pass
/// through a va_arg area.
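/// For example, an i8 or i16 argument is promoted to i32; all other types are
/// returned unchanged.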
34 static Type *getPromotedType(Type *Ty) {
35 if (IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
36 if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}
42 /// reduceToSingleValueType - Given an aggregate type which ultimately holds a
43 /// single scalar element, like {{{type}}} or [1 x type], return type.
44 static Type *reduceToSingleValueType(Type *T) {
45 while (!T->isSingleValueType()) {
46 if (StructType *STy = dyn_cast<StructType>(T)) {
47 if (STy->getNumElements() == 1)
        T = STy->getElementType(0);
      else
        break;
51 } else if (ArrayType *ATy = dyn_cast<ArrayType>(T)) {
52 if (ATy->getNumElements() == 1)
        T = ATy->getElementType();
      else
        break;
    } else
      break;
  }

  return T;
}
63 Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
64 unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), DL, MI, AC, DT);
65 unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), DL, MI, AC, DT);
66 unsigned MinAlign = std::min(DstAlign, SrcAlign);
67 unsigned CopyAlign = MI->getAlignment();
69 if (CopyAlign < MinAlign) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(), MinAlign, false));
    return MI;
  }
  // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with a
  // load/store.
76 ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getArgOperand(2));
77 if (!MemOpLength) return nullptr;
79 // Source and destination pointer types are always "i8*" for intrinsic. See
80 // if the size is something we can handle with a single primitive load/store.
  // A single load+store correctly handles overlapping memory in the memmove
  // case.
83 uint64_t Size = MemOpLength->getLimitedValue();
84 assert(Size && "0-sized memory transferring should be removed already.");
86 if (Size > 8 || (Size&(Size-1)))
87 return nullptr; // If not 1/2/4/8 bytes, exit.
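  // For example, a 4-byte copy can become a single i32 load+store, while a
  // 3-byte or 16-byte copy is left as an intrinsic.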
89 // Use an integer load+store unless we can find something better.
  unsigned SrcAddrSp =
    cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
  unsigned DstAddrSp =
    cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();
95 IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
96 Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
97 Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);
99 // Memcpy forces the use of i8* for the source and destination. That means
100 // that if you're using memcpy to move one double around, you'll get a cast
101 // from double* to i8*. We'd much rather use a double load+store rather than
102 // an i64 load+store, here because this improves the odds that the source or
  // dest address will be promotable. See if we can find a better type than the
  // integer datatype.
105 Value *StrippedDest = MI->getArgOperand(0)->stripPointerCasts();
106 MDNode *CopyMD = nullptr;
107 if (StrippedDest != MI->getArgOperand(0)) {
    Type *SrcETy = cast<PointerType>(StrippedDest->getType())
      ->getElementType();
110 if (SrcETy->isSized() && DL.getTypeStoreSize(SrcETy) == Size) {
111 // The SrcETy might be something like {{{double}}} or [1 x double]. Rip
112 // down through these levels if so.
113 SrcETy = reduceToSingleValueType(SrcETy);
115 if (SrcETy->isSingleValueType()) {
116 NewSrcPtrTy = PointerType::get(SrcETy, SrcAddrSp);
117 NewDstPtrTy = PointerType::get(SrcETy, DstAddrSp);
119 // If the memcpy has metadata describing the members, see if we can
120 // get the TBAA tag describing our copy.
121 if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa_struct)) {
          if (M->getNumOperands() == 3 && M->getOperand(0) &&
              mdconst::hasa<ConstantInt>(M->getOperand(0)) &&
              mdconst::extract<ConstantInt>(M->getOperand(0))->isNullValue() &&
              M->getOperand(1) &&
              mdconst::hasa<ConstantInt>(M->getOperand(1)) &&
              mdconst::extract<ConstantInt>(M->getOperand(1))->getValue() ==
                  Size &&
              M->getOperand(2) && isa<MDNode>(M->getOperand(2)))
            CopyMD = cast<MDNode>(M->getOperand(2));
        }
      }
    }
  }
  // If the memcpy/memmove provides better alignment info than we can
  // determine, use it.
138 SrcAlign = std::max(SrcAlign, CopyAlign);
139 DstAlign = std::max(DstAlign, CopyAlign);
141 Value *Src = Builder->CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
142 Value *Dest = Builder->CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
143 LoadInst *L = Builder->CreateLoad(Src, MI->isVolatile());
144 L->setAlignment(SrcAlign);
  if (CopyMD)
    L->setMetadata(LLVMContext::MD_tbaa, CopyMD);
147 StoreInst *S = Builder->CreateStore(L, Dest, MI->isVolatile());
148 S->setAlignment(DstAlign);
  if (CopyMD)
    S->setMetadata(LLVMContext::MD_tbaa, CopyMD);
152 // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setArgOperand(2, Constant::getNullValue(MemOpLength->getType()));
  return MI;
}
157 Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
158 unsigned Alignment = getKnownAlignment(MI->getDest(), DL, MI, AC, DT);
159 if (MI->getAlignment() < Alignment) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      Alignment, false));
    return MI;
  }
165 // Extract the length and alignment and fill if they are constant.
166 ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
167 ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return nullptr;
170 uint64_t Len = LenC->getLimitedValue();
171 Alignment = MI->getAlignment();
172 assert(Len && "0-sized memory setting should be removed already.");
174 // memset(s,c,n) -> store s, c (for n=1,2,4,8)
175 if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
176 Type *ITy = IntegerType::get(MI->getContext(), Len*8); // n=1 -> i8.
178 Value *Dest = MI->getDest();
179 unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
180 Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
181 Dest = Builder->CreateBitCast(Dest, NewDstPtrTy);
183 // Alignment 0 is identity for alignment 1 for memset, but not store.
184 if (Alignment == 0) Alignment = 1;
186 // Extract the fill value and store.
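    // The fill byte is splatted across the whole integer; e.g. a fill value of
    // 0xAB with Len == 4 produces the i32 constant 0xABABABAB.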
187 uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    StoreInst *S = Builder->CreateStore(ConstantInt::get(ITy, Fill), Dest,
                                        MI->isVolatile());
190 S->setAlignment(Alignment);
192 // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return nullptr;
}
200 static Value *SimplifyX86immshift(const IntrinsicInst &II,
201 InstCombiner::BuilderTy &Builder) {
202 bool LogicalShift = false;
203 bool ShiftLeft = false;
  switch (II.getIntrinsicID()) {
  default: llvm_unreachable("Unexpected intrinsic!");
208 case Intrinsic::x86_sse2_psra_d:
209 case Intrinsic::x86_sse2_psra_w:
210 case Intrinsic::x86_sse2_psrai_d:
211 case Intrinsic::x86_sse2_psrai_w:
212 case Intrinsic::x86_avx2_psra_d:
213 case Intrinsic::x86_avx2_psra_w:
214 case Intrinsic::x86_avx2_psrai_d:
215 case Intrinsic::x86_avx2_psrai_w:
    LogicalShift = false; ShiftLeft = false;
    break;
218 case Intrinsic::x86_sse2_psrl_d:
219 case Intrinsic::x86_sse2_psrl_q:
220 case Intrinsic::x86_sse2_psrl_w:
221 case Intrinsic::x86_sse2_psrli_d:
222 case Intrinsic::x86_sse2_psrli_q:
223 case Intrinsic::x86_sse2_psrli_w:
224 case Intrinsic::x86_avx2_psrl_d:
225 case Intrinsic::x86_avx2_psrl_q:
226 case Intrinsic::x86_avx2_psrl_w:
227 case Intrinsic::x86_avx2_psrli_d:
228 case Intrinsic::x86_avx2_psrli_q:
229 case Intrinsic::x86_avx2_psrli_w:
    LogicalShift = true; ShiftLeft = false;
    break;
232 case Intrinsic::x86_sse2_psll_d:
233 case Intrinsic::x86_sse2_psll_q:
234 case Intrinsic::x86_sse2_psll_w:
235 case Intrinsic::x86_sse2_pslli_d:
236 case Intrinsic::x86_sse2_pslli_q:
237 case Intrinsic::x86_sse2_pslli_w:
238 case Intrinsic::x86_avx2_psll_d:
239 case Intrinsic::x86_avx2_psll_q:
240 case Intrinsic::x86_avx2_psll_w:
241 case Intrinsic::x86_avx2_pslli_d:
242 case Intrinsic::x86_avx2_pslli_q:
243 case Intrinsic::x86_avx2_pslli_w:
    LogicalShift = true; ShiftLeft = true;
    break;
  }
247 assert((LogicalShift || !ShiftLeft) && "Only logical shifts can shift left");
249 // Simplify if count is constant.
250 auto Arg1 = II.getArgOperand(1);
251 auto CAZ = dyn_cast<ConstantAggregateZero>(Arg1);
252 auto CDV = dyn_cast<ConstantDataVector>(Arg1);
253 auto CInt = dyn_cast<ConstantInt>(Arg1);
  if (!CAZ && !CDV && !CInt)
    return nullptr;

  APInt Count(64, 0);
  if (CDV) {
    // SSE2/AVX2 uses only the first 64 bits of the 128-bit vector
    // operand to compute the shift amount.
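    // E.g. for a <8 x i16> count vector, the low four 16-bit elements are
    // concatenated, with element 0 ending up in the least significant bits.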
261 auto VT = cast<VectorType>(CDV->getType());
262 unsigned BitWidth = VT->getElementType()->getPrimitiveSizeInBits();
263 assert((64 % BitWidth) == 0 && "Unexpected packed shift size");
264 unsigned NumSubElts = 64 / BitWidth;
266 // Concatenate the sub-elements to create the 64-bit value.
267 for (unsigned i = 0; i != NumSubElts; ++i) {
268 unsigned SubEltIdx = (NumSubElts - 1) - i;
269 auto SubElt = cast<ConstantInt>(CDV->getElementAsConstant(SubEltIdx));
270 Count = Count.shl(BitWidth);
271 Count |= SubElt->getValue().zextOrTrunc(64);
    }
  }
  else if (CInt)
    Count = CInt->getValue();
277 auto Vec = II.getArgOperand(0);
278 auto VT = cast<VectorType>(Vec->getType());
279 auto SVT = VT->getElementType();
280 unsigned VWidth = VT->getNumElements();
281 unsigned BitWidth = SVT->getPrimitiveSizeInBits();
  // If shift-by-zero then just return the original value.
  if (Count == 0)
    return Vec;
287 // Handle cases when Shift >= BitWidth.
288 if (Count.uge(BitWidth)) {
289 // If LogicalShift - just return zero.
    if (LogicalShift)
      return ConstantAggregateZero::get(VT);
293 // If ArithmeticShift - clamp Shift to (BitWidth - 1).
    Count = APInt(64, BitWidth - 1);
  }
297 // Get a constant vector of the same type as the first operand.
298 auto ShiftAmt = ConstantInt::get(SVT, Count.zextOrTrunc(BitWidth));
299 auto ShiftVec = Builder.CreateVectorSplat(VWidth, ShiftAmt);
  if (ShiftLeft)
    return Builder.CreateShl(Vec, ShiftVec);
  if (LogicalShift)
    return Builder.CreateLShr(Vec, ShiftVec);
  return Builder.CreateAShr(Vec, ShiftVec);
}
310 static Value *SimplifyX86extend(const IntrinsicInst &II,
                                InstCombiner::BuilderTy &Builder,
                                bool SignExtend) {
313 VectorType *SrcTy = cast<VectorType>(II.getArgOperand(0)->getType());
314 VectorType *DstTy = cast<VectorType>(II.getType());
315 unsigned NumDstElts = DstTy->getNumElements();
317 // Extract a subvector of the first NumDstElts lanes and sign/zero extend.
318 SmallVector<int, 8> ShuffleMask;
319 for (int i = 0; i != (int)NumDstElts; ++i)
320 ShuffleMask.push_back(i);
322 Value *SV = Builder.CreateShuffleVector(II.getArgOperand(0),
323 UndefValue::get(SrcTy), ShuffleMask);
324 return SignExtend ? Builder.CreateSExt(SV, DstTy)
                    : Builder.CreateZExt(SV, DstTy);
}
328 static Value *SimplifyX86insertps(const IntrinsicInst &II,
329 InstCombiner::BuilderTy &Builder) {
330 if (auto *CInt = dyn_cast<ConstantInt>(II.getArgOperand(2))) {
331 VectorType *VecTy = cast<VectorType>(II.getType());
332 assert(VecTy->getNumElements() == 4 && "insertps with wrong vector type");
334 // The immediate permute control byte looks like this:
335 // [3:0] - zero mask for each 32-bit lane
336 // [5:4] - select one 32-bit destination lane
337 // [7:6] - select one 32-bit source lane
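    // For example, Imm == 0x20 copies source lane 0 into destination lane 2
    // and leaves the zero mask clear.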
339 uint8_t Imm = CInt->getZExtValue();
340 uint8_t ZMask = Imm & 0xf;
341 uint8_t DestLane = (Imm >> 4) & 0x3;
342 uint8_t SourceLane = (Imm >> 6) & 0x3;
344 ConstantAggregateZero *ZeroVector = ConstantAggregateZero::get(VecTy);
346 // If all zero mask bits are set, this was just a weird way to
    // generate a zero vector.
    if (ZMask == 0xf)
      return ZeroVector;
351 // Initialize by passing all of the first source bits through.
352 int ShuffleMask[4] = { 0, 1, 2, 3 };
354 // We may replace the second operand with the zero vector.
355 Value *V1 = II.getArgOperand(1);
358 // If the zero mask is being used with a single input or the zero mask
359 // overrides the destination lane, this is a shuffle with the zero vector.
360 if ((II.getArgOperand(0) == II.getArgOperand(1)) ||
        (ZMask & (1 << DestLane))) {
      V1 = ZeroVector;
      // We may still move 32-bits of the first source vector from one lane
      // to another.
365 ShuffleMask[DestLane] = SourceLane;
366 // The zero mask may override the previous insert operation.
367 for (unsigned i = 0; i < 4; ++i)
368 if ((ZMask >> i) & 0x1)
          ShuffleMask[i] = i + 4;
    } else {
      // TODO: Model this case as 2 shuffles or a 'logical and' plus shuffle?
      if (ZMask)
        return nullptr;
375 // Replace the selected destination lane with the selected source lane.
      ShuffleMask[DestLane] = SourceLane + 4;
    }
    return Builder.CreateShuffleVector(II.getArgOperand(0), V1, ShuffleMask);
  }
  return nullptr;
}
384 /// The shuffle mask for a perm2*128 selects any two halves of two 256-bit
385 /// source vectors, unless a zero bit is set. If a zero bit is set,
386 /// then ignore that half of the mask and clear that half of the vector.
387 static Value *SimplifyX86vperm2(const IntrinsicInst &II,
388 InstCombiner::BuilderTy &Builder) {
389 if (auto *CInt = dyn_cast<ConstantInt>(II.getArgOperand(2))) {
390 VectorType *VecTy = cast<VectorType>(II.getType());
391 ConstantAggregateZero *ZeroVector = ConstantAggregateZero::get(VecTy);
393 // The immediate permute control byte looks like this:
    // [1:0] - select 128 bits from sources for low half of destination
    // [2]   - ignore
    // [3]   - zero low half of destination
    // [5:4] - select 128 bits from sources for high half of destination
    // [6]   - ignore
    // [7]   - zero high half of destination
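    // For example, Imm == 0x31 selects the high half of the first source for
    // the low half of the result and the high half of the second source for
    // the high half of the result, with no zeroing.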
401 uint8_t Imm = CInt->getZExtValue();
403 bool LowHalfZero = Imm & 0x08;
404 bool HighHalfZero = Imm & 0x80;
406 // If both zero mask bits are set, this was just a weird way to
407 // generate a zero vector.
    if (LowHalfZero && HighHalfZero)
      return ZeroVector;
411 // If 0 or 1 zero mask bits are set, this is a simple shuffle.
412 unsigned NumElts = VecTy->getNumElements();
413 unsigned HalfSize = NumElts / 2;
414 SmallVector<int, 8> ShuffleMask(NumElts);
416 // The high bit of the selection field chooses the 1st or 2nd operand.
417 bool LowInputSelect = Imm & 0x02;
418 bool HighInputSelect = Imm & 0x20;
420 // The low bit of the selection field chooses the low or high half
421 // of the selected operand.
422 bool LowHalfSelect = Imm & 0x01;
423 bool HighHalfSelect = Imm & 0x10;
425 // Determine which operand(s) are actually in use for this instruction.
426 Value *V0 = LowInputSelect ? II.getArgOperand(1) : II.getArgOperand(0);
427 Value *V1 = HighInputSelect ? II.getArgOperand(1) : II.getArgOperand(0);
429 // If needed, replace operands based on zero mask.
430 V0 = LowHalfZero ? ZeroVector : V0;
431 V1 = HighHalfZero ? ZeroVector : V1;
433 // Permute low half of result.
434 unsigned StartIndex = LowHalfSelect ? HalfSize : 0;
435 for (unsigned i = 0; i < HalfSize; ++i)
436 ShuffleMask[i] = StartIndex + i;
438 // Permute high half of result.
439 StartIndex = HighHalfSelect ? HalfSize : 0;
440 StartIndex += NumElts;
441 for (unsigned i = 0; i < HalfSize; ++i)
442 ShuffleMask[i + HalfSize] = StartIndex + i;
    return Builder.CreateShuffleVector(V0, V1, ShuffleMask);
  }
  return nullptr;
}
449 /// Decode XOP integer vector comparison intrinsics.
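/// For example, a signed vpcomb with an immediate of 0 becomes an ICMP_SLT
/// whose result is sign-extended back to the byte vector (all-ones lanes for
/// true, all-zeros lanes for false).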
450 static Value *SimplifyX86vpcom(const IntrinsicInst &II,
451 InstCombiner::BuilderTy &Builder, bool IsSigned) {
452 if (auto *CInt = dyn_cast<ConstantInt>(II.getArgOperand(2))) {
453 uint64_t Imm = CInt->getZExtValue() & 0x7;
454 VectorType *VecTy = cast<VectorType>(II.getType());
    CmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
    switch (Imm) {
    case 0x0:
      Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
      break;
    case 0x1:
      Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
      break;
    case 0x2:
      Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
      break;
    case 0x3:
      Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
      break;
    case 0x4:
      Pred = ICmpInst::ICMP_EQ; break;
    case 0x5:
      Pred = ICmpInst::ICMP_NE; break;
    case 0x6:
      return ConstantInt::getSigned(VecTy, 0); // FALSE
    case 0x7:
      return ConstantInt::getSigned(VecTy, -1); // TRUE
    }
480 if (Value *Cmp = Builder.CreateICmp(Pred, II.getArgOperand(0), II.getArgOperand(1)))
      return Builder.CreateSExtOrTrunc(Cmp, VecTy);
  }
  return nullptr;
}
486 /// visitCallInst - CallInst simplification. This mostly only handles folding
487 /// of intrinsic instructions. For normal calls, it allows visitCallSite to do
488 /// the heavy lifting.
490 Instruction *InstCombiner::visitCallInst(CallInst &CI) {
491 auto Args = CI.arg_operands();
  if (Value *V = SimplifyCall(CI.getCalledValue(), Args.begin(), Args.end(), DL,
                              TLI, DT, AC))
494 return ReplaceInstUsesWith(CI, V);
496 if (isFreeCall(&CI, TLI))
497 return visitFree(CI);
  // If the caller function is nounwind, mark the call as nounwind, even if the
  // callee isn't.
501 if (CI.getParent()->getParent()->doesNotThrow() &&
502 !CI.doesNotThrow()) {
    CI.setDoesNotThrow();
    return &CI;
  }
507 IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
508 if (!II) return visitCallSite(&CI);
  // Intrinsics cannot occur in an invoke, so handle them here instead of in
  // visitCallSite.
512 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
513 bool Changed = false;
515 // memmove/cpy/set of zero bytes is a noop.
516 if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
517 if (NumBytes->isNullValue())
518 return EraseInstFromFunction(CI);
520 if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
521 if (CI->getZExtValue() == 1) {
522 // Replace the instruction with just byte operations. We would
523 // transform other cases to loads/stores, but we don't know if
        // alignment is sufficient.
      }
    }
528 // No other transformations apply to volatile transfers.
    if (MI->isVolatile())
      return nullptr;
532 // If we have a memmove and the source operation is a constant global,
533 // then the source and dest pointers can't alias, so we can change this
534 // into a call to memcpy.
535 if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
536 if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
537 if (GVSrc->isConstant()) {
538 Module *M = CI.getParent()->getParent()->getParent();
539 Intrinsic::ID MemCpyID = Intrinsic::memcpy;
540 Type *Tys[3] = { CI.getArgOperand(0)->getType(),
541 CI.getArgOperand(1)->getType(),
542 CI.getArgOperand(2)->getType() };
          CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys));
          Changed = true;
        }
    }
548 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
549 // memmove(x,x,size) -> noop.
550 if (MTI->getSource() == MTI->getDest())
        return EraseInstFromFunction(CI);
    }
554 // If we can determine a pointer alignment that is bigger than currently
555 // set, update the alignment.
556 if (isa<MemTransferInst>(MI)) {
      if (Instruction *I = SimplifyMemTransfer(MI))
        return I;
559 } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
      if (Instruction *I = SimplifyMemSet(MSI))
        return I;
    }
564 if (Changed) return II;
  auto SimplifyDemandedVectorEltsLow = [this](Value *Op, unsigned Width,
                                              unsigned DemandedWidth) {
569 APInt UndefElts(Width, 0);
570 APInt DemandedElts = APInt::getLowBitsSet(Width, DemandedWidth);
    return SimplifyDemandedVectorElts(Op, DemandedElts, UndefElts);
  };
  switch (II->getIntrinsicID()) {
  default: break;
  case Intrinsic::objectsize: {
    uint64_t Size;
578 if (getObjectSize(II->getArgOperand(0), Size, DL, TLI))
      return ReplaceInstUsesWith(CI, ConstantInt::get(CI.getType(), Size));
    return nullptr;
  }
582 case Intrinsic::bswap: {
    Value *IIOperand = II->getArgOperand(0);
    Value *X = nullptr;
586 // bswap(bswap(x)) -> x
587 if (match(IIOperand, m_BSwap(m_Value(X))))
588 return ReplaceInstUsesWith(CI, X);
590 // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
591 if (match(IIOperand, m_Trunc(m_BSwap(m_Value(X))))) {
592 unsigned C = X->getType()->getPrimitiveSizeInBits() -
593 IIOperand->getType()->getPrimitiveSizeInBits();
594 Value *CV = ConstantInt::get(X->getType(), C);
595 Value *V = Builder->CreateLShr(X, CV);
      return new TruncInst(V, IIOperand->getType());
    }
    break;
  }
601 case Intrinsic::powi:
602 if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // powi(x, 0) -> 1.0
      if (Power->isZero())
        return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
      // powi(x, 1) -> x
      if (Power->isOne())
        return ReplaceInstUsesWith(CI, II->getArgOperand(0));
609 // powi(x, -1) -> 1/x
610 if (Power->isAllOnesValue())
611 return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
                                          II->getArgOperand(0));
    }
    break;
615 case Intrinsic::cttz: {
616 // If all bits below the first known one are known zero,
617 // this value is constant.
618 IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
    // FIXME: Try to simplify vectors of integers.
    if (!IT)
      break;
621 uint32_t BitWidth = IT->getBitWidth();
622 APInt KnownZero(BitWidth, 0);
623 APInt KnownOne(BitWidth, 0);
624 computeKnownBits(II->getArgOperand(0), KnownZero, KnownOne, 0, II);
625 unsigned TrailingZeros = KnownOne.countTrailingZeros();
626 APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
627 if ((Mask & KnownZero) == Mask)
628 return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, TrailingZeros)));
    break;
  }
633 case Intrinsic::ctlz: {
634 // If all bits above the first known one are known zero,
635 // this value is constant.
636 IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
    // FIXME: Try to simplify vectors of integers.
    if (!IT)
      break;
639 uint32_t BitWidth = IT->getBitWidth();
640 APInt KnownZero(BitWidth, 0);
641 APInt KnownOne(BitWidth, 0);
642 computeKnownBits(II->getArgOperand(0), KnownZero, KnownOne, 0, II);
643 unsigned LeadingZeros = KnownOne.countLeadingZeros();
644 APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
645 if ((Mask & KnownZero) == Mask)
646 return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, LeadingZeros)));
    break;
  }
652 case Intrinsic::uadd_with_overflow:
653 case Intrinsic::sadd_with_overflow:
654 case Intrinsic::umul_with_overflow:
655 case Intrinsic::smul_with_overflow:
656 if (isa<Constant>(II->getArgOperand(0)) &&
657 !isa<Constant>(II->getArgOperand(1))) {
658 // Canonicalize constants into the RHS.
659 Value *LHS = II->getArgOperand(0);
660 II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }
    // fall through
666 case Intrinsic::usub_with_overflow:
667 case Intrinsic::ssub_with_overflow: {
668 OverflowCheckFlavor OCF =
669 IntrinsicIDToOverflowCheckFlavor(II->getIntrinsicID());
670 assert(OCF != OCF_INVALID && "unexpected!");
672 Value *OperationResult = nullptr;
673 Constant *OverflowResult = nullptr;
674 if (OptimizeOverflowCheck(OCF, II->getArgOperand(0), II->getArgOperand(1),
675 *II, OperationResult, OverflowResult))
      return CreateOverflowTuple(II, OperationResult, OverflowResult);
    break;
  }
681 case Intrinsic::minnum:
682 case Intrinsic::maxnum: {
683 Value *Arg0 = II->getArgOperand(0);
684 Value *Arg1 = II->getArgOperand(1);
    // fmin(x, x) -> x
    if (Arg0 == Arg1)
      return ReplaceInstUsesWith(CI, Arg0);
690 const ConstantFP *C0 = dyn_cast<ConstantFP>(Arg0);
691 const ConstantFP *C1 = dyn_cast<ConstantFP>(Arg1);
693 // Canonicalize constants into the RHS.
    if (C0 && !C1) {
      II->setArgOperand(0, Arg1);
      II->setArgOperand(1, Arg0);
      return II;
    }
    // fmin(x, nan) -> x
    if (C1 && C1->isNaN())
702 return ReplaceInstUsesWith(CI, Arg0);
704 // This is the value because if undef were NaN, we would return the other
705 // value and cannot return a NaN unless both operands are.
707 // fmin(undef, x) -> x
708 if (isa<UndefValue>(Arg0))
709 return ReplaceInstUsesWith(CI, Arg1);
711 // fmin(x, undef) -> x
712 if (isa<UndefValue>(Arg1))
      return ReplaceInstUsesWith(CI, Arg0);

    Value *X = nullptr;
    Value *Y = nullptr;
717 if (II->getIntrinsicID() == Intrinsic::minnum) {
718 // fmin(x, fmin(x, y)) -> fmin(x, y)
719 // fmin(y, fmin(x, y)) -> fmin(x, y)
720 if (match(Arg1, m_FMin(m_Value(X), m_Value(Y)))) {
721 if (Arg0 == X || Arg0 == Y)
          return ReplaceInstUsesWith(CI, Arg1);
      }
725 // fmin(fmin(x, y), x) -> fmin(x, y)
726 // fmin(fmin(x, y), y) -> fmin(x, y)
727 if (match(Arg0, m_FMin(m_Value(X), m_Value(Y)))) {
728 if (Arg1 == X || Arg1 == Y)
          return ReplaceInstUsesWith(CI, Arg0);
      }
732 // TODO: fmin(nnan x, inf) -> x
733 // TODO: fmin(nnan ninf x, flt_max) -> x
734 if (C1 && C1->isInfinity()) {
735 // fmin(x, -inf) -> -inf
736 if (C1->isNegative())
          return ReplaceInstUsesWith(CI, Arg1);
      }
    } else {
740 assert(II->getIntrinsicID() == Intrinsic::maxnum);
741 // fmax(x, fmax(x, y)) -> fmax(x, y)
742 // fmax(y, fmax(x, y)) -> fmax(x, y)
743 if (match(Arg1, m_FMax(m_Value(X), m_Value(Y)))) {
744 if (Arg0 == X || Arg0 == Y)
          return ReplaceInstUsesWith(CI, Arg1);
      }
748 // fmax(fmax(x, y), x) -> fmax(x, y)
749 // fmax(fmax(x, y), y) -> fmax(x, y)
750 if (match(Arg0, m_FMax(m_Value(X), m_Value(Y)))) {
751 if (Arg1 == X || Arg1 == Y)
          return ReplaceInstUsesWith(CI, Arg0);
      }
755 // TODO: fmax(nnan x, -inf) -> x
756 // TODO: fmax(nnan ninf x, -flt_max) -> x
757 if (C1 && C1->isInfinity()) {
758 // fmax(x, inf) -> inf
759 if (!C1->isNegative())
          return ReplaceInstUsesWith(CI, Arg1);
      }
    }
    break;
  }
765 case Intrinsic::ppc_altivec_lvx:
766 case Intrinsic::ppc_altivec_lvxl:
767 // Turn PPC lvx -> load if the pointer is known aligned.
768 if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, AC, DT) >=
770 Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
771 PointerType::getUnqual(II->getType()));
772 return new LoadInst(Ptr);
775 case Intrinsic::ppc_vsx_lxvw4x:
776 case Intrinsic::ppc_vsx_lxvd2x: {
777 // Turn PPC VSX loads into normal loads.
778 Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
779 PointerType::getUnqual(II->getType()));
780 return new LoadInst(Ptr, Twine(""), false, 1);
782 case Intrinsic::ppc_altivec_stvx:
783 case Intrinsic::ppc_altivec_stvxl:
784 // Turn stvx -> store if the pointer is known aligned.
785 if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, AC, DT) >=
788 PointerType::getUnqual(II->getArgOperand(0)->getType());
789 Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
790 return new StoreInst(II->getArgOperand(0), Ptr);
793 case Intrinsic::ppc_vsx_stxvw4x:
794 case Intrinsic::ppc_vsx_stxvd2x: {
795 // Turn PPC VSX stores into normal stores.
796 Type *OpPtrTy = PointerType::getUnqual(II->getArgOperand(0)->getType());
797 Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
798 return new StoreInst(II->getArgOperand(0), Ptr, false, 1);
800 case Intrinsic::ppc_qpx_qvlfs:
801 // Turn PPC QPX qvlfs -> load if the pointer is known aligned.
802 if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, AC, DT) >=
804 Type *VTy = VectorType::get(Builder->getFloatTy(),
805 II->getType()->getVectorNumElements());
806 Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
807 PointerType::getUnqual(VTy));
808 Value *Load = Builder->CreateLoad(Ptr);
809 return new FPExtInst(Load, II->getType());
812 case Intrinsic::ppc_qpx_qvlfd:
813 // Turn PPC QPX qvlfd -> load if the pointer is known aligned.
814 if (getOrEnforceKnownAlignment(II->getArgOperand(0), 32, DL, II, AC, DT) >=
816 Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
817 PointerType::getUnqual(II->getType()));
818 return new LoadInst(Ptr);
821 case Intrinsic::ppc_qpx_qvstfs:
822 // Turn PPC QPX qvstfs -> store if the pointer is known aligned.
823 if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, AC, DT) >=
825 Type *VTy = VectorType::get(Builder->getFloatTy(),
826 II->getArgOperand(0)->getType()->getVectorNumElements());
827 Value *TOp = Builder->CreateFPTrunc(II->getArgOperand(0), VTy);
828 Type *OpPtrTy = PointerType::getUnqual(VTy);
829 Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
830 return new StoreInst(TOp, Ptr);
833 case Intrinsic::ppc_qpx_qvstfd:
834 // Turn PPC QPX qvstfd -> store if the pointer is known aligned.
835 if (getOrEnforceKnownAlignment(II->getArgOperand(1), 32, DL, II, AC, DT) >=
838 PointerType::getUnqual(II->getArgOperand(0)->getType());
839 Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
840 return new StoreInst(II->getArgOperand(0), Ptr);
844 case Intrinsic::x86_sse_storeu_ps:
845 case Intrinsic::x86_sse2_storeu_pd:
846 case Intrinsic::x86_sse2_storeu_dq:
847 // Turn X86 storeu -> store if the pointer is known aligned.
848 if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, AC, DT) >=
851 PointerType::getUnqual(II->getArgOperand(1)->getType());
852 Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), OpPtrTy);
853 return new StoreInst(II->getArgOperand(1), Ptr);
857 case Intrinsic::x86_vcvtph2ps_128:
858 case Intrinsic::x86_vcvtph2ps_256: {
859 auto Arg = II->getArgOperand(0);
860 auto ArgType = cast<VectorType>(Arg->getType());
861 auto RetType = cast<VectorType>(II->getType());
862 unsigned ArgWidth = ArgType->getNumElements();
863 unsigned RetWidth = RetType->getNumElements();
864 assert(RetWidth <= ArgWidth && "Unexpected input/return vector widths");
865 assert(ArgType->isIntOrIntVectorTy() &&
866 ArgType->getScalarSizeInBits() == 16 &&
867 "CVTPH2PS input type should be 16-bit integer vector");
868 assert(RetType->getScalarType()->isFloatTy() &&
869 "CVTPH2PS output type should be 32-bit float vector");
871 // Constant folding: Convert to generic half to single conversion.
872 if (isa<ConstantAggregateZero>(Arg))
873 return ReplaceInstUsesWith(*II, ConstantAggregateZero::get(RetType));
875 if (isa<ConstantDataVector>(Arg)) {
876 auto VectorHalfAsShorts = Arg;
877 if (RetWidth < ArgWidth) {
878 SmallVector<int, 8> SubVecMask;
879 for (unsigned i = 0; i != RetWidth; ++i)
880 SubVecMask.push_back((int)i);
881 VectorHalfAsShorts = Builder->CreateShuffleVector(
882 Arg, UndefValue::get(ArgType), SubVecMask);
885 auto VectorHalfType =
886 VectorType::get(Type::getHalfTy(II->getContext()), RetWidth);
888 Builder->CreateBitCast(VectorHalfAsShorts, VectorHalfType);
889 auto VectorFloats = Builder->CreateFPExt(VectorHalfs, RetType);
890 return ReplaceInstUsesWith(*II, VectorFloats);
893 // We only use the lowest lanes of the argument.
894 if (Value *V = SimplifyDemandedVectorEltsLow(Arg, ArgWidth, RetWidth)) {
895 II->setArgOperand(0, V);
901 case Intrinsic::x86_sse_cvtss2si:
902 case Intrinsic::x86_sse_cvtss2si64:
903 case Intrinsic::x86_sse_cvttss2si:
904 case Intrinsic::x86_sse_cvttss2si64:
905 case Intrinsic::x86_sse2_cvtsd2si:
906 case Intrinsic::x86_sse2_cvtsd2si64:
907 case Intrinsic::x86_sse2_cvttsd2si:
908 case Intrinsic::x86_sse2_cvttsd2si64: {
909 // These intrinsics only demand the 0th element of their input vectors. If
910 // we can simplify the input based on that, do so now.
911 Value *Arg = II->getArgOperand(0);
912 unsigned VWidth = Arg->getType()->getVectorNumElements();
913 if (Value *V = SimplifyDemandedVectorEltsLow(Arg, VWidth, 1)) {
914 II->setArgOperand(0, V);
920 // Constant fold ashr( <A x Bi>, Ci ).
921 // Constant fold lshr( <A x Bi>, Ci ).
922 // Constant fold shl( <A x Bi>, Ci ).
923 case Intrinsic::x86_sse2_psrai_d:
924 case Intrinsic::x86_sse2_psrai_w:
925 case Intrinsic::x86_avx2_psrai_d:
926 case Intrinsic::x86_avx2_psrai_w:
927 case Intrinsic::x86_sse2_psrli_d:
928 case Intrinsic::x86_sse2_psrli_q:
929 case Intrinsic::x86_sse2_psrli_w:
930 case Intrinsic::x86_avx2_psrli_d:
931 case Intrinsic::x86_avx2_psrli_q:
932 case Intrinsic::x86_avx2_psrli_w:
933 case Intrinsic::x86_sse2_pslli_d:
934 case Intrinsic::x86_sse2_pslli_q:
935 case Intrinsic::x86_sse2_pslli_w:
936 case Intrinsic::x86_avx2_pslli_d:
937 case Intrinsic::x86_avx2_pslli_q:
938 case Intrinsic::x86_avx2_pslli_w:
939 if (Value *V = SimplifyX86immshift(*II, *Builder))
940 return ReplaceInstUsesWith(*II, V);
943 case Intrinsic::x86_sse2_psra_d:
944 case Intrinsic::x86_sse2_psra_w:
945 case Intrinsic::x86_avx2_psra_d:
946 case Intrinsic::x86_avx2_psra_w:
947 case Intrinsic::x86_sse2_psrl_d:
948 case Intrinsic::x86_sse2_psrl_q:
949 case Intrinsic::x86_sse2_psrl_w:
950 case Intrinsic::x86_avx2_psrl_d:
951 case Intrinsic::x86_avx2_psrl_q:
952 case Intrinsic::x86_avx2_psrl_w:
953 case Intrinsic::x86_sse2_psll_d:
954 case Intrinsic::x86_sse2_psll_q:
955 case Intrinsic::x86_sse2_psll_w:
956 case Intrinsic::x86_avx2_psll_d:
957 case Intrinsic::x86_avx2_psll_q:
958 case Intrinsic::x86_avx2_psll_w: {
959 if (Value *V = SimplifyX86immshift(*II, *Builder))
960 return ReplaceInstUsesWith(*II, V);
962 // SSE2/AVX2 uses only the first 64-bits of the 128-bit vector
963 // operand to compute the shift amount.
964 Value *Arg1 = II->getArgOperand(1);
965 assert(Arg1->getType()->getPrimitiveSizeInBits() == 128 &&
966 "Unexpected packed shift size");
967 unsigned VWidth = Arg1->getType()->getVectorNumElements();
969 if (Value *V = SimplifyDemandedVectorEltsLow(Arg1, VWidth, VWidth / 2)) {
970 II->setArgOperand(1, V);
976 case Intrinsic::x86_avx2_pmovsxbd:
977 case Intrinsic::x86_avx2_pmovsxbq:
978 case Intrinsic::x86_avx2_pmovsxbw:
979 case Intrinsic::x86_avx2_pmovsxdq:
980 case Intrinsic::x86_avx2_pmovsxwd:
981 case Intrinsic::x86_avx2_pmovsxwq:
982 if (Value *V = SimplifyX86extend(*II, *Builder, true))
983 return ReplaceInstUsesWith(*II, V);
986 case Intrinsic::x86_sse41_pmovzxbd:
987 case Intrinsic::x86_sse41_pmovzxbq:
988 case Intrinsic::x86_sse41_pmovzxbw:
989 case Intrinsic::x86_sse41_pmovzxdq:
990 case Intrinsic::x86_sse41_pmovzxwd:
991 case Intrinsic::x86_sse41_pmovzxwq:
992 case Intrinsic::x86_avx2_pmovzxbd:
993 case Intrinsic::x86_avx2_pmovzxbq:
994 case Intrinsic::x86_avx2_pmovzxbw:
995 case Intrinsic::x86_avx2_pmovzxdq:
996 case Intrinsic::x86_avx2_pmovzxwd:
997 case Intrinsic::x86_avx2_pmovzxwq:
998 if (Value *V = SimplifyX86extend(*II, *Builder, false))
999 return ReplaceInstUsesWith(*II, V);
1002 case Intrinsic::x86_sse41_insertps:
1003 if (Value *V = SimplifyX86insertps(*II, *Builder))
1004 return ReplaceInstUsesWith(*II, V);
1007 case Intrinsic::x86_sse4a_extrq: {
1008 // EXTRQ uses only the lowest 64-bits of the first 128-bit vector
1009 // operands and the lowest 16-bits of the second.
1010 Value *Op0 = II->getArgOperand(0);
1011 Value *Op1 = II->getArgOperand(1);
1012 unsigned VWidth0 = Op0->getType()->getVectorNumElements();
1013 unsigned VWidth1 = Op1->getType()->getVectorNumElements();
1014 assert(VWidth0 == 2 && VWidth1 == 16 && "Unexpected operand sizes");
1016 if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth0, 1)) {
1017 II->setArgOperand(0, V);
1020 if (Value *V = SimplifyDemandedVectorEltsLow(Op1, VWidth1, 2)) {
1021 II->setArgOperand(1, V);
1027 case Intrinsic::x86_sse4a_extrqi: {
1028 // EXTRQI uses only the lowest 64-bits of the first 128-bit vector
1030 Value *Op = II->getArgOperand(0);
1031 unsigned VWidth = Op->getType()->getVectorNumElements();
1032 assert(VWidth == 2 && "Unexpected operand size");
1034 if (Value *V = SimplifyDemandedVectorEltsLow(Op, VWidth, 1)) {
1035 II->setArgOperand(0, V);
1041 case Intrinsic::x86_sse4a_insertq: {
1042 // INSERTQ uses only the lowest 64-bits of the first 128-bit vector
1044 Value *Op = II->getArgOperand(0);
1045 unsigned VWidth = Op->getType()->getVectorNumElements();
1046 assert(VWidth == 2 && "Unexpected operand size");
1048 if (Value *V = SimplifyDemandedVectorEltsLow(Op, VWidth, 1)) {
1049 II->setArgOperand(0, V);
1055 case Intrinsic::x86_sse4a_insertqi: {
1056 // insertqi x, y, 64, 0 can just copy y's lower bits and leave the top
1058 // TODO: eventually we should lower this intrinsic to IR
1059 if (auto CILength = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
1060 if (auto CIIndex = dyn_cast<ConstantInt>(II->getArgOperand(3))) {
1061 unsigned Index = CIIndex->getZExtValue();
1062 // From AMD documentation: "a value of zero in the field length is
1063 // defined as length of 64".
1064 unsigned Length = CILength->equalsInt(0) ? 64 : CILength->getZExtValue();
1066 // From AMD documentation: "If the sum of the bit index + length field
1067 // is greater than 64, the results are undefined".
1068 unsigned End = Index + Length;
1070 // Note that both field index and field length are 8-bit quantities.
1071 // Since variables 'Index' and 'Length' are unsigned values
1072 // obtained from zero-extending field index and field length
1073 // respectively, their sum should never wrap around.
          if (End > 64)
            return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));
1077 if (Length == 64 && Index == 0) {
1078 Value *Vec = II->getArgOperand(1);
1079 Value *Undef = UndefValue::get(Vec->getType());
1080 const uint32_t Mask[] = { 0, 2 };
1081 return ReplaceInstUsesWith(
1083 Builder->CreateShuffleVector(
1084 Vec, Undef, ConstantDataVector::get(
1085 II->getContext(), makeArrayRef(Mask))));
1086 } else if (auto Source =
1087 dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
1088 if (Source->hasOneUse() &&
1089 Source->getArgOperand(1) == II->getArgOperand(1)) {
1090 // If the source of the insert has only one use and it's another
1091 // insert (and they're both inserting from the same vector), try to
1092 // bundle both together.
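              // For example, an insertqi covering bits [0,8) whose first
              // operand is an insertqi covering bits [8,24) of the same source
              // can be merged into one insert covering bits [0,24).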
1093 auto CISourceLength =
1094 dyn_cast<ConstantInt>(Source->getArgOperand(2));
1095 auto CISourceIndex =
1096 dyn_cast<ConstantInt>(Source->getArgOperand(3));
1097 if (CISourceIndex && CISourceLength) {
1098 unsigned SourceIndex = CISourceIndex->getZExtValue();
1099 unsigned SourceLength = CISourceLength->getZExtValue();
1100 unsigned SourceEnd = SourceIndex + SourceLength;
1101 unsigned NewIndex, NewLength;
1102 bool ShouldReplace = false;
1103 if (Index <= SourceIndex && SourceIndex <= End) {
                  NewIndex = Index;
                  NewLength = std::max(End, SourceEnd) - NewIndex;
1106 ShouldReplace = true;
1107 } else if (SourceIndex <= Index && Index <= SourceEnd) {
1108 NewIndex = SourceIndex;
1109 NewLength = std::max(SourceEnd, End) - NewIndex;
1110 ShouldReplace = true;
1113 if (ShouldReplace) {
1114 Constant *ConstantLength = ConstantInt::get(
1115 II->getArgOperand(2)->getType(), NewLength, false);
1116 Constant *ConstantIndex = ConstantInt::get(
1117 II->getArgOperand(3)->getType(), NewIndex, false);
1118 Value *Args[4] = { Source->getArgOperand(0),
                                   II->getArgOperand(1), ConstantLength,
                                   ConstantIndex };
1121 Module *M = CI.getParent()->getParent()->getParent();
1123 Intrinsic::getDeclaration(M, Intrinsic::x86_sse4a_insertqi);
1124 return ReplaceInstUsesWith(CI, Builder->CreateCall(F, Args));
1132 // INSERTQI uses only the lowest 64-bits of the first two 128-bit vector
1134 Value *Op0 = II->getArgOperand(0);
1135 Value *Op1 = II->getArgOperand(1);
1136 unsigned VWidth0 = Op0->getType()->getVectorNumElements();
1137 unsigned VWidth1 = Op1->getType()->getVectorNumElements();
1138 assert(VWidth0 == 2 && VWidth1 == 2 && "Unexpected operand sizes");
1140 if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth0, 1)) {
1141 II->setArgOperand(0, V);
1145 if (Value *V = SimplifyDemandedVectorEltsLow(Op1, VWidth1, 1)) {
1146 II->setArgOperand(1, V);
1152 case Intrinsic::x86_sse41_pblendvb:
1153 case Intrinsic::x86_sse41_blendvps:
1154 case Intrinsic::x86_sse41_blendvpd:
1155 case Intrinsic::x86_avx_blendv_ps_256:
1156 case Intrinsic::x86_avx_blendv_pd_256:
1157 case Intrinsic::x86_avx2_pblendvb: {
1158 // Convert blendv* to vector selects if the mask is constant.
1159 // This optimization is convoluted because the intrinsic is defined as
1160 // getting a vector of floats or doubles for the ps and pd versions.
1161 // FIXME: That should be changed.
1163 Value *Op0 = II->getArgOperand(0);
1164 Value *Op1 = II->getArgOperand(1);
1165 Value *Mask = II->getArgOperand(2);
1167 // fold (blend A, A, Mask) -> A
    if (Op0 == Op1)
      return ReplaceInstUsesWith(CI, Op0);
1171 // Zero Mask - select 1st argument.
1172 if (isa<ConstantAggregateZero>(Mask))
1173 return ReplaceInstUsesWith(CI, Op0);
1175 // Constant Mask - select 1st/2nd argument lane based on top bit of mask.
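    // For example, with blendvps a mask element whose sign bit is set selects
    // the lane from the second operand; a clear sign bit keeps the first
    // operand's lane.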
1176 if (auto C = dyn_cast<ConstantDataVector>(Mask)) {
1177 auto Tyi1 = Builder->getInt1Ty();
1178 auto SelectorType = cast<VectorType>(Mask->getType());
1179 auto EltTy = SelectorType->getElementType();
1180 unsigned Size = SelectorType->getNumElements();
1184 : (EltTy->isDoubleTy() ? 64 : EltTy->getIntegerBitWidth());
1185 assert((BitWidth == 64 || BitWidth == 32 || BitWidth == 8) &&
1186 "Wrong arguments for variable blend intrinsic");
1187 SmallVector<Constant *, 32> Selectors;
1188 for (unsigned I = 0; I < Size; ++I) {
1189 // The intrinsics only read the top bit
1192 Selector = C->getElementAsInteger(I);
1194 Selector = C->getElementAsAPFloat(I).bitcastToAPInt().getZExtValue();
1195 Selectors.push_back(ConstantInt::get(Tyi1, Selector >> (BitWidth - 1)));
1197 auto NewSelector = ConstantVector::get(Selectors);
1198 return SelectInst::Create(NewSelector, Op1, Op0, "blendv");
1203 case Intrinsic::x86_ssse3_pshuf_b_128:
1204 case Intrinsic::x86_avx2_pshuf_b: {
1205 // Turn pshufb(V1,mask) -> shuffle(V1,Zero,mask) if mask is a constant.
1206 auto *V = II->getArgOperand(1);
1207 auto *VTy = cast<VectorType>(V->getType());
1208 unsigned NumElts = VTy->getNumElements();
1209 assert((NumElts == 16 || NumElts == 32) &&
1210 "Unexpected number of elements in shuffle mask!");
1211 // Initialize the resulting shuffle mask to all zeroes.
1212 uint32_t Indexes[32] = {0};
1214 if (auto *Mask = dyn_cast<ConstantDataVector>(V)) {
1215 // Each byte in the shuffle control mask forms an index to permute the
1216 // corresponding byte in the destination operand.
1217 for (unsigned I = 0; I < NumElts; ++I) {
1218 int8_t Index = Mask->getElementAsInteger(I);
1219 // If the most significant bit (bit[7]) of each byte of the shuffle
1220 // control mask is set, then zero is written in the result byte.
        // The zero vector is in the right-hand side of the resulting
        // shuffle.
1224 // The value of each index is the least significant 4 bits of the
1225 // shuffle control byte.
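        // For example, a control byte of 0x05 copies byte 5 of the source,
        // while 0x85 (bit 7 set) indexes into the all-zero second operand.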
1226 Indexes[I] = (Index < 0) ? NumElts : Index & 0xF;
1228 } else if (!isa<ConstantAggregateZero>(V))
1231 // The value of each index for the high 128-bit lane is the least
1232 // significant 4 bits of the respective shuffle control byte.
1233 for (unsigned I = 16; I < NumElts; ++I)
1234 Indexes[I] += I & 0xF0;
1236 auto NewC = ConstantDataVector::get(V->getContext(),
1237 makeArrayRef(Indexes, NumElts));
1238 auto V1 = II->getArgOperand(0);
1239 auto V2 = Constant::getNullValue(II->getType());
1240 auto Shuffle = Builder->CreateShuffleVector(V1, V2, NewC);
1241 return ReplaceInstUsesWith(CI, Shuffle);
1244 case Intrinsic::x86_avx_vpermilvar_ps:
1245 case Intrinsic::x86_avx_vpermilvar_ps_256:
1246 case Intrinsic::x86_avx_vpermilvar_pd:
1247 case Intrinsic::x86_avx_vpermilvar_pd_256: {
1248 // Convert vpermil* to shufflevector if the mask is constant.
1249 Value *V = II->getArgOperand(1);
1250 unsigned Size = cast<VectorType>(V->getType())->getNumElements();
1251 assert(Size == 8 || Size == 4 || Size == 2);
1252 uint32_t Indexes[8];
1253 if (auto C = dyn_cast<ConstantDataVector>(V)) {
1254 // The intrinsics only read one or two bits, clear the rest.
1255 for (unsigned I = 0; I < Size; ++I) {
1256 uint32_t Index = C->getElementAsInteger(I) & 0x3;
1257 if (II->getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_pd ||
1258 II->getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_pd_256)
1262 } else if (isa<ConstantAggregateZero>(V)) {
1263 for (unsigned I = 0; I < Size; ++I)
1268 // The _256 variants are a bit trickier since the mask bits always index
1269 // into the corresponding 128 half. In order to convert to a generic
1270 // shuffle, we have to make that explicit.
1271 if (II->getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_ps_256 ||
1272 II->getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_pd_256) {
1273 for (unsigned I = Size / 2; I < Size; ++I)
1274 Indexes[I] += Size / 2;
1277 ConstantDataVector::get(V->getContext(), makeArrayRef(Indexes, Size));
1278 auto V1 = II->getArgOperand(0);
1279 auto V2 = UndefValue::get(V1->getType());
1280 auto Shuffle = Builder->CreateShuffleVector(V1, V2, NewC);
1281 return ReplaceInstUsesWith(CI, Shuffle);
1284 case Intrinsic::x86_avx_vperm2f128_pd_256:
1285 case Intrinsic::x86_avx_vperm2f128_ps_256:
1286 case Intrinsic::x86_avx_vperm2f128_si_256:
1287 case Intrinsic::x86_avx2_vperm2i128:
1288 if (Value *V = SimplifyX86vperm2(*II, *Builder))
1289 return ReplaceInstUsesWith(*II, V);
1292 case Intrinsic::x86_xop_vpcomb:
1293 case Intrinsic::x86_xop_vpcomd:
1294 case Intrinsic::x86_xop_vpcomq:
1295 case Intrinsic::x86_xop_vpcomw:
1296 if (Value *V = SimplifyX86vpcom(*II, *Builder, true))
1297 return ReplaceInstUsesWith(*II, V);
1300 case Intrinsic::x86_xop_vpcomub:
1301 case Intrinsic::x86_xop_vpcomud:
1302 case Intrinsic::x86_xop_vpcomuq:
1303 case Intrinsic::x86_xop_vpcomuw:
1304 if (Value *V = SimplifyX86vpcom(*II, *Builder, false))
1305 return ReplaceInstUsesWith(*II, V);
1308 case Intrinsic::ppc_altivec_vperm:
1309 // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
1310 // Note that ppc_altivec_vperm has a big-endian bias, so when creating
1311 // a vectorshuffle for little endian, we must undo the transformation
1312 // performed on vec_perm in altivec.h. That is, we must complement
  // the permutation mask with respect to 31 and reverse the order of
  // the input vectors.
1315 if (Constant *Mask = dyn_cast<Constant>(II->getArgOperand(2))) {
1316 assert(Mask->getType()->getVectorNumElements() == 16 &&
1317 "Bad type for intrinsic!");
1319 // Check that all of the elements are integer constants or undefs.
1320 bool AllEltsOk = true;
1321 for (unsigned i = 0; i != 16; ++i) {
1322 Constant *Elt = Mask->getAggregateElement(i);
1323 if (!Elt || !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
1330 // Cast the input vectors to byte vectors.
1331 Value *Op0 = Builder->CreateBitCast(II->getArgOperand(0),
1333 Value *Op1 = Builder->CreateBitCast(II->getArgOperand(1),
1335 Value *Result = UndefValue::get(Op0->getType());
1337 // Only extract each element once.
1338 Value *ExtractedElts[32];
1339 memset(ExtractedElts, 0, sizeof(ExtractedElts));
1341 for (unsigned i = 0; i != 16; ++i) {
        if (isa<UndefValue>(Mask->getAggregateElement(i)))
          continue;
        unsigned Idx =
1345 cast<ConstantInt>(Mask->getAggregateElement(i))->getZExtValue();
1346 Idx &= 31; // Match the hardware behavior.
        if (DL.isLittleEndian())
          Idx = 31 - Idx;
1350 if (!ExtractedElts[Idx]) {
1351 Value *Op0ToUse = (DL.isLittleEndian()) ? Op1 : Op0;
1352 Value *Op1ToUse = (DL.isLittleEndian()) ? Op0 : Op1;
1353 ExtractedElts[Idx] =
1354 Builder->CreateExtractElement(Idx < 16 ? Op0ToUse : Op1ToUse,
1355 Builder->getInt32(Idx&15));
1358 // Insert this value into the result vector.
1359 Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
1360 Builder->getInt32(i));
1362 return CastInst::Create(Instruction::BitCast, Result, CI.getType());
1367 case Intrinsic::arm_neon_vld1:
1368 case Intrinsic::arm_neon_vld2:
1369 case Intrinsic::arm_neon_vld3:
1370 case Intrinsic::arm_neon_vld4:
1371 case Intrinsic::arm_neon_vld2lane:
1372 case Intrinsic::arm_neon_vld3lane:
1373 case Intrinsic::arm_neon_vld4lane:
1374 case Intrinsic::arm_neon_vst1:
1375 case Intrinsic::arm_neon_vst2:
1376 case Intrinsic::arm_neon_vst3:
1377 case Intrinsic::arm_neon_vst4:
1378 case Intrinsic::arm_neon_vst2lane:
1379 case Intrinsic::arm_neon_vst3lane:
1380 case Intrinsic::arm_neon_vst4lane: {
1381 unsigned MemAlign = getKnownAlignment(II->getArgOperand(0), DL, II, AC, DT);
1382 unsigned AlignArg = II->getNumArgOperands() - 1;
1383 ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
1384 if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
1385 II->setArgOperand(AlignArg,
                        ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                         MemAlign, false));
      return II;
    }
    break;
  }
1393 case Intrinsic::arm_neon_vmulls:
1394 case Intrinsic::arm_neon_vmullu:
1395 case Intrinsic::aarch64_neon_smull:
1396 case Intrinsic::aarch64_neon_umull: {
1397 Value *Arg0 = II->getArgOperand(0);
1398 Value *Arg1 = II->getArgOperand(1);
1400 // Handle mul by zero first:
1401 if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {
1402 return ReplaceInstUsesWith(CI, ConstantAggregateZero::get(II->getType()));
1405 // Check for constant LHS & RHS - in this case we just simplify.
1406 bool Zext = (II->getIntrinsicID() == Intrinsic::arm_neon_vmullu ||
1407 II->getIntrinsicID() == Intrinsic::aarch64_neon_umull);
1408 VectorType *NewVT = cast<VectorType>(II->getType());
1409 if (Constant *CV0 = dyn_cast<Constant>(Arg0)) {
1410 if (Constant *CV1 = dyn_cast<Constant>(Arg1)) {
1411 CV0 = ConstantExpr::getIntegerCast(CV0, NewVT, /*isSigned=*/!Zext);
1412 CV1 = ConstantExpr::getIntegerCast(CV1, NewVT, /*isSigned=*/!Zext);
1414 return ReplaceInstUsesWith(CI, ConstantExpr::getMul(CV0, CV1));
1417 // Couldn't simplify - canonicalize constant to the RHS.
1418 std::swap(Arg0, Arg1);
1421 // Handle mul by one:
1422 if (Constant *CV1 = dyn_cast<Constant>(Arg1))
1423 if (ConstantInt *Splat =
1424 dyn_cast_or_null<ConstantInt>(CV1->getSplatValue()))
        if (Splat->isOne())
          return CastInst::CreateIntegerCast(Arg0, II->getType(),
                                             /*isSigned=*/!Zext);
    break;
  }
1432 case Intrinsic::AMDGPU_rcp: {
1433 if (const ConstantFP *C = dyn_cast<ConstantFP>(II->getArgOperand(0))) {
1434 const APFloat &ArgVal = C->getValueAPF();
1435 APFloat Val(ArgVal.getSemantics(), 1.0);
1436 APFloat::opStatus Status = Val.divide(ArgVal,
1437 APFloat::rmNearestTiesToEven);
      // Only do this if it was exact and therefore not dependent on the
      // rounding mode.
1440 if (Status == APFloat::opOK)
1441 return ReplaceInstUsesWith(CI, ConstantFP::get(II->getContext(), Val));
1446 case Intrinsic::stackrestore: {
1447 // If the save is right next to the restore, remove the restore. This can
1448 // happen when variable allocas are DCE'd.
1449 if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
1450 if (SS->getIntrinsicID() == Intrinsic::stacksave) {
        BasicBlock::iterator BI = SS;
        if (&*++BI == II)
          return EraseInstFromFunction(CI);
      }
    }
1457 // Scan down this block to see if there is another stack restore in the
1458 // same block without an intervening call/alloca.
1459 BasicBlock::iterator BI = II;
1460 TerminatorInst *TI = II->getParent()->getTerminator();
1461 bool CannotRemove = false;
1462 for (++BI; &*BI != TI; ++BI) {
1463 if (isa<AllocaInst>(BI)) {
        CannotRemove = true;
        break;
      }
1467 if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
1468 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
1469 // If there is a stackrestore below this one, remove this one.
1470 if (II->getIntrinsicID() == Intrinsic::stackrestore)
1471 return EraseInstFromFunction(CI);
1472 // Otherwise, ignore the intrinsic.
1474 // If we found a non-intrinsic call, we can't remove the stack
1476 CannotRemove = true;
1482 // If the stack restore is in a return, resume, or unwind block and if there
    // are no allocas or calls between the restore and the return, nuke the
    // restore.
1485 if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
1486 return EraseInstFromFunction(CI);
1489 case Intrinsic::lifetime_start: {
1490 // Remove trivially empty lifetime_start/end ranges, i.e. a start
1491 // immediately followed by an end (ignoring debuginfo or other
1492 // lifetime markers in between).
1493 BasicBlock::iterator BI = II, BE = II->getParent()->end();
1494 for (++BI; BI != BE; ++BI) {
1495 if (IntrinsicInst *LTE = dyn_cast<IntrinsicInst>(BI)) {
1496 if (isa<DbgInfoIntrinsic>(LTE) ||
            LTE->getIntrinsicID() == Intrinsic::lifetime_start)
          continue;
1499 if (LTE->getIntrinsicID() == Intrinsic::lifetime_end) {
1500 if (II->getOperand(0) == LTE->getOperand(0) &&
1501 II->getOperand(1) == LTE->getOperand(1)) {
1502 EraseInstFromFunction(*LTE);
1503 return EraseInstFromFunction(*II);
1512 case Intrinsic::assume: {
1513 // Canonicalize assume(a && b) -> assume(a); assume(b);
1514 // Note: New assumption intrinsics created here are registered by
1515 // the InstCombineIRInserter object.
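    // For example, assume(a && b) becomes assume(a) followed by assume(b), so
    // each condition can be used independently by later value-tracking queries.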
1516 Value *IIOperand = II->getArgOperand(0), *A, *B,
1517 *AssumeIntrinsic = II->getCalledValue();
1518 if (match(IIOperand, m_And(m_Value(A), m_Value(B)))) {
1519 Builder->CreateCall(AssumeIntrinsic, A, II->getName());
1520 Builder->CreateCall(AssumeIntrinsic, B, II->getName());
1521 return EraseInstFromFunction(*II);
1523 // assume(!(a || b)) -> assume(!a); assume(!b);
1524 if (match(IIOperand, m_Not(m_Or(m_Value(A), m_Value(B))))) {
      Builder->CreateCall(AssumeIntrinsic, Builder->CreateNot(A),
                          II->getName());
      Builder->CreateCall(AssumeIntrinsic, Builder->CreateNot(B),
                          II->getName());
1529 return EraseInstFromFunction(*II);
1532 // assume( (load addr) != null ) -> add 'nonnull' metadata to load
1533 // (if assume is valid at the load)
1534 if (ICmpInst* ICmp = dyn_cast<ICmpInst>(IIOperand)) {
1535 Value *LHS = ICmp->getOperand(0);
1536 Value *RHS = ICmp->getOperand(1);
1537 if (ICmpInst::ICMP_NE == ICmp->getPredicate() &&
1538 isa<LoadInst>(LHS) &&
1539 isa<Constant>(RHS) &&
1540 RHS->getType()->isPointerTy() &&
1541 cast<Constant>(RHS)->isNullValue()) {
1542 LoadInst* LI = cast<LoadInst>(LHS);
1543 if (isValidAssumeForContext(II, LI, DT)) {
1544 MDNode *MD = MDNode::get(II->getContext(), None);
1545 LI->setMetadata(LLVMContext::MD_nonnull, MD);
1546 return EraseInstFromFunction(*II);
1549 // TODO: apply nonnull return attributes to calls and invokes
1550 // TODO: apply range metadata for range check patterns?
1552 // If there is a dominating assume with the same condition as this one,
1553 // then this one is redundant, and should be removed.
1554 APInt KnownZero(1, 0), KnownOne(1, 0);
1555 computeKnownBits(IIOperand, KnownZero, KnownOne, 0, II);
1556 if (KnownOne.isAllOnesValue())
1557 return EraseInstFromFunction(*II);
1561 case Intrinsic::experimental_gc_relocate: {
1562 // Translate facts known about a pointer before relocating into
1563 // facts about the relocate value, while being careful to
1564 // preserve relocation semantics.
1565 GCRelocateOperands Operands(II);
1566 Value *DerivedPtr = Operands.getDerivedPtr();
1567 auto *GCRelocateType = cast<PointerType>(II->getType());
1569 // Remove the relocation if unused, note that this check is required
1570 // to prevent the cases below from looping forever.
1571 if (II->use_empty())
1572 return EraseInstFromFunction(*II);
1574 // Undef is undef, even after relocation.
1575 // TODO: provide a hook for this in GCStrategy. This is clearly legal for
1576 // most practical collectors, but there was discussion in the review thread
1577 // about whether it was legal for all possible collectors.
1578 if (isa<UndefValue>(DerivedPtr)) {
1579 // gc_relocate is uncasted. Use undef of gc_relocate's type to replace it.
1580 return ReplaceInstUsesWith(*II, UndefValue::get(GCRelocateType));
1583 // The relocation of null will be null for most any collector.
1584 // TODO: provide a hook for this in GCStrategy. There might be some weird
1585 // collector this property does not hold for.
1586 if (isa<ConstantPointerNull>(DerivedPtr)) {
1587 // gc_relocate is uncasted. Use null-pointer of gc_relocate's type to replace it.
1588 return ReplaceInstUsesWith(*II, ConstantPointerNull::get(GCRelocateType));
1591 // isKnownNonNull -> nonnull attribute
1592 if (isKnownNonNullAt(DerivedPtr, II, DT, TLI))
1593 II->addAttribute(AttributeSet::ReturnIndex, Attribute::NonNull);
1595 // isDereferenceablePointer -> deref attribute
1596 if (isDereferenceablePointer(DerivedPtr, DL)) {
1597 if (Argument *A = dyn_cast<Argument>(DerivedPtr)) {
1598 uint64_t Bytes = A->getDereferenceableBytes();
1599 II->addDereferenceableAttr(AttributeSet::ReturnIndex, Bytes);
1603 // TODO: bitcast(relocate(p)) -> relocate(bitcast(p))
1604 // Canonicalize on the type from the uses to the defs
1606 // TODO: relocate((gep p, C, C2, ...)) -> gep(relocate(p), C, C2, ...)
  }

  return visitCallSite(II);
}
1613 // InvokeInst simplification
1615 Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
  return visitCallSite(&II);
}
1619 /// isSafeToEliminateVarargsCast - If this cast does not affect the value
1620 /// passed through the varargs area, we can eliminate the use of the cast.
1621 static bool isSafeToEliminateVarargsCast(const CallSite CS,
1622 const DataLayout &DL,
                                         const CastInst *const CI,
                                         const int ix) {
  if (!CI->isLosslessCast())
    return false;
1628 // If this is a GC intrinsic, avoid munging types. We need types for
1629 // statepoint reconstruction in SelectionDAG.
1630 // TODO: This is probably something which should be expanded to all
1631 // intrinsics since the entire point of intrinsics is that
1632 // they are understandable by the optimizer.
  if (isStatepoint(CS) || isGCRelocate(CS) || isGCResult(CS))
    return false;
1636 // The size of ByVal or InAlloca arguments is derived from the type, so we
1637 // can't change to a type with a different size. If the size were
1638 // passed explicitly we could avoid this check.
  if (!CS.isByValOrInAllocaArgument(ix))
    return true;

  Type* SrcTy =
          cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
1644 Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
  if (!SrcTy->isSized() || !DstTy->isSized())
    return false;
  if (DL.getTypeAllocSize(SrcTy) != DL.getTypeAllocSize(DstTy))
    return false;
  return true;
}
// Try to fold some different types of calls here.
// Currently we're only working with the checking functions: memcpy_chk,
// mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,
// strcat_chk and strncat_chk.
1656 Instruction *InstCombiner::tryOptimizeCall(CallInst *CI) {
1657 if (!CI->getCalledFunction()) return nullptr;
1659 auto InstCombineRAUW = [this](Instruction *From, Value *With) {
    ReplaceInstUsesWith(*From, With);
  };
1662 LibCallSimplifier Simplifier(DL, TLI, InstCombineRAUW);
1663 if (Value *With = Simplifier.optimizeCall(CI)) {
    ++NumSimplified;
    return CI->use_empty() ? CI : ReplaceInstUsesWith(*CI, With);
  }

  return nullptr;
}
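// Illustrative sketch of one such simplification (hypothetical IR; the exact
// output is up to LibCallSimplifier): a fortified call whose copy length is a
// constant known not to exceed the object size, e.g.
//   call i8* @__memcpy_chk(i8* %dst, i8* %src, i64 8, i64 16)
// can be replaced by a plain 8-byte memcpy from %src to %dst, with %dst used
// in place of the call's result.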
1671 static IntrinsicInst *FindInitTrampolineFromAlloca(Value *TrampMem) {
1672 // Strip off at most one level of pointer casts, looking for an alloca. This
1673 // is good enough in practice and simpler than handling any number of casts.
1674 Value *Underlying = TrampMem->stripPointerCasts();
1675 if (Underlying != TrampMem &&
      (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
    return nullptr;
  if (!isa<AllocaInst>(Underlying))
    return nullptr;
1681 IntrinsicInst *InitTrampoline = nullptr;
1682 for (User *U : TrampMem->users()) {
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
    if (!II)
      return nullptr;
1686 if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
      if (InitTrampoline)
        // More than one init_trampoline writes to this value. Give up.
        return nullptr;
      InitTrampoline = II;
      continue;
    }
    if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
      // Allow any number of calls to adjust.trampoline.
      continue;
    return nullptr;
  }
1699 // No call to init.trampoline found.
  if (!InitTrampoline)
    return nullptr;
1703 // Check that the alloca is being used in the expected way.
  if (InitTrampoline->getOperand(0) != TrampMem)
    return nullptr;

  return InitTrampoline;
}
static IntrinsicInst *FindInitTrampolineFromBB(IntrinsicInst *AdjustTramp,
                                               Value *TrampMem) {
1712 // Visit all the previous instructions in the basic block, and try to find a
1713 // init.trampoline which has a direct path to the adjust.trampoline.
1714 for (BasicBlock::iterator I = AdjustTramp,
1715 E = AdjustTramp->getParent()->begin(); I != E; ) {
1716 Instruction *Inst = --I;
1717 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
1718 if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
          II->getOperand(0) == TrampMem)
        return II;
    if (Inst->mayWriteToMemory())
      return nullptr;
  }
  return nullptr;
}
1727 // Given a call to llvm.adjust.trampoline, find and return the corresponding
1728 // call to llvm.init.trampoline if the call to the trampoline can be optimized
1729 // to a direct call to a function. Otherwise return NULL.
1731 static IntrinsicInst *FindInitTrampoline(Value *Callee) {
1732 Callee = Callee->stripPointerCasts();
  IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
  if (!AdjustTramp ||
      AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)
    return nullptr;
1738 Value *TrampMem = AdjustTramp->getOperand(0);
  if (IntrinsicInst *IT = FindInitTrampolineFromAlloca(TrampMem))
    return IT;
  if (IntrinsicInst *IT = FindInitTrampolineFromBB(AdjustTramp, TrampMem))
    return IT;
  return nullptr;
}
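// Illustrative sketch of the pattern the helpers above look for (hypothetical
// IR; %tramp and %nval are made-up names):
//   call void @llvm.init.trampoline(i8* %tramp, i8* %func, i8* %nval)
//   %p  = call i8* @llvm.adjust.trampoline(i8* %tramp)
//   %fp = bitcast i8* %p to void (i32)*
//   call void %fp(i32 0)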
1747 // visitCallSite - Improvements for call and invoke instructions.
1749 Instruction *InstCombiner::visitCallSite(CallSite CS) {
1751 if (isAllocLikeFn(CS.getInstruction(), TLI))
1752 return visitAllocSite(*CS.getInstruction());
1754 bool Changed = false;
1756 // Mark any parameters that are known to be non-null with the nonnull
1757 // attribute. This is helpful for inlining calls to functions with null
1758 // checks on their arguments.
  unsigned ArgNo = 0;
  for (Value *V : CS.args()) {
1761 if (V->getType()->isPointerTy() && !CS.paramHasAttr(ArgNo+1, Attribute::NonNull) &&
1762 isKnownNonNullAt(V, CS.getInstruction(), DT, TLI)) {
1763 AttributeSet AS = CS.getAttributes();
1764 AS = AS.addAttribute(CS.getInstruction()->getContext(), ArgNo+1,
1765 Attribute::NonNull);
      CS.setAttributes(AS);
      Changed = true;
    }
    ArgNo++;
  }
1771 assert(ArgNo == CS.arg_size() && "sanity check");
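  // Illustrative sketch (hypothetical IR; @g is a made-up name): given
  //   %p = alloca i32
  //   call void @g(i32* %p)
  // the alloca is known to be non-null at the call site, so the argument can
  // be annotated as
  //   call void @g(i32* nonnull %p)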
1773 // If the callee is a pointer to a function, attempt to move any casts to the
1774 // arguments of the call/invoke.
1775 Value *Callee = CS.getCalledValue();
  if (!isa<Function>(Callee) && transformConstExprCastCall(CS))
    return nullptr;
1779 if (Function *CalleeF = dyn_cast<Function>(Callee))
1780 // If the call and callee calling conventions don't match, this call must
1781 // be unreachable, as the call is undefined.
1782 if (CalleeF->getCallingConv() != CS.getCallingConv() &&
1783 // Only do this for calls to a function with a body. A prototype may
1784 // not actually end up matching the implementation's calling conv for a
1785 // variety of reasons (e.g. it may be written in assembly).
1786 !CalleeF->isDeclaration()) {
1787 Instruction *OldCall = CS.getInstruction();
1788 new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                    UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                    OldCall);
      // If OldCall does not return void then replaceAllUsesWith undef.
      // This allows ValueHandlers and custom metadata to adjust themselves.
1793 if (!OldCall->getType()->isVoidTy())
1794 ReplaceInstUsesWith(*OldCall, UndefValue::get(OldCall->getType()));
1795 if (isa<CallInst>(OldCall))
1796 return EraseInstFromFunction(*OldCall);
1798 // We cannot remove an invoke, because it would change the CFG, just
1799 // change the callee to a null pointer.
1800 cast<InvokeInst>(OldCall)->setCalledFunction(
                                    Constant::getNullValue(CalleeF->getType()));
      return nullptr;
    }
1805 if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
1806 // If CS does not return void then replaceAllUsesWith undef.
    // This allows ValueHandlers and custom metadata to adjust themselves.
1808 if (!CS.getInstruction()->getType()->isVoidTy())
1809 ReplaceInstUsesWith(*CS.getInstruction(),
1810 UndefValue::get(CS.getInstruction()->getType()));
1812 if (isa<InvokeInst>(CS.getInstruction())) {
      // Can't remove an invoke because we cannot change the CFG.
      return nullptr;
    }
1817 // This instruction is not reachable, just remove it. We insert a store to
1818 // undef so that we know that this code is not reachable, despite the fact
1819 // that we can't modify the CFG here.
1820 new StoreInst(ConstantInt::getTrue(Callee->getContext()),
1821 UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
1822 CS.getInstruction());
    return EraseInstFromFunction(*CS.getInstruction());
  }
1827 if (IntrinsicInst *II = FindInitTrampoline(Callee))
1828 return transformCallThroughTrampoline(CS, II);
1830 PointerType *PTy = cast<PointerType>(Callee->getType());
1831 FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
1832 if (FTy->isVarArg()) {
1833 int ix = FTy->getNumParams();
    // See if we can optimize any arguments passed through the varargs area of
    // the call.
1836 for (CallSite::arg_iterator I = CS.arg_begin() + FTy->getNumParams(),
1837 E = CS.arg_end(); I != E; ++I, ++ix) {
1838 CastInst *CI = dyn_cast<CastInst>(*I);
1839 if (CI && isSafeToEliminateVarargsCast(CS, DL, CI, ix)) {
        *I = CI->getOperand(0);
        Changed = true;
      }
    }
  }
1846 if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
1847 // Inline asm calls cannot throw - mark them 'nounwind'.
    CS.setDoesNotThrow();
    Changed = true;
  }
1852 // Try to optimize the call if possible, we require DataLayout for most of
1853 // this. None of these calls are seen as possibly dead so go ahead and
1854 // delete the instruction now.
1855 if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
1856 Instruction *I = tryOptimizeCall(CI);
    // If we changed something, return the result; otherwise fall through.
    if (I) return EraseInstFromFunction(*I);
  }
  return Changed ? CS.getInstruction() : nullptr;
}
1865 // transformConstExprCastCall - If the callee is a constexpr cast of a function,
1866 // attempt to move the cast to the arguments of the call/invoke.
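// Illustrative sketch (hypothetical IR; @f and %p are made-up names): a call
// through a constexpr-cast function pointer such as
//   call i32 bitcast (i32 (i8*)* @f to i32 (i32*)*)(i32* %p)
// can become a direct call with the cast moved onto the argument:
//   %0 = bitcast i32* %p to i8*
//   call i32 @f(i8* %0)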
1868 bool InstCombiner::transformConstExprCastCall(CallSite CS) {
  Function *Callee =
      dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
  if (!Callee)
    return false;
  // The prototypes of thunks are a lie; don't try to directly call such
  // functions.
  if (Callee->hasFnAttribute("thunk"))
    return false;
1877 Instruction *Caller = CS.getInstruction();
1878 const AttributeSet &CallerPAL = CS.getAttributes();
1880 // Okay, this is a cast from a function to a different type. Unless doing so
1881 // would cause a type conversion of one of our arguments, change this call to
1882 // be a direct call with arguments casted to the appropriate types.
1884 FunctionType *FT = Callee->getFunctionType();
1885 Type *OldRetTy = Caller->getType();
1886 Type *NewRetTy = FT->getReturnType();
1888 // Check to see if we are changing the return type...
1889 if (OldRetTy != NewRetTy) {
1891 if (NewRetTy->isStructTy())
1892 return false; // TODO: Handle multiple return values.
1894 if (!CastInst::isBitOrNoopPointerCastable(NewRetTy, OldRetTy, DL)) {
1895 if (Callee->isDeclaration())
1896 return false; // Cannot transform this return value.
1898 if (!Caller->use_empty() &&
1899 // void -> non-void is handled specially
1900 !NewRetTy->isVoidTy())
        return false; // Cannot transform this return value.
    }
1904 if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
1905 AttrBuilder RAttrs(CallerPAL, AttributeSet::ReturnIndex);
1906 if (RAttrs.overlaps(AttributeFuncs::typeIncompatible(NewRetTy)))
        return false; // Attribute not compatible with transformed value.
    }
1910 // If the callsite is an invoke instruction, and the return value is used by
1911 // a PHI node in a successor, we cannot change the return type of the call
1912 // because there is no place to put the cast instruction (without breaking
1913 // the critical edge). Bail out in this case.
1914 if (!Caller->use_empty())
1915 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
1916 for (User *U : II->users())
1917 if (PHINode *PN = dyn_cast<PHINode>(U))
1918 if (PN->getParent() == II->getNormalDest() ||
              PN->getParent() == II->getUnwindDest())
            return false;
  }
1923 unsigned NumActualArgs = CS.arg_size();
1924 unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);
1926 // Prevent us turning:
  // declare void @takes_i32_inalloca(i32* inalloca)
  //  call void bitcast (void (i32*)* @takes_i32_inalloca to void (i32)*)(i32 0)
  //
  // into:
  //
  //  call void @takes_i32_inalloca(i32* null)
1933 // Similarly, avoid folding away bitcasts of byval calls.
1934 if (Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) ||
      Callee->getAttributes().hasAttrSomewhere(Attribute::ByVal))
    return false;
1938 CallSite::arg_iterator AI = CS.arg_begin();
1939 for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
1940 Type *ParamTy = FT->getParamType(i);
1941 Type *ActTy = (*AI)->getType();
1943 if (!CastInst::isBitOrNoopPointerCastable(ActTy, ParamTy, DL))
1944 return false; // Cannot transform this parameter value.
1946 if (AttrBuilder(CallerPAL.getParamAttributes(i + 1), i + 1).
1947 overlaps(AttributeFuncs::typeIncompatible(ParamTy)))
1948 return false; // Attribute not compatible with transformed value.
1950 if (CS.isInAllocaArgument(i))
1951 return false; // Cannot transform to and from inalloca.
1953 // If the parameter is passed as a byval argument, then we have to have a
1954 // sized type and the sized type has to have the same size as the old type.
1955 if (ParamTy != ActTy &&
1956 CallerPAL.getParamAttributes(i + 1).hasAttribute(i + 1,
1957 Attribute::ByVal)) {
1958 PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
      if (!ParamPTy || !ParamPTy->getElementType()->isSized())
        return false;
1962 Type *CurElTy = ActTy->getPointerElementType();
1963 if (DL.getTypeAllocSize(CurElTy) !=
          DL.getTypeAllocSize(ParamPTy->getElementType()))
        return false;
    }
  }
1969 if (Callee->isDeclaration()) {
1970 // Do not delete arguments unless we have a function body.
    if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
      return false;
1974 // If the callee is just a declaration, don't change the varargsness of the
    // call. We don't want to introduce a varargs call where one doesn't
    // already exist.
1977 PointerType *APTy = cast<PointerType>(CS.getCalledValue()->getType());
    if (FT->isVarArg()!=cast<FunctionType>(APTy->getElementType())->isVarArg())
      return false;
1981 // If both the callee and the cast type are varargs, we still have to make
1982 // sure the number of fixed parameters are the same or we have the same
1983 // ABI issues as if we introduce a varargs call.
1984 if (FT->isVarArg() &&
1985 cast<FunctionType>(APTy->getElementType())->isVarArg() &&
1986 FT->getNumParams() !=
        cast<FunctionType>(APTy->getElementType())->getNumParams())
      return false;
  }
1991 if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
1992 !CallerPAL.isEmpty())
1993 // In this case we have more arguments than the new function type, but we
1994 // won't be dropping them. Check that these extra arguments have attributes
1995 // that are compatible with being a vararg call argument.
1996 for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
1997 unsigned Index = CallerPAL.getSlotIndex(i - 1);
      if (Index <= FT->getNumParams())
        break;
2001 // Check if it has an attribute that's incompatible with varargs.
2002 AttributeSet PAttrs = CallerPAL.getSlotAttributes(i - 1);
      if (PAttrs.hasAttribute(Index, Attribute::StructRet))
        return false;
    }
2008 // Okay, we decided that this is a safe thing to do: go ahead and start
2009 // inserting cast instructions as necessary.
2010 std::vector<Value*> Args;
2011 Args.reserve(NumActualArgs);
2012 SmallVector<AttributeSet, 8> attrVec;
2013 attrVec.reserve(NumCommonArgs);
2015 // Get any return attributes.
2016 AttrBuilder RAttrs(CallerPAL, AttributeSet::ReturnIndex);
2018 // If the return value is not being used, the type may not be compatible
2019 // with the existing attributes. Wipe out any problematic attributes.
2020 RAttrs.remove(AttributeFuncs::typeIncompatible(NewRetTy));
2022 // Add the new return attributes.
2023 if (RAttrs.hasAttributes())
2024 attrVec.push_back(AttributeSet::get(Caller->getContext(),
2025 AttributeSet::ReturnIndex, RAttrs));
2027 AI = CS.arg_begin();
2028 for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
2029 Type *ParamTy = FT->getParamType(i);
    if ((*AI)->getType() == ParamTy) {
      Args.push_back(*AI);
    } else {
      Args.push_back(Builder->CreateBitOrPointerCast(*AI, ParamTy));
    }
2037 // Add any parameter attributes.
2038 AttrBuilder PAttrs(CallerPAL.getParamAttributes(i + 1), i + 1);
2039 if (PAttrs.hasAttributes())
      attrVec.push_back(AttributeSet::get(Caller->getContext(), i + 1,
                                          PAttrs));
  }
  // If the function takes more arguments than the call was taking, add them
  // now.
2046 for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
2047 Args.push_back(Constant::getNullValue(FT->getParamType(i)));
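  // Illustrative sketch of the loop above (hypothetical IR; @h is a made-up
  // name): calling a two-argument function through a one-argument cast,
  //   call void bitcast (void (i32, i32)* @h to void (i32)*)(i32 1)
  // is rebuilt as a direct call with the missing parameter filled with zero:
  //   call void @h(i32 1, i32 0)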
2049 // If we are removing arguments to the function, emit an obnoxious warning.
2050 if (FT->getNumParams() < NumActualArgs) {
2051 // TODO: if (!FT->isVarArg()) this call may be unreachable. PR14722
2052 if (FT->isVarArg()) {
2053 // Add all of the arguments in their promoted form to the arg list.
2054 for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
2055 Type *PTy = getPromotedType((*AI)->getType());
2056 if (PTy != (*AI)->getType()) {
2057 // Must promote to pass through va_arg area!
2058 Instruction::CastOps opcode =
2059 CastInst::getCastOpcode(*AI, false, PTy, false);
          Args.push_back(Builder->CreateCast(opcode, *AI, PTy));
        } else {
          Args.push_back(*AI);
        }
2065 // Add any parameter attributes.
2066 AttrBuilder PAttrs(CallerPAL.getParamAttributes(i + 1), i + 1);
2067 if (PAttrs.hasAttributes())
          attrVec.push_back(AttributeSet::get(FT->getContext(), i + 1,
                                              PAttrs));
      }
    }
  }
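  // Illustrative sketch of the promotion above (hypothetical IR; @g and %c
  // are made-up names, and @g is assumed to have a definition): an i8 passed
  // through the varargs area of
  //   call void bitcast (void (i32, ...)* @g to void (i32, i8)*)(i32 1, i8 %c)
  // is widened before being added to the rebuilt call:
  //   %0 = zext i8 %c to i32
  //   call void (i32, ...) @g(i32 1, i32 %0)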
2074 AttributeSet FnAttrs = CallerPAL.getFnAttributes();
2075 if (CallerPAL.hasAttributes(AttributeSet::FunctionIndex))
2076 attrVec.push_back(AttributeSet::get(Callee->getContext(), FnAttrs));
2078 if (NewRetTy->isVoidTy())
2079 Caller->setName(""); // Void type should not have a name.
  const AttributeSet &NewCallerPAL = AttributeSet::get(Callee->getContext(),
                                                       attrVec);

  Instruction *NC;
2085 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
2086 NC = Builder->CreateInvoke(Callee, II->getNormalDest(),
                               II->getUnwindDest(), Args);
    NC->takeName(II);
2089 cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
    cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
  } else {
2092 CallInst *CI = cast<CallInst>(Caller);
    NC = Builder->CreateCall(Callee, Args);
    NC->takeName(CI);
2095 if (CI->isTailCall())
2096 cast<CallInst>(NC)->setTailCall();
2097 cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
    cast<CallInst>(NC)->setAttributes(NewCallerPAL);
  }

  // Insert a cast of the return type as necessary.
  Value *NV = NC;
2103 if (OldRetTy != NV->getType() && !Caller->use_empty()) {
2104 if (!NV->getType()->isVoidTy()) {
2105 NV = NC = CastInst::CreateBitOrPointerCast(NC, OldRetTy);
2106 NC->setDebugLoc(Caller->getDebugLoc());
2108 // If this is an invoke instruction, we should insert it after the first
      // non-phi instruction in the normal successor block.
2110 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
2111 BasicBlock::iterator I = II->getNormalDest()->getFirstInsertionPt();
        InsertNewInstBefore(NC, *I);
      } else {
        // Otherwise, it's a call, just insert cast right after the call.
        InsertNewInstBefore(NC, *Caller);
      }
      Worklist.AddUsersToWorkList(*Caller);
    } else {
      NV = UndefValue::get(Caller->getType());
    }
  }
2123 if (!Caller->use_empty())
2124 ReplaceInstUsesWith(*Caller, NV);
2125 else if (Caller->hasValueHandle()) {
2126 if (OldRetTy == NV->getType())
      ValueHandleBase::ValueIsRAUWd(Caller, NV);
    else
      // We cannot call ValueIsRAUWd with a different type, and the
2130 // actual tracked value will disappear.
      ValueHandleBase::ValueIsDeleted(Caller);
  }

  EraseInstFromFunction(*Caller);
  return true;
}
2138 // transformCallThroughTrampoline - Turn a call to a function created by
2139 // init_trampoline / adjust_trampoline intrinsic pair into a direct call to the
2140 // underlying function.
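// Illustrative sketch (hypothetical IR; @f, %tramp and %nval are made-up
// names): given
//   call void @llvm.init.trampoline(i8* %tramp,
//                                   i8* bitcast (void (i8*, i32)* @f to i8*),
//                                   i8* %nval)
//   %p  = call i8* @llvm.adjust.trampoline(i8* %tramp)
//   %fp = bitcast i8* %p to void (i32)*
//   call void %fp(i32 42)
// where @f's first parameter carries the 'nest' attribute, the indirect call
// can be replaced with
//   call void @f(i8* nest %nval, i32 42)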
Instruction *
InstCombiner::transformCallThroughTrampoline(CallSite CS,
2144 IntrinsicInst *Tramp) {
2145 Value *Callee = CS.getCalledValue();
2146 PointerType *PTy = cast<PointerType>(Callee->getType());
2147 FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
2148 const AttributeSet &Attrs = CS.getAttributes();
2150 // If the call already has the 'nest' attribute somewhere then give up -
2151 // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return nullptr;

  assert(Tramp &&
         "transformCallThroughTrampoline called with incorrect CallSite.");
2158 Function *NestF =cast<Function>(Tramp->getArgOperand(1)->stripPointerCasts());
2159 PointerType *NestFPTy = cast<PointerType>(NestF->getType());
2160 FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());
2162 const AttributeSet &NestAttrs = NestF->getAttributes();
2163 if (!NestAttrs.isEmpty()) {
2164 unsigned NestIdx = 1;
2165 Type *NestTy = nullptr;
2166 AttributeSet NestAttr;
2168 // Look for a parameter marked with the 'nest' attribute.
2169 for (FunctionType::param_iterator I = NestFTy->param_begin(),
2170 E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
2171 if (NestAttrs.hasAttribute(NestIdx, Attribute::Nest)) {
2172 // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = NestAttrs.getParamAttributes(NestIdx);
        break;
      }

    if (NestTy) {
2179 Instruction *Caller = CS.getInstruction();
2180 std::vector<Value*> NewArgs;
2181 NewArgs.reserve(CS.arg_size() + 1);
2183 SmallVector<AttributeSet, 8> NewAttrs;
2184 NewAttrs.reserve(Attrs.getNumSlots() + 1);
2186 // Insert the nest argument into the call argument list, which may
2187 // mean appending it. Likewise for attributes.
2189 // Add any result attributes.
2190 if (Attrs.hasAttributes(AttributeSet::ReturnIndex))
2191 NewAttrs.push_back(AttributeSet::get(Caller->getContext(),
                                             Attrs.getRetAttributes()));

      {
        unsigned Idx = 1;
        CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
        do {
2198 if (Idx == NestIdx) {
2199 // Add the chain argument and attributes.
2200 Value *NestVal = Tramp->getArgOperand(2);
2201 if (NestVal->getType() != NestTy)
2202 NestVal = Builder->CreateBitCast(NestVal, NestTy, "nest");
2203 NewArgs.push_back(NestVal);
            NewAttrs.push_back(AttributeSet::get(Caller->getContext(),
                                                 NestAttr));
          }

          if (I == E)
            break;
2211 // Add the original argument and attributes.
2212 NewArgs.push_back(*I);
2213 AttributeSet Attr = Attrs.getParamAttributes(Idx);
2214 if (Attr.hasAttributes(Idx)) {
2215 AttrBuilder B(Attr, Idx);
2216 NewAttrs.push_back(AttributeSet::get(Caller->getContext(),
                                                 Idx + (Idx >= NestIdx), B));
          }

          ++Idx;
          ++I;
        } while (1);
      }
2224 // Add any function attributes.
2225 if (Attrs.hasAttributes(AttributeSet::FunctionIndex))
2226 NewAttrs.push_back(AttributeSet::get(FTy->getContext(),
2227 Attrs.getFnAttributes()));
2229 // The trampoline may have been bitcast to a bogus type (FTy).
2230 // Handle this by synthesizing a new function type, equal to FTy
2231 // with the chain parameter inserted.
2233 std::vector<Type*> NewTypes;
2234 NewTypes.reserve(FTy->getNumParams()+1);
2236 // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned Idx = 1;
        FunctionType::param_iterator I = FTy->param_begin(),
          E = FTy->param_end();

        do {
          if (Idx == NestIdx)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;
2251 // Add the original type.
          NewTypes.push_back(*I);

          ++Idx;
          ++I;
        } while (1);
      }
2258 // Replace the trampoline call with a direct call. Let the generic
2259 // code sort out any function type mismatches.
    FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                             FTy->isVarArg());
2262 Constant *NewCallee =
2263 NestF->getType() == PointerType::getUnqual(NewFTy) ?
2264 NestF : ConstantExpr::getBitCast(NestF,
2265 PointerType::getUnqual(NewFTy));
2266 const AttributeSet &NewPAL =
2267 AttributeSet::get(FTy->getContext(), NewAttrs);
2269 Instruction *NewCaller;
2270 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
2271 NewCaller = InvokeInst::Create(NewCallee,
                                     II->getNormalDest(), II->getUnwindDest(),
                                     NewArgs);
2274 cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
      cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
    } else {
2277 NewCaller = CallInst::Create(NewCallee, NewArgs);
2278 if (cast<CallInst>(Caller)->isTailCall())
2279 cast<CallInst>(NewCaller)->setTailCall();
2280 cast<CallInst>(NewCaller)->
2281 setCallingConv(cast<CallInst>(Caller)->getCallingConv());
      cast<CallInst>(NewCaller)->setAttributes(NewPAL);
    }

    return NewCaller;
  }
  }
2289 // Replace the trampoline call with a direct call. Since there is no 'nest'
2290 // parameter, there is no need to adjust the argument list. Let the generic
2291 // code sort out any function type mismatches.
2292 Constant *NewCallee =
2293 NestF->getType() == PTy ? NestF :
2294 ConstantExpr::getBitCast(NestF, PTy);
2295 CS.setCalledFunction(NewCallee);
  return CS.getInstruction();
}