1 //===- InstCombineCalls.cpp -----------------------------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the visitCall and visitInvoke functions.
12 //===----------------------------------------------------------------------===//
14 #include "InstCombineInternal.h"
15 #include "llvm/ADT/Statistic.h"
16 #include "llvm/Analysis/InstructionSimplify.h"
17 #include "llvm/Analysis/MemoryBuiltins.h"
18 #include "llvm/IR/CallSite.h"
19 #include "llvm/IR/Dominators.h"
20 #include "llvm/IR/PatternMatch.h"
21 #include "llvm/IR/Statepoint.h"
22 #include "llvm/Transforms/Utils/BuildLibCalls.h"
23 #include "llvm/Transforms/Utils/Local.h"
24 #include "llvm/Transforms/Utils/SimplifyLibCalls.h"
26 using namespace PatternMatch;
28 #define DEBUG_TYPE "instcombine"
30 STATISTIC(NumSimplified, "Number of library calls simplified");
32 /// getPromotedType - Return the specified type promoted as it would be to pass
33 /// through a va_arg area.
34 static Type *getPromotedType(Type *Ty) {
35 if (IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
36 if (ITy->getBitWidth() < 32)
37 return Type::getInt32Ty(Ty->getContext());
42 /// reduceToSingleValueType - Given an aggregate type which ultimately holds a
43 /// single scalar element, like {{{type}}} or [1 x type], return type.
44 static Type *reduceToSingleValueType(Type *T) {
45 while (!T->isSingleValueType()) {
46 if (StructType *STy = dyn_cast<StructType>(T)) {
47 if (STy->getNumElements() == 1)
48 T = STy->getElementType(0);
51 } else if (ArrayType *ATy = dyn_cast<ArrayType>(T)) {
52 if (ATy->getNumElements() == 1)
53 T = ATy->getElementType();
63 Instruction *InstCombiner::SimplifyMemTransfer(MemTransferInst *MI) {
64 unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), DL, MI, AC, DT);
65 unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), DL, MI, AC, DT);
66 unsigned CopyDestAlign = MI->getDestAlignment();
67 unsigned CopySrcAlign = MI->getSrcAlignment();
69 if (CopyDestAlign < DstAlign) {
70 MI->setDestAlignment(DstAlign);
73 if (CopySrcAlign < SrcAlign) {
74 MI->setSrcAlignment(SrcAlign);
78 // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
79 // a load/store pair.
80 ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getArgOperand(2));
81 if (!MemOpLength) return nullptr;
83 // Source and destination pointer types are always "i8*" for intrinsic. See
84 // if the size is something we can handle with a single primitive load/store.
85 // A single load+store correctly handles overlapping memory in the memmove
86 // case.
87 uint64_t Size = MemOpLength->getLimitedValue();
88 assert(Size && "0-sized memory transfer should already have been removed.");
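// (Size & (Size - 1)) is zero only when Size is a power of two, so together
// with the Size > 8 test this accepts exactly 1, 2, 4 and 8 byte copies.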
90 if (Size > 8 || (Size&(Size-1)))
91 return nullptr; // If not 1/2/4/8 bytes, exit.
93 // Use an integer load+store unless we can find something better.
95 cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
97 cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();
99 IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
100 Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
101 Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);
103 // Memcpy forces the use of i8* for the source and destination. That means
104 // that if you're using memcpy to move one double around, you'll get a cast
105 // from double* to i8*. We'd much rather use a double load+store than
106 // an i64 load+store here, because this improves the odds that the source or
107 // dest address will be promotable. See if we can find a better type than the
108 // integer datatype.
109 Value *StrippedDest = MI->getArgOperand(0)->stripPointerCasts();
110 MDNode *CopyMD = nullptr;
111 if (StrippedDest != MI->getArgOperand(0)) {
112 Type *SrcETy = cast<PointerType>(StrippedDest->getType())
114 if (SrcETy->isSized() && DL.getTypeStoreSize(SrcETy) == Size) {
115 // The SrcETy might be something like {{{double}}} or [1 x double]. Rip
116 // down through these levels if so.
117 SrcETy = reduceToSingleValueType(SrcETy);
119 if (SrcETy->isSingleValueType()) {
120 NewSrcPtrTy = PointerType::get(SrcETy, SrcAddrSp);
121 NewDstPtrTy = PointerType::get(SrcETy, DstAddrSp);
123 // If the memcpy has metadata describing the members, see if we can
124 // get the TBAA tag describing our copy.
125 if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa_struct)) {
126 if (M->getNumOperands() == 3 && M->getOperand(0) &&
127 mdconst::hasa<ConstantInt>(M->getOperand(0)) &&
128 mdconst::extract<ConstantInt>(M->getOperand(0))->isNullValue() &&
130 mdconst::hasa<ConstantInt>(M->getOperand(1)) &&
131 mdconst::extract<ConstantInt>(M->getOperand(1))->getValue() ==
133 M->getOperand(2) && isa<MDNode>(M->getOperand(2)))
134 CopyMD = cast<MDNode>(M->getOperand(2));
140 // If the memcpy/memmove provides better alignment info than we can
141 // analyze, use it.
142 SrcAlign = std::max(SrcAlign, CopySrcAlign);
143 DstAlign = std::max(DstAlign, CopyDestAlign);
145 Value *Src = Builder->CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
146 Value *Dest = Builder->CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
147 LoadInst *L = Builder->CreateLoad(Src, MI->isVolatile());
148 L->setAlignment(SrcAlign);
150 L->setMetadata(LLVMContext::MD_tbaa, CopyMD);
151 StoreInst *S = Builder->CreateStore(L, Dest, MI->isVolatile());
152 S->setAlignment(DstAlign);
154 S->setMetadata(LLVMContext::MD_tbaa, CopyMD);
156 // Set the size of the copy to 0; it will be deleted on the next iteration.
157 MI->setArgOperand(2, Constant::getNullValue(MemOpLength->getType()));
161 Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
162 unsigned Alignment = getKnownAlignment(MI->getDest(), DL, MI, AC, DT);
163 if (MI->getDestAlignment() < Alignment) {
164 MI->setDestAlignment(Alignment);
168 // Extract the length and alignment and fill if they are constant.
169 ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
170 ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
171 if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
173 uint64_t Len = LenC->getLimitedValue();
174 Alignment = MI->getDestAlignment();
175 assert(Len && "0-sized memset should already have been removed.");
177 // memset(s,c,n) -> store s, c (for n=1,2,4,8)
178 if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
179 Type *ITy = IntegerType::get(MI->getContext(), Len*8); // n=1 -> i8.
181 Value *Dest = MI->getDest();
182 unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
183 Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
184 Dest = Builder->CreateBitCast(Dest, NewDstPtrTy);
186 // Alignment 0 is identity for alignment 1 for memset, but not store.
187 if (Alignment == 0) Alignment = 1;
189 // Extract the fill value and store.
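// Multiplying the 8-bit fill value by 0x0101010101010101 replicates it into
// every byte; e.g. a fill byte of 0xAB with Len == 4 stores the i32 constant
// 0xABABABAB.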
190 uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
191 StoreInst *S = Builder->CreateStore(ConstantInt::get(ITy, Fill), Dest,
193 S->setAlignment(Alignment);
195 // Set the length of the memset to 0; it will be deleted on the next iteration.
196 MI->setLength(Constant::getNullValue(LenC->getType()));
203 static Value *SimplifyX86immshift(const IntrinsicInst &II,
204 InstCombiner::BuilderTy &Builder) {
205 bool LogicalShift = false;
206 bool ShiftLeft = false;
208 switch (II.getIntrinsicID()) {
211 case Intrinsic::x86_sse2_psra_d:
212 case Intrinsic::x86_sse2_psra_w:
213 case Intrinsic::x86_sse2_psrai_d:
214 case Intrinsic::x86_sse2_psrai_w:
215 case Intrinsic::x86_avx2_psra_d:
216 case Intrinsic::x86_avx2_psra_w:
217 case Intrinsic::x86_avx2_psrai_d:
218 case Intrinsic::x86_avx2_psrai_w:
219 LogicalShift = false; ShiftLeft = false;
221 case Intrinsic::x86_sse2_psrl_d:
222 case Intrinsic::x86_sse2_psrl_q:
223 case Intrinsic::x86_sse2_psrl_w:
224 case Intrinsic::x86_sse2_psrli_d:
225 case Intrinsic::x86_sse2_psrli_q:
226 case Intrinsic::x86_sse2_psrli_w:
227 case Intrinsic::x86_avx2_psrl_d:
228 case Intrinsic::x86_avx2_psrl_q:
229 case Intrinsic::x86_avx2_psrl_w:
230 case Intrinsic::x86_avx2_psrli_d:
231 case Intrinsic::x86_avx2_psrli_q:
232 case Intrinsic::x86_avx2_psrli_w:
233 LogicalShift = true; ShiftLeft = false;
235 case Intrinsic::x86_sse2_psll_d:
236 case Intrinsic::x86_sse2_psll_q:
237 case Intrinsic::x86_sse2_psll_w:
238 case Intrinsic::x86_sse2_pslli_d:
239 case Intrinsic::x86_sse2_pslli_q:
240 case Intrinsic::x86_sse2_pslli_w:
241 case Intrinsic::x86_avx2_psll_d:
242 case Intrinsic::x86_avx2_psll_q:
243 case Intrinsic::x86_avx2_psll_w:
244 case Intrinsic::x86_avx2_pslli_d:
245 case Intrinsic::x86_avx2_pslli_q:
246 case Intrinsic::x86_avx2_pslli_w:
247 LogicalShift = true; ShiftLeft = true;
250 assert((LogicalShift || !ShiftLeft) && "Only logical shifts can shift left");
252 // Simplify if count is constant.
253 auto Arg1 = II.getArgOperand(1);
254 auto CAZ = dyn_cast<ConstantAggregateZero>(Arg1);
255 auto CDV = dyn_cast<ConstantDataVector>(Arg1);
256 auto CInt = dyn_cast<ConstantInt>(Arg1);
257 if (!CAZ && !CDV && !CInt)
262 // SSE2/AVX2 uses only the first 64 bits of the 128-bit vector
263 // operand to compute the shift amount.
264 auto VT = cast<VectorType>(CDV->getType());
265 unsigned BitWidth = VT->getElementType()->getPrimitiveSizeInBits();
266 assert((64 % BitWidth) == 0 && "Unexpected packed shift size");
267 unsigned NumSubElts = 64 / BitWidth;
269 // Concatenate the sub-elements to create the 64-bit value.
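// For a <8 x i16> count operand, for example, lanes 0-3 are concatenated with
// lane 3 ending up in the most significant bits of the 64-bit amount.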
270 for (unsigned i = 0; i != NumSubElts; ++i) {
271 unsigned SubEltIdx = (NumSubElts - 1) - i;
272 auto SubElt = cast<ConstantInt>(CDV->getElementAsConstant(SubEltIdx));
273 Count = Count.shl(BitWidth);
274 Count |= SubElt->getValue().zextOrTrunc(64);
278 Count = CInt->getValue();
280 auto Vec = II.getArgOperand(0);
281 auto VT = cast<VectorType>(Vec->getType());
282 auto SVT = VT->getElementType();
283 unsigned VWidth = VT->getNumElements();
284 unsigned BitWidth = SVT->getPrimitiveSizeInBits();
286 // If the shift amount is zero, just return the original value.
290 // Handle cases when Shift >= BitWidth.
291 if (Count.uge(BitWidth)) {
292 // If LogicalShift - just return zero.
294 return ConstantAggregateZero::get(VT);
296 // If ArithmeticShift - clamp Shift to (BitWidth - 1).
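// An arithmetic shift by BitWidth or more gives the same result as a shift by
// BitWidth - 1: every result bit is a copy of the sign bit.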
297 Count = APInt(64, BitWidth - 1);
300 // Get a constant vector of the same type as the first operand.
301 auto ShiftAmt = ConstantInt::get(SVT, Count.zextOrTrunc(BitWidth));
302 auto ShiftVec = Builder.CreateVectorSplat(VWidth, ShiftAmt);
305 return Builder.CreateShl(Vec, ShiftVec);
308 return Builder.CreateLShr(Vec, ShiftVec);
310 return Builder.CreateAShr(Vec, ShiftVec);
313 static Value *SimplifyX86extend(const IntrinsicInst &II,
314 InstCombiner::BuilderTy &Builder,
316 VectorType *SrcTy = cast<VectorType>(II.getArgOperand(0)->getType());
317 VectorType *DstTy = cast<VectorType>(II.getType());
318 unsigned NumDstElts = DstTy->getNumElements();
320 // Extract a subvector of the first NumDstElts lanes and sign/zero extend.
321 SmallVector<int, 8> ShuffleMask;
322 for (int i = 0; i != (int)NumDstElts; ++i)
323 ShuffleMask.push_back(i);
325 Value *SV = Builder.CreateShuffleVector(II.getArgOperand(0),
326 UndefValue::get(SrcTy), ShuffleMask);
327 return SignExtend ? Builder.CreateSExt(SV, DstTy)
328 : Builder.CreateZExt(SV, DstTy);
331 static Value *SimplifyX86insertps(const IntrinsicInst &II,
332 InstCombiner::BuilderTy &Builder) {
333 if (auto *CInt = dyn_cast<ConstantInt>(II.getArgOperand(2))) {
334 VectorType *VecTy = cast<VectorType>(II.getType());
335 assert(VecTy->getNumElements() == 4 && "insertps with wrong vector type");
337 // The immediate permute control byte looks like this:
338 // [3:0] - zero mask for each 32-bit lane
339 // [5:4] - select one 32-bit destination lane
340 // [7:6] - select one 32-bit source lane
342 uint8_t Imm = CInt->getZExtValue();
343 uint8_t ZMask = Imm & 0xf;
344 uint8_t DestLane = (Imm >> 4) & 0x3;
345 uint8_t SourceLane = (Imm >> 6) & 0x3;
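// For example, an immediate of 0x30 (ZMask = 0, DestLane = 3, SourceLane = 0)
// copies element 0 of the second source into lane 3 of the first source.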
347 ConstantAggregateZero *ZeroVector = ConstantAggregateZero::get(VecTy);
349 // If all zero mask bits are set, this was just a weird way to
350 // generate a zero vector.
354 // Initialize by passing all of the first source bits through.
355 int ShuffleMask[4] = { 0, 1, 2, 3 };
357 // We may replace the second operand with the zero vector.
358 Value *V1 = II.getArgOperand(1);
361 // If the zero mask is being used with a single input or the zero mask
362 // overrides the destination lane, this is a shuffle with the zero vector.
363 if ((II.getArgOperand(0) == II.getArgOperand(1)) ||
364 (ZMask & (1 << DestLane))) {
366 // We may still move 32 bits of the first source vector from one lane
367 // to another.
368 ShuffleMask[DestLane] = SourceLane;
369 // The zero mask may override the previous insert operation.
370 for (unsigned i = 0; i < 4; ++i)
371 if ((ZMask >> i) & 0x1)
372 ShuffleMask[i] = i + 4;
374 // TODO: Model this case as 2 shuffles or a 'logical and' plus shuffle?
378 // Replace the selected destination lane with the selected source lane.
379 ShuffleMask[DestLane] = SourceLane + 4;
382 return Builder.CreateShuffleVector(II.getArgOperand(0), V1, ShuffleMask);
387 /// Attempt to simplify SSE4A EXTRQ/EXTRQI instructions using constant folding
388 /// or conversion to a shuffle vector.
389 static Value *SimplifyX86extrq(IntrinsicInst &II, Value *Op0,
390 ConstantInt *CILength, ConstantInt *CIIndex,
391 InstCombiner::BuilderTy &Builder) {
392 auto LowConstantHighUndef = [&](uint64_t Val) {
393 Type *IntTy64 = Type::getInt64Ty(II.getContext());
394 Constant *Args[] = {ConstantInt::get(IntTy64, Val),
395 UndefValue::get(IntTy64)};
396 return ConstantVector::get(Args);
399 // See if we're dealing with constant values.
400 Constant *C0 = dyn_cast<Constant>(Op0);
402 C0 ? dyn_cast<ConstantInt>(C0->getAggregateElement((unsigned)0))
405 // Attempt to constant fold.
406 if (CILength && CIIndex) {
407 // From AMD documentation: "The bit index and field length are each six
408 // bits in length other bits of the field are ignored."
409 APInt APIndex = CIIndex->getValue().zextOrTrunc(6);
410 APInt APLength = CILength->getValue().zextOrTrunc(6);
412 unsigned Index = APIndex.getZExtValue();
414 // From AMD documentation: "a value of zero in the field length is
415 // defined as length of 64".
416 unsigned Length = APLength == 0 ? 64 : APLength.getZExtValue();
418 // From AMD documentation: "If the sum of the bit index + length field
419 // is greater than 64, the results are undefined".
420 unsigned End = Index + Length;
422 // Note that both field index and field length are 8-bit quantities.
423 // Since variables 'Index' and 'Length' are unsigned values
424 // obtained from zero-extending field index and field length
425 // respectively, their sum should never wrap around.
427 return UndefValue::get(II.getType());
429 // If we are extracting whole bytes, we can convert this to a shuffle.
430 // Lowering can recognize EXTRQI shuffle masks.
431 if ((Length % 8) == 0 && (Index % 8) == 0) {
432 // Convert bit indices to byte indices.
433 Length /= 8;
434 Index /= 8;
436 Type *IntTy8 = Type::getInt8Ty(II.getContext());
437 Type *IntTy32 = Type::getInt32Ty(II.getContext());
438 VectorType *ShufTy = VectorType::get(IntTy8, 16);
440 SmallVector<Constant *, 16> ShuffleMask;
441 for (int i = 0; i != (int)Length; ++i)
442 ShuffleMask.push_back(
443 Constant::getIntegerValue(IntTy32, APInt(32, i + Index)));
444 for (int i = Length; i != 8; ++i)
445 ShuffleMask.push_back(
446 Constant::getIntegerValue(IntTy32, APInt(32, i + 16)));
447 for (int i = 8; i != 16; ++i)
448 ShuffleMask.push_back(UndefValue::get(IntTy32));
450 Value *SV = Builder.CreateShuffleVector(
451 Builder.CreateBitCast(Op0, ShufTy),
452 ConstantAggregateZero::get(ShufTy), ConstantVector::get(ShuffleMask));
453 return Builder.CreateBitCast(SV, II.getType());
456 // Constant Fold - shift Index'th bit to lowest position and mask off
457 // upper bits.
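// For example, with Index == 8 and Length == 16, extracting from the constant
// 0x1122334455667788 yields the vector {0x6677, undef}.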
459 APInt Elt = CI0->getValue();
460 Elt = Elt.lshr(Index).zextOrTrunc(Length);
461 return LowConstantHighUndef(Elt.getZExtValue());
464 // If we were an EXTRQ call, we'll save registers if we convert to EXTRQI.
465 if (II.getIntrinsicID() == Intrinsic::x86_sse4a_extrq) {
466 Value *Args[] = {Op0, CILength, CIIndex};
467 Module *M = II.getParent()->getParent()->getParent();
468 Value *F = Intrinsic::getDeclaration(M, Intrinsic::x86_sse4a_extrqi);
469 return Builder.CreateCall(F, Args);
473 // Constant Fold - extraction from zero is always {zero, undef}.
474 if (CI0 && CI0->equalsInt(0))
475 return LowConstantHighUndef(0);
480 /// Attempt to simplify SSE4A INSERTQ/INSERTQI instructions using constant
481 /// folding or conversion to a shuffle vector.
482 static Value *SimplifyX86insertq(IntrinsicInst &II, Value *Op0, Value *Op1,
483 APInt APLength, APInt APIndex,
484 InstCombiner::BuilderTy &Builder) {
486 // From AMD documentation: "The bit index and field length are each six bits
487 // in length other bits of the field are ignored."
488 APIndex = APIndex.zextOrTrunc(6);
489 APLength = APLength.zextOrTrunc(6);
491 // Attempt to constant fold.
492 unsigned Index = APIndex.getZExtValue();
494 // From AMD documentation: "a value of zero in the field length is
495 // defined as length of 64".
496 unsigned Length = APLength == 0 ? 64 : APLength.getZExtValue();
498 // From AMD documentation: "If the sum of the bit index + length field
499 // is greater than 64, the results are undefined".
500 unsigned End = Index + Length;
502 // Note that both field index and field length are 8-bit quantities.
503 // Since variables 'Index' and 'Length' are unsigned values
504 // obtained from zero-extending field index and field length
505 // respectively, their sum should never wrap around.
507 return UndefValue::get(II.getType());
509 // If we are inserting whole bytes, we can convert this to a shuffle.
510 // Lowering can recognize INSERTQI shuffle masks.
511 if ((Length % 8) == 0 && (Index % 8) == 0) {
512 // Convert bit indices to byte indices.
513 Length /= 8;
514 Index /= 8;
516 Type *IntTy8 = Type::getInt8Ty(II.getContext());
517 Type *IntTy32 = Type::getInt32Ty(II.getContext());
518 VectorType *ShufTy = VectorType::get(IntTy8, 16);
520 SmallVector<Constant *, 16> ShuffleMask;
521 for (int i = 0; i != (int)Index; ++i)
522 ShuffleMask.push_back(Constant::getIntegerValue(IntTy32, APInt(32, i)));
523 for (int i = 0; i != (int)Length; ++i)
524 ShuffleMask.push_back(
525 Constant::getIntegerValue(IntTy32, APInt(32, i + 16)));
526 for (int i = Index + Length; i != 8; ++i)
527 ShuffleMask.push_back(Constant::getIntegerValue(IntTy32, APInt(32, i)));
528 for (int i = 8; i != 16; ++i)
529 ShuffleMask.push_back(UndefValue::get(IntTy32));
531 Value *SV = Builder.CreateShuffleVector(Builder.CreateBitCast(Op0, ShufTy),
532 Builder.CreateBitCast(Op1, ShufTy),
533 ConstantVector::get(ShuffleMask));
534 return Builder.CreateBitCast(SV, II.getType());
537 // See if we're dealing with constant values.
538 Constant *C0 = dyn_cast<Constant>(Op0);
539 Constant *C1 = dyn_cast<Constant>(Op1);
541 C0 ? dyn_cast<ConstantInt>(C0->getAggregateElement((unsigned)0))
544 C1 ? dyn_cast<ConstantInt>(C1->getAggregateElement((unsigned)0))
547 // Constant Fold - insert bottom Length bits starting at the Index'th bit.
549 APInt V00 = CI00->getValue();
550 APInt V10 = CI10->getValue();
551 APInt Mask = APInt::getLowBitsSet(64, Length).shl(Index);
552 V00 = V00 & ~Mask;
553 V10 = V10.zextOrTrunc(Length).zextOrTrunc(64).shl(Index);
554 APInt Val = V00 | V10;
555 Type *IntTy64 = Type::getInt64Ty(II.getContext());
556 Constant *Args[] = {ConstantInt::get(IntTy64, Val.getZExtValue()),
557 UndefValue::get(IntTy64)};
558 return ConstantVector::get(Args);
561 // If we were an INSERTQ call, we'll save demanded elements if we convert to
562 // INSERTQI.
563 if (II.getIntrinsicID() == Intrinsic::x86_sse4a_insertq) {
564 Type *IntTy8 = Type::getInt8Ty(II.getContext());
565 Constant *CILength = ConstantInt::get(IntTy8, Length, false);
566 Constant *CIIndex = ConstantInt::get(IntTy8, Index, false);
568 Value *Args[] = {Op0, Op1, CILength, CIIndex};
569 Module *M = II.getParent()->getParent()->getParent();
570 Value *F = Intrinsic::getDeclaration(M, Intrinsic::x86_sse4a_insertqi);
571 return Builder.CreateCall(F, Args);
577 /// The shuffle mask for a perm2*128 selects any two halves of two 256-bit
578 /// source vectors, unless a zero bit is set. If a zero bit is set,
579 /// then ignore that half of the mask and clear that half of the vector.
580 static Value *SimplifyX86vperm2(const IntrinsicInst &II,
581 InstCombiner::BuilderTy &Builder) {
582 if (auto *CInt = dyn_cast<ConstantInt>(II.getArgOperand(2))) {
583 VectorType *VecTy = cast<VectorType>(II.getType());
584 ConstantAggregateZero *ZeroVector = ConstantAggregateZero::get(VecTy);
586 // The immediate permute control byte looks like this:
587 // [1:0] - select 128 bits from sources for low half of destination
588 // [2]   - ignore
589 // [3]   - zero low half of destination
590 // [5:4] - select 128 bits from sources for high half of destination
591 // [6]   - ignore
592 // [7]   - zero high half of destination
594 uint8_t Imm = CInt->getZExtValue();
596 bool LowHalfZero = Imm & 0x08;
597 bool HighHalfZero = Imm & 0x80;
599 // If both zero mask bits are set, this was just a weird way to
600 // generate a zero vector.
601 if (LowHalfZero && HighHalfZero)
604 // If 0 or 1 zero mask bits are set, this is a simple shuffle.
605 unsigned NumElts = VecTy->getNumElements();
606 unsigned HalfSize = NumElts / 2;
607 SmallVector<int, 8> ShuffleMask(NumElts);
609 // The high bit of the selection field chooses the 1st or 2nd operand.
610 bool LowInputSelect = Imm & 0x02;
611 bool HighInputSelect = Imm & 0x20;
613 // The low bit of the selection field chooses the low or high half
614 // of the selected operand.
615 bool LowHalfSelect = Imm & 0x01;
616 bool HighHalfSelect = Imm & 0x10;
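// For example, an immediate of 0x31 (no zero bits) selects the high half of
// operand 0 for the low half of the result and the high half of operand 1 for
// the high half of the result.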
618 // Determine which operand(s) are actually in use for this instruction.
619 Value *V0 = LowInputSelect ? II.getArgOperand(1) : II.getArgOperand(0);
620 Value *V1 = HighInputSelect ? II.getArgOperand(1) : II.getArgOperand(0);
622 // If needed, replace operands based on zero mask.
623 V0 = LowHalfZero ? ZeroVector : V0;
624 V1 = HighHalfZero ? ZeroVector : V1;
626 // Permute low half of result.
627 unsigned StartIndex = LowHalfSelect ? HalfSize : 0;
628 for (unsigned i = 0; i < HalfSize; ++i)
629 ShuffleMask[i] = StartIndex + i;
631 // Permute high half of result.
632 StartIndex = HighHalfSelect ? HalfSize : 0;
633 StartIndex += NumElts;
634 for (unsigned i = 0; i < HalfSize; ++i)
635 ShuffleMask[i + HalfSize] = StartIndex + i;
637 return Builder.CreateShuffleVector(V0, V1, ShuffleMask);
642 /// Decode XOP integer vector comparison intrinsics.
643 static Value *SimplifyX86vpcom(const IntrinsicInst &II,
644 InstCombiner::BuilderTy &Builder, bool IsSigned) {
645 if (auto *CInt = dyn_cast<ConstantInt>(II.getArgOperand(2))) {
646 uint64_t Imm = CInt->getZExtValue() & 0x7;
647 VectorType *VecTy = cast<VectorType>(II.getType());
648 CmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
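// The immediate encodes the predicate: 0 = LT, 1 = LE, 2 = GT, 3 = GE,
// 4 = EQ, 5 = NE, 6 = always false, 7 = always true.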
652 Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
655 Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
658 Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
661 Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
664 Pred = ICmpInst::ICMP_EQ; break;
666 Pred = ICmpInst::ICMP_NE; break;
668 return ConstantInt::getSigned(VecTy, 0); // FALSE
670 return ConstantInt::getSigned(VecTy, -1); // TRUE
673 if (Value *Cmp = Builder.CreateICmp(Pred, II.getArgOperand(0), II.getArgOperand(1)))
674 return Builder.CreateSExtOrTrunc(Cmp, VecTy);
679 /// visitCallInst - CallInst simplification. This mostly only handles folding
680 /// of intrinsic instructions. For normal calls, it allows visitCallSite to do
681 /// the heavy lifting.
683 Instruction *InstCombiner::visitCallInst(CallInst &CI) {
684 auto Args = CI.arg_operands();
685 if (Value *V = SimplifyCall(CI.getCalledValue(), Args.begin(), Args.end(), DL,
687 return ReplaceInstUsesWith(CI, V);
689 if (isFreeCall(&CI, TLI))
690 return visitFree(CI);
692 // If the caller function is nounwind, mark the call as nounwind, even if the
693 // callee isn't.
694 if (CI.getParent()->getParent()->doesNotThrow() &&
695 !CI.doesNotThrow()) {
696 CI.setDoesNotThrow();
700 IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
701 if (!II) return visitCallSite(&CI);
703 // Intrinsics cannot occur in an invoke, so handle them here instead of in
704 // visitCallSite.
705 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
706 bool Changed = false;
708 // memmove/cpy/set of zero bytes is a noop.
709 if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
710 if (NumBytes->isNullValue())
711 return EraseInstFromFunction(CI);
713 if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
714 if (CI->getZExtValue() == 1) {
715 // Replace the instruction with just byte operations. We would
716 // transform other cases to loads/stores, but we don't know if
717 // alignment is sufficient.
721 // No other transformations apply to volatile transfers.
722 if (MI->isVolatile())
725 // If we have a memmove and the source operation is a constant global,
726 // then the source and dest pointers can't alias, so we can change this
727 // into a call to memcpy.
728 if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
729 if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
730 if (GVSrc->isConstant()) {
731 Module *M = CI.getParent()->getParent()->getParent();
732 Intrinsic::ID MemCpyID = Intrinsic::memcpy;
733 Type *Tys[3] = { CI.getArgOperand(0)->getType(),
734 CI.getArgOperand(1)->getType(),
735 CI.getArgOperand(2)->getType() };
736 CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys));
741 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
742 // memmove(x,x,size) -> noop.
743 if (MTI->getSource() == MTI->getDest())
744 return EraseInstFromFunction(CI);
747 // If we can determine a pointer alignment that is bigger than currently
748 // set, update the alignment.
749 if (auto *MTI = dyn_cast<MemTransferInst>(MI)) {
750 if (Instruction *I = SimplifyMemTransfer(MTI))
752 } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
753 if (Instruction *I = SimplifyMemSet(MSI))
757 if (Changed) return II;
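// Helper that asks SimplifyDemandedVectorElts to rewrite Op given that only
// its low DemandedWidth lanes are read; returns the simplified value or null.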
760 auto SimplifyDemandedVectorEltsLow = [this](Value *Op, unsigned Width, unsigned DemandedWidth)
762 APInt UndefElts(Width, 0);
763 APInt DemandedElts = APInt::getLowBitsSet(Width, DemandedWidth);
764 return SimplifyDemandedVectorElts(Op, DemandedElts, UndefElts);
767 switch (II->getIntrinsicID()) {
769 case Intrinsic::objectsize: {
771 if (getObjectSize(II->getArgOperand(0), Size, DL, TLI))
772 return ReplaceInstUsesWith(CI, ConstantInt::get(CI.getType(), Size));
775 case Intrinsic::bswap: {
776 Value *IIOperand = II->getArgOperand(0);
779 // bswap(bswap(x)) -> x
780 if (match(IIOperand, m_BSwap(m_Value(X))))
781 return ReplaceInstUsesWith(CI, X);
783 // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
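// For an i32 value truncated to i16, c is 16, so the result is simply the top
// 16 bits of x.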
784 if (match(IIOperand, m_Trunc(m_BSwap(m_Value(X))))) {
785 unsigned C = X->getType()->getPrimitiveSizeInBits() -
786 IIOperand->getType()->getPrimitiveSizeInBits();
787 Value *CV = ConstantInt::get(X->getType(), C);
788 Value *V = Builder->CreateLShr(X, CV);
789 return new TruncInst(V, IIOperand->getType());
794 case Intrinsic::bitreverse: {
795 Value *IIOperand = II->getArgOperand(0);
798 // bitreverse(bitreverse(x)) -> x
799 if (match(IIOperand, m_Intrinsic<Intrinsic::bitreverse>(m_Value(X))))
800 return ReplaceInstUsesWith(CI, X);
804 case Intrinsic::powi:
805 if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
808 return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
811 return ReplaceInstUsesWith(CI, II->getArgOperand(0));
812 // powi(x, -1) -> 1/x
813 if (Power->isAllOnesValue())
814 return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
815 II->getArgOperand(0));
818 case Intrinsic::cttz: {
819 // If all bits below the first known one are known zero,
820 // this value is constant.
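// For example, if the low three bits are known zero and bit 3 is known one,
// cttz folds to the constant 3.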
821 IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
822 // FIXME: Try to simplify vectors of integers.
824 uint32_t BitWidth = IT->getBitWidth();
825 APInt KnownZero(BitWidth, 0);
826 APInt KnownOne(BitWidth, 0);
827 computeKnownBits(II->getArgOperand(0), KnownZero, KnownOne, 0, II);
828 unsigned TrailingZeros = KnownOne.countTrailingZeros();
829 APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
830 if ((Mask & KnownZero) == Mask)
831 return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
832 APInt(BitWidth, TrailingZeros)));
836 case Intrinsic::ctlz: {
837 // If all bits above the first known one are known zero,
838 // this value is constant.
839 IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
840 // FIXME: Try to simplify vectors of integers.
842 uint32_t BitWidth = IT->getBitWidth();
843 APInt KnownZero(BitWidth, 0);
844 APInt KnownOne(BitWidth, 0);
845 computeKnownBits(II->getArgOperand(0), KnownZero, KnownOne, 0, II);
846 unsigned LeadingZeros = KnownOne.countLeadingZeros();
847 APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
848 if ((Mask & KnownZero) == Mask)
849 return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
850 APInt(BitWidth, LeadingZeros)));
855 case Intrinsic::uadd_with_overflow:
856 case Intrinsic::sadd_with_overflow:
857 case Intrinsic::umul_with_overflow:
858 case Intrinsic::smul_with_overflow:
859 if (isa<Constant>(II->getArgOperand(0)) &&
860 !isa<Constant>(II->getArgOperand(1))) {
861 // Canonicalize constants into the RHS.
862 Value *LHS = II->getArgOperand(0);
863 II->setArgOperand(0, II->getArgOperand(1));
864 II->setArgOperand(1, LHS);
869 case Intrinsic::usub_with_overflow:
870 case Intrinsic::ssub_with_overflow: {
871 OverflowCheckFlavor OCF =
872 IntrinsicIDToOverflowCheckFlavor(II->getIntrinsicID());
873 assert(OCF != OCF_INVALID && "unexpected!");
875 Value *OperationResult = nullptr;
876 Constant *OverflowResult = nullptr;
877 if (OptimizeOverflowCheck(OCF, II->getArgOperand(0), II->getArgOperand(1),
878 *II, OperationResult, OverflowResult))
879 return CreateOverflowTuple(II, OperationResult, OverflowResult);
884 case Intrinsic::minnum:
885 case Intrinsic::maxnum: {
886 Value *Arg0 = II->getArgOperand(0);
887 Value *Arg1 = II->getArgOperand(1);
891 return ReplaceInstUsesWith(CI, Arg0);
893 const ConstantFP *C0 = dyn_cast<ConstantFP>(Arg0);
894 const ConstantFP *C1 = dyn_cast<ConstantFP>(Arg1);
896 // Canonicalize constants into the RHS.
898 II->setArgOperand(0, Arg1);
899 II->setArgOperand(1, Arg0);
904 if (C1 && C1->isNaN())
905 return ReplaceInstUsesWith(CI, Arg0);
907 // Folding an undef operand to the other operand is correct: if the undef were
908 // NaN we would return the other value anyway, and the result can only be NaN
909 // when both operands are NaN.
910 // fmin(undef, x) -> x
911 if (isa<UndefValue>(Arg0))
912 return ReplaceInstUsesWith(CI, Arg1);
914 // fmin(x, undef) -> x
915 if (isa<UndefValue>(Arg1))
916 return ReplaceInstUsesWith(CI, Arg0);
920 if (II->getIntrinsicID() == Intrinsic::minnum) {
921 // fmin(x, fmin(x, y)) -> fmin(x, y)
922 // fmin(y, fmin(x, y)) -> fmin(x, y)
923 if (match(Arg1, m_FMin(m_Value(X), m_Value(Y)))) {
924 if (Arg0 == X || Arg0 == Y)
925 return ReplaceInstUsesWith(CI, Arg1);
928 // fmin(fmin(x, y), x) -> fmin(x, y)
929 // fmin(fmin(x, y), y) -> fmin(x, y)
930 if (match(Arg0, m_FMin(m_Value(X), m_Value(Y)))) {
931 if (Arg1 == X || Arg1 == Y)
932 return ReplaceInstUsesWith(CI, Arg0);
935 // TODO: fmin(nnan x, inf) -> x
936 // TODO: fmin(nnan ninf x, flt_max) -> x
937 if (C1 && C1->isInfinity()) {
938 // fmin(x, -inf) -> -inf
939 if (C1->isNegative())
940 return ReplaceInstUsesWith(CI, Arg1);
943 assert(II->getIntrinsicID() == Intrinsic::maxnum);
944 // fmax(x, fmax(x, y)) -> fmax(x, y)
945 // fmax(y, fmax(x, y)) -> fmax(x, y)
946 if (match(Arg1, m_FMax(m_Value(X), m_Value(Y)))) {
947 if (Arg0 == X || Arg0 == Y)
948 return ReplaceInstUsesWith(CI, Arg1);
951 // fmax(fmax(x, y), x) -> fmax(x, y)
952 // fmax(fmax(x, y), y) -> fmax(x, y)
953 if (match(Arg0, m_FMax(m_Value(X), m_Value(Y)))) {
954 if (Arg1 == X || Arg1 == Y)
955 return ReplaceInstUsesWith(CI, Arg0);
958 // TODO: fmax(nnan x, -inf) -> x
959 // TODO: fmax(nnan ninf x, -flt_max) -> x
960 if (C1 && C1->isInfinity()) {
961 // fmax(x, inf) -> inf
962 if (!C1->isNegative())
963 return ReplaceInstUsesWith(CI, Arg1);
968 case Intrinsic::ppc_altivec_lvx:
969 case Intrinsic::ppc_altivec_lvxl:
970 // Turn PPC lvx -> load if the pointer is known aligned.
971 if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, AC, DT) >=
973 Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
974 PointerType::getUnqual(II->getType()));
975 return new LoadInst(Ptr);
978 case Intrinsic::ppc_vsx_lxvw4x:
979 case Intrinsic::ppc_vsx_lxvd2x: {
980 // Turn PPC VSX loads into normal loads.
981 Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
982 PointerType::getUnqual(II->getType()));
983 return new LoadInst(Ptr, Twine(""), false, 1);
985 case Intrinsic::ppc_altivec_stvx:
986 case Intrinsic::ppc_altivec_stvxl:
987 // Turn stvx -> store if the pointer is known aligned.
988 if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, AC, DT) >=
991 PointerType::getUnqual(II->getArgOperand(0)->getType());
992 Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
993 return new StoreInst(II->getArgOperand(0), Ptr);
996 case Intrinsic::ppc_vsx_stxvw4x:
997 case Intrinsic::ppc_vsx_stxvd2x: {
998 // Turn PPC VSX stores into normal stores.
999 Type *OpPtrTy = PointerType::getUnqual(II->getArgOperand(0)->getType());
1000 Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
1001 return new StoreInst(II->getArgOperand(0), Ptr, false, 1);
1003 case Intrinsic::ppc_qpx_qvlfs:
1004 // Turn PPC QPX qvlfs -> load if the pointer is known aligned.
1005 if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, AC, DT) >=
1007 Type *VTy = VectorType::get(Builder->getFloatTy(),
1008 II->getType()->getVectorNumElements());
1009 Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
1010 PointerType::getUnqual(VTy));
1011 Value *Load = Builder->CreateLoad(Ptr);
1012 return new FPExtInst(Load, II->getType());
1015 case Intrinsic::ppc_qpx_qvlfd:
1016 // Turn PPC QPX qvlfd -> load if the pointer is known aligned.
1017 if (getOrEnforceKnownAlignment(II->getArgOperand(0), 32, DL, II, AC, DT) >=
1019 Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
1020 PointerType::getUnqual(II->getType()));
1021 return new LoadInst(Ptr);
1024 case Intrinsic::ppc_qpx_qvstfs:
1025 // Turn PPC QPX qvstfs -> store if the pointer is known aligned.
1026 if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, AC, DT) >=
1028 Type *VTy = VectorType::get(Builder->getFloatTy(),
1029 II->getArgOperand(0)->getType()->getVectorNumElements());
1030 Value *TOp = Builder->CreateFPTrunc(II->getArgOperand(0), VTy);
1031 Type *OpPtrTy = PointerType::getUnqual(VTy);
1032 Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
1033 return new StoreInst(TOp, Ptr);
1036 case Intrinsic::ppc_qpx_qvstfd:
1037 // Turn PPC QPX qvstfd -> store if the pointer is known aligned.
1038 if (getOrEnforceKnownAlignment(II->getArgOperand(1), 32, DL, II, AC, DT) >=
1041 PointerType::getUnqual(II->getArgOperand(0)->getType());
1042 Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
1043 return new StoreInst(II->getArgOperand(0), Ptr);
1047 case Intrinsic::x86_sse_storeu_ps:
1048 case Intrinsic::x86_sse2_storeu_pd:
1049 case Intrinsic::x86_sse2_storeu_dq:
1050 // Turn X86 storeu -> store if the pointer is known aligned.
1051 if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, AC, DT) >=
1054 PointerType::getUnqual(II->getArgOperand(1)->getType());
1055 Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), OpPtrTy);
1056 return new StoreInst(II->getArgOperand(1), Ptr);
1060 case Intrinsic::x86_vcvtph2ps_128:
1061 case Intrinsic::x86_vcvtph2ps_256: {
1062 auto Arg = II->getArgOperand(0);
1063 auto ArgType = cast<VectorType>(Arg->getType());
1064 auto RetType = cast<VectorType>(II->getType());
1065 unsigned ArgWidth = ArgType->getNumElements();
1066 unsigned RetWidth = RetType->getNumElements();
1067 assert(RetWidth <= ArgWidth && "Unexpected input/return vector widths");
1068 assert(ArgType->isIntOrIntVectorTy() &&
1069 ArgType->getScalarSizeInBits() == 16 &&
1070 "CVTPH2PS input type should be 16-bit integer vector");
1071 assert(RetType->getScalarType()->isFloatTy() &&
1072 "CVTPH2PS output type should be 32-bit float vector");
1074 // Constant folding: Convert to a generic half-to-single conversion.
1075 if (isa<ConstantAggregateZero>(Arg))
1076 return ReplaceInstUsesWith(*II, ConstantAggregateZero::get(RetType));
1078 if (isa<ConstantDataVector>(Arg)) {
1079 auto VectorHalfAsShorts = Arg;
1080 if (RetWidth < ArgWidth) {
1081 SmallVector<int, 8> SubVecMask;
1082 for (unsigned i = 0; i != RetWidth; ++i)
1083 SubVecMask.push_back((int)i);
1084 VectorHalfAsShorts = Builder->CreateShuffleVector(
1085 Arg, UndefValue::get(ArgType), SubVecMask);
1088 auto VectorHalfType =
1089 VectorType::get(Type::getHalfTy(II->getContext()), RetWidth);
1091 Builder->CreateBitCast(VectorHalfAsShorts, VectorHalfType);
1092 auto VectorFloats = Builder->CreateFPExt(VectorHalfs, RetType);
1093 return ReplaceInstUsesWith(*II, VectorFloats);
1096 // We only use the lowest lanes of the argument.
1097 if (Value *V = SimplifyDemandedVectorEltsLow(Arg, ArgWidth, RetWidth)) {
1098 II->setArgOperand(0, V);
1104 case Intrinsic::x86_sse_cvtss2si:
1105 case Intrinsic::x86_sse_cvtss2si64:
1106 case Intrinsic::x86_sse_cvttss2si:
1107 case Intrinsic::x86_sse_cvttss2si64:
1108 case Intrinsic::x86_sse2_cvtsd2si:
1109 case Intrinsic::x86_sse2_cvtsd2si64:
1110 case Intrinsic::x86_sse2_cvttsd2si:
1111 case Intrinsic::x86_sse2_cvttsd2si64: {
1112 // These intrinsics only demand the 0th element of their input vectors. If
1113 // we can simplify the input based on that, do so now.
1114 Value *Arg = II->getArgOperand(0);
1115 unsigned VWidth = Arg->getType()->getVectorNumElements();
1116 if (Value *V = SimplifyDemandedVectorEltsLow(Arg, VWidth, 1)) {
1117 II->setArgOperand(0, V);
1123 // Constant fold ashr( <A x Bi>, Ci ).
1124 // Constant fold lshr( <A x Bi>, Ci ).
1125 // Constant fold shl( <A x Bi>, Ci ).
1126 case Intrinsic::x86_sse2_psrai_d:
1127 case Intrinsic::x86_sse2_psrai_w:
1128 case Intrinsic::x86_avx2_psrai_d:
1129 case Intrinsic::x86_avx2_psrai_w:
1130 case Intrinsic::x86_sse2_psrli_d:
1131 case Intrinsic::x86_sse2_psrli_q:
1132 case Intrinsic::x86_sse2_psrli_w:
1133 case Intrinsic::x86_avx2_psrli_d:
1134 case Intrinsic::x86_avx2_psrli_q:
1135 case Intrinsic::x86_avx2_psrli_w:
1136 case Intrinsic::x86_sse2_pslli_d:
1137 case Intrinsic::x86_sse2_pslli_q:
1138 case Intrinsic::x86_sse2_pslli_w:
1139 case Intrinsic::x86_avx2_pslli_d:
1140 case Intrinsic::x86_avx2_pslli_q:
1141 case Intrinsic::x86_avx2_pslli_w:
1142 if (Value *V = SimplifyX86immshift(*II, *Builder))
1143 return ReplaceInstUsesWith(*II, V);
1146 case Intrinsic::x86_sse2_psra_d:
1147 case Intrinsic::x86_sse2_psra_w:
1148 case Intrinsic::x86_avx2_psra_d:
1149 case Intrinsic::x86_avx2_psra_w:
1150 case Intrinsic::x86_sse2_psrl_d:
1151 case Intrinsic::x86_sse2_psrl_q:
1152 case Intrinsic::x86_sse2_psrl_w:
1153 case Intrinsic::x86_avx2_psrl_d:
1154 case Intrinsic::x86_avx2_psrl_q:
1155 case Intrinsic::x86_avx2_psrl_w:
1156 case Intrinsic::x86_sse2_psll_d:
1157 case Intrinsic::x86_sse2_psll_q:
1158 case Intrinsic::x86_sse2_psll_w:
1159 case Intrinsic::x86_avx2_psll_d:
1160 case Intrinsic::x86_avx2_psll_q:
1161 case Intrinsic::x86_avx2_psll_w: {
1162 if (Value *V = SimplifyX86immshift(*II, *Builder))
1163 return ReplaceInstUsesWith(*II, V);
1165 // SSE2/AVX2 uses only the first 64-bits of the 128-bit vector
1166 // operand to compute the shift amount.
1167 Value *Arg1 = II->getArgOperand(1);
1168 assert(Arg1->getType()->getPrimitiveSizeInBits() == 128 &&
1169 "Unexpected packed shift size");
1170 unsigned VWidth = Arg1->getType()->getVectorNumElements();
1172 if (Value *V = SimplifyDemandedVectorEltsLow(Arg1, VWidth, VWidth / 2)) {
1173 II->setArgOperand(1, V);
1179 case Intrinsic::x86_avx2_pmovsxbd:
1180 case Intrinsic::x86_avx2_pmovsxbq:
1181 case Intrinsic::x86_avx2_pmovsxbw:
1182 case Intrinsic::x86_avx2_pmovsxdq:
1183 case Intrinsic::x86_avx2_pmovsxwd:
1184 case Intrinsic::x86_avx2_pmovsxwq:
1185 if (Value *V = SimplifyX86extend(*II, *Builder, true))
1186 return ReplaceInstUsesWith(*II, V);
1189 case Intrinsic::x86_sse41_pmovzxbd:
1190 case Intrinsic::x86_sse41_pmovzxbq:
1191 case Intrinsic::x86_sse41_pmovzxbw:
1192 case Intrinsic::x86_sse41_pmovzxdq:
1193 case Intrinsic::x86_sse41_pmovzxwd:
1194 case Intrinsic::x86_sse41_pmovzxwq:
1195 case Intrinsic::x86_avx2_pmovzxbd:
1196 case Intrinsic::x86_avx2_pmovzxbq:
1197 case Intrinsic::x86_avx2_pmovzxbw:
1198 case Intrinsic::x86_avx2_pmovzxdq:
1199 case Intrinsic::x86_avx2_pmovzxwd:
1200 case Intrinsic::x86_avx2_pmovzxwq:
1201 if (Value *V = SimplifyX86extend(*II, *Builder, false))
1202 return ReplaceInstUsesWith(*II, V);
1205 case Intrinsic::x86_sse41_insertps:
1206 if (Value *V = SimplifyX86insertps(*II, *Builder))
1207 return ReplaceInstUsesWith(*II, V);
1210 case Intrinsic::x86_sse4a_extrq: {
1211 Value *Op0 = II->getArgOperand(0);
1212 Value *Op1 = II->getArgOperand(1);
1213 unsigned VWidth0 = Op0->getType()->getVectorNumElements();
1214 unsigned VWidth1 = Op1->getType()->getVectorNumElements();
1215 assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
1216 Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth0 == 2 &&
1217 VWidth1 == 16 && "Unexpected operand sizes");
1219 // See if we're dealing with constant values.
1220 Constant *C1 = dyn_cast<Constant>(Op1);
1221 ConstantInt *CILength =
1222 C1 ? dyn_cast<ConstantInt>(C1->getAggregateElement((unsigned)0))
1224 ConstantInt *CIIndex =
1225 C1 ? dyn_cast<ConstantInt>(C1->getAggregateElement((unsigned)1))
1228 // Attempt to simplify to a constant, shuffle vector or EXTRQI call.
1229 if (Value *V = SimplifyX86extrq(*II, Op0, CILength, CIIndex, *Builder))
1230 return ReplaceInstUsesWith(*II, V);
1232 // EXTRQ only uses the lowest 64-bits of the first 128-bit vector
1233 // operand and the lowest 16-bits of the second.
1234 if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth0, 1)) {
1235 II->setArgOperand(0, V);
1238 if (Value *V = SimplifyDemandedVectorEltsLow(Op1, VWidth1, 2)) {
1239 II->setArgOperand(1, V);
1245 case Intrinsic::x86_sse4a_extrqi: {
1246 // EXTRQI: Extract Length bits starting from Index. Zero pad the remaining
1247 // bits of the lower 64-bits. The upper 64-bits are undefined.
1248 Value *Op0 = II->getArgOperand(0);
1249 unsigned VWidth = Op0->getType()->getVectorNumElements();
1250 assert(Op0->getType()->getPrimitiveSizeInBits() == 128 && VWidth == 2 &&
1251 "Unexpected operand size");
1253 // See if we're dealing with constant values.
1254 ConstantInt *CILength = dyn_cast<ConstantInt>(II->getArgOperand(1));
1255 ConstantInt *CIIndex = dyn_cast<ConstantInt>(II->getArgOperand(2));
1257 // Attempt to simplify to a constant or shuffle vector.
1258 if (Value *V = SimplifyX86extrq(*II, Op0, CILength, CIIndex, *Builder))
1259 return ReplaceInstUsesWith(*II, V);
1261 // EXTRQI only uses the lowest 64-bits of the first 128-bit vector
1262 // operand.
1263 if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth, 1)) {
1264 II->setArgOperand(0, V);
1270 case Intrinsic::x86_sse4a_insertq: {
1271 Value *Op0 = II->getArgOperand(0);
1272 Value *Op1 = II->getArgOperand(1);
1273 unsigned VWidth = Op0->getType()->getVectorNumElements();
1274 assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
1275 Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth == 2 &&
1276 Op1->getType()->getVectorNumElements() == 2 &&
1277 "Unexpected operand size");
1279 // See if we're dealing with constant values.
1280 Constant *C1 = dyn_cast<Constant>(Op1);
1282 C1 ? dyn_cast<ConstantInt>(C1->getAggregateElement((unsigned)1))
1285 // Attempt to simplify to a constant, shuffle vector or INSERTQI call.
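// The control bits live in the upper 64-bit element of the second source: its
// low 6 bits hold the field length and bits [13:8] hold the bit index.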
1287 APInt V11 = CI11->getValue();
1288 APInt Len = V11.zextOrTrunc(6);
1289 APInt Idx = V11.lshr(8).zextOrTrunc(6);
1290 if (Value *V = SimplifyX86insertq(*II, Op0, Op1, Len, Idx, *Builder))
1291 return ReplaceInstUsesWith(*II, V);
1294 // INSERTQ only uses the lowest 64-bits of the first 128-bit vector
1295 // operand.
1296 if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth, 1)) {
1297 II->setArgOperand(0, V);
1303 case Intrinsic::x86_sse4a_insertqi: {
1304 // INSERTQI: Extract lowest Length bits from lower half of second source and
1305 // insert over first source starting at Index bit. The upper 64-bits are
1306 // undefined.
1307 Value *Op0 = II->getArgOperand(0);
1308 Value *Op1 = II->getArgOperand(1);
1309 unsigned VWidth0 = Op0->getType()->getVectorNumElements();
1310 unsigned VWidth1 = Op1->getType()->getVectorNumElements();
1311 assert(Op0->getType()->getPrimitiveSizeInBits() == 128 &&
1312 Op1->getType()->getPrimitiveSizeInBits() == 128 && VWidth0 == 2 &&
1313 VWidth1 == 2 && "Unexpected operand sizes");
1315 // See if we're dealing with constant values.
1316 ConstantInt *CILength = dyn_cast<ConstantInt>(II->getArgOperand(2));
1317 ConstantInt *CIIndex = dyn_cast<ConstantInt>(II->getArgOperand(3));
1319 // Attempt to simplify to a constant or shuffle vector.
1320 if (CILength && CIIndex) {
1321 APInt Len = CILength->getValue().zextOrTrunc(6);
1322 APInt Idx = CIIndex->getValue().zextOrTrunc(6);
1323 if (Value *V = SimplifyX86insertq(*II, Op0, Op1, Len, Idx, *Builder))
1324 return ReplaceInstUsesWith(*II, V);
1327 // INSERTQI only uses the lowest 64-bits of the first two 128-bit vector
1328 // operands.
1329 if (Value *V = SimplifyDemandedVectorEltsLow(Op0, VWidth0, 1)) {
1330 II->setArgOperand(0, V);
1334 if (Value *V = SimplifyDemandedVectorEltsLow(Op1, VWidth1, 1)) {
1335 II->setArgOperand(1, V);
1341 case Intrinsic::x86_sse41_pblendvb:
1342 case Intrinsic::x86_sse41_blendvps:
1343 case Intrinsic::x86_sse41_blendvpd:
1344 case Intrinsic::x86_avx_blendv_ps_256:
1345 case Intrinsic::x86_avx_blendv_pd_256:
1346 case Intrinsic::x86_avx2_pblendvb: {
1347 // Convert blendv* to vector selects if the mask is constant.
1348 // This optimization is convoluted because the intrinsic is defined as
1349 // getting a vector of floats or doubles for the ps and pd versions.
1350 // FIXME: That should be changed.
1352 Value *Op0 = II->getArgOperand(0);
1353 Value *Op1 = II->getArgOperand(1);
1354 Value *Mask = II->getArgOperand(2);
1356 // fold (blend A, A, Mask) -> A
1358 return ReplaceInstUsesWith(CI, Op0);
1360 // Zero Mask - select 1st argument.
1361 if (isa<ConstantAggregateZero>(Mask))
1362 return ReplaceInstUsesWith(CI, Op0);
1364 // Constant Mask - select 1st/2nd argument lane based on top bit of mask.
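// A lane whose mask element has its top bit set takes its value from the
// second source (Op1); otherwise the lane comes from Op0.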
1365 if (auto C = dyn_cast<ConstantDataVector>(Mask)) {
1366 auto Tyi1 = Builder->getInt1Ty();
1367 auto SelectorType = cast<VectorType>(Mask->getType());
1368 auto EltTy = SelectorType->getElementType();
1369 unsigned Size = SelectorType->getNumElements();
1372 unsigned BitWidth = EltTy->isFloatTy() ? 32
1373 : (EltTy->isDoubleTy() ? 64 : EltTy->getIntegerBitWidth());
1374 assert((BitWidth == 64 || BitWidth == 32 || BitWidth == 8) &&
1375 "Wrong arguments for variable blend intrinsic");
1376 SmallVector<Constant *, 32> Selectors;
1377 for (unsigned I = 0; I < Size; ++I) {
1378 // The intrinsics only read the top bit
1381 Selector = C->getElementAsInteger(I);
1383 Selector = C->getElementAsAPFloat(I).bitcastToAPInt().getZExtValue();
1384 Selectors.push_back(ConstantInt::get(Tyi1, Selector >> (BitWidth - 1)));
1386 auto NewSelector = ConstantVector::get(Selectors);
1387 return SelectInst::Create(NewSelector, Op1, Op0, "blendv");
1392 case Intrinsic::x86_ssse3_pshuf_b_128:
1393 case Intrinsic::x86_avx2_pshuf_b: {
1394 // Turn pshufb(V1,mask) -> shuffle(V1,Zero,mask) if mask is a constant.
1395 auto *V = II->getArgOperand(1);
1396 auto *VTy = cast<VectorType>(V->getType());
1397 unsigned NumElts = VTy->getNumElements();
1398 assert((NumElts == 16 || NumElts == 32) &&
1399 "Unexpected number of elements in shuffle mask!");
1400 // Initialize the resulting shuffle mask to all zeroes.
1401 uint32_t Indexes[32] = {0};
1403 if (auto *Mask = dyn_cast<ConstantDataVector>(V)) {
1404 // Each byte in the shuffle control mask forms an index to permute the
1405 // corresponding byte in the destination operand.
1406 for (unsigned I = 0; I < NumElts; ++I) {
1407 int8_t Index = Mask->getElementAsInteger(I);
1408 // If the most significant bit (bit[7]) of each byte of the shuffle
1409 // control mask is set, then zero is written in the result byte.
1410 // The zero vector is in the right-hand side of the resulting
1411 // shuffle.
1413 // The value of each index is the least significant 4 bits of the
1414 // shuffle control byte.
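// For example, a control byte of 0x83 has its sign bit set and produces a zero
// result byte, while 0x03 copies byte 3 of the source (within the same 128-bit
// lane for the AVX2 form).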
1415 Indexes[I] = (Index < 0) ? NumElts : Index & 0xF;
1417 } else if (!isa<ConstantAggregateZero>(V))
1420 // The value of each index for the high 128-bit lane is the least
1421 // significant 4 bits of the respective shuffle control byte.
1422 for (unsigned I = 16; I < NumElts; ++I)
1423 Indexes[I] += I & 0xF0;
1425 auto NewC = ConstantDataVector::get(V->getContext(),
1426 makeArrayRef(Indexes, NumElts));
1427 auto V1 = II->getArgOperand(0);
1428 auto V2 = Constant::getNullValue(II->getType());
1429 auto Shuffle = Builder->CreateShuffleVector(V1, V2, NewC);
1430 return ReplaceInstUsesWith(CI, Shuffle);
1433 case Intrinsic::x86_avx_vpermilvar_ps:
1434 case Intrinsic::x86_avx_vpermilvar_ps_256:
1435 case Intrinsic::x86_avx_vpermilvar_pd:
1436 case Intrinsic::x86_avx_vpermilvar_pd_256: {
1437 // Convert vpermil* to shufflevector if the mask is constant.
1438 Value *V = II->getArgOperand(1);
1439 unsigned Size = cast<VectorType>(V->getType())->getNumElements();
1440 assert(Size == 8 || Size == 4 || Size == 2);
1441 uint32_t Indexes[8];
1442 if (auto C = dyn_cast<ConstantDataVector>(V)) {
1443 // The intrinsics only read one or two bits, clear the rest.
1444 for (unsigned I = 0; I < Size; ++I) {
1445 uint32_t Index = C->getElementAsInteger(I) & 0x3;
1446 if (II->getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_pd ||
1447 II->getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_pd_256)
1451 } else if (isa<ConstantAggregateZero>(V)) {
1452 for (unsigned I = 0; I < Size; ++I)
1457 // The _256 variants are a bit trickier since the mask bits always index
1458 // into the corresponding 128-bit half. In order to convert to a generic
1459 // shuffle, we have to make that explicit.
1460 if (II->getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_ps_256 ||
1461 II->getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_pd_256) {
1462 for (unsigned I = Size / 2; I < Size; ++I)
1463 Indexes[I] += Size / 2;
1466 ConstantDataVector::get(V->getContext(), makeArrayRef(Indexes, Size));
1467 auto V1 = II->getArgOperand(0);
1468 auto V2 = UndefValue::get(V1->getType());
1469 auto Shuffle = Builder->CreateShuffleVector(V1, V2, NewC);
1470 return ReplaceInstUsesWith(CI, Shuffle);
1473 case Intrinsic::x86_avx_vperm2f128_pd_256:
1474 case Intrinsic::x86_avx_vperm2f128_ps_256:
1475 case Intrinsic::x86_avx_vperm2f128_si_256:
1476 case Intrinsic::x86_avx2_vperm2i128:
1477 if (Value *V = SimplifyX86vperm2(*II, *Builder))
1478 return ReplaceInstUsesWith(*II, V);
1481 case Intrinsic::x86_xop_vpcomb:
1482 case Intrinsic::x86_xop_vpcomd:
1483 case Intrinsic::x86_xop_vpcomq:
1484 case Intrinsic::x86_xop_vpcomw:
1485 if (Value *V = SimplifyX86vpcom(*II, *Builder, true))
1486 return ReplaceInstUsesWith(*II, V);
1489 case Intrinsic::x86_xop_vpcomub:
1490 case Intrinsic::x86_xop_vpcomud:
1491 case Intrinsic::x86_xop_vpcomuq:
1492 case Intrinsic::x86_xop_vpcomuw:
1493 if (Value *V = SimplifyX86vpcom(*II, *Builder, false))
1494 return ReplaceInstUsesWith(*II, V);
1497 case Intrinsic::ppc_altivec_vperm:
1498 // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
1499 // Note that ppc_altivec_vperm has a big-endian bias, so when creating
1500 // a vector shuffle for little endian, we must undo the transformation
1501 // performed on vec_perm in altivec.h. That is, we must complement
1502 // the permutation mask with respect to 31 and reverse the order of
1503 // the input vectors.
1504 if (Constant *Mask = dyn_cast<Constant>(II->getArgOperand(2))) {
1505 assert(Mask->getType()->getVectorNumElements() == 16 &&
1506 "Bad type for intrinsic!");
1508 // Check that all of the elements are integer constants or undefs.
1509 bool AllEltsOk = true;
1510 for (unsigned i = 0; i != 16; ++i) {
1511 Constant *Elt = Mask->getAggregateElement(i);
1512 if (!Elt || !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
1519 // Cast the input vectors to byte vectors.
1520 Value *Op0 = Builder->CreateBitCast(II->getArgOperand(0),
1522 Value *Op1 = Builder->CreateBitCast(II->getArgOperand(1),
1524 Value *Result = UndefValue::get(Op0->getType());
1526 // Only extract each element once.
1527 Value *ExtractedElts[32];
1528 memset(ExtractedElts, 0, sizeof(ExtractedElts));
1530 for (unsigned i = 0; i != 16; ++i) {
1531 if (isa<UndefValue>(Mask->getAggregateElement(i)))
1534 cast<ConstantInt>(Mask->getAggregateElement(i))->getZExtValue();
1535 Idx &= 31; // Match the hardware behavior.
1536 if (DL.isLittleEndian())
1537 Idx = 31 - Idx;
1539 if (!ExtractedElts[Idx]) {
1540 Value *Op0ToUse = (DL.isLittleEndian()) ? Op1 : Op0;
1541 Value *Op1ToUse = (DL.isLittleEndian()) ? Op0 : Op1;
1542 ExtractedElts[Idx] =
1543 Builder->CreateExtractElement(Idx < 16 ? Op0ToUse : Op1ToUse,
1544 Builder->getInt32(Idx&15));
1547 // Insert this value into the result vector.
1548 Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
1549 Builder->getInt32(i));
1551 return CastInst::Create(Instruction::BitCast, Result, CI.getType());
1556 case Intrinsic::arm_neon_vld1:
1557 case Intrinsic::arm_neon_vld2:
1558 case Intrinsic::arm_neon_vld3:
1559 case Intrinsic::arm_neon_vld4:
1560 case Intrinsic::arm_neon_vld2lane:
1561 case Intrinsic::arm_neon_vld3lane:
1562 case Intrinsic::arm_neon_vld4lane:
1563 case Intrinsic::arm_neon_vst1:
1564 case Intrinsic::arm_neon_vst2:
1565 case Intrinsic::arm_neon_vst3:
1566 case Intrinsic::arm_neon_vst4:
1567 case Intrinsic::arm_neon_vst2lane:
1568 case Intrinsic::arm_neon_vst3lane:
1569 case Intrinsic::arm_neon_vst4lane: {
1570 unsigned MemAlign = getKnownAlignment(II->getArgOperand(0), DL, II, AC, DT);
1571 unsigned AlignArg = II->getNumArgOperands() - 1;
1572 ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
1573 if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
1574 II->setArgOperand(AlignArg,
1575 ConstantInt::get(Type::getInt32Ty(II->getContext()),
1582 case Intrinsic::arm_neon_vmulls:
1583 case Intrinsic::arm_neon_vmullu:
1584 case Intrinsic::aarch64_neon_smull:
1585 case Intrinsic::aarch64_neon_umull: {
1586 Value *Arg0 = II->getArgOperand(0);
1587 Value *Arg1 = II->getArgOperand(1);
1589 // Handle mul by zero first:
1590 if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {
1591 return ReplaceInstUsesWith(CI, ConstantAggregateZero::get(II->getType()));
1594 // Check for constant LHS & RHS - in this case we just simplify.
1595 bool Zext = (II->getIntrinsicID() == Intrinsic::arm_neon_vmullu ||
1596 II->getIntrinsicID() == Intrinsic::aarch64_neon_umull);
1597 VectorType *NewVT = cast<VectorType>(II->getType());
1598 if (Constant *CV0 = dyn_cast<Constant>(Arg0)) {
1599 if (Constant *CV1 = dyn_cast<Constant>(Arg1)) {
1600 CV0 = ConstantExpr::getIntegerCast(CV0, NewVT, /*isSigned=*/!Zext);
1601 CV1 = ConstantExpr::getIntegerCast(CV1, NewVT, /*isSigned=*/!Zext);
1603 return ReplaceInstUsesWith(CI, ConstantExpr::getMul(CV0, CV1));
1606 // Couldn't simplify - canonicalize constant to the RHS.
1607 std::swap(Arg0, Arg1);
1610 // Handle mul by one:
1611 if (Constant *CV1 = dyn_cast<Constant>(Arg1))
1612 if (ConstantInt *Splat =
1613 dyn_cast_or_null<ConstantInt>(CV1->getSplatValue()))
1614 if (Splat->isOne())
1615 return CastInst::CreateIntegerCast(Arg0, II->getType(),
1616 /*isSigned=*/!Zext);
1621 case Intrinsic::AMDGPU_rcp: {
1622 if (const ConstantFP *C = dyn_cast<ConstantFP>(II->getArgOperand(0))) {
1623 const APFloat &ArgVal = C->getValueAPF();
1624 APFloat Val(ArgVal.getSemantics(), 1.0);
1625 APFloat::opStatus Status = Val.divide(ArgVal,
1626 APFloat::rmNearestTiesToEven);
1627 // Only do this if it was exact and therefore not dependent on the
1628 // rounding mode.
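// For example, rcp(2.0) folds to 0.5, but rcp(3.0) is left alone because
// 1.0/3.0 is inexact.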
1629 if (Status == APFloat::opOK)
1630 return ReplaceInstUsesWith(CI, ConstantFP::get(II->getContext(), Val));
1635 case Intrinsic::stackrestore: {
1636 // If the save is right next to the restore, remove the restore. This can
1637 // happen when variable allocas are DCE'd.
1638 if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
1639 if (SS->getIntrinsicID() == Intrinsic::stacksave) {
1640 if (&*++SS->getIterator() == II)
1641 return EraseInstFromFunction(CI);
1645 // Scan down this block to see if there is another stack restore in the
1646 // same block without an intervening call/alloca.
1647 BasicBlock::iterator BI(II);
1648 TerminatorInst *TI = II->getParent()->getTerminator();
1649 bool CannotRemove = false;
1650 for (++BI; &*BI != TI; ++BI) {
1651 if (isa<AllocaInst>(BI)) {
1652 CannotRemove = true;
1655 if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
1656 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
1657 // If there is a stackrestore below this one, remove this one.
1658 if (II->getIntrinsicID() == Intrinsic::stackrestore)
1659 return EraseInstFromFunction(CI);
1660 // Otherwise, ignore the intrinsic.
// If we found a non-intrinsic call, we can't remove the stack restore.
1664 CannotRemove = true;
1670 // If the stack restore is in a return, resume, or unwind block and if there
// are no allocas or calls between the restore and the return, nuke the restore.
1673 if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
1674 return EraseInstFromFunction(CI);
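// Illustrative sketch (added for exposition, not from the original source):
//   %sp = call i8* @llvm.stacksave()
//   call void @llvm.stackrestore(i8* %sp)
//   ret void
// Both a restore immediately after its save and a restore immediately
// followed by a return (with no intervening alloca or call) are erased here.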
1677 case Intrinsic::lifetime_start: {
1678 // Remove trivially empty lifetime_start/end ranges, i.e. a start
1679 // immediately followed by an end (ignoring debuginfo or other
1680 // lifetime markers in between).
1681 BasicBlock::iterator BI = II->getIterator(), BE = II->getParent()->end();
1682 for (++BI; BI != BE; ++BI) {
1683 if (IntrinsicInst *LTE = dyn_cast<IntrinsicInst>(BI)) {
1684 if (isa<DbgInfoIntrinsic>(LTE) ||
1685 LTE->getIntrinsicID() == Intrinsic::lifetime_start)
1687 if (LTE->getIntrinsicID() == Intrinsic::lifetime_end) {
1688 if (II->getOperand(0) == LTE->getOperand(0) &&
1689 II->getOperand(1) == LTE->getOperand(1)) {
1690 EraseInstFromFunction(*LTE);
1691 return EraseInstFromFunction(*II);
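// Illustrative sketch (added for exposition, not from the original source):
//   call void @llvm.lifetime.start(i64 16, i8* %p)
//   call void @llvm.lifetime.end(i64 16, i8* %p)
// With nothing but debug intrinsics between them, both markers are erased.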
1700 case Intrinsic::assume: {
1701 // Canonicalize assume(a && b) -> assume(a); assume(b);
1702 // Note: New assumption intrinsics created here are registered by
1703 // the InstCombineIRInserter object.
1704 Value *IIOperand = II->getArgOperand(0), *A, *B,
1705 *AssumeIntrinsic = II->getCalledValue();
1706 if (match(IIOperand, m_And(m_Value(A), m_Value(B)))) {
1707 Builder->CreateCall(AssumeIntrinsic, A, II->getName());
1708 Builder->CreateCall(AssumeIntrinsic, B, II->getName());
1709 return EraseInstFromFunction(*II);
1711 // assume(!(a || b)) -> assume(!a); assume(!b);
1712 if (match(IIOperand, m_Not(m_Or(m_Value(A), m_Value(B))))) {
Builder->CreateCall(AssumeIntrinsic, Builder->CreateNot(A),
                    II->getName());
Builder->CreateCall(AssumeIntrinsic, Builder->CreateNot(B),
                    II->getName());
1717 return EraseInstFromFunction(*II);
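// Illustrative sketch (added for exposition, not from the original source):
//   %c = and i1 %a, %b
//   call void @llvm.assume(i1 %c)
// is split into assume(%a) and assume(%b), exposing each condition to
// ValueTracking individually; the !(a || b) form is handled analogously.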
1720 // assume( (load addr) != null ) -> add 'nonnull' metadata to load
1721 // (if assume is valid at the load)
1722 if (ICmpInst* ICmp = dyn_cast<ICmpInst>(IIOperand)) {
1723 Value *LHS = ICmp->getOperand(0);
1724 Value *RHS = ICmp->getOperand(1);
1725 if (ICmpInst::ICMP_NE == ICmp->getPredicate() &&
1726 isa<LoadInst>(LHS) &&
1727 isa<Constant>(RHS) &&
1728 RHS->getType()->isPointerTy() &&
1729 cast<Constant>(RHS)->isNullValue()) {
1730 LoadInst* LI = cast<LoadInst>(LHS);
1731 if (isValidAssumeForContext(II, LI, DT)) {
1732 MDNode *MD = MDNode::get(II->getContext(), None);
1733 LI->setMetadata(LLVMContext::MD_nonnull, MD);
1734 return EraseInstFromFunction(*II);
1737 // TODO: apply nonnull return attributes to calls and invokes
1738 // TODO: apply range metadata for range check patterns?
1740 // If there is a dominating assume with the same condition as this one,
1741 // then this one is redundant, and should be removed.
1742 APInt KnownZero(1, 0), KnownOne(1, 0);
1743 computeKnownBits(IIOperand, KnownZero, KnownOne, 0, II);
1744 if (KnownOne.isAllOnesValue())
1745 return EraseInstFromFunction(*II);
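// Illustrative sketch (added for exposition, not from the original source):
// if an identical "call void @llvm.assume(i1 %cond)" already dominates this
// one, computeKnownBits sees %cond as known true here and the duplicate
// assume is erased.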
1749 case Intrinsic::experimental_gc_relocate: {
1750 // Translate facts known about a pointer before relocating into
1751 // facts about the relocate value, while being careful to
1752 // preserve relocation semantics.
1753 GCRelocateOperands Operands(II);
1754 Value *DerivedPtr = Operands.getDerivedPtr();
1755 auto *GCRelocateType = cast<PointerType>(II->getType());
// Remove the relocation if unused; note that this check is required
1758 // to prevent the cases below from looping forever.
1759 if (II->use_empty())
1760 return EraseInstFromFunction(*II);
1762 // Undef is undef, even after relocation.
1763 // TODO: provide a hook for this in GCStrategy. This is clearly legal for
1764 // most practical collectors, but there was discussion in the review thread
1765 // about whether it was legal for all possible collectors.
1766 if (isa<UndefValue>(DerivedPtr)) {
1767 // gc_relocate is uncasted. Use undef of gc_relocate's type to replace it.
1768 return ReplaceInstUsesWith(*II, UndefValue::get(GCRelocateType));
// The relocation of null will be null for almost any collector.
1772 // TODO: provide a hook for this in GCStrategy. There might be some weird
1773 // collector this property does not hold for.
1774 if (isa<ConstantPointerNull>(DerivedPtr)) {
1775 // gc_relocate is uncasted. Use null-pointer of gc_relocate's type to replace it.
1776 return ReplaceInstUsesWith(*II, ConstantPointerNull::get(GCRelocateType));
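// Illustrative sketch (added for exposition, not from the original source):
// a gc.relocate whose recorded derived pointer is the constant null needs no
// relocation at all; it is simply replaced by a null pointer of the
// relocate's own result type, as done above.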
1779 // isKnownNonNull -> nonnull attribute
1780 if (isKnownNonNullAt(DerivedPtr, II, DT, TLI))
1781 II->addAttribute(AttributeSet::ReturnIndex, Attribute::NonNull);
1783 // isDereferenceablePointer -> deref attribute
1784 if (isDereferenceablePointer(DerivedPtr, DL)) {
1785 if (Argument *A = dyn_cast<Argument>(DerivedPtr)) {
1786 uint64_t Bytes = A->getDereferenceableBytes();
1787 II->addDereferenceableAttr(AttributeSet::ReturnIndex, Bytes);
1791 // TODO: bitcast(relocate(p)) -> relocate(bitcast(p))
1792 // Canonicalize on the type from the uses to the defs
1794 // TODO: relocate((gep p, C, C2, ...)) -> gep(relocate(p), C, C2, ...)
1798 return visitCallSite(II);
1801 // InvokeInst simplification
1803 Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
1804 return visitCallSite(&II);
1807 /// isSafeToEliminateVarargsCast - If this cast does not affect the value
1808 /// passed through the varargs area, we can eliminate the use of the cast.
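/// Illustrative sketch (added for exposition, not from the original source):
/// for a variadic callee such as
///   call i32 (i8*, ...) @printf(i8* %fmt, i8* bitcast (i32* @g to i8*))
/// the bitcast is lossless and the operand is not byval/inalloca, so the cast
/// can be dropped and @g passed directly.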
1809 static bool isSafeToEliminateVarargsCast(const CallSite CS,
1810 const DataLayout &DL,
const CastInst *const CI,
unsigned ix) {
1813 if (!CI->isLosslessCast())
1816 // If this is a GC intrinsic, avoid munging types. We need types for
1817 // statepoint reconstruction in SelectionDAG.
1818 // TODO: This is probably something which should be expanded to all
1819 // intrinsics since the entire point of intrinsics is that
1820 // they are understandable by the optimizer.
1821 if (isStatepoint(CS) || isGCRelocate(CS) || isGCResult(CS))
1824 // The size of ByVal or InAlloca arguments is derived from the type, so we
1825 // can't change to a type with a different size. If the size were
1826 // passed explicitly we could avoid this check.
1827 if (!CS.isByValOrInAllocaArgument(ix))
Type *SrcTy =
    cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
1832 Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
1833 if (!SrcTy->isSized() || !DstTy->isSized())
1835 if (DL.getTypeAllocSize(SrcTy) != DL.getTypeAllocSize(DstTy))
// Try to fold some different types of calls here.
1841 // Currently we're only working with the checking functions, memcpy_chk,
1842 // mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,
1843 // strcat_chk and strncat_chk.
1844 Instruction *InstCombiner::tryOptimizeCall(CallInst *CI) {
1845 if (!CI->getCalledFunction()) return nullptr;
1847 auto InstCombineRAUW = [this](Instruction *From, Value *With) {
ReplaceInstUsesWith(*From, With);
};
1850 LibCallSimplifier Simplifier(DL, TLI, InstCombineRAUW);
1851 if (Value *With = Simplifier.optimizeCall(CI)) {
1853 return CI->use_empty() ? CI : ReplaceInstUsesWith(*CI, With);
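// Illustrative sketch (added for exposition, not from the original source):
// LibCallSimplifier can, for instance, fold strlen of a constant string into
// the string's length; the now-dead original call is then erased by the
// caller of this routine.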
1859 static IntrinsicInst *FindInitTrampolineFromAlloca(Value *TrampMem) {
1860 // Strip off at most one level of pointer casts, looking for an alloca. This
1861 // is good enough in practice and simpler than handling any number of casts.
1862 Value *Underlying = TrampMem->stripPointerCasts();
1863 if (Underlying != TrampMem &&
1864 (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
1866 if (!isa<AllocaInst>(Underlying))
1869 IntrinsicInst *InitTrampoline = nullptr;
1870 for (User *U : TrampMem->users()) {
1871 IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
1874 if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
1876 // More than one init_trampoline writes to this value. Give up.
1878 InitTrampoline = II;
1881 if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
1882 // Allow any number of calls to adjust.trampoline.
1887 // No call to init.trampoline found.
1888 if (!InitTrampoline)
1891 // Check that the alloca is being used in the expected way.
1892 if (InitTrampoline->getOperand(0) != TrampMem)
1895 return InitTrampoline;
1898 static IntrinsicInst *FindInitTrampolineFromBB(IntrinsicInst *AdjustTramp,
// Visit all the previous instructions in the basic block, and try to find an
// init.trampoline which has a direct path to the adjust.trampoline.
1902 for (BasicBlock::iterator I = AdjustTramp->getIterator(),
1903 E = AdjustTramp->getParent()->begin();
1905 Instruction *Inst = &*--I;
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
1907 if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
1908 II->getOperand(0) == TrampMem)
1910 if (Inst->mayWriteToMemory())
1916 // Given a call to llvm.adjust.trampoline, find and return the corresponding
1917 // call to llvm.init.trampoline if the call to the trampoline can be optimized
1918 // to a direct call to a function. Otherwise return NULL.
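// Illustrative sketch (added for exposition, not from the original source):
//   call void @llvm.init.trampoline(i8* %tramp,
//                                   i8* bitcast (i32 (i8*, i32)* @f to i8*),
//                                   i8* %nval)
//   %p = call i8* @llvm.adjust.trampoline(i8* %tramp)
// Given the (possibly cast) %p used as a callee, this returns the
// init.trampoline call so the caller can be rewritten to call @f directly.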
1920 static IntrinsicInst *FindInitTrampoline(Value *Callee) {
1921 Callee = Callee->stripPointerCasts();
1922 IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
if (!AdjustTramp || AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)
  return nullptr;
1927 Value *TrampMem = AdjustTramp->getOperand(0);
1929 if (IntrinsicInst *IT = FindInitTrampolineFromAlloca(TrampMem))
1931 if (IntrinsicInst *IT = FindInitTrampolineFromBB(AdjustTramp, TrampMem))
1936 // visitCallSite - Improvements for call and invoke instructions.
1938 Instruction *InstCombiner::visitCallSite(CallSite CS) {
1940 if (isAllocLikeFn(CS.getInstruction(), TLI))
1941 return visitAllocSite(*CS.getInstruction());
1943 bool Changed = false;
1945 // Mark any parameters that are known to be non-null with the nonnull
1946 // attribute. This is helpful for inlining calls to functions with null
1947 // checks on their arguments.
unsigned ArgNo = 0;
for (Value *V : CS.args()) {
  if (V->getType()->isPointerTy() &&
      !CS.paramHasAttr(ArgNo + 1, Attribute::NonNull) &&
      isKnownNonNullAt(V, CS.getInstruction(), DT, TLI)) {
    AttributeSet AS = CS.getAttributes();
    AS = AS.addAttribute(CS.getInstruction()->getContext(), ArgNo + 1,
                         Attribute::NonNull);
    CS.setAttributes(AS);
    Changed = true;
  }
  ArgNo++;
}
1960 assert(ArgNo == CS.arg_size() && "sanity check");
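// Illustrative sketch (added for exposition, not from the original source):
// if an argument is, say, the address of an alloca, isKnownNonNullAt proves
// it non-null and the loop above adds the 'nonnull' attribute to that
// call-site parameter, which helps simplification after inlining.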
1962 // If the callee is a pointer to a function, attempt to move any casts to the
1963 // arguments of the call/invoke.
1964 Value *Callee = CS.getCalledValue();
1965 if (!isa<Function>(Callee) && transformConstExprCastCall(CS))
1968 if (Function *CalleeF = dyn_cast<Function>(Callee))
1969 // If the call and callee calling conventions don't match, this call must
1970 // be unreachable, as the call is undefined.
1971 if (CalleeF->getCallingConv() != CS.getCallingConv() &&
1972 // Only do this for calls to a function with a body. A prototype may
1973 // not actually end up matching the implementation's calling conv for a
1974 // variety of reasons (e.g. it may be written in assembly).
1975 !CalleeF->isDeclaration()) {
1976 Instruction *OldCall = CS.getInstruction();
new StoreInst(ConstantInt::getTrue(Callee->getContext()),
              UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
              OldCall);
1980 // If OldCall does not return void then replaceAllUsesWith undef.
// This allows ValueHandlers and custom metadata to adjust themselves.
1982 if (!OldCall->getType()->isVoidTy())
1983 ReplaceInstUsesWith(*OldCall, UndefValue::get(OldCall->getType()));
1984 if (isa<CallInst>(OldCall))
1985 return EraseInstFromFunction(*OldCall);
// We cannot remove an invoke, because doing so would change the CFG; just
// change the callee to a null pointer.
1989 cast<InvokeInst>(OldCall)->setCalledFunction(
1990 Constant::getNullValue(CalleeF->getType()));
1994 if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
1995 // If CS does not return void then replaceAllUsesWith undef.
// This allows ValueHandlers and custom metadata to adjust themselves.
1997 if (!CS.getInstruction()->getType()->isVoidTy())
1998 ReplaceInstUsesWith(*CS.getInstruction(),
1999 UndefValue::get(CS.getInstruction()->getType()));
2001 if (isa<InvokeInst>(CS.getInstruction())) {
2002 // Can't remove an invoke because we cannot change the CFG.
// This instruction is not reachable; just remove it. We insert a store to
2007 // undef so that we know that this code is not reachable, despite the fact
2008 // that we can't modify the CFG here.
2009 new StoreInst(ConstantInt::getTrue(Callee->getContext()),
2010 UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
2011 CS.getInstruction());
2013 return EraseInstFromFunction(*CS.getInstruction());
2016 if (IntrinsicInst *II = FindInitTrampoline(Callee))
2017 return transformCallThroughTrampoline(CS, II);
2019 PointerType *PTy = cast<PointerType>(Callee->getType());
2020 FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
2021 if (FTy->isVarArg()) {
2022 int ix = FTy->getNumParams();
// See if we can optimize any arguments passed through the varargs area of the call.
2025 for (CallSite::arg_iterator I = CS.arg_begin() + FTy->getNumParams(),
2026 E = CS.arg_end(); I != E; ++I, ++ix) {
2027 CastInst *CI = dyn_cast<CastInst>(*I);
2028 if (CI && isSafeToEliminateVarargsCast(CS, DL, CI, ix)) {
2029 *I = CI->getOperand(0);
2035 if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
2036 // Inline asm calls cannot throw - mark them 'nounwind'.
2037 CS.setDoesNotThrow();
// Try to optimize the call if possible; we require DataLayout for most of
2042 // this. None of these calls are seen as possibly dead so go ahead and
2043 // delete the instruction now.
2044 if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
2045 Instruction *I = tryOptimizeCall(CI);
// If we changed something, return the result; otherwise let the
// fallthrough checks below run.
2048 if (I) return EraseInstFromFunction(*I);
2051 return Changed ? CS.getInstruction() : nullptr;
2054 // transformConstExprCastCall - If the callee is a constexpr cast of a function,
2055 // attempt to move the cast to the arguments of the call/invoke.
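// Illustrative sketch (added for exposition, not from the original source):
//   %r = call i32 bitcast (i32 (i8*)* @f to i32 (i32*)*)(i32* %p)
// becomes a direct call "%r = call i32 @f(i8* %c)" with "%c = bitcast i32* %p
// to i8*", provided every argument and the return type are bit- or
// noop-pointer-castable and the attributes remain compatible.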
2057 bool InstCombiner::transformConstExprCastCall(CallSite CS) {
Function *Callee =
    dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
// The prototypes of thunks are a lie; don't try to directly call such functions.
2064 if (Callee->hasFnAttribute("thunk"))
2066 Instruction *Caller = CS.getInstruction();
2067 const AttributeSet &CallerPAL = CS.getAttributes();
2069 // Okay, this is a cast from a function to a different type. Unless doing so
2070 // would cause a type conversion of one of our arguments, change this call to
// be a direct call with arguments cast to the appropriate types.
2073 FunctionType *FT = Callee->getFunctionType();
2074 Type *OldRetTy = Caller->getType();
2075 Type *NewRetTy = FT->getReturnType();
2077 // Check to see if we are changing the return type...
2078 if (OldRetTy != NewRetTy) {
2080 if (NewRetTy->isStructTy())
2081 return false; // TODO: Handle multiple return values.
2083 if (!CastInst::isBitOrNoopPointerCastable(NewRetTy, OldRetTy, DL)) {
2084 if (Callee->isDeclaration())
2085 return false; // Cannot transform this return value.
2087 if (!Caller->use_empty() &&
2088 // void -> non-void is handled specially
2089 !NewRetTy->isVoidTy())
2090 return false; // Cannot transform this return value.
2093 if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
2094 AttrBuilder RAttrs(CallerPAL, AttributeSet::ReturnIndex);
2095 if (RAttrs.overlaps(AttributeFuncs::typeIncompatible(NewRetTy)))
2096 return false; // Attribute not compatible with transformed value.
2099 // If the callsite is an invoke instruction, and the return value is used by
2100 // a PHI node in a successor, we cannot change the return type of the call
2101 // because there is no place to put the cast instruction (without breaking
2102 // the critical edge). Bail out in this case.
2103 if (!Caller->use_empty())
2104 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
2105 for (User *U : II->users())
2106 if (PHINode *PN = dyn_cast<PHINode>(U))
2107 if (PN->getParent() == II->getNormalDest() ||
2108 PN->getParent() == II->getUnwindDest())
2112 unsigned NumActualArgs = CS.arg_size();
2113 unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);
2115 // Prevent us turning:
2116 // declare void @takes_i32_inalloca(i32* inalloca)
2117 // call void bitcast (void (i32*)* @takes_i32_inalloca to void (i32)*)(i32 0)
// into:
//
//  call void @takes_i32_inalloca(i32* null)
2122 // Similarly, avoid folding away bitcasts of byval calls.
2123 if (Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) ||
2124 Callee->getAttributes().hasAttrSomewhere(Attribute::ByVal))
2127 CallSite::arg_iterator AI = CS.arg_begin();
2128 for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
2129 Type *ParamTy = FT->getParamType(i);
2130 Type *ActTy = (*AI)->getType();
2132 if (!CastInst::isBitOrNoopPointerCastable(ActTy, ParamTy, DL))
2133 return false; // Cannot transform this parameter value.
2135 if (AttrBuilder(CallerPAL.getParamAttributes(i + 1), i + 1).
2136 overlaps(AttributeFuncs::typeIncompatible(ParamTy)))
2137 return false; // Attribute not compatible with transformed value.
2139 if (CS.isInAllocaArgument(i))
2140 return false; // Cannot transform to and from inalloca.
2142 // If the parameter is passed as a byval argument, then we have to have a
2143 // sized type and the sized type has to have the same size as the old type.
2144 if (ParamTy != ActTy &&
2145 CallerPAL.getParamAttributes(i + 1).hasAttribute(i + 1,
2146 Attribute::ByVal)) {
2147 PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
2148 if (!ParamPTy || !ParamPTy->getElementType()->isSized())
2151 Type *CurElTy = ActTy->getPointerElementType();
2152 if (DL.getTypeAllocSize(CurElTy) !=
2153 DL.getTypeAllocSize(ParamPTy->getElementType()))
2158 if (Callee->isDeclaration()) {
2159 // Do not delete arguments unless we have a function body.
2160 if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
2163 // If the callee is just a declaration, don't change the varargsness of the
// call. We don't want to introduce a varargs call where one doesn't already exist.
2166 PointerType *APTy = cast<PointerType>(CS.getCalledValue()->getType());
2167 if (FT->isVarArg()!=cast<FunctionType>(APTy->getElementType())->isVarArg())
2170 // If both the callee and the cast type are varargs, we still have to make
2171 // sure the number of fixed parameters are the same or we have the same
2172 // ABI issues as if we introduce a varargs call.
2173 if (FT->isVarArg() &&
2174 cast<FunctionType>(APTy->getElementType())->isVarArg() &&
2175 FT->getNumParams() !=
2176 cast<FunctionType>(APTy->getElementType())->getNumParams())
2180 if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
2181 !CallerPAL.isEmpty())
2182 // In this case we have more arguments than the new function type, but we
2183 // won't be dropping them. Check that these extra arguments have attributes
2184 // that are compatible with being a vararg call argument.
2185 for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
2186 unsigned Index = CallerPAL.getSlotIndex(i - 1);
2187 if (Index <= FT->getNumParams())
2190 // Check if it has an attribute that's incompatible with varargs.
2191 AttributeSet PAttrs = CallerPAL.getSlotAttributes(i - 1);
2192 if (PAttrs.hasAttribute(Index, Attribute::StructRet))
2197 // Okay, we decided that this is a safe thing to do: go ahead and start
2198 // inserting cast instructions as necessary.
2199 std::vector<Value*> Args;
2200 Args.reserve(NumActualArgs);
2201 SmallVector<AttributeSet, 8> attrVec;
2202 attrVec.reserve(NumCommonArgs);
2204 // Get any return attributes.
2205 AttrBuilder RAttrs(CallerPAL, AttributeSet::ReturnIndex);
2207 // If the return value is not being used, the type may not be compatible
2208 // with the existing attributes. Wipe out any problematic attributes.
2209 RAttrs.remove(AttributeFuncs::typeIncompatible(NewRetTy));
2211 // Add the new return attributes.
2212 if (RAttrs.hasAttributes())
2213 attrVec.push_back(AttributeSet::get(Caller->getContext(),
2214 AttributeSet::ReturnIndex, RAttrs));
2216 AI = CS.arg_begin();
2217 for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
2218 Type *ParamTy = FT->getParamType(i);
2220 if ((*AI)->getType() == ParamTy) {
Args.push_back(*AI);
} else {
  Args.push_back(Builder->CreateBitOrPointerCast(*AI, ParamTy));
}
2226 // Add any parameter attributes.
2227 AttrBuilder PAttrs(CallerPAL.getParamAttributes(i + 1), i + 1);
2228 if (PAttrs.hasAttributes())
2229 attrVec.push_back(AttributeSet::get(Caller->getContext(), i + 1,
// If the function takes more arguments than the call was taking, add them now.
2235 for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
2236 Args.push_back(Constant::getNullValue(FT->getParamType(i)));
2238 // If we are removing arguments to the function, emit an obnoxious warning.
2239 if (FT->getNumParams() < NumActualArgs) {
2240 // TODO: if (!FT->isVarArg()) this call may be unreachable. PR14722
2241 if (FT->isVarArg()) {
2242 // Add all of the arguments in their promoted form to the arg list.
2243 for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
2244 Type *PTy = getPromotedType((*AI)->getType());
2245 if (PTy != (*AI)->getType()) {
2246 // Must promote to pass through va_arg area!
2247 Instruction::CastOps opcode =
2248 CastInst::getCastOpcode(*AI, false, PTy, false);
Args.push_back(Builder->CreateCast(opcode, *AI, PTy));
} else {
  Args.push_back(*AI);
}
2254 // Add any parameter attributes.
2255 AttrBuilder PAttrs(CallerPAL.getParamAttributes(i + 1), i + 1);
2256 if (PAttrs.hasAttributes())
2257 attrVec.push_back(AttributeSet::get(FT->getContext(), i + 1,
2263 AttributeSet FnAttrs = CallerPAL.getFnAttributes();
2264 if (CallerPAL.hasAttributes(AttributeSet::FunctionIndex))
2265 attrVec.push_back(AttributeSet::get(Callee->getContext(), FnAttrs));
2267 if (NewRetTy->isVoidTy())
2268 Caller->setName(""); // Void type should not have a name.
const AttributeSet &NewCallerPAL = AttributeSet::get(Callee->getContext(),
                                                     attrVec);
Instruction *NC;
if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
2275 NC = Builder->CreateInvoke(Callee, II->getNormalDest(),
2276 II->getUnwindDest(), Args);
2278 cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
2279 cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
2281 CallInst *CI = cast<CallInst>(Caller);
2282 NC = Builder->CreateCall(Callee, Args);
2284 if (CI->isTailCall())
2285 cast<CallInst>(NC)->setTailCall();
2286 cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
2287 cast<CallInst>(NC)->setAttributes(NewCallerPAL);
2290 // Insert a cast of the return type as necessary.
Value *NV = NC;
if (OldRetTy != NV->getType() && !Caller->use_empty()) {
2293 if (!NV->getType()->isVoidTy()) {
2294 NV = NC = CastInst::CreateBitOrPointerCast(NC, OldRetTy);
2295 NC->setDebugLoc(Caller->getDebugLoc());
2297 // If this is an invoke instruction, we should insert it after the first
// non-phi instruction in the normal successor block.
2299 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
2300 BasicBlock::iterator I = II->getNormalDest()->getFirstInsertionPt();
2301 InsertNewInstBefore(NC, *I);
// Otherwise, it's a call; just insert the cast right after the call.
2304 InsertNewInstBefore(NC, *Caller);
2306 Worklist.AddUsersToWorkList(*Caller);
2308 NV = UndefValue::get(Caller->getType());
2312 if (!Caller->use_empty())
2313 ReplaceInstUsesWith(*Caller, NV);
2314 else if (Caller->hasValueHandle()) {
2315 if (OldRetTy == NV->getType())
2316 ValueHandleBase::ValueIsRAUWd(Caller, NV);
2318 // We cannot call ValueIsRAUWd with a different type, and the
2319 // actual tracked value will disappear.
2320 ValueHandleBase::ValueIsDeleted(Caller);
2323 EraseInstFromFunction(*Caller);
2327 // transformCallThroughTrampoline - Turn a call to a function created by
2328 // init_trampoline / adjust_trampoline intrinsic pair into a direct call to the
2329 // underlying function.
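// Illustrative sketch (added for exposition, not from the original source):
// if @f has an i8* 'nest' parameter and the trampoline was set up with
//   call void @llvm.init.trampoline(i8* %tramp,
//                                   i8* bitcast (i32 (i8*, i32)* @f to i8*),
//                                   i8* %nval)
// then a call made through the adjusted trampoline is rewritten as a direct
// call to @f with %nval spliced in as the 'nest' argument.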
2332 InstCombiner::transformCallThroughTrampoline(CallSite CS,
2333 IntrinsicInst *Tramp) {
2334 Value *Callee = CS.getCalledValue();
2335 PointerType *PTy = cast<PointerType>(Callee->getType());
2336 FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
2337 const AttributeSet &Attrs = CS.getAttributes();
2339 // If the call already has the 'nest' attribute somewhere then give up -
2340 // otherwise 'nest' would occur twice after splicing in the chain.
2341 if (Attrs.hasAttrSomewhere(Attribute::Nest))
2345 "transformCallThroughTrampoline called with incorrect CallSite.");
2347 Function *NestF =cast<Function>(Tramp->getArgOperand(1)->stripPointerCasts());
2348 PointerType *NestFPTy = cast<PointerType>(NestF->getType());
2349 FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());
2351 const AttributeSet &NestAttrs = NestF->getAttributes();
2352 if (!NestAttrs.isEmpty()) {
2353 unsigned NestIdx = 1;
2354 Type *NestTy = nullptr;
2355 AttributeSet NestAttr;
2357 // Look for a parameter marked with the 'nest' attribute.
2358 for (FunctionType::param_iterator I = NestFTy->param_begin(),
2359 E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
2360 if (NestAttrs.hasAttribute(NestIdx, Attribute::Nest)) {
2361 // Record the parameter type and any other attributes.
NestTy = *I;
NestAttr = NestAttrs.getParamAttributes(NestIdx);
2368 Instruction *Caller = CS.getInstruction();
2369 std::vector<Value*> NewArgs;
2370 NewArgs.reserve(CS.arg_size() + 1);
2372 SmallVector<AttributeSet, 8> NewAttrs;
2373 NewAttrs.reserve(Attrs.getNumSlots() + 1);
2375 // Insert the nest argument into the call argument list, which may
2376 // mean appending it. Likewise for attributes.
2378 // Add any result attributes.
2379 if (Attrs.hasAttributes(AttributeSet::ReturnIndex))
2380 NewAttrs.push_back(AttributeSet::get(Caller->getContext(),
2381 Attrs.getRetAttributes()));
2385 CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
2387 if (Idx == NestIdx) {
2388 // Add the chain argument and attributes.
2389 Value *NestVal = Tramp->getArgOperand(2);
2390 if (NestVal->getType() != NestTy)
2391 NestVal = Builder->CreateBitCast(NestVal, NestTy, "nest");
2392 NewArgs.push_back(NestVal);
2393 NewAttrs.push_back(AttributeSet::get(Caller->getContext(),
2400 // Add the original argument and attributes.
2401 NewArgs.push_back(*I);
2402 AttributeSet Attr = Attrs.getParamAttributes(Idx);
2403 if (Attr.hasAttributes(Idx)) {
2404 AttrBuilder B(Attr, Idx);
2405 NewAttrs.push_back(AttributeSet::get(Caller->getContext(),
2406 Idx + (Idx >= NestIdx), B));
2413 // Add any function attributes.
2414 if (Attrs.hasAttributes(AttributeSet::FunctionIndex))
2415 NewAttrs.push_back(AttributeSet::get(FTy->getContext(),
2416 Attrs.getFnAttributes()));
2418 // The trampoline may have been bitcast to a bogus type (FTy).
2419 // Handle this by synthesizing a new function type, equal to FTy
2420 // with the chain parameter inserted.
2422 std::vector<Type*> NewTypes;
2423 NewTypes.reserve(FTy->getNumParams()+1);
2425 // Insert the chain's type into the list of parameter types, which may
2426 // mean appending it.
2429 FunctionType::param_iterator I = FTy->param_begin(),
2430 E = FTy->param_end();
2434 // Add the chain's type.
2435 NewTypes.push_back(NestTy);
2440 // Add the original type.
2441 NewTypes.push_back(*I);
2447 // Replace the trampoline call with a direct call. Let the generic
2448 // code sort out any function type mismatches.
FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                         FTy->isVarArg());
2451 Constant *NewCallee =
2452 NestF->getType() == PointerType::getUnqual(NewFTy) ?
2453 NestF : ConstantExpr::getBitCast(NestF,
2454 PointerType::getUnqual(NewFTy));
2455 const AttributeSet &NewPAL =
2456 AttributeSet::get(FTy->getContext(), NewAttrs);
2458 Instruction *NewCaller;
2459 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
NewCaller = InvokeInst::Create(NewCallee,
                               II->getNormalDest(), II->getUnwindDest(),
                               NewArgs);
2463 cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
2464 cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
2466 NewCaller = CallInst::Create(NewCallee, NewArgs);
2467 if (cast<CallInst>(Caller)->isTailCall())
2468 cast<CallInst>(NewCaller)->setTailCall();
2469 cast<CallInst>(NewCaller)->
2470 setCallingConv(cast<CallInst>(Caller)->getCallingConv());
2471 cast<CallInst>(NewCaller)->setAttributes(NewPAL);
2478 // Replace the trampoline call with a direct call. Since there is no 'nest'
2479 // parameter, there is no need to adjust the argument list. Let the generic
2480 // code sort out any function type mismatches.
2481 Constant *NewCallee =
2482 NestF->getType() == PTy ? NestF :
2483 ConstantExpr::getBitCast(NestF, PTy);
2484 CS.setCalledFunction(NewCallee);
2485 return CS.getInstruction();