//===- InstCombineCalls.cpp -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall and visitInvoke functions.
//
//===----------------------------------------------------------------------===//
#include "InstCombineInternal.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumSimplified, "Number of library calls simplified");
/// getPromotedType - Return the specified type promoted as it would be to pass
/// through a va_arg area.
static Type *getPromotedType(Type *Ty) {
  if (IntegerType *ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}
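
// For illustration: getPromotedType(i8) and getPromotedType(i16) both return
// i32, mirroring C's default argument promotions for varargs; all other types
// pass through unchanged.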
/// reduceToSingleValueType - Given an aggregate type which ultimately holds a
/// single scalar element, like {{{type}}} or [1 x type], return type.
static Type *reduceToSingleValueType(Type *T) {
  while (!T->isSingleValueType()) {
    if (StructType *STy = dyn_cast<StructType>(T)) {
      if (STy->getNumElements() == 1)
        T = STy->getElementType(0);
      else
        break;
    } else if (ArrayType *ATy = dyn_cast<ArrayType>(T)) {
      if (ATy->getNumElements() == 1)
        T = ATy->getElementType();
      else
        break;
    } else
      break;
  }

  return T;
}
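
// For illustration: {{{double}}} peels to {{double}}, then {double}, then
// double, which is a single value type, so the loop stops. A type like
// {double, double} is returned unchanged since no level has exactly one
// element.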
Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
  unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), DL, MI, AC, DT);
  unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), DL, MI, AC, DT);
  unsigned MinAlign = std::min(DstAlign, SrcAlign);
  unsigned CopyAlign = MI->getAlignment();

  if (CopyAlign < MinAlign) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(), MinAlign,
                                      false));
    return MI;
  }

  // If the memcpy/memmove length is 1/2/4/8 bytes, replace it with a single
  // load/store pair.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getArgOperand(2));
  if (!MemOpLength) return nullptr;
  // Source and destination pointer types are always "i8*" for the intrinsic.
  // See if the size is something we can handle with a single primitive
  // load/store. A single load+store correctly handles overlapping memory in
  // the memmove case.
  uint64_t Size = MemOpLength->getLimitedValue();
  assert(Size && "0-sized memory transferring should be removed already.");

  if (Size > 8 || (Size & (Size - 1)))
    return nullptr; // If not 1/2/4/8 bytes, exit.
  // Use an integer load+store unless we can find something better.
  unsigned SrcAddrSp =
    cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
  unsigned DstAddrSp =
    cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();

  IntegerType *IntType = IntegerType::get(MI->getContext(), Size << 3);
  Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
  Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);
  // Memcpy forces the use of i8* for the source and destination. That means
  // that if you're using memcpy to move one double around, you'll get a cast
  // from double* to i8*. We'd much rather use a double load+store than an i64
  // load+store here, because that improves the odds that the source or dest
  // address will be promotable. See if we can find a better type than the
  // integer datatype.
  Value *StrippedDest = MI->getArgOperand(0)->stripPointerCasts();
  MDNode *CopyMD = nullptr;
  if (StrippedDest != MI->getArgOperand(0)) {
    Type *SrcETy = cast<PointerType>(StrippedDest->getType())
                       ->getElementType();
    if (SrcETy->isSized() && DL.getTypeStoreSize(SrcETy) == Size) {
      // The SrcETy might be something like {{{double}}} or [1 x double]. Rip
      // down through these levels if so.
      SrcETy = reduceToSingleValueType(SrcETy);

      if (SrcETy->isSingleValueType()) {
        NewSrcPtrTy = PointerType::get(SrcETy, SrcAddrSp);
        NewDstPtrTy = PointerType::get(SrcETy, DstAddrSp);

        // If the memcpy has metadata describing the members, see if we can
        // get the TBAA tag describing our copy.
        if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa_struct)) {
          if (M->getNumOperands() == 3 && M->getOperand(0) &&
              mdconst::hasa<ConstantInt>(M->getOperand(0)) &&
              mdconst::extract<ConstantInt>(M->getOperand(0))->isNullValue() &&
              M->getOperand(1) &&
              mdconst::hasa<ConstantInt>(M->getOperand(1)) &&
              mdconst::extract<ConstantInt>(M->getOperand(1))->getValue() ==
                  Size &&
              M->getOperand(2) && isa<MDNode>(M->getOperand(2)))
            CopyMD = cast<MDNode>(M->getOperand(2));
        }
      }
    }
  }
  // If the memcpy/memmove provides better alignment info than we can
  // analyze, lose it.
  SrcAlign = std::max(SrcAlign, CopyAlign);
  DstAlign = std::max(DstAlign, CopyAlign);

  Value *Src = Builder->CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
  Value *Dest = Builder->CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
  LoadInst *L = Builder->CreateLoad(Src, MI->isVolatile());
  L->setAlignment(SrcAlign);
  if (CopyMD)
    L->setMetadata(LLVMContext::MD_tbaa, CopyMD);
  StoreInst *S = Builder->CreateStore(L, Dest, MI->isVolatile());
  S->setAlignment(DstAlign);
  if (CopyMD)
    S->setMetadata(LLVMContext::MD_tbaa, CopyMD);

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setArgOperand(2, Constant::getNullValue(MemOpLength->getType()));
  return MI;
}
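
// For illustration, a hand-written IR sketch (not taken from a test) of the
// fold above, assuming an 8-byte copy whose stripped destination turns out to
// be a double*:
//
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 8, i32 8, i1 false)
//
// becomes, after bitcasting both pointers to double*:
//
//   %v = load double, double* %s.cast, align 8
//   store double %v, double* %d.cast, align 8
//
// and the memcpy's length is set to 0 so it is erased on the next iteration.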
Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
  unsigned Alignment = getKnownAlignment(MI->getDest(), DL, MI, AC, DT);
  if (MI->getAlignment() < Alignment) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(), Alignment,
                                      false));
    return MI;
  }

  // Extract the length and alignment and fill if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return nullptr;
  uint64_t Len = LenC->getLimitedValue();
  Alignment = MI->getAlignment();
  assert(Len && "0-sized memory setting should be removed already.");
  // memset(s,c,n) -> store s, c   (for n=1,2,4,8)
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.

    Value *Dest = MI->getDest();
    unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
    Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
    Dest = Builder->CreateBitCast(Dest, NewDstPtrTy);

    // Alignment 0 is identity for alignment 1 for memset, but not store.
    if (Alignment == 0) Alignment = 1;

    // Extract the fill value and store.
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    StoreInst *S = Builder->CreateStore(ConstantInt::get(ITy, Fill), Dest,
                                        MI->isVolatile());
    S->setAlignment(Alignment);

    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return nullptr;
}
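
// For illustration of the fill computation: multiplying the i8 fill value by
// 0x0101010101010101 splats it across every byte. memset(p, 0xAB, 4) gives
// 0xAB * 0x0101010101010101 = 0xABABABABABABABAB, truncated to the i32 store
// type as 0xABABABAB:
//
//   store i32 -1414812757, i32* %p.cast   ; 0xABABABAB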
static Value *SimplifyX86extend(const IntrinsicInst &II,
                                InstCombiner::BuilderTy &Builder,
                                bool SignExtend) {
  VectorType *SrcTy = cast<VectorType>(II.getArgOperand(0)->getType());
  VectorType *DstTy = cast<VectorType>(II.getType());
  unsigned NumDstElts = DstTy->getNumElements();

  // Extract a subvector of the first NumDstElts lanes and sign/zero extend.
  SmallVector<int, 8> ShuffleMask;
  for (int i = 0; i != (int)NumDstElts; ++i)
    ShuffleMask.push_back(i);

  Value *SV = Builder.CreateShuffleVector(II.getArgOperand(0),
                                          UndefValue::get(SrcTy), ShuffleMask);
  return SignExtend ? Builder.CreateSExt(SV, DstTy)
                    : Builder.CreateZExt(SV, DstTy);
}
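
// For illustration, a sketch of what this produces for pmovsxwd
// (<8 x i16> -> <4 x i32>): the low four lanes are extracted with a shuffle
// and then sign extended:
//
//   %sub = shufflevector <8 x i16> %x, <8 x i16> undef,
//                        <4 x i32> <i32 0, i32 1, i32 2, i32 3>
//   %ext = sext <4 x i16> %sub to <4 x i32>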
static Value *SimplifyX86insertps(const IntrinsicInst &II,
                                  InstCombiner::BuilderTy &Builder) {
  if (auto *CInt = dyn_cast<ConstantInt>(II.getArgOperand(2))) {
    VectorType *VecTy = cast<VectorType>(II.getType());
    assert(VecTy->getNumElements() == 4 && "insertps with wrong vector type");

    // The immediate permute control byte looks like this:
    //    [3:0] - zero mask for each 32-bit lane
    //    [5:4] - select one 32-bit destination lane
    //    [7:6] - select one 32-bit source lane

    uint8_t Imm = CInt->getZExtValue();
    uint8_t ZMask = Imm & 0xf;
    uint8_t DestLane = (Imm >> 4) & 0x3;
    uint8_t SourceLane = (Imm >> 6) & 0x3;

    ConstantAggregateZero *ZeroVector = ConstantAggregateZero::get(VecTy);

    // If all zero mask bits are set, this was just a weird way to
    // generate a zero vector.
    if (ZMask == 0xf)
      return ZeroVector;

    // Initialize by passing all of the first source bits through.
    int ShuffleMask[4] = { 0, 1, 2, 3 };

    // We may replace the second operand with the zero vector.
    Value *V1 = II.getArgOperand(1);

    if (ZMask) {
      // If the zero mask is being used with a single input or the zero mask
      // overrides the destination lane, this is a shuffle with the zero vector.
      if ((II.getArgOperand(0) == II.getArgOperand(1)) ||
          (ZMask & (1 << DestLane))) {
        V1 = ZeroVector;
        // We may still move 32-bits of the first source vector from one lane
        // to another.
        ShuffleMask[DestLane] = SourceLane;
        // The zero mask may override the previous insert operation.
        for (unsigned i = 0; i < 4; ++i)
          if ((ZMask >> i) & 0x1)
            ShuffleMask[i] = i + 4;
      } else {
        // TODO: Model this case as 2 shuffles or a 'logical and' plus shuffle?
        return nullptr;
      }
    } else {
      // Replace the selected destination lane with the selected source lane.
      ShuffleMask[DestLane] = SourceLane + 4;
    }

    return Builder.CreateShuffleVector(II.getArgOperand(0), V1, ShuffleMask);
  }
  return nullptr;
}
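
// For illustration: insertps with immediate 0x10 (ZMask == 0, DestLane == 1,
// SourceLane == 0) copies lane 0 of the second operand into lane 1 of the
// first, so it becomes:
//
//   shufflevector <4 x float> %a, <4 x float> %b,
//                 <4 x i32> <i32 0, i32 4, i32 2, i32 3>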
/// The shuffle mask for a perm2*128 selects any two halves of two 256-bit
/// source vectors, unless a zero bit is set. If a zero bit is set,
/// then ignore that half of the mask and clear that half of the vector.
static Value *SimplifyX86vperm2(const IntrinsicInst &II,
                                InstCombiner::BuilderTy &Builder) {
  if (auto *CInt = dyn_cast<ConstantInt>(II.getArgOperand(2))) {
    VectorType *VecTy = cast<VectorType>(II.getType());
    ConstantAggregateZero *ZeroVector = ConstantAggregateZero::get(VecTy);

    // The immediate permute control byte looks like this:
    //    [1:0] - select 128 bits from sources for low half of destination
    //    [2]   - ignore
    //    [3]   - zero low half of destination
    //    [5:4] - select 128 bits from sources for high half of destination
    //    [6]   - ignore
    //    [7]   - zero high half of destination

    uint8_t Imm = CInt->getZExtValue();

    bool LowHalfZero = Imm & 0x08;
    bool HighHalfZero = Imm & 0x80;

    // If both zero mask bits are set, this was just a weird way to
    // generate a zero vector.
    if (LowHalfZero && HighHalfZero)
      return ZeroVector;

    // If 0 or 1 zero mask bits are set, this is a simple shuffle.
    unsigned NumElts = VecTy->getNumElements();
    unsigned HalfSize = NumElts / 2;
    SmallVector<int, 8> ShuffleMask(NumElts);

    // The high bit of the selection field chooses the 1st or 2nd operand.
    bool LowInputSelect = Imm & 0x02;
    bool HighInputSelect = Imm & 0x20;

    // The low bit of the selection field chooses the low or high half
    // of the selected operand.
    bool LowHalfSelect = Imm & 0x01;
    bool HighHalfSelect = Imm & 0x10;

    // Determine which operand(s) are actually in use for this instruction.
    Value *V0 = LowInputSelect ? II.getArgOperand(1) : II.getArgOperand(0);
    Value *V1 = HighInputSelect ? II.getArgOperand(1) : II.getArgOperand(0);

    // If needed, replace operands based on zero mask.
    V0 = LowHalfZero ? ZeroVector : V0;
    V1 = HighHalfZero ? ZeroVector : V1;

    // Permute low half of result.
    unsigned StartIndex = LowHalfSelect ? HalfSize : 0;
    for (unsigned i = 0; i < HalfSize; ++i)
      ShuffleMask[i] = StartIndex + i;

    // Permute high half of result.
    StartIndex = HighHalfSelect ? HalfSize : 0;
    StartIndex += NumElts;
    for (unsigned i = 0; i < HalfSize; ++i)
      ShuffleMask[i + HalfSize] = StartIndex + i;

    return Builder.CreateShuffleVector(V0, V1, ShuffleMask);
  }
  return nullptr;
}
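
// For illustration: vperm2f128 on <8 x float> with immediate 0x31 takes the
// high half of the first operand for the low half of the result and the high
// half of the second operand for the high half, i.e. it becomes:
//
//   shufflevector <8 x float> %a, <8 x float> %b,
//                 <8 x i32> <i32 4, i32 5, i32 6, i32 7,
//                            i32 12, i32 13, i32 14, i32 15>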
/// visitCallInst - CallInst simplification. This mostly only handles folding
/// of intrinsic instructions. For normal calls, it allows visitCallSite to do
/// the heavy lifting.
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
  auto Args = CI.arg_operands();
  if (Value *V = SimplifyCall(CI.getCalledValue(), Args.begin(), Args.end(), DL,
                              TLI, DT, AC))
    return ReplaceInstUsesWith(CI, V);

  if (isFreeCall(&CI, TLI))
    return visitFree(CI);
  // If the caller function is nounwind, mark the call as nounwind, even if the
  // callee isn't.
  if (CI.getParent()->getParent()->doesNotThrow() &&
      !CI.doesNotThrow()) {
    CI.setDoesNotThrow();
    return &CI;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallSite(&CI);
  // Intrinsics cannot occur in an invoke, so handle them here instead of in
  // visitCallSite.
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue())
        return EraseInstFromFunction(CI);

      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
        if (CI->getZExtValue() == 1) {
          // Replace the instruction with just byte operations. We would
          // transform other cases to loads/stores, but we don't know if
          // alignment is sufficient.
        }
    }
    // No other transformations apply to volatile transfers.
    if (MI->isVolatile())
      return nullptr;

    // If we have a memmove and the source operation is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
    if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = CI.getParent()->getParent()->getParent();
          Intrinsic::ID MemCpyID = Intrinsic::memcpy;
          Type *Tys[3] = { CI.getArgOperand(0)->getType(),
                           CI.getArgOperand(1)->getType(),
                           CI.getArgOperand(2)->getType() };
          CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys));
          Changed = true;
        }
    }

    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
      // memmove(x,x,size) -> noop.
      if (MTI->getSource() == MTI->getDest())
        return EraseInstFromFunction(CI);
    }
    // If we can determine a pointer alignment that is bigger than currently
    // set, update the alignment.
    if (isa<MemTransferInst>(MI)) {
      if (Instruction *I = SimplifyMemTransfer(MI))
        return I;
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
      if (Instruction *I = SimplifyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  }

  switch (II->getIntrinsicID()) {
  default: break;
  case Intrinsic::objectsize: {
    uint64_t Size;
    if (getObjectSize(II->getArgOperand(0), Size, DL, TLI))
      return ReplaceInstUsesWith(CI, ConstantInt::get(CI.getType(), Size));
    return nullptr;
  }
  case Intrinsic::bswap: {
    Value *IIOperand = II->getArgOperand(0);
    Value *X = nullptr;

    // bswap(bswap(x)) -> x
    if (match(IIOperand, m_BSwap(m_Value(X))))
      return ReplaceInstUsesWith(CI, X);

    // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
    if (match(IIOperand, m_Trunc(m_BSwap(m_Value(X))))) {
      unsigned C = X->getType()->getPrimitiveSizeInBits() -
        IIOperand->getType()->getPrimitiveSizeInBits();
      Value *CV = ConstantInt::get(X->getType(), C);
      Value *V = Builder->CreateLShr(X, CV);
      return new TruncInst(V, IIOperand->getType());
    }
    break;
  }
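
  // For illustration: with %x : i32 truncated to i16, this fold yields the
  // top two bytes of %x in their original order; bswap(trunc(bswap(%x)))
  // equals trunc(lshr(%x, 16)), where 16 = 32 - 16 is the width difference.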
  case Intrinsic::powi:
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // powi(x, 0) -> 1.0
      if (Power->isZero())
        return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
      // powi(x, 1) -> x
      if (Power->isOne())
        return ReplaceInstUsesWith(CI, II->getArgOperand(0));
      // powi(x, -1) -> 1/x
      if (Power->isAllOnesValue())
        return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
                                          II->getArgOperand(0));
    }
    break;
  case Intrinsic::cttz: {
    // If all bits below the first known one are known zero,
    // this value is constant.
    IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
    // FIXME: Try to simplify vectors of integers.
    if (!IT)
      break;
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    computeKnownBits(II->getArgOperand(0), KnownZero, KnownOne, 0, II);
    unsigned TrailingZeros = KnownOne.countTrailingZeros();
    APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, TrailingZeros)));
    break;
  }
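
  // For illustration: if computeKnownBits proves the argument looks like
  // 0b...1000 (bit 3 known one, bits 2..0 known zero), then every bit below
  // the first known one is known zero and cttz folds to the constant 3.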
  case Intrinsic::ctlz: {
    // If all bits above the first known one are known zero,
    // this value is constant.
    IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
    // FIXME: Try to simplify vectors of integers.
    if (!IT)
      break;
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    computeKnownBits(II->getArgOperand(0), KnownZero, KnownOne, 0, II);
    unsigned LeadingZeros = KnownOne.countLeadingZeros();
    APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, LeadingZeros)));
    break;
  }
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      // Canonicalize constants into the RHS.
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }
    // fall through

  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow: {
    OverflowCheckFlavor OCF =
        IntrinsicIDToOverflowCheckFlavor(II->getIntrinsicID());
    assert(OCF != OCF_INVALID && "unexpected!");

    Value *OperationResult = nullptr;
    Constant *OverflowResult = nullptr;
    if (OptimizeOverflowCheck(OCF, II->getArgOperand(0), II->getArgOperand(1),
                              *II, OperationResult, OverflowResult))
      return CreateOverflowTuple(II, OperationResult, OverflowResult);
    break;
  }
  case Intrinsic::minnum:
  case Intrinsic::maxnum: {
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);

    // fmin(x, x) -> x
    if (Arg0 == Arg1)
      return ReplaceInstUsesWith(CI, Arg0);

    const ConstantFP *C0 = dyn_cast<ConstantFP>(Arg0);
    const ConstantFP *C1 = dyn_cast<ConstantFP>(Arg1);

    // Canonicalize constants into the RHS.
    if (C0 && !C1) {
      II->setArgOperand(0, Arg1);
      II->setArgOperand(1, Arg0);
      return II;
    }

    // fmin(x, nan) -> x
    if (C1 && C1->isNaN())
      return ReplaceInstUsesWith(CI, Arg0);

    // We can fold undef to the other value: if undef were NaN, fmin/fmax
    // would return the other value anyway, and a NaN can only be returned
    // when both operands are NaN.
    //
    // fmin(undef, x) -> x
    if (isa<UndefValue>(Arg0))
      return ReplaceInstUsesWith(CI, Arg1);

    // fmin(x, undef) -> x
    if (isa<UndefValue>(Arg1))
      return ReplaceInstUsesWith(CI, Arg0);
    Value *X = nullptr;
    Value *Y = nullptr;
    if (II->getIntrinsicID() == Intrinsic::minnum) {
      // fmin(x, fmin(x, y)) -> fmin(x, y)
      // fmin(y, fmin(x, y)) -> fmin(x, y)
      if (match(Arg1, m_FMin(m_Value(X), m_Value(Y)))) {
        if (Arg0 == X || Arg0 == Y)
          return ReplaceInstUsesWith(CI, Arg1);
      }

      // fmin(fmin(x, y), x) -> fmin(x, y)
      // fmin(fmin(x, y), y) -> fmin(x, y)
      if (match(Arg0, m_FMin(m_Value(X), m_Value(Y)))) {
        if (Arg1 == X || Arg1 == Y)
          return ReplaceInstUsesWith(CI, Arg0);
      }

      // TODO: fmin(nnan x, inf) -> x
      // TODO: fmin(nnan ninf x, flt_max) -> x
      if (C1 && C1->isInfinity()) {
        // fmin(x, -inf) -> -inf
        if (C1->isNegative())
          return ReplaceInstUsesWith(CI, Arg1);
      }
    } else {
      assert(II->getIntrinsicID() == Intrinsic::maxnum);
      // fmax(x, fmax(x, y)) -> fmax(x, y)
      // fmax(y, fmax(x, y)) -> fmax(x, y)
      if (match(Arg1, m_FMax(m_Value(X), m_Value(Y)))) {
        if (Arg0 == X || Arg0 == Y)
          return ReplaceInstUsesWith(CI, Arg1);
      }

      // fmax(fmax(x, y), x) -> fmax(x, y)
      // fmax(fmax(x, y), y) -> fmax(x, y)
      if (match(Arg0, m_FMax(m_Value(X), m_Value(Y)))) {
        if (Arg1 == X || Arg1 == Y)
          return ReplaceInstUsesWith(CI, Arg0);
      }

      // TODO: fmax(nnan x, -inf) -> x
      // TODO: fmax(nnan ninf x, -flt_max) -> x
      if (C1 && C1->isInfinity()) {
        // fmax(x, inf) -> inf
        if (!C1->isNegative())
          return ReplaceInstUsesWith(CI, Arg1);
      }
    }
    break;
  }
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
    // Turn PPC lvx -> load if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, AC, DT) >=
        16) {
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                          PointerType::getUnqual(II->getType()));
      return new LoadInst(Ptr);
    }
    break;
  case Intrinsic::ppc_vsx_lxvw4x:
  case Intrinsic::ppc_vsx_lxvd2x: {
    // Turn PPC VSX loads into normal loads.
    Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                        PointerType::getUnqual(II->getType()));
    return new LoadInst(Ptr, Twine(""), false, 1);
  }
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, AC, DT) >=
        16) {
      Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(0)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
      return new StoreInst(II->getArgOperand(0), Ptr);
    }
    break;
  case Intrinsic::ppc_vsx_stxvw4x:
  case Intrinsic::ppc_vsx_stxvd2x: {
    // Turn PPC VSX stores into normal stores.
    Type *OpPtrTy = PointerType::getUnqual(II->getArgOperand(0)->getType());
    Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
    return new StoreInst(II->getArgOperand(0), Ptr, false, 1);
  }
  case Intrinsic::ppc_qpx_qvlfs:
    // Turn PPC QPX qvlfs -> load if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, AC, DT) >=
        16) {
      Type *VTy = VectorType::get(Builder->getFloatTy(),
                                  II->getType()->getVectorNumElements());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                          PointerType::getUnqual(VTy));
      Value *Load = Builder->CreateLoad(Ptr);
      return new FPExtInst(Load, II->getType());
    }
    break;
  case Intrinsic::ppc_qpx_qvlfd:
    // Turn PPC QPX qvlfd -> load if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 32, DL, II, AC, DT) >=
        32) {
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                          PointerType::getUnqual(II->getType()));
      return new LoadInst(Ptr);
    }
    break;
  case Intrinsic::ppc_qpx_qvstfs:
    // Turn PPC QPX qvstfs -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, AC, DT) >=
        16) {
      Type *VTy = VectorType::get(Builder->getFloatTy(),
          II->getArgOperand(0)->getType()->getVectorNumElements());
      Value *TOp = Builder->CreateFPTrunc(II->getArgOperand(0), VTy);
      Type *OpPtrTy = PointerType::getUnqual(VTy);
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
      return new StoreInst(TOp, Ptr);
    }
    break;
  case Intrinsic::ppc_qpx_qvstfd:
    // Turn PPC QPX qvstfd -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 32, DL, II, AC, DT) >=
        32) {
      Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(0)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
      return new StoreInst(II->getArgOperand(0), Ptr);
    }
    break;
  case Intrinsic::x86_sse_storeu_ps:
  case Intrinsic::x86_sse2_storeu_pd:
  case Intrinsic::x86_sse2_storeu_dq:
    // Turn X86 storeu -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, AC, DT) >=
        16) {
      Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(1)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), OpPtrTy);
      return new StoreInst(II->getArgOperand(1), Ptr);
    }
    break;

  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64: {
    // These intrinsics only demand the 0th element of their input vectors. If
    // we can simplify the input based on that, do so now.
    unsigned VWidth =
        cast<VectorType>(II->getArgOperand(0)->getType())->getNumElements();
    APInt DemandedElts(VWidth, 1);
    APInt UndefElts(VWidth, 0);
    if (Value *V = SimplifyDemandedVectorElts(II->getArgOperand(0),
                                              DemandedElts, UndefElts)) {
      II->setArgOperand(0, V);
      return II;
    }
    break;
  }
  // Constant fold <A x Bi> << Ci.
  // FIXME: We don't handle _dq because it's a shift of an i128, but is
  // represented in the IR as <2 x i64>. A per element shift is wrong.
  case Intrinsic::x86_sse2_psll_d:
  case Intrinsic::x86_sse2_psll_q:
  case Intrinsic::x86_sse2_psll_w:
  case Intrinsic::x86_sse2_pslli_d:
  case Intrinsic::x86_sse2_pslli_q:
  case Intrinsic::x86_sse2_pslli_w:
  case Intrinsic::x86_avx2_psll_d:
  case Intrinsic::x86_avx2_psll_q:
  case Intrinsic::x86_avx2_psll_w:
  case Intrinsic::x86_avx2_pslli_d:
  case Intrinsic::x86_avx2_pslli_q:
  case Intrinsic::x86_avx2_pslli_w:
  case Intrinsic::x86_sse2_psrl_d:
  case Intrinsic::x86_sse2_psrl_q:
  case Intrinsic::x86_sse2_psrl_w:
  case Intrinsic::x86_sse2_psrli_d:
  case Intrinsic::x86_sse2_psrli_q:
  case Intrinsic::x86_sse2_psrli_w:
  case Intrinsic::x86_avx2_psrl_d:
  case Intrinsic::x86_avx2_psrl_q:
  case Intrinsic::x86_avx2_psrl_w:
  case Intrinsic::x86_avx2_psrli_d:
  case Intrinsic::x86_avx2_psrli_q:
  case Intrinsic::x86_avx2_psrli_w: {
    // Simplify if count is constant. To 0 if >= BitWidth,
    // otherwise to shl/lshr.
    auto CDV = dyn_cast<ConstantDataVector>(II->getArgOperand(1));
    auto CInt = dyn_cast<ConstantInt>(II->getArgOperand(1));
    if (!CDV && !CInt)
      break;
    ConstantInt *Count;
    if (CDV)
      Count = cast<ConstantInt>(CDV->getElementAsConstant(0));
    else
      Count = CInt;

    auto Vec = II->getArgOperand(0);
    auto VT = cast<VectorType>(Vec->getType());
    if (Count->getZExtValue() >
        VT->getElementType()->getPrimitiveSizeInBits() - 1)
      return ReplaceInstUsesWith(
          CI, ConstantAggregateZero::get(Vec->getType()));

    bool isPackedShiftLeft = true;
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::x86_sse2_psrl_d:
    case Intrinsic::x86_sse2_psrl_q:
    case Intrinsic::x86_sse2_psrl_w:
    case Intrinsic::x86_sse2_psrli_d:
    case Intrinsic::x86_sse2_psrli_q:
    case Intrinsic::x86_sse2_psrli_w:
    case Intrinsic::x86_avx2_psrl_d:
    case Intrinsic::x86_avx2_psrl_q:
    case Intrinsic::x86_avx2_psrl_w:
    case Intrinsic::x86_avx2_psrli_d:
    case Intrinsic::x86_avx2_psrli_q:
    case Intrinsic::x86_avx2_psrli_w: isPackedShiftLeft = false; break;
    }

    unsigned VWidth = VT->getNumElements();
    // Get a constant vector of the same type as the first operand.
    auto VTCI = ConstantInt::get(VT->getElementType(), Count->getZExtValue());
    if (isPackedShiftLeft)
      return BinaryOperator::CreateShl(Vec,
          Builder->CreateVectorSplat(VWidth, VTCI));

    return BinaryOperator::CreateLShr(Vec,
        Builder->CreateVectorSplat(VWidth, VTCI));
  }
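
  // For illustration: psll.d on <4 x i32> with a constant count of 8 becomes
  //   shl <4 x i32> %v, <i32 8, i32 8, i32 8, i32 8>
  // while any count of 32 or more folds straight to zeroinitializer, matching
  // the hardware behavior of the packed shifts.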
  case Intrinsic::x86_sse41_pmovsxbd:
  case Intrinsic::x86_sse41_pmovsxbq:
  case Intrinsic::x86_sse41_pmovsxbw:
  case Intrinsic::x86_sse41_pmovsxdq:
  case Intrinsic::x86_sse41_pmovsxwd:
  case Intrinsic::x86_sse41_pmovsxwq:
  case Intrinsic::x86_avx2_pmovsxbd:
  case Intrinsic::x86_avx2_pmovsxbq:
  case Intrinsic::x86_avx2_pmovsxbw:
  case Intrinsic::x86_avx2_pmovsxdq:
  case Intrinsic::x86_avx2_pmovsxwd:
  case Intrinsic::x86_avx2_pmovsxwq:
    if (Value *V = SimplifyX86extend(*II, *Builder, true))
      return ReplaceInstUsesWith(*II, V);
    break;

  case Intrinsic::x86_sse41_pmovzxbd:
  case Intrinsic::x86_sse41_pmovzxbq:
  case Intrinsic::x86_sse41_pmovzxbw:
  case Intrinsic::x86_sse41_pmovzxdq:
  case Intrinsic::x86_sse41_pmovzxwd:
  case Intrinsic::x86_sse41_pmovzxwq:
  case Intrinsic::x86_avx2_pmovzxbd:
  case Intrinsic::x86_avx2_pmovzxbq:
  case Intrinsic::x86_avx2_pmovzxbw:
  case Intrinsic::x86_avx2_pmovzxdq:
  case Intrinsic::x86_avx2_pmovzxwd:
  case Intrinsic::x86_avx2_pmovzxwq:
    if (Value *V = SimplifyX86extend(*II, *Builder, false))
      return ReplaceInstUsesWith(*II, V);
    break;

  case Intrinsic::x86_sse41_insertps:
    if (Value *V = SimplifyX86insertps(*II, *Builder))
      return ReplaceInstUsesWith(*II, V);
    break;
  case Intrinsic::x86_sse4a_insertqi: {
    // insertqi x, y, 64, 0 can just copy y's lower bits and leave the top
    // ones undefined.
    // TODO: eventually we should lower this intrinsic to IR
    if (auto CILength = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
      if (auto CIIndex = dyn_cast<ConstantInt>(II->getArgOperand(3))) {
        unsigned Index = CIIndex->getZExtValue();
        // From AMD documentation: "a value of zero in the field length is
        // defined as length of 64".
        unsigned Length = CILength->equalsInt(0) ? 64 : CILength->getZExtValue();

        // From AMD documentation: "If the sum of the bit index + length field
        // is greater than 64, the results are undefined".
        unsigned End = Index + Length;

        // Note that both field index and field length are 8-bit quantities.
        // Since variables 'Index' and 'Length' are unsigned values
        // obtained from zero-extending field index and field length
        // respectively, their sum should never wrap around.
        if (End > 64)
          return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

        if (Length == 64 && Index == 0) {
          Value *Vec = II->getArgOperand(1);
          Value *Undef = UndefValue::get(Vec->getType());
          const uint32_t Mask[] = { 0, 2 };
          return ReplaceInstUsesWith(
              CI,
              Builder->CreateShuffleVector(
                  Vec, Undef, ConstantDataVector::get(
                                  II->getContext(), makeArrayRef(Mask))));
        } else if (auto Source =
                       dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
          if (Source->hasOneUse() &&
              Source->getArgOperand(1) == II->getArgOperand(1)) {
            // If the source of the insert has only one use and it's another
            // insert (and they're both inserting from the same vector), try to
            // bundle both together.
            auto CISourceLength =
                dyn_cast<ConstantInt>(Source->getArgOperand(2));
            auto CISourceIndex =
                dyn_cast<ConstantInt>(Source->getArgOperand(3));
            if (CISourceIndex && CISourceLength) {
              unsigned SourceIndex = CISourceIndex->getZExtValue();
              unsigned SourceLength = CISourceLength->getZExtValue();
              unsigned SourceEnd = SourceIndex + SourceLength;
              unsigned NewIndex, NewLength;
              bool ShouldReplace = false;
              if (Index <= SourceIndex && SourceIndex <= End) {
                NewIndex = Index;
                NewLength = std::max(End, SourceEnd) - NewIndex;
                ShouldReplace = true;
              } else if (SourceIndex <= Index && Index <= SourceEnd) {
                NewIndex = SourceIndex;
                NewLength = std::max(SourceEnd, End) - NewIndex;
                ShouldReplace = true;
              }

              if (ShouldReplace) {
                Constant *ConstantLength = ConstantInt::get(
                    II->getArgOperand(2)->getType(), NewLength, false);
                Constant *ConstantIndex = ConstantInt::get(
                    II->getArgOperand(3)->getType(), NewIndex, false);
                Value *Args[4] = { Source->getArgOperand(0),
                                   II->getArgOperand(1), ConstantLength,
                                   ConstantIndex };
                Module *M = CI.getParent()->getParent()->getParent();
                Value *F =
                    Intrinsic::getDeclaration(M, Intrinsic::x86_sse4a_insertqi);
                return ReplaceInstUsesWith(CI, Builder->CreateCall(F, Args));
              }
            }
          }
        }
      }
    }
    break;
  }
  case Intrinsic::x86_sse41_pblendvb:
  case Intrinsic::x86_sse41_blendvps:
  case Intrinsic::x86_sse41_blendvpd:
  case Intrinsic::x86_avx_blendv_ps_256:
  case Intrinsic::x86_avx_blendv_pd_256:
  case Intrinsic::x86_avx2_pblendvb: {
    // Convert blendv* to vector selects if the mask is constant.
    // This optimization is convoluted because the intrinsic is defined as
    // getting a vector of floats or doubles for the ps and pd versions.
    // FIXME: That should be changed.
    Value *Mask = II->getArgOperand(2);
    if (auto C = dyn_cast<ConstantDataVector>(Mask)) {
      auto Tyi1 = Builder->getInt1Ty();
      auto SelectorType = cast<VectorType>(Mask->getType());
      auto EltTy = SelectorType->getElementType();
      unsigned Size = SelectorType->getNumElements();
      unsigned BitWidth =
          EltTy->isFloatTy()
              ? 32
              : (EltTy->isDoubleTy() ? 64 : EltTy->getIntegerBitWidth());
      assert((BitWidth == 64 || BitWidth == 32 || BitWidth == 8) &&
             "Wrong arguments for variable blend intrinsic");
      SmallVector<Constant *, 32> Selectors;
      for (unsigned I = 0; I < Size; ++I) {
        // The intrinsics only read the top bit
        uint64_t Selector;
        if (BitWidth == 8)
          Selector = C->getElementAsInteger(I);
        else
          Selector = C->getElementAsAPFloat(I).bitcastToAPInt().getZExtValue();
        Selectors.push_back(ConstantInt::get(Tyi1, Selector >> (BitWidth - 1)));
      }
      auto NewSelector = ConstantVector::get(Selectors);
      return SelectInst::Create(NewSelector, II->getArgOperand(1),
                                II->getArgOperand(0), "blendv");
    }
    break;
  }
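
  // For illustration: blendvps whose constant mask has the sign bit set in
  // lanes 0 and 2 only (e.g. <-0.0, 0.0, -0.0, 0.0>) becomes
  //   select <4 x i1> <i1 true, i1 false, i1 true, i1 false>,
  //          <4 x float> %op1, <4 x float> %op0
  // where the true lanes take elements from the second operand, matching the
  // hardware's "mask bit set selects source" semantics.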
  case Intrinsic::x86_avx_vpermilvar_ps:
  case Intrinsic::x86_avx_vpermilvar_ps_256:
  case Intrinsic::x86_avx_vpermilvar_pd:
  case Intrinsic::x86_avx_vpermilvar_pd_256: {
    // Convert vpermil* to shufflevector if the mask is constant.
    Value *V = II->getArgOperand(1);
    unsigned Size = cast<VectorType>(V->getType())->getNumElements();
    assert(Size == 8 || Size == 4 || Size == 2);
    uint32_t Indexes[8];
    if (auto C = dyn_cast<ConstantDataVector>(V)) {
      // The intrinsics only read one or two bits, clear the rest.
      for (unsigned I = 0; I < Size; ++I) {
        uint32_t Index = C->getElementAsInteger(I) & 0x3;
        if (II->getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_pd ||
            II->getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_pd_256)
          Index >>= 1;
        Indexes[I] = Index;
      }
    } else if (isa<ConstantAggregateZero>(V)) {
      for (unsigned I = 0; I < Size; ++I)
        Indexes[I] = 0;
    } else {
      break;
    }
    // The _256 variants are a bit trickier since the mask bits always index
    // into the corresponding 128 half. In order to convert to a generic
    // shuffle, we have to make that explicit.
    if (II->getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_ps_256 ||
        II->getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_pd_256) {
      for (unsigned I = Size / 2; I < Size; ++I)
        Indexes[I] += Size / 2;
    }
    auto NewC =
        ConstantDataVector::get(V->getContext(), makeArrayRef(Indexes, Size));
    auto V1 = II->getArgOperand(0);
    auto V2 = UndefValue::get(V1->getType());
    auto Shuffle = Builder->CreateShuffleVector(V1, V2, NewC);
    return ReplaceInstUsesWith(CI, Shuffle);
  }
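
  // For illustration: vpermilvar.ps on <4 x float> with the constant mask
  // <i32 3, i32 2, i32 1, i32 0> becomes
  //   shufflevector <4 x float> %v, <4 x float> undef,
  //                 <4 x i32> <i32 3, i32 2, i32 1, i32 0>
  // For the _256 variants the upper mask lanes are rebased by Size/2 above so
  // they keep indexing into the upper 128-bit half.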
  case Intrinsic::x86_avx_vperm2f128_pd_256:
  case Intrinsic::x86_avx_vperm2f128_ps_256:
  case Intrinsic::x86_avx_vperm2f128_si_256:
  case Intrinsic::x86_avx2_vperm2i128:
    if (Value *V = SimplifyX86vperm2(*II, *Builder))
      return ReplaceInstUsesWith(*II, V);
    break;
  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
    // Note that ppc_altivec_vperm has a big-endian bias, so when creating
    // a vectorshuffle for little endian, we must undo the transformation
    // performed on vec_perm in altivec.h. That is, we must complement
    // the permutation mask with respect to 31 and reverse the order of
    // V1 and V2.
    if (Constant *Mask = dyn_cast<Constant>(II->getArgOperand(2))) {
      assert(Mask->getType()->getVectorNumElements() == 16 &&
             "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        Constant *Elt = Mask->getAggregateElement(i);
        if (!Elt || !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
          AllEltsOk = false;
          break;
        }
      }

      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 = Builder->CreateBitCast(II->getArgOperand(0),
                                            Mask->getType());
        Value *Op1 = Builder->CreateBitCast(II->getArgOperand(1),
                                            Mask->getType());
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getAggregateElement(i)))
            continue;
          unsigned Idx =
            cast<ConstantInt>(Mask->getAggregateElement(i))->getZExtValue();
          Idx &= 31;  // Match the hardware behavior.
          if (DL.isLittleEndian())
            Idx = 31 - Idx;

          if (!ExtractedElts[Idx]) {
            Value *Op0ToUse = (DL.isLittleEndian()) ? Op1 : Op0;
            Value *Op1ToUse = (DL.isLittleEndian()) ? Op0 : Op1;
            ExtractedElts[Idx] =
              Builder->CreateExtractElement(Idx < 16 ? Op0ToUse : Op1ToUse,
                                            Builder->getInt32(Idx&15));
          }

          // Insert this value into the result vector.
          Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
                                                Builder->getInt32(i));
        }
        return CastInst::Create(Instruction::BitCast, Result, CI.getType());
      }
    }
    break;
  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane:
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    unsigned MemAlign = getKnownAlignment(II->getArgOperand(0), DL, II, AC, DT);
    unsigned AlignArg = II->getNumArgOperands() - 1;
    ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
    if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
      II->setArgOperand(AlignArg,
                        ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                         MemAlign, false));
      return II;
    }
    break;
  }
  case Intrinsic::arm_neon_vmulls:
  case Intrinsic::arm_neon_vmullu:
  case Intrinsic::aarch64_neon_smull:
  case Intrinsic::aarch64_neon_umull: {
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);

    // Handle mul by zero first:
    if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {
      return ReplaceInstUsesWith(CI, ConstantAggregateZero::get(II->getType()));
    }

    // Check for constant LHS & RHS - in this case we just simplify.
    bool Zext = (II->getIntrinsicID() == Intrinsic::arm_neon_vmullu ||
                 II->getIntrinsicID() == Intrinsic::aarch64_neon_umull);
    VectorType *NewVT = cast<VectorType>(II->getType());
    if (Constant *CV0 = dyn_cast<Constant>(Arg0)) {
      if (Constant *CV1 = dyn_cast<Constant>(Arg1)) {
        CV0 = ConstantExpr::getIntegerCast(CV0, NewVT, /*isSigned=*/!Zext);
        CV1 = ConstantExpr::getIntegerCast(CV1, NewVT, /*isSigned=*/!Zext);

        return ReplaceInstUsesWith(CI, ConstantExpr::getMul(CV0, CV1));
      }

      // Couldn't simplify - canonicalize constant to the RHS.
      std::swap(Arg0, Arg1);
    }

    // Handle mul by one:
    if (Constant *CV1 = dyn_cast<Constant>(Arg1))
      if (ConstantInt *Splat =
            dyn_cast_or_null<ConstantInt>(CV1->getSplatValue()))
        if (Splat->isOne())
          return CastInst::CreateIntegerCast(Arg0, II->getType(),
                                             /*isSigned=*/!Zext);
    break;
  }
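
  // For illustration: smull(<4 x i16> %x, <4 x i16> splat of 1) becomes
  //   sext <4 x i16> %x to <4 x i32>
  // since multiplying the widened value by one is a no-op; the unsigned
  // variants use zext instead.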
  case Intrinsic::AMDGPU_rcp: {
    if (const ConstantFP *C = dyn_cast<ConstantFP>(II->getArgOperand(0))) {
      const APFloat &ArgVal = C->getValueAPF();
      APFloat Val(ArgVal.getSemantics(), 1.0);
      APFloat::opStatus Status = Val.divide(ArgVal,
                                            APFloat::rmNearestTiesToEven);
      // Only do this if it was exact and therefore not dependent on the
      // rounding mode.
      if (Status == APFloat::opOK)
        return ReplaceInstUsesWith(CI, ConstantFP::get(II->getContext(), Val));
    }
    break;
  }
  case Intrinsic::stackrestore: {
    // If the save is right next to the restore, remove the restore. This can
    // happen when variable allocas are DCE'd.
    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
      if (SS->getIntrinsicID() == Intrinsic::stacksave) {
        BasicBlock::iterator BI = SS;
        if (&*++BI == II)
          return EraseInstFromFunction(CI);
      }
    }

    // Scan down this block to see if there is another stack restore in the
    // same block without an intervening call/alloca.
    BasicBlock::iterator BI = II;
    TerminatorInst *TI = II->getParent()->getTerminator();
    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      if (isa<AllocaInst>(BI)) {
        CannotRemove = true;
        break;
      }
      if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
          // If there is a stackrestore below this one, remove this one.
          if (II->getIntrinsicID() == Intrinsic::stackrestore)
            return EraseInstFromFunction(CI);
          // Otherwise, ignore the intrinsic.
        } else {
          // If we found a non-intrinsic call, we can't remove the stack
          // restore.
          CannotRemove = true;
          break;
        }
      }
    }

    // If the stack restore is in a return, resume, or unwind block and if there
    // are no allocas or calls between the restore and the return, nuke the
    // restore.
    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
      return EraseInstFromFunction(CI);
    break;
  }
  case Intrinsic::assume: {
    // Canonicalize assume(a && b) -> assume(a); assume(b);
    // Note: New assumption intrinsics created here are registered by
    // the InstCombineIRInserter object.
    Value *IIOperand = II->getArgOperand(0), *A, *B,
          *AssumeIntrinsic = II->getCalledValue();
    if (match(IIOperand, m_And(m_Value(A), m_Value(B)))) {
      Builder->CreateCall(AssumeIntrinsic, A, II->getName());
      Builder->CreateCall(AssumeIntrinsic, B, II->getName());
      return EraseInstFromFunction(*II);
    }
    // assume(!(a || b)) -> assume(!a); assume(!b);
    if (match(IIOperand, m_Not(m_Or(m_Value(A), m_Value(B))))) {
      Builder->CreateCall(AssumeIntrinsic, Builder->CreateNot(A),
                          II->getName());
      Builder->CreateCall(AssumeIntrinsic, Builder->CreateNot(B),
                          II->getName());
      return EraseInstFromFunction(*II);
    }

    // assume( (load addr) != null ) -> add 'nonnull' metadata to load
    // (if assume is valid at the load)
    if (ICmpInst* ICmp = dyn_cast<ICmpInst>(IIOperand)) {
      Value *LHS = ICmp->getOperand(0);
      Value *RHS = ICmp->getOperand(1);
      if (ICmpInst::ICMP_NE == ICmp->getPredicate() &&
          isa<LoadInst>(LHS) &&
          isa<Constant>(RHS) &&
          RHS->getType()->isPointerTy() &&
          cast<Constant>(RHS)->isNullValue()) {
        LoadInst* LI = cast<LoadInst>(LHS);
        if (isValidAssumeForContext(II, LI, DT)) {
          MDNode *MD = MDNode::get(II->getContext(), None);
          LI->setMetadata(LLVMContext::MD_nonnull, MD);
          return EraseInstFromFunction(*II);
        }
      }
      // TODO: apply nonnull return attributes to calls and invokes
      // TODO: apply range metadata for range check patterns?
    }

    // If there is a dominating assume with the same condition as this one,
    // then this one is redundant, and should be removed.
    APInt KnownZero(1, 0), KnownOne(1, 0);
    computeKnownBits(IIOperand, KnownZero, KnownOne, 0, II);
    if (KnownOne.isAllOnesValue())
      return EraseInstFromFunction(*II);
    break;
  }
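
  // For illustration, the first canonicalization above rewrites
  //   %c = and i1 %a, %b
  //   call void @llvm.assume(i1 %c)
  // into two assumptions that later queries can consume independently:
  //   call void @llvm.assume(i1 %a)
  //   call void @llvm.assume(i1 %b)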
  case Intrinsic::experimental_gc_relocate: {
    // Translate facts known about a pointer before relocating into
    // facts about the relocate value, while being careful to
    // preserve relocation semantics.
    GCRelocateOperands Operands(II);
    Value *DerivedPtr = Operands.getDerivedPtr();
    auto *GCRelocateType = cast<PointerType>(II->getType());

    // Remove the relocation if unused, note that this check is required
    // to prevent the cases below from looping forever.
    if (II->use_empty())
      return EraseInstFromFunction(*II);

    // Undef is undef, even after relocation.
    // TODO: provide a hook for this in GCStrategy. This is clearly legal for
    // most practical collectors, but there was discussion in the review thread
    // about whether it was legal for all possible collectors.
    if (isa<UndefValue>(DerivedPtr)) {
      // gc_relocate is uncasted. Use undef of gc_relocate's type to replace it.
      return ReplaceInstUsesWith(*II, UndefValue::get(GCRelocateType));
    }

    // The relocation of null will be null for most any collector.
    // TODO: provide a hook for this in GCStrategy. There might be some weird
    // collector this property does not hold for.
    if (isa<ConstantPointerNull>(DerivedPtr)) {
      // gc_relocate is uncasted. Use the null pointer of gc_relocate's type to
      // replace it.
      return ReplaceInstUsesWith(*II, ConstantPointerNull::get(GCRelocateType));
    }

    // isKnownNonNull -> nonnull attribute
    if (isKnownNonNull(DerivedPtr))
      II->addAttribute(AttributeSet::ReturnIndex, Attribute::NonNull);

    // isDereferenceablePointer -> deref attribute
    if (isDereferenceablePointer(DerivedPtr, DL)) {
      if (Argument *A = dyn_cast<Argument>(DerivedPtr)) {
        uint64_t Bytes = A->getDereferenceableBytes();
        II->addDereferenceableAttr(AttributeSet::ReturnIndex, Bytes);
      }
    }

    // TODO: bitcast(relocate(p)) -> relocate(bitcast(p))
    // Canonicalize on the type from the uses to the defs

    // TODO: relocate((gep p, C, C2, ...)) -> gep(relocate(p), C, C2, ...)
  }
  }

  return visitCallSite(II);
}
// InvokeInst simplification
//
Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
  return visitCallSite(&II);
}
/// isSafeToEliminateVarargsCast - If this cast does not affect the value
/// passed through the varargs area, we can eliminate the use of the cast.
static bool isSafeToEliminateVarargsCast(const CallSite CS,
                                         const DataLayout &DL,
                                         const CastInst *const CI,
                                         const int ix) {
  if (!CI->isLosslessCast())
    return false;

  // If this is a GC intrinsic, avoid munging types. We need types for
  // statepoint reconstruction in SelectionDAG.
  // TODO: This is probably something which should be expanded to all
  // intrinsics since the entire point of intrinsics is that
  // they are understandable by the optimizer.
  if (isStatepoint(CS) || isGCRelocate(CS) || isGCResult(CS))
    return false;

  // The size of ByVal or InAlloca arguments is derived from the type, so we
  // can't change to a type with a different size. If the size were
  // passed explicitly we could avoid this check.
  if (!CS.isByValOrInAllocaArgument(ix))
    return true;

  Type *SrcTy =
      cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
  Type *DstTy = cast<PointerType>(CI->getType())->getElementType();
  if (!SrcTy->isSized() || !DstTy->isSized())
    return false;
  if (DL.getTypeAllocSize(SrcTy) != DL.getTypeAllocSize(DstTy))
    return false;
  return true;
}
// Try to fold some different types of calls here.
// Currently we're only working with the checking functions, memcpy_chk,
// mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,
// strcat_chk and strncat_chk.
Instruction *InstCombiner::tryOptimizeCall(CallInst *CI) {
  if (!CI->getCalledFunction()) return nullptr;

  auto InstCombineRAUW = [this](Instruction *From, Value *With) {
    ReplaceInstUsesWith(*From, With);
  };
  LibCallSimplifier Simplifier(DL, TLI, InstCombineRAUW);
  if (Value *With = Simplifier.optimizeCall(CI)) {
    ++NumSimplified;
    return CI->use_empty() ? CI : ReplaceInstUsesWith(*CI, With);
  }

  return nullptr;
}
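
// For illustration: per the comment above, LibCallSimplifier can rewrite a
// checked call such as __memcpy_chk(dst, src, len, objsize) into a plain
// memcpy when the object size is known to be sufficient, which then becomes
// eligible for the memcpy folds earlier in this file.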
static IntrinsicInst *FindInitTrampolineFromAlloca(Value *TrampMem) {
  // Strip off at most one level of pointer casts, looking for an alloca. This
  // is good enough in practice and simpler than handling any number of casts.
  Value *Underlying = TrampMem->stripPointerCasts();
  if (Underlying != TrampMem &&
      (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
    return nullptr;
  if (!isa<AllocaInst>(Underlying))
    return nullptr;

  IntrinsicInst *InitTrampoline = nullptr;
  for (User *U : TrampMem->users()) {
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
    if (!II)
      return nullptr;
    if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
      if (InitTrampoline)
        // More than one init_trampoline writes to this value. Give up.
        return nullptr;
      InitTrampoline = II;
      continue;
    }
    if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
      // Allow any number of calls to adjust.trampoline.
      continue;
    return nullptr;
  }

  // No call to init.trampoline found.
  if (!InitTrampoline)
    return nullptr;

  // Check that the alloca is being used in the expected way.
  if (InitTrampoline->getOperand(0) != TrampMem)
    return nullptr;

  return InitTrampoline;
}
static IntrinsicInst *FindInitTrampolineFromBB(IntrinsicInst *AdjustTramp,
                                               Value *TrampMem) {
  // Visit all the previous instructions in the basic block, and try to find a
  // init.trampoline which has a direct path to the adjust.trampoline.
  for (BasicBlock::iterator I = AdjustTramp,
       E = AdjustTramp->getParent()->begin(); I != E; ) {
    Instruction *Inst = --I;
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
      if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
          II->getOperand(0) == TrampMem)
        return II;
    if (Inst->mayWriteToMemory())
      return nullptr;
  }
  return nullptr;
}
// Given a call to llvm.adjust.trampoline, find and return the corresponding
// call to llvm.init.trampoline if the call to the trampoline can be optimized
// to a direct call to a function. Otherwise return NULL.
//
static IntrinsicInst *FindInitTrampoline(Value *Callee) {
  Callee = Callee->stripPointerCasts();
  IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
  if (!AdjustTramp ||
      AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)
    return nullptr;

  Value *TrampMem = AdjustTramp->getOperand(0);

  if (IntrinsicInst *IT = FindInitTrampolineFromAlloca(TrampMem))
    return IT;
  if (IntrinsicInst *IT = FindInitTrampolineFromBB(AdjustTramp, TrampMem))
    return IT;
  return nullptr;
}
// visitCallSite - Improvements for call and invoke instructions.
//
Instruction *InstCombiner::visitCallSite(CallSite CS) {
  if (isAllocLikeFn(CS.getInstruction(), TLI))
    return visitAllocSite(*CS.getInstruction());

  bool Changed = false;

  // Mark any parameters that are known to be non-null with the nonnull
  // attribute. This is helpful for inlining calls to functions with null
  // checks on their arguments.
  unsigned ArgNo = 0;
  for (Value *V : CS.args()) {
    if (!CS.paramHasAttr(ArgNo+1, Attribute::NonNull) &&
        isKnownNonNull(V)) {
      AttributeSet AS = CS.getAttributes();
      AS = AS.addAttribute(CS.getInstruction()->getContext(), ArgNo+1,
                           Attribute::NonNull);
      CS.setAttributes(AS);
      Changed = true;
    }
    ArgNo++;
  }
  assert(ArgNo == CS.arg_size() && "sanity check");
  // If the callee is a pointer to a function, attempt to move any casts to the
  // arguments of the call/invoke.
  Value *Callee = CS.getCalledValue();
  if (!isa<Function>(Callee) && transformConstExprCastCall(CS))
    return nullptr;

  if (Function *CalleeF = dyn_cast<Function>(Callee))
    // If the call and callee calling conventions don't match, this call must
    // be unreachable, as the call is undefined.
    if (CalleeF->getCallingConv() != CS.getCallingConv() &&
        // Only do this for calls to a function with a body. A prototype may
        // not actually end up matching the implementation's calling conv for a
        // variety of reasons (e.g. it may be written in assembly).
        !CalleeF->isDeclaration()) {
      Instruction *OldCall = CS.getInstruction();
      new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                    UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                    OldCall);
      // If OldCall does not return void then replaceAllUsesWith undef.
      // This allows ValueHandlers and custom metadata to adjust themselves.
      if (!OldCall->getType()->isVoidTy())
        ReplaceInstUsesWith(*OldCall, UndefValue::get(OldCall->getType()));
      if (isa<CallInst>(OldCall))
        return EraseInstFromFunction(*OldCall);

      // We cannot remove an invoke, because it would change the CFG, just
      // change the callee to a null pointer.
      cast<InvokeInst>(OldCall)->setCalledFunction(
          Constant::getNullValue(CalleeF->getType()));
      return nullptr;
    }
  if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
    // If CS does not return void then replaceAllUsesWith undef.
    // This allows ValueHandlers and custom metadata to adjust themselves.
    if (!CS.getInstruction()->getType()->isVoidTy())
      ReplaceInstUsesWith(*CS.getInstruction(),
                          UndefValue::get(CS.getInstruction()->getType()));

    if (isa<InvokeInst>(CS.getInstruction())) {
      // Can't remove an invoke because we cannot change the CFG.
      return nullptr;
    }

    // This instruction is not reachable, just remove it. We insert a store to
    // undef so that we know that this code is not reachable, despite the fact
    // that we can't modify the CFG here.
    new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                  UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                  CS.getInstruction());

    return EraseInstFromFunction(*CS.getInstruction());
  }
  if (IntrinsicInst *II = FindInitTrampoline(Callee))
    return transformCallThroughTrampoline(CS, II);

  PointerType *PTy = cast<PointerType>(Callee->getType());
  FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  if (FTy->isVarArg()) {
    int ix = FTy->getNumParams();
    // See if we can optimize any arguments passed through the varargs area of
    // the call.
    for (CallSite::arg_iterator I = CS.arg_begin() + FTy->getNumParams(),
           E = CS.arg_end(); I != E; ++I, ++ix) {
      CastInst *CI = dyn_cast<CastInst>(*I);
      if (CI && isSafeToEliminateVarargsCast(CS, DL, CI, ix)) {
        *I = CI->getOperand(0);
        Changed = true;
      }
    }
  }

  if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
    // Inline asm calls cannot throw - mark them 'nounwind'.
    CS.setDoesNotThrow();
    Changed = true;
  }

  // Try to optimize the call if possible, we require DataLayout for most of
  // this. None of these calls are seen as possibly dead so go ahead and
  // delete the instruction now.
  if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
    Instruction *I = tryOptimizeCall(CI);
    // If we changed something, return the result; otherwise fall through to
    // the generic check below.
    if (I) return EraseInstFromFunction(*I);
  }

  return Changed ? CS.getInstruction() : nullptr;
}
// transformConstExprCastCall - If the callee is a constexpr cast of a function,
// attempt to move the cast to the arguments of the call/invoke.
//
bool InstCombiner::transformConstExprCastCall(CallSite CS) {
  Function *Callee =
    dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
  if (!Callee)
    return false;

  // The prototype of a thunk is a lie; don't try to directly call such a
  // function.
  if (Callee->hasFnAttribute("thunk"))
    return false;

  Instruction *Caller = CS.getInstruction();
  const AttributeSet &CallerPAL = CS.getAttributes();
  // Okay, this is a cast from a function to a different type. Unless doing so
  // would cause a type conversion of one of our arguments, change this call to
  // be a direct call with arguments casted to the appropriate types.
  FunctionType *FT = Callee->getFunctionType();
  Type *OldRetTy = Caller->getType();
  Type *NewRetTy = FT->getReturnType();

  // Check to see if we are changing the return type...
  if (OldRetTy != NewRetTy) {

    if (NewRetTy->isStructTy())
      return false; // TODO: Handle multiple return values.

    if (!CastInst::isBitOrNoopPointerCastable(NewRetTy, OldRetTy, DL)) {
      if (Callee->isDeclaration())
        return false;   // Cannot transform this return value.

      if (!Caller->use_empty() &&
          // void -> non-void is handled specially
          !NewRetTy->isVoidTy())
        return false;   // Cannot transform this return value.
    }

    if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
      AttrBuilder RAttrs(CallerPAL, AttributeSet::ReturnIndex);
      if (RAttrs.overlaps(AttributeFuncs::typeIncompatible(NewRetTy)))
        return false;   // Attribute not compatible with transformed value.
    }
    // If the callsite is an invoke instruction, and the return value is used by
    // a PHI node in a successor, we cannot change the return type of the call
    // because there is no place to put the cast instruction (without breaking
    // the critical edge). Bail out in this case.
    if (!Caller->use_empty())
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
        for (User *U : II->users())
          if (PHINode *PN = dyn_cast<PHINode>(U))
            if (PN->getParent() == II->getNormalDest() ||
                PN->getParent() == II->getUnwindDest())
              return false;
  }

  unsigned NumActualArgs = CS.arg_size();
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  // Prevent us turning:
  // declare void @takes_i32_inalloca(i32* inalloca)
  //  call void bitcast (void (i32*)* @takes_i32_inalloca to void (i32)*)(i32 0)
  //
  // into:
  //  call void @takes_i32_inalloca(i32* null)
  //
  // Similarly, avoid folding away bitcasts of byval calls.
  if (Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) ||
      Callee->getAttributes().hasAttrSomewhere(Attribute::ByVal))
    return false;
  CallSite::arg_iterator AI = CS.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);
    Type *ActTy = (*AI)->getType();

    if (!CastInst::isBitOrNoopPointerCastable(ActTy, ParamTy, DL))
      return false;   // Cannot transform this parameter value.

    if (AttrBuilder(CallerPAL.getParamAttributes(i + 1), i + 1).
          overlaps(AttributeFuncs::typeIncompatible(ParamTy)))
      return false;   // Attribute not compatible with transformed value.

    if (CS.isInAllocaArgument(i))
      return false;   // Cannot transform to and from inalloca.

    // If the parameter is passed as a byval argument, then we have to have a
    // sized type and the sized type has to have the same size as the old type.
    if (ParamTy != ActTy &&
        CallerPAL.getParamAttributes(i + 1).hasAttribute(i + 1,
                                                         Attribute::ByVal)) {
      PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
      if (!ParamPTy || !ParamPTy->getElementType()->isSized())
        return false;

      Type *CurElTy = ActTy->getPointerElementType();
      if (DL.getTypeAllocSize(CurElTy) !=
          DL.getTypeAllocSize(ParamPTy->getElementType()))
        return false;
    }
  }
  if (Callee->isDeclaration()) {
    // Do not delete arguments unless we have a function body.
    if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
      return false;
    // If the callee is just a declaration, don't change the varargsness of the
    // call. We don't want to introduce a varargs call where one doesn't
    // already exist.
    PointerType *APTy = cast<PointerType>(CS.getCalledValue()->getType());
    if (FT->isVarArg() !=
        cast<FunctionType>(APTy->getElementType())->isVarArg())
      return false;
    // If both the callee and the cast type are varargs, we still have to make
    // sure the number of fixed parameters is the same, or we have the same
    // ABI issues as if we introduced a varargs call.
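    // E.g. (hypothetical signatures, for illustration only): casting
    // void (i32, ...)* to void (i32, i32, ...)* would move the second argument
    // out of the variadic area into a fixed slot, and some ABIs pass fixed and
    // variadic arguments differently.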
    if (FT->isVarArg() &&
        cast<FunctionType>(APTy->getElementType())->isVarArg() &&
        FT->getNumParams() !=
            cast<FunctionType>(APTy->getElementType())->getNumParams())
      return false;
  }
  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      !CallerPAL.isEmpty())
    // In this case we have more arguments than the new function type, but we
    // won't be dropping them. Check that these extra arguments have attributes
    // that are compatible with being a vararg call argument.
    for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
      unsigned Index = CallerPAL.getSlotIndex(i - 1);
      if (Index <= FT->getNumParams())
        break;

      // Check if it has an attribute that's incompatible with varargs.
      AttributeSet PAttrs = CallerPAL.getSlotAttributes(i - 1);
      if (PAttrs.hasAttribute(Index, Attribute::StructRet))
        return false;
    }
  // Okay, we decided that this is a safe thing to do: go ahead and start
  // inserting cast instructions as necessary.
  std::vector<Value*> Args;
  Args.reserve(NumActualArgs);
  SmallVector<AttributeSet, 8> attrVec;
  attrVec.reserve(NumCommonArgs);
  // Get any return attributes.
  AttrBuilder RAttrs(CallerPAL, AttributeSet::ReturnIndex);

  // If the return value is not being used, the type may not be compatible
  // with the existing attributes. Wipe out any problematic attributes.
  RAttrs.remove(AttributeFuncs::typeIncompatible(NewRetTy));

  // Add the new return attributes.
  if (RAttrs.hasAttributes())
    attrVec.push_back(AttributeSet::get(Caller->getContext(),
                                        AttributeSet::ReturnIndex, RAttrs));
  AI = CS.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);

    if ((*AI)->getType() == ParamTy) {
      Args.push_back(*AI);
    } else {
      Args.push_back(Builder->CreateBitOrPointerCast(*AI, ParamTy));
    }

    // Add any parameter attributes.
    AttrBuilder PAttrs(CallerPAL.getParamAttributes(i + 1), i + 1);
    if (PAttrs.hasAttributes())
      attrVec.push_back(AttributeSet::get(Caller->getContext(), i + 1,
                                          PAttrs));
  }
  // If the function takes more arguments than the call was passing, add the
  // missing parameters as null values now.
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
    Args.push_back(Constant::getNullValue(FT->getParamType(i)));
  // If we are dropping arguments from the call, the extra arguments can only
  // be preserved when the callee is varargs; otherwise they are silently
  // dropped.
  if (FT->getNumParams() < NumActualArgs) {
    // TODO: if (!FT->isVarArg()) this call may be unreachable. PR14722
    if (FT->isVarArg()) {
      // Add all of the arguments in their promoted form to the arg list.
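      // E.g. (illustrative): an i16 argument bound for the variadic area is
      // widened to i32 first (via zext, since the opcode below is chosen with
      // unsigned semantics), mirroring C's default argument promotions.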
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        Type *PTy = getPromotedType((*AI)->getType());
        if (PTy != (*AI)->getType()) {
          // Must promote to pass through the va_arg area!
          Instruction::CastOps opcode =
              CastInst::getCastOpcode(*AI, false, PTy, false);
          Args.push_back(Builder->CreateCast(opcode, *AI, PTy));
        } else {
          Args.push_back(*AI);
        }
        // Add any parameter attributes.
        AttrBuilder PAttrs(CallerPAL.getParamAttributes(i + 1), i + 1);
        if (PAttrs.hasAttributes())
          attrVec.push_back(AttributeSet::get(FT->getContext(), i + 1,
                                              PAttrs));
      }
    }
  }
  AttributeSet FnAttrs = CallerPAL.getFnAttributes();
  if (CallerPAL.hasAttributes(AttributeSet::FunctionIndex))
    attrVec.push_back(AttributeSet::get(Callee->getContext(), FnAttrs));

  if (NewRetTy->isVoidTy())
    Caller->setName(""); // Void type should not have a name.

  const AttributeSet &NewCallerPAL = AttributeSet::get(Callee->getContext(),
                                                       attrVec);
  Instruction *NC;
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NC = Builder->CreateInvoke(Callee, II->getNormalDest(),
                               II->getUnwindDest(), Args);
    NC->takeName(II);
    cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
    cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
  } else {
    CallInst *CI = cast<CallInst>(Caller);
    NC = Builder->CreateCall(Callee, Args);
    NC->takeName(CI);
    if (CI->isTailCall())
      cast<CallInst>(NC)->setTailCall();
    cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
    cast<CallInst>(NC)->setAttributes(NewCallerPAL);
  }
  // Insert a cast of the return type as necessary.
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      NV = NC = CastInst::CreateBitOrPointerCast(NC, OldRetTy);
      NC->setDebugLoc(Caller->getDebugLoc());

      // If this is an invoke instruction, we should insert it after the first
      // non-phi instruction in the normal successor block.
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        BasicBlock::iterator I = II->getNormalDest()->getFirstInsertionPt();
        InsertNewInstBefore(NC, *I);
      } else {
        // Otherwise, it's a call: just insert the cast right after the call.
        InsertNewInstBefore(NC, *Caller);
      }
      Worklist.AddUsersToWorkList(*Caller);
    } else {
      NV = UndefValue::get(Caller->getType());
    }
  }
  if (!Caller->use_empty())
    ReplaceInstUsesWith(*Caller, NV);
  else if (Caller->hasValueHandle()) {
    if (OldRetTy == NV->getType())
      ValueHandleBase::ValueIsRAUWd(Caller, NV);
    else
      // We cannot call ValueIsRAUWd with a different type, and the
      // actual tracked value will disappear.
      ValueHandleBase::ValueIsDeleted(Caller);
  }

  EraseInstFromFunction(*Caller);
  return true;
}
// transformCallThroughTrampoline - Turn a call to a function created by
// init_trampoline / adjust_trampoline intrinsic pair into a direct call to the
// underlying function.
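//
// A rough sketch of the rewrite (hand-written IR, not from a testcase):
//
//   call void @llvm.init.trampoline(i8* %tramp,
//                                   i8* bitcast (void (i8*, i32)* @f to i8*),
//                                   i8* %chain)
//   %p = call i8* @llvm.adjust.trampoline(i8* %tramp)
//   %fp = bitcast i8* %p to void (i32)*
//   call void %fp(i32 42)
//
// becomes a direct call that passes the chain explicitly, assuming @f marks
// its first parameter 'nest':
//
//   call void @f(i8* nest %chain, i32 42)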
Instruction *
InstCombiner::transformCallThroughTrampoline(CallSite CS,
                                             IntrinsicInst *Tramp) {
  Value *Callee = CS.getCalledValue();
  PointerType *PTy = cast<PointerType>(Callee->getType());
  FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  const AttributeSet &Attrs = CS.getAttributes();
  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return nullptr;
1828 "transformCallThroughTrampoline called with incorrect CallSite.");
  Function *NestF =
      cast<Function>(Tramp->getArgOperand(1)->stripPointerCasts());
  PointerType *NestFPTy = cast<PointerType>(NestF->getType());
  FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());
  const AttributeSet &NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestIdx = 1;
    Type *NestTy = nullptr;
    AttributeSet NestAttr;
    // Look for a parameter marked with the 'nest' attribute.
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
         E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
      if (NestAttrs.hasAttribute(NestIdx, Attribute::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = NestAttrs.getParamAttributes(NestIdx);
        break;
      }
    if (NestTy) {
      Instruction *Caller = CS.getInstruction();
      std::vector<Value*> NewArgs;
      NewArgs.reserve(CS.arg_size() + 1);

      SmallVector<AttributeSet, 8> NewAttrs;
      NewAttrs.reserve(Attrs.getNumSlots() + 1);
      // Insert the nest argument into the call argument list, which may
      // mean appending it. Likewise for attributes.
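      // E.g. (illustrative): with NestIdx == 2 and original arguments
      // (%a, %b), the rewritten list is (%a, %chain, %b), and attribute
      // indices at or beyond NestIdx shift up by one.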
      // Add any result attributes.
      if (Attrs.hasAttributes(AttributeSet::ReturnIndex))
        NewAttrs.push_back(AttributeSet::get(Caller->getContext(),
                                             Attrs.getRetAttributes()));
      {
        unsigned Idx = 1;
        CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
        do {
          if (Idx == NestIdx) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp->getArgOperand(2);
            if (NestVal->getType() != NestTy)
              NestVal = Builder->CreateBitCast(NestVal, NestTy, "nest");
            NewArgs.push_back(NestVal);
            NewAttrs.push_back(AttributeSet::get(Caller->getContext(),
                                                 NestAttr));
          }

          if (I == E)
            break;
          // Add the original argument and attributes.
          NewArgs.push_back(*I);
          AttributeSet Attr = Attrs.getParamAttributes(Idx);
          if (Attr.hasAttributes(Idx)) {
            AttrBuilder B(Attr, Idx);
            NewAttrs.push_back(AttributeSet::get(Caller->getContext(),
                                                 Idx + (Idx >= NestIdx), B));
          }

          ++Idx;
          ++I;
        } while (1);
      }
      // Add any function attributes.
      if (Attrs.hasAttributes(AttributeSet::FunctionIndex))
        NewAttrs.push_back(AttributeSet::get(FTy->getContext(),
                                             Attrs.getFnAttributes()));
      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.
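      // E.g. (illustrative): if FTy is void (i32) and the nest parameter is an
      // i8* at index 1, the synthesized type is void (i8*, i32).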
      std::vector<Type*> NewTypes;
      NewTypes.reserve(FTy->getNumParams() + 1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned Idx = 1;
        FunctionType::param_iterator I = FTy->param_begin(),
          E = FTy->param_end();

        do {
          if (Idx == NestIdx)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++Idx;
          ++I;
        } while (1);
      }
      // Replace the trampoline call with a direct call. Let the generic
      // code sort out any function type mismatches.
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                               FTy->isVarArg());
      Constant *NewCallee =
          NestF->getType() == PointerType::getUnqual(NewFTy)
              ? NestF
              : ConstantExpr::getBitCast(NestF, PointerType::getUnqual(NewFTy));
      const AttributeSet &NewPAL =
          AttributeSet::get(FTy->getContext(), NewAttrs);
      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        NewCaller = InvokeInst::Create(NewCallee, II->getNormalDest(),
                                       II->getUnwindDest(), NewArgs);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewCallee, NewArgs);
        if (cast<CallInst>(Caller)->isTailCall())
          cast<CallInst>(NewCaller)->setTailCall();
        cast<CallInst>(NewCaller)->
            setCallingConv(cast<CallInst>(Caller)->getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }

      return NewCaller;
    }
  }
  // Replace the trampoline call with a direct call. Since there is no 'nest'
  // parameter, there is no need to adjust the argument list. Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee =
      NestF->getType() == PTy ? NestF : ConstantExpr::getBitCast(NestF, PTy);
  CS.setCalledFunction(NewCallee);
  return CS.getInstruction();
}