//===- InstCombineCalls.cpp -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall and visitInvoke functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Analysis/MemoryBuiltins.h"
using namespace llvm;

/// getPromotedType - Return the specified type promoted as it would be to pass
/// through a va_arg area.
static const Type *getPromotedType(const Type *Ty) {
  if (const IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}
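
// For example (illustrative): getPromotedType(i8) and getPromotedType(i16)
// both return i32, mirroring C's default argument promotions, while i32 and
// wider integers (and all non-integer types) are returned unchanged.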

/// EnforceKnownAlignment - If the specified pointer points to an object that
/// we control, modify the object's alignment to PrefAlign. This isn't
/// often possible though. If alignment is important, a more reliable approach
/// is to simply align all global variables and allocation instructions to
/// their preferred alignment from the beginning.
///
static unsigned EnforceKnownAlignment(Value *V,
                                      unsigned Align, unsigned PrefAlign) {

  User *U = dyn_cast<User>(V);
  if (!U) return Align;

  switch (Operator::getOpcode(U)) {
  default: break;
  case Instruction::BitCast:
    return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
  case Instruction::GetElementPtr: {
    // If all indexes are zero, it is just the alignment of the base pointer.
    bool AllZeroOperands = true;
    for (User::op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e; ++i)
      if (!isa<Constant>(*i) ||
          !cast<Constant>(*i)->isNullValue()) {
        AllZeroOperands = false;
        break;
      }

    if (AllZeroOperands) {
      // Treat this like a bitcast.
      return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
    }
    break;
  }
  }

  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // If there is a large requested alignment and we can, bump up the alignment
    // of the global.
    if (!GV->isDeclaration()) {
      if (GV->getAlignment() >= PrefAlign)
        Align = GV->getAlignment();
      else {
        GV->setAlignment(PrefAlign);
        Align = PrefAlign;
      }
    }
  } else if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    // If there is a requested alignment and if this is an alloca, round up.
    if (AI->getAlignment() >= PrefAlign)
      Align = AI->getAlignment();
    else {
      AI->setAlignment(PrefAlign);
      Align = PrefAlign;
    }
  }

  return Align;
}
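
// For example (a sketch): given a global defined in this module with 'align 4'
// and a PrefAlign of 16, the global's alignment is raised to 16 and 16 is
// returned. An external declaration's alignment cannot be changed, so for it
// only the alignment that is already known is returned.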

/// GetOrEnforceKnownAlignment - If the specified pointer has an alignment that
/// we can determine, return it, otherwise return 0. If PrefAlign is specified,
/// and it is more than the alignment of the ultimate object, see if we can
/// increase the alignment of the ultimate object, making this check succeed.
unsigned InstCombiner::GetOrEnforceKnownAlignment(Value *V,
                                                  unsigned PrefAlign) {
  unsigned BitWidth = TD ? TD->getTypeSizeInBits(V->getType()) :
                      sizeof(PrefAlign) * CHAR_BIT;
  APInt Mask = APInt::getAllOnesValue(BitWidth);
  APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  ComputeMaskedBits(V, Mask, KnownZero, KnownOne);
  unsigned TrailZ = KnownZero.countTrailingOnes();
  unsigned Align = 1u << std::min(BitWidth - 1, TrailZ);

  if (PrefAlign > Align)
    Align = EnforceKnownAlignment(V, Align, PrefAlign);

  // We don't need to make any adjustment.
  return Align;
}
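
// For example (a sketch): if ComputeMaskedBits proves that the low four bits
// of a pointer's value are zero, the pointer is at least 16-byte aligned and
// 16 is returned, even if no explicit alignment was ever recorded on the
// underlying alloca or global.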

Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
  unsigned DstAlign = GetOrEnforceKnownAlignment(MI->getOperand(1));
  unsigned SrcAlign = GetOrEnforceKnownAlignment(MI->getOperand(2));
  unsigned MinAlign = std::min(DstAlign, SrcAlign);
  unsigned CopyAlign = MI->getAlignment();

  if (CopyAlign < MinAlign) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      MinAlign, false));
    return MI;
  }

  // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with a
  // load/store.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getOperand(3));
  if (MemOpLength == 0) return 0;

  // Source and destination pointer types are always "i8*" for intrinsic. See
  // if the size is something we can handle with a single primitive load/store.
  // A single load+store correctly handles overlapping memory in the memmove
  // case.
  unsigned Size = MemOpLength->getZExtValue();
  if (Size == 0) return MI;  // Delete this mem transfer.

  if (Size > 8 || (Size&(Size-1)))
    return 0;  // If not 1/2/4/8 bytes, exit.

  // Use an integer load+store unless we can find something better.
  const Type *NewPtrTy =
            PointerType::getUnqual(IntegerType::get(MI->getContext(), Size<<3));

  // Memcpy forces the use of i8* for the source and destination. That means
  // that if you're using memcpy to move one double around, you'll get a cast
  // from double* to i8*. We'd much rather use a double load+store rather than
  // an i64 load+store, here because this improves the odds that the source or
  // dest address will be promotable. See if we can find a better type than the
  // integer datatype.
  Value *StrippedDest = MI->getOperand(1)->stripPointerCasts();
  if (StrippedDest != MI->getOperand(1)) {
    const Type *SrcETy = cast<PointerType>(StrippedDest->getType())
                                                    ->getElementType();
    if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
      // The SrcETy might be something like {{{double}}} or [1 x double]. Rip
      // down through these levels if so.
      while (!SrcETy->isSingleValueType()) {
        if (const StructType *STy = dyn_cast<StructType>(SrcETy)) {
          if (STy->getNumElements() == 1)
            SrcETy = STy->getElementType(0);
          else
            break;
        } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
          if (ATy->getNumElements() == 1)
            SrcETy = ATy->getElementType();
          else
            break;
        } else
          break;
      }

      if (SrcETy->isSingleValueType())
        NewPtrTy = PointerType::getUnqual(SrcETy);
    }
  }

  // If the memcpy/memmove provides better alignment info than we can
  // infer, use it.
  SrcAlign = std::max(SrcAlign, CopyAlign);
  DstAlign = std::max(DstAlign, CopyAlign);

  Value *Src = Builder->CreateBitCast(MI->getOperand(2), NewPtrTy);
  Value *Dest = Builder->CreateBitCast(MI->getOperand(1), NewPtrTy);
  Instruction *L = new LoadInst(Src, "tmp", false, SrcAlign);
  InsertNewInstBefore(L, *MI);
  InsertNewInstBefore(new StoreInst(L, Dest, false, DstAlign), *MI);

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setOperand(3, Constant::getNullValue(MemOpLength->getType()));
  return MI;
}
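
// For example (illustrative IR, assuming the destination was originally a
// double*):
//   call void @llvm.memcpy.i32(i8* %d, i8* %s, i32 8, i32 8)
// becomes
//   %tmp = load double* %s1, align 8
//   store double %tmp, double* %d1, align 8
// where %s1 and %d1 are bitcasts of %s and %d back to double*.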

Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
  unsigned Alignment = GetOrEnforceKnownAlignment(MI->getDest());
  if (MI->getAlignment() < Alignment) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      Alignment, false));
    return MI;
  }

  // Extract the length and alignment and fill if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isInteger(8))
    return 0;
  uint64_t Len = LenC->getZExtValue();
  Alignment = MI->getAlignment();

  // If the length is zero, this is a no-op.
  if (Len == 0) return MI;  // memset(d,c,0,a) -> noop

  // memset(s,c,n) -> store s, c  (for n=1,2,4,8)
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    const Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.

    Value *Dest = MI->getDest();
    Dest = Builder->CreateBitCast(Dest, PointerType::getUnqual(ITy));

    // Alignment 0 is identity for alignment 1 for memset, but not store.
    if (Alignment == 0) Alignment = 1;

    // Extract the fill value and store.
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    InsertNewInstBefore(new StoreInst(ConstantInt::get(ITy, Fill),
                                      Dest, false, Alignment), *MI);

    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return 0;
}
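
// For example (illustrative IR): multiplying the i8 fill byte by
// 0x0101010101010101 replicates it into every byte of the wider integer, so
//   call void @llvm.memset.i32(i8* %p, i8 1, i32 4, i32 4)
// becomes the single store
//   store i32 16843009, i32* %p, align 4    ; 16843009 == 0x01010101
// after %p is bitcast to i32*.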

/// visitCallInst - CallInst simplification. This mostly only handles folding
/// of intrinsic instructions. For normal calls, it allows visitCallSite to do
/// the heavy lifting.
///
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
  if (isFreeCall(&CI))
    return visitFree(CI);

  // If the caller function is nounwind, mark the call as nounwind, even if the
  // callee isn't.
  if (CI.getParent()->getParent()->doesNotThrow() &&
      !CI.doesNotThrow()) {
    CI.setDoesNotThrow();
    return &CI;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallSite(&CI);

  // Intrinsics cannot occur in an invoke, so handle them here instead of in
  // visitCallSite.
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue()) return EraseInstFromFunction(CI);

      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
        if (CI->getZExtValue() == 1) {
          // Replace the instruction with just byte operations. We would
          // transform other cases to loads/stores, but we don't know if
          // alignment is sufficient.
        }
    }

    // If we have a memmove and the source operation is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
    if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = CI.getParent()->getParent()->getParent();
          Intrinsic::ID MemCpyID = Intrinsic::memcpy;
          const Type *Tys[1];
          Tys[0] = CI.getOperand(3)->getType();
          CI.setOperand(0,
                        Intrinsic::getDeclaration(M, MemCpyID, Tys, 1));
          Changed = true;
        }
    }
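
    // For example (illustrative IR): with @G a constant global,
    //   call void @llvm.memmove.i32(i8* %d, i8* @G, i32 %n, i32 1)
    // is rewritten in place into the equivalent @llvm.memcpy.i32 call, since
    // the read-only source cannot overlap the written destination.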

    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
      // memmove(x,x,size) -> noop.
      if (MTI->getSource() == MTI->getDest())
        return EraseInstFromFunction(CI);
    }

    // If we can determine a pointer alignment that is bigger than currently
    // set, update the alignment.
    if (isa<MemTransferInst>(MI)) {
      if (Instruction *I = SimplifyMemTransfer(MI))
        return I;
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
      if (Instruction *I = SimplifyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  }

  switch (II->getIntrinsicID()) {
  default: break;
  case Intrinsic::bswap:
    // bswap(bswap(x)) -> x
    if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getOperand(1)))
      if (Operand->getIntrinsicID() == Intrinsic::bswap)
        return ReplaceInstUsesWith(CI, Operand->getOperand(1));

    // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
    if (TruncInst *TI = dyn_cast<TruncInst>(II->getOperand(1))) {
      if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(TI->getOperand(0)))
        if (Operand->getIntrinsicID() == Intrinsic::bswap) {
          unsigned C = Operand->getType()->getPrimitiveSizeInBits() -
                       TI->getType()->getPrimitiveSizeInBits();
          Value *CV = ConstantInt::get(Operand->getType(), C);
          Value *V = Builder->CreateLShr(Operand->getOperand(1), CV);
          return new TruncInst(V, TI->getType());
        }
    }
    break;
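
  // For example (a sketch): with an i32 x truncated to i16, c is 32 - 16 = 16,
  // so bswap(trunc(bswap(x))) becomes trunc(lshr(x, 16)): both expressions
  // select the top two bytes of x in their original order.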

  case Intrinsic::powi:
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getOperand(2))) {
      // powi(x, 0) -> 1.0
      if (Power->isZero())
        return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
      // powi(x, 1) -> x
      if (Power->isOne())
        return ReplaceInstUsesWith(CI, II->getOperand(1));
      // powi(x, -1) -> 1/x
      if (Power->isAllOnesValue())
        return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
                                          II->getOperand(1));
    }
    break;

  case Intrinsic::cttz: {
    // If all bits below the first known one are known zero,
    // this value is constant.
    const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getOperand(1), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned TrailingZeros = KnownOne.countTrailingZeros();
    APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, TrailingZeros)));
    }
    break;
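
  // For example (a sketch): if the operand is ((x << 5) | 16), bits 0-3 are
  // known zero and bit 4 is known one, so cttz folds to the constant 4. The
  // ctlz case below is the mirror image, looking above the first known one.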

  case Intrinsic::ctlz: {
    // If all bits above the first known one are known zero,
    // this value is constant.
    const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getOperand(1), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned LeadingZeros = KnownOne.countLeadingZeros();
    APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, LeadingZeros)));
    }
    break;

  case Intrinsic::uadd_with_overflow: {
    Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
    const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt Mask = APInt::getSignBit(BitWidth);
    APInt LHSKnownZero(BitWidth, 0);
    APInt LHSKnownOne(BitWidth, 0);
    ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
    bool LHSKnownNegative = LHSKnownOne[BitWidth - 1];
    bool LHSKnownPositive = LHSKnownZero[BitWidth - 1];

    if (LHSKnownNegative || LHSKnownPositive) {
      APInt RHSKnownZero(BitWidth, 0);
      APInt RHSKnownOne(BitWidth, 0);
      ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);
      bool RHSKnownNegative = RHSKnownOne[BitWidth - 1];
      bool RHSKnownPositive = RHSKnownZero[BitWidth - 1];
      if (LHSKnownNegative && RHSKnownNegative) {
        // The sign bit is set in both cases: this MUST overflow.
        // Create a simple add instruction, and insert it into the struct.
        Instruction *Add = BinaryOperator::CreateAdd(LHS, RHS, "", &CI);
        Worklist.Add(Add);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),ConstantInt::getTrue(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, Add, 0);
      }

      if (LHSKnownPositive && RHSKnownPositive) {
        // The sign bit is clear in both cases: this CANNOT overflow.
        // Create a simple add instruction, and insert it into the struct.
        Instruction *Add = BinaryOperator::CreateNUWAdd(LHS, RHS, "", &CI);
        Worklist.Add(Add);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, Add, 0);
      }
    }
  }
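
  // For example (a sketch): if both i32 operands have their sign bit known
  // set, the unsigned sum is at least 2^32 and certainly wraps, so the
  // overflow flag becomes a constant 'true' and only the plain add remains.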

  // FALL THROUGH uadd into sadd
  case Intrinsic::sadd_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getOperand(1)) &&
        !isa<Constant>(II->getOperand(2))) {
      Value *LHS = II->getOperand(1);
      II->setOperand(1, II->getOperand(2));
      II->setOperand(2, LHS);
      return II;
    }

    // X + undef -> undef
    if (isa<UndefValue>(II->getOperand(2)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getOperand(2))) {
      // X + 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getOperand(1)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getOperand(1), 0);
      }
    }
    break;
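
  // For example (illustrative IR):
  //   %r = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %x, i32 0)
  // folds to {%x, false}: the overflow flag is a constant false, and %x is
  // inserted into the first field of the result struct.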

  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
    // undef - X -> undef
    // X - undef -> undef
    if (isa<UndefValue>(II->getOperand(1)) ||
        isa<UndefValue>(II->getOperand(2)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getOperand(2))) {
      // X - 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getOperand(1)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getOperand(1), 0);
      }
    }
    break;

  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getOperand(1)) &&
        !isa<Constant>(II->getOperand(2))) {
      Value *LHS = II->getOperand(1);
      II->setOperand(1, II->getOperand(2));
      II->setOperand(2, LHS);
      return II;
    }

    // X * undef -> undef
    if (isa<UndefValue>(II->getOperand(2)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getOperand(2))) {
      // X * 0 -> {0, false}
      if (RHSI->isZero())
        return ReplaceInstUsesWith(CI, Constant::getNullValue(II->getType()));

      // X * 1 -> {X, false}
      if (RHSI->equalsInt(1)) {
        Constant *V[] = {
          UndefValue::get(II->getOperand(1)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getOperand(1), 0);
      }
    }
    break;
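
  // For example: multiplication by the constant 1 can never overflow, so
  //   call {i32, i1} @llvm.umul.with.overflow.i32(i32 %x, i32 1)
  // folds to {%x, false}, and multiplication by 0 folds to the all-zero
  // struct {0, false}.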

  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::x86_sse_loadu_ps:
  case Intrinsic::x86_sse2_loadu_pd:
  case Intrinsic::x86_sse2_loadu_dq:
    // Turn PPC lvx     -> load if the pointer is known aligned.
    // Turn X86 loadups -> load if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) {
      Value *Ptr = Builder->CreateBitCast(II->getOperand(1),
                                        PointerType::getUnqual(II->getType()));
      return new LoadInst(Ptr);
    }
    break;
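
  // For example (illustrative IR): once %p is known 16-byte aligned,
  //   call <4 x float> @llvm.x86.sse.loadu.ps(i8* %p)
  // becomes an ordinary 'load' of %p bitcast to <4 x float>*, which later
  // passes can reason about far more easily than the opaque intrinsic.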

  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getOperand(2), 16) >= 16) {
      const Type *OpPtrTy =
        PointerType::getUnqual(II->getOperand(1)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getOperand(2), OpPtrTy);
      return new StoreInst(II->getOperand(1), Ptr);
    }
    break;

  case Intrinsic::x86_sse_storeu_ps:
  case Intrinsic::x86_sse2_storeu_pd:
  case Intrinsic::x86_sse2_storeu_dq:
    // Turn X86 storeu -> store if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) {
      const Type *OpPtrTy =
        PointerType::getUnqual(II->getOperand(2)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getOperand(1), OpPtrTy);
      return new StoreInst(II->getOperand(2), Ptr);
    }
    break;

  case Intrinsic::x86_sse_cvttss2si: {
    // This intrinsic only demands the 0th element of its input vector. If
    // we can simplify the input based on that, do so now.
    unsigned VWidth =
      cast<VectorType>(II->getOperand(1)->getType())->getNumElements();
    APInt DemandedElts(VWidth, 1);
    APInt UndefElts(VWidth, 0);
    if (Value *V = SimplifyDemandedVectorElts(II->getOperand(1), DemandedElts,
                                              UndefElts)) {
      II->setOperand(1, V);
      return II;
    }
    break;
  }

  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
    if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getOperand(3))) {
      assert(Mask->getNumOperands() == 16 && "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        if (!isa<ConstantInt>(Mask->getOperand(i)) &&
            !isa<UndefValue>(Mask->getOperand(i))) {
          AllEltsOk = false;
          break;
        }
      }

      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 = Builder->CreateBitCast(II->getOperand(1), Mask->getType());
        Value *Op1 = Builder->CreateBitCast(II->getOperand(2), Mask->getType());
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getOperand(i)))
            continue;
          unsigned Idx=cast<ConstantInt>(Mask->getOperand(i))->getZExtValue();
          Idx &= 31;  // Match the hardware behavior.

          if (ExtractedElts[Idx] == 0) {
            ExtractedElts[Idx] =
              Builder->CreateExtractElement(Idx < 16 ? Op0 : Op1,
                  ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                   Idx&15, false), "tmp");
          }

          // Insert this value into the result vector.
          Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
                         ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                          i, false), "tmp");
        }
        return CastInst::Create(Instruction::BitCast, Result, CI.getType());
      }
    }
    break;
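
  // For example (a sketch): a constant mask byte of 3 selects byte 3 of V1,
  // while a mask byte of 19 selects byte 3 of V2 (indices 16-31 address the
  // second operand), so the loop above rebuilds the permutation as explicit
  // extractelement/insertelement pairs on the two byte vectors.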

  case Intrinsic::stackrestore: {
    // If the save is right next to the restore, remove the restore. This can
    // happen when variable allocas are DCE'd.
    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getOperand(1))) {
      if (SS->getIntrinsicID() == Intrinsic::stacksave) {
        BasicBlock::iterator BI = SS;
        if (&*++BI == II)
          return EraseInstFromFunction(CI);
      }
    }

    // Scan down this block to see if there is another stack restore in the
    // same block without an intervening call/alloca.
    BasicBlock::iterator BI = II;
    TerminatorInst *TI = II->getParent()->getTerminator();
    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      if (isa<AllocaInst>(BI) || isMalloc(BI)) {
        CannotRemove = true;
        break;
      }
      if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
          // If there is a stackrestore below this one, remove this one.
          if (II->getIntrinsicID() == Intrinsic::stackrestore)
            return EraseInstFromFunction(CI);
          // Otherwise, ignore the intrinsic.
        } else {
          // If we found a non-intrinsic call, we can't remove the stack
          // restore.
          CannotRemove = true;
          break;
        }
      }
    }

    // If the stack restore is in a return/unwind block and if there are no
    // allocas or calls between the restore and the return, nuke the restore.
    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<UnwindInst>(TI)))
      return EraseInstFromFunction(CI);
    break;
  }
  }

  return visitCallSite(II);
}

// InvokeInst simplification
//
Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
  return visitCallSite(&II);
}

/// isSafeToEliminateVarargsCast - If this cast does not affect the value
/// passed through the varargs area, we can eliminate the use of the cast.
static bool isSafeToEliminateVarargsCast(const CallSite CS,
                                         const CastInst * const CI,
                                         const TargetData * const TD,
                                         const int ix) {
  if (!CI->isLosslessCast())
    return false;

  // The size of ByVal arguments is derived from the type, so we
  // can't change to a type with a different size. If the size were
  // passed explicitly we could avoid this check.
  if (!CS.paramHasAttr(ix, Attribute::ByVal))
    return true;

  const Type* SrcTy =
          cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
  const Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
  if (!SrcTy->isSized() || !DstTy->isSized())
    return false;
  if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
    return false;
  return true;
}
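
// For example: passing 'bitcast (i32* %p to i8*)' to a varargs function is a
// lossless pointer cast, so if the argument is not ByVal the cast can be
// dropped and %p passed directly.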

// visitCallSite - Improvements for call and invoke instructions.
//
Instruction *InstCombiner::visitCallSite(CallSite CS) {
  bool Changed = false;

  // If the callee is a constexpr cast of a function, attempt to move the cast
  // to the arguments of the call/invoke.
  if (transformConstExprCastCall(CS)) return 0;

  Value *Callee = CS.getCalledValue();

  if (Function *CalleeF = dyn_cast<Function>(Callee))
    if (CalleeF->getCallingConv() != CS.getCallingConv()) {
      Instruction *OldCall = CS.getInstruction();
      // If the call and callee calling conventions don't match, this call must
      // be unreachable, as the call is undefined.
      new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                    OldCall);
      // If OldCall does not return void then replaceAllUsesWith undef.
      // This allows ValueHandlers and custom metadata to adjust themselves.
      if (!OldCall->getType()->isVoidTy())
        OldCall->replaceAllUsesWith(UndefValue::get(OldCall->getType()));
      if (isa<CallInst>(OldCall))   // Not worth removing an invoke here.
        return EraseInstFromFunction(*OldCall);
      return 0;
    }

  if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
    // This instruction is not reachable, just remove it. We insert a store to
    // undef so that we know that this code is not reachable, despite the fact
    // that we can't modify the CFG here.
    new StoreInst(ConstantInt::getTrue(Callee->getContext()),
               UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                  CS.getInstruction());

    // If CS does not return void then replaceAllUsesWith undef.
    // This allows ValueHandlers and custom metadata to adjust themselves.
    if (!CS.getInstruction()->getType()->isVoidTy())
      CS.getInstruction()->
        replaceAllUsesWith(UndefValue::get(CS.getInstruction()->getType()));

    if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
      // Don't break the CFG, insert a dummy cond branch.
      BranchInst::Create(II->getNormalDest(), II->getUnwindDest(),
                         ConstantInt::getTrue(Callee->getContext()), II);
    }
    return EraseInstFromFunction(*CS.getInstruction());
  }

  if (BitCastInst *BC = dyn_cast<BitCastInst>(Callee))
    if (IntrinsicInst *In = dyn_cast<IntrinsicInst>(BC->getOperand(0)))
      if (In->getIntrinsicID() == Intrinsic::init_trampoline)
        return transformCallThroughTrampoline(CS);

  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  if (FTy->isVarArg()) {
    int ix = FTy->getNumParams() + (isa<InvokeInst>(Callee) ? 3 : 1);
    // See if we can optimize any arguments passed through the varargs area of
    // the call.
    for (CallSite::arg_iterator I = CS.arg_begin()+FTy->getNumParams(),
           E = CS.arg_end(); I != E; ++I, ++ix) {
      CastInst *CI = dyn_cast<CastInst>(*I);
      if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
        *I = CI->getOperand(0);
        Changed = true;
      }
    }
  }

  if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
    // Inline asm calls cannot throw - mark them 'nounwind'.
    CS.setDoesNotThrow();
    Changed = true;
  }

  return Changed ? CS.getInstruction() : 0;
}

// transformConstExprCastCall - If the callee is a constexpr cast of a function,
// attempt to move the cast to the arguments of the call/invoke.
//
bool InstCombiner::transformConstExprCastCall(CallSite CS) {
  if (!isa<ConstantExpr>(CS.getCalledValue())) return false;
  ConstantExpr *CE = cast<ConstantExpr>(CS.getCalledValue());
  if (CE->getOpcode() != Instruction::BitCast ||
      !isa<Function>(CE->getOperand(0)))
    return false;
  Function *Callee = cast<Function>(CE->getOperand(0));
  Instruction *Caller = CS.getInstruction();
  const AttrListPtr &CallerPAL = CS.getAttributes();

  // Okay, this is a cast from a function to a different type. Unless doing so
  // would cause a type conversion of one of our arguments, change this call to
  // be a direct call with arguments cast to the appropriate types.
  //
  const FunctionType *FT = Callee->getFunctionType();
  const Type *OldRetTy = Caller->getType();
  const Type *NewRetTy = FT->getReturnType();

  if (isa<StructType>(NewRetTy))
    return false; // TODO: Handle multiple return values.

  // Check to see if we are changing the return type...
  if (OldRetTy != NewRetTy) {
    if (Callee->isDeclaration() &&
        // Conversion is ok if changing from one pointer type to another or from
        // a pointer to an integer of the same size.
        !((isa<PointerType>(OldRetTy) || !TD ||
           OldRetTy == TD->getIntPtrType(Caller->getContext())) &&
          (isa<PointerType>(NewRetTy) || !TD ||
           NewRetTy == TD->getIntPtrType(Caller->getContext()))))
      return false;   // Cannot transform this return value.

    if (!Caller->use_empty() &&
        // void -> non-void is handled specially
        !NewRetTy->isVoidTy() && !CastInst::isCastable(NewRetTy, OldRetTy))
      return false;   // Cannot transform this return value.

    if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
      Attributes RAttrs = CallerPAL.getRetAttributes();
      if (RAttrs & Attribute::typeIncompatible(NewRetTy))
        return false;   // Attribute not compatible with transformed value.
    }

    // If the callsite is an invoke instruction, and the return value is used by
    // a PHI node in a successor, we cannot change the return type of the call
    // because there is no place to put the cast instruction (without breaking
    // the critical edge). Bail out in this case.
    if (!Caller->use_empty())
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
        for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
             UI != E; ++UI)
          if (PHINode *PN = dyn_cast<PHINode>(*UI))
            if (PN->getParent() == II->getNormalDest() ||
                PN->getParent() == II->getUnwindDest())
              return false;
  }

  unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  CallSite::arg_iterator AI = CS.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    const Type *ParamTy = FT->getParamType(i);
    const Type *ActTy = (*AI)->getType();

    if (!CastInst::isCastable(ActTy, ParamTy))
      return false;   // Cannot transform this parameter value.

    if (CallerPAL.getParamAttributes(i + 1)
        & Attribute::typeIncompatible(ParamTy))
      return false;   // Attribute not compatible with transformed value.

    // Converting from one pointer type to another or between a pointer and an
    // integer of the same size is safe even if we do not have a body.
    bool isConvertible = ActTy == ParamTy ||
      (TD && ((isa<PointerType>(ParamTy) ||
               ParamTy == TD->getIntPtrType(Caller->getContext())) &&
              (isa<PointerType>(ActTy) ||
               ActTy == TD->getIntPtrType(Caller->getContext()))));
    if (Callee->isDeclaration() && !isConvertible) return false;
  }

  if (FT->getNumParams() < NumActualArgs && !FT->isVarArg() &&
      Callee->isDeclaration())
    return false;   // Do not delete arguments unless we have a function body.

  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      !CallerPAL.isEmpty())
    // In this case we have more arguments than the new function type, but we
    // won't be dropping them. Check that these extra arguments have attributes
    // that are compatible with being a vararg call argument.
    for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
      if (CallerPAL.getSlot(i - 1).Index <= FT->getNumParams())
        break;
      Attributes PAttrs = CallerPAL.getSlot(i - 1).Attrs;
      if (PAttrs & Attribute::VarArgsIncompatible)
        return false;
    }

  // Okay, we decided that this is a safe thing to do: go ahead and start
  // inserting cast instructions as necessary...
  std::vector<Value*> Args;
  Args.reserve(NumActualArgs);
  SmallVector<AttributeWithIndex, 8> attrVec;
  attrVec.reserve(NumCommonArgs);

  // Get any return attributes.
  Attributes RAttrs = CallerPAL.getRetAttributes();

  // If the return value is not being used, the type may not be compatible
  // with the existing attributes. Wipe out any problematic attributes.
  RAttrs &= ~Attribute::typeIncompatible(NewRetTy);

  // Add the new return attributes.
  if (RAttrs)
    attrVec.push_back(AttributeWithIndex::get(0, RAttrs));

  AI = CS.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    const Type *ParamTy = FT->getParamType(i);
    if ((*AI)->getType() == ParamTy) {
      Args.push_back(*AI);
    } else {
      Instruction::CastOps opcode = CastInst::getCastOpcode(*AI,
          false, ParamTy, false);
      Args.push_back(Builder->CreateCast(opcode, *AI, ParamTy, "tmp"));
    }

    // Add any parameter attributes.
    if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
      attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
  }

  // If the function takes more arguments than the call was taking, add them
  // now.
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
    Args.push_back(Constant::getNullValue(FT->getParamType(i)));

  // If we are removing arguments to the function, emit an obnoxious warning.
  if (FT->getNumParams() < NumActualArgs) {
    if (!FT->isVarArg()) {
      errs() << "WARNING: While resolving call to function '"
             << Callee->getName() << "' arguments were dropped!\n";
    } else {
      // Add all of the arguments in their promoted form to the arg list.
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        const Type *PTy = getPromotedType((*AI)->getType());
        if (PTy != (*AI)->getType()) {
          // Must promote to pass through va_arg area!
          Instruction::CastOps opcode =
            CastInst::getCastOpcode(*AI, false, PTy, false);
          Args.push_back(Builder->CreateCast(opcode, *AI, PTy, "tmp"));
        } else {
          Args.push_back(*AI);
        }

        // Add any parameter attributes.
        if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
          attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
      }
    }
  }

  if (Attributes FnAttrs = CallerPAL.getFnAttributes())
    attrVec.push_back(AttributeWithIndex::get(~0, FnAttrs));

  if (NewRetTy->isVoidTy())
    Caller->setName("");   // Void type should not have a name.

  const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec.begin(),
                                                     attrVec.end());

  Instruction *NC;
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NC = InvokeInst::Create(Callee, II->getNormalDest(), II->getUnwindDest(),
                            Args.begin(), Args.end(),
                            Caller->getName(), Caller);
    cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
    cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
  } else {
    NC = CallInst::Create(Callee, Args.begin(), Args.end(),
                          Caller->getName(), Caller);
    CallInst *CI = cast<CallInst>(Caller);
    if (CI->isTailCall())
      cast<CallInst>(NC)->setTailCall();
    cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
    cast<CallInst>(NC)->setAttributes(NewCallerPAL);
  }

  // Insert a cast of the return type as necessary.
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      Instruction::CastOps opcode = CastInst::getCastOpcode(NC, false,
                                                            OldRetTy, false);
      NV = NC = CastInst::Create(opcode, NC, OldRetTy, "tmp");

      // If this is an invoke instruction, we should insert it after the first
      // non-phi instruction in the normal successor block.
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        BasicBlock::iterator I = II->getNormalDest()->getFirstNonPHI();
        InsertNewInstBefore(NC, *I);
      } else {
        // Otherwise, it's a call, just insert cast right after the call instr.
        InsertNewInstBefore(NC, *Caller);
      }
      Worklist.AddUsersToWorkList(*Caller);
    } else {
      NV = UndefValue::get(Caller->getType());
    }
  }

  if (!Caller->use_empty())
    Caller->replaceAllUsesWith(NV);

  EraseInstFromFunction(*Caller);
  return true;
}
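
// For example (illustrative IR, assuming @f is defined in this module):
//   %r = call i32 bitcast (i32 (i32)* @f to i32 (i8)*)(i8 %c)
// becomes a direct call to @f, with %c cast up to i32 so that it matches
// @f's actual signature.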

// transformCallThroughTrampoline - Turn a call to a function created by the
// init_trampoline intrinsic into a direct call to the underlying function.
//
Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) {
  Value *Callee = CS.getCalledValue();
  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  const AttrListPtr &Attrs = CS.getAttributes();

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return 0;

  IntrinsicInst *Tramp =
    cast<IntrinsicInst>(cast<BitCastInst>(Callee)->getOperand(0));

  Function *NestF = cast<Function>(Tramp->getOperand(2)->stripPointerCasts());
  const PointerType *NestFPTy = cast<PointerType>(NestF->getType());
  const FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());

  const AttrListPtr &NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestIdx = 1;
    const Type *NestTy = 0;
    Attributes NestAttr = Attribute::None;

    // Look for a parameter marked with the 'nest' attribute.
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
         E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
      if (NestAttrs.paramHasAttr(NestIdx, Attribute::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = NestAttrs.getParamAttributes(NestIdx);
        break;
      }

    if (NestTy) {
      Instruction *Caller = CS.getInstruction();
      std::vector<Value*> NewArgs;
      NewArgs.reserve(unsigned(CS.arg_end()-CS.arg_begin())+1);

      SmallVector<AttributeWithIndex, 8> NewAttrs;
      NewAttrs.reserve(Attrs.getNumSlots() + 1);

      // Insert the nest argument into the call argument list, which may
      // mean appending it. Likewise for attributes.

      // Add any result attributes.
      if (Attributes Attr = Attrs.getRetAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(0, Attr));

      {
        unsigned Idx = 1;
        CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
        do {
          if (Idx == NestIdx) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp->getOperand(3);
            if (NestVal->getType() != NestTy)
              NestVal = new BitCastInst(NestVal, NestTy, "nest", Caller);
            NewArgs.push_back(NestVal);
            NewAttrs.push_back(AttributeWithIndex::get(NestIdx, NestAttr));
          }

          if (I == E)
            break;

          // Add the original argument and attributes.
          NewArgs.push_back(*I);
          if (Attributes Attr = Attrs.getParamAttributes(Idx))
            NewAttrs.push_back
              (AttributeWithIndex::get(Idx + (Idx >= NestIdx), Attr));

          ++Idx, ++I;
        } while (1);
      }

      // Add any function attributes.
      if (Attributes Attr = Attrs.getFnAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(~0, Attr));

      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.

      std::vector<const Type*> NewTypes;
      NewTypes.reserve(FTy->getNumParams()+1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned Idx = 1;
        FunctionType::param_iterator I = FTy->param_begin(),
          E = FTy->param_end();

        do {
          if (Idx == NestIdx)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++Idx, ++I;
        } while (1);
      }

      // Replace the trampoline call with a direct call. Let the generic
      // code sort out any function type mismatches.
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                               FTy->isVarArg());
      Constant *NewCallee =
        NestF->getType() == PointerType::getUnqual(NewFTy) ?
        NestF : ConstantExpr::getBitCast(NestF,
                                         PointerType::getUnqual(NewFTy));
      const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs.begin(),
                                                   NewAttrs.end());

      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        NewCaller = InvokeInst::Create(NewCallee,
                                       II->getNormalDest(), II->getUnwindDest(),
                                       NewArgs.begin(), NewArgs.end(),
                                       Caller->getName(), Caller);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewCallee, NewArgs.begin(), NewArgs.end(),
                                     Caller->getName(), Caller);
        if (cast<CallInst>(Caller)->isTailCall())
          cast<CallInst>(NewCaller)->setTailCall();
        cast<CallInst>(NewCaller)->
          setCallingConv(cast<CallInst>(Caller)->getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }

      if (!Caller->getType()->isVoidTy())
        Caller->replaceAllUsesWith(NewCaller);
      Caller->eraseFromParent();
      Worklist.Remove(Caller);
      return 0;
    }
  }

  // Replace the trampoline call with a direct call. Since there is no 'nest'
  // parameter, there is no need to adjust the argument list. Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee =
    NestF->getType() == PTy ? NestF :
                              ConstantExpr::getBitCast(NestF, PTy);
  CS.setCalledFunction(NewCallee);
  return CS.getInstruction();
}