//===- InstCombineCalls.cpp -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall and visitInvoke functions.
//
//===----------------------------------------------------------------------===//
#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
using namespace llvm;
/// getPromotedType - Return the specified type promoted as it would be to pass
/// through a va_arg area.
static const Type *getPromotedType(const Type *Ty) {
  if (const IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}
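// Note: this mirrors C's default argument promotions, so e.g. an i8 or i16
// vararg is widened to i32, while i32 and wider types pass through unchanged.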
/// EnforceKnownAlignment - If the specified pointer points to an object that
/// we control, modify the object's alignment to PrefAlign. This isn't
/// often possible though. If alignment is important, a more reliable approach
/// is to simply align all global variables and allocation instructions to
/// their preferred alignment from the beginning.
///
static unsigned EnforceKnownAlignment(Value *V,
                                      unsigned Align, unsigned PrefAlign) {

  User *U = dyn_cast<User>(V);
  if (!U) return Align;

  switch (Operator::getOpcode(U)) {
  default: break;
  case Instruction::BitCast:
    return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
  case Instruction::GetElementPtr: {
    // If all indexes are zero, it is just the alignment of the base pointer.
    bool AllZeroOperands = true;
    for (User::op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e; ++i)
      if (!isa<Constant>(*i) ||
          !cast<Constant>(*i)->isNullValue()) {
        AllZeroOperands = false;
        break;
      }

    if (AllZeroOperands) {
      // Treat this like a bitcast.
      return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
    }
    break;
  }
  case Instruction::Alloca: {
    AllocaInst *AI = cast<AllocaInst>(V);
    // If there is a requested alignment and if this is an alloca, round up.
    if (AI->getAlignment() >= PrefAlign)
      return AI->getAlignment();
    AI->setAlignment(PrefAlign);
    return PrefAlign;
  }
  }
  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // If there is a large requested alignment and we can, bump up the alignment
    // of the global.
    if (GV->isDeclaration()) return Align;

    if (GV->getAlignment() >= PrefAlign)
      return GV->getAlignment();
    // We can only increase the alignment of the global if it has no alignment
    // specified or if it is not assigned a section. If it is assigned a
    // section, the global could be densely packed with other objects in the
    // section, increasing the alignment could cause padding issues.
    if (!GV->hasSection() || GV->getAlignment() == 0)
      GV->setAlignment(PrefAlign);
    return GV->getAlignment();
  }

  return Align;
}
/// GetOrEnforceKnownAlignment - If the specified pointer has an alignment that
/// we can determine, return it, otherwise return 0. If PrefAlign is specified,
/// and it is more than the alignment of the ultimate object, see if we can
/// increase the alignment of the ultimate object, making this check succeed.
unsigned InstCombiner::GetOrEnforceKnownAlignment(Value *V,
                                                  unsigned PrefAlign) {
  assert(V->getType()->isPointerTy() &&
         "GetOrEnforceKnownAlignment expects a pointer!");
  unsigned BitWidth = TD ? TD->getPointerSizeInBits() : 64;
  APInt Mask = APInt::getAllOnesValue(BitWidth);
  APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  ComputeMaskedBits(V, Mask, KnownZero, KnownOne);
  unsigned TrailZ = KnownZero.countTrailingOnes();

  // Avoid trouble with ridiculously large TrailZ values, such as
  // those computed from a null pointer.
  TrailZ = std::min(TrailZ, unsigned(sizeof(unsigned) * CHAR_BIT - 1));

  unsigned Align = 1u << std::min(BitWidth - 1, TrailZ);
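  // For example, if ComputeMaskedBits proves the low four bits of V are zero,
  // TrailZ is 4 and the inferred alignment is 1u << 4 == 16 bytes.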
  // LLVM doesn't support alignments larger than this currently.
  Align = std::min(Align, MaximumAlignment);

  if (PrefAlign > Align)
    Align = EnforceKnownAlignment(V, Align, PrefAlign);

  // We don't need to make any adjustment.
  return Align;
}
Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
  unsigned DstAlign = GetOrEnforceKnownAlignment(MI->getArgOperand(0));
  unsigned SrcAlign = GetOrEnforceKnownAlignment(MI->getArgOperand(1));
  unsigned MinAlign = std::min(DstAlign, SrcAlign);
  unsigned CopyAlign = MI->getAlignment();

  if (CopyAlign < MinAlign) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      MinAlign, false));
    return MI;
  }

  // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
  // load/store.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getArgOperand(2));
  if (MemOpLength == 0) return 0;

  // Source and destination pointer types are always "i8*" for intrinsic. See
  // if the size is something we can handle with a single primitive load/store.
  // A single load+store correctly handles overlapping memory in the memmove
  // case.
  unsigned Size = MemOpLength->getZExtValue();
  if (Size == 0) return MI;  // Delete this mem transfer.

  if (Size > 8 || (Size&(Size-1)))
    return 0;  // If not 1/2/4/8 bytes, exit.
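  // Illustrative IR for the Size == 8 case (value names hypothetical):
  //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 8, i32 8,
  //                                        i1 false)
  // is rewritten below into roughly
  //   %v = load i64* %s.cast, align 8
  //   store i64 %v, i64* %d.cast, align 8
  // where %s.cast and %d.cast are bitcasts of %s and %d to i64*.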
  // Use an integer load+store unless we can find something better.
  unsigned SrcAddrSp =
    cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
  unsigned DstAddrSp =
    cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();

  const IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
  Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
  Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);

  // Memcpy forces the use of i8* for the source and destination. That means
  // that if you're using memcpy to move one double around, you'll get a cast
  // from double* to i8*. We'd much rather use a double load+store rather than
  // an i64 load+store, here because this improves the odds that the source or
  // dest address will be promotable. See if we can find a better type than the
  // integer datatype.
  Value *StrippedDest = MI->getArgOperand(0)->stripPointerCasts();
  if (StrippedDest != MI->getArgOperand(0)) {
    const Type *SrcETy = cast<PointerType>(StrippedDest->getType())
                           ->getElementType();
    if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
      // The SrcETy might be something like {{{double}}} or [1 x double]. Rip
      // down through these levels if so.
      while (!SrcETy->isSingleValueType()) {
        if (const StructType *STy = dyn_cast<StructType>(SrcETy)) {
          if (STy->getNumElements() == 1)
            SrcETy = STy->getElementType(0);
          else
            break;
        } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
          if (ATy->getNumElements() == 1)
            SrcETy = ATy->getElementType();
          else
            break;
        } else
          break;
      }

      if (SrcETy->isSingleValueType()) {
        NewSrcPtrTy = PointerType::get(SrcETy, SrcAddrSp);
        NewDstPtrTy = PointerType::get(SrcETy, DstAddrSp);
      }
    }
  }
  // If the memcpy/memmove provides better alignment info than we can
  // infer, use it.
  SrcAlign = std::max(SrcAlign, CopyAlign);
  DstAlign = std::max(DstAlign, CopyAlign);

  Value *Src = Builder->CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
  Value *Dest = Builder->CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
  Instruction *L = new LoadInst(Src, "tmp", MI->isVolatile(), SrcAlign);
  InsertNewInstBefore(L, *MI);
  InsertNewInstBefore(new StoreInst(L, Dest, MI->isVolatile(), DstAlign),
                      *MI);

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setArgOperand(2, Constant::getNullValue(MemOpLength->getType()));
  return MI;
}
Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
  unsigned Alignment = GetOrEnforceKnownAlignment(MI->getDest());
  if (MI->getAlignment() < Alignment) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      Alignment, false));
    return MI;
  }

  // Extract the length and alignment and fill if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return 0;
  uint64_t Len = LenC->getZExtValue();
  Alignment = MI->getAlignment();

  // If the length is zero, this is a no-op.
  if (Len == 0) return MI;  // memset(d,c,0,a) -> noop
  // memset(s,c,n) -> store s, c  (for n=1,2,4,8)
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    const Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.

    Value *Dest = MI->getDest();
    Dest = Builder->CreateBitCast(Dest, PointerType::getUnqual(ITy));

    // Alignment 0 is identity for alignment 1 for memset, but not store.
    if (Alignment == 0) Alignment = 1;

    // Extract the fill value and store.
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
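    // Multiplying the i8 fill value by 0x0101010101010101 splats it into every
    // byte; e.g. a fill of 0xAB becomes 0xABABABABABABABAB, which is then
    // truncated by ConstantInt::get to the Len*8-bit store width.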
    InsertNewInstBefore(new StoreInst(ConstantInt::get(ITy, Fill),
                                      Dest, false, Alignment), *MI);

    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return 0;
}
/// visitCallInst - CallInst simplification. This mostly only handles folding
/// of intrinsic instructions. For normal calls, it allows visitCallSite to do
/// the heavy lifting.
///
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
  if (isFreeCall(&CI))
    return visitFree(CI);
  if (isMalloc(&CI))
    return visitMalloc(CI);

  // If the caller function is nounwind, mark the call as nounwind, even if the
  // callee isn't.
  if (CI.getParent()->getParent()->doesNotThrow() &&
      !CI.doesNotThrow()) {
    CI.setDoesNotThrow();
    return &CI;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallSite(&CI);
  // Intrinsics cannot occur in an invoke, so handle them here instead of in
  // visitCallSite.
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue()) return EraseInstFromFunction(CI);

      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
        if (CI->getZExtValue() == 1) {
          // Replace the instruction with just byte operations. We would
          // transform other cases to loads/stores, but we don't know if
          // alignment is sufficient.
        }
    }
    // If we have a memmove and the source operation is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
    if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = CI.getParent()->getParent()->getParent();
          Intrinsic::ID MemCpyID = Intrinsic::memcpy;
          const Type *Tys[3] = { CI.getArgOperand(0)->getType(),
                                 CI.getArgOperand(1)->getType(),
                                 CI.getArgOperand(2)->getType() };
          CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys, 3));
          Changed = true;
        }
    }
    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
      // memmove(x,x,size) -> noop.
      if (MTI->getSource() == MTI->getDest())
        return EraseInstFromFunction(CI);
    }

    // If we can determine a pointer alignment that is bigger than currently
    // set, update the alignment.
    if (isa<MemTransferInst>(MI)) {
      if (Instruction *I = SimplifyMemTransfer(MI))
        return I;
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
      if (Instruction *I = SimplifyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  }
  switch (II->getIntrinsicID()) {
  default: break;
  case Intrinsic::objectsize: {
    // We need target data for just about everything so depend on it.
    if (!TD) break;

    const Type *ReturnTy = CI.getType();
    bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);

    // Get to the real allocated thing and offset as fast as possible.
    Value *Op1 = II->getArgOperand(0)->stripPointerCasts();
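    // For instance (hypothetical IR), after stripping a bitcast of a global:
    //   @buf = global [32 x i8] zeroinitializer
    //   %p = bitcast [32 x i8]* @buf to i8*
    //   %n = call i32 @llvm.objectsize.i32(i8* %p, i1 false)
    // Op1 is @buf, and the GlobalVariable case below folds %n to 32.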
    // If we've stripped down to a single global variable that we
    // can know the size of then just return that.
    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op1)) {
      if (GV->hasDefinitiveInitializer()) {
        Constant *C = GV->getInitializer();
        uint64_t GlobalSize = TD->getTypeAllocSize(C->getType());
        return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, GlobalSize));
      } else {
        // Can't determine size of the GV.
        Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
        return ReplaceInstUsesWith(CI, RetVal);
      }
    } else if (AllocaInst *AI = dyn_cast<AllocaInst>(Op1)) {
      // Get alloca size.
      if (AI->getAllocatedType()->isSized()) {
        uint64_t AllocaSize = TD->getTypeAllocSize(AI->getAllocatedType());
        if (AI->isArrayAllocation()) {
          const ConstantInt *C = dyn_cast<ConstantInt>(AI->getArraySize());
          if (!C) break;
          AllocaSize *= C->getZExtValue();
        }
        return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, AllocaSize));
      }
    } else if (CallInst *MI = extractMallocCall(Op1)) {
      const Type* MallocType = getMallocAllocatedType(MI);
      // Get malloc size.
      if (MallocType && MallocType->isSized()) {
        if (Value *NElems = getMallocArraySize(MI, TD, true)) {
          if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
            return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy,
               (NElements->getZExtValue() * TD->getTypeAllocSize(MallocType))));
        }
      }
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op1)) {
      // Only handle constant GEPs here.
      if (CE->getOpcode() != Instruction::GetElementPtr) break;
      GEPOperator *GEP = cast<GEPOperator>(CE);

      // Make sure we're not a constant offset from an external
      // global.
      Value *Operand = GEP->getPointerOperand();
      Operand = Operand->stripPointerCasts();
      if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Operand))
        if (!GV->hasDefinitiveInitializer()) break;

      // Get what we're pointing to and its size.
      const PointerType *BaseType =
        cast<PointerType>(Operand->getType());
      uint64_t Size = TD->getTypeAllocSize(BaseType->getElementType());

      // Get the current byte offset into the thing. Use the original
      // operand in case we're looking through a bitcast.
      SmallVector<Value*, 8> Ops(CE->op_begin()+1, CE->op_end());
      const PointerType *OffsetType =
        cast<PointerType>(GEP->getPointerOperand()->getType());
      uint64_t Offset = TD->getIndexedOffset(OffsetType, &Ops[0], Ops.size());

      if (Size < Offset) {
        // Out of bound reference? Negative index normalized to large
        // index? Just return "I don't know".
        Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
        return ReplaceInstUsesWith(CI, RetVal);
      }

      Constant *RetVal = ConstantInt::get(ReturnTy, Size-Offset);
      return ReplaceInstUsesWith(CI, RetVal);
    }

    // Do not return "I don't know" here. Later optimization passes could
    // make it possible to evaluate objectsize to a constant.
    break;
  }
  case Intrinsic::bswap:
    // bswap(bswap(x)) -> x
    if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getArgOperand(0)))
      if (Operand->getIntrinsicID() == Intrinsic::bswap)
        return ReplaceInstUsesWith(CI, Operand->getArgOperand(0));

    // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
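    // e.g. for x: i32 truncated to i16, c is 32-16 = 16:
    //   bswap(trunc i16 (bswap i32 %x)) -> trunc i16 (lshr i32 %x, 16)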
    if (TruncInst *TI = dyn_cast<TruncInst>(II->getArgOperand(0))) {
      if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(TI->getOperand(0)))
        if (Operand->getIntrinsicID() == Intrinsic::bswap) {
          unsigned C = Operand->getType()->getPrimitiveSizeInBits() -
                       TI->getType()->getPrimitiveSizeInBits();
          Value *CV = ConstantInt::get(Operand->getType(), C);
          Value *V = Builder->CreateLShr(Operand->getArgOperand(0), CV);
          return new TruncInst(V, TI->getType());
        }
    }
    break;
  case Intrinsic::powi:
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // powi(x, 0) -> 1.0
      if (Power->isZero())
        return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
      // powi(x, 1) -> x
      if (Power->isOne())
        return ReplaceInstUsesWith(CI, II->getArgOperand(0));
      // powi(x, -1) -> 1/x
      if (Power->isAllOnesValue())
        return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
                                          II->getArgOperand(0));
    }
    break;
  case Intrinsic::cttz: {
    // If all bits below the first known one are known zero,
    // this value is constant.
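    // e.g. if the operand is known to match the pattern xxxx1000 (bit 3 known
    // one, bits 0-2 known zero), cttz folds to the constant 3.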
    const IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getArgOperand(0), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned TrailingZeros = KnownOne.countTrailingZeros();
    APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, TrailingZeros)));
    }
    break;
  case Intrinsic::ctlz: {
    // If all bits above the first known one are known zero,
    // this value is constant.
    const IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getArgOperand(0), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned LeadingZeros = KnownOne.countLeadingZeros();
    APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, LeadingZeros)));
    }
    break;
  case Intrinsic::uadd_with_overflow: {
    Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
    const IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt Mask = APInt::getSignBit(BitWidth);
    APInt LHSKnownZero(BitWidth, 0);
    APInt LHSKnownOne(BitWidth, 0);
    ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
    bool LHSKnownNegative = LHSKnownOne[BitWidth - 1];
    bool LHSKnownPositive = LHSKnownZero[BitWidth - 1];

    if (LHSKnownNegative || LHSKnownPositive) {
      APInt RHSKnownZero(BitWidth, 0);
      APInt RHSKnownOne(BitWidth, 0);
      ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);
      bool RHSKnownNegative = RHSKnownOne[BitWidth - 1];
      bool RHSKnownPositive = RHSKnownZero[BitWidth - 1];
      if (LHSKnownNegative && RHSKnownNegative) {
        // The sign bit is set in both cases: this MUST overflow.
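        // e.g. for i8 operands, 0x80 + 0x80 = 0x100 wraps to 0x00; both
        // operands are >= 2^(BitWidth-1), so the true sum is >= 2^BitWidth
        // and cannot fit in BitWidth bits.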
        // Create a simple add instruction, and insert it into the struct.
        Instruction *Add = BinaryOperator::CreateAdd(LHS, RHS, "", &CI);
        Worklist.Add(Add);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getTrue(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, Add, 0);
      }

      if (LHSKnownPositive && RHSKnownPositive) {
        // The sign bit is clear in both cases: this CANNOT overflow.
        // Create a simple add instruction, and insert it into the struct.
        Instruction *Add = BinaryOperator::CreateNUWAdd(LHS, RHS, "", &CI);
        Worklist.Add(Add);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, Add, 0);
      }
    }
  }
  // FALL THROUGH uadd into sadd
  case Intrinsic::sadd_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X + undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X + 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
    // undef - X -> undef
    // X - undef -> undef
    if (isa<UndefValue>(II->getArgOperand(0)) ||
        isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X - 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X * undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X * 0 -> {0, false}
      if (RHSI->isZero())
        return ReplaceInstUsesWith(CI, Constant::getNullValue(II->getType()));

      // X * 1 -> {X, false}
      if (RHSI->equalsInt(1)) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::x86_sse_loadu_ps:
  case Intrinsic::x86_sse2_loadu_pd:
  case Intrinsic::x86_sse2_loadu_dq:
    // Turn PPC lvx -> load if the pointer is known aligned.
    // Turn X86 loadups -> load if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getArgOperand(0), 16) >= 16) {
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                       PointerType::getUnqual(II->getType()));
      return new LoadInst(Ptr);
    }
    break;
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getArgOperand(1), 16) >= 16) {
      const Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(0)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
      return new StoreInst(II->getArgOperand(0), Ptr);
    }
    break;
  case Intrinsic::x86_sse_storeu_ps:
  case Intrinsic::x86_sse2_storeu_pd:
  case Intrinsic::x86_sse2_storeu_dq:
    // Turn X86 storeu -> store if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getArgOperand(0), 16) >= 16) {
      const Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(1)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), OpPtrTy);
      return new StoreInst(II->getArgOperand(1), Ptr);
    }
    break;
  case Intrinsic::x86_sse_cvttss2si: {
    // These intrinsics only demand the 0th element of their input vector. If
    // we can simplify the input based on that, do so now.
    unsigned VWidth =
      cast<VectorType>(II->getArgOperand(0)->getType())->getNumElements();
    APInt DemandedElts(VWidth, 1);
    APInt UndefElts(VWidth, 0);
    if (Value *V = SimplifyDemandedVectorElts(II->getArgOperand(0),
                                              DemandedElts, UndefElts)) {
      II->setArgOperand(0, V);
      return II;
    }
    break;
  }
  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
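    // vperm selects one byte per mask element: indices 0-15 address bytes of
    // V1 and indices 16-31 address bytes of V2, so e.g. a mask element of 17
    // picks byte 1 of V2. Only the low five index bits matter in hardware,
    // which is why the loop below masks with 31 (and with 15 per vector).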
    if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getArgOperand(2))) {
      assert(Mask->getNumOperands() == 16 && "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        if (!isa<ConstantInt>(Mask->getOperand(i)) &&
            !isa<UndefValue>(Mask->getOperand(i))) {
          AllEltsOk = false;
          break;
        }
      }

      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 = Builder->CreateBitCast(II->getArgOperand(0),
                                            Mask->getType(), "tmp");
        Value *Op1 = Builder->CreateBitCast(II->getArgOperand(1),
                                            Mask->getType(), "tmp");
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getOperand(i)))
            continue;
          unsigned Idx = cast<ConstantInt>(Mask->getOperand(i))->getZExtValue();
          Idx &= 31;  // Match the hardware behavior.

          if (ExtractedElts[Idx] == 0) {
            ExtractedElts[Idx] =
              Builder->CreateExtractElement(Idx < 16 ? Op0 : Op1,
                  ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                   Idx&15, false), "tmp");
          }

          // Insert this value into the result vector.
          Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
                       ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                        i, false), "tmp");
        }
        return CastInst::Create(Instruction::BitCast, Result, CI.getType());
      }
    }
    break;
  case Intrinsic::stackrestore: {
    // If the save is right next to the restore, remove the restore. This can
    // happen when variable allocas are DCE'd.
    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
      if (SS->getIntrinsicID() == Intrinsic::stacksave) {
        BasicBlock::iterator BI = SS;
        if (&*++BI == II)
          return EraseInstFromFunction(CI);
      }
    }
    // Scan down this block to see if there is another stack restore in the
    // same block without an intervening call/alloca.
    BasicBlock::iterator BI = II;
    TerminatorInst *TI = II->getParent()->getTerminator();
    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      if (isa<AllocaInst>(BI) || isMalloc(BI)) {
        CannotRemove = true;
        break;
      }
      if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
          // If there is a stackrestore below this one, remove this one.
          if (II->getIntrinsicID() == Intrinsic::stackrestore)
            return EraseInstFromFunction(CI);
          // Otherwise, ignore the intrinsic.
        } else {
          // If we found a non-intrinsic call, we can't remove the stack
          // restore.
          CannotRemove = true;
          break;
        }
      }
    }

    // If the stack restore is in a return/unwind block and if there are no
    // allocas or calls between the restore and the return, nuke the restore.
    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<UnwindInst>(TI)))
      return EraseInstFromFunction(CI);
    break;
  }
  }
  return visitCallSite(II);
}

// InvokeInst simplification
//
Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
  return visitCallSite(&II);
}
/// isSafeToEliminateVarargsCast - If this cast does not affect the value
/// passed through the varargs area, we can eliminate the use of the cast.
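/// For instance, a lossless bitcast of a pointer argument changes none of the
/// bits actually passed to the callee, so (unless the argument is ByVal,
/// where the type determines the size of the copy) the original operand can
/// be passed directly.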
static bool isSafeToEliminateVarargsCast(const CallSite CS,
                                         const CastInst * const CI,
                                         const TargetData * const TD,
                                         const unsigned ix) {
  if (!CI->isLosslessCast())
    return false;

  // The size of ByVal arguments is derived from the type, so we
  // can't change to a type with a different size. If the size were
  // passed explicitly we could avoid this check.
  if (!CS.paramHasAttr(ix, Attribute::ByVal))
    return true;

  const Type* SrcTy =
            cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
  const Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
  if (!SrcTy->isSized() || !DstTy->isSized())
    return false;
  if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
    return false;
  return true;
}
namespace {
class InstCombineFortifiedLibCalls : public SimplifyFortifiedLibCalls {
  InstCombiner *IC;
protected:
  void replaceCall(Value *With) {
    NewInstruction = IC->ReplaceInstUsesWith(*CI, With);
  }
  bool isFoldable(unsigned SizeCIOp, unsigned SizeArgOp, bool isString) const {
    if (ConstantInt *SizeCI =
            dyn_cast<ConstantInt>(CI->getArgOperand(SizeCIOp))) {
      if (SizeCI->isAllOnesValue())
        return true;
      if (isString) {
        uint64_t Len = GetStringLength(CI->getArgOperand(SizeArgOp));
        // GetStringLength returns 0 when it cannot compute the length; an
        // unknown length cannot justify removing the object-size check.
        if (Len == 0) return false;
        return SizeCI->getZExtValue() >= Len;
      }
      if (ConstantInt *Arg = dyn_cast<ConstantInt>(
                                 CI->getArgOperand(SizeArgOp)))
        return SizeCI->getZExtValue() >= Arg->getZExtValue();
    }
    return false;
  }
public:
  InstCombineFortifiedLibCalls(InstCombiner *IC) : IC(IC), NewInstruction(0) { }
  Instruction *NewInstruction;
};
} // end anonymous namespace
// Try to fold some different types of calls here.
// Currently we're only working with the checking functions, memcpy_chk,
// mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,
// strcat_chk and strncat_chk.
Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const TargetData *TD) {
  if (CI->getCalledFunction() == 0) return 0;

  InstCombineFortifiedLibCalls Simplifier(this);
  Simplifier.fold(CI, TD);
  return Simplifier.NewInstruction;
}
// visitCallSite - Improvements for call and invoke instructions.
//
Instruction *InstCombiner::visitCallSite(CallSite CS) {
  bool Changed = false;

  // If the callee is a constexpr cast of a function, attempt to move the cast
  // to the arguments of the call/invoke.
  if (transformConstExprCastCall(CS)) return 0;

  Value *Callee = CS.getCalledValue();
826 Value *Callee = CS.getCalledValue();
828 if (Function *CalleeF = dyn_cast<Function>(Callee))
829 // If the call and callee calling conventions don't match, this call must
830 // be unreachable, as the call is undefined.
831 if (CalleeF->getCallingConv() != CS.getCallingConv() &&
832 // Only do this for calls to a function with a body. A prototype may
833 // not actually end up matching the implementation's calling conv for a
834 // variety of reasons (e.g. it may be written in assembly).
835 !CalleeF->isDeclaration()) {
836 Instruction *OldCall = CS.getInstruction();
837 new StoreInst(ConstantInt::getTrue(Callee->getContext()),
838 UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
840 // If OldCall dues not return void then replaceAllUsesWith undef.
841 // This allows ValueHandlers and custom metadata to adjust itself.
842 if (!OldCall->getType()->isVoidTy())
843 OldCall->replaceAllUsesWith(UndefValue::get(OldCall->getType()));
844 if (isa<CallInst>(OldCall))
845 return EraseInstFromFunction(*OldCall);
847 // We cannot remove an invoke, because it would change the CFG, just
848 // change the callee to a null pointer.
849 cast<InvokeInst>(OldCall)->setCalledFunction(
850 Constant::getNullValue(CalleeF->getType()));
  if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
    // This instruction is not reachable, just remove it. We insert a store to
    // undef so that we know that this code is not reachable, despite the fact
    // that we can't modify the CFG here.
    new StoreInst(ConstantInt::getTrue(Callee->getContext()),
               UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                  CS.getInstruction());

    // If CS does not return void then replaceAllUsesWith undef.
    // This allows ValueHandlers and custom metadata to adjust themselves.
    if (!CS.getInstruction()->getType()->isVoidTy())
      CS.getInstruction()->
        replaceAllUsesWith(UndefValue::get(CS.getInstruction()->getType()));

    if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
      // Don't break the CFG, insert a dummy cond branch.
      BranchInst::Create(II->getNormalDest(), II->getUnwindDest(),
                         ConstantInt::getTrue(Callee->getContext()), II);
    }
    return EraseInstFromFunction(*CS.getInstruction());
  }
  if (BitCastInst *BC = dyn_cast<BitCastInst>(Callee))
    if (IntrinsicInst *In = dyn_cast<IntrinsicInst>(BC->getOperand(0)))
      if (In->getIntrinsicID() == Intrinsic::init_trampoline)
        return transformCallThroughTrampoline(CS);

  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  if (FTy->isVarArg()) {
    int ix = FTy->getNumParams() + (isa<InvokeInst>(Callee) ? 3 : 1);
    // See if we can optimize any arguments passed through the varargs area of
    // the call.
    for (CallSite::arg_iterator I = CS.arg_begin()+FTy->getNumParams(),
           E = CS.arg_end(); I != E; ++I, ++ix) {
      CastInst *CI = dyn_cast<CastInst>(*I);
      if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
        *I = CI->getOperand(0);
        Changed = true;
      }
    }
  }
  if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
    // Inline asm calls cannot throw - mark them 'nounwind'.
    CS.setDoesNotThrow();
    Changed = true;
  }

  // Try to optimize the call if possible, we require TargetData for most of
  // this. None of these calls are seen as possibly dead so go ahead and
  // delete the instruction now.
  if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
    Instruction *I = tryOptimizeCall(CI, TD);
    // If we changed something, return the result; otherwise let the
    // fallthrough checks run.
    if (I) return EraseInstFromFunction(*I);
  }

  return Changed ? CS.getInstruction() : 0;
}
// transformConstExprCastCall - If the callee is a constexpr cast of a function,
// attempt to move the cast to the arguments of the call/invoke.
//
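// For example (illustrative IR), a call like
//   call void bitcast (void (i8*)* @f to void (i32*)*)(i32* %p)
// can become a direct call with the cast moved onto the argument:
//   %0 = bitcast i32* %p to i8*
//   call void @f(i8* %0)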
bool InstCombiner::transformConstExprCastCall(CallSite CS) {
  if (!isa<ConstantExpr>(CS.getCalledValue())) return false;
  ConstantExpr *CE = cast<ConstantExpr>(CS.getCalledValue());
  if (CE->getOpcode() != Instruction::BitCast ||
      !isa<Function>(CE->getOperand(0)))
    return false;
  Function *Callee = cast<Function>(CE->getOperand(0));
  Instruction *Caller = CS.getInstruction();
  const AttrListPtr &CallerPAL = CS.getAttributes();

  // Okay, this is a cast from a function to a different type. Unless doing so
  // would cause a type conversion of one of our arguments, change this call to
  // be a direct call with arguments casted to the appropriate types.
  //
  const FunctionType *FT = Callee->getFunctionType();
  const Type *OldRetTy = Caller->getType();
  const Type *NewRetTy = FT->getReturnType();

  if (NewRetTy->isStructTy())
    return false;  // TODO: Handle multiple return values.
  // Check to see if we are changing the return type...
  if (OldRetTy != NewRetTy) {
    if (Callee->isDeclaration() &&
        // Conversion is ok if changing from one pointer type to another or from
        // a pointer to an integer of the same size.
        !((OldRetTy->isPointerTy() || !TD ||
           OldRetTy == TD->getIntPtrType(Caller->getContext())) &&
          (NewRetTy->isPointerTy() || !TD ||
           NewRetTy == TD->getIntPtrType(Caller->getContext()))))
      return false;   // Cannot transform this return value.

    if (!Caller->use_empty() &&
        // void -> non-void is handled specially
        !NewRetTy->isVoidTy() && !CastInst::isCastable(NewRetTy, OldRetTy))
      return false;   // Cannot transform this return value.

    if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
      Attributes RAttrs = CallerPAL.getRetAttributes();
      if (RAttrs & Attribute::typeIncompatible(NewRetTy))
        return false;   // Attribute not compatible with transformed value.
    }

    // If the callsite is an invoke instruction, and the return value is used by
    // a PHI node in a successor, we cannot change the return type of the call
    // because there is no place to put the cast instruction (without breaking
    // the critical edge). Bail out in this case.
    if (!Caller->use_empty())
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
        for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
             UI != E; ++UI)
          if (PHINode *PN = dyn_cast<PHINode>(*UI))
            if (PN->getParent() == II->getNormalDest() ||
                PN->getParent() == II->getUnwindDest())
              return false;
  }
  unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  CallSite::arg_iterator AI = CS.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    const Type *ParamTy = FT->getParamType(i);
    const Type *ActTy = (*AI)->getType();

    if (!CastInst::isCastable(ActTy, ParamTy))
      return false;   // Cannot transform this parameter value.

    if (CallerPAL.getParamAttributes(i + 1)
        & Attribute::typeIncompatible(ParamTy))
      return false;   // Attribute not compatible with transformed value.

    // Converting from one pointer type to another or between a pointer and an
    // integer of the same size is safe even if we do not have a body.
    bool isConvertible = ActTy == ParamTy ||
      (TD && ((ParamTy->isPointerTy() ||
               ParamTy == TD->getIntPtrType(Caller->getContext())) &&
              (ActTy->isPointerTy() ||
               ActTy == TD->getIntPtrType(Caller->getContext()))));
    if (Callee->isDeclaration() && !isConvertible) return false;
  }
  if (FT->getNumParams() < NumActualArgs && !FT->isVarArg() &&
      Callee->isDeclaration())
    return false;   // Do not delete arguments unless we have a function body.

  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      !CallerPAL.isEmpty())
    // In this case we have more arguments than the new function type, but we
    // won't be dropping them. Check that these extra arguments have attributes
    // that are compatible with being a vararg call argument.
    for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
      if (CallerPAL.getSlot(i - 1).Index <= FT->getNumParams())
        break;
      Attributes PAttrs = CallerPAL.getSlot(i - 1).Attrs;
      if (PAttrs & Attribute::VarArgsIncompatible)
        return false;
    }
  // Okay, we decided that this is a safe thing to do: go ahead and start
  // inserting cast instructions as necessary...
  std::vector<Value*> Args;
  Args.reserve(NumActualArgs);
  SmallVector<AttributeWithIndex, 8> attrVec;
  attrVec.reserve(NumCommonArgs);

  // Get any return attributes.
  Attributes RAttrs = CallerPAL.getRetAttributes();

  // If the return value is not being used, the type may not be compatible
  // with the existing attributes. Wipe out any problematic attributes.
  RAttrs &= ~Attribute::typeIncompatible(NewRetTy);

  // Add the new return attributes.
  if (RAttrs)
    attrVec.push_back(AttributeWithIndex::get(0, RAttrs));
  AI = CS.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    const Type *ParamTy = FT->getParamType(i);
    if ((*AI)->getType() == ParamTy) {
      Args.push_back(*AI);
    } else {
      Instruction::CastOps opcode = CastInst::getCastOpcode(*AI,
          false, ParamTy, false);
      Args.push_back(Builder->CreateCast(opcode, *AI, ParamTy, "tmp"));
    }

    // Add any parameter attributes.
    if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
      attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
  }

  // If the function takes more arguments than the call was taking, add them
  // now.
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
    Args.push_back(Constant::getNullValue(FT->getParamType(i)));
  // If we are removing arguments to the function, emit an obnoxious warning.
  if (FT->getNumParams() < NumActualArgs) {
    if (!FT->isVarArg()) {
      errs() << "WARNING: While resolving call to function '"
             << Callee->getName() << "' arguments were dropped!\n";
    } else {
      // Add all of the arguments in their promoted form to the arg list.
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        const Type *PTy = getPromotedType((*AI)->getType());
        if (PTy != (*AI)->getType()) {
          // Must promote to pass through va_arg area!
          Instruction::CastOps opcode =
            CastInst::getCastOpcode(*AI, false, PTy, false);
          Args.push_back(Builder->CreateCast(opcode, *AI, PTy, "tmp"));
        } else {
          Args.push_back(*AI);
        }

        // Add any parameter attributes.
        if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
          attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
      }
    }
  }

  if (Attributes FnAttrs = CallerPAL.getFnAttributes())
    attrVec.push_back(AttributeWithIndex::get(~0, FnAttrs));
  if (NewRetTy->isVoidTy())
    Caller->setName("");   // Void type should not have a name.

  const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec.begin(),
                                                     attrVec.end());

  Instruction *NC;
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NC = InvokeInst::Create(Callee, II->getNormalDest(), II->getUnwindDest(),
                            Args.begin(), Args.end(),
                            Caller->getName(), Caller);
    cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
    cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
  } else {
    NC = CallInst::Create(Callee, Args.begin(), Args.end(),
                          Caller->getName(), Caller);
    CallInst *CI = cast<CallInst>(Caller);
    if (CI->isTailCall())
      cast<CallInst>(NC)->setTailCall();
    cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
    cast<CallInst>(NC)->setAttributes(NewCallerPAL);
  }
  // Insert a cast of the return type as necessary.
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      Instruction::CastOps opcode = CastInst::getCastOpcode(NC, false,
                                                            OldRetTy, false);
      NV = NC = CastInst::Create(opcode, NC, OldRetTy, "tmp");

      // If this is an invoke instruction, we should insert it after the first
      // non-PHI instruction in the normal successor block.
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        BasicBlock::iterator I = II->getNormalDest()->getFirstNonPHI();
        InsertNewInstBefore(NC, *I);
      } else {
        // Otherwise, it's a call, just insert cast right after the call instr.
        InsertNewInstBefore(NC, *Caller);
      }
      Worklist.AddUsersToWorkList(*Caller);
    } else {
      NV = UndefValue::get(Caller->getType());
    }
  }

  if (!Caller->use_empty())
    Caller->replaceAllUsesWith(NV);

  EraseInstFromFunction(*Caller);
  return true;
}
// transformCallThroughTrampoline - Turn a call to a function created by the
// init_trampoline intrinsic into a direct call to the underlying function.
//
Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) {
  Value *Callee = CS.getCalledValue();
  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  const AttrListPtr &Attrs = CS.getAttributes();

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return 0;

  IntrinsicInst *Tramp =
    cast<IntrinsicInst>(cast<BitCastInst>(Callee)->getOperand(0));

  Function *NestF = cast<Function>(Tramp->getArgOperand(1)->stripPointerCasts());
  const PointerType *NestFPTy = cast<PointerType>(NestF->getType());
  const FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());

  const AttrListPtr &NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestIdx = 1;
    const Type *NestTy = 0;
    Attributes NestAttr = Attribute::None;

    // Look for a parameter marked with the 'nest' attribute.
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
         E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
      if (NestAttrs.paramHasAttr(NestIdx, Attribute::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = NestAttrs.getParamAttributes(NestIdx);
        break;
      }

    if (NestTy) {
      Instruction *Caller = CS.getInstruction();
      std::vector<Value*> NewArgs;
      NewArgs.reserve(unsigned(CS.arg_end()-CS.arg_begin())+1);

      SmallVector<AttributeWithIndex, 8> NewAttrs;
      NewAttrs.reserve(Attrs.getNumSlots() + 1);

      // Insert the nest argument into the call argument list, which may
      // mean appending it. Likewise for attributes.

      // Add any result attributes.
      if (Attributes Attr = Attrs.getRetAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(0, Attr));

      {
        unsigned Idx = 1;
        CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
        do {
          if (Idx == NestIdx) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp->getArgOperand(2);
            if (NestVal->getType() != NestTy)
              NestVal = new BitCastInst(NestVal, NestTy, "nest", Caller);
            NewArgs.push_back(NestVal);
            NewAttrs.push_back(AttributeWithIndex::get(NestIdx, NestAttr));
          }

          if (I == E)
            break;

          // Add the original argument and attributes.
          NewArgs.push_back(*I);
          if (Attributes Attr = Attrs.getParamAttributes(Idx))
            NewAttrs.push_back
              (AttributeWithIndex::get(Idx + (Idx >= NestIdx), Attr));

          ++Idx, ++I;
        } while (1);
      }

      // Add any function attributes.
      if (Attributes Attr = Attrs.getFnAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(~0, Attr));
      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.

      std::vector<const Type*> NewTypes;
      NewTypes.reserve(FTy->getNumParams()+1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned Idx = 1;
        FunctionType::param_iterator I = FTy->param_begin(),
          E = FTy->param_end();

        do {
          if (Idx == NestIdx)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++Idx, ++I;
        } while (1);
      }
      // Replace the trampoline call with a direct call. Let the generic
      // code sort out any function type mismatches.
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                               FTy->isVarArg());
      Constant *NewCallee =
        NestF->getType() == PointerType::getUnqual(NewFTy) ?
        NestF : ConstantExpr::getBitCast(NestF,
                                         PointerType::getUnqual(NewFTy));
      const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs.begin(),
                                                   NewAttrs.end());

      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        NewCaller = InvokeInst::Create(NewCallee,
                                       II->getNormalDest(), II->getUnwindDest(),
                                       NewArgs.begin(), NewArgs.end(),
                                       Caller->getName(), Caller);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewCallee, NewArgs.begin(), NewArgs.end(),
                                     Caller->getName(), Caller);
        if (cast<CallInst>(Caller)->isTailCall())
          cast<CallInst>(NewCaller)->setTailCall();
        cast<CallInst>(NewCaller)->
          setCallingConv(cast<CallInst>(Caller)->getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }
      if (!Caller->getType()->isVoidTy())
        Caller->replaceAllUsesWith(NewCaller);
      Caller->eraseFromParent();
      Worklist.Remove(Caller);
      return 0;
    }
  }
  // Replace the trampoline call with a direct call. Since there is no 'nest'
  // parameter, there is no need to adjust the argument list. Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee =
    NestF->getType() == PTy ? NestF :
                              ConstantExpr::getBitCast(NestF, PTy);
  CS.setCalledFunction(NewCallee);
  return CS.getInstruction();
}