//===- InstCombineCalls.cpp -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall and visitInvoke functions.
//
//===----------------------------------------------------------------------===//
#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
using namespace llvm;
/// getPromotedType - Return the specified type promoted as it would be to pass
/// through a va_arg area.
static const Type *getPromotedType(const Type *Ty) {
  if (const IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}
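// Illustrative example (hypothetical caller, not from this file): a vararg of
// type i8 or i16 is widened to i32 before being passed, while i32 and wider
// integers (and all non-integer types) pass through unchanged.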
/// EnforceKnownAlignment - If the specified pointer points to an object that
/// we control, modify the object's alignment to PrefAlign. This isn't
/// often possible though. If alignment is important, a more reliable approach
/// is to simply align all global variables and allocation instructions to
/// their preferred alignment from the beginning.
///
static unsigned EnforceKnownAlignment(Value *V,
                                      unsigned Align, unsigned PrefAlign) {

  User *U = dyn_cast<User>(V);
  if (!U) return Align;

  switch (Operator::getOpcode(U)) {
  default: break;
  case Instruction::BitCast:
    return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
  case Instruction::GetElementPtr: {
    // If all indexes are zero, it is just the alignment of the base pointer.
    bool AllZeroOperands = true;
    for (User::op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e; ++i)
      if (!isa<Constant>(*i) ||
          !cast<Constant>(*i)->isNullValue()) {
        AllZeroOperands = false;
        break;
      }

    if (AllZeroOperands) {
      // Treat this like a bitcast.
      return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
    }
    break;
  }
  case Instruction::Alloca: {
    AllocaInst *AI = cast<AllocaInst>(V);
    // If there is a requested alignment and if this is an alloca, round up.
    if (AI->getAlignment() >= PrefAlign)
      return AI->getAlignment();
    AI->setAlignment(PrefAlign);
    return PrefAlign;
  }
  }

  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // If there is a large requested alignment and we can, bump up the
    // alignment of the global.
    if (GV->isDeclaration()) return Align;

    if (GV->getAlignment() >= PrefAlign)
      return GV->getAlignment();
    // We can only increase the alignment of the global if it has no alignment
    // specified or if it is not assigned a section. If it is assigned a
    // section, the global could be densely packed with other objects in the
    // section; increasing the alignment could cause padding issues.
    if (!GV->hasSection() || GV->getAlignment() == 0)
      GV->setAlignment(PrefAlign);
    return GV->getAlignment();
  }

  return Align;
}
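// For example (illustrative): an alloca with alignment 4 and a preferred
// alignment of 16 gets its alignment raised to 16 and 16 is returned, while a
// global assigned a section with a nonzero explicit alignment is left
// untouched.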
/// GetOrEnforceKnownAlignment - If the specified pointer has an alignment that
/// we can determine, return it, otherwise return 0. If PrefAlign is specified,
/// and it is more than the alignment of the ultimate object, see if we can
/// increase the alignment of the ultimate object, making this check succeed.
unsigned InstCombiner::GetOrEnforceKnownAlignment(Value *V,
                                                  unsigned PrefAlign) {
  assert(V->getType()->isPointerTy() &&
         "GetOrEnforceKnownAlignment expects a pointer!");
  unsigned BitWidth = TD ? TD->getPointerSizeInBits() : 64;
  APInt Mask = APInt::getAllOnesValue(BitWidth);
  APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  ComputeMaskedBits(V, Mask, KnownZero, KnownOne);
  unsigned TrailZ = KnownZero.countTrailingOnes();

  // Avoid trouble with ridiculously large TrailZ values, such as
  // those computed from a null pointer.
  TrailZ = std::min(TrailZ, unsigned(sizeof(unsigned) * CHAR_BIT - 1));

  unsigned Align = 1u << std::min(BitWidth - 1, TrailZ);

  // LLVM doesn't support alignments larger than this currently.
  Align = std::min(Align, +Value::MaximumAlignment);

  if (PrefAlign > Align)
    Align = EnforceKnownAlignment(V, Align, PrefAlign);

  // We don't need to make any adjustment.
  return Align;
}
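// Sketch of the computation above: if ComputeMaskedBits proves the low four
// bits of the pointer are zero (KnownZero ends in ...1111), TrailZ is 4 and
// the derived alignment is 1 << 4 == 16.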
Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
  unsigned DstAlign = GetOrEnforceKnownAlignment(MI->getArgOperand(0));
  unsigned SrcAlign = GetOrEnforceKnownAlignment(MI->getArgOperand(1));
  unsigned MinAlign = std::min(DstAlign, SrcAlign);
  unsigned CopyAlign = MI->getAlignment();

  if (CopyAlign < MinAlign) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      MinAlign, false));
    return MI;
  }

  // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with a
  // load/store.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getArgOperand(2));
  if (MemOpLength == 0) return 0;

  // Source and destination pointer types are always "i8*" for the intrinsic.
  // See if the size is something we can handle with a single primitive
  // load/store. A single load+store correctly handles overlapping memory in
  // the memmove case.
  unsigned Size = MemOpLength->getZExtValue();
  if (Size == 0) return MI;  // Delete this mem transfer.

  if (Size > 8 || (Size&(Size-1)))
    return 0;  // If not 1/2/4/8 bytes, exit.

  // Use an integer load+store unless we can find something better.
  unsigned SrcAddrSp =
    cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
  unsigned DstAddrSp =
    cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();

  const IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
  Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
  Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);

  // Memcpy forces the use of i8* for the source and destination. That means
  // that if you're using memcpy to move one double around, you'll get a cast
  // from double* to i8*. We'd much rather use a double load+store than an
  // i64 load+store here, because this improves the odds that the source or
  // dest address will be promotable. See if we can find a better type than
  // the integer datatype.
  Value *StrippedDest = MI->getArgOperand(0)->stripPointerCasts();
  if (StrippedDest != MI->getArgOperand(0)) {
    const Type *SrcETy = cast<PointerType>(StrippedDest->getType())
                           ->getElementType();
    if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
      // The SrcETy might be something like {{{double}}} or [1 x double]. Rip
      // down through these levels if so.
      while (!SrcETy->isSingleValueType()) {
        if (const StructType *STy = dyn_cast<StructType>(SrcETy)) {
          if (STy->getNumElements() == 1)
            SrcETy = STy->getElementType(0);
          else
            break;
        } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
          if (ATy->getNumElements() == 1)
            SrcETy = ATy->getElementType();
          else
            break;
        } else
          break;
      }

      if (SrcETy->isSingleValueType()) {
        NewSrcPtrTy = PointerType::get(SrcETy, SrcAddrSp);
        NewDstPtrTy = PointerType::get(SrcETy, DstAddrSp);
      }
    }
  }

  // If the memcpy/memmove provides better alignment info than we can
  // infer, use it.
  SrcAlign = std::max(SrcAlign, CopyAlign);
  DstAlign = std::max(DstAlign, CopyAlign);

  Value *Src = Builder->CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
  Value *Dest = Builder->CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
  Instruction *L = new LoadInst(Src, "tmp", MI->isVolatile(), SrcAlign);
  InsertNewInstBefore(L, *MI);
  InsertNewInstBefore(new StoreInst(L, Dest, MI->isVolatile(), DstAlign),
                      *MI);

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setArgOperand(2, Constant::getNullValue(MemOpLength->getType()));
  return MI;
}
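// A minimal before/after sketch in IR (illustrative operands, 2.x typed
// pointer syntax):
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 8, i32 8, i1 false)
// becomes, after the bitcasts above:
//   %v = load i64* %s.cast, align 8
//   store i64 %v, i64* %d.cast, align 8
// and the memcpy's length is zeroed so the next pass over the worklist
// deletes it.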
Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
  unsigned Alignment = GetOrEnforceKnownAlignment(MI->getDest());
  if (MI->getAlignment() < Alignment) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      Alignment, false));
    return MI;
  }

  // Extract the length and alignment and fill if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return 0;
  uint64_t Len = LenC->getZExtValue();
  Alignment = MI->getAlignment();

  // If the length is zero, this is a no-op.
  if (Len == 0) return MI;  // memset(d,c,0,a) -> noop

  // memset(s,c,n) -> store s, c (for n=1,2,4,8)
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    const Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.

    Value *Dest = MI->getDest();
    Dest = Builder->CreateBitCast(Dest, PointerType::getUnqual(ITy));

    // Alignment 0 is identity for alignment 1 for memset, but not store.
    if (Alignment == 0) Alignment = 1;

    // Extract the fill value and store.
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    InsertNewInstBefore(new StoreInst(ConstantInt::get(ITy, Fill),
                                      Dest, false, Alignment), *MI);

    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return 0;
}
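// Worked example of the fill-value splat above: memset(p, 1, 4) takes the
// byte 0x01, multiplies it by 0x0101010101010101 to replicate it into every
// byte, and (truncated to i32) emits: store i32 16843009, i.e. 0x01010101.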
/// visitCallInst - CallInst simplification. This mostly only handles folding
/// of intrinsic instructions. For normal calls, it allows visitCallSite to do
/// the heavy lifting.
///
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
  if (isFreeCall(&CI))
    return visitFree(CI);
  if (isMalloc(&CI))
    return visitMalloc(CI);

  // If the caller function is nounwind, mark the call as nounwind, even if
  // the call is not already marked nounwind.
  if (CI.getParent()->getParent()->doesNotThrow() &&
      !CI.doesNotThrow()) {
    CI.setDoesNotThrow();
    return &CI;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallSite(&CI);

  // Intrinsics cannot occur in an invoke, so handle them here instead of in
  // visitCallSite.
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue())
        return EraseInstFromFunction(CI);

      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
        if (CI->getZExtValue() == 1) {
          // Replace the instruction with just byte operations. We would
          // transform other cases to loads/stores, but we don't know if
          // alignment is sufficient.
        }
    }

    // No other transformations apply to volatile transfers.
    if (MI->isVolatile())
      return 0;

    // If we have a memmove and the source operation is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
    if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = CI.getParent()->getParent()->getParent();
          Intrinsic::ID MemCpyID = Intrinsic::memcpy;
          const Type *Tys[3] = { CI.getArgOperand(0)->getType(),
                                 CI.getArgOperand(1)->getType(),
                                 CI.getArgOperand(2)->getType() };
          CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys, 3));
          Changed = true;
        }
    }

    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
      // memmove(x,x,size) -> noop.
      if (MTI->getSource() == MTI->getDest())
        return EraseInstFromFunction(CI);
    }

    // If we can determine a pointer alignment that is bigger than currently
    // set, update the alignment.
    if (isa<MemTransferInst>(MI)) {
      if (Instruction *I = SimplifyMemTransfer(MI))
        return I;
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
      if (Instruction *I = SimplifyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  }
  switch (II->getIntrinsicID()) {
  default: break;
  case Intrinsic::objectsize: {
    // We need target data for just about everything so depend on it.
    if (!TD) break;

    const Type *ReturnTy = CI.getType();
    bool Min = (cast<ConstantInt>(II->getArgOperand(1))->getZExtValue() == 1);

    // Get to the real allocated thing and offset as fast as possible.
    Value *Op1 = II->getArgOperand(0)->stripPointerCasts();

    // If we've stripped down to a single global variable that we
    // can know the size of then just return that.
    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op1)) {
      if (GV->hasDefinitiveInitializer()) {
        Constant *C = GV->getInitializer();
        uint64_t GlobalSize = TD->getTypeAllocSize(C->getType());
        return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, GlobalSize));
      } else {
        // Can't determine size of the GV.
        Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
        return ReplaceInstUsesWith(CI, RetVal);
      }
    } else if (AllocaInst *AI = dyn_cast<AllocaInst>(Op1)) {
      if (AI->getAllocatedType()->isSized()) {
        uint64_t AllocaSize = TD->getTypeAllocSize(AI->getAllocatedType());
        if (AI->isArrayAllocation()) {
          const ConstantInt *C = dyn_cast<ConstantInt>(AI->getArraySize());
          if (!C) break;
          AllocaSize *= C->getZExtValue();
        }
        return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, AllocaSize));
      }
    } else if (CallInst *MI = extractMallocCall(Op1)) {
      const Type* MallocType = getMallocAllocatedType(MI);
      if (MallocType && MallocType->isSized()) {
        if (Value *NElems = getMallocArraySize(MI, TD, true)) {
          if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
            return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy,
                   (NElements->getZExtValue() * TD->getTypeAllocSize(MallocType))));
        }
      }
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op1)) {
      // Only handle constant GEPs here.
      if (CE->getOpcode() != Instruction::GetElementPtr) break;
      GEPOperator *GEP = cast<GEPOperator>(CE);

      // Make sure we're not a constant offset from an external
      // global.
      Value *Operand = GEP->getPointerOperand();
      Operand = Operand->stripPointerCasts();
      if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Operand))
        if (!GV->hasDefinitiveInitializer()) break;

      // Get what we're pointing to and its size.
      const PointerType *BaseType =
        cast<PointerType>(Operand->getType());
      uint64_t Size = TD->getTypeAllocSize(BaseType->getElementType());

      // Get the current byte offset into the thing. Use the original
      // operand in case we're looking through a bitcast.
      SmallVector<Value*, 8> Ops(CE->op_begin()+1, CE->op_end());
      const PointerType *OffsetType =
        cast<PointerType>(GEP->getPointerOperand()->getType());
      uint64_t Offset = TD->getIndexedOffset(OffsetType, &Ops[0], Ops.size());

      if (Size < Offset) {
        // Out of bound reference? Negative index normalized to large
        // index? Just return "I don't know".
        Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
        return ReplaceInstUsesWith(CI, RetVal);
      }

      Constant *RetVal = ConstantInt::get(ReturnTy, Size-Offset);
      return ReplaceInstUsesWith(CI, RetVal);
    }

    // Do not return "I don't know" here. Later optimization passes could
    // make it possible to evaluate objectsize to a constant.
    break;
  }
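  // Illustrative fold (hypothetical IR): with @g = global [10 x i8] and a
  // definitive initializer,
  //   call i32 @llvm.objectsize.i32(i8* bitcast ([10 x i8]* @g to i8*), i1 false)
  // folds to the constant i32 10.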
  case Intrinsic::bswap:
    // bswap(bswap(x)) -> x
    if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getArgOperand(0)))
      if (Operand->getIntrinsicID() == Intrinsic::bswap)
        return ReplaceInstUsesWith(CI, Operand->getArgOperand(0));

    // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
    if (TruncInst *TI = dyn_cast<TruncInst>(II->getArgOperand(0))) {
      if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(TI->getOperand(0)))
        if (Operand->getIntrinsicID() == Intrinsic::bswap) {
          unsigned C = Operand->getType()->getPrimitiveSizeInBits() -
                       TI->getType()->getPrimitiveSizeInBits();
          Value *CV = ConstantInt::get(Operand->getType(), C);
          Value *V = Builder->CreateLShr(Operand->getArgOperand(0), CV);
          return new TruncInst(V, TI->getType());
        }
    }
    break;
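  // Worked instance of the fold above: for x:i32 truncated to i16, c is
  // 32 - 16 = 16, so bswap(trunc(bswap(x))) becomes trunc(lshr(x, 16)),
  // i.e. the i16 result is simply the high half of x.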
  case Intrinsic::powi:
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // powi(x, 0) -> 1.0
      if (Power->isZero())
        return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
      // powi(x, 1) -> x
      if (Power->isOne())
        return ReplaceInstUsesWith(CI, II->getArgOperand(0));
      // powi(x, -1) -> 1/x
      if (Power->isAllOnesValue())
        return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
                                          II->getArgOperand(0));
    }
    break;
  case Intrinsic::cttz: {
    // If all bits below the first known one are known zero,
    // this value is constant.
    const IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getArgOperand(0), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned TrailingZeros = KnownOne.countTrailingZeros();
    APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                 APInt(BitWidth, TrailingZeros)));
    break;
  }
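  // Example of the cttz fold (illustrative): for %v = or i32 (shl i32 %y, 4), 8,
  // bit 3 is known one and bits 0-2 are known zero, so cttz(%v) folds to the
  // constant 3. The ctlz case below is the mirror image at the high end.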
  case Intrinsic::ctlz: {
    // If all bits above the first known one are known zero,
    // this value is constant.
    const IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getArgOperand(0), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned LeadingZeros = KnownOne.countLeadingZeros();
    APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                 APInt(BitWidth, LeadingZeros)));
    break;
  }
  case Intrinsic::uadd_with_overflow: {
    Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
    const IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt Mask = APInt::getSignBit(BitWidth);
    APInt LHSKnownZero(BitWidth, 0);
    APInt LHSKnownOne(BitWidth, 0);
    ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
    bool LHSKnownNegative = LHSKnownOne[BitWidth - 1];
    bool LHSKnownPositive = LHSKnownZero[BitWidth - 1];

    if (LHSKnownNegative || LHSKnownPositive) {
      APInt RHSKnownZero(BitWidth, 0);
      APInt RHSKnownOne(BitWidth, 0);
      ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);
      bool RHSKnownNegative = RHSKnownOne[BitWidth - 1];
      bool RHSKnownPositive = RHSKnownZero[BitWidth - 1];
      if (LHSKnownNegative && RHSKnownNegative) {
        // The sign bit is set in both cases: this MUST overflow.
        // Create a simple add instruction, and insert it into the struct.
        Instruction *Add = BinaryOperator::CreateAdd(LHS, RHS, "", &CI);
        Worklist.Add(Add);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getTrue(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, Add, 0);
      }

      if (LHSKnownPositive && RHSKnownPositive) {
        // The sign bit is clear in both cases: this CANNOT overflow.
        // Create a simple add instruction, and insert it into the struct.
        Instruction *Add = BinaryOperator::CreateNUWAdd(LHS, RHS, "", &CI);
        Worklist.Add(Add);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, Add, 0);
      }
    }
  }
  // FALL THROUGH uadd into sadd
  case Intrinsic::sadd_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X + undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X + 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
    // undef - X -> undef
    // X - undef -> undef
    if (isa<UndefValue>(II->getArgOperand(0)) ||
        isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X - 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X * undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X * 0 -> {0, false}
      if (RHSI->isZero())
        return ReplaceInstUsesWith(CI, Constant::getNullValue(II->getType()));

      // X * 1 -> {X, false}
      if (RHSI->equalsInt(1)) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::x86_sse_loadu_ps:
  case Intrinsic::x86_sse2_loadu_pd:
  case Intrinsic::x86_sse2_loadu_dq:
    // Turn PPC lvx -> load if the pointer is known aligned.
    // Turn X86 loadups -> load if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getArgOperand(0), 16) >= 16) {
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                          PointerType::getUnqual(II->getType()));
      return new LoadInst(Ptr);
    }
    break;
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getArgOperand(1), 16) >= 16) {
      const Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(0)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
      return new StoreInst(II->getArgOperand(0), Ptr);
    }
    break;
  case Intrinsic::x86_sse_storeu_ps:
  case Intrinsic::x86_sse2_storeu_pd:
  case Intrinsic::x86_sse2_storeu_dq:
    // Turn X86 storeu -> store if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getArgOperand(0), 16) >= 16) {
      const Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(1)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), OpPtrTy);
      return new StoreInst(II->getArgOperand(1), Ptr);
    }
    break;
  case Intrinsic::x86_sse_cvttss2si: {
    // This intrinsic only demands the 0th element of its input vector. If
    // we can simplify the input based on that, do so now.
    unsigned VWidth =
      cast<VectorType>(II->getArgOperand(0)->getType())->getNumElements();
    APInt DemandedElts(VWidth, 1);
    APInt UndefElts(VWidth, 0);
    if (Value *V = SimplifyDemandedVectorElts(II->getArgOperand(0),
                                              DemandedElts, UndefElts)) {
      II->setArgOperand(0, V);
      return II;
    }
    break;
  }
  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
    if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getArgOperand(2))) {
      assert(Mask->getNumOperands() == 16 && "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        if (!isa<ConstantInt>(Mask->getOperand(i)) &&
            !isa<UndefValue>(Mask->getOperand(i))) {
          AllEltsOk = false;
          break;
        }
      }

      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 = Builder->CreateBitCast(II->getArgOperand(0),
                                            Mask->getType());
        Value *Op1 = Builder->CreateBitCast(II->getArgOperand(1),
                                            Mask->getType());
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getOperand(i)))
            continue;
          unsigned Idx = cast<ConstantInt>(Mask->getOperand(i))->getZExtValue();
          Idx &= 31;  // Match the hardware behavior.

          if (ExtractedElts[Idx] == 0) {
            ExtractedElts[Idx] =
              Builder->CreateExtractElement(Idx < 16 ? Op0 : Op1,
                  ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                   Idx&15, false), "tmp");
          }

          // Insert this value into the result vector.
          Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
                         ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                          i, false), "tmp");
        }
        return CastInst::Create(Instruction::BitCast, Result, CI.getType());
      }
    }
    break;
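  // Sketch of the result (illustrative): each constant mask byte selects byte
  // Idx&15 from Op0 (Idx < 16) or Op1 (Idx >= 16); the loop rebuilds the
  // permutation as extractelement/insertelement pairs, reusing each extracted
  // byte, then bitcasts the <16 x i8> result back to the intrinsic's return
  // type.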
  case Intrinsic::stackrestore: {
    // If the save is right next to the restore, remove the restore. This can
    // happen when variable allocas are DCE'd.
    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
      if (SS->getIntrinsicID() == Intrinsic::stacksave) {
        BasicBlock::iterator BI = SS;
        if (&*++BI == II)
          return EraseInstFromFunction(CI);
      }
    }

    // Scan down this block to see if there is another stack restore in the
    // same block without an intervening call/alloca.
    BasicBlock::iterator BI = II;
    TerminatorInst *TI = II->getParent()->getTerminator();
    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      if (isa<AllocaInst>(BI) || isMalloc(BI)) {
        CannotRemove = true;
        break;
      }
      if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
          // If there is a stackrestore below this one, remove this one.
          if (II->getIntrinsicID() == Intrinsic::stackrestore)
            return EraseInstFromFunction(CI);
          // Otherwise, ignore the intrinsic.
        } else {
          // If we found a non-intrinsic call, we can't remove the stack
          // restore.
          CannotRemove = true;
          break;
        }
      }
    }

    // If the stack restore is in a return/unwind block and if there are no
    // allocas or calls between the restore and the return, nuke the restore.
    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<UnwindInst>(TI)))
      return EraseInstFromFunction(CI);
    break;
  }
  }

  return visitCallSite(II);
}
// InvokeInst simplification
//
Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
  return visitCallSite(&II);
}
/// isSafeToEliminateVarargsCast - If this cast does not affect the value
/// passed through the varargs area, we can eliminate the use of the cast.
static bool isSafeToEliminateVarargsCast(const CallSite CS,
                                         const CastInst * const CI,
                                         const TargetData * const TD,
                                         const unsigned ix) {
  if (!CI->isLosslessCast())
    return false;

  // The size of ByVal arguments is derived from the type, so we
  // can't change to a type with a different size. If the size were
  // passed explicitly we could avoid this check.
  if (!CS.paramHasAttr(ix, Attribute::ByVal))
    return true;

  const Type* SrcTy =
    cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
  const Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
  if (!SrcTy->isSized() || !DstTy->isSized())
    return false;
  if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
    return false;
  return true;
}
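// For instance (hypothetical call site): a lossless bitcast from i8* to i32*
// passed through a non-byval vararg slot can simply be replaced by its
// operand; for byval arguments the cast stays unless both pointee types have
// the same allocation size.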
namespace {
class InstCombineFortifiedLibCalls : public SimplifyFortifiedLibCalls {
  InstCombiner *IC;
protected:
  void replaceCall(Value *With) {
    NewInstruction = IC->ReplaceInstUsesWith(*CI, With);
  }
  bool isFoldable(unsigned SizeCIOp, unsigned SizeArgOp, bool isString) const {
    if (ConstantInt *SizeCI =
                         dyn_cast<ConstantInt>(CI->getArgOperand(SizeCIOp))) {
      if (SizeCI->isAllOnesValue())
        return true;
      if (isString)
        return SizeCI->getZExtValue() >=
               GetStringLength(CI->getArgOperand(SizeArgOp));
      if (ConstantInt *Arg = dyn_cast<ConstantInt>(
                                                 CI->getArgOperand(SizeArgOp)))
        return SizeCI->getZExtValue() >= Arg->getZExtValue();
    }
    return false;
  }
public:
  InstCombineFortifiedLibCalls(InstCombiner *IC) : IC(IC), NewInstruction(0) { }
  Instruction *NewInstruction;
};
} // end anonymous namespace
// Try to fold some different types of calls here.
// Currently we're only working with the checking functions, memcpy_chk,
// mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,
// strcat_chk and strncat_chk.
Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const TargetData *TD) {
  if (CI->getCalledFunction() == 0) return 0;

  InstCombineFortifiedLibCalls Simplifier(this);
  Simplifier.fold(CI, TD);
  return Simplifier.NewInstruction;
}
// visitCallSite - Improvements for call and invoke instructions.
//
Instruction *InstCombiner::visitCallSite(CallSite CS) {
  bool Changed = false;

  // If the callee is a constexpr cast of a function, attempt to move the cast
  // to the arguments of the call/invoke.
  if (transformConstExprCastCall(CS)) return 0;

  Value *Callee = CS.getCalledValue();

  if (Function *CalleeF = dyn_cast<Function>(Callee))
    // If the call and callee calling conventions don't match, this call must
    // be unreachable, as the call is undefined.
    if (CalleeF->getCallingConv() != CS.getCallingConv() &&
        // Only do this for calls to a function with a body. A prototype may
        // not actually end up matching the implementation's calling conv for a
        // variety of reasons (e.g. it may be written in assembly).
        !CalleeF->isDeclaration()) {
      Instruction *OldCall = CS.getInstruction();
      new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                    UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                    OldCall);
      // If OldCall does not return void then replaceAllUsesWith undef.
      // This allows ValueHandlers and custom metadata to adjust themselves.
      if (!OldCall->getType()->isVoidTy())
        OldCall->replaceAllUsesWith(UndefValue::get(OldCall->getType()));
      if (isa<CallInst>(OldCall))
        return EraseInstFromFunction(*OldCall);

      // We cannot remove an invoke, because it would change the CFG, just
      // change the callee to a null pointer.
      cast<InvokeInst>(OldCall)->setCalledFunction(
                                    Constant::getNullValue(CalleeF->getType()));
      return 0;
    }

  if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
    // This instruction is not reachable, just remove it. We insert a store to
    // undef so that we know that this code is not reachable, despite the fact
    // that we can't modify the CFG here.
    new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                  UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                  CS.getInstruction());

    // If CS does not return void then replaceAllUsesWith undef.
    // This allows ValueHandlers and custom metadata to adjust themselves.
    if (!CS.getInstruction()->getType()->isVoidTy())
      CS.getInstruction()->
        replaceAllUsesWith(UndefValue::get(CS.getInstruction()->getType()));

    if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
      // Don't break the CFG, insert a dummy cond branch.
      BranchInst::Create(II->getNormalDest(), II->getUnwindDest(),
                         ConstantInt::getTrue(Callee->getContext()), II);
    }
    return EraseInstFromFunction(*CS.getInstruction());
  }

  if (BitCastInst *BC = dyn_cast<BitCastInst>(Callee))
    if (IntrinsicInst *In = dyn_cast<IntrinsicInst>(BC->getOperand(0)))
      if (In->getIntrinsicID() == Intrinsic::init_trampoline)
        return transformCallThroughTrampoline(CS);

  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  if (FTy->isVarArg()) {
    int ix = FTy->getNumParams() + (isa<InvokeInst>(Callee) ? 3 : 1);
    // See if we can optimize any arguments passed through the varargs area of
    // the call.
    for (CallSite::arg_iterator I = CS.arg_begin()+FTy->getNumParams(),
           E = CS.arg_end(); I != E; ++I, ++ix) {
      CastInst *CI = dyn_cast<CastInst>(*I);
      if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
        *I = CI->getOperand(0);
        Changed = true;
      }
    }
  }

  if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
    // Inline asm calls cannot throw - mark them 'nounwind'.
    CS.setDoesNotThrow();
    Changed = true;
  }

  // Try to optimize the call if possible, we require TargetData for most of
  // this. None of these calls are seen as possibly dead so go ahead and
  // delete the instruction now.
  if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
    Instruction *I = tryOptimizeCall(CI, TD);
    // If we changed something return the result, etc. Otherwise let
    // the fallthrough check.
    if (I) return EraseInstFromFunction(*I);
  }

  return Changed ? CS.getInstruction() : 0;
}
// transformConstExprCastCall - If the callee is a constexpr cast of a function,
// attempt to move the cast to the arguments of the call/invoke.
//
bool InstCombiner::transformConstExprCastCall(CallSite CS) {
  if (!isa<ConstantExpr>(CS.getCalledValue())) return false;
  ConstantExpr *CE = cast<ConstantExpr>(CS.getCalledValue());
  if (CE->getOpcode() != Instruction::BitCast ||
      !isa<Function>(CE->getOperand(0)))
    return false;
  Function *Callee = cast<Function>(CE->getOperand(0));
  Instruction *Caller = CS.getInstruction();
  const AttrListPtr &CallerPAL = CS.getAttributes();

  // Okay, this is a cast from a function to a different type. Unless doing so
  // would cause a type conversion of one of our arguments, change this call to
  // be a direct call with arguments casted to the appropriate types.
  //
  const FunctionType *FT = Callee->getFunctionType();
  const Type *OldRetTy = Caller->getType();
  const Type *NewRetTy = FT->getReturnType();

  if (NewRetTy->isStructTy())
    return false;   // TODO: Handle multiple return values.

  // Check to see if we are changing the return type...
  if (OldRetTy != NewRetTy) {
    if (Callee->isDeclaration() &&
        // Conversion is ok if changing from one pointer type to another or
        // from a pointer to an integer of the same size.
        !((OldRetTy->isPointerTy() || !TD ||
           OldRetTy == TD->getIntPtrType(Caller->getContext())) &&
          (NewRetTy->isPointerTy() || !TD ||
           NewRetTy == TD->getIntPtrType(Caller->getContext()))))
      return false;   // Cannot transform this return value.

    if (!Caller->use_empty() &&
        // void -> non-void is handled specially
        !NewRetTy->isVoidTy() && !CastInst::isCastable(NewRetTy, OldRetTy))
      return false;   // Cannot transform this return value.

    if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
      Attributes RAttrs = CallerPAL.getRetAttributes();
      if (RAttrs & Attribute::typeIncompatible(NewRetTy))
        return false;   // Attribute not compatible with transformed value.
    }

    // If the callsite is an invoke instruction, and the return value is used
    // by a PHI node in a successor, we cannot change the return type of the
    // call because there is no place to put the cast instruction (without
    // breaking the critical edge). Bail out in this case.
    if (!Caller->use_empty())
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
        for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
             UI != E; ++UI)
          if (PHINode *PN = dyn_cast<PHINode>(*UI))
            if (PN->getParent() == II->getNormalDest() ||
                PN->getParent() == II->getUnwindDest())
              return false;
  }

  unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  CallSite::arg_iterator AI = CS.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    const Type *ParamTy = FT->getParamType(i);
    const Type *ActTy = (*AI)->getType();

    if (!CastInst::isCastable(ActTy, ParamTy))
      return false;   // Cannot transform this parameter value.

    if (CallerPAL.getParamAttributes(i + 1)
        & Attribute::typeIncompatible(ParamTy))
      return false;   // Attribute not compatible with transformed value.

    // Converting from one pointer type to another or between a pointer and an
    // integer of the same size is safe even if we do not have a body.
    bool isConvertible = ActTy == ParamTy ||
      (TD && ((ParamTy->isPointerTy() ||
               ParamTy == TD->getIntPtrType(Caller->getContext())) &&
              (ActTy->isPointerTy() ||
               ActTy == TD->getIntPtrType(Caller->getContext()))));
    if (Callee->isDeclaration() && !isConvertible) return false;
  }

  if (FT->getNumParams() < NumActualArgs && !FT->isVarArg() &&
      Callee->isDeclaration())
    return false;   // Do not delete arguments unless we have a function body.

  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      !CallerPAL.isEmpty())
    // In this case we have more arguments than the new function type, but we
    // won't be dropping them. Check that these extra arguments have attributes
    // that are compatible with being a vararg call argument.
    for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
      if (CallerPAL.getSlot(i - 1).Index <= FT->getNumParams())
        break;
      Attributes PAttrs = CallerPAL.getSlot(i - 1).Attrs;
      if (PAttrs & Attribute::VarArgsIncompatible)
        return false;
    }

  // Okay, we decided that this is a safe thing to do: go ahead and start
  // inserting cast instructions as necessary...
  std::vector<Value*> Args;
  Args.reserve(NumActualArgs);
  SmallVector<AttributeWithIndex, 8> attrVec;
  attrVec.reserve(NumCommonArgs);

  // Get any return attributes.
  Attributes RAttrs = CallerPAL.getRetAttributes();

  // If the return value is not being used, the type may not be compatible
  // with the existing attributes. Wipe out any problematic attributes.
  RAttrs &= ~Attribute::typeIncompatible(NewRetTy);

  // Add the new return attributes.
  if (RAttrs)
    attrVec.push_back(AttributeWithIndex::get(0, RAttrs));

  AI = CS.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    const Type *ParamTy = FT->getParamType(i);
    if ((*AI)->getType() == ParamTy) {
      Args.push_back(*AI);
    } else {
      Instruction::CastOps opcode = CastInst::getCastOpcode(*AI,
          false, ParamTy, false);
      Args.push_back(Builder->CreateCast(opcode, *AI, ParamTy, "tmp"));
    }

    // Add any parameter attributes.
    if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
      attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
  }

  // If the function takes more arguments than the call was taking, add them
  // now.
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
    Args.push_back(Constant::getNullValue(FT->getParamType(i)));

  // If we are removing arguments to the function, emit an obnoxious warning.
  if (FT->getNumParams() < NumActualArgs) {
    if (!FT->isVarArg()) {
      errs() << "WARNING: While resolving call to function '"
             << Callee->getName() << "' arguments were dropped!\n";
    } else {
      // Add all of the arguments in their promoted form to the arg list.
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        const Type *PTy = getPromotedType((*AI)->getType());
        if (PTy != (*AI)->getType()) {
          // Must promote to pass through va_arg area!
          Instruction::CastOps opcode =
            CastInst::getCastOpcode(*AI, false, PTy, false);
          Args.push_back(Builder->CreateCast(opcode, *AI, PTy, "tmp"));
        } else {
          Args.push_back(*AI);
        }

        // Add any parameter attributes.
        if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
          attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
      }
    }
  }

  if (Attributes FnAttrs = CallerPAL.getFnAttributes())
    attrVec.push_back(AttributeWithIndex::get(~0, FnAttrs));

  if (NewRetTy->isVoidTy())
    Caller->setName("");   // Void type should not have a name.

  const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec.begin(),
                                                     attrVec.end());

  Instruction *NC;
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NC = InvokeInst::Create(Callee, II->getNormalDest(), II->getUnwindDest(),
                            Args.begin(), Args.end(),
                            Caller->getName(), Caller);
    cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
    cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
  } else {
    NC = CallInst::Create(Callee, Args.begin(), Args.end(),
                          Caller->getName(), Caller);
    CallInst *CI = cast<CallInst>(Caller);
    if (CI->isTailCall())
      cast<CallInst>(NC)->setTailCall();
    cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
    cast<CallInst>(NC)->setAttributes(NewCallerPAL);
  }

  // Insert a cast of the return type as necessary.
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      Instruction::CastOps opcode = CastInst::getCastOpcode(NC, false,
                                                            OldRetTy, false);
      NV = NC = CastInst::Create(opcode, NC, OldRetTy, "tmp");

      // If this is an invoke instruction, we should insert it after the first
      // non-phi instruction in the normal successor block.
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        BasicBlock::iterator I = II->getNormalDest()->getFirstNonPHI();
        InsertNewInstBefore(NC, *I);
      } else {
        // Otherwise, it's a call, just insert cast right after the call instr.
        InsertNewInstBefore(NC, *Caller);
      }
      Worklist.AddUsersToWorkList(*Caller);
    } else {
      NV = UndefValue::get(Caller->getType());
    }
  }

  if (!Caller->use_empty())
    Caller->replaceAllUsesWith(NV);

  EraseInstFromFunction(*Caller);
  return true;
}
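// Illustrative example of the transformation above (hypothetical IR): a call
// through bitcast (i32 (i32)* @f to i8 (i8)*) becomes a direct call to @f,
// with the i8 argument cast to i32 and the i32 result cast back to i8 at the
// call site.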
// transformCallThroughTrampoline - Turn a call to a function created by the
// init_trampoline intrinsic into a direct call to the underlying function.
//
Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) {
  Value *Callee = CS.getCalledValue();
  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  const AttrListPtr &Attrs = CS.getAttributes();

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return 0;

  IntrinsicInst *Tramp =
    cast<IntrinsicInst>(cast<BitCastInst>(Callee)->getOperand(0));

  Function *NestF = cast<Function>(Tramp->getArgOperand(1)->stripPointerCasts());
  const PointerType *NestFPTy = cast<PointerType>(NestF->getType());
  const FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());

  const AttrListPtr &NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestIdx = 1;
    const Type *NestTy = 0;
    Attributes NestAttr = Attribute::None;

    // Look for a parameter marked with the 'nest' attribute.
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
         E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
      if (NestAttrs.paramHasAttr(NestIdx, Attribute::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = NestAttrs.getParamAttributes(NestIdx);
        break;
      }

    if (NestTy) {
      Instruction *Caller = CS.getInstruction();
      std::vector<Value*> NewArgs;
      NewArgs.reserve(unsigned(CS.arg_end()-CS.arg_begin())+1);

      SmallVector<AttributeWithIndex, 8> NewAttrs;
      NewAttrs.reserve(Attrs.getNumSlots() + 1);

      // Insert the nest argument into the call argument list, which may
      // mean appending it. Likewise for attributes.

      // Add any result attributes.
      if (Attributes Attr = Attrs.getRetAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(0, Attr));

      {
        unsigned Idx = 1;
        CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
        do {
          if (Idx == NestIdx) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp->getArgOperand(2);
            if (NestVal->getType() != NestTy)
              NestVal = new BitCastInst(NestVal, NestTy, "nest", Caller);
            NewArgs.push_back(NestVal);
            NewAttrs.push_back(AttributeWithIndex::get(NestIdx, NestAttr));
          }

          if (I == E)
            break;

          // Add the original argument and attributes.
          NewArgs.push_back(*I);
          if (Attributes Attr = Attrs.getParamAttributes(Idx))
            NewAttrs.push_back
              (AttributeWithIndex::get(Idx + (Idx >= NestIdx), Attr));

          ++Idx, ++I;
        } while (1);
      }

      // Add any function attributes.
      if (Attributes Attr = Attrs.getFnAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(~0, Attr));

      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.

      std::vector<const Type*> NewTypes;
      NewTypes.reserve(FTy->getNumParams()+1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned Idx = 1;
        FunctionType::param_iterator I = FTy->param_begin(),
          E = FTy->param_end();

        do {
          if (Idx == NestIdx)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++Idx, ++I;
        } while (1);
      }

      // Replace the trampoline call with a direct call. Let the generic
      // code sort out any function type mismatches.
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                               FTy->isVarArg());
      Constant *NewCallee =
        NestF->getType() == PointerType::getUnqual(NewFTy) ?
        NestF : ConstantExpr::getBitCast(NestF,
                                         PointerType::getUnqual(NewFTy));
      const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs.begin(),
                                                   NewAttrs.end());

      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        NewCaller = InvokeInst::Create(NewCallee,
                                       II->getNormalDest(), II->getUnwindDest(),
                                       NewArgs.begin(), NewArgs.end(),
                                       Caller->getName(), Caller);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewCallee, NewArgs.begin(), NewArgs.end(),
                                     Caller->getName(), Caller);
        if (cast<CallInst>(Caller)->isTailCall())
          cast<CallInst>(NewCaller)->setTailCall();
        cast<CallInst>(NewCaller)->
          setCallingConv(cast<CallInst>(Caller)->getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }

      if (!Caller->getType()->isVoidTy())
        Caller->replaceAllUsesWith(NewCaller);
      Caller->eraseFromParent();
      Worklist.Remove(Caller);
      return 0;
    }
  }

  // Replace the trampoline call with a direct call. Since there is no 'nest'
  // parameter, there is no need to adjust the argument list. Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee =
    NestF->getType() == PTy ? NestF :
                              ConstantExpr::getBitCast(NestF, PTy);
  CS.setCalledFunction(NewCallee);
  return CS.getInstruction();
}
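// Illustrative end-to-end effect (hypothetical IR): a call through a pointer
// produced by llvm.init_trampoline for a function @f(i8* nest %chain, i32 %x)
// is rewritten to call @f directly, with the trampoline's chain value spliced
// in as the 'nest' argument and the remaining arguments left in place.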