//===- InstCombineCalls.cpp -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall and visitInvoke functions.
//
//===----------------------------------------------------------------------===//
#include "InstCombine.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
/// getPromotedType - Return the specified type promoted as it would be to pass
/// through a va_arg area.
static Type *getPromotedType(Type *Ty) {
  if (IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}
Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
  unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), TD);
  unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), TD);
  unsigned MinAlign = std::min(DstAlign, SrcAlign);
  unsigned CopyAlign = MI->getAlignment();

  if (CopyAlign < MinAlign) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      MinAlign, false));
    return MI;
  }
  // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
  // load/store.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getArgOperand(2));
  if (MemOpLength == 0) return 0;

  // Source and destination pointer types are always "i8*" for the intrinsic.
  // See if the size is something we can handle with a single primitive
  // load/store. A single load+store correctly handles overlapping memory in
  // the memmove case.
  unsigned Size = MemOpLength->getZExtValue();
  if (Size == 0) return MI;  // Delete this mem transfer.

  if (Size > 8 || (Size&(Size-1)))
    return 0;  // If not 1/2/4/8 bytes, exit.
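  // As a sketch of the intended rewrite (modulo the pointer bitcasts emitted
  // below), an 8-byte copy such as
  //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 8, i32 8, i1 false)
  // becomes
  //   %v = load i64* %s, align 8
  //   store i64 %v, i64* %d, align 8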
  // Use an integer load+store unless we can find something better.
  unsigned SrcAddrSp =
    cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
  unsigned DstAddrSp =
    cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();

  IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
  Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
  Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);
  // Memcpy forces the use of i8* for the source and destination. That means
  // that if you're using memcpy to move one double around, you'll get a cast
  // from double* to i8*. We'd much rather use a double load+store than an
  // i64 load+store here, because this improves the odds that the source or
  // dest address will be promotable. See if we can find a better type than
  // the integer datatype.
  Value *StrippedDest = MI->getArgOperand(0)->stripPointerCasts();
  if (StrippedDest != MI->getArgOperand(0)) {
    Type *SrcETy = cast<PointerType>(StrippedDest->getType())
                                     ->getElementType();
    if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
      // The SrcETy might be something like {{{double}}} or [1 x double]. Rip
      // down through these levels if so.
      while (!SrcETy->isSingleValueType()) {
        if (StructType *STy = dyn_cast<StructType>(SrcETy)) {
          if (STy->getNumElements() == 1)
            SrcETy = STy->getElementType(0);
          else
            break;
        } else if (ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
          if (ATy->getNumElements() == 1)
            SrcETy = ATy->getElementType();
          else
            break;
        } else
          break;
      }

      if (SrcETy->isSingleValueType()) {
        NewSrcPtrTy = PointerType::get(SrcETy, SrcAddrSp);
        NewDstPtrTy = PointerType::get(SrcETy, DstAddrSp);
      }
    }
  }
  // If the memcpy/memmove provides better alignment info than we can
  // infer, use it.
  SrcAlign = std::max(SrcAlign, CopyAlign);
  DstAlign = std::max(DstAlign, CopyAlign);

  Value *Src = Builder->CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
  Value *Dest = Builder->CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
  LoadInst *L = Builder->CreateLoad(Src, MI->isVolatile());
  L->setAlignment(SrcAlign);
  StoreInst *S = Builder->CreateStore(L, Dest, MI->isVolatile());
  S->setAlignment(DstAlign);

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setArgOperand(2, Constant::getNullValue(MemOpLength->getType()));
  return MI;
}
Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
  unsigned Alignment = getKnownAlignment(MI->getDest(), TD);
  if (MI->getAlignment() < Alignment) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      Alignment, false));
    return MI;
  }

  // Extract the length, alignment, and fill value if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return 0;
  uint64_t Len = LenC->getZExtValue();
  Alignment = MI->getAlignment();

  // If the length is zero, this is a no-op.
  if (Len == 0) return MI;  // memset(d,c,0,a) -> noop
  // memset(s,c,n) -> store s, c (for n=1,2,4,8)
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.

    Value *Dest = MI->getDest();
    unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
    Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
    Dest = Builder->CreateBitCast(Dest, NewDstPtrTy);

    // Alignment 0 means alignment 1 for memset, but not for store.
    if (Alignment == 0) Alignment = 1;
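    // Multiplying the i8 fill value by 0x0101010101010101 splats it into
    // every byte of a u64; truncating to ITy keeps the low Len bytes. E.g.
    // memset(p, 0xAB, 4) stores the i32 constant 0xABABABAB.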
    // Extract the fill value and store.
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    StoreInst *S = Builder->CreateStore(ConstantInt::get(ITy, Fill), Dest,
                                        MI->isVolatile());
    S->setAlignment(Alignment);

    // Set the length of the memset to 0, it will be deleted on the next
    // iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return 0;
}
/// visitCallInst - CallInst simplification. This mostly only handles folding
/// of intrinsic instructions. For normal calls, it allows visitCallSite to do
/// the heavy lifting.
///
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
  if (isFreeCall(&CI))
    return visitFree(CI);
  if (isMalloc(&CI))
    return visitMalloc(CI);

  // If the caller function is nounwind, mark the call as nounwind, even if
  // the callee isn't.
  if (CI.getParent()->getParent()->doesNotThrow() &&
      !CI.doesNotThrow()) {
    CI.setDoesNotThrow();
    return &CI;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallSite(&CI);
  // Intrinsics cannot occur in an invoke, so handle them here instead of in
  // visitCallSite.
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue())
        return EraseInstFromFunction(CI);

      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
        if (CI->getZExtValue() == 1) {
          // Replace the instruction with just byte operations. We would
          // transform other cases to loads/stores, but we don't know if
          // alignment is sufficient.
        }
    }

    // No other transformations apply to volatile transfers.
    if (MI->isVolatile())
      return 0;
    // If we have a memmove and the source operation is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
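    // E.g. (a sketch) a memmove whose source is a constant global:
    //   call void @llvm.memmove.p0i8.p0i8.i32(i8* %d, i8* getelementptr
    //       inbounds ([6 x i8]* @.str, i32 0, i32 0), i32 6, i32 1, i1 false)
    // can become the equivalent @llvm.memcpy call: writes through %d cannot
    // alias the read-only source.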
    if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = CI.getParent()->getParent()->getParent();
          Intrinsic::ID MemCpyID = Intrinsic::memcpy;
          Type *Tys[3] = { CI.getArgOperand(0)->getType(),
                           CI.getArgOperand(1)->getType(),
                           CI.getArgOperand(2)->getType() };
          CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys));
          Changed = true;
        }
    }

    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
      // memmove(x,x,size) -> noop.
      if (MTI->getSource() == MTI->getDest())
        return EraseInstFromFunction(CI);
    }
    // If we can determine a pointer alignment that is bigger than currently
    // set, update the alignment.
    if (isa<MemTransferInst>(MI)) {
      if (Instruction *I = SimplifyMemTransfer(MI))
        return I;
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
      if (Instruction *I = SimplifyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  }

  switch (II->getIntrinsicID()) {
  default: break;
  case Intrinsic::objectsize: {
    // We need target data for just about everything so depend on it.
    if (!TD) break;

    Type *ReturnTy = CI.getType();
    uint64_t DontKnow = II->getArgOperand(1) == Builder->getTrue() ? 0 : -1ULL;
    // Get to the real allocated thing and offset as fast as possible.
    Value *Op1 = II->getArgOperand(0)->stripPointerCasts();

    uint64_t Offset = 0;
    uint64_t Size = -1ULL;
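    // E.g., assuming @g = global [10 x i8] with a definitive initializer,
    //   llvm.objectsize.i32(i8* getelementptr ([10 x i8]* @g, i32 0, i32 3),
    //                       i1 false)
    // looks through the GEP below (Offset = 3), finds Size = 10, and folds
    // to 10 - 3 = 7.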
    // Try to look through constant GEPs.
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op1)) {
      if (!GEP->hasAllConstantIndices()) break;

      // Get the current byte offset into the thing. Use the original
      // operand in case we're looking through a bitcast.
      SmallVector<Value*, 8> Ops(GEP->idx_begin(), GEP->idx_end());
      if (!GEP->getPointerOperandType()->isPointerTy())
        break;
      Offset = TD->getIndexedOffset(GEP->getPointerOperandType(), Ops);

      Op1 = GEP->getPointerOperand()->stripPointerCasts();

      // Make sure we're not a constant offset from an external
      // global.
      if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op1))
        if (!GV->hasDefinitiveInitializer()) break;
    }
    // If we've stripped down to a single global variable that we
    // can know the size of then just return that.
    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op1)) {
      if (GV->hasDefinitiveInitializer()) {
        Constant *C = GV->getInitializer();
        Size = TD->getTypeAllocSize(C->getType());
      } else {
        // Can't determine size of the GV.
        Constant *RetVal = ConstantInt::get(ReturnTy, DontKnow);
        return ReplaceInstUsesWith(CI, RetVal);
      }
    } else if (AllocaInst *AI = dyn_cast<AllocaInst>(Op1)) {
      // Get alloca size.
      if (AI->getAllocatedType()->isSized()) {
        Size = TD->getTypeAllocSize(AI->getAllocatedType());
        if (AI->isArrayAllocation()) {
          const ConstantInt *C = dyn_cast<ConstantInt>(AI->getArraySize());
          if (!C) break;
          Size *= C->getZExtValue();
        }
      }
    } else if (CallInst *MI = extractMallocCall(Op1)) {
      // Get allocation size.
      Type* MallocType = getMallocAllocatedType(MI);
      if (MallocType && MallocType->isSized())
        if (Value *NElems = getMallocArraySize(MI, TD, true))
          if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
            Size = NElements->getZExtValue() * TD->getTypeAllocSize(MallocType);
    }

    // Do not return "I don't know" here. Later optimization passes could
    // make it possible to evaluate objectsize to a constant.
    if (Size == -1ULL)
      break;

    if (Size < Offset) {
      // Out-of-bound reference? Negative index normalized to a large
      // index? Just return "I don't know".
      return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, DontKnow));
    }
    return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, Size-Offset));
  }
  case Intrinsic::bswap:
    // bswap(bswap(x)) -> x
    if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getArgOperand(0)))
      if (Operand->getIntrinsicID() == Intrinsic::bswap)
        return ReplaceInstUsesWith(CI, Operand->getArgOperand(0));
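    // The fold below handles byte swaps separated by a truncate. E.g. for
    // x: i64 truncated to i32, c is 64 - 32 = 32: the inner bswap moves the
    // high four bytes of x into the low half already reversed, so swapping
    // them back equals trunc(lshr(x, 32)).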
    // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
    if (TruncInst *TI = dyn_cast<TruncInst>(II->getArgOperand(0))) {
      if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(TI->getOperand(0)))
        if (Operand->getIntrinsicID() == Intrinsic::bswap) {
          unsigned C = Operand->getType()->getPrimitiveSizeInBits() -
                       TI->getType()->getPrimitiveSizeInBits();
          Value *CV = ConstantInt::get(Operand->getType(), C);
          Value *V = Builder->CreateLShr(Operand->getArgOperand(0), CV);
          return new TruncInst(V, TI->getType());
        }
    }
    break;
  case Intrinsic::powi:
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // powi(x, 0) -> 1.0
      if (Power->isZero())
        return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
      // powi(x, 1) -> x
      if (Power->isOne())
        return ReplaceInstUsesWith(CI, II->getArgOperand(0));
      // powi(x, -1) -> 1/x
      if (Power->isAllOnesValue())
        return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
                                          II->getArgOperand(0));
    }
    break;
  case Intrinsic::cttz: {
    // If all bits below the first known one are known zero,
    // this value is constant.
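    // E.g. if the operand is known to be (%x & -8) | 8, bit 3 is known one
    // and bits 0-2 are known zero, so cttz folds to the constant 3.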
    IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
    // FIXME: Try to simplify vectors of integers.
    if (!IT) break;
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getArgOperand(0), KnownZero, KnownOne);
    unsigned TrailingZeros = KnownOne.countTrailingZeros();
    APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, TrailingZeros)));
    break;
  }
  case Intrinsic::ctlz: {
    // If all bits above the first known one are known zero,
    // this value is constant.
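    // E.g. for an i32 operand known to be (%x & 15) | 8, bits 31-4 are known
    // zero and bit 3 is known one, so ctlz folds to the constant 28.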
    IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
    // FIXME: Try to simplify vectors of integers.
    if (!IT) break;
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getArgOperand(0), KnownZero, KnownOne);
    unsigned LeadingZeros = KnownOne.countLeadingZeros();
    APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, LeadingZeros)));
    break;
  }
  case Intrinsic::uadd_with_overflow: {
    Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
    IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt LHSKnownZero(BitWidth, 0);
    APInt LHSKnownOne(BitWidth, 0);
    ComputeMaskedBits(LHS, LHSKnownZero, LHSKnownOne);
    bool LHSKnownNegative = LHSKnownOne[BitWidth - 1];
    bool LHSKnownPositive = LHSKnownZero[BitWidth - 1];

    if (LHSKnownNegative || LHSKnownPositive) {
      APInt RHSKnownZero(BitWidth, 0);
      APInt RHSKnownOne(BitWidth, 0);
      ComputeMaskedBits(RHS, RHSKnownZero, RHSKnownOne);
      bool RHSKnownNegative = RHSKnownOne[BitWidth - 1];
      bool RHSKnownPositive = RHSKnownZero[BitWidth - 1];
      if (LHSKnownNegative && RHSKnownNegative) {
        // The sign bit is set in both cases: this MUST overflow.
        // Create a simple add instruction, and insert it into the struct.
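        // The replacement is roughly (a sketch):
        //   %sum = add i32 %lhs, %rhs
        //   %res = insertvalue { i32, i1 } { i32 undef, i1 true }, i32 %sum, 0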
        Value *Add = Builder->CreateAdd(LHS, RHS);
        Add->takeName(&CI);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getTrue(II->getContext())
        };
        StructType *ST = cast<StructType>(II->getType());
        Constant *Struct = ConstantStruct::get(ST, V);
        return InsertValueInst::Create(Struct, Add, 0);
      }

      if (LHSKnownPositive && RHSKnownPositive) {
        // The sign bit is clear in both cases: this CANNOT overflow.
        // Create a simple add instruction, and insert it into the struct.
        Value *Add = Builder->CreateNUWAdd(LHS, RHS);
        Add->takeName(&CI);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        StructType *ST = cast<StructType>(II->getType());
        Constant *Struct = ConstantStruct::get(ST, V);
        return InsertValueInst::Create(Struct, Add, 0);
      }
    }
  } // FALL THROUGH uadd into sadd
  case Intrinsic::sadd_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X + undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X + 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct =
          ConstantStruct::get(cast<StructType>(II->getType()), V);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
    // undef - X -> undef
    // X - undef -> undef
    if (isa<UndefValue>(II->getArgOperand(0)) ||
        isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X - 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct =
          ConstantStruct::get(cast<StructType>(II->getType()), V);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::umul_with_overflow: {
    Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
    unsigned BitWidth = cast<IntegerType>(LHS->getType())->getBitWidth();

    APInt LHSKnownZero(BitWidth, 0);
    APInt LHSKnownOne(BitWidth, 0);
    ComputeMaskedBits(LHS, LHSKnownZero, LHSKnownOne);
    APInt RHSKnownZero(BitWidth, 0);
    APInt RHSKnownOne(BitWidth, 0);
    ComputeMaskedBits(RHS, RHSKnownZero, RHSKnownOne);

    // Get the largest possible values for each operand.
    APInt LHSMax = ~LHSKnownZero;
    APInt RHSMax = ~RHSKnownZero;
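    // E.g. with i8 operands where LHS = %x & 3 and RHS = %y & 15, the maxima
    // are 3 and 15; 3 * 15 = 45 fits in 8 bits, so the multiply can never
    // overflow and a plain NUW mul suffices.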
    // If multiplying the maximum values does not overflow then we can turn
    // this into a plain NUW mul.
    bool Overflow;
    LHSMax.umul_ov(RHSMax, Overflow);
    if (!Overflow) {
      Value *Mul = Builder->CreateNUWMul(LHS, RHS, "umul_with_overflow");
      Constant *V[] = {
        UndefValue::get(LHS->getType()),
        Builder->getFalse()
      };
      Constant *Struct = ConstantStruct::get(cast<StructType>(II->getType()),V);
      return InsertValueInst::Create(Struct, Mul, 0);
    }
  } // FALL THROUGH
  case Intrinsic::smul_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X * undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X * 0 -> {0, false}
      if (RHSI->isZero())
        return ReplaceInstUsesWith(CI, Constant::getNullValue(II->getType()));

      // X * 1 -> {X, false}
      if (RHSI->equalsInt(1)) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct =
          ConstantStruct::get(cast<StructType>(II->getType()), V);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
    // Turn PPC lvx -> load if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, TD) >= 16) {
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                          PointerType::getUnqual(II->getType()));
      return new LoadInst(Ptr);
    }
    break;
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, TD) >= 16) {
      Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(0)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
      return new StoreInst(II->getArgOperand(0), Ptr);
    }
    break;
  case Intrinsic::x86_sse_storeu_ps:
  case Intrinsic::x86_sse2_storeu_pd:
  case Intrinsic::x86_sse2_storeu_dq:
    // Turn X86 storeu -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, TD) >= 16) {
      Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(1)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), OpPtrTy);
      return new StoreInst(II->getArgOperand(1), Ptr);
    }
    break;
  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64: {
    // These intrinsics only demand the 0th element of their input vectors. If
    // we can simplify the input based on that, do so now.
    unsigned VWidth =
      cast<VectorType>(II->getArgOperand(0)->getType())->getNumElements();
    APInt DemandedElts(VWidth, 1);
    APInt UndefElts(VWidth, 0);
    if (Value *V = SimplifyDemandedVectorElts(II->getArgOperand(0),
                                              DemandedElts, UndefElts)) {
      II->setArgOperand(0, V);
      return II;
    }
    break;
  }
  case Intrinsic::x86_sse41_pmovsxbw:
  case Intrinsic::x86_sse41_pmovsxwd:
  case Intrinsic::x86_sse41_pmovsxdq:
  case Intrinsic::x86_sse41_pmovzxbw:
  case Intrinsic::x86_sse41_pmovzxwd:
  case Intrinsic::x86_sse41_pmovzxdq: {
    // pmov{s|z}x ignores the upper half of its input vector.
    unsigned VWidth =
      cast<VectorType>(II->getArgOperand(0)->getType())->getNumElements();
    unsigned LowHalfElts = VWidth / 2;
    APInt InputDemandedElts(APInt::getBitsSet(VWidth, 0, LowHalfElts));
    APInt UndefElts(VWidth, 0);
    if (Value *TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0),
                                                 InputDemandedElts,
                                                 UndefElts)) {
      II->setArgOperand(0, TmpV);
      return II;
    }
    break;
  }
  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
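    // Each mask byte selects one byte from the 32-byte concatenation of the
    // two inputs: indices 0-15 pick from the first operand, 16-31 from the
    // second, and only the low five bits of each mask byte are significant.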
    if (Constant *Mask = dyn_cast<Constant>(II->getArgOperand(2))) {
      assert(Mask->getType()->getVectorNumElements() == 16 &&
             "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        Constant *Elt = Mask->getAggregateElement(i);
        if (Elt == 0 ||
            !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
          AllEltsOk = false;
          break;
        }
      }

      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 = Builder->CreateBitCast(II->getArgOperand(0),
                                            Mask->getType());
        Value *Op1 = Builder->CreateBitCast(II->getArgOperand(1),
                                            Mask->getType());
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getAggregateElement(i)))
            continue;
          unsigned Idx =
            cast<ConstantInt>(Mask->getAggregateElement(i))->getZExtValue();
          Idx &= 31;  // Match the hardware behavior.

          if (ExtractedElts[Idx] == 0) {
            ExtractedElts[Idx] =
              Builder->CreateExtractElement(Idx < 16 ? Op0 : Op1,
                                            Builder->getInt32(Idx&15));
          }

          // Insert this value into the result vector.
          Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
                                                Builder->getInt32(i));
        }
        return CastInst::Create(Instruction::BitCast, Result, CI.getType());
      }
    }
    break;
  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane:
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    unsigned MemAlign = getKnownAlignment(II->getArgOperand(0), TD);
    unsigned AlignArg = II->getNumArgOperands() - 1;
    ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
    if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
      II->setArgOperand(AlignArg,
                        ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                         MemAlign, false));
      return II;
    }
    break;
  }
  case Intrinsic::arm_neon_vmulls:
  case Intrinsic::arm_neon_vmullu: {
    // Zext/sext intrinsic operands according to the intrinsic type, then try
    // to simplify them. This lets us try a SimplifyMulInst on the extended
    // operands. If the zext/sext instructions are unused when we're done then
    // delete them from the block.
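    // E.g. (a sketch) vmull of <4 x i16> operands yields <4 x i32>; widening
    // both operands first lets SimplifyMulInst fold cases such as
    // multiplication by zero or by undef without duplicating that logic here.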
    Value* Arg0 = II->getArgOperand(0);
    Value* Arg1 = II->getArgOperand(1);
    bool Zext = (II->getIntrinsicID() == Intrinsic::arm_neon_vmullu);
    Instruction* Arg0W =
      Zext ? CastInst::CreateZExtOrBitCast(Arg0, II->getType(), "", II) :
             CastInst::CreateSExtOrBitCast(Arg0, II->getType(), "", II);
    Value* Arg0WS = SimplifyInstruction(Arg0W);
    if (Arg0WS == 0) // If simplification fails just pass through the ext'd val.
      Arg0WS = Arg0W;
    Instruction* Arg1W =
      Zext ? CastInst::CreateZExtOrBitCast(Arg1, II->getType(), "", II) :
             CastInst::CreateSExtOrBitCast(Arg1, II->getType(), "", II);
    Value* Arg1WS = SimplifyInstruction(Arg1W);
    if (Arg1WS == 0)
      Arg1WS = Arg1W;
    Instruction *SimplifiedInst = 0;
    if (Value* V = SimplifyMulInst(Arg0WS, Arg1WS, TD)) {
      SimplifiedInst = ReplaceInstUsesWith(CI, V);
    }
    if (Arg0W->use_empty())
      Arg0W->eraseFromParent();
    if (Arg1W->use_empty())
      Arg1W->eraseFromParent();
    if (SimplifiedInst != 0)
      return SimplifiedInst;
    break;
  }
  case Intrinsic::stackrestore: {
    // If the save is right next to the restore, remove the restore. This can
    // happen when variable allocas are DCE'd.
    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
      if (SS->getIntrinsicID() == Intrinsic::stacksave) {
        BasicBlock::iterator BI = SS;
        if (&*++BI == II)
          return EraseInstFromFunction(CI);
      }
    }
    // Scan down this block to see if there is another stack restore in the
    // same block without an intervening call/alloca.
    BasicBlock::iterator BI = II;
    TerminatorInst *TI = II->getParent()->getTerminator();
    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      if (isa<AllocaInst>(BI) || isMalloc(BI)) {
        CannotRemove = true;
        break;
      }
      if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
          // If there is a stackrestore below this one, remove this one.
          if (II->getIntrinsicID() == Intrinsic::stackrestore)
            return EraseInstFromFunction(CI);
          // Otherwise, ignore the intrinsic.
        } else {
          // If we found a non-intrinsic call, we can't remove the stack
          // restore.
          CannotRemove = true;
          break;
        }
      }
    }

    // If the stack restore is in a return, resume, or unwind block and if
    // there are no allocas or calls between the restore and the return, nuke
    // the restore.
    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
      return EraseInstFromFunction(CI);
    break;
  }
  }

  return visitCallSite(II);
}
// InvokeInst simplification
//
Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
  return visitCallSite(&II);
}
/// isSafeToEliminateVarargsCast - If this cast does not affect the value
/// passed through the varargs area, we can eliminate the use of the cast.
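///
/// For example (a sketch): a float* passed through a bitcast to i8* at a
/// varargs call site is a lossless cast, so a non-byval argument can simply
/// use the original float* value instead.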
static bool isSafeToEliminateVarargsCast(const CallSite CS,
                                         const CastInst * const CI,
                                         const TargetData * const TD,
                                         const unsigned ix) {
  if (!CI->isLosslessCast())
    return false;

  // The size of ByVal arguments is derived from the type, so we
  // can't change to a type with a different size. If the size were
  // passed explicitly we could avoid this check.
  if (!CS.isByValArgument(ix))
    return true;

  Type* SrcTy =
    cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
  Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
  if (!SrcTy->isSized() || !DstTy->isSized())
    return false;
  if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
    return false;
  return true;
}
namespace {
class InstCombineFortifiedLibCalls : public SimplifyFortifiedLibCalls {
  InstCombiner *IC;
protected:
  void replaceCall(Value *With) {
    NewInstruction = IC->ReplaceInstUsesWith(*CI, With);
  }
  bool isFoldable(unsigned SizeCIOp, unsigned SizeArgOp, bool isString) const {
    if (CI->getArgOperand(SizeCIOp) == CI->getArgOperand(SizeArgOp))
      return true;
    if (ConstantInt *SizeCI =
          dyn_cast<ConstantInt>(CI->getArgOperand(SizeCIOp))) {
      if (SizeCI->isAllOnesValue())
        return true;
      if (isString) {
        uint64_t Len = GetStringLength(CI->getArgOperand(SizeArgOp));
        // If the length is 0 we don't know how long it is and so we can't
        // remove the check.
        if (Len == 0) return false;
        return SizeCI->getZExtValue() >= Len;
      }
      if (ConstantInt *Arg = dyn_cast<ConstantInt>(
              CI->getArgOperand(SizeArgOp)))
        return SizeCI->getZExtValue() >= Arg->getZExtValue();
    }
    return false;
  }
public:
  InstCombineFortifiedLibCalls(InstCombiner *IC) : IC(IC), NewInstruction(0) { }
  Instruction *NewInstruction;
};
} // end anonymous namespace
// Try to fold some different types of calls here.
// Currently we're only working with the checking functions: memcpy_chk,
// mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,
// strcat_chk and strncat_chk.
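// E.g. (a sketch) a call like __memcpy_chk(dst, src, len, object_size) can
// drop its check when object_size is -1 (unknown, check disabled) or is a
// constant known to be >= len.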
Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const TargetData *TD) {
  if (CI->getCalledFunction() == 0) return 0;

  InstCombineFortifiedLibCalls Simplifier(this);
  Simplifier.fold(CI, TD);
  return Simplifier.NewInstruction;
}
static IntrinsicInst *FindInitTrampolineFromAlloca(Value *TrampMem) {
  // Strip off at most one level of pointer casts, looking for an alloca. This
  // is good enough in practice and simpler than handling any number of casts.
  Value *Underlying = TrampMem->stripPointerCasts();
  if (Underlying != TrampMem &&
      (!Underlying->hasOneUse() || *Underlying->use_begin() != TrampMem))
    return 0;
  if (!isa<AllocaInst>(Underlying))
    return 0;

  IntrinsicInst *InitTrampoline = 0;
  for (Value::use_iterator I = TrampMem->use_begin(), E = TrampMem->use_end();
       I != E; I++) {
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(*I);
    if (!II)
      return 0;
    if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
      if (InitTrampoline)
        // More than one init_trampoline writes to this value. Give up.
        return 0;
      InitTrampoline = II;
      continue;
    }
    if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
      // Allow any number of calls to adjust.trampoline.
      continue;
    return 0;
  }

  // No call to init.trampoline found.
  if (!InitTrampoline)
    return 0;

  // Check that the alloca is being used in the expected way.
  if (InitTrampoline->getOperand(0) != TrampMem)
    return 0;

  return InitTrampoline;
}
static IntrinsicInst *FindInitTrampolineFromBB(IntrinsicInst *AdjustTramp,
                                               Value *TrampMem) {
  // Visit all the previous instructions in the basic block, and try to find
  // an init.trampoline which has a direct path to the adjust.trampoline.
  for (BasicBlock::iterator I = AdjustTramp,
       E = AdjustTramp->getParent()->begin(); I != E; ) {
    Instruction *Inst = --I;
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
      if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
          II->getOperand(0) == TrampMem)
        return II;
    if (Inst->mayWriteToMemory())
      return 0;
  }
  return 0;
}
// Given a call to llvm.adjust.trampoline, find and return the corresponding
// call to llvm.init.trampoline if the call to the trampoline can be optimized
// to a direct call to a function. Otherwise return NULL.
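//
// The pattern being matched looks roughly like this (a sketch):
//   %tramp = alloca [10 x i8], align 4
//   %tp = getelementptr [10 x i8]* %tramp, i32 0, i32 0
//   call void @llvm.init.trampoline(i8* %tp, i8* bitcast (void (i8*)* @f
//       to i8*), i8* %nest)
//   %p = call i8* @llvm.adjust.trampoline(i8* %tp)
// so that calls through %p can become direct calls to @f with %nest spliced
// into the argument list.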
static IntrinsicInst *FindInitTrampoline(Value *Callee) {
  Callee = Callee->stripPointerCasts();
  IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
  if (!AdjustTramp ||
      AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)
    return 0;

  Value *TrampMem = AdjustTramp->getOperand(0);

  if (IntrinsicInst *IT = FindInitTrampolineFromAlloca(TrampMem))
    return IT;
  if (IntrinsicInst *IT = FindInitTrampolineFromBB(AdjustTramp, TrampMem))
    return IT;
  return 0;
}
// visitCallSite - Improvements for call and invoke instructions.
//
Instruction *InstCombiner::visitCallSite(CallSite CS) {
  bool Changed = false;

  // If the callee is a pointer to a function, attempt to move any casts to
  // the arguments of the call/invoke.
  Value *Callee = CS.getCalledValue();
  if (!isa<Function>(Callee) && transformConstExprCastCall(CS))
    return 0;
  if (Function *CalleeF = dyn_cast<Function>(Callee))
    // If the call and callee calling conventions don't match, this call must
    // be unreachable, as the call is undefined.
    if (CalleeF->getCallingConv() != CS.getCallingConv() &&
        // Only do this for calls to a function with a body. A prototype may
        // not actually end up matching the implementation's calling conv for
        // a variety of reasons (e.g. it may be written in assembly).
        !CalleeF->isDeclaration()) {
      Instruction *OldCall = CS.getInstruction();
      new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                    UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                    OldCall);
      // If OldCall does not return void then replaceAllUsesWith undef.
      // This allows ValueHandlers and custom metadata to adjust themselves.
      if (!OldCall->getType()->isVoidTy())
        ReplaceInstUsesWith(*OldCall, UndefValue::get(OldCall->getType()));
      if (isa<CallInst>(OldCall))
        return EraseInstFromFunction(*OldCall);

      // We cannot remove an invoke, because it would change the CFG, just
      // change the callee to a null pointer.
      cast<InvokeInst>(OldCall)->setCalledFunction(
                                    Constant::getNullValue(CalleeF->getType()));
      return 0;
    }
  if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
    // This instruction is not reachable, just remove it. We insert a store to
    // undef so that we know that this code is not reachable, despite the fact
    // that we can't modify the CFG here.
    new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                  UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                  CS.getInstruction());

    // If CS does not return void then replaceAllUsesWith undef.
    // This allows ValueHandlers and custom metadata to adjust themselves.
    if (!CS.getInstruction()->getType()->isVoidTy())
      ReplaceInstUsesWith(*CS.getInstruction(),
                          UndefValue::get(CS.getInstruction()->getType()));

    if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
      // Don't break the CFG, insert a dummy cond branch.
      BranchInst::Create(II->getNormalDest(), II->getUnwindDest(),
                         ConstantInt::getTrue(Callee->getContext()), II);
    }
    return EraseInstFromFunction(*CS.getInstruction());
  }

  if (IntrinsicInst *II = FindInitTrampoline(Callee))
    return transformCallThroughTrampoline(CS, II);
  PointerType *PTy = cast<PointerType>(Callee->getType());
  FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  if (FTy->isVarArg()) {
    int ix = FTy->getNumParams();
    // See if we can optimize any arguments passed through the varargs area of
    // the call.
    for (CallSite::arg_iterator I = CS.arg_begin()+FTy->getNumParams(),
           E = CS.arg_end(); I != E; ++I, ++ix) {
      CastInst *CI = dyn_cast<CastInst>(*I);
      if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
        *I = CI->getOperand(0);
        Changed = true;
      }
    }
  }

  if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
    // Inline asm calls cannot throw - mark them 'nounwind'.
    CS.setDoesNotThrow();
    Changed = true;
  }

  // Try to optimize the call if possible; we require TargetData for most of
  // this. None of these calls are seen as possibly dead, so go ahead and
  // delete the instruction now.
  if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
    Instruction *I = tryOptimizeCall(CI, TD);
    // If we changed something, return the result; otherwise let the
    // fallthrough check run.
    if (I) return EraseInstFromFunction(*I);
  }

  return Changed ? CS.getInstruction() : 0;
}
// transformConstExprCastCall - If the callee is a constexpr cast of a function,
// attempt to move the cast to the arguments of the call/invoke.
//
bool InstCombiner::transformConstExprCastCall(CallSite CS) {
  Function *Callee =
    dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
  if (Callee == 0)
    return false;
  Instruction *Caller = CS.getInstruction();
  const AttrListPtr &CallerPAL = CS.getAttributes();

  // Okay, this is a cast from a function to a different type. Unless doing so
  // would cause a type conversion of one of our arguments, change this call
  // to be a direct call with arguments cast to the appropriate types.
  //
  FunctionType *FT = Callee->getFunctionType();
  Type *OldRetTy = Caller->getType();
  Type *NewRetTy = FT->getReturnType();

  if (NewRetTy->isStructTy())
    return false; // TODO: Handle multiple return values.
  // Check to see if we are changing the return type...
  if (OldRetTy != NewRetTy) {
    if (Callee->isDeclaration() &&
        // Conversion is ok if changing from one pointer type to another or
        // from a pointer to an integer of the same size.
        !((OldRetTy->isPointerTy() || !TD ||
           OldRetTy == TD->getIntPtrType(Caller->getContext())) &&
          (NewRetTy->isPointerTy() || !TD ||
           NewRetTy == TD->getIntPtrType(Caller->getContext()))))
      return false;   // Cannot transform this return value.

    if (!Caller->use_empty() &&
        // void -> non-void is handled specially
        !NewRetTy->isVoidTy() && !CastInst::isCastable(NewRetTy, OldRetTy))
      return false;   // Cannot transform this return value.

    if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
      Attributes RAttrs = CallerPAL.getRetAttributes();
      if (RAttrs & Attribute::typeIncompatible(NewRetTy))
        return false;   // Attribute not compatible with transformed value.
    }

    // If the callsite is an invoke instruction, and the return value is used
    // by a PHI node in a successor, we cannot change the return type of the
    // call because there is no place to put the cast instruction (without
    // breaking the critical edge). Bail out in this case.
    if (!Caller->use_empty())
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
        for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
             UI != E; ++UI)
          if (PHINode *PN = dyn_cast<PHINode>(*UI))
            if (PN->getParent() == II->getNormalDest() ||
                PN->getParent() == II->getUnwindDest())
              return false;
  }
  unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  CallSite::arg_iterator AI = CS.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);
    Type *ActTy = (*AI)->getType();

    if (!CastInst::isCastable(ActTy, ParamTy))
      return false;   // Cannot transform this parameter value.

    Attributes Attrs = CallerPAL.getParamAttributes(i + 1);
    if (Attrs & Attribute::typeIncompatible(ParamTy))
      return false;   // Attribute not compatible with transformed value.

    // If the parameter is passed as a byval argument, then we have to have a
    // sized type and the sized type has to have the same size as the old type.
    if (ParamTy != ActTy && (Attrs & Attribute::ByVal)) {
      PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
      if (ParamPTy == 0 || !ParamPTy->getElementType()->isSized() || TD == 0)
        return false;

      Type *CurElTy = cast<PointerType>(ActTy)->getElementType();
      if (TD->getTypeAllocSize(CurElTy) !=
          TD->getTypeAllocSize(ParamPTy->getElementType()))
        return false;
    }

    // Converting from one pointer type to another or between a pointer and an
    // integer of the same size is safe even if we do not have a body.
    bool isConvertible = ActTy == ParamTy ||
      (TD && ((ParamTy->isPointerTy() ||
               ParamTy == TD->getIntPtrType(Caller->getContext())) &&
              (ActTy->isPointerTy() ||
               ActTy == TD->getIntPtrType(Caller->getContext()))));
    if (Callee->isDeclaration() && !isConvertible) return false;
  }
  if (Callee->isDeclaration()) {
    // Do not delete arguments unless we have a function body.
    if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
      return false;

    // If the callee is just a declaration, don't change the varargsness of
    // the call. We don't want to introduce a varargs call where one doesn't
    // already exist.
    PointerType *APTy = cast<PointerType>(CS.getCalledValue()->getType());
    if (FT->isVarArg()!=cast<FunctionType>(APTy->getElementType())->isVarArg())
      return false;

    // If both the callee and the cast type are varargs, we still have to make
    // sure the number of fixed parameters are the same or we have the same
    // ABI issues as if we introduce a varargs call.
    if (FT->isVarArg() &&
        cast<FunctionType>(APTy->getElementType())->isVarArg() &&
        FT->getNumParams() !=
        cast<FunctionType>(APTy->getElementType())->getNumParams())
      return false;
  }

  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      !CallerPAL.isEmpty())
    // In this case we have more arguments than the new function type, but we
    // won't be dropping them. Check that these extra arguments have
    // attributes that are compatible with being a vararg call argument.
    for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
      if (CallerPAL.getSlot(i - 1).Index <= FT->getNumParams())
        break;
      Attributes PAttrs = CallerPAL.getSlot(i - 1).Attrs;
      if (PAttrs & Attribute::VarArgsIncompatible)
        return false;
    }
  // Okay, we decided that this is a safe thing to do: go ahead and start
  // inserting cast instructions as necessary.
  std::vector<Value*> Args;
  Args.reserve(NumActualArgs);
  SmallVector<AttributeWithIndex, 8> attrVec;
  attrVec.reserve(NumCommonArgs);

  // Get any return attributes.
  Attributes RAttrs = CallerPAL.getRetAttributes();

  // If the return value is not being used, the type may not be compatible
  // with the existing attributes. Wipe out any problematic attributes.
  RAttrs &= ~Attribute::typeIncompatible(NewRetTy);

  // Add the new return attributes.
  if (RAttrs)
    attrVec.push_back(AttributeWithIndex::get(0, RAttrs));

  AI = CS.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);
    if ((*AI)->getType() == ParamTy) {
      Args.push_back(*AI);
    } else {
      Instruction::CastOps opcode = CastInst::getCastOpcode(*AI,
          false, ParamTy, false);
      Args.push_back(Builder->CreateCast(opcode, *AI, ParamTy));
    }

    // Add any parameter attributes.
    if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
      attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
  }
  // If the function takes more arguments than the call was taking, add them
  // now.
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
    Args.push_back(Constant::getNullValue(FT->getParamType(i)));

  // If we are removing arguments to the function, emit an obnoxious warning.
  if (FT->getNumParams() < NumActualArgs) {
    if (!FT->isVarArg()) {
      errs() << "WARNING: While resolving call to function '"
             << Callee->getName() << "' arguments were dropped!\n";
    } else {
      // Add all of the arguments in their promoted form to the arg list.
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        Type *PTy = getPromotedType((*AI)->getType());
        if (PTy != (*AI)->getType()) {
          // Must promote to pass through va_arg area!
          Instruction::CastOps opcode =
            CastInst::getCastOpcode(*AI, false, PTy, false);
          Args.push_back(Builder->CreateCast(opcode, *AI, PTy));
        } else {
          Args.push_back(*AI);
        }

        // Add any parameter attributes.
        if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
          attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
      }
    }
  }
  if (Attributes FnAttrs = CallerPAL.getFnAttributes())
    attrVec.push_back(AttributeWithIndex::get(~0, FnAttrs));

  if (NewRetTy->isVoidTy())
    Caller->setName("");   // Void type should not have a name.

  const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec.begin(),
                                                     attrVec.end());

  Instruction *NC;
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NC = Builder->CreateInvoke(Callee, II->getNormalDest(),
                               II->getUnwindDest(), Args);
    NC->takeName(II);
    cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
    cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
  } else {
    CallInst *CI = cast<CallInst>(Caller);
    NC = Builder->CreateCall(Callee, Args);
    NC->takeName(CI);
    if (CI->isTailCall())
      cast<CallInst>(NC)->setTailCall();
    cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
    cast<CallInst>(NC)->setAttributes(NewCallerPAL);
  }
  // Insert a cast of the return type as necessary.
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      Instruction::CastOps opcode =
        CastInst::getCastOpcode(NC, false, OldRetTy, false);
      NV = NC = CastInst::Create(opcode, NC, OldRetTy);
      NC->setDebugLoc(Caller->getDebugLoc());

      // If this is an invoke instruction, we should insert it after the first
      // non-phi instruction in the normal successor block.
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        BasicBlock::iterator I = II->getNormalDest()->getFirstInsertionPt();
        InsertNewInstBefore(NC, *I);
      } else {
        // Otherwise, it's a call, just insert cast right after the call.
        InsertNewInstBefore(NC, *Caller);
      }
      Worklist.AddUsersToWorkList(*Caller);
    } else {
      NV = UndefValue::get(Caller->getType());
    }
  }

  if (!Caller->use_empty())
    ReplaceInstUsesWith(*Caller, NV);

  EraseInstFromFunction(*Caller);
  return true;
}
// transformCallThroughTrampoline - Turn a call to a function created by
// the init_trampoline / adjust_trampoline intrinsic pair into a direct call
// to the underlying function.
//
Instruction *
InstCombiner::transformCallThroughTrampoline(CallSite CS,
                                             IntrinsicInst *Tramp) {
  Value *Callee = CS.getCalledValue();
  PointerType *PTy = cast<PointerType>(Callee->getType());
  FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  const AttrListPtr &Attrs = CS.getAttributes();

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return 0;

  assert(Tramp &&
         "transformCallThroughTrampoline called with incorrect CallSite.");
  Function *NestF =
    cast<Function>(Tramp->getArgOperand(1)->stripPointerCasts());
  PointerType *NestFPTy = cast<PointerType>(NestF->getType());
  FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());

  const AttrListPtr &NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestIdx = 1;
    Type *NestTy = 0;
    Attributes NestAttr = Attribute::None;

    // Look for a parameter marked with the 'nest' attribute.
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
         E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
      if (NestAttrs.paramHasAttr(NestIdx, Attribute::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = NestAttrs.getParamAttributes(NestIdx);
        break;
      }
    if (NestTy) {
      Instruction *Caller = CS.getInstruction();
      std::vector<Value*> NewArgs;
      NewArgs.reserve(unsigned(CS.arg_end()-CS.arg_begin())+1);

      SmallVector<AttributeWithIndex, 8> NewAttrs;
      NewAttrs.reserve(Attrs.getNumSlots() + 1);

      // Insert the nest argument into the call argument list, which may
      // mean appending it. Likewise for attributes.

      // Add any result attributes.
      if (Attributes Attr = Attrs.getRetAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(0, Attr));

      {
        unsigned Idx = 1;
        CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
        do {
          if (Idx == NestIdx) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp->getArgOperand(2);
            if (NestVal->getType() != NestTy)
              NestVal = Builder->CreateBitCast(NestVal, NestTy, "nest");
            NewArgs.push_back(NestVal);
            NewAttrs.push_back(AttributeWithIndex::get(NestIdx, NestAttr));
          }

          if (I == E)
            break;

          // Add the original argument and attributes.
          NewArgs.push_back(*I);
          if (Attributes Attr = Attrs.getParamAttributes(Idx))
            NewAttrs.push_back
              (AttributeWithIndex::get(Idx + (Idx >= NestIdx), Attr));

          ++Idx, ++I;
        } while (1);
      }
      // Add any function attributes.
      if (Attributes Attr = Attrs.getFnAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(~0, Attr));

      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.

      std::vector<Type*> NewTypes;
      NewTypes.reserve(FTy->getNumParams()+1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned Idx = 1;
        FunctionType::param_iterator I = FTy->param_begin(),
          E = FTy->param_end();

        do {
          if (Idx == NestIdx)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++Idx, ++I;
        } while (1);
      }
      // Replace the trampoline call with a direct call. Let the generic
      // code sort out any function type mismatches.
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                               FTy->isVarArg());
      Constant *NewCallee =
        NestF->getType() == PointerType::getUnqual(NewFTy) ?
        NestF : ConstantExpr::getBitCast(NestF,
                                         PointerType::getUnqual(NewFTy));
      const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs.begin(),
                                                   NewAttrs.end());

      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        NewCaller = InvokeInst::Create(NewCallee,
                                       II->getNormalDest(), II->getUnwindDest(),
                                       NewArgs);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewCallee, NewArgs);
        if (cast<CallInst>(Caller)->isTailCall())
          cast<CallInst>(NewCaller)->setTailCall();
        cast<CallInst>(NewCaller)->
          setCallingConv(cast<CallInst>(Caller)->getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }

      return NewCaller;
    }
  }
  // Replace the trampoline call with a direct call. Since there is no 'nest'
  // parameter, there is no need to adjust the argument list. Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee =
    NestF->getType() == PTy ? NestF :
                              ConstantExpr::getBitCast(NestF, PTy);
  CS.setCalledFunction(NewCallee);
  return CS.getInstruction();
}