//===- InstCombineCalls.cpp -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall and visitInvoke functions.
//
//===----------------------------------------------------------------------===//
#include "InstCombine.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

/// getPromotedType - Return the specified type promoted as it would be to pass
/// through a va_arg area.
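/// For example, i8 and i16 are promoted to i32, mirroring C's default
/// argument promotions for variadic calls.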
static Type *getPromotedType(Type *Ty) {
  if (IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}

Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
  unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), TD);
  unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), TD);
  unsigned MinAlign = std::min(DstAlign, SrcAlign);
  unsigned CopyAlign = MI->getAlignment();

  if (CopyAlign < MinAlign) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      MinAlign, false));
    return MI;
  }

  // If the MemCpyInst length is 1/2/4/8 bytes then replace the memcpy with a
  // load/store.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getArgOperand(2));
  if (MemOpLength == 0) return 0;

  // Source and destination pointer types are always "i8*" for the intrinsic.
  // See if the size is something we can handle with a single primitive
  // load/store.  A single load+store correctly handles overlapping memory in
  // the memmove case.
  unsigned Size = MemOpLength->getZExtValue();
  if (Size == 0) return MI;  // Delete this mem transfer.

  if (Size > 8 || (Size&(Size-1)))
    return 0;  // If not 1/2/4/8 bytes, exit.
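
  // For example (illustrative IR), an 8-byte copy such as
  //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 8, i32 4, i1 false)
  // can become a single i64 load from %s followed by an i64 store to %d.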

  // Use an integer load+store unless we can find something better.
  unsigned SrcAddrSp =
    cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
  unsigned DstAddrSp =
    cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();

  IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
  Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
  Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);

  // Memcpy forces the use of i8* for the source and destination.  That means
  // that if you're using memcpy to move one double around, you'll get a cast
  // from double* to i8*.  We'd much rather use a double load+store here than
  // an i64 load+store, because this improves the odds that the source or
  // dest address will be promotable.  See if we can find a better type than
  // the integer datatype.
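  // For instance, if the destination is really a double* that was bitcast to
  // i8*, an 8-byte copy is better expressed as a double load+store.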
  Value *StrippedDest = MI->getArgOperand(0)->stripPointerCasts();
  if (StrippedDest != MI->getArgOperand(0)) {
    Type *SrcETy = cast<PointerType>(StrippedDest->getType())
                                    ->getElementType();
    if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
      // The SrcETy might be something like {{{double}}} or [1 x double].  Rip
      // down through these levels if so.
      while (!SrcETy->isSingleValueType()) {
        if (StructType *STy = dyn_cast<StructType>(SrcETy)) {
          if (STy->getNumElements() == 1)
            SrcETy = STy->getElementType(0);
          else
            break;
        } else if (ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
          if (ATy->getNumElements() == 1)
            SrcETy = ATy->getElementType();
          else
            break;
        } else
          break;
      }

      if (SrcETy->isSingleValueType()) {
        NewSrcPtrTy = PointerType::get(SrcETy, SrcAddrSp);
        NewDstPtrTy = PointerType::get(SrcETy, DstAddrSp);
      }
    }
  }

  // If the memcpy/memmove provides better alignment info than we can
  // infer, use it.
  SrcAlign = std::max(SrcAlign, CopyAlign);
  DstAlign = std::max(DstAlign, CopyAlign);

  Value *Src = Builder->CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
  Value *Dest = Builder->CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
  LoadInst *L = Builder->CreateLoad(Src, MI->isVolatile());
  L->setAlignment(SrcAlign);
  StoreInst *S = Builder->CreateStore(L, Dest, MI->isVolatile());
  S->setAlignment(DstAlign);

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setArgOperand(2, Constant::getNullValue(MemOpLength->getType()));
  return MI;
}

Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
  unsigned Alignment = getKnownAlignment(MI->getDest(), TD);
  if (MI->getAlignment() < Alignment) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      Alignment, false));
    return MI;
  }

  // Extract the length, alignment, and fill value if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return 0;
  uint64_t Len = LenC->getZExtValue();
  Alignment = MI->getAlignment();

  // If the length is zero, this is a no-op.
  if (Len == 0) return MI;  // memset(d,c,0,a) -> noop

  // memset(s,c,n) -> store s, c  (for n=1,2,4,8)
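  // For example (illustrative IR), memset(%p, i8 1, i32 4) becomes
  //   store i32 16843009, i32* %p    ; 16843009 == 0x01010101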
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.

    Value *Dest = MI->getDest();
    unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
    Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
    Dest = Builder->CreateBitCast(Dest, NewDstPtrTy);

    // For memset, alignment 0 and alignment 1 are equivalent, but a store
    // treats 0 as the type's ABI alignment, so use 1 explicitly.
    if (Alignment == 0) Alignment = 1;

    // Extract the fill value and store.
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
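    // Multiplying by 0x0101010101010101 replicates the fill byte into every
    // byte, e.g. 0xAB becomes 0xABABABABABABABAB; constructing the ITy
    // constant then truncates to the low Len bytes.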
    StoreInst *S = Builder->CreateStore(ConstantInt::get(ITy, Fill), Dest,
                                        MI->isVolatile());
    S->setAlignment(Alignment);

    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return 0;
}

/// visitCallInst - CallInst simplification.  This mostly only handles folding
/// of intrinsic instructions.  For normal calls, it allows visitCallSite to do
/// the heavy lifting.
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
  if (isFreeCall(&CI))
    return visitFree(CI);
  if (extractMallocCall(&CI) || extractCallocCall(&CI))
    return visitMalloc(CI);

  // If the caller function is nounwind, mark the call as nounwind, even if the
  // call itself is not; if the call cannot throw, mark it 'nounwind'.
  if (CI.getParent()->getParent()->doesNotThrow() &&
      !CI.doesNotThrow()) {
    CI.setDoesNotThrow();
    return &CI;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallSite(&CI);

  // Intrinsics cannot occur in an invoke, so handle them here instead of in
  // visitCallSite.
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue())
        return EraseInstFromFunction(CI);

      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
        if (CI->getZExtValue() == 1) {
          // Replace the instruction with just byte operations.  We would
          // transform other cases to loads/stores, but we don't know if
          // alignment is sufficient.
        }
    }

    // No other transformations apply to volatile transfers.
    if (MI->isVolatile())
      return 0;

    // If we have a memmove and the source operation is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
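    // For example, a memmove whose source is a constant global like @.str
    // cannot overlap its destination, so it can safely become a memcpy.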
    if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = CI.getParent()->getParent()->getParent();
          Intrinsic::ID MemCpyID = Intrinsic::memcpy;
          Type *Tys[3] = { CI.getArgOperand(0)->getType(),
                           CI.getArgOperand(1)->getType(),
                           CI.getArgOperand(2)->getType() };
          CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys));
          Changed = true;
        }
    }

    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
      // memmove(x,x,size) -> noop.
      if (MTI->getSource() == MTI->getDest())
        return EraseInstFromFunction(CI);
    }

    // If we can determine a pointer alignment that is bigger than currently
    // set, update the alignment.
    if (isa<MemTransferInst>(MI)) {
      if (Instruction *I = SimplifyMemTransfer(MI))
        return I;
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
      if (Instruction *I = SimplifyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  }

  switch (II->getIntrinsicID()) {
  default: break;
  case Intrinsic::objectsize: {
    // We need target data for just about everything so depend on it.
    if (!TD) return 0;

    Type *ReturnTy = CI.getType();
    uint64_t DontKnow = II->getArgOperand(1) == Builder->getTrue() ? 0 : -1ULL;
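
    // For example, with a 10-byte global as the base object, objectsize of a
    // pointer 4 bytes into it folds to 6 (Size - Offset); the second argument
    // selects whether 0 or -1 is returned when the size is unknown.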
    // Get to the real allocated thing and offset as fast as possible.
    Value *Op1 = II->getArgOperand(0)->stripPointerCasts();

    uint64_t Offset = 0;
    uint64_t Size = -1ULL;

    // Try to look through constant GEPs.
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op1)) {
      if (!GEP->hasAllConstantIndices()) return 0;

      // Get the current byte offset into the thing.  Use the original
      // operand in case we're looking through a bitcast.
      SmallVector<Value*, 8> Ops(GEP->idx_begin(), GEP->idx_end());
      if (!GEP->getPointerOperandType()->isPointerTy())
        return 0;
      Offset = TD->getIndexedOffset(GEP->getPointerOperandType(), Ops);

      Op1 = GEP->getPointerOperand()->stripPointerCasts();

      // Make sure we're not a constant offset from an external
      // global.
      if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op1))
        if (!GV->hasDefinitiveInitializer()) return 0;
    }

    // If we've stripped down to a single global variable that we
    // can know the size of, then just return that.
    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op1)) {
      if (GV->hasDefinitiveInitializer()) {
        Constant *C = GV->getInitializer();
        Size = TD->getTypeAllocSize(C->getType());
      } else {
        // Can't determine size of the GV.
        Constant *RetVal = ConstantInt::get(ReturnTy, DontKnow);
        return ReplaceInstUsesWith(CI, RetVal);
      }
    } else if (AllocaInst *AI = dyn_cast<AllocaInst>(Op1)) {
      // Get alloca size.
      if (AI->getAllocatedType()->isSized()) {
        Size = TD->getTypeAllocSize(AI->getAllocatedType());
        if (AI->isArrayAllocation()) {
          const ConstantInt *C = dyn_cast<ConstantInt>(AI->getArraySize());
          if (!C) break;
          Size *= C->getZExtValue();
        }
      }
    } else if (CallInst *MI = extractMallocCall(Op1)) {
      // Get allocation size.
      Value *Arg = MI->getArgOperand(0);
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg))
        Size = CI->getZExtValue();
    } else if (CallInst *MI = extractCallocCall(Op1)) {
      // Get allocation size.
      Value *Arg1 = MI->getArgOperand(0);
      Value *Arg2 = MI->getArgOperand(1);
      if (ConstantInt *CI1 = dyn_cast<ConstantInt>(Arg1))
        if (ConstantInt *CI2 = dyn_cast<ConstantInt>(Arg2)) {
          bool overflow;
          APInt SizeAP = CI1->getValue().umul_ov(CI2->getValue(), overflow);
          if (!overflow)
            Size = SizeAP.getZExtValue();
          else
            return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, DontKnow));
        }
    } else {
      // Do not return "I don't know" here.  Later optimization passes could
      // make it possible to evaluate objectsize to a constant.
      return 0;
    }

    if (Size < Offset) {
      // Out of bound reference?  Negative index normalized to large
      // index?  Just return "I don't know".
      return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, DontKnow));
    }

    return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, Size-Offset));
  }
  case Intrinsic::bswap:
    // bswap(bswap(x)) -> x
    if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getArgOperand(0)))
      if (Operand->getIntrinsicID() == Intrinsic::bswap)
        return ReplaceInstUsesWith(CI, Operand->getArgOperand(0));

    // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
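    // For example, for x:i32 truncated to i16, the low two bytes of bswap(x)
    // are x's top two bytes reversed, so the fold yields trunc(lshr(x, 16)).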
    if (TruncInst *TI = dyn_cast<TruncInst>(II->getArgOperand(0))) {
      if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(TI->getOperand(0)))
        if (Operand->getIntrinsicID() == Intrinsic::bswap) {
          unsigned C = Operand->getType()->getPrimitiveSizeInBits() -
                       TI->getType()->getPrimitiveSizeInBits();
          Value *CV = ConstantInt::get(Operand->getType(), C);
          Value *V = Builder->CreateLShr(Operand->getArgOperand(0), CV);
          return new TruncInst(V, TI->getType());
        }
    }
    break;

  case Intrinsic::powi:
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // powi(x, 0) -> 1.0
      if (Power->isZero())
        return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
      // powi(x, 1) -> x
      if (Power->isOne())
        return ReplaceInstUsesWith(CI, II->getArgOperand(0));
      // powi(x, -1) -> 1/x
      if (Power->isAllOnesValue())
        return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
                                          II->getArgOperand(0));
    }
    break;

  case Intrinsic::cttz: {
    // If all bits below the first known one are known zero,
    // this value is constant.
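    // For example, a value known to have the form xxx...x1000 (low three bits
    // zero, bit 3 one) has cttz == 3 regardless of the unknown high bits.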
    IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
    // FIXME: Try to simplify vectors of integers.
    if (!IT) break;
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getArgOperand(0), KnownZero, KnownOne);
    unsigned TrailingZeros = KnownOne.countTrailingZeros();
    APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, TrailingZeros)));
    }
    break;

  case Intrinsic::ctlz: {
    // If all bits above the first known one are known zero,
    // this value is constant.
    IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
    // FIXME: Try to simplify vectors of integers.
    if (!IT) break;
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getArgOperand(0), KnownZero, KnownOne);
    unsigned LeadingZeros = KnownOne.countLeadingZeros();
    APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, LeadingZeros)));
    }
    break;

  case Intrinsic::uadd_with_overflow: {
    Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
    IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt LHSKnownZero(BitWidth, 0);
    APInt LHSKnownOne(BitWidth, 0);
    ComputeMaskedBits(LHS, LHSKnownZero, LHSKnownOne);
    bool LHSKnownNegative = LHSKnownOne[BitWidth - 1];
    bool LHSKnownPositive = LHSKnownZero[BitWidth - 1];

    if (LHSKnownNegative || LHSKnownPositive) {
      APInt RHSKnownZero(BitWidth, 0);
      APInt RHSKnownOne(BitWidth, 0);
      ComputeMaskedBits(RHS, RHSKnownZero, RHSKnownOne);
      bool RHSKnownNegative = RHSKnownOne[BitWidth - 1];
      bool RHSKnownPositive = RHSKnownZero[BitWidth - 1];
      if (LHSKnownNegative && RHSKnownNegative) {
        // The sign bit is set in both cases: this MUST overflow.
        // Create a simple add instruction, and insert it into the struct.
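        // (Both operands are >= 2^(BitWidth-1), so their sum is >= 2^BitWidth
        // and must wrap in unsigned arithmetic.)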
        Value *Add = Builder->CreateAdd(LHS, RHS);
        Add->takeName(&CI);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getTrue(II->getContext())
        };
        StructType *ST = cast<StructType>(II->getType());
        Constant *Struct = ConstantStruct::get(ST, V);
        return InsertValueInst::Create(Struct, Add, 0);
      }

      if (LHSKnownPositive && RHSKnownPositive) {
        // The sign bit is clear in both cases: this CANNOT overflow.
        // Create a simple add instruction, and insert it into the struct.
        Value *Add = Builder->CreateNUWAdd(LHS, RHS);
        Add->takeName(&CI);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        StructType *ST = cast<StructType>(II->getType());
        Constant *Struct = ConstantStruct::get(ST, V);
        return InsertValueInst::Create(Struct, Add, 0);
      }
    }
  }
  // FALL THROUGH uadd into sadd
  case Intrinsic::sadd_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X + undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X + 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct =
          ConstantStruct::get(cast<StructType>(II->getType()), V);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;

  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
    // undef - X -> undef
    // X - undef -> undef
    if (isa<UndefValue>(II->getArgOperand(0)) ||
        isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X - 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct =
          ConstantStruct::get(cast<StructType>(II->getType()), V);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;

  case Intrinsic::umul_with_overflow: {
    Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
    unsigned BitWidth = cast<IntegerType>(LHS->getType())->getBitWidth();

    APInt LHSKnownZero(BitWidth, 0);
    APInt LHSKnownOne(BitWidth, 0);
    ComputeMaskedBits(LHS, LHSKnownZero, LHSKnownOne);
    APInt RHSKnownZero(BitWidth, 0);
    APInt RHSKnownOne(BitWidth, 0);
    ComputeMaskedBits(RHS, RHSKnownZero, RHSKnownOne);

    // Get the largest possible values for each operand.
    APInt LHSMax = ~LHSKnownZero;
    APInt RHSMax = ~RHSKnownZero;

    // If multiplying the maximum values does not overflow then we can turn
    // this into a plain NUW mul.
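    // For example, if the known-zero bits show both i32 operands fit in 16
    // bits, the product fits in 32 bits and can never wrap.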
    bool Overflow;
    LHSMax.umul_ov(RHSMax, Overflow);
    if (!Overflow) {
      Value *Mul = Builder->CreateNUWMul(LHS, RHS, "umul_with_overflow");
      Constant *V[] = {
        UndefValue::get(LHS->getType()),
        Builder->getFalse()
      };
      Constant *Struct = ConstantStruct::get(cast<StructType>(II->getType()),V);
      return InsertValueInst::Create(Struct, Mul, 0);
    }
  } // FALL THROUGH
  case Intrinsic::smul_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X * undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X * 0 -> {0, false}
      if (RHSI->isZero())
        return ReplaceInstUsesWith(CI, Constant::getNullValue(II->getType()));

      // X * 1 -> {X, false}
      if (RHSI->equalsInt(1)) {
        Constant *V[] = {
          UndefValue::get(II->getArgOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct =
          ConstantStruct::get(cast<StructType>(II->getType()), V);
        return InsertValueInst::Create(Struct, II->getArgOperand(0), 0);
      }
    }
    break;

  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
    // Turn PPC lvx -> load if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, TD) >= 16) {
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                          PointerType::getUnqual(II->getType()));
      return new LoadInst(Ptr);
    }
    break;
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, TD) >= 16) {
      Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(0)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
      return new StoreInst(II->getArgOperand(0), Ptr);
    }
    break;
  case Intrinsic::x86_sse_storeu_ps:
  case Intrinsic::x86_sse2_storeu_pd:
  case Intrinsic::x86_sse2_storeu_dq:
    // Turn X86 storeu -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, TD) >= 16) {
      Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(1)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), OpPtrTy);
      return new StoreInst(II->getArgOperand(1), Ptr);
    }
    break;

  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64: {
    // These intrinsics only demand the 0th element of their input vectors. If
    // we can simplify the input based on that, do so now.
    unsigned VWidth =
      cast<VectorType>(II->getArgOperand(0)->getType())->getNumElements();
    APInt DemandedElts(VWidth, 1);
    APInt UndefElts(VWidth, 0);
    if (Value *V = SimplifyDemandedVectorElts(II->getArgOperand(0),
                                              DemandedElts, UndefElts)) {
      II->setArgOperand(0, V);
      return II;
    }
    break;
  }

  case Intrinsic::x86_sse41_pmovsxbw:
  case Intrinsic::x86_sse41_pmovsxwd:
  case Intrinsic::x86_sse41_pmovsxdq:
  case Intrinsic::x86_sse41_pmovzxbw:
  case Intrinsic::x86_sse41_pmovzxwd:
  case Intrinsic::x86_sse41_pmovzxdq: {
    // pmov{s|z}x ignore the upper half of their input vectors.
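    // For example, pmovsxbw reads only the low 8 of its 16 input bytes and
    // sign-extends each one to a 16-bit element.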
    unsigned VWidth =
      cast<VectorType>(II->getArgOperand(0)->getType())->getNumElements();
    unsigned LowHalfElts = VWidth / 2;
    APInt InputDemandedElts(APInt::getBitsSet(VWidth, 0, LowHalfElts));
    APInt UndefElts(VWidth, 0);
    if (Value *TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0),
                                                 InputDemandedElts,
                                                 UndefElts)) {
      II->setArgOperand(0, TmpV);
      return II;
    }
    break;
  }

  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
    if (Constant *Mask = dyn_cast<Constant>(II->getArgOperand(2))) {
      assert(Mask->getType()->getVectorNumElements() == 16 &&
             "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        Constant *Elt = Mask->getAggregateElement(i);
        if (Elt == 0 ||
            !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
          AllEltsOk = false;
          break;
        }
      }

      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 = Builder->CreateBitCast(II->getArgOperand(0),
                                            Mask->getType());
        Value *Op1 = Builder->CreateBitCast(II->getArgOperand(1),
                                            Mask->getType());
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getAggregateElement(i)))
            continue;
          unsigned Idx =
            cast<ConstantInt>(Mask->getAggregateElement(i))->getZExtValue();
          Idx &= 31;  // Match the hardware behavior.
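          // Mask indices 0-15 select a byte of Op0 and 16-31 a byte of Op1,
          // matching vperm's view of the two inputs as one 32-byte vector.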

          if (ExtractedElts[Idx] == 0) {
            ExtractedElts[Idx] =
              Builder->CreateExtractElement(Idx < 16 ? Op0 : Op1,
                                            Builder->getInt32(Idx&15));
          }

          // Insert this value into the result vector.
          Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
                                                Builder->getInt32(i));
        }
        return CastInst::Create(Instruction::BitCast, Result, CI.getType());
      }
    }
    break;

  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane:
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    unsigned MemAlign = getKnownAlignment(II->getArgOperand(0), TD);
    unsigned AlignArg = II->getNumArgOperands() - 1;
    ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
    if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
      II->setArgOperand(AlignArg,
                        ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                         MemAlign, false));
      return II;
    }
    break;
  }

  case Intrinsic::arm_neon_vmulls:
  case Intrinsic::arm_neon_vmullu: {
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);

    // Handle mul by zero first:
    if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {
      return ReplaceInstUsesWith(CI, ConstantAggregateZero::get(II->getType()));
    }

    // Check for constant LHS & RHS - in this case we just simplify.
    bool Zext = (II->getIntrinsicID() == Intrinsic::arm_neon_vmullu);
    VectorType *NewVT = cast<VectorType>(II->getType());
    unsigned NewWidth = NewVT->getElementType()->getIntegerBitWidth();
    if (ConstantDataVector *CV0 = dyn_cast<ConstantDataVector>(Arg0)) {
      if (ConstantDataVector *CV1 = dyn_cast<ConstantDataVector>(Arg1)) {
        VectorType* VT = cast<VectorType>(CV0->getType());
        SmallVector<Constant*, 4> NewElems;
        for (unsigned i = 0; i < VT->getNumElements(); ++i) {
          APInt CV0E =
            (cast<ConstantInt>(CV0->getAggregateElement(i)))->getValue();
          CV0E = Zext ? CV0E.zext(NewWidth) : CV0E.sext(NewWidth);
          APInt CV1E =
            (cast<ConstantInt>(CV1->getAggregateElement(i)))->getValue();
          CV1E = Zext ? CV1E.zext(NewWidth) : CV1E.sext(NewWidth);
          NewElems.push_back(
            ConstantInt::get(NewVT->getElementType(), CV0E * CV1E));
        }
        return ReplaceInstUsesWith(CI, ConstantVector::get(NewElems));
      }

      // Couldn't simplify - canonicalize the constant to the RHS.
      std::swap(Arg0, Arg1);
    }

    // Handle mul by one:
    if (ConstantDataVector *CV1 = dyn_cast<ConstantDataVector>(Arg1)) {
      if (ConstantInt *Splat =
            dyn_cast_or_null<ConstantInt>(CV1->getSplatValue())) {
        if (Splat->isOne()) {
          if (Zext)
            return CastInst::CreateZExtOrBitCast(Arg0, II->getType());
          // else
          return CastInst::CreateSExtOrBitCast(Arg0, II->getType());
        }
      }
    }
    break;
  }

  case Intrinsic::stackrestore: {
    // If the save is right next to the restore, remove the restore.  This can
    // happen when variable allocas are DCE'd.
    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
      if (SS->getIntrinsicID() == Intrinsic::stacksave) {
        BasicBlock::iterator BI = SS;
        if (&*++BI == II)
          return EraseInstFromFunction(CI);
      }
    }

    // Scan down this block to see if there is another stack restore in the
    // same block without an intervening call/alloca.
    BasicBlock::iterator BI = II;
    TerminatorInst *TI = II->getParent()->getTerminator();
    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      if (isa<AllocaInst>(BI) || isMalloc(BI)) {
        CannotRemove = true;
        break;
      }
      if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
          // If there is a stackrestore below this one, remove this one.
          if (II->getIntrinsicID() == Intrinsic::stackrestore)
            return EraseInstFromFunction(CI);
          // Otherwise, ignore the intrinsic.
        } else {
          // If we found a non-intrinsic call, we can't remove the stack
          // restore.
          CannotRemove = true;
          break;
        }
      }
    }

    // If the stack restore is in a return, resume, or unwind block and if there
    // are no allocas or calls between the restore and the return, nuke the
    // restore.
    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
      return EraseInstFromFunction(CI);
    break;
  }
  }

  return visitCallSite(II);
}

// InvokeInst simplification
//
Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
  return visitCallSite(&II);
}

/// isSafeToEliminateVarargsCast - If this cast does not affect the value
/// passed through the varargs area, we can eliminate the use of the cast.
static bool isSafeToEliminateVarargsCast(const CallSite CS,
                                         const CastInst * const CI,
                                         const TargetData * const TD,
                                         const unsigned ix) {
  if (!CI->isLosslessCast())
    return false;

  // The size of ByVal arguments is derived from the type, so we
  // can't change to a type with a different size.  If the size were
  // passed explicitly we could avoid this check.
  if (!CS.isByValArgument(ix))
    return true;

  Type* SrcTy =
            cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
  Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
  if (!SrcTy->isSized() || !DstTy->isSized())
    return false;
  if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
    return false;
  return true;
}

namespace {
class InstCombineFortifiedLibCalls : public SimplifyFortifiedLibCalls {
  InstCombiner *IC;
protected:
  void replaceCall(Value *With) {
    NewInstruction = IC->ReplaceInstUsesWith(*CI, With);
  }
  bool isFoldable(unsigned SizeCIOp, unsigned SizeArgOp, bool isString) const {
    if (CI->getArgOperand(SizeCIOp) == CI->getArgOperand(SizeArgOp))
      return true;
    if (ConstantInt *SizeCI =
            dyn_cast<ConstantInt>(CI->getArgOperand(SizeCIOp))) {
      if (SizeCI->isAllOnesValue())
        return true;
      if (isString) {
        uint64_t Len = GetStringLength(CI->getArgOperand(SizeArgOp));
        // If the length is 0 we don't know how long it is and so we can't
        // remove the check.
        if (Len == 0) return false;
        return SizeCI->getZExtValue() >= Len;
      }
      if (ConstantInt *Arg =
              dyn_cast<ConstantInt>(CI->getArgOperand(SizeArgOp)))
        return SizeCI->getZExtValue() >= Arg->getZExtValue();
    }
    return false;
  }
public:
  InstCombineFortifiedLibCalls(InstCombiner *IC) : IC(IC), NewInstruction(0) { }
  Instruction *NewInstruction;
};
} // end anonymous namespace

// Try to fold some different types of calls here.
// Currently we're only working with the checking functions, memcpy_chk,
// mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,
// strcat_chk and strncat_chk.
Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const TargetData *TD) {
  if (CI->getCalledFunction() == 0) return 0;

  InstCombineFortifiedLibCalls Simplifier(this);
  Simplifier.fold(CI, TD);
  return Simplifier.NewInstruction;
}

static IntrinsicInst *FindInitTrampolineFromAlloca(Value *TrampMem) {
  // Strip off at most one level of pointer casts, looking for an alloca.  This
  // is good enough in practice and simpler than handling any number of casts.
  Value *Underlying = TrampMem->stripPointerCasts();
  if (Underlying != TrampMem &&
      (!Underlying->hasOneUse() || *Underlying->use_begin() != TrampMem))
    return 0;
  if (!isa<AllocaInst>(Underlying))
    return 0;

  IntrinsicInst *InitTrampoline = 0;
  for (Value::use_iterator I = TrampMem->use_begin(), E = TrampMem->use_end();
       I != E; I++) {
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(*I);
    if (!II)
      return 0;
    if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
      if (InitTrampoline)
        // More than one init_trampoline writes to this value.  Give up.
        return 0;
      InitTrampoline = II;
      continue;
    }
    if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
      // Allow any number of calls to adjust.trampoline.
      continue;
    return 0;
  }

  // No call to init.trampoline found.
  if (!InitTrampoline)
    return 0;

  // Check that the alloca is being used in the expected way.
  if (InitTrampoline->getOperand(0) != TrampMem)
    return 0;

  return InitTrampoline;
}

static IntrinsicInst *FindInitTrampolineFromBB(IntrinsicInst *AdjustTramp,
                                               Value *TrampMem) {
  // Visit all the previous instructions in the basic block, and try to find an
  // init.trampoline which has a direct path to the adjust.trampoline.
  for (BasicBlock::iterator I = AdjustTramp,
       E = AdjustTramp->getParent()->begin(); I != E; ) {
    Instruction *Inst = --I;
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
      if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
          II->getOperand(0) == TrampMem)
        return II;
    if (Inst->mayWriteToMemory())
      return 0;
  }
  return 0;
}

// Given a call to llvm.adjust.trampoline, find and return the corresponding
// call to llvm.init.trampoline if the call to the trampoline can be optimized
// to a direct call to a function.  Otherwise return NULL.
//
static IntrinsicInst *FindInitTrampoline(Value *Callee) {
  Callee = Callee->stripPointerCasts();
  IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
  if (!AdjustTramp ||
      AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)
    return 0;

  Value *TrampMem = AdjustTramp->getOperand(0);

  if (IntrinsicInst *IT = FindInitTrampolineFromAlloca(TrampMem))
    return IT;
  if (IntrinsicInst *IT = FindInitTrampolineFromBB(AdjustTramp, TrampMem))
    return IT;
  return 0;
}

// visitCallSite - Improvements for call and invoke instructions.
//
Instruction *InstCombiner::visitCallSite(CallSite CS) {
  bool Changed = false;

  // If the callee is a pointer to a function, attempt to move any casts to the
  // arguments of the call/invoke.
  Value *Callee = CS.getCalledValue();
  if (!isa<Function>(Callee) && transformConstExprCastCall(CS))
    return 0;

  if (Function *CalleeF = dyn_cast<Function>(Callee))
    // If the call and callee calling conventions don't match, this call must
    // be unreachable, as the call is undefined.
    if (CalleeF->getCallingConv() != CS.getCallingConv() &&
        // Only do this for calls to a function with a body.  A prototype may
        // not actually end up matching the implementation's calling conv for a
        // variety of reasons (e.g. it may be written in assembly).
        !CalleeF->isDeclaration()) {
      Instruction *OldCall = CS.getInstruction();
      new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                    UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                    OldCall);
      // If OldCall does not return void then replaceAllUsesWith undef.
      // This allows ValueHandlers and custom metadata to adjust themselves.
      if (!OldCall->getType()->isVoidTy())
        ReplaceInstUsesWith(*OldCall, UndefValue::get(OldCall->getType()));
      if (isa<CallInst>(OldCall))
        return EraseInstFromFunction(*OldCall);

      // We cannot remove an invoke, because it would change the CFG; just
      // change the callee to a null pointer.
      cast<InvokeInst>(OldCall)->setCalledFunction(
                                    Constant::getNullValue(CalleeF->getType()));
      return 0;
    }

  if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
    // This instruction is not reachable, just remove it.  We insert a store to
    // undef so that we know that this code is not reachable, despite the fact
    // that we can't modify the CFG here.
    new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                  UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                  CS.getInstruction());

    // If CS does not return void then replaceAllUsesWith undef.
    // This allows ValueHandlers and custom metadata to adjust themselves.
    if (!CS.getInstruction()->getType()->isVoidTy())
      ReplaceInstUsesWith(*CS.getInstruction(),
                          UndefValue::get(CS.getInstruction()->getType()));

    if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
      // Don't break the CFG, insert a dummy cond branch.
      BranchInst::Create(II->getNormalDest(), II->getUnwindDest(),
                         ConstantInt::getTrue(Callee->getContext()), II);
    }
    return EraseInstFromFunction(*CS.getInstruction());
  }

  if (IntrinsicInst *II = FindInitTrampoline(Callee))
    return transformCallThroughTrampoline(CS, II);

  PointerType *PTy = cast<PointerType>(Callee->getType());
  FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  if (FTy->isVarArg()) {
    int ix = FTy->getNumParams();
    // See if we can optimize any arguments passed through the varargs area of
    // the call.
    for (CallSite::arg_iterator I = CS.arg_begin()+FTy->getNumParams(),
           E = CS.arg_end(); I != E; ++I, ++ix) {
      CastInst *CI = dyn_cast<CastInst>(*I);
      if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
        *I = CI->getOperand(0);
        Changed = true;
      }
    }
  }

  if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
    // Inline asm calls cannot throw - mark them 'nounwind'.
    CS.setDoesNotThrow();
    Changed = true;
  }

  // Try to optimize the call if possible; we require TargetData for most of
  // this.  None of these calls are seen as possibly dead so go ahead and
  // delete the instruction now.
  if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
    Instruction *I = tryOptimizeCall(CI, TD);
    // If we changed something, return the result; otherwise let the
    // fallthrough checks run.
    if (I) return EraseInstFromFunction(*I);
  }

  return Changed ? CS.getInstruction() : 0;
}

// transformConstExprCastCall - If the callee is a constexpr cast of a function,
// attempt to move the cast to the arguments of the call/invoke.
//
bool InstCombiner::transformConstExprCastCall(CallSite CS) {
  Function *Callee =
    dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
  if (Callee == 0)
    return false;
  Instruction *Caller = CS.getInstruction();
  const AttrListPtr &CallerPAL = CS.getAttributes();

  // Okay, this is a cast from a function to a different type.  Unless doing so
  // would cause a type conversion of one of our arguments, change this call to
  // be a direct call with arguments casted to the appropriate types.
  //
  FunctionType *FT = Callee->getFunctionType();
  Type *OldRetTy = Caller->getType();
  Type *NewRetTy = FT->getReturnType();

  if (NewRetTy->isStructTy())
    return false; // TODO: Handle multiple return values.

  // Check to see if we are changing the return type...
  if (OldRetTy != NewRetTy) {
    if (Callee->isDeclaration() &&
        // Conversion is ok if changing from one pointer type to another or from
        // a pointer to an integer of the same size.
        !((OldRetTy->isPointerTy() || !TD ||
           OldRetTy == TD->getIntPtrType(Caller->getContext())) &&
          (NewRetTy->isPointerTy() || !TD ||
           NewRetTy == TD->getIntPtrType(Caller->getContext()))))
      return false;   // Cannot transform this return value.

    if (!Caller->use_empty() &&
        // void -> non-void is handled specially
        !NewRetTy->isVoidTy() && !CastInst::isCastable(NewRetTy, OldRetTy))
      return false;   // Cannot transform this return value.

    if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
      Attributes RAttrs = CallerPAL.getRetAttributes();
      if (RAttrs & Attribute::typeIncompatible(NewRetTy))
        return false;   // Attribute not compatible with transformed value.
    }

    // If the callsite is an invoke instruction, and the return value is used by
    // a PHI node in a successor, we cannot change the return type of the call
    // because there is no place to put the cast instruction (without breaking
    // the critical edge).  Bail out in this case.
    if (!Caller->use_empty())
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
        for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
             UI != E; ++UI)
          if (PHINode *PN = dyn_cast<PHINode>(*UI))
            if (PN->getParent() == II->getNormalDest() ||
                PN->getParent() == II->getUnwindDest())
              return false;
  }

  unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  CallSite::arg_iterator AI = CS.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);
    Type *ActTy = (*AI)->getType();

    if (!CastInst::isCastable(ActTy, ParamTy))
      return false;   // Cannot transform this parameter value.

    Attributes Attrs = CallerPAL.getParamAttributes(i + 1);
    if (Attrs & Attribute::typeIncompatible(ParamTy))
      return false;   // Attribute not compatible with transformed value.

    // If the parameter is passed as a byval argument, then we have to have a
    // sized type and the sized type has to have the same size as the old type.
    if (ParamTy != ActTy && (Attrs & Attribute::ByVal)) {
      PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
      if (ParamPTy == 0 || !ParamPTy->getElementType()->isSized() || TD == 0)
        return false;

      Type *CurElTy = cast<PointerType>(ActTy)->getElementType();
      if (TD->getTypeAllocSize(CurElTy) !=
          TD->getTypeAllocSize(ParamPTy->getElementType()))
        return false;
    }

    // Converting from one pointer type to another or between a pointer and an
    // integer of the same size is safe even if we do not have a body.
    bool isConvertible = ActTy == ParamTy ||
      (TD && ((ParamTy->isPointerTy() ||
               ParamTy == TD->getIntPtrType(Caller->getContext())) &&
              (ActTy->isPointerTy() ||
               ActTy == TD->getIntPtrType(Caller->getContext()))));
    if (Callee->isDeclaration() && !isConvertible) return false;
  }

  if (Callee->isDeclaration()) {
    // Do not delete arguments unless we have a function body.
    if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
      return false;

    // If the callee is just a declaration, don't change the varargsness of the
    // call.  We don't want to introduce a varargs call where one doesn't
    // already exist.
    PointerType *APTy = cast<PointerType>(CS.getCalledValue()->getType());
    if (FT->isVarArg()!=cast<FunctionType>(APTy->getElementType())->isVarArg())
      return false;

    // If both the callee and the cast type are varargs, we still have to make
    // sure the number of fixed parameters is the same, or we have the same
    // ABI issues as if we introduce a varargs call.
    if (FT->isVarArg() &&
        cast<FunctionType>(APTy->getElementType())->isVarArg() &&
        FT->getNumParams() !=
        cast<FunctionType>(APTy->getElementType())->getNumParams())
      return false;
  }

  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      !CallerPAL.isEmpty())
    // In this case we have more arguments than the new function type, but we
    // won't be dropping them.  Check that these extra arguments have attributes
    // that are compatible with being a vararg call argument.
    for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
      if (CallerPAL.getSlot(i - 1).Index <= FT->getNumParams())
        break;
      Attributes PAttrs = CallerPAL.getSlot(i - 1).Attrs;
      if (PAttrs & Attribute::VarArgsIncompatible)
        return false;
    }

  // Okay, we decided that this is a safe thing to do: go ahead and start
  // inserting cast instructions as necessary.
  std::vector<Value*> Args;
  Args.reserve(NumActualArgs);
  SmallVector<AttributeWithIndex, 8> attrVec;
  attrVec.reserve(NumCommonArgs);

  // Get any return attributes.
  Attributes RAttrs = CallerPAL.getRetAttributes();

  // If the return value is not being used, the type may not be compatible
  // with the existing attributes.  Wipe out any problematic attributes.
  RAttrs &= ~Attribute::typeIncompatible(NewRetTy);

  // Add the new return attributes.
  if (RAttrs)
    attrVec.push_back(AttributeWithIndex::get(0, RAttrs));

  AI = CS.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);
    if ((*AI)->getType() == ParamTy) {
      Args.push_back(*AI);
    } else {
      Instruction::CastOps opcode = CastInst::getCastOpcode(*AI,
          false, ParamTy, false);
      Args.push_back(Builder->CreateCast(opcode, *AI, ParamTy));
    }

    // Add any parameter attributes.
    if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
      attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
  }

  // If the function takes more arguments than the call was taking, add them
  // now, using null values of the right type.
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
    Args.push_back(Constant::getNullValue(FT->getParamType(i)));

  // If we are removing arguments to the function, emit an obnoxious warning.
  if (FT->getNumParams() < NumActualArgs) {
    if (!FT->isVarArg()) {
      errs() << "WARNING: While resolving call to function '"
             << Callee->getName() << "' arguments were dropped!\n";
    } else {
      // Add all of the arguments in their promoted form to the arg list.
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        Type *PTy = getPromotedType((*AI)->getType());
        if (PTy != (*AI)->getType()) {
          // Must promote to pass through va_arg area!
          Instruction::CastOps opcode =
            CastInst::getCastOpcode(*AI, false, PTy, false);
          Args.push_back(Builder->CreateCast(opcode, *AI, PTy));
        } else {
          Args.push_back(*AI);
        }

        // Add any parameter attributes.
        if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
          attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
      }
    }
  }

  if (Attributes FnAttrs = CallerPAL.getFnAttributes())
    attrVec.push_back(AttributeWithIndex::get(~0, FnAttrs));

  if (NewRetTy->isVoidTy())
    Caller->setName("");   // Void type should not have a name.

  const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec.begin(),
                                                     attrVec.end());

  Instruction *NC;
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NC = Builder->CreateInvoke(Callee, II->getNormalDest(),
                               II->getUnwindDest(), Args);
    NC->takeName(Caller);
    cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
    cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
  } else {
    CallInst *CI = cast<CallInst>(Caller);
    NC = Builder->CreateCall(Callee, Args);
    NC->takeName(Caller);
    if (CI->isTailCall())
      cast<CallInst>(NC)->setTailCall();
    cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
    cast<CallInst>(NC)->setAttributes(NewCallerPAL);
  }

  // Insert a cast of the return type as necessary.
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      Instruction::CastOps opcode =
        CastInst::getCastOpcode(NC, false, OldRetTy, false);
      NV = NC = CastInst::Create(opcode, NC, OldRetTy);
      NC->setDebugLoc(Caller->getDebugLoc());

      // If this is an invoke instruction, we should insert it after the first
      // non-phi instruction in the normal successor block.
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        BasicBlock::iterator I = II->getNormalDest()->getFirstInsertionPt();
        InsertNewInstBefore(NC, *I);
      } else {
        // Otherwise, it's a call; just insert the cast right after the call.
        InsertNewInstBefore(NC, *Caller);
      }
      Worklist.AddUsersToWorkList(*Caller);
    } else {
      NV = UndefValue::get(Caller->getType());
    }
  }

  if (!Caller->use_empty())
    ReplaceInstUsesWith(*Caller, NV);

  EraseInstFromFunction(*Caller);
  return true;
}

// transformCallThroughTrampoline - Turn a call to a function created by the
// init_trampoline / adjust_trampoline intrinsic pair into a direct call to the
// underlying function.
//
Instruction *
InstCombiner::transformCallThroughTrampoline(CallSite CS,
                                             IntrinsicInst *Tramp) {
  Value *Callee = CS.getCalledValue();
  PointerType *PTy = cast<PointerType>(Callee->getType());
  FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  const AttrListPtr &Attrs = CS.getAttributes();

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return 0;

  assert(Tramp &&
         "transformCallThroughTrampoline called with incorrect CallSite.");

  Function *NestF =cast<Function>(Tramp->getArgOperand(1)->stripPointerCasts());
  PointerType *NestFPTy = cast<PointerType>(NestF->getType());
  FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());

  const AttrListPtr &NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestIdx = 1;
    Type *NestTy = 0;
    Attributes NestAttr = Attribute::None;

    // Look for a parameter marked with the 'nest' attribute.
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
         E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
      if (NestAttrs.paramHasAttr(NestIdx, Attribute::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = NestAttrs.getParamAttributes(NestIdx);
        break;
      }

    if (NestTy) {
      Instruction *Caller = CS.getInstruction();
      std::vector<Value*> NewArgs;
      NewArgs.reserve(unsigned(CS.arg_end()-CS.arg_begin())+1);

      SmallVector<AttributeWithIndex, 8> NewAttrs;
      NewAttrs.reserve(Attrs.getNumSlots() + 1);

      // Insert the nest argument into the call argument list, which may
      // mean appending it.  Likewise for attributes.

      // Add any result attributes.
      if (Attributes Attr = Attrs.getRetAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(0, Attr));

      {
        unsigned Idx = 1;
        CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
        do {
          if (Idx == NestIdx) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp->getArgOperand(2);
            if (NestVal->getType() != NestTy)
              NestVal = Builder->CreateBitCast(NestVal, NestTy, "nest");
            NewArgs.push_back(NestVal);
            NewAttrs.push_back(AttributeWithIndex::get(NestIdx, NestAttr));
          }

          if (I == E)
            break;

          // Add the original argument and attributes.
          NewArgs.push_back(*I);
          if (Attributes Attr = Attrs.getParamAttributes(Idx))
            NewAttrs.push_back
              (AttributeWithIndex::get(Idx + (Idx >= NestIdx), Attr));

          ++Idx, ++I;
        } while (1);
      }

      // Add any function attributes.
      if (Attributes Attr = Attrs.getFnAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(~0, Attr));

      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.

      std::vector<Type*> NewTypes;
      NewTypes.reserve(FTy->getNumParams()+1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned Idx = 1;
        FunctionType::param_iterator I = FTy->param_begin(),
          E = FTy->param_end();

        do {
          if (Idx == NestIdx)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++Idx, ++I;
        } while (1);
      }

      // Replace the trampoline call with a direct call.  Let the generic
      // code sort out any function type mismatches.
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                               FTy->isVarArg());
      Constant *NewCallee =
        NestF->getType() == PointerType::getUnqual(NewFTy) ?
        NestF : ConstantExpr::getBitCast(NestF,
                                         PointerType::getUnqual(NewFTy));
      const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs.begin(),
                                                   NewAttrs.end());

      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        NewCaller = InvokeInst::Create(NewCallee,
                                       II->getNormalDest(), II->getUnwindDest(),
                                       NewArgs);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewCallee, NewArgs);
        if (cast<CallInst>(Caller)->isTailCall())
          cast<CallInst>(NewCaller)->setTailCall();
        cast<CallInst>(NewCaller)->
          setCallingConv(cast<CallInst>(Caller)->getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }

      return NewCaller;
    }
  }

  // Replace the trampoline call with a direct call.  Since there is no 'nest'
  // parameter, there is no need to adjust the argument list.  Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee =
    NestF->getType() == PTy ? NestF :
                              ConstantExpr::getBitCast(NestF, PTy);
  CS.setCalledFunction(NewCallee);
  return CS.getInstruction();
}