//===-- ConstantFolding.cpp - Fold instructions into constants ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines routines for folding instructions into constants.
//
// Also, to supplement the basic IR ConstantExpr simplifications,
// this file defines some additional folding routines that can make use of
// DataLayout information. These functions cannot go in IR due to library
// dependency issues.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FEnv.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include <cerrno>
#include <cmath>
using namespace llvm;
//===----------------------------------------------------------------------===//
// Constant Folding internal helper functions
//===----------------------------------------------------------------------===//
/// FoldBitCast - Constant fold bitcast, symbolically evaluating it with
/// DataLayout. This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable.
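///
/// For example, with a little-endian DataLayout this folds
///   bitcast (<2 x i32> <i32 1, i32 2> to i64)
/// to i64 0x0000000200000001: element 0 lands in the low bits of the result.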
static Constant *FoldBitCast(Constant *C, Type *DestTy,
                             const DataLayout &TD) {
  // Catch the obvious splat cases.
  if (C->isNullValue() && !DestTy->isX86_MMXTy())
    return Constant::getNullValue(DestTy);
  if (C->isAllOnesValue() && !DestTy->isX86_MMXTy())
    return Constant::getAllOnesValue(DestTy);

  // Handle a vector->integer cast.
  if (IntegerType *IT = dyn_cast<IntegerType>(DestTy)) {
    VectorType *VTy = dyn_cast<VectorType>(C->getType());
    if (!VTy)
      return ConstantExpr::getBitCast(C, DestTy);

    unsigned NumSrcElts = VTy->getNumElements();
    Type *SrcEltTy = VTy->getElementType();

    // If the vector is a vector of floating point, convert it to a vector of
    // integers to simplify things.
    if (SrcEltTy->isFloatingPointTy()) {
      unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
      Type *SrcIVTy =
        VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElts);
      // Ask IR to do the conversion now that #elts line up.
      C = ConstantExpr::getBitCast(C, SrcIVTy);
    }

    ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(C);
    if (!CDV)
      return ConstantExpr::getBitCast(C, DestTy);

    // Now that we know that the input value is a vector of integers, just shift
    // and insert them into our result.
    unsigned BitShift = TD.getTypeAllocSizeInBits(SrcEltTy);
    APInt Result(IT->getBitWidth(), 0);
    for (unsigned i = 0; i != NumSrcElts; ++i) {
      Result <<= BitShift;
      if (TD.isLittleEndian())
        Result |= CDV->getElementAsInteger(NumSrcElts-i-1);
      else
        Result |= CDV->getElementAsInteger(i);
    }

    return ConstantInt::get(IT, Result);
  }

  // The code below only handles casts to vectors currently.
  VectorType *DestVTy = dyn_cast<VectorType>(DestTy);
  if (!DestVTy)
    return ConstantExpr::getBitCast(C, DestTy);

  // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
  // vector so the code below can handle it uniformly.
  if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
    Constant *Ops = C; // don't take the address of C!
    return FoldBitCast(ConstantVector::get(Ops), DestTy, TD);
  }

  // If this is a bitcast from constant vector -> vector, fold it.
  if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
    return ConstantExpr::getBitCast(C, DestTy);

  // If the element types match, IR can fold it.
  unsigned NumDstElt = DestVTy->getNumElements();
  unsigned NumSrcElt = C->getType()->getVectorNumElements();
  if (NumDstElt == NumSrcElt)
    return ConstantExpr::getBitCast(C, DestTy);

  Type *SrcEltTy = C->getType()->getVectorElementType();
  Type *DstEltTy = DestVTy->getElementType();

  // Otherwise, we're changing the number of elements in a vector, which
  // requires endianness information to do the right thing. For example,
  //    bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  // folds to (little endian):
  //    <4 x i32> <i32 0, i32 0, i32 1, i32 0>
  // and to (big endian):
  //    <4 x i32> <i32 0, i32 0, i32 0, i32 1>

  // First things first. We only want to think about integers here, so if
  // we have something in FP form, recast it as integer.
  if (DstEltTy->isFloatingPointTy()) {
    // Fold to a vector of integers with the same size as our FP type.
    unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
    Type *DestIVTy =
      VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt);
    // Recursively handle this integer conversion, if possible.
    C = FoldBitCast(C, DestIVTy, TD);

    // Finally, IR can handle this now that #elts line up.
    return ConstantExpr::getBitCast(C, DestTy);
  }

  // Okay, we know the destination is integer, if the input is FP, convert
  // it to integer first.
  if (SrcEltTy->isFloatingPointTy()) {
    unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
    Type *SrcIVTy =
      VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
    // Ask IR to do the conversion now that #elts line up.
    C = ConstantExpr::getBitCast(C, SrcIVTy);
    // If IR wasn't able to fold it, bail out.
    if (!isa<ConstantVector>(C) && // FIXME: Remove ConstantVector.
        !isa<ConstantDataVector>(C))
      return C;
  }

  // Now we know that the input and output vectors are both integer vectors
  // of the same size, and that their #elements is not the same. Do the
  // conversion here, which depends on whether the input or output has
  // more elements.
  bool isLittleEndian = TD.isLittleEndian();

  SmallVector<Constant*, 32> Result;
  if (NumDstElt < NumSrcElt) {
    // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
    Constant *Zero = Constant::getNullValue(DstEltTy);
    unsigned Ratio = NumSrcElt/NumDstElt;
    unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
    unsigned SrcElt = 0;
    for (unsigned i = 0; i != NumDstElt; ++i) {
      // Build each element of the result.
      Constant *Elt = Zero;
      unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
      for (unsigned j = 0; j != Ratio; ++j) {
        Constant *Src = dyn_cast<ConstantInt>(C->getAggregateElement(SrcElt++));
        if (!Src) // Reject constantexpr elements.
          return ConstantExpr::getBitCast(C, DestTy);

        // Zero extend the element to the right size.
        Src = ConstantExpr::getZExt(Src, Elt->getType());

        // Shift it to the right place, depending on endianness.
        Src = ConstantExpr::getShl(Src,
                                   ConstantInt::get(Src->getType(), ShiftAmt));
        ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;

        // Mix it in.
        Elt = ConstantExpr::getOr(Elt, Src);
      }
      Result.push_back(Elt);
    }
    return ConstantVector::get(Result);
  }

  // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  unsigned Ratio = NumDstElt/NumSrcElt;
  unsigned DstBitSize = DstEltTy->getPrimitiveSizeInBits();

  // Loop over each source value, expanding into multiple results.
  for (unsigned i = 0; i != NumSrcElt; ++i) {
    Constant *Src = dyn_cast<ConstantInt>(C->getAggregateElement(i));
    if (!Src) // Reject constantexpr elements.
      return ConstantExpr::getBitCast(C, DestTy);

    unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
    for (unsigned j = 0; j != Ratio; ++j) {
      // Shift the piece of the value into the right place, depending on
      // endianness.
      Constant *Elt = ConstantExpr::getLShr(Src,
                                  ConstantInt::get(Src->getType(), ShiftAmt));
      ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;

      // Truncate and remember this piece.
      Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
    }
  }

  return ConstantVector::get(Result);
}
/// IsConstantOffsetFromGlobal - If this constant is actually a constant offset
/// from a global, return the global and the constant. Because of
/// constantexprs, this function is recursive.
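///
/// For example, given 'getelementptr ([5 x i32]* @a, i32 0, i32 3)', this
/// returns @a in GV and sets Offset to 12 (three 4-byte elements).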
static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
                                       APInt &Offset, const DataLayout &TD) {
  // Trivial case, constant is the global.
  if ((GV = dyn_cast<GlobalValue>(C))) {
    Offset.clearAllBits();
    return true;
  }

  // Otherwise, if this isn't a constant expr, bail out.
  ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
  if (!CE) return false;

  // Look through ptr->int and ptr->ptr casts.
  if (CE->getOpcode() == Instruction::PtrToInt ||
      CE->getOpcode() == Instruction::BitCast)
    return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, TD);

  // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
  if (GEPOperator *GEP = dyn_cast<GEPOperator>(CE)) {
    // If the base isn't a global+constant, we aren't either.
    if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, TD))
      return false;

    // Otherwise, add any offset that our operands provide.
    return GEP->accumulateConstantOffset(TD, Offset);
  }

  return false;
}
/// ReadDataFromGlobal - Recursive helper to read bits out of global. C is the
/// constant being copied out of. ByteOffset is an offset into C. CurPtr is the
/// pointer to copy results into and BytesLeft is the number of bytes left in
/// the CurPtr buffer. TD is the target data.
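///
/// For example, reading 2 bytes at ByteOffset 1 out of an i32 constant
/// holding 0x04030201 with a little-endian DataLayout stores the bytes
/// 0x02, 0x03 into CurPtr.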
static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
                               unsigned char *CurPtr, unsigned BytesLeft,
                               const DataLayout &TD) {
  assert(ByteOffset <= TD.getTypeAllocSize(C->getType()) &&
         "Out of range access");

  // If this element is zero or undefined, we can just return since *CurPtr is
  // zero initialized.
  if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
    return true;

  if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
    if (CI->getBitWidth() > 64 ||
        (CI->getBitWidth() & 7) != 0)
      return false;

    uint64_t Val = CI->getZExtValue();
    unsigned IntBytes = unsigned(CI->getBitWidth()/8);

    for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
      unsigned n = ByteOffset;
      if (!TD.isLittleEndian())
        n = IntBytes - n - 1;
      CurPtr[i] = (unsigned char)(Val >> (n * 8));
      ++ByteOffset;
    }
    return true;
  }

  if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
    if (CFP->getType()->isDoubleTy()) {
      C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), TD);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, TD);
    }
    if (CFP->getType()->isFloatTy()) {
      C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), TD);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, TD);
    }
    if (CFP->getType()->isHalfTy()) {
      C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), TD);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, TD);
    }
    return false;
  }

  if (ConstantStruct *CS = dyn_cast<ConstantStruct>(C)) {
    const StructLayout *SL = TD.getStructLayout(CS->getType());
    unsigned Index = SL->getElementContainingOffset(ByteOffset);
    uint64_t CurEltOffset = SL->getElementOffset(Index);
    ByteOffset -= CurEltOffset;

    while (1) {
      // If the element access is to the element itself and not to tail padding,
      // read the bytes from the element.
      uint64_t EltSize = TD.getTypeAllocSize(CS->getOperand(Index)->getType());

      if (ByteOffset < EltSize &&
          !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
                              BytesLeft, TD))
        return false;

      ++Index;

      // Check to see if we read from the last struct element, if so we're done.
      if (Index == CS->getType()->getNumElements())
        return true;

      // If we read all of the bytes we needed from this element we're done.
      uint64_t NextEltOffset = SL->getElementOffset(Index);

      if (BytesLeft <= NextEltOffset-CurEltOffset-ByteOffset)
        return true;

      // Move to the next element of the struct.
      CurPtr += NextEltOffset-CurEltOffset-ByteOffset;
      BytesLeft -= NextEltOffset-CurEltOffset-ByteOffset;
      ByteOffset = 0;
      CurEltOffset = NextEltOffset;
    }
  }

  if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
      isa<ConstantDataSequential>(C)) {
    Type *EltTy = cast<SequentialType>(C->getType())->getElementType();
    uint64_t EltSize = TD.getTypeAllocSize(EltTy);
    uint64_t Index = ByteOffset / EltSize;
    uint64_t Offset = ByteOffset - Index * EltSize;
    uint64_t NumElts;
    if (ArrayType *AT = dyn_cast<ArrayType>(C->getType()))
      NumElts = AT->getNumElements();
    else
      NumElts = cast<VectorType>(C->getType())->getNumElements();

    for (; Index != NumElts; ++Index) {
      if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
                              BytesLeft, TD))
        return false;

      uint64_t BytesWritten = EltSize - Offset;
      assert(BytesWritten <= EltSize && "Not indexing into this element?");
      if (BytesWritten >= BytesLeft)
        return true;

      Offset = 0;
      BytesLeft -= BytesWritten;
      CurPtr += BytesWritten;
    }
    return true;
  }

  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->getOpcode() == Instruction::IntToPtr &&
        CE->getOperand(0)->getType() == TD.getIntPtrType(CE->getContext()))
      return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
                                BytesLeft, TD);
  }

  // Otherwise, unknown initializer type.
  return false;
}
static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
                                                 const DataLayout &TD) {
  Type *LoadTy = cast<PointerType>(C->getType())->getElementType();
  IntegerType *IntType = dyn_cast<IntegerType>(LoadTy);

  // If this isn't an integer load we can't fold it directly.
  if (!IntType) {
    // If this is a float/double load, we can try folding it as an int32/64 load
    // and then bitcast the result. This can be useful for union cases. Note
    // that address spaces don't matter here since we're not going to result in
    // an actual new load.
    Type *MapTy;
    if (LoadTy->isHalfTy())
      MapTy = Type::getInt16PtrTy(C->getContext());
    else if (LoadTy->isFloatTy())
      MapTy = Type::getInt32PtrTy(C->getContext());
    else if (LoadTy->isDoubleTy())
      MapTy = Type::getInt64PtrTy(C->getContext());
    else if (LoadTy->isVectorTy()) {
      MapTy = IntegerType::get(C->getContext(),
                               TD.getTypeAllocSizeInBits(LoadTy));
      MapTy = PointerType::getUnqual(MapTy);
    } else
      return 0;

    C = FoldBitCast(C, MapTy, TD);
    if (Constant *Res = FoldReinterpretLoadFromConstPtr(C, TD))
      return FoldBitCast(Res, LoadTy, TD);
    return 0;
  }

  unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
  if (BytesLoaded > 32 || BytesLoaded == 0) return 0;

  GlobalValue *GVal;
  APInt Offset(TD.getPointerSizeInBits(), 0);
  if (!IsConstantOffsetFromGlobal(C, GVal, Offset, TD))
    return 0;

  GlobalVariable *GV = dyn_cast<GlobalVariable>(GVal);
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
      !GV->getInitializer()->getType()->isSized())
    return 0;

  // If we're loading off the beginning of the global, some bytes may be valid,
  // but we don't try to handle this.
  if (Offset.isNegative()) return 0;

  // If we're not accessing anything in this constant, the result is undefined.
  if (Offset.getZExtValue() >=
      TD.getTypeAllocSize(GV->getInitializer()->getType()))
    return UndefValue::get(IntType);

  unsigned char RawBytes[32] = {0};
  if (!ReadDataFromGlobal(GV->getInitializer(), Offset.getZExtValue(), RawBytes,
                          BytesLoaded, TD))
    return 0;

  APInt ResultVal = APInt(IntType->getBitWidth(), 0);
  if (TD.isLittleEndian()) {
    ResultVal = RawBytes[BytesLoaded - 1];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[BytesLoaded - 1 - i];
    }
  } else {
    ResultVal = RawBytes[0];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[i];
    }
  }

  return ConstantInt::get(IntType->getContext(), ResultVal);
}
/// ConstantFoldLoadFromConstPtr - Return the value that a load from C would
/// produce if it is constant and determinable. If this is not determinable,
/// return null.
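///
/// For example, a load of 'getelementptr ([3 x i32]* @g, i32 0, i32 2)',
/// where @g is 'constant [3 x i32] [i32 1, i32 2, i32 3]', folds to i32 3.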
Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
                                             const DataLayout *TD) {
  // First, try the easy cases:
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      return GV->getInitializer();

  // If the loaded value isn't a constant expr, we can't handle it.
  ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
  if (!CE) return 0;

  if (CE->getOpcode() == Instruction::GetElementPtr) {
    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(CE->getOperand(0)))
      if (GV->isConstant() && GV->hasDefinitiveInitializer())
        if (Constant *V =
              ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE))
          return V;
  }

  // Instead of loading a constant C string, use the corresponding integer
  // value directly if the string length is small enough.
  StringRef Str;
  if (TD && getConstantStringInfo(CE, Str) && !Str.empty()) {
    unsigned StrLen = Str.size();
    Type *Ty = cast<PointerType>(CE->getType())->getElementType();
    unsigned NumBits = Ty->getPrimitiveSizeInBits();
    // Replace load with immediate integer if the result is an integer or fp
    // value.
    if ((NumBits >> 3) == StrLen + 1 && (NumBits & 7) == 0 &&
        (isa<IntegerType>(Ty) || Ty->isFloatingPointTy())) {
      APInt StrVal(NumBits, 0);
      APInt SingleChar(NumBits, 0);
      if (TD->isLittleEndian()) {
        for (signed i = StrLen-1; i >= 0; i--) {
          SingleChar = (uint64_t) Str[i] & UCHAR_MAX;
          StrVal = (StrVal << 8) | SingleChar;
        }
      } else {
        for (unsigned i = 0; i < StrLen; i++) {
          SingleChar = (uint64_t) Str[i] & UCHAR_MAX;
          StrVal = (StrVal << 8) | SingleChar;
        }
        // Append the NUL terminator at the end.
        SingleChar = 0;
        StrVal = (StrVal << 8) | SingleChar;
      }

      Constant *Res = ConstantInt::get(CE->getContext(), StrVal);
      if (Ty->isFloatingPointTy())
        Res = ConstantExpr::getBitCast(Res, Ty);
      return Res;
    }
  }

  // If this load comes from anywhere in a constant global, and if the global
  // is all undef or zero, we know what it loads.
  if (GlobalVariable *GV =
        dyn_cast<GlobalVariable>(GetUnderlyingObject(CE, TD))) {
    if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
      Type *ResTy = cast<PointerType>(C->getType())->getElementType();
      if (GV->getInitializer()->isNullValue())
        return Constant::getNullValue(ResTy);
      if (isa<UndefValue>(GV->getInitializer()))
        return UndefValue::get(ResTy);
    }
  }

  // Try hard to fold loads from bitcasted strange and non-type-safe things.
  if (TD)
    return FoldReinterpretLoadFromConstPtr(CE, *TD);
  return 0;
}

static Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout *TD) {
  if (LI->isVolatile()) return 0;

  if (Constant *C = dyn_cast<Constant>(LI->getOperand(0)))
    return ConstantFoldLoadFromConstPtr(C, TD);

  return 0;
}
/// SymbolicallyEvaluateBinop - One of Op0/Op1 is a constant expression.
/// Attempt to symbolically evaluate the result of a binary operator merging
/// these together. If target data info is available, it is provided as DL,
/// otherwise DL is null.
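///
/// For the 'and' case handled below: a bit that is known zero in either
/// operand is known zero in the result, and a bit known one in both operands
/// is known one. If that accounts for every bit, the 'and' folds to a plain
/// integer constant built from the known-one bits.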
static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
                                           Constant *Op1, const DataLayout *DL) {
  // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
  // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
  // bits to either operand. Extend this to handle arbitrary aggregates when
  // we have a test case.
  if (Opc == Instruction::And && DL) {
    unsigned BitWidth = DL->getTypeSizeInBits(Op0->getType());
    APInt KnownZero0(BitWidth, 0), KnownOne0(BitWidth, 0);
    APInt KnownZero1(BitWidth, 0), KnownOne1(BitWidth, 0);
    ComputeMaskedBits(Op0, KnownZero0, KnownOne0, DL);
    ComputeMaskedBits(Op1, KnownZero1, KnownOne1, DL);
    if ((KnownOne1 | KnownZero0).isAllOnesValue()) {
      // All the bits of Op0 that the 'and' could be masking are already zero.
      return Op0;
    }
    if ((KnownOne0 | KnownZero1).isAllOnesValue()) {
      // All the bits of Op1 that the 'and' could be masking are already zero.
      return Op1;
    }

    APInt KnownZero = KnownZero0 | KnownZero1;
    APInt KnownOne = KnownOne0 & KnownOne1;
    if ((KnownZero | KnownOne).isAllOnesValue()) {
      return ConstantInt::get(Op0->getType(), KnownOne);
    }
  }

  // If the constant expr is something like &A[123] - &A[4].f, fold this into a
  // constant. This happens frequently when iterating over a global array.
  if (Opc == Instruction::Sub && DL) {
    GlobalValue *GV1, *GV2;
    unsigned PtrSize = DL->getPointerSizeInBits();
    unsigned OpSize = DL->getTypeSizeInBits(Op0->getType());
    APInt Offs1(PtrSize, 0), Offs2(PtrSize, 0);

    if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, *DL))
      if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, *DL) &&
          GV1 == GV2) {
        // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
        // PtrToInt may change the bitwidth, so we have to convert to the right
        // size first.
        return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) -
                                                Offs2.zextOrTrunc(OpSize));
      }
  }

  return 0;
}
/// CastGEPIndices - If array indices are not pointer-sized integers,
/// explicitly cast them so that they aren't implicitly casted by the
/// getelementptr.
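///
/// For example, with 64-bit pointers the i16 indices in
///   getelementptr [4 x i32]* @a, i16 0, i16 1
/// are rewritten below as i64 indices, so later offset arithmetic happens at
/// pointer width.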
static Constant *CastGEPIndices(ArrayRef<Constant *> Ops,
                                Type *ResultTy, const DataLayout *TD,
                                const TargetLibraryInfo *TLI) {
  if (!TD) return 0;
  Type *IntPtrTy = TD->getIntPtrType(ResultTy->getContext());

  bool Any = false;
  SmallVector<Constant*, 32> NewIdxs;
  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    if ((i == 1 ||
         !isa<StructType>(GetElementPtrInst::getIndexedType(Ops[0]->getType(),
                                                        Ops.slice(1, i-1)))) &&
        Ops[i]->getType() != IntPtrTy) {
      Any = true;
      NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i],
                                                                      true,
                                                                      IntPtrTy,
                                                                      true),
                                              Ops[i], IntPtrTy));
    } else
      NewIdxs.push_back(Ops[i]);
  }
  if (!Any) return 0;

  Constant *C =
    ConstantExpr::getGetElementPtr(Ops[0], NewIdxs);
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
    if (Constant *Folded = ConstantFoldConstantExpression(CE, TD, TLI))
      C = Folded;
  return C;
}

/// Strip the pointer casts, but preserve the address space information.
static Constant *StripPtrCastKeepAS(Constant *Ptr) {
  assert(Ptr->getType()->isPointerTy() && "Not a pointer type");
  PointerType *OldPtrTy = cast<PointerType>(Ptr->getType());
  Ptr = cast<Constant>(Ptr->stripPointerCasts());
  PointerType *NewPtrTy = cast<PointerType>(Ptr->getType());

  // Preserve the address space number of the pointer.
  if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) {
    NewPtrTy = NewPtrTy->getElementType()->getPointerTo(
      OldPtrTy->getAddressSpace());
    Ptr = ConstantExpr::getBitCast(Ptr, NewPtrTy);
  }
  return Ptr;
}
/// SymbolicallyEvaluateGEP - If we can symbolically evaluate the specified GEP
/// constant expression, do so.
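///
/// For example, every index of
///   getelementptr ([12 x i8]* @x, i32 0, i64 4)
/// is a constant, so the code below collapses the indices into a single byte
/// offset and then rebuilds canonical indices for the result type from it.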
static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
                                         Type *ResultTy, const DataLayout *TD,
                                         const TargetLibraryInfo *TLI) {
  Constant *Ptr = Ops[0];
  if (!TD || !cast<PointerType>(Ptr->getType())->getElementType()->isSized() ||
      !Ptr->getType()->isPointerTy())
    return 0;

  Type *IntPtrTy = TD->getIntPtrType(Ptr->getContext());

  // If this is a constant expr gep that is effectively computing an
  // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'.
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    if (!isa<ConstantInt>(Ops[i])) {

      // If this is "gep i8* Ptr, (sub 0, V)", fold this as:
      // "inttoptr (sub (ptrtoint Ptr), V)"
      if (Ops.size() == 2 &&
          cast<PointerType>(ResultTy)->getElementType()->isIntegerTy(8)) {
        ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[1]);
        assert((CE == 0 || CE->getType() == IntPtrTy) &&
               "CastGEPIndices didn't canonicalize index types!");
        if (CE && CE->getOpcode() == Instruction::Sub &&
            CE->getOperand(0)->isNullValue()) {
          Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType());
          Res = ConstantExpr::getSub(Res, CE->getOperand(1));
          Res = ConstantExpr::getIntToPtr(Res, ResultTy);
          if (ConstantExpr *ResCE = dyn_cast<ConstantExpr>(Res))
            Res = ConstantFoldConstantExpression(ResCE, TD, TLI);
          return Res;
        }
      }
      return 0;
    }

  unsigned BitWidth = TD->getTypeSizeInBits(IntPtrTy);
  APInt Offset =
    APInt(BitWidth, TD->getIndexedOffset(Ptr->getType(),
                                         makeArrayRef((Value *const*)
                                                        Ops.data() + 1,
                                                      Ops.size() - 1)));
  Ptr = StripPtrCastKeepAS(Ptr);

  // If this is a GEP of a GEP, fold it all into a single GEP.
  while (GEPOperator *GEP = dyn_cast<GEPOperator>(Ptr)) {
    SmallVector<Value *, 4> NestedOps(GEP->op_begin()+1, GEP->op_end());

    // Do not try to incorporate the sub-GEP if some index is not a number.
    bool AllConstantInt = true;
    for (unsigned i = 0, e = NestedOps.size(); i != e; ++i)
      if (!isa<ConstantInt>(NestedOps[i])) {
        AllConstantInt = false;
        break;
      }
    if (!AllConstantInt)
      break;

    Ptr = cast<Constant>(GEP->getOperand(0));
    Offset += APInt(BitWidth,
                    TD->getIndexedOffset(Ptr->getType(), NestedOps));
    Ptr = StripPtrCastKeepAS(Ptr);
  }

  // If the base value for this address is a literal integer value, fold the
  // getelementptr to the resulting integer value casted to the pointer type.
  APInt BasePtr(BitWidth, 0);
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
    if (CE->getOpcode() == Instruction::IntToPtr)
      if (ConstantInt *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
        BasePtr = Base->getValue().zextOrTrunc(BitWidth);
  if (Ptr->isNullValue() || BasePtr != 0) {
    Constant *C = ConstantInt::get(Ptr->getContext(), Offset+BasePtr);
    return ConstantExpr::getIntToPtr(C, ResultTy);
  }

  // Otherwise form a regular getelementptr. Recompute the indices so that
  // we eliminate over-indexing of the notional static type array bounds.
  // This makes it easy to determine if the getelementptr is "inbounds".
  // Also, this helps GlobalOpt do SROA on GlobalVariables.
  Type *Ty = Ptr->getType();
  assert(Ty->isPointerTy() && "Forming regular GEP of non-pointer type");
  SmallVector<Constant*, 32> NewIdxs;
  do {
    if (SequentialType *ATy = dyn_cast<SequentialType>(Ty)) {
      if (ATy->isPointerTy()) {
        // The only pointer indexing we'll do is on the first index of the GEP.
        if (!NewIdxs.empty())
          break;

        // Only handle pointers to sized types, not pointers to functions.
        if (!ATy->getElementType()->isSized())
          return 0;
      }

      // Determine which element of the array the offset points into.
      APInt ElemSize(BitWidth, TD->getTypeAllocSize(ATy->getElementType()));
      IntegerType *IntPtrTy = TD->getIntPtrType(Ty->getContext());
      if (ElemSize == 0)
        // The element size is 0. This may be [0 x Ty]*, so just use a zero
        // index for this level and proceed to the next level to see if it can
        // accommodate the offset.
        NewIdxs.push_back(ConstantInt::get(IntPtrTy, 0));
      else {
        // The element size is non-zero, so divide the offset by the element
        // size (rounding down) to compute the index at this level.
        APInt NewIdx = Offset.udiv(ElemSize);
        Offset -= NewIdx * ElemSize;
        NewIdxs.push_back(ConstantInt::get(IntPtrTy, NewIdx));
      }
      Ty = ATy->getElementType();
    } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
      // If we end up with an offset that isn't valid for this struct type, we
      // can't re-form this GEP in a regular form, so bail out. The pointer
      // operand likely went through casts that are necessary to make the GEP
      // sensible.
      const StructLayout &SL = *TD->getStructLayout(STy);
      if (Offset.uge(SL.getSizeInBytes()))
        break;

      // Determine which field of the struct the offset points into. The
      // getZExtValue is fine as we've already ensured that the offset is
      // within the range representable by the StructLayout API.
      unsigned ElIdx = SL.getElementContainingOffset(Offset.getZExtValue());
      NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
                                         ElIdx));
      Offset -= APInt(BitWidth, SL.getElementOffset(ElIdx));
      Ty = STy->getTypeAtIndex(ElIdx);
    } else {
      // We've reached some non-indexable type.
      break;
    }
  } while (Ty != cast<PointerType>(ResultTy)->getElementType());

  // If we haven't used up the entire offset by descending the static
  // type, then the offset is pointing into the middle of an indivisible
  // member, so we can't simplify it.
  if (Offset != 0)
    return 0;

  // Create a GEP.
  Constant *C =
    ConstantExpr::getGetElementPtr(Ptr, NewIdxs);
  assert(cast<PointerType>(C->getType())->getElementType() == Ty &&
         "Computed GetElementPtr has unexpected type!");

  // If we ended up indexing a member with a type that doesn't match
  // the type of what the original indices indexed, add a cast.
  if (Ty != cast<PointerType>(ResultTy)->getElementType())
    C = FoldBitCast(C, ResultTy, *TD);

  return C;
}
//===----------------------------------------------------------------------===//
// Constant Folding public APIs
//===----------------------------------------------------------------------===//
/// ConstantFoldInstruction - Try to constant fold the specified instruction.
/// If successful, the constant result is returned, if not, null is returned.
/// Note that this fails if not all of the operands are constant. Otherwise,
/// this function can only fail when attempting to fold instructions like loads
/// and stores, which have no constant expression form.
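///
/// A typical caller (the ConstantPropagation pass, roughly) looks like:
///   if (Constant *C = ConstantFoldInstruction(I, TD, TLI)) {
///     I->replaceAllUsesWith(C);
///     ...
///   }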
Constant *llvm::ConstantFoldInstruction(Instruction *I,
                                        const DataLayout *TD,
                                        const TargetLibraryInfo *TLI) {
  // Handle PHI nodes quickly here...
  if (PHINode *PN = dyn_cast<PHINode>(I)) {
    Constant *CommonValue = 0;

    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *Incoming = PN->getIncomingValue(i);
      // If the incoming value is undef then skip it. Note that while we could
      // skip the value if it is equal to the phi node itself we choose not to
      // because that would break the rule that constant folding only applies if
      // all operands are constants.
      if (isa<UndefValue>(Incoming))
        continue;
      // If the incoming value is not a constant, then give up.
      Constant *C = dyn_cast<Constant>(Incoming);
      if (!C)
        return 0;
      // Fold the PHI's operands.
      if (ConstantExpr *NewC = dyn_cast<ConstantExpr>(C))
        C = ConstantFoldConstantExpression(NewC, TD, TLI);
      // If the incoming value is a different constant to
      // the one we saw previously, then give up.
      if (CommonValue && C != CommonValue)
        return 0;
      CommonValue = C;
    }

    // If we reach here, all incoming values are the same constant or undef.
    return CommonValue ? CommonValue : UndefValue::get(PN->getType());
  }

  // Scan the operand list, checking to see if they are all constants, if so,
  // hand off to ConstantFoldInstOperands.
  SmallVector<Constant*, 8> Ops;
  for (User::op_iterator i = I->op_begin(), e = I->op_end(); i != e; ++i) {
    Constant *Op = dyn_cast<Constant>(*i);
    if (!Op)
      return 0; // All operands not constant!

    // Fold the Instruction's operands.
    if (ConstantExpr *NewCE = dyn_cast<ConstantExpr>(Op))
      Op = ConstantFoldConstantExpression(NewCE, TD, TLI);
    Ops.push_back(Op);
  }

  if (const CmpInst *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
                                           TD, TLI);

  if (const LoadInst *LI = dyn_cast<LoadInst>(I))
    return ConstantFoldLoadInst(LI, TD);

  if (InsertValueInst *IVI = dyn_cast<InsertValueInst>(I))
    return ConstantExpr::getInsertValue(
                                cast<Constant>(IVI->getAggregateOperand()),
                                cast<Constant>(IVI->getInsertedValueOperand()),
                                IVI->getIndices());

  if (ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(I))
    return ConstantExpr::getExtractValue(
                                    cast<Constant>(EVI->getAggregateOperand()),
                                    EVI->getIndices());

  return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Ops, TD, TLI);
}

/// ConstantFoldConstantExpression - Attempt to fold the constant expression
/// using the specified DataLayout. If successful, the constant result is
/// returned, if not, null is returned.
Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE,
                                               const DataLayout *TD,
                                               const TargetLibraryInfo *TLI) {
  SmallVector<Constant*, 8> Ops;
  for (User::const_op_iterator i = CE->op_begin(), e = CE->op_end();
       i != e; ++i) {
    Constant *NewC = cast<Constant>(*i);
    // Recursively fold the ConstantExpr's operands.
    if (ConstantExpr *NewCE = dyn_cast<ConstantExpr>(NewC))
      NewC = ConstantFoldConstantExpression(NewCE, TD, TLI);
    Ops.push_back(NewC);
  }

  if (CE->isCompare())
    return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1],
                                           TD, TLI);
  return ConstantFoldInstOperands(CE->getOpcode(), CE->getType(), Ops, TD, TLI);
}
/// ConstantFoldInstOperands - Attempt to constant fold an instruction with the
/// specified opcode and operands. If successful, the constant result is
/// returned, if not, null is returned. Note that this function can fail when
/// attempting to fold instructions like loads and stores, which have no
/// constant expression form.
///
/// TODO: This function neither utilizes nor preserves nsw/nuw/inbounds/etc
/// information, due to only being passed an opcode and operands. Constant
/// folding using this function strips this information.
///
Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
                                         ArrayRef<Constant *> Ops,
                                         const DataLayout *TD,
                                         const TargetLibraryInfo *TLI) {
  // Handle easy binops first.
  if (Instruction::isBinaryOp(Opcode)) {
    if (isa<ConstantExpr>(Ops[0]) || isa<ConstantExpr>(Ops[1]))
      if (Constant *C = SymbolicallyEvaluateBinop(Opcode, Ops[0], Ops[1], TD))
        return C;

    return ConstantExpr::get(Opcode, Ops[0], Ops[1]);
  }

  switch (Opcode) {
  default: return 0;
  case Instruction::ICmp:
  case Instruction::FCmp: llvm_unreachable("Invalid for compares");
  case Instruction::Call:
    if (Function *F = dyn_cast<Function>(Ops.back()))
      if (canConstantFoldCallTo(F))
        return ConstantFoldCall(F, Ops.slice(0, Ops.size() - 1), TLI);
    return 0;
  case Instruction::PtrToInt:
    // If the input is an inttoptr, eliminate the pair. This requires knowing
    // the width of a pointer, so it can't be done in ConstantExpr::getCast.
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[0])) {
      if (TD && CE->getOpcode() == Instruction::IntToPtr) {
        Constant *Input = CE->getOperand(0);
        unsigned InWidth = Input->getType()->getScalarSizeInBits();
        if (TD->getPointerSizeInBits() < InWidth) {
          Constant *Mask =
            ConstantInt::get(CE->getContext(), APInt::getLowBitsSet(InWidth,
                                                  TD->getPointerSizeInBits()));
          Input = ConstantExpr::getAnd(Input, Mask);
        }
        // Do a zext or trunc to get to the dest size.
        return ConstantExpr::getIntegerCast(Input, DestTy, false);
      }
    }
    return ConstantExpr::getCast(Opcode, Ops[0], DestTy);
  case Instruction::IntToPtr:
    // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
    // the int size is >= the ptr size. This requires knowing the width of a
    // pointer, so it can't be done in ConstantExpr::getCast.
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[0]))
      if (TD &&
          TD->getPointerSizeInBits() <= CE->getType()->getScalarSizeInBits() &&
          CE->getOpcode() == Instruction::PtrToInt)
        return FoldBitCast(CE->getOperand(0), DestTy, *TD);

    return ConstantExpr::getCast(Opcode, Ops[0], DestTy);
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
    return ConstantExpr::getCast(Opcode, Ops[0], DestTy);
  case Instruction::BitCast:
    if (TD)
      return FoldBitCast(Ops[0], DestTy, *TD);
    return ConstantExpr::getBitCast(Ops[0], DestTy);
  case Instruction::Select:
    return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]);
  case Instruction::ExtractElement:
    return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
  case Instruction::InsertElement:
    return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
  case Instruction::ShuffleVector:
    return ConstantExpr::getShuffleVector(Ops[0], Ops[1], Ops[2]);
  case Instruction::GetElementPtr:
    if (Constant *C = CastGEPIndices(Ops, DestTy, TD, TLI))
      return C;
    if (Constant *C = SymbolicallyEvaluateGEP(Ops, DestTy, TD, TLI))
      return C;

    return ConstantExpr::getGetElementPtr(Ops[0], Ops.slice(1));
  }
}
/// ConstantFoldCompareInstOperands - Attempt to constant fold a compare
/// instruction (icmp/fcmp) with the specified operands. If it fails, it
/// returns a constant expression of the specified operands.
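///
/// For example, 'icmp eq (inttoptr (i64 0 to i8*)), null' is reduced below
/// to a compare of the underlying integers and folds to i1 true.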
Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
                                                Constant *Ops0, Constant *Ops1,
                                                const DataLayout *TD,
                                                const TargetLibraryInfo *TLI) {
  // fold: icmp (inttoptr x), null         -> icmp x, 0
  // fold: icmp (ptrtoint x), 0            -> icmp x, null
  // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
  // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
  //
  // ConstantExpr::getCompare cannot do this, because it doesn't have TD
  // around to know if bit truncation is happening.
  if (ConstantExpr *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
    if (TD && Ops1->isNullValue()) {
      Type *IntPtrTy = TD->getIntPtrType(CE0->getContext());
      if (CE0->getOpcode() == Instruction::IntToPtr) {
        // Convert the integer value to the right size to ensure we get the
        // proper extension or truncation.
        Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                   IntPtrTy, false);
        Constant *Null = Constant::getNullValue(C->getType());
        return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI);
      }

      // Only do this transformation if the int is intptrty in size, otherwise
      // there is a truncation or extension that we aren't modeling.
      if (CE0->getOpcode() == Instruction::PtrToInt &&
          CE0->getType() == IntPtrTy) {
        Constant *C = CE0->getOperand(0);
        Constant *Null = Constant::getNullValue(C->getType());
        return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI);
      }
    }

    if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
      if (TD && CE0->getOpcode() == CE1->getOpcode()) {
        Type *IntPtrTy = TD->getIntPtrType(CE0->getContext());

        if (CE0->getOpcode() == Instruction::IntToPtr) {
          // Convert the integer value to the right size to ensure we get the
          // proper extension or truncation.
          Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                      IntPtrTy, false);
          Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0),
                                                      IntPtrTy, false);
          return ConstantFoldCompareInstOperands(Predicate, C0, C1, TD, TLI);
        }

        // Only do this transformation if the int is intptrty in size, otherwise
        // there is a truncation or extension that we aren't modeling.
        if ((CE0->getOpcode() == Instruction::PtrToInt &&
             CE0->getType() == IntPtrTy &&
             CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()))
          return ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(0),
                                                 CE1->getOperand(0), TD, TLI);
      }
    }

    // icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
    // icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
    if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
        CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
      Constant *LHS =
        ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(0), Ops1,
                                        TD, TLI);
      Constant *RHS =
        ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(1), Ops1,
                                        TD, TLI);
      unsigned OpC =
        Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
      Constant *Ops[] = { LHS, RHS };
      return ConstantFoldInstOperands(OpC, LHS->getType(), Ops, TD, TLI);
    }
  }

  return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
}
/// ConstantFoldLoadThroughGEPConstantExpr - Given a constant and a
/// getelementptr constantexpr, return the constant value being addressed by
/// the constant expression, or null if something is funny and we can't decide.
Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
                                                       ConstantExpr *CE) {
  if (!CE->getOperand(1)->isNullValue())
    return 0; // Do not allow stepping over the value!

  // Loop over all of the operands, tracking down which value we are
  // addressing.
  for (unsigned i = 2, e = CE->getNumOperands(); i != e; ++i) {
    C = C->getAggregateElement(CE->getOperand(i));
    if (C == 0) return 0;
  }
  return C;
}

/// ConstantFoldLoadThroughGEPIndices - Given a constant and getelementptr
/// indices (with an *implied* zero pointer index that is not in the list),
/// return the constant value being addressed by a virtual load, or null if
/// something is funny and we can't decide.
Constant *llvm::ConstantFoldLoadThroughGEPIndices(Constant *C,
                                                  ArrayRef<Constant*> Indices) {
  // Loop over all of the operands, tracking down which value we are
  // addressing.
  for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
    C = C->getAggregateElement(Indices[i]);
    if (C == 0) return 0;
  }
  return C;
}
//===----------------------------------------------------------------------===//
//  Constant Folding for Calls
//
//===----------------------------------------------------------------------===//

/// canConstantFoldCallTo - Return true if it's even possible to fold a call to
/// the specified function.
bool
llvm::canConstantFoldCallTo(const Function *F) {
  switch (F->getIntrinsicID()) {
  case Intrinsic::fabs:
  case Intrinsic::log:
  case Intrinsic::log2:
  case Intrinsic::log10:
  case Intrinsic::exp:
  case Intrinsic::exp2:
  case Intrinsic::floor:
  case Intrinsic::sqrt:
  case Intrinsic::pow:
  case Intrinsic::powi:
  case Intrinsic::bswap:
  case Intrinsic::ctpop:
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
  case Intrinsic::convert_from_fp16:
  case Intrinsic::convert_to_fp16:
  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64:
    return true;
  default:
    return false;
  case 0: break;
  }

  if (!F->hasName()) return false;
  StringRef Name = F->getName();

  // In these cases, the check of the length is required. We don't want to
  // return true for a name like "cos\0blah", which strcmp would compare equal
  // to "cos" but which has length 8.
  switch (Name[0]) {
  default: return false;
  case 'a':
    return Name == "acos" || Name == "asin" || Name == "atan" || Name == "atan2";
  case 'c':
    return Name == "cos" || Name == "ceil" || Name == "cosf" || Name == "cosh";
  case 'e':
    return Name == "exp" || Name == "exp2";
  case 'f':
    return Name == "fabs" || Name == "fmod" || Name == "floor";
  case 'l':
    return Name == "log" || Name == "log10";
  case 'p':
    return Name == "pow";
  case 's':
    return Name == "sin" || Name == "sinh" || Name == "sqrt" ||
           Name == "sinf" || Name == "sqrtf";
  case 't':
    return Name == "tan" || Name == "tanh";
  }
}
/// ConstantFoldFP - Evaluate a unary math function using the host's native
/// double implementation, returning null if the evaluation raises a
/// floating-point exception.
static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
                                Type *Ty) {
  sys::llvm_fenv_clearexcept();
  V = NativeFP(V);
  if (sys::llvm_fenv_testexcept()) {
    sys::llvm_fenv_clearexcept();
    return 0;
  }

  if (Ty->isHalfTy()) {
    APFloat APF(V);
    bool unused;
    APF.convert(APFloat::IEEEhalf, APFloat::rmNearestTiesToEven, &unused);
    return ConstantFP::get(Ty->getContext(), APF);
  }
  if (Ty->isFloatTy())
    return ConstantFP::get(Ty->getContext(), APFloat((float)V));
  if (Ty->isDoubleTy())
    return ConstantFP::get(Ty->getContext(), APFloat(V));
  llvm_unreachable("Can only constant fold half/float/double");
}

/// ConstantFoldBinaryFP - Like ConstantFoldFP, but for a binary math function
/// of two doubles.
static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
                                      double V, double W, Type *Ty) {
  sys::llvm_fenv_clearexcept();
  V = NativeFP(V, W);
  if (sys::llvm_fenv_testexcept()) {
    sys::llvm_fenv_clearexcept();
    return 0;
  }

  if (Ty->isHalfTy()) {
    APFloat APF(V);
    bool unused;
    APF.convert(APFloat::IEEEhalf, APFloat::rmNearestTiesToEven, &unused);
    return ConstantFP::get(Ty->getContext(), APF);
  }
  if (Ty->isFloatTy())
    return ConstantFP::get(Ty->getContext(), APFloat((float)V));
  if (Ty->isDoubleTy())
    return ConstantFP::get(Ty->getContext(), APFloat(V));
  llvm_unreachable("Can only constant fold half/float/double");
}
/// ConstantFoldConvertToInt - Attempt an SSE floating point to integer
/// conversion of a constant floating point value. If roundTowardZero is false,
/// the default IEEE rounding is used (toward nearest, ties to even). This
/// matches the behavior of the non-truncating SSE instructions in the default
/// rounding mode. The desired integer type Ty is used to select how many bits
/// are available for the result. Returns null if the conversion cannot be
/// performed, otherwise returns the Constant value resulting from the
/// conversion.
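///
/// For example, converting 2.5 with the default rounding yields 2 (the tie
/// goes to the even neighbor), while converting -2.7 with roundTowardZero
/// yields -2.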
static Constant *ConstantFoldConvertToInt(const APFloat &Val,
                                          bool roundTowardZero, Type *Ty) {
  // All of these conversion intrinsics form an integer of at most 64 bits.
  unsigned ResultWidth = cast<IntegerType>(Ty)->getBitWidth();
  assert(ResultWidth <= 64 &&
         "Can only constant fold conversions to 64 and 32 bit ints");

  uint64_t UIntVal;
  bool isExact = false;
  APFloat::roundingMode mode = roundTowardZero? APFloat::rmTowardZero
                                              : APFloat::rmNearestTiesToEven;
  APFloat::opStatus status = Val.convertToInteger(&UIntVal, ResultWidth,
                                                  /*isSigned=*/true, mode,
                                                  &isExact);
  if (status != APFloat::opOK && status != APFloat::opInexact)
    return 0;
  return ConstantInt::get(Ty, UIntVal, /*isSigned=*/true);
}
/// ConstantFoldCall - Attempt to constant fold a call to the specified function
/// with the specified arguments, returning null if unsuccessful.
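///
/// For example, 'call double @llvm.sqrt.f64(double 4.0)' folds to double 2.0;
/// since the intrinsic's name starts with 'l', it is handled under the 'l'
/// case of the libcall name switch below.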
Constant *
llvm::ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
                       const TargetLibraryInfo *TLI) {
  if (!F->hasName()) return 0;
  StringRef Name = F->getName();

  Type *Ty = F->getReturnType();
  if (Operands.size() == 1) {
    if (ConstantFP *Op = dyn_cast<ConstantFP>(Operands[0])) {
      if (F->getIntrinsicID() == Intrinsic::convert_to_fp16) {
        APFloat Val(Op->getValueAPF());

        bool lost = false;
        Val.convert(APFloat::IEEEhalf, APFloat::rmNearestTiesToEven, &lost);

        return ConstantInt::get(F->getContext(), Val.bitcastToAPInt());
      }
      if (!TLI)
        return 0;

      if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
        return 0;

      /// We only fold functions with finite arguments. Folding NaN and inf is
      /// likely to be aborted with an exception anyway, and some host libms
      /// have known errors raising exceptions.
      if (Op->getValueAPF().isNaN() || Op->getValueAPF().isInfinity())
        return 0;

      /// Currently APFloat versions of these functions do not exist, so we use
      /// the host native double versions. Float versions are not called
      /// directly but for all these it is true (float)(f((double)arg)) ==
      /// f(arg). Long double not supported yet.
      double V;
      if (Ty->isFloatTy())
        V = Op->getValueAPF().convertToFloat();
      else if (Ty->isDoubleTy())
        V = Op->getValueAPF().convertToDouble();
      else {
        bool unused;
        APFloat APF = Op->getValueAPF();
        APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &unused);
        V = APF.convertToDouble();
      }

      switch (F->getIntrinsicID()) {
      default: break;
      case Intrinsic::fabs:
        return ConstantFoldFP(fabs, V, Ty);
#if HAVE_LOG2
      case Intrinsic::log2:
        return ConstantFoldFP(log2, V, Ty);
#endif
#if HAVE_LOG
      case Intrinsic::log:
        return ConstantFoldFP(log, V, Ty);
#endif
#if HAVE_LOG10
      case Intrinsic::log10:
        return ConstantFoldFP(log10, V, Ty);
#endif
#if HAVE_EXP
      case Intrinsic::exp:
        return ConstantFoldFP(exp, V, Ty);
#endif
#if HAVE_EXP2
      case Intrinsic::exp2:
        return ConstantFoldFP(exp2, V, Ty);
#endif
      case Intrinsic::floor:
        return ConstantFoldFP(floor, V, Ty);
      }
      switch (Name[0]) {
      case 'a':
        if (Name == "acos" && TLI->has(LibFunc::acos))
          return ConstantFoldFP(acos, V, Ty);
        else if (Name == "asin" && TLI->has(LibFunc::asin))
          return ConstantFoldFP(asin, V, Ty);
        else if (Name == "atan" && TLI->has(LibFunc::atan))
          return ConstantFoldFP(atan, V, Ty);
        break;
      case 'c':
        if (Name == "ceil" && TLI->has(LibFunc::ceil))
          return ConstantFoldFP(ceil, V, Ty);
        else if (Name == "cos" && TLI->has(LibFunc::cos))
          return ConstantFoldFP(cos, V, Ty);
        else if (Name == "cosh" && TLI->has(LibFunc::cosh))
          return ConstantFoldFP(cosh, V, Ty);
        else if (Name == "cosf" && TLI->has(LibFunc::cosf))
          return ConstantFoldFP(cos, V, Ty);
        break;
      case 'e':
        if (Name == "exp" && TLI->has(LibFunc::exp))
          return ConstantFoldFP(exp, V, Ty);

        if (Name == "exp2" && TLI->has(LibFunc::exp2)) {
          // Constant fold exp2(x) as pow(2, x) in case the host doesn't have a
          // C99 library.
          return ConstantFoldBinaryFP(pow, 2.0, V, Ty);
        }
        break;
      case 'f':
        if (Name == "fabs" && TLI->has(LibFunc::fabs))
          return ConstantFoldFP(fabs, V, Ty);
        else if (Name == "floor" && TLI->has(LibFunc::floor))
          return ConstantFoldFP(floor, V, Ty);
        break;
      case 'l':
        if (Name == "log" && V > 0 && TLI->has(LibFunc::log))
          return ConstantFoldFP(log, V, Ty);
        else if (Name == "log10" && V > 0 && TLI->has(LibFunc::log10))
          return ConstantFoldFP(log10, V, Ty);
        else if (F->getIntrinsicID() == Intrinsic::sqrt &&
                 (Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy())) {
          if (V >= -0.0)
            return ConstantFoldFP(sqrt, V, Ty);
          else // Undefined
            return Constant::getNullValue(Ty);
        }
        break;
      case 's':
        if (Name == "sin" && TLI->has(LibFunc::sin))
          return ConstantFoldFP(sin, V, Ty);
        else if (Name == "sinh" && TLI->has(LibFunc::sinh))
          return ConstantFoldFP(sinh, V, Ty);
        else if (Name == "sqrt" && V >= 0 && TLI->has(LibFunc::sqrt))
          return ConstantFoldFP(sqrt, V, Ty);
        else if (Name == "sqrtf" && V >= 0 && TLI->has(LibFunc::sqrtf))
          return ConstantFoldFP(sqrt, V, Ty);
        else if (Name == "sinf" && TLI->has(LibFunc::sinf))
          return ConstantFoldFP(sin, V, Ty);
        break;
      case 't':
        if (Name == "tan" && TLI->has(LibFunc::tan))
          return ConstantFoldFP(tan, V, Ty);
        else if (Name == "tanh" && TLI->has(LibFunc::tanh))
          return ConstantFoldFP(tanh, V, Ty);
        break;
      default:
        break;
      }
      return 0;
    }
    if (ConstantInt *Op = dyn_cast<ConstantInt>(Operands[0])) {
      switch (F->getIntrinsicID()) {
      case Intrinsic::bswap:
        return ConstantInt::get(F->getContext(), Op->getValue().byteSwap());
      case Intrinsic::ctpop:
        return ConstantInt::get(Ty, Op->getValue().countPopulation());
      case Intrinsic::convert_from_fp16: {
        APFloat Val(APFloat::IEEEhalf, Op->getValue());

        bool lost = false;
        APFloat::opStatus status =
          Val.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven, &lost);

        // Conversion is always precise.
        (void)status;
        assert(status == APFloat::opOK && !lost &&
               "Precision lost during fp16 constfolding");

        return ConstantFP::get(F->getContext(), Val);
      }
      default:
        return 0;
      }
    }

    // Support ConstantVector in case we have an Undef in the top.
    if (isa<ConstantVector>(Operands[0]) ||
        isa<ConstantDataVector>(Operands[0])) {
      Constant *Op = cast<Constant>(Operands[0]);
      switch (F->getIntrinsicID()) {
      default: break;
      case Intrinsic::x86_sse_cvtss2si:
      case Intrinsic::x86_sse_cvtss2si64:
      case Intrinsic::x86_sse2_cvtsd2si:
      case Intrinsic::x86_sse2_cvtsd2si64:
        if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
          return ConstantFoldConvertToInt(FPOp->getValueAPF(),
                                          /*roundTowardZero=*/false, Ty);
      case Intrinsic::x86_sse_cvttss2si:
      case Intrinsic::x86_sse_cvttss2si64:
      case Intrinsic::x86_sse2_cvttsd2si:
      case Intrinsic::x86_sse2_cvttsd2si64:
        if (ConstantFP *FPOp =
              dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
          return ConstantFoldConvertToInt(FPOp->getValueAPF(),
                                          /*roundTowardZero=*/true, Ty);
      }
    }

    if (isa<UndefValue>(Operands[0])) {
      if (F->getIntrinsicID() == Intrinsic::bswap)
        return Operands[0];
      return 0;
    }

    return 0;
  }
  if (Operands.size() == 2) {
    if (ConstantFP *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
      if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
        return 0;
      double Op1V;
      if (Ty->isFloatTy())
        Op1V = Op1->getValueAPF().convertToFloat();
      else if (Ty->isDoubleTy())
        Op1V = Op1->getValueAPF().convertToDouble();
      else {
        bool unused;
        APFloat APF = Op1->getValueAPF();
        APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven, &unused);
        Op1V = APF.convertToDouble();
      }

      if (ConstantFP *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
        if (Op2->getType() != Op1->getType())
          return 0;

        double Op2V;
        if (Ty->isFloatTy())
          Op2V = Op2->getValueAPF().convertToFloat();
        else if (Ty->isDoubleTy())
          Op2V = Op2->getValueAPF().convertToDouble();
        else {
          bool unused;
          APFloat APF = Op2->getValueAPF();
          APF.convert(APFloat::IEEEdouble, APFloat::rmNearestTiesToEven,
                      &unused);
          Op2V = APF.convertToDouble();
        }

        if (F->getIntrinsicID() == Intrinsic::pow) {
          return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
        }
        if (!TLI)
          return 0;
        if (Name == "pow" && TLI->has(LibFunc::pow))
          return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
        if (Name == "fmod" && TLI->has(LibFunc::fmod))
          return ConstantFoldBinaryFP(fmod, Op1V, Op2V, Ty);
        if (Name == "atan2" && TLI->has(LibFunc::atan2))
          return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
      } else if (ConstantInt *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
        if (F->getIntrinsicID() == Intrinsic::powi && Ty->isHalfTy())
          return ConstantFP::get(F->getContext(),
                                 APFloat((float)std::pow((float)Op1V,
                                                 (int)Op2C->getZExtValue())));
        if (F->getIntrinsicID() == Intrinsic::powi && Ty->isFloatTy())
          return ConstantFP::get(F->getContext(),
                                 APFloat((float)std::pow((float)Op1V,
                                                 (int)Op2C->getZExtValue())));
        if (F->getIntrinsicID() == Intrinsic::powi && Ty->isDoubleTy())
          return ConstantFP::get(F->getContext(),
                                 APFloat((double)std::pow((double)Op1V,
                                                 (int)Op2C->getZExtValue())));
      }
      return 0;
    }

    if (ConstantInt *Op1 = dyn_cast<ConstantInt>(Operands[0])) {
      if (ConstantInt *Op2 = dyn_cast<ConstantInt>(Operands[1])) {
        switch (F->getIntrinsicID()) {
        default: break;
        case Intrinsic::sadd_with_overflow:
        case Intrinsic::uadd_with_overflow:
        case Intrinsic::ssub_with_overflow:
        case Intrinsic::usub_with_overflow:
        case Intrinsic::smul_with_overflow:
        case Intrinsic::umul_with_overflow: {
          APInt Res;
          bool Overflow;
          switch (F->getIntrinsicID()) {
          default: llvm_unreachable("Invalid case");
          case Intrinsic::sadd_with_overflow:
            Res = Op1->getValue().sadd_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::uadd_with_overflow:
            Res = Op1->getValue().uadd_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::ssub_with_overflow:
            Res = Op1->getValue().ssub_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::usub_with_overflow:
            Res = Op1->getValue().usub_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::smul_with_overflow:
            Res = Op1->getValue().smul_ov(Op2->getValue(), Overflow);
            break;
          case Intrinsic::umul_with_overflow:
            Res = Op1->getValue().umul_ov(Op2->getValue(), Overflow);
            break;
          }
          Constant *Ops[] = {
            ConstantInt::get(F->getContext(), Res),
            ConstantInt::get(Type::getInt1Ty(F->getContext()), Overflow)
          };
          return ConstantStruct::get(cast<StructType>(F->getReturnType()), Ops);
        }
        case Intrinsic::cttz:
          if (Op2->isOne() && Op1->isZero()) // cttz(0, 1) is undef.
            return UndefValue::get(Ty);
          return ConstantInt::get(Ty, Op1->getValue().countTrailingZeros());
        case Intrinsic::ctlz:
          if (Op2->isOne() && Op1->isZero()) // ctlz(0, 1) is undef.
            return UndefValue::get(Ty);
          return ConstantInt::get(Ty, Op1->getValue().countLeadingZeros());