//===- InstructionCombining.cpp - Combine multiple instructions ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// InstructionCombining - Combine instructions to form fewer, simple
// instructions.  This pass does not modify the CFG.  This pass is where
// algebraic simplification happens.
//
// This pass combines things like:
//    %Y = add i32 %X, 1
//    %Z = add i32 %Y, 1
// into:
//    %Z = add i32 %X, 2
//
// This is a simple worklist driven algorithm.
//
// This pass guarantees that the following canonicalizations are performed on
// the program:
//    1. If a binary operator has a constant operand, it is moved to the RHS
//    2. Bitwise operators with constant operands are always grouped so that
//       shifts are performed first, then or's, then and's, then xor's.
//    3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
//    4. All cmp instructions on boolean values are replaced with logical ops
//    5. add X, X is represented as (X*2) => (X << 1)
//    6. Multiplies with a power-of-two constant argument are transformed into
//       shifts.
//   ... etc.
//
//===----------------------------------------------------------------------===//
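// Illustrative examples of two of the canonicalizations listed above (added
// for clarity, not part of the original header):
//    #1:  %r = add i32 7, %x        becomes   %r = add i32 %x, 7
//    #6:  %r = mul i32 %x, 8        becomes   %r = shl i32 %x, 3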
#define DEBUG_TYPE "instcombine"
#include "llvm/Transforms/Scalar.h"
#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <climits>
using namespace llvm;
using namespace llvm::PatternMatch;
STATISTIC(NumCombined , "Number of insts combined");
STATISTIC(NumConstProp, "Number of constant folds");
STATISTIC(NumDeadInst , "Number of dead inst eliminated");
STATISTIC(NumDeadStore, "Number of dead stores eliminated");
STATISTIC(NumSunkInst , "Number of instructions sunk");
char InstCombiner::ID = 0;
static RegisterPass<InstCombiner>
X("instcombine", "Combine redundant instructions");

void InstCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addPreservedID(LCSSAID);
  AU.setPreservesCFG();
}
// getComplexity:  Assign a complexity or rank value to LLVM Values...
//   0 -> undef, 1 -> Const, 2 -> Other, 3 -> Arg, 3 -> Unary, 4 -> OtherInst
static unsigned getComplexity(Value *V) {
  if (isa<Instruction>(V)) {
    if (BinaryOperator::isNeg(V) ||
        BinaryOperator::isFNeg(V) ||
        BinaryOperator::isNot(V))
      return 3;
    return 4;
  }
  if (isa<Argument>(V)) return 3;
  return isa<Constant>(V) ? (isa<UndefValue>(V) ? 0 : 1) : 2;
}
// isOnlyUse - Return true if this instruction will be deleted if we stop using
// it.
static bool isOnlyUse(Value *V) {
  return V->hasOneUse() || isa<Constant>(V);
}
// getPromotedType - Return the specified type promoted as it would be to pass
// through a va_arg area...
static const Type *getPromotedType(const Type *Ty) {
  if (const IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}
/// ShouldChangeType - Return true if it is desirable to convert a computation
/// from 'From' to 'To'.  We don't want to convert from a legal to an illegal
/// type for example, or from a smaller to a larger illegal type.
static bool ShouldChangeType(const Type *From, const Type *To,
                             const TargetData *TD) {
  assert(isa<IntegerType>(From) && isa<IntegerType>(To));

  // If we don't have TD, we don't know if the source/dest are legal.
  if (!TD) return false;

  unsigned FromWidth = From->getPrimitiveSizeInBits();
  unsigned ToWidth = To->getPrimitiveSizeInBits();
  bool FromLegal = TD->isLegalInteger(FromWidth);
  bool ToLegal = TD->isLegalInteger(ToWidth);

  // If this is a legal integer from type, and the result would be an illegal
  // type, don't do the transformation.
  if (FromLegal && !ToLegal)
    return false;

  // Otherwise, if both are illegal, do not increase the size of the result. We
  // do allow things like i160 -> i64, but not i64 -> i160.
  if (!FromLegal && !ToLegal && ToWidth > FromWidth)
    return false;

  return true;
}
/// getBitCastOperand - If the specified operand is a CastInst, a constant
/// expression bitcast, or a GetElementPtrInst with all zero indices, return
/// the operand value, otherwise return null.
static Value *getBitCastOperand(Value *V) {
  if (Operator *O = dyn_cast<Operator>(V)) {
    if (O->getOpcode() == Instruction::BitCast)
      return O->getOperand(0);
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V))
      if (GEP->hasAllZeroIndices())
        return GEP->getPointerOperand();
  }
  return 0;
}
/// This function is a wrapper around CastInst::isEliminableCastPair. It
/// simply extracts arguments and returns what that function returns.
static Instruction::CastOps
isEliminableCastPair(
  const CastInst *CI, ///< The first cast instruction
  unsigned opcode,    ///< The opcode of the second cast instruction
  const Type *DstTy,  ///< The target type for the second cast instruction
  TargetData *TD      ///< The target data for pointer size
) {
  const Type *SrcTy = CI->getOperand(0)->getType();   // A from above
  const Type *MidTy = CI->getType();                  // B from above

  // Get the opcodes of the two Cast instructions.
  Instruction::CastOps firstOp = Instruction::CastOps(CI->getOpcode());
  Instruction::CastOps secondOp = Instruction::CastOps(opcode);

  unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy,
                                                DstTy,
                                  TD ? TD->getIntPtrType(CI->getContext()) : 0);

  // We don't want to form an inttoptr or ptrtoint that converts to an integer
  // type that differs from the pointer size.
  if ((Res == Instruction::IntToPtr &&
          (!TD || SrcTy != TD->getIntPtrType(CI->getContext()))) ||
      (Res == Instruction::PtrToInt &&
          (!TD || DstTy != TD->getIntPtrType(CI->getContext()))))
    Res = 0;

  return Instruction::CastOps(Res);
}
/// ValueRequiresCast - Return true if the cast from "V to Ty" actually results
/// in any code being generated.  It does not require codegen if V is simple
/// enough or if the cast can be folded into other casts.
static bool ValueRequiresCast(Instruction::CastOps opcode, const Value *V,
                              const Type *Ty, TargetData *TD) {
  if (V->getType() == Ty || isa<Constant>(V)) return false;

  // If this is another cast that can be eliminated, it isn't codegen either.
  if (const CastInst *CI = dyn_cast<CastInst>(V))
    if (isEliminableCastPair(CI, opcode, Ty, TD))
      return false;
  return true;
}
// SimplifyCommutative - This performs a few simplifications for commutative
// operators:
//
//  1. Order operands such that they are listed from right (least complex) to
//     left (most complex).  This puts constants before unary operators before
//     binary operators.
//
//  2. Transform: (op (op V, C1), C2) ==> (op V, (op C1, C2))
//  3. Transform: (op (op V1, C1), (op V2, C2)) ==> (op (op V1, V2), (op C1,C2))
//
bool InstCombiner::SimplifyCommutative(BinaryOperator &I) {
  bool Changed = false;
  if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1)))
    Changed = !I.swapOperands();

  if (!I.isAssociative()) return Changed;

  Instruction::BinaryOps Opcode = I.getOpcode();
  if (BinaryOperator *Op = dyn_cast<BinaryOperator>(I.getOperand(0)))
    if (Op->getOpcode() == Opcode && isa<Constant>(Op->getOperand(1))) {
      if (isa<Constant>(I.getOperand(1))) {
        Constant *Folded = ConstantExpr::get(I.getOpcode(),
                                             cast<Constant>(I.getOperand(1)),
                                             cast<Constant>(Op->getOperand(1)));
        I.setOperand(0, Op->getOperand(0));
        I.setOperand(1, Folded);
        return true;
      } else if (BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1)))
        if (Op1->getOpcode() == Opcode && isa<Constant>(Op1->getOperand(1)) &&
            isOnlyUse(Op) && isOnlyUse(Op1)) {
          Constant *C1 = cast<Constant>(Op->getOperand(1));
          Constant *C2 = cast<Constant>(Op1->getOperand(1));

          // Fold (op (op V1, C1), (op V2, C2)) ==> (op (op V1, V2), (op C1,C2))
          Constant *Folded = ConstantExpr::get(I.getOpcode(), C1, C2);
          Instruction *New = BinaryOperator::Create(Opcode, Op->getOperand(0),
                                                    Op1->getOperand(0),
                                                    Op1->getName(), &I);
          Worklist.Add(New);
          I.setOperand(0, New);
          I.setOperand(1, Folded);
          return true;
        }
    }
  return Changed;
}
// dyn_castNegVal - Given a 'sub' instruction, return the RHS of the instruction
// if the LHS is a constant zero (which is the 'negate' form).
//
static inline Value *dyn_castNegVal(Value *V) {
  if (BinaryOperator::isNeg(V))
    return BinaryOperator::getNegArgument(V);

  // Constants can be considered to be negated values if they can be folded.
  if (ConstantInt *C = dyn_cast<ConstantInt>(V))
    return ConstantExpr::getNeg(C);

  if (ConstantVector *C = dyn_cast<ConstantVector>(V))
    if (C->getType()->getElementType()->isInteger())
      return ConstantExpr::getNeg(C);

  return 0;
}
// dyn_castFNegVal - Given an 'fsub' instruction, return the RHS of the
// instruction if the LHS is a constant negative zero (which is the 'negate'
// form).
//
static inline Value *dyn_castFNegVal(Value *V) {
  if (BinaryOperator::isFNeg(V))
    return BinaryOperator::getFNegArgument(V);

  // Constants can be considered to be negated values if they can be folded.
  if (ConstantFP *C = dyn_cast<ConstantFP>(V))
    return ConstantExpr::getFNeg(C);

  if (ConstantVector *C = dyn_cast<ConstantVector>(V))
    if (C->getType()->getElementType()->isFloatingPoint())
      return ConstantExpr::getFNeg(C);

  return 0;
}
/// MatchSelectPattern - Pattern match integer [SU]MIN, [SU]MAX, and ABS idioms,
/// returning the kind and providing the out parameter results if we
/// successfully match.
static SelectPatternFlavor
MatchSelectPattern(Value *V, Value *&LHS, Value *&RHS) {
  SelectInst *SI = dyn_cast<SelectInst>(V);
  if (SI == 0) return SPF_UNKNOWN;

  ICmpInst *ICI = dyn_cast<ICmpInst>(SI->getCondition());
  if (ICI == 0) return SPF_UNKNOWN;

  LHS = ICI->getOperand(0);
  RHS = ICI->getOperand(1);

  // (icmp X, Y) ? X : Y
  if (SI->getTrueValue() == ICI->getOperand(0) &&
      SI->getFalseValue() == ICI->getOperand(1)) {
    switch (ICI->getPredicate()) {
    default: return SPF_UNKNOWN; // Equality.
    case ICmpInst::ICMP_UGT:
    case ICmpInst::ICMP_UGE: return SPF_UMAX;
    case ICmpInst::ICMP_SGT:
    case ICmpInst::ICMP_SGE: return SPF_SMAX;
    case ICmpInst::ICMP_ULT:
    case ICmpInst::ICMP_ULE: return SPF_UMIN;
    case ICmpInst::ICMP_SLT:
    case ICmpInst::ICMP_SLE: return SPF_SMIN;
    }
  }

  // (icmp X, Y) ? Y : X
  if (SI->getTrueValue() == ICI->getOperand(1) &&
      SI->getFalseValue() == ICI->getOperand(0)) {
    switch (ICI->getPredicate()) {
    default: return SPF_UNKNOWN; // Equality.
    case ICmpInst::ICMP_UGT:
    case ICmpInst::ICMP_UGE: return SPF_UMIN;
    case ICmpInst::ICMP_SGT:
    case ICmpInst::ICMP_SGE: return SPF_SMIN;
    case ICmpInst::ICMP_ULT:
    case ICmpInst::ICMP_ULE: return SPF_UMAX;
    case ICmpInst::ICMP_SLT:
    case ICmpInst::ICMP_SLE: return SPF_SMAX;
    }
  }

  // TODO: (X > 4) ? X : 5   -->  (X >= 5) ? X : 5  -->  MAX(X, 5)

  return SPF_UNKNOWN;
}
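// Illustrative example (not from the original source) of a select pattern that
// MatchSelectPattern classifies as SPF_SMAX:
//    %c = icmp sgt i32 %a, %b
//    %m = select i1 %c, i32 %a, i32 %b
// Here LHS is set to %a and RHS to %b.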
/// isFreeToInvert - Return true if the specified value is free to invert (apply
/// ~ to).  This happens in cases where the ~ can be eliminated.
static inline bool isFreeToInvert(Value *V) {
  // ~(~(X)) -> X.
  if (BinaryOperator::isNot(V))
    return true;

  // Constants can be considered to be not'ed values.
  if (isa<ConstantInt>(V))
    return true;

  // Compares can be inverted if they have a single use.
  if (CmpInst *CI = dyn_cast<CmpInst>(V))
    return CI->hasOneUse();

  return false;
}

static inline Value *dyn_castNotVal(Value *V) {
  // If this is not(not(x)) don't return that this is a not: we want the two
  // not's to be folded first.
  if (BinaryOperator::isNot(V)) {
    Value *Operand = BinaryOperator::getNotArgument(V);
    if (!isFreeToInvert(Operand))
      return Operand;
  }

  // Constants can be considered to be not'ed values...
  if (ConstantInt *C = dyn_cast<ConstantInt>(V))
    return ConstantInt::get(C->getType(), ~C->getValue());
  return 0;
}
// dyn_castFoldableMul - If this value is a multiply that can be folded into
// other computations (because it has a constant operand), return the
// non-constant operand of the multiply, and set CST to point to the multiplier.
// Otherwise, return null.
//
static inline Value *dyn_castFoldableMul(Value *V, ConstantInt *&CST) {
  if (V->hasOneUse() && V->getType()->isInteger())
    if (Instruction *I = dyn_cast<Instruction>(V)) {
      if (I->getOpcode() == Instruction::Mul)
        if ((CST = dyn_cast<ConstantInt>(I->getOperand(1))))
          return I->getOperand(0);
      if (I->getOpcode() == Instruction::Shl)
        if ((CST = dyn_cast<ConstantInt>(I->getOperand(1)))) {
          // The multiplier is really 1 << CST.
          uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
          uint32_t CSTVal = CST->getLimitedValue(BitWidth);
          CST = ConstantInt::get(V->getType()->getContext(),
                                 APInt(BitWidth, 1).shl(CSTVal));
          return I->getOperand(0);
        }
    }
  return 0;
}
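// Illustrative example (added for clarity): for a single-use value V defined
// as "mul i32 %x, 8" or as "shl i32 %x, 3", dyn_castFoldableMul returns %x and
// sets CST to the constant multiplier 8 in both cases.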
/// AddOne - Add one to a ConstantInt.
static Constant *AddOne(Constant *C) {
  return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1));
}

/// SubOne - Subtract one from a ConstantInt.
static Constant *SubOne(ConstantInt *C) {
  return ConstantExpr::getSub(C, ConstantInt::get(C->getType(), 1));
}
/// MultiplyOverflows - True if the multiply cannot be expressed in an int
/// this size.
static bool MultiplyOverflows(ConstantInt *C1, ConstantInt *C2, bool sign) {
  uint32_t W = C1->getBitWidth();
  APInt LHSExt = C1->getValue(), RHSExt = C2->getValue();
  if (sign) {
    LHSExt.sext(W * 2);
    RHSExt.sext(W * 2);
  } else {
    LHSExt.zext(W * 2);
    RHSExt.zext(W * 2);
  }

  APInt MulExt = LHSExt * RHSExt;

  if (!sign)
    return MulExt.ugt(APInt::getLowBitsSet(W * 2, W));

  APInt Min = APInt::getSignedMinValue(W).sext(W * 2);
  APInt Max = APInt::getSignedMaxValue(W).sext(W * 2);
  return MulExt.slt(Min) || MulExt.sgt(Max);
}
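// Illustrative check (added for clarity): for i8 with sign == true, C1 = 16 and
// C2 = 16 give MulExt = 256 in the doubled bit width, which exceeds the signed
// i8 maximum of 127, so MultiplyOverflows returns true.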
// ComputeSignedMinMaxValuesFromKnownBits - Given a signed integer type and a
// set of known zero and one bits, compute the maximum and minimum values that
// could have the specified known zero and known one bits, returning them in
// Min and Max.
static void ComputeSignedMinMaxValuesFromKnownBits(const APInt& KnownZero,
                                                   const APInt& KnownOne,
                                                   APInt& Min, APInt& Max) {
  assert(KnownZero.getBitWidth() == KnownOne.getBitWidth() &&
         KnownZero.getBitWidth() == Min.getBitWidth() &&
         KnownZero.getBitWidth() == Max.getBitWidth() &&
         "KnownZero, KnownOne and Min, Max must have equal bitwidth.");
  APInt UnknownBits = ~(KnownZero|KnownOne);

  // The minimum value is when all unknown bits are zeros, EXCEPT for the sign
  // bit if it is unknown.
  Min = KnownOne;
  Max = KnownOne|UnknownBits;

  if (UnknownBits.isNegative()) { // Sign bit is unknown
    Min.set(Min.getBitWidth()-1);
    Max.clear(Max.getBitWidth()-1);
  }
}

// ComputeUnsignedMinMaxValuesFromKnownBits - Given an unsigned integer type and
// a set of known zero and one bits, compute the maximum and minimum values that
// could have the specified known zero and known one bits, returning them in
// Min and Max.
static void ComputeUnsignedMinMaxValuesFromKnownBits(const APInt &KnownZero,
                                                     const APInt &KnownOne,
                                                     APInt &Min, APInt &Max) {
  assert(KnownZero.getBitWidth() == KnownOne.getBitWidth() &&
         KnownZero.getBitWidth() == Min.getBitWidth() &&
         KnownZero.getBitWidth() == Max.getBitWidth() &&
         "Ty, KnownZero, KnownOne and Min, Max must have equal bitwidth.");
  APInt UnknownBits = ~(KnownZero|KnownOne);

  // The minimum value is when the unknown bits are all zeros.
  Min = KnownOne;
  // The maximum value is when the unknown bits are all ones.
  Max = KnownOne|UnknownBits;
}
/// AssociativeOpt - Perform an optimization on an associative operator.  This
/// function is designed to check a chain of associative operators for a
/// potential to apply a certain optimization.  Since the optimization may be
/// applicable if the expression was reassociated, this checks the chain, then
/// reassociates the expression as necessary to expose the optimization
/// opportunity.  This makes use of a special Functor, which must define
/// 'shouldApply' and 'apply' methods.
///
template<typename Functor>
static Instruction *AssociativeOpt(BinaryOperator &Root, const Functor &F) {
  unsigned Opcode = Root.getOpcode();
  Value *LHS = Root.getOperand(0);

  // Quick check, see if the immediate LHS matches...
  if (F.shouldApply(LHS))
    return F.apply(Root);

  // Otherwise, if the LHS is not of the same opcode as the root, return.
  Instruction *LHSI = dyn_cast<Instruction>(LHS);
  while (LHSI && LHSI->getOpcode() == Opcode && LHSI->hasOneUse()) {
    // Should we apply this transform to the RHS?
    bool ShouldApply = F.shouldApply(LHSI->getOperand(1));

    // If not to the RHS, check to see if we should apply to the LHS...
    if (!ShouldApply && F.shouldApply(LHSI->getOperand(0))) {
      cast<BinaryOperator>(LHSI)->swapOperands();   // Make the LHS the RHS
      ShouldApply = true;
    }

    // If the functor wants to apply the optimization to the RHS of LHSI,
    // reassociate the expression from ((? op A) op B) to (? op (A op B)).
    if (ShouldApply) {
      // Now all of the instructions are in the current basic block, go ahead
      // and perform the reassociation.
      Instruction *TmpLHSI = cast<Instruction>(Root.getOperand(0));

      // First move the selected RHS to the LHS of the root...
      Root.setOperand(0, LHSI->getOperand(1));

      // Make what used to be the LHS of the root be the user of the root...
      Value *ExtraOperand = TmpLHSI->getOperand(1);
      if (&Root == TmpLHSI) {
        Root.replaceAllUsesWith(Constant::getNullValue(TmpLHSI->getType()));
        return 0;
      }
      Root.replaceAllUsesWith(TmpLHSI);          // Users now use TmpLHSI.
      TmpLHSI->setOperand(1, &Root);             // TmpLHSI now uses the root.
      BasicBlock::iterator ARI = &Root; ++ARI;
      TmpLHSI->moveBefore(ARI);                  // Move TmpLHSI to after Root.
      ARI = Root;

      // Now propagate the ExtraOperand down the chain of instructions until we
      // get to LHSI.
      while (TmpLHSI != LHSI) {
        Instruction *NextLHSI = cast<Instruction>(TmpLHSI->getOperand(0));
        // Move the instruction to immediately before the chain we are
        // constructing to avoid breaking dominance properties.
        NextLHSI->moveBefore(ARI);
        ARI = NextLHSI;

        Value *NextOp = NextLHSI->getOperand(1);
        NextLHSI->setOperand(1, ExtraOperand);
        TmpLHSI = NextLHSI;
        ExtraOperand = NextOp;
      }

      // Now that the instructions are reassociated, have the functor perform
      // the transformation...
      return F.apply(Root);
    }

    LHSI = dyn_cast<Instruction>(LHSI->getOperand(0));
  }
  return 0;
}
namespace {

// AddRHS - Implements: X + X --> X << 1
struct AddRHS {
  Value *RHS;
  explicit AddRHS(Value *rhs) : RHS(rhs) {}
  bool shouldApply(Value *LHS) const { return LHS == RHS; }
  Instruction *apply(BinaryOperator &Add) const {
    return BinaryOperator::CreateShl(Add.getOperand(0),
                                     ConstantInt::get(Add.getType(), 1));
  }
};

// AddMaskingAnd - Implements (A & C1)+(B & C2) --> (A & C1)|(B & C2)
//                 iff C1&C2 == 0
struct AddMaskingAnd {
  Constant *C2;
  explicit AddMaskingAnd(Constant *c) : C2(c) {}
  bool shouldApply(Value *LHS) const {
    ConstantInt *C1;
    return match(LHS, m_And(m_Value(), m_ConstantInt(C1))) &&
           ConstantExpr::getAnd(C1, C2)->isNullValue();
  }
  Instruction *apply(BinaryOperator &Add) const {
    return BinaryOperator::CreateOr(Add.getOperand(0), Add.getOperand(1));
  }
};

} // end anonymous namespace
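// Illustrative example (added for clarity): with C1 = 0xF0 and C2 = 0x0F the
// two masks share no set bits, so (A & 0xF0) + (B & 0x0F) computes the same
// value as (A & 0xF0) | (B & 0x0F), which is what apply() emits.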
static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO,
                                             InstCombiner *IC) {
  if (CastInst *CI = dyn_cast<CastInst>(&I))
    return IC->Builder->CreateCast(CI->getOpcode(), SO, I.getType());

  // Figure out if the constant is the left or the right argument.
  bool ConstIsRHS = isa<Constant>(I.getOperand(1));
  Constant *ConstOperand = cast<Constant>(I.getOperand(ConstIsRHS));

  if (Constant *SOC = dyn_cast<Constant>(SO)) {
    if (ConstIsRHS)
      return ConstantExpr::get(I.getOpcode(), SOC, ConstOperand);
    return ConstantExpr::get(I.getOpcode(), ConstOperand, SOC);
  }

  Value *Op0 = SO, *Op1 = ConstOperand;
  if (!ConstIsRHS)
    std::swap(Op0, Op1);

  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I))
    return IC->Builder->CreateBinOp(BO->getOpcode(), Op0, Op1,
                                    SO->getName()+".op");
  if (ICmpInst *CI = dyn_cast<ICmpInst>(&I))
    return IC->Builder->CreateICmp(CI->getPredicate(), Op0, Op1,
                                   SO->getName()+".cmp");
  if (FCmpInst *CI = dyn_cast<FCmpInst>(&I))
    return IC->Builder->CreateFCmp(CI->getPredicate(), Op0, Op1,
                                   SO->getName()+".cmp");
  llvm_unreachable("Unknown binary instruction type!");
}
// FoldOpIntoSelect - Given an instruction with a select as one operand and a
// constant as the other operand, try to fold the binary operator into the
// select arguments.  This also works for Cast instructions, which obviously do
// not have a second operand.
static Instruction *FoldOpIntoSelect(Instruction &Op, SelectInst *SI,
                                     InstCombiner *IC) {
  // Don't modify shared select instructions.
  if (!SI->hasOneUse()) return 0;
  Value *TV = SI->getOperand(1);
  Value *FV = SI->getOperand(2);

  if (isa<Constant>(TV) || isa<Constant>(FV)) {
    // Bool selects with constant operands can be folded to logical ops.
    if (SI->getType() == Type::getInt1Ty(SI->getContext())) return 0;

    Value *SelectTrueVal = FoldOperationIntoSelectOperand(Op, TV, IC);
    Value *SelectFalseVal = FoldOperationIntoSelectOperand(Op, FV, IC);

    return SelectInst::Create(SI->getCondition(), SelectTrueVal,
                              SelectFalseVal);
  }
  return 0;
}
/// FoldOpIntoPhi - Given a binary operator, cast instruction, or select which
/// has a PHI node as operand #0, see if we can fold the instruction into the
/// PHI (which is only possible if all operands to the PHI are constants).
///
/// If AllowAggressive is true, FoldOpIntoPhi will allow certain transforms
/// that would normally be unprofitable because they strongly encourage jump
/// threading.
Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I,
                                         bool AllowAggressive) {
  AllowAggressive = false;
  PHINode *PN = cast<PHINode>(I.getOperand(0));
  unsigned NumPHIValues = PN->getNumIncomingValues();
  if (NumPHIValues == 0 ||
      // We normally only transform phis with a single use, unless we're trying
      // hard to make jump threading happen.
      (!PN->hasOneUse() && !AllowAggressive))
    return 0;

  // Check to see if all of the operands of the PHI are simple constants
  // (constantint/constantfp/undef).  If there is one non-constant value,
  // remember the BB it is in.  If there is more than one or if *it* is a PHI,
  // bail out.  We don't do arbitrary constant expressions here because moving
  // their computation can be expensive without a cost model.
  BasicBlock *NonConstBB = 0;
  for (unsigned i = 0; i != NumPHIValues; ++i)
    if (!isa<Constant>(PN->getIncomingValue(i)) ||
        isa<ConstantExpr>(PN->getIncomingValue(i))) {
      if (NonConstBB) return 0;  // More than one non-const value.
      if (isa<PHINode>(PN->getIncomingValue(i))) return 0;  // Itself a phi.
      NonConstBB = PN->getIncomingBlock(i);

      // If the incoming non-constant value is in I's block, we have an
      // infinite loop.
      if (NonConstBB == I.getParent())
        return 0;
    }

  // If there is exactly one non-constant value, we can insert a copy of the
  // operation in that block.  However, if this is a critical edge, we would be
  // inserting the computation on some other paths (e.g. inside a loop).  Only
  // do this if the pred block is unconditionally branching into the phi block.
  if (NonConstBB != 0 && !AllowAggressive) {
    BranchInst *BI = dyn_cast<BranchInst>(NonConstBB->getTerminator());
    if (!BI || !BI->isUnconditional()) return 0;
  }

  // Okay, we can do the transformation: create the new PHI node.
  PHINode *NewPN = PHINode::Create(I.getType(), "");
  NewPN->reserveOperandSpace(PN->getNumOperands()/2);
  InsertNewInstBefore(NewPN, *PN);
  NewPN->takeName(PN);

  // Next, add all of the operands to the PHI.
  if (SelectInst *SI = dyn_cast<SelectInst>(&I)) {
    // We only currently try to fold the condition of a select when it is a phi,
    // not the true/false values.
    Value *TrueV = SI->getTrueValue();
    Value *FalseV = SI->getFalseValue();
    BasicBlock *PhiTransBB = PN->getParent();
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      BasicBlock *ThisBB = PN->getIncomingBlock(i);
      Value *TrueVInPred = TrueV->DoPHITranslation(PhiTransBB, ThisBB);
      Value *FalseVInPred = FalseV->DoPHITranslation(PhiTransBB, ThisBB);
      Value *InV = 0;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) {
        InV = InC->isNullValue() ? FalseVInPred : TrueVInPred;
      } else {
        assert(PN->getIncomingBlock(i) == NonConstBB);
        InV = SelectInst::Create(PN->getIncomingValue(i), TrueVInPred,
                                 FalseVInPred,
                                 "phitmp", NonConstBB->getTerminator());
        Worklist.Add(cast<Instruction>(InV));
      }
      NewPN->addIncoming(InV, ThisBB);
    }
  } else if (I.getNumOperands() == 2) {
    Constant *C = cast<Constant>(I.getOperand(1));
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV = 0;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) {
        if (CmpInst *CI = dyn_cast<CmpInst>(&I))
          InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C);
        else
          InV = ConstantExpr::get(I.getOpcode(), InC, C);
      } else {
        assert(PN->getIncomingBlock(i) == NonConstBB);
        if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I))
          InV = BinaryOperator::Create(BO->getOpcode(),
                                       PN->getIncomingValue(i), C, "phitmp",
                                       NonConstBB->getTerminator());
        else if (CmpInst *CI = dyn_cast<CmpInst>(&I))
          InV = CmpInst::Create(CI->getOpcode(),
                                CI->getPredicate(),
                                PN->getIncomingValue(i), C, "phitmp",
                                NonConstBB->getTerminator());
        else
          llvm_unreachable("Unknown binop!");

        Worklist.Add(cast<Instruction>(InV));
      }
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  } else {
    CastInst *CI = cast<CastInst>(&I);
    const Type *RetTy = CI->getType();
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) {
        InV = ConstantExpr::getCast(CI->getOpcode(), InC, RetTy);
      } else {
        assert(PN->getIncomingBlock(i) == NonConstBB);
        InV = CastInst::Create(CI->getOpcode(), PN->getIncomingValue(i),
                               I.getType(), "phitmp",
                               NonConstBB->getTerminator());
        Worklist.Add(cast<Instruction>(InV));
      }
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  }
  return ReplaceInstUsesWith(I, NewPN);
}
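// Illustrative example (not from the original source) of FoldOpIntoPhi: given
//    %p = phi i32 [ 1, %bb1 ], [ 2, %bb2 ]
//    %r = add i32 %p, 4
// where %p has a single use, the add is folded into the incoming values,
// producing "%r = phi i32 [ 5, %bb1 ], [ 6, %bb2 ]".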
/// WillNotOverflowSignedAdd - Return true if we can prove that:
///    (sext (add LHS, RHS))  === (add (sext LHS), (sext RHS))
/// This basically requires proving that the add in the original type would not
/// overflow to change the sign bit or have a carry out.
bool InstCombiner::WillNotOverflowSignedAdd(Value *LHS, Value *RHS) {
  // There are different heuristics we can use for this.  Here are some simple
  // ones.

  // Add has the property that adding any two 2's complement numbers can only
  // have one carry bit which can change a sign.  As such, if LHS and RHS each
  // have at least two sign bits, we know that the addition of the two values
  // will sign extend fine.
  if (ComputeNumSignBits(LHS) > 1 && ComputeNumSignBits(RHS) > 1)
    return true;

  // If one of the operands only has one non-zero bit, and if the other operand
  // has a known-zero bit in a more significant place than it (not including the
  // sign bit) the ripple may go up to and fill the zero, but won't change the
  // sign.  For example, (X & ~4) + 1.

  // TODO: Implement.

  return false;
}
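// Illustrative instance of the sign-bit heuristic above (added for clarity):
// two i8 values that each have at least two sign bits lie in [-64, 63], so
// their sum lies in [-128, 126] and still fits in i8 without overflow; the
// sext of the add therefore equals the add of the sexts.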
Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
  bool Changed = SimplifyCommutative(I);
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);

  if (Value *V = SimplifyAddInst(LHS, RHS, I.hasNoSignedWrap(),
                                 I.hasNoUnsignedWrap(), TD))
    return ReplaceInstUsesWith(I, V);

  if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(RHSC)) {
      // X + (signbit) --> X ^ signbit
      const APInt& Val = CI->getValue();
      uint32_t BitWidth = Val.getBitWidth();
      if (Val == APInt::getSignBit(BitWidth))
        return BinaryOperator::CreateXor(LHS, RHS);

      // See if SimplifyDemandedBits can simplify this.  This handles stuff like
      // (X & 254)+1 -> (X&254)|1
      if (SimplifyDemandedInstructionBits(I))
        return &I;

      // zext(bool) + C -> bool ? C + 1 : C
      if (ZExtInst *ZI = dyn_cast<ZExtInst>(LHS))
        if (ZI->getSrcTy() == Type::getInt1Ty(I.getContext()))
          return SelectInst::Create(ZI->getOperand(0), AddOne(CI), CI);
    }

    if (isa<PHINode>(LHS))
      if (Instruction *NV = FoldOpIntoPhi(I))
        return NV;

    ConstantInt *XorRHS = 0;
    Value *XorLHS = 0;
    if (isa<ConstantInt>(RHSC) &&
        match(LHS, m_Xor(m_Value(XorLHS), m_ConstantInt(XorRHS)))) {
      uint32_t TySizeBits = I.getType()->getScalarSizeInBits();
      const APInt& RHSVal = cast<ConstantInt>(RHSC)->getValue();

      uint32_t Size = TySizeBits / 2;
      APInt C0080Val(APInt(TySizeBits, 1ULL).shl(Size - 1));
      APInt CFF80Val(-C0080Val);
      do {
        if (TySizeBits > Size) {
          // If we have ADD(XOR(AND(X, 0xFF), 0x80), 0xF..F80), it's a sext.
          // If we have ADD(XOR(AND(X, 0xFF), 0xF..F80), 0x80), it's a sext.
          if ((RHSVal == CFF80Val && XorRHS->getValue() == C0080Val) ||
              (RHSVal == C0080Val && XorRHS->getValue() == CFF80Val)) {
            // This is a sign extend if the top bits are known zero.
            if (!MaskedValueIsZero(XorLHS,
                   APInt::getHighBitsSet(TySizeBits, TySizeBits - Size)))
              Size = 0;  // Not a sign ext, but can't be any others either.
            break;
          }
        }
        Size >>= 1;
        C0080Val = APIntOps::lshr(C0080Val, Size);
        CFF80Val = APIntOps::ashr(CFF80Val, Size);
      } while (Size >= 8);

      // FIXME: This shouldn't be necessary. When the backends can handle types
      // with funny bit widths then this switch statement should be removed. It
      // is just here to get the size of the "middle" type back up to something
      // that the back ends can handle.
      const Type *MiddleType = 0;
      switch (Size) {
      default: break;
      case 32:
      case 16:
      case  8: MiddleType = IntegerType::get(I.getContext(), Size); break;
      }
      if (MiddleType) {
        Value *NewTrunc = Builder->CreateTrunc(XorLHS, MiddleType, "sext");
        return new SExtInst(NewTrunc, I.getType(), I.getName());
      }
    }
  }
  if (I.getType() == Type::getInt1Ty(I.getContext()))
    return BinaryOperator::CreateXor(LHS, RHS);

  // X + X --> X << 1
  if (I.getType()->isInteger()) {
    if (Instruction *Result = AssociativeOpt(I, AddRHS(RHS)))
      return Result;

    if (Instruction *RHSI = dyn_cast<Instruction>(RHS)) {
      if (RHSI->getOpcode() == Instruction::Sub)
        if (LHS == RHSI->getOperand(1))                   // A + (B - A) --> B
          return ReplaceInstUsesWith(I, RHSI->getOperand(0));
    }
    if (Instruction *LHSI = dyn_cast<Instruction>(LHS)) {
      if (LHSI->getOpcode() == Instruction::Sub)
        if (RHS == LHSI->getOperand(1))                   // (B - A) + A --> B
          return ReplaceInstUsesWith(I, LHSI->getOperand(0));
    }
  }

  // -A + -B  -->  -(A + B)
  if (Value *LHSV = dyn_castNegVal(LHS)) {
    if (LHS->getType()->isIntOrIntVector()) {
      if (Value *RHSV = dyn_castNegVal(RHS)) {
        Value *NewAdd = Builder->CreateAdd(LHSV, RHSV, "sum");
        return BinaryOperator::CreateNeg(NewAdd);
      }
    }

    return BinaryOperator::CreateSub(RHS, LHSV);
  }

  // A + -B  -->  A - B
  if (!isa<Constant>(RHS))
    if (Value *V = dyn_castNegVal(RHS))
      return BinaryOperator::CreateSub(LHS, V);

  ConstantInt *C2;
  if (Value *X = dyn_castFoldableMul(LHS, C2)) {
    if (X == RHS)   // X*C + X --> X * (C+1)
      return BinaryOperator::CreateMul(RHS, AddOne(C2));

    // X*C1 + X*C2 --> X * (C1+C2)
    ConstantInt *C1;
    if (X == dyn_castFoldableMul(RHS, C1))
      return BinaryOperator::CreateMul(X, ConstantExpr::getAdd(C1, C2));
  }

  // X + X*C --> X * (C+1)
  if (dyn_castFoldableMul(RHS, C2) == LHS)
    return BinaryOperator::CreateMul(LHS, AddOne(C2));

  // X + ~X --> -1   since   ~X = -X-1
  if (dyn_castNotVal(LHS) == RHS ||
      dyn_castNotVal(RHS) == LHS)
    return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));

  // (A & C1)+(B & C2) --> (A & C1)|(B & C2) iff C1&C2 == 0
  if (match(RHS, m_And(m_Value(), m_ConstantInt(C2))))
    if (Instruction *R = AssociativeOpt(I, AddMaskingAnd(C2)))
      return R;

  // A+B --> A|B iff A and B have no bits set in common.
  if (const IntegerType *IT = dyn_cast<IntegerType>(I.getType())) {
    APInt Mask = APInt::getAllOnesValue(IT->getBitWidth());
    APInt LHSKnownOne(IT->getBitWidth(), 0);
    APInt LHSKnownZero(IT->getBitWidth(), 0);
    ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
    if (LHSKnownZero != 0) {
      APInt RHSKnownOne(IT->getBitWidth(), 0);
      APInt RHSKnownZero(IT->getBitWidth(), 0);
      ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);

      // No bits in common -> bitwise or.
      if ((LHSKnownZero|RHSKnownZero).isAllOnesValue())
        return BinaryOperator::CreateOr(LHS, RHS);
    }
  }

  // W*X + Y*Z --> W * (X+Z)  iff W == Y
  if (I.getType()->isIntOrIntVector()) {
    Value *W, *X, *Y, *Z;
    if (match(LHS, m_Mul(m_Value(W), m_Value(X))) &&
        match(RHS, m_Mul(m_Value(Y), m_Value(Z)))) {
      if (W != Y) {
        if (W == Z) {
          std::swap(Y, Z);
        } else if (Y == X) {
          std::swap(W, X);
        } else if (X == Z) {
          std::swap(Y, Z);
          std::swap(W, X);
        }
      }

      if (W == Y) {
        Value *NewAdd = Builder->CreateAdd(X, Z, LHS->getName());
        return BinaryOperator::CreateMul(W, NewAdd);
      }
    }
  }
  if (ConstantInt *CRHS = dyn_cast<ConstantInt>(RHS)) {
    Value *X = 0;
    if (match(LHS, m_Not(m_Value(X))))    // ~X + C --> (C-1) - X
      return BinaryOperator::CreateSub(SubOne(CRHS), X);

    // (X & FF00) + xx00  -> (X+xx00) & FF00
    if (LHS->hasOneUse() &&
        match(LHS, m_And(m_Value(X), m_ConstantInt(C2)))) {
      Constant *Anded = ConstantExpr::getAnd(CRHS, C2);
      if (Anded == CRHS) {
        // See if all bits from the first bit set in the Add RHS up are included
        // in the mask.  First, get the rightmost bit.
        const APInt& AddRHSV = CRHS->getValue();

        // Form a mask of all bits from the lowest bit added through the top.
        APInt AddRHSHighBits(~((AddRHSV & -AddRHSV)-1));

        // See if the and mask includes all of these bits.
        APInt AddRHSHighBitsAnd(AddRHSHighBits & C2->getValue());

        if (AddRHSHighBits == AddRHSHighBitsAnd) {
          // Okay, the xform is safe.  Insert the new add pronto.
          Value *NewAdd = Builder->CreateAdd(X, CRHS, LHS->getName());
          return BinaryOperator::CreateAnd(NewAdd, C2);
        }
      }
    }
  }

  // Try to fold constant add into select arguments.
  if (SelectInst *SI = dyn_cast<SelectInst>(LHS))
    if (Instruction *R = FoldOpIntoSelect(I, SI, this))
      return R;

  // add (select X 0 (sub n A)) A  -->  select X A n
  {
    SelectInst *SI = dyn_cast<SelectInst>(LHS);
    Value *A = RHS;
    if (!SI) {
      SI = dyn_cast<SelectInst>(RHS);
      A = LHS;
    }
    if (SI && SI->hasOneUse()) {
      Value *TV = SI->getTrueValue();
      Value *FV = SI->getFalseValue();
      Value *N;

      // Can we fold the add into the argument of the select?
      // We check both true and false select arguments for a matching subtract.
      if (match(FV, m_Zero()) &&
          match(TV, m_Sub(m_Value(N), m_Specific(A))))
        // Fold the add into the true select value.
        return SelectInst::Create(SI->getCondition(), N, A);
      if (match(TV, m_Zero()) &&
          match(FV, m_Sub(m_Value(N), m_Specific(A))))
        // Fold the add into the false select value.
        return SelectInst::Create(SI->getCondition(), A, N);
    }
  }
  // Check for (add (sext x), y), see if we can merge this into an
  // integer add followed by a sext.
  if (SExtInst *LHSConv = dyn_cast<SExtInst>(LHS)) {
    // (add (sext x), cst) --> (sext (add x, cst'))
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS)) {
      Constant *CI =
        ConstantExpr::getTrunc(RHSC, LHSConv->getOperand(0)->getType());
      if (LHSConv->hasOneUse() &&
          ConstantExpr::getSExt(CI, I.getType()) == RHSC &&
          WillNotOverflowSignedAdd(LHSConv->getOperand(0), CI)) {
        // Insert the new, smaller add.
        Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0),
                                              CI, "addconv");
        return new SExtInst(NewAdd, I.getType());
      }
    }

    // (add (sext x), (sext y)) --> (sext (add int x, y))
    if (SExtInst *RHSConv = dyn_cast<SExtInst>(RHS)) {
      // Only do this if x/y have the same type, if at least one of them has a
      // single use (so we don't increase the number of sexts), and if the
      // integer add will not overflow.
      if (LHSConv->getOperand(0)->getType()==RHSConv->getOperand(0)->getType()&&
          (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
          WillNotOverflowSignedAdd(LHSConv->getOperand(0),
                                   RHSConv->getOperand(0))) {
        // Insert the new integer add.
        Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0),
                                              RHSConv->getOperand(0), "addconv");
        return new SExtInst(NewAdd, I.getType());
      }
    }
  }

  return Changed ? &I : 0;
}
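// Illustrative example (not from the original source) of the zext(bool) + C
// fold performed in visitAdd above:
//    %z = zext i1 %b to i32
//    %r = add i32 %z, 40
// becomes "%r = select i1 %b, i32 41, i32 40".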
Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
  bool Changed = SimplifyCommutative(I);
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);

  if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
    // X + 0 --> X
    if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHSC)) {
      if (CFP->isExactlyValue(ConstantFP::getNegativeZero
                              (I.getType())->getValueAPF()))
        return ReplaceInstUsesWith(I, LHS);
    }

    if (isa<PHINode>(LHS))
      if (Instruction *NV = FoldOpIntoPhi(I))
        return NV;
  }

  // -A + B  -->  B - A
  // -A + -B  -->  -(A + B)
  if (Value *LHSV = dyn_castFNegVal(LHS))
    return BinaryOperator::CreateFSub(RHS, LHSV);

  // A + -B  -->  A - B
  if (!isa<Constant>(RHS))
    if (Value *V = dyn_castFNegVal(RHS))
      return BinaryOperator::CreateFSub(LHS, V);

  // Check for X+0.0.  Simplify it to X if we know X is not -0.0.
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHS))
    if (CFP->getValueAPF().isPosZero() && CannotBeNegativeZero(LHS))
      return ReplaceInstUsesWith(I, LHS);

  // Check for (add double (sitofp x), y), see if we can merge this into an
  // integer add followed by a promotion.
  if (SIToFPInst *LHSConv = dyn_cast<SIToFPInst>(LHS)) {
    // (add double (sitofp x), fpcst) --> (sitofp (add int x, intcst))
    // ... if the constant fits in the integer value.  This is useful for things
    // like (double)(x & 1234) + 4.0 -> (double)((X & 1234)+4) which no longer
    // requires a constant pool load, and generally allows the add to be better
    // instcombined.
    if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHS)) {
      Constant *CI =
        ConstantExpr::getFPToSI(CFP, LHSConv->getOperand(0)->getType());
      if (LHSConv->hasOneUse() &&
          ConstantExpr::getSIToFP(CI, I.getType()) == CFP &&
          WillNotOverflowSignedAdd(LHSConv->getOperand(0), CI)) {
        // Insert the new integer add.
        Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0),
                                              CI, "addconv");
        return new SIToFPInst(NewAdd, I.getType());
      }
    }

    // (add double (sitofp x), (sitofp y)) --> (sitofp (add int x, y))
    if (SIToFPInst *RHSConv = dyn_cast<SIToFPInst>(RHS)) {
      // Only do this if x/y have the same type, if at least one of them has a
      // single use (so we don't increase the number of int->fp conversions),
      // and if the integer add will not overflow.
      if (LHSConv->getOperand(0)->getType()==RHSConv->getOperand(0)->getType()&&
          (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
          WillNotOverflowSignedAdd(LHSConv->getOperand(0),
                                   RHSConv->getOperand(0))) {
        // Insert the new integer add.
        Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0),
                                              RHSConv->getOperand(0), "addconv");
        return new SIToFPInst(NewAdd, I.getType());
      }
    }
  }

  return Changed ? &I : 0;
}
/// EmitGEPOffset - Given a getelementptr instruction/constantexpr, emit the
/// code necessary to compute the offset from the base pointer (without adding
/// in the base pointer).  Return the result as a signed integer of intptr size.
static Value *EmitGEPOffset(User *GEP, InstCombiner &IC) {
  TargetData &TD = *IC.getTargetData();
  gep_type_iterator GTI = gep_type_begin(GEP);
  const Type *IntPtrTy = TD.getIntPtrType(GEP->getContext());
  Value *Result = Constant::getNullValue(IntPtrTy);

  // Build a mask for high order bits.
  unsigned IntPtrWidth = TD.getPointerSizeInBits();
  uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);

  for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
       ++i, ++GTI) {
    Value *Op = *i;
    uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
    if (ConstantInt *OpC = dyn_cast<ConstantInt>(Op)) {
      if (OpC->isZero()) continue;

      // Handle a struct index, which adds its field offset to the pointer.
      if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
        Size = TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());

        Result = IC.Builder->CreateAdd(Result,
                                       ConstantInt::get(IntPtrTy, Size),
                                       GEP->getName()+".offs");
        continue;
      }

      Constant *Scale = ConstantInt::get(IntPtrTy, Size);
      Constant *OC =
        ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/);
      Scale = ConstantExpr::getMul(OC, Scale);
      // Emit an add instruction.
      Result = IC.Builder->CreateAdd(Result, Scale, GEP->getName()+".offs");
      continue;
    }
    // Convert to correct type.
    if (Op->getType() != IntPtrTy)
      Op = IC.Builder->CreateIntCast(Op, IntPtrTy, true, Op->getName()+".c");
    if (Size != 1) {
      Constant *Scale = ConstantInt::get(IntPtrTy, Size);
      // We'll let instcombine(mul) convert this to a shl if possible.
      Op = IC.Builder->CreateMul(Op, Scale, GEP->getName()+".idx");
    }

    // Emit an add instruction.
    Result = IC.Builder->CreateAdd(Op, Result, GEP->getName()+".offs");
  }
  return Result;
}
/// EvaluateGEPOffsetExpression - Return a value that can be used to compare
/// the *offset* implied by a GEP to zero.  For example, if we have &A[i], we
/// want to return 'i' for "icmp ne i, 0".  Note that, in general, indices can
/// be complex, and scales are involved.  The above expression would also be
/// legal to codegen as "icmp ne (i*4), 0" (assuming A is a pointer to i32).
/// This latter form is less amenable to optimization though, and we are allowed
/// to generate the first by knowing that pointer arithmetic doesn't overflow.
///
/// If we can't emit an optimized form for this expression, this returns null.
///
static Value *EvaluateGEPOffsetExpression(User *GEP, Instruction &I,
                                          InstCombiner &IC) {
  TargetData &TD = *IC.getTargetData();
  gep_type_iterator GTI = gep_type_begin(GEP);

  // Check to see if this gep only has a single variable index.  If so, and if
  // any constant indices are a multiple of its scale, then we can compute this
  // in terms of the scale of the variable index.  For example, if the GEP
  // implies an offset of "12 + i*4", then we can codegen this as "3 + i",
  // because the expression will cross zero at the same point.
  unsigned i, e = GEP->getNumOperands();
  int64_t Offset = 0;
  for (i = 1; i != e; ++i, ++GTI) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
      // Compute the aggregate offset of constant indices.
      if (CI->isZero()) continue;

      // Handle a struct index, which adds its field offset to the pointer.
      if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
        Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
      } else {
        uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
        Offset += Size*CI->getSExtValue();
      }
    } else {
      // Found our variable index.
      break;
    }
  }

  // If there are no variable indices, we must have a constant offset, just
  // evaluate it the general way.
  if (i == e) return 0;

  Value *VariableIdx = GEP->getOperand(i);
  // Determine the scale factor of the variable element.  For example, this is
  // 4 if the variable index is into an array of i32.
  uint64_t VariableScale = TD.getTypeAllocSize(GTI.getIndexedType());

  // Verify that there are no other variable indices.  If so, emit the hard way.
  for (++i, ++GTI; i != e; ++i, ++GTI) {
    ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!CI) return 0;

    // Compute the aggregate offset of constant indices.
    if (CI->isZero()) continue;

    // Handle a struct index, which adds its field offset to the pointer.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
    } else {
      uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
      Offset += Size*CI->getSExtValue();
    }
  }

  // Okay, we know we have a single variable index, which must be a
  // pointer/array/vector index.  If there is no offset, life is simple, return
  // the index.
  unsigned IntPtrWidth = TD.getPointerSizeInBits();
  if (Offset == 0) {
    // Cast to intptrty in case a truncation occurs.  If an extension is needed,
    // we don't need to bother extending: the extension won't affect where the
    // computation crosses zero.
    if (VariableIdx->getType()->getPrimitiveSizeInBits() > IntPtrWidth)
      VariableIdx = new TruncInst(VariableIdx,
                                  TD.getIntPtrType(VariableIdx->getContext()),
                                  VariableIdx->getName(), &I);
    return VariableIdx;
  }

  // Otherwise, there is an index.  The computation we will do will be modulo
  // the pointer size, so get it.
  uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);

  Offset &= PtrSizeMask;
  VariableScale &= PtrSizeMask;

  // To do this transformation, any constant index must be a multiple of the
  // variable scale factor.  For example, we can evaluate "12 + 4*i" as "3 + i",
  // but we can't evaluate "10 + 3*i" in terms of i.  Check that the offset is a
  // multiple of the variable scale.
  int64_t NewOffs = Offset / (int64_t)VariableScale;
  if (Offset != NewOffs*(int64_t)VariableScale)
    return 0;

  // Okay, we can do this evaluation.  Start by converting the index to intptr.
  const Type *IntPtrTy = TD.getIntPtrType(VariableIdx->getContext());
  if (VariableIdx->getType() != IntPtrTy)
    VariableIdx = CastInst::CreateIntegerCast(VariableIdx, IntPtrTy,
                                              true /*SExt*/,
                                              VariableIdx->getName(), &I);
  Constant *OffsetVal = ConstantInt::get(IntPtrTy, NewOffs);
  return BinaryOperator::CreateAdd(VariableIdx, OffsetVal, "offset", &I);
}
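// Illustrative example (added for clarity): for
//    getelementptr [10 x i32]* %A, i32 0, i32 %i
// the implied offset is i*4, so comparing the GEP offset against zero reduces
// to comparing %i against zero, and the function returns %i (possibly cast to
// the target's intptr type).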
/// Optimize pointer differences into the same array into a size.  Consider:
///  &A[10] - &A[0]: we should compile this to "10".  LHS/RHS are the pointer
/// operands to the ptrtoint instructions for the LHS/RHS of the subtract.
///
Value *InstCombiner::OptimizePointerDifference(Value *LHS, Value *RHS,
                                               const Type *Ty) {
  assert(TD && "Must have target data info for this");

  // If LHS is a gep based on RHS or RHS is a gep based on LHS, we can optimize
  // this.
  bool Swapped = false;
  GetElementPtrInst *GEP = 0;
  ConstantExpr *CstGEP = 0;

  // TODO: Could also optimize &A[i] - &A[j] -> "i-j", and "&A.foo[i] - &A.foo".
  // For now we require one side to be the base pointer "A" or a constant
  // expression derived from it.
  if (GetElementPtrInst *LHSGEP = dyn_cast<GetElementPtrInst>(LHS)) {
    // (gep X, ...) - X
    if (LHSGEP->getOperand(0) == RHS) {
      GEP = LHSGEP;
      Swapped = false;
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(RHS)) {
      // (gep X, ...) - (ce_gep X, ...)
      if (CE->getOpcode() == Instruction::GetElementPtr &&
          LHSGEP->getOperand(0) == CE->getOperand(0)) {
        CstGEP = CE;
        GEP = LHSGEP;
        Swapped = false;
      }
    }
  }

  if (GetElementPtrInst *RHSGEP = dyn_cast<GetElementPtrInst>(RHS)) {
    // X - (gep X, ...)
    if (RHSGEP->getOperand(0) == LHS) {
      GEP = RHSGEP;
      Swapped = true;
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(LHS)) {
      // (ce_gep X, ...) - (gep X, ...)
      if (CE->getOpcode() == Instruction::GetElementPtr &&
          RHSGEP->getOperand(0) == CE->getOperand(0)) {
        CstGEP = CE;
        GEP = RHSGEP;
        Swapped = true;
      }
    }
  }

  if (GEP == 0)
    return 0;

  // Emit the offset of the GEP and an intptr_t.
  Value *Result = EmitGEPOffset(GEP, *this);

  // If we had a constant expression GEP on the other side offsetting the
  // pointer, subtract it from the offset we have.
  if (CstGEP) {
    Value *CstOffset = EmitGEPOffset(CstGEP, *this);
    Result = Builder->CreateSub(Result, CstOffset);
  }

  // If we have p - gep(p, ...)  then we have to negate the result.
  if (Swapped)
    Result = Builder->CreateNeg(Result, "diff.neg");

  return Builder->CreateIntCast(Result, Ty, true);
}
Instruction *InstCombiner::visitSub(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (Op0 == Op1)                        // sub X, X  -> 0
    return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));

  // If this is a 'B = x-(-A)', change to B = x+A.  This preserves NSW/NUW.
  if (Value *V = dyn_castNegVal(Op1)) {
    BinaryOperator *Res = BinaryOperator::CreateAdd(Op0, V);
    Res->setHasNoSignedWrap(I.hasNoSignedWrap());
    Res->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
    return Res;
  }

  if (isa<UndefValue>(Op0))
    return ReplaceInstUsesWith(I, Op0);    // undef - X -> undef
  if (isa<UndefValue>(Op1))
    return ReplaceInstUsesWith(I, Op1);    // X - undef -> undef
  if (I.getType() == Type::getInt1Ty(I.getContext()))
    return BinaryOperator::CreateXor(Op0, Op1);

  if (ConstantInt *C = dyn_cast<ConstantInt>(Op0)) {
    // Replace (-1 - A) with (~A).
    if (C->isAllOnesValue())
      return BinaryOperator::CreateNot(Op1);

    // C - ~X == X + (1+C)
    Value *X = 0;
    if (match(Op1, m_Not(m_Value(X))))
      return BinaryOperator::CreateAdd(X, AddOne(C));

    // -(X >>u 31) -> (X >>s 31)
    // -(X >>s 31) -> (X >>u 31)
    if (C->isZero()) {
      if (BinaryOperator *SI = dyn_cast<BinaryOperator>(Op1)) {
        if (SI->getOpcode() == Instruction::LShr) {
          if (ConstantInt *CU = dyn_cast<ConstantInt>(SI->getOperand(1))) {
            // Check to see if we are shifting out everything but the sign bit.
            if (CU->getLimitedValue(SI->getType()->getPrimitiveSizeInBits()) ==
                SI->getType()->getPrimitiveSizeInBits()-1) {
              // Ok, the transformation is safe.  Insert AShr.
              return BinaryOperator::Create(Instruction::AShr,
                                          SI->getOperand(0), CU, SI->getName());
            }
          }
        } else if (SI->getOpcode() == Instruction::AShr) {
          if (ConstantInt *CU = dyn_cast<ConstantInt>(SI->getOperand(1))) {
            // Check to see if we are shifting out everything but the sign bit.
            if (CU->getLimitedValue(SI->getType()->getPrimitiveSizeInBits()) ==
                SI->getType()->getPrimitiveSizeInBits()-1) {
              // Ok, the transformation is safe.  Insert LShr.
              return BinaryOperator::CreateLShr(
                                          SI->getOperand(0), CU, SI->getName());
            }
          }
        }
      }
    }

    // Try to fold constant sub into select arguments.
    if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
      if (Instruction *R = FoldOpIntoSelect(I, SI, this))
        return R;

    // C - zext(bool) -> bool ? C - 1 : C
    if (ZExtInst *ZI = dyn_cast<ZExtInst>(Op1))
      if (ZI->getSrcTy() == Type::getInt1Ty(I.getContext()))
        return SelectInst::Create(ZI->getOperand(0), SubOne(C), C);
  }
  if (BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1)) {
    if (Op1I->getOpcode() == Instruction::Add) {
      if (Op1I->getOperand(0) == Op0)              // X-(X+Y) == -Y
        return BinaryOperator::CreateNeg(Op1I->getOperand(1),
                                         I.getName());
      else if (Op1I->getOperand(1) == Op0)         // X-(Y+X) == -Y
        return BinaryOperator::CreateNeg(Op1I->getOperand(0),
                                         I.getName());
      else if (ConstantInt *CI1 = dyn_cast<ConstantInt>(I.getOperand(0))) {
        if (ConstantInt *CI2 = dyn_cast<ConstantInt>(Op1I->getOperand(1)))
          // C1-(X+C2) --> (C1-C2)-X
          return BinaryOperator::CreateSub(
            ConstantExpr::getSub(CI1, CI2), Op1I->getOperand(0));
      }
    }

    if (Op1I->hasOneUse()) {
      // Replace (x - (y - z)) with (x + (z - y)) if the (y - z) subexpression
      // is not used by anyone else...
      //
      if (Op1I->getOpcode() == Instruction::Sub) {
        // Swap the two operands of the subexpr...
        Value *IIOp0 = Op1I->getOperand(0), *IIOp1 = Op1I->getOperand(1);
        Op1I->setOperand(0, IIOp1);
        Op1I->setOperand(1, IIOp0);

        // Create the new top level add instruction...
        return BinaryOperator::CreateAdd(Op0, Op1);
      }

      // Replace (A - (A & B)) with (A & ~B) if this is the only use of (A&B)...
      //
      if (Op1I->getOpcode() == Instruction::And &&
          (Op1I->getOperand(0) == Op0 || Op1I->getOperand(1) == Op0)) {
        Value *OtherOp = Op1I->getOperand(Op1I->getOperand(0) == Op0);

        Value *NewNot = Builder->CreateNot(OtherOp, "B.not");
        return BinaryOperator::CreateAnd(Op0, NewNot);
      }

      // 0 - (X sdiv C)  -> (X sdiv -C)
      if (Op1I->getOpcode() == Instruction::SDiv)
        if (ConstantInt *CSI = dyn_cast<ConstantInt>(Op0))
          if (CSI->isZero())
            if (Constant *DivRHS = dyn_cast<Constant>(Op1I->getOperand(1)))
              return BinaryOperator::CreateSDiv(Op1I->getOperand(0),
                                                ConstantExpr::getNeg(DivRHS));

      // X - X*C --> X * (1-C)
      ConstantInt *C2 = 0;
      if (dyn_castFoldableMul(Op1I, C2) == Op0) {
        Constant *CP1 =
          ConstantExpr::getSub(ConstantInt::get(I.getType(), 1),
                               C2);
        return BinaryOperator::CreateMul(Op0, CP1);
      }
    }
  }
  if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
    if (Op0I->getOpcode() == Instruction::Add) {
      if (Op0I->getOperand(0) == Op1)              // (Y+X)-Y == X
        return ReplaceInstUsesWith(I, Op0I->getOperand(1));
      else if (Op0I->getOperand(1) == Op1)         // (X+Y)-Y == X
        return ReplaceInstUsesWith(I, Op0I->getOperand(0));
    } else if (Op0I->getOpcode() == Instruction::Sub) {
      if (Op0I->getOperand(0) == Op1)              // (X-Y)-X == -Y
        return BinaryOperator::CreateNeg(Op0I->getOperand(1),
                                         I.getName());
    }
  }

  ConstantInt *C1;
  if (Value *X = dyn_castFoldableMul(Op0, C1)) {
    if (X == Op1)  // X*C - X --> X * (C-1)
      return BinaryOperator::CreateMul(Op1, SubOne(C1));

    ConstantInt *C2;   // X*C1 - X*C2 -> X * (C1-C2)
    if (X == dyn_castFoldableMul(Op1, C2))
      return BinaryOperator::CreateMul(X, ConstantExpr::getSub(C1, C2));
  }

  // Optimize pointer differences into the same array into a size.  Consider:
  //  &A[10] - &A[0]: we should compile this to "10".
  if (TD) {
    Value *LHSOp, *RHSOp;
    if (match(Op0, m_PtrToInt(m_Value(LHSOp))) &&
        match(Op1, m_PtrToInt(m_Value(RHSOp))))
      if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType()))
        return ReplaceInstUsesWith(I, Res);

    // trunc(p)-trunc(q) -> trunc(p-q)
    if (match(Op0, m_Trunc(m_PtrToInt(m_Value(LHSOp)))) &&
        match(Op1, m_Trunc(m_PtrToInt(m_Value(RHSOp)))))
      if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType()))
        return ReplaceInstUsesWith(I, Res);
  }

  return 0;
}
Instruction *InstCombiner::visitFSub(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  // If this is a 'B = x-(-A)', change to B = x+A...
  if (Value *V = dyn_castFNegVal(Op1))
    return BinaryOperator::CreateFAdd(Op0, V);

  if (BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1)) {
    if (Op1I->getOpcode() == Instruction::FAdd) {
      if (Op1I->getOperand(0) == Op0)              // X-(X+Y) == -Y
        return BinaryOperator::CreateFNeg(Op1I->getOperand(1),
                                          I.getName());
      else if (Op1I->getOperand(1) == Op0)         // X-(Y+X) == -Y
        return BinaryOperator::CreateFNeg(Op1I->getOperand(0),
                                          I.getName());
    }
  }

  return 0;
}
/// isSignBitCheck - Given an exploded icmp instruction, return true if the
/// comparison only checks the sign bit.  If it only checks the sign bit, set
/// TrueIfSigned if the result of the comparison is true when the input value
/// is signed.
static bool isSignBitCheck(ICmpInst::Predicate pred, ConstantInt *RHS,
                           bool &TrueIfSigned) {
  switch (pred) {
  case ICmpInst::ICMP_SLT:   // True if LHS s< 0
    TrueIfSigned = true;
    return RHS->isZero();
  case ICmpInst::ICMP_SLE:   // True if LHS s<= RHS and RHS == -1
    TrueIfSigned = true;
    return RHS->isAllOnesValue();
  case ICmpInst::ICMP_SGT:   // True if LHS s> -1
    TrueIfSigned = false;
    return RHS->isAllOnesValue();
  case ICmpInst::ICMP_UGT:
    // True if LHS u> RHS and RHS == high-bit-mask - 1
    TrueIfSigned = true;
    return RHS->getValue() ==
      APInt::getSignedMaxValue(RHS->getType()->getPrimitiveSizeInBits());
  case ICmpInst::ICMP_UGE:
    // True if LHS u>= RHS and RHS == high-bit-mask (2^7, 2^15, 2^31, etc)
    TrueIfSigned = true;
    return RHS->getValue().isSignBit();
  default:
    return false;
  }
}
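// Illustrative example (added for clarity): for an i8 comparison
// "icmp uge i8 %x, 128", the RHS is the sign-bit mask 0x80, so isSignBitCheck
// returns true with TrueIfSigned set to true.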
Instruction *InstCombiner::visitMul(BinaryOperator &I) {
  bool Changed = SimplifyCommutative(I);
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (isa<UndefValue>(Op1))              // undef * X -> 0
    return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));

  // Simplify mul instructions with a constant RHS.
  if (Constant *Op1C = dyn_cast<Constant>(Op1)) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1C)) {

      // ((X << C1)*C2) == (X * (C2 << C1))
      if (BinaryOperator *SI = dyn_cast<BinaryOperator>(Op0))
        if (SI->getOpcode() == Instruction::Shl)
          if (Constant *ShOp = dyn_cast<Constant>(SI->getOperand(1)))
            return BinaryOperator::CreateMul(SI->getOperand(0),
                                             ConstantExpr::getShl(CI, ShOp));

      if (CI->isZero())
        return ReplaceInstUsesWith(I, Op1C);       // X * 0  == 0
      if (CI->equalsInt(1))                        // X * 1  == X
        return ReplaceInstUsesWith(I, Op0);
      if (CI->isAllOnesValue())                    // X * -1 == 0 - X
        return BinaryOperator::CreateNeg(Op0, I.getName());

      const APInt& Val = cast<ConstantInt>(CI)->getValue();
      if (Val.isPowerOf2()) {          // Replace X*(2^C) with X << C
        return BinaryOperator::CreateShl(Op0,
                 ConstantInt::get(Op0->getType(), Val.logBase2()));
      }
    } else if (isa<VectorType>(Op1C->getType())) {
      if (Op1C->isNullValue())
        return ReplaceInstUsesWith(I, Op1C);

      if (ConstantVector *Op1V = dyn_cast<ConstantVector>(Op1C)) {
        if (Op1V->isAllOnesValue())                // X * -1 == 0 - X
          return BinaryOperator::CreateNeg(Op0, I.getName());

        // As above, vector X*splat(1.0) -> X in all defined cases.
        if (Constant *Splat = Op1V->getSplatValue()) {
          if (ConstantInt *CI = dyn_cast<ConstantInt>(Splat))
            if (CI->equalsInt(1))
              return ReplaceInstUsesWith(I, Op0);
        }
      }
    }

    if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0))
      if (Op0I->getOpcode() == Instruction::Add && Op0I->hasOneUse() &&
          isa<ConstantInt>(Op0I->getOperand(1)) && isa<ConstantInt>(Op1C)) {
        // Canonicalize (X+C1)*C2 -> X*C2+C1*C2.
        Value *Add = Builder->CreateMul(Op0I->getOperand(0), Op1C, "tmp");
        Value *C1C2 = Builder->CreateMul(Op1C, Op0I->getOperand(1));
        return BinaryOperator::CreateAdd(Add, C1C2);
      }

    // Try to fold constant mul into select arguments.
    if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
      if (Instruction *R = FoldOpIntoSelect(I, SI, this))
        return R;

    if (isa<PHINode>(Op0))
      if (Instruction *NV = FoldOpIntoPhi(I))
        return NV;
  }
1659 if (Value *Op0v = dyn_castNegVal(Op0)) // -X * -Y = X*Y
1660 if (Value *Op1v = dyn_castNegVal(Op1))
1661 return BinaryOperator::CreateMul(Op0v, Op1v);
1663 // (X / Y) * Y = X - (X % Y)
1664 // (X / Y) * -Y = (X % Y) - X
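// For example, with X = 7 and Y = 3 (unsigned): (7/3)*3 == 6 == 7 - (7%3),
// and (7/3)*-3 == -6 == (7%3) - 7.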
1667 BinaryOperator *BO = dyn_cast<BinaryOperator>(Op0);
1669 (BO->getOpcode() != Instruction::UDiv &&
1670 BO->getOpcode() != Instruction::SDiv)) {
1672 BO = dyn_cast<BinaryOperator>(Op1);
1674 Value *Neg = dyn_castNegVal(Op1C);
1675 if (BO && BO->hasOneUse() &&
1676 (BO->getOperand(1) == Op1C || BO->getOperand(1) == Neg) &&
1677 (BO->getOpcode() == Instruction::UDiv ||
1678 BO->getOpcode() == Instruction::SDiv)) {
1679 Value *Op0BO = BO->getOperand(0), *Op1BO = BO->getOperand(1);
1681 // If the division is exact, X % Y is zero.
1682 if (SDivOperator *SDiv = dyn_cast<SDivOperator>(BO))
1683 if (SDiv->isExact()) {
1685 return ReplaceInstUsesWith(I, Op0BO);
1686 return BinaryOperator::CreateNeg(Op0BO);
1690 if (BO->getOpcode() == Instruction::UDiv)
1691 Rem = Builder->CreateURem(Op0BO, Op1BO);
1693 Rem = Builder->CreateSRem(Op0BO, Op1BO);
1697 return BinaryOperator::CreateSub(Op0BO, Rem);
1698 return BinaryOperator::CreateSub(Rem, Op0BO);
1702 /// i1 mul -> i1 and.
1703 if (I.getType() == Type::getInt1Ty(I.getContext()))
1704 return BinaryOperator::CreateAnd(Op0, Op1);
1706 // X*(1 << Y) --> X << Y
1707 // (1 << Y)*X --> X << Y
1710 if (match(Op0, m_Shl(m_One(), m_Value(Y))))
1711 return BinaryOperator::CreateShl(Op1, Y);
1712 if (match(Op1, m_Shl(m_One(), m_Value(Y))))
1713 return BinaryOperator::CreateShl(Op0, Y);
1716 // If one of the operands of the multiply is a cast from a boolean value, then
1717 // we know the bool is either zero or one, so this is a 'masking' multiply.
1718 // X * Y (where Y is 0 or 1) -> X & (0-Y)
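// This works because 0-Y is either 0 (Y == 0) or all-ones (Y == 1), so the
// 'and' produces either 0 or X, which matches X*Y exactly.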
1719 if (!isa<VectorType>(I.getType())) {
1720 // -2 is "-1 << 1" so it is all bits set except the low one.
1721 APInt Negative2(I.getType()->getPrimitiveSizeInBits(), (uint64_t)-2, true);
1723 Value *BoolCast = 0, *OtherOp = 0;
1724 if (MaskedValueIsZero(Op0, Negative2))
1725 BoolCast = Op0, OtherOp = Op1;
1726 else if (MaskedValueIsZero(Op1, Negative2))
1727 BoolCast = Op1, OtherOp = Op0;
1730 Value *V = Builder->CreateSub(Constant::getNullValue(I.getType()),
1732 return BinaryOperator::CreateAnd(V, OtherOp);
1736 return Changed ? &I : 0;
1739 Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
1740 bool Changed = SimplifyCommutative(I);
1741 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1743 // Simplify mul instructions with a constant RHS...
1744 if (Constant *Op1C = dyn_cast<Constant>(Op1)) {
1745 if (ConstantFP *Op1F = dyn_cast<ConstantFP>(Op1C)) {
1746 // "In IEEE floating point, x*1 is not equivalent to x for nans. However,
1747 // ANSI says we can drop signals, so we can do this anyway." (from GCC)
1748 if (Op1F->isExactlyValue(1.0))
1749 return ReplaceInstUsesWith(I, Op0); // Eliminate 'mul double %X, 1.0'
1750 } else if (isa<VectorType>(Op1C->getType())) {
1751 if (ConstantVector *Op1V = dyn_cast<ConstantVector>(Op1C)) {
1752 // As above, vector X*splat(1.0) -> X in all defined cases.
1753 if (Constant *Splat = Op1V->getSplatValue()) {
1754 if (ConstantFP *F = dyn_cast<ConstantFP>(Splat))
1755 if (F->isExactlyValue(1.0))
1756 return ReplaceInstUsesWith(I, Op0);
1761 // Try to fold constant mul into select arguments.
1762 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
1763 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
1766 if (isa<PHINode>(Op0))
1767 if (Instruction *NV = FoldOpIntoPhi(I))
1771 if (Value *Op0v = dyn_castFNegVal(Op0)) // -X * -Y = X*Y
1772 if (Value *Op1v = dyn_castFNegVal(Op1))
1773 return BinaryOperator::CreateFMul(Op0v, Op1v);
1775 return Changed ? &I : 0;
1778 /// SimplifyDivRemOfSelect - Try to fold a divide or remainder of a select
1780 bool InstCombiner::SimplifyDivRemOfSelect(BinaryOperator &I) {
1781 SelectInst *SI = cast<SelectInst>(I.getOperand(1));
1783 // div/rem X, (Cond ? 0 : Y) -> div/rem X, Y
1784 int NonNullOperand = -1;
1785 if (Constant *ST = dyn_cast<Constant>(SI->getOperand(1)))
1786 if (ST->isNullValue()) NonNullOperand = 2;
1788 // div/rem X, (Cond ? Y : 0) -> div/rem X, Y
1789 if (Constant *ST = dyn_cast<Constant>(SI->getOperand(2)))
1790 if (ST->isNullValue()) NonNullOperand = 1;
1793 if (NonNullOperand == -1)
1796 Value *SelectCond = SI->getOperand(0);
1798 // Change the div/rem to use 'Y' instead of the select.
1799 I.setOperand(1, SI->getOperand(NonNullOperand));
1801 // Okay, we know we replace the operand of the div/rem with 'Y' with no
1802 // problem. However, the select, or the condition of the select may have
1803 // multiple uses. Based on our knowledge that the operand must be non-zero,
1804 // propagate the known value for the select into other uses of it, and
1805 // propagate a known value of the condition into its other users.
1807 // If the select and condition only have a single use, don't bother with this; just exit early.
1809 if (SI->use_empty() && SelectCond->hasOneUse()) return true;
1812 // Scan the current block backward, looking for other uses of SI.
1813 BasicBlock::iterator BBI = &I, BBFront = I.getParent()->begin();
1815 while (BBI != BBFront) {
1817 // If we found a call to a function, we can't assume it will return, so
1818 // information from below it cannot be propagated above it.
1819 if (isa<CallInst>(BBI) && !isa<IntrinsicInst>(BBI))
1822 // Replace uses of the select or its condition with the known values.
1823 for (Instruction::op_iterator I = BBI->op_begin(), E = BBI->op_end();
1826 *I = SI->getOperand(NonNullOperand);
1828 } else if (*I == SelectCond) {
1829 *I = NonNullOperand == 1 ? ConstantInt::getTrue(BBI->getContext()) :
1830 ConstantInt::getFalse(BBI->getContext());
1835 // If we are past the instruction, quit looking for it.
1838 if (&*BBI == SelectCond)
1841 // If we ran out of things to eliminate, break out of the loop.
1842 if (SelectCond == 0 && SI == 0)
1850 /// This function implements the transforms on div instructions that work
1851 /// regardless of the kind of div instruction it is (udiv, sdiv, or fdiv). It is
1852 /// used by the visitors to those instructions.
1853 /// @brief Transforms common to all three div instructions
1854 Instruction *InstCombiner::commonDivTransforms(BinaryOperator &I) {
1855 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1857 // undef / X -> 0 for integer.
1858 // undef / X -> undef for FP (the undef could be a snan).
1859 if (isa<UndefValue>(Op0)) {
1860 if (Op0->getType()->isFPOrFPVector())
1861 return ReplaceInstUsesWith(I, Op0);
1862 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
1865 // X / undef -> undef
1866 if (isa<UndefValue>(Op1))
1867 return ReplaceInstUsesWith(I, Op1);
1872 /// This function implements the transforms common to both integer division
1873 /// instructions (udiv and sdiv). It is called by the visitors to those integer
1874 /// division instructions.
1875 /// @brief Common integer divide transforms
1876 Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) {
1877 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1879 // (sdiv X, X) --> 1     (udiv X, X) --> 1
if (Op0 == Op1) {
1881 if (const VectorType *Ty = dyn_cast<VectorType>(I.getType())) {
1882 Constant *CI = ConstantInt::get(Ty->getElementType(), 1);
1883 std::vector<Constant*> Elts(Ty->getNumElements(), CI);
1884 return ReplaceInstUsesWith(I, ConstantVector::get(Elts));
1887 Constant *CI = ConstantInt::get(I.getType(), 1);
1888 return ReplaceInstUsesWith(I, CI);
1891 if (Instruction *Common = commonDivTransforms(I))
1894 // Handle cases involving: [su]div X, (select Cond, Y, Z)
1895 // This does not apply for fdiv.
1896 if (isa<SelectInst>(Op1) && SimplifyDivRemOfSelect(I))
1899 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
1901 if (RHS->equalsInt(1))
1902 return ReplaceInstUsesWith(I, Op0);
1904 // (X / C1) / C2 -> X / (C1*C2)
1905 if (Instruction *LHS = dyn_cast<Instruction>(Op0))
1906 if (Instruction::BinaryOps(LHS->getOpcode()) == I.getOpcode())
1907 if (ConstantInt *LHSRHS = dyn_cast<ConstantInt>(LHS->getOperand(1))) {
1908 if (MultiplyOverflows(RHS, LHSRHS,
1909 I.getOpcode()==Instruction::SDiv))
1910 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
1912 return BinaryOperator::Create(I.getOpcode(), LHS->getOperand(0),
1913 ConstantExpr::getMul(RHS, LHSRHS));
1916 if (!RHS->isZero()) { // avoid X udiv 0
1917 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
1918 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
1920 if (isa<PHINode>(Op0))
1921 if (Instruction *NV = FoldOpIntoPhi(I))
1926 // 0 / X == 0, we don't need to preserve faults!
1927 if (ConstantInt *LHS = dyn_cast<ConstantInt>(Op0))
1928 if (LHS->equalsInt(0))
1929 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
1931 // It can't be division by zero, hence it must be division by one.
1932 if (I.getType() == Type::getInt1Ty(I.getContext()))
1933 return ReplaceInstUsesWith(I, Op0);
1935 if (ConstantVector *Op1V = dyn_cast<ConstantVector>(Op1)) {
1936 if (ConstantInt *X = cast_or_null<ConstantInt>(Op1V->getSplatValue()))
1939 if (X->equalsInt(1)) return ReplaceInstUsesWith(I, Op0); // div X, <1,1,...,1> -> X
1945 Instruction *InstCombiner::visitUDiv(BinaryOperator &I) {
1946 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1948 // Handle the integer div common cases
1949 if (Instruction *Common = commonIDivTransforms(I))
1952 if (ConstantInt *C = dyn_cast<ConstantInt>(Op1)) {
1953 // X udiv 2^C -> X >> C
1954 // Check to see if this is an unsigned division with an exact power of 2,
1955 // if so, convert to a right shift.
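// e.g. udiv i32 %X, 8  ==>  lshr i32 %X, 3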
1956 if (C->getValue().isPowerOf2()) // 0 not included in isPowerOf2
1957 return BinaryOperator::CreateLShr(Op0,
1958 ConstantInt::get(Op0->getType(), C->getValue().logBase2()));
1960 // X udiv C, where C >= signbit
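// When C has its sign bit set the quotient can only be 0 or 1, so e.g.
// udiv i8 %X, 200 becomes: (%X u< 200) ? 0 : 1.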
1961 if (C->getValue().isNegative()) {
1962 Value *IC = Builder->CreateICmpULT( Op0, C);
1963 return SelectInst::Create(IC, Constant::getNullValue(I.getType()),
1964 ConstantInt::get(I.getType(), 1));
1968 // X udiv (C1 << N), where C1 is "1<<C2" --> X >> (N+C2)
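// e.g. udiv i32 %X, (shl i32 4, %N)  ==>  lshr i32 %X, (add i32 %N, 2)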
1969 if (BinaryOperator *RHSI = dyn_cast<BinaryOperator>(I.getOperand(1))) {
1970 if (RHSI->getOpcode() == Instruction::Shl &&
1971 isa<ConstantInt>(RHSI->getOperand(0))) {
1972 const APInt& C1 = cast<ConstantInt>(RHSI->getOperand(0))->getValue();
1973 if (C1.isPowerOf2()) {
1974 Value *N = RHSI->getOperand(1);
1975 const Type *NTy = N->getType();
1976 if (uint32_t C2 = C1.logBase2())
1977 N = Builder->CreateAdd(N, ConstantInt::get(NTy, C2), "tmp");
1978 return BinaryOperator::CreateLShr(Op0, N);
1983 // udiv X, (Select Cond, C1, C2) --> Select Cond, (shr X, C1), (shr X, C2)
1984 // where C1&C2 are powers of two.
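// e.g. udiv %X, (select %c, 4, 16)  ==>  select %c, (lshr %X, 2), (lshr %X, 4)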
1985 if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
1986 if (ConstantInt *STO = dyn_cast<ConstantInt>(SI->getOperand(1)))
1987 if (ConstantInt *SFO = dyn_cast<ConstantInt>(SI->getOperand(2))) {
1988 const APInt &TVA = STO->getValue(), &FVA = SFO->getValue();
1989 if (TVA.isPowerOf2() && FVA.isPowerOf2()) {
1990 // Compute the shift amounts
1991 uint32_t TSA = TVA.logBase2(), FSA = FVA.logBase2();
1992 // Construct the "on true" case of the select
1993 Constant *TC = ConstantInt::get(Op0->getType(), TSA);
1994 Value *TSI = Builder->CreateLShr(Op0, TC, SI->getName()+".t");
1996 // Construct the "on false" case of the select
1997 Constant *FC = ConstantInt::get(Op0->getType(), FSA);
1998 Value *FSI = Builder->CreateLShr(Op0, FC, SI->getName()+".f");
2000 // construct the select instruction and return it.
2001 return SelectInst::Create(SI->getOperand(0), TSI, FSI, SI->getName());
2007 Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
2008 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2010 // Handle the integer div common cases
2011 if (Instruction *Common = commonIDivTransforms(I))
2014 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
2016 if (RHS->isAllOnesValue())
2017 return BinaryOperator::CreateNeg(Op0);
2019 // sdiv X, C --> ashr X, log2(C)
2020 if (cast<SDivOperator>(&I)->isExact() &&
2021 RHS->getValue().isNonNegative() &&
2022 RHS->getValue().isPowerOf2()) {
2023 Value *ShAmt = llvm::ConstantInt::get(RHS->getType(),
2024 RHS->getValue().exactLogBase2());
2025 return BinaryOperator::CreateAShr(Op0, ShAmt, I.getName());
2028 // -X/C --> X/-C provided the negation doesn't overflow.
2029 if (SubOperator *Sub = dyn_cast<SubOperator>(Op0))
2030 if (isa<Constant>(Sub->getOperand(0)) &&
2031 cast<Constant>(Sub->getOperand(0))->isNullValue() &&
2032 Sub->hasNoSignedWrap())
2033 return BinaryOperator::CreateSDiv(Sub->getOperand(1),
2034 ConstantExpr::getNeg(RHS));
2037 // If the sign bits of both operands are zero (i.e. we can prove they are
2038 // unsigned inputs), turn this into a udiv.
2039 if (I.getType()->isInteger()) {
2040 APInt Mask(APInt::getSignBit(I.getType()->getPrimitiveSizeInBits()));
2041 if (MaskedValueIsZero(Op0, Mask)) {
2042 if (MaskedValueIsZero(Op1, Mask)) {
2043 // X sdiv Y -> X udiv Y, iff X and Y don't have sign bit set
2044 return BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
2046 ConstantInt *ShiftedInt;
2047 if (match(Op1, m_Shl(m_ConstantInt(ShiftedInt), m_Value())) &&
2048 ShiftedInt->getValue().isPowerOf2()) {
2049 // X sdiv (1 << Y) -> X udiv (1 << Y) ( -> X u>> Y)
2050 // Safe because the only negative value (1 << Y) can take on is
2051 // INT_MIN, and X sdiv INT_MIN == X udiv INT_MIN == 0 if X doesn't have
2052 // the sign bit set.
2053 return BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
2061 Instruction *InstCombiner::visitFDiv(BinaryOperator &I) {
2062 return commonDivTransforms(I);
2065 /// This function implements the transforms on rem instructions that work
2066 /// regardless of the kind of rem instruction it is (urem, srem, or frem). It
2067 /// is used by the visitors to those instructions.
2068 /// @brief Transforms common to all three rem instructions
2069 Instruction *InstCombiner::commonRemTransforms(BinaryOperator &I) {
2070 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2072 if (isa<UndefValue>(Op0)) { // undef % X -> 0
2073 if (I.getType()->isFPOrFPVector())
2074 return ReplaceInstUsesWith(I, Op0); // undef % X -> undef for FP (the undef could be an SNaN)
2075 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
2077 if (isa<UndefValue>(Op1))
2078 return ReplaceInstUsesWith(I, Op1); // X % undef -> undef
2080 // Handle cases involving: rem X, (select Cond, Y, Z)
2081 if (isa<SelectInst>(Op1) && SimplifyDivRemOfSelect(I))
2087 /// This function implements the transforms common to both integer remainder
2088 /// instructions (urem and srem). It is called by the visitors to those integer
2089 /// remainder instructions.
2090 /// @brief Common integer remainder transforms
2091 Instruction *InstCombiner::commonIRemTransforms(BinaryOperator &I) {
2092 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2094 if (Instruction *common = commonRemTransforms(I))
2097 // 0 % X == 0 for integer, we don't need to preserve faults!
2098 if (Constant *LHS = dyn_cast<Constant>(Op0))
2099 if (LHS->isNullValue())
2100 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
2102 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
2103 // X % 0 == undef, we don't need to preserve faults!
2104 if (RHS->equalsInt(0))
2105 return ReplaceInstUsesWith(I, UndefValue::get(I.getType()));
2107 if (RHS->equalsInt(1)) // X % 1 == 0
2108 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
2110 if (Instruction *Op0I = dyn_cast<Instruction>(Op0)) {
2111 if (SelectInst *SI = dyn_cast<SelectInst>(Op0I)) {
2112 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
2114 } else if (isa<PHINode>(Op0I)) {
2115 if (Instruction *NV = FoldOpIntoPhi(I))
2119 // See if we can fold away this rem instruction.
2120 if (SimplifyDemandedInstructionBits(I))
2128 Instruction *InstCombiner::visitURem(BinaryOperator &I) {
2129 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2131 if (Instruction *common = commonIRemTransforms(I))
2134 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
2135 // X urem 2^C -> X & (2^C - 1)
2136 // Check to see if this is an unsigned remainder with an exact power of 2,
2137 // if so, convert to a bitwise and.
2138 if (ConstantInt *C = dyn_cast<ConstantInt>(RHS))
2139 if (C->getValue().isPowerOf2())
2140 return BinaryOperator::CreateAnd(Op0, SubOne(C));
2143 if (Instruction *RHSI = dyn_cast<Instruction>(I.getOperand(1))) {
2144 // Turn A % (C << N), where C is 2^k, into A & ((C << N)-1)
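// e.g. urem %A, (shl i32 8, %N)  ==>  and %A, ((8 << %N) - 1)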
2145 if (RHSI->getOpcode() == Instruction::Shl &&
2146 isa<ConstantInt>(RHSI->getOperand(0))) {
2147 if (cast<ConstantInt>(RHSI->getOperand(0))->getValue().isPowerOf2()) {
2148 Constant *N1 = Constant::getAllOnesValue(I.getType());
2149 Value *Add = Builder->CreateAdd(RHSI, N1, "tmp");
2150 return BinaryOperator::CreateAnd(Op0, Add);
2155 // urem X, (select Cond, 2^C1, 2^C2) --> select Cond, (and X, C1), (and X, C2)
2156 // where C1&C2 are powers of two.
2157 if (SelectInst *SI = dyn_cast<SelectInst>(Op1)) {
2158 if (ConstantInt *STO = dyn_cast<ConstantInt>(SI->getOperand(1)))
2159 if (ConstantInt *SFO = dyn_cast<ConstantInt>(SI->getOperand(2))) {
2160 // STO == 0 and SFO == 0 handled above.
2161 if ((STO->getValue().isPowerOf2()) &&
2162 (SFO->getValue().isPowerOf2())) {
2163 Value *TrueAnd = Builder->CreateAnd(Op0, SubOne(STO),
2164 SI->getName()+".t");
2165 Value *FalseAnd = Builder->CreateAnd(Op0, SubOne(SFO),
2166 SI->getName()+".f");
2167 return SelectInst::Create(SI->getOperand(0), TrueAnd, FalseAnd);
2175 Instruction *InstCombiner::visitSRem(BinaryOperator &I) {
2176 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2178 // Handle the integer rem common cases
2179 if (Instruction *Common = commonIRemTransforms(I))
2182 if (Value *RHSNeg = dyn_castNegVal(Op1))
2183 if (!isa<Constant>(RHSNeg) ||
2184 (isa<ConstantInt>(RHSNeg) &&
2185 cast<ConstantInt>(RHSNeg)->getValue().isStrictlyPositive())) {
2187 Worklist.AddValue(I.getOperand(1));
2188 I.setOperand(1, RHSNeg);
2192 // If the sign bits of both operands are zero (i.e. we can prove they are
2193 // unsigned inputs), turn this into a urem.
2194 if (I.getType()->isInteger()) {
2195 APInt Mask(APInt::getSignBit(I.getType()->getPrimitiveSizeInBits()));
2196 if (MaskedValueIsZero(Op1, Mask) && MaskedValueIsZero(Op0, Mask)) {
2197 // X srem Y -> X urem Y, iff X and Y don't have sign bit set
2198 return BinaryOperator::CreateURem(Op0, Op1, I.getName());
2202 // If it's a constant vector, flip any negative values positive.
2203 if (ConstantVector *RHSV = dyn_cast<ConstantVector>(Op1)) {
2204 unsigned VWidth = RHSV->getNumOperands();
2206 bool hasNegative = false;
2207 for (unsigned i = 0; !hasNegative && i != VWidth; ++i)
2208 if (ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV->getOperand(i)))
2209 if (RHS->getValue().isNegative()) hasNegative = true;
2213 std::vector<Constant *> Elts(VWidth);
2214 for (unsigned i = 0; i != VWidth; ++i) {
2215 if (ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV->getOperand(i))) {
2216 if (RHS->getValue().isNegative())
2217 Elts[i] = cast<ConstantInt>(ConstantExpr::getNeg(RHS));
2223 Constant *NewRHSV = ConstantVector::get(Elts);
2224 if (NewRHSV != RHSV) {
2225 Worklist.AddValue(I.getOperand(1));
2226 I.setOperand(1, NewRHSV);
2235 Instruction *InstCombiner::visitFRem(BinaryOperator &I) {
2236 return commonRemTransforms(I);
2239 // isOneBitSet - Return true if there is exactly one bit set in the specified constant.
2241 static bool isOneBitSet(const ConstantInt *CI) {
2242 return CI->getValue().isPowerOf2();
2245 // isHighOnes - Return true if the constant is of the form 1+0+.
2246 // This is the same as lowones(~X).
2247 static bool isHighOnes(const ConstantInt *CI) {
2248 return (~CI->getValue() + 1).isPowerOf2();
2251 /// getICmpCode - Encode an icmp predicate into a three bit mask. These bits
2252 /// are carefully arranged to allow folding of expressions such as:
2254 /// (A < B) | (A > B) --> (A != B)
2256 /// Note that this is only valid if the first and second predicates have the
2257 /// same sign. It is illegal to do: (A u< B) | (A s> B)
2259 /// Three bits are used to represent the condition, as follows:
2264 /// <=>  Value  Definition
2265 /// 000     0   Always false
/// 001     1   A >  B
/// 010     2   A == B
/// 011     3   A >= B
/// 100     4   A <  B
/// 101     5   A != B
/// 110     6   A <= B
2272 /// 111     7   Always true
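/// For example, (A s< B) | (A s> B) gives 100 | 001 == 101, which decodes to A != B.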
2274 static unsigned getICmpCode(const ICmpInst *ICI) {
2275 switch (ICI->getPredicate()) {
2277 case ICmpInst::ICMP_UGT: return 1; // 001
2278 case ICmpInst::ICMP_SGT: return 1; // 001
2279 case ICmpInst::ICMP_EQ: return 2; // 010
2280 case ICmpInst::ICMP_UGE: return 3; // 011
2281 case ICmpInst::ICMP_SGE: return 3; // 011
2282 case ICmpInst::ICMP_ULT: return 4; // 100
2283 case ICmpInst::ICMP_SLT: return 4; // 100
2284 case ICmpInst::ICMP_NE: return 5; // 101
2285 case ICmpInst::ICMP_ULE: return 6; // 110
2286 case ICmpInst::ICMP_SLE: return 6; // 110
2289 llvm_unreachable("Invalid ICmp predicate!");
2294 /// getFCmpCode - Similar to getICmpCode but for FCmpInst. This encodes a fcmp
2295 /// predicate into a three bit mask. It also returns whether it is an ordered
2296 /// predicate by reference.
2297 static unsigned getFCmpCode(FCmpInst::Predicate CC, bool &isOrdered) {
2300 case FCmpInst::FCMP_ORD: isOrdered = true; return 0; // 000
2301 case FCmpInst::FCMP_UNO: return 0; // 000
2302 case FCmpInst::FCMP_OGT: isOrdered = true; return 1; // 001
2303 case FCmpInst::FCMP_UGT: return 1; // 001
2304 case FCmpInst::FCMP_OEQ: isOrdered = true; return 2; // 010
2305 case FCmpInst::FCMP_UEQ: return 2; // 010
2306 case FCmpInst::FCMP_OGE: isOrdered = true; return 3; // 011
2307 case FCmpInst::FCMP_UGE: return 3; // 011
2308 case FCmpInst::FCMP_OLT: isOrdered = true; return 4; // 100
2309 case FCmpInst::FCMP_ULT: return 4; // 100
2310 case FCmpInst::FCMP_ONE: isOrdered = true; return 5; // 101
2311 case FCmpInst::FCMP_UNE: return 5; // 101
2312 case FCmpInst::FCMP_OLE: isOrdered = true; return 6; // 110
2313 case FCmpInst::FCMP_ULE: return 6; // 110
2316 // Not expecting FCMP_FALSE or FCMP_TRUE.
2317 llvm_unreachable("Unexpected FCmp predicate!");
2322 /// getICmpValue - This is the complement of getICmpCode, which turns an
2323 /// opcode and two operands into either a constant true or false, or a brand
2324 /// new ICmp instruction. The sign is passed in to determine which kind
2325 /// of predicate to use in the new icmp instruction.
2326 static Value *getICmpValue(bool sign, unsigned code, Value *LHS, Value *RHS) {
2328 default: llvm_unreachable("Illegal ICmp code!");
2329 case 0: return ConstantInt::getFalse(LHS->getContext());
2332 return new ICmpInst(ICmpInst::ICMP_SGT, LHS, RHS);
2334 return new ICmpInst(ICmpInst::ICMP_UGT, LHS, RHS);
2335 case 2: return new ICmpInst(ICmpInst::ICMP_EQ, LHS, RHS);
2338 return new ICmpInst(ICmpInst::ICMP_SGE, LHS, RHS);
2340 return new ICmpInst(ICmpInst::ICMP_UGE, LHS, RHS);
2343 return new ICmpInst(ICmpInst::ICMP_SLT, LHS, RHS);
2345 return new ICmpInst(ICmpInst::ICMP_ULT, LHS, RHS);
2346 case 5: return new ICmpInst(ICmpInst::ICMP_NE, LHS, RHS);
2349 return new ICmpInst(ICmpInst::ICMP_SLE, LHS, RHS);
2351 return new ICmpInst(ICmpInst::ICMP_ULE, LHS, RHS);
2352 case 7: return ConstantInt::getTrue(LHS->getContext());
2356 /// getFCmpValue - This is the complement of getFCmpCode, which turns an
2357 /// opcode and two operands into either an FCmp instruction or a constant.
2358 /// isordered is passed in to determine which kind of predicate to use in the new fcmp instruction.
2359 static Value *getFCmpValue(bool isordered, unsigned code,
2360 Value *LHS, Value *RHS) {
2362 default: llvm_unreachable("Illegal FCmp code!");
2365 return new FCmpInst(FCmpInst::FCMP_ORD, LHS, RHS);
2367 return new FCmpInst(FCmpInst::FCMP_UNO, LHS, RHS);
2370 return new FCmpInst(FCmpInst::FCMP_OGT, LHS, RHS);
2372 return new FCmpInst(FCmpInst::FCMP_UGT, LHS, RHS);
2375 return new FCmpInst(FCmpInst::FCMP_OEQ, LHS, RHS);
2377 return new FCmpInst(FCmpInst::FCMP_UEQ, LHS, RHS);
2380 return new FCmpInst(FCmpInst::FCMP_OGE, LHS, RHS);
2382 return new FCmpInst(FCmpInst::FCMP_UGE, LHS, RHS);
2385 return new FCmpInst(FCmpInst::FCMP_OLT, LHS, RHS);
2387 return new FCmpInst(FCmpInst::FCMP_ULT, LHS, RHS);
2390 return new FCmpInst(FCmpInst::FCMP_ONE, LHS, RHS);
2392 return new FCmpInst(FCmpInst::FCMP_UNE, LHS, RHS);
2395 return new FCmpInst(FCmpInst::FCMP_OLE, LHS, RHS);
2397 return new FCmpInst(FCmpInst::FCMP_ULE, LHS, RHS);
2398 case 7: return ConstantInt::getTrue(LHS->getContext());
2402 /// PredicatesFoldable - Return true if both predicates match sign or if at
2403 /// least one of them is an equality comparison (which is signless).
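/// For example, sgt/slt fold together (both signed) and eq/sgt fold together
/// (eq is signless), but ugt/sgt do not (mixed signedness, neither is equality).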
2404 static bool PredicatesFoldable(ICmpInst::Predicate p1, ICmpInst::Predicate p2) {
2405 return (CmpInst::isSigned(p1) == CmpInst::isSigned(p2)) ||
2406 (CmpInst::isSigned(p1) && ICmpInst::isEquality(p2)) ||
2407 (CmpInst::isSigned(p2) && ICmpInst::isEquality(p1));
2411 // FoldICmpLogical - Implements (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B)
2412 struct FoldICmpLogical {
2415 ICmpInst::Predicate pred;
2416 FoldICmpLogical(InstCombiner &ic, ICmpInst *ICI)
2417 : IC(ic), LHS(ICI->getOperand(0)), RHS(ICI->getOperand(1)),
2418 pred(ICI->getPredicate()) {}
2419 bool shouldApply(Value *V) const {
2420 if (ICmpInst *ICI = dyn_cast<ICmpInst>(V))
2421 if (PredicatesFoldable(pred, ICI->getPredicate()))
2422 return ((ICI->getOperand(0) == LHS && ICI->getOperand(1) == RHS) ||
2423 (ICI->getOperand(0) == RHS && ICI->getOperand(1) == LHS));
2426 Instruction *apply(Instruction &Log) const {
2427 ICmpInst *ICI = cast<ICmpInst>(Log.getOperand(0));
2428 if (ICI->getOperand(0) != LHS) {
2429 assert(ICI->getOperand(1) == LHS);
2430 ICI->swapOperands(); // Swap the LHS and RHS of the ICmp
2433 ICmpInst *RHSICI = cast<ICmpInst>(Log.getOperand(1));
2434 unsigned LHSCode = getICmpCode(ICI);
2435 unsigned RHSCode = getICmpCode(RHSICI);
2437 switch (Log.getOpcode()) {
2438 case Instruction::And: Code = LHSCode & RHSCode; break;
2439 case Instruction::Or: Code = LHSCode | RHSCode; break;
2440 case Instruction::Xor: Code = LHSCode ^ RHSCode; break;
2441 default: llvm_unreachable("Illegal logical opcode!"); return 0;
2444 bool isSigned = RHSICI->isSigned() || ICI->isSigned();
2445 Value *RV = getICmpValue(isSigned, Code, LHS, RHS);
2446 if (Instruction *I = dyn_cast<Instruction>(RV)) return I;
2448 // Otherwise, it's a constant boolean value...
2449 return IC.ReplaceInstUsesWith(Log, RV);
2452 } // end anonymous namespace
2454 // OptAndOp - This handles expressions of the form ((val OP C1) & C2), where
2455 // the Op parameter is 'OP', OpRHS is 'C1', and AndRHS is 'C2'. Op is
2456 // guaranteed to be a binary operator.
2457 Instruction *InstCombiner::OptAndOp(Instruction *Op,
2459 ConstantInt *AndRHS,
2460 BinaryOperator &TheAnd) {
2461 Value *X = Op->getOperand(0);
2462 Constant *Together = 0;
2464 Together = ConstantExpr::getAnd(AndRHS, OpRHS);
2466 switch (Op->getOpcode()) {
2467 case Instruction::Xor:
2468 if (Op->hasOneUse()) {
2469 // (X ^ C1) & C2 --> (X & C2) ^ (C1&C2)
2470 Value *And = Builder->CreateAnd(X, AndRHS);
2472 return BinaryOperator::CreateXor(And, Together);
2475 case Instruction::Or:
2476 if (Together == AndRHS) // (X | C) & C --> C
2477 return ReplaceInstUsesWith(TheAnd, AndRHS);
2479 if (Op->hasOneUse() && Together != OpRHS) {
2480 // (X | C1) & C2 --> (X | (C1&C2)) & C2
2481 Value *Or = Builder->CreateOr(X, Together);
2483 return BinaryOperator::CreateAnd(Or, AndRHS);
2486 case Instruction::Add:
2487 if (Op->hasOneUse()) {
2488 // Adding a one to a single-bit bit-field should be turned into an XOR
2489 // of the bit. The first thing to check is whether this AND is with a
2490 // single-bit constant.
2491 const APInt& AndRHSV = cast<ConstantInt>(AndRHS)->getValue();
2493 // If there is only one bit set...
2494 if (isOneBitSet(cast<ConstantInt>(AndRHS))) {
2495 // Ok, at this point, we know that we are masking the result of the
2496 // ADD down to exactly one bit. If the constant we are adding has
2497 // no bits set below this bit, then we can eliminate the ADD.
2498 const APInt& AddRHS = cast<ConstantInt>(OpRHS)->getValue();
2500 // Check to see if any bits below the one bit set in AndRHSV are set.
2501 if ((AddRHS & (AndRHSV-1)) == 0) {
2502 // If not, the only thing that can affect the output of the AND is
2503 // the bit specified by AndRHSV. If that bit is set, the effect of
2504 // the XOR is to toggle the bit. If it is clear, then the ADD has no effect.
2506 if ((AddRHS & AndRHSV) == 0) { // Bit is not set, noop
2507 TheAnd.setOperand(0, X);
2510 // Pull the XOR out of the AND.
2511 Value *NewAnd = Builder->CreateAnd(X, AndRHS);
2512 NewAnd->takeName(Op);
2513 return BinaryOperator::CreateXor(NewAnd, AndRHS);
2520 case Instruction::Shl: {
2521 // We know that the AND will not produce any of the bits shifted in, so if
2522 // the anded constant includes them, clear them now!
2524 uint32_t BitWidth = AndRHS->getType()->getBitWidth();
2525 uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
2526 APInt ShlMask(APInt::getHighBitsSet(BitWidth, BitWidth-OpRHSVal));
2527 ConstantInt *CI = ConstantInt::get(AndRHS->getContext(),
2528 AndRHS->getValue() & ShlMask);
2530 if (CI->getValue() == ShlMask) {
2531 // Masking out bits that the shift already masks
2532 return ReplaceInstUsesWith(TheAnd, Op); // No need for the and.
2533 } else if (CI != AndRHS) { // Reducing bits set in and.
2534 TheAnd.setOperand(1, CI);
2539 case Instruction::LShr: {
2540 // We know that the AND will not produce any of the bits shifted in, so if
2541 // the anded constant includes them, clear them now! This only applies to
2542 // unsigned shifts, because a signed shr may bring in set bits!
2544 uint32_t BitWidth = AndRHS->getType()->getBitWidth();
2545 uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
2546 APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal));
2547 ConstantInt *CI = ConstantInt::get(Op->getContext(),
2548 AndRHS->getValue() & ShrMask);
2550 if (CI->getValue() == ShrMask) {
2551 // Masking out bits that the shift already masks.
2552 return ReplaceInstUsesWith(TheAnd, Op);
2553 } else if (CI != AndRHS) {
2554 TheAnd.setOperand(1, CI); // Reduce bits set in and cst.
2559 case Instruction::AShr:
2561 // See if this is shifting in some sign extension, then masking it out with an 'and'.
2563 if (Op->hasOneUse()) {
2564 uint32_t BitWidth = AndRHS->getType()->getBitWidth();
2565 uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
2566 APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal));
2567 Constant *C = ConstantInt::get(Op->getContext(),
2568 AndRHS->getValue() & ShrMask);
2569 if (C == AndRHS) { // Masking out bits shifted in.
2570 // (Val ashr C1) & C2 -> (Val lshr C1) & C2
2571 // Make the argument unsigned.
2572 Value *ShVal = Op->getOperand(0);
2573 ShVal = Builder->CreateLShr(ShVal, OpRHS, Op->getName());
2574 return BinaryOperator::CreateAnd(ShVal, AndRHS, TheAnd.getName());
2583 /// InsertRangeTest - Emit a computation of: (V >= Lo && V < Hi) if Inside is
2584 /// true, otherwise (V < Lo || V >= Hi). In practice, we emit the more efficient
2585 /// (V-Lo) <u Hi-Lo. This method expects that Lo <= Hi. isSigned indicates
2586 /// whether to treat V, Lo, and Hi as signed or not. IB is the location to
2587 /// insert new instructions.
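/// For example, checking 10 <= V && V < 20 (unsigned, Inside == true) is
/// emitted as: (V - 10) u< 10.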
2588 Instruction *InstCombiner::InsertRangeTest(Value *V, Constant *Lo, Constant *Hi,
2589 bool isSigned, bool Inside,
2591 assert(cast<ConstantInt>(ConstantExpr::getICmp((isSigned ?
2592 ICmpInst::ICMP_SLE:ICmpInst::ICMP_ULE), Lo, Hi))->getZExtValue() &&
2593 "Lo is not <= Hi in range emission code!");
2596 if (Lo == Hi) // Trivially false.
2597 return new ICmpInst(ICmpInst::ICMP_NE, V, V);
2599 // V >= Min && V < Hi --> V < Hi
2600 if (cast<ConstantInt>(Lo)->isMinValue(isSigned)) {
2601 ICmpInst::Predicate pred = (isSigned ?
2602 ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT);
2603 return new ICmpInst(pred, V, Hi);
2606 // Emit V-Lo <u Hi-Lo
2607 Constant *NegLo = ConstantExpr::getNeg(Lo);
2608 Value *Add = Builder->CreateAdd(V, NegLo, V->getName()+".off");
2609 Constant *UpperBound = ConstantExpr::getAdd(NegLo, Hi);
2610 return new ICmpInst(ICmpInst::ICMP_ULT, Add, UpperBound);
2613 if (Lo == Hi) // Trivially true.
2614 return new ICmpInst(ICmpInst::ICMP_EQ, V, V);
2616 // V < Min || V >= Hi -> V > Hi-1
2617 Hi = SubOne(cast<ConstantInt>(Hi));
2618 if (cast<ConstantInt>(Lo)->isMinValue(isSigned)) {
2619 ICmpInst::Predicate pred = (isSigned ?
2620 ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT);
2621 return new ICmpInst(pred, V, Hi);
2624 // Emit V-Lo >u Hi-1-Lo
2625 // Note that Hi has already had one subtracted from it, above.
2626 ConstantInt *NegLo = cast<ConstantInt>(ConstantExpr::getNeg(Lo));
2627 Value *Add = Builder->CreateAdd(V, NegLo, V->getName()+".off");
2628 Constant *LowerBound = ConstantExpr::getAdd(NegLo, Hi);
2629 return new ICmpInst(ICmpInst::ICMP_UGT, Add, LowerBound);
2632 // isRunOfOnes - Returns true iff Val consists of one contiguous run of 1s with
2633 // any number of 0s on either side. The 1s are allowed to wrap from LSB to
2634 // MSB, so 0x000FFF0, 0x0000FFFF, and 0xFF0000FF are all runs. 0x0F0F0000 is
2635 // not, since all 1s are not contiguous.
2636 static bool isRunOfOnes(ConstantInt *Val, uint32_t &MB, uint32_t &ME) {
2637 const APInt& V = Val->getValue();
2638 uint32_t BitWidth = Val->getType()->getBitWidth();
2639 if (!APIntOps::isShiftedMask(BitWidth, V)) return false;
2641 // look for the first zero bit after the run of ones
2642 MB = BitWidth - ((V - 1) ^ V).countLeadingZeros();
2643 // look for the first non-zero bit
2644 ME = V.getActiveBits();
2648 /// FoldLogicalPlusAnd - This is part of an expression (LHS +/- RHS) & Mask,
2649 /// where isSub determines whether the operator is a sub. If we can fold one of
2650 /// the following xforms:
2652 /// ((A & N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == Mask
2653 /// ((A | N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == 0
2654 /// ((A ^ N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == 0
2656 /// return (A +/- B).
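/// For example, ((A & 0xFFFF) + B) & 0xFF  -->  (A + B) & 0xFF, because the
/// low 8 bits of the sum depend only on the low 8 bits of A and B, and the
/// 0xFFFF mask does not change those bits of A.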
2658 Value *InstCombiner::FoldLogicalPlusAnd(Value *LHS, Value *RHS,
2659 ConstantInt *Mask, bool isSub,
2661 Instruction *LHSI = dyn_cast<Instruction>(LHS);
2662 if (!LHSI || LHSI->getNumOperands() != 2 ||
2663 !isa<ConstantInt>(LHSI->getOperand(1))) return 0;
2665 ConstantInt *N = cast<ConstantInt>(LHSI->getOperand(1));
2667 switch (LHSI->getOpcode()) {
2669 case Instruction::And:
2670 if (ConstantExpr::getAnd(N, Mask) == Mask) {
2671 // If the AndRHS is a power of two minus one (0+1+), this is simple.
2672 if ((Mask->getValue().countLeadingZeros() +
2673 Mask->getValue().countPopulation()) ==
2674 Mask->getValue().getBitWidth())
2677 // Otherwise, if Mask is 0+1+0+, and if B is known to have the low 0+
2678 // part, we don't need any explicit masks to take them out of A. If that
2679 // is all N is, ignore it.
2680 uint32_t MB = 0, ME = 0;
2681 if (isRunOfOnes(Mask, MB, ME)) { // begin/end bit of run, inclusive
2682 uint32_t BitWidth = cast<IntegerType>(RHS->getType())->getBitWidth();
2683 APInt Mask(APInt::getLowBitsSet(BitWidth, MB-1));
2684 if (MaskedValueIsZero(RHS, Mask))
2689 case Instruction::Or:
2690 case Instruction::Xor:
2691 // If the AndRHS is a power of two minus one (0+1+), and N&Mask == 0
2692 if ((Mask->getValue().countLeadingZeros() +
2693 Mask->getValue().countPopulation()) == Mask->getValue().getBitWidth()
2694 && ConstantExpr::getAnd(N, Mask)->isNullValue())
2700 return Builder->CreateSub(LHSI->getOperand(0), RHS, "fold");
2701 return Builder->CreateAdd(LHSI->getOperand(0), RHS, "fold");
2704 /// FoldAndOfICmps - Fold (icmp)&(icmp) if possible.
2705 Instruction *InstCombiner::FoldAndOfICmps(Instruction &I,
2706 ICmpInst *LHS, ICmpInst *RHS) {
2708 ConstantInt *LHSCst, *RHSCst;
2709 ICmpInst::Predicate LHSCC, RHSCC;
2711 // This only handles icmp of constants: (icmp1 A, C1) & (icmp2 B, C2).
2712 if (!match(LHS, m_ICmp(LHSCC, m_Value(Val),
2713 m_ConstantInt(LHSCst))) ||
2714 !match(RHS, m_ICmp(RHSCC, m_Value(Val2),
2715 m_ConstantInt(RHSCst))))
2718 if (LHSCst == RHSCst && LHSCC == RHSCC) {
2719 // (icmp ult A, C) & (icmp ult B, C) --> (icmp ult (A|B), C)
2720 // where C is a power of 2
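// e.g. (A u< 8) & (B u< 8)  -->  (A | B) u< 8, since both values being below a
// power of two is the same as their 'or' being below it.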
2721 if (LHSCC == ICmpInst::ICMP_ULT &&
2722 LHSCst->getValue().isPowerOf2()) {
2723 Value *NewOr = Builder->CreateOr(Val, Val2);
2724 return new ICmpInst(LHSCC, NewOr, LHSCst);
2727 // (icmp eq A, 0) & (icmp eq B, 0) --> (icmp eq (A|B), 0)
2728 if (LHSCC == ICmpInst::ICMP_EQ && LHSCst->isZero()) {
2729 Value *NewOr = Builder->CreateOr(Val, Val2);
2730 return new ICmpInst(LHSCC, NewOr, LHSCst);
2734 // From here on, we only handle:
2735 // (icmp1 A, C1) & (icmp2 A, C2) --> something simpler.
2736 if (Val != Val2) return 0;
2738 // ICMP_[US][GL]E X, CST is folded to ICMP_[US][GL]T elsewhere.
2739 if (LHSCC == ICmpInst::ICMP_UGE || LHSCC == ICmpInst::ICMP_ULE ||
2740 RHSCC == ICmpInst::ICMP_UGE || RHSCC == ICmpInst::ICMP_ULE ||
2741 LHSCC == ICmpInst::ICMP_SGE || LHSCC == ICmpInst::ICMP_SLE ||
2742 RHSCC == ICmpInst::ICMP_SGE || RHSCC == ICmpInst::ICMP_SLE)
2745 // We can't fold (ugt x, C) & (sgt x, C2).
2746 if (!PredicatesFoldable(LHSCC, RHSCC))
2749 // Ensure that the larger constant is on the RHS.
2751 if (CmpInst::isSigned(LHSCC) ||
2752 (ICmpInst::isEquality(LHSCC) &&
2753 CmpInst::isSigned(RHSCC)))
2754 ShouldSwap = LHSCst->getValue().sgt(RHSCst->getValue());
2756 ShouldSwap = LHSCst->getValue().ugt(RHSCst->getValue());
2759 std::swap(LHS, RHS);
2760 std::swap(LHSCst, RHSCst);
2761 std::swap(LHSCC, RHSCC);
2764 // At this point, we know we have two icmp instructions
2765 // comparing a value against two constants and and'ing the result
2766 // together. Because of the above check, we know that we only have
2767 // icmp eq, icmp ne, icmp [su]lt, and icmp [su]gt here. We also know
2768 // (from the FoldICmpLogical check above), that the two constants
2769 // are not equal and that the larger constant is on the RHS.
2770 assert(LHSCst != RHSCst && "Compares not folded above?");
2773 default: llvm_unreachable("Unknown integer condition code!");
2774 case ICmpInst::ICMP_EQ:
2776 default: llvm_unreachable("Unknown integer condition code!");
2777 case ICmpInst::ICMP_EQ: // (X == 13 & X == 15) -> false
2778 case ICmpInst::ICMP_UGT: // (X == 13 & X > 15) -> false
2779 case ICmpInst::ICMP_SGT: // (X == 13 & X > 15) -> false
2780 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
2781 case ICmpInst::ICMP_NE: // (X == 13 & X != 15) -> X == 13
2782 case ICmpInst::ICMP_ULT: // (X == 13 & X < 15) -> X == 13
2783 case ICmpInst::ICMP_SLT: // (X == 13 & X < 15) -> X == 13
2784 return ReplaceInstUsesWith(I, LHS);
2786 case ICmpInst::ICMP_NE:
2788 default: llvm_unreachable("Unknown integer condition code!");
2789 case ICmpInst::ICMP_ULT:
2790 if (LHSCst == SubOne(RHSCst)) // (X != 13 & X u< 14) -> X < 13
2791 return new ICmpInst(ICmpInst::ICMP_ULT, Val, LHSCst);
2792 break; // (X != 13 & X u< 15) -> no change
2793 case ICmpInst::ICMP_SLT:
2794 if (LHSCst == SubOne(RHSCst)) // (X != 13 & X s< 14) -> X < 13
2795 return new ICmpInst(ICmpInst::ICMP_SLT, Val, LHSCst);
2796 break; // (X != 13 & X s< 15) -> no change
2797 case ICmpInst::ICMP_EQ: // (X != 13 & X == 15) -> X == 15
2798 case ICmpInst::ICMP_UGT: // (X != 13 & X u> 15) -> X u> 15
2799 case ICmpInst::ICMP_SGT: // (X != 13 & X s> 15) -> X s> 15
2800 return ReplaceInstUsesWith(I, RHS);
2801 case ICmpInst::ICMP_NE:
2802 if (LHSCst == SubOne(RHSCst)){// (X != 13 & X != 14) -> X-13 >u 1
2803 Constant *AddCST = ConstantExpr::getNeg(LHSCst);
2804 Value *Add = Builder->CreateAdd(Val, AddCST, Val->getName()+".off");
2805 return new ICmpInst(ICmpInst::ICMP_UGT, Add,
2806 ConstantInt::get(Add->getType(), 1));
2808 break; // (X != 13 & X != 15) -> no change
2811 case ICmpInst::ICMP_ULT:
2813 default: llvm_unreachable("Unknown integer condition code!");
2814 case ICmpInst::ICMP_EQ: // (X u< 13 & X == 15) -> false
2815 case ICmpInst::ICMP_UGT: // (X u< 13 & X u> 15) -> false
2816 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
2817 case ICmpInst::ICMP_SGT: // (X u< 13 & X s> 15) -> no change
2819 case ICmpInst::ICMP_NE: // (X u< 13 & X != 15) -> X u< 13
2820 case ICmpInst::ICMP_ULT: // (X u< 13 & X u< 15) -> X u< 13
2821 return ReplaceInstUsesWith(I, LHS);
2822 case ICmpInst::ICMP_SLT: // (X u< 13 & X s< 15) -> no change
2826 case ICmpInst::ICMP_SLT:
2828 default: llvm_unreachable("Unknown integer condition code!");
2829 case ICmpInst::ICMP_EQ: // (X s< 13 & X == 15) -> false
2830 case ICmpInst::ICMP_SGT: // (X s< 13 & X s> 15) -> false
2831 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
2832 case ICmpInst::ICMP_UGT: // (X s< 13 & X u> 15) -> no change
2834 case ICmpInst::ICMP_NE: // (X s< 13 & X != 15) -> X < 13
2835 case ICmpInst::ICMP_SLT: // (X s< 13 & X s< 15) -> X < 13
2836 return ReplaceInstUsesWith(I, LHS);
2837 case ICmpInst::ICMP_ULT: // (X s< 13 & X u< 15) -> no change
2841 case ICmpInst::ICMP_UGT:
2843 default: llvm_unreachable("Unknown integer condition code!");
2844 case ICmpInst::ICMP_EQ: // (X u> 13 & X == 15) -> X == 15
2845 case ICmpInst::ICMP_UGT: // (X u> 13 & X u> 15) -> X u> 15
2846 return ReplaceInstUsesWith(I, RHS);
2847 case ICmpInst::ICMP_SGT: // (X u> 13 & X s> 15) -> no change
2849 case ICmpInst::ICMP_NE:
2850 if (RHSCst == AddOne(LHSCst)) // (X u> 13 & X != 14) -> X u> 14
2851 return new ICmpInst(LHSCC, Val, RHSCst);
2852 break; // (X u> 13 & X != 15) -> no change
2853 case ICmpInst::ICMP_ULT: // (X u> 13 & X u< 15) -> (X-14) <u 1
2854 return InsertRangeTest(Val, AddOne(LHSCst),
2855 RHSCst, false, true, I);
2856 case ICmpInst::ICMP_SLT: // (X u> 13 & X s< 15) -> no change
2860 case ICmpInst::ICMP_SGT:
2862 default: llvm_unreachable("Unknown integer condition code!");
2863 case ICmpInst::ICMP_EQ: // (X s> 13 & X == 15) -> X == 15
2864 case ICmpInst::ICMP_SGT: // (X s> 13 & X s> 15) -> X s> 15
2865 return ReplaceInstUsesWith(I, RHS);
2866 case ICmpInst::ICMP_UGT: // (X s> 13 & X u> 15) -> no change
2868 case ICmpInst::ICMP_NE:
2869 if (RHSCst == AddOne(LHSCst)) // (X s> 13 & X != 14) -> X s> 14
2870 return new ICmpInst(LHSCC, Val, RHSCst);
2871 break; // (X s> 13 & X != 15) -> no change
2872 case ICmpInst::ICMP_SLT: // (X s> 13 & X s< 15) -> (X-14) s< 1
2873 return InsertRangeTest(Val, AddOne(LHSCst),
2874 RHSCst, true, true, I);
2875 case ICmpInst::ICMP_ULT: // (X s> 13 & X u< 15) -> no change
2884 Instruction *InstCombiner::FoldAndOfFCmps(Instruction &I, FCmpInst *LHS,
2887 if (LHS->getPredicate() == FCmpInst::FCMP_ORD &&
2888 RHS->getPredicate() == FCmpInst::FCMP_ORD) {
2889 // (fcmp ord x, c) & (fcmp ord y, c) -> (fcmp ord x, y)
2890 if (ConstantFP *LHSC = dyn_cast<ConstantFP>(LHS->getOperand(1)))
2891 if (ConstantFP *RHSC = dyn_cast<ConstantFP>(RHS->getOperand(1))) {
2892 // If either of the constants is a NaN, then the whole thing returns false.
2894 if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN())
2895 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
2896 return new FCmpInst(FCmpInst::FCMP_ORD,
2897 LHS->getOperand(0), RHS->getOperand(0));
2900 // Handle vector zeros. This occurs because the canonical form of
2901 // "fcmp ord x,x" is "fcmp ord x, 0".
2902 if (isa<ConstantAggregateZero>(LHS->getOperand(1)) &&
2903 isa<ConstantAggregateZero>(RHS->getOperand(1)))
2904 return new FCmpInst(FCmpInst::FCMP_ORD,
2905 LHS->getOperand(0), RHS->getOperand(0));
2909 Value *Op0LHS = LHS->getOperand(0), *Op0RHS = LHS->getOperand(1);
2910 Value *Op1LHS = RHS->getOperand(0), *Op1RHS = RHS->getOperand(1);
2911 FCmpInst::Predicate Op0CC = LHS->getPredicate(), Op1CC = RHS->getPredicate();
2914 if (Op0LHS == Op1RHS && Op0RHS == Op1LHS) {
2915 // Swap RHS operands to match LHS.
2916 Op1CC = FCmpInst::getSwappedPredicate(Op1CC);
2917 std::swap(Op1LHS, Op1RHS);
2920 if (Op0LHS == Op1LHS && Op0RHS == Op1RHS) {
2921 // Simplify (fcmp cc0 x, y) & (fcmp cc1 x, y).
2923 if (Op0CC == Op1CC) return new FCmpInst((FCmpInst::Predicate)Op0CC, Op0LHS, Op0RHS);
2925 if (Op0CC == FCmpInst::FCMP_FALSE || Op1CC == FCmpInst::FCMP_FALSE)
2926 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
2927 if (Op0CC == FCmpInst::FCMP_TRUE)
2928 return ReplaceInstUsesWith(I, RHS);
2929 if (Op1CC == FCmpInst::FCMP_TRUE)
2930 return ReplaceInstUsesWith(I, LHS);
2934 unsigned Op0Pred = getFCmpCode(Op0CC, Op0Ordered);
2935 unsigned Op1Pred = getFCmpCode(Op1CC, Op1Ordered);
2937 std::swap(LHS, RHS);
2938 std::swap(Op0Pred, Op1Pred);
2939 std::swap(Op0Ordered, Op1Ordered);
2942 // uno && ueq -> uno && (uno || eq) -> ueq
2943 // ord && olt -> ord && (ord && lt) -> olt
2944 if (Op0Ordered == Op1Ordered)
2945 return ReplaceInstUsesWith(I, RHS);
2947 // uno && oeq -> uno && (ord && eq) -> false
2948 // uno && ord -> false
2950 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
2951 // ord && ueq -> ord && (uno || eq) -> oeq
2952 return cast<Instruction>(getFCmpValue(true, Op1Pred, Op0LHS, Op0RHS));
2960 Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
2961 bool Changed = SimplifyCommutative(I);
2962 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2964 if (Value *V = SimplifyAndInst(Op0, Op1, TD))
2965 return ReplaceInstUsesWith(I, V);
2967 // See if we can simplify any instructions used by the instruction whose sole
2968 // purpose is to compute bits we don't care about.
2969 if (SimplifyDemandedInstructionBits(I))
2972 if (ConstantInt *AndRHS = dyn_cast<ConstantInt>(Op1)) {
2973 const APInt &AndRHSMask = AndRHS->getValue();
2974 APInt NotAndRHS(~AndRHSMask);
2976 // Optimize a variety of ((val OP C1) & C2) combinations...
2977 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
2978 Value *Op0LHS = Op0I->getOperand(0);
2979 Value *Op0RHS = Op0I->getOperand(1);
2980 switch (Op0I->getOpcode()) {
2982 case Instruction::Xor:
2983 case Instruction::Or:
2984 // If the mask is only needed on one incoming arm, push it up.
2985 if (!Op0I->hasOneUse()) break;
2987 if (MaskedValueIsZero(Op0LHS, NotAndRHS)) {
2988 // Not masking anything out for the LHS, move to RHS.
2989 Value *NewRHS = Builder->CreateAnd(Op0RHS, AndRHS,
2990 Op0RHS->getName()+".masked");
2991 return BinaryOperator::Create(Op0I->getOpcode(), Op0LHS, NewRHS);
2993 if (!isa<Constant>(Op0RHS) &&
2994 MaskedValueIsZero(Op0RHS, NotAndRHS)) {
2995 // Not masking anything out for the RHS, move to LHS.
2996 Value *NewLHS = Builder->CreateAnd(Op0LHS, AndRHS,
2997 Op0LHS->getName()+".masked");
2998 return BinaryOperator::Create(Op0I->getOpcode(), NewLHS, Op0RHS);
3002 case Instruction::Add:
3003 // ((A & N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == AndRHS.
3004 // ((A | N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == 0
3005 // ((A ^ N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == 0
3006 if (Value *V = FoldLogicalPlusAnd(Op0LHS, Op0RHS, AndRHS, false, I))
3007 return BinaryOperator::CreateAnd(V, AndRHS);
3008 if (Value *V = FoldLogicalPlusAnd(Op0RHS, Op0LHS, AndRHS, false, I))
3009 return BinaryOperator::CreateAnd(V, AndRHS); // Add commutes
3012 case Instruction::Sub:
3013 // ((A & N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == AndRHS.
3014 // ((A | N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == 0
3015 // ((A ^ N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == 0
3016 if (Value *V = FoldLogicalPlusAnd(Op0LHS, Op0RHS, AndRHS, true, I))
3017 return BinaryOperator::CreateAnd(V, AndRHS);
3019 // (A - N) & AndRHS -> -N & AndRHS iff A&AndRHS==0 and AndRHS
3020 // has 1's for all bits that the subtraction with A might affect.
3021 if (Op0I->hasOneUse()) {
3022 uint32_t BitWidth = AndRHSMask.getBitWidth();
3023 uint32_t Zeros = AndRHSMask.countLeadingZeros();
3024 APInt Mask = APInt::getLowBitsSet(BitWidth, BitWidth - Zeros);
3026 ConstantInt *A = dyn_cast<ConstantInt>(Op0LHS);
3027 if (!(A && A->isZero()) && // avoid infinite recursion.
3028 MaskedValueIsZero(Op0LHS, Mask)) {
3029 Value *NewNeg = Builder->CreateNeg(Op0RHS);
3030 return BinaryOperator::CreateAnd(NewNeg, AndRHS);
3035 case Instruction::Shl:
3036 case Instruction::LShr:
3037 // (1 << x) & 1 --> zext(x == 0)
3038 // (1 >> x) & 1 --> zext(x == 0)
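// Either shift leaves bit 0 set only when the shift amount is zero, so the
// 'and' with 1 is equivalent to testing x == 0 and zero-extending the result.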
3039 if (AndRHSMask == 1 && Op0LHS == AndRHS) {
3041 Value *NewICmp = Builder->CreateICmpEQ(Op0RHS, Constant::getNullValue(I.getType()));
3042 return new ZExtInst(NewICmp, I.getType());
3047 if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1)))
3048 if (Instruction *Res = OptAndOp(Op0I, Op0CI, AndRHS, I))
3050 } else if (CastInst *CI = dyn_cast<CastInst>(Op0)) {
3051 // If this is an integer truncation or change from signed-to-unsigned, and
3052 // if the source is an and/or with immediate, transform it. This
3053 // frequently occurs for bitfield accesses.
3054 if (Instruction *CastOp = dyn_cast<Instruction>(CI->getOperand(0))) {
3055 if ((isa<TruncInst>(CI) || isa<BitCastInst>(CI)) &&
3056 CastOp->getNumOperands() == 2)
3057 if (ConstantInt *AndCI =dyn_cast<ConstantInt>(CastOp->getOperand(1))){
3058 if (CastOp->getOpcode() == Instruction::And) {
3059 // Change: and (cast (and X, C1) to T), C2
3060 // into : and (cast X to T), trunc_or_bitcast(C1)&C2
3061 // This will fold the two constants together, which may allow
3062 // other simplifications.
3063 Value *NewCast = Builder->CreateTruncOrBitCast(
3064 CastOp->getOperand(0), I.getType(),
3065 CastOp->getName()+".shrunk");
3066 // trunc_or_bitcast(C1)&C2
3067 Constant *C3 = ConstantExpr::getTruncOrBitCast(AndCI,I.getType());
3068 C3 = ConstantExpr::getAnd(C3, AndRHS);
3069 return BinaryOperator::CreateAnd(NewCast, C3);
3070 } else if (CastOp->getOpcode() == Instruction::Or) {
3071 // Change: and (cast (or X, C1) to T), C2
3072 // into : trunc(C1)&C2 iff trunc(C1)&C2 == C2
3073 Constant *C3 = ConstantExpr::getTruncOrBitCast(AndCI,I.getType());
3074 if (ConstantExpr::getAnd(C3, AndRHS) == AndRHS)
3076 return ReplaceInstUsesWith(I, AndRHS);
3082 // Try to fold constant and into select arguments.
3083 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
3084 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
3086 if (isa<PHINode>(Op0))
3087 if (Instruction *NV = FoldOpIntoPhi(I))
3092 // (~A & ~B) == (~(A | B)) - De Morgan's Law
3093 if (Value *Op0NotVal = dyn_castNotVal(Op0))
3094 if (Value *Op1NotVal = dyn_castNotVal(Op1))
3095 if (Op0->hasOneUse() && Op1->hasOneUse()) {
3096 Value *Or = Builder->CreateOr(Op0NotVal, Op1NotVal,
3097 I.getName()+".demorgan");
3098 return BinaryOperator::CreateNot(Or);
3102 Value *A = 0, *B = 0, *C = 0, *D = 0;
3103 // (A|B) & ~(A&B) -> A^B
3104 if (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
3105 match(Op1, m_Not(m_And(m_Value(C), m_Value(D)))) &&
3106 ((A == C && B == D) || (A == D && B == C)))
3107 return BinaryOperator::CreateXor(A, B);
3109 // ~(A&B) & (A|B) -> A^B
3110 if (match(Op1, m_Or(m_Value(A), m_Value(B))) &&
3111 match(Op0, m_Not(m_And(m_Value(C), m_Value(D)))) &&
3112 ((A == C && B == D) || (A == D && B == C)))
3113 return BinaryOperator::CreateXor(A, B);
3115 if (Op0->hasOneUse() &&
3116 match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
3117 if (A == Op1) { // (A^B)&A -> A&(A^B)
3118 I.swapOperands(); // Simplify below
3119 std::swap(Op0, Op1);
3120 } else if (B == Op1) { // (A^B)&B -> B&(B^A)
3121 cast<BinaryOperator>(Op0)->swapOperands();
3122 I.swapOperands(); // Simplify below
3123 std::swap(Op0, Op1);
3127 if (Op1->hasOneUse() &&
3128 match(Op1, m_Xor(m_Value(A), m_Value(B)))) {
3129 if (B == Op0) { // B&(A^B) -> B&(B^A)
3130 cast<BinaryOperator>(Op1)->swapOperands();
3133 if (A == Op0) // A&(A^B) -> A & ~B
3134 return BinaryOperator::CreateAnd(A, Builder->CreateNot(B, "tmp"));
3137 // (A&((~A)|B)) -> A&B
3138 if (match(Op0, m_Or(m_Not(m_Specific(Op1)), m_Value(A))) ||
3139 match(Op0, m_Or(m_Value(A), m_Not(m_Specific(Op1)))))
3140 return BinaryOperator::CreateAnd(A, Op1);
3141 if (match(Op1, m_Or(m_Not(m_Specific(Op0)), m_Value(A))) ||
3142 match(Op1, m_Or(m_Value(A), m_Not(m_Specific(Op0)))))
3143 return BinaryOperator::CreateAnd(A, Op0);
3146 if (ICmpInst *RHS = dyn_cast<ICmpInst>(Op1)) {
3147 // (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B)
3148 if (Instruction *R = AssociativeOpt(I, FoldICmpLogical(*this, RHS)))
3151 if (ICmpInst *LHS = dyn_cast<ICmpInst>(Op0))
3152 if (Instruction *Res = FoldAndOfICmps(I, LHS, RHS))
3156 // fold (and (cast A), (cast B)) -> (cast (and A, B))
3157 if (CastInst *Op0C = dyn_cast<CastInst>(Op0))
3158 if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
3159 if (Op0C->getOpcode() == Op1C->getOpcode()) { // same cast kind ?
3160 const Type *SrcTy = Op0C->getOperand(0)->getType();
3161 if (SrcTy == Op1C->getOperand(0)->getType() &&
3162 SrcTy->isIntOrIntVector() &&
3163 // Only do this if the casts both really cause code to be generated.
3164 ValueRequiresCast(Op0C->getOpcode(), Op0C->getOperand(0), I.getType()) &&
3166 ValueRequiresCast(Op1C->getOpcode(), Op1C->getOperand(0), I.getType())) {
3168 Value *NewOp = Builder->CreateAnd(Op0C->getOperand(0),
3169 Op1C->getOperand(0), I.getName());
3170 return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
3174 // (X >> Z) & (Y >> Z) -> (X&Y) >> Z for all shifts.
3175 if (BinaryOperator *SI1 = dyn_cast<BinaryOperator>(Op1)) {
3176 if (BinaryOperator *SI0 = dyn_cast<BinaryOperator>(Op0))
3177 if (SI0->isShift() && SI0->getOpcode() == SI1->getOpcode() &&
3178 SI0->getOperand(1) == SI1->getOperand(1) &&
3179 (SI0->hasOneUse() || SI1->hasOneUse())) {
3181 Value *NewOp = Builder->CreateAnd(SI0->getOperand(0), SI1->getOperand(0), SI0->getName());
3183 return BinaryOperator::Create(SI1->getOpcode(), NewOp,
3184 SI1->getOperand(1));
3188 // If and'ing two fcmp, try combine them into one.
3189 if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0))) {
3190 if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
3191 if (Instruction *Res = FoldAndOfFCmps(I, LHS, RHS))
3195 return Changed ? &I : 0;
3198 /// CollectBSwapParts - Analyze the specified subexpression and see if it is
3199 /// capable of providing pieces of a bswap. The subexpression provides pieces
3200 /// of a bswap if it is proven that each of the non-zero bytes in the output of
3201 /// the expression came from the corresponding "byte swapped" byte in some other
3202 /// value. For example, if the current subexpression is "(shl i32 %X, 24)" then
3203 /// we know that the expression deposits the low byte of %X into the high byte
3204 /// of the bswap result and that all other bytes are zero. This expression is
3205 /// accepted, the high byte of ByteValues is set to X to indicate a correct match.
3208 /// This function returns true if the match was unsuccessful and false if it succeeded.
3209 /// On entry to the function the "OverallLeftShift" is a signed integer value
3210 /// indicating the number of bytes that the subexpression is later shifted. For
3211 /// example, if the expression is later right shifted by 16 bits, the
3212 /// OverallLeftShift value would be -2 on entry. This is used to specify which
3213 /// byte of ByteValues is actually being set.
3215 /// Similarly, ByteMask is a bitmask where a bit is clear if its corresponding
3216 /// byte is masked to zero by a user. For example, in (X & 255), X will be
3217 /// processed with a bytemask of 1. Because bytemask is 32-bits, this limits
3218 /// this function to working on up to 32-byte (256 bit) values. ByteMask is
3219 /// always in the local (OverallLeftShift) coordinate space.
3221 static bool CollectBSwapParts(Value *V, int OverallLeftShift, uint32_t ByteMask,
3222 SmallVector<Value*, 8> &ByteValues) {
3223 if (Instruction *I = dyn_cast<Instruction>(V)) {
3224 // If this is an or instruction, it may be an inner node of the bswap.
3225 if (I->getOpcode() == Instruction::Or) {
3226 return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
3228 CollectBSwapParts(I->getOperand(1), OverallLeftShift, ByteMask,
3232 // If this is a logical shift by a constant multiple of 8, recurse with
3233 // OverallLeftShift and ByteMask adjusted.
3234 if (I->isLogicalShift() && isa<ConstantInt>(I->getOperand(1))) {
3236 cast<ConstantInt>(I->getOperand(1))->getLimitedValue(~0U);
3237 // Ensure the shift amount is defined and of a byte value.
3238 if ((ShAmt & 7) || (ShAmt > 8*ByteValues.size()))
3241 unsigned ByteShift = ShAmt >> 3;
3242 if (I->getOpcode() == Instruction::Shl) {
3243 // X << 2 -> collect(X, +2)
3244 OverallLeftShift += ByteShift;
3245 ByteMask >>= ByteShift;
3247 // X >>u 2 -> collect(X, -2)
3248 OverallLeftShift -= ByteShift;
3249 ByteMask <<= ByteShift;
3250 ByteMask &= (~0U >> (32-ByteValues.size()));
3253 if (OverallLeftShift >= (int)ByteValues.size()) return true;
3254 if (OverallLeftShift <= -(int)ByteValues.size()) return true;
3256 return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
3260 // If this is a logical 'and' with a mask that clears bytes, clear the
3261 // corresponding bytes in ByteMask.
3262 if (I->getOpcode() == Instruction::And &&
3263 isa<ConstantInt>(I->getOperand(1))) {
3264 // Scan every byte of the and mask, seeing if the byte is either 0 or 255.
3265 unsigned NumBytes = ByteValues.size();
3266 APInt Byte(I->getType()->getPrimitiveSizeInBits(), 255);
3267 const APInt &AndMask = cast<ConstantInt>(I->getOperand(1))->getValue();
3269 for (unsigned i = 0; i != NumBytes; ++i, Byte <<= 8) {
3270       // If this byte is masked out by a later operation, we don't care what
3271       // it is.
3272       if ((ByteMask & (1 << i)) == 0)
3273         continue;
3275 // If the AndMask is all zeros for this byte, clear the bit.
3276       APInt MaskB = AndMask & Byte;
3277       if (MaskB == 0) {
3278         ByteMask &= ~(1U << i);
3279         continue;
3280       }
3282       // If the AndMask is not all ones for this byte, it's not a bytezap.
3283       if (MaskB != Byte)
3284         return true;
3286       // Otherwise, this byte is kept.
3287     }
3289     return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
3290                              ByteValues);
3291     }
3292   }
3294 // Okay, we got to something that isn't a shift, 'or' or 'and'. This must be
3295 // the input value to the bswap. Some observations: 1) if more than one byte
3296 // is demanded from this input, then it could not be successfully assembled
3297 // into a byteswap. At least one of the two bytes would not be aligned with
3298 // their ultimate destination.
3299 if (!isPowerOf2_32(ByteMask)) return true;
3300 unsigned InputByteNo = CountTrailingZeros_32(ByteMask);
3302 // 2) The input and ultimate destinations must line up: if byte 3 of an i32
3303 // is demanded, it needs to go into byte 0 of the result. This means that the
3304 // byte needs to be shifted until it lands in the right byte bucket. The
3305 // shift amount depends on the position: if the byte is coming from the high
3306 // part of the value (e.g. byte 3) then it must be shifted right. If from the
3307 // low part, it must be shifted left.
3308 unsigned DestByteNo = InputByteNo + OverallLeftShift;
3309   if (InputByteNo < ByteValues.size()/2) {
3310     if (ByteValues.size()-1-DestByteNo != InputByteNo)
3311       return true;
3312   } else {
3313     if (ByteValues.size()-1-DestByteNo != InputByteNo)
3314       return true;
3315   }
3317 // If the destination byte value is already defined, the values are or'd
3318 // together, which isn't a bswap (unless it's an or of the same bits).
3319   if (ByteValues[DestByteNo] && ByteValues[DestByteNo] != V)
3320     return true;
3321   ByteValues[DestByteNo] = V;
3322   return false;
3323 }
3325 /// MatchBSwap - Given an OR instruction, check to see if this is a bswap idiom.
3326 /// If so, insert the new bswap intrinsic and return it.
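// For illustration (a sketch of the idiom, not the only accepted form), the
// canonical i32 pattern this is meant to recognize looks like:
//   %t0 = shl i32 %x, 24
//   %m1 = and i32 %x, 65280
//   %t1 = shl i32 %m1, 8
//   %s2 = lshr i32 %x, 8
//   %t2 = and i32 %s2, 65280
//   %t3 = lshr i32 %x, 24
//   %lo = or i32 %t0, %t1
//   %hi = or i32 %t2, %t3
//   %r  = or i32 %lo, %hi
// and is replaced with a single call to the llvm.bswap.i32 intrinsic.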
3327 Instruction *InstCombiner::MatchBSwap(BinaryOperator &I) {
3328 const IntegerType *ITy = dyn_cast<IntegerType>(I.getType());
3329 if (!ITy || ITy->getBitWidth() % 16 ||
3330 // ByteMask only allows up to 32-byte values.
3331 ITy->getBitWidth() > 32*8)
3332 return 0; // Can only bswap pairs of bytes. Can't do vectors.
3334 /// ByteValues - For each byte of the result, we keep track of which value
3335 /// defines each byte.
3336 SmallVector<Value*, 8> ByteValues;
3337 ByteValues.resize(ITy->getBitWidth()/8);
3339 // Try to find all the pieces corresponding to the bswap.
3340 uint32_t ByteMask = ~0U >> (32-ByteValues.size());
3341   if (CollectBSwapParts(&I, 0, ByteMask, ByteValues))
3342     return 0;
3344 // Check to see if all of the bytes come from the same value.
3345 Value *V = ByteValues[0];
3346 if (V == 0) return 0; // Didn't find a byte? Must be zero.
3348 // Check to make sure that all of the bytes come from the same value.
3349 for (unsigned i = 1, e = ByteValues.size(); i != e; ++i)
3350     if (ByteValues[i] != V)
3351       return 0;
3352 const Type *Tys[] = { ITy };
3353 Module *M = I.getParent()->getParent()->getParent();
3354 Function *F = Intrinsic::getDeclaration(M, Intrinsic::bswap, Tys, 1);
3355   return CallInst::Create(F, V);
3356 }
3358 /// MatchSelectFromAndOr - We have an expression of the form (A&C)|(B&D). Check
3359 /// if A is (cond?-1:0) and either B or D is ~(cond?-1:0) or (cond?0:-1), then
3360 /// we can simplify this expression to "cond ? C : B" (or "cond ? C : D").
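// Illustrative example (hypothetical values): with
//   %m = select i1 %cond, i32 -1, i32 0
// the expression (%m & %X) | (%B & (select i1 %cond, i32 0, i32 -1)) becomes
// "select i1 %cond, i32 %X, i32 %B", since exactly one of the two masks is
// all-ones for either value of %cond.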
3361 static Instruction *MatchSelectFromAndOr(Value *A, Value *B,
3362 Value *C, Value *D) {
3363 // If A is not a select of -1/0, this cannot match.
3364   Value *Cond = 0;
3365   if (!match(A, m_SelectCst<-1, 0>(m_Value(Cond))))
3366     return 0;
3368 // ((cond?-1:0)&C) | (B&(cond?0:-1)) -> cond ? C : B.
3369 if (match(D, m_SelectCst<0, -1>(m_Specific(Cond))))
3370 return SelectInst::Create(Cond, C, B);
3371 if (match(D, m_Not(m_SelectCst<-1, 0>(m_Specific(Cond)))))
3372 return SelectInst::Create(Cond, C, B);
3373 // ((cond?-1:0)&C) | ((cond?0:-1)&D) -> cond ? C : D.
3374 if (match(B, m_SelectCst<0, -1>(m_Specific(Cond))))
3375 return SelectInst::Create(Cond, C, D);
3376 if (match(B, m_Not(m_SelectCst<-1, 0>(m_Specific(Cond)))))
3377     return SelectInst::Create(Cond, C, D);
3378   return 0;
3379 }
3381 /// FoldOrOfICmps - Fold (icmp)|(icmp) if possible.
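// A representative case (sketch only): "(icmp eq i32 %x, 13) | (icmp eq i32
// %x, 14)" can be rewritten as "icmp ult (add i32 %x, -13), 2", replacing two
// compares and an or with a single range test; the switch below enumerates
// which predicate/constant pairs admit folds of this kind.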
3382 Instruction *InstCombiner::FoldOrOfICmps(Instruction &I,
3383 ICmpInst *LHS, ICmpInst *RHS) {
3384   Value *Val, *Val2;
3385   ConstantInt *LHSCst, *RHSCst;
3386 ICmpInst::Predicate LHSCC, RHSCC;
3388 // This only handles icmp of constants: (icmp1 A, C1) | (icmp2 B, C2).
3389 if (!match(LHS, m_ICmp(LHSCC, m_Value(Val), m_ConstantInt(LHSCst))) ||
3390       !match(RHS, m_ICmp(RHSCC, m_Value(Val2), m_ConstantInt(RHSCst))))
3391     return 0;
3394 // (icmp ne A, 0) | (icmp ne B, 0) --> (icmp ne (A|B), 0)
3395 if (LHSCst == RHSCst && LHSCC == RHSCC &&
3396 LHSCC == ICmpInst::ICMP_NE && LHSCst->isZero()) {
3397 Value *NewOr = Builder->CreateOr(Val, Val2);
3398 return new ICmpInst(LHSCC, NewOr, LHSCst);
3401 // From here on, we only handle:
3402 // (icmp1 A, C1) | (icmp2 A, C2) --> something simpler.
3403 if (Val != Val2) return 0;
3405 // ICMP_[US][GL]E X, CST is folded to ICMP_[US][GL]T elsewhere.
3406 if (LHSCC == ICmpInst::ICMP_UGE || LHSCC == ICmpInst::ICMP_ULE ||
3407 RHSCC == ICmpInst::ICMP_UGE || RHSCC == ICmpInst::ICMP_ULE ||
3408 LHSCC == ICmpInst::ICMP_SGE || LHSCC == ICmpInst::ICMP_SLE ||
3409       RHSCC == ICmpInst::ICMP_SGE || RHSCC == ICmpInst::ICMP_SLE)
3410     return 0;
3412 // We can't fold (ugt x, C) | (sgt x, C2).
3413   if (!PredicatesFoldable(LHSCC, RHSCC))
3414     return 0;
3416   // Ensure that the larger constant is on the RHS.
3417   bool ShouldSwap;
3418   if (CmpInst::isSigned(LHSCC) ||
3419 (ICmpInst::isEquality(LHSCC) &&
3420 CmpInst::isSigned(RHSCC)))
3421     ShouldSwap = LHSCst->getValue().sgt(RHSCst->getValue());
3422   else
3423     ShouldSwap = LHSCst->getValue().ugt(RHSCst->getValue());
3425   if (ShouldSwap) {
3426     std::swap(LHS, RHS);
3427 std::swap(LHSCst, RHSCst);
3428     std::swap(LHSCC, RHSCC);
3429   }
3431   // At this point, we know we have two icmp instructions
3432 // comparing a value against two constants and or'ing the result
3433 // together. Because of the above check, we know that we only have
3434 // ICMP_EQ, ICMP_NE, ICMP_LT, and ICMP_GT here. We also know (from the
3435   // FoldICmpLogical check above), that the two constants are not
3436   // equal.
3437   assert(LHSCst != RHSCst && "Compares not folded above?");
3439   switch (LHSCC) {
3440   default: llvm_unreachable("Unknown integer condition code!");
3441   case ICmpInst::ICMP_EQ:
3442     switch (RHSCC) {
3443     default: llvm_unreachable("Unknown integer condition code!");
3444 case ICmpInst::ICMP_EQ:
3445 if (LHSCst == SubOne(RHSCst)) {
3446 // (X == 13 | X == 14) -> X-13 <u 2
3447 Constant *AddCST = ConstantExpr::getNeg(LHSCst);
3448 Value *Add = Builder->CreateAdd(Val, AddCST, Val->getName()+".off");
3449 AddCST = ConstantExpr::getSub(AddOne(RHSCst), LHSCst);
3450 return new ICmpInst(ICmpInst::ICMP_ULT, Add, AddCST);
3452 break; // (X == 13 | X == 15) -> no change
3453 case ICmpInst::ICMP_UGT: // (X == 13 | X u> 14) -> no change
3454 case ICmpInst::ICMP_SGT: // (X == 13 | X s> 14) -> no change
3456 case ICmpInst::ICMP_NE: // (X == 13 | X != 15) -> X != 15
3457 case ICmpInst::ICMP_ULT: // (X == 13 | X u< 15) -> X u< 15
3458 case ICmpInst::ICMP_SLT: // (X == 13 | X s< 15) -> X s< 15
3459       return ReplaceInstUsesWith(I, RHS);
3460     }
3461     break;
3462   case ICmpInst::ICMP_NE:
3463     switch (RHSCC) {
3464     default: llvm_unreachable("Unknown integer condition code!");
3465 case ICmpInst::ICMP_EQ: // (X != 13 | X == 15) -> X != 13
3466 case ICmpInst::ICMP_UGT: // (X != 13 | X u> 15) -> X != 13
3467 case ICmpInst::ICMP_SGT: // (X != 13 | X s> 15) -> X != 13
3468 return ReplaceInstUsesWith(I, LHS);
3469 case ICmpInst::ICMP_NE: // (X != 13 | X != 15) -> true
3470 case ICmpInst::ICMP_ULT: // (X != 13 | X u< 15) -> true
3471 case ICmpInst::ICMP_SLT: // (X != 13 | X s< 15) -> true
3472       return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
3473     }
3474     break;
3475   case ICmpInst::ICMP_ULT:
3476     switch (RHSCC) {
3477     default: llvm_unreachable("Unknown integer condition code!");
3478     case ICmpInst::ICMP_EQ:         // (X u< 13 | X == 14) -> no change
3479       break;
3480 case ICmpInst::ICMP_UGT: // (X u< 13 | X u> 15) -> (X-13) u> 2
3481 // If RHSCst is [us]MAXINT, it is always false. Not handling
3482 // this can cause overflow.
3483 if (RHSCst->isMaxValue(false))
3484 return ReplaceInstUsesWith(I, LHS);
3485       return InsertRangeTest(Val, LHSCst, AddOne(RHSCst),
3486                              false, false, I);
3487     case ICmpInst::ICMP_SGT:        // (X u< 13 | X s> 15) -> no change
3488       break;
3489 case ICmpInst::ICMP_NE: // (X u< 13 | X != 15) -> X != 15
3490 case ICmpInst::ICMP_ULT: // (X u< 13 | X u< 15) -> X u< 15
3491 return ReplaceInstUsesWith(I, RHS);
3492     case ICmpInst::ICMP_SLT:        // (X u< 13 | X s< 15) -> no change
3493       break;
3494     }
3495     break;
3496   case ICmpInst::ICMP_SLT:
3497     switch (RHSCC) {
3498     default: llvm_unreachable("Unknown integer condition code!");
3499     case ICmpInst::ICMP_EQ:         // (X s< 13 | X == 14) -> no change
3500       break;
3501 case ICmpInst::ICMP_SGT: // (X s< 13 | X s> 15) -> (X-13) s> 2
3502 // If RHSCst is [us]MAXINT, it is always false. Not handling
3503 // this can cause overflow.
3504 if (RHSCst->isMaxValue(true))
3505 return ReplaceInstUsesWith(I, LHS);
3506       return InsertRangeTest(Val, LHSCst, AddOne(RHSCst),
3507                              true, false, I);
3508     case ICmpInst::ICMP_UGT:        // (X s< 13 | X u> 15) -> no change
3509       break;
3510 case ICmpInst::ICMP_NE: // (X s< 13 | X != 15) -> X != 15
3511 case ICmpInst::ICMP_SLT: // (X s< 13 | X s< 15) -> X s< 15
3512 return ReplaceInstUsesWith(I, RHS);
3513     case ICmpInst::ICMP_ULT:        // (X s< 13 | X u< 15) -> no change
3514       break;
3515     }
3516     break;
3517   case ICmpInst::ICMP_UGT:
3518     switch (RHSCC) {
3519     default: llvm_unreachable("Unknown integer condition code!");
3520 case ICmpInst::ICMP_EQ: // (X u> 13 | X == 15) -> X u> 13
3521 case ICmpInst::ICMP_UGT: // (X u> 13 | X u> 15) -> X u> 13
3522 return ReplaceInstUsesWith(I, LHS);
3523 case ICmpInst::ICMP_SGT: // (X u> 13 | X s> 15) -> no change
3525 case ICmpInst::ICMP_NE: // (X u> 13 | X != 15) -> true
3526 case ICmpInst::ICMP_ULT: // (X u> 13 | X u< 15) -> true
3527 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
3528     case ICmpInst::ICMP_SLT:        // (X u> 13 | X s< 15) -> no change
3529       break;
3530     }
3531     break;
3532   case ICmpInst::ICMP_SGT:
3533     switch (RHSCC) {
3534     default: llvm_unreachable("Unknown integer condition code!");
3535 case ICmpInst::ICMP_EQ: // (X s> 13 | X == 15) -> X > 13
3536 case ICmpInst::ICMP_SGT: // (X s> 13 | X s> 15) -> X > 13
3537 return ReplaceInstUsesWith(I, LHS);
3538 case ICmpInst::ICMP_UGT: // (X s> 13 | X u> 15) -> no change
3540 case ICmpInst::ICMP_NE: // (X s> 13 | X != 15) -> true
3541 case ICmpInst::ICMP_SLT: // (X s> 13 | X s< 15) -> true
3542 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
3543     case ICmpInst::ICMP_ULT:        // (X s> 13 | X u< 15) -> no change
3544       break;
3545     }
3546     break;
3547   }
3548   return 0;
3549 }
3551 Instruction *InstCombiner::FoldOrOfFCmps(Instruction &I, FCmpInst *LHS,
3552                                          FCmpInst *RHS) {
3553 if (LHS->getPredicate() == FCmpInst::FCMP_UNO &&
3554 RHS->getPredicate() == FCmpInst::FCMP_UNO &&
3555 LHS->getOperand(0)->getType() == RHS->getOperand(0)->getType()) {
3556 if (ConstantFP *LHSC = dyn_cast<ConstantFP>(LHS->getOperand(1)))
3557 if (ConstantFP *RHSC = dyn_cast<ConstantFP>(RHS->getOperand(1))) {
3558         // If either of the constants is a NaN, then the whole thing returns
3559         // true.
3560 if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN())
3561 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
3563         // Otherwise, no need to compare the two constants; compare the
3564         // rest.
3565 return new FCmpInst(FCmpInst::FCMP_UNO,
3566 LHS->getOperand(0), RHS->getOperand(0));
3569 // Handle vector zeros. This occurs because the canonical form of
3570 // "fcmp uno x,x" is "fcmp uno x, 0".
3571 if (isa<ConstantAggregateZero>(LHS->getOperand(1)) &&
3572 isa<ConstantAggregateZero>(RHS->getOperand(1)))
3573 return new FCmpInst(FCmpInst::FCMP_UNO,
3574                         LHS->getOperand(0), RHS->getOperand(0));
3575     return 0;
3576   }
3579 Value *Op0LHS = LHS->getOperand(0), *Op0RHS = LHS->getOperand(1);
3580 Value *Op1LHS = RHS->getOperand(0), *Op1RHS = RHS->getOperand(1);
3581 FCmpInst::Predicate Op0CC = LHS->getPredicate(), Op1CC = RHS->getPredicate();
3583 if (Op0LHS == Op1RHS && Op0RHS == Op1LHS) {
3584 // Swap RHS operands to match LHS.
3585 Op1CC = FCmpInst::getSwappedPredicate(Op1CC);
3586     std::swap(Op1LHS, Op1RHS);
3587   }
3588   if (Op0LHS == Op1LHS && Op0RHS == Op1RHS) {
3589     // Simplify (fcmp cc0 x, y) | (fcmp cc1 x, y).
3590     if (Op0CC == Op1CC)
3591       return new FCmpInst((FCmpInst::Predicate)Op0CC,
3592                           Op0LHS, Op0RHS);
3593 if (Op0CC == FCmpInst::FCMP_TRUE || Op1CC == FCmpInst::FCMP_TRUE)
3594 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
3595 if (Op0CC == FCmpInst::FCMP_FALSE)
3596 return ReplaceInstUsesWith(I, RHS);
3597 if (Op1CC == FCmpInst::FCMP_FALSE)
3598       return ReplaceInstUsesWith(I, LHS);
3599     bool Op0Ordered;
3600     bool Op1Ordered;
3601     unsigned Op0Pred = getFCmpCode(Op0CC, Op0Ordered);
3602 unsigned Op1Pred = getFCmpCode(Op1CC, Op1Ordered);
3603 if (Op0Ordered == Op1Ordered) {
3604 // If both are ordered or unordered, return a new fcmp with
3605 // or'ed predicates.
3606 Value *RV = getFCmpValue(Op0Ordered, Op0Pred|Op1Pred, Op0LHS, Op0RHS);
3607       if (Instruction *I = dyn_cast<Instruction>(RV))
3608         return I;
3609       // Otherwise, it's a constant boolean value...
3610       return ReplaceInstUsesWith(I, RV);
3611     }
3612   }
3613   return 0;
3614 }
3616 /// FoldOrWithConstants - This helper function folds:
3617 ///
3618 ///     ((A | B) & C1) | (B & C2)
3619 ///
3620 /// into:
3621 ///
3622 ///     (A & C1) | B
3623 ///
3624 /// when the XOR of the two constants is "all ones" (-1).
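// For illustration, take C1 = 1 and C2 = -2 (their xor is all ones):
//   ((%a | %b) & 1) | (%b & -2)  -->  (%a & 1) | %b
// because the low bit is taken from (%a | %b) and every other bit from %b.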
3625 Instruction *InstCombiner::FoldOrWithConstants(BinaryOperator &I, Value *Op,
3626 Value *A, Value *B, Value *C) {
3627   ConstantInt *CI1 = dyn_cast<ConstantInt>(C);
3628   if (!CI1) return 0;
3630   Value *V1 = 0;
3631   ConstantInt *CI2 = 0;
3632 if (!match(Op, m_And(m_Value(V1), m_ConstantInt(CI2)))) return 0;
3634 APInt Xor = CI1->getValue() ^ CI2->getValue();
3635 if (!Xor.isAllOnesValue()) return 0;
3637 if (V1 == A || V1 == B) {
3638 Value *NewOp = Builder->CreateAnd((V1 == A) ? B : A, CI1);
3639     return BinaryOperator::CreateOr(NewOp, V1);
3640   }
3642   return 0;
3643 }
3645 Instruction *InstCombiner::visitOr(BinaryOperator &I) {
3646 bool Changed = SimplifyCommutative(I);
3647 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3649 if (Value *V = SimplifyOrInst(Op0, Op1, TD))
3650 return ReplaceInstUsesWith(I, V);
3653 // See if we can simplify any instructions used by the instruction whose sole
3654 // purpose is to compute bits we don't care about.
3655   if (SimplifyDemandedInstructionBits(I))
3656     return &I;
3658 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
3659 ConstantInt *C1 = 0; Value *X = 0;
3660 // (X & C1) | C2 --> (X | C2) & (C1|C2)
3661     if (match(Op0, m_And(m_Value(X), m_ConstantInt(C1))) &&
3662         isOnlyUse(Op0)) {
3663       Value *Or = Builder->CreateOr(X, RHS);
3664       Or->takeName(Op0);
3665       return BinaryOperator::CreateAnd(Or,
3666                         ConstantInt::get(I.getContext(),
3667                                          RHS->getValue() | C1->getValue()));
3668     }
3670 // (X ^ C1) | C2 --> (X | C2) ^ (C1&~C2)
3671     if (match(Op0, m_Xor(m_Value(X), m_ConstantInt(C1))) &&
3672         isOnlyUse(Op0)) {
3673       Value *Or = Builder->CreateOr(X, RHS);
3674       Or->takeName(Op0);
3675       return BinaryOperator::CreateXor(Or,
3676                         ConstantInt::get(I.getContext(),
3677                                          C1->getValue() & ~RHS->getValue()));
3678     }
3680 // Try to fold constant and into select arguments.
3681 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
3682     if (Instruction *R = FoldOpIntoSelect(I, SI, this))
3683       return R;
3684   if (isa<PHINode>(Op0))
3685     if (Instruction *NV = FoldOpIntoPhi(I))
3686       return NV;
3687   }
3689 Value *A = 0, *B = 0;
3690 ConstantInt *C1 = 0, *C2 = 0;
3692 // (A | B) | C and A | (B | C) -> bswap if possible.
3693   // (A >> B) | (C << D) and (A << B) | (C >> D) -> bswap if possible.
3694 if (match(Op0, m_Or(m_Value(), m_Value())) ||
3695 match(Op1, m_Or(m_Value(), m_Value())) ||
3696 (match(Op0, m_Shift(m_Value(), m_Value())) &&
3697 match(Op1, m_Shift(m_Value(), m_Value())))) {
3698     if (Instruction *BSwap = MatchBSwap(I))
3699       return BSwap;
3700   }
3702 // (X^C)|Y -> (X|Y)^C iff Y&C == 0
3703 if (Op0->hasOneUse() &&
3704 match(Op0, m_Xor(m_Value(A), m_ConstantInt(C1))) &&
3705 MaskedValueIsZero(Op1, C1->getValue())) {
3706 Value *NOr = Builder->CreateOr(A, Op1);
3708 return BinaryOperator::CreateXor(NOr, C1);
3711 // Y|(X^C) -> (X|Y)^C iff Y&C == 0
3712 if (Op1->hasOneUse() &&
3713 match(Op1, m_Xor(m_Value(A), m_ConstantInt(C1))) &&
3714 MaskedValueIsZero(Op0, C1->getValue())) {
3715 Value *NOr = Builder->CreateOr(A, Op0);
3717 return BinaryOperator::CreateXor(NOr, C1);
3721 Value *C = 0, *D = 0;
3722 if (match(Op0, m_And(m_Value(A), m_Value(C))) &&
3723 match(Op1, m_And(m_Value(B), m_Value(D)))) {
3724 Value *V1 = 0, *V2 = 0, *V3 = 0;
3725 C1 = dyn_cast<ConstantInt>(C);
3726 C2 = dyn_cast<ConstantInt>(D);
3727 if (C1 && C2) { // (A & C1)|(B & C2)
3728 // If we have: ((V + N) & C1) | (V & C2)
3729 // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
3730 // replace with V+N.
3731 if (C1->getValue() == ~C2->getValue()) {
3732 if ((C2->getValue() & (C2->getValue()+1)) == 0 && // C2 == 0+1+
3733 match(A, m_Add(m_Value(V1), m_Value(V2)))) {
3734 // Add commutes, try both ways.
3735 if (V1 == B && MaskedValueIsZero(V2, C2->getValue()))
3736 return ReplaceInstUsesWith(I, A);
3737 if (V2 == B && MaskedValueIsZero(V1, C2->getValue()))
3738 return ReplaceInstUsesWith(I, A);
3740 // Or commutes, try both ways.
3741 if ((C1->getValue() & (C1->getValue()+1)) == 0 &&
3742 match(B, m_Add(m_Value(V1), m_Value(V2)))) {
3743 // Add commutes, try both ways.
3744 if (V1 == A && MaskedValueIsZero(V2, C1->getValue()))
3745 return ReplaceInstUsesWith(I, B);
3746 if (V2 == A && MaskedValueIsZero(V1, C1->getValue()))
3747 return ReplaceInstUsesWith(I, B);
3751 // ((V | N) & C1) | (V & C2) --> (V|N) & (C1|C2)
3752 // iff (C1&C2) == 0 and (N&~C1) == 0
3753 if ((C1->getValue() & C2->getValue()) == 0) {
3754 if (match(A, m_Or(m_Value(V1), m_Value(V2))) &&
3755 ((V1 == B && MaskedValueIsZero(V2, ~C1->getValue())) || // (V|N)
3756 (V2 == B && MaskedValueIsZero(V1, ~C1->getValue())))) // (N|V)
3757 return BinaryOperator::CreateAnd(A,
3758 ConstantInt::get(A->getContext(),
3759 C1->getValue()|C2->getValue()));
3760 // Or commutes, try both ways.
3761 if (match(B, m_Or(m_Value(V1), m_Value(V2))) &&
3762 ((V1 == A && MaskedValueIsZero(V2, ~C2->getValue())) || // (V|N)
3763 (V2 == A && MaskedValueIsZero(V1, ~C2->getValue())))) // (N|V)
3764 return BinaryOperator::CreateAnd(B,
3765 ConstantInt::get(B->getContext(),
3766 C1->getValue()|C2->getValue()));
3770 // Check to see if we have any common things being and'ed. If so, find the
3771 // terms for V1 & (V2|V3).
3772       if (isOnlyUse(Op0) || isOnlyUse(Op1)) {
3773         V1 = 0;
3774 if (A == B) // (A & C)|(A & D) == A & (C|D)
3775 V1 = A, V2 = C, V3 = D;
3776 else if (A == D) // (A & C)|(B & A) == A & (B|C)
3777 V1 = A, V2 = B, V3 = C;
3778 else if (C == B) // (A & C)|(C & D) == C & (A|D)
3779 V1 = C, V2 = A, V3 = D;
3780 else if (C == D) // (A & C)|(B & C) == C & (A|B)
3781           V1 = C, V2 = A, V3 = B;
3783         if (V1) {
3784           Value *Or = Builder->CreateOr(V2, V3, "tmp");
3785           return BinaryOperator::CreateAnd(V1, Or);
3786         }
3787       }
3789 // (A & (C0?-1:0)) | (B & ~(C0?-1:0)) -> C0 ? A : B, and commuted variants
3790       if (Instruction *Match = MatchSelectFromAndOr(A, B, C, D))
3791         return Match;
3792       if (Instruction *Match = MatchSelectFromAndOr(B, A, D, C))
3793         return Match;
3794       if (Instruction *Match = MatchSelectFromAndOr(C, B, A, D))
3795         return Match;
3796       if (Instruction *Match = MatchSelectFromAndOr(D, A, B, C))
3797         return Match;
3799 // ((A&~B)|(~A&B)) -> A^B
3800 if ((match(C, m_Not(m_Specific(D))) &&
3801 match(B, m_Not(m_Specific(A)))))
3802 return BinaryOperator::CreateXor(A, D);
3803 // ((~B&A)|(~A&B)) -> A^B
3804 if ((match(A, m_Not(m_Specific(D))) &&
3805 match(B, m_Not(m_Specific(C)))))
3806 return BinaryOperator::CreateXor(C, D);
3807 // ((A&~B)|(B&~A)) -> A^B
3808 if ((match(C, m_Not(m_Specific(B))) &&
3809 match(D, m_Not(m_Specific(A)))))
3810 return BinaryOperator::CreateXor(A, B);
3811 // ((~B&A)|(B&~A)) -> A^B
3812 if ((match(A, m_Not(m_Specific(B))) &&
3813 match(D, m_Not(m_Specific(C)))))
3814 return BinaryOperator::CreateXor(C, B);
3817 // (X >> Z) | (Y >> Z) -> (X|Y) >> Z for all shifts.
3818 if (BinaryOperator *SI1 = dyn_cast<BinaryOperator>(Op1)) {
3819 if (BinaryOperator *SI0 = dyn_cast<BinaryOperator>(Op0))
3820 if (SI0->isShift() && SI0->getOpcode() == SI1->getOpcode() &&
3821 SI0->getOperand(1) == SI1->getOperand(1) &&
3822 (SI0->hasOneUse() || SI1->hasOneUse())) {
3823         Value *NewOp = Builder->CreateOr(SI0->getOperand(0), SI1->getOperand(0),
3824                                          SI0->getName());
3825 return BinaryOperator::Create(SI1->getOpcode(), NewOp,
3826 SI1->getOperand(1));
3830 // ((A|B)&1)|(B&-2) -> (A&1) | B
3831 if (match(Op0, m_And(m_Or(m_Value(A), m_Value(B)), m_Value(C))) ||
3832 match(Op0, m_And(m_Value(C), m_Or(m_Value(A), m_Value(B))))) {
3833 Instruction *Ret = FoldOrWithConstants(I, Op1, A, B, C);
3834 if (Ret) return Ret;
3836 // (B&-2)|((A|B)&1) -> (A&1) | B
3837 if (match(Op1, m_And(m_Or(m_Value(A), m_Value(B)), m_Value(C))) ||
3838 match(Op1, m_And(m_Value(C), m_Or(m_Value(A), m_Value(B))))) {
3839 Instruction *Ret = FoldOrWithConstants(I, Op0, A, B, C);
3840 if (Ret) return Ret;
3843 // (~A | ~B) == (~(A & B)) - De Morgan's Law
3844 if (Value *Op0NotVal = dyn_castNotVal(Op0))
3845 if (Value *Op1NotVal = dyn_castNotVal(Op1))
3846 if (Op0->hasOneUse() && Op1->hasOneUse()) {
3847 Value *And = Builder->CreateAnd(Op0NotVal, Op1NotVal,
3848 I.getName()+".demorgan");
3849 return BinaryOperator::CreateNot(And);
3852 // (icmp1 A, B) | (icmp2 A, B) --> (icmp3 A, B)
3853 if (ICmpInst *RHS = dyn_cast<ICmpInst>(I.getOperand(1))) {
3854     if (Instruction *R = AssociativeOpt(I, FoldICmpLogical(*this, RHS)))
3855       return R;
3857 if (ICmpInst *LHS = dyn_cast<ICmpInst>(I.getOperand(0)))
3858       if (Instruction *Res = FoldOrOfICmps(I, LHS, RHS))
3859         return Res;
3860   }
3862 // fold (or (cast A), (cast B)) -> (cast (or A, B))
3863 if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
3864 if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
3865 if (Op0C->getOpcode() == Op1C->getOpcode()) {// same cast kind ?
3866 if (!isa<ICmpInst>(Op0C->getOperand(0)) ||
3867 !isa<ICmpInst>(Op1C->getOperand(0))) {
3868 const Type *SrcTy = Op0C->getOperand(0)->getType();
3869 if (SrcTy == Op1C->getOperand(0)->getType() &&
3870 SrcTy->isIntOrIntVector() &&
3871             // Only do this if the casts both really cause code to be
3872             // generated.
3873             ValueRequiresCast(Op0C->getOpcode(), Op0C->getOperand(0),
3874                               I.getType()) &&
3875             ValueRequiresCast(Op1C->getOpcode(), Op1C->getOperand(0),
3876                               I.getType())) {
3877 Value *NewOp = Builder->CreateOr(Op0C->getOperand(0),
3878 Op1C->getOperand(0), I.getName());
3879 return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
3886 // (fcmp uno x, c) | (fcmp uno y, c) -> (fcmp uno x, y)
3887 if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0))) {
3888 if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
3889       if (Instruction *Res = FoldOrOfFCmps(I, LHS, RHS))
3890         return Res;
3891   }
3893   return Changed ? &I : 0;
3894 }
3897 namespace {
3898   // XorSelf - Implements: X ^ X --> 0
3899   struct XorSelf {
3900     Value *RHS;
3901     XorSelf(Value *rhs) : RHS(rhs) {}
3902     bool shouldApply(Value *LHS) const { return LHS == RHS; }
3903     Instruction *apply(BinaryOperator &Xor) const {
3904       return &Xor;
3905     }
3906   };
3907 }
3910 Instruction *InstCombiner::visitXor(BinaryOperator &I) {
3911 bool Changed = SimplifyCommutative(I);
3912 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3914 if (isa<UndefValue>(Op1)) {
3915 if (isa<UndefValue>(Op0))
3916       // Handle undef ^ undef -> 0 special case. This is a common
3917       // idiom (misuse).
3918 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
3919 return ReplaceInstUsesWith(I, Op1); // X ^ undef -> undef
3922 // xor X, X = 0, even if X is nested in a sequence of Xor's.
3923 if (Instruction *Result = AssociativeOpt(I, XorSelf(Op1))) {
3924 assert(Result == &I && "AssociativeOpt didn't work?"); Result=Result;
3925 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
3928 // See if we can simplify any instructions used by the instruction whose sole
3929 // purpose is to compute bits we don't care about.
3930   if (SimplifyDemandedInstructionBits(I))
3931     return &I;
3932 if (isa<VectorType>(I.getType()))
3933 if (isa<ConstantAggregateZero>(Op1))
3934 return ReplaceInstUsesWith(I, Op0); // X ^ <0,0> -> X
3936 // Is this a ~ operation?
3937 if (Value *NotOp = dyn_castNotVal(&I)) {
3938 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(NotOp)) {
3939 if (Op0I->getOpcode() == Instruction::And ||
3940 Op0I->getOpcode() == Instruction::Or) {
3941 // ~(~X & Y) --> (X | ~Y) - De Morgan's Law
3942 // ~(~X | Y) === (X & ~Y) - De Morgan's Law
3943 if (dyn_castNotVal(Op0I->getOperand(1)))
3944 Op0I->swapOperands();
3945 if (Value *Op0NotVal = dyn_castNotVal(Op0I->getOperand(0))) {
3946         Value *NotY =
3947           Builder->CreateNot(Op0I->getOperand(1),
3948 Op0I->getOperand(1)->getName()+".not");
3949 if (Op0I->getOpcode() == Instruction::And)
3950 return BinaryOperator::CreateOr(Op0NotVal, NotY);
3951 return BinaryOperator::CreateAnd(Op0NotVal, NotY);
3954 // ~(X & Y) --> (~X | ~Y) - De Morgan's Law
3955 // ~(X | Y) === (~X & ~Y) - De Morgan's Law
3956 if (isFreeToInvert(Op0I->getOperand(0)) &&
3957 isFreeToInvert(Op0I->getOperand(1))) {
3958         Value *NotX =
3959           Builder->CreateNot(Op0I->getOperand(0), "notlhs");
3960         Value *NotY =
3961           Builder->CreateNot(Op0I->getOperand(1), "notrhs");
3962 if (Op0I->getOpcode() == Instruction::And)
3963 return BinaryOperator::CreateOr(NotX, NotY);
3964 return BinaryOperator::CreateAnd(NotX, NotY);
3971 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
3972 if (RHS->isOne() && Op0->hasOneUse()) {
3973 // xor (cmp A, B), true = not (cmp A, B) = !cmp A, B
3974 if (ICmpInst *ICI = dyn_cast<ICmpInst>(Op0))
3975 return new ICmpInst(ICI->getInversePredicate(),
3976 ICI->getOperand(0), ICI->getOperand(1));
3978 if (FCmpInst *FCI = dyn_cast<FCmpInst>(Op0))
3979 return new FCmpInst(FCI->getInversePredicate(),
3980 FCI->getOperand(0), FCI->getOperand(1));
3983 // fold (xor(zext(cmp)), 1) and (xor(sext(cmp)), -1) to ext(!cmp).
3984 if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
3985 if (CmpInst *CI = dyn_cast<CmpInst>(Op0C->getOperand(0))) {
3986 if (CI->hasOneUse() && Op0C->hasOneUse()) {
3987 Instruction::CastOps Opcode = Op0C->getOpcode();
3988 if ((Opcode == Instruction::ZExt || Opcode == Instruction::SExt) &&
3989 (RHS == ConstantExpr::getCast(Opcode,
3990 ConstantInt::getTrue(I.getContext()),
3991 Op0C->getDestTy()))) {
3992 CI->setPredicate(CI->getInversePredicate());
3993 return CastInst::Create(Opcode, CI, Op0C->getType());
3999 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
4000 // ~(c-X) == X-c-1 == X+(-c-1)
4001 if (Op0I->getOpcode() == Instruction::Sub && RHS->isAllOnesValue())
4002 if (Constant *Op0I0C = dyn_cast<Constant>(Op0I->getOperand(0))) {
4003 Constant *NegOp0I0C = ConstantExpr::getNeg(Op0I0C);
4004 Constant *ConstantRHS = ConstantExpr::getSub(NegOp0I0C,
4005 ConstantInt::get(I.getType(), 1));
4006 return BinaryOperator::CreateAdd(Op0I->getOperand(1), ConstantRHS);
4009 if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
4010 if (Op0I->getOpcode() == Instruction::Add) {
4011           // ~(X+c) --> (-c-1)-X
4012 if (RHS->isAllOnesValue()) {
4013 Constant *NegOp0CI = ConstantExpr::getNeg(Op0CI);
4014 return BinaryOperator::CreateSub(
4015 ConstantExpr::getSub(NegOp0CI,
4016 ConstantInt::get(I.getType(), 1)),
4017 Op0I->getOperand(0));
4018 } else if (RHS->getValue().isSignBit()) {
4019 // (X + C) ^ signbit -> (X + C + signbit)
4020 Constant *C = ConstantInt::get(I.getContext(),
4021 RHS->getValue() + Op0CI->getValue());
4022 return BinaryOperator::CreateAdd(Op0I->getOperand(0), C);
4025 } else if (Op0I->getOpcode() == Instruction::Or) {
4026 // (X|C1)^C2 -> X^(C1|C2) iff X&~C1 == 0
4027 if (MaskedValueIsZero(Op0I->getOperand(0), Op0CI->getValue())) {
4028 Constant *NewRHS = ConstantExpr::getOr(Op0CI, RHS);
4029             // Anything in both C1 and C2 is known to be zero, remove it from
4030             // NewRHS.
4031 Constant *CommonBits = ConstantExpr::getAnd(Op0CI, RHS);
4032 NewRHS = ConstantExpr::getAnd(NewRHS,
4033 ConstantExpr::getNot(CommonBits));
4035 I.setOperand(0, Op0I->getOperand(0));
4036             I.setOperand(1, NewRHS);
4037             return &I;
4038           }
4039         }
4040       }
4041     }
4043 // Try to fold constant and into select arguments.
4044 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
4045     if (Instruction *R = FoldOpIntoSelect(I, SI, this))
4046       return R;
4047 if (isa<PHINode>(Op0))
4048     if (Instruction *NV = FoldOpIntoPhi(I))
4049       return NV;
4050   }
4052   if (Value *X = dyn_castNotVal(Op0))   // ~A ^ A == -1
4053     if (X == Op1)
4054       return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));
4056   if (Value *X = dyn_castNotVal(Op1))   // A ^ ~A == -1
4057     if (X == Op0)
4058       return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));
4061   BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1);
4062   if (Op1I) {
4063     Value *A, *B;
4064 if (match(Op1I, m_Or(m_Value(A), m_Value(B)))) {
4065 if (A == Op0) { // B^(B|A) == (A|B)^B
4066       Op1I->swapOperands();
4067       I.swapOperands();
4068 std::swap(Op0, Op1);
4069 } else if (B == Op0) { // B^(A|B) == (A|B)^B
4070 I.swapOperands(); // Simplified below.
4071 std::swap(Op0, Op1);
4073 } else if (match(Op1I, m_Xor(m_Specific(Op0), m_Value(B)))) {
4074 return ReplaceInstUsesWith(I, B); // A^(A^B) == B
4075 } else if (match(Op1I, m_Xor(m_Value(A), m_Specific(Op0)))) {
4076 return ReplaceInstUsesWith(I, A); // A^(B^A) == B
4077   } else if (match(Op1I, m_And(m_Value(A), m_Value(B))) &&
4078              Op1I->hasOneUse()) {
4079     if (A == Op0) {                                      // A^(A&B) -> A^(B&A)
4080       Op1I->swapOperands();
4081       std::swap(A, B);
4082     }
4083 if (B == Op0) { // A^(B&A) -> (B&A)^A
4084 I.swapOperands(); // Simplified below.
4085 std::swap(Op0, Op1);
4090   BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0);
4091   if (Op0I) {
4092     Value *A, *B;
4093 if (match(Op0I, m_Or(m_Value(A), m_Value(B))) &&
4094 Op0I->hasOneUse()) {
4095     if (A == Op1)                                  // (B|A)^B == (A|B)^B
4096       std::swap(A, B);
4097 if (B == Op1) // (A|B)^B == A & ~B
4098 return BinaryOperator::CreateAnd(A, Builder->CreateNot(Op1, "tmp"));
4099 } else if (match(Op0I, m_Xor(m_Specific(Op1), m_Value(B)))) {
4100 return ReplaceInstUsesWith(I, B); // (A^B)^A == B
4101 } else if (match(Op0I, m_Xor(m_Value(A), m_Specific(Op1)))) {
4102 return ReplaceInstUsesWith(I, A); // (B^A)^A == B
4103   } else if (match(Op0I, m_And(m_Value(A), m_Value(B))) &&
4104              Op0I->hasOneUse()) {
4105     if (A == Op1)                                        // (A&B)^A -> (B&A)^A
4106       std::swap(A, B);
4107 if (B == Op1 && // (B&A)^A == ~B & A
4108 !isa<ConstantInt>(Op1)) { // Canonical form is (B&C)^C
4109 return BinaryOperator::CreateAnd(Builder->CreateNot(A, "tmp"), Op1);
4114 // (X >> Z) ^ (Y >> Z) -> (X^Y) >> Z for all shifts.
4115 if (Op0I && Op1I && Op0I->isShift() &&
4116 Op0I->getOpcode() == Op1I->getOpcode() &&
4117 Op0I->getOperand(1) == Op1I->getOperand(1) &&
4118       (Op0I->hasOneUse() || Op1I->hasOneUse())) {
4119     Value *NewOp =
4120       Builder->CreateXor(Op0I->getOperand(0), Op1I->getOperand(0),
4121                          Op0I->getName());
4122     return BinaryOperator::Create(Op1I->getOpcode(), NewOp,
4123                                   Op1I->getOperand(1));
4124   }
4126   if (Op0I && Op1I) {
4127 Value *A, *B, *C, *D;
4128 // (A & B)^(A | B) -> A ^ B
4129 if (match(Op0I, m_And(m_Value(A), m_Value(B))) &&
4130 match(Op1I, m_Or(m_Value(C), m_Value(D)))) {
4131 if ((A == C && B == D) || (A == D && B == C))
4132 return BinaryOperator::CreateXor(A, B);
4134 // (A | B)^(A & B) -> A ^ B
4135 if (match(Op0I, m_Or(m_Value(A), m_Value(B))) &&
4136 match(Op1I, m_And(m_Value(C), m_Value(D)))) {
4137 if ((A == C && B == D) || (A == D && B == C))
4138 return BinaryOperator::CreateXor(A, B);
4142 if ((Op0I->hasOneUse() || Op1I->hasOneUse()) &&
4143 match(Op0I, m_And(m_Value(A), m_Value(B))) &&
4144 match(Op1I, m_And(m_Value(C), m_Value(D)))) {
4145       // (X & Y)^(X & Z) -> (Y^Z) & X
4146 Value *X = 0, *Y = 0, *Z = 0;
4147       if (A == C)
4148         X = A, Y = B, Z = D;
4149       else if (A == D)
4150         X = A, Y = B, Z = C;
4151       else if (B == C)
4152         X = B, Y = A, Z = D;
4153       else if (B == D)
4154         X = B, Y = A, Z = C;
4156       if (X) {
4157 Value *NewOp = Builder->CreateXor(Y, Z, Op0->getName());
4158 return BinaryOperator::CreateAnd(NewOp, X);
4163 // (icmp1 A, B) ^ (icmp2 A, B) --> (icmp3 A, B)
4164 if (ICmpInst *RHS = dyn_cast<ICmpInst>(I.getOperand(1)))
4165     if (Instruction *R = AssociativeOpt(I, FoldICmpLogical(*this, RHS)))
4166       return R;
4168 // fold (xor (cast A), (cast B)) -> (cast (xor A, B))
4169 if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
4170 if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
4171 if (Op0C->getOpcode() == Op1C->getOpcode()) { // same cast kind?
4172 const Type *SrcTy = Op0C->getOperand(0)->getType();
4173 if (SrcTy == Op1C->getOperand(0)->getType() && SrcTy->isInteger() &&
4174 // Only do this if the casts both really cause code to be generated.
4175           ValueRequiresCast(Op0C->getOpcode(), Op0C->getOperand(0),
4176                             I.getType()) &&
4177           ValueRequiresCast(Op1C->getOpcode(), Op1C->getOperand(0),
4178                             I.getType())) {
4179 Value *NewOp = Builder->CreateXor(Op0C->getOperand(0),
4180 Op1C->getOperand(0), I.getName());
4181 return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
4186   return Changed ? &I : 0;
4187 }
4189 static ConstantInt *ExtractElement(Constant *V, Constant *Idx) {
4190 return cast<ConstantInt>(ConstantExpr::getExtractElement(V, Idx));
4193 static bool HasAddOverflow(ConstantInt *Result,
4194                            ConstantInt *In1, ConstantInt *In2,
4195                            bool IsSigned) {
4196   if (IsSigned) {
4197     if (In2->getValue().isNegative())
4198       return Result->getValue().sgt(In1->getValue());
4199     else
4200       return Result->getValue().slt(In1->getValue());
4201   }
4202   return Result->getValue().ult(In1->getValue());
4203 }
4205 /// AddWithOverflow - Compute Result = In1+In2, returning true if the result
4206 /// overflowed for this type.
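// Quick illustration of the signed check below (sketch, i8 for brevity):
// 100 + 50 wraps to -106; the addend 50 is non-negative but the result is
// smaller than the first operand, so the add is flagged as overflowing.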
4207 static bool AddWithOverflow(Constant *&Result, Constant *In1,
4208 Constant *In2, bool IsSigned = false) {
4209 Result = ConstantExpr::getAdd(In1, In2);
4211 if (const VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
4212 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
4213 Constant *Idx = ConstantInt::get(Type::getInt32Ty(In1->getContext()), i);
4214 if (HasAddOverflow(ExtractElement(Result, Idx),
4215 ExtractElement(In1, Idx),
4216                          ExtractElement(In2, Idx),
4217                          IsSigned))
4218         return true;
4219     }
4220     return false;
4221   }
4223 return HasAddOverflow(cast<ConstantInt>(Result),
4224                         cast<ConstantInt>(In1), cast<ConstantInt>(In2),
4225                         IsSigned);
4226 }
4228 static bool HasSubOverflow(ConstantInt *Result,
4229                            ConstantInt *In1, ConstantInt *In2,
4230                            bool IsSigned) {
4231   if (IsSigned) {
4232     if (In2->getValue().isNegative())
4233       return Result->getValue().slt(In1->getValue());
4234     else
4235       return Result->getValue().sgt(In1->getValue());
4236   }
4237   return Result->getValue().ugt(In1->getValue());
4238 }
4240 /// SubWithOverflow - Compute Result = In1-In2, returning true if the result
4241 /// overflowed for this type.
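// Sketch of the unsigned case handled below (i8 for brevity): 5 - 10 wraps
// to 251, which compares unsigned-greater than the first operand 5, so the
// subtraction is flagged as having borrowed; the signed case mirrors
// AddWithOverflow with the comparison directions reversed.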
4242 static bool SubWithOverflow(Constant *&Result, Constant *In1,
4243 Constant *In2, bool IsSigned = false) {
4244 Result = ConstantExpr::getSub(In1, In2);
4246 if (const VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
4247 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
4248 Constant *Idx = ConstantInt::get(Type::getInt32Ty(In1->getContext()), i);
4249 if (HasSubOverflow(ExtractElement(Result, Idx),
4250 ExtractElement(In1, Idx),
4251                          ExtractElement(In2, Idx),
4252                          IsSigned))
4253         return true;
4254     }
4255     return false;
4256   }
4258 return HasSubOverflow(cast<ConstantInt>(Result),
4259                         cast<ConstantInt>(In1), cast<ConstantInt>(In2),
4260                         IsSigned);
4261 }
4264 /// FoldGEPICmp - Fold comparisons between a GEP instruction and something
4265 /// else. At this point we know that the GEP is on the LHS of the comparison.
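// Illustrative example (assuming TargetData is available and the GEP is
// inbounds):
//   %p2 = getelementptr inbounds i32* %p, i64 %i
//   %c  = icmp eq i32* %p2, %p
// reduces to a comparison of the computed offset against zero, which here is
// equivalent to "icmp eq i64 %i, 0".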
4266 Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
4267                                        ICmpInst::Predicate Cond,
4268                                        Instruction &I) {
4269 // Look through bitcasts.
4270 if (BitCastInst *BCI = dyn_cast<BitCastInst>(RHS))
4271 RHS = BCI->getOperand(0);
4273 Value *PtrBase = GEPLHS->getOperand(0);
4274 if (TD && PtrBase == RHS && GEPLHS->isInBounds()) {
4275 // ((gep Ptr, OFFSET) cmp Ptr) ---> (OFFSET cmp 0).
4276 // This transformation (ignoring the base and scales) is valid because we
4277 // know pointers can't overflow since the gep is inbounds. See if we can
4278 // output an optimized form.
4279 Value *Offset = EvaluateGEPOffsetExpression(GEPLHS, I, *this);
4281     // If not, synthesize the offset the hard way.
4282     if (Offset == 0)
4283       Offset = EmitGEPOffset(GEPLHS, *this);
4284 return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Offset,
4285 Constant::getNullValue(Offset->getType()));
4286 } else if (GEPOperator *GEPRHS = dyn_cast<GEPOperator>(RHS)) {
4287 // If the base pointers are different, but the indices are the same, just
4288 // compare the base pointer.
4289 if (PtrBase != GEPRHS->getOperand(0)) {
4290 bool IndicesTheSame = GEPLHS->getNumOperands()==GEPRHS->getNumOperands();
4291 IndicesTheSame &= GEPLHS->getOperand(0)->getType() ==
4292                         GEPRHS->getOperand(0)->getType();
4293       if (IndicesTheSame)
4294         for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
4295 if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
4296             IndicesTheSame = false;
4297             break;
4298           }
4300       // If all indices are the same, just compare the base pointers.
4301       if (IndicesTheSame)
4302         return new ICmpInst(ICmpInst::getSignedPredicate(Cond),
4303 GEPLHS->getOperand(0), GEPRHS->getOperand(0));
4305 // Otherwise, the base pointers are different and the indices are
4306       // different, bail out.
4307       return 0;
4308     }
4310 // If one of the GEPs has all zero indices, recurse.
4311 bool AllZeros = true;
4312 for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
4313 if (!isa<Constant>(GEPLHS->getOperand(i)) ||
4314         !cast<Constant>(GEPLHS->getOperand(i))->isNullValue()) {
4315         AllZeros = false;
4316         break;
4317       }
4318     if (AllZeros)
4319 return FoldGEPICmp(GEPRHS, GEPLHS->getOperand(0),
4320 ICmpInst::getSwappedPredicate(Cond), I);
4322     // If the other GEP has all zero indices, recurse.
4323     AllZeros = true;
4324     for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
4325 if (!isa<Constant>(GEPRHS->getOperand(i)) ||
4326         !cast<Constant>(GEPRHS->getOperand(i))->isNullValue()) {
4327         AllZeros = false;
4328         break;
4329       }
4330     if (AllZeros)
4331 return FoldGEPICmp(GEPLHS, GEPRHS->getOperand(0), Cond, I);
4333 if (GEPLHS->getNumOperands() == GEPRHS->getNumOperands()) {
4334 // If the GEPs only differ by one index, compare it.
4335 unsigned NumDifferences = 0; // Keep track of # differences.
4336 unsigned DiffOperand = 0; // The operand that differs.
4337 for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
4338 if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
4339 if (GEPLHS->getOperand(i)->getType()->getPrimitiveSizeInBits() !=
4340 GEPRHS->getOperand(i)->getType()->getPrimitiveSizeInBits()) {
4341             // Irreconcilable differences.
4342             NumDifferences = 2;
4343             break;
4344           }
4345           if (NumDifferences++) break;
4346           DiffOperand = i;
4347         }
4350 if (NumDifferences == 0) // SAME GEP?
4351 return ReplaceInstUsesWith(I, // No comparison is needed here.
4352 ConstantInt::get(Type::getInt1Ty(I.getContext()),
4353 ICmpInst::isTrueWhenEqual(Cond)));
4355 else if (NumDifferences == 1) {
4356 Value *LHSV = GEPLHS->getOperand(DiffOperand);
4357 Value *RHSV = GEPRHS->getOperand(DiffOperand);
4358 // Make sure we do a signed comparison here.
4359 return new ICmpInst(ICmpInst::getSignedPredicate(Cond), LHSV, RHSV);
4363 // Only lower this if the icmp is the only user of the GEP or if we expect
4364     // the result to fold to a constant!
4365     if (TD &&
4366         (isa<ConstantExpr>(GEPLHS) || GEPLHS->hasOneUse()) &&
4367 (isa<ConstantExpr>(GEPRHS) || GEPRHS->hasOneUse())) {
4368 // ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2) ---> (OFFSET1 cmp OFFSET2)
4369 Value *L = EmitGEPOffset(GEPLHS, *this);
4370 Value *R = EmitGEPOffset(GEPRHS, *this);
4371     return new ICmpInst(ICmpInst::getSignedPredicate(Cond), L, R);
4372     }
4373   }
4374   return 0;
4375 }
4377 /// FoldFCmp_IntToFP_Cst - Fold fcmp ([us]itofp x, cst) if possible.
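// For illustration: "fcmp ogt (sitofp i32 %x to double), 4.5" becomes
// "icmp sgt i32 %x, 4", since every i32 is exactly representable in a double
// and the fractional constant is handled by rounding toward zero and
// adjusting the predicate below.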
4379 Instruction *InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst &I,
4380                                                 Instruction *LHSI,
4381                                                 Constant *RHSC) {
4382 if (!isa<ConstantFP>(RHSC)) return 0;
4383 const APFloat &RHS = cast<ConstantFP>(RHSC)->getValueAPF();
4385 // Get the width of the mantissa. We don't want to hack on conversions that
4386 // might lose information from the integer, e.g. "i64 -> float"
4387 int MantissaWidth = LHSI->getType()->getFPMantissaWidth();
4388 if (MantissaWidth == -1) return 0; // Unknown.
4390   // Check to see that the input is converted from an integer type that is
4391   // small enough that it preserves all bits. TODO: check here for "known" sign bits.
4392 // This would allow us to handle (fptosi (x >>s 62) to float) if x is i64 f.e.
4393 unsigned InputSize = LHSI->getOperand(0)->getType()->getScalarSizeInBits();
4395 // If this is a uitofp instruction, we need an extra bit to hold the sign.
4396   bool LHSUnsigned = isa<UIToFPInst>(LHSI);
4397   if (LHSUnsigned)
4398     ++InputSize;
4400 // If the conversion would lose info, don't hack on this.
4401   if ((int)InputSize > MantissaWidth)
4402     return 0;
4404 // Otherwise, we can potentially simplify the comparison. We know that it
4405 // will always come through as an integer value and we know the constant is
4406 // not a NAN (it would have been previously simplified).
4407 assert(!RHS.isNaN() && "NaN comparison not already folded!");
4409 ICmpInst::Predicate Pred;
4410 switch (I.getPredicate()) {
4411 default: llvm_unreachable("Unexpected predicate!");
4412 case FCmpInst::FCMP_UEQ:
4413 case FCmpInst::FCMP_OEQ:
4414 Pred = ICmpInst::ICMP_EQ;
4416 case FCmpInst::FCMP_UGT:
4417 case FCmpInst::FCMP_OGT:
4418 Pred = LHSUnsigned ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_SGT;
4420 case FCmpInst::FCMP_UGE:
4421 case FCmpInst::FCMP_OGE:
4422 Pred = LHSUnsigned ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_SGE;
4424 case FCmpInst::FCMP_ULT:
4425 case FCmpInst::FCMP_OLT:
4426 Pred = LHSUnsigned ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_SLT;
4428 case FCmpInst::FCMP_ULE:
4429 case FCmpInst::FCMP_OLE:
4430 Pred = LHSUnsigned ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_SLE;
4432 case FCmpInst::FCMP_UNE:
4433 case FCmpInst::FCMP_ONE:
4434 Pred = ICmpInst::ICMP_NE;
4436 case FCmpInst::FCMP_ORD:
4437 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
4438 case FCmpInst::FCMP_UNO:
4439 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
4442 const IntegerType *IntTy = cast<IntegerType>(LHSI->getOperand(0)->getType());
4444 // Now we know that the APFloat is a normal number, zero or inf.
4446 // See if the FP constant is too large for the integer. For example,
4447 // comparing an i8 to 300.0.
4448 unsigned IntWidth = IntTy->getScalarSizeInBits();
4451 // If the RHS value is > SignedMax, fold the comparison. This handles +INF
4452 // and large values.
4453 APFloat SMax(RHS.getSemantics(), APFloat::fcZero, false);
4454 SMax.convertFromAPInt(APInt::getSignedMaxValue(IntWidth), true,
4455 APFloat::rmNearestTiesToEven);
4456 if (SMax.compare(RHS) == APFloat::cmpLessThan) { // smax < 13123.0
4457 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SLT ||
4458 Pred == ICmpInst::ICMP_SLE)
4459 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
4460 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
4463 // If the RHS value is > UnsignedMax, fold the comparison. This handles
4464 // +INF and large values.
4465 APFloat UMax(RHS.getSemantics(), APFloat::fcZero, false);
4466 UMax.convertFromAPInt(APInt::getMaxValue(IntWidth), false,
4467 APFloat::rmNearestTiesToEven);
4468 if (UMax.compare(RHS) == APFloat::cmpLessThan) { // umax < 13123.0
4469 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_ULT ||
4470 Pred == ICmpInst::ICMP_ULE)
4471 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
4472 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
4477 // See if the RHS value is < SignedMin.
4478 APFloat SMin(RHS.getSemantics(), APFloat::fcZero, false);
4479 SMin.convertFromAPInt(APInt::getSignedMinValue(IntWidth), true,
4480 APFloat::rmNearestTiesToEven);
4481 if (SMin.compare(RHS) == APFloat::cmpGreaterThan) { // smin > 12312.0
4482 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT ||
4483 Pred == ICmpInst::ICMP_SGE)
4484 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
4485 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
4489 // Okay, now we know that the FP constant fits in the range [SMIN, SMAX] or
4490 // [0, UMAX], but it may still be fractional. See if it is fractional by
4491 // casting the FP value to the integer value and back, checking for equality.
4492 // Don't do this for zero, because -0.0 is not fractional.
4493 Constant *RHSInt = LHSUnsigned
4494 ? ConstantExpr::getFPToUI(RHSC, IntTy)
4495 : ConstantExpr::getFPToSI(RHSC, IntTy);
4496 if (!RHS.isZero()) {
4497 bool Equal = LHSUnsigned
4498 ? ConstantExpr::getUIToFP(RHSInt, RHSC->getType()) == RHSC
4499 : ConstantExpr::getSIToFP(RHSInt, RHSC->getType()) == RHSC;
4501 // If we had a comparison against a fractional value, we have to adjust
4502 // the compare predicate and sometimes the value. RHSC is rounded towards
4503 // zero at this point.
4505 default: llvm_unreachable("Unexpected integer comparison!");
4506 case ICmpInst::ICMP_NE: // (float)int != 4.4 --> true
4507 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
4508 case ICmpInst::ICMP_EQ: // (float)int == 4.4 --> false
4509 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
4510 case ICmpInst::ICMP_ULE:
4511 // (float)int <= 4.4 --> int <= 4
4512 // (float)int <= -4.4 --> false
4513 if (RHS.isNegative())
4514 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
4516 case ICmpInst::ICMP_SLE:
4517 // (float)int <= 4.4 --> int <= 4
4518 // (float)int <= -4.4 --> int < -4
4519 if (RHS.isNegative())
4520 Pred = ICmpInst::ICMP_SLT;
4522 case ICmpInst::ICMP_ULT:
4523 // (float)int < -4.4 --> false
4524 // (float)int < 4.4 --> int <= 4
4525 if (RHS.isNegative())
4526 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
4527 Pred = ICmpInst::ICMP_ULE;
4529 case ICmpInst::ICMP_SLT:
4530 // (float)int < -4.4 --> int < -4
4531 // (float)int < 4.4 --> int <= 4
4532 if (!RHS.isNegative())
4533 Pred = ICmpInst::ICMP_SLE;
4535 case ICmpInst::ICMP_UGT:
4536 // (float)int > 4.4 --> int > 4
4537 // (float)int > -4.4 --> true
4538 if (RHS.isNegative())
4539 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
4541 case ICmpInst::ICMP_SGT:
4542 // (float)int > 4.4 --> int > 4
4543 // (float)int > -4.4 --> int >= -4
4544 if (RHS.isNegative())
4545 Pred = ICmpInst::ICMP_SGE;
4547 case ICmpInst::ICMP_UGE:
4548 // (float)int >= -4.4 --> true
4549 // (float)int >= 4.4 --> int > 4
4550 if (!RHS.isNegative())
4551 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
4552 Pred = ICmpInst::ICMP_UGT;
4554 case ICmpInst::ICMP_SGE:
4555 // (float)int >= -4.4 --> int >= -4
4556 // (float)int >= 4.4 --> int > 4
4557 if (!RHS.isNegative())
4558 Pred = ICmpInst::ICMP_SGT;
4564 // Lower this FP comparison into an appropriate integer version of the
4566 return new ICmpInst(Pred, LHSI->getOperand(0), RHSInt);
4569 /// FoldCmpLoadFromIndexedGlobal - Called when we see this pattern:
4570 /// cmp pred (load (gep GV, ...)), cmpcst
4571 /// where GV is a global variable with a constant initializer. Try to simplify
4572 /// this into some simple computation that does not need the load. For example
4573 /// we can optimize "icmp eq (load (gep "foo", 0, i)), 0" into "icmp eq i, 3".
4575 /// If AndCst is non-null, then the loaded value is masked with that constant
4576 /// before doing the comparison. This handles cases like "A[i]&4 == 0".
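// A sketch of the intent (hypothetical global): given
//   @tab = constant [4 x i8] c"abbc"
// the compare "icmp eq i8 (load (gep @tab, 0, %i)), 98" is true exactly for
// indices 1 and 2, so the state machines below can emit a range test such as
// "icmp ult (add %i, -1), 2" in place of the load.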
4577 Instruction *InstCombiner::
4578 FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
4579 CmpInst &ICI, ConstantInt *AndCst) {
4580 ConstantArray *Init = dyn_cast<ConstantArray>(GV->getInitializer());
4581 if (Init == 0 || Init->getNumOperands() > 1024) return 0;
4583 // There are many forms of this optimization we can handle, for now, just do
4584 // the simple index into a single-dimensional array.
4586 // Require: GEP GV, 0, i {{, constant indices}}
4587 if (GEP->getNumOperands() < 3 ||
4588 !isa<ConstantInt>(GEP->getOperand(1)) ||
4589 !cast<ConstantInt>(GEP->getOperand(1))->isZero() ||
4590       isa<Constant>(GEP->getOperand(2)))
4591     return 0;
4593 // Check that indices after the variable are constants and in-range for the
4594   // type they index. Collect the indices. This is typically for arrays of
4595   // structs.
4596 SmallVector<unsigned, 4> LaterIndices;
4598 const Type *EltTy = cast<ArrayType>(Init->getType())->getElementType();
4599 for (unsigned i = 3, e = GEP->getNumOperands(); i != e; ++i) {
4600 ConstantInt *Idx = dyn_cast<ConstantInt>(GEP->getOperand(i));
4601 if (Idx == 0) return 0; // Variable index.
4603 uint64_t IdxVal = Idx->getZExtValue();
4604 if ((unsigned)IdxVal != IdxVal) return 0; // Too large array index.
4606 if (const StructType *STy = dyn_cast<StructType>(EltTy))
4607 EltTy = STy->getElementType(IdxVal);
4608 else if (const ArrayType *ATy = dyn_cast<ArrayType>(EltTy)) {
4609 if (IdxVal >= ATy->getNumElements()) return 0;
4610 EltTy = ATy->getElementType();
4611     } else {
4612       return 0;    // Unknown type.
4613     }
4615     LaterIndices.push_back(IdxVal);
4616   }
4618 enum { Overdefined = -3, Undefined = -2 };
4620 // Variables for our state machines.
4622 // FirstTrueElement/SecondTrueElement - Used to emit a comparison of the form
4623 // "i == 47 | i == 87", where 47 is the first index the condition is true for,
4624 // and 87 is the second (and last) index. FirstTrueElement is -2 when
4625 // undefined, otherwise set to the first true element. SecondTrueElement is
4626 // -2 when undefined, -3 when overdefined and >= 0 when that index is true.
4627 int FirstTrueElement = Undefined, SecondTrueElement = Undefined;
4629 // FirstFalseElement/SecondFalseElement - Used to emit a comparison of the
4630 // form "i != 47 & i != 87". Same state transitions as for true elements.
4631 int FirstFalseElement = Undefined, SecondFalseElement = Undefined;
4633 /// TrueRangeEnd/FalseRangeEnd - In conjunction with First*Element, these
4634 /// define a state machine that triggers for ranges of values that the index
4635 /// is true or false for. This triggers on things like "abbbbc"[i] == 'b'.
4636 /// This is -2 when undefined, -3 when overdefined, and otherwise the last
4637 /// index in the range (inclusive). We use -2 for undefined here because we
4638 /// use relative comparisons and don't want 0-1 to match -1.
4639 int TrueRangeEnd = Undefined, FalseRangeEnd = Undefined;
4641 // MagicBitvector - This is a magic bitvector where we set a bit if the
4642 // comparison is true for element 'i'. If there are 64 elements or less in
4643 // the array, this will fully represent all the comparison results.
4644 uint64_t MagicBitvector = 0;
4647 // Scan the array and see if one of our patterns matches.
4648 Constant *CompareRHS = cast<Constant>(ICI.getOperand(1));
4649 for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
4650 Constant *Elt = Init->getOperand(i);
4652 // If this is indexing an array of structures, get the structure element.
4653 if (!LaterIndices.empty())
4654 Elt = ConstantExpr::getExtractValue(Elt, LaterIndices.data(),
4655 LaterIndices.size());
4657 // If the element is masked, handle it.
4658 if (AndCst) Elt = ConstantExpr::getAnd(Elt, AndCst);
4660 // Find out if the comparison would be true or false for the i'th element.
4661     Constant *C = ConstantFoldCompareInstOperands(ICI.getPredicate(), Elt,
4662                                                   CompareRHS, TD);
4663 // If the result is undef for this element, ignore it.
4664 if (isa<UndefValue>(C)) {
4665 // Extend range state machines to cover this element in case there is an
4666 // undef in the middle of the range.
4667       if (TrueRangeEnd == (int)i-1)
4668         TrueRangeEnd = i;
4669       if (FalseRangeEnd == (int)i-1)
4670         FalseRangeEnd = i;
4672       continue;
4674 // If we can't compute the result for any of the elements, we have to give
4675 // up evaluating the entire conditional.
4676 if (!isa<ConstantInt>(C)) return 0;
4678 // Otherwise, we know if the comparison is true or false for this element,
4679 // update our state machines.
4680 bool IsTrueForElt = !cast<ConstantInt>(C)->isZero();
4682 // State machine for single/double/range index comparison.
4684 // Update the TrueElement state machine.
4685 if (FirstTrueElement == Undefined)
4686 FirstTrueElement = TrueRangeEnd = i; // First true element.
4688 // Update double-compare state machine.
4689 if (SecondTrueElement == Undefined)
4690 SecondTrueElement = i;
4692 SecondTrueElement = Overdefined;
4694 // Update range state machine.
4695 if (TrueRangeEnd == (int)i-1)
4698 TrueRangeEnd = Overdefined;
4701 // Update the FalseElement state machine.
4702 if (FirstFalseElement == Undefined)
4703 FirstFalseElement = FalseRangeEnd = i; // First false element.
4705 // Update double-compare state machine.
4706 if (SecondFalseElement == Undefined)
4707 SecondFalseElement = i;
4709 SecondFalseElement = Overdefined;
4711 // Update range state machine.
4712 if (FalseRangeEnd == (int)i-1)
4715 FalseRangeEnd = Overdefined;
4720 // If this element is in range, update our magic bitvector.
4721 if (i < 64 && IsTrueForElt)
4722 MagicBitvector |= 1ULL << i;
4724 // If all of our states become overdefined, bail out early. Since the
4725 // predicate is expensive, only check it every 8 elements. This is only
4726 // really useful for really huge arrays.
4727 if ((i & 8) == 0 && i >= 64 && SecondTrueElement == Overdefined &&
4728 SecondFalseElement == Overdefined && TrueRangeEnd == Overdefined &&
4729         FalseRangeEnd == Overdefined)
4730       return 0;
4731   }
4733 // Now that we've scanned the entire array, emit our new comparison(s). We
4734 // order the state machines in complexity of the generated code.
4735 Value *Idx = GEP->getOperand(2);
4738   // If the comparison is only true for one or two elements, emit direct
4739   // comparisons.
4740 if (SecondTrueElement != Overdefined) {
4741 // None true -> false.
4742 if (FirstTrueElement == Undefined)
4743 return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(GEP->getContext()));
4745 Value *FirstTrueIdx = ConstantInt::get(Idx->getType(), FirstTrueElement);
4747 // True for one element -> 'i == 47'.
4748 if (SecondTrueElement == Undefined)
4749 return new ICmpInst(ICmpInst::ICMP_EQ, Idx, FirstTrueIdx);
4751 // True for two elements -> 'i == 47 | i == 72'.
4752 Value *C1 = Builder->CreateICmpEQ(Idx, FirstTrueIdx);
4753 Value *SecondTrueIdx = ConstantInt::get(Idx->getType(), SecondTrueElement);
4754 Value *C2 = Builder->CreateICmpEQ(Idx, SecondTrueIdx);
4755 return BinaryOperator::CreateOr(C1, C2);
4758   // If the comparison is only false for one or two elements, emit direct
4759   // comparisons.
4760 if (SecondFalseElement != Overdefined) {
4761 // None false -> true.
4762 if (FirstFalseElement == Undefined)
4763 return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(GEP->getContext()));
4765 Value *FirstFalseIdx = ConstantInt::get(Idx->getType(), FirstFalseElement);
4767 // False for one element -> 'i != 47'.
4768 if (SecondFalseElement == Undefined)
4769 return new ICmpInst(ICmpInst::ICMP_NE, Idx, FirstFalseIdx);
4771 // False for two elements -> 'i != 47 & i != 72'.
4772 Value *C1 = Builder->CreateICmpNE(Idx, FirstFalseIdx);
4773 Value *SecondFalseIdx = ConstantInt::get(Idx->getType(),SecondFalseElement);
4774 Value *C2 = Builder->CreateICmpNE(Idx, SecondFalseIdx);
4775 return BinaryOperator::CreateAnd(C1, C2);
4778 // If the comparison can be replaced with a range comparison for the elements
4779 // where it is true, emit the range check.
4780 if (TrueRangeEnd != Overdefined) {
4781 assert(TrueRangeEnd != FirstTrueElement && "Should emit single compare");
4783 // Generate (i-FirstTrue) <u (TrueRangeEnd-FirstTrue+1).
4784 if (FirstTrueElement) {
4785 Value *Offs = ConstantInt::get(Idx->getType(), -FirstTrueElement);
4786 Idx = Builder->CreateAdd(Idx, Offs);
4789 Value *End = ConstantInt::get(Idx->getType(),
4790 TrueRangeEnd-FirstTrueElement+1);
4791 return new ICmpInst(ICmpInst::ICMP_ULT, Idx, End);
4794 // False range check.
4795 if (FalseRangeEnd != Overdefined) {
4796 assert(FalseRangeEnd != FirstFalseElement && "Should emit single compare");
4797 // Generate (i-FirstFalse) >u (FalseRangeEnd-FirstFalse).
4798 if (FirstFalseElement) {
4799 Value *Offs = ConstantInt::get(Idx->getType(), -FirstFalseElement);
4800 Idx = Builder->CreateAdd(Idx, Offs);
4803 Value *End = ConstantInt::get(Idx->getType(),
4804 FalseRangeEnd-FirstFalseElement);
4805 return new ICmpInst(ICmpInst::ICMP_UGT, Idx, End);
4809 // If a 32-bit or 64-bit magic bitvector captures the entire comparison state
4810 // of this load, replace it with computation that does:
4811 // ((magic_cst >> i) & 1) != 0
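  // For example (illustration only): with a hypothetical @tab = [4 x i8]
  // c"abbc" and a compare against 'b', the bitvector is 0b0110 = 6 and the
  // emitted test is "((6 >> %i) & 1) != 0", which touches no memory at all.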
4812 if (Init->getNumOperands() <= 32 ||
4813       (TD && Init->getNumOperands() <= 64 && TD->isLegalInteger(64))) {
4814     const Type *Ty;
4815 if (Init->getNumOperands() <= 32)
4816       Ty = Type::getInt32Ty(Init->getContext());
4817     else
4818 Ty = Type::getInt64Ty(Init->getContext());
4819 Value *V = Builder->CreateIntCast(Idx, Ty, false);
4820 V = Builder->CreateLShr(ConstantInt::get(Ty, MagicBitvector), V);
4821 V = Builder->CreateAnd(ConstantInt::get(Ty, 1), V);
4822     return new ICmpInst(ICmpInst::ICMP_NE, V, ConstantInt::get(Ty, 0));
4823   }
4825   return 0;
4826 }
4829 Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
4830 bool Changed = false;
4832   /// Orders the operands of the compare so that they are listed from most
4833   /// complex to least complex. This puts constants after unary operators,
4834   /// which come after binary operators.
4835   if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1))) {
4836     I.swapOperands();
4837     Changed = true;
4838   }
4840 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
4842 if (Value *V = SimplifyFCmpInst(I.getPredicate(), Op0, Op1, TD))
4843 return ReplaceInstUsesWith(I, V);
4845   // Simplify 'fcmp pred X, X'
4846   if (Op0 == Op1) {
4847     switch (I.getPredicate()) {
4848 default: llvm_unreachable("Unknown predicate!");
4849 case FCmpInst::FCMP_UNO: // True if unordered: isnan(X) | isnan(Y)
4850 case FCmpInst::FCMP_ULT: // True if unordered or less than
4851 case FCmpInst::FCMP_UGT: // True if unordered or greater than
4852 case FCmpInst::FCMP_UNE: // True if unordered or not equal
4853 // Canonicalize these to be 'fcmp uno %X, 0.0'.
4854 I.setPredicate(FCmpInst::FCMP_UNO);
4855     I.setOperand(1, Constant::getNullValue(Op0->getType()));
4856     return &I;
4858 case FCmpInst::FCMP_ORD: // True if ordered (no nans)
4859 case FCmpInst::FCMP_OEQ: // True if ordered and equal
4860 case FCmpInst::FCMP_OGE: // True if ordered and greater than or equal
4861 case FCmpInst::FCMP_OLE: // True if ordered and less than or equal
4862 // Canonicalize these to be 'fcmp ord %X, 0.0'.
4863 I.setPredicate(FCmpInst::FCMP_ORD);
4864 I.setOperand(1, Constant::getNullValue(Op0->getType()));
4869 // Handle fcmp with constant RHS
4870 if (Constant *RHSC = dyn_cast<Constant>(Op1)) {
4871 if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
4872 switch (LHSI->getOpcode()) {
4873 case Instruction::PHI:
4874 // Only fold fcmp into the PHI if the phi and fcmp are in the same
4875 // block. If in the same block, we're encouraging jump threading. If
4876 // not, we are just pessimizing the code by making an i1 phi.
4877 if (LHSI->getParent() == I.getParent())
        if (Instruction *NV = FoldOpIntoPhi(I, true))
          return NV;
      break;
4881 case Instruction::SIToFP:
4882 case Instruction::UIToFP:
      if (Instruction *NV = FoldFCmp_IntToFP_Cst(I, LHSI, RHSC))
        return NV;
      break;
4886 case Instruction::Select: {
4887 // If either operand of the select is a constant, we can fold the
4888 // comparison into the select arms, which will cause one to be
4889 // constant folded and the select turned into a bitwise or.
4890 Value *Op1 = 0, *Op2 = 0;
4891 if (LHSI->hasOneUse()) {
4892 if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1))) {
4893 // Fold the known value into the constant operand.
4894 Op1 = ConstantExpr::getCompare(I.getPredicate(), C, RHSC);
4895 // Insert a new FCmp of the other select operand.
4896 Op2 = Builder->CreateFCmp(I.getPredicate(),
4897 LHSI->getOperand(2), RHSC, I.getName());
4898 } else if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2))) {
4899 // Fold the known value into the constant operand.
4900 Op2 = ConstantExpr::getCompare(I.getPredicate(), C, RHSC);
4901 // Insert a new FCmp of the other select operand.
          Op1 = Builder->CreateFCmp(I.getPredicate(), LHSI->getOperand(1),
                                    RHSC, I.getName());
        }
      }

      if (Op1)
        return SelectInst::Create(LHSI->getOperand(0), Op1, Op2);
4911 case Instruction::Load:
4912 if (GetElementPtrInst *GEP =
4913 dyn_cast<GetElementPtrInst>(LHSI->getOperand(0))) {
4914 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
4915 if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
4916 !cast<LoadInst>(LHSI)->isVolatile())
4917 if (Instruction *Res = FoldCmpLoadFromIndexedGlobal(GEP, GV, I))
4924 return Changed ? &I : 0;
4927 Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
4928 bool Changed = false;
4930 /// Orders the operands of the compare so that they are listed from most
4931 /// complex to least complex. This puts constants before unary operators,
4932 /// before binary operators.
  if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1))) {
    I.swapOperands();
    Changed = true;
  }
4938 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
4940 if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1, TD))
4941 return ReplaceInstUsesWith(I, V);
4943 const Type *Ty = Op0->getType();
4945 // icmp's with boolean values can always be turned into bitwise operations
4946 if (Ty == Type::getInt1Ty(I.getContext())) {
4947 switch (I.getPredicate()) {
4948 default: llvm_unreachable("Invalid icmp instruction!");
4949 case ICmpInst::ICMP_EQ: { // icmp eq i1 A, B -> ~(A^B)
4950 Value *Xor = Builder->CreateXor(Op0, Op1, I.getName()+"tmp");
4951 return BinaryOperator::CreateNot(Xor);
4953 case ICmpInst::ICMP_NE: // icmp eq i1 A, B -> A^B
4954 return BinaryOperator::CreateXor(Op0, Op1);
4956 case ICmpInst::ICMP_UGT:
4957 std::swap(Op0, Op1); // Change icmp ugt -> icmp ult
4959 case ICmpInst::ICMP_ULT:{ // icmp ult i1 A, B -> ~A & B
4960 Value *Not = Builder->CreateNot(Op0, I.getName()+"tmp");
4961 return BinaryOperator::CreateAnd(Not, Op1);
4963 case ICmpInst::ICMP_SGT:
4964 std::swap(Op0, Op1); // Change icmp sgt -> icmp slt
4966 case ICmpInst::ICMP_SLT: { // icmp slt i1 A, B -> A & ~B
4967 Value *Not = Builder->CreateNot(Op1, I.getName()+"tmp");
4968 return BinaryOperator::CreateAnd(Not, Op0);
4970 case ICmpInst::ICMP_UGE:
4971 std::swap(Op0, Op1); // Change icmp uge -> icmp ule
4973 case ICmpInst::ICMP_ULE: { // icmp ule i1 A, B -> ~A | B
4974 Value *Not = Builder->CreateNot(Op0, I.getName()+"tmp");
4975 return BinaryOperator::CreateOr(Not, Op1);
4977 case ICmpInst::ICMP_SGE:
4978 std::swap(Op0, Op1); // Change icmp sge -> icmp sle
4980 case ICmpInst::ICMP_SLE: { // icmp sle i1 A, B -> A | ~B
4981 Value *Not = Builder->CreateNot(Op1, I.getName()+"tmp");
4982 return BinaryOperator::CreateOr(Not, Op0);
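    // A quick sanity check of the i1 folds above (recall that for signed
    // predicates the i1 value 1 is interpreted as -1): icmp ult i1 A, B is
    // true only for A=0, B=1, which is exactly (~A) & B, while icmp slt i1
    // A, B is true only for A=1, B=0, i.e. A & (~B).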
  unsigned BitWidth = 0;
  if (TD)
    BitWidth = TD->getTypeSizeInBits(Ty->getScalarType());
4990 else if (Ty->isIntOrIntVector())
4991 BitWidth = Ty->getScalarSizeInBits();
4993 bool isSignBit = false;
4995 // See if we are doing a comparison with a constant.
4996 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
4997 Value *A = 0, *B = 0;
4999 // (icmp ne/eq (sub A B) 0) -> (icmp ne/eq A, B)
5000 if (I.isEquality() && CI->isZero() &&
5001 match(Op0, m_Sub(m_Value(A), m_Value(B)))) {
5002 // (icmp cond A B) if cond is equality
5003 return new ICmpInst(I.getPredicate(), A, B);
5006 // If we have an icmp le or icmp ge instruction, turn it into the
5007 // appropriate icmp lt or icmp gt instruction. This allows us to rely on
5008 // them being folded in the code below. The SimplifyICmpInst code has
5009 // already handled the edge cases for us, so we just assert on them.
5010 switch (I.getPredicate()) {
    default: break;
    case ICmpInst::ICMP_ULE:
      assert(!CI->isMaxValue(false));                 // A <=u MAX -> TRUE
      return new ICmpInst(ICmpInst::ICMP_ULT, Op0,
                          AddOne(CI));
    case ICmpInst::ICMP_SLE:
      assert(!CI->isMaxValue(true));                  // A <=s MAX -> TRUE
      return new ICmpInst(ICmpInst::ICMP_SLT, Op0,
                          AddOne(CI));
    case ICmpInst::ICMP_UGE:
      assert(!CI->isMinValue(false));                 // A >=u MIN -> TRUE
      return new ICmpInst(ICmpInst::ICMP_UGT, Op0,
                          SubOne(CI));
    case ICmpInst::ICMP_SGE:
      assert(!CI->isMinValue(true));                  // A >=s MIN -> TRUE
      return new ICmpInst(ICmpInst::ICMP_SGT, Op0,
                          SubOne(CI));
    }
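    // For example, 'icmp ule i32 %X, 7' becomes 'icmp ult i32 %X, 8' and
    // 'icmp sge i32 %X, 10' becomes 'icmp sgt i32 %X, 9'; SimplifyICmpInst
    // has already folded away the MAX/MIN boundary cases the asserts above
    // rule out.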
5030 // If this comparison is a normal comparison, it demands all
5031 // bits, if it is a sign bit comparison, it only demands the sign bit.
    bool UnusedBit;
    isSignBit = isSignBitCheck(I.getPredicate(), CI, UnusedBit);
  }
5036 // See if we can fold the comparison based on range information we can get
5037 // by checking whether bits are known to be zero or one in the input.
5038 if (BitWidth != 0) {
5039 APInt Op0KnownZero(BitWidth, 0), Op0KnownOne(BitWidth, 0);
5040 APInt Op1KnownZero(BitWidth, 0), Op1KnownOne(BitWidth, 0);
5042 if (SimplifyDemandedBits(I.getOperandUse(0),
5043 isSignBit ? APInt::getSignBit(BitWidth)
5044 : APInt::getAllOnesValue(BitWidth),
                             Op0KnownZero, Op0KnownOne, 0))
      return &I;
5047 if (SimplifyDemandedBits(I.getOperandUse(1),
5048 APInt::getAllOnesValue(BitWidth),
                             Op1KnownZero, Op1KnownOne, 0))
      return &I;
5052 // Given the known and unknown bits, compute a range that the LHS could be
5053 // in. Compute the Min, Max and RHS values based on the known bits. For the
5054 // EQ and NE we use unsigned values.
5055 APInt Op0Min(BitWidth, 0), Op0Max(BitWidth, 0);
5056 APInt Op1Min(BitWidth, 0), Op1Max(BitWidth, 0);
    if (I.isSigned()) {
      ComputeSignedMinMaxValuesFromKnownBits(Op0KnownZero, Op0KnownOne,
                                             Op0Min, Op0Max);
      ComputeSignedMinMaxValuesFromKnownBits(Op1KnownZero, Op1KnownOne,
                                             Op1Min, Op1Max);
    } else {
      ComputeUnsignedMinMaxValuesFromKnownBits(Op0KnownZero, Op0KnownOne,
                                               Op0Min, Op0Max);
      ComputeUnsignedMinMaxValuesFromKnownBits(Op1KnownZero, Op1KnownOne,
                                               Op1Min, Op1Max);
    }
5069 // If Min and Max are known to be the same, then SimplifyDemandedBits
5070 // figured out that the LHS is a constant. Just constant fold this now so
5071 // that code below can assume that Min != Max.
5072 if (!isa<Constant>(Op0) && Op0Min == Op0Max)
5073 return new ICmpInst(I.getPredicate(),
5074 ConstantInt::get(I.getContext(), Op0Min), Op1);
5075 if (!isa<Constant>(Op1) && Op1Min == Op1Max)
5076 return new ICmpInst(I.getPredicate(), Op0,
5077 ConstantInt::get(I.getContext(), Op1Min));
5079 // Based on the range information we know about the LHS, see if we can
5080 // simplify this comparison. For example, (x&4) < 8 is always true.
5081 switch (I.getPredicate()) {
5082 default: llvm_unreachable("Unknown icmp opcode!");
5083 case ICmpInst::ICMP_EQ:
5084 if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max))
5085 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
5087 case ICmpInst::ICMP_NE:
5088 if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max))
5089 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
5091 case ICmpInst::ICMP_ULT:
5092 if (Op0Max.ult(Op1Min)) // A <u B -> true if max(A) < min(B)
5093 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
5094 if (Op0Min.uge(Op1Max)) // A <u B -> false if min(A) >= max(B)
5095 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
5096 if (Op1Min == Op0Max) // A <u B -> A != B if max(A) == min(B)
5097 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
5098 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
5099 if (Op1Max == Op0Min+1) // A <u C -> A == C-1 if min(A)+1 == C
          return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
                        ConstantInt::get(CI->getContext(), CI->getValue()-1));
5103 // (x <u 2147483648) -> (x >s -1) -> true if sign bit clear
5104 if (CI->isMinValue(true))
5105 return new ICmpInst(ICmpInst::ICMP_SGT, Op0,
5106 Constant::getAllOnesValue(Op0->getType()));
5109 case ICmpInst::ICMP_UGT:
5110 if (Op0Min.ugt(Op1Max)) // A >u B -> true if min(A) > max(B)
5111 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
      if (Op0Max.ule(Op1Min))          // A >u B -> false if max(A) <= min(B)
5113 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
5115 if (Op1Max == Op0Min) // A >u B -> A != B if min(A) == max(B)
5116 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
5117 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
        if (Op1Min == Op0Max-1)        // A >u C -> A == C+1 if max(A)-1 == C
          return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
                        ConstantInt::get(CI->getContext(), CI->getValue()+1));
5122 // (x >u 2147483647) -> (x <s 0) -> true if sign bit set
5123 if (CI->isMaxValue(true))
5124 return new ICmpInst(ICmpInst::ICMP_SLT, Op0,
5125 Constant::getNullValue(Op0->getType()));
5128 case ICmpInst::ICMP_SLT:
      if (Op0Max.slt(Op1Min))          // A <s B -> true if max(A) < min(B)
        return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
      if (Op0Min.sge(Op1Max))          // A <s B -> false if min(A) >= max(B)
5132 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
5133 if (Op1Min == Op0Max) // A <s B -> A != B if max(A) == min(B)
5134 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
5135 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
5136 if (Op1Max == Op0Min+1) // A <s C -> A == C-1 if min(A)+1 == C
          return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
                        ConstantInt::get(CI->getContext(), CI->getValue()-1));
      }
      break;
5141 case ICmpInst::ICMP_SGT:
5142 if (Op0Min.sgt(Op1Max)) // A >s B -> true if min(A) > max(B)
5143 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
5144 if (Op0Max.sle(Op1Min)) // A >s B -> false if max(A) <= min(B)
5145 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
5147 if (Op1Max == Op0Min) // A >s B -> A != B if min(A) == max(B)
5148 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
5149 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
5150 if (Op1Min == Op0Max-1) // A >s C -> A == C+1 if max(A)-1 == C
          return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
                        ConstantInt::get(CI->getContext(), CI->getValue()+1));
      }
      break;
5155 case ICmpInst::ICMP_SGE:
5156 assert(!isa<ConstantInt>(Op1) && "ICMP_SGE with ConstantInt not folded!");
5157 if (Op0Min.sge(Op1Max)) // A >=s B -> true if min(A) >= max(B)
5158 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
5159 if (Op0Max.slt(Op1Min)) // A >=s B -> false if max(A) < min(B)
5160 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
5162 case ICmpInst::ICMP_SLE:
5163 assert(!isa<ConstantInt>(Op1) && "ICMP_SLE with ConstantInt not folded!");
5164 if (Op0Max.sle(Op1Min)) // A <=s B -> true if max(A) <= min(B)
5165 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
5166 if (Op0Min.sgt(Op1Max)) // A <=s B -> false if min(A) > max(B)
5167 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
5169 case ICmpInst::ICMP_UGE:
5170 assert(!isa<ConstantInt>(Op1) && "ICMP_UGE with ConstantInt not folded!");
5171 if (Op0Min.uge(Op1Max)) // A >=u B -> true if min(A) >= max(B)
5172 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
5173 if (Op0Max.ult(Op1Min)) // A >=u B -> false if max(A) < min(B)
5174 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
5176 case ICmpInst::ICMP_ULE:
5177 assert(!isa<ConstantInt>(Op1) && "ICMP_ULE with ConstantInt not folded!");
5178 if (Op0Max.ule(Op1Min)) // A <=u B -> true if max(A) <= min(B)
5179 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
5180 if (Op0Min.ugt(Op1Max)) // A <=u B -> false if min(A) > max(B)
5181 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
5185 // Turn a signed comparison into an unsigned one if both operands
5186 // are known to have the same sign.
    if (I.isSigned() &&
        ((Op0KnownZero.isNegative() && Op1KnownZero.isNegative()) ||
5189 (Op0KnownOne.isNegative() && Op1KnownOne.isNegative())))
5190 return new ICmpInst(I.getUnsignedPredicate(), Op0, Op1);
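    // For example, if the sign bit of both operands is known to be zero,
    // 'icmp slt i32 %a, %b' computes the same result as 'icmp ult i32 %a, %b',
    // so the signed predicate is replaced with its unsigned counterpart.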
5193 // Test if the ICmpInst instruction is used exclusively by a select as
5194 // part of a minimum or maximum operation. If so, refrain from doing
5195 // any other folding. This helps out other analyses which understand
5196 // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
5197 // and CodeGen. And in this case, at least one of the comparison
5198 // operands has at least one user besides the compare (the select),
5199 // which would often largely negate the benefit of folding anyway.
  if (I.hasOneUse())
    if (SelectInst *SI = dyn_cast<SelectInst>(*I.use_begin()))
      if ((SI->getOperand(1) == Op0 && SI->getOperand(2) == Op1) ||
          (SI->getOperand(2) == Op0 && SI->getOperand(1) == Op1))
        return 0;
5206 // See if we are doing a comparison between a constant and an instruction that
5207 // can be folded into the comparison.
5208 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
5209 // Since the RHS is a ConstantInt (CI), if the left hand side is an
5210 // instruction, see if that instruction also has constants so that the
5211 // instruction can be folded into the icmp
5212 if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
      if (Instruction *Res = visitICmpInstWithInstAndIntCst(I, LHSI, CI))
        return Res;
5217 // Handle icmp with constant (but not simple integer constant) RHS
5218 if (Constant *RHSC = dyn_cast<Constant>(Op1)) {
5219 if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
5220 switch (LHSI->getOpcode()) {
5221 case Instruction::GetElementPtr:
5222 // icmp pred GEP (P, int 0, int 0, int 0), null -> icmp pred P, null
5223 if (RHSC->isNullValue() &&
5224 cast<GetElementPtrInst>(LHSI)->hasAllZeroIndices())
5225 return new ICmpInst(I.getPredicate(), LHSI->getOperand(0),
5226 Constant::getNullValue(LHSI->getOperand(0)->getType()));
5228 case Instruction::PHI:
5229 // Only fold icmp into the PHI if the phi and icmp are in the same
5230 // block. If in the same block, we're encouraging jump threading. If
5231 // not, we are just pessimizing the code by making an i1 phi.
5232 if (LHSI->getParent() == I.getParent())
5233 if (Instruction *NV = FoldOpIntoPhi(I, true))
5236 case Instruction::Select: {
5237 // If either operand of the select is a constant, we can fold the
5238 // comparison into the select arms, which will cause one to be
5239 // constant folded and the select turned into a bitwise or.
5240 Value *Op1 = 0, *Op2 = 0;
5241 if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1)))
5242 Op1 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
5243 if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2)))
5244 Op2 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
5246 // We only want to perform this transformation if it will not lead to
5247 // additional code. This is true if either both sides of the select
5248 // fold to a constant (in which case the icmp is replaced with a select
5249 // which will usually simplify) or this is the only user of the
5250 // select (in which case we are trading a select+icmp for a simpler
5252 if ((Op1 && Op2) || (LHSI->hasOneUse() && (Op1 || Op2))) {
5254 Op1 = Builder->CreateICmp(I.getPredicate(), LHSI->getOperand(1),
5257 Op2 = Builder->CreateICmp(I.getPredicate(), LHSI->getOperand(2),
5259 return SelectInst::Create(LHSI->getOperand(0), Op1, Op2);
5263 case Instruction::Call:
5264 // If we have (malloc != null), and if the malloc has a single use, we
5265 // can assume it is successful and remove the malloc.
5266 if (isMalloc(LHSI) && LHSI->hasOneUse() &&
5267 isa<ConstantPointerNull>(RHSC)) {
5268 // Need to explicitly erase malloc call here, instead of adding it to
5269 // Worklist, because it won't get DCE'd from the Worklist since
5270 // isInstructionTriviallyDead() returns false for function calls.
5271 // It is OK to replace LHSI/MallocCall with Undef because the
5272 // instruction that uses it will be erased via Worklist.
5273 if (extractMallocCall(LHSI)) {
5274 LHSI->replaceAllUsesWith(UndefValue::get(LHSI->getType()));
5275 EraseInstFromFunction(*LHSI);
5276 return ReplaceInstUsesWith(I,
5277 ConstantInt::get(Type::getInt1Ty(I.getContext()),
5278 !I.isTrueWhenEqual()));
5280 if (CallInst* MallocCall = extractMallocCallFromBitCast(LHSI))
5281 if (MallocCall->hasOneUse()) {
5282 MallocCall->replaceAllUsesWith(
5283 UndefValue::get(MallocCall->getType()));
5284 EraseInstFromFunction(*MallocCall);
5285 Worklist.Add(LHSI); // The malloc's bitcast use.
5286 return ReplaceInstUsesWith(I,
5287 ConstantInt::get(Type::getInt1Ty(I.getContext()),
5288 !I.isTrueWhenEqual()));
5292 case Instruction::IntToPtr:
5293 // icmp pred inttoptr(X), null -> icmp pred X, 0
5294 if (RHSC->isNullValue() && TD &&
5295 TD->getIntPtrType(RHSC->getContext()) ==
5296 LHSI->getOperand(0)->getType())
5297 return new ICmpInst(I.getPredicate(), LHSI->getOperand(0),
5298 Constant::getNullValue(LHSI->getOperand(0)->getType()));
5301 case Instruction::Load:
5302 // Try to optimize things like "A[i] > 4" to index computations.
5303 if (GetElementPtrInst *GEP =
5304 dyn_cast<GetElementPtrInst>(LHSI->getOperand(0))) {
5305 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
5306 if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
5307 !cast<LoadInst>(LHSI)->isVolatile())
5308 if (Instruction *Res = FoldCmpLoadFromIndexedGlobal(GEP, GV, I))
5315 // If we can optimize a 'icmp GEP, P' or 'icmp P, GEP', do so now.
5316 if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op0))
5317 if (Instruction *NI = FoldGEPICmp(GEP, Op1, I.getPredicate(), I))
5319 if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op1))
5320 if (Instruction *NI = FoldGEPICmp(GEP, Op0,
5321 ICmpInst::getSwappedPredicate(I.getPredicate()), I))
5324 // Test to see if the operands of the icmp are casted versions of other
5325 // values. If the ptr->ptr cast can be stripped off both arguments, we do so
5327 if (BitCastInst *CI = dyn_cast<BitCastInst>(Op0)) {
5328 if (isa<PointerType>(Op0->getType()) &&
5329 (isa<Constant>(Op1) || isa<BitCastInst>(Op1))) {
5330 // We keep moving the cast from the left operand over to the right
5331 // operand, where it can often be eliminated completely.
5332 Op0 = CI->getOperand(0);
5334 // If operand #1 is a bitcast instruction, it must also be a ptr->ptr cast
5335 // so eliminate it as well.
5336 if (BitCastInst *CI2 = dyn_cast<BitCastInst>(Op1))
5337 Op1 = CI2->getOperand(0);
5339 // If Op1 is a constant, we can fold the cast into the constant.
5340 if (Op0->getType() != Op1->getType()) {
5341 if (Constant *Op1C = dyn_cast<Constant>(Op1)) {
        Op1 = ConstantExpr::getBitCast(Op1C, Op0->getType());
      } else {
        // Otherwise, cast the RHS right before the icmp
        Op1 = Builder->CreateBitCast(Op1, Op0->getType());
      }
5348 return new ICmpInst(I.getPredicate(), Op0, Op1);
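      // Illustrative example (%p and %q are hypothetical pointers of the same
      // non-i8* type): 'icmp eq i8* (bitcast %p), (bitcast %q)' becomes
      // 'icmp eq %p, %q'; a constant RHS is folded with getBitCast instead of
      // emitting a new cast instruction.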
5352 if (isa<CastInst>(Op0)) {
5353 // Handle the special case of: icmp (cast bool to X), <cst>
5354 // This comes up when you have code like
5357 // For generality, we handle any zero-extension of any operand comparison
5358 // with a constant or another cast from the same type.
5359 if (isa<Constant>(Op1) || isa<CastInst>(Op1))
5360 if (Instruction *R = visitICmpInstWithCastAndCast(I))
5364 // See if it's the same type of instruction on the left and right.
5365 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
5366 if (BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1)) {
5367 if (Op0I->getOpcode() == Op1I->getOpcode() && Op0I->hasOneUse() &&
5368 Op1I->hasOneUse() && Op0I->getOperand(1) == Op1I->getOperand(1)) {
5369 switch (Op0I->getOpcode()) {
5371 case Instruction::Add:
5372 case Instruction::Sub:
5373 case Instruction::Xor:
5374 if (I.isEquality()) // a+x icmp eq/ne b+x --> a icmp b
5375 return new ICmpInst(I.getPredicate(), Op0I->getOperand(0),
5376 Op1I->getOperand(0));
5377 // icmp u/s (a ^ signbit), (b ^ signbit) --> icmp s/u a, b
5378 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
5379 if (CI->getValue().isSignBit()) {
5380 ICmpInst::Predicate Pred = I.isSigned()
5381 ? I.getUnsignedPredicate()
5382 : I.getSignedPredicate();
5383 return new ICmpInst(Pred, Op0I->getOperand(0),
5384 Op1I->getOperand(0));
5387 if (CI->getValue().isMaxSignedValue()) {
5388 ICmpInst::Predicate Pred = I.isSigned()
5389 ? I.getUnsignedPredicate()
5390 : I.getSignedPredicate();
5391 Pred = I.getSwappedPredicate(Pred);
5392 return new ICmpInst(Pred, Op0I->getOperand(0),
5393 Op1I->getOperand(0));
5397 case Instruction::Mul:
5398 if (!I.isEquality())
5401 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
5402 // a * Cst icmp eq/ne b * Cst --> a & Mask icmp b & Mask
5403 // Mask = -1 >> count-trailing-zeros(Cst).
5404 if (!CI->isZero() && !CI->isOne()) {
5405 const APInt &AP = CI->getValue();
5406 ConstantInt *Mask = ConstantInt::get(I.getContext(),
                                     APInt::getLowBitsSet(AP.getBitWidth(),
                                                          AP.getBitWidth() -
                                                      AP.countTrailingZeros()));
5410 Value *And1 = Builder->CreateAnd(Op0I->getOperand(0), Mask);
5411 Value *And2 = Builder->CreateAnd(Op1I->getOperand(0), Mask);
5412 return new ICmpInst(I.getPredicate(), And1, And2);
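            // Worked example on i8 (illustrative): for Cst == 4,
            // countTrailingZeros is 2, so Mask is 0x3f and 'a*4 == b*4' is
            // rewritten to '(a & 0x3f) == (b & 0x3f)' -- only the low 6 bits
            // of each operand influence the truncated product.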
5421 // ~x < ~y --> y < x
5423 if (match(Op0, m_Not(m_Value(A))) &&
5424 match(Op1, m_Not(m_Value(B))))
5425 return new ICmpInst(I.getPredicate(), B, A);
5428 if (I.isEquality()) {
5429 Value *A, *B, *C, *D;
5431 // -x == -y --> x == y
5432 if (match(Op0, m_Neg(m_Value(A))) &&
5433 match(Op1, m_Neg(m_Value(B))))
5434 return new ICmpInst(I.getPredicate(), A, B);
5436 if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
5437 if (A == Op1 || B == Op1) { // (A^B) == A -> B == 0
5438 Value *OtherVal = A == Op1 ? B : A;
5439 return new ICmpInst(I.getPredicate(), OtherVal,
5440 Constant::getNullValue(A->getType()));
5443 if (match(Op1, m_Xor(m_Value(C), m_Value(D)))) {
5444 // A^c1 == C^c2 --> A == C^(c1^c2)
5445 ConstantInt *C1, *C2;
5446 if (match(B, m_ConstantInt(C1)) &&
5447 match(D, m_ConstantInt(C2)) && Op1->hasOneUse()) {
5448 Constant *NC = ConstantInt::get(I.getContext(),
5449 C1->getValue() ^ C2->getValue());
5450 Value *Xor = Builder->CreateXor(C, NC, "tmp");
5451 return new ICmpInst(I.getPredicate(), A, Xor);
5454 // A^B == A^D -> B == D
5455 if (A == C) return new ICmpInst(I.getPredicate(), B, D);
5456 if (A == D) return new ICmpInst(I.getPredicate(), B, C);
5457 if (B == C) return new ICmpInst(I.getPredicate(), A, D);
5458 if (B == D) return new ICmpInst(I.getPredicate(), A, C);
5462 if (match(Op1, m_Xor(m_Value(A), m_Value(B))) &&
5463 (A == Op0 || B == Op0)) {
5464 // A == (A^B) -> B == 0
5465 Value *OtherVal = A == Op0 ? B : A;
5466 return new ICmpInst(I.getPredicate(), OtherVal,
5467 Constant::getNullValue(A->getType()));
5470 // (A-B) == A -> B == 0
5471 if (match(Op0, m_Sub(m_Specific(Op1), m_Value(B))))
5472 return new ICmpInst(I.getPredicate(), B,
5473 Constant::getNullValue(B->getType()));
5475 // A == (A-B) -> B == 0
5476 if (match(Op1, m_Sub(m_Specific(Op0), m_Value(B))))
5477 return new ICmpInst(I.getPredicate(), B,
5478 Constant::getNullValue(B->getType()));
5480 // (X&Z) == (Y&Z) -> (X^Y) & Z == 0
5481 if (Op0->hasOneUse() && Op1->hasOneUse() &&
5482 match(Op0, m_And(m_Value(A), m_Value(B))) &&
5483 match(Op1, m_And(m_Value(C), m_Value(D)))) {
5484 Value *X = 0, *Y = 0, *Z = 0;
        if (A == C) {
          X = B; Y = D; Z = A;
5488 } else if (A == D) {
5489 X = B; Y = C; Z = A;
5490 } else if (B == C) {
5491 X = A; Y = D; Z = B;
5492 } else if (B == D) {
5493 X = A; Y = C; Z = B;
5496 if (X) { // Build (X^Y) & Z
5497 Op1 = Builder->CreateXor(X, Y, "tmp");
5498 Op1 = Builder->CreateAnd(Op1, Z, "tmp");
5499 I.setOperand(0, Op1);
        I.setOperand(1, Constant::getNullValue(Op1->getType()));
        return &I;
      }
5507 Value *X; ConstantInt *Cst;
5509 if (match(Op0, m_Add(m_Value(X), m_ConstantInt(Cst))) && Op1 == X)
5510 return FoldICmpAddOpCst(I, X, Cst, I.getPredicate(), Op0);
5513 if (match(Op1, m_Add(m_Value(X), m_ConstantInt(Cst))) && Op0 == X)
5514 return FoldICmpAddOpCst(I, X, Cst, I.getSwappedPredicate(), Op1);
5516 return Changed ? &I : 0;
5519 /// FoldICmpAddOpCst - Fold "icmp pred (X+CI), X".
5520 Instruction *InstCombiner::FoldICmpAddOpCst(ICmpInst &ICI,
5521 Value *X, ConstantInt *CI,
                                            ICmpInst::Predicate Pred,
                                            Value *TheAdd) {
5524 // If we have X+0, exit early (simplifying logic below) and let it get folded
5525 // elsewhere. icmp X+0, X -> icmp X, X
  if (CI->isZero()) {
    bool isTrue = ICmpInst::isTrueWhenEqual(Pred);
    return ReplaceInstUsesWith(ICI, ConstantInt::get(ICI.getType(), isTrue));
  }
5531 // (X+4) == X -> false.
5532 if (Pred == ICmpInst::ICMP_EQ)
5533 return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(X->getContext()));
5535 // (X+4) != X -> true.
5536 if (Pred == ICmpInst::ICMP_NE)
5537 return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(X->getContext()));
5539 // If this is an instruction (as opposed to constantexpr) get NUW/NSW info.
5540 bool isNUW = false, isNSW = false;
5541 if (BinaryOperator *Add = dyn_cast<BinaryOperator>(TheAdd)) {
5542 isNUW = Add->hasNoUnsignedWrap();
5543 isNSW = Add->hasNoSignedWrap();
5546 // From this point on, we know that (X+C <= X) --> (X+C < X) because C != 0,
  // so the values can never be equal.  Similarly for all other "or equals"
  // operators.

  // (X+1) <u X        --> X >u (MAXUINT-1)        --> X == 255
5551 // (X+2) <u X --> X >u (MAXUINT-2) --> X > 253
5552 // (X+MAXUINT) <u X --> X >u (MAXUINT-MAXUINT) --> X != 0
5553 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
5554 // If this is an NUW add, then this is always false.
    if (isNUW)
      return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(X->getContext()));
5558 Value *R = ConstantExpr::getSub(ConstantInt::get(CI->getType(), -1ULL), CI);
5559 return new ICmpInst(ICmpInst::ICMP_UGT, X, R);
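    // Worked example on i8 (illustrative): (X+5) <u X is true exactly when
    // the add wraps, i.e. when X >= 251, so it is rewritten to X >u 250
    // (that is, X >u MAXUINT-5).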
5562 // (X+1) >u X --> X <u (0-1) --> X != 255
5563 // (X+2) >u X --> X <u (0-2) --> X <u 254
5564 // (X+MAXUINT) >u X --> X <u (0-MAXUINT) --> X <u 1 --> X == 0
5565 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
    // If this is an NUW add, then this is always true.
    if (isNUW)
      return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(X->getContext()));
5569 return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantExpr::getNeg(CI));
5572 unsigned BitWidth = CI->getType()->getPrimitiveSizeInBits();
5573 ConstantInt *SMax = ConstantInt::get(X->getContext(),
5574 APInt::getSignedMaxValue(BitWidth));
5576 // (X+ 1) <s X --> X >s (MAXSINT-1) --> X == 127
5577 // (X+ 2) <s X --> X >s (MAXSINT-2) --> X >s 125
5578 // (X+MAXSINT) <s X --> X >s (MAXSINT-MAXSINT) --> X >s 0
5579 // (X+MINSINT) <s X --> X >s (MAXSINT-MINSINT) --> X >s -1
  // (X+ -2) <s X      --> X >s (MAXSINT- -2)      --> X >s -127
  // (X+ -1) <s X      --> X >s (MAXSINT- -1)      --> X != -128
5582 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
5583 // If this is an NSW add, then we have two cases: if the constant is
5584 // positive, then this is always false, if negative, this is always true.
    if (isNSW) {
      bool isTrue = CI->getValue().isNegative();
      return ReplaceInstUsesWith(ICI, ConstantInt::get(ICI.getType(), isTrue));
    }
5590 return new ICmpInst(ICmpInst::ICMP_SGT, X, ConstantExpr::getSub(SMax, CI));
5593 // (X+ 1) >s X --> X <s (MAXSINT-(1-1)) --> X != 127
5594 // (X+ 2) >s X --> X <s (MAXSINT-(2-1)) --> X <s 126
5595 // (X+MAXSINT) >s X --> X <s (MAXSINT-(MAXSINT-1)) --> X <s 1
  // (X+MINSINT) >s X  --> X <s (MAXSINT-(MINSINT-1)) --> X <s 0
5597 // (X+ -2) >s X --> X <s (MAXSINT-(-2-1)) --> X <s -126
5598 // (X+ -1) >s X --> X <s (MAXSINT-(-1-1)) --> X == -128
5600 // If this is an NSW add, then we have two cases: if the constant is
5601 // positive, then this is always true, if negative, this is always false.
  if (isNSW) {
    bool isTrue = !CI->getValue().isNegative();
    return ReplaceInstUsesWith(ICI, ConstantInt::get(ICI.getType(), isTrue));
  }
5607 assert(Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE);
5608 Constant *C = ConstantInt::get(X->getContext(), CI->getValue()-1);
5609 return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantExpr::getSub(SMax, C));
5612 /// FoldICmpDivCst - Fold "icmp pred, ([su]div X, DivRHS), CmpRHS" where DivRHS
5613 /// and CmpRHS are both known to be integer constants.
5614 Instruction *InstCombiner::FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
5615 ConstantInt *DivRHS) {
5616 ConstantInt *CmpRHS = cast<ConstantInt>(ICI.getOperand(1));
5617 const APInt &CmpRHSV = CmpRHS->getValue();
5619 // FIXME: If the operand types don't match the type of the divide
5620 // then don't attempt this transform. The code below doesn't have the
5621 // logic to deal with a signed divide and an unsigned compare (and
5622 // vice versa). This is because (x /s C1) <s C2 produces different
5623 // results than (x /s C1) <u C2 or (x /u C1) <s C2 or even
5624 // (x /u C1) <u C2. Simply casting the operands and result won't
5625 // work. :( The if statement below tests that condition and bails
5627 bool DivIsSigned = DivI->getOpcode() == Instruction::SDiv;
  if (!ICI.isEquality() && DivIsSigned != ICI.isSigned())
    return 0;
5630 if (DivRHS->isZero())
5631 return 0; // The ProdOV computation fails on divide by zero.
5632 if (DivIsSigned && DivRHS->isAllOnesValue())
5633 return 0; // The overflow computation also screws up here
5634 if (DivRHS->isOne())
5635 return 0; // Not worth bothering, and eliminates some funny cases
5638 // Compute Prod = CI * DivRHS. We are essentially solving an equation
5639 // of form X/C1=C2. We solve for X by multiplying C1 (DivRHS) and
5640 // C2 (CI). By solving for X we can turn this into a range check
5641 // instead of computing a divide.
5642 Constant *Prod = ConstantExpr::getMul(CmpRHS, DivRHS);
5644 // Determine if the product overflows by seeing if the product is
5645 // not equal to the divide. Make sure we do the same kind of divide
5646 // as in the LHS instruction that we're folding.
5647 bool ProdOV = (DivIsSigned ? ConstantExpr::getSDiv(Prod, DivRHS) :
5648 ConstantExpr::getUDiv(Prod, DivRHS)) != CmpRHS;
5650 // Get the ICmp opcode
5651 ICmpInst::Predicate Pred = ICI.getPredicate();
5653 // Figure out the interval that is being checked. For example, a comparison
5654 // like "X /u 5 == 0" is really checking that X is in the interval [0, 5).
5655 // Compute this interval based on the constants involved and the signedness of
5656 // the compare/divide. This computes a half-open interval, keeping track of
5657 // whether either value in the interval overflows. After analysis each
  // overflow variable is set to 0 if its corresponding bound variable is valid,
5659 // -1 if overflowed off the bottom end, or +1 if overflowed off the top end.
5660 int LoOverflow = 0, HiOverflow = 0;
5661 Constant *LoBound = 0, *HiBound = 0;
5663 if (!DivIsSigned) { // udiv
5664 // e.g. X/5 op 3 --> [15, 20)
5666 HiOverflow = LoOverflow = ProdOV;
5668 HiOverflow = AddWithOverflow(HiBound, LoBound, DivRHS, false);
5669 } else if (DivRHS->getValue().isStrictlyPositive()) { // Divisor is > 0.
5670 if (CmpRHSV == 0) { // (X / pos) op 0
5671 // Can't overflow. e.g. X/2 op 0 --> [-1, 2)
5672 LoBound = cast<ConstantInt>(ConstantExpr::getNeg(SubOne(DivRHS)));
5674 } else if (CmpRHSV.isStrictlyPositive()) { // (X / pos) op pos
5675 LoBound = Prod; // e.g. X/5 op 3 --> [15, 20)
5676 HiOverflow = LoOverflow = ProdOV;
5678 HiOverflow = AddWithOverflow(HiBound, Prod, DivRHS, true);
5679 } else { // (X / pos) op neg
5680 // e.g. X/5 op -3 --> [-15-4, -15+1) --> [-19, -14)
5681 HiBound = AddOne(Prod);
5682 LoOverflow = HiOverflow = ProdOV ? -1 : 0;
5684 ConstantInt* DivNeg =
5685 cast<ConstantInt>(ConstantExpr::getNeg(DivRHS));
5686 LoOverflow = AddWithOverflow(LoBound, HiBound, DivNeg, true) ? -1 : 0;
5689 } else if (DivRHS->getValue().isNegative()) { // Divisor is < 0.
5690 if (CmpRHSV == 0) { // (X / neg) op 0
5691 // e.g. X/-5 op 0 --> [-4, 5)
5692 LoBound = AddOne(DivRHS);
5693 HiBound = cast<ConstantInt>(ConstantExpr::getNeg(DivRHS));
5694 if (HiBound == DivRHS) { // -INTMIN = INTMIN
5695 HiOverflow = 1; // [INTMIN+1, overflow)
5696 HiBound = 0; // e.g. X/INTMIN = 0 --> X > INTMIN
5698 } else if (CmpRHSV.isStrictlyPositive()) { // (X / neg) op pos
5699 // e.g. X/-5 op 3 --> [-19, -14)
5700 HiBound = AddOne(Prod);
5701 HiOverflow = LoOverflow = ProdOV ? -1 : 0;
5703 LoOverflow = AddWithOverflow(LoBound, HiBound, DivRHS, true) ? -1 : 0;
5704 } else { // (X / neg) op neg
5705 LoBound = Prod; // e.g. X/-5 op -3 --> [15, 20)
5706 LoOverflow = HiOverflow = ProdOV;
5708 HiOverflow = SubWithOverflow(HiBound, Prod, DivRHS, true);
5711 // Dividing by a negative swaps the condition. LT <-> GT
5712 Pred = ICmpInst::getSwappedPredicate(Pred);
5715 Value *X = DivI->getOperand(0);
  switch (Pred) {
  default: llvm_unreachable("Unhandled icmp opcode!");
5718 case ICmpInst::ICMP_EQ:
5719 if (LoOverflow && HiOverflow)
5720 return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(ICI.getContext()));
5721 else if (HiOverflow)
5722 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
5723 ICmpInst::ICMP_UGE, X, LoBound);
5724 else if (LoOverflow)
5725 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
5726 ICmpInst::ICMP_ULT, X, HiBound);
5728 return InsertRangeTest(X, LoBound, HiBound, DivIsSigned, true, ICI);
5729 case ICmpInst::ICMP_NE:
5730 if (LoOverflow && HiOverflow)
5731 return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(ICI.getContext()));
5732 else if (HiOverflow)
5733 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
5734 ICmpInst::ICMP_ULT, X, LoBound);
5735 else if (LoOverflow)
5736 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
5737 ICmpInst::ICMP_UGE, X, HiBound);
5739 return InsertRangeTest(X, LoBound, HiBound, DivIsSigned, false, ICI);
5740 case ICmpInst::ICMP_ULT:
5741 case ICmpInst::ICMP_SLT:
5742 if (LoOverflow == +1) // Low bound is greater than input range.
5743 return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(ICI.getContext()));
5744 if (LoOverflow == -1) // Low bound is less than input range.
5745 return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(ICI.getContext()));
5746 return new ICmpInst(Pred, X, LoBound);
5747 case ICmpInst::ICMP_UGT:
5748 case ICmpInst::ICMP_SGT:
5749 if (HiOverflow == +1) // High bound greater than input range.
5750 return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(ICI.getContext()));
5751 else if (HiOverflow == -1) // High bound less than input range.
5752 return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(ICI.getContext()));
5753 if (Pred == ICmpInst::ICMP_UGT)
5754 return new ICmpInst(ICmpInst::ICMP_UGE, X, HiBound);
5756 return new ICmpInst(ICmpInst::ICMP_SGE, X, HiBound);
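  // Illustrative example: for 'icmp ult (udiv i32 %X, 5), 3' the interval is
  // [15, 20), so the ICMP_ULT case above yields 'icmp ult i32 %X, 15'
  // (X/5 < 3 exactly when X < 15).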
5761 /// visitICmpInstWithInstAndIntCst - Handle "icmp (instr, intcst)".
Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
                                                          Instruction *LHSI,
                                                          ConstantInt *RHS) {
5766 const APInt &RHSV = RHS->getValue();
5768 switch (LHSI->getOpcode()) {
5769 case Instruction::Trunc:
5770 if (ICI.isEquality() && LHSI->hasOneUse()) {
5771 // Simplify icmp eq (trunc x to i8), 42 -> icmp eq x, 42|highbits if all
5772 // of the high bits truncated out of x are known.
5773 unsigned DstBits = LHSI->getType()->getPrimitiveSizeInBits(),
5774 SrcBits = LHSI->getOperand(0)->getType()->getPrimitiveSizeInBits();
5775 APInt Mask(APInt::getHighBitsSet(SrcBits, SrcBits-DstBits));
5776 APInt KnownZero(SrcBits, 0), KnownOne(SrcBits, 0);
5777 ComputeMaskedBits(LHSI->getOperand(0), Mask, KnownZero, KnownOne);
5779 // If all the high bits are known, we can do this xform.
5780 if ((KnownZero|KnownOne).countLeadingOnes() >= SrcBits-DstBits) {
5781 // Pull in the high bits from known-ones set.
5782 APInt NewRHS(RHS->getValue());
        NewRHS.zext(SrcBits);
        NewRHS |= KnownOne;
5785 return new ICmpInst(ICI.getPredicate(), LHSI->getOperand(0),
5786 ConstantInt::get(ICI.getContext(), NewRHS));
5791 case Instruction::Xor: // (icmp pred (xor X, XorCST), CI)
5792 if (ConstantInt *XorCST = dyn_cast<ConstantInt>(LHSI->getOperand(1))) {
5793 // If this is a comparison that tests the signbit (X < 0) or (x > -1),
5795 if ((ICI.getPredicate() == ICmpInst::ICMP_SLT && RHSV == 0) ||
5796 (ICI.getPredicate() == ICmpInst::ICMP_SGT && RHSV.isAllOnesValue())) {
5797 Value *CompareVal = LHSI->getOperand(0);
5799 // If the sign bit of the XorCST is not set, there is no change to
5800 // the operation, just stop using the Xor.
5801 if (!XorCST->getValue().isNegative()) {
5802 ICI.setOperand(0, CompareVal);
5807 // Was the old condition true if the operand is positive?
5808 bool isTrueIfPositive = ICI.getPredicate() == ICmpInst::ICMP_SGT;
5810 // If so, the new one isn't.
5811 isTrueIfPositive ^= true;
5813 if (isTrueIfPositive)
        if (isTrueIfPositive)
          return new ICmpInst(ICmpInst::ICMP_SGT, CompareVal,
                              SubOne(RHS));
        else
          return new ICmpInst(ICmpInst::ICMP_SLT, CompareVal,
                              AddOne(RHS));
5821 if (LHSI->hasOneUse()) {
5822 // (icmp u/s (xor A SignBit), C) -> (icmp s/u A, (xor C SignBit))
5823 if (!ICI.isEquality() && XorCST->getValue().isSignBit()) {
5824 const APInt &SignBit = XorCST->getValue();
5825 ICmpInst::Predicate Pred = ICI.isSigned()
5826 ? ICI.getUnsignedPredicate()
5827 : ICI.getSignedPredicate();
5828 return new ICmpInst(Pred, LHSI->getOperand(0),
                              ConstantInt::get(ICI.getContext(),
                                               RHSV ^ SignBit));
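          // Worked example on i8 (illustrative): '(xor %X, 128) ult 100'
          // becomes '%X slt -28', since 100 ^ 0x80 == 228 == -28 when
          // reinterpreted as a signed i8; xor with the sign bit maps unsigned
          // order onto signed order.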
5833 // (icmp u/s (xor A ~SignBit), C) -> (icmp s/u (xor C ~SignBit), A)
5834 if (!ICI.isEquality() && XorCST->getValue().isMaxSignedValue()) {
5835 const APInt &NotSignBit = XorCST->getValue();
5836 ICmpInst::Predicate Pred = ICI.isSigned()
5837 ? ICI.getUnsignedPredicate()
5838 : ICI.getSignedPredicate();
5839 Pred = ICI.getSwappedPredicate(Pred);
5840 return new ICmpInst(Pred, LHSI->getOperand(0),
5841 ConstantInt::get(ICI.getContext(),
5842 RHSV ^ NotSignBit));
5847 case Instruction::And: // (icmp pred (and X, AndCST), RHS)
5848 if (LHSI->hasOneUse() && isa<ConstantInt>(LHSI->getOperand(1)) &&
5849 LHSI->getOperand(0)->hasOneUse()) {
5850 ConstantInt *AndCST = cast<ConstantInt>(LHSI->getOperand(1));
5852 // If the LHS is an AND of a truncating cast, we can widen the
5853 // and/compare to be the input width without changing the value
5854 // produced, eliminating a cast.
5855 if (TruncInst *Cast = dyn_cast<TruncInst>(LHSI->getOperand(0))) {
5856 // We can do this transformation if either the AND constant does not
5857 // have its sign bit set or if it is an equality comparison.
5858 // Extending a relational comparison when we're checking the sign
5859 // bit would not work.
5860 if (Cast->hasOneUse() &&
5861 (ICI.isEquality() ||
5862 (AndCST->getValue().isNonNegative() && RHSV.isNonNegative()))) {
          uint32_t BitWidth =
            cast<IntegerType>(Cast->getOperand(0)->getType())->getBitWidth();
          APInt NewCST = AndCST->getValue();
          NewCST.zext(BitWidth);
          APInt NewCI = RHSV;
          NewCI.zext(BitWidth);
          Value *NewAnd =
            Builder->CreateAnd(Cast->getOperand(0),
                               ConstantInt::get(ICI.getContext(), NewCST),
                               LHSI->getName());
          return new ICmpInst(ICI.getPredicate(), NewAnd,
                              ConstantInt::get(ICI.getContext(), NewCI));
5878 // If this is: (X >> C1) & C2 != C3 (where any shift and any compare
5879 // could exist), turn it into (X & (C2 << C1)) != (C3 << C1). This
5880 // happens a LOT in code produced by the C front-end, for bitfield
5882 BinaryOperator *Shift = dyn_cast<BinaryOperator>(LHSI->getOperand(0));
5883 if (Shift && !Shift->isShift())
5887 ShAmt = Shift ? dyn_cast<ConstantInt>(Shift->getOperand(1)) : 0;
5888 const Type *Ty = Shift ? Shift->getType() : 0; // Type of the shift.
5889 const Type *AndTy = AndCST->getType(); // Type of the and.
5891 // We can fold this as long as we can't shift unknown bits
5892 // into the mask. This can only happen with signed shift
5893 // rights, as they sign-extend.
5895 bool CanFold = Shift->isLogicalShift();
5897 // To test for the bad case of the signed shr, see if any
5898 // of the bits shifted in could be tested after the mask.
5899 uint32_t TyBits = Ty->getPrimitiveSizeInBits();
5900 int ShAmtVal = TyBits - ShAmt->getLimitedValue(TyBits);
5902 uint32_t BitWidth = AndTy->getPrimitiveSizeInBits();
5903 if ((APInt::getHighBitsSet(BitWidth, BitWidth-ShAmtVal) &
5904 AndCST->getValue()) == 0)
5910 if (Shift->getOpcode() == Instruction::Shl)
            NewCst = ConstantExpr::getLShr(RHS, ShAmt);
          else
            NewCst = ConstantExpr::getShl(RHS, ShAmt);
5915 // Check to see if we are shifting out any of the bits being
5917 if (ConstantExpr::get(Shift->getOpcode(),
5918 NewCst, ShAmt) != RHS) {
5919 // If we shifted bits out, the fold is not going to work out.
5920 // As a special case, check to see if this means that the
5921 // result is always true or false now.
5922 if (ICI.getPredicate() == ICmpInst::ICMP_EQ)
5923 return ReplaceInstUsesWith(ICI,
5924 ConstantInt::getFalse(ICI.getContext()));
5925 if (ICI.getPredicate() == ICmpInst::ICMP_NE)
5926 return ReplaceInstUsesWith(ICI,
5927 ConstantInt::getTrue(ICI.getContext()));
5929 ICI.setOperand(1, NewCst);
5930 Constant *NewAndCST;
5931 if (Shift->getOpcode() == Instruction::Shl)
            NewAndCST = ConstantExpr::getLShr(AndCST, ShAmt);
          else
            NewAndCST = ConstantExpr::getShl(AndCST, ShAmt);
5935 LHSI->setOperand(1, NewAndCST);
5936 LHSI->setOperand(0, Shift->getOperand(0));
5937 Worklist.Add(Shift); // Shift is dead.
5943 // Turn ((X >> Y) & C) == 0 into (X & (C << Y)) == 0. The later is
5944 // preferable because it allows the C<<Y expression to be hoisted out
5945 // of a loop if Y is invariant and X is not.
5946 if (Shift && Shift->hasOneUse() && RHSV == 0 &&
5947 ICI.isEquality() && !Shift->isArithmeticShift() &&
5948 !isa<Constant>(Shift->getOperand(0))) {
5951 if (Shift->getOpcode() == Instruction::LShr) {
5952 NS = Builder->CreateShl(AndCST, Shift->getOperand(1), "tmp");
5954 // Insert a logical shift.
5955 NS = Builder->CreateLShr(AndCST, Shift->getOperand(1), "tmp");
5958 // Compute X & (C << Y).
        Value *NewAnd =
          Builder->CreateAnd(Shift->getOperand(0), NS, LHSI->getName());

        ICI.setOperand(0, NewAnd);
        return &ICI;
5967 // Try to optimize things like "A[i]&42 == 0" to index computations.
5968 if (LoadInst *LI = dyn_cast<LoadInst>(LHSI->getOperand(0))) {
5969 if (GetElementPtrInst *GEP =
5970 dyn_cast<GetElementPtrInst>(LI->getOperand(0)))
5971 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
5972 if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
5973 !LI->isVolatile() && isa<ConstantInt>(LHSI->getOperand(1))) {
5974 ConstantInt *C = cast<ConstantInt>(LHSI->getOperand(1));
5975 if (Instruction *Res = FoldCmpLoadFromIndexedGlobal(GEP, GV,ICI, C))
5981 case Instruction::Or: {
    if (!ICI.isEquality() || !RHS->isNullValue() || !LHSI->hasOneUse())
      break;

    Value *P, *Q;
    if (match(LHSI, m_Or(m_PtrToInt(m_Value(P)), m_PtrToInt(m_Value(Q))))) {
5986 // Simplify icmp eq (or (ptrtoint P), (ptrtoint Q)), 0
5987 // -> and (icmp eq P, null), (icmp eq Q, null).
5989 Value *ICIP = Builder->CreateICmp(ICI.getPredicate(), P,
5990 Constant::getNullValue(P->getType()));
5991 Value *ICIQ = Builder->CreateICmp(ICI.getPredicate(), Q,
5992 Constant::getNullValue(Q->getType()));
5994 if (ICI.getPredicate() == ICmpInst::ICMP_EQ)
5995 Op = BinaryOperator::CreateAnd(ICIP, ICIQ);
5997 Op = BinaryOperator::CreateOr(ICIP, ICIQ);
6003 case Instruction::Shl: { // (icmp pred (shl X, ShAmt), CI)
    ConstantInt *ShAmt = dyn_cast<ConstantInt>(LHSI->getOperand(1));
    if (!ShAmt) break;

    uint32_t TypeBits = RHSV.getBitWidth();

    // Check that the shift amount is in range.  If not, don't perform
    // undefined shifts.  When the shift is visited it will be
    // simplified.
    if (ShAmt->uge(TypeBits))
      break;
6015 if (ICI.isEquality()) {
6016 // If we are comparing against bits always shifted out, the
6017 // comparison cannot succeed.
6019 ConstantExpr::getShl(ConstantExpr::getLShr(RHS, ShAmt),
6021 if (Comp != RHS) {// Comparing against a bit that we know is zero.
6022 bool IsICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;
6024 ConstantInt::get(Type::getInt1Ty(ICI.getContext()), IsICMP_NE);
6025 return ReplaceInstUsesWith(ICI, Cst);
6028 if (LHSI->hasOneUse()) {
6029 // Otherwise strength reduce the shift into an and.
6030 uint32_t ShAmtVal = (uint32_t)ShAmt->getLimitedValue(TypeBits);
        Constant *Mask =
          ConstantInt::get(ICI.getContext(), APInt::getLowBitsSet(TypeBits,
                                                        TypeBits-ShAmtVal));

        Value *And =
          Builder->CreateAnd(LHSI->getOperand(0), Mask, LHSI->getName()+".mask");
6037 return new ICmpInst(ICI.getPredicate(), And,
6038 ConstantInt::get(ICI.getContext(),
6039 RHSV.lshr(ShAmtVal)));
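      // Illustrative example: 'icmp eq (shl i32 %X, 3), 40' becomes
      // 'icmp eq (and i32 %X, 0x1fffffff), 5', since only the low 29 bits of
      // %X affect the shifted value and 40 >> 3 == 5.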
6043 // Otherwise, if this is a comparison of the sign bit, simplify to and/test.
6044 bool TrueIfSigned = false;
6045 if (LHSI->hasOneUse() &&
6046 isSignBitCheck(ICI.getPredicate(), RHS, TrueIfSigned)) {
6047 // (X << 31) <s 0 --> (X&1) != 0
6048 Constant *Mask = ConstantInt::get(ICI.getContext(), APInt(TypeBits, 1) <<
6049 (TypeBits-ShAmt->getZExtValue()-1));
      Value *And =
        Builder->CreateAnd(LHSI->getOperand(0), Mask, LHSI->getName()+".mask");
6052 return new ICmpInst(TrueIfSigned ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ,
6053 And, Constant::getNullValue(And->getType()));
6058 case Instruction::LShr: // (icmp pred (shr X, ShAmt), CI)
6059 case Instruction::AShr: {
6060 // Only handle equality comparisons of shift-by-constant.
6061 ConstantInt *ShAmt = dyn_cast<ConstantInt>(LHSI->getOperand(1));
6062 if (!ShAmt || !ICI.isEquality()) break;
6064 // Check that the shift amount is in range. If not, don't perform
6065 // undefined shifts. When the shift is visited it will be
6067 uint32_t TypeBits = RHSV.getBitWidth();
6068 if (ShAmt->uge(TypeBits))
6071 uint32_t ShAmtVal = (uint32_t)ShAmt->getLimitedValue(TypeBits);
6073 // If we are comparing against bits always shifted out, the
6074 // comparison cannot succeed.
6075 APInt Comp = RHSV << ShAmtVal;
6076 if (LHSI->getOpcode() == Instruction::LShr)
6077 Comp = Comp.lshr(ShAmtVal);
    else
      Comp = Comp.ashr(ShAmtVal);
6081 if (Comp != RHSV) { // Comparing against a bit that we know is zero.
6082 bool IsICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;
      Constant *Cst = ConstantInt::get(Type::getInt1Ty(ICI.getContext()),
                                       IsICMP_NE);
6085 return ReplaceInstUsesWith(ICI, Cst);
6088 // Otherwise, check to see if the bits shifted out are known to be zero.
6089 // If so, we can compare against the unshifted value:
6090 // (X & 4) >> 1 == 2 --> (X & 4) == 4.
6091 if (LHSI->hasOneUse() &&
6092 MaskedValueIsZero(LHSI->getOperand(0),
6093 APInt::getLowBitsSet(Comp.getBitWidth(), ShAmtVal))) {
6094 return new ICmpInst(ICI.getPredicate(), LHSI->getOperand(0),
6095 ConstantExpr::getShl(RHS, ShAmt));
6098 if (LHSI->hasOneUse()) {
6099 // Otherwise strength reduce the shift into an and.
6100 APInt Val(APInt::getHighBitsSet(TypeBits, TypeBits - ShAmtVal));
6101 Constant *Mask = ConstantInt::get(ICI.getContext(), Val);
6103 Value *And = Builder->CreateAnd(LHSI->getOperand(0),
6104 Mask, LHSI->getName()+".mask");
6105 return new ICmpInst(ICI.getPredicate(), And,
6106 ConstantExpr::getShl(RHS, ShAmt));
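      // Illustrative example: 'icmp eq (lshr i32 %X, 4), 3' becomes
      // 'icmp eq (and i32 %X, 0xfffffff0), 48', i.e. X is in [48, 64) exactly
      // when its high 28 bits equal 3 << 4.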
6111 case Instruction::SDiv:
6112 case Instruction::UDiv:
6113 // Fold: icmp pred ([us]div X, C1), C2 -> range test
6114 // Fold this div into the comparison, producing a range check.
6115 // Determine, based on the divide type, what the range is being
6116 // checked. If there is an overflow on the low or high side, remember
6117 // it, otherwise compute the range [low, hi) bounding the new value.
6118 // See: InsertRangeTest above for the kinds of replacements possible.
6119 if (ConstantInt *DivRHS = dyn_cast<ConstantInt>(LHSI->getOperand(1)))
      if (Instruction *R = FoldICmpDivCst(ICI, cast<BinaryOperator>(LHSI),
                                          DivRHS))
        return R;
    break;
6125 case Instruction::Add:
6126 // Fold: icmp pred (add X, C1), C2
6127 if (!ICI.isEquality()) {
6128 ConstantInt *LHSC = dyn_cast<ConstantInt>(LHSI->getOperand(1));
6130 const APInt &LHSV = LHSC->getValue();
6132 ConstantRange CR = ICI.makeConstantRange(ICI.getPredicate(), RHSV)
6135 if (ICI.isSigned()) {
6136 if (CR.getLower().isSignBit()) {
6137 return new ICmpInst(ICmpInst::ICMP_SLT, LHSI->getOperand(0),
6138 ConstantInt::get(ICI.getContext(),CR.getUpper()));
6139 } else if (CR.getUpper().isSignBit()) {
6140 return new ICmpInst(ICmpInst::ICMP_SGE, LHSI->getOperand(0),
6141 ConstantInt::get(ICI.getContext(),CR.getLower()));
6144 if (CR.getLower().isMinValue()) {
6145 return new ICmpInst(ICmpInst::ICMP_ULT, LHSI->getOperand(0),
6146 ConstantInt::get(ICI.getContext(),CR.getUpper()));
6147 } else if (CR.getUpper().isMinValue()) {
6148 return new ICmpInst(ICmpInst::ICMP_UGE, LHSI->getOperand(0),
6149 ConstantInt::get(ICI.getContext(),CR.getLower()));
6156 // Simplify icmp_eq and icmp_ne instructions with integer constant RHS.
6157 if (ICI.isEquality()) {
6158 bool isICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;
6160 // If the first operand is (add|sub|and|or|xor|rem) with a constant, and
6161 // the second operand is a constant, simplify a bit.
6162 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(LHSI)) {
6163 switch (BO->getOpcode()) {
6164 case Instruction::SRem:
6165 // If we have a signed (X % (2^c)) == 0, turn it into an unsigned one.
        if (RHSV == 0 && isa<ConstantInt>(BO->getOperand(1)) && BO->hasOneUse()) {
6167 const APInt &V = cast<ConstantInt>(BO->getOperand(1))->getValue();
6168 if (V.sgt(APInt(V.getBitWidth(), 1)) && V.isPowerOf2()) {
            Value *NewRem =
              Builder->CreateURem(BO->getOperand(0), BO->getOperand(1),
                                  BO->getName());
6172 return new ICmpInst(ICI.getPredicate(), NewRem,
6173 Constant::getNullValue(BO->getType()));
6177 case Instruction::Add:
6178 // Replace ((add A, B) != C) with (A != C-B) if B & C are constants.
6179 if (ConstantInt *BOp1C = dyn_cast<ConstantInt>(BO->getOperand(1))) {
6180 if (BO->hasOneUse())
6181 return new ICmpInst(ICI.getPredicate(), BO->getOperand(0),
6182 ConstantExpr::getSub(RHS, BOp1C));
6183 } else if (RHSV == 0) {
6184 // Replace ((add A, B) != 0) with (A != -B) if A or B is
6185 // efficiently invertible, or if the add has just this one use.
6186 Value *BOp0 = BO->getOperand(0), *BOp1 = BO->getOperand(1);
6188 if (Value *NegVal = dyn_castNegVal(BOp1))
6189 return new ICmpInst(ICI.getPredicate(), BOp0, NegVal);
6190 else if (Value *NegVal = dyn_castNegVal(BOp0))
6191 return new ICmpInst(ICI.getPredicate(), NegVal, BOp1);
6192 else if (BO->hasOneUse()) {
6193 Value *Neg = Builder->CreateNeg(BOp1);
6195 return new ICmpInst(ICI.getPredicate(), BOp0, Neg);
6199 case Instruction::Xor:
6200 // For the xor case, we can xor two constants together, eliminating
6201 // the explicit xor.
6202 if (Constant *BOC = dyn_cast<Constant>(BO->getOperand(1)))
6203 return new ICmpInst(ICI.getPredicate(), BO->getOperand(0),
6204 ConstantExpr::getXor(RHS, BOC));
6207 case Instruction::Sub:
6208 // Replace (([sub|xor] A, B) != 0) with (A != B)
        if (RHSV == 0)
          return new ICmpInst(ICI.getPredicate(), BO->getOperand(0),
                              BO->getOperand(1));
        break;
6214 case Instruction::Or:
6215 // If bits are being or'd in that are not present in the constant we
6216 // are comparing against, then the comparison could never succeed!
6217 if (Constant *BOC = dyn_cast<Constant>(BO->getOperand(1))) {
6218 Constant *NotCI = ConstantExpr::getNot(RHS);
6219 if (!ConstantExpr::getAnd(BOC, NotCI)->isNullValue())
6220 return ReplaceInstUsesWith(ICI,
                             ConstantInt::get(Type::getInt1Ty(ICI.getContext()),
                                              isICMP_NE));
6226 case Instruction::And:
6227 if (ConstantInt *BOC = dyn_cast<ConstantInt>(BO->getOperand(1))) {
6228 // If bits are being compared against that are and'd out, then the
6229 // comparison can never succeed!
6230 if ((RHSV & ~BOC->getValue()) != 0)
6231 return ReplaceInstUsesWith(ICI,
                             ConstantInt::get(Type::getInt1Ty(ICI.getContext()),
                                              isICMP_NE));
6235 // If we have ((X & C) == C), turn it into ((X & C) != 0).
6236 if (RHS == BOC && RHSV.isPowerOf2())
6237 return new ICmpInst(isICMP_NE ? ICmpInst::ICMP_EQ :
6238 ICmpInst::ICMP_NE, LHSI,
6239 Constant::getNullValue(RHS->getType()));
6241 // Replace (and X, (1 << size(X)-1) != 0) with x s< 0
6242 if (BOC->getValue().isSignBit()) {
6243 Value *X = BO->getOperand(0);
6244 Constant *Zero = Constant::getNullValue(X->getType());
6245 ICmpInst::Predicate pred = isICMP_NE ?
6246 ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGE;
6247 return new ICmpInst(pred, X, Zero);
6250 // ((X & ~7) == 0) --> X < 8
6251 if (RHSV == 0 && isHighOnes(BOC)) {
6252 Value *X = BO->getOperand(0);
6253 Constant *NegX = ConstantExpr::getNeg(BOC);
6254 ICmpInst::Predicate pred = isICMP_NE ?
6255 ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT;
6256 return new ICmpInst(pred, X, NegX);
6261 } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(LHSI)) {
6262 // Handle icmp {eq|ne} <intrinsic>, intcst.
6263 if (II->getIntrinsicID() == Intrinsic::bswap) {
6265 ICI.setOperand(0, II->getOperand(1));
6266 ICI.setOperand(1, ConstantInt::get(II->getContext(), RHSV.byteSwap()));
6274 /// visitICmpInstWithCastAndCast - Handle icmp (cast x to y), (cast/cst).
6275 /// We only handle extending casts so far.
6277 Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) {
6278 const CastInst *LHSCI = cast<CastInst>(ICI.getOperand(0));
6279 Value *LHSCIOp = LHSCI->getOperand(0);
6280 const Type *SrcTy = LHSCIOp->getType();
  const Type *DestTy = LHSCI->getType();
  Value *RHSCIOp;
6284 // Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
6285 // integer type is the same size as the pointer type.
6286 if (TD && LHSCI->getOpcode() == Instruction::PtrToInt &&
6287 TD->getPointerSizeInBits() ==
6288 cast<IntegerType>(DestTy)->getBitWidth()) {
    Value *RHSOp = 0;
    if (Constant *RHSC = dyn_cast<Constant>(ICI.getOperand(1))) {
6291 RHSOp = ConstantExpr::getIntToPtr(RHSC, SrcTy);
6292 } else if (PtrToIntInst *RHSC = dyn_cast<PtrToIntInst>(ICI.getOperand(1))) {
6293 RHSOp = RHSC->getOperand(0);
6294 // If the pointer types don't match, insert a bitcast.
6295 if (LHSCIOp->getType() != RHSOp->getType())
6296 RHSOp = Builder->CreateBitCast(RHSOp, LHSCIOp->getType());
6300 return new ICmpInst(ICI.getPredicate(), LHSCIOp, RHSOp);
6303 // The code below only handles extension cast instructions, so far.
  if (LHSCI->getOpcode() != Instruction::ZExt &&
      LHSCI->getOpcode() != Instruction::SExt)
    return 0;
6309 bool isSignedExt = LHSCI->getOpcode() == Instruction::SExt;
6310 bool isSignedCmp = ICI.isSigned();
6312 if (CastInst *CI = dyn_cast<CastInst>(ICI.getOperand(1))) {
6313 // Not an extension from the same type?
6314 RHSCIOp = CI->getOperand(0);
    if (RHSCIOp->getType() != LHSCIOp->getType())
      return 0;
6318 // If the signedness of the two casts doesn't agree (i.e. one is a sext
6319 // and the other is a zext), then we can't handle this.
    if (CI->getOpcode() != LHSCI->getOpcode())
      return 0;
6323 // Deal with equality cases early.
6324 if (ICI.isEquality())
6325 return new ICmpInst(ICI.getPredicate(), LHSCIOp, RHSCIOp);
6327 // A signed comparison of sign extended values simplifies into a
6328 // signed comparison.
6329 if (isSignedCmp && isSignedExt)
6330 return new ICmpInst(ICI.getPredicate(), LHSCIOp, RHSCIOp);
6332 // The other three cases all fold into an unsigned comparison.
6333 return new ICmpInst(ICI.getUnsignedPredicate(), LHSCIOp, RHSCIOp);
6336 // If we aren't dealing with a constant on the RHS, exit early
  ConstantInt *CI = dyn_cast<ConstantInt>(ICI.getOperand(1));
  if (!CI)
    return 0;
6341 // Compute the constant that would happen if we truncated to SrcTy then
6342 // reextended to DestTy.
6343 Constant *Res1 = ConstantExpr::getTrunc(CI, SrcTy);
  Constant *Res2 = ConstantExpr::getCast(LHSCI->getOpcode(),
                                         Res1, DestTy);
  // If the re-extended constant didn't change...
  if (Res2 == CI) {
    // Deal with equality cases early.
6350 if (ICI.isEquality())
6351 return new ICmpInst(ICI.getPredicate(), LHSCIOp, Res1);
6353 // A signed comparison of sign extended values simplifies into a
6354 // signed comparison.
6355 if (isSignedExt && isSignedCmp)
6356 return new ICmpInst(ICI.getPredicate(), LHSCIOp, Res1);
6358 // The other three cases all fold into an unsigned comparison.
6359 return new ICmpInst(ICI.getUnsignedPredicate(), LHSCIOp, Res1);
6362 // The re-extended constant changed so the constant cannot be represented
6363 // in the shorter type. Consequently, we cannot emit a simple comparison.
6365 // First, handle some easy cases. We know the result cannot be equal at this
6366 // point so handle the ICI.isEquality() cases
6367 if (ICI.getPredicate() == ICmpInst::ICMP_EQ)
6368 return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(ICI.getContext()));
6369 if (ICI.getPredicate() == ICmpInst::ICMP_NE)
6370 return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(ICI.getContext()));
6372 // Evaluate the comparison for LT (we invert for GT below). LE and GE cases
6373 // should have been folded away previously and not enter in here.
  Value *Result;
  if (isSignedCmp) {
    // We're performing a signed comparison.
    if (cast<ConstantInt>(CI)->getValue().isNegative())
      Result = ConstantInt::getFalse(ICI.getContext()); // X < (small) --> false
    else
      Result = ConstantInt::getTrue(ICI.getContext());  // X < (large) --> true
  } else {
    // We're performing an unsigned comparison.
    if (isSignedExt) {
      // We're performing an unsigned comp with a sign extended value.
      // This is true if the input is >= 0. [aka >s -1]
      Constant *NegOne = Constant::getAllOnesValue(SrcTy);
      Result = Builder->CreateICmpSGT(LHSCIOp, NegOne, ICI.getName());
    } else {
      // Unsigned extend & unsigned compare -> always true.
      Result = ConstantInt::getTrue(ICI.getContext());
    }
  }
6394 // Finally, return the value computed.
6395 if (ICI.getPredicate() == ICmpInst::ICMP_ULT ||
6396 ICI.getPredicate() == ICmpInst::ICMP_SLT)
6397 return ReplaceInstUsesWith(ICI, Result);
6399 assert((ICI.getPredicate()==ICmpInst::ICMP_UGT ||
6400 ICI.getPredicate()==ICmpInst::ICMP_SGT) &&
6401 "ICmp should be folded!");
6402 if (Constant *CI = dyn_cast<Constant>(Result))
6403 return ReplaceInstUsesWith(ICI, ConstantExpr::getNot(CI));
6404 return BinaryOperator::CreateNot(Result);
6407 Instruction *InstCombiner::visitShl(BinaryOperator &I) {
6408 return commonShiftTransforms(I);
6411 Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
6412 return commonShiftTransforms(I);
6415 Instruction *InstCombiner::visitAShr(BinaryOperator &I) {
6416 if (Instruction *R = commonShiftTransforms(I))
6419 Value *Op0 = I.getOperand(0);
6421 // ashr int -1, X = -1 (for any arithmetic shift rights of ~0)
6422 if (ConstantInt *CSI = dyn_cast<ConstantInt>(Op0))
6423 if (CSI->isAllOnesValue())
6424 return ReplaceInstUsesWith(I, CSI);
6426 // See if we can turn a signed shr into an unsigned shr.
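// e.g. if the sign bit of %x is known to be zero, ashr i32 %x, %c --> lshr i32 %x, %c.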
6427 if (MaskedValueIsZero(Op0,
6428 APInt::getSignBit(I.getType()->getScalarSizeInBits())))
6429 return BinaryOperator::CreateLShr(Op0, I.getOperand(1));
6431 // Arithmetic shifting an all-sign-bit value is a no-op.
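// e.g. %s = ashr i32 %x, 31 is all sign bits (0 or -1), so ashr i32 %s, %c --> %s.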
6432 unsigned NumSignBits = ComputeNumSignBits(Op0);
6433 if (NumSignBits == Op0->getType()->getScalarSizeInBits())
6434 return ReplaceInstUsesWith(I, Op0);
6439 Instruction *InstCombiner::commonShiftTransforms(BinaryOperator &I) {
6440 assert(I.getOperand(1)->getType() == I.getOperand(0)->getType());
6441 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
6443 // shl X, 0 == X and shr X, 0 == X
6444 // shl 0, X == 0 and shr 0, X == 0
6445 if (Op1 == Constant::getNullValue(Op1->getType()) ||
6446 Op0 == Constant::getNullValue(Op0->getType()))
6447 return ReplaceInstUsesWith(I, Op0);
6449 if (isa<UndefValue>(Op0)) {
6450 if (I.getOpcode() == Instruction::AShr) // undef >>s X -> undef
6451 return ReplaceInstUsesWith(I, Op0);
6452 else // undef << X -> 0, undef >>u X -> 0
6453 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
6455 if (isa<UndefValue>(Op1)) {
6456 if (I.getOpcode() == Instruction::AShr) // X >>s undef -> X
6457 return ReplaceInstUsesWith(I, Op0);
6458 else // X << undef, X >>u undef -> 0
6459 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
6462 // See if we can fold away this shift.
6463 if (SimplifyDemandedInstructionBits(I))
6466 // Try to fold constant and into select arguments.
6467 if (isa<Constant>(Op0))
6468 if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
6469 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
6472 if (ConstantInt *CUI = dyn_cast<ConstantInt>(Op1))
6473 if (Instruction *Res = FoldShiftByConstant(Op0, CUI, I))
6478 Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
6479 BinaryOperator &I) {
6480 bool isLeftShift = I.getOpcode() == Instruction::Shl;
6482 // See if we can simplify any instructions used by the instruction whose sole
6483 // purpose is to compute bits we don't care about.
6484 uint32_t TypeBits = Op0->getType()->getScalarSizeInBits();
6486 // shl i32 X, 32 = 0 and srl i8 Y, 9 = 0, ...; a logical shift by >= the bit width
6487 // folds to zero, but an over-wide ashr is not eliminated, just clamped to TypeBits-1.
6489 if (Op1->uge(TypeBits)) {
6490 if (I.getOpcode() != Instruction::AShr)
6491 return ReplaceInstUsesWith(I, Constant::getNullValue(Op0->getType()));
6493 I.setOperand(1, ConstantInt::get(I.getType(), TypeBits-1));
6498 // ((X*C1) << C2) == (X * (C1 << C2))
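// e.g. shl (mul i32 %x, 3), 2 --> mul i32 %x, 12.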
6499 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Op0))
6500 if (BO->getOpcode() == Instruction::Mul && isLeftShift)
6501 if (Constant *BOOp = dyn_cast<Constant>(BO->getOperand(1)))
6502 return BinaryOperator::CreateMul(BO->getOperand(0),
6503 ConstantExpr::getShl(BOOp, Op1));
6505 // Try to fold constant and into select arguments.
6506 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
6507 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
6509 if (isa<PHINode>(Op0))
6510 if (Instruction *NV = FoldOpIntoPhi(I))
6513 // Fold shift2(trunc(shift1(x,c1)), c2) -> trunc(shift2(shift1(x,c1),c2))
6514 if (TruncInst *TI = dyn_cast<TruncInst>(Op0)) {
6515 Instruction *TrOp = dyn_cast<Instruction>(TI->getOperand(0));
6516 // If 'shift2' is an ashr, we would have to get the sign bit into a funny
6517 // place. Don't try to do this transformation in this case. Also, we
6518 // require that the input operand is a shift-by-constant so that we have
6519 // confidence that the shifts will get folded together. We could do this
6520 // xform in more cases, but it is unlikely to be profitable.
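// e.g. (illustrative) with %s = lshr i32 %x, 8:
//   lshr i16 (trunc i32 %s to i16), 2
//     --> trunc i32 (and i32 (lshr i32 %s, 2), 0x3FFF) to i16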
6521 if (TrOp && I.isLogicalShift() && TrOp->isShift() &&
6522 isa<ConstantInt>(TrOp->getOperand(1))) {
6523 // Okay, we'll do this xform. Make the shift of shift.
6524 Constant *ShAmt = ConstantExpr::getZExt(Op1, TrOp->getType());
6525 // (shift2 (shift1 & 0x00FF), c2)
6526 Value *NSh = Builder->CreateBinOp(I.getOpcode(), TrOp, ShAmt,I.getName());
6528 // For logical shifts, the truncation has the effect of making the high
6529 // part of the register be zeros. Emulate this by inserting an AND to
6530 // clear the top bits as needed. This 'and' will usually be zapped by
6531 // other xforms later if dead.
6532 unsigned SrcSize = TrOp->getType()->getScalarSizeInBits();
6533 unsigned DstSize = TI->getType()->getScalarSizeInBits();
6534 APInt MaskV(APInt::getLowBitsSet(SrcSize, DstSize));
6536 // The mask we constructed says what the trunc would do if occurring
6537 // between the shifts. We want to know the effect *after* the second
6538 // shift. We know that it is a logical shift by a constant, so adjust the
6539 // mask as appropriate.
6540 if (I.getOpcode() == Instruction::Shl)
6541 MaskV <<= Op1->getZExtValue();
6543 assert(I.getOpcode() == Instruction::LShr && "Unknown logical shift");
6544 MaskV = MaskV.lshr(Op1->getZExtValue());
6548 Value *And = Builder->CreateAnd(NSh,
6549 ConstantInt::get(I.getContext(), MaskV),
6552 // Return the value truncated to the interesting size.
6553 return new TruncInst(And, I.getType());
6557 if (Op0->hasOneUse()) {
6558 if (BinaryOperator *Op0BO = dyn_cast<BinaryOperator>(Op0)) {
6559 // Turn ((X >> C) + Y) << C -> (X + (Y << C)) & (~0 << C)
6562 switch (Op0BO->getOpcode()) {
6564 case Instruction::Add:
6565 case Instruction::And:
6566 case Instruction::Or:
6567 case Instruction::Xor: {
6568 // These operators commute.
6569 // Turn (Y + (X >> C)) << C -> (X + (Y << C)) & (~0 << C)
6570 if (isLeftShift && Op0BO->getOperand(1)->hasOneUse() &&
6571 match(Op0BO->getOperand(1), m_Shr(m_Value(V1),
6572 m_Specific(Op1)))) {
6573 Value *YS = // (Y << C)
6574 Builder->CreateShl(Op0BO->getOperand(0), Op1, Op0BO->getName());
6576 Value *X = Builder->CreateBinOp(Op0BO->getOpcode(), YS, V1,
6577 Op0BO->getOperand(1)->getName());
6578 uint32_t Op1Val = Op1->getLimitedValue(TypeBits);
6579 return BinaryOperator::CreateAnd(X, ConstantInt::get(I.getContext(),
6580 APInt::getHighBitsSet(TypeBits, TypeBits-Op1Val)));
6583 // Turn (Y + ((X >> C) & CC)) << C -> ((X & (CC << C)) + (Y << C))
6584 Value *Op0BOOp1 = Op0BO->getOperand(1);
6585 if (isLeftShift && Op0BOOp1->hasOneUse() &&
6587 m_And(m_Shr(m_Value(V1), m_Specific(Op1)),
6588 m_ConstantInt(CC))) &&
6589 cast<BinaryOperator>(Op0BOOp1)->getOperand(0)->hasOneUse()) {
6590 Value *YS = // (Y << C)
6591 Builder->CreateShl(Op0BO->getOperand(0), Op1,
6594 Value *XM = Builder->CreateAnd(V1, ConstantExpr::getShl(CC, Op1),
6595 V1->getName()+".mask");
6596 return BinaryOperator::Create(Op0BO->getOpcode(), YS, XM);
6601 case Instruction::Sub: {
6602 // Turn ((X >> C) + Y) << C -> (X + (Y << C)) & (~0 << C)
6603 if (isLeftShift && Op0BO->getOperand(0)->hasOneUse() &&
6604 match(Op0BO->getOperand(0), m_Shr(m_Value(V1),
6605 m_Specific(Op1)))) {
6606 Value *YS = // (Y << C)
6607 Builder->CreateShl(Op0BO->getOperand(1), Op1, Op0BO->getName());
6609 Value *X = Builder->CreateBinOp(Op0BO->getOpcode(), V1, YS,
6610 Op0BO->getOperand(0)->getName());
6611 uint32_t Op1Val = Op1->getLimitedValue(TypeBits);
6612 return BinaryOperator::CreateAnd(X, ConstantInt::get(I.getContext(),
6613 APInt::getHighBitsSet(TypeBits, TypeBits-Op1Val)));
6616 // Turn (((X >> C)&CC) + Y) << C -> (X + (Y << C)) & (CC << C)
6617 if (isLeftShift && Op0BO->getOperand(0)->hasOneUse() &&
6618 match(Op0BO->getOperand(0),
6619 m_And(m_Shr(m_Value(V1), m_Value(V2)),
6620 m_ConstantInt(CC))) && V2 == Op1 &&
6621 cast<BinaryOperator>(Op0BO->getOperand(0))
6622 ->getOperand(0)->hasOneUse()) {
6623 Value *YS = // (Y << C)
6624 Builder->CreateShl(Op0BO->getOperand(1), Op1, Op0BO->getName());
6626 Value *XM = Builder->CreateAnd(V1, ConstantExpr::getShl(CC, Op1),
6627 V1->getName()+".mask");
6629 return BinaryOperator::Create(Op0BO->getOpcode(), XM, YS);
6637 // If the operand is a bitwise operator with a constant RHS, and the
6638 // shift is the only use, we can pull it out of the shift.
6639 if (ConstantInt *Op0C = dyn_cast<ConstantInt>(Op0BO->getOperand(1))) {
6640 bool isValid = true; // Valid only for And, Or, Xor
6641 bool highBitSet = false; // Transform if high bit of constant set?
6643 switch (Op0BO->getOpcode()) {
6644 default: isValid = false; break; // Do not perform transform!
6645 case Instruction::Add:
6646 isValid = isLeftShift;
6648 case Instruction::Or:
6649 case Instruction::Xor:
6652 case Instruction::And:
6657 // If this is a signed shift right, and the high bit is modified
6658 // by the logical operation, do not perform the transformation.
6659 // The highBitSet boolean indicates the value of the high bit of
6660 // the constant which would cause it to be modified for this operation.
6663 if (isValid && I.getOpcode() == Instruction::AShr)
6664 isValid = Op0C->getValue()[TypeBits-1] == highBitSet;
6667 Constant *NewRHS = ConstantExpr::get(I.getOpcode(), Op0C, Op1);
6670 Builder->CreateBinOp(I.getOpcode(), Op0BO->getOperand(0), Op1);
6671 NewShift->takeName(Op0BO);
6673 return BinaryOperator::Create(Op0BO->getOpcode(), NewShift,
6680 // Find out if this is a shift of a shift by a constant.
6681 BinaryOperator *ShiftOp = dyn_cast<BinaryOperator>(Op0);
6682 if (ShiftOp && !ShiftOp->isShift())
6685 if (ShiftOp && isa<ConstantInt>(ShiftOp->getOperand(1))) {
6686 ConstantInt *ShiftAmt1C = cast<ConstantInt>(ShiftOp->getOperand(1));
6687 uint32_t ShiftAmt1 = ShiftAmt1C->getLimitedValue(TypeBits);
6688 uint32_t ShiftAmt2 = Op1->getLimitedValue(TypeBits);
6689 assert(ShiftAmt2 != 0 && "Should have been simplified earlier");
6690 if (ShiftAmt1 == 0) return 0; // Will be simplified in the future.
6691 Value *X = ShiftOp->getOperand(0);
6693 uint32_t AmtSum = ShiftAmt1+ShiftAmt2; // Fold into one big shift.
6695 const IntegerType *Ty = cast<IntegerType>(I.getType());
6697 // Check for (X << c1) << c2 and (X >> c1) >> c2
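// e.g. shl (shl i32 %x, 2), 3 --> shl i32 %x, 5; lshr (lshr i32 %x, 20), 20 --> 0 (sum >= 32).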
6698 if (I.getOpcode() == ShiftOp->getOpcode()) {
6699 // If this is an oversized composite shift, then unsigned shifts fold to 0, and ashr
6700 // saturates to a shift by TypeBits-1.
6701 if (AmtSum >= TypeBits) {
6702 if (I.getOpcode() != Instruction::AShr)
6703 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
6704 AmtSum = TypeBits-1; // Saturate to 31 for i32 ashr.
6707 return BinaryOperator::Create(I.getOpcode(), X,
6708 ConstantInt::get(Ty, AmtSum));
6711 if (ShiftOp->getOpcode() == Instruction::LShr &&
6712 I.getOpcode() == Instruction::AShr) {
6713 if (AmtSum >= TypeBits)
6714 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
6716 // ((X >>u C1) >>s C2) -> (X >>u (C1+C2)) since C1 != 0.
6717 return BinaryOperator::CreateLShr(X, ConstantInt::get(Ty, AmtSum));
6720 if (ShiftOp->getOpcode() == Instruction::AShr &&
6721 I.getOpcode() == Instruction::LShr) {
6722 // ((X >>s C1) >>u C2) -> ((X >>s (C1+C2)) & mask) since C1 != 0.
6723 if (AmtSum >= TypeBits)
6724 AmtSum = TypeBits-1;
6726 Value *Shift = Builder->CreateAShr(X, ConstantInt::get(Ty, AmtSum));
6728 APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt2));
6729 return BinaryOperator::CreateAnd(Shift,
6730 ConstantInt::get(I.getContext(), Mask));
6733 // Okay, if we get here, one shift must be left, and the other shift must be
6734 // right. See if the amounts are equal.
6735 if (ShiftAmt1 == ShiftAmt2) {
6736 // If we have ((X >>? C) << C), turn this into X & (-1 << C).
6737 if (I.getOpcode() == Instruction::Shl) {
6738 APInt Mask(APInt::getHighBitsSet(TypeBits, TypeBits - ShiftAmt1));
6739 return BinaryOperator::CreateAnd(X,
6740 ConstantInt::get(I.getContext(),Mask));
6742 // If we have ((X << C) >>u C), turn this into X & (-1 >>u C).
6743 if (I.getOpcode() == Instruction::LShr) {
6744 APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt1));
6745 return BinaryOperator::CreateAnd(X,
6746 ConstantInt::get(I.getContext(), Mask));
6748 // We can simplify ((X << C) >>s C) into a trunc + sext.
6749 // NOTE: we could do this for any C, but that would make 'unusual' integer
6750 // types. For now, just stick to ones well-supported by the code generators.
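// e.g. ashr (shl i32 %x, 24), 24 --> sext (trunc i32 %x to i8) to i32.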
6752 const Type *SExtType = 0;
6753 switch (Ty->getBitWidth() - ShiftAmt1) {
6760 SExtType = IntegerType::get(I.getContext(),
6761 Ty->getBitWidth() - ShiftAmt1);
6766 return new SExtInst(Builder->CreateTrunc(X, SExtType, "sext"), Ty);
6767 // Otherwise, we can't handle it yet.
6768 } else if (ShiftAmt1 < ShiftAmt2) {
6769 uint32_t ShiftDiff = ShiftAmt2-ShiftAmt1;
6771 // (X >>? C1) << C2 --> X << (C2-C1) & (-1 << C2)
6772 if (I.getOpcode() == Instruction::Shl) {
6773 assert(ShiftOp->getOpcode() == Instruction::LShr ||
6774 ShiftOp->getOpcode() == Instruction::AShr);
6775 Value *Shift = Builder->CreateShl(X, ConstantInt::get(Ty, ShiftDiff));
6777 APInt Mask(APInt::getHighBitsSet(TypeBits, TypeBits - ShiftAmt2));
6778 return BinaryOperator::CreateAnd(Shift,
6779 ConstantInt::get(I.getContext(),Mask));
6782 // (X << C1) >>u C2 --> X >>u (C2-C1) & (-1 >> C2)
6783 if (I.getOpcode() == Instruction::LShr) {
6784 assert(ShiftOp->getOpcode() == Instruction::Shl);
6785 Value *Shift = Builder->CreateLShr(X, ConstantInt::get(Ty, ShiftDiff));
6787 APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt2));
6788 return BinaryOperator::CreateAnd(Shift,
6789 ConstantInt::get(I.getContext(),Mask));
6792 // We can't handle (X << C1) >>s C2, it shifts arbitrary bits in.
6794 assert(ShiftAmt2 < ShiftAmt1);
6795 uint32_t ShiftDiff = ShiftAmt1-ShiftAmt2;
6797 // (X >>? C1) << C2 --> X >>? (C1-C2) & (-1 << C2)
6798 if (I.getOpcode() == Instruction::Shl) {
6799 assert(ShiftOp->getOpcode() == Instruction::LShr ||
6800 ShiftOp->getOpcode() == Instruction::AShr);
6801 Value *Shift = Builder->CreateBinOp(ShiftOp->getOpcode(), X,
6802 ConstantInt::get(Ty, ShiftDiff));
6804 APInt Mask(APInt::getHighBitsSet(TypeBits, TypeBits - ShiftAmt2));
6805 return BinaryOperator::CreateAnd(Shift,
6806 ConstantInt::get(I.getContext(),Mask));
6809 // (X << C1) >>u C2 --> X << (C1-C2) & (-1 >> C2)
6810 if (I.getOpcode() == Instruction::LShr) {
6811 assert(ShiftOp->getOpcode() == Instruction::Shl);
6812 Value *Shift = Builder->CreateShl(X, ConstantInt::get(Ty, ShiftDiff));
6814 APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt2));
6815 return BinaryOperator::CreateAnd(Shift,
6816 ConstantInt::get(I.getContext(),Mask));
6819 // We can't handle (X << C1) >>a C2, it shifts arbitrary bits in.
6826 /// DecomposeSimpleLinearExpr - Analyze 'Val', seeing if it is a simple linear
6827 /// expression. If so, decompose it, returning some value X, such that Val is
6828 /// 'X*Scale+Offset'.
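/// For example, for Val = (X << 2) + 12 this returns X with Scale = 4 and Offset = 12.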
6830 static Value *DecomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
6832 assert(Val->getType() == Type::getInt32Ty(Val->getContext()) &&
6833 "Unexpected allocation size type!");
6834 if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
6835 Offset = CI->getZExtValue();
6837 return ConstantInt::get(Type::getInt32Ty(Val->getContext()), 0);
6838 } else if (BinaryOperator *I = dyn_cast<BinaryOperator>(Val)) {
6839 if (ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
6840 if (I->getOpcode() == Instruction::Shl) {
6841 // This is a value scaled by '1 << the shift amt'.
6842 Scale = 1U << RHS->getZExtValue();
6844 return I->getOperand(0);
6845 } else if (I->getOpcode() == Instruction::Mul) {
6846 // This value is scaled by 'RHS'.
6847 Scale = RHS->getZExtValue();
6849 return I->getOperand(0);
6850 } else if (I->getOpcode() == Instruction::Add) {
6851 // We have X+C. Check to see if we really have (X*C2)+C1,
6852 // where C1 is divisible by C2.
6855 DecomposeSimpleLinearExpr(I->getOperand(0), SubScale, Offset);
6856 Offset += RHS->getZExtValue();
6863 // Otherwise, we can't look past this.
6870 /// PromoteCastOfAllocation - If we find a cast of an allocation instruction,
6871 /// try to eliminate the cast by moving the type information into the alloc.
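/// e.g. (illustrative) casting '%buf = alloca i8, i32 40' to i32* can be rewritten as
/// '%buf = alloca i32, i32 10', with a bitcast back for any remaining i8* users.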
6872 Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
6874 const PointerType *PTy = cast<PointerType>(CI.getType());
6876 BuilderTy AllocaBuilder(*Builder);
6877 AllocaBuilder.SetInsertPoint(AI.getParent(), &AI);
6879 // Remove any uses of AI that are dead.
6880 assert(!CI.use_empty() && "Dead instructions should be removed earlier!");
6882 for (Value::use_iterator UI = AI.use_begin(), E = AI.use_end(); UI != E; ) {
6883 Instruction *User = cast<Instruction>(*UI++);
6884 if (isInstructionTriviallyDead(User)) {
6885 while (UI != E && *UI == User)
6886 ++UI; // If this instruction uses AI more than once, don't break UI.
6889 DEBUG(errs() << "IC: DCE: " << *User << '\n');
6890 EraseInstFromFunction(*User);
6894 // This requires TargetData to get the alloca alignment and size information.
6897 // Get the type really allocated and the type casted to.
6898 const Type *AllocElTy = AI.getAllocatedType();
6899 const Type *CastElTy = PTy->getElementType();
6900 if (!AllocElTy->isSized() || !CastElTy->isSized()) return 0;
6902 unsigned AllocElTyAlign = TD->getABITypeAlignment(AllocElTy);
6903 unsigned CastElTyAlign = TD->getABITypeAlignment(CastElTy);
6904 if (CastElTyAlign < AllocElTyAlign) return 0;
6906 // If the allocation has multiple uses, only promote it if we are strictly
6907 // increasing the alignment of the resultant allocation. If we keep it the
6908 // same, we open the door to infinite loops of various kinds. (A reference
6909 // from a dbg.declare doesn't count as a use for this purpose.)
6910 if (!AI.hasOneUse() && !hasOneUsePlusDeclare(&AI) &&
6911 CastElTyAlign == AllocElTyAlign) return 0;
6913 uint64_t AllocElTySize = TD->getTypeAllocSize(AllocElTy);
6914 uint64_t CastElTySize = TD->getTypeAllocSize(CastElTy);
6915 if (CastElTySize == 0 || AllocElTySize == 0) return 0;
6917 // See if we can satisfy the modulus by pulling a scale out of the array size argument.
6919 unsigned ArraySizeScale;
6921 Value *NumElements = // See if the array size is a decomposable linear expr.
6922 DecomposeSimpleLinearExpr(AI.getOperand(0), ArraySizeScale, ArrayOffset);
6924 // If we can now satisfy the modulus, by using a non-1 scale, we really can do the xform.
6926 if ((AllocElTySize*ArraySizeScale) % CastElTySize != 0 ||
6927 (AllocElTySize*ArrayOffset ) % CastElTySize != 0) return 0;
6929 unsigned Scale = (AllocElTySize*ArraySizeScale)/CastElTySize;
6934 Amt = ConstantInt::get(Type::getInt32Ty(CI.getContext()), Scale);
6935 // Insert before the alloca, not before the cast.
6936 Amt = AllocaBuilder.CreateMul(Amt, NumElements, "tmp");
6939 if (int Offset = (AllocElTySize*ArrayOffset)/CastElTySize) {
6940 Value *Off = ConstantInt::get(Type::getInt32Ty(CI.getContext()),
6942 Amt = AllocaBuilder.CreateAdd(Amt, Off, "tmp");
6945 AllocaInst *New = AllocaBuilder.CreateAlloca(CastElTy, Amt);
6946 New->setAlignment(AI.getAlignment());
6949 // If the allocation has one real use plus a dbg.declare, just remove the declare.
6951 if (DbgDeclareInst *DI = hasOneUsePlusDeclare(&AI)) {
6952 EraseInstFromFunction(*DI);
6954 // If the allocation has multiple real uses, insert a cast and change all
6955 // things that used it to use the new cast. This will also hack on CI, but it will die soon.
6957 else if (!AI.hasOneUse()) {
6958 // New is the allocation instruction, pointer typed. AI is the original
6959 // allocation instruction, also pointer typed. Thus, cast to use is BitCast.
6960 Value *NewCast = AllocaBuilder.CreateBitCast(New, AI.getType(), "tmpcast");
6961 AI.replaceAllUsesWith(NewCast);
6963 return ReplaceInstUsesWith(CI, New);
6966 /// CanEvaluateInDifferentType - Return true if we can take the specified value
6967 /// and return it as type Ty without inserting any new casts and without
6968 /// changing the computed value. This is used by code that tries to decide
6969 /// whether promoting or shrinking integer operations to wider or smaller types
6970 /// will allow us to eliminate a truncate or extend.
6972 /// This is a truncation operation if Ty is smaller than V->getType(), or an
6973 /// extension operation if Ty is larger.
6975 /// If CastOpc is a truncation, then Ty will be a type smaller than V. We
6976 /// should return true if trunc(V) can be computed by computing V in the smaller
6977 /// type. If V is an instruction, then trunc(inst(x,y)) can be computed as
6978 /// inst(trunc(x),trunc(y)), which only makes sense if x and y can be
6979 /// efficiently truncated.
6981 /// If CastOpc is a sext or zext, we are asking if the low bits of the value can
6982 /// be computed in a larger type, which is then and'd or sext_in_reg'd to get
6983 /// the final result.
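/// e.g. trunc i32 (and i32 %x, %y) to i8 can be evaluated as and i8 (trunc %x), (trunc %y).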
6984 bool InstCombiner::CanEvaluateInDifferentType(Value *V, const Type *Ty,
6986 int &NumCastsRemoved){
6987 // We can always evaluate constants in another type.
6988 if (isa<Constant>(V))
6991 Instruction *I = dyn_cast<Instruction>(V);
6992 if (!I) return false;
6994 const Type *OrigTy = V->getType();
6996 // If this is an extension or truncate, we can often eliminate it.
6997 if (isa<TruncInst>(I) || isa<ZExtInst>(I) || isa<SExtInst>(I)) {
6998 // If this is a cast from the destination type, we can trivially eliminate
6999 // it, and this will remove a cast overall.
7000 if (I->getOperand(0)->getType() == Ty) {
7001 // If the first operand is itself a cast, and is eliminable, do not count
7002 // this as an eliminable cast. We would prefer to eliminate those two casts first.
7004 if (!isa<CastInst>(I->getOperand(0)) && I->hasOneUse())
7010 // We can't extend or shrink something that has multiple uses: doing so would
7011 // require duplicating the instruction in general, which isn't profitable.
7012 if (!I->hasOneUse()) return false;
7014 unsigned Opc = I->getOpcode();
7016 case Instruction::Add:
7017 case Instruction::Sub:
7018 case Instruction::Mul:
7019 case Instruction::And:
7020 case Instruction::Or:
7021 case Instruction::Xor:
7022 // These operators can all arbitrarily be extended or truncated.
7023 return CanEvaluateInDifferentType(I->getOperand(0), Ty, CastOpc,
7025 CanEvaluateInDifferentType(I->getOperand(1), Ty, CastOpc,
7028 case Instruction::UDiv:
7029 case Instruction::URem: {
7030 // UDiv and URem can be truncated if all the truncated bits are zero.
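// e.g. if the top 24 bits of both operands of an i32 udiv are known zero, the
// udiv can be evaluated as an i8 udiv.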
7031 uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
7032 uint32_t BitWidth = Ty->getScalarSizeInBits();
7033 if (BitWidth < OrigBitWidth) {
7034 APInt Mask = APInt::getHighBitsSet(OrigBitWidth, OrigBitWidth-BitWidth);
7035 if (MaskedValueIsZero(I->getOperand(0), Mask) &&
7036 MaskedValueIsZero(I->getOperand(1), Mask)) {
7037 return CanEvaluateInDifferentType(I->getOperand(0), Ty, CastOpc,
7039 CanEvaluateInDifferentType(I->getOperand(1), Ty, CastOpc,
7045 case Instruction::Shl:
7046 // If we are truncating the result of this SHL, and if it's a shift of a
7047 // constant amount, we can always perform a SHL in a smaller type.
7048 if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
7049 uint32_t BitWidth = Ty->getScalarSizeInBits();
7050 if (BitWidth < OrigTy->getScalarSizeInBits() &&
7051 CI->getLimitedValue(BitWidth) < BitWidth)
7052 return CanEvaluateInDifferentType(I->getOperand(0), Ty, CastOpc,
7056 case Instruction::LShr:
7057 // If this is a truncate of a logical shr, we can truncate it to a smaller
7058 // lshr iff we know that the bits we would otherwise be shifting in are already zero.
7060 if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
7061 uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
7062 uint32_t BitWidth = Ty->getScalarSizeInBits();
7063 if (BitWidth < OrigBitWidth &&
7064 MaskedValueIsZero(I->getOperand(0),
7065 APInt::getHighBitsSet(OrigBitWidth, OrigBitWidth-BitWidth)) &&
7066 CI->getLimitedValue(BitWidth) < BitWidth) {
7067 return CanEvaluateInDifferentType(I->getOperand(0), Ty, CastOpc,
7072 case Instruction::ZExt:
7073 case Instruction::SExt:
7074 case Instruction::Trunc:
7075 // If this is the same kind of case as our original (e.g. zext+zext), we
7076 // can safely replace it. Note that replacing it does not reduce the number
7077 // of casts in the input.
7081 // sext (zext ty1), ty2 -> zext ty2
7082 if (CastOpc == Instruction::SExt && Opc == Instruction::ZExt)
7085 case Instruction::Select: {
7086 SelectInst *SI = cast<SelectInst>(I);
7087 return CanEvaluateInDifferentType(SI->getTrueValue(), Ty, CastOpc,
7089 CanEvaluateInDifferentType(SI->getFalseValue(), Ty, CastOpc,
7092 case Instruction::PHI: {
7093 // We can change a phi if we can change all operands.
7094 PHINode *PN = cast<PHINode>(I);
7095 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
7096 if (!CanEvaluateInDifferentType(PN->getIncomingValue(i), Ty, CastOpc,
7102 // TODO: Can handle more cases here.
7109 /// EvaluateInDifferentType - Given an expression that
7110 /// CanEvaluateInDifferentType returns true for, actually insert the code to
7111 /// evaluate the expression.
7112 Value *InstCombiner::EvaluateInDifferentType(Value *V, const Type *Ty,
7114 if (Constant *C = dyn_cast<Constant>(V))
7115 return ConstantExpr::getIntegerCast(C, Ty, isSigned /*Sext or ZExt*/);
7117 // Otherwise, it must be an instruction.
7118 Instruction *I = cast<Instruction>(V);
7119 Instruction *Res = 0;
7120 unsigned Opc = I->getOpcode();
7122 case Instruction::Add:
7123 case Instruction::Sub:
7124 case Instruction::Mul:
7125 case Instruction::And:
7126 case Instruction::Or:
7127 case Instruction::Xor:
7128 case Instruction::AShr:
7129 case Instruction::LShr:
7130 case Instruction::Shl:
7131 case Instruction::UDiv:
7132 case Instruction::URem: {
7133 Value *LHS = EvaluateInDifferentType(I->getOperand(0), Ty, isSigned);
7134 Value *RHS = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
7135 Res = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS);
7138 case Instruction::Trunc:
7139 case Instruction::ZExt:
7140 case Instruction::SExt:
7141 // If the source type of the cast is the type we're trying for then we can
7142 // just return the source. There's no need to insert it because it is not new.
7144 if (I->getOperand(0)->getType() == Ty)
7145 return I->getOperand(0);
7147 // Otherwise, must be the same type of cast, so just reinsert a new one.
7148 Res = CastInst::Create(cast<CastInst>(I)->getOpcode(), I->getOperand(0),Ty);
7150 case Instruction::Select: {
7151 Value *True = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
7152 Value *False = EvaluateInDifferentType(I->getOperand(2), Ty, isSigned);
7153 Res = SelectInst::Create(I->getOperand(0), True, False);
7156 case Instruction::PHI: {
7157 PHINode *OPN = cast<PHINode>(I);
7158 PHINode *NPN = PHINode::Create(Ty);
7159 for (unsigned i = 0, e = OPN->getNumIncomingValues(); i != e; ++i) {
7160 Value *V =EvaluateInDifferentType(OPN->getIncomingValue(i), Ty, isSigned);
7161 NPN->addIncoming(V, OPN->getIncomingBlock(i));
7167 // TODO: Can handle more cases here.
7168 llvm_unreachable("Unreachable!");
7173 return InsertNewInstBefore(Res, *I);
7176 /// @brief Implement the transforms common to all CastInst visitors.
7177 Instruction *InstCombiner::commonCastTransforms(CastInst &CI) {
7178 Value *Src = CI.getOperand(0);
7180 // Many cases of "cast of a cast" are eliminable. If it's eliminable we just
7181 // eliminate it now.
7182 if (CastInst *CSrc = dyn_cast<CastInst>(Src)) { // A->B->C cast
7183 if (Instruction::CastOps opc =
7184 isEliminableCastPair(CSrc, CI.getOpcode(), CI.getType(), TD)) {
7185 // The first cast (CSrc) is eliminable so we need to fix up or replace
7186 // the second cast (CI). CSrc will then have a good chance of being dead.
7187 return CastInst::Create(opc, CSrc->getOperand(0), CI.getType());
7191 // If we are casting a select then fold the cast into the select
7192 if (SelectInst *SI = dyn_cast<SelectInst>(Src))
7193 if (Instruction *NV = FoldOpIntoSelect(CI, SI, this))
7196 // If we are casting a PHI then fold the cast into the PHI
7197 if (isa<PHINode>(Src)) {
7198 // We don't do this if this would create a PHI node with an illegal type if
7199 // it is currently legal.
7200 if (!isa<IntegerType>(Src->getType()) ||
7201 !isa<IntegerType>(CI.getType()) ||
7202 ShouldChangeType(CI.getType(), Src->getType(), TD))
7203 if (Instruction *NV = FoldOpIntoPhi(CI))
7210 /// FindElementAtOffset - Given a type and a constant offset, determine whether
7211 /// or not there is a sequence of GEP indices into the type that will land us at
7212 /// the specified offset. If so, fill them into NewIndices and return the
7213 /// resultant element type, otherwise return null.
7214 static const Type *FindElementAtOffset(const Type *Ty, int64_t Offset,
7215 SmallVectorImpl<Value*> &NewIndices,
7216 const TargetData *TD) {
7218 if (!Ty->isSized()) return 0;
7220 // Start with the index over the outer type. Note that the type size
7221 // might be zero (even if the offset isn't zero) if the indexed type
7222 // is something like [0 x {int, int}]
7223 const Type *IntPtrTy = TD->getIntPtrType(Ty->getContext());
7224 int64_t FirstIdx = 0;
7225 if (int64_t TySize = TD->getTypeAllocSize(Ty)) {
7226 FirstIdx = Offset/TySize;
7227 Offset -= FirstIdx*TySize;
7229 // Handle hosts where % returns negative instead of values [0..TySize).
7230 if (Offset < 0) {
7231 --FirstIdx;
7232 Offset += TySize;
7233 assert(Offset >= 0);
7234 }
7235 assert((uint64_t)Offset < (uint64_t)TySize && "Out of range offset");
7238 NewIndices.push_back(ConstantInt::get(IntPtrTy, FirstIdx));
7240 // Index into the types. If we fail, set OrigBase to null.
7242 // Indexing into tail padding between struct/array elements.
7243 if (uint64_t(Offset*8) >= TD->getTypeSizeInBits(Ty))
return 0;
7246 if (const StructType *STy = dyn_cast<StructType>(Ty)) {
7247 const StructLayout *SL = TD->getStructLayout(STy);
7248 assert(Offset < (int64_t)SL->getSizeInBytes() &&
7249 "Offset must stay within the indexed type");
7251 unsigned Elt = SL->getElementContainingOffset(Offset);
7252 NewIndices.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
7255 Offset -= SL->getElementOffset(Elt);
7256 Ty = STy->getElementType(Elt);
7257 } else if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
7258 uint64_t EltSize = TD->getTypeAllocSize(AT->getElementType());
7259 assert(EltSize && "Cannot index into a zero-sized array");
7260 NewIndices.push_back(ConstantInt::get(IntPtrTy,Offset/EltSize));
7262 Ty = AT->getElementType();
7264 // Otherwise, we can't index into the middle of this atomic type, bail.
7272 /// @brief Implement the transforms for cast of pointer (bitcast/ptrtoint)
7273 Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
7274 Value *Src = CI.getOperand(0);
7276 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Src)) {
7277 // If casting the result of a getelementptr instruction with no offset, turn
7278 // this into a cast of the original pointer!
7279 if (GEP->hasAllZeroIndices()) {
7280 // Changing the cast operand is usually not a good idea but it is safe
7281 // here because the pointer operand is being replaced with another
7282 // pointer operand so the opcode doesn't need to change.
7284 CI.setOperand(0, GEP->getOperand(0));
7288 // If the GEP has a single use, and the base pointer is a bitcast, and the
7289 // GEP computes a constant offset, see if we can convert these three
7290 // instructions into fewer. This typically happens with unions and other
7291 // non-type-safe code.
7292 if (TD && GEP->hasOneUse() && isa<BitCastInst>(GEP->getOperand(0))) {
7293 if (GEP->hasAllConstantIndices()) {
7294 // We are guaranteed to get a constant from EmitGEPOffset.
7295 ConstantInt *OffsetV = cast<ConstantInt>(EmitGEPOffset(GEP, *this));
7296 int64_t Offset = OffsetV->getSExtValue();
7298 // Get the base pointer input of the bitcast, and the type it points to.
7299 Value *OrigBase = cast<BitCastInst>(GEP->getOperand(0))->getOperand(0);
7300 const Type *GEPIdxTy =
7301 cast<PointerType>(OrigBase->getType())->getElementType();
7302 SmallVector<Value*, 8> NewIndices;
7303 if (FindElementAtOffset(GEPIdxTy, Offset, NewIndices, TD)) {
7304 // If we were able to index down into an element, create the GEP
7305 // and bitcast the result. This eliminates one bitcast, potentially two.
7307 Value *NGEP = cast<GEPOperator>(GEP)->isInBounds() ?
7308 Builder->CreateInBoundsGEP(OrigBase,
7309 NewIndices.begin(), NewIndices.end()) :
7310 Builder->CreateGEP(OrigBase, NewIndices.begin(), NewIndices.end());
7311 NGEP->takeName(GEP);
7313 if (isa<BitCastInst>(CI))
7314 return new BitCastInst(NGEP, CI.getType());
7315 assert(isa<PtrToIntInst>(CI));
7316 return new PtrToIntInst(NGEP, CI.getType());
7322 return commonCastTransforms(CI);
7325 /// commonIntCastTransforms - This function implements the common transforms
7326 /// for trunc, zext, and sext.
7327 Instruction *InstCombiner::commonIntCastTransforms(CastInst &CI) {
7328 if (Instruction *Result = commonCastTransforms(CI))
7331 Value *Src = CI.getOperand(0);
7332 const Type *SrcTy = Src->getType();
7333 const Type *DestTy = CI.getType();
7334 uint32_t SrcBitSize = SrcTy->getScalarSizeInBits();
7335 uint32_t DestBitSize = DestTy->getScalarSizeInBits();
7337 // See if we can simplify any instructions used by the LHS whose sole
7338 // purpose is to compute bits we don't care about.
7339 if (SimplifyDemandedInstructionBits(CI))
7342 // If the source isn't an instruction or has more than one use then we
7343 // can't do anything more.
7344 Instruction *SrcI = dyn_cast<Instruction>(Src);
7345 if (!SrcI || !Src->hasOneUse())
7348 // Attempt to propagate the cast into the instruction for int->int casts.
7349 int NumCastsRemoved = 0;
7350 // Only do this if the dest type is a simple type, don't convert the
7351 // expression tree to something weird like i93 unless the source is also strange.
7353 if ((isa<VectorType>(DestTy) ||
7354 ShouldChangeType(SrcI->getType(), DestTy, TD)) &&
7355 CanEvaluateInDifferentType(SrcI, DestTy,
7356 CI.getOpcode(), NumCastsRemoved)) {
7357 // If this cast is a truncate, evaluating in a different type always
7358 // eliminates the cast, so it is always a win. If this is a zero-extension,
7359 // we need to do an AND to maintain the clear top-part of the computation,
7360 // so we require that the input have eliminated at least one cast. If this
7361 // is a sign extension, we insert two new casts (to do the extension) so we
7362 // require that two casts have been eliminated.
7363 bool DoXForm = false;
7364 bool JustReplace = false;
7365 switch (CI.getOpcode()) {
7367 // All the others use floating point so we shouldn't actually
7368 // get here because of the check above.
7369 llvm_unreachable("Unknown cast type");
7370 case Instruction::Trunc:
7373 case Instruction::ZExt: {
7374 DoXForm = NumCastsRemoved >= 1;
7376 if (!DoXForm && 0) {
7377 // If it's unnecessary to issue an AND to clear the high bits, it's
7378 // always profitable to do this xform.
7379 Value *TryRes = EvaluateInDifferentType(SrcI, DestTy, false);
7380 APInt Mask(APInt::getBitsSet(DestBitSize, SrcBitSize, DestBitSize));
7381 if (MaskedValueIsZero(TryRes, Mask))
7382 return ReplaceInstUsesWith(CI, TryRes);
7384 if (Instruction *TryI = dyn_cast<Instruction>(TryRes))
7385 if (TryI->use_empty())
7386 EraseInstFromFunction(*TryI);
7390 case Instruction::SExt: {
7391 DoXForm = NumCastsRemoved >= 2;
7392 if (!DoXForm && !isa<TruncInst>(SrcI) && 0) {
7393 // If we do not have to emit the truncate + sext pair, then it's always
7394 // profitable to do this xform.
7396 // It's not safe to eliminate the trunc + sext pair if one of the
7397 // eliminated cast is a truncate. e.g.
7398 // t2 = trunc i32 t1 to i16
7399 // t3 = sext i16 t2 to i32
7402 Value *TryRes = EvaluateInDifferentType(SrcI, DestTy, true);
7403 unsigned NumSignBits = ComputeNumSignBits(TryRes);
7404 if (NumSignBits > (DestBitSize - SrcBitSize))
7405 return ReplaceInstUsesWith(CI, TryRes);
7407 if (Instruction *TryI = dyn_cast<Instruction>(TryRes))
7408 if (TryI->use_empty())
7409 EraseInstFromFunction(*TryI);
7416 DEBUG(errs() << "ICE: EvaluateInDifferentType converting expression type"
7417 " to avoid cast: " << CI);
7418 Value *Res = EvaluateInDifferentType(SrcI, DestTy,
7419 CI.getOpcode() == Instruction::SExt);
7421 // Just replace this cast with the result.
7422 return ReplaceInstUsesWith(CI, Res);
7424 assert(Res->getType() == DestTy);
7425 switch (CI.getOpcode()) {
7426 default: llvm_unreachable("Unknown cast type!");
7427 case Instruction::Trunc:
7428 // Just replace this cast with the result.
7429 return ReplaceInstUsesWith(CI, Res);
7430 case Instruction::ZExt: {
7431 assert(SrcBitSize < DestBitSize && "Not a zext?");
7433 // If the high bits are already zero, just replace this cast with the
7435 APInt Mask(APInt::getBitsSet(DestBitSize, SrcBitSize, DestBitSize));
7436 if (MaskedValueIsZero(Res, Mask))
7437 return ReplaceInstUsesWith(CI, Res);
7439 // We need to emit an AND to clear the high bits.
7440 Constant *C = ConstantInt::get(CI.getContext(),
7441 APInt::getLowBitsSet(DestBitSize, SrcBitSize));
7442 return BinaryOperator::CreateAnd(Res, C);
7444 case Instruction::SExt: {
7445 // If the high bits are already filled with sign bit, just replace this
7446 // cast with the result.
7447 unsigned NumSignBits = ComputeNumSignBits(Res);
7448 if (NumSignBits > (DestBitSize - SrcBitSize))
7449 return ReplaceInstUsesWith(CI, Res);
7451 // We need to emit a cast to truncate, then a cast to sext.
7452 return new SExtInst(Builder->CreateTrunc(Res, Src->getType()), DestTy);
7458 Value *Op0 = SrcI->getNumOperands() > 0 ? SrcI->getOperand(0) : 0;
7459 Value *Op1 = SrcI->getNumOperands() > 1 ? SrcI->getOperand(1) : 0;
7461 switch (SrcI->getOpcode()) {
7462 case Instruction::Add:
7463 case Instruction::Mul:
7464 case Instruction::And:
7465 case Instruction::Or:
7466 case Instruction::Xor:
7467 // If we are discarding information, rewrite.
7468 if (DestBitSize < SrcBitSize && DestBitSize != 1) {
7469 // Don't insert two casts unless at least one can be eliminated.
7470 if (!ValueRequiresCast(CI.getOpcode(), Op1, DestTy, TD) ||
7471 !ValueRequiresCast(CI.getOpcode(), Op0, DestTy, TD)) {
7472 Value *Op0c = Builder->CreateTrunc(Op0, DestTy, Op0->getName());
7473 Value *Op1c = Builder->CreateTrunc(Op1, DestTy, Op1->getName());
7474 return BinaryOperator::Create(
7475 cast<BinaryOperator>(SrcI)->getOpcode(), Op0c, Op1c);
7479 // cast (xor bool X, true) to int --> xor (cast bool X to int), 1
7480 if (isa<ZExtInst>(CI) && SrcBitSize == 1 &&
7481 SrcI->getOpcode() == Instruction::Xor &&
7482 Op1 == ConstantInt::getTrue(CI.getContext()) &&
7483 (!Op0->hasOneUse() || !isa<CmpInst>(Op0))) {
7484 Value *New = Builder->CreateZExt(Op0, DestTy, Op0->getName());
7485 return BinaryOperator::CreateXor(New,
7486 ConstantInt::get(CI.getType(), 1));
7490 case Instruction::Shl: {
7491 // Canonicalize trunc inside shl, if we can.
7492 ConstantInt *CI = dyn_cast<ConstantInt>(Op1);
7493 if (CI && DestBitSize < SrcBitSize &&
7494 CI->getLimitedValue(DestBitSize) < DestBitSize) {
7495 Value *Op0c = Builder->CreateTrunc(Op0, DestTy, Op0->getName());
7496 Value *Op1c = Builder->CreateTrunc(Op1, DestTy, Op1->getName());
7497 return BinaryOperator::CreateShl(Op0c, Op1c);
7505 Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
7506 if (Instruction *Result = commonIntCastTransforms(CI))
7509 Value *Src = CI.getOperand(0);
7510 const Type *Ty = CI.getType();
7511 uint32_t DestBitWidth = Ty->getScalarSizeInBits();
7512 uint32_t SrcBitWidth = Src->getType()->getScalarSizeInBits();
7514 // Canonicalize trunc x to i1 -> (icmp ne (and x, 1), 0)
7515 if (DestBitWidth == 1) {
7516 Constant *One = ConstantInt::get(Src->getType(), 1);
7517 Src = Builder->CreateAnd(Src, One, "tmp");
7518 Value *Zero = Constant::getNullValue(Src->getType());
7519 return new ICmpInst(ICmpInst::ICMP_NE, Src, Zero);
7522 // Optimize trunc(lshr(), c) to pull the shift through the truncate.
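// e.g. (illustrative) if bits 16..19 of %x are known zero:
//   trunc i32 (lshr i32 %x, 4) to i16 --> lshr i16 (trunc i32 %x to i16), 4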
7523 ConstantInt *ShAmtV = 0;
7524 Value *ShiftOp = 0;
7525 if (Src->hasOneUse() &&
7526 match(Src, m_LShr(m_Value(ShiftOp), m_ConstantInt(ShAmtV)))) {
7527 uint32_t ShAmt = ShAmtV->getLimitedValue(SrcBitWidth);
7529 // Get a mask for the bits shifting in.
7530 APInt Mask(APInt::getLowBitsSet(SrcBitWidth, ShAmt).shl(DestBitWidth));
7531 if (MaskedValueIsZero(ShiftOp, Mask)) {
7532 if (ShAmt >= DestBitWidth) // All zeros.
7533 return ReplaceInstUsesWith(CI, Constant::getNullValue(Ty));
7535 // Okay, we can shrink this. Truncate the input, then return a new
7537 Value *V1 = Builder->CreateTrunc(ShiftOp, Ty, ShiftOp->getName());
7538 Value *V2 = ConstantExpr::getTrunc(ShAmtV, Ty);
7539 return BinaryOperator::CreateLShr(V1, V2);
7546 /// transformZExtICmp - Transform (zext icmp) to bitwise / integer operations
7547 /// in order to eliminate the icmp.
7548 Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI,
7550 // If we are just checking for an icmp eq of a single bit and zext'ing it
7551 // to an integer, then shift the bit to the appropriate place and then
7552 // cast to integer to avoid the comparison.
7553 if (ConstantInt *Op1C = dyn_cast<ConstantInt>(ICI->getOperand(1))) {
7554 const APInt &Op1CV = Op1C->getValue();
7556 // zext (x <s 0) to i32 --> x>>u31 true if signbit set.
7557 // zext (x >s -1) to i32 --> (x>>u31)^1 true if signbit clear.
7558 if ((ICI->getPredicate() == ICmpInst::ICMP_SLT && Op1CV == 0) ||
7559 (ICI->getPredicate() == ICmpInst::ICMP_SGT &&Op1CV.isAllOnesValue())) {
7560 if (!DoXform) return ICI;
7562 Value *In = ICI->getOperand(0);
7563 Value *Sh = ConstantInt::get(In->getType(),
7564 In->getType()->getScalarSizeInBits()-1);
7565 In = Builder->CreateLShr(In, Sh, In->getName()+".lobit");
7566 if (In->getType() != CI.getType())
7567 In = Builder->CreateIntCast(In, CI.getType(), false/*ZExt*/, "tmp");
7569 if (ICI->getPredicate() == ICmpInst::ICMP_SGT) {
7570 Constant *One = ConstantInt::get(In->getType(), 1);
7571 In = Builder->CreateXor(In, One, In->getName()+".not");
7574 return ReplaceInstUsesWith(CI, In);
7579 // zext (X == 0) to i32 --> X^1 iff X has only the low bit set.
7580 // zext (X == 0) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
7581 // zext (X == 1) to i32 --> X iff X has only the low bit set.
7582 // zext (X == 2) to i32 --> X>>1 iff X has only the 2nd bit set.
7583 // zext (X != 0) to i32 --> X iff X has only the low bit set.
7584 // zext (X != 0) to i32 --> X>>1 iff X has only the 2nd bit set.
7585 // zext (X != 1) to i32 --> X^1 iff X has only the low bit set.
7586 // zext (X != 2) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
7587 if ((Op1CV == 0 || Op1CV.isPowerOf2()) &&
7588 // This only works for EQ and NE
7589 ICI->isEquality()) {
7590 // If Op1C is some other power of two, convert:
7591 uint32_t BitWidth = Op1C->getType()->getBitWidth();
7592 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
7593 APInt TypeMask(APInt::getAllOnesValue(BitWidth));
7594 ComputeMaskedBits(ICI->getOperand(0), TypeMask, KnownZero, KnownOne);
7596 APInt KnownZeroMask(~KnownZero);
7597 if (KnownZeroMask.isPowerOf2()) { // Exactly 1 possible 1?
7598 if (!DoXform) return ICI;
7600 bool isNE = ICI->getPredicate() == ICmpInst::ICMP_NE;
7601 if (Op1CV != 0 && (Op1CV != KnownZeroMask)) {
7602 // (X&4) == 2 --> false
7603 // (X&4) != 2 --> true
7604 Constant *Res = ConstantInt::get(Type::getInt1Ty(CI.getContext()),
7606 Res = ConstantExpr::getZExt(Res, CI.getType());
7607 return ReplaceInstUsesWith(CI, Res);
7610 uint32_t ShiftAmt = KnownZeroMask.logBase2();
7611 Value *In = ICI->getOperand(0);
7613 // Perform a logical shr by shiftamt.
7614 // Insert the shift to put the result in the low bit.
7615 In = Builder->CreateLShr(In, ConstantInt::get(In->getType(),ShiftAmt),
7616 In->getName()+".lobit");
7619 if ((Op1CV != 0) == isNE) { // Toggle the low bit.
7620 Constant *One = ConstantInt::get(In->getType(), 1);
7621 In = Builder->CreateXor(In, One, "tmp");
7624 if (CI.getType() == In->getType())
7625 return ReplaceInstUsesWith(CI, In);
7627 return CastInst::CreateIntegerCast(In, CI.getType(), false/*ZExt*/);
7632 // icmp ne A, B is equal to xor A, B when A and B only really have one bit.
7633 // It is also profitable to transform icmp eq into not(xor(A, B)) because that
7634 // may lead to additional simplifications.
7635 if (ICI->isEquality() && CI.getType() == ICI->getOperand(0)->getType()) {
7636 if (const IntegerType *ITy = dyn_cast<IntegerType>(CI.getType())) {
7637 uint32_t BitWidth = ITy->getBitWidth();
7638 Value *LHS = ICI->getOperand(0);
7639 Value *RHS = ICI->getOperand(1);
7641 APInt KnownZeroLHS(BitWidth, 0), KnownOneLHS(BitWidth, 0);
7642 APInt KnownZeroRHS(BitWidth, 0), KnownOneRHS(BitWidth, 0);
7643 APInt TypeMask(APInt::getAllOnesValue(BitWidth));
7644 ComputeMaskedBits(LHS, TypeMask, KnownZeroLHS, KnownOneLHS);
7645 ComputeMaskedBits(RHS, TypeMask, KnownZeroRHS, KnownOneRHS);
7647 if (KnownZeroLHS == KnownZeroRHS && KnownOneLHS == KnownOneRHS) {
7648 APInt KnownBits = KnownZeroLHS | KnownOneLHS;
7649 APInt UnknownBit = ~KnownBits;
7650 if (UnknownBit.countPopulation() == 1) {
7651 if (!DoXform) return ICI;
7653 Value *Result = Builder->CreateXor(LHS, RHS);
7655 // Mask off any bits that are set and won't be shifted away.
7656 if (KnownOneLHS.uge(UnknownBit))
7657 Result = Builder->CreateAnd(Result,
7658 ConstantInt::get(ITy, UnknownBit));
7660 // Shift the bit we're testing down to the lsb.
7661 Result = Builder->CreateLShr(
7662 Result, ConstantInt::get(ITy, UnknownBit.countTrailingZeros()));
7664 if (ICI->getPredicate() == ICmpInst::ICMP_EQ)
7665 Result = Builder->CreateXor(Result, ConstantInt::get(ITy, 1));
7666 Result->takeName(ICI);
7667 return ReplaceInstUsesWith(CI, Result);
7676 Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
7677 // If one of the common conversions will work, do it.
7678 if (Instruction *Result = commonIntCastTransforms(CI))
7681 Value *Src = CI.getOperand(0);
7683 // If this is a TRUNC followed by a ZEXT then we are dealing with integral
7684 // types and if the sizes are just right we can convert this into a logical
7685 // 'and' which will be much cheaper than the pair of casts.
7686 if (TruncInst *CSrc = dyn_cast<TruncInst>(Src)) { // A->B->C cast
7687 // Get the sizes of the types involved. We know that the intermediate type
7688 // will be smaller than A or C, but don't know the relation between A and C.
7689 Value *A = CSrc->getOperand(0);
7690 unsigned SrcSize = A->getType()->getScalarSizeInBits();
7691 unsigned MidSize = CSrc->getType()->getScalarSizeInBits();
7692 unsigned DstSize = CI.getType()->getScalarSizeInBits();
7693 // If we're actually extending zero bits, then if
7694 // SrcSize < DstSize: zext(a & mask)
7695 // SrcSize == DstSize: a & mask
7696 // SrcSize > DstSize: trunc(a) & mask
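// e.g. zext (trunc i32 %x to i8) to i32 --> and i32 %x, 255 (the SrcSize == DstSize case).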
7697 if (SrcSize < DstSize) {
7698 APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
7699 Constant *AndConst = ConstantInt::get(A->getType(), AndValue);
7700 Value *And = Builder->CreateAnd(A, AndConst, CSrc->getName()+".mask");
7701 return new ZExtInst(And, CI.getType());
7704 if (SrcSize == DstSize) {
7705 APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
7706 return BinaryOperator::CreateAnd(A, ConstantInt::get(A->getType(),
7709 if (SrcSize > DstSize) {
7710 Value *Trunc = Builder->CreateTrunc(A, CI.getType(), "tmp");
7711 APInt AndValue(APInt::getLowBitsSet(DstSize, MidSize));
7712 return BinaryOperator::CreateAnd(Trunc,
7713 ConstantInt::get(Trunc->getType(),
7718 if (ICmpInst *ICI = dyn_cast<ICmpInst>(Src))
7719 return transformZExtICmp(ICI, CI);
7721 BinaryOperator *SrcI = dyn_cast<BinaryOperator>(Src);
7722 if (SrcI && SrcI->getOpcode() == Instruction::Or) {
7723 // zext (or icmp, icmp) --> or (zext icmp), (zext icmp) if at least one
7724 // of the (zext icmp) will be transformed.
7725 ICmpInst *LHS = dyn_cast<ICmpInst>(SrcI->getOperand(0));
7726 ICmpInst *RHS = dyn_cast<ICmpInst>(SrcI->getOperand(1));
7727 if (LHS && RHS && LHS->hasOneUse() && RHS->hasOneUse() &&
7728 (transformZExtICmp(LHS, CI, false) ||
7729 transformZExtICmp(RHS, CI, false))) {
7730 Value *LCast = Builder->CreateZExt(LHS, CI.getType(), LHS->getName());
7731 Value *RCast = Builder->CreateZExt(RHS, CI.getType(), RHS->getName());
7732 return BinaryOperator::Create(Instruction::Or, LCast, RCast);
7736 // zext(trunc(t) & C) -> (t & zext(C)).
7737 if (SrcI && SrcI->getOpcode() == Instruction::And && SrcI->hasOneUse())
7738 if (ConstantInt *C = dyn_cast<ConstantInt>(SrcI->getOperand(1)))
7739 if (TruncInst *TI = dyn_cast<TruncInst>(SrcI->getOperand(0))) {
7740 Value *TI0 = TI->getOperand(0);
7741 if (TI0->getType() == CI.getType())
7743 BinaryOperator::CreateAnd(TI0,
7744 ConstantExpr::getZExt(C, CI.getType()));
7747 // zext((trunc(t) & C) ^ C) -> ((t & zext(C)) ^ zext(C)).
7748 if (SrcI && SrcI->getOpcode() == Instruction::Xor && SrcI->hasOneUse())
7749 if (ConstantInt *C = dyn_cast<ConstantInt>(SrcI->getOperand(1)))
7750 if (BinaryOperator *And = dyn_cast<BinaryOperator>(SrcI->getOperand(0)))
7751 if (And->getOpcode() == Instruction::And && And->hasOneUse() &&
7752 And->getOperand(1) == C)
7753 if (TruncInst *TI = dyn_cast<TruncInst>(And->getOperand(0))) {
7754 Value *TI0 = TI->getOperand(0);
7755 if (TI0->getType() == CI.getType()) {
7756 Constant *ZC = ConstantExpr::getZExt(C, CI.getType());
7757 Value *NewAnd = Builder->CreateAnd(TI0, ZC, "tmp");
7758 return BinaryOperator::CreateXor(NewAnd, ZC);
7765 Instruction *InstCombiner::visitSExt(SExtInst &CI) {
7766 if (Instruction *I = commonIntCastTransforms(CI))
7769 Value *Src = CI.getOperand(0);
7771 // Canonicalize sign-extend from i1 to a select.
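// e.g. sext i1 %b to i32 --> select i1 %b, i32 -1, i32 0.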
7772 if (Src->getType() == Type::getInt1Ty(CI.getContext()))
7773 return SelectInst::Create(Src,
7774 Constant::getAllOnesValue(CI.getType()),
7775 Constant::getNullValue(CI.getType()));
7777 // See if the value being truncated is already sign extended. If so, just
7778 // eliminate the trunc/sext pair.
7779 if (Operator::getOpcode(Src) == Instruction::Trunc) {
7780 Value *Op = cast<User>(Src)->getOperand(0);
7781 unsigned OpBits = Op->getType()->getScalarSizeInBits();
7782 unsigned MidBits = Src->getType()->getScalarSizeInBits();
7783 unsigned DestBits = CI.getType()->getScalarSizeInBits();
7784 unsigned NumSignBits = ComputeNumSignBits(Op);
7786 if (OpBits == DestBits) {
7787 // Op is i32, Mid is i8, and Dest is i32. If Op has more than 24 sign
7788 // bits, it is already ready.
7789 if (NumSignBits > DestBits-MidBits)
7790 return ReplaceInstUsesWith(CI, Op);
7791 } else if (OpBits < DestBits) {
7792 // Op is i32, Mid is i8, and Dest is i64. If Op has more than 24 sign
7793 // bits, just sext from i32.
7794 if (NumSignBits > OpBits-MidBits)
7795 return new SExtInst(Op, CI.getType(), "tmp");
7797 // Op is i64, Mid is i8, and Dest is i32. If Op has more than 56 sign
7798 // bits, just truncate to i32.
7799 if (NumSignBits > OpBits-MidBits)
7800 return new TruncInst(Op, CI.getType(), "tmp");
7804 // If the input is a shl/ashr pair of a same constant, then this is a sign
7805 // extension from a smaller value. If we could trust arbitrary bitwidth
7806 // integers, we could turn this into a truncate to the smaller bit and then
7807 // use a sext for the whole extension. Since we don't, look deeper and check
7808 // for a truncate. If the source and dest are the same type, eliminate the
7809 // trunc and extend and just do shifts. For example, turn:
7810 // %a = trunc i32 %i to i8
7811 // %b = shl i8 %a, 6
7812 // %c = ashr i8 %b, 6
7813 // %d = sext i8 %c to i32
7815 // %a = shl i32 %i, 30
7816 // %d = ashr i32 %a, 30
7818 ConstantInt *BA = 0, *CA = 0;
7819 if (match(Src, m_AShr(m_Shl(m_Value(A), m_ConstantInt(BA)),
7820 m_ConstantInt(CA))) &&
7821 BA == CA && isa<TruncInst>(A)) {
7822 Value *I = cast<TruncInst>(A)->getOperand(0);
7823 if (I->getType() == CI.getType()) {
7824 unsigned MidSize = Src->getType()->getScalarSizeInBits();
7825 unsigned SrcDstSize = CI.getType()->getScalarSizeInBits();
7826 unsigned ShAmt = CA->getZExtValue()+SrcDstSize-MidSize;
7827 Constant *ShAmtV = ConstantInt::get(CI.getType(), ShAmt);
7828 I = Builder->CreateShl(I, ShAmtV, CI.getName());
7829 return BinaryOperator::CreateAShr(I, ShAmtV);
7836 /// FitsInFPType - Return a Constant* for the specified FP constant if it fits
7837 /// in the specified FP type without changing its value.
7838 static Constant *FitsInFPType(ConstantFP *CFP, const fltSemantics &Sem) {
7840 APFloat F = CFP->getValueAPF();
7841 (void)F.convert(Sem, APFloat::rmNearestTiesToEven, &losesInfo);
7843 return ConstantFP::get(CFP->getContext(), F);
7847 /// LookThroughFPExtensions - If this is an fp extension instruction, look
7848 /// through it until we get the source value.
7849 static Value *LookThroughFPExtensions(Value *V) {
7850 if (Instruction *I = dyn_cast<Instruction>(V))
7851 if (I->getOpcode() == Instruction::FPExt)
7852 return LookThroughFPExtensions(I->getOperand(0));
7854 // If this value is a constant, return the constant in the smallest FP type
7855 // that can accurately represent it. This allows us to turn
7856 // (float)((double)X+2.0) into x+2.0f.
7857 if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
7858 if (CFP->getType() == Type::getPPC_FP128Ty(V->getContext()))
7859 return V; // No constant folding of this.
7860 // See if the value can be truncated to float and then reextended.
7861 if (Value *V = FitsInFPType(CFP, APFloat::IEEEsingle))
7863 if (CFP->getType() == Type::getDoubleTy(V->getContext()))
7864 return V; // Won't shrink.
7865 if (Value *V = FitsInFPType(CFP, APFloat::IEEEdouble))
7867 // Don't try to shrink to various long double types.
7873 Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
7874 if (Instruction *I = commonCastTransforms(CI))
7877 // If we have fptrunc(fadd (fpextend x), (fpextend y)), where x and y are
7878 // smaller than the destination type, we can eliminate the truncate by doing
7879 // the add as the smaller type. This applies to fadd/fsub/fmul/fdiv as well as
7880 // many builtins (sqrt, etc).
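// e.g. fptrunc (fadd double (fpext float %a), (fpext float %b)) to float --> fadd float %a, %b.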
7881 BinaryOperator *OpI = dyn_cast<BinaryOperator>(CI.getOperand(0));
7882 if (OpI && OpI->hasOneUse()) {
7883 switch (OpI->getOpcode()) {
7885 case Instruction::FAdd:
7886 case Instruction::FSub:
7887 case Instruction::FMul:
7888 case Instruction::FDiv:
7889 case Instruction::FRem:
7890 const Type *SrcTy = OpI->getType();
7891 Value *LHSTrunc = LookThroughFPExtensions(OpI->getOperand(0));
7892 Value *RHSTrunc = LookThroughFPExtensions(OpI->getOperand(1));
7893 if (LHSTrunc->getType() != SrcTy &&
7894 RHSTrunc->getType() != SrcTy) {
7895 unsigned DstSize = CI.getType()->getScalarSizeInBits();
7896 // If the source types were both smaller than the destination type of
7897 // the cast, do this xform.
7898 if (LHSTrunc->getType()->getScalarSizeInBits() <= DstSize &&
7899 RHSTrunc->getType()->getScalarSizeInBits() <= DstSize) {
7900 LHSTrunc = Builder->CreateFPExt(LHSTrunc, CI.getType());
7901 RHSTrunc = Builder->CreateFPExt(RHSTrunc, CI.getType());
7902 return BinaryOperator::Create(OpI->getOpcode(), LHSTrunc, RHSTrunc);
7911 Instruction *InstCombiner::visitFPExt(CastInst &CI) {
7912 return commonCastTransforms(CI);
7915 Instruction *InstCombiner::visitFPToUI(FPToUIInst &FI) {
7916 Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
7918 return commonCastTransforms(FI);
7920 // fptoui(uitofp(X)) --> X
7921 // fptoui(sitofp(X)) --> X
7922 // This is safe if the intermediate type has enough bits in its mantissa to
7923 // accurately represent all values of X. For example, do not do this with
7924 // i64->float->i64. This is also safe for sitofp case, because any negative
7925 // 'X' value would cause an undefined result for the fptoui.
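// e.g. fptoui (uitofp i16 %x to double) to i16 --> %x, since the 53-bit double
// mantissa represents every i16 value exactly.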
7926 if ((isa<UIToFPInst>(OpI) || isa<SIToFPInst>(OpI)) &&
7927 OpI->getOperand(0)->getType() == FI.getType() &&
7928 (int)FI.getType()->getScalarSizeInBits() < /*extra bit for sign */
7929 OpI->getType()->getFPMantissaWidth())
7930 return ReplaceInstUsesWith(FI, OpI->getOperand(0));
7932 return commonCastTransforms(FI);
7935 Instruction *InstCombiner::visitFPToSI(FPToSIInst &FI) {
7936 Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
7938 return commonCastTransforms(FI);
7940 // fptosi(sitofp(X)) --> X
7941 // fptosi(uitofp(X)) --> X
7942 // This is safe if the intermediate type has enough bits in its mantissa to
7943 // accurately represent all values of X. For example, do not do this with
7944 // i64->float->i64. This is also safe for sitofp case, because any negative
7945 // 'X' value would cause an undefined result for the fptoui.
7946 if ((isa<UIToFPInst>(OpI) || isa<SIToFPInst>(OpI)) &&
7947 OpI->getOperand(0)->getType() == FI.getType() &&
7948 (int)FI.getType()->getScalarSizeInBits() <=
7949 OpI->getType()->getFPMantissaWidth())
7950 return ReplaceInstUsesWith(FI, OpI->getOperand(0));
7952 return commonCastTransforms(FI);
7955 Instruction *InstCombiner::visitUIToFP(CastInst &CI) {
7956 return commonCastTransforms(CI);
7959 Instruction *InstCombiner::visitSIToFP(CastInst &CI) {
7960 return commonCastTransforms(CI);
7963 Instruction *InstCombiner::visitPtrToInt(PtrToIntInst &CI) {
7964 // If the destination integer type is smaller than the intptr_t type for
7965 // this target, do a ptrtoint to intptr_t then do a trunc. This allows the
7966 // trunc to be exposed to other transforms. Don't do this for extending
7967 // ptrtoint's, because we don't know if the target sign or zero extends its
7970 CI.getType()->getScalarSizeInBits() < TD->getPointerSizeInBits()) {
7971 Value *P = Builder->CreatePtrToInt(CI.getOperand(0),
7972 TD->getIntPtrType(CI.getContext()),
7974 return new TruncInst(P, CI.getType());
7977 return commonPointerCastTransforms(CI);
7980 Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
7981 // If the source integer type is larger than the intptr_t type for
7982 // this target, do a trunc to the intptr_t type, then inttoptr of it. This
7983 // allows the trunc to be exposed to other transforms. Don't do this for
7984 // extending inttoptr's, because we don't know if the target sign or zero
7985 // extends to pointers.
7986 if (TD && CI.getOperand(0)->getType()->getScalarSizeInBits() >
7987 TD->getPointerSizeInBits()) {
7988 Value *P = Builder->CreateTrunc(CI.getOperand(0),
7989 TD->getIntPtrType(CI.getContext()), "tmp");
7990 return new IntToPtrInst(P, CI.getType());
7993 if (Instruction *I = commonCastTransforms(CI))
7999 Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
8000 // If the operands are integer typed then apply the integer transforms,
8001 // otherwise just apply the common ones.
8002 Value *Src = CI.getOperand(0);
8003 const Type *SrcTy = Src->getType();
8004 const Type *DestTy = CI.getType();
8006 if (isa<PointerType>(SrcTy)) {
8007 if (Instruction *I = commonPointerCastTransforms(CI))
8010 if (Instruction *Result = commonCastTransforms(CI))
8015 // Get rid of casts from one type to the same type. These are useless and can
8016 // be replaced by the operand.
8017 if (DestTy == Src->getType())
8018 return ReplaceInstUsesWith(CI, Src);
8020 if (const PointerType *DstPTy = dyn_cast<PointerType>(DestTy)) {
8021 const PointerType *SrcPTy = cast<PointerType>(SrcTy);
8022 const Type *DstElTy = DstPTy->getElementType();
8023 const Type *SrcElTy = SrcPTy->getElementType();
8025 // If the address spaces don't match, don't eliminate the bitcast, which is
8026 // required for changing types.
8027 if (SrcPTy->getAddressSpace() != DstPTy->getAddressSpace())
8030 // If we are casting a alloca to a pointer to a type of the same
8031 // size, rewrite the allocation instruction to allocate the "right" type.
8032 // There is no need to modify malloc calls because it is their bitcast that
8033 // needs to be cleaned up.
8034 if (AllocaInst *AI = dyn_cast<AllocaInst>(Src))
8035 if (Instruction *V = PromoteCastOfAllocation(CI, *AI))
8038 // If the source and destination are pointers, and this cast is equivalent
8039 // to a getelementptr X, 0, 0, 0... turn it into the appropriate gep.
8040 // This can enhance SROA and other transforms that want type-safe pointers.
8041 Constant *ZeroUInt =
8042 Constant::getNullValue(Type::getInt32Ty(CI.getContext()));
8043 unsigned NumZeros = 0;
8044 while (SrcElTy != DstElTy &&
8045 isa<CompositeType>(SrcElTy) && !isa<PointerType>(SrcElTy) &&
8046 SrcElTy->getNumContainedTypes() /* not "{}" */) {
8047 SrcElTy = cast<CompositeType>(SrcElTy)->getTypeAtIndex(ZeroUInt);
8051 // If we found a path from the src to dest, create the getelementptr now.
8052 if (SrcElTy == DstElTy) {
8053 SmallVector<Value*, 8> Idxs(NumZeros+1, ZeroUInt);
8054 return GetElementPtrInst::CreateInBounds(Src, Idxs.begin(), Idxs.end(),"",
8055 ((Instruction*) NULL));
8059 if (const VectorType *DestVTy = dyn_cast<VectorType>(DestTy)) {
8060 if (DestVTy->getNumElements() == 1) {
8061 if (!isa<VectorType>(SrcTy)) {
8062 Value *Elem = Builder->CreateBitCast(Src, DestVTy->getElementType());
8063 return InsertElementInst::Create(UndefValue::get(DestTy), Elem,
8064 Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
8066 // FIXME: Canonicalize bitcast(insertelement) -> insertelement(bitcast)
8070 if (const VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy)) {
8071 if (SrcVTy->getNumElements() == 1) {
8072 if (!isa<VectorType>(DestTy)) {
8074 Builder->CreateExtractElement(Src,
8075 Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
8076 return CastInst::Create(Instruction::BitCast, Elem, DestTy);
8081 if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(Src)) {
8082 if (SVI->hasOneUse()) {
8083 // Okay, we have (bitconvert (shuffle ..)). Check to see if this is
8084 // a bitconvert to a vector with the same # elts.
8085 if (isa<VectorType>(DestTy) &&
8086 cast<VectorType>(DestTy)->getNumElements() ==
8087 SVI->getType()->getNumElements() &&
8088 SVI->getType()->getNumElements() ==
8089 cast<VectorType>(SVI->getOperand(0)->getType())->getNumElements()) {
8091 // If either of the operands is a cast from CI.getType(), then
8092 // evaluating the shuffle in the casted destination's type will allow
8093 // us to eliminate at least one cast.
8094 if (((Tmp = dyn_cast<CastInst>(SVI->getOperand(0))) &&
8095 Tmp->getOperand(0)->getType() == DestTy) ||
8096 ((Tmp = dyn_cast<CastInst>(SVI->getOperand(1))) &&
8097 Tmp->getOperand(0)->getType() == DestTy)) {
8098 Value *LHS = Builder->CreateBitCast(SVI->getOperand(0), DestTy);
8099 Value *RHS = Builder->CreateBitCast(SVI->getOperand(1), DestTy);
8100 // Return a new shuffle vector. Use the same element ID's, as we
8101 // know the vector types match #elts.
8102 return new ShuffleVectorInst(LHS, RHS, SVI->getOperand(2));
8110 /// GetSelectFoldableOperands - We want to turn code that looks like this:
8112 /// %D = select %cond, %C, %A
8114 /// %C = select %cond, %B, 0
8117 /// Assuming that the specified instruction is an operand to the select, return
8118 /// a bitmask indicating which operands of this instruction are foldable if they
8119 /// equal the other incoming value of the select.
8121 static unsigned GetSelectFoldableOperands(Instruction *I) {
8122 switch (I->getOpcode()) {
8123 case Instruction::Add:
8124 case Instruction::Mul:
8125 case Instruction::And:
8126 case Instruction::Or:
8127 case Instruction::Xor:
8128 return 3; // Can fold through either operand.
8129 case Instruction::Sub: // Can only fold on the amount subtracted.
8130 case Instruction::Shl: // Can only fold on the shift amount.
8131 case Instruction::LShr:
8132 case Instruction::AShr:
8135 return 0; // Cannot fold
8139 /// GetSelectFoldableConstant - For the same transformation as the previous
8140 /// function, return the identity constant that goes into the select.
8141 static Constant *GetSelectFoldableConstant(Instruction *I) {
8142 switch (I->getOpcode()) {
8143 default: llvm_unreachable("This cannot happen!");
8144 case Instruction::Add:
8145 case Instruction::Sub:
8146 case Instruction::Or:
8147 case Instruction::Xor:
8148 case Instruction::Shl:
8149 case Instruction::LShr:
8150 case Instruction::AShr:
8151 return Constant::getNullValue(I->getType());
8152 case Instruction::And:
8153 return Constant::getAllOnesValue(I->getType());
8154 case Instruction::Mul:
8155 return ConstantInt::get(I->getType(), 1);
8159 /// FoldSelectOpOp - Here we have (select c, TI, FI), and we know that TI and FI
8160 /// have the same opcode and only one use each. Try to simplify this.
8161 Instruction *InstCombiner::FoldSelectOpOp(SelectInst &SI, Instruction *TI,
8163 if (TI->getNumOperands() == 1) {
8164 // If this is a non-volatile load or a cast from the same type,
8167 if (TI->getOperand(0)->getType() != FI->getOperand(0)->getType())
8170 return 0; // unknown unary op.
8173 // Fold this by inserting a select from the input values.
8174 SelectInst *NewSI = SelectInst::Create(SI.getCondition(), TI->getOperand(0),
8175 FI->getOperand(0), SI.getName()+".v");
8176 InsertNewInstBefore(NewSI, SI);
8177 return CastInst::Create(Instruction::CastOps(TI->getOpcode()), NewSI,
8181 // Only handle binary operators here.
8182 if (!isa<BinaryOperator>(TI))
8185 // Figure out if the operations have any operands in common.
8186 Value *MatchOp, *OtherOpT, *OtherOpF;
8188 if (TI->getOperand(0) == FI->getOperand(0)) {
8189 MatchOp = TI->getOperand(0);
8190 OtherOpT = TI->getOperand(1);
8191 OtherOpF = FI->getOperand(1);
8192 MatchIsOpZero = true;
8193 } else if (TI->getOperand(1) == FI->getOperand(1)) {
8194 MatchOp = TI->getOperand(1);
8195 OtherOpT = TI->getOperand(0);
8196 OtherOpF = FI->getOperand(0);
8197 MatchIsOpZero = false;
8198 } else if (!TI->isCommutative()) {
8200 } else if (TI->getOperand(0) == FI->getOperand(1)) {
8201 MatchOp = TI->getOperand(0);
8202 OtherOpT = TI->getOperand(1);
8203 OtherOpF = FI->getOperand(0);
8204 MatchIsOpZero = true;
8205 } else if (TI->getOperand(1) == FI->getOperand(0)) {
8206 MatchOp = TI->getOperand(1);
8207 OtherOpT = TI->getOperand(0);
8208 OtherOpF = FI->getOperand(1);
8209 MatchIsOpZero = true;
8214 // If we reach here, they do have operations in common.
8215 SelectInst *NewSI = SelectInst::Create(SI.getCondition(), OtherOpT,
8216 OtherOpF, SI.getName()+".v");
8217 InsertNewInstBefore(NewSI, SI);
8219 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(TI)) {
8221 return BinaryOperator::Create(BO->getOpcode(), MatchOp, NewSI);
8223 return BinaryOperator::Create(BO->getOpcode(), NewSI, MatchOp);
8225 llvm_unreachable("Shouldn't get here");
8229 static bool isSelect01(Constant *C1, Constant *C2) {
8230 ConstantInt *C1I = dyn_cast<ConstantInt>(C1);
8233 ConstantInt *C2I = dyn_cast<ConstantInt>(C2);
8236 return (C1I->isZero() || C1I->isOne()) && (C2I->isZero() || C2I->isOne());
8239 /// FoldSelectIntoOp - Try fold the select into one of the operands to
8240 /// facilitate further optimization.
8241 Instruction *InstCombiner::FoldSelectIntoOp(SelectInst &SI, Value *TrueVal,
8243 // See the comment above GetSelectFoldableOperands for a description of the
8244 // transformation we are doing here.
8245 if (Instruction *TVI = dyn_cast<Instruction>(TrueVal)) {
8246 if (TVI->hasOneUse() && TVI->getNumOperands() == 2 &&
8247 !isa<Constant>(FalseVal)) {
8248 if (unsigned SFO = GetSelectFoldableOperands(TVI)) {
8249 unsigned OpToFold = 0;
8250 if ((SFO & 1) && FalseVal == TVI->getOperand(0)) {
8252 } else if ((SFO & 2) && FalseVal == TVI->getOperand(1)) {
8257 Constant *C = GetSelectFoldableConstant(TVI);
8258 Value *OOp = TVI->getOperand(2-OpToFold);
8259 // Avoid creating select between 2 constants unless it's selecting
8261 if (!isa<Constant>(OOp) || isSelect01(C, cast<Constant>(OOp))) {
8262 Instruction *NewSel = SelectInst::Create(SI.getCondition(), OOp, C);
8263 InsertNewInstBefore(NewSel, SI);
8264 NewSel->takeName(TVI);
8265 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(TVI))
8266 return BinaryOperator::Create(BO->getOpcode(), FalseVal, NewSel);
8267 llvm_unreachable("Unknown instruction!!");
8274 if (Instruction *FVI = dyn_cast<Instruction>(FalseVal)) {
8275 if (FVI->hasOneUse() && FVI->getNumOperands() == 2 &&
8276 !isa<Constant>(TrueVal)) {
8277 if (unsigned SFO = GetSelectFoldableOperands(FVI)) {
8278 unsigned OpToFold = 0;
8279 if ((SFO & 1) && TrueVal == FVI->getOperand(0)) {
8281 } else if ((SFO & 2) && TrueVal == FVI->getOperand(1)) {
8286 Constant *C = GetSelectFoldableConstant(FVI);
8287 Value *OOp = FVI->getOperand(2-OpToFold);
8288 // Avoid creating select between 2 constants unless it's selecting
8290 if (!isa<Constant>(OOp) || isSelect01(C, cast<Constant>(OOp))) {
8291 Instruction *NewSel = SelectInst::Create(SI.getCondition(), C, OOp);
8292 InsertNewInstBefore(NewSel, SI);
8293 NewSel->takeName(FVI);
8294 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FVI))
8295 return BinaryOperator::Create(BO->getOpcode(), TrueVal, NewSel);
8296 llvm_unreachable("Unknown instruction!!");
8306 /// visitSelectInstWithICmp - Visit a SelectInst that has an
8307 /// ICmpInst as its first operand.
8309 Instruction *InstCombiner::visitSelectInstWithICmp(SelectInst &SI,
8311 bool Changed = false;
8312 ICmpInst::Predicate Pred = ICI->getPredicate();
8313 Value *CmpLHS = ICI->getOperand(0);
8314 Value *CmpRHS = ICI->getOperand(1);
8315 Value *TrueVal = SI.getTrueValue();
8316 Value *FalseVal = SI.getFalseValue();
8318 // Check cases where the comparison is with a constant that
8319 // can be adjusted to fit the min/max idiom. We may edit ICI in
8320 // place here, so make sure the select is the only user.
8321 if (ICI->hasOneUse())
8322 if (ConstantInt *CI = dyn_cast<ConstantInt>(CmpRHS)) {
8325 case ICmpInst::ICMP_ULT:
8326 case ICmpInst::ICMP_SLT: {
8327 // X < MIN ? T : F --> F
8328 if (CI->isMinValue(Pred == ICmpInst::ICMP_SLT))
8329 return ReplaceInstUsesWith(SI, FalseVal);
8330 // X < C ? X : C-1 --> X > C-1 ? C-1 : X
8331 Constant *AdjustedRHS = SubOne(CI);
8332 if ((CmpLHS == TrueVal && AdjustedRHS == FalseVal) ||
8333 (CmpLHS == FalseVal && AdjustedRHS == TrueVal)) {
8334 Pred = ICmpInst::getSwappedPredicate(Pred);
8335 CmpRHS = AdjustedRHS;
8336 std::swap(FalseVal, TrueVal);
8337 ICI->setPredicate(Pred);
8338 ICI->setOperand(1, CmpRHS);
8339 SI.setOperand(1, TrueVal);
8340 SI.setOperand(2, FalseVal);
8345 case ICmpInst::ICMP_UGT:
8346 case ICmpInst::ICMP_SGT: {
8347 // X > MAX ? T : F --> F
8348 if (CI->isMaxValue(Pred == ICmpInst::ICMP_SGT))
8349 return ReplaceInstUsesWith(SI, FalseVal);
8350 // X > C ? X : C+1 --> X < C+1 ? C+1 : X
8351 Constant *AdjustedRHS = AddOne(CI);
8352 if ((CmpLHS == TrueVal && AdjustedRHS == FalseVal) ||
8353 (CmpLHS == FalseVal && AdjustedRHS == TrueVal)) {
8354 Pred = ICmpInst::getSwappedPredicate(Pred);
8355 CmpRHS = AdjustedRHS;
8356 std::swap(FalseVal, TrueVal);
8357 ICI->setPredicate(Pred);
8358 ICI->setOperand(1, CmpRHS);
8359 SI.setOperand(1, TrueVal);
8360 SI.setOperand(2, FalseVal);
8367 // (x <s 0) ? -1 : 0 -> ashr x, 31 -> all ones if signed
8368 // (x >s -1) ? -1 : 0 -> ashr x, 31 -> all ones if not signed
8369 CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
8370 if (match(TrueVal, m_ConstantInt<-1>()) &&
8371 match(FalseVal, m_ConstantInt<0>()))
8372 Pred = ICI->getPredicate();
8373 else if (match(TrueVal, m_ConstantInt<0>()) &&
8374 match(FalseVal, m_ConstantInt<-1>()))
8375 Pred = CmpInst::getInversePredicate(ICI->getPredicate());
8377 if (Pred != CmpInst::BAD_ICMP_PREDICATE) {
8378 // If we are just checking for a icmp eq of a single bit and zext'ing it
8379 // to an integer, then shift the bit to the appropriate place and then
8380 // cast to integer to avoid the comparison.
8381 const APInt &Op1CV = CI->getValue();
8383 // sext (x <s 0) to i32 --> x>>s31 true if signbit set.
8384 // sext (x >s -1) to i32 --> (x>>s31)^-1 true if signbit clear.
8385 if ((Pred == ICmpInst::ICMP_SLT && Op1CV == 0) ||
8386 (Pred == ICmpInst::ICMP_SGT && Op1CV.isAllOnesValue())) {
8387 Value *In = ICI->getOperand(0);
8388 Value *Sh = ConstantInt::get(In->getType(),
8389 In->getType()->getScalarSizeInBits()-1);
8390 In = InsertNewInstBefore(BinaryOperator::CreateAShr(In, Sh,
8391 In->getName()+".lobit"),
8393 if (In->getType() != SI.getType())
8394 In = CastInst::CreateIntegerCast(In, SI.getType(),
8395 true/*SExt*/, "tmp", ICI);
8397 if (Pred == ICmpInst::ICMP_SGT)
8398 In = InsertNewInstBefore(BinaryOperator::CreateNot(In,
8399 In->getName()+".not"), *ICI);
8401 return ReplaceInstUsesWith(SI, In);
8406 if (CmpLHS == TrueVal && CmpRHS == FalseVal) {
8407 // Transform (X == Y) ? X : Y -> Y
8408 if (Pred == ICmpInst::ICMP_EQ)
8409 return ReplaceInstUsesWith(SI, FalseVal);
8410 // Transform (X != Y) ? X : Y -> X
8411 if (Pred == ICmpInst::ICMP_NE)
8412 return ReplaceInstUsesWith(SI, TrueVal);
8413 /// NOTE: if we wanted to, this is where to detect integer MIN/MAX
8415 } else if (CmpLHS == FalseVal && CmpRHS == TrueVal) {
8416 // Transform (X == Y) ? Y : X -> X
8417 if (Pred == ICmpInst::ICMP_EQ)
8418 return ReplaceInstUsesWith(SI, FalseVal);
8419 // Transform (X != Y) ? Y : X -> Y
8420 if (Pred == ICmpInst::ICMP_NE)
8421 return ReplaceInstUsesWith(SI, TrueVal);
8422 /// NOTE: if we wanted to, this is where to detect integer MIN/MAX
8424 return Changed ? &SI : 0;
8428 /// CanSelectOperandBeMappingIntoPredBlock - SI is a select whose condition is a
8429 /// PHI node (but the two may be in different blocks). See if the true/false
8430 /// values (V) are live in all of the predecessor blocks of the PHI. For
8431 /// example, cases like this cannot be mapped:
8433 /// X = phi [ C1, BB1], [C2, BB2]
8435 /// Z = select X, Y, 0
8437 /// because Y is not live in BB1/BB2.
8439 static bool CanSelectOperandBeMappingIntoPredBlock(const Value *V,
8440 const SelectInst &SI) {
8441 // If the value is a non-instruction value like a constant or argument, it
8442 // can always be mapped.
8443 const Instruction *I = dyn_cast<Instruction>(V);
8444 if (I == 0) return true;
8446 // If V is a PHI node defined in the same block as the condition PHI, we can
8447 // map the arguments.
8448 const PHINode *CondPHI = cast<PHINode>(SI.getCondition());
8450 if (const PHINode *VP = dyn_cast<PHINode>(I))
8451 if (VP->getParent() == CondPHI->getParent())
8454 // Otherwise, if the PHI and select are defined in the same block and if V is
8455 // defined in a different block, then we can transform it.
8456 if (SI.getParent() == CondPHI->getParent() &&
8457 I->getParent() != CondPHI->getParent())
8460 // Otherwise we have a 'hard' case and we can't tell without doing more
8461 // detailed dominator based analysis, punt.
8465 /// FoldSPFofSPF - We have an SPF (e.g. a min or max) of an SPF of the form:
8466 /// SPF2(SPF1(A, B), C)
8467 Instruction *InstCombiner::FoldSPFofSPF(Instruction *Inner,
8468 SelectPatternFlavor SPF1,
8471 SelectPatternFlavor SPF2, Value *C) {
8472 if (C == A || C == B) {
8473 // MAX(MAX(A, B), B) -> MAX(A, B)
8474 // MIN(MIN(a, b), a) -> MIN(a, b)
8476 return ReplaceInstUsesWith(Outer, Inner);
8478 // MAX(MIN(a, b), a) -> a
8479 // MIN(MAX(a, b), a) -> a
8480 if ((SPF1 == SPF_SMIN && SPF2 == SPF_SMAX) ||
8481 (SPF1 == SPF_SMAX && SPF2 == SPF_SMIN) ||
8482 (SPF1 == SPF_UMIN && SPF2 == SPF_UMAX) ||
8483 (SPF1 == SPF_UMAX && SPF2 == SPF_UMIN))
8484 return ReplaceInstUsesWith(Outer, C);
8487 // TODO: MIN(MIN(A, 23), 97)
8494 Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
8495 Value *CondVal = SI.getCondition();
8496 Value *TrueVal = SI.getTrueValue();
8497 Value *FalseVal = SI.getFalseValue();
8499 // select true, X, Y -> X
8500 // select false, X, Y -> Y
8501 if (ConstantInt *C = dyn_cast<ConstantInt>(CondVal))
8502 return ReplaceInstUsesWith(SI, C->getZExtValue() ? TrueVal : FalseVal);
8504 // select C, X, X -> X
8505 if (TrueVal == FalseVal)
8506 return ReplaceInstUsesWith(SI, TrueVal);
8508 if (isa<UndefValue>(TrueVal)) // select C, undef, X -> X
8509 return ReplaceInstUsesWith(SI, FalseVal);
8510 if (isa<UndefValue>(FalseVal)) // select C, X, undef -> X
8511 return ReplaceInstUsesWith(SI, TrueVal);
8512 if (isa<UndefValue>(CondVal)) { // select undef, X, Y -> X or Y
8513 if (isa<Constant>(TrueVal))
8514 return ReplaceInstUsesWith(SI, TrueVal);
8516 return ReplaceInstUsesWith(SI, FalseVal);
8519 if (SI.getType() == Type::getInt1Ty(SI.getContext())) {
8520 if (ConstantInt *C = dyn_cast<ConstantInt>(TrueVal)) {
8521 if (C->getZExtValue()) {
8522 // Change: A = select B, true, C --> A = or B, C
8523 return BinaryOperator::CreateOr(CondVal, FalseVal);
8525 // Change: A = select B, false, C --> A = and !B, C
8527 InsertNewInstBefore(BinaryOperator::CreateNot(CondVal,
8528 "not."+CondVal->getName()), SI);
8529 return BinaryOperator::CreateAnd(NotCond, FalseVal);
8531 } else if (ConstantInt *C = dyn_cast<ConstantInt>(FalseVal)) {
8532 if (C->getZExtValue() == false) {
8533 // Change: A = select B, C, false --> A = and B, C
8534 return BinaryOperator::CreateAnd(CondVal, TrueVal);
8536 // Change: A = select B, C, true --> A = or !B, C
8538 InsertNewInstBefore(BinaryOperator::CreateNot(CondVal,
8539 "not."+CondVal->getName()), SI);
8540 return BinaryOperator::CreateOr(NotCond, TrueVal);
8544 // select a, b, a -> a&b
8545 // select a, a, b -> a|b
8546 if (CondVal == TrueVal)
8547 return BinaryOperator::CreateOr(CondVal, FalseVal);
8548 else if (CondVal == FalseVal)
8549 return BinaryOperator::CreateAnd(CondVal, TrueVal);
8552 // Selecting between two integer constants?
8553 if (ConstantInt *TrueValC = dyn_cast<ConstantInt>(TrueVal))
8554 if (ConstantInt *FalseValC = dyn_cast<ConstantInt>(FalseVal)) {
8555 // select C, 1, 0 -> zext C to int
8556 if (FalseValC->isZero() && TrueValC->getValue() == 1) {
8557 return CastInst::Create(Instruction::ZExt, CondVal, SI.getType());
8558 } else if (TrueValC->isZero() && FalseValC->getValue() == 1) {
8559 // select C, 0, 1 -> zext !C to int
8561 InsertNewInstBefore(BinaryOperator::CreateNot(CondVal,
8562 "not."+CondVal->getName()), SI);
8563 return CastInst::Create(Instruction::ZExt, NotCond, SI.getType());
8566 if (ICmpInst *IC = dyn_cast<ICmpInst>(SI.getCondition())) {
8567 // If one of the constants is zero (we know they can't both be) and we
8568 // have an icmp instruction with zero, and we have an 'and' with the
8569 // non-constant value, eliminate this whole mess. This corresponds to
8570 // cases like this: ((X & 27) ? 27 : 0)
8571 if (TrueValC->isZero() || FalseValC->isZero())
8572 if (IC->isEquality() && isa<ConstantInt>(IC->getOperand(1)) &&
8573 cast<Constant>(IC->getOperand(1))->isNullValue())
8574 if (Instruction *ICA = dyn_cast<Instruction>(IC->getOperand(0)))
8575 if (ICA->getOpcode() == Instruction::And &&
8576 isa<ConstantInt>(ICA->getOperand(1)) &&
8577 (ICA->getOperand(1) == TrueValC ||
8578 ICA->getOperand(1) == FalseValC) &&
8579 isOneBitSet(cast<ConstantInt>(ICA->getOperand(1)))) {
8580 // Okay, now we know that everything is set up, we just don't
8581 // know whether we have a icmp_ne or icmp_eq and whether the
8582 // true or false val is the zero.
8583 bool ShouldNotVal = !TrueValC->isZero();
8584 ShouldNotVal ^= IC->getPredicate() == ICmpInst::ICMP_NE;
8587 V = InsertNewInstBefore(BinaryOperator::Create(
8588 Instruction::Xor, V, ICA->getOperand(1)), SI);
8589 return ReplaceInstUsesWith(SI, V);
8594 // See if we are selecting two values based on a comparison of the two values.
8595 if (FCmpInst *FCI = dyn_cast<FCmpInst>(CondVal)) {
8596 if (FCI->getOperand(0) == TrueVal && FCI->getOperand(1) == FalseVal) {
8597 // Transform (X == Y) ? X : Y -> Y
8598 if (FCI->getPredicate() == FCmpInst::FCMP_OEQ) {
8599 // This is not safe in general for floating point:
8600 // consider X== -0, Y== +0.
8601 // It becomes safe if either operand is a nonzero constant.
8602 ConstantFP *CFPt, *CFPf;
8603 if (((CFPt = dyn_cast<ConstantFP>(TrueVal)) &&
8604 !CFPt->getValueAPF().isZero()) ||
8605 ((CFPf = dyn_cast<ConstantFP>(FalseVal)) &&
8606 !CFPf->getValueAPF().isZero()))
8607 return ReplaceInstUsesWith(SI, FalseVal);
8609 // Transform (X != Y) ? X : Y -> X
8610 if (FCI->getPredicate() == FCmpInst::FCMP_ONE)
8611 return ReplaceInstUsesWith(SI, TrueVal);
8612 // NOTE: if we wanted to, this is where to detect MIN/MAX
8614 } else if (FCI->getOperand(0) == FalseVal && FCI->getOperand(1) == TrueVal){
8615 // Transform (X == Y) ? Y : X -> X
8616 if (FCI->getPredicate() == FCmpInst::FCMP_OEQ) {
8617 // This is not safe in general for floating point:
8618 // consider X== -0, Y== +0.
8619 // It becomes safe if either operand is a nonzero constant.
8620 ConstantFP *CFPt, *CFPf;
8621 if (((CFPt = dyn_cast<ConstantFP>(TrueVal)) &&
8622 !CFPt->getValueAPF().isZero()) ||
8623 ((CFPf = dyn_cast<ConstantFP>(FalseVal)) &&
8624 !CFPf->getValueAPF().isZero()))
8625 return ReplaceInstUsesWith(SI, FalseVal);
8627 // Transform (X != Y) ? Y : X -> Y
8628 if (FCI->getPredicate() == FCmpInst::FCMP_ONE)
8629 return ReplaceInstUsesWith(SI, TrueVal);
8630 // NOTE: if we wanted to, this is where to detect MIN/MAX
8632 // NOTE: if we wanted to, this is where to detect ABS
8635 // See if we are selecting two values based on a comparison of the two values.
8636 if (ICmpInst *ICI = dyn_cast<ICmpInst>(CondVal))
8637 if (Instruction *Result = visitSelectInstWithICmp(SI, ICI))
8640 if (Instruction *TI = dyn_cast<Instruction>(TrueVal))
8641 if (Instruction *FI = dyn_cast<Instruction>(FalseVal))
8642 if (TI->hasOneUse() && FI->hasOneUse()) {
8643 Instruction *AddOp = 0, *SubOp = 0;
8645 // Turn (select C, (op X, Y), (op X, Z)) -> (op X, (select C, Y, Z))
8646 if (TI->getOpcode() == FI->getOpcode())
8647 if (Instruction *IV = FoldSelectOpOp(SI, TI, FI))
8650 // Turn select C, (X+Y), (X-Y) --> (X+(select C, Y, (-Y))). This is
8651 // even legal for FP.
8652 if ((TI->getOpcode() == Instruction::Sub &&
8653 FI->getOpcode() == Instruction::Add) ||
8654 (TI->getOpcode() == Instruction::FSub &&
8655 FI->getOpcode() == Instruction::FAdd)) {
8656 AddOp = FI; SubOp = TI;
8657 } else if ((FI->getOpcode() == Instruction::Sub &&
8658 TI->getOpcode() == Instruction::Add) ||
8659 (FI->getOpcode() == Instruction::FSub &&
8660 TI->getOpcode() == Instruction::FAdd)) {
8661 AddOp = TI; SubOp = FI;
8665 Value *OtherAddOp = 0;
8666 if (SubOp->getOperand(0) == AddOp->getOperand(0)) {
8667 OtherAddOp = AddOp->getOperand(1);
8668 } else if (SubOp->getOperand(0) == AddOp->getOperand(1)) {
8669 OtherAddOp = AddOp->getOperand(0);
8673 // So at this point we know we have (Y -> OtherAddOp):
8674 // select C, (add X, Y), (sub X, Z)
8675 Value *NegVal; // Compute -Z
8676 if (Constant *C = dyn_cast<Constant>(SubOp->getOperand(1))) {
8677 NegVal = ConstantExpr::getNeg(C);
8679 NegVal = InsertNewInstBefore(
8680 BinaryOperator::CreateNeg(SubOp->getOperand(1),
8684 Value *NewTrueOp = OtherAddOp;
8685 Value *NewFalseOp = NegVal;
8687 std::swap(NewTrueOp, NewFalseOp);
8688 Instruction *NewSel =
8689 SelectInst::Create(CondVal, NewTrueOp,
8690 NewFalseOp, SI.getName() + ".p");
8692 NewSel = InsertNewInstBefore(NewSel, SI);
8693 return BinaryOperator::CreateAdd(SubOp->getOperand(0), NewSel);
8698 // See if we can fold the select into one of our operands.
8699 if (SI.getType()->isInteger()) {
8700 if (Instruction *FoldI = FoldSelectIntoOp(SI, TrueVal, FalseVal))
8703 // MAX(MAX(a, b), a) -> MAX(a, b)
8704 // MIN(MIN(a, b), a) -> MIN(a, b)
8705 // MAX(MIN(a, b), a) -> a
8706 // MIN(MAX(a, b), a) -> a
8707 Value *LHS, *RHS, *LHS2, *RHS2;
8708 if (SelectPatternFlavor SPF = MatchSelectPattern(&SI, LHS, RHS)) {
8709 if (SelectPatternFlavor SPF2 = MatchSelectPattern(LHS, LHS2, RHS2))
8710 if (Instruction *R = FoldSPFofSPF(cast<Instruction>(LHS),SPF2,LHS2,RHS2,
8713 if (SelectPatternFlavor SPF2 = MatchSelectPattern(RHS, LHS2, RHS2))
8714 if (Instruction *R = FoldSPFofSPF(cast<Instruction>(RHS),SPF2,LHS2,RHS2,
8720 // ABS(-X) -> ABS(X)
8721 // ABS(ABS(X)) -> ABS(X)
8724 // See if we can fold the select into a phi node if the condition is a select.
8725 if (isa<PHINode>(SI.getCondition()))
8726 // The true/false values have to be live in the PHI predecessor's blocks.
8727 if (CanSelectOperandBeMappingIntoPredBlock(TrueVal, SI) &&
8728 CanSelectOperandBeMappingIntoPredBlock(FalseVal, SI))
8729 if (Instruction *NV = FoldOpIntoPhi(SI))
8732 if (BinaryOperator::isNot(CondVal)) {
8733 SI.setOperand(0, BinaryOperator::getNotArgument(CondVal));
8734 SI.setOperand(1, FalseVal);
8735 SI.setOperand(2, TrueVal);
8742 /// EnforceKnownAlignment - If the specified pointer points to an object that
8743 /// we control, modify the object's alignment to PrefAlign. This isn't
8744 /// often possible though. If alignment is important, a more reliable approach
8745 /// is to simply align all global variables and allocation instructions to
8746 /// their preferred alignment from the beginning.
8748 static unsigned EnforceKnownAlignment(Value *V,
8749 unsigned Align, unsigned PrefAlign) {
8751 User *U = dyn_cast<User>(V);
8752 if (!U) return Align;
8754 switch (Operator::getOpcode(U)) {
8756 case Instruction::BitCast:
8757 return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
8758 case Instruction::GetElementPtr: {
8759 // If all indexes are zero, it is just the alignment of the base pointer.
8760 bool AllZeroOperands = true;
8761 for (User::op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e; ++i)
8762 if (!isa<Constant>(*i) ||
8763 !cast<Constant>(*i)->isNullValue()) {
8764 AllZeroOperands = false;
8768 if (AllZeroOperands) {
8769 // Treat this like a bitcast.
8770 return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
8776 if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
8777 // If there is a large requested alignment and we can, bump up the alignment
8779 if (!GV->isDeclaration()) {
8780 if (GV->getAlignment() >= PrefAlign)
8781 Align = GV->getAlignment();
8783 GV->setAlignment(PrefAlign);
8787 } else if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
8788 // If there is a requested alignment and if this is an alloca, round up.
8789 if (AI->getAlignment() >= PrefAlign)
8790 Align = AI->getAlignment();
8792 AI->setAlignment(PrefAlign);
8800 /// GetOrEnforceKnownAlignment - If the specified pointer has an alignment that
8801 /// we can determine, return it, otherwise return 0. If PrefAlign is specified,
8802 /// and it is more than the alignment of the ultimate object, see if we can
8803 /// increase the alignment of the ultimate object, making this check succeed.
8804 unsigned InstCombiner::GetOrEnforceKnownAlignment(Value *V,
8805 unsigned PrefAlign) {
8806 unsigned BitWidth = TD ? TD->getTypeSizeInBits(V->getType()) :
8807 sizeof(PrefAlign) * CHAR_BIT;
8808 APInt Mask = APInt::getAllOnesValue(BitWidth);
8809 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
8810 ComputeMaskedBits(V, Mask, KnownZero, KnownOne);
8811 unsigned TrailZ = KnownZero.countTrailingOnes();
8812 unsigned Align = 1u << std::min(BitWidth - 1, TrailZ);
8814 if (PrefAlign > Align)
8815 Align = EnforceKnownAlignment(V, Align, PrefAlign);
8817 // We don't need to make any adjustment.
8821 Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
8822 unsigned DstAlign = GetOrEnforceKnownAlignment(MI->getOperand(1));
8823 unsigned SrcAlign = GetOrEnforceKnownAlignment(MI->getOperand(2));
8824 unsigned MinAlign = std::min(DstAlign, SrcAlign);
8825 unsigned CopyAlign = MI->getAlignment();
8827 if (CopyAlign < MinAlign) {
8828 MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
8833 // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
8835 ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getOperand(3));
8836 if (MemOpLength == 0) return 0;
8838 // Source and destination pointer types are always "i8*" for intrinsic. See
8839 // if the size is something we can handle with a single primitive load/store.
8840 // A single load+store correctly handles overlapping memory in the memmove
8842 unsigned Size = MemOpLength->getZExtValue();
8843 if (Size == 0) return MI; // Delete this mem transfer.
8845 if (Size > 8 || (Size&(Size-1)))
8846 return 0; // If not 1/2/4/8 bytes, exit.
8848 // Use an integer load+store unless we can find something better.
8850 PointerType::getUnqual(IntegerType::get(MI->getContext(), Size<<3));
8852 // Memcpy forces the use of i8* for the source and destination. That means
8853 // that if you're using memcpy to move one double around, you'll get a cast
8854 // from double* to i8*. We'd much rather use a double load+store rather than
8855 // an i64 load+store, here because this improves the odds that the source or
8856 // dest address will be promotable. See if we can find a better type than the
8857 // integer datatype.
8858 if (Value *Op = getBitCastOperand(MI->getOperand(1))) {
8859 const Type *SrcETy = cast<PointerType>(Op->getType())->getElementType();
8860 if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
8861 // The SrcETy might be something like {{{double}}} or [1 x double]. Rip
8862 // down through these levels if so.
8863 while (!SrcETy->isSingleValueType()) {
8864 if (const StructType *STy = dyn_cast<StructType>(SrcETy)) {
8865 if (STy->getNumElements() == 1)
8866 SrcETy = STy->getElementType(0);
8869 } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
8870 if (ATy->getNumElements() == 1)
8871 SrcETy = ATy->getElementType();
8878 if (SrcETy->isSingleValueType())
8879 NewPtrTy = PointerType::getUnqual(SrcETy);
8884 // If the memcpy/memmove provides better alignment info than we can
8886 SrcAlign = std::max(SrcAlign, CopyAlign);
8887 DstAlign = std::max(DstAlign, CopyAlign);
8889 Value *Src = Builder->CreateBitCast(MI->getOperand(2), NewPtrTy);
8890 Value *Dest = Builder->CreateBitCast(MI->getOperand(1), NewPtrTy);
8891 Instruction *L = new LoadInst(Src, "tmp", false, SrcAlign);
8892 InsertNewInstBefore(L, *MI);
8893 InsertNewInstBefore(new StoreInst(L, Dest, false, DstAlign), *MI);
8895 // Set the size of the copy to 0, it will be deleted on the next iteration.
8896 MI->setOperand(3, Constant::getNullValue(MemOpLength->getType()));
8900 Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
8901 unsigned Alignment = GetOrEnforceKnownAlignment(MI->getDest());
8902 if (MI->getAlignment() < Alignment) {
8903 MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
8908 // Extract the length and alignment and fill if they are constant.
8909 ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
8910 ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
8911 if (!LenC || !FillC || FillC->getType() != Type::getInt8Ty(MI->getContext()))
8913 uint64_t Len = LenC->getZExtValue();
8914 Alignment = MI->getAlignment();
8916 // If the length is zero, this is a no-op
8917 if (Len == 0) return MI; // memset(d,c,0,a) -> noop
8919 // memset(s,c,n) -> store s, c (for n=1,2,4,8)
8920 if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
8921 const Type *ITy = IntegerType::get(MI->getContext(), Len*8); // n=1 -> i8.
8923 Value *Dest = MI->getDest();
8924 Dest = Builder->CreateBitCast(Dest, PointerType::getUnqual(ITy));
8926 // Alignment 0 is identity for alignment 1 for memset, but not store.
8927 if (Alignment == 0) Alignment = 1;
8929 // Extract the fill value and store.
8930 uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
8931 InsertNewInstBefore(new StoreInst(ConstantInt::get(ITy, Fill),
8932 Dest, false, Alignment), *MI);
8934 // Set the size of the copy to 0, it will be deleted on the next iteration.
8935 MI->setLength(Constant::getNullValue(LenC->getType()));
8943 /// visitCallInst - CallInst simplification. This mostly only handles folding
8944 /// of intrinsic instructions. For normal calls, it allows visitCallSite to do
8945 /// the heavy lifting.
8947 Instruction *InstCombiner::visitCallInst(CallInst &CI) {
8948 if (isFreeCall(&CI))
8949 return visitFree(CI);
8951 // If the caller function is nounwind, mark the call as nounwind, even if the
8953 if (CI.getParent()->getParent()->doesNotThrow() &&
8954 !CI.doesNotThrow()) {
8955 CI.setDoesNotThrow();
8959 IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
8960 if (!II) return visitCallSite(&CI);
8962 // Intrinsics cannot occur in an invoke, so handle them here instead of in
8964 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
8965 bool Changed = false;
8967 // memmove/cpy/set of zero bytes is a noop.
8968 if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
8969 if (NumBytes->isNullValue()) return EraseInstFromFunction(CI);
8971 if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
8972 if (CI->getZExtValue() == 1) {
8973 // Replace the instruction with just byte operations. We would
8974 // transform other cases to loads/stores, but we don't know if
8975 // alignment is sufficient.
8979 // If we have a memmove and the source operation is a constant global,
8980 // then the source and dest pointers can't alias, so we can change this
8981 // into a call to memcpy.
8982 if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
8983 if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
8984 if (GVSrc->isConstant()) {
8985 Module *M = CI.getParent()->getParent()->getParent();
8986 Intrinsic::ID MemCpyID = Intrinsic::memcpy;
8988 Tys[0] = CI.getOperand(3)->getType();
8990 Intrinsic::getDeclaration(M, MemCpyID, Tys, 1));
8995 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
8996 // memmove(x,x,size) -> noop.
8997 if (MTI->getSource() == MTI->getDest())
8998 return EraseInstFromFunction(CI);
9001 // If we can determine a pointer alignment that is bigger than currently
9002 // set, update the alignment.
9003 if (isa<MemTransferInst>(MI)) {
9004 if (Instruction *I = SimplifyMemTransfer(MI))
9006 } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
9007 if (Instruction *I = SimplifyMemSet(MSI))
9011 if (Changed) return II;
9014 switch (II->getIntrinsicID()) {
9016 case Intrinsic::bswap:
9017 // bswap(bswap(x)) -> x
9018 if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getOperand(1)))
9019 if (Operand->getIntrinsicID() == Intrinsic::bswap)
9020 return ReplaceInstUsesWith(CI, Operand->getOperand(1));
9022 // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
9023 if (TruncInst *TI = dyn_cast<TruncInst>(II->getOperand(1))) {
9024 if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(TI->getOperand(0)))
9025 if (Operand->getIntrinsicID() == Intrinsic::bswap) {
9026 unsigned C = Operand->getType()->getPrimitiveSizeInBits() -
9027 TI->getType()->getPrimitiveSizeInBits();
9028 Value *CV = ConstantInt::get(Operand->getType(), C);
9029 Value *V = Builder->CreateLShr(Operand->getOperand(1), CV);
9030 return new TruncInst(V, TI->getType());
9035 case Intrinsic::powi:
9036 if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getOperand(2))) {
9037 // powi(x, 0) -> 1.0
9038 if (Power->isZero())
9039 return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
9042 return ReplaceInstUsesWith(CI, II->getOperand(1));
9043 // powi(x, -1) -> 1/x
9044 if (Power->isAllOnesValue())
9045 return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
9050 case Intrinsic::uadd_with_overflow: {
9051 Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
9052 const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
9053 uint32_t BitWidth = IT->getBitWidth();
9054 APInt Mask = APInt::getSignBit(BitWidth);
9055 APInt LHSKnownZero(BitWidth, 0);
9056 APInt LHSKnownOne(BitWidth, 0);
9057 ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
9058 bool LHSKnownNegative = LHSKnownOne[BitWidth - 1];
9059 bool LHSKnownPositive = LHSKnownZero[BitWidth - 1];
9061 if (LHSKnownNegative || LHSKnownPositive) {
9062 APInt RHSKnownZero(BitWidth, 0);
9063 APInt RHSKnownOne(BitWidth, 0);
9064 ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);
9065 bool RHSKnownNegative = RHSKnownOne[BitWidth - 1];
9066 bool RHSKnownPositive = RHSKnownZero[BitWidth - 1];
9067 if (LHSKnownNegative && RHSKnownNegative) {
9068 // The sign bit is set in both cases: this MUST overflow.
9069 // Create a simple add instruction, and insert it into the struct.
9070 Instruction *Add = BinaryOperator::CreateAdd(LHS, RHS, "", &CI);
9073 UndefValue::get(LHS->getType()),ConstantInt::getTrue(II->getContext())
9075 Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
9076 return InsertValueInst::Create(Struct, Add, 0);
9079 if (LHSKnownPositive && RHSKnownPositive) {
9080 // The sign bit is clear in both cases: this CANNOT overflow.
9081 // Create a simple add instruction, and insert it into the struct.
9082 Instruction *Add = BinaryOperator::CreateNUWAdd(LHS, RHS, "", &CI);
9085 UndefValue::get(LHS->getType()),
9086 ConstantInt::getFalse(II->getContext())
9088 Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
9089 return InsertValueInst::Create(Struct, Add, 0);
9093 // FALL THROUGH uadd into sadd
9094 case Intrinsic::sadd_with_overflow:
9095 // Canonicalize constants into the RHS.
9096 if (isa<Constant>(II->getOperand(1)) &&
9097 !isa<Constant>(II->getOperand(2))) {
9098 Value *LHS = II->getOperand(1);
9099 II->setOperand(1, II->getOperand(2));
9100 II->setOperand(2, LHS);
9104 // X + undef -> undef
9105 if (isa<UndefValue>(II->getOperand(2)))
9106 return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));
9108 if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getOperand(2))) {
9109 // X + 0 -> {X, false}
9110 if (RHS->isZero()) {
9112 UndefValue::get(II->getOperand(0)->getType()),
9113 ConstantInt::getFalse(II->getContext())
9115 Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
9116 return InsertValueInst::Create(Struct, II->getOperand(1), 0);
9120 case Intrinsic::usub_with_overflow:
9121 case Intrinsic::ssub_with_overflow:
9122 // undef - X -> undef
9123 // X - undef -> undef
9124 if (isa<UndefValue>(II->getOperand(1)) ||
9125 isa<UndefValue>(II->getOperand(2)))
9126 return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));
9128 if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getOperand(2))) {
9129 // X - 0 -> {X, false}
9130 if (RHS->isZero()) {
9132 UndefValue::get(II->getOperand(1)->getType()),
9133 ConstantInt::getFalse(II->getContext())
9135 Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
9136 return InsertValueInst::Create(Struct, II->getOperand(1), 0);
9140 case Intrinsic::umul_with_overflow:
9141 case Intrinsic::smul_with_overflow:
9142 // Canonicalize constants into the RHS.
9143 if (isa<Constant>(II->getOperand(1)) &&
9144 !isa<Constant>(II->getOperand(2))) {
9145 Value *LHS = II->getOperand(1);
9146 II->setOperand(1, II->getOperand(2));
9147 II->setOperand(2, LHS);
9151 // X * undef -> undef
9152 if (isa<UndefValue>(II->getOperand(2)))
9153 return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));
9155 if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getOperand(2))) {
9156 // X*0 -> {0, false}
9158 return ReplaceInstUsesWith(CI, Constant::getNullValue(II->getType()));
9160 // X * 1 -> {X, false}
9161 if (RHSI->equalsInt(1)) {
9163 UndefValue::get(II->getOperand(1)->getType()),
9164 ConstantInt::getFalse(II->getContext())
9166 Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
9167 return InsertValueInst::Create(Struct, II->getOperand(1), 0);
9171 case Intrinsic::ppc_altivec_lvx:
9172 case Intrinsic::ppc_altivec_lvxl:
9173 case Intrinsic::x86_sse_loadu_ps:
9174 case Intrinsic::x86_sse2_loadu_pd:
9175 case Intrinsic::x86_sse2_loadu_dq:
9176 // Turn PPC lvx -> load if the pointer is known aligned.
9177 // Turn X86 loadups -> load if the pointer is known aligned.
9178 if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) {
9179 Value *Ptr = Builder->CreateBitCast(II->getOperand(1),
9180 PointerType::getUnqual(II->getType()));
9181 return new LoadInst(Ptr);
9184 case Intrinsic::ppc_altivec_stvx:
9185 case Intrinsic::ppc_altivec_stvxl:
9186 // Turn stvx -> store if the pointer is known aligned.
9187 if (GetOrEnforceKnownAlignment(II->getOperand(2), 16) >= 16) {
9188 const Type *OpPtrTy =
9189 PointerType::getUnqual(II->getOperand(1)->getType());
9190 Value *Ptr = Builder->CreateBitCast(II->getOperand(2), OpPtrTy);
9191 return new StoreInst(II->getOperand(1), Ptr);
9194 case Intrinsic::x86_sse_storeu_ps:
9195 case Intrinsic::x86_sse2_storeu_pd:
9196 case Intrinsic::x86_sse2_storeu_dq:
9197 // Turn X86 storeu -> store if the pointer is known aligned.
9198 if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) {
9199 const Type *OpPtrTy =
9200 PointerType::getUnqual(II->getOperand(2)->getType());
9201 Value *Ptr = Builder->CreateBitCast(II->getOperand(1), OpPtrTy);
9202 return new StoreInst(II->getOperand(2), Ptr);
9206 case Intrinsic::x86_sse_cvttss2si: {
9207 // These intrinsics only demands the 0th element of its input vector. If
9208 // we can simplify the input based on that, do so now.
9210 cast<VectorType>(II->getOperand(1)->getType())->getNumElements();
9211 APInt DemandedElts(VWidth, 1);
9212 APInt UndefElts(VWidth, 0);
9213 if (Value *V = SimplifyDemandedVectorElts(II->getOperand(1), DemandedElts,
9215 II->setOperand(1, V);
9221 case Intrinsic::ppc_altivec_vperm:
9222 // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
9223 if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getOperand(3))) {
9224 assert(Mask->getNumOperands() == 16 && "Bad type for intrinsic!");
9226 // Check that all of the elements are integer constants or undefs.
9227 bool AllEltsOk = true;
9228 for (unsigned i = 0; i != 16; ++i) {
9229 if (!isa<ConstantInt>(Mask->getOperand(i)) &&
9230 !isa<UndefValue>(Mask->getOperand(i))) {
9237 // Cast the input vectors to byte vectors.
9238 Value *Op0 = Builder->CreateBitCast(II->getOperand(1), Mask->getType());
9239 Value *Op1 = Builder->CreateBitCast(II->getOperand(2), Mask->getType());
9240 Value *Result = UndefValue::get(Op0->getType());
9242 // Only extract each element once.
9243 Value *ExtractedElts[32];
9244 memset(ExtractedElts, 0, sizeof(ExtractedElts));
9246 for (unsigned i = 0; i != 16; ++i) {
9247 if (isa<UndefValue>(Mask->getOperand(i)))
9249 unsigned Idx=cast<ConstantInt>(Mask->getOperand(i))->getZExtValue();
9250 Idx &= 31; // Match the hardware behavior.
9252 if (ExtractedElts[Idx] == 0) {
9253 ExtractedElts[Idx] =
9254 Builder->CreateExtractElement(Idx < 16 ? Op0 : Op1,
9255 ConstantInt::get(Type::getInt32Ty(II->getContext()),
9256 Idx&15, false), "tmp");
9259 // Insert this value into the result vector.
9260 Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
9261 ConstantInt::get(Type::getInt32Ty(II->getContext()),
9264 return CastInst::Create(Instruction::BitCast, Result, CI.getType());
9269 case Intrinsic::stackrestore: {
9270 // If the save is right next to the restore, remove the restore. This can
9271 // happen when variable allocas are DCE'd.
9272 if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getOperand(1))) {
9273 if (SS->getIntrinsicID() == Intrinsic::stacksave) {
9274 BasicBlock::iterator BI = SS;
9276 return EraseInstFromFunction(CI);
9280 // Scan down this block to see if there is another stack restore in the
9281 // same block without an intervening call/alloca.
9282 BasicBlock::iterator BI = II;
9283 TerminatorInst *TI = II->getParent()->getTerminator();
9284 bool CannotRemove = false;
9285 for (++BI; &*BI != TI; ++BI) {
9286 if (isa<AllocaInst>(BI) || isMalloc(BI)) {
9287 CannotRemove = true;
9290 if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
9291 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
9292 // If there is a stackrestore below this one, remove this one.
9293 if (II->getIntrinsicID() == Intrinsic::stackrestore)
9294 return EraseInstFromFunction(CI);
9295 // Otherwise, ignore the intrinsic.
9297 // If we found a non-intrinsic call, we can't remove the stack
9299 CannotRemove = true;
9305 // If the stack restore is in a return/unwind block and if there are no
9306 // allocas or calls between the restore and the return, nuke the restore.
9307 if (!CannotRemove && (isa<ReturnInst>(TI) || isa<UnwindInst>(TI)))
9308 return EraseInstFromFunction(CI);
9313 return visitCallSite(II);
9316 // InvokeInst simplification
9318 Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
9319 return visitCallSite(&II);
9322 /// isSafeToEliminateVarargsCast - If this cast does not affect the value
9323 /// passed through the varargs area, we can eliminate the use of the cast.
9324 static bool isSafeToEliminateVarargsCast(const CallSite CS,
9325 const CastInst * const CI,
9326 const TargetData * const TD,
9328 if (!CI->isLosslessCast())
9331 // The size of ByVal arguments is derived from the type, so we
9332 // can't change to a type with a different size. If the size were
9333 // passed explicitly we could avoid this check.
9334 if (!CS.paramHasAttr(ix, Attribute::ByVal))
9338 cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
9339 const Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
9340 if (!SrcTy->isSized() || !DstTy->isSized())
9342 if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
9347 // visitCallSite - Improvements for call and invoke instructions.
9349 Instruction *InstCombiner::visitCallSite(CallSite CS) {
9350 bool Changed = false;
9352 // If the callee is a constexpr cast of a function, attempt to move the cast
9353 // to the arguments of the call/invoke.
9354 if (transformConstExprCastCall(CS)) return 0;
9356 Value *Callee = CS.getCalledValue();
9358 if (Function *CalleeF = dyn_cast<Function>(Callee))
9359 if (CalleeF->getCallingConv() != CS.getCallingConv()) {
9360 Instruction *OldCall = CS.getInstruction();
9361 // If the call and callee calling conventions don't match, this call must
9362 // be unreachable, as the call is undefined.
9363 new StoreInst(ConstantInt::getTrue(Callee->getContext()),
9364 UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
9366 // If OldCall dues not return void then replaceAllUsesWith undef.
9367 // This allows ValueHandlers and custom metadata to adjust itself.
9368 if (!OldCall->getType()->isVoidTy())
9369 OldCall->replaceAllUsesWith(UndefValue::get(OldCall->getType()));
9370 if (isa<CallInst>(OldCall)) // Not worth removing an invoke here.
9371 return EraseInstFromFunction(*OldCall);
9375 if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
9376 // This instruction is not reachable, just remove it. We insert a store to
9377 // undef so that we know that this code is not reachable, despite the fact
9378 // that we can't modify the CFG here.
9379 new StoreInst(ConstantInt::getTrue(Callee->getContext()),
9380 UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
9381 CS.getInstruction());
9383 // If CS dues not return void then replaceAllUsesWith undef.
9384 // This allows ValueHandlers and custom metadata to adjust itself.
9385 if (!CS.getInstruction()->getType()->isVoidTy())
9386 CS.getInstruction()->
9387 replaceAllUsesWith(UndefValue::get(CS.getInstruction()->getType()));
9389 if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
9390 // Don't break the CFG, insert a dummy cond branch.
9391 BranchInst::Create(II->getNormalDest(), II->getUnwindDest(),
9392 ConstantInt::getTrue(Callee->getContext()), II);
9394 return EraseInstFromFunction(*CS.getInstruction());
9397 if (BitCastInst *BC = dyn_cast<BitCastInst>(Callee))
9398 if (IntrinsicInst *In = dyn_cast<IntrinsicInst>(BC->getOperand(0)))
9399 if (In->getIntrinsicID() == Intrinsic::init_trampoline)
9400 return transformCallThroughTrampoline(CS);
9402 const PointerType *PTy = cast<PointerType>(Callee->getType());
9403 const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
9404 if (FTy->isVarArg()) {
9405 int ix = FTy->getNumParams() + (isa<InvokeInst>(Callee) ? 3 : 1);
9406 // See if we can optimize any arguments passed through the varargs area of
9408 for (CallSite::arg_iterator I = CS.arg_begin()+FTy->getNumParams(),
9409 E = CS.arg_end(); I != E; ++I, ++ix) {
9410 CastInst *CI = dyn_cast<CastInst>(*I);
9411 if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
9412 *I = CI->getOperand(0);
9418 if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
9419 // Inline asm calls cannot throw - mark them 'nounwind'.
9420 CS.setDoesNotThrow();
9424 return Changed ? CS.getInstruction() : 0;
9427 // transformConstExprCastCall - If the callee is a constexpr cast of a function,
9428 // attempt to move the cast to the arguments of the call/invoke.
9430 bool InstCombiner::transformConstExprCastCall(CallSite CS) {
9431 if (!isa<ConstantExpr>(CS.getCalledValue())) return false;
9432 ConstantExpr *CE = cast<ConstantExpr>(CS.getCalledValue());
9433 if (CE->getOpcode() != Instruction::BitCast ||
9434 !isa<Function>(CE->getOperand(0)))
9436 Function *Callee = cast<Function>(CE->getOperand(0));
9437 Instruction *Caller = CS.getInstruction();
9438 const AttrListPtr &CallerPAL = CS.getAttributes();
9440 // Okay, this is a cast from a function to a different type. Unless doing so
9441 // would cause a type conversion of one of our arguments, change this call to
9442 // be a direct call with arguments casted to the appropriate types.
9444 const FunctionType *FT = Callee->getFunctionType();
9445 const Type *OldRetTy = Caller->getType();
9446 const Type *NewRetTy = FT->getReturnType();
9448 if (isa<StructType>(NewRetTy))
9449 return false; // TODO: Handle multiple return values.
9451 // Check to see if we are changing the return type...
9452 if (OldRetTy != NewRetTy) {
9453 if (Callee->isDeclaration() &&
9454 // Conversion is ok if changing from one pointer type to another or from
9455 // a pointer to an integer of the same size.
9456 !((isa<PointerType>(OldRetTy) || !TD ||
9457 OldRetTy == TD->getIntPtrType(Caller->getContext())) &&
9458 (isa<PointerType>(NewRetTy) || !TD ||
9459 NewRetTy == TD->getIntPtrType(Caller->getContext()))))
9460 return false; // Cannot transform this return value.
9462 if (!Caller->use_empty() &&
9463 // void -> non-void is handled specially
9464 !NewRetTy->isVoidTy() && !CastInst::isCastable(NewRetTy, OldRetTy))
9465 return false; // Cannot transform this return value.
9467 if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
9468 Attributes RAttrs = CallerPAL.getRetAttributes();
9469 if (RAttrs & Attribute::typeIncompatible(NewRetTy))
9470 return false; // Attribute not compatible with transformed value.
9473 // If the callsite is an invoke instruction, and the return value is used by
9474 // a PHI node in a successor, we cannot change the return type of the call
9475 // because there is no place to put the cast instruction (without breaking
9476 // the critical edge). Bail out in this case.
9477 if (!Caller->use_empty())
9478 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
9479 for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
9481 if (PHINode *PN = dyn_cast<PHINode>(*UI))
9482 if (PN->getParent() == II->getNormalDest() ||
9483 PN->getParent() == II->getUnwindDest())
9487 unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
9488 unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);
9490 CallSite::arg_iterator AI = CS.arg_begin();
9491 for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
9492 const Type *ParamTy = FT->getParamType(i);
9493 const Type *ActTy = (*AI)->getType();
9495 if (!CastInst::isCastable(ActTy, ParamTy))
9496 return false; // Cannot transform this parameter value.
9498 if (CallerPAL.getParamAttributes(i + 1)
9499 & Attribute::typeIncompatible(ParamTy))
9500 return false; // Attribute not compatible with transformed value.
9502 // Converting from one pointer type to another or between a pointer and an
9503 // integer of the same size is safe even if we do not have a body.
9504 bool isConvertible = ActTy == ParamTy ||
9505 (TD && ((isa<PointerType>(ParamTy) ||
9506 ParamTy == TD->getIntPtrType(Caller->getContext())) &&
9507 (isa<PointerType>(ActTy) ||
9508 ActTy == TD->getIntPtrType(Caller->getContext()))));
9509 if (Callee->isDeclaration() && !isConvertible) return false;
9512 if (FT->getNumParams() < NumActualArgs && !FT->isVarArg() &&
9513 Callee->isDeclaration())
9514 return false; // Do not delete arguments unless we have a function body.
9516 if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
9517 !CallerPAL.isEmpty())
9518 // In this case we have more arguments than the new function type, but we
9519 // won't be dropping them. Check that these extra arguments have attributes
9520 // that are compatible with being a vararg call argument.
9521 for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
9522 if (CallerPAL.getSlot(i - 1).Index <= FT->getNumParams())
9524 Attributes PAttrs = CallerPAL.getSlot(i - 1).Attrs;
9525 if (PAttrs & Attribute::VarArgsIncompatible)
9529 // Okay, we decided that this is a safe thing to do: go ahead and start
9530 // inserting cast instructions as necessary...
9531 std::vector<Value*> Args;
9532 Args.reserve(NumActualArgs);
9533 SmallVector<AttributeWithIndex, 8> attrVec;
9534 attrVec.reserve(NumCommonArgs);
9536 // Get any return attributes.
9537 Attributes RAttrs = CallerPAL.getRetAttributes();
9539 // If the return value is not being used, the type may not be compatible
9540 // with the existing attributes. Wipe out any problematic attributes.
9541 RAttrs &= ~Attribute::typeIncompatible(NewRetTy);
9543 // Add the new return attributes.
9545 attrVec.push_back(AttributeWithIndex::get(0, RAttrs));
9547 AI = CS.arg_begin();
9548 for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
9549 const Type *ParamTy = FT->getParamType(i);
9550 if ((*AI)->getType() == ParamTy) {
9551 Args.push_back(*AI);
9553 Instruction::CastOps opcode = CastInst::getCastOpcode(*AI,
9554 false, ParamTy, false);
9555 Args.push_back(Builder->CreateCast(opcode, *AI, ParamTy, "tmp"));
9558 // Add any parameter attributes.
9559 if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
9560 attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
9563 // If the function takes more arguments than the call was taking, add them now.
9565 for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
9566 Args.push_back(Constant::getNullValue(FT->getParamType(i)));
9568 // If we are removing arguments to the function, emit an obnoxious warning.
9569 if (FT->getNumParams() < NumActualArgs) {
9570 if (!FT->isVarArg()) {
9571 errs() << "WARNING: While resolving call to function '"
9572 << Callee->getName() << "' arguments were dropped!\n";
9574 // Add all of the arguments in their promoted form to the arg list.
9575 for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
9576 const Type *PTy = getPromotedType((*AI)->getType());
9577 if (PTy != (*AI)->getType()) {
9578 // Must promote to pass through va_arg area!
9579 Instruction::CastOps opcode =
9580 CastInst::getCastOpcode(*AI, false, PTy, false);
9581 Args.push_back(Builder->CreateCast(opcode, *AI, PTy, "tmp"));
9583 Args.push_back(*AI);
9586 // Add any parameter attributes.
9587 if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
9588 attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
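// Carry over any function-level attributes from the original call site.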
9593 if (Attributes FnAttrs = CallerPAL.getFnAttributes())
9594 attrVec.push_back(AttributeWithIndex::get(~0, FnAttrs));
9596 if (NewRetTy->isVoidTy())
9597 Caller->setName(""); // Void type should not have a name.
9599 const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec.begin(),
9603 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
9604 NC = InvokeInst::Create(Callee, II->getNormalDest(), II->getUnwindDest(),
9605 Args.begin(), Args.end(),
9606 Caller->getName(), Caller);
9607 cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
9608 cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
9610 NC = CallInst::Create(Callee, Args.begin(), Args.end(),
9611 Caller->getName(), Caller);
9612 CallInst *CI = cast<CallInst>(Caller);
9613 if (CI->isTailCall())
9614 cast<CallInst>(NC)->setTailCall();
9615 cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
9616 cast<CallInst>(NC)->setAttributes(NewCallerPAL);
9619 // Insert a cast of the return type as necessary.
9621 if (OldRetTy != NV->getType() && !Caller->use_empty()) {
9622 if (!NV->getType()->isVoidTy()) {
9623 Instruction::CastOps opcode = CastInst::getCastOpcode(NC, false,
9625 NV = NC = CastInst::Create(opcode, NC, OldRetTy, "tmp");
9627 // If this is an invoke instruction, we should insert it after the first
9628 // non-phi instruction in the normal successor block.
9629 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
9630 BasicBlock::iterator I = II->getNormalDest()->getFirstNonPHI();
9631 InsertNewInstBefore(NC, *I);
9633 // Otherwise, it's a call; just insert the cast right after the call instruction.
9634 InsertNewInstBefore(NC, *Caller);
9636 Worklist.AddUsersToWorkList(*Caller);
9638 NV = UndefValue::get(Caller->getType());
9643 if (!Caller->use_empty())
9644 Caller->replaceAllUsesWith(NV);
9646 EraseInstFromFunction(*Caller);
9650 // transformCallThroughTrampoline - Turn a call to a function created by the
9651 // init_trampoline intrinsic into a direct call to the underlying function.
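//
// Illustrative sketch of the rewrite (invented names; not taken from a test case):
//   %p = call i8* @llvm.init.trampoline(i8* %tramp, i8* bitcast (... @f ...), i8* %nest)
//   ... indirect call through a cast of %p ...
// becomes a direct call to @f, with %nest passed in @f's 'nest' parameter
// position and the remaining arguments and attributes shifted to make room.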
9653 Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) {
9654 Value *Callee = CS.getCalledValue();
9655 const PointerType *PTy = cast<PointerType>(Callee->getType());
9656 const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
9657 const AttrListPtr &Attrs = CS.getAttributes();
9659 // If the call already has the 'nest' attribute somewhere then give up -
9660 // otherwise 'nest' would occur twice after splicing in the chain.
9661 if (Attrs.hasAttrSomewhere(Attribute::Nest))
9664 IntrinsicInst *Tramp =
9665 cast<IntrinsicInst>(cast<BitCastInst>(Callee)->getOperand(0));
9667 Function *NestF = cast<Function>(Tramp->getOperand(2)->stripPointerCasts());
9668 const PointerType *NestFPTy = cast<PointerType>(NestF->getType());
9669 const FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());
9671 const AttrListPtr &NestAttrs = NestF->getAttributes();
9672 if (!NestAttrs.isEmpty()) {
9673 unsigned NestIdx = 1;
9674 const Type *NestTy = 0;
9675 Attributes NestAttr = Attribute::None;
9677 // Look for a parameter marked with the 'nest' attribute.
9678 for (FunctionType::param_iterator I = NestFTy->param_begin(),
9679 E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
9680 if (NestAttrs.paramHasAttr(NestIdx, Attribute::Nest)) {
9681 // Record the parameter type and any other attributes.
9683 NestAttr = NestAttrs.getParamAttributes(NestIdx);
9688 Instruction *Caller = CS.getInstruction();
9689 std::vector<Value*> NewArgs;
9690 NewArgs.reserve(unsigned(CS.arg_end()-CS.arg_begin())+1);
9692 SmallVector<AttributeWithIndex, 8> NewAttrs;
9693 NewAttrs.reserve(Attrs.getNumSlots() + 1);
9695 // Insert the nest argument into the call argument list, which may
9696 // mean appending it. Likewise for attributes.
9698 // Add any result attributes.
9699 if (Attributes Attr = Attrs.getRetAttributes())
9700 NewAttrs.push_back(AttributeWithIndex::get(0, Attr));
9704 CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
9706 if (Idx == NestIdx) {
9707 // Add the chain argument and attributes.
9708 Value *NestVal = Tramp->getOperand(3);
9709 if (NestVal->getType() != NestTy)
9710 NestVal = new BitCastInst(NestVal, NestTy, "nest", Caller);
9711 NewArgs.push_back(NestVal);
9712 NewAttrs.push_back(AttributeWithIndex::get(NestIdx, NestAttr));
9718 // Add the original argument and attributes.
9719 NewArgs.push_back(*I);
9720 if (Attributes Attr = Attrs.getParamAttributes(Idx))
9722 (AttributeWithIndex::get(Idx + (Idx >= NestIdx), Attr));
9728 // Add any function attributes.
9729 if (Attributes Attr = Attrs.getFnAttributes())
9730 NewAttrs.push_back(AttributeWithIndex::get(~0, Attr));
9732 // The trampoline may have been bitcast to a bogus type (FTy).
9733 // Handle this by synthesizing a new function type, equal to FTy
9734 // with the chain parameter inserted.
9736 std::vector<const Type*> NewTypes;
9737 NewTypes.reserve(FTy->getNumParams()+1);
9739 // Insert the chain's type into the list of parameter types, which may
9740 // mean appending it.
9743 FunctionType::param_iterator I = FTy->param_begin(),
9744 E = FTy->param_end();
9748 // Add the chain's type.
9749 NewTypes.push_back(NestTy);
9754 // Add the original type.
9755 NewTypes.push_back(*I);
9761 // Replace the trampoline call with a direct call. Let the generic
9762 // code sort out any function type mismatches.
9763 FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
9765 Constant *NewCallee =
9766 NestF->getType() == PointerType::getUnqual(NewFTy) ?
9767 NestF : ConstantExpr::getBitCast(NestF,
9768 PointerType::getUnqual(NewFTy));
9769 const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs.begin(),
9772 Instruction *NewCaller;
9773 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
9774 NewCaller = InvokeInst::Create(NewCallee,
9775 II->getNormalDest(), II->getUnwindDest(),
9776 NewArgs.begin(), NewArgs.end(),
9777 Caller->getName(), Caller);
9778 cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
9779 cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
9781 NewCaller = CallInst::Create(NewCallee, NewArgs.begin(), NewArgs.end(),
9782 Caller->getName(), Caller);
9783 if (cast<CallInst>(Caller)->isTailCall())
9784 cast<CallInst>(NewCaller)->setTailCall();
9785 cast<CallInst>(NewCaller)->
9786 setCallingConv(cast<CallInst>(Caller)->getCallingConv());
9787 cast<CallInst>(NewCaller)->setAttributes(NewPAL);
9789 if (!Caller->getType()->isVoidTy())
9790 Caller->replaceAllUsesWith(NewCaller);
9791 Caller->eraseFromParent();
9792 Worklist.Remove(Caller);
9797 // Replace the trampoline call with a direct call. Since there is no 'nest'
9798 // parameter, there is no need to adjust the argument list. Let the generic
9799 // code sort out any function type mismatches.
9800 Constant *NewCallee =
9801 NestF->getType() == PTy ? NestF :
9802 ConstantExpr::getBitCast(NestF, PTy);
9803 CS.setCalledFunction(NewCallee);
9804 return CS.getInstruction();
9807 /// FoldPHIArgBinOpIntoPHI - If we have something like phi [add (a,b), add(a,c)]
9808 /// and if a/b/c and the add's all have a single use, turn this into a phi
9809 /// and a single binop.
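///
/// Illustrative sketch (invented values):
///   %r = phi i32 [ %s1, %bb1 ], [ %s2, %bb2 ]   ; %s1 = add %a, %b ; %s2 = add %a, %c
/// ==>
///   %p = phi i32 [ %b, %bb1 ], [ %c, %bb2 ]
///   %r = add i32 %a, %p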
9810 Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) {
9811 Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
9812 assert(isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst));
9813 unsigned Opc = FirstInst->getOpcode();
9814 Value *LHSVal = FirstInst->getOperand(0);
9815 Value *RHSVal = FirstInst->getOperand(1);
9817 const Type *LHSType = LHSVal->getType();
9818 const Type *RHSType = RHSVal->getType();
9820 // Scan to see if all operands are the same opcode, and all have one use.
9821 for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
9822 Instruction *I = dyn_cast<Instruction>(PN.getIncomingValue(i));
9823 if (!I || I->getOpcode() != Opc || !I->hasOneUse() ||
9824 // Verify type of the LHS matches so we don't fold cmp's of different
9825 // types or GEP's with different index types.
9826 I->getOperand(0)->getType() != LHSType ||
9827 I->getOperand(1)->getType() != RHSType)
9830 // If they are CmpInst instructions, check their predicates
9831 if (Opc == Instruction::ICmp || Opc == Instruction::FCmp)
9832 if (cast<CmpInst>(I)->getPredicate() !=
9833 cast<CmpInst>(FirstInst)->getPredicate())
9836 // Keep track of which operand needs a phi node.
9837 if (I->getOperand(0) != LHSVal) LHSVal = 0;
9838 if (I->getOperand(1) != RHSVal) RHSVal = 0;
9841 // If both LHS and RHS would need a PHI, don't do this transformation,
9842 // because it would increase the number of PHIs entering the block,
9843 // which leads to higher register pressure. This is especially
9844 // bad when the PHIs are in the header of a loop.
9845 if (!LHSVal && !RHSVal)
9848 // Otherwise, this is safe to transform!
9850 Value *InLHS = FirstInst->getOperand(0);
9851 Value *InRHS = FirstInst->getOperand(1);
9852 PHINode *NewLHS = 0, *NewRHS = 0;
9854 NewLHS = PHINode::Create(LHSType,
9855 FirstInst->getOperand(0)->getName() + ".pn");
9856 NewLHS->reserveOperandSpace(PN.getNumOperands()/2);
9857 NewLHS->addIncoming(InLHS, PN.getIncomingBlock(0));
9858 InsertNewInstBefore(NewLHS, PN);
9863 NewRHS = PHINode::Create(RHSType,
9864 FirstInst->getOperand(1)->getName() + ".pn");
9865 NewRHS->reserveOperandSpace(PN.getNumOperands()/2);
9866 NewRHS->addIncoming(InRHS, PN.getIncomingBlock(0));
9867 InsertNewInstBefore(NewRHS, PN);
9871 // Add all operands to the new PHIs.
9872 if (NewLHS || NewRHS) {
9873 for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
9874 Instruction *InInst = cast<Instruction>(PN.getIncomingValue(i));
9876 Value *NewInLHS = InInst->getOperand(0);
9877 NewLHS->addIncoming(NewInLHS, PN.getIncomingBlock(i));
9880 Value *NewInRHS = InInst->getOperand(1);
9881 NewRHS->addIncoming(NewInRHS, PN.getIncomingBlock(i));
9886 if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst))
9887 return BinaryOperator::Create(BinOp->getOpcode(), LHSVal, RHSVal);
9888 CmpInst *CIOp = cast<CmpInst>(FirstInst);
9889 return CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
9893 Instruction *InstCombiner::FoldPHIArgGEPIntoPHI(PHINode &PN) {
9894 GetElementPtrInst *FirstInst =cast<GetElementPtrInst>(PN.getIncomingValue(0));
9896 SmallVector<Value*, 16> FixedOperands(FirstInst->op_begin(),
9897 FirstInst->op_end());
9898 // This is true if all GEP bases are allocas and if all indices into them are constants.
9900 bool AllBasePointersAreAllocas = true;
9902 // We don't want to replace this phi if the replacement would require
9903 // more than one phi, which leads to higher register pressure. This is
9904 // especially bad when the PHIs are in the header of a loop.
9905 bool NeededPhi = false;
9907 // Scan to see if all operands are the same opcode, and all have one use.
9908 for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
9909 GetElementPtrInst *GEP= dyn_cast<GetElementPtrInst>(PN.getIncomingValue(i));
9910 if (!GEP || !GEP->hasOneUse() || GEP->getType() != FirstInst->getType() ||
9911 GEP->getNumOperands() != FirstInst->getNumOperands())
9914 // Keep track of whether or not all GEPs are of alloca pointers.
9915 if (AllBasePointersAreAllocas &&
9916 (!isa<AllocaInst>(GEP->getOperand(0)) ||
9917 !GEP->hasAllConstantIndices()))
9918 AllBasePointersAreAllocas = false;
9920 // Compare the operand lists.
9921 for (unsigned op = 0, e = FirstInst->getNumOperands(); op != e; ++op) {
9922 if (FirstInst->getOperand(op) == GEP->getOperand(op))
9925 // Don't merge two GEPs when two operands differ (introducing phi nodes)
9926 // if one of the PHIs has a constant for the index. The index may be
9927 // substantially cheaper to compute for the constants, so making it a
9928 // variable index could pessimize the path. This also handles the case
9929 // for struct indices, which must always be constant.
9930 if (isa<ConstantInt>(FirstInst->getOperand(op)) ||
9931 isa<ConstantInt>(GEP->getOperand(op)))
9934 if (FirstInst->getOperand(op)->getType() !=GEP->getOperand(op)->getType())
9937 // If we already needed a PHI for an earlier operand, and another operand
9938 // also requires a PHI, we'd be introducing more PHIs than we're
9939 // eliminating, which increases register pressure on entry to the PHI's block.
9944 FixedOperands[op] = 0; // Needs a PHI.
9949 // If all of the base pointers of the PHI'd GEPs are from allocas, don't
9950 // bother doing this transformation. At best, this will just save a bit of
9951 // offset calculation, but all the predecessors will have to materialize the
9952 // stack address into a register anyway. We'd actually rather *clone* the
9953 // load up into the predecessors so that we have a load of a gep of an alloca,
9954 // which can usually all be folded into the load.
9955 if (AllBasePointersAreAllocas)
9958 // Otherwise, this is safe to transform. Insert PHI nodes for each operand
9959 // that is variable.
9960 SmallVector<PHINode*, 16> OperandPhis(FixedOperands.size());
9962 bool HasAnyPHIs = false;
9963 for (unsigned i = 0, e = FixedOperands.size(); i != e; ++i) {
9964 if (FixedOperands[i]) continue; // operand doesn't need a phi.
9965 Value *FirstOp = FirstInst->getOperand(i);
9966 PHINode *NewPN = PHINode::Create(FirstOp->getType(),
9967 FirstOp->getName()+".pn");
9968 InsertNewInstBefore(NewPN, PN);
9970 NewPN->reserveOperandSpace(e);
9971 NewPN->addIncoming(FirstOp, PN.getIncomingBlock(0));
9972 OperandPhis[i] = NewPN;
9973 FixedOperands[i] = NewPN;
9978 // Add all operands to the new PHIs.
9980 for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
9981 GetElementPtrInst *InGEP =cast<GetElementPtrInst>(PN.getIncomingValue(i));
9982 BasicBlock *InBB = PN.getIncomingBlock(i);
9984 for (unsigned op = 0, e = OperandPhis.size(); op != e; ++op)
9985 if (PHINode *OpPhi = OperandPhis[op])
9986 OpPhi->addIncoming(InGEP->getOperand(op), InBB);
9990 Value *Base = FixedOperands[0];
9991 return cast<GEPOperator>(FirstInst)->isInBounds() ?
9992 GetElementPtrInst::CreateInBounds(Base, FixedOperands.begin()+1,
9993 FixedOperands.end()) :
9994 GetElementPtrInst::Create(Base, FixedOperands.begin()+1,
9995 FixedOperands.end());
9999 /// isSafeAndProfitableToSinkLoad - Return true if we know that it is safe to
10000 /// sink the load out of the block that defines it. This means that it must be
10001 /// obvious the value of the load is not changed from the point of the load to
10002 /// the end of the block it is in.
10004 /// Finally, it is safe, but not profitable, to sink a load targeting a
10005 /// non-address-taken alloca. Doing so will cause us to not promote the alloca to a register.
10007 static bool isSafeAndProfitableToSinkLoad(LoadInst *L) {
10008 BasicBlock::iterator BBI = L, E = L->getParent()->end();
10010 for (++BBI; BBI != E; ++BBI)
10011 if (BBI->mayWriteToMemory())
10014 // Check for non-address taken alloca. If not address-taken already, it isn't
10015 // profitable to do this xform.
10016 if (AllocaInst *AI = dyn_cast<AllocaInst>(L->getOperand(0))) {
10017 bool isAddressTaken = false;
10018 for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
10020 if (isa<LoadInst>(UI)) continue;
10021 if (StoreInst *SI = dyn_cast<StoreInst>(*UI)) {
10022 // If storing TO the alloca, then the address isn't taken.
10023 if (SI->getOperand(1) == AI) continue;
10025 isAddressTaken = true;
10029 if (!isAddressTaken && AI->isStaticAlloca())
10033 // If this load is a load from a GEP with a constant offset from an alloca,
10034 // then we don't want to sink it. In its present form, it will be
10035 // load [constant stack offset]. Sinking it will cause us to have to
10036 // materialize the stack addresses in each predecessor in a register only to
10037 // do a shared load from register in the successor.
10038 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(L->getOperand(0)))
10039 if (AllocaInst *AI = dyn_cast<AllocaInst>(GEP->getOperand(0)))
10040 if (AI->isStaticAlloca() && GEP->hasAllConstantIndices())
10046 Instruction *InstCombiner::FoldPHIArgLoadIntoPHI(PHINode &PN) {
10047 LoadInst *FirstLI = cast<LoadInst>(PN.getIncomingValue(0));
10049 // When processing loads, we need to propagate two bits of information to the
10050 // sunk load: whether it is volatile, and what its alignment is. We currently
10051 // don't sink loads when some have their alignment specified and some don't.
10052 // visitLoadInst will propagate an alignment onto the load when TD is around,
10053 // and if TD isn't around, we can't handle the mixed case.
10054 bool isVolatile = FirstLI->isVolatile();
10055 unsigned LoadAlignment = FirstLI->getAlignment();
10057 // We can't sink the load if the loaded value could be modified between the
10058 // load and the PHI.
10059 if (FirstLI->getParent() != PN.getIncomingBlock(0) ||
10060 !isSafeAndProfitableToSinkLoad(FirstLI))
10063 // If the PHI is of volatile loads and the load block has multiple
10064 // successors, sinking it would remove a load of the volatile value from
10065 // the path through the other successor.
10067 FirstLI->getParent()->getTerminator()->getNumSuccessors() != 1)
10070 // Check to see if all arguments are the same operation.
10071 for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
10072 LoadInst *LI = dyn_cast<LoadInst>(PN.getIncomingValue(i));
10073 if (!LI || !LI->hasOneUse())
10076 // We can't sink the load if the loaded value could be modified between
10077 // the load and the PHI.
10078 if (LI->isVolatile() != isVolatile ||
10079 LI->getParent() != PN.getIncomingBlock(i) ||
10080 !isSafeAndProfitableToSinkLoad(LI))
10083 // If some of the loads have an alignment specified but not all of them,
10084 // we can't do the transformation.
10085 if ((LoadAlignment != 0) != (LI->getAlignment() != 0))
10088 LoadAlignment = std::min(LoadAlignment, LI->getAlignment());
10090 // If the PHI is of volatile loads and the load block has multiple
10091 // successors, sinking it would remove a load of the volatile value from
10092 // the path through the other successor.
10094 LI->getParent()->getTerminator()->getNumSuccessors() != 1)
10098 // Okay, they are all the same operation. Create a new PHI node of the
10099 // correct type, and PHI together all of the LHS's of the instructions.
10100 PHINode *NewPN = PHINode::Create(FirstLI->getOperand(0)->getType(),
10101 PN.getName()+".in");
10102 NewPN->reserveOperandSpace(PN.getNumOperands()/2);
10104 Value *InVal = FirstLI->getOperand(0);
10105 NewPN->addIncoming(InVal, PN.getIncomingBlock(0));
10107 // Add all operands to the new PHI.
10108 for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
10109 Value *NewInVal = cast<LoadInst>(PN.getIncomingValue(i))->getOperand(0);
10110 if (NewInVal != InVal)
10112 NewPN->addIncoming(NewInVal, PN.getIncomingBlock(i));
10117 // The new PHI unions all of the same values together. This is really
10118 // common, so we handle it intelligently here for compile-time speed.
10122 InsertNewInstBefore(NewPN, PN);
10126 // If this was a volatile load that we are merging, make sure to loop through
10127 // and mark all the input loads as non-volatile. If we don't do this, we will
10128 // insert a new volatile load and the old ones will not be deletable.
10130 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
10131 cast<LoadInst>(PN.getIncomingValue(i))->setVolatile(false);
10133 return new LoadInst(PhiVal, "", isVolatile, LoadAlignment);
10138 /// FoldPHIArgOpIntoPHI - If all operands to a PHI node are the same "unary"
10139 /// operator and they all are only used by the PHI, PHI together their
10140 /// inputs, and do the operation once, to the result of the PHI.
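///
/// Illustrative sketch (invented values):
///   %r = phi i8 [ %t1, %bb1 ], [ %t2, %bb2 ]   ; %t1 = trunc i32 %a ; %t2 = trunc i32 %b
/// ==>
///   %p = phi i32 [ %a, %bb1 ], [ %b, %bb2 ]
///   %r = trunc i32 %p to i8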
10141 Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
10142 Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
10144 if (isa<GetElementPtrInst>(FirstInst))
10145 return FoldPHIArgGEPIntoPHI(PN);
10146 if (isa<LoadInst>(FirstInst))
10147 return FoldPHIArgLoadIntoPHI(PN);
10149 // Scan the instruction, looking for input operations that can be folded away.
10150 // If all input operands to the phi are the same instruction (e.g. a cast from
10151 // the same type or "+42") we can pull the operation through the PHI, reducing
10152 // code size and simplifying code.
10153 Constant *ConstantOp = 0;
10154 const Type *CastSrcTy = 0;
10156 if (isa<CastInst>(FirstInst)) {
10157 CastSrcTy = FirstInst->getOperand(0)->getType();
10159 // Be careful about transforming integer PHIs. We don't want to pessimize
10160 // the code by turning an i32 into an i1293.
10161 if (isa<IntegerType>(PN.getType()) && isa<IntegerType>(CastSrcTy)) {
10162 if (!ShouldChangeType(PN.getType(), CastSrcTy, TD))
10165 } else if (isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst)) {
10166 // Can fold binop, compare or shift here if the RHS is a constant,
10167 // otherwise call FoldPHIArgBinOpIntoPHI.
10168 ConstantOp = dyn_cast<Constant>(FirstInst->getOperand(1));
10169 if (ConstantOp == 0)
10170 return FoldPHIArgBinOpIntoPHI(PN);
10172 return 0; // Cannot fold this operation.
10175 // Check to see if all arguments are the same operation.
10176 for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
10177 Instruction *I = dyn_cast<Instruction>(PN.getIncomingValue(i));
10178 if (I == 0 || !I->hasOneUse() || !I->isSameOperationAs(FirstInst))
10181 if (I->getOperand(0)->getType() != CastSrcTy)
10182 return 0; // Cast operation must match.
10183 } else if (I->getOperand(1) != ConstantOp) {
10188 // Okay, they are all the same operation. Create a new PHI node of the
10189 // correct type, and PHI together all of the LHS's of the instructions.
10190 PHINode *NewPN = PHINode::Create(FirstInst->getOperand(0)->getType(),
10191 PN.getName()+".in");
10192 NewPN->reserveOperandSpace(PN.getNumOperands()/2);
10194 Value *InVal = FirstInst->getOperand(0);
10195 NewPN->addIncoming(InVal, PN.getIncomingBlock(0));
10197 // Add all operands to the new PHI.
10198 for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
10199 Value *NewInVal = cast<Instruction>(PN.getIncomingValue(i))->getOperand(0);
10200 if (NewInVal != InVal)
10202 NewPN->addIncoming(NewInVal, PN.getIncomingBlock(i));
10207 // The new PHI unions all of the same values together. This is really
10208 // common, so we handle it intelligently here for compile-time speed.
10212 InsertNewInstBefore(NewPN, PN);
10216 // Insert and return the new operation.
10217 if (CastInst *FirstCI = dyn_cast<CastInst>(FirstInst))
10218 return CastInst::Create(FirstCI->getOpcode(), PhiVal, PN.getType());
10220 if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst))
10221 return BinaryOperator::Create(BinOp->getOpcode(), PhiVal, ConstantOp);
10223 CmpInst *CIOp = cast<CmpInst>(FirstInst);
10224 return CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
10225 PhiVal, ConstantOp);
10228 /// DeadPHICycle - Return true if this PHI node is only used by a PHI node cycle that is dead.
10230 static bool DeadPHICycle(PHINode *PN,
10231 SmallPtrSet<PHINode*, 16> &PotentiallyDeadPHIs) {
10232 if (PN->use_empty()) return true;
10233 if (!PN->hasOneUse()) return false;
10235 // Remember this node, and if we find the cycle, return.
10236 if (!PotentiallyDeadPHIs.insert(PN))
10239 // Don't scan crazily complex things.
10240 if (PotentiallyDeadPHIs.size() == 16)
10243 if (PHINode *PU = dyn_cast<PHINode>(PN->use_back()))
10244 return DeadPHICycle(PU, PotentiallyDeadPHIs);
10249 /// PHIsEqualValue - Return true if this phi node is always equal to
10250 /// NonPhiInVal. This happens with mutually cyclic phi nodes like:
10251 /// z = some value; x = phi (y, z); y = phi (x, z)
10252 static bool PHIsEqualValue(PHINode *PN, Value *NonPhiInVal,
10253 SmallPtrSet<PHINode*, 16> &ValueEqualPHIs) {
10254 // See if we already saw this PHI node.
10255 if (!ValueEqualPHIs.insert(PN))
10258 // Don't scan crazily complex things.
10259 if (ValueEqualPHIs.size() == 16)
10262 // Scan the operands to see if they are either phi nodes or are equal to the value.
10264 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
10265 Value *Op = PN->getIncomingValue(i);
10266 if (PHINode *OpPN = dyn_cast<PHINode>(Op)) {
10267 if (!PHIsEqualValue(OpPN, NonPhiInVal, ValueEqualPHIs))
10269 } else if (Op != NonPhiInVal)
10278 struct PHIUsageRecord {
10279 unsigned PHIId; // The ID # of the PHI (something deterministic to sort on)
10280 unsigned Shift; // The amount shifted.
10281 Instruction *Inst; // The trunc instruction.
10283 PHIUsageRecord(unsigned pn, unsigned Sh, Instruction *User)
10284 : PHIId(pn), Shift(Sh), Inst(User) {}
10286 bool operator<(const PHIUsageRecord &RHS) const {
10287 if (PHIId < RHS.PHIId) return true;
10288 if (PHIId > RHS.PHIId) return false;
10289 if (Shift < RHS.Shift) return true;
10290 if (Shift > RHS.Shift) return false;
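// For equal PHI and shift, order by the width of the value being extracted.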
10291 return Inst->getType()->getPrimitiveSizeInBits() <
10292 RHS.Inst->getType()->getPrimitiveSizeInBits();
10296 struct LoweredPHIRecord {
10297 PHINode *PN; // The PHI that was lowered.
10298 unsigned Shift; // The amount shifted.
10299 unsigned Width; // The width extracted.
10301 LoweredPHIRecord(PHINode *pn, unsigned Sh, const Type *Ty)
10302 : PN(pn), Shift(Sh), Width(Ty->getPrimitiveSizeInBits()) {}
10304 // Ctor form used by DenseMap.
10305 LoweredPHIRecord(PHINode *pn, unsigned Sh)
10306 : PN(pn), Shift(Sh), Width(0) {}
10312 struct DenseMapInfo<LoweredPHIRecord> {
10313 static inline LoweredPHIRecord getEmptyKey() {
10314 return LoweredPHIRecord(0, 0);
10316 static inline LoweredPHIRecord getTombstoneKey() {
10317 return LoweredPHIRecord(0, 1);
10319 static unsigned getHashValue(const LoweredPHIRecord &Val) {
10320 return DenseMapInfo<PHINode*>::getHashValue(Val.PN) ^ (Val.Shift>>3) ^
10323 static bool isEqual(const LoweredPHIRecord &LHS,
10324 const LoweredPHIRecord &RHS) {
10325 return LHS.PN == RHS.PN && LHS.Shift == RHS.Shift &&
10326 LHS.Width == RHS.Width;
10330 struct isPodLike<LoweredPHIRecord> { static const bool value = true; };
10334 /// SliceUpIllegalIntegerPHI - This is an integer PHI and we know that it has an
10335 /// illegal type: see if it is only used by trunc or trunc(lshr) operations. If
10336 /// so, we split the PHI into the various pieces being extracted. This sort of
10337 /// thing is introduced when SROA promotes an aggregate to large integer values.
10339 /// TODO: The user of the trunc may be a bitcast to float/double/vector or an
10340 /// inttoptr. We should produce new PHIs in the right type.
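///
/// Illustrative sketch (assumes i64 is not a legal integer type for the target;
/// values are invented):
///   %p  = phi i64 [ ... ]
///   %lo = trunc i64 %p to i32
///   %s  = lshr i64 %p, 32
///   %hi = trunc i64 %s to i32
/// is rewritten as two i32 PHIs, one feeding %lo and one feeding %hi, with the
/// lshr/trunc extraction done in each predecessor instead.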
10342 Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) {
10343 // PHIUsers - Keep track of all of the truncated values extracted from a set
10344 // of PHIs, along with their offset. These are the things we want to rewrite.
10345 SmallVector<PHIUsageRecord, 16> PHIUsers;
10347 // PHIs are often mutually cyclic, so we keep track of a whole set of PHI
10348 // nodes which are extracted from. PHIsInspected is a set we use to avoid
10349 // revisiting PHIs; PHIsToSlice is an ordered list of PHIs that we need to
10350 // check the uses of (to ensure they are all extracts).
10351 SmallVector<PHINode*, 8> PHIsToSlice;
10352 SmallPtrSet<PHINode*, 8> PHIsInspected;
10354 PHIsToSlice.push_back(&FirstPhi);
10355 PHIsInspected.insert(&FirstPhi);
10357 for (unsigned PHIId = 0; PHIId != PHIsToSlice.size(); ++PHIId) {
10358 PHINode *PN = PHIsToSlice[PHIId];
10360 // Scan the input list of the PHI. If any input is an invoke, and if the
10361 // input is defined in the predecessor, then we won't be able to split the critical
10362 // edge which is required to insert a truncate. Because of this, we have to bail out.
10364 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
10365 InvokeInst *II = dyn_cast<InvokeInst>(PN->getIncomingValue(i));
10366 if (II == 0) continue;
10367 if (II->getParent() != PN->getIncomingBlock(i))
10370 // If we have a phi, and if it's directly in the predecessor, then we have
10371 // a critical edge where we need to put the truncate. Since we can't
10372 // split the edge in instcombine, we have to bail out.
10377 for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end();
10379 Instruction *User = cast<Instruction>(*UI);
10381 // If the user is a PHI, inspect its uses recursively.
10382 if (PHINode *UserPN = dyn_cast<PHINode>(User)) {
10383 if (PHIsInspected.insert(UserPN))
10384 PHIsToSlice.push_back(UserPN);
10388 // Truncates are always ok.
10389 if (isa<TruncInst>(User)) {
10390 PHIUsers.push_back(PHIUsageRecord(PHIId, 0, User));
10394 // Otherwise it must be a lshr which can only be used by one trunc.
10395 if (User->getOpcode() != Instruction::LShr ||
10396 !User->hasOneUse() || !isa<TruncInst>(User->use_back()) ||
10397 !isa<ConstantInt>(User->getOperand(1)))
10400 unsigned Shift = cast<ConstantInt>(User->getOperand(1))->getZExtValue();
10401 PHIUsers.push_back(PHIUsageRecord(PHIId, Shift, User->use_back()));
10405 // If we have no users, they must be all self uses, just nuke the PHI.
10406 if (PHIUsers.empty())
10407 return ReplaceInstUsesWith(FirstPhi, UndefValue::get(FirstPhi.getType()));
10409 // If this phi node is transformable, create new PHIs for all the pieces
10410 // extracted out of it. First, sort the users by their offset and size.
10411 array_pod_sort(PHIUsers.begin(), PHIUsers.end());
10413 DEBUG(errs() << "SLICING UP PHI: " << FirstPhi << '\n';
10414 for (unsigned i = 1, e = PHIsToSlice.size(); i != e; ++i)
10415 errs() << "AND USER PHI #" << i << ": " << *PHIsToSlice[i] <<'\n';
10418 // PredValues - This is a temporary used when rewriting PHI nodes. It is
10419 // hoisted out here to avoid construction/destruction thrashing.
10420 DenseMap<BasicBlock*, Value*> PredValues;
10422 // ExtractedVals - Each new PHI we introduce is saved here so we don't
10423 // introduce redundant PHIs.
10424 DenseMap<LoweredPHIRecord, PHINode*> ExtractedVals;
10426 for (unsigned UserI = 0, UserE = PHIUsers.size(); UserI != UserE; ++UserI) {
10427 unsigned PHIId = PHIUsers[UserI].PHIId;
10428 PHINode *PN = PHIsToSlice[PHIId];
10429 unsigned Offset = PHIUsers[UserI].Shift;
10430 const Type *Ty = PHIUsers[UserI].Inst->getType();
10434 // If we've already lowered a user like this, reuse the previously lowered value.
10436 if ((EltPHI = ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)]) == 0) {
10438 // Otherwise, Create the new PHI node for this user.
10439 EltPHI = PHINode::Create(Ty, PN->getName()+".off"+Twine(Offset), PN);
10440 assert(EltPHI->getType() != PN->getType() &&
10441 "Truncate didn't shrink phi?");
10443 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
10444 BasicBlock *Pred = PN->getIncomingBlock(i);
10445 Value *&PredVal = PredValues[Pred];
10447 // If we already have a value for this predecessor, reuse it.
10449 EltPHI->addIncoming(PredVal, Pred);
10453 // Handle the PHI self-reuse case.
10454 Value *InVal = PN->getIncomingValue(i);
10457 EltPHI->addIncoming(PredVal, Pred);
10461 if (PHINode *InPHI = dyn_cast<PHINode>(InVal)) {
10462 // If the incoming value was a PHI, and if it was one of the PHIs we
10463 // already rewrote, just use the lowered value.
10464 if (Value *Res = ExtractedVals[LoweredPHIRecord(InPHI, Offset, Ty)]) {
10466 EltPHI->addIncoming(PredVal, Pred);
10471 // Otherwise, do an extract in the predecessor.
10472 Builder->SetInsertPoint(Pred, Pred->getTerminator());
10473 Value *Res = InVal;
10475 Res = Builder->CreateLShr(Res, ConstantInt::get(InVal->getType(),
10476 Offset), "extract");
10477 Res = Builder->CreateTrunc(Res, Ty, "extract.t");
10479 EltPHI->addIncoming(Res, Pred);
10481 // If the incoming value was a PHI, and if it was one of the PHIs we are
10482 // rewriting, we will ultimately delete the code we inserted. This
10483 // means we need to revisit that PHI to make sure we extract out the needed piece.
10485 if (PHINode *OldInVal = dyn_cast<PHINode>(PN->getIncomingValue(i)))
10486 if (PHIsInspected.count(OldInVal)) {
10487 unsigned RefPHIId = std::find(PHIsToSlice.begin(),PHIsToSlice.end(),
10488 OldInVal)-PHIsToSlice.begin();
10489 PHIUsers.push_back(PHIUsageRecord(RefPHIId, Offset,
10490 cast<Instruction>(Res)));
10494 PredValues.clear();
10496 DEBUG(errs() << " Made element PHI for offset " << Offset << ": "
10497 << *EltPHI << '\n');
10498 ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)] = EltPHI;
10501 // Replace the use of this piece with the PHI node.
10502 ReplaceInstUsesWith(*PHIUsers[UserI].Inst, EltPHI);
10505 // Replace all the remaining uses of the PHI nodes (self uses and the lshrs) with undef.
10507 Value *Undef = UndefValue::get(FirstPhi.getType());
10508 for (unsigned i = 1, e = PHIsToSlice.size(); i != e; ++i)
10509 ReplaceInstUsesWith(*PHIsToSlice[i], Undef);
10510 return ReplaceInstUsesWith(FirstPhi, Undef);
10513 // PHINode simplification
10515 Instruction *InstCombiner::visitPHINode(PHINode &PN) {
10516 // If LCSSA is around, don't mess with Phi nodes
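// (combining or removing PHIs here could break the exit-block PHIs that
// LCSSA form requires).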
10517 if (MustPreserveLCSSA) return 0;
10519 if (Value *V = PN.hasConstantValue())
10520 return ReplaceInstUsesWith(PN, V);
10522 // If all PHI operands are the same operation, pull them through the PHI,
10523 // reducing code size.
10524 if (isa<Instruction>(PN.getIncomingValue(0)) &&
10525 isa<Instruction>(PN.getIncomingValue(1)) &&
10526 cast<Instruction>(PN.getIncomingValue(0))->getOpcode() ==
10527 cast<Instruction>(PN.getIncomingValue(1))->getOpcode() &&
10528 // FIXME: The hasOneUse check will fail for PHIs that themselves use the value
10529 // more than once, e.g. the same incoming value appearing on several edges.
10530 PN.getIncomingValue(0)->hasOneUse())
10531 if (Instruction *Result = FoldPHIArgOpIntoPHI(PN))
10534 // If this is a trivial cycle in the PHI node graph, remove it. Basically, if
10535 // this PHI only has a single use (a PHI), and if that PHI only has one use (a
10536 // PHI)... break the cycle.
10537 if (PN.hasOneUse()) {
10538 Instruction *PHIUser = cast<Instruction>(PN.use_back());
10539 if (PHINode *PU = dyn_cast<PHINode>(PHIUser)) {
10540 SmallPtrSet<PHINode*, 16> PotentiallyDeadPHIs;
10541 PotentiallyDeadPHIs.insert(&PN);
10542 if (DeadPHICycle(PU, PotentiallyDeadPHIs))
10543 return ReplaceInstUsesWith(PN, UndefValue::get(PN.getType()));
10546 // If this phi has a single use, and if that use just computes a value for
10547 // the next iteration of a loop, delete the phi. This occurs with unused
10548 // induction variables, e.g. "for (int j = 0; ; ++j);". Detecting this
10549 // common case here is good because the only other things that catch this
10550 // are induction variable analysis (sometimes) and ADCE, which is only run late.
10552 if (PHIUser->hasOneUse() &&
10553 (isa<BinaryOperator>(PHIUser) || isa<GetElementPtrInst>(PHIUser)) &&
10554 PHIUser->use_back() == &PN) {
10555 return ReplaceInstUsesWith(PN, UndefValue::get(PN.getType()));
10559 // We sometimes end up with phi cycles that non-obviously end up being the
10560 // same value, for example:
10561 // z = some value; x = phi (y, z); y = phi (x, z)
10562 // where the phi nodes don't necessarily need to be in the same block. Do a
10563 // quick check to see if the PHI node only contains a single non-phi value; if
10564 // so, scan to see if the phi cycle is actually equal to that value.
10566 unsigned InValNo = 0, NumOperandVals = PN.getNumIncomingValues();
10567 // Scan for the first non-phi operand.
10568 while (InValNo != NumOperandVals &&
10569 isa<PHINode>(PN.getIncomingValue(InValNo)))
10572 if (InValNo != NumOperandVals) {
10573 Value *NonPhiInVal = PN.getOperand(InValNo);
10575 // Scan the rest of the operands to see if there are any conflicts, if so
10576 // there is no need to recursively scan other phis.
10577 for (++InValNo; InValNo != NumOperandVals; ++InValNo) {
10578 Value *OpVal = PN.getIncomingValue(InValNo);
10579 if (OpVal != NonPhiInVal && !isa<PHINode>(OpVal))
10583 // If we scanned over all operands, then we have one unique value plus
10584 // phi values. Scan PHI nodes to see if they all merge in each other or
10586 if (InValNo == NumOperandVals) {
10587 SmallPtrSet<PHINode*, 16> ValueEqualPHIs;
10588 if (PHIsEqualValue(&PN, NonPhiInVal, ValueEqualPHIs))
10589 return ReplaceInstUsesWith(PN, NonPhiInVal);
10594 // If there are multiple PHIs, sort their operands so that they all list
10595 // the blocks in the same order. This will help identical PHIs be eliminated
10596 // by other passes. Other passes shouldn't depend on this for correctness, however.
10598 PHINode *FirstPN = cast<PHINode>(PN.getParent()->begin());
10599 if (&PN != FirstPN)
10600 for (unsigned i = 0, e = FirstPN->getNumIncomingValues(); i != e; ++i) {
10601 BasicBlock *BBA = PN.getIncomingBlock(i);
10602 BasicBlock *BBB = FirstPN->getIncomingBlock(i);
10604 Value *VA = PN.getIncomingValue(i);
10605 unsigned j = PN.getBasicBlockIndex(BBB);
10606 Value *VB = PN.getIncomingValue(j);
10607 PN.setIncomingBlock(i, BBB);
10608 PN.setIncomingValue(i, VB);
10609 PN.setIncomingBlock(j, BBA);
10610 PN.setIncomingValue(j, VA);
10611 // NOTE: Instcombine normally would want us to "return &PN" if we
10612 // modified any of the operands of an instruction. However, since we
10613 // aren't adding or removing uses (just rearranging them) we don't do
10614 // this in this case.
10618 // If this is an integer PHI and we know that it has an illegal type, see if
10619 // it is only used by trunc or trunc(lshr) operations. If so, we split the
10620 // PHI into the various pieces being extracted. This sort of thing is
10621 // introduced when SROA promotes an aggregate to a single large integer type.
10622 if (isa<IntegerType>(PN.getType()) && TD &&
10623 !TD->isLegalInteger(PN.getType()->getPrimitiveSizeInBits()))
10624 if (Instruction *Res = SliceUpIllegalIntegerPHI(PN))
10630 Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
10631 SmallVector<Value*, 8> Ops(GEP.op_begin(), GEP.op_end());
10633 if (Value *V = SimplifyGEPInst(&Ops[0], Ops.size(), TD))
10634 return ReplaceInstUsesWith(GEP, V);
10636 Value *PtrOp = GEP.getOperand(0);
10638 if (isa<UndefValue>(GEP.getOperand(0)))
10639 return ReplaceInstUsesWith(GEP, UndefValue::get(GEP.getType()));
10641 // Eliminate unneeded casts for indices.
10643 bool MadeChange = false;
10644 unsigned PtrSize = TD->getPointerSizeInBits();
10646 gep_type_iterator GTI = gep_type_begin(GEP);
10647 for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end();
10648 I != E; ++I, ++GTI) {
10649 if (!isa<SequentialType>(*GTI)) continue;
10651 // If we are using a wider index than needed for this platform, shrink it
10652 // to what we need. If narrower, sign-extend it to what we need. This
10653 // explicit cast can make subsequent optimizations more obvious.
10654 unsigned OpBits = cast<IntegerType>((*I)->getType())->getBitWidth();
10655 if (OpBits == PtrSize)
10658 *I = Builder->CreateIntCast(*I, TD->getIntPtrType(GEP.getContext()),true);
10661 if (MadeChange) return &GEP;
10664 // Combine Indices - If the source pointer to this getelementptr instruction
10665 // is a getelementptr instruction, combine the indices of the two
10666 // getelementptr instructions into a single instruction.
10668 if (GEPOperator *Src = dyn_cast<GEPOperator>(PtrOp)) {
10669 // Note that if our source is a gep chain itself, we wait for that
10670 // chain to be resolved before we perform this transformation. This
10671 // avoids us creating a TON of code in some cases.
10673 if (GetElementPtrInst *SrcGEP =
10674 dyn_cast<GetElementPtrInst>(Src->getOperand(0)))
10675 if (SrcGEP->getNumOperands() == 2)
10676 return 0; // Wait until our source is folded to completion.
10678 SmallVector<Value*, 8> Indices;
10680 // Find out whether the last index in the source GEP is a sequential idx.
10681 bool EndsWithSequential = false;
10682 for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
10684 EndsWithSequential = !isa<StructType>(*I);
10686 // Can we combine the two pointer arithmetic offsets?
10687 if (EndsWithSequential) {
10688 // Replace: gep (gep %P, long B), long A, ...
10689 // With: T = long A+B; gep %P, T, ...
10692 Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
10693 Value *GO1 = GEP.getOperand(1);
10694 if (SO1 == Constant::getNullValue(SO1->getType())) {
10696 } else if (GO1 == Constant::getNullValue(GO1->getType())) {
10699 // If they aren't the same type, then the input hasn't been processed
10700 // by the loop above yet (which canonicalizes sequential index types to
10701 // intptr_t). Just avoid transforming this until the input has been processed.
10703 if (SO1->getType() != GO1->getType())
10705 Sum = Builder->CreateAdd(SO1, GO1, PtrOp->getName()+".sum");
10708 // Update the GEP in place if possible.
10709 if (Src->getNumOperands() == 2) {
10710 GEP.setOperand(0, Src->getOperand(0));
10711 GEP.setOperand(1, Sum);
10714 Indices.append(Src->op_begin()+1, Src->op_end()-1);
10715 Indices.push_back(Sum);
10716 Indices.append(GEP.op_begin()+2, GEP.op_end());
10717 } else if (isa<Constant>(*GEP.idx_begin()) &&
10718 cast<Constant>(*GEP.idx_begin())->isNullValue() &&
10719 Src->getNumOperands() != 1) {
10720 // Otherwise we can do the fold if the first index of the GEP is a zero
10721 Indices.append(Src->op_begin()+1, Src->op_end());
10722 Indices.append(GEP.idx_begin()+1, GEP.idx_end());
10725 if (!Indices.empty())
10726 return (cast<GEPOperator>(&GEP)->isInBounds() &&
10727 Src->isInBounds()) ?
10728 GetElementPtrInst::CreateInBounds(Src->getOperand(0), Indices.begin(),
10729 Indices.end(), GEP.getName()) :
10730 GetElementPtrInst::Create(Src->getOperand(0), Indices.begin(),
10731 Indices.end(), GEP.getName());
10734 // Handle gep(bitcast x) and gep(gep x, 0, 0, 0).
10735 if (Value *X = getBitCastOperand(PtrOp)) {
10736 assert(isa<PointerType>(X->getType()) && "Must be cast from pointer");
10738 // If the input bitcast is actually "bitcast(bitcast(x))", then we don't
10739 // want to change the gep until the bitcasts are eliminated.
10740 if (getBitCastOperand(X)) {
10741 Worklist.AddValue(PtrOp);
10745 bool HasZeroPointerIndex = false;
10746 if (ConstantInt *C = dyn_cast<ConstantInt>(GEP.getOperand(1)))
10747 HasZeroPointerIndex = C->isZero();
10749 // Transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ...
10750 // into : GEP [10 x i8]* X, i32 0, ...
10752 // Likewise, transform: GEP (bitcast i8* X to [0 x i8]*), i32 0, ...
10753 // into : GEP i8* X, ...
10755 // This occurs when the program declares an array extern like "int X[];"
10756 if (HasZeroPointerIndex) {
10757 const PointerType *CPTy = cast<PointerType>(PtrOp->getType());
10758 const PointerType *XTy = cast<PointerType>(X->getType());
10759 if (const ArrayType *CATy =
10760 dyn_cast<ArrayType>(CPTy->getElementType())) {
10761 // GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ?
10762 if (CATy->getElementType() == XTy->getElementType()) {
10763 // -> GEP i8* X, ...
10764 SmallVector<Value*, 8> Indices(GEP.idx_begin()+1, GEP.idx_end());
10765 return cast<GEPOperator>(&GEP)->isInBounds() ?
10766 GetElementPtrInst::CreateInBounds(X, Indices.begin(), Indices.end(),
10768 GetElementPtrInst::Create(X, Indices.begin(), Indices.end(),
10772 if (const ArrayType *XATy = dyn_cast<ArrayType>(XTy->getElementType())){
10773 // GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ?
10774 if (CATy->getElementType() == XATy->getElementType()) {
10775 // -> GEP [10 x i8]* X, i32 0, ...
10776 // At this point, we know that the cast source type is a pointer
10777 // to an array of the same type as the destination pointer
10778 // array. Because the array type is never stepped over (there
10779 // is a leading zero) we can fold the cast into this GEP.
10780 GEP.setOperand(0, X);
10785 } else if (GEP.getNumOperands() == 2) {
10786 // Transform things like:
10787 // %t = getelementptr i32* bitcast ([2 x i32]* %str to i32*), i32 %V
10788 // into: %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast
10789 const Type *SrcElTy = cast<PointerType>(X->getType())->getElementType();
10790 const Type *ResElTy=cast<PointerType>(PtrOp->getType())->getElementType();
10791 if (TD && isa<ArrayType>(SrcElTy) &&
10792 TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType()) ==
10793 TD->getTypeAllocSize(ResElTy)) {
10795 Idx[0] = Constant::getNullValue(Type::getInt32Ty(GEP.getContext()));
10796 Idx[1] = GEP.getOperand(1);
10797 Value *NewGEP = cast<GEPOperator>(&GEP)->isInBounds() ?
10798 Builder->CreateInBoundsGEP(X, Idx, Idx + 2, GEP.getName()) :
10799 Builder->CreateGEP(X, Idx, Idx + 2, GEP.getName());
10800 // V and GEP are both pointer types --> BitCast
10801 return new BitCastInst(NewGEP, GEP.getType());
10804 // Transform things like:
10805 // getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp
10806 // (where tmp = 8*tmp2) into:
10807 // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast
10809 if (TD && isa<ArrayType>(SrcElTy) &&
10810 ResElTy == Type::getInt8Ty(GEP.getContext())) {
10811 uint64_t ArrayEltSize =
10812 TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType());
10814 // Check to see if "tmp" is a scale by a multiple of ArrayEltSize. We
10815 // allow either a mul, shift, or constant here.
10817 ConstantInt *Scale = 0;
10818 if (ArrayEltSize == 1) {
10819 NewIdx = GEP.getOperand(1);
10820 Scale = ConstantInt::get(cast<IntegerType>(NewIdx->getType()), 1);
10821 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP.getOperand(1))) {
10822 NewIdx = ConstantInt::get(CI->getType(), 1);
10824 } else if (Instruction *Inst =dyn_cast<Instruction>(GEP.getOperand(1))){
10825 if (Inst->getOpcode() == Instruction::Shl &&
10826 isa<ConstantInt>(Inst->getOperand(1))) {
10827 ConstantInt *ShAmt = cast<ConstantInt>(Inst->getOperand(1));
10828 uint32_t ShAmtVal = ShAmt->getLimitedValue(64);
10829 Scale = ConstantInt::get(cast<IntegerType>(Inst->getType()),
10831 NewIdx = Inst->getOperand(0);
10832 } else if (Inst->getOpcode() == Instruction::Mul &&
10833 isa<ConstantInt>(Inst->getOperand(1))) {
10834 Scale = cast<ConstantInt>(Inst->getOperand(1));
10835 NewIdx = Inst->getOperand(0);
10839 // If the index will be to exactly the right offset with the scale taken
10840 // out, perform the transformation. Note, we don't know whether Scale is
10841 // signed or not. We'll use unsigned version of division/modulo
10842 // operation after making sure Scale doesn't have the sign bit set.
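// Worked example (illustrative): with ArrayEltSize == 8 and a GEP index of
// "mul i32 %tmp2, 8", Scale is 8 and NewIdx is %tmp2. 8 % 8 == 0 and
// 8 / 8 == 1, so no extra multiply is needed and the new GEP can index the
// array directly by %tmp2.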
10843 if (ArrayEltSize && Scale && Scale->getSExtValue() >= 0LL &&
10844 Scale->getZExtValue() % ArrayEltSize == 0) {
10845 Scale = ConstantInt::get(Scale->getType(),
10846 Scale->getZExtValue() / ArrayEltSize);
10847 if (Scale->getZExtValue() != 1) {
10848 Constant *C = ConstantExpr::getIntegerCast(Scale, NewIdx->getType(),
10850 NewIdx = Builder->CreateMul(NewIdx, C, "idxscale");
10853 // Insert the new GEP instruction.
10855 Idx[0] = Constant::getNullValue(Type::getInt32Ty(GEP.getContext()));
10857 Value *NewGEP = cast<GEPOperator>(&GEP)->isInBounds() ?
10858 Builder->CreateInBoundsGEP(X, Idx, Idx + 2, GEP.getName()) :
10859 Builder->CreateGEP(X, Idx, Idx + 2, GEP.getName());
10860 // The NewGEP must be pointer typed, so must the old one -> BitCast
10861 return new BitCastInst(NewGEP, GEP.getType());
10867 /// See if we can simplify:
10868 /// X = bitcast A* to B*
10869 /// Y = gep X, <...constant indices...>
10870 /// into a gep of the original struct. This is important for SROA and alias
10871 /// analysis of unions. If "A" is also a bitcast, wait for A/X to be merged.
10872 if (BitCastInst *BCI = dyn_cast<BitCastInst>(PtrOp)) {
10874 !isa<BitCastInst>(BCI->getOperand(0)) && GEP.hasAllConstantIndices()) {
10875 // Determine how much the GEP moves the pointer. We are guaranteed to get
10876 // a constant back from EmitGEPOffset.
10877 ConstantInt *OffsetV = cast<ConstantInt>(EmitGEPOffset(&GEP, *this));
10878 int64_t Offset = OffsetV->getSExtValue();
10880 // If this GEP instruction doesn't move the pointer, just replace the GEP
10881 // with a bitcast of the real input to the dest type.
10883 // If the bitcast is of an allocation, and the allocation will be
10884 // converted to match the type of the cast, don't touch this.
10885 if (isa<AllocaInst>(BCI->getOperand(0)) ||
10886 isMalloc(BCI->getOperand(0))) {
10887 // See if the bitcast simplifies, if so, don't nuke this GEP yet.
10888 if (Instruction *I = visitBitCast(*BCI)) {
10891 BCI->getParent()->getInstList().insert(BCI, I);
10892 ReplaceInstUsesWith(*BCI, I);
10897 return new BitCastInst(BCI->getOperand(0), GEP.getType());
10900 // Otherwise, if the offset is non-zero, we need to find out if there is a
10901 // field at Offset in 'A's type. If so, we can pull the cast through the GEP.
10903 SmallVector<Value*, 8> NewIndices;
10905 cast<PointerType>(BCI->getOperand(0)->getType())->getElementType();
10906 if (FindElementAtOffset(InTy, Offset, NewIndices, TD)) {
10907 Value *NGEP = cast<GEPOperator>(&GEP)->isInBounds() ?
10908 Builder->CreateInBoundsGEP(BCI->getOperand(0), NewIndices.begin(),
10909 NewIndices.end()) :
10910 Builder->CreateGEP(BCI->getOperand(0), NewIndices.begin(),
10913 if (NGEP->getType() == GEP.getType())
10914 return ReplaceInstUsesWith(GEP, NGEP);
10915 NGEP->takeName(&GEP);
10916 return new BitCastInst(NGEP, GEP.getType());
10924 Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
10925 // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
10926 if (AI.isArrayAllocation()) { // Check C != 1
10927 if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
10928 const Type *NewTy =
10929 ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
10930 assert(isa<AllocaInst>(AI) && "Unknown type of allocation inst!");
10931 AllocaInst *New = Builder->CreateAlloca(NewTy, 0, AI.getName());
10932 New->setAlignment(AI.getAlignment());
10934 // Scan to the end of the allocation instructions, to skip over a block of
10935 // allocas if possible...also skip interleaved debug info
10937 BasicBlock::iterator It = New;
10938 while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It)) ++It;
10940 // Now that It is pointing to the first non-allocation-inst in the block,
10941 // insert our getelementptr instruction...
10943 Value *NullIdx =Constant::getNullValue(Type::getInt32Ty(AI.getContext()));
10947 Value *V = GetElementPtrInst::CreateInBounds(New, Idx, Idx + 2,
10948 New->getName()+".sub", It);
10950 // Now make everything use the getelementptr instead of the original allocation.
10952 return ReplaceInstUsesWith(AI, V);
10953 } else if (isa<UndefValue>(AI.getArraySize())) {
10954 return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
10958 if (TD && isa<AllocaInst>(AI) && AI.getAllocatedType()->isSized()) {
10959 // If alloca'ing a zero byte object, replace the alloca with a null pointer.
10960 // Note that we only do this for alloca's, because malloc should allocate
10961 // and return a unique pointer, even for a zero byte allocation.
10962 if (TD->getTypeAllocSize(AI.getAllocatedType()) == 0)
10963 return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
10965 // If the alignment is 0 (unspecified), assign it the preferred alignment.
10966 if (AI.getAlignment() == 0)
10967 AI.setAlignment(TD->getPrefTypeAlignment(AI.getAllocatedType()));
10973 Instruction *InstCombiner::visitFree(Instruction &FI) {
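// The pointer being freed is operand 1 of the call.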
10974 Value *Op = FI.getOperand(1);
10976 // free undef -> unreachable.
10977 if (isa<UndefValue>(Op)) {
10978 // Insert a new store to null because we cannot modify the CFG here.
10979 new StoreInst(ConstantInt::getTrue(FI.getContext()),
10980 UndefValue::get(Type::getInt1PtrTy(FI.getContext())), &FI);
10981 return EraseInstFromFunction(FI);
10984 // If we have 'free null' delete the instruction. This can happen in stl code
10985 // when lots of inlining happens.
10986 if (isa<ConstantPointerNull>(Op))
10987 return EraseInstFromFunction(FI);
10989 // If we have a malloc call whose only use is a free call, delete both.
10990 if (isMalloc(Op)) {
10991 if (CallInst* CI = extractMallocCallFromBitCast(Op)) {
10992 if (Op->hasOneUse() && CI->hasOneUse()) {
10993 EraseInstFromFunction(FI);
10994 EraseInstFromFunction(*CI);
10995 return EraseInstFromFunction(*cast<Instruction>(Op));
10998 // Op is a call to malloc
10999 if (Op->hasOneUse()) {
11000 EraseInstFromFunction(FI);
11001 return EraseInstFromFunction(*cast<Instruction>(Op));
11009 /// InstCombineLoadCast - Fold 'load (cast P)' -> 'cast (load P)' when possible.
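///
/// Illustrative sketch (invented values):
///   %v = load i8** bitcast (i32** %p to i8**)
/// ==>
///   %t = load i32** %p
///   %v = bitcast i32* %t to i8*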
11010 static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
11011 const TargetData *TD) {
11012 User *CI = cast<User>(LI.getOperand(0));
11013 Value *CastOp = CI->getOperand(0);
11015 const PointerType *DestTy = cast<PointerType>(CI->getType());
11016 const Type *DestPTy = DestTy->getElementType();
11017 if (const PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType())) {
11019 // If the address spaces don't match, don't eliminate the cast.
11020 if (DestTy->getAddressSpace() != SrcTy->getAddressSpace())
11023 const Type *SrcPTy = SrcTy->getElementType();
11025 if (DestPTy->isInteger() || isa<PointerType>(DestPTy) ||
11026 isa<VectorType>(DestPTy)) {
11027 // If the source is an array, the code below will not succeed. Check to
11028 // see if a trivial 'gep P, 0, 0' will help matters. Only do this for constant pointers.
11030 if (const ArrayType *ASrcTy = dyn_cast<ArrayType>(SrcPTy))
11031 if (Constant *CSrc = dyn_cast<Constant>(CastOp))
11032 if (ASrcTy->getNumElements() != 0) {
11034 Idxs[0] = Constant::getNullValue(Type::getInt32Ty(LI.getContext()));
11036 CastOp = ConstantExpr::getGetElementPtr(CSrc, Idxs, 2);
11037 SrcTy = cast<PointerType>(CastOp->getType());
11038 SrcPTy = SrcTy->getElementType();
11041 if (IC.getTargetData() &&
11042 (SrcPTy->isInteger() || isa<PointerType>(SrcPTy) ||
11043 isa<VectorType>(SrcPTy)) &&
11044 // Do not allow turning this into a load of an integer, which is then
11045 // cast to a pointer; this pessimizes pointer analysis a lot.
11046 (isa<PointerType>(SrcPTy) == isa<PointerType>(LI.getType())) &&
11047 IC.getTargetData()->getTypeSizeInBits(SrcPTy) ==
11048 IC.getTargetData()->getTypeSizeInBits(DestPTy)) {
11050 // Okay, we are casting from one integer or pointer type to another of
11051 // the same size. Instead of casting the pointer before the load, cast
11052 // the result of the loaded value.
11054 IC.Builder->CreateLoad(CastOp, LI.isVolatile(), CI->getName());
11055 // Now cast the result of the load.
11056 return new BitCastInst(NewLoad, LI.getType());
11063 Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
11064 Value *Op = LI.getOperand(0);
11066 // Attempt to improve the alignment.
11068 unsigned KnownAlign =
11069 GetOrEnforceKnownAlignment(Op, TD->getPrefTypeAlignment(LI.getType()));
11071 (LI.getAlignment() == 0 ? TD->getABITypeAlignment(LI.getType()) :
11072 LI.getAlignment()))
11073 LI.setAlignment(KnownAlign);
11076 // load (cast X) --> cast (load X) iff safe.
11077 if (isa<CastInst>(Op))
11078 if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
11081 // None of the following transforms are legal for volatile loads.
11082 if (LI.isVolatile()) return 0;
11084 // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
11086 // separated by a few arithmetic operations.
11087 BasicBlock::iterator BBI = &LI;
11088 if (Value *AvailableVal = FindAvailableLoadedValue(Op, LI.getParent(), BBI,6))
11089 return ReplaceInstUsesWith(LI, AvailableVal);
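  // For example (illustrative IR), in:
  //   store i32 %x, i32* %p
  //   %a = add i32 %b, %c
  //   %v = load i32* %p
  // the load is replaced directly with %x.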
11091 // load(gep null, ...) -> unreachable
11092 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
11093 const Value *GEPI0 = GEPI->getOperand(0);
11094 // TODO: Consider a target hook for valid address spaces for this xform.
11095 if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0){
11096 // Insert a new store to null instruction before the load to indicate
11097 // that this code is not reachable. We do this instead of inserting
      // an unreachable instruction directly because we cannot modify the CFG.
11100 new StoreInst(UndefValue::get(LI.getType()),
11101 Constant::getNullValue(Op->getType()), &LI);
11102 return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
11106 // load null/undef -> unreachable
11107 // TODO: Consider a target hook for valid address spaces for this xform.
11108 if (isa<UndefValue>(Op) ||
11109 (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0)) {
11110 // Insert a new store to null instruction before the load to indicate that
11111 // this code is not reachable. We do this instead of inserting an
11112 // unreachable instruction directly because we cannot modify the CFG.
11113 new StoreInst(UndefValue::get(LI.getType()),
11114 Constant::getNullValue(Op->getType()), &LI);
11115 return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
11118 // Instcombine load (constantexpr_cast global) -> cast (load global)
11119 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op))
    if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
      return Res;
11124 if (Op->hasOneUse()) {
11125 // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
11127 // exposes redundancy in the code.
11129 // Note that we cannot do the transformation unless we know that the
11130 // introduced loads cannot trap! Something like this is valid as long as
11131 // the condition is always false: load (select bool %C, int* null, int* %G),
11132 // but it would not be valid if we transformed it to load from null
11133 // unconditionally.
11135 if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
11136 // load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
11137 if (isSafeToLoadUnconditionally(SI->getOperand(1), SI) &&
11138 isSafeToLoadUnconditionally(SI->getOperand(2), SI)) {
11139 Value *V1 = Builder->CreateLoad(SI->getOperand(1),
11140 SI->getOperand(1)->getName()+".val");
11141 Value *V2 = Builder->CreateLoad(SI->getOperand(2),
11142 SI->getOperand(2)->getName()+".val");
11143 return SelectInst::Create(SI->getCondition(), V1, V2);
11146 // load (select (cond, null, P)) -> load P
11147 if (Constant *C = dyn_cast<Constant>(SI->getOperand(1)))
11148 if (C->isNullValue()) {
11149 LI.setOperand(0, SI->getOperand(2));
11153 // load (select (cond, P, null)) -> load P
11154 if (Constant *C = dyn_cast<Constant>(SI->getOperand(2)))
11155 if (C->isNullValue()) {
11156 LI.setOperand(0, SI->getOperand(1));
11164 /// InstCombineStoreToCast - Fold store V, (cast P) -> store (cast V), P
11165 /// when possible. This makes it generally easy to do alias analysis and/or
11166 /// SROA/mem2reg of the memory object.
11167 static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
11168 User *CI = cast<User>(SI.getOperand(1));
11169 Value *CastOp = CI->getOperand(0);
11171 const Type *DestPTy = cast<PointerType>(CI->getType())->getElementType();
11172 const PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType());
11173 if (SrcTy == 0) return 0;
11175 const Type *SrcPTy = SrcTy->getElementType();
11177 if (!DestPTy->isInteger() && !isa<PointerType>(DestPTy))
11180 /// NewGEPIndices - If SrcPTy is an aggregate type, we can emit a "noop gep"
11181 /// to its first element. This allows us to handle things like:
11182 /// store i32 xxx, (bitcast {foo*, float}* %P to i32*)
11183 /// on 32-bit hosts.
11184 SmallVector<Value*, 4> NewGEPIndices;
11186 // If the source is an array, the code below will not succeed. Check to
  // see if a trivial 'gep P, 0, 0' will help matters. Only do this for
  // aggregate types (arrays and structs).
11189 if (isa<ArrayType>(SrcPTy) || isa<StructType>(SrcPTy)) {
11190 // Index through pointer.
11191 Constant *Zero = Constant::getNullValue(Type::getInt32Ty(SI.getContext()));
11192 NewGEPIndices.push_back(Zero);
11195 if (const StructType *STy = dyn_cast<StructType>(SrcPTy)) {
11196 if (!STy->getNumElements()) /* Struct can be empty {} */
11198 NewGEPIndices.push_back(Zero);
11199 SrcPTy = STy->getElementType(0);
11200 } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcPTy)) {
11201 NewGEPIndices.push_back(Zero);
11202 SrcPTy = ATy->getElementType();
11208 SrcTy = PointerType::get(SrcPTy, SrcTy->getAddressSpace());
11211 if (!SrcPTy->isInteger() && !isa<PointerType>(SrcPTy))
11214 // If the pointers point into different address spaces or if they point to
11215 // values with different sizes, we can't do the transformation.
11216 if (!IC.getTargetData() ||
11217 SrcTy->getAddressSpace() !=
11218 cast<PointerType>(CI->getType())->getAddressSpace() ||
11219 IC.getTargetData()->getTypeSizeInBits(SrcPTy) !=
11220 IC.getTargetData()->getTypeSizeInBits(DestPTy))
11223 // Okay, we are casting from one integer or pointer type to another of
11224 // the same size. Instead of casting the pointer before
11225 // the store, cast the value to be stored.
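  // For example (illustrative IR, assuming a 32-bit target):
  //   %c = bitcast i8** %P to i32*
  //   store i32 %x, i32* %c
  // becomes:
  //   %x.c = inttoptr i32 %x to i8*
  //   store i8* %x.c, i8** %P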
11227 Value *SIOp0 = SI.getOperand(0);
11228 Instruction::CastOps opcode = Instruction::BitCast;
11229 const Type* CastSrcTy = SIOp0->getType();
11230 const Type* CastDstTy = SrcPTy;
11231 if (isa<PointerType>(CastDstTy)) {
11232 if (CastSrcTy->isInteger())
11233 opcode = Instruction::IntToPtr;
11234 } else if (isa<IntegerType>(CastDstTy)) {
11235 if (isa<PointerType>(SIOp0->getType()))
11236 opcode = Instruction::PtrToInt;
11239 // SIOp0 is a pointer to aggregate and this is a store to the first field,
11240 // emit a GEP to index into its first field.
11241 if (!NewGEPIndices.empty())
11242 CastOp = IC.Builder->CreateInBoundsGEP(CastOp, NewGEPIndices.begin(),
11243 NewGEPIndices.end());
  Value *NewCast = IC.Builder->CreateCast(opcode, SIOp0, CastDstTy,
11246 SIOp0->getName()+".c");
11247 return new StoreInst(NewCast, CastOp);
11250 /// equivalentAddressValues - Test if A and B will obviously have the same
11251 /// value. This includes recognizing that %t0 and %t1 will have the same
11252 /// value in code like this:
11253 /// %t0 = getelementptr \@a, 0, 3
11254 /// store i32 0, i32* %t0
11255 /// %t1 = getelementptr \@a, 0, 3
11256 /// %t2 = load i32* %t1
11258 static bool equivalentAddressValues(Value *A, Value *B) {
11259 // Test if the values are trivially equivalent.
11260 if (A == B) return true;
  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
11265 // means that they'll always either have the same value or one of them
11266 // will have an undefined value.
11267 if (isa<BinaryOperator>(A) ||
11268 isa<CastInst>(A) ||
11270 isa<GetElementPtrInst>(A))
11271 if (Instruction *BI = dyn_cast<Instruction>(B))
11272 if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
11275 // Otherwise they may not be equivalent.
11279 // If this instruction has two uses, one of which is a llvm.dbg.declare,
11280 // return the llvm.dbg.declare.
11281 DbgDeclareInst *InstCombiner::hasOneUsePlusDeclare(Value *V) {
11282 if (!V->hasNUses(2))
11284 for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
11286 if (DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(UI))
11288 if (isa<BitCastInst>(UI) && UI->hasOneUse()) {
11289 if (DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(UI->use_begin()))
11296 Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
11297 Value *Val = SI.getOperand(0);
11298 Value *Ptr = SI.getOperand(1);
  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
  // If the RHS is an alloca with two uses, the other one being a
  // llvm.dbg.declare, zapify the store and the declare, making the
  // alloca dead. We must do this to prevent declares from affecting
  // codegen.
11306 if (!SI.isVolatile()) {
11307 if (Ptr->hasOneUse()) {
11308 if (isa<AllocaInst>(Ptr)) {
11309 EraseInstFromFunction(SI);
11313 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
11314 if (isa<AllocaInst>(GEP->getOperand(0))) {
11315 if (GEP->getOperand(0)->hasOneUse()) {
11316 EraseInstFromFunction(SI);
11320 if (DbgDeclareInst *DI = hasOneUsePlusDeclare(GEP->getOperand(0))) {
11321 EraseInstFromFunction(*DI);
11322 EraseInstFromFunction(SI);
11329 if (DbgDeclareInst *DI = hasOneUsePlusDeclare(Ptr)) {
11330 EraseInstFromFunction(*DI);
11331 EraseInstFromFunction(SI);
11337 // Attempt to improve the alignment.
11339 unsigned KnownAlign =
11340 GetOrEnforceKnownAlignment(Ptr, TD->getPrefTypeAlignment(Val->getType()));
11342 (SI.getAlignment() == 0 ? TD->getABITypeAlignment(Val->getType()) :
11343 SI.getAlignment()))
11344 SI.setAlignment(KnownAlign);
11347 // Do really simple DSE, to catch cases where there are several consecutive
11348 // stores to the same location, separated by a few arithmetic operations. This
11349 // situation often occurs with bitfield accesses.
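  // For example (illustrative IR), in:
  //   store i32 1, i32* %p
  //   %v = or i32 %a, %b
  //   store i32 2, i32* %p
  // the first store is dead and is removed when the second store is visited.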
11350 BasicBlock::iterator BBI = &SI;
11351 for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
11354 // Don't count debug info directives, lest they affect codegen,
11355 // and we skip pointer-to-pointer bitcasts, which are NOPs.
11356 // It is necessary for correctness to skip those that feed into a
11357 // llvm.dbg.declare, as these are not present when debugging is off.
11358 if (isa<DbgInfoIntrinsic>(BBI) ||
11359 (isa<BitCastInst>(BBI) && isa<PointerType>(BBI->getType()))) {
11364 if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
11365 // Prev store isn't volatile, and stores to the same location?
      if (!PrevSI->isVolatile() &&
          equivalentAddressValues(PrevSI->getOperand(1), SI.getOperand(1))) {
11370 EraseInstFromFunction(*PrevSI);
11376 // If this is a load, we have to stop. However, if the loaded value is from
11377 // the pointer we're loading and is producing the pointer we're storing,
11378 // then *this* store is dead (X = load P; store X -> P).
11379 if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
11380 if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
11381 !SI.isVolatile()) {
11382 EraseInstFromFunction(SI);
11386 // Otherwise, this is a load from some other location. Stores before it
11387 // may not be dead.
11391 // Don't skip over loads or things that can modify memory.
11392 if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
11397 if (SI.isVolatile()) return 0; // Don't hack volatile stores.
11399 // store X, null -> turns into 'unreachable' in SimplifyCFG
11400 if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
11401 if (!isa<UndefValue>(Val)) {
11402 SI.setOperand(0, UndefValue::get(Val->getType()));
11403 if (Instruction *U = dyn_cast<Instruction>(Val))
11404 Worklist.Add(U); // Dropped a use.
11407 return 0; // Do not modify these!
11410 // store undef, Ptr -> noop
11411 if (isa<UndefValue>(Val)) {
11412 EraseInstFromFunction(SI);
11417 // If the pointer destination is a cast, see if we can fold the cast into the
11419 if (isa<CastInst>(Ptr))
11420 if (Instruction *Res = InstCombineStoreToCast(*this, SI))
11422 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
11424 if (Instruction *Res = InstCombineStoreToCast(*this, SI))
11428 // If this store is the last instruction in the basic block (possibly
11429 // excepting debug info instructions and the pointer bitcasts that feed
11430 // into them), and if the block ends with an unconditional branch, try
11431 // to move it to the successor block.
11435 } while (isa<DbgInfoIntrinsic>(BBI) ||
11436 (isa<BitCastInst>(BBI) && isa<PointerType>(BBI->getType())));
11437 if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
11438 if (BI->isUnconditional())
11439 if (SimplifyStoreAtEndOfBlock(SI))
11440 return 0; // xform done!
11445 /// SimplifyStoreAtEndOfBlock - Turn things like:
11446 /// if () { *P = v1; } else { *P = v2 }
11447 /// into a phi node with a store in the successor.
11449 /// Simplify things like:
11450 /// *P = v1; if () { *P = v2; }
11451 /// into a phi node with a store in the successor.
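/// For the if/then/else form, the merged result is roughly (illustrative IR):
///   %storemerge = phi i32 [ %v1, %then ], [ %v2, %else ]
///   store i32 %storemerge, i32* %P
/// placed in the common successor block.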
11453 bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
11454 BasicBlock *StoreBB = SI.getParent();
11456 // Check to see if the successor block has exactly two incoming edges. If
11457 // so, see if the other predecessor contains a store to the same location.
11458 // if so, insert a PHI node (if needed) and move the stores down.
11459 BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);
11461 // Determine whether Dest has exactly two predecessors and, if so, compute
11462 // the other predecessor.
11463 pred_iterator PI = pred_begin(DestBB);
11464 BasicBlock *OtherBB = 0;
11465 if (*PI != StoreBB)
11468 if (PI == pred_end(DestBB))
11471 if (*PI != StoreBB) {
11476 if (++PI != pred_end(DestBB))
11479 // Bail out if all the relevant blocks aren't distinct (this can happen,
11480 // for example, if SI is in an infinite loop)
11481 if (StoreBB == DestBB || OtherBB == DestBB)
11484 // Verify that the other block ends in a branch and is not otherwise empty.
11485 BasicBlock::iterator BBI = OtherBB->getTerminator();
11486 BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
11487 if (!OtherBr || BBI == OtherBB->begin())
11490 // If the other block ends in an unconditional branch, check for the 'if then
  // else' case, where there is an instruction before the branch.
11492 StoreInst *OtherStore = 0;
11493 if (OtherBr->isUnconditional()) {
11495 // Skip over debugging info.
11496 while (isa<DbgInfoIntrinsic>(BBI) ||
11497 (isa<BitCastInst>(BBI) && isa<PointerType>(BBI->getType()))) {
11498 if (BBI==OtherBB->begin())
11502 // If this isn't a store, isn't a store to the same location, or if the
11503 // alignments differ, bail out.
11504 OtherStore = dyn_cast<StoreInst>(BBI);
11505 if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
11506 OtherStore->getAlignment() != SI.getAlignment())
11509 // Otherwise, the other block ended with a conditional branch. If one of the
11510 // destinations is StoreBB, then we have the if/then case.
11511 if (OtherBr->getSuccessor(0) != StoreBB &&
11512 OtherBr->getSuccessor(1) != StoreBB)
11515 // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
11516 // if/then triangle. See if there is a store to the same ptr as SI that
11517 // lives in OtherBB.
11519 // Check to see if we find the matching store.
11520 if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
11521 if (OtherStore->getOperand(1) != SI.getOperand(1) ||
11522 OtherStore->getAlignment() != SI.getAlignment())
11526 // If we find something that may be using or overwriting the stored
11527 // value, or if we run out of instructions, we can't do the xform.
11528 if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
11529 BBI == OtherBB->begin())
11533 // In order to eliminate the store in OtherBr, we have to
11534 // make sure nothing reads or overwrites the stored value in
11536 for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
11537 // FIXME: This should really be AA driven.
11538 if (I->mayReadFromMemory() || I->mayWriteToMemory())
11543 // Insert a PHI node now if we need it.
11544 Value *MergedVal = OtherStore->getOperand(0);
11545 if (MergedVal != SI.getOperand(0)) {
11546 PHINode *PN = PHINode::Create(MergedVal->getType(), "storemerge");
11547 PN->reserveOperandSpace(2);
11548 PN->addIncoming(SI.getOperand(0), SI.getParent());
11549 PN->addIncoming(OtherStore->getOperand(0), OtherBB);
11550 MergedVal = InsertNewInstBefore(PN, DestBB->front());
11553 // Advance to a place where it is safe to insert the new store and
11555 BBI = DestBB->getFirstNonPHI();
11556 InsertNewInstBefore(new StoreInst(MergedVal, SI.getOperand(1),
11557 OtherStore->isVolatile(),
11558 SI.getAlignment()), *BBI);
11560 // Nuke the old stores.
11561 EraseInstFromFunction(SI);
11562 EraseInstFromFunction(*OtherStore);
11568 Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
11569 // Change br (not X), label True, label False to: br X, label False, True
11571 BasicBlock *TrueDest;
11572 BasicBlock *FalseDest;
11573 if (match(&BI, m_Br(m_Not(m_Value(X)), TrueDest, FalseDest)) &&
11574 !isa<Constant>(X)) {
11575 // Swap Destinations and condition...
11576 BI.setCondition(X);
11577 BI.setSuccessor(0, FalseDest);
11578 BI.setSuccessor(1, TrueDest);
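    // Illustrative example:
    //   %notc = xor i1 %c, true
    //   br i1 %notc, label %A, label %B
    // becomes:
    //   br i1 %c, label %B, label %A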
  // Canonicalize fcmp_one, fcmp_ole, and fcmp_oge branches by inverting the
  // predicate and swapping the branch destinations.
11583 FCmpInst::Predicate FPred; Value *Y;
11584 if (match(&BI, m_Br(m_FCmp(FPred, m_Value(X), m_Value(Y)),
11585 TrueDest, FalseDest)) &&
11586 BI.getCondition()->hasOneUse())
11587 if (FPred == FCmpInst::FCMP_ONE || FPred == FCmpInst::FCMP_OLE ||
11588 FPred == FCmpInst::FCMP_OGE) {
11589 FCmpInst *Cond = cast<FCmpInst>(BI.getCondition());
11590 Cond->setPredicate(FCmpInst::getInversePredicate(FPred));
11592 // Swap Destinations and condition.
11593 BI.setSuccessor(0, FalseDest);
11594 BI.setSuccessor(1, TrueDest);
11595 Worklist.Add(Cond);
  // Canonicalize icmp_ne, icmp_ule, icmp_sle, icmp_uge, and icmp_sge branches
  // by inverting the predicate and swapping the branch destinations.
11600 ICmpInst::Predicate IPred;
11601 if (match(&BI, m_Br(m_ICmp(IPred, m_Value(X), m_Value(Y)),
11602 TrueDest, FalseDest)) &&
11603 BI.getCondition()->hasOneUse())
11604 if (IPred == ICmpInst::ICMP_NE || IPred == ICmpInst::ICMP_ULE ||
11605 IPred == ICmpInst::ICMP_SLE || IPred == ICmpInst::ICMP_UGE ||
11606 IPred == ICmpInst::ICMP_SGE) {
11607 ICmpInst *Cond = cast<ICmpInst>(BI.getCondition());
11608 Cond->setPredicate(ICmpInst::getInversePredicate(IPred));
11609 // Swap Destinations and condition.
11610 BI.setSuccessor(0, FalseDest);
11611 BI.setSuccessor(1, TrueDest);
11612 Worklist.Add(Cond);
11619 Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
11620 Value *Cond = SI.getCondition();
11621 if (Instruction *I = dyn_cast<Instruction>(Cond)) {
11622 if (I->getOpcode() == Instruction::Add)
11623 if (ConstantInt *AddRHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
11624 // change 'switch (X+4) case 1:' into 'switch (X) case -3'
11625 for (unsigned i = 2, e = SI.getNumOperands(); i != e; i += 2)
          SI.setOperand(i, ConstantExpr::getSub(cast<Constant>(SI.getOperand(i)),
                                                AddRHS));
11629 SI.setOperand(0, I->getOperand(0));
11637 Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
11638 Value *Agg = EV.getAggregateOperand();
11640 if (!EV.hasIndices())
11641 return ReplaceInstUsesWith(EV, Agg);
11643 if (Constant *C = dyn_cast<Constant>(Agg)) {
11644 if (isa<UndefValue>(C))
11645 return ReplaceInstUsesWith(EV, UndefValue::get(EV.getType()));
11647 if (isa<ConstantAggregateZero>(C))
11648 return ReplaceInstUsesWith(EV, Constant::getNullValue(EV.getType()));
11650 if (isa<ConstantArray>(C) || isa<ConstantStruct>(C)) {
11651 // Extract the element indexed by the first index out of the constant
11652 Value *V = C->getOperand(*EV.idx_begin());
11653 if (EV.getNumIndices() > 1)
11654 // Extract the remaining indices out of the constant indexed by the
11656 return ExtractValueInst::Create(V, EV.idx_begin() + 1, EV.idx_end());
11658 return ReplaceInstUsesWith(EV, V);
11660 return 0; // Can't handle other constants
11662 if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
11663 // We're extracting from an insertvalue instruction, compare the indices
11664 const unsigned *exti, *exte, *insi, *inse;
11665 for (exti = EV.idx_begin(), insi = IV->idx_begin(),
11666 exte = EV.idx_end(), inse = IV->idx_end();
11667 exti != exte && insi != inse;
11669 if (*insi != *exti)
11670 // The insert and extract both reference distinctly different elements.
11671 // This means the extract is not influenced by the insert, and we can
11672 // replace the aggregate operand of the extract with the aggregate
11673 // operand of the insert. i.e., replace
11674 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
11675 // %E = extractvalue { i32, { i32 } } %I, 0
11677 // %E = extractvalue { i32, { i32 } } %A, 0
11678 return ExtractValueInst::Create(IV->getAggregateOperand(),
11679 EV.idx_begin(), EV.idx_end());
11681 if (exti == exte && insi == inse)
11682 // Both iterators are at the end: Index lists are identical. Replace
11683 // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
11684 // %C = extractvalue { i32, { i32 } } %B, 1, 0
11686 return ReplaceInstUsesWith(EV, IV->getInsertedValueOperand());
11687 if (exti == exte) {
11688 // The extract list is a prefix of the insert list. i.e. replace
11689 // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
11690 // %E = extractvalue { i32, { i32 } } %I, 1
11692 // %X = extractvalue { i32, { i32 } } %A, 1
11693 // %E = insertvalue { i32 } %X, i32 42, 0
11694 // by switching the order of the insert and extract (though the
11695 // insertvalue should be left in, since it may have other uses).
11696 Value *NewEV = Builder->CreateExtractValue(IV->getAggregateOperand(),
11697 EV.idx_begin(), EV.idx_end());
11698 return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
11702 // The insert list is a prefix of the extract list
11703 // We can simply remove the common indices from the extract and make it
11704 // operate on the inserted value instead of the insertvalue result.
11706 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
11707 // %E = extractvalue { i32, { i32 } } %I, 1, 0
      // %E = extractvalue { i32 } { i32 42 }, 0
11710 return ExtractValueInst::Create(IV->getInsertedValueOperand(),
11713 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Agg)) {
    // We're extracting from an intrinsic; see if we're the only user, which
    // allows us to simplify multiple-result intrinsics to simpler things that
    // just get one value.
11717 if (II->hasOneUse()) {
11718 // Check if we're grabbing the overflow bit or the result of a 'with
11719 // overflow' intrinsic. If it's the latter we can remove the intrinsic
11720 // and replace it with a traditional binary instruction.
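      // For example (illustrative IR), if only the first result of
      //   %res = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
      //   %sum = extractvalue { i32, i1 } %res, 0
      // is ever used, the pair becomes a plain 'add i32 %a, %b'.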
11721 switch (II->getIntrinsicID()) {
11722 case Intrinsic::uadd_with_overflow:
11723 case Intrinsic::sadd_with_overflow:
11724 if (*EV.idx_begin() == 0) { // Normal result.
11725 Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
11726 II->replaceAllUsesWith(UndefValue::get(II->getType()));
11727 EraseInstFromFunction(*II);
11728 return BinaryOperator::CreateAdd(LHS, RHS);
11731 case Intrinsic::usub_with_overflow:
11732 case Intrinsic::ssub_with_overflow:
11733 if (*EV.idx_begin() == 0) { // Normal result.
11734 Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
11735 II->replaceAllUsesWith(UndefValue::get(II->getType()));
11736 EraseInstFromFunction(*II);
11737 return BinaryOperator::CreateSub(LHS, RHS);
11740 case Intrinsic::umul_with_overflow:
11741 case Intrinsic::smul_with_overflow:
11742 if (*EV.idx_begin() == 0) { // Normal result.
11743 Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
11744 II->replaceAllUsesWith(UndefValue::get(II->getType()));
11745 EraseInstFromFunction(*II);
11746 return BinaryOperator::CreateMul(LHS, RHS);
11754 // Can't simplify extracts from other values. Note that nested extracts are
  // already simplified implicitly by the above (extract ( extract (insert) )
11756 // will be translated into extract ( insert ( extract ) ) first and then just
11757 // the value inserted, if appropriate).
11761 /// CheapToScalarize - Return true if the value is cheaper to scalarize than it
11762 /// is to leave as a vector operation.
11763 static bool CheapToScalarize(Value *V, bool isConstant) {
11764 if (isa<ConstantAggregateZero>(V))
11766 if (ConstantVector *C = dyn_cast<ConstantVector>(V)) {
11767 if (isConstant) return true;
11768 // If all elts are the same, we can extract.
11769 Constant *Op0 = C->getOperand(0);
11770 for (unsigned i = 1; i < C->getNumOperands(); ++i)
11771 if (C->getOperand(i) != Op0)
11775 Instruction *I = dyn_cast<Instruction>(V);
11776 if (!I) return false;
11778 // Insert element gets simplified to the inserted element or is deleted if
  // this is a constant-index extractelement and it's a constant-index insertelt.
11780 if (I->getOpcode() == Instruction::InsertElement && isConstant &&
11781 isa<ConstantInt>(I->getOperand(2)))
11783 if (I->getOpcode() == Instruction::Load && I->hasOneUse())
11785 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I))
11786 if (BO->hasOneUse() &&
11787 (CheapToScalarize(BO->getOperand(0), isConstant) ||
11788 CheapToScalarize(BO->getOperand(1), isConstant)))
11790 if (CmpInst *CI = dyn_cast<CmpInst>(I))
11791 if (CI->hasOneUse() &&
11792 (CheapToScalarize(CI->getOperand(0), isConstant) ||
11793 CheapToScalarize(CI->getOperand(1), isConstant)))
11799 /// Read and decode a shufflevector mask.
11801 /// It turns undef elements into values that are larger than the number of
11802 /// elements in the input.
11803 static std::vector<unsigned> getShuffleMask(const ShuffleVectorInst *SVI) {
11804 unsigned NElts = SVI->getType()->getNumElements();
11805 if (isa<ConstantAggregateZero>(SVI->getOperand(2)))
11806 return std::vector<unsigned>(NElts, 0);
11807 if (isa<UndefValue>(SVI->getOperand(2)))
11808 return std::vector<unsigned>(NElts, 2*NElts);
11810 std::vector<unsigned> Result;
11811 const ConstantVector *CP = cast<ConstantVector>(SVI->getOperand(2));
11812 for (User::const_op_iterator i = CP->op_begin(), e = CP->op_end(); i!=e; ++i)
11813 if (isa<UndefValue>(*i))
      Result.push_back(NElts*2);   // undef -> out-of-range value
11816 Result.push_back(cast<ConstantInt>(*i)->getZExtValue());
11820 /// FindScalarElement - Given a vector and an element number, see if the scalar
11821 /// value is already around as a register, for example if it were inserted then
11822 /// extracted from the vector.
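///
/// For example (illustrative IR), given
///   %V = insertelement <4 x i32> %A, i32 %x, i32 2
/// FindScalarElement(%V, 2) returns %x without creating any new instructions.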
11823 static Value *FindScalarElement(Value *V, unsigned EltNo) {
11824 assert(isa<VectorType>(V->getType()) && "Not looking at a vector?");
11825 const VectorType *PTy = cast<VectorType>(V->getType());
11826 unsigned Width = PTy->getNumElements();
11827 if (EltNo >= Width) // Out of range access.
11828 return UndefValue::get(PTy->getElementType());
11830 if (isa<UndefValue>(V))
11831 return UndefValue::get(PTy->getElementType());
11832 else if (isa<ConstantAggregateZero>(V))
11833 return Constant::getNullValue(PTy->getElementType());
11834 else if (ConstantVector *CP = dyn_cast<ConstantVector>(V))
11835 return CP->getOperand(EltNo);
11836 else if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
11837 // If this is an insert to a variable element, we don't know what it is.
11838 if (!isa<ConstantInt>(III->getOperand(2)))
11840 unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue();
11842 // If this is an insert to the element we are looking for, return the
11844 if (EltNo == IIElt)
11845 return III->getOperand(1);
11847 // Otherwise, the insertelement doesn't modify the value, recurse on its
11849 return FindScalarElement(III->getOperand(0), EltNo);
11850 } else if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V)) {
11851 unsigned LHSWidth =
11852 cast<VectorType>(SVI->getOperand(0)->getType())->getNumElements();
11853 unsigned InEl = getShuffleMask(SVI)[EltNo];
11854 if (InEl < LHSWidth)
11855 return FindScalarElement(SVI->getOperand(0), InEl);
11856 else if (InEl < LHSWidth*2)
11857 return FindScalarElement(SVI->getOperand(1), InEl - LHSWidth);
11859 return UndefValue::get(PTy->getElementType());
11862 // Otherwise, we don't know.
11866 Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
11867 // If vector val is undef, replace extract with scalar undef.
11868 if (isa<UndefValue>(EI.getOperand(0)))
11869 return ReplaceInstUsesWith(EI, UndefValue::get(EI.getType()));
11871 // If vector val is constant 0, replace extract with scalar 0.
11872 if (isa<ConstantAggregateZero>(EI.getOperand(0)))
11873 return ReplaceInstUsesWith(EI, Constant::getNullValue(EI.getType()));
11875 if (ConstantVector *C = dyn_cast<ConstantVector>(EI.getOperand(0))) {
11876 // If vector val is constant with all elements the same, replace EI with
11877 // that element. When the elements are not identical, we cannot replace yet
11878 // (we do that below, but only when the index is constant).
11879 Constant *op0 = C->getOperand(0);
11880 for (unsigned i = 1; i != C->getNumOperands(); ++i)
11881 if (C->getOperand(i) != op0) {
11886 return ReplaceInstUsesWith(EI, op0);
11889 // If extracting a specified index from the vector, see if we can recursively
11890 // find a previously computed scalar that was inserted into the vector.
11891 if (ConstantInt *IdxC = dyn_cast<ConstantInt>(EI.getOperand(1))) {
11892 unsigned IndexVal = IdxC->getZExtValue();
11893 unsigned VectorWidth = EI.getVectorOperandType()->getNumElements();
11895 // If this is extracting an invalid index, turn this into undef, to avoid
11896 // crashing the code below.
11897 if (IndexVal >= VectorWidth)
11898 return ReplaceInstUsesWith(EI, UndefValue::get(EI.getType()));
11900 // This instruction only demands the single element from the input vector.
11901 // If the input vector has a single use, simplify it based on this use
11903 if (EI.getOperand(0)->hasOneUse() && VectorWidth != 1) {
11904 APInt UndefElts(VectorWidth, 0);
11905 APInt DemandedMask(VectorWidth, 1 << IndexVal);
11906 if (Value *V = SimplifyDemandedVectorElts(EI.getOperand(0),
11907 DemandedMask, UndefElts)) {
11908 EI.setOperand(0, V);
11913 if (Value *Elt = FindScalarElement(EI.getOperand(0), IndexVal))
11914 return ReplaceInstUsesWith(EI, Elt);
    // If this extractelement is directly using a bitcast from a vector of
11917 // the same number of elements, see if we can find the source element from
11918 // it. In this case, we will end up needing to bitcast the scalars.
11919 if (BitCastInst *BCI = dyn_cast<BitCastInst>(EI.getOperand(0))) {
11920 if (const VectorType *VT =
11921 dyn_cast<VectorType>(BCI->getOperand(0)->getType()))
11922 if (VT->getNumElements() == VectorWidth)
11923 if (Value *Elt = FindScalarElement(BCI->getOperand(0), IndexVal))
11924 return new BitCastInst(Elt, EI.getType());
11928 if (Instruction *I = dyn_cast<Instruction>(EI.getOperand(0))) {
11929 // Push extractelement into predecessor operation if legal and
11930 // profitable to do so
11931 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
11932 if (I->hasOneUse() &&
11933 CheapToScalarize(BO, isa<ConstantInt>(EI.getOperand(1)))) {
11935 Builder->CreateExtractElement(BO->getOperand(0), EI.getOperand(1),
11936 EI.getName()+".lhs");
11938 Builder->CreateExtractElement(BO->getOperand(1), EI.getOperand(1),
11939 EI.getName()+".rhs");
11940 return BinaryOperator::Create(BO->getOpcode(), newEI0, newEI1);
11942 } else if (InsertElementInst *IE = dyn_cast<InsertElementInst>(I)) {
11943 // Extracting the inserted element?
11944 if (IE->getOperand(2) == EI.getOperand(1))
11945 return ReplaceInstUsesWith(EI, IE->getOperand(1));
11946 // If the inserted and extracted elements are constants, they must not
      // be the same value; extract from the pre-inserted value instead.
11948 if (isa<Constant>(IE->getOperand(2)) && isa<Constant>(EI.getOperand(1))) {
11949 Worklist.AddValue(EI.getOperand(0));
11950 EI.setOperand(0, IE->getOperand(0));
11953 } else if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I)) {
11954 // If this is extracting an element from a shufflevector, figure out where
11955 // it came from and extract from the appropriate input element instead.
11956 if (ConstantInt *Elt = dyn_cast<ConstantInt>(EI.getOperand(1))) {
11957 unsigned SrcIdx = getShuffleMask(SVI)[Elt->getZExtValue()];
11959 unsigned LHSWidth =
11960 cast<VectorType>(SVI->getOperand(0)->getType())->getNumElements();
11962 if (SrcIdx < LHSWidth)
11963 Src = SVI->getOperand(0);
11964 else if (SrcIdx < LHSWidth*2) {
11965 SrcIdx -= LHSWidth;
11966 Src = SVI->getOperand(1);
11968 return ReplaceInstUsesWith(EI, UndefValue::get(EI.getType()));
11970 return ExtractElementInst::Create(Src,
11971 ConstantInt::get(Type::getInt32Ty(EI.getContext()),
11975 // FIXME: Canonicalize extractelement(bitcast) -> bitcast(extractelement)
11980 /// CollectSingleShuffleElements - If V is a shuffle of values that ONLY returns
11981 /// elements from either LHS or RHS, return the shuffle mask and true.
11982 /// Otherwise, return false.
11983 static bool CollectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
11984 std::vector<Constant*> &Mask) {
11985 assert(V->getType() == LHS->getType() && V->getType() == RHS->getType() &&
11986 "Invalid CollectSingleShuffleElements");
11987 unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
11989 if (isa<UndefValue>(V)) {
11990 Mask.assign(NumElts, UndefValue::get(Type::getInt32Ty(V->getContext())));
11995 for (unsigned i = 0; i != NumElts; ++i)
11996 Mask.push_back(ConstantInt::get(Type::getInt32Ty(V->getContext()), i));
12001 for (unsigned i = 0; i != NumElts; ++i)
12002 Mask.push_back(ConstantInt::get(Type::getInt32Ty(V->getContext()),
12007 if (InsertElementInst *IEI = dyn_cast<InsertElementInst>(V)) {
12008 // If this is an insert of an extract from some other vector, include it.
12009 Value *VecOp = IEI->getOperand(0);
12010 Value *ScalarOp = IEI->getOperand(1);
12011 Value *IdxOp = IEI->getOperand(2);
12013 if (!isa<ConstantInt>(IdxOp))
12015 unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();
12017 if (isa<UndefValue>(ScalarOp)) { // inserting undef into vector.
      // Okay, we can handle this if the vector we are inserting into is
12019 // transitively ok.
12020 if (CollectSingleShuffleElements(VecOp, LHS, RHS, Mask)) {
12021 // If so, update the mask to reflect the inserted undef.
12022 Mask[InsertedIdx] = UndefValue::get(Type::getInt32Ty(V->getContext()));
12025 } else if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)){
12026 if (isa<ConstantInt>(EI->getOperand(1)) &&
12027 EI->getOperand(0)->getType() == V->getType()) {
12028 unsigned ExtractedIdx =
12029 cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
12031 // This must be extracting from either LHS or RHS.
12032 if (EI->getOperand(0) == LHS || EI->getOperand(0) == RHS) {
          // Okay, we can handle this if the vector we are inserting into is
12034 // transitively ok.
12035 if (CollectSingleShuffleElements(VecOp, LHS, RHS, Mask)) {
12036 // If so, update the mask to reflect the inserted value.
12037 if (EI->getOperand(0) == LHS) {
12038 Mask[InsertedIdx % NumElts] =
12039 ConstantInt::get(Type::getInt32Ty(V->getContext()),
12042 assert(EI->getOperand(0) == RHS);
12043 Mask[InsertedIdx % NumElts] =
12044 ConstantInt::get(Type::getInt32Ty(V->getContext()),
12045 ExtractedIdx+NumElts);
12054 // TODO: Handle shufflevector here!
12059 /// CollectShuffleElements - We are building a shuffle of V, using RHS as the
12060 /// RHS of the shuffle instruction, if it is not null. Return a shuffle mask
12061 /// that computes V and the LHS value of the shuffle.
12062 static Value *CollectShuffleElements(Value *V, std::vector<Constant*> &Mask,
12064 assert(isa<VectorType>(V->getType()) &&
12065 (RHS == 0 || V->getType() == RHS->getType()) &&
12066 "Invalid shuffle!");
12067 unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
12069 if (isa<UndefValue>(V)) {
12070 Mask.assign(NumElts, UndefValue::get(Type::getInt32Ty(V->getContext())));
12072 } else if (isa<ConstantAggregateZero>(V)) {
12073 Mask.assign(NumElts, ConstantInt::get(Type::getInt32Ty(V->getContext()),0));
12075 } else if (InsertElementInst *IEI = dyn_cast<InsertElementInst>(V)) {
12076 // If this is an insert of an extract from some other vector, include it.
12077 Value *VecOp = IEI->getOperand(0);
12078 Value *ScalarOp = IEI->getOperand(1);
12079 Value *IdxOp = IEI->getOperand(2);
12081 if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)) {
12082 if (isa<ConstantInt>(EI->getOperand(1)) && isa<ConstantInt>(IdxOp) &&
12083 EI->getOperand(0)->getType() == V->getType()) {
12084 unsigned ExtractedIdx =
12085 cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
12086 unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();
12088 // Either the extracted from or inserted into vector must be RHSVec,
12089 // otherwise we'd end up with a shuffle of three inputs.
12090 if (EI->getOperand(0) == RHS || RHS == 0) {
12091 RHS = EI->getOperand(0);
12092 Value *V = CollectShuffleElements(VecOp, Mask, RHS);
12093 Mask[InsertedIdx % NumElts] =
12094 ConstantInt::get(Type::getInt32Ty(V->getContext()),
12095 NumElts+ExtractedIdx);
12099 if (VecOp == RHS) {
12100 Value *V = CollectShuffleElements(EI->getOperand(0), Mask, RHS);
12101 // Everything but the extracted element is replaced with the RHS.
12102 for (unsigned i = 0; i != NumElts; ++i) {
12103 if (i != InsertedIdx)
12104 Mask[i] = ConstantInt::get(Type::getInt32Ty(V->getContext()),
12110 // If this insertelement is a chain that comes from exactly these two
12111 // vectors, return the vector and the effective shuffle.
12112 if (CollectSingleShuffleElements(IEI, EI->getOperand(0), RHS, Mask))
12113 return EI->getOperand(0);
12117 // TODO: Handle shufflevector here!
12119 // Otherwise, can't do anything fancy. Return an identity vector.
12120 for (unsigned i = 0; i != NumElts; ++i)
12121 Mask.push_back(ConstantInt::get(Type::getInt32Ty(V->getContext()), i));
12125 Instruction *InstCombiner::visitInsertElementInst(InsertElementInst &IE) {
12126 Value *VecOp = IE.getOperand(0);
12127 Value *ScalarOp = IE.getOperand(1);
12128 Value *IdxOp = IE.getOperand(2);
12130 // Inserting an undef or into an undefined place, remove this.
12131 if (isa<UndefValue>(ScalarOp) || isa<UndefValue>(IdxOp))
12132 ReplaceInstUsesWith(IE, VecOp);
12134 // If the inserted element was extracted from some other vector, and if the
12135 // indexes are constant, try to turn this into a shufflevector operation.
12136 if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)) {
12137 if (isa<ConstantInt>(EI->getOperand(1)) && isa<ConstantInt>(IdxOp) &&
12138 EI->getOperand(0)->getType() == IE.getType()) {
12139 unsigned NumVectorElts = IE.getType()->getNumElements();
12140 unsigned ExtractedIdx =
12141 cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
12142 unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();
12144 if (ExtractedIdx >= NumVectorElts) // Out of range extract.
12145 return ReplaceInstUsesWith(IE, VecOp);
12147 if (InsertedIdx >= NumVectorElts) // Out of range insert.
12148 return ReplaceInstUsesWith(IE, UndefValue::get(IE.getType()));
12150 // If we are extracting a value from a vector, then inserting it right
12151 // back into the same place, just use the input vector.
12152 if (EI->getOperand(0) == VecOp && ExtractedIdx == InsertedIdx)
12153 return ReplaceInstUsesWith(IE, VecOp);
12155 // If this insertelement isn't used by some other insertelement, turn it
      // (and any insertelements it points to) into one big shuffle.
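      // For example (illustrative IR), a chain like:
      //   %e0 = extractelement <2 x i32> %V, i32 1
      //   %e1 = extractelement <2 x i32> %V, i32 0
      //   %t0 = insertelement <2 x i32> undef, i32 %e0, i32 0
      //   %t1 = insertelement <2 x i32> %t0, i32 %e1, i32 1
      // can collapse into a single shufflevector that selects elements 1 and 0
      // of %V.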
12157 if (!IE.hasOneUse() || !isa<InsertElementInst>(IE.use_back())) {
12158 std::vector<Constant*> Mask;
12160 Value *LHS = CollectShuffleElements(&IE, Mask, RHS);
12161 if (RHS == 0) RHS = UndefValue::get(LHS->getType());
12162 // We now have a shuffle of LHS, RHS, Mask.
12163 return new ShuffleVectorInst(LHS, RHS,
12164 ConstantVector::get(Mask));
12169 unsigned VWidth = cast<VectorType>(VecOp->getType())->getNumElements();
12170 APInt UndefElts(VWidth, 0);
12171 APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
12172 if (SimplifyDemandedVectorElts(&IE, AllOnesEltMask, UndefElts))
12179 Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
12180 Value *LHS = SVI.getOperand(0);
12181 Value *RHS = SVI.getOperand(1);
12182 std::vector<unsigned> Mask = getShuffleMask(&SVI);
12184 bool MadeChange = false;
12186 // Undefined shuffle mask -> undefined value.
12187 if (isa<UndefValue>(SVI.getOperand(2)))
12188 return ReplaceInstUsesWith(SVI, UndefValue::get(SVI.getType()));
12190 unsigned VWidth = cast<VectorType>(SVI.getType())->getNumElements();
12192 if (VWidth != cast<VectorType>(LHS->getType())->getNumElements())
12195 APInt UndefElts(VWidth, 0);
12196 APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
12197 if (SimplifyDemandedVectorElts(&SVI, AllOnesEltMask, UndefElts)) {
12198 LHS = SVI.getOperand(0);
12199 RHS = SVI.getOperand(1);
  // Canonicalize shuffle(x, x, mask) -> shuffle(x, undef, mask').
  // Canonicalize shuffle(undef, x, mask) -> shuffle(x, undef, mask').
12205 if (LHS == RHS || isa<UndefValue>(LHS)) {
12206 if (isa<UndefValue>(LHS) && LHS == RHS) {
12207 // shuffle(undef,undef,mask) -> undef.
12208 return ReplaceInstUsesWith(SVI, LHS);
12211 // Remap any references to RHS to use LHS.
12212 std::vector<Constant*> Elts;
12213 for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
12214 if (Mask[i] >= 2*e)
12215 Elts.push_back(UndefValue::get(Type::getInt32Ty(SVI.getContext())));
12217 if ((Mask[i] >= e && isa<UndefValue>(RHS)) ||
12218 (Mask[i] < e && isa<UndefValue>(LHS))) {
12219 Mask[i] = 2*e; // Turn into undef.
12220 Elts.push_back(UndefValue::get(Type::getInt32Ty(SVI.getContext())));
12222 Mask[i] = Mask[i] % e; // Force to LHS.
12223 Elts.push_back(ConstantInt::get(Type::getInt32Ty(SVI.getContext()),
12228 SVI.setOperand(0, SVI.getOperand(1));
12229 SVI.setOperand(1, UndefValue::get(RHS->getType()));
12230 SVI.setOperand(2, ConstantVector::get(Elts));
12231 LHS = SVI.getOperand(0);
12232 RHS = SVI.getOperand(1);
  // Analyze the shuffle: is the LHS or RHS an identity shuffle?
12237 bool isLHSID = true, isRHSID = true;
12239 for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
12240 if (Mask[i] >= e*2) continue; // Ignore undef values.
12241 // Is this an identity shuffle of the LHS value?
12242 isLHSID &= (Mask[i] == i);
12244 // Is this an identity shuffle of the RHS value?
12245 isRHSID &= (Mask[i]-e == i);
12248 // Eliminate identity shuffles.
12249 if (isLHSID) return ReplaceInstUsesWith(SVI, LHS);
12250 if (isRHSID) return ReplaceInstUsesWith(SVI, RHS);
12252 // If the LHS is a shufflevector itself, see if we can combine it with this
12253 // one without producing an unusual shuffle. Here we are really conservative:
12254 // we are absolutely afraid of producing a shuffle mask not in the input
12255 // program, because the code gen may not be smart enough to turn a merged
12256 // shuffle into two specific shuffles: it may produce worse code. As such,
12257 // we only merge two shuffles if the result is one of the two input shuffle
12258 // masks. In this case, merging the shuffles just removes one instruction,
12259 // which we know is safe. This is good for things like turning:
12260 // (splat(splat)) -> splat.
12261 if (ShuffleVectorInst *LHSSVI = dyn_cast<ShuffleVectorInst>(LHS)) {
12262 if (isa<UndefValue>(RHS)) {
12263 std::vector<unsigned> LHSMask = getShuffleMask(LHSSVI);
12265 if (LHSMask.size() == Mask.size()) {
12266 std::vector<unsigned> NewMask;
12267 for (unsigned i = 0, e = Mask.size(); i != e; ++i)
12269 NewMask.push_back(2*e);
12271 NewMask.push_back(LHSMask[Mask[i]]);
12273 // If the result mask is equal to the src shuffle or this
12274 // shuffle mask, do the replacement.
12275 if (NewMask == LHSMask || NewMask == Mask) {
12276 unsigned LHSInNElts =
12277 cast<VectorType>(LHSSVI->getOperand(0)->getType())->
12279 std::vector<Constant*> Elts;
12280 for (unsigned i = 0, e = NewMask.size(); i != e; ++i) {
12281 if (NewMask[i] >= LHSInNElts*2) {
12282 Elts.push_back(UndefValue::get(
12283 Type::getInt32Ty(SVI.getContext())));
12285 Elts.push_back(ConstantInt::get(
12286 Type::getInt32Ty(SVI.getContext()),
12290 return new ShuffleVectorInst(LHSSVI->getOperand(0),
12291 LHSSVI->getOperand(1),
12292 ConstantVector::get(Elts));
12298 return MadeChange ? &SVI : 0;
12304 /// TryToSinkInstruction - Try to move the specified instruction from its
12305 /// current block into the beginning of DestBlock, which can only happen if it's
12306 /// safe to move the instruction past all of the instructions between it and the
12307 /// end of its block.
12308 static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {
12309 assert(I->hasOneUse() && "Invariants didn't hold!");
  // Cannot move control-flow-involving instructions, volatile loads, vaarg, etc.
12312 if (isa<PHINode>(I) || I->mayHaveSideEffects() || isa<TerminatorInst>(I))
12315 // Do not sink alloca instructions out of the entry block.
12316 if (isa<AllocaInst>(I) && I->getParent() ==
12317 &DestBlock->getParent()->getEntryBlock())
12320 // We can only sink load instructions if there is nothing between the load and
  // the end of the block that could change the value.
12322 if (I->mayReadFromMemory()) {
12323 for (BasicBlock::iterator Scan = I, E = I->getParent()->end();
12325 if (Scan->mayWriteToMemory())
12329 BasicBlock::iterator InsertPos = DestBlock->getFirstNonPHI();
12331 CopyPrecedingStopPoint(I, InsertPos);
12332 I->moveBefore(InsertPos);
12338 /// AddReachableCodeToWorklist - Walk the function in depth-first order, adding
12339 /// all reachable code to the worklist.
12341 /// This has a couple of tricks to make the code faster and more powerful. In
12342 /// particular, we constant fold and DCE instructions as we go, to avoid adding
12343 /// them to the worklist (this significantly speeds up instcombine on code where
12344 /// many instructions are dead or constant). Additionally, if we find a branch
12345 /// whose condition is a known constant, we only visit the reachable successors.
12347 static bool AddReachableCodeToWorklist(BasicBlock *BB,
12348 SmallPtrSet<BasicBlock*, 64> &Visited,
12350 const TargetData *TD) {
12351 bool MadeIRChange = false;
12352 SmallVector<BasicBlock*, 256> Worklist;
12353 Worklist.push_back(BB);
12355 std::vector<Instruction*> InstrsForInstCombineWorklist;
12356 InstrsForInstCombineWorklist.reserve(128);
12358 SmallPtrSet<ConstantExpr*, 64> FoldedConstants;
12360 while (!Worklist.empty()) {
12361 BB = Worklist.back();
12362 Worklist.pop_back();
12364 // We have now visited this block! If we've already been here, ignore it.
12365 if (!Visited.insert(BB)) continue;
12367 for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
12368 Instruction *Inst = BBI++;
12370 // DCE instruction if trivially dead.
12371 if (isInstructionTriviallyDead(Inst)) {
12373 DEBUG(errs() << "IC: DCE: " << *Inst << '\n');
12374 Inst->eraseFromParent();
12378 // ConstantProp instruction if trivially constant.
12379 if (!Inst->use_empty() && isa<Constant>(Inst->getOperand(0)))
12380 if (Constant *C = ConstantFoldInstruction(Inst, TD)) {
12381 DEBUG(errs() << "IC: ConstFold to: " << *C << " from: "
12383 Inst->replaceAllUsesWith(C);
12385 Inst->eraseFromParent();
12392 // See if we can constant fold its operands.
12393 for (User::op_iterator i = Inst->op_begin(), e = Inst->op_end();
12395 ConstantExpr *CE = dyn_cast<ConstantExpr>(i);
12396 if (CE == 0) continue;
12398 // If we already folded this constant, don't try again.
12399 if (!FoldedConstants.insert(CE))
12402 Constant *NewC = ConstantFoldConstantExpression(CE, TD);
12403 if (NewC && NewC != CE) {
12405 MadeIRChange = true;
12411 InstrsForInstCombineWorklist.push_back(Inst);
12414 // Recursively visit successors. If this is a branch or switch on a
12415 // constant, only visit the reachable successor.
12416 TerminatorInst *TI = BB->getTerminator();
12417 if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
12418 if (BI->isConditional() && isa<ConstantInt>(BI->getCondition())) {
12419 bool CondVal = cast<ConstantInt>(BI->getCondition())->getZExtValue();
12420 BasicBlock *ReachableBB = BI->getSuccessor(!CondVal);
12421 Worklist.push_back(ReachableBB);
12424 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
12425 if (ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
12426 // See if this is an explicit destination.
12427 for (unsigned i = 1, e = SI->getNumSuccessors(); i != e; ++i)
12428 if (SI->getCaseValue(i) == Cond) {
12429 BasicBlock *ReachableBB = SI->getSuccessor(i);
12430 Worklist.push_back(ReachableBB);
12434 // Otherwise it is the default destination.
12435 Worklist.push_back(SI->getSuccessor(0));
12440 for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
12441 Worklist.push_back(TI->getSuccessor(i));
12444 // Once we've found all of the instructions to add to instcombine's worklist,
12445 // add them in reverse order. This way instcombine will visit from the top
12446 // of the function down. This jives well with the way that it adds all uses
12447 // of instructions to the worklist after doing a transformation, thus avoiding
12448 // some N^2 behavior in pathological cases.
12449 IC.Worklist.AddInitialGroup(&InstrsForInstCombineWorklist[0],
12450 InstrsForInstCombineWorklist.size());
12452 return MadeIRChange;
12455 bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
12456 MadeIRChange = false;
12458 DEBUG(errs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
12459 << F.getNameStr() << "\n");
12462 // Do a depth-first traversal of the function, populate the worklist with
12463 // the reachable instructions. Ignore blocks that are not reachable. Keep
12464 // track of which blocks we visit.
12465 SmallPtrSet<BasicBlock*, 64> Visited;
12466 MadeIRChange |= AddReachableCodeToWorklist(F.begin(), Visited, *this, TD);
12468 // Do a quick scan over the function. If we find any blocks that are
12469 // unreachable, remove any instructions inside of them. This prevents
12470 // the instcombine code from having to deal with some bad special cases.
12471 for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
12472 if (!Visited.count(BB)) {
12473 Instruction *Term = BB->getTerminator();
12474 while (Term != BB->begin()) { // Remove instrs bottom-up
12475 BasicBlock::iterator I = Term; --I;
12477 DEBUG(errs() << "IC: DCE: " << *I << '\n');
12478 // A debug intrinsic shouldn't force another iteration if we weren't
12479 // going to do one without it.
12480 if (!isa<DbgInfoIntrinsic>(I)) {
12482 MadeIRChange = true;
12485 // If I is not void type then replaceAllUsesWith undef.
        // This allows value handles and custom metadata to adjust themselves.
12487 if (!I->getType()->isVoidTy())
12488 I->replaceAllUsesWith(UndefValue::get(I->getType()));
12489 I->eraseFromParent();
12494 while (!Worklist.isEmpty()) {
12495 Instruction *I = Worklist.RemoveOne();
12496 if (I == 0) continue; // skip null values.
12498 // Check to see if we can DCE the instruction.
12499 if (isInstructionTriviallyDead(I)) {
12500 DEBUG(errs() << "IC: DCE: " << *I << '\n');
12501 EraseInstFromFunction(*I);
12503 MadeIRChange = true;
12507 // Instruction isn't dead, see if we can constant propagate it.
12508 if (!I->use_empty() && isa<Constant>(I->getOperand(0)))
12509 if (Constant *C = ConstantFoldInstruction(I, TD)) {
12510 DEBUG(errs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n');
12512 // Add operands to the worklist.
12513 ReplaceInstUsesWith(*I, C);
12515 EraseInstFromFunction(*I);
12516 MadeIRChange = true;
12520 // See if we can trivially sink this instruction to a successor basic block.
12521 if (I->hasOneUse()) {
12522 BasicBlock *BB = I->getParent();
12523 Instruction *UserInst = cast<Instruction>(I->use_back());
12524 BasicBlock *UserParent;
12526 // Get the block the use occurs in.
12527 if (PHINode *PN = dyn_cast<PHINode>(UserInst))
12528 UserParent = PN->getIncomingBlock(I->use_begin().getUse());
12530 UserParent = UserInst->getParent();
12532 if (UserParent != BB) {
12533 bool UserIsSuccessor = false;
12534 // See if the user is one of our successors.
12535 for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI)
12536 if (*SI == UserParent) {
12537 UserIsSuccessor = true;
12541 // If the user is one of our immediate successors, and if that successor
        // only has us as a predecessor (we'd have to split the critical edge
12543 // otherwise), we can keep going.
12544 if (UserIsSuccessor && UserParent->getSinglePredecessor())
12545 // Okay, the CFG is simple enough, try to sink this instruction.
12546 MadeIRChange |= TryToSinkInstruction(I, UserParent);
12550 // Now that we have an instruction, try combining it to simplify it.
12551 Builder->SetInsertPoint(I->getParent(), I);
12556 DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str(););
12557 DEBUG(errs() << "IC: Visiting: " << OrigI << '\n');
12559 if (Instruction *Result = visit(*I)) {
12561 // Should we replace the old instruction with a new one?
12563 DEBUG(errs() << "IC: Old = " << *I << '\n'
12564 << " New = " << *Result << '\n');
12566 // Everything uses the new instruction now.
12567 I->replaceAllUsesWith(Result);
12569 // Push the new instruction and any users onto the worklist.
12570 Worklist.Add(Result);
12571 Worklist.AddUsersToWorkList(*Result);
12573 // Move the name to the new instruction first.
12574 Result->takeName(I);
12576 // Insert the new instruction into the basic block...
12577 BasicBlock *InstParent = I->getParent();
12578 BasicBlock::iterator InsertPos = I;
12580 if (!isa<PHINode>(Result)) // If combining a PHI, don't insert
12581 while (isa<PHINode>(InsertPos)) // middle of a block of PHIs.
12584 InstParent->getInstList().insert(InsertPos, Result);
12586 EraseInstFromFunction(*I);
12589 DEBUG(errs() << "IC: Mod = " << OrigI << '\n'
12590 << " New = " << *I << '\n');
12593 // If the instruction was modified, it's possible that it is now dead.
12594 // if so, remove it.
12595 if (isInstructionTriviallyDead(I)) {
12596 EraseInstFromFunction(*I);
12599 Worklist.AddUsersToWorkList(*I);
12602 MadeIRChange = true;
12607 return MadeIRChange;
12611 bool InstCombiner::runOnFunction(Function &F) {
12612 MustPreserveLCSSA = mustPreserveAnalysisID(LCSSAID);
12613 TD = getAnalysisIfAvailable<TargetData>();
12616 /// Builder - This is an IRBuilder that automatically inserts new
12617 /// instructions into the worklist when they are created.
12618 IRBuilder<true, TargetFolder, InstCombineIRInserter>
12619 TheBuilder(F.getContext(), TargetFolder(TD),
12620 InstCombineIRInserter(Worklist));
12621 Builder = &TheBuilder;
12623 bool EverMadeChange = false;
12625 // Iterate while there is work to do.
12626 unsigned Iteration = 0;
12627 while (DoOneIteration(F, Iteration++))
12628 EverMadeChange = true;
12631 return EverMadeChange;
12634 FunctionPass *llvm::createInstructionCombiningPass() {
12635 return new InstCombiner();