1 //===- InstructionCombining.cpp - Combine multiple instructions -----------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // InstructionCombining - Combine instructions to form fewer, simple
11 // instructions. This pass does not modify the CFG. This pass is where
12 // algebraic simplification happens.
14 // This pass combines things like:
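// For example, it combines:
//    %Y = add i32 %X, 1
//    %Z = add i32 %Y, 1
// into:
//    %Z = add i32 %X, 2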
20 // This is a simple worklist driven algorithm.
22 // This pass guarantees that the following canonicalizations are performed on
23 // every instruction:
24 // 1. If a binary operator has a constant operand, it is moved to the RHS
25 // 2. Bitwise operators with constant operands are always grouped so that
26 // shifts are performed first, then or's, then and's, then xor's.
27 // 3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
28 // 4. All cmp instructions on boolean values are replaced with logical ops
29 // 5. add X, X is represented as (X*2) => (X << 1)
30 // 6. Multiplies with a power-of-two constant argument are transformed into
31 // shifts.
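// For example, mul i32 %X, 8 is canonicalized to shl i32 %X, 3.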
34 //===----------------------------------------------------------------------===//
36 #define DEBUG_TYPE "instcombine"
37 #include "llvm/Transforms/Scalar.h"
38 #include "InstCombine.h"
39 #include "llvm/IntrinsicInst.h"
40 #include "llvm/LLVMContext.h"
41 #include "llvm/DerivedTypes.h"
42 #include "llvm/GlobalVariable.h"
43 #include "llvm/Operator.h"
44 #include "llvm/Analysis/ConstantFolding.h"
45 #include "llvm/Analysis/InstructionSimplify.h"
46 #include "llvm/Analysis/MemoryBuiltins.h"
47 #include "llvm/Target/TargetData.h"
48 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
49 #include "llvm/Transforms/Utils/Local.h"
50 #include "llvm/Support/CallSite.h"
51 #include "llvm/Support/Debug.h"
52 #include "llvm/Support/ErrorHandling.h"
53 #include "llvm/Support/GetElementPtrTypeIterator.h"
54 #include "llvm/Support/MathExtras.h"
55 #include "llvm/Support/PatternMatch.h"
56 #include "llvm/ADT/SmallPtrSet.h"
57 #include "llvm/ADT/Statistic.h"
58 #include "llvm/ADT/STLExtras.h"
62 using namespace llvm::PatternMatch;
64 STATISTIC(NumCombined , "Number of insts combined");
65 STATISTIC(NumConstProp, "Number of constant folds");
66 STATISTIC(NumDeadInst , "Number of dead inst eliminated");
67 STATISTIC(NumSunkInst , "Number of instructions sunk");
70 char InstCombiner::ID = 0;
71 static RegisterPass<InstCombiner>
72 X("instcombine", "Combine redundant instructions");
74 void InstCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
75 AU.addPreservedID(LCSSAID);
80 // getPromotedType - Return the specified type promoted as it would be to pass
81 // through a va_arg area.
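// For example, i8 and i16 arguments are promoted to i32.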
82 static const Type *getPromotedType(const Type *Ty) {
83 if (const IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
84 if (ITy->getBitWidth() < 32)
85 return Type::getInt32Ty(Ty->getContext());
90 /// ShouldChangeType - Return true if it is desirable to convert a computation
91 /// from 'From' to 'To'. We don't want to convert from a legal to an illegal
92 /// type for example, or from a smaller to a larger illegal type.
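/// For example, assuming a TargetData where i64 is legal but i160 and i256 are
/// not: i64 -> i160 and i160 -> i256 are rejected, while i160 -> i64 is allowed.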
93 bool InstCombiner::ShouldChangeType(const Type *From, const Type *To) const {
94 assert(isa<IntegerType>(From) && isa<IntegerType>(To));
96 // If we don't have TD, we don't know if the source/dest are legal.
97 if (!TD) return false;
99 unsigned FromWidth = From->getPrimitiveSizeInBits();
100 unsigned ToWidth = To->getPrimitiveSizeInBits();
101 bool FromLegal = TD->isLegalInteger(FromWidth);
102 bool ToLegal = TD->isLegalInteger(ToWidth);
104 // If this is a legal integer 'from' type, and the result would be an
105 // illegal type, don't do the transformation.
106 if (FromLegal && !ToLegal)
109 // Otherwise, if both are illegal, do not increase the size of the result. We
110 // do allow things like i160 -> i64, but not i64 -> i160.
111 if (!FromLegal && !ToLegal && ToWidth > FromWidth)
117 /// getBitCastOperand - If the specified operand is a CastInst, a constant
118 /// expression bitcast, or a GetElementPtrInst with all zero indices, return the
119 /// operand value, otherwise return null.
120 static Value *getBitCastOperand(Value *V) {
121 if (Operator *O = dyn_cast<Operator>(V)) {
122 if (O->getOpcode() == Instruction::BitCast)
123 return O->getOperand(0);
124 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V))
125 if (GEP->hasAllZeroIndices())
126 return GEP->getPointerOperand();
133 // SimplifyCommutative - This performs a few simplifications for commutative
134 // operators:
136 // 1. Order operands such that they are listed from right (least complex) to
137 // left (most complex). This puts constants before unary operators before
138 // binary operators.
140 // 2. Transform: (op (op V, C1), C2) ==> (op V, (op C1, C2))
141 // 3. Transform: (op (op V1, C1), (op V2, C2)) ==> (op (op V1, V2), (op C1,C2))
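// For example, rule 2 turns (add (add X, 1), 2) into (add X, 3).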
143 bool InstCombiner::SimplifyCommutative(BinaryOperator &I) {
144 bool Changed = false;
145 if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1)))
146 Changed = !I.swapOperands();
148 if (!I.isAssociative()) return Changed;
150 Instruction::BinaryOps Opcode = I.getOpcode();
151 if (BinaryOperator *Op = dyn_cast<BinaryOperator>(I.getOperand(0)))
152 if (Op->getOpcode() == Opcode && isa<Constant>(Op->getOperand(1))) {
153 if (isa<Constant>(I.getOperand(1))) {
154 Constant *Folded = ConstantExpr::get(I.getOpcode(),
155 cast<Constant>(I.getOperand(1)),
156 cast<Constant>(Op->getOperand(1)));
157 I.setOperand(0, Op->getOperand(0));
158 I.setOperand(1, Folded);
162 if (BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1)))
163 if (Op1->getOpcode() == Opcode && isa<Constant>(Op1->getOperand(1)) &&
164 Op->hasOneUse() && Op1->hasOneUse()) {
165 Constant *C1 = cast<Constant>(Op->getOperand(1));
166 Constant *C2 = cast<Constant>(Op1->getOperand(1));
168 // Fold (op (op V1, C1), (op V2, C2)) ==> (op (op V1, V2), (op C1,C2))
169 Constant *Folded = ConstantExpr::get(I.getOpcode(), C1, C2);
170 Instruction *New = BinaryOperator::Create(Opcode, Op->getOperand(0),
174 I.setOperand(0, New);
175 I.setOperand(1, Folded);
182 // dyn_castNegVal - Given a 'sub' instruction, return the RHS of the instruction
183 // if the LHS is a constant zero (which is the 'negate' form).
185 Value *InstCombiner::dyn_castNegVal(Value *V) const {
186 if (BinaryOperator::isNeg(V))
187 return BinaryOperator::getNegArgument(V);
189 // Constants can be considered to be negated values if they can be folded.
190 if (ConstantInt *C = dyn_cast<ConstantInt>(V))
191 return ConstantExpr::getNeg(C);
193 if (ConstantVector *C = dyn_cast<ConstantVector>(V))
194 if (C->getType()->getElementType()->isInteger())
195 return ConstantExpr::getNeg(C);
200 // dyn_castFNegVal - Given a 'fsub' instruction, return the RHS of the
201 // instruction if the LHS is a constant negative zero (which is the 'negate'
202 // form).
204 Value *InstCombiner::dyn_castFNegVal(Value *V) const {
205 if (BinaryOperator::isFNeg(V))
206 return BinaryOperator::getFNegArgument(V);
208 // Constants can be considered to be negated values if they can be folded.
209 if (ConstantFP *C = dyn_cast<ConstantFP>(V))
210 return ConstantExpr::getFNeg(C);
212 if (ConstantVector *C = dyn_cast<ConstantVector>(V))
213 if (C->getType()->getElementType()->isFloatingPoint())
214 return ConstantExpr::getFNeg(C);
219 /// isFreeToInvert - Return true if the specified value is free to invert (apply
220 /// ~ to). This happens in cases where the ~ can be eliminated.
221 static inline bool isFreeToInvert(Value *V) {
223 if (BinaryOperator::isNot(V))
226 // Constants can be considered to be not'ed values.
227 if (isa<ConstantInt>(V))
230 // Compares can be inverted if they have a single use.
231 if (CmpInst *CI = dyn_cast<CmpInst>(V))
232 return CI->hasOneUse();
237 static inline Value *dyn_castNotVal(Value *V) {
238 // If this is not(not(x)) don't return that this is a not: we want the two
239 // not's to be folded first.
240 if (BinaryOperator::isNot(V)) {
241 Value *Operand = BinaryOperator::getNotArgument(V);
242 if (!isFreeToInvert(Operand))
246 // Constants can be considered to be not'ed values...
247 if (ConstantInt *C = dyn_cast<ConstantInt>(V))
248 return ConstantInt::get(C->getType(), ~C->getValue());
254 /// AddOne - Add one to a ConstantInt.
255 static Constant *AddOne(Constant *C) {
256 return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1));
258 /// SubOne - Subtract one from a ConstantInt.
259 static Constant *SubOne(ConstantInt *C) {
260 return ConstantInt::get(C->getContext(), C->getValue()-1);
264 static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO,
265 InstCombiner *IC) {
266 if (CastInst *CI = dyn_cast<CastInst>(&I))
267 return IC->Builder->CreateCast(CI->getOpcode(), SO, I.getType());
269 // Figure out if the constant is the left or the right argument.
270 bool ConstIsRHS = isa<Constant>(I.getOperand(1));
271 Constant *ConstOperand = cast<Constant>(I.getOperand(ConstIsRHS));
273 if (Constant *SOC = dyn_cast<Constant>(SO)) {
274 if (ConstIsRHS)
275 return ConstantExpr::get(I.getOpcode(), SOC, ConstOperand);
276 return ConstantExpr::get(I.getOpcode(), ConstOperand, SOC);
279 Value *Op0 = SO, *Op1 = ConstOperand;
280 if (!ConstIsRHS)
281 std::swap(Op0, Op1);
283 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I))
284 return IC->Builder->CreateBinOp(BO->getOpcode(), Op0, Op1,
285 SO->getName()+".op");
286 if (ICmpInst *CI = dyn_cast<ICmpInst>(&I))
287 return IC->Builder->CreateICmp(CI->getPredicate(), Op0, Op1,
288 SO->getName()+".cmp");
289 if (FCmpInst *CI = dyn_cast<FCmpInst>(&I))
290 return IC->Builder->CreateFCmp(CI->getPredicate(), Op0, Op1,
291 SO->getName()+".cmp");
292 llvm_unreachable("Unknown binary instruction type!");
295 // FoldOpIntoSelect - Given an instruction with a select as one operand and a
296 // constant as the other operand, try to fold the binary operator into the
297 // select arguments. This also works for Cast instructions, which obviously do
298 // not have a second operand.
299 Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) {
300 // Don't modify shared select instructions
301 if (!SI->hasOneUse()) return 0;
302 Value *TV = SI->getOperand(1);
303 Value *FV = SI->getOperand(2);
305 if (isa<Constant>(TV) || isa<Constant>(FV)) {
306 // Bool selects with constant operands can be folded to logical ops.
307 if (SI->getType() == Type::getInt1Ty(SI->getContext())) return 0;
309 Value *SelectTrueVal = FoldOperationIntoSelectOperand(Op, TV, this);
310 Value *SelectFalseVal = FoldOperationIntoSelectOperand(Op, FV, this);
312 return SelectInst::Create(SI->getCondition(), SelectTrueVal,
313 SelectFalseVal);
319 /// FoldOpIntoPhi - Given a binary operator, cast instruction, or select which
320 /// has a PHI node as operand #0, see if we can fold the instruction into the
321 /// PHI (which is only possible if all operands to the PHI are constants).
323 /// If AllowAggressive is true, FoldOpIntoPhi will allow certain transforms
324 // that would normally be unprofitable because they strongly encourage jump
325 // threading.
326 Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I,
327 bool AllowAggressive) {
328 AllowAggressive = false;
329 PHINode *PN = cast<PHINode>(I.getOperand(0));
330 unsigned NumPHIValues = PN->getNumIncomingValues();
331 if (NumPHIValues == 0 ||
332 // We normally only transform phis with a single use, unless we're trying
333 // hard to make jump threading happen.
334 (!PN->hasOneUse() && !AllowAggressive))
338 // Check to see if all of the operands of the PHI are simple constants
339 // (constantint/constantfp/undef). If there is one non-constant value,
340 // remember the BB it is in. If there is more than one or if *it* is a PHI,
341 // bail out. We don't do arbitrary constant expressions here because moving
342 // their computation can be expensive without a cost model.
343 BasicBlock *NonConstBB = 0;
344 for (unsigned i = 0; i != NumPHIValues; ++i)
345 if (!isa<Constant>(PN->getIncomingValue(i)) ||
346 isa<ConstantExpr>(PN->getIncomingValue(i))) {
347 if (NonConstBB) return 0; // More than one non-const value.
348 if (isa<PHINode>(PN->getIncomingValue(i))) return 0; // Itself a phi.
349 NonConstBB = PN->getIncomingBlock(i);
351 // If the incoming non-constant value is in I's block, we have an infinite
352 // loop.
353 if (NonConstBB == I.getParent())
357 // If there is exactly one non-constant value, we can insert a copy of the
358 // operation in that block. However, if this is a critical edge, we would be
359 // inserting the computation on some other paths (e.g. inside a loop). Only
360 // do this if the pred block is unconditionally branching into the phi block.
361 if (NonConstBB != 0 && !AllowAggressive) {
362 BranchInst *BI = dyn_cast<BranchInst>(NonConstBB->getTerminator());
363 if (!BI || !BI->isUnconditional()) return 0;
366 // Okay, we can do the transformation: create the new PHI node.
367 PHINode *NewPN = PHINode::Create(I.getType(), "");
368 NewPN->reserveOperandSpace(PN->getNumOperands()/2);
369 InsertNewInstBefore(NewPN, *PN);
372 // Next, add all of the operands to the PHI.
373 if (SelectInst *SI = dyn_cast<SelectInst>(&I)) {
374 // We only currently try to fold the condition of a select when it is a phi,
375 // not the true/false values.
376 Value *TrueV = SI->getTrueValue();
377 Value *FalseV = SI->getFalseValue();
378 BasicBlock *PhiTransBB = PN->getParent();
379 for (unsigned i = 0; i != NumPHIValues; ++i) {
380 BasicBlock *ThisBB = PN->getIncomingBlock(i);
381 Value *TrueVInPred = TrueV->DoPHITranslation(PhiTransBB, ThisBB);
382 Value *FalseVInPred = FalseV->DoPHITranslation(PhiTransBB, ThisBB);
384 if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) {
385 InV = InC->isNullValue() ? FalseVInPred : TrueVInPred;
387 assert(PN->getIncomingBlock(i) == NonConstBB);
388 InV = SelectInst::Create(PN->getIncomingValue(i), TrueVInPred,
389 FalseVInPred,
390 "phitmp", NonConstBB->getTerminator());
391 Worklist.Add(cast<Instruction>(InV));
393 NewPN->addIncoming(InV, ThisBB);
395 } else if (I.getNumOperands() == 2) {
396 Constant *C = cast<Constant>(I.getOperand(1));
397 for (unsigned i = 0; i != NumPHIValues; ++i) {
399 if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) {
400 if (CmpInst *CI = dyn_cast<CmpInst>(&I))
401 InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C);
403 InV = ConstantExpr::get(I.getOpcode(), InC, C);
405 assert(PN->getIncomingBlock(i) == NonConstBB);
406 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I))
407 InV = BinaryOperator::Create(BO->getOpcode(),
408 PN->getIncomingValue(i), C, "phitmp",
409 NonConstBB->getTerminator());
410 else if (CmpInst *CI = dyn_cast<CmpInst>(&I))
411 InV = CmpInst::Create(CI->getOpcode(),
412 CI->getPredicate(),
413 PN->getIncomingValue(i), C, "phitmp",
414 NonConstBB->getTerminator());
416 llvm_unreachable("Unknown binop!");
418 Worklist.Add(cast<Instruction>(InV));
420 NewPN->addIncoming(InV, PN->getIncomingBlock(i));
423 CastInst *CI = cast<CastInst>(&I);
424 const Type *RetTy = CI->getType();
425 for (unsigned i = 0; i != NumPHIValues; ++i) {
427 if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) {
428 InV = ConstantExpr::getCast(CI->getOpcode(), InC, RetTy);
430 assert(PN->getIncomingBlock(i) == NonConstBB);
431 InV = CastInst::Create(CI->getOpcode(), PN->getIncomingValue(i),
432 I.getType(), "phitmp",
433 NonConstBB->getTerminator());
434 Worklist.Add(cast<Instruction>(InV));
436 NewPN->addIncoming(InV, PN->getIncomingBlock(i));
439 return ReplaceInstUsesWith(I, NewPN);
443 /// getICmpCode - Encode a icmp predicate into a three bit mask. These bits
444 /// are carefully arranged to allow folding of expressions such as:
446 /// (A < B) | (A > B) --> (A != B)
448 /// Note that this is only valid if the first and second predicates have the
449 /// same sign. It is illegal to do: (A u< B) | (A s> B)
451 /// Three bits are used to represent the condition, as follows:
456 /// <=> Value Definition
457 /// 000 0 Always false
458 /// 001 1 A > B
459 /// 010 2 A == B
460 /// 011 3 A >= B
461 /// 100 4 A < B
462 /// 101 5 A != B
463 /// 110 6 A <= B
464 /// 111 7 Always true
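/// For example, (A < B) has code 100 and (A > B) has code 001; or'ing them
/// gives 101, the code for (A != B), which is why the fold above is valid.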
466 static unsigned getICmpCode(const ICmpInst *ICI) {
467 switch (ICI->getPredicate()) {
469 case ICmpInst::ICMP_UGT: return 1; // 001
470 case ICmpInst::ICMP_SGT: return 1; // 001
471 case ICmpInst::ICMP_EQ: return 2; // 010
472 case ICmpInst::ICMP_UGE: return 3; // 011
473 case ICmpInst::ICMP_SGE: return 3; // 011
474 case ICmpInst::ICMP_ULT: return 4; // 100
475 case ICmpInst::ICMP_SLT: return 4; // 100
476 case ICmpInst::ICMP_NE: return 5; // 101
477 case ICmpInst::ICMP_ULE: return 6; // 110
478 case ICmpInst::ICMP_SLE: return 6; // 110
481 llvm_unreachable("Invalid ICmp predicate!");
486 /// getFCmpCode - Similar to getICmpCode but for FCmpInst. This encodes a fcmp
487 /// predicate into a three bit mask. It also returns whether it is an ordered
488 /// predicate by reference.
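/// For example, FCMP_OLT yields code 100 (4) with isOrdered set to true, while
/// FCMP_ULT yields the same code with isOrdered left false.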
489 static unsigned getFCmpCode(FCmpInst::Predicate CC, bool &isOrdered) {
490 isOrdered = false;
491 switch (CC) {
492 case FCmpInst::FCMP_ORD: isOrdered = true; return 0; // 000
493 case FCmpInst::FCMP_UNO: return 0; // 000
494 case FCmpInst::FCMP_OGT: isOrdered = true; return 1; // 001
495 case FCmpInst::FCMP_UGT: return 1; // 001
496 case FCmpInst::FCMP_OEQ: isOrdered = true; return 2; // 010
497 case FCmpInst::FCMP_UEQ: return 2; // 010
498 case FCmpInst::FCMP_OGE: isOrdered = true; return 3; // 011
499 case FCmpInst::FCMP_UGE: return 3; // 011
500 case FCmpInst::FCMP_OLT: isOrdered = true; return 4; // 100
501 case FCmpInst::FCMP_ULT: return 4; // 100
502 case FCmpInst::FCMP_ONE: isOrdered = true; return 5; // 101
503 case FCmpInst::FCMP_UNE: return 5; // 101
504 case FCmpInst::FCMP_OLE: isOrdered = true; return 6; // 110
505 case FCmpInst::FCMP_ULE: return 6; // 110
508 // Not expecting FCMP_FALSE and FCMP_TRUE;
509 llvm_unreachable("Unexpected FCmp predicate!");
514 /// getICmpValue - This is the complement of getICmpCode, which turns an
515 /// opcode and two operands into either a constant true or false, or a brand
516 /// new ICmp instruction. The sign is passed in to determine which kind
517 /// of predicate to use in the new icmp instruction.
518 static Value *getICmpValue(bool Sign, unsigned Code, Value *LHS, Value *RHS) {
520 default: assert(0 && "Illegal ICmp code!");
522 return ConstantInt::getFalse(LHS->getContext());
525 return new ICmpInst(ICmpInst::ICMP_SGT, LHS, RHS);
526 return new ICmpInst(ICmpInst::ICMP_UGT, LHS, RHS);
528 return new ICmpInst(ICmpInst::ICMP_EQ, LHS, RHS);
531 return new ICmpInst(ICmpInst::ICMP_SGE, LHS, RHS);
532 return new ICmpInst(ICmpInst::ICMP_UGE, LHS, RHS);
535 return new ICmpInst(ICmpInst::ICMP_SLT, LHS, RHS);
536 return new ICmpInst(ICmpInst::ICMP_ULT, LHS, RHS);
538 return new ICmpInst(ICmpInst::ICMP_NE, LHS, RHS);
541 return new ICmpInst(ICmpInst::ICMP_SLE, LHS, RHS);
542 return new ICmpInst(ICmpInst::ICMP_ULE, LHS, RHS);
544 return ConstantInt::getTrue(LHS->getContext());
548 /// getFCmpValue - This is the complement of getFCmpCode, which turns an
549 /// opcode and two operands into a constant or a new FCmp instruction.
550 /// isordered determines which kind of predicate to use in the new fcmp.
551 static Value *getFCmpValue(bool isordered, unsigned code,
552 Value *LHS, Value *RHS) {
554 default: llvm_unreachable("Illegal FCmp code!");
557 return new FCmpInst(FCmpInst::FCMP_ORD, LHS, RHS);
559 return new FCmpInst(FCmpInst::FCMP_UNO, LHS, RHS);
562 return new FCmpInst(FCmpInst::FCMP_OGT, LHS, RHS);
564 return new FCmpInst(FCmpInst::FCMP_UGT, LHS, RHS);
567 return new FCmpInst(FCmpInst::FCMP_OEQ, LHS, RHS);
569 return new FCmpInst(FCmpInst::FCMP_UEQ, LHS, RHS);
572 return new FCmpInst(FCmpInst::FCMP_OGE, LHS, RHS);
574 return new FCmpInst(FCmpInst::FCMP_UGE, LHS, RHS);
577 return new FCmpInst(FCmpInst::FCMP_OLT, LHS, RHS);
579 return new FCmpInst(FCmpInst::FCMP_ULT, LHS, RHS);
582 return new FCmpInst(FCmpInst::FCMP_ONE, LHS, RHS);
584 return new FCmpInst(FCmpInst::FCMP_UNE, LHS, RHS);
587 return new FCmpInst(FCmpInst::FCMP_OLE, LHS, RHS);
589 return new FCmpInst(FCmpInst::FCMP_ULE, LHS, RHS);
590 case 7: return ConstantInt::getTrue(LHS->getContext());
594 /// PredicatesFoldable - Return true if both predicates match sign or if at
595 /// least one of them is an equality comparison (which is signless).
596 static bool PredicatesFoldable(ICmpInst::Predicate p1, ICmpInst::Predicate p2) {
597 return (CmpInst::isSigned(p1) == CmpInst::isSigned(p2)) ||
598 (CmpInst::isSigned(p1) && ICmpInst::isEquality(p2)) ||
599 (CmpInst::isSigned(p2) && ICmpInst::isEquality(p1));
602 // OptAndOp - This handles expressions of the form ((val OP C1) & C2), where
603 // the Op parameter is 'OP', OpRHS is 'C1', and AndRHS is 'C2'. Op is
604 // guaranteed to be a binary operator.
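// For example, ((X ^ 5) & 12) becomes ((X & 12) ^ 4) here, folding C1&C2.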
605 Instruction *InstCombiner::OptAndOp(Instruction *Op,
606 ConstantInt *OpRHS,
607 ConstantInt *AndRHS,
608 BinaryOperator &TheAnd) {
609 Value *X = Op->getOperand(0);
610 Constant *Together = 0;
612 Together = ConstantExpr::getAnd(AndRHS, OpRHS);
614 switch (Op->getOpcode()) {
615 case Instruction::Xor:
616 if (Op->hasOneUse()) {
617 // (X ^ C1) & C2 --> (X & C2) ^ (C1&C2)
618 Value *And = Builder->CreateAnd(X, AndRHS);
620 return BinaryOperator::CreateXor(And, Together);
623 case Instruction::Or:
624 if (Together == AndRHS) // (X | C) & C --> C
625 return ReplaceInstUsesWith(TheAnd, AndRHS);
627 if (Op->hasOneUse() && Together != OpRHS) {
628 // (X | C1) & C2 --> (X | (C1&C2)) & C2
629 Value *Or = Builder->CreateOr(X, Together);
631 return BinaryOperator::CreateAnd(Or, AndRHS);
634 case Instruction::Add:
635 if (Op->hasOneUse()) {
636 // Adding a one to a single bit bit-field should be turned into an XOR
637 // of the bit. First thing to check is to see if this AND is with a
638 // single bit constant.
639 const APInt &AndRHSV = cast<ConstantInt>(AndRHS)->getValue();
641 // If there is only one bit set.
642 if (AndRHSV.isPowerOf2()) {
643 // Ok, at this point, we know that we are masking the result of the
644 // ADD down to exactly one bit. If the constant we are adding has
645 // no bits set below this bit, then we can eliminate the ADD.
646 const APInt& AddRHS = cast<ConstantInt>(OpRHS)->getValue();
648 // Check to see if any bits below the one bit set in AndRHSV are set.
649 if ((AddRHS & (AndRHSV-1)) == 0) {
650 // If not, the only thing that can affect the output of the AND is
651 // the bit specified by AndRHSV. If that bit is set, the effect of
652 // the ADD is to toggle the bit. If it is clear, then the ADD has
653 // no effect.
654 if ((AddRHS & AndRHSV) == 0) { // Bit is not set, noop
655 TheAnd.setOperand(0, X);
658 // Pull the XOR out of the AND.
659 Value *NewAnd = Builder->CreateAnd(X, AndRHS);
660 NewAnd->takeName(Op);
661 return BinaryOperator::CreateXor(NewAnd, AndRHS);
668 case Instruction::Shl: {
669 // We know that the AND will not produce any of the bits shifted in, so if
670 // the anded constant includes them, clear them now!
672 uint32_t BitWidth = AndRHS->getType()->getBitWidth();
673 uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
674 APInt ShlMask(APInt::getHighBitsSet(BitWidth, BitWidth-OpRHSVal));
675 ConstantInt *CI = ConstantInt::get(AndRHS->getContext(),
676 AndRHS->getValue() & ShlMask);
678 if (CI->getValue() == ShlMask) {
679 // Masking out bits that the shift already masks
680 return ReplaceInstUsesWith(TheAnd, Op); // No need for the and.
681 } else if (CI != AndRHS) { // Reducing bits set in and.
682 TheAnd.setOperand(1, CI);
687 case Instruction::LShr: {
688 // We know that the AND will not produce any of the bits shifted in, so if
689 // the anded constant includes them, clear them now! This only applies to
690 // unsigned shifts, because a signed shr may bring in set bits!
692 uint32_t BitWidth = AndRHS->getType()->getBitWidth();
693 uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
694 APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal));
695 ConstantInt *CI = ConstantInt::get(Op->getContext(),
696 AndRHS->getValue() & ShrMask);
698 if (CI->getValue() == ShrMask) {
699 // Masking out bits that the shift already masks.
700 return ReplaceInstUsesWith(TheAnd, Op);
701 } else if (CI != AndRHS) {
702 TheAnd.setOperand(1, CI); // Reduce bits set in and cst.
707 case Instruction::AShr:
709 // See if this is shifting in some sign extension, then masking it out
711 if (Op->hasOneUse()) {
712 uint32_t BitWidth = AndRHS->getType()->getBitWidth();
713 uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
714 APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal));
715 Constant *C = ConstantInt::get(Op->getContext(),
716 AndRHS->getValue() & ShrMask);
717 if (C == AndRHS) { // Masking out bits shifted in.
718 // (Val ashr C1) & C2 -> (Val lshr C1) & C2
719 // Make the argument unsigned.
720 Value *ShVal = Op->getOperand(0);
721 ShVal = Builder->CreateLShr(ShVal, OpRHS, Op->getName());
722 return BinaryOperator::CreateAnd(ShVal, AndRHS, TheAnd.getName());
731 /// InsertRangeTest - Emit a computation of: (V >= Lo && V < Hi) if Inside is
732 /// true, otherwise (V < Lo || V >= Hi). In practice, we emit the more efficient
733 /// (V-Lo) <u Hi-Lo. This method expects that Lo <= Hi. isSigned indicates
734 /// whether to treat V, Lo, and Hi as signed or not. IB is the location to
735 /// insert new instructions.
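/// For example, a range test for V in [5, 10) with Inside set is emitted as the
/// single unsigned compare (V-5) <u 5.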
736 Instruction *InstCombiner::InsertRangeTest(Value *V, Constant *Lo, Constant *Hi,
737 bool isSigned, bool Inside,
738 Instruction &IB) {
739 assert(cast<ConstantInt>(ConstantExpr::getICmp((isSigned ?
740 ICmpInst::ICMP_SLE:ICmpInst::ICMP_ULE), Lo, Hi))->getZExtValue() &&
741 "Lo is not <= Hi in range emission code!");
743 if (Inside) {
744 if (Lo == Hi) // Trivially false.
745 return new ICmpInst(ICmpInst::ICMP_NE, V, V);
747 // V >= Min && V < Hi --> V < Hi
748 if (cast<ConstantInt>(Lo)->isMinValue(isSigned)) {
749 ICmpInst::Predicate pred = (isSigned ?
750 ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT);
751 return new ICmpInst(pred, V, Hi);
754 // Emit V-Lo <u Hi-Lo
755 Constant *NegLo = ConstantExpr::getNeg(Lo);
756 Value *Add = Builder->CreateAdd(V, NegLo, V->getName()+".off");
757 Constant *UpperBound = ConstantExpr::getAdd(NegLo, Hi);
758 return new ICmpInst(ICmpInst::ICMP_ULT, Add, UpperBound);
759 }
761 if (Lo == Hi) // Trivially true.
762 return new ICmpInst(ICmpInst::ICMP_EQ, V, V);
764 // V < Min || V >= Hi -> V > Hi-1
765 Hi = SubOne(cast<ConstantInt>(Hi));
766 if (cast<ConstantInt>(Lo)->isMinValue(isSigned)) {
767 ICmpInst::Predicate pred = (isSigned ?
768 ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT);
769 return new ICmpInst(pred, V, Hi);
772 // Emit V-Lo >u Hi-1-Lo
773 // Note that Hi has already had one subtracted from it, above.
774 ConstantInt *NegLo = cast<ConstantInt>(ConstantExpr::getNeg(Lo));
775 Value *Add = Builder->CreateAdd(V, NegLo, V->getName()+".off");
776 Constant *LowerBound = ConstantExpr::getAdd(NegLo, Hi);
777 return new ICmpInst(ICmpInst::ICMP_UGT, Add, LowerBound);
780 // isRunOfOnes - Returns true iff Val consists of one contiguous run of 1s with
781 // any number of 0s on either side. The 1s are allowed to wrap from LSB to
782 // MSB, so 0x000FFF0, 0x0000FFFF, and 0xFF0000FF are all runs. 0x0F0F0000 is
783 // not, since all 1s are not contiguous.
784 static bool isRunOfOnes(ConstantInt *Val, uint32_t &MB, uint32_t &ME) {
785 const APInt& V = Val->getValue();
786 uint32_t BitWidth = Val->getType()->getBitWidth();
787 if (!APIntOps::isShiftedMask(BitWidth, V)) return false;
789 // look for the first zero bit after the run of ones
790 MB = BitWidth - ((V - 1) ^ V).countLeadingZeros();
791 // look for the first non-zero bit
792 ME = V.getActiveBits();
796 /// FoldLogicalPlusAnd - This is part of an expression (LHS +/- RHS) & Mask,
797 /// where isSub determines whether the operator is a sub. If we can fold one of
798 /// the following xforms:
800 /// ((A & N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == Mask
801 /// ((A | N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == 0
802 /// ((A ^ N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == 0
804 /// return (A +/- B).
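/// For example, ((A & 7) + B) & 7 folds to (A + B) & 7 by the first rule, since
/// N&Mask (7&7) equals Mask.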
806 Value *InstCombiner::FoldLogicalPlusAnd(Value *LHS, Value *RHS,
807 ConstantInt *Mask, bool isSub,
808 Instruction &I) {
809 Instruction *LHSI = dyn_cast<Instruction>(LHS);
810 if (!LHSI || LHSI->getNumOperands() != 2 ||
811 !isa<ConstantInt>(LHSI->getOperand(1))) return 0;
813 ConstantInt *N = cast<ConstantInt>(LHSI->getOperand(1));
815 switch (LHSI->getOpcode()) {
817 case Instruction::And:
818 if (ConstantExpr::getAnd(N, Mask) == Mask) {
819 // If the AndRHS is a power of two minus one (0+1+), this is simple.
820 if ((Mask->getValue().countLeadingZeros() +
821 Mask->getValue().countPopulation()) ==
822 Mask->getValue().getBitWidth())
825 // Otherwise, if Mask is 0+1+0+, and if B is known to have the low 0+
826 // part, we don't need any explicit masks to take them out of A. If that
827 // is all N is, ignore it.
828 uint32_t MB = 0, ME = 0;
829 if (isRunOfOnes(Mask, MB, ME)) { // begin/end bit of run, inclusive
830 uint32_t BitWidth = cast<IntegerType>(RHS->getType())->getBitWidth();
831 APInt Mask(APInt::getLowBitsSet(BitWidth, MB-1));
832 if (MaskedValueIsZero(RHS, Mask))
837 case Instruction::Or:
838 case Instruction::Xor:
839 // If the AndRHS is a power of two minus one (0+1+), and N&Mask == 0
840 if ((Mask->getValue().countLeadingZeros() +
841 Mask->getValue().countPopulation()) == Mask->getValue().getBitWidth()
842 && ConstantExpr::getAnd(N, Mask)->isNullValue())
847 if (isSub)
848 return Builder->CreateSub(LHSI->getOperand(0), RHS, "fold");
849 return Builder->CreateAdd(LHSI->getOperand(0), RHS, "fold");
852 /// FoldAndOfICmps - Fold (icmp)&(icmp) if possible.
853 Instruction *InstCombiner::FoldAndOfICmps(Instruction &I,
854 ICmpInst *LHS, ICmpInst *RHS) {
855 ICmpInst::Predicate LHSCC = LHS->getPredicate(), RHSCC = RHS->getPredicate();
857 // (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B)
858 if (PredicatesFoldable(LHSCC, RHSCC)) {
859 if (LHS->getOperand(0) == RHS->getOperand(1) &&
860 LHS->getOperand(1) == RHS->getOperand(0))
861 LHS->swapOperands();
862 if (LHS->getOperand(0) == RHS->getOperand(0) &&
863 LHS->getOperand(1) == RHS->getOperand(1)) {
864 Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
865 unsigned Code = getICmpCode(LHS) & getICmpCode(RHS);
866 bool isSigned = LHS->isSigned() || RHS->isSigned();
867 Value *RV = getICmpValue(isSigned, Code, Op0, Op1);
868 if (Instruction *I = dyn_cast<Instruction>(RV))
869 return I;
870 // Otherwise, it's a constant boolean value.
871 return ReplaceInstUsesWith(I, RV);
875 // This only handles icmp of constants: (icmp1 A, C1) & (icmp2 B, C2).
876 Value *Val = LHS->getOperand(0), *Val2 = RHS->getOperand(0);
877 ConstantInt *LHSCst = dyn_cast<ConstantInt>(LHS->getOperand(1));
878 ConstantInt *RHSCst = dyn_cast<ConstantInt>(RHS->getOperand(1));
879 if (LHSCst == 0 || RHSCst == 0) return 0;
881 if (LHSCst == RHSCst && LHSCC == RHSCC) {
882 // (icmp ult A, C) & (icmp ult B, C) --> (icmp ult (A|B), C)
883 // where C is a power of 2
884 if (LHSCC == ICmpInst::ICMP_ULT &&
885 LHSCst->getValue().isPowerOf2()) {
886 Value *NewOr = Builder->CreateOr(Val, Val2);
887 return new ICmpInst(LHSCC, NewOr, LHSCst);
890 // (icmp eq A, 0) & (icmp eq B, 0) --> (icmp eq (A|B), 0)
891 if (LHSCC == ICmpInst::ICMP_EQ && LHSCst->isZero()) {
892 Value *NewOr = Builder->CreateOr(Val, Val2);
893 return new ICmpInst(LHSCC, NewOr, LHSCst);
897 // From here on, we only handle:
898 // (icmp1 A, C1) & (icmp2 A, C2) --> something simpler.
899 if (Val != Val2) return 0;
901 // ICMP_[US][GL]E X, CST is folded to ICMP_[US][GL]T elsewhere.
902 if (LHSCC == ICmpInst::ICMP_UGE || LHSCC == ICmpInst::ICMP_ULE ||
903 RHSCC == ICmpInst::ICMP_UGE || RHSCC == ICmpInst::ICMP_ULE ||
904 LHSCC == ICmpInst::ICMP_SGE || LHSCC == ICmpInst::ICMP_SLE ||
905 RHSCC == ICmpInst::ICMP_SGE || RHSCC == ICmpInst::ICMP_SLE)
908 // We can't fold (ugt x, C) & (sgt x, C2).
909 if (!PredicatesFoldable(LHSCC, RHSCC))
912 // Ensure that the larger constant is on the RHS.
913 bool ShouldSwap;
914 if (CmpInst::isSigned(LHSCC) ||
915 (ICmpInst::isEquality(LHSCC) &&
916 CmpInst::isSigned(RHSCC)))
917 ShouldSwap = LHSCst->getValue().sgt(RHSCst->getValue());
919 ShouldSwap = LHSCst->getValue().ugt(RHSCst->getValue());
921 if (ShouldSwap) {
922 std::swap(LHS, RHS);
923 std::swap(LHSCst, RHSCst);
924 std::swap(LHSCC, RHSCC);
925 }
927 // At this point, we know we have two icmp instructions
928 // comparing a value against two constants and and'ing the result
929 // together. Because of the above check, we know that we only have
930 // icmp eq, icmp ne, icmp [su]lt, and icmp [su]gt here. We also know
931 // (from the icmp folding check above), that the two constants
932 // are not equal and that the larger constant is on the RHS
933 assert(LHSCst != RHSCst && "Compares not folded above?");
936 default: llvm_unreachable("Unknown integer condition code!");
937 case ICmpInst::ICMP_EQ:
939 default: llvm_unreachable("Unknown integer condition code!");
940 case ICmpInst::ICMP_EQ: // (X == 13 & X == 15) -> false
941 case ICmpInst::ICMP_UGT: // (X == 13 & X > 15) -> false
942 case ICmpInst::ICMP_SGT: // (X == 13 & X > 15) -> false
943 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
944 case ICmpInst::ICMP_NE: // (X == 13 & X != 15) -> X == 13
945 case ICmpInst::ICMP_ULT: // (X == 13 & X < 15) -> X == 13
946 case ICmpInst::ICMP_SLT: // (X == 13 & X < 15) -> X == 13
947 return ReplaceInstUsesWith(I, LHS);
949 case ICmpInst::ICMP_NE:
951 default: llvm_unreachable("Unknown integer condition code!");
952 case ICmpInst::ICMP_ULT:
953 if (LHSCst == SubOne(RHSCst)) // (X != 13 & X u< 14) -> X < 13
954 return new ICmpInst(ICmpInst::ICMP_ULT, Val, LHSCst);
955 break; // (X != 13 & X u< 15) -> no change
956 case ICmpInst::ICMP_SLT:
957 if (LHSCst == SubOne(RHSCst)) // (X != 13 & X s< 14) -> X < 13
958 return new ICmpInst(ICmpInst::ICMP_SLT, Val, LHSCst);
959 break; // (X != 13 & X s< 15) -> no change
960 case ICmpInst::ICMP_EQ: // (X != 13 & X == 15) -> X == 15
961 case ICmpInst::ICMP_UGT: // (X != 13 & X u> 15) -> X u> 15
962 case ICmpInst::ICMP_SGT: // (X != 13 & X s> 15) -> X s> 15
963 return ReplaceInstUsesWith(I, RHS);
964 case ICmpInst::ICMP_NE:
965 if (LHSCst == SubOne(RHSCst)){// (X != 13 & X != 14) -> X-13 >u 1
966 Constant *AddCST = ConstantExpr::getNeg(LHSCst);
967 Value *Add = Builder->CreateAdd(Val, AddCST, Val->getName()+".off");
968 return new ICmpInst(ICmpInst::ICMP_UGT, Add,
969 ConstantInt::get(Add->getType(), 1));
971 break; // (X != 13 & X != 15) -> no change
974 case ICmpInst::ICMP_ULT:
976 default: llvm_unreachable("Unknown integer condition code!");
977 case ICmpInst::ICMP_EQ: // (X u< 13 & X == 15) -> false
978 case ICmpInst::ICMP_UGT: // (X u< 13 & X u> 15) -> false
979 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
980 case ICmpInst::ICMP_SGT: // (X u< 13 & X s> 15) -> no change
982 case ICmpInst::ICMP_NE: // (X u< 13 & X != 15) -> X u< 13
983 case ICmpInst::ICMP_ULT: // (X u< 13 & X u< 15) -> X u< 13
984 return ReplaceInstUsesWith(I, LHS);
985 case ICmpInst::ICMP_SLT: // (X u< 13 & X s< 15) -> no change
989 case ICmpInst::ICMP_SLT:
991 default: llvm_unreachable("Unknown integer condition code!");
992 case ICmpInst::ICMP_EQ: // (X s< 13 & X == 15) -> false
993 case ICmpInst::ICMP_SGT: // (X s< 13 & X s> 15) -> false
994 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
995 case ICmpInst::ICMP_UGT: // (X s< 13 & X u> 15) -> no change
997 case ICmpInst::ICMP_NE: // (X s< 13 & X != 15) -> X < 13
998 case ICmpInst::ICMP_SLT: // (X s< 13 & X s< 15) -> X < 13
999 return ReplaceInstUsesWith(I, LHS);
1000 case ICmpInst::ICMP_ULT: // (X s< 13 & X u< 15) -> no change
1004 case ICmpInst::ICMP_UGT:
1006 default: llvm_unreachable("Unknown integer condition code!");
1007 case ICmpInst::ICMP_EQ: // (X u> 13 & X == 15) -> X == 15
1008 case ICmpInst::ICMP_UGT: // (X u> 13 & X u> 15) -> X u> 15
1009 return ReplaceInstUsesWith(I, RHS);
1010 case ICmpInst::ICMP_SGT: // (X u> 13 & X s> 15) -> no change
1012 case ICmpInst::ICMP_NE:
1013 if (RHSCst == AddOne(LHSCst)) // (X u> 13 & X != 14) -> X u> 14
1014 return new ICmpInst(LHSCC, Val, RHSCst);
1015 break; // (X u> 13 & X != 15) -> no change
1016 case ICmpInst::ICMP_ULT: // (X u> 13 & X u< 15) -> (X-14) <u 1
1017 return InsertRangeTest(Val, AddOne(LHSCst),
1018 RHSCst, false, true, I);
1019 case ICmpInst::ICMP_SLT: // (X u> 13 & X s< 15) -> no change
1023 case ICmpInst::ICMP_SGT:
1025 default: llvm_unreachable("Unknown integer condition code!");
1026 case ICmpInst::ICMP_EQ: // (X s> 13 & X == 15) -> X == 15
1027 case ICmpInst::ICMP_SGT: // (X s> 13 & X s> 15) -> X s> 15
1028 return ReplaceInstUsesWith(I, RHS);
1029 case ICmpInst::ICMP_UGT: // (X s> 13 & X u> 15) -> no change
1031 case ICmpInst::ICMP_NE:
1032 if (RHSCst == AddOne(LHSCst)) // (X s> 13 & X != 14) -> X s> 14
1033 return new ICmpInst(LHSCC, Val, RHSCst);
1034 break; // (X s> 13 & X != 15) -> no change
1035 case ICmpInst::ICMP_SLT: // (X s> 13 & X s< 15) -> (X-14) s< 1
1036 return InsertRangeTest(Val, AddOne(LHSCst),
1037 RHSCst, true, true, I);
1038 case ICmpInst::ICMP_ULT: // (X s> 13 & X u< 15) -> no change
1047 Instruction *InstCombiner::FoldAndOfFCmps(Instruction &I, FCmpInst *LHS,
1048 FCmpInst *RHS) {
1050 if (LHS->getPredicate() == FCmpInst::FCMP_ORD &&
1051 RHS->getPredicate() == FCmpInst::FCMP_ORD) {
1052 // (fcmp ord x, c) & (fcmp ord y, c) -> (fcmp ord x, y)
1053 if (ConstantFP *LHSC = dyn_cast<ConstantFP>(LHS->getOperand(1)))
1054 if (ConstantFP *RHSC = dyn_cast<ConstantFP>(RHS->getOperand(1))) {
1055 // If either of the constants is a NaN, then the whole thing returns
1056 // false.
1057 if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN())
1058 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
1059 return new FCmpInst(FCmpInst::FCMP_ORD,
1060 LHS->getOperand(0), RHS->getOperand(0));
1063 // Handle vector zeros. This occurs because the canonical form of
1064 // "fcmp ord x,x" is "fcmp ord x, 0".
1065 if (isa<ConstantAggregateZero>(LHS->getOperand(1)) &&
1066 isa<ConstantAggregateZero>(RHS->getOperand(1)))
1067 return new FCmpInst(FCmpInst::FCMP_ORD,
1068 LHS->getOperand(0), RHS->getOperand(0));
1072 Value *Op0LHS = LHS->getOperand(0), *Op0RHS = LHS->getOperand(1);
1073 Value *Op1LHS = RHS->getOperand(0), *Op1RHS = RHS->getOperand(1);
1074 FCmpInst::Predicate Op0CC = LHS->getPredicate(), Op1CC = RHS->getPredicate();
1077 if (Op0LHS == Op1RHS && Op0RHS == Op1LHS) {
1078 // Swap RHS operands to match LHS.
1079 Op1CC = FCmpInst::getSwappedPredicate(Op1CC);
1080 std::swap(Op1LHS, Op1RHS);
1083 if (Op0LHS == Op1LHS && Op0RHS == Op1RHS) {
1084 // Simplify (fcmp cc0 x, y) & (fcmp cc1 x, y).
1085 if (Op0CC == Op1CC)
1086 return new FCmpInst((FCmpInst::Predicate)Op0CC, Op0LHS, Op0RHS);
1088 if (Op0CC == FCmpInst::FCMP_FALSE || Op1CC == FCmpInst::FCMP_FALSE)
1089 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
1090 if (Op0CC == FCmpInst::FCMP_TRUE)
1091 return ReplaceInstUsesWith(I, RHS);
1092 if (Op1CC == FCmpInst::FCMP_TRUE)
1093 return ReplaceInstUsesWith(I, LHS);
1097 unsigned Op0Pred = getFCmpCode(Op0CC, Op0Ordered);
1098 unsigned Op1Pred = getFCmpCode(Op1CC, Op1Ordered);
1100 std::swap(LHS, RHS);
1101 std::swap(Op0Pred, Op1Pred);
1102 std::swap(Op0Ordered, Op1Ordered);
1105 // uno && ueq -> uno && (uno || eq) -> ueq
1106 // ord && olt -> ord && (ord && lt) -> olt
1107 if (Op0Ordered == Op1Ordered)
1108 return ReplaceInstUsesWith(I, RHS);
1110 // uno && oeq -> uno && (ord && eq) -> false
1111 // uno && ord -> false
1113 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getContext()));
1114 // ord && ueq -> ord && (uno || eq) -> oeq
1115 return cast<Instruction>(getFCmpValue(true, Op1Pred, Op0LHS, Op0RHS));
1123 Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
1124 bool Changed = SimplifyCommutative(I);
1125 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1127 if (Value *V = SimplifyAndInst(Op0, Op1, TD))
1128 return ReplaceInstUsesWith(I, V);
1130 // See if we can simplify any instructions used by the instruction whose sole
1131 // purpose is to compute bits we don't care about.
1132 if (SimplifyDemandedInstructionBits(I))
1133 return &I;
1135 if (ConstantInt *AndRHS = dyn_cast<ConstantInt>(Op1)) {
1136 const APInt &AndRHSMask = AndRHS->getValue();
1137 APInt NotAndRHS(~AndRHSMask);
1139 // Optimize a variety of ((val OP C1) & C2) combinations...
1140 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
1141 Value *Op0LHS = Op0I->getOperand(0);
1142 Value *Op0RHS = Op0I->getOperand(1);
1143 switch (Op0I->getOpcode()) {
1145 case Instruction::Xor:
1146 case Instruction::Or:
1147 // If the mask is only needed on one incoming arm, push it up.
1148 if (!Op0I->hasOneUse()) break;
1150 if (MaskedValueIsZero(Op0LHS, NotAndRHS)) {
1151 // Not masking anything out for the LHS, move to RHS.
1152 Value *NewRHS = Builder->CreateAnd(Op0RHS, AndRHS,
1153 Op0RHS->getName()+".masked");
1154 return BinaryOperator::Create(Op0I->getOpcode(), Op0LHS, NewRHS);
1156 if (!isa<Constant>(Op0RHS) &&
1157 MaskedValueIsZero(Op0RHS, NotAndRHS)) {
1158 // Not masking anything out for the RHS, move to LHS.
1159 Value *NewLHS = Builder->CreateAnd(Op0LHS, AndRHS,
1160 Op0LHS->getName()+".masked");
1161 return BinaryOperator::Create(Op0I->getOpcode(), NewLHS, Op0RHS);
1165 case Instruction::Add:
1166 // ((A & N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == AndRHS.
1167 // ((A | N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == 0
1168 // ((A ^ N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == 0
1169 if (Value *V = FoldLogicalPlusAnd(Op0LHS, Op0RHS, AndRHS, false, I))
1170 return BinaryOperator::CreateAnd(V, AndRHS);
1171 if (Value *V = FoldLogicalPlusAnd(Op0RHS, Op0LHS, AndRHS, false, I))
1172 return BinaryOperator::CreateAnd(V, AndRHS); // Add commutes
1175 case Instruction::Sub:
1176 // ((A & N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == AndRHS.
1177 // ((A | N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == 0
1178 // ((A ^ N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == 0
1179 if (Value *V = FoldLogicalPlusAnd(Op0LHS, Op0RHS, AndRHS, true, I))
1180 return BinaryOperator::CreateAnd(V, AndRHS);
1182 // (A - N) & AndRHS -> -N & AndRHS iff A&AndRHS==0 and AndRHS
1183 // has 1's for all bits that the subtraction with A might affect.
1184 if (Op0I->hasOneUse()) {
1185 uint32_t BitWidth = AndRHSMask.getBitWidth();
1186 uint32_t Zeros = AndRHSMask.countLeadingZeros();
1187 APInt Mask = APInt::getLowBitsSet(BitWidth, BitWidth - Zeros);
1189 ConstantInt *A = dyn_cast<ConstantInt>(Op0LHS);
1190 if (!(A && A->isZero()) && // avoid infinite recursion.
1191 MaskedValueIsZero(Op0LHS, Mask)) {
1192 Value *NewNeg = Builder->CreateNeg(Op0RHS);
1193 return BinaryOperator::CreateAnd(NewNeg, AndRHS);
1198 case Instruction::Shl:
1199 case Instruction::LShr:
1200 // (1 << x) & 1 --> zext(x == 0)
1201 // (1 >> x) & 1 --> zext(x == 0)
1202 if (AndRHSMask == 1 && Op0LHS == AndRHS) {
1203 Value *NewICmp =
1204 Builder->CreateICmpEQ(Op0RHS, Constant::getNullValue(I.getType()));
1205 return new ZExtInst(NewICmp, I.getType());
1210 if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1)))
1211 if (Instruction *Res = OptAndOp(Op0I, Op0CI, AndRHS, I))
1212 return Res;
1213 } else if (CastInst *CI = dyn_cast<CastInst>(Op0)) {
1214 // If this is an integer truncation or change from signed-to-unsigned, and
1215 // if the source is an and/or with immediate, transform it. This
1216 // frequently occurs for bitfield accesses.
1217 if (Instruction *CastOp = dyn_cast<Instruction>(CI->getOperand(0))) {
1218 if ((isa<TruncInst>(CI) || isa<BitCastInst>(CI)) &&
1219 CastOp->getNumOperands() == 2)
1220 if (ConstantInt *AndCI =dyn_cast<ConstantInt>(CastOp->getOperand(1))){
1221 if (CastOp->getOpcode() == Instruction::And) {
1222 // Change: and (cast (and X, C1) to T), C2
1223 // into : and (cast X to T), trunc_or_bitcast(C1)&C2
1224 // This will fold the two constants together, which may allow
1225 // other simplifications.
1226 Value *NewCast = Builder->CreateTruncOrBitCast(
1227 CastOp->getOperand(0), I.getType(),
1228 CastOp->getName()+".shrunk");
1229 // trunc_or_bitcast(C1)&C2
1230 Constant *C3 = ConstantExpr::getTruncOrBitCast(AndCI,I.getType());
1231 C3 = ConstantExpr::getAnd(C3, AndRHS);
1232 return BinaryOperator::CreateAnd(NewCast, C3);
1233 } else if (CastOp->getOpcode() == Instruction::Or) {
1234 // Change: and (cast (or X, C1) to T), C2
1235 // into : trunc(C1)&C2 iff trunc(C1)&C2 == C2
1236 Constant *C3 = ConstantExpr::getTruncOrBitCast(AndCI,I.getType());
1237 if (ConstantExpr::getAnd(C3, AndRHS) == AndRHS)
1239 return ReplaceInstUsesWith(I, AndRHS);
1245 // Try to fold constant and into select arguments.
1246 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
1247 if (Instruction *R = FoldOpIntoSelect(I, SI))
1248 return R;
1249 if (isa<PHINode>(Op0))
1250 if (Instruction *NV = FoldOpIntoPhi(I))
1251 return NV;
1255 // (~A & ~B) == (~(A | B)) - De Morgan's Law
1256 if (Value *Op0NotVal = dyn_castNotVal(Op0))
1257 if (Value *Op1NotVal = dyn_castNotVal(Op1))
1258 if (Op0->hasOneUse() && Op1->hasOneUse()) {
1259 Value *Or = Builder->CreateOr(Op0NotVal, Op1NotVal,
1260 I.getName()+".demorgan");
1261 return BinaryOperator::CreateNot(Or);
1265 Value *A = 0, *B = 0, *C = 0, *D = 0;
1266 // (A|B) & ~(A&B) -> A^B
1267 if (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
1268 match(Op1, m_Not(m_And(m_Value(C), m_Value(D)))) &&
1269 ((A == C && B == D) || (A == D && B == C)))
1270 return BinaryOperator::CreateXor(A, B);
1272 // ~(A&B) & (A|B) -> A^B
1273 if (match(Op1, m_Or(m_Value(A), m_Value(B))) &&
1274 match(Op0, m_Not(m_And(m_Value(C), m_Value(D)))) &&
1275 ((A == C && B == D) || (A == D && B == C)))
1276 return BinaryOperator::CreateXor(A, B);
1278 if (Op0->hasOneUse() &&
1279 match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
1280 if (A == Op1) { // (A^B)&A -> A&(A^B)
1281 I.swapOperands(); // Simplify below
1282 std::swap(Op0, Op1);
1283 } else if (B == Op1) { // (A^B)&B -> B&(B^A)
1284 cast<BinaryOperator>(Op0)->swapOperands();
1285 I.swapOperands(); // Simplify below
1286 std::swap(Op0, Op1);
1290 if (Op1->hasOneUse() &&
1291 match(Op1, m_Xor(m_Value(A), m_Value(B)))) {
1292 if (B == Op0) { // B&(A^B) -> B&(B^A)
1293 cast<BinaryOperator>(Op1)->swapOperands();
1296 if (A == Op0) // A&(A^B) -> A & ~B
1297 return BinaryOperator::CreateAnd(A, Builder->CreateNot(B, "tmp"));
1300 // (A&((~A)|B)) -> A&B
1301 if (match(Op0, m_Or(m_Not(m_Specific(Op1)), m_Value(A))) ||
1302 match(Op0, m_Or(m_Value(A), m_Not(m_Specific(Op1)))))
1303 return BinaryOperator::CreateAnd(A, Op1);
1304 if (match(Op1, m_Or(m_Not(m_Specific(Op0)), m_Value(A))) ||
1305 match(Op1, m_Or(m_Value(A), m_Not(m_Specific(Op0)))))
1306 return BinaryOperator::CreateAnd(A, Op0);
1309 if (ICmpInst *RHS = dyn_cast<ICmpInst>(Op1))
1310 if (ICmpInst *LHS = dyn_cast<ICmpInst>(Op0))
1311 if (Instruction *Res = FoldAndOfICmps(I, LHS, RHS))
1312 return Res;
1314 // fold (and (cast A), (cast B)) -> (cast (and A, B))
1315 if (CastInst *Op0C = dyn_cast<CastInst>(Op0))
1316 if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
1317 if (Op0C->getOpcode() == Op1C->getOpcode()) { // same cast kind ?
1318 const Type *SrcTy = Op0C->getOperand(0)->getType();
1319 if (SrcTy == Op1C->getOperand(0)->getType() &&
1320 SrcTy->isIntOrIntVector() &&
1321 // Only do this if the casts both really cause code to be generated.
1322 ValueRequiresCast(Op0C->getOpcode(), Op0C->getOperand(0),
1324 ValueRequiresCast(Op1C->getOpcode(), Op1C->getOperand(0),
1326 Value *NewOp = Builder->CreateAnd(Op0C->getOperand(0),
1327 Op1C->getOperand(0), I.getName());
1328 return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
1332 // (X >> Z) & (Y >> Z) -> (X&Y) >> Z for all shifts.
1333 if (BinaryOperator *SI1 = dyn_cast<BinaryOperator>(Op1)) {
1334 if (BinaryOperator *SI0 = dyn_cast<BinaryOperator>(Op0))
1335 if (SI0->isShift() && SI0->getOpcode() == SI1->getOpcode() &&
1336 SI0->getOperand(1) == SI1->getOperand(1) &&
1337 (SI0->hasOneUse() || SI1->hasOneUse())) {
1339 Builder->CreateAnd(SI0->getOperand(0), SI1->getOperand(0),
1341 return BinaryOperator::Create(SI1->getOpcode(), NewOp,
1342 SI1->getOperand(1));
1346 // If and'ing two fcmp, try combine them into one.
1347 if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0))) {
1348 if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
1349 if (Instruction *Res = FoldAndOfFCmps(I, LHS, RHS))
1350 return Res;
1353 return Changed ? &I : 0;
1356 /// CollectBSwapParts - Analyze the specified subexpression and see if it is
1357 /// capable of providing pieces of a bswap. The subexpression provides pieces
1358 /// of a bswap if it is proven that each of the non-zero bytes in the output of
1359 /// the expression came from the corresponding "byte swapped" byte in some other
1360 /// value. For example, if the current subexpression is "(shl i32 %X, 24)" then
1361 /// we know that the expression deposits the low byte of %X into the high byte
1362 /// of the bswap result and that all other bytes are zero. This expression is
1363 /// accepted, the high byte of ByteValues is set to X to indicate a correct
1364 /// match.
1366 /// This function returns true if the match was unsuccessful and false if it succeeded.
1367 /// On entry to the function the "OverallLeftShift" is a signed integer value
1368 /// indicating the number of bytes that the subexpression is later shifted. For
1369 /// example, if the expression is later right shifted by 16 bits, the
1370 /// OverallLeftShift value would be -2 on entry. This is used to specify which
1371 /// byte of ByteValues is actually being set.
1373 /// Similarly, ByteMask is a bitmask where a bit is clear if its corresponding
1374 /// byte is masked to zero by a user. For example, in (X & 255), X will be
1375 /// processed with a bytemask of 1. Because bytemask is 32-bits, this limits
1376 /// this function to working on up to 32-byte (256 bit) values. ByteMask is
1377 /// always in the local (OverallLeftShift) coordinate space.
1379 static bool CollectBSwapParts(Value *V, int OverallLeftShift, uint32_t ByteMask,
1380 SmallVector<Value*, 8> &ByteValues) {
1381 if (Instruction *I = dyn_cast<Instruction>(V)) {
1382 // If this is an or instruction, it may be an inner node of the bswap.
1383 if (I->getOpcode() == Instruction::Or) {
1384 return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
1385 ByteValues) ||
1386 CollectBSwapParts(I->getOperand(1), OverallLeftShift, ByteMask,
1387 ByteValues);
1388 }
1390 // If this is a logical shift by a constant multiple of 8, recurse with
1391 // OverallLeftShift and ByteMask adjusted.
1392 if (I->isLogicalShift() && isa<ConstantInt>(I->getOperand(1))) {
1393 unsigned ShAmt =
1394 cast<ConstantInt>(I->getOperand(1))->getLimitedValue(~0U);
1395 // Ensure the shift amount is defined and of a byte value.
1396 if ((ShAmt & 7) || (ShAmt > 8*ByteValues.size()))
1399 unsigned ByteShift = ShAmt >> 3;
1400 if (I->getOpcode() == Instruction::Shl) {
1401 // X << 2 -> collect(X, +2)
1402 OverallLeftShift += ByteShift;
1403 ByteMask >>= ByteShift;
1405 // X >>u 2 -> collect(X, -2)
1406 OverallLeftShift -= ByteShift;
1407 ByteMask <<= ByteShift;
1408 ByteMask &= (~0U >> (32-ByteValues.size()));
1411 if (OverallLeftShift >= (int)ByteValues.size()) return true;
1412 if (OverallLeftShift <= -(int)ByteValues.size()) return true;
1414 return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
1418 // If this is a logical 'and' with a mask that clears bytes, clear the
1419 // corresponding bytes in ByteMask.
1420 if (I->getOpcode() == Instruction::And &&
1421 isa<ConstantInt>(I->getOperand(1))) {
1422 // Scan every byte of the and mask, seeing if the byte is either 0 or 255.
1423 unsigned NumBytes = ByteValues.size();
1424 APInt Byte(I->getType()->getPrimitiveSizeInBits(), 255);
1425 const APInt &AndMask = cast<ConstantInt>(I->getOperand(1))->getValue();
1427 for (unsigned i = 0; i != NumBytes; ++i, Byte <<= 8) {
1428 // If this byte is masked out by a later operation, we don't care what
1430 if ((ByteMask & (1 << i)) == 0)
1433 // If the AndMask is all zeros for this byte, clear the bit.
1434 APInt MaskB = AndMask & Byte;
1436 ByteMask &= ~(1U << i);
1440 // If the AndMask is not all ones for this byte, it's not a bytezap.
1444 // Otherwise, this byte is kept.
1447 return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
1452 // Okay, we got to something that isn't a shift, 'or' or 'and'. This must be
1453 // the input value to the bswap. Some observations: 1) if more than one byte
1454 // is demanded from this input, then it could not be successfully assembled
1455 // into a byteswap. At least one of the two bytes would not be aligned with
1456 // their ultimate destination.
1457 if (!isPowerOf2_32(ByteMask)) return true;
1458 unsigned InputByteNo = CountTrailingZeros_32(ByteMask);
1460 // 2) The input and ultimate destinations must line up: if byte 3 of an i32
1461 // is demanded, it needs to go into byte 0 of the result. This means that the
1462 // byte needs to be shifted until it lands in the right byte bucket. The
1463 // shift amount depends on the position: if the byte is coming from the high
1464 // part of the value (e.g. byte 3) then it must be shifted right. If from the
1465 // low part, it must be shifted left.
1466 unsigned DestByteNo = InputByteNo + OverallLeftShift;
1467 if (InputByteNo < ByteValues.size()/2) {
1468 if (ByteValues.size()-1-DestByteNo != InputByteNo)
1471 if (ByteValues.size()-1-DestByteNo != InputByteNo)
1475 // If the destination byte value is already defined, the values are or'd
1476 // together, which isn't a bswap (unless it's an or of the same bits).
1477 if (ByteValues[DestByteNo] && ByteValues[DestByteNo] != V)
1479 ByteValues[DestByteNo] = V;
1483 /// MatchBSwap - Given an OR instruction, check to see if this is a bswap idiom.
1484 /// If so, insert the new bswap intrinsic and return it.
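/// For i16, for example, the idiom looks like (or (shl X, 8), (lshr X, 8)).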
1485 Instruction *InstCombiner::MatchBSwap(BinaryOperator &I) {
1486 const IntegerType *ITy = dyn_cast<IntegerType>(I.getType());
1487 if (!ITy || ITy->getBitWidth() % 16 ||
1488 // ByteMask only allows up to 32-byte values.
1489 ITy->getBitWidth() > 32*8)
1490 return 0; // Can only bswap pairs of bytes. Can't do vectors.
1492 /// ByteValues - For each byte of the result, we keep track of which value
1493 /// defines each byte.
1494 SmallVector<Value*, 8> ByteValues;
1495 ByteValues.resize(ITy->getBitWidth()/8);
1497 // Try to find all the pieces corresponding to the bswap.
1498 uint32_t ByteMask = ~0U >> (32-ByteValues.size());
1499 if (CollectBSwapParts(&I, 0, ByteMask, ByteValues))
1502 // Check to see if all of the bytes come from the same value.
1503 Value *V = ByteValues[0];
1504 if (V == 0) return 0; // Didn't find a byte? Must be zero.
1506 // Check to make sure that all of the bytes come from the same value.
1507 for (unsigned i = 1, e = ByteValues.size(); i != e; ++i)
1508 if (ByteValues[i] != V)
1510 const Type *Tys[] = { ITy };
1511 Module *M = I.getParent()->getParent()->getParent();
1512 Function *F = Intrinsic::getDeclaration(M, Intrinsic::bswap, Tys, 1);
1513 return CallInst::Create(F, V);
1516 /// MatchSelectFromAndOr - We have an expression of the form (A&C)|(B&D). Check
1517 /// if A is (cond?-1:0) and either B or D is ~(cond?-1:0) or (cond?0:-1), then
1518 /// we can simplify this expression to "cond ? C : D or B".
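/// For example, if A is (cond ? -1 : 0) and D is (cond ? 0 : -1), the whole
/// expression selects C when cond is true and B when it is false.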
1519 static Instruction *MatchSelectFromAndOr(Value *A, Value *B,
1520 Value *C, Value *D) {
1521 // If A is not a select of -1/0, this cannot match.
1522 Value *Cond;
1523 if (!match(A, m_SelectCst<-1, 0>(m_Value(Cond))))
1526 // ((cond?-1:0)&C) | (B&(cond?0:-1)) -> cond ? C : B.
1527 if (match(D, m_SelectCst<0, -1>(m_Specific(Cond))))
1528 return SelectInst::Create(Cond, C, B);
1529 if (match(D, m_Not(m_SelectCst<-1, 0>(m_Specific(Cond)))))
1530 return SelectInst::Create(Cond, C, B);
1531 // ((cond?-1:0)&C) | ((cond?0:-1)&D) -> cond ? C : D.
1532 if (match(B, m_SelectCst<0, -1>(m_Specific(Cond))))
1533 return SelectInst::Create(Cond, C, D);
1534 if (match(B, m_Not(m_SelectCst<-1, 0>(m_Specific(Cond)))))
1535 return SelectInst::Create(Cond, C, D);
1539 /// FoldOrOfICmps - Fold (icmp)|(icmp) if possible.
1540 Instruction *InstCombiner::FoldOrOfICmps(Instruction &I,
1541 ICmpInst *LHS, ICmpInst *RHS) {
1542 ICmpInst::Predicate LHSCC = LHS->getPredicate(), RHSCC = RHS->getPredicate();
1544 // (icmp1 A, B) | (icmp2 A, B) --> (icmp3 A, B)
1545 if (PredicatesFoldable(LHSCC, RHSCC)) {
1546 if (LHS->getOperand(0) == RHS->getOperand(1) &&
1547 LHS->getOperand(1) == RHS->getOperand(0))
1548 LHS->swapOperands();
1549 if (LHS->getOperand(0) == RHS->getOperand(0) &&
1550 LHS->getOperand(1) == RHS->getOperand(1)) {
1551 Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
1552 unsigned Code = getICmpCode(LHS) | getICmpCode(RHS);
1553 bool isSigned = LHS->isSigned() || RHS->isSigned();
1554 Value *RV = getICmpValue(isSigned, Code, Op0, Op1);
1555 if (Instruction *I = dyn_cast<Instruction>(RV))
1556 return I;
1557 // Otherwise, it's a constant boolean value.
1558 return ReplaceInstUsesWith(I, RV);
1562 // This only handles icmp of constants: (icmp1 A, C1) | (icmp2 B, C2).
1563 Value *Val = LHS->getOperand(0), *Val2 = RHS->getOperand(0);
1564 ConstantInt *LHSCst = dyn_cast<ConstantInt>(LHS->getOperand(1));
1565 ConstantInt *RHSCst = dyn_cast<ConstantInt>(RHS->getOperand(1));
1566 if (LHSCst == 0 || RHSCst == 0) return 0;
1568 // (icmp ne A, 0) | (icmp ne B, 0) --> (icmp ne (A|B), 0)
1569 if (LHSCst == RHSCst && LHSCC == RHSCC &&
1570 LHSCC == ICmpInst::ICMP_NE && LHSCst->isZero()) {
1571 Value *NewOr = Builder->CreateOr(Val, Val2);
1572 return new ICmpInst(LHSCC, NewOr, LHSCst);
1575 // From here on, we only handle:
1576 // (icmp1 A, C1) | (icmp2 A, C2) --> something simpler.
1577 if (Val != Val2) return 0;
1579 // ICMP_[US][GL]E X, CST is folded to ICMP_[US][GL]T elsewhere.
1580 if (LHSCC == ICmpInst::ICMP_UGE || LHSCC == ICmpInst::ICMP_ULE ||
1581 RHSCC == ICmpInst::ICMP_UGE || RHSCC == ICmpInst::ICMP_ULE ||
1582 LHSCC == ICmpInst::ICMP_SGE || LHSCC == ICmpInst::ICMP_SLE ||
1583       RHSCC == ICmpInst::ICMP_SGE || RHSCC == ICmpInst::ICMP_SLE) return 0;
1586 // We can't fold (ugt x, C) | (sgt x, C2).
1587   if (!PredicatesFoldable(LHSCC, RHSCC)) return 0;
1590 // Ensure that the larger constant is on the RHS.
1592 if (CmpInst::isSigned(LHSCC) ||
1593 (ICmpInst::isEquality(LHSCC) &&
1594 CmpInst::isSigned(RHSCC)))
1595 ShouldSwap = LHSCst->getValue().sgt(RHSCst->getValue());
1597 ShouldSwap = LHSCst->getValue().ugt(RHSCst->getValue());
1600 std::swap(LHS, RHS);
1601 std::swap(LHSCst, RHSCst);
1602 std::swap(LHSCC, RHSCC);
1605   // At this point, we know we have two icmp instructions
1606   // comparing a value against two constants and or'ing the result
1607   // together. Because of the above check, we know that we only have
1608   // ICMP_EQ, ICMP_NE, ICMP_LT, and ICMP_GT here. We also know (from the
1609   // icmp folding check above), that the two constants are not equal.
1611 assert(LHSCst != RHSCst && "Compares not folded above?");
1614 default: llvm_unreachable("Unknown integer condition code!");
1615 case ICmpInst::ICMP_EQ:
1617 default: llvm_unreachable("Unknown integer condition code!");
1618 case ICmpInst::ICMP_EQ:
1619 if (LHSCst == SubOne(RHSCst)) {
1620 // (X == 13 | X == 14) -> X-13 <u 2
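// e.g. AddCST is -13, so Add computes X-13, and the bound is (14+1)-13 == 2;
// the two equality tests collapse into one unsigned range check.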
1621 Constant *AddCST = ConstantExpr::getNeg(LHSCst);
1622 Value *Add = Builder->CreateAdd(Val, AddCST, Val->getName()+".off");
1623 AddCST = ConstantExpr::getSub(AddOne(RHSCst), LHSCst);
1624 return new ICmpInst(ICmpInst::ICMP_ULT, Add, AddCST);
1626 break; // (X == 13 | X == 15) -> no change
1627 case ICmpInst::ICMP_UGT: // (X == 13 | X u> 14) -> no change
1628 case ICmpInst::ICMP_SGT: // (X == 13 | X s> 14) -> no change
1630 case ICmpInst::ICMP_NE: // (X == 13 | X != 15) -> X != 15
1631 case ICmpInst::ICMP_ULT: // (X == 13 | X u< 15) -> X u< 15
1632 case ICmpInst::ICMP_SLT: // (X == 13 | X s< 15) -> X s< 15
1633 return ReplaceInstUsesWith(I, RHS);
1636 case ICmpInst::ICMP_NE:
1638 default: llvm_unreachable("Unknown integer condition code!");
1639 case ICmpInst::ICMP_EQ: // (X != 13 | X == 15) -> X != 13
1640 case ICmpInst::ICMP_UGT: // (X != 13 | X u> 15) -> X != 13
1641 case ICmpInst::ICMP_SGT: // (X != 13 | X s> 15) -> X != 13
1642 return ReplaceInstUsesWith(I, LHS);
1643 case ICmpInst::ICMP_NE: // (X != 13 | X != 15) -> true
1644 case ICmpInst::ICMP_ULT: // (X != 13 | X u< 15) -> true
1645 case ICmpInst::ICMP_SLT: // (X != 13 | X s< 15) -> true
1646 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
1649 case ICmpInst::ICMP_ULT:
1651 default: llvm_unreachable("Unknown integer condition code!");
1652 case ICmpInst::ICMP_EQ: // (X u< 13 | X == 14) -> no change
1654 case ICmpInst::ICMP_UGT: // (X u< 13 | X u> 15) -> (X-13) u> 2
1655 // If RHSCst is [us]MAXINT, it is always false. Not handling
1656 // this can cause overflow.
1657 if (RHSCst->isMaxValue(false))
1658 return ReplaceInstUsesWith(I, LHS);
1659 return InsertRangeTest(Val, LHSCst, AddOne(RHSCst),
1661 case ICmpInst::ICMP_SGT: // (X u< 13 | X s> 15) -> no change
1663 case ICmpInst::ICMP_NE: // (X u< 13 | X != 15) -> X != 15
1664 case ICmpInst::ICMP_ULT: // (X u< 13 | X u< 15) -> X u< 15
1665 return ReplaceInstUsesWith(I, RHS);
1666 case ICmpInst::ICMP_SLT: // (X u< 13 | X s< 15) -> no change
1670 case ICmpInst::ICMP_SLT:
1672 default: llvm_unreachable("Unknown integer condition code!");
1673 case ICmpInst::ICMP_EQ: // (X s< 13 | X == 14) -> no change
1675 case ICmpInst::ICMP_SGT: // (X s< 13 | X s> 15) -> (X-13) s> 2
1676 // If RHSCst is [us]MAXINT, it is always false. Not handling
1677 // this can cause overflow.
1678 if (RHSCst->isMaxValue(true))
1679 return ReplaceInstUsesWith(I, LHS);
1680 return InsertRangeTest(Val, LHSCst, AddOne(RHSCst),
1682 case ICmpInst::ICMP_UGT: // (X s< 13 | X u> 15) -> no change
1684 case ICmpInst::ICMP_NE: // (X s< 13 | X != 15) -> X != 15
1685 case ICmpInst::ICMP_SLT: // (X s< 13 | X s< 15) -> X s< 15
1686 return ReplaceInstUsesWith(I, RHS);
1687 case ICmpInst::ICMP_ULT: // (X s< 13 | X u< 15) -> no change
1691 case ICmpInst::ICMP_UGT:
1693 default: llvm_unreachable("Unknown integer condition code!");
1694 case ICmpInst::ICMP_EQ: // (X u> 13 | X == 15) -> X u> 13
1695 case ICmpInst::ICMP_UGT: // (X u> 13 | X u> 15) -> X u> 13
1696 return ReplaceInstUsesWith(I, LHS);
1697 case ICmpInst::ICMP_SGT: // (X u> 13 | X s> 15) -> no change
1699 case ICmpInst::ICMP_NE: // (X u> 13 | X != 15) -> true
1700 case ICmpInst::ICMP_ULT: // (X u> 13 | X u< 15) -> true
1701 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
1702 case ICmpInst::ICMP_SLT: // (X u> 13 | X s< 15) -> no change
1706 case ICmpInst::ICMP_SGT:
1708 default: llvm_unreachable("Unknown integer condition code!");
1709 case ICmpInst::ICMP_EQ: // (X s> 13 | X == 15) -> X > 13
1710 case ICmpInst::ICMP_SGT: // (X s> 13 | X s> 15) -> X > 13
1711 return ReplaceInstUsesWith(I, LHS);
1712 case ICmpInst::ICMP_UGT: // (X s> 13 | X u> 15) -> no change
1714 case ICmpInst::ICMP_NE: // (X s> 13 | X != 15) -> true
1715 case ICmpInst::ICMP_SLT: // (X s> 13 | X s< 15) -> true
1716 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
1717 case ICmpInst::ICMP_ULT: // (X s> 13 | X u< 15) -> no change
1725 Instruction *InstCombiner::FoldOrOfFCmps(Instruction &I, FCmpInst *LHS, FCmpInst *RHS) {
1727 if (LHS->getPredicate() == FCmpInst::FCMP_UNO &&
1728 RHS->getPredicate() == FCmpInst::FCMP_UNO &&
1729 LHS->getOperand(0)->getType() == RHS->getOperand(0)->getType()) {
1730 if (ConstantFP *LHSC = dyn_cast<ConstantFP>(LHS->getOperand(1)))
1731 if (ConstantFP *RHSC = dyn_cast<ConstantFP>(RHS->getOperand(1))) {
1732         // If either of the constants is a NaN, then the whole thing returns true.
1734 if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN())
1735 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
1737         // Otherwise, no need to compare the two constants; compare the two variables instead.
1739 return new FCmpInst(FCmpInst::FCMP_UNO,
1740 LHS->getOperand(0), RHS->getOperand(0));
1743 // Handle vector zeros. This occurs because the canonical form of
1744 // "fcmp uno x,x" is "fcmp uno x, 0".
1745 if (isa<ConstantAggregateZero>(LHS->getOperand(1)) &&
1746 isa<ConstantAggregateZero>(RHS->getOperand(1)))
1747 return new FCmpInst(FCmpInst::FCMP_UNO,
1748 LHS->getOperand(0), RHS->getOperand(0));
1753 Value *Op0LHS = LHS->getOperand(0), *Op0RHS = LHS->getOperand(1);
1754 Value *Op1LHS = RHS->getOperand(0), *Op1RHS = RHS->getOperand(1);
1755 FCmpInst::Predicate Op0CC = LHS->getPredicate(), Op1CC = RHS->getPredicate();
1757 if (Op0LHS == Op1RHS && Op0RHS == Op1LHS) {
1758 // Swap RHS operands to match LHS.
1759 Op1CC = FCmpInst::getSwappedPredicate(Op1CC);
1760 std::swap(Op1LHS, Op1RHS);
1762 if (Op0LHS == Op1LHS && Op0RHS == Op1RHS) {
1763 // Simplify (fcmp cc0 x, y) | (fcmp cc1 x, y).
1765     if (Op0CC == Op1CC) return new FCmpInst((FCmpInst::Predicate)Op0CC, Op0LHS, Op0RHS);
1767 if (Op0CC == FCmpInst::FCMP_TRUE || Op1CC == FCmpInst::FCMP_TRUE)
1768 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getContext()));
1769 if (Op0CC == FCmpInst::FCMP_FALSE)
1770 return ReplaceInstUsesWith(I, RHS);
1771 if (Op1CC == FCmpInst::FCMP_FALSE)
1772 return ReplaceInstUsesWith(I, LHS);
1775 unsigned Op0Pred = getFCmpCode(Op0CC, Op0Ordered);
1776 unsigned Op1Pred = getFCmpCode(Op1CC, Op1Ordered);
1777 if (Op0Ordered == Op1Ordered) {
1778 // If both are ordered or unordered, return a new fcmp with
1779 // or'ed predicates.
1780 Value *RV = getFCmpValue(Op0Ordered, Op0Pred|Op1Pred, Op0LHS, Op0RHS);
1781       if (Instruction *I = dyn_cast<Instruction>(RV)) return I;
1783 // Otherwise, it's a constant boolean value...
1784 return ReplaceInstUsesWith(I, RV);
1790 /// FoldOrWithConstants - This helper function folds:
1792 ///     ((A | B) & C1) | (B & C2)  -->  (A & C1) | B
1798 /// when the XOR of the two constants is "all ones" (-1).
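/// For example, with C1 = 1 and C2 = -2 this rewrites ((A|B)&1)|(B&-2) into
/// (A&1)|B, which is exactly the pattern visitOr hands to this helper below.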
1799 Instruction *InstCombiner::FoldOrWithConstants(BinaryOperator &I, Value *Op,
1800 Value *A, Value *B, Value *C) {
1801 ConstantInt *CI1 = dyn_cast<ConstantInt>(C);
1805 ConstantInt *CI2 = 0;
1806 if (!match(Op, m_And(m_Value(V1), m_ConstantInt(CI2)))) return 0;
1808 APInt Xor = CI1->getValue() ^ CI2->getValue();
1809 if (!Xor.isAllOnesValue()) return 0;
1811 if (V1 == A || V1 == B) {
1812 Value *NewOp = Builder->CreateAnd((V1 == A) ? B : A, CI1);
1813 return BinaryOperator::CreateOr(NewOp, V1);
1819 Instruction *InstCombiner::visitOr(BinaryOperator &I) {
1820 bool Changed = SimplifyCommutative(I);
1821 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1823 if (Value *V = SimplifyOrInst(Op0, Op1, TD))
1824 return ReplaceInstUsesWith(I, V);
1827 // See if we can simplify any instructions used by the instruction whose sole
1828 // purpose is to compute bits we don't care about.
1829 if (SimplifyDemandedInstructionBits(I))
1832 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
1833 ConstantInt *C1 = 0; Value *X = 0;
1834 // (X & C1) | C2 --> (X | C2) & (C1|C2)
1835 if (match(Op0, m_And(m_Value(X), m_ConstantInt(C1))) &&
1837 Value *Or = Builder->CreateOr(X, RHS);
1839 return BinaryOperator::CreateAnd(Or,
1840 ConstantInt::get(I.getContext(),
1841 RHS->getValue() | C1->getValue()));
1844 // (X ^ C1) | C2 --> (X | C2) ^ (C1&~C2)
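// Rationale: the or forces every bit of C2 to one, so the trailing xor must not
// touch those bits; only the bits of C1 outside C2 still need to be flipped.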
1845 if (match(Op0, m_Xor(m_Value(X), m_ConstantInt(C1))) &&
1847 Value *Or = Builder->CreateOr(X, RHS);
1849 return BinaryOperator::CreateXor(Or,
1850 ConstantInt::get(I.getContext(),
1851 C1->getValue() & ~RHS->getValue()));
1854 // Try to fold constant and into select arguments.
1855 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
1856 if (Instruction *R = FoldOpIntoSelect(I, SI))
1858 if (isa<PHINode>(Op0))
1859 if (Instruction *NV = FoldOpIntoPhi(I))
1863 Value *A = 0, *B = 0;
1864 ConstantInt *C1 = 0, *C2 = 0;
1866 // (A | B) | C and A | (B | C) -> bswap if possible.
1867 // (A >> B) | (C << D) and (A << B) | (B >> C) -> bswap if possible.
1868 if (match(Op0, m_Or(m_Value(), m_Value())) ||
1869 match(Op1, m_Or(m_Value(), m_Value())) ||
1870 (match(Op0, m_Shift(m_Value(), m_Value())) &&
1871 match(Op1, m_Shift(m_Value(), m_Value())))) {
1872 if (Instruction *BSwap = MatchBSwap(I))
1876 // (X^C)|Y -> (X|Y)^C iff Y&C == 0
1877 if (Op0->hasOneUse() &&
1878 match(Op0, m_Xor(m_Value(A), m_ConstantInt(C1))) &&
1879 MaskedValueIsZero(Op1, C1->getValue())) {
1880 Value *NOr = Builder->CreateOr(A, Op1);
1882 return BinaryOperator::CreateXor(NOr, C1);
1885 // Y|(X^C) -> (X|Y)^C iff Y&C == 0
1886 if (Op1->hasOneUse() &&
1887 match(Op1, m_Xor(m_Value(A), m_ConstantInt(C1))) &&
1888 MaskedValueIsZero(Op0, C1->getValue())) {
1889 Value *NOr = Builder->CreateOr(A, Op0);
1891 return BinaryOperator::CreateXor(NOr, C1);
1895 Value *C = 0, *D = 0;
1896 if (match(Op0, m_And(m_Value(A), m_Value(C))) &&
1897 match(Op1, m_And(m_Value(B), m_Value(D)))) {
1898 Value *V1 = 0, *V2 = 0, *V3 = 0;
1899 C1 = dyn_cast<ConstantInt>(C);
1900 C2 = dyn_cast<ConstantInt>(D);
1901 if (C1 && C2) { // (A & C1)|(B & C2)
1902 // If we have: ((V + N) & C1) | (V & C2)
1903 // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
1904 // replace with V+N.
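// This is safe because C2 is a mask of contiguous low bits and (N & C2) == 0,
// so the add cannot change the low C2 bits of V. Then ((V+N)&C1)|(V&C2) ==
// ((V+N)&C1)|((V+N)&C2) == (V+N)&(C1|C2) == V+N.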
1905 if (C1->getValue() == ~C2->getValue()) {
1906 if ((C2->getValue() & (C2->getValue()+1)) == 0 && // C2 == 0+1+
1907 match(A, m_Add(m_Value(V1), m_Value(V2)))) {
1908 // Add commutes, try both ways.
1909 if (V1 == B && MaskedValueIsZero(V2, C2->getValue()))
1910 return ReplaceInstUsesWith(I, A);
1911 if (V2 == B && MaskedValueIsZero(V1, C2->getValue()))
1912 return ReplaceInstUsesWith(I, A);
1914 // Or commutes, try both ways.
1915 if ((C1->getValue() & (C1->getValue()+1)) == 0 &&
1916 match(B, m_Add(m_Value(V1), m_Value(V2)))) {
1917 // Add commutes, try both ways.
1918 if (V1 == A && MaskedValueIsZero(V2, C1->getValue()))
1919 return ReplaceInstUsesWith(I, B);
1920 if (V2 == A && MaskedValueIsZero(V1, C1->getValue()))
1921 return ReplaceInstUsesWith(I, B);
1925 // ((V | N) & C1) | (V & C2) --> (V|N) & (C1|C2)
1926 // iff (C1&C2) == 0 and (N&~C1) == 0
1927 if ((C1->getValue() & C2->getValue()) == 0) {
1928 if (match(A, m_Or(m_Value(V1), m_Value(V2))) &&
1929 ((V1 == B && MaskedValueIsZero(V2, ~C1->getValue())) || // (V|N)
1930 (V2 == B && MaskedValueIsZero(V1, ~C1->getValue())))) // (N|V)
1931 return BinaryOperator::CreateAnd(A,
1932 ConstantInt::get(A->getContext(),
1933 C1->getValue()|C2->getValue()));
1934 // Or commutes, try both ways.
1935 if (match(B, m_Or(m_Value(V1), m_Value(V2))) &&
1936 ((V1 == A && MaskedValueIsZero(V2, ~C2->getValue())) || // (V|N)
1937 (V2 == A && MaskedValueIsZero(V1, ~C2->getValue())))) // (N|V)
1938 return BinaryOperator::CreateAnd(B,
1939 ConstantInt::get(B->getContext(),
1940 C1->getValue()|C2->getValue()));
1944 // Check to see if we have any common things being and'ed. If so, find the
1945 // terms for V1 & (V2|V3).
1946 if (Op0->hasOneUse() || Op1->hasOneUse()) {
1948 if (A == B) // (A & C)|(A & D) == A & (C|D)
1949 V1 = A, V2 = C, V3 = D;
1950 else if (A == D) // (A & C)|(B & A) == A & (B|C)
1951 V1 = A, V2 = B, V3 = C;
1952 else if (C == B) // (A & C)|(C & D) == C & (A|D)
1953 V1 = C, V2 = A, V3 = D;
1954 else if (C == D) // (A & C)|(B & C) == C & (A|B)
1955 V1 = C, V2 = A, V3 = B;
1958 Value *Or = Builder->CreateOr(V2, V3, "tmp");
1959 return BinaryOperator::CreateAnd(V1, Or);
1963 // (A & (C0?-1:0)) | (B & ~(C0?-1:0)) -> C0 ? A : B, and commuted variants
1964 if (Instruction *Match = MatchSelectFromAndOr(A, B, C, D))
1966 if (Instruction *Match = MatchSelectFromAndOr(B, A, D, C))
1968 if (Instruction *Match = MatchSelectFromAndOr(C, B, A, D))
1970 if (Instruction *Match = MatchSelectFromAndOr(D, A, B, C))
1973 // ((A&~B)|(~A&B)) -> A^B
1974 if ((match(C, m_Not(m_Specific(D))) &&
1975 match(B, m_Not(m_Specific(A)))))
1976 return BinaryOperator::CreateXor(A, D);
1977 // ((~B&A)|(~A&B)) -> A^B
1978 if ((match(A, m_Not(m_Specific(D))) &&
1979 match(B, m_Not(m_Specific(C)))))
1980 return BinaryOperator::CreateXor(C, D);
1981 // ((A&~B)|(B&~A)) -> A^B
1982 if ((match(C, m_Not(m_Specific(B))) &&
1983 match(D, m_Not(m_Specific(A)))))
1984 return BinaryOperator::CreateXor(A, B);
1985 // ((~B&A)|(B&~A)) -> A^B
1986 if ((match(A, m_Not(m_Specific(B))) &&
1987 match(D, m_Not(m_Specific(C)))))
1988 return BinaryOperator::CreateXor(C, B);
1991 // (X >> Z) | (Y >> Z) -> (X|Y) >> Z for all shifts.
1992 if (BinaryOperator *SI1 = dyn_cast<BinaryOperator>(Op1)) {
1993 if (BinaryOperator *SI0 = dyn_cast<BinaryOperator>(Op0))
1994 if (SI0->isShift() && SI0->getOpcode() == SI1->getOpcode() &&
1995 SI0->getOperand(1) == SI1->getOperand(1) &&
1996 (SI0->hasOneUse() || SI1->hasOneUse())) {
1997         Value *NewOp = Builder->CreateOr(SI0->getOperand(0), SI1->getOperand(0), SI0->getName());
1999 return BinaryOperator::Create(SI1->getOpcode(), NewOp,
2000 SI1->getOperand(1));
2004 // ((A|B)&1)|(B&-2) -> (A&1) | B
2005 if (match(Op0, m_And(m_Or(m_Value(A), m_Value(B)), m_Value(C))) ||
2006 match(Op0, m_And(m_Value(C), m_Or(m_Value(A), m_Value(B))))) {
2007 Instruction *Ret = FoldOrWithConstants(I, Op1, A, B, C);
2008 if (Ret) return Ret;
2010 // (B&-2)|((A|B)&1) -> (A&1) | B
2011 if (match(Op1, m_And(m_Or(m_Value(A), m_Value(B)), m_Value(C))) ||
2012 match(Op1, m_And(m_Value(C), m_Or(m_Value(A), m_Value(B))))) {
2013 Instruction *Ret = FoldOrWithConstants(I, Op0, A, B, C);
2014 if (Ret) return Ret;
2017 // (~A | ~B) == (~(A & B)) - De Morgan's Law
2018 if (Value *Op0NotVal = dyn_castNotVal(Op0))
2019 if (Value *Op1NotVal = dyn_castNotVal(Op1))
2020 if (Op0->hasOneUse() && Op1->hasOneUse()) {
2021 Value *And = Builder->CreateAnd(Op0NotVal, Op1NotVal,
2022 I.getName()+".demorgan");
2023 return BinaryOperator::CreateNot(And);
2026 if (ICmpInst *RHS = dyn_cast<ICmpInst>(I.getOperand(1)))
2027 if (ICmpInst *LHS = dyn_cast<ICmpInst>(I.getOperand(0)))
2028 if (Instruction *Res = FoldOrOfICmps(I, LHS, RHS))
2031 // fold (or (cast A), (cast B)) -> (cast (or A, B))
2032 if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
2033 if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
2034 if (Op0C->getOpcode() == Op1C->getOpcode()) {// same cast kind ?
2035 if (!isa<ICmpInst>(Op0C->getOperand(0)) ||
2036 !isa<ICmpInst>(Op1C->getOperand(0))) {
2037 const Type *SrcTy = Op0C->getOperand(0)->getType();
2038 if (SrcTy == Op1C->getOperand(0)->getType() &&
2039 SrcTy->isIntOrIntVector() &&
2040             // Only do this if the casts both really cause code to be generated.
2042 ValueRequiresCast(Op0C->getOpcode(), Op0C->getOperand(0),
2044 ValueRequiresCast(Op1C->getOpcode(), Op1C->getOperand(0),
2046 Value *NewOp = Builder->CreateOr(Op0C->getOperand(0),
2047 Op1C->getOperand(0), I.getName());
2048 return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
2055 // (fcmp uno x, c) | (fcmp uno y, c) -> (fcmp uno x, y)
2056 if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0))) {
2057 if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
2058 if (Instruction *Res = FoldOrOfFCmps(I, LHS, RHS))
2062 return Changed ? &I : 0;
2065 Instruction *InstCombiner::visitXor(BinaryOperator &I) {
2066 bool Changed = SimplifyCommutative(I);
2067 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2069 if (isa<UndefValue>(Op1)) {
2070 if (isa<UndefValue>(Op0))
2071       // Handle undef ^ undef -> 0 special case. This is a common idiom.
2073 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
2074 return ReplaceInstUsesWith(I, Op1); // X ^ undef -> undef
2079   if (Op0 == Op1) return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType())); // X ^ X -> 0
2081 // See if we can simplify any instructions used by the instruction whose sole
2082 // purpose is to compute bits we don't care about.
2083 if (SimplifyDemandedInstructionBits(I))
2085 if (isa<VectorType>(I.getType()))
2086 if (isa<ConstantAggregateZero>(Op1))
2087 return ReplaceInstUsesWith(I, Op0); // X ^ <0,0> -> X
2089 // Is this a ~ operation?
2090 if (Value *NotOp = dyn_castNotVal(&I)) {
2091 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(NotOp)) {
2092 if (Op0I->getOpcode() == Instruction::And ||
2093 Op0I->getOpcode() == Instruction::Or) {
2094 // ~(~X & Y) --> (X | ~Y) - De Morgan's Law
2095 // ~(~X | Y) === (X & ~Y) - De Morgan's Law
2096 if (dyn_castNotVal(Op0I->getOperand(1)))
2097 Op0I->swapOperands();
2098 if (Value *Op0NotVal = dyn_castNotVal(Op0I->getOperand(0))) {
2100         Value *NotY = Builder->CreateNot(Op0I->getOperand(1),
2101 Op0I->getOperand(1)->getName()+".not");
2102 if (Op0I->getOpcode() == Instruction::And)
2103 return BinaryOperator::CreateOr(Op0NotVal, NotY);
2104 return BinaryOperator::CreateAnd(Op0NotVal, NotY);
2107 // ~(X & Y) --> (~X | ~Y) - De Morgan's Law
2108 // ~(X | Y) === (~X & ~Y) - De Morgan's Law
2109 if (isFreeToInvert(Op0I->getOperand(0)) &&
2110 isFreeToInvert(Op0I->getOperand(1))) {
2112         Value *NotX = Builder->CreateNot(Op0I->getOperand(0), "notlhs");
2114         Value *NotY = Builder->CreateNot(Op0I->getOperand(1), "notrhs");
2115 if (Op0I->getOpcode() == Instruction::And)
2116 return BinaryOperator::CreateOr(NotX, NotY);
2117 return BinaryOperator::CreateAnd(NotX, NotY);
2124 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
2125 if (RHS->isOne() && Op0->hasOneUse()) {
2126 // xor (cmp A, B), true = not (cmp A, B) = !cmp A, B
2127 if (ICmpInst *ICI = dyn_cast<ICmpInst>(Op0))
2128 return new ICmpInst(ICI->getInversePredicate(),
2129 ICI->getOperand(0), ICI->getOperand(1));
2131 if (FCmpInst *FCI = dyn_cast<FCmpInst>(Op0))
2132 return new FCmpInst(FCI->getInversePredicate(),
2133 FCI->getOperand(0), FCI->getOperand(1));
2136 // fold (xor(zext(cmp)), 1) and (xor(sext(cmp)), -1) to ext(!cmp).
2137 if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
2138 if (CmpInst *CI = dyn_cast<CmpInst>(Op0C->getOperand(0))) {
2139 if (CI->hasOneUse() && Op0C->hasOneUse()) {
2140 Instruction::CastOps Opcode = Op0C->getOpcode();
2141 if ((Opcode == Instruction::ZExt || Opcode == Instruction::SExt) &&
2142 (RHS == ConstantExpr::getCast(Opcode,
2143 ConstantInt::getTrue(I.getContext()),
2144 Op0C->getDestTy()))) {
2145 CI->setPredicate(CI->getInversePredicate());
2146 return CastInst::Create(Opcode, CI, Op0C->getType());
2152 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
2153 // ~(c-X) == X-c-1 == X+(-c-1)
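// This follows from ~y == -y-1 for any integer y: ~(c-X) == -(c-X)-1 == X-c-1.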
2154 if (Op0I->getOpcode() == Instruction::Sub && RHS->isAllOnesValue())
2155 if (Constant *Op0I0C = dyn_cast<Constant>(Op0I->getOperand(0))) {
2156 Constant *NegOp0I0C = ConstantExpr::getNeg(Op0I0C);
2157 Constant *ConstantRHS = ConstantExpr::getSub(NegOp0I0C,
2158 ConstantInt::get(I.getType(), 1));
2159 return BinaryOperator::CreateAdd(Op0I->getOperand(1), ConstantRHS);
2162 if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
2163 if (Op0I->getOpcode() == Instruction::Add) {
2164         // ~(X+c) --> (-c-1)-X
2165 if (RHS->isAllOnesValue()) {
2166 Constant *NegOp0CI = ConstantExpr::getNeg(Op0CI);
2167 return BinaryOperator::CreateSub(
2168 ConstantExpr::getSub(NegOp0CI,
2169 ConstantInt::get(I.getType(), 1)),
2170 Op0I->getOperand(0));
2171 } else if (RHS->getValue().isSignBit()) {
2172 // (X + C) ^ signbit -> (X + C + signbit)
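// Flipping the sign bit and adding the sign bit are the same operation: an add
// into the top bit can only carry out of the value entirely, so no other bit
// is affected.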
2173 Constant *C = ConstantInt::get(I.getContext(),
2174 RHS->getValue() + Op0CI->getValue());
2175 return BinaryOperator::CreateAdd(Op0I->getOperand(0), C);
2178 } else if (Op0I->getOpcode() == Instruction::Or) {
2179         // (X|C1)^C2 -> X^(C1|C2) iff (X & C1) == 0
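// Since (X & C1) == 0, X|C1 equals X^C1, so the result is really X^(C1^C2);
// building C1|C2 and then clearing the bits common to C1 and C2 below yields
// exactly C1^C2.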
2180 if (MaskedValueIsZero(Op0I->getOperand(0), Op0CI->getValue())) {
2181 Constant *NewRHS = ConstantExpr::getOr(Op0CI, RHS);
2182           // Anything in both C1 and C2 is known to be zero, remove it from the new RHS.
2184 Constant *CommonBits = ConstantExpr::getAnd(Op0CI, RHS);
2185 NewRHS = ConstantExpr::getAnd(NewRHS,
2186 ConstantExpr::getNot(CommonBits));
2188 I.setOperand(0, Op0I->getOperand(0));
2189 I.setOperand(1, NewRHS);
2196 // Try to fold constant and into select arguments.
2197 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
2198 if (Instruction *R = FoldOpIntoSelect(I, SI))
2200 if (isa<PHINode>(Op0))
2201 if (Instruction *NV = FoldOpIntoPhi(I))
2205 if (Value *X = dyn_castNotVal(Op0)) // ~A ^ A == -1
2207 return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));
2209 if (Value *X = dyn_castNotVal(Op1)) // A ^ ~A == -1
2211 return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));
2214 BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1);
2217 if (match(Op1I, m_Or(m_Value(A), m_Value(B)))) {
2218 if (A == Op0) { // B^(B|A) == (A|B)^B
2219 Op1I->swapOperands();
2221 std::swap(Op0, Op1);
2222 } else if (B == Op0) { // B^(A|B) == (A|B)^B
2223 I.swapOperands(); // Simplified below.
2224 std::swap(Op0, Op1);
2226 } else if (match(Op1I, m_Xor(m_Specific(Op0), m_Value(B)))) {
2227 return ReplaceInstUsesWith(I, B); // A^(A^B) == B
2228 } else if (match(Op1I, m_Xor(m_Value(A), m_Specific(Op0)))) {
2229 return ReplaceInstUsesWith(I, A); // A^(B^A) == B
2230 } else if (match(Op1I, m_And(m_Value(A), m_Value(B))) &&
2232 if (A == Op0) { // A^(A&B) -> A^(B&A)
2233 Op1I->swapOperands();
2236 if (B == Op0) { // A^(B&A) -> (B&A)^A
2237 I.swapOperands(); // Simplified below.
2238 std::swap(Op0, Op1);
2243 BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0);
2246 if (match(Op0I, m_Or(m_Value(A), m_Value(B))) &&
2247 Op0I->hasOneUse()) {
2248 if (A == Op1) // (B|A)^B == (A|B)^B
2250 if (B == Op1) // (A|B)^B == A & ~B
2251 return BinaryOperator::CreateAnd(A, Builder->CreateNot(Op1, "tmp"));
2252 } else if (match(Op0I, m_Xor(m_Specific(Op1), m_Value(B)))) {
2253 return ReplaceInstUsesWith(I, B); // (A^B)^A == B
2254 } else if (match(Op0I, m_Xor(m_Value(A), m_Specific(Op1)))) {
2255 return ReplaceInstUsesWith(I, A); // (B^A)^A == B
2256 } else if (match(Op0I, m_And(m_Value(A), m_Value(B))) &&
2258 if (A == Op1) // (A&B)^A -> (B&A)^A
2260 if (B == Op1 && // (B&A)^A == ~B & A
2261 !isa<ConstantInt>(Op1)) { // Canonical form is (B&C)^C
2262 return BinaryOperator::CreateAnd(Builder->CreateNot(A, "tmp"), Op1);
2267 // (X >> Z) ^ (Y >> Z) -> (X^Y) >> Z for all shifts.
2268 if (Op0I && Op1I && Op0I->isShift() &&
2269 Op0I->getOpcode() == Op1I->getOpcode() &&
2270 Op0I->getOperand(1) == Op1I->getOperand(1) &&
2271       (Op0I->hasOneUse() || Op1I->hasOneUse())) {
2273     Value *NewOp = Builder->CreateXor(Op0I->getOperand(0), Op1I->getOperand(0), Op0I->getName());
2275 return BinaryOperator::Create(Op1I->getOpcode(), NewOp,
2276 Op1I->getOperand(1));
2280 Value *A, *B, *C, *D;
2281 // (A & B)^(A | B) -> A ^ B
2282 if (match(Op0I, m_And(m_Value(A), m_Value(B))) &&
2283 match(Op1I, m_Or(m_Value(C), m_Value(D)))) {
2284 if ((A == C && B == D) || (A == D && B == C))
2285 return BinaryOperator::CreateXor(A, B);
2287 // (A | B)^(A & B) -> A ^ B
2288 if (match(Op0I, m_Or(m_Value(A), m_Value(B))) &&
2289 match(Op1I, m_And(m_Value(C), m_Value(D)))) {
2290 if ((A == C && B == D) || (A == D && B == C))
2291 return BinaryOperator::CreateXor(A, B);
2295 if ((Op0I->hasOneUse() || Op1I->hasOneUse()) &&
2296 match(Op0I, m_And(m_Value(A), m_Value(B))) &&
2297 match(Op1I, m_And(m_Value(C), m_Value(D)))) {
2298       // (X & Y)^(X & Z) -> (Y^Z) & X
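// AND distributes over XOR, so (X&Y)^(X&Z) == X&(Y^Z); the checks below just
// work out which operand plays the role of X.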
2299 Value *X = 0, *Y = 0, *Z = 0;
2301 X = A, Y = B, Z = D;
2303 X = A, Y = B, Z = C;
2305 X = B, Y = A, Z = D;
2307 X = B, Y = A, Z = C;
2310 Value *NewOp = Builder->CreateXor(Y, Z, Op0->getName());
2311 return BinaryOperator::CreateAnd(NewOp, X);
2316 // (icmp1 A, B) ^ (icmp2 A, B) --> (icmp3 A, B)
2317 if (ICmpInst *RHS = dyn_cast<ICmpInst>(I.getOperand(1)))
2318 if (ICmpInst *LHS = dyn_cast<ICmpInst>(I.getOperand(0)))
2319 if (PredicatesFoldable(LHS->getPredicate(), RHS->getPredicate())) {
2320 if (LHS->getOperand(0) == RHS->getOperand(1) &&
2321 LHS->getOperand(1) == RHS->getOperand(0))
2322 LHS->swapOperands();
2323 if (LHS->getOperand(0) == RHS->getOperand(0) &&
2324 LHS->getOperand(1) == RHS->getOperand(1)) {
2325 Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
2326 unsigned Code = getICmpCode(LHS) ^ getICmpCode(RHS);
2327 bool isSigned = LHS->isSigned() || RHS->isSigned();
2328 Value *RV = getICmpValue(isSigned, Code, Op0, Op1);
2329 if (Instruction *I = dyn_cast<Instruction>(RV))
2331 // Otherwise, it's a constant boolean value.
2332 return ReplaceInstUsesWith(I, RV);
2336 // fold (xor (cast A), (cast B)) -> (cast (xor A, B))
2337 if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
2338 if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
2339 if (Op0C->getOpcode() == Op1C->getOpcode()) { // same cast kind?
2340 const Type *SrcTy = Op0C->getOperand(0)->getType();
2341 if (SrcTy == Op1C->getOperand(0)->getType() && SrcTy->isInteger() &&
2342 // Only do this if the casts both really cause code to be generated.
2343 ValueRequiresCast(Op0C->getOpcode(), Op0C->getOperand(0),
2345 ValueRequiresCast(Op1C->getOpcode(), Op1C->getOperand(0),
2347 Value *NewOp = Builder->CreateXor(Op0C->getOperand(0),
2348 Op1C->getOperand(0), I.getName());
2349 return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
2354 return Changed ? &I : 0;
2358 Instruction *InstCombiner::visitShl(BinaryOperator &I) {
2359 return commonShiftTransforms(I);
2362 Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
2363 return commonShiftTransforms(I);
2366 Instruction *InstCombiner::visitAShr(BinaryOperator &I) {
2367   if (Instruction *R = commonShiftTransforms(I)) return R;
2370 Value *Op0 = I.getOperand(0);
2372   // ashr int -1, X = -1 (for any arithmetic right shift of ~0)
2373 if (ConstantInt *CSI = dyn_cast<ConstantInt>(Op0))
2374 if (CSI->isAllOnesValue())
2375 return ReplaceInstUsesWith(I, CSI);
2377 // See if we can turn a signed shr into an unsigned shr.
2378 if (MaskedValueIsZero(Op0,
2379 APInt::getSignBit(I.getType()->getScalarSizeInBits())))
2380 return BinaryOperator::CreateLShr(Op0, I.getOperand(1));
2382 // Arithmetic shifting an all-sign-bit value is a no-op.
2383 unsigned NumSignBits = ComputeNumSignBits(Op0);
2384 if (NumSignBits == Op0->getType()->getScalarSizeInBits())
2385 return ReplaceInstUsesWith(I, Op0);
2390 Instruction *InstCombiner::commonShiftTransforms(BinaryOperator &I) {
2391 assert(I.getOperand(1)->getType() == I.getOperand(0)->getType());
2392 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2394 // shl X, 0 == X and shr X, 0 == X
2395 // shl 0, X == 0 and shr 0, X == 0
2396 if (Op1 == Constant::getNullValue(Op1->getType()) ||
2397 Op0 == Constant::getNullValue(Op0->getType()))
2398 return ReplaceInstUsesWith(I, Op0);
2400 if (isa<UndefValue>(Op0)) {
2401 if (I.getOpcode() == Instruction::AShr) // undef >>s X -> undef
2402 return ReplaceInstUsesWith(I, Op0);
2403 else // undef << X -> 0, undef >>u X -> 0
2404 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
2406 if (isa<UndefValue>(Op1)) {
2407 if (I.getOpcode() == Instruction::AShr) // X >>s undef -> X
2408 return ReplaceInstUsesWith(I, Op0);
2409 else // X << undef, X >>u undef -> 0
2410 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
2413 // See if we can fold away this shift.
2414 if (SimplifyDemandedInstructionBits(I))
2417 // Try to fold constant and into select arguments.
2418 if (isa<Constant>(Op0))
2419 if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
2420 if (Instruction *R = FoldOpIntoSelect(I, SI))
2423 if (ConstantInt *CUI = dyn_cast<ConstantInt>(Op1))
2424 if (Instruction *Res = FoldShiftByConstant(Op0, CUI, I))
2429 Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
2430 BinaryOperator &I) {
2431 bool isLeftShift = I.getOpcode() == Instruction::Shl;
2433 // See if we can simplify any instructions used by the instruction whose sole
2434 // purpose is to compute bits we don't care about.
2435 uint32_t TypeBits = Op0->getType()->getScalarSizeInBits();
2437   // shl i32 X, 32 = 0 and srl i8 Y, 9 = 0, ... just don't eliminate the ashr in that case; clamp its shift amount to TypeBits-1 instead (shl/lshr fold to zero).
2440 if (Op1->uge(TypeBits)) {
2441 if (I.getOpcode() != Instruction::AShr)
2442 return ReplaceInstUsesWith(I, Constant::getNullValue(Op0->getType()));
2444 I.setOperand(1, ConstantInt::get(I.getType(), TypeBits-1));
2449 // ((X*C1) << C2) == (X * (C1 << C2))
2450 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Op0))
2451 if (BO->getOpcode() == Instruction::Mul && isLeftShift)
2452 if (Constant *BOOp = dyn_cast<Constant>(BO->getOperand(1)))
2453 return BinaryOperator::CreateMul(BO->getOperand(0),
2454 ConstantExpr::getShl(BOOp, Op1));
2456 // Try to fold constant and into select arguments.
2457 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
2458 if (Instruction *R = FoldOpIntoSelect(I, SI))
2460 if (isa<PHINode>(Op0))
2461 if (Instruction *NV = FoldOpIntoPhi(I))
2464 // Fold shift2(trunc(shift1(x,c1)), c2) -> trunc(shift2(shift1(x,c1),c2))
2465 if (TruncInst *TI = dyn_cast<TruncInst>(Op0)) {
2466 Instruction *TrOp = dyn_cast<Instruction>(TI->getOperand(0));
2467 // If 'shift2' is an ashr, we would have to get the sign bit into a funny
2468 // place. Don't try to do this transformation in this case. Also, we
2469 // require that the input operand is a shift-by-constant so that we have
2470 // confidence that the shifts will get folded together. We could do this
2471 // xform in more cases, but it is unlikely to be profitable.
2472 if (TrOp && I.isLogicalShift() && TrOp->isShift() &&
2473 isa<ConstantInt>(TrOp->getOperand(1))) {
2474 // Okay, we'll do this xform. Make the shift of shift.
2475 Constant *ShAmt = ConstantExpr::getZExt(Op1, TrOp->getType());
2476 // (shift2 (shift1 & 0x00FF), c2)
2477 Value *NSh = Builder->CreateBinOp(I.getOpcode(), TrOp, ShAmt,I.getName());
2479 // For logical shifts, the truncation has the effect of making the high
2480 // part of the register be zeros. Emulate this by inserting an AND to
2481 // clear the top bits as needed. This 'and' will usually be zapped by
2482 // other xforms later if dead.
2483 unsigned SrcSize = TrOp->getType()->getScalarSizeInBits();
2484 unsigned DstSize = TI->getType()->getScalarSizeInBits();
2485 APInt MaskV(APInt::getLowBitsSet(SrcSize, DstSize));
2487 // The mask we constructed says what the trunc would do if occurring
2488 // between the shifts. We want to know the effect *after* the second
2489 // shift. We know that it is a logical shift by a constant, so adjust the
2490 // mask as appropriate.
2491 if (I.getOpcode() == Instruction::Shl)
2492 MaskV <<= Op1->getZExtValue();
2494 assert(I.getOpcode() == Instruction::LShr && "Unknown logical shift");
2495 MaskV = MaskV.lshr(Op1->getZExtValue());
2499 Value *And = Builder->CreateAnd(NSh,
2500                                       ConstantInt::get(I.getContext(), MaskV));
2503 // Return the value truncated to the interesting size.
2504 return new TruncInst(And, I.getType());
2508 if (Op0->hasOneUse()) {
2509 if (BinaryOperator *Op0BO = dyn_cast<BinaryOperator>(Op0)) {
2510 // Turn ((X >> C) + Y) << C -> (X + (Y << C)) & (~0 << C)
2513 switch (Op0BO->getOpcode()) {
2515 case Instruction::Add:
2516 case Instruction::And:
2517 case Instruction::Or:
2518 case Instruction::Xor: {
2519 // These operators commute.
2520 // Turn (Y + (X >> C)) << C -> (X + (Y << C)) & (~0 << C)
2521 if (isLeftShift && Op0BO->getOperand(1)->hasOneUse() &&
2522 match(Op0BO->getOperand(1), m_Shr(m_Value(V1),
2523 m_Specific(Op1)))) {
2524 Value *YS = // (Y << C)
2525 Builder->CreateShl(Op0BO->getOperand(0), Op1, Op0BO->getName());
2527 Value *X = Builder->CreateBinOp(Op0BO->getOpcode(), YS, V1,
2528 Op0BO->getOperand(1)->getName());
2529 uint32_t Op1Val = Op1->getLimitedValue(TypeBits);
2530 return BinaryOperator::CreateAnd(X, ConstantInt::get(I.getContext(),
2531 APInt::getHighBitsSet(TypeBits, TypeBits-Op1Val)));
2534 // Turn (Y + ((X >> C) & CC)) << C -> ((X & (CC << C)) + (Y << C))
2535 Value *Op0BOOp1 = Op0BO->getOperand(1);
2536 if (isLeftShift && Op0BOOp1->hasOneUse() &&
2538             match(Op0BOOp1, m_And(m_Shr(m_Value(V1), m_Specific(Op1)),
2539 m_ConstantInt(CC))) &&
2540 cast<BinaryOperator>(Op0BOOp1)->getOperand(0)->hasOneUse()) {
2541 Value *YS = // (Y << C)
2542             Builder->CreateShl(Op0BO->getOperand(0), Op1, Op0BO->getName());
2545 Value *XM = Builder->CreateAnd(V1, ConstantExpr::getShl(CC, Op1),
2546 V1->getName()+".mask");
2547 return BinaryOperator::Create(Op0BO->getOpcode(), YS, XM);
2552 case Instruction::Sub: {
2553         // Turn ((X >> C) - Y) << C -> (X - (Y << C)) & (~0 << C)
2554 if (isLeftShift && Op0BO->getOperand(0)->hasOneUse() &&
2555 match(Op0BO->getOperand(0), m_Shr(m_Value(V1),
2556 m_Specific(Op1)))) {
2557 Value *YS = // (Y << C)
2558 Builder->CreateShl(Op0BO->getOperand(1), Op1, Op0BO->getName());
2560 Value *X = Builder->CreateBinOp(Op0BO->getOpcode(), V1, YS,
2561 Op0BO->getOperand(0)->getName());
2562 uint32_t Op1Val = Op1->getLimitedValue(TypeBits);
2563 return BinaryOperator::CreateAnd(X, ConstantInt::get(I.getContext(),
2564 APInt::getHighBitsSet(TypeBits, TypeBits-Op1Val)));
2567         // Turn (((X >> C)&CC) - Y) << C -> (X & (CC << C)) - (Y << C)
2568 if (isLeftShift && Op0BO->getOperand(0)->hasOneUse() &&
2569 match(Op0BO->getOperand(0),
2570 m_And(m_Shr(m_Value(V1), m_Value(V2)),
2571 m_ConstantInt(CC))) && V2 == Op1 &&
2572 cast<BinaryOperator>(Op0BO->getOperand(0))
2573 ->getOperand(0)->hasOneUse()) {
2574 Value *YS = // (Y << C)
2575 Builder->CreateShl(Op0BO->getOperand(1), Op1, Op0BO->getName());
2577 Value *XM = Builder->CreateAnd(V1, ConstantExpr::getShl(CC, Op1),
2578 V1->getName()+".mask");
2580 return BinaryOperator::Create(Op0BO->getOpcode(), XM, YS);
2588       // If the operand is a bitwise operator with a constant RHS, and the
2589 // shift is the only use, we can pull it out of the shift.
2590 if (ConstantInt *Op0C = dyn_cast<ConstantInt>(Op0BO->getOperand(1))) {
2591 bool isValid = true; // Valid only for And, Or, Xor
2592 bool highBitSet = false; // Transform if high bit of constant set?
2594 switch (Op0BO->getOpcode()) {
2595 default: isValid = false; break; // Do not perform transform!
2596 case Instruction::Add:
2597 isValid = isLeftShift;
2599 case Instruction::Or:
2600 case Instruction::Xor:
2603 case Instruction::And:
2608 // If this is a signed shift right, and the high bit is modified
2609 // by the logical operation, do not perform the transformation.
2610 // The highBitSet boolean indicates the value of the high bit of
2611         // the constant which would cause it to be modified for this operation.
2614 if (isValid && I.getOpcode() == Instruction::AShr)
2615 isValid = Op0C->getValue()[TypeBits-1] == highBitSet;
2618 Constant *NewRHS = ConstantExpr::get(I.getOpcode(), Op0C, Op1);
2621           Value *NewShift = Builder->CreateBinOp(I.getOpcode(), Op0BO->getOperand(0), Op1);
2622 NewShift->takeName(Op0BO);
2624           return BinaryOperator::Create(Op0BO->getOpcode(), NewShift, NewRHS);
2631 // Find out if this is a shift of a shift by a constant.
2632 BinaryOperator *ShiftOp = dyn_cast<BinaryOperator>(Op0);
2633   if (ShiftOp && !ShiftOp->isShift()) ShiftOp = 0;
2636 if (ShiftOp && isa<ConstantInt>(ShiftOp->getOperand(1))) {
2637 ConstantInt *ShiftAmt1C = cast<ConstantInt>(ShiftOp->getOperand(1));
2638 uint32_t ShiftAmt1 = ShiftAmt1C->getLimitedValue(TypeBits);
2639 uint32_t ShiftAmt2 = Op1->getLimitedValue(TypeBits);
2640 assert(ShiftAmt2 != 0 && "Should have been simplified earlier");
2641 if (ShiftAmt1 == 0) return 0; // Will be simplified in the future.
2642 Value *X = ShiftOp->getOperand(0);
2644 uint32_t AmtSum = ShiftAmt1+ShiftAmt2; // Fold into one big shift.
2646 const IntegerType *Ty = cast<IntegerType>(I.getType());
2648 // Check for (X << c1) << c2 and (X >> c1) >> c2
2649 if (I.getOpcode() == ShiftOp->getOpcode()) {
2651       // If this is an oversized composite shift, then unsigned shifts get 0 and ashr saturates.
2652 if (AmtSum >= TypeBits) {
2653 if (I.getOpcode() != Instruction::AShr)
2654 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
2655 AmtSum = TypeBits-1; // Saturate to 31 for i32 ashr.
2658 return BinaryOperator::Create(I.getOpcode(), X,
2659 ConstantInt::get(Ty, AmtSum));
2662 if (ShiftOp->getOpcode() == Instruction::LShr &&
2663 I.getOpcode() == Instruction::AShr) {
2664 if (AmtSum >= TypeBits)
2665 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
2667 // ((X >>u C1) >>s C2) -> (X >>u (C1+C2)) since C1 != 0.
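// A nonzero lshr clears the sign bit, so the value the outer ashr sees is
// non-negative and the ashr behaves exactly like an lshr; the two shift
// amounts can then simply be added.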
2668 return BinaryOperator::CreateLShr(X, ConstantInt::get(Ty, AmtSum));
2671 if (ShiftOp->getOpcode() == Instruction::AShr &&
2672 I.getOpcode() == Instruction::LShr) {
2673 // ((X >>s C1) >>u C2) -> ((X >>s (C1+C2)) & mask) since C1 != 0.
2674 if (AmtSum >= TypeBits)
2675 AmtSum = TypeBits-1;
2677 Value *Shift = Builder->CreateAShr(X, ConstantInt::get(Ty, AmtSum));
2679 APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt2));
2680 return BinaryOperator::CreateAnd(Shift,
2681 ConstantInt::get(I.getContext(), Mask));
2684 // Okay, if we get here, one shift must be left, and the other shift must be
2685 // right. See if the amounts are equal.
2686 if (ShiftAmt1 == ShiftAmt2) {
2687 // If we have ((X >>? C) << C), turn this into X & (-1 << C).
2688 if (I.getOpcode() == Instruction::Shl) {
2689 APInt Mask(APInt::getHighBitsSet(TypeBits, TypeBits - ShiftAmt1));
2690 return BinaryOperator::CreateAnd(X,
2691 ConstantInt::get(I.getContext(),Mask));
2693 // If we have ((X << C) >>u C), turn this into X & (-1 >>u C).
2694 if (I.getOpcode() == Instruction::LShr) {
2695 APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt1));
2696 return BinaryOperator::CreateAnd(X,
2697 ConstantInt::get(I.getContext(), Mask));
2699 // We can simplify ((X << C) >>s C) into a trunc + sext.
2700 // NOTE: we could do this for any C, but that would make 'unusual' integer
2701       // types. For now, just stick to ones well-supported by the code generators.
2703 const Type *SExtType = 0;
2704 switch (Ty->getBitWidth() - ShiftAmt1) {
2711 SExtType = IntegerType::get(I.getContext(),
2712 Ty->getBitWidth() - ShiftAmt1);
2717 return new SExtInst(Builder->CreateTrunc(X, SExtType, "sext"), Ty);
2718 // Otherwise, we can't handle it yet.
2719 } else if (ShiftAmt1 < ShiftAmt2) {
2720 uint32_t ShiftDiff = ShiftAmt2-ShiftAmt1;
2722 // (X >>? C1) << C2 --> X << (C2-C1) & (-1 << C2)
2723 if (I.getOpcode() == Instruction::Shl) {
2724 assert(ShiftOp->getOpcode() == Instruction::LShr ||
2725 ShiftOp->getOpcode() == Instruction::AShr);
2726 Value *Shift = Builder->CreateShl(X, ConstantInt::get(Ty, ShiftDiff));
2728 APInt Mask(APInt::getHighBitsSet(TypeBits, TypeBits - ShiftAmt2));
2729 return BinaryOperator::CreateAnd(Shift,
2730 ConstantInt::get(I.getContext(),Mask));
2733 // (X << C1) >>u C2 --> X >>u (C2-C1) & (-1 >> C2)
2734 if (I.getOpcode() == Instruction::LShr) {
2735 assert(ShiftOp->getOpcode() == Instruction::Shl);
2736 Value *Shift = Builder->CreateLShr(X, ConstantInt::get(Ty, ShiftDiff));
2738 APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt2));
2739 return BinaryOperator::CreateAnd(Shift,
2740 ConstantInt::get(I.getContext(),Mask));
2743 // We can't handle (X << C1) >>s C2, it shifts arbitrary bits in.
2745 assert(ShiftAmt2 < ShiftAmt1);
2746 uint32_t ShiftDiff = ShiftAmt1-ShiftAmt2;
2748 // (X >>? C1) << C2 --> X >>? (C1-C2) & (-1 << C2)
2749 if (I.getOpcode() == Instruction::Shl) {
2750 assert(ShiftOp->getOpcode() == Instruction::LShr ||
2751 ShiftOp->getOpcode() == Instruction::AShr);
2752 Value *Shift = Builder->CreateBinOp(ShiftOp->getOpcode(), X,
2753 ConstantInt::get(Ty, ShiftDiff));
2755 APInt Mask(APInt::getHighBitsSet(TypeBits, TypeBits - ShiftAmt2));
2756 return BinaryOperator::CreateAnd(Shift,
2757 ConstantInt::get(I.getContext(),Mask));
2760 // (X << C1) >>u C2 --> X << (C1-C2) & (-1 >> C2)
2761 if (I.getOpcode() == Instruction::LShr) {
2762 assert(ShiftOp->getOpcode() == Instruction::Shl);
2763 Value *Shift = Builder->CreateShl(X, ConstantInt::get(Ty, ShiftDiff));
2765 APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt2));
2766 return BinaryOperator::CreateAnd(Shift,
2767 ConstantInt::get(I.getContext(),Mask));
2770 // We can't handle (X << C1) >>a C2, it shifts arbitrary bits in.
2778 /// FindElementAtOffset - Given a type and a constant offset, determine whether
2779 /// or not there is a sequence of GEP indices into the type that will land us at
2780 /// the specified offset. If so, fill them into NewIndices and return the
2781 /// resultant element type, otherwise return null.
2782 const Type *InstCombiner::FindElementAtOffset(const Type *Ty, int64_t Offset,
2783 SmallVectorImpl<Value*> &NewIndices) {
2785 if (!Ty->isSized()) return 0;
2787 // Start with the index over the outer type. Note that the type size
2788 // might be zero (even if the offset isn't zero) if the indexed type
2789 // is something like [0 x {int, int}]
2790 const Type *IntPtrTy = TD->getIntPtrType(Ty->getContext());
2791 int64_t FirstIdx = 0;
2792 if (int64_t TySize = TD->getTypeAllocSize(Ty)) {
2793 FirstIdx = Offset/TySize;
2794 Offset -= FirstIdx*TySize;
2796 // Handle hosts where % returns negative instead of values [0..TySize).
2800 assert(Offset >= 0);
2802 assert((uint64_t)Offset < (uint64_t)TySize && "Out of range offset");
2805 NewIndices.push_back(ConstantInt::get(IntPtrTy, FirstIdx));
2807 // Index into the types. If we fail, set OrigBase to null.
2809 // Indexing into tail padding between struct/array elements.
2810 if (uint64_t(Offset*8) >= TD->getTypeSizeInBits(Ty))
2813 if (const StructType *STy = dyn_cast<StructType>(Ty)) {
2814 const StructLayout *SL = TD->getStructLayout(STy);
2815 assert(Offset < (int64_t)SL->getSizeInBytes() &&
2816 "Offset must stay within the indexed type");
2818 unsigned Elt = SL->getElementContainingOffset(Offset);
2819 NewIndices.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
2822 Offset -= SL->getElementOffset(Elt);
2823 Ty = STy->getElementType(Elt);
2824 } else if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
2825 uint64_t EltSize = TD->getTypeAllocSize(AT->getElementType());
2826 assert(EltSize && "Cannot index into a zero-sized array");
2827 NewIndices.push_back(ConstantInt::get(IntPtrTy,Offset/EltSize));
2829 Ty = AT->getElementType();
2831 // Otherwise, we can't index into the middle of this atomic type, bail.
2840 /// EnforceKnownAlignment - If the specified pointer points to an object that
2841 /// we control, modify the object's alignment to PrefAlign. This isn't
2842 /// often possible though. If alignment is important, a more reliable approach
2843 /// is to simply align all global variables and allocation instructions to
2844 /// their preferred alignment from the beginning.
2846 static unsigned EnforceKnownAlignment(Value *V,
2847 unsigned Align, unsigned PrefAlign) {
2849 User *U = dyn_cast<User>(V);
2850 if (!U) return Align;
2852 switch (Operator::getOpcode(U)) {
2854 case Instruction::BitCast:
2855 return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
2856 case Instruction::GetElementPtr: {
2857 // If all indexes are zero, it is just the alignment of the base pointer.
2858 bool AllZeroOperands = true;
2859 for (User::op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e; ++i)
2860 if (!isa<Constant>(*i) ||
2861 !cast<Constant>(*i)->isNullValue()) {
2862 AllZeroOperands = false;
2866 if (AllZeroOperands) {
2867 // Treat this like a bitcast.
2868 return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
2874 if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
2875     // If there is a large requested alignment and we can, bump up the alignment of the global.
2877 if (!GV->isDeclaration()) {
2878 if (GV->getAlignment() >= PrefAlign)
2879 Align = GV->getAlignment();
2881 GV->setAlignment(PrefAlign);
2885 } else if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
2886 // If there is a requested alignment and if this is an alloca, round up.
2887 if (AI->getAlignment() >= PrefAlign)
2888 Align = AI->getAlignment();
2890 AI->setAlignment(PrefAlign);
2898 /// GetOrEnforceKnownAlignment - If the specified pointer has an alignment that
2899 /// we can determine, return it, otherwise return 0. If PrefAlign is specified,
2900 /// and it is more than the alignment of the ultimate object, see if we can
2901 /// increase the alignment of the ultimate object, making this check succeed.
2902 unsigned InstCombiner::GetOrEnforceKnownAlignment(Value *V,
2903 unsigned PrefAlign) {
2904 unsigned BitWidth = TD ? TD->getTypeSizeInBits(V->getType()) :
2905 sizeof(PrefAlign) * CHAR_BIT;
2906 APInt Mask = APInt::getAllOnesValue(BitWidth);
2907 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
2908 ComputeMaskedBits(V, Mask, KnownZero, KnownOne);
2909 unsigned TrailZ = KnownZero.countTrailingOnes();
2910 unsigned Align = 1u << std::min(BitWidth - 1, TrailZ);
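// Each trailing bit known to be zero doubles the provable alignment; the min
// with BitWidth-1 avoids an undefined shift by the full bit width when the
// value is known to be zero.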
2912 if (PrefAlign > Align)
2913 Align = EnforceKnownAlignment(V, Align, PrefAlign);
2915   return Align;  // We don't need to make any further adjustment.
2919 Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
2920 unsigned DstAlign = GetOrEnforceKnownAlignment(MI->getOperand(1));
2921 unsigned SrcAlign = GetOrEnforceKnownAlignment(MI->getOperand(2));
2922 unsigned MinAlign = std::min(DstAlign, SrcAlign);
2923 unsigned CopyAlign = MI->getAlignment();
2925 if (CopyAlign < MinAlign) {
2926     MI->setAlignment(ConstantInt::get(MI->getAlignmentType(), MinAlign, false));
2931   // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with a load/store pair.
2933 ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getOperand(3));
2934 if (MemOpLength == 0) return 0;
2936 // Source and destination pointer types are always "i8*" for intrinsic. See
2937 // if the size is something we can handle with a single primitive load/store.
2938   // A single load+store correctly handles overlapping memory in the memmove case.
2940 unsigned Size = MemOpLength->getZExtValue();
2941 if (Size == 0) return MI; // Delete this mem transfer.
2943 if (Size > 8 || (Size&(Size-1)))
2944 return 0; // If not 1/2/4/8 bytes, exit.
2946 // Use an integer load+store unless we can find something better.
2948   const Type *NewPtrTy = PointerType::getUnqual(IntegerType::get(MI->getContext(), Size<<3));
2950 // Memcpy forces the use of i8* for the source and destination. That means
2951 // that if you're using memcpy to move one double around, you'll get a cast
2952 // from double* to i8*. We'd much rather use a double load+store rather than
2953 // an i64 load+store, here because this improves the odds that the source or
2954 // dest address will be promotable. See if we can find a better type than the
2955 // integer datatype.
2956 if (Value *Op = getBitCastOperand(MI->getOperand(1))) {
2957 const Type *SrcETy = cast<PointerType>(Op->getType())->getElementType();
2958 if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
2959 // The SrcETy might be something like {{{double}}} or [1 x double]. Rip
2960 // down through these levels if so.
2961 while (!SrcETy->isSingleValueType()) {
2962 if (const StructType *STy = dyn_cast<StructType>(SrcETy)) {
2963 if (STy->getNumElements() == 1)
2964 SrcETy = STy->getElementType(0);
2967 } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
2968 if (ATy->getNumElements() == 1)
2969 SrcETy = ATy->getElementType();
2976 if (SrcETy->isSingleValueType())
2977 NewPtrTy = PointerType::getUnqual(SrcETy);
2982   // If the memcpy/memmove provides better alignment info than we can infer, use it.
2984 SrcAlign = std::max(SrcAlign, CopyAlign);
2985 DstAlign = std::max(DstAlign, CopyAlign);
2987 Value *Src = Builder->CreateBitCast(MI->getOperand(2), NewPtrTy);
2988 Value *Dest = Builder->CreateBitCast(MI->getOperand(1), NewPtrTy);
2989 Instruction *L = new LoadInst(Src, "tmp", false, SrcAlign);
2990 InsertNewInstBefore(L, *MI);
2991 InsertNewInstBefore(new StoreInst(L, Dest, false, DstAlign), *MI);
2993 // Set the size of the copy to 0, it will be deleted on the next iteration.
2994 MI->setOperand(3, Constant::getNullValue(MemOpLength->getType()));
2998 Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
2999 unsigned Alignment = GetOrEnforceKnownAlignment(MI->getDest());
3000 if (MI->getAlignment() < Alignment) {
3001     MI->setAlignment(ConstantInt::get(MI->getAlignmentType(), Alignment, false));
3006 // Extract the length and alignment and fill if they are constant.
3007 ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
3008 ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
3009 if (!LenC || !FillC || FillC->getType() != Type::getInt8Ty(MI->getContext()))
3011 uint64_t Len = LenC->getZExtValue();
3012 Alignment = MI->getAlignment();
3014 // If the length is zero, this is a no-op
3015 if (Len == 0) return MI; // memset(d,c,0,a) -> noop
3017 // memset(s,c,n) -> store s, c (for n=1,2,4,8)
3018 if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
3019 const Type *ITy = IntegerType::get(MI->getContext(), Len*8); // n=1 -> i8.
3021 Value *Dest = MI->getDest();
3022 Dest = Builder->CreateBitCast(Dest, PointerType::getUnqual(ITy));
3024     // For memset, alignment 0 means the same as alignment 1, but that is not true for store, so normalize it.
3025 if (Alignment == 0) Alignment = 1;
3027 // Extract the fill value and store.
3028 uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
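// Multiplying the byte value by 0x0101010101010101 replicates it into every
// byte of a 64-bit pattern; ConstantInt::get then truncates that pattern to
// the Len-byte integer type ITy being stored.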
3029 InsertNewInstBefore(new StoreInst(ConstantInt::get(ITy, Fill),
3030 Dest, false, Alignment), *MI);
3032 // Set the size of the copy to 0, it will be deleted on the next iteration.
3033 MI->setLength(Constant::getNullValue(LenC->getType()));
3041 /// visitCallInst - CallInst simplification. This mostly only handles folding
3042 /// of intrinsic instructions. For normal calls, it allows visitCallSite to do
3043 /// the heavy lifting.
3045 Instruction *InstCombiner::visitCallInst(CallInst &CI) {
3046 if (isFreeCall(&CI))
3047 return visitFree(CI);
3049   // If the caller function is nounwind, mark the call as nounwind, even if the callee isn't.
3051 if (CI.getParent()->getParent()->doesNotThrow() &&
3052 !CI.doesNotThrow()) {
3053 CI.setDoesNotThrow();
3057 IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
3058 if (!II) return visitCallSite(&CI);
3060   // Intrinsics cannot occur in an invoke, so handle them here instead of in visitCallSite.
3062 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
3063 bool Changed = false;
3065 // memmove/cpy/set of zero bytes is a noop.
3066 if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
3067 if (NumBytes->isNullValue()) return EraseInstFromFunction(CI);
3069 if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
3070 if (CI->getZExtValue() == 1) {
3071 // Replace the instruction with just byte operations. We would
3072 // transform other cases to loads/stores, but we don't know if
3073 // alignment is sufficient.
3077 // If we have a memmove and the source operation is a constant global,
3078 // then the source and dest pointers can't alias, so we can change this
3079 // into a call to memcpy.
3080 if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
3081 if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
3082 if (GVSrc->isConstant()) {
3083 Module *M = CI.getParent()->getParent()->getParent();
3084 Intrinsic::ID MemCpyID = Intrinsic::memcpy;
3086 Tys[0] = CI.getOperand(3)->getType();
3088 Intrinsic::getDeclaration(M, MemCpyID, Tys, 1));
3093 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
3094 // memmove(x,x,size) -> noop.
3095 if (MTI->getSource() == MTI->getDest())
3096 return EraseInstFromFunction(CI);
3099 // If we can determine a pointer alignment that is bigger than currently
3100 // set, update the alignment.
3101 if (isa<MemTransferInst>(MI)) {
3102 if (Instruction *I = SimplifyMemTransfer(MI))
3104 } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
3105 if (Instruction *I = SimplifyMemSet(MSI))
3109 if (Changed) return II;
3112 switch (II->getIntrinsicID()) {
3114 case Intrinsic::bswap:
3115 // bswap(bswap(x)) -> x
3116 if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getOperand(1)))
3117 if (Operand->getIntrinsicID() == Intrinsic::bswap)
3118 return ReplaceInstUsesWith(CI, Operand->getOperand(1));
3120 // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
3121 if (TruncInst *TI = dyn_cast<TruncInst>(II->getOperand(1))) {
3122 if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(TI->getOperand(0)))
3123 if (Operand->getIntrinsicID() == Intrinsic::bswap) {
3124 unsigned C = Operand->getType()->getPrimitiveSizeInBits() -
3125 TI->getType()->getPrimitiveSizeInBits();
3126 Value *CV = ConstantInt::get(Operand->getType(), C);
3127 Value *V = Builder->CreateLShr(Operand->getOperand(1), CV);
3128 return new TruncInst(V, TI->getType());
3133 case Intrinsic::powi:
3134 if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getOperand(2))) {
3135 // powi(x, 0) -> 1.0
3136 if (Power->isZero())
3137 return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
3140       if (Power->isOne()) return ReplaceInstUsesWith(CI, II->getOperand(1)); // powi(x, 1) -> x
3141 // powi(x, -1) -> 1/x
3142 if (Power->isAllOnesValue())
3143         return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0), II->getOperand(1));
3147 case Intrinsic::cttz: {
3148 // If all bits below the first known one are known zero,
3149 // this value is constant.
3150 const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
3151 uint32_t BitWidth = IT->getBitWidth();
3152 APInt KnownZero(BitWidth, 0);
3153 APInt KnownOne(BitWidth, 0);
3154 ComputeMaskedBits(II->getOperand(1), APInt::getAllOnesValue(BitWidth),
3155 KnownZero, KnownOne);
3156 unsigned TrailingZeros = KnownOne.countTrailingZeros();
3157 APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
3158 if ((Mask & KnownZero) == Mask)
3159 return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
3160 APInt(BitWidth, TrailingZeros)));
3164 case Intrinsic::ctlz: {
3165 // If all bits above the first known one are known zero,
3166 // this value is constant.
3167 const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
3168 uint32_t BitWidth = IT->getBitWidth();
3169 APInt KnownZero(BitWidth, 0);
3170 APInt KnownOne(BitWidth, 0);
3171 ComputeMaskedBits(II->getOperand(1), APInt::getAllOnesValue(BitWidth),
3172 KnownZero, KnownOne);
3173 unsigned LeadingZeros = KnownOne.countLeadingZeros();
3174 APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
3175 if ((Mask & KnownZero) == Mask)
3176 return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
3177 APInt(BitWidth, LeadingZeros)));
3181 case Intrinsic::uadd_with_overflow: {
3182 Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
3183 const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
3184 uint32_t BitWidth = IT->getBitWidth();
3185 APInt Mask = APInt::getSignBit(BitWidth);
3186 APInt LHSKnownZero(BitWidth, 0);
3187 APInt LHSKnownOne(BitWidth, 0);
3188 ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
3189 bool LHSKnownNegative = LHSKnownOne[BitWidth - 1];
3190 bool LHSKnownPositive = LHSKnownZero[BitWidth - 1];
3192 if (LHSKnownNegative || LHSKnownPositive) {
3193 APInt RHSKnownZero(BitWidth, 0);
3194 APInt RHSKnownOne(BitWidth, 0);
3195 ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);
3196 bool RHSKnownNegative = RHSKnownOne[BitWidth - 1];
3197 bool RHSKnownPositive = RHSKnownZero[BitWidth - 1];
3198 if (LHSKnownNegative && RHSKnownNegative) {
3199 // The sign bit is set in both cases: this MUST overflow.
3200 // Create a simple add instruction, and insert it into the struct.
3201 Instruction *Add = BinaryOperator::CreateAdd(LHS, RHS, "", &CI);
3202 Worklist.Add(Add);
3203 Constant *V[] = {
3204 UndefValue::get(LHS->getType()),ConstantInt::getTrue(II->getContext())
3205 };
3206 Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
3207 return InsertValueInst::Create(Struct, Add, 0);
3210 if (LHSKnownPositive && RHSKnownPositive) {
3211 // The sign bit is clear in both cases: this CANNOT overflow.
3212 // Create a simple add instruction, and insert it into the struct.
3213 Instruction *Add = BinaryOperator::CreateNUWAdd(LHS, RHS, "", &CI);
3214 Worklist.Add(Add);
3215 Constant *V[] = {
3216 UndefValue::get(LHS->getType()),
3217 ConstantInt::getFalse(II->getContext())
3218 };
3219 Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
3220 return InsertValueInst::Create(Struct, Add, 0);
3224 // FALL THROUGH uadd into sadd
3225 case Intrinsic::sadd_with_overflow:
3226 // Canonicalize constants into the RHS.
3227 if (isa<Constant>(II->getOperand(1)) &&
3228 !isa<Constant>(II->getOperand(2))) {
3229 Value *LHS = II->getOperand(1);
3230 II->setOperand(1, II->getOperand(2));
3231 II->setOperand(2, LHS);
3235 // X + undef -> undef
3236 if (isa<UndefValue>(II->getOperand(2)))
3237 return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));
3239 if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getOperand(2))) {
3240 // X + 0 -> {X, false}
3241 if (RHS->isZero()) {
3242 Constant *V[] = {
3243 UndefValue::get(II->getOperand(0)->getType()),
3244 ConstantInt::getFalse(II->getContext())
3245 };
3246 Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
3247 return InsertValueInst::Create(Struct, II->getOperand(1), 0);
3251 case Intrinsic::usub_with_overflow:
3252 case Intrinsic::ssub_with_overflow:
3253 // undef - X -> undef
3254 // X - undef -> undef
3255 if (isa<UndefValue>(II->getOperand(1)) ||
3256 isa<UndefValue>(II->getOperand(2)))
3257 return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));
3259 if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getOperand(2))) {
3260 // X - 0 -> {X, false}
3261 if (RHS->isZero()) {
3262 Constant *V[] = {
3263 UndefValue::get(II->getOperand(1)->getType()),
3264 ConstantInt::getFalse(II->getContext())
3265 };
3266 Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
3267 return InsertValueInst::Create(Struct, II->getOperand(1), 0);
3271 case Intrinsic::umul_with_overflow:
3272 case Intrinsic::smul_with_overflow:
3273 // Canonicalize constants into the RHS.
3274 if (isa<Constant>(II->getOperand(1)) &&
3275 !isa<Constant>(II->getOperand(2))) {
3276 Value *LHS = II->getOperand(1);
3277 II->setOperand(1, II->getOperand(2));
3278 II->setOperand(2, LHS);
3282 // X * undef -> undef
3283 if (isa<UndefValue>(II->getOperand(2)))
3284 return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));
3286 if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getOperand(2))) {
3287 // X*0 -> {0, false}
3288 if (RHSI->isZero())
3289 return ReplaceInstUsesWith(CI, Constant::getNullValue(II->getType()));
3291 // X * 1 -> {X, false}
3292 if (RHSI->equalsInt(1)) {
3293 Constant *V[] = {
3294 UndefValue::get(II->getOperand(1)->getType()),
3295 ConstantInt::getFalse(II->getContext())
3296 };
3297 Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
3298 return InsertValueInst::Create(Struct, II->getOperand(1), 0);
3302 case Intrinsic::ppc_altivec_lvx:
3303 case Intrinsic::ppc_altivec_lvxl:
3304 case Intrinsic::x86_sse_loadu_ps:
3305 case Intrinsic::x86_sse2_loadu_pd:
3306 case Intrinsic::x86_sse2_loadu_dq:
3307 // Turn PPC lvx -> load if the pointer is known aligned.
3308 // Turn X86 loadups -> load if the pointer is known aligned.
3309 if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) {
3310 Value *Ptr = Builder->CreateBitCast(II->getOperand(1),
3311 PointerType::getUnqual(II->getType()));
3312 return new LoadInst(Ptr);
3315 case Intrinsic::ppc_altivec_stvx:
3316 case Intrinsic::ppc_altivec_stvxl:
3317 // Turn stvx -> store if the pointer is known aligned.
3318 if (GetOrEnforceKnownAlignment(II->getOperand(2), 16) >= 16) {
3319 const Type *OpPtrTy =
3320 PointerType::getUnqual(II->getOperand(1)->getType());
3321 Value *Ptr = Builder->CreateBitCast(II->getOperand(2), OpPtrTy);
3322 return new StoreInst(II->getOperand(1), Ptr);
3325 case Intrinsic::x86_sse_storeu_ps:
3326 case Intrinsic::x86_sse2_storeu_pd:
3327 case Intrinsic::x86_sse2_storeu_dq:
3328 // Turn X86 storeu -> store if the pointer is known aligned.
3329 if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) {
3330 const Type *OpPtrTy =
3331 PointerType::getUnqual(II->getOperand(2)->getType());
3332 Value *Ptr = Builder->CreateBitCast(II->getOperand(1), OpPtrTy);
3333 return new StoreInst(II->getOperand(2), Ptr);
3337 case Intrinsic::x86_sse_cvttss2si: {
3338 // These intrinsics only demand the 0th element of their input vector. If
3339 // we can simplify the input based on that, do so now.
3340 unsigned VWidth =
3341 cast<VectorType>(II->getOperand(1)->getType())->getNumElements();
3342 APInt DemandedElts(VWidth, 1);
3343 APInt UndefElts(VWidth, 0);
3344 if (Value *V = SimplifyDemandedVectorElts(II->getOperand(1), DemandedElts,
3345 UndefElts)) {
3346 II->setOperand(1, V);
3352 case Intrinsic::ppc_altivec_vperm:
3353 // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
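// Each mask byte selects one of the 32 input bytes: values 0-15 pick a byte
// from the first vector and 16-31 pick a byte from the second (only the low
// five bits of each mask element are significant, matching the hardware).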
3354 if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getOperand(3))) {
3355 assert(Mask->getNumOperands() == 16 && "Bad type for intrinsic!");
3357 // Check that all of the elements are integer constants or undefs.
3358 bool AllEltsOk = true;
3359 for (unsigned i = 0; i != 16; ++i) {
3360 if (!isa<ConstantInt>(Mask->getOperand(i)) &&
3361 !isa<UndefValue>(Mask->getOperand(i))) {
3362 AllEltsOk = false;
3363 break;
3364 }
3365 }
3367 if (AllEltsOk) {
3368 // Cast the input vectors to byte vectors.
3369 Value *Op0 = Builder->CreateBitCast(II->getOperand(1), Mask->getType());
3370 Value *Op1 = Builder->CreateBitCast(II->getOperand(2), Mask->getType());
3371 Value *Result = UndefValue::get(Op0->getType());
3373 // Only extract each element once.
3374 Value *ExtractedElts[32];
3375 memset(ExtractedElts, 0, sizeof(ExtractedElts));
3377 for (unsigned i = 0; i != 16; ++i) {
3378 if (isa<UndefValue>(Mask->getOperand(i)))
3379 continue;
3380 unsigned Idx=cast<ConstantInt>(Mask->getOperand(i))->getZExtValue();
3381 Idx &= 31; // Match the hardware behavior.
3383 if (ExtractedElts[Idx] == 0) {
3384 ExtractedElts[Idx] =
3385 Builder->CreateExtractElement(Idx < 16 ? Op0 : Op1,
3386 ConstantInt::get(Type::getInt32Ty(II->getContext()),
3387 Idx&15, false), "tmp");
3390 // Insert this value into the result vector.
3391 Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
3392 ConstantInt::get(Type::getInt32Ty(II->getContext()),
3393 i, false), "tmp");
3394 }
3395 return CastInst::Create(Instruction::BitCast, Result, CI.getType());
3400 case Intrinsic::stackrestore: {
3401 // If the save is right next to the restore, remove the restore. This can
3402 // happen when variable allocas are DCE'd.
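// Illustrative IR for the pattern handled here:
//   %sp = call i8* @llvm.stacksave()
//   call void @llvm.stackrestore(i8* %sp)   ; restore of an immediate save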
3403 if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getOperand(1))) {
3404 if (SS->getIntrinsicID() == Intrinsic::stacksave) {
3405 BasicBlock::iterator BI = SS;
3406 if (&*++BI == II)
3407 return EraseInstFromFunction(CI);
3411 // Scan down this block to see if there is another stack restore in the
3412 // same block without an intervening call/alloca.
3413 BasicBlock::iterator BI = II;
3414 TerminatorInst *TI = II->getParent()->getTerminator();
3415 bool CannotRemove = false;
3416 for (++BI; &*BI != TI; ++BI) {
3417 if (isa<AllocaInst>(BI) || isMalloc(BI)) {
3418 CannotRemove = true;
3421 if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
3422 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
3423 // If there is a stackrestore below this one, remove this one.
3424 if (II->getIntrinsicID() == Intrinsic::stackrestore)
3425 return EraseInstFromFunction(CI);
3426 // Otherwise, ignore the intrinsic.
3427 } else {
3428 // If we found a non-intrinsic call, we can't remove the stack
3429 // restore.
3430 CannotRemove = true;
3431 break;
3436 // If the stack restore is in a return/unwind block and if there are no
3437 // allocas or calls between the restore and the return, nuke the restore.
3438 if (!CannotRemove && (isa<ReturnInst>(TI) || isa<UnwindInst>(TI)))
3439 return EraseInstFromFunction(CI);
3444 return visitCallSite(II);
3447 // InvokeInst simplification
3449 Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
3450 return visitCallSite(&II);
3453 /// isSafeToEliminateVarargsCast - If this cast does not affect the value
3454 /// passed through the varargs area, we can eliminate the use of the cast.
3455 static bool isSafeToEliminateVarargsCast(const CallSite CS,
3456 const CastInst * const CI,
3457 const TargetData * const TD,
3459 if (!CI->isLosslessCast())
3460 return false;
3462 // The size of ByVal arguments is derived from the type, so we
3463 // can't change to a type with a different size. If the size were
3464 // passed explicitly we could avoid this check.
3465 if (!CS.paramHasAttr(ix, Attribute::ByVal))
3466 return true;
3468 const Type* SrcTy =
3469 cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
3470 const Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
3471 if (!SrcTy->isSized() || !DstTy->isSized())
3472 return false;
3473 if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
3474 return false;
3475 return true;
3478 // visitCallSite - Improvements for call and invoke instructions.
3480 Instruction *InstCombiner::visitCallSite(CallSite CS) {
3481 bool Changed = false;
3483 // If the callee is a constexpr cast of a function, attempt to move the cast
3484 // to the arguments of the call/invoke.
3485 if (transformConstExprCastCall(CS)) return 0;
3487 Value *Callee = CS.getCalledValue();
3489 if (Function *CalleeF = dyn_cast<Function>(Callee))
3490 if (CalleeF->getCallingConv() != CS.getCallingConv()) {
3491 Instruction *OldCall = CS.getInstruction();
3492 // If the call and callee calling conventions don't match, this call must
3493 // be unreachable, as the call is undefined.
3494 new StoreInst(ConstantInt::getTrue(Callee->getContext()),
3495 UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
3497 // If OldCall does not return void then replaceAllUsesWith undef.
3498 // This allows ValueHandlers and custom metadata to adjust themselves.
3499 if (!OldCall->getType()->isVoidTy())
3500 OldCall->replaceAllUsesWith(UndefValue::get(OldCall->getType()));
3501 if (isa<CallInst>(OldCall)) // Not worth removing an invoke here.
3502 return EraseInstFromFunction(*OldCall);
3506 if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
3507 // This instruction is not reachable, just remove it. We insert a store to
3508 // undef so that we know that this code is not reachable, despite the fact
3509 // that we can't modify the CFG here.
3510 new StoreInst(ConstantInt::getTrue(Callee->getContext()),
3511 UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
3512 CS.getInstruction());
3514 // If CS does not return void then replaceAllUsesWith undef.
3515 // This allows ValueHandlers and custom metadata to adjust themselves.
3516 if (!CS.getInstruction()->getType()->isVoidTy())
3517 CS.getInstruction()->
3518 replaceAllUsesWith(UndefValue::get(CS.getInstruction()->getType()));
3520 if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
3521 // Don't break the CFG, insert a dummy cond branch.
3522 BranchInst::Create(II->getNormalDest(), II->getUnwindDest(),
3523 ConstantInt::getTrue(Callee->getContext()), II);
3525 return EraseInstFromFunction(*CS.getInstruction());
3528 if (BitCastInst *BC = dyn_cast<BitCastInst>(Callee))
3529 if (IntrinsicInst *In = dyn_cast<IntrinsicInst>(BC->getOperand(0)))
3530 if (In->getIntrinsicID() == Intrinsic::init_trampoline)
3531 return transformCallThroughTrampoline(CS);
3533 const PointerType *PTy = cast<PointerType>(Callee->getType());
3534 const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
3535 if (FTy->isVarArg()) {
3536 int ix = FTy->getNumParams() + (isa<InvokeInst>(Callee) ? 3 : 1);
3537 // See if we can optimize any arguments passed through the varargs area of
3538 // the call.
3539 for (CallSite::arg_iterator I = CS.arg_begin()+FTy->getNumParams(),
3540 E = CS.arg_end(); I != E; ++I, ++ix) {
3541 CastInst *CI = dyn_cast<CastInst>(*I);
3542 if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
3543 *I = CI->getOperand(0);
3549 if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
3550 // Inline asm calls cannot throw - mark them 'nounwind'.
3551 CS.setDoesNotThrow();
3555 return Changed ? CS.getInstruction() : 0;
3558 // transformConstExprCastCall - If the callee is a constexpr cast of a function,
3559 // attempt to move the cast to the arguments of the call/invoke.
3561 bool InstCombiner::transformConstExprCastCall(CallSite CS) {
3562 if (!isa<ConstantExpr>(CS.getCalledValue())) return false;
3563 ConstantExpr *CE = cast<ConstantExpr>(CS.getCalledValue());
3564 if (CE->getOpcode() != Instruction::BitCast ||
3565 !isa<Function>(CE->getOperand(0)))
3567 Function *Callee = cast<Function>(CE->getOperand(0));
3568 Instruction *Caller = CS.getInstruction();
3569 const AttrListPtr &CallerPAL = CS.getAttributes();
3571 // Okay, this is a cast from a function to a different type. Unless doing so
3572 // would cause a type conversion of one of our arguments, change this call to
3573 // be a direct call with arguments casted to the appropriate types.
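// Illustrative example (roughly): a call such as
//   %r = call i32 bitcast (i8 (i32)* @f to i32 (i32)*)(i32 %x)
// becomes a direct call to @f, with the argument and return value cast as
// needed instead of casting the function pointer.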
3575 const FunctionType *FT = Callee->getFunctionType();
3576 const Type *OldRetTy = Caller->getType();
3577 const Type *NewRetTy = FT->getReturnType();
3579 if (isa<StructType>(NewRetTy))
3580 return false; // TODO: Handle multiple return values.
3582 // Check to see if we are changing the return type...
3583 if (OldRetTy != NewRetTy) {
3584 if (Callee->isDeclaration() &&
3585 // Conversion is ok if changing from one pointer type to another or from
3586 // a pointer to an integer of the same size.
3587 !((isa<PointerType>(OldRetTy) || !TD ||
3588 OldRetTy == TD->getIntPtrType(Caller->getContext())) &&
3589 (isa<PointerType>(NewRetTy) || !TD ||
3590 NewRetTy == TD->getIntPtrType(Caller->getContext()))))
3591 return false; // Cannot transform this return value.
3593 if (!Caller->use_empty() &&
3594 // void -> non-void is handled specially
3595 !NewRetTy->isVoidTy() && !CastInst::isCastable(NewRetTy, OldRetTy))
3596 return false; // Cannot transform this return value.
3598 if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
3599 Attributes RAttrs = CallerPAL.getRetAttributes();
3600 if (RAttrs & Attribute::typeIncompatible(NewRetTy))
3601 return false; // Attribute not compatible with transformed value.
3604 // If the callsite is an invoke instruction, and the return value is used by
3605 // a PHI node in a successor, we cannot change the return type of the call
3606 // because there is no place to put the cast instruction (without breaking
3607 // the critical edge). Bail out in this case.
3608 if (!Caller->use_empty())
3609 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
3610 for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
3612 if (PHINode *PN = dyn_cast<PHINode>(*UI))
3613 if (PN->getParent() == II->getNormalDest() ||
3614 PN->getParent() == II->getUnwindDest())
3618 unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
3619 unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);
3621 CallSite::arg_iterator AI = CS.arg_begin();
3622 for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
3623 const Type *ParamTy = FT->getParamType(i);
3624 const Type *ActTy = (*AI)->getType();
3626 if (!CastInst::isCastable(ActTy, ParamTy))
3627 return false; // Cannot transform this parameter value.
3629 if (CallerPAL.getParamAttributes(i + 1)
3630 & Attribute::typeIncompatible(ParamTy))
3631 return false; // Attribute not compatible with transformed value.
3633 // Converting from one pointer type to another or between a pointer and an
3634 // integer of the same size is safe even if we do not have a body.
3635 bool isConvertible = ActTy == ParamTy ||
3636 (TD && ((isa<PointerType>(ParamTy) ||
3637 ParamTy == TD->getIntPtrType(Caller->getContext())) &&
3638 (isa<PointerType>(ActTy) ||
3639 ActTy == TD->getIntPtrType(Caller->getContext()))));
3640 if (Callee->isDeclaration() && !isConvertible) return false;
3643 if (FT->getNumParams() < NumActualArgs && !FT->isVarArg() &&
3644 Callee->isDeclaration())
3645 return false; // Do not delete arguments unless we have a function body.
3647 if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
3648 !CallerPAL.isEmpty())
3649 // In this case we have more arguments than the new function type, but we
3650 // won't be dropping them. Check that these extra arguments have attributes
3651 // that are compatible with being a vararg call argument.
3652 for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
3653 if (CallerPAL.getSlot(i - 1).Index <= FT->getNumParams())
3655 Attributes PAttrs = CallerPAL.getSlot(i - 1).Attrs;
3656 if (PAttrs & Attribute::VarArgsIncompatible)
3660 // Okay, we decided that this is a safe thing to do: go ahead and start
3661 // inserting cast instructions as necessary...
3662 std::vector<Value*> Args;
3663 Args.reserve(NumActualArgs);
3664 SmallVector<AttributeWithIndex, 8> attrVec;
3665 attrVec.reserve(NumCommonArgs);
3667 // Get any return attributes.
3668 Attributes RAttrs = CallerPAL.getRetAttributes();
3670 // If the return value is not being used, the type may not be compatible
3671 // with the existing attributes. Wipe out any problematic attributes.
3672 RAttrs &= ~Attribute::typeIncompatible(NewRetTy);
3674 // Add the new return attributes.
3675 if (RAttrs)
3676 attrVec.push_back(AttributeWithIndex::get(0, RAttrs));
3678 AI = CS.arg_begin();
3679 for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
3680 const Type *ParamTy = FT->getParamType(i);
3681 if ((*AI)->getType() == ParamTy) {
3682 Args.push_back(*AI);
3684 Instruction::CastOps opcode = CastInst::getCastOpcode(*AI,
3685 false, ParamTy, false);
3686 Args.push_back(Builder->CreateCast(opcode, *AI, ParamTy, "tmp"));
3689 // Add any parameter attributes.
3690 if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
3691 attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
3694 // If the function takes more arguments than the call was taking, add them
3696 for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
3697 Args.push_back(Constant::getNullValue(FT->getParamType(i)));
3699 // If we are removing arguments to the function, emit an obnoxious warning.
3700 if (FT->getNumParams() < NumActualArgs) {
3701 if (!FT->isVarArg()) {
3702 errs() << "WARNING: While resolving call to function '"
3703 << Callee->getName() << "' arguments were dropped!\n";
3705 // Add all of the arguments in their promoted form to the arg list.
3706 for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
3707 const Type *PTy = getPromotedType((*AI)->getType());
3708 if (PTy != (*AI)->getType()) {
3709 // Must promote to pass through va_arg area!
3710 Instruction::CastOps opcode =
3711 CastInst::getCastOpcode(*AI, false, PTy, false);
3712 Args.push_back(Builder->CreateCast(opcode, *AI, PTy, "tmp"));
3714 Args.push_back(*AI);
3717 // Add any parameter attributes.
3718 if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
3719 attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
3724 if (Attributes FnAttrs = CallerPAL.getFnAttributes())
3725 attrVec.push_back(AttributeWithIndex::get(~0, FnAttrs));
3727 if (NewRetTy->isVoidTy())
3728 Caller->setName(""); // Void type should not have a name.
3730 const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec.begin(),
3731 attrVec.end());
3733 Instruction *NC;
3734 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
3735 NC = InvokeInst::Create(Callee, II->getNormalDest(), II->getUnwindDest(),
3736 Args.begin(), Args.end(),
3737 Caller->getName(), Caller);
3738 cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
3739 cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
3741 NC = CallInst::Create(Callee, Args.begin(), Args.end(),
3742 Caller->getName(), Caller);
3743 CallInst *CI = cast<CallInst>(Caller);
3744 if (CI->isTailCall())
3745 cast<CallInst>(NC)->setTailCall();
3746 cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
3747 cast<CallInst>(NC)->setAttributes(NewCallerPAL);
3750 // Insert a cast of the return type as necessary.
3751 Value *NV = NC;
3752 if (OldRetTy != NV->getType() && !Caller->use_empty()) {
3753 if (!NV->getType()->isVoidTy()) {
3754 Instruction::CastOps opcode = CastInst::getCastOpcode(NC, false,
3755 OldRetTy, false);
3756 NV = NC = CastInst::Create(opcode, NC, OldRetTy, "tmp");
3758 // If this is an invoke instruction, we should insert it after the first
3759 // non-PHI instruction in the normal successor block.
3760 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
3761 BasicBlock::iterator I = II->getNormalDest()->getFirstNonPHI();
3762 InsertNewInstBefore(NC, *I);
3764 // Otherwise, it's a call; just insert the cast right after the call instruction.
3765 InsertNewInstBefore(NC, *Caller);
3767 Worklist.AddUsersToWorkList(*Caller);
3769 NV = UndefValue::get(Caller->getType());
3774 if (!Caller->use_empty())
3775 Caller->replaceAllUsesWith(NV);
3777 EraseInstFromFunction(*Caller);
3781 // transformCallThroughTrampoline - Turn a call to a function created by the
3782 // init_trampoline intrinsic into a direct call to the underlying function.
3784 Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) {
3785 Value *Callee = CS.getCalledValue();
3786 const PointerType *PTy = cast<PointerType>(Callee->getType());
3787 const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
3788 const AttrListPtr &Attrs = CS.getAttributes();
3790 // If the call already has the 'nest' attribute somewhere then give up -
3791 // otherwise 'nest' would occur twice after splicing in the chain.
3792 if (Attrs.hasAttrSomewhere(Attribute::Nest))
3795 IntrinsicInst *Tramp =
3796 cast<IntrinsicInst>(cast<BitCastInst>(Callee)->getOperand(0));
3798 Function *NestF = cast<Function>(Tramp->getOperand(2)->stripPointerCasts());
3799 const PointerType *NestFPTy = cast<PointerType>(NestF->getType());
3800 const FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());
3802 const AttrListPtr &NestAttrs = NestF->getAttributes();
3803 if (!NestAttrs.isEmpty()) {
3804 unsigned NestIdx = 1;
3805 const Type *NestTy = 0;
3806 Attributes NestAttr = Attribute::None;
3808 // Look for a parameter marked with the 'nest' attribute.
3809 for (FunctionType::param_iterator I = NestFTy->param_begin(),
3810 E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
3811 if (NestAttrs.paramHasAttr(NestIdx, Attribute::Nest)) {
3812 // Record the parameter type and any other attributes.
3813 NestTy = *I;
3814 NestAttr = NestAttrs.getParamAttributes(NestIdx);
3819 Instruction *Caller = CS.getInstruction();
3820 std::vector<Value*> NewArgs;
3821 NewArgs.reserve(unsigned(CS.arg_end()-CS.arg_begin())+1);
3823 SmallVector<AttributeWithIndex, 8> NewAttrs;
3824 NewAttrs.reserve(Attrs.getNumSlots() + 1);
3826 // Insert the nest argument into the call argument list, which may
3827 // mean appending it. Likewise for attributes.
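// Illustrative shape of the rewrite (names and the nest position are made
// up): a call through the trampoline such as
//   call i32 %tramp(i32 %a)
// becomes a direct call with the static chain spliced in:
//   call i32 @NestF(i8* nest %chain, i32 %a)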
3829 // Add any result attributes.
3830 if (Attributes Attr = Attrs.getRetAttributes())
3831 NewAttrs.push_back(AttributeWithIndex::get(0, Attr));
3835 CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
3837 if (Idx == NestIdx) {
3838 // Add the chain argument and attributes.
3839 Value *NestVal = Tramp->getOperand(3);
3840 if (NestVal->getType() != NestTy)
3841 NestVal = new BitCastInst(NestVal, NestTy, "nest", Caller);
3842 NewArgs.push_back(NestVal);
3843 NewAttrs.push_back(AttributeWithIndex::get(NestIdx, NestAttr));
3849 // Add the original argument and attributes.
3850 NewArgs.push_back(*I);
3851 if (Attributes Attr = Attrs.getParamAttributes(Idx))
3852 NewAttrs.push_back
3853 (AttributeWithIndex::get(Idx + (Idx >= NestIdx), Attr));
3859 // Add any function attributes.
3860 if (Attributes Attr = Attrs.getFnAttributes())
3861 NewAttrs.push_back(AttributeWithIndex::get(~0, Attr));
3863 // The trampoline may have been bitcast to a bogus type (FTy).
3864 // Handle this by synthesizing a new function type, equal to FTy
3865 // with the chain parameter inserted.
3867 std::vector<const Type*> NewTypes;
3868 NewTypes.reserve(FTy->getNumParams()+1);
3870 // Insert the chain's type into the list of parameter types, which may
3871 // mean appending it.
3874 FunctionType::param_iterator I = FTy->param_begin(),
3875 E = FTy->param_end();
3879 // Add the chain's type.
3880 NewTypes.push_back(NestTy);
3885 // Add the original type.
3886 NewTypes.push_back(*I);
3892 // Replace the trampoline call with a direct call. Let the generic
3893 // code sort out any function type mismatches.
3894 FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
3895 FTy->isVarArg());
3896 Constant *NewCallee =
3897 NestF->getType() == PointerType::getUnqual(NewFTy) ?
3898 NestF : ConstantExpr::getBitCast(NestF,
3899 PointerType::getUnqual(NewFTy));
3900 const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs.begin(),
3901 NewAttrs.end());
3903 Instruction *NewCaller;
3904 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
3905 NewCaller = InvokeInst::Create(NewCallee,
3906 II->getNormalDest(), II->getUnwindDest(),
3907 NewArgs.begin(), NewArgs.end(),
3908 Caller->getName(), Caller);
3909 cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
3910 cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
3912 NewCaller = CallInst::Create(NewCallee, NewArgs.begin(), NewArgs.end(),
3913 Caller->getName(), Caller);
3914 if (cast<CallInst>(Caller)->isTailCall())
3915 cast<CallInst>(NewCaller)->setTailCall();
3916 cast<CallInst>(NewCaller)->
3917 setCallingConv(cast<CallInst>(Caller)->getCallingConv());
3918 cast<CallInst>(NewCaller)->setAttributes(NewPAL);
3920 if (!Caller->getType()->isVoidTy())
3921 Caller->replaceAllUsesWith(NewCaller);
3922 Caller->eraseFromParent();
3923 Worklist.Remove(Caller);
3928 // Replace the trampoline call with a direct call. Since there is no 'nest'
3929 // parameter, there is no need to adjust the argument list. Let the generic
3930 // code sort out any function type mismatches.
3931 Constant *NewCallee =
3932 NestF->getType() == PTy ? NestF :
3933 ConstantExpr::getBitCast(NestF, PTy);
3934 CS.setCalledFunction(NewCallee);
3935 return CS.getInstruction();
3940 Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
3941 SmallVector<Value*, 8> Ops(GEP.op_begin(), GEP.op_end());
3943 if (Value *V = SimplifyGEPInst(&Ops[0], Ops.size(), TD))
3944 return ReplaceInstUsesWith(GEP, V);
3946 Value *PtrOp = GEP.getOperand(0);
3948 if (isa<UndefValue>(GEP.getOperand(0)))
3949 return ReplaceInstUsesWith(GEP, UndefValue::get(GEP.getType()));
3951 // Eliminate unneeded casts for indices.
3952 if (TD) {
3953 bool MadeChange = false;
3954 unsigned PtrSize = TD->getPointerSizeInBits();
3956 gep_type_iterator GTI = gep_type_begin(GEP);
3957 for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end();
3958 I != E; ++I, ++GTI) {
3959 if (!isa<SequentialType>(*GTI)) continue;
3961 // If we are using a wider index than needed for this platform, shrink it
3962 // to what we need. If narrower, sign-extend it to what we need. This
3963 // explicit cast can make subsequent optimizations more obvious.
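// For example, on a target with 64-bit pointers an i32 GEP index is
// sign-extended to i64 here, so later folds see indices of one canonical
// width.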
3964 unsigned OpBits = cast<IntegerType>((*I)->getType())->getBitWidth();
3965 if (OpBits == PtrSize)
3966 continue;
3968 *I = Builder->CreateIntCast(*I, TD->getIntPtrType(GEP.getContext()),true);
3969 MadeChange = true;
3971 if (MadeChange) return &GEP;
3974 // Combine Indices - If the source pointer to this getelementptr instruction
3975 // is a getelementptr instruction, combine the indices of the two
3976 // getelementptr instructions into a single instruction.
3978 if (GEPOperator *Src = dyn_cast<GEPOperator>(PtrOp)) {
3979 // Note that if our source is a gep chain itself, we wait for that
3980 // chain to be resolved before we perform this transformation. This
3981 // avoids creating a ton of code in some cases.
3983 if (GetElementPtrInst *SrcGEP =
3984 dyn_cast<GetElementPtrInst>(Src->getOperand(0)))
3985 if (SrcGEP->getNumOperands() == 2)
3986 return 0; // Wait until our source is folded to completion.
3988 SmallVector<Value*, 8> Indices;
3990 // Find out whether the last index in the source GEP is a sequential idx.
3991 bool EndsWithSequential = false;
3992 for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
3994 EndsWithSequential = !isa<StructType>(*I);
3996 // Can we combine the two pointer arithmetics offsets?
3997 if (EndsWithSequential) {
3998 // Replace: gep (gep %P, long B), long A, ...
3999 // With: T = long A+B; gep %P, T, ...
4002 Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
4003 Value *GO1 = GEP.getOperand(1);
4004 if (SO1 == Constant::getNullValue(SO1->getType())) {
4006 } else if (GO1 == Constant::getNullValue(GO1->getType())) {
4009 // If they aren't the same type, then the input hasn't been processed
4010 // by the loop above yet (which canonicalizes sequential index types to
4011 // intptr_t). Just avoid transforming this until the input has been
4012 // processed.
4013 if (SO1->getType() != GO1->getType())
4014 return 0;
4015 Sum = Builder->CreateAdd(SO1, GO1, PtrOp->getName()+".sum");
4018 // Update the GEP in place if possible.
4019 if (Src->getNumOperands() == 2) {
4020 GEP.setOperand(0, Src->getOperand(0));
4021 GEP.setOperand(1, Sum);
4024 Indices.append(Src->op_begin()+1, Src->op_end()-1);
4025 Indices.push_back(Sum);
4026 Indices.append(GEP.op_begin()+2, GEP.op_end());
4027 } else if (isa<Constant>(*GEP.idx_begin()) &&
4028 cast<Constant>(*GEP.idx_begin())->isNullValue() &&
4029 Src->getNumOperands() != 1) {
4030 // Otherwise we can do the fold if the first index of the GEP is a zero
4031 Indices.append(Src->op_begin()+1, Src->op_end());
4032 Indices.append(GEP.idx_begin()+1, GEP.idx_end());
4035 if (!Indices.empty())
4036 return (cast<GEPOperator>(&GEP)->isInBounds() &&
4037 Src->isInBounds()) ?
4038 GetElementPtrInst::CreateInBounds(Src->getOperand(0), Indices.begin(),
4039 Indices.end(), GEP.getName()) :
4040 GetElementPtrInst::Create(Src->getOperand(0), Indices.begin(),
4041 Indices.end(), GEP.getName());
4044 // Handle gep(bitcast x) and gep(gep x, 0, 0, 0).
4045 if (Value *X = getBitCastOperand(PtrOp)) {
4046 assert(isa<PointerType>(X->getType()) && "Must be cast from pointer");
4048 // If the input bitcast is actually "bitcast(bitcast(x))", then we don't
4049 // want to change the gep until the bitcasts are eliminated.
4050 if (getBitCastOperand(X)) {
4051 Worklist.AddValue(PtrOp);
4055 bool HasZeroPointerIndex = false;
4056 if (ConstantInt *C = dyn_cast<ConstantInt>(GEP.getOperand(1)))
4057 HasZeroPointerIndex = C->isZero();
4059 // Transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ...
4060 // into : GEP [10 x i8]* X, i32 0, ...
4062 // Likewise, transform: GEP (bitcast i8* X to [0 x i8]*), i32 0, ...
4063 // into : GEP i8* X, ...
4065 // This occurs when the program declares an array extern like "int X[];"
4066 if (HasZeroPointerIndex) {
4067 const PointerType *CPTy = cast<PointerType>(PtrOp->getType());
4068 const PointerType *XTy = cast<PointerType>(X->getType());
4069 if (const ArrayType *CATy =
4070 dyn_cast<ArrayType>(CPTy->getElementType())) {
4071 // GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ?
4072 if (CATy->getElementType() == XTy->getElementType()) {
4073 // -> GEP i8* X, ...
4074 SmallVector<Value*, 8> Indices(GEP.idx_begin()+1, GEP.idx_end());
4075 return cast<GEPOperator>(&GEP)->isInBounds() ?
4076 GetElementPtrInst::CreateInBounds(X, Indices.begin(), Indices.end(),
4078 GetElementPtrInst::Create(X, Indices.begin(), Indices.end(),
4082 if (const ArrayType *XATy = dyn_cast<ArrayType>(XTy->getElementType())){
4083 // GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ?
4084 if (CATy->getElementType() == XATy->getElementType()) {
4085 // -> GEP [10 x i8]* X, i32 0, ...
4086 // At this point, we know that the cast source type is a pointer
4087 // to an array of the same type as the destination pointer
4088 // array. Because the array type is never stepped over (there
4089 // is a leading zero) we can fold the cast into this GEP.
4090 GEP.setOperand(0, X);
4095 } else if (GEP.getNumOperands() == 2) {
4096 // Transform things like:
4097 // %t = getelementptr i32* bitcast ([2 x i32]* %str to i32*), i32 %V
4098 // into: %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast
4099 const Type *SrcElTy = cast<PointerType>(X->getType())->getElementType();
4100 const Type *ResElTy=cast<PointerType>(PtrOp->getType())->getElementType();
4101 if (TD && isa<ArrayType>(SrcElTy) &&
4102 TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType()) ==
4103 TD->getTypeAllocSize(ResElTy)) {
4104 Value *Idx[2];
4105 Idx[0] = Constant::getNullValue(Type::getInt32Ty(GEP.getContext()));
4106 Idx[1] = GEP.getOperand(1);
4107 Value *NewGEP = cast<GEPOperator>(&GEP)->isInBounds() ?
4108 Builder->CreateInBoundsGEP(X, Idx, Idx + 2, GEP.getName()) :
4109 Builder->CreateGEP(X, Idx, Idx + 2, GEP.getName());
4110 // V and GEP are both pointer types --> BitCast
4111 return new BitCastInst(NewGEP, GEP.getType());
4114 // Transform things like:
4115 // getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp
4116 // (where tmp = 8*tmp2) into:
4117 // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast
4119 if (TD && isa<ArrayType>(SrcElTy) &&
4120 ResElTy == Type::getInt8Ty(GEP.getContext())) {
4121 uint64_t ArrayEltSize =
4122 TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType());
4124 // Check to see if "tmp" is a scale by a multiple of ArrayEltSize. We
4125 // allow either a mul, shift, or constant here.
4126 Value *NewIdx = 0;
4127 ConstantInt *Scale = 0;
4128 if (ArrayEltSize == 1) {
4129 NewIdx = GEP.getOperand(1);
4130 Scale = ConstantInt::get(cast<IntegerType>(NewIdx->getType()), 1);
4131 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP.getOperand(1))) {
4132 NewIdx = ConstantInt::get(CI->getType(), 1);
4133 Scale = CI;
4134 } else if (Instruction *Inst =dyn_cast<Instruction>(GEP.getOperand(1))){
4135 if (Inst->getOpcode() == Instruction::Shl &&
4136 isa<ConstantInt>(Inst->getOperand(1))) {
4137 ConstantInt *ShAmt = cast<ConstantInt>(Inst->getOperand(1));
4138 uint32_t ShAmtVal = ShAmt->getLimitedValue(64);
4139 Scale = ConstantInt::get(cast<IntegerType>(Inst->getType()),
4140 1ULL << ShAmtVal);
4141 NewIdx = Inst->getOperand(0);
4142 } else if (Inst->getOpcode() == Instruction::Mul &&
4143 isa<ConstantInt>(Inst->getOperand(1))) {
4144 Scale = cast<ConstantInt>(Inst->getOperand(1));
4145 NewIdx = Inst->getOperand(0);
4149 // If the index will be to exactly the right offset with the scale taken
4150 // out, perform the transformation. Note, we don't know whether Scale is
4151 // signed or not. We'll use the unsigned version of the division/modulo
4152 // operation after making sure Scale doesn't have the sign bit set.
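// For example, with 8-byte double elements (ArrayEltSize == 8) and an index
// of the form 'mul i32 %tmp2, 8', the scale divides evenly and %tmp2 can
// index the [100 x double] array directly.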
4153 if (ArrayEltSize && Scale && Scale->getSExtValue() >= 0LL &&
4154 Scale->getZExtValue() % ArrayEltSize == 0) {
4155 Scale = ConstantInt::get(Scale->getType(),
4156 Scale->getZExtValue() / ArrayEltSize);
4157 if (Scale->getZExtValue() != 1) {
4158 Constant *C = ConstantExpr::getIntegerCast(Scale, NewIdx->getType(),
4159 false /*ZExt*/);
4160 NewIdx = Builder->CreateMul(NewIdx, C, "idxscale");
4163 // Insert the new GEP instruction.
4164 Value *Idx[2];
4165 Idx[0] = Constant::getNullValue(Type::getInt32Ty(GEP.getContext()));
4166 Idx[1] = NewIdx;
4167 Value *NewGEP = cast<GEPOperator>(&GEP)->isInBounds() ?
4168 Builder->CreateInBoundsGEP(X, Idx, Idx + 2, GEP.getName()) :
4169 Builder->CreateGEP(X, Idx, Idx + 2, GEP.getName());
4170 // The NewGEP must be pointer typed, so must the old one -> BitCast
4171 return new BitCastInst(NewGEP, GEP.getType());
4177 /// See if we can simplify:
4178 /// X = bitcast A* to B*
4179 /// Y = gep X, <...constant indices...>
4180 /// into a gep of the original struct. This is important for SROA and alias
4181 /// analysis of unions. If "A" is also a bitcast, wait for A/X to be merged.
4182 if (BitCastInst *BCI = dyn_cast<BitCastInst>(PtrOp)) {
4184 !isa<BitCastInst>(BCI->getOperand(0)) && GEP.hasAllConstantIndices()) {
4185 // Determine how much the GEP moves the pointer. We are guaranteed to get
4186 // a constant back from EmitGEPOffset.
4187 ConstantInt *OffsetV = cast<ConstantInt>(EmitGEPOffset(&GEP));
4188 int64_t Offset = OffsetV->getSExtValue();
4190 // If this GEP instruction doesn't move the pointer, just replace the GEP
4191 // with a bitcast of the real input to the dest type.
4193 // If the bitcast is of an allocation, and the allocation will be
4194 // converted to match the type of the cast, don't touch this.
4195 if (isa<AllocaInst>(BCI->getOperand(0)) ||
4196 isMalloc(BCI->getOperand(0))) {
4197 // See if the bitcast simplifies, if so, don't nuke this GEP yet.
4198 if (Instruction *I = visitBitCast(*BCI)) {
4201 BCI->getParent()->getInstList().insert(BCI, I);
4202 ReplaceInstUsesWith(*BCI, I);
4207 return new BitCastInst(BCI->getOperand(0), GEP.getType());
4210 // Otherwise, if the offset is non-zero, we need to find out if there is a
4211 // field at Offset in 'A's type. If so, we can pull the cast through the
4213 SmallVector<Value*, 8> NewIndices;
4215 cast<PointerType>(BCI->getOperand(0)->getType())->getElementType();
4216 if (FindElementAtOffset(InTy, Offset, NewIndices)) {
4217 Value *NGEP = cast<GEPOperator>(&GEP)->isInBounds() ?
4218 Builder->CreateInBoundsGEP(BCI->getOperand(0), NewIndices.begin(),
4220 Builder->CreateGEP(BCI->getOperand(0), NewIndices.begin(),
4223 if (NGEP->getType() == GEP.getType())
4224 return ReplaceInstUsesWith(GEP, NGEP);
4225 NGEP->takeName(&GEP);
4226 return new BitCastInst(NGEP, GEP.getType());
4234 Instruction *InstCombiner::visitFree(Instruction &FI) {
4235 Value *Op = FI.getOperand(1);
4237 // free undef -> unreachable.
4238 if (isa<UndefValue>(Op)) {
4239 // Insert a new store to null because we cannot modify the CFG here.
4240 new StoreInst(ConstantInt::getTrue(FI.getContext()),
4241 UndefValue::get(Type::getInt1PtrTy(FI.getContext())), &FI);
4242 return EraseInstFromFunction(FI);
4245 // If we have 'free null' delete the instruction. This can happen in stl code
4246 // when lots of inlining happens.
4247 if (isa<ConstantPointerNull>(Op))
4248 return EraseInstFromFunction(FI);
4250 // If we have a malloc call whose only use is a free call, delete both.
4251 if (isMalloc(Op)) {
4252 if (CallInst* CI = extractMallocCallFromBitCast(Op)) {
4253 if (Op->hasOneUse() && CI->hasOneUse()) {
4254 EraseInstFromFunction(FI);
4255 EraseInstFromFunction(*CI);
4256 return EraseInstFromFunction(*cast<Instruction>(Op));
4259 // Op is a call to malloc
4260 if (Op->hasOneUse()) {
4261 EraseInstFromFunction(FI);
4262 return EraseInstFromFunction(*cast<Instruction>(Op));
4272 Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
4273 // Change br (not X), label True, label False to: br X, label False, True
4274 Value *X = 0;
4275 BasicBlock *TrueDest;
4276 BasicBlock *FalseDest;
4277 if (match(&BI, m_Br(m_Not(m_Value(X)), TrueDest, FalseDest)) &&
4278 !isa<Constant>(X)) {
4279 // Swap Destinations and condition...
4280 BI.setCondition(X);
4281 BI.setSuccessor(0, FalseDest);
4282 BI.setSuccessor(1, TrueDest);
4286 // Canonicalize fcmp_one -> fcmp_oeq
4287 FCmpInst::Predicate FPred; Value *Y;
4288 if (match(&BI, m_Br(m_FCmp(FPred, m_Value(X), m_Value(Y)),
4289 TrueDest, FalseDest)) &&
4290 BI.getCondition()->hasOneUse())
4291 if (FPred == FCmpInst::FCMP_ONE || FPred == FCmpInst::FCMP_OLE ||
4292 FPred == FCmpInst::FCMP_OGE) {
4293 FCmpInst *Cond = cast<FCmpInst>(BI.getCondition());
4294 Cond->setPredicate(FCmpInst::getInversePredicate(FPred));
4296 // Swap Destinations and condition.
4297 BI.setSuccessor(0, FalseDest);
4298 BI.setSuccessor(1, TrueDest);
4303 // Canonicalize icmp_ne -> icmp_eq
4304 ICmpInst::Predicate IPred;
4305 if (match(&BI, m_Br(m_ICmp(IPred, m_Value(X), m_Value(Y)),
4306 TrueDest, FalseDest)) &&
4307 BI.getCondition()->hasOneUse())
4308 if (IPred == ICmpInst::ICMP_NE || IPred == ICmpInst::ICMP_ULE ||
4309 IPred == ICmpInst::ICMP_SLE || IPred == ICmpInst::ICMP_UGE ||
4310 IPred == ICmpInst::ICMP_SGE) {
4311 ICmpInst *Cond = cast<ICmpInst>(BI.getCondition());
4312 Cond->setPredicate(ICmpInst::getInversePredicate(IPred));
4313 // Swap Destinations and condition.
4314 BI.setSuccessor(0, FalseDest);
4315 BI.setSuccessor(1, TrueDest);
4323 Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
4324 Value *Cond = SI.getCondition();
4325 if (Instruction *I = dyn_cast<Instruction>(Cond)) {
4326 if (I->getOpcode() == Instruction::Add)
4327 if (ConstantInt *AddRHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
4328 // change 'switch (X+4) case 1:' into 'switch (X) case -3'
4329 for (unsigned i = 2, e = SI.getNumOperands(); i != e; i += 2)
4330 SI.setOperand(i,
4331 ConstantExpr::getSub(cast<Constant>(SI.getOperand(i)),
4332 AddRHS));
4333 SI.setOperand(0, I->getOperand(0));
4341 Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
4342 Value *Agg = EV.getAggregateOperand();
4344 if (!EV.hasIndices())
4345 return ReplaceInstUsesWith(EV, Agg);
4347 if (Constant *C = dyn_cast<Constant>(Agg)) {
4348 if (isa<UndefValue>(C))
4349 return ReplaceInstUsesWith(EV, UndefValue::get(EV.getType()));
4351 if (isa<ConstantAggregateZero>(C))
4352 return ReplaceInstUsesWith(EV, Constant::getNullValue(EV.getType()));
4354 if (isa<ConstantArray>(C) || isa<ConstantStruct>(C)) {
4355 // Extract the element indexed by the first index out of the constant
4356 Value *V = C->getOperand(*EV.idx_begin());
4357 if (EV.getNumIndices() > 1)
4358 // Extract the remaining indices out of the constant indexed by the
4359 // first index
4360 return ExtractValueInst::Create(V, EV.idx_begin() + 1, EV.idx_end());
4362 return ReplaceInstUsesWith(EV, V);
4364 return 0; // Can't handle other constants
4366 if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
4367 // We're extracting from an insertvalue instruction, compare the indices
4368 const unsigned *exti, *exte, *insi, *inse;
4369 for (exti = EV.idx_begin(), insi = IV->idx_begin(),
4370 exte = EV.idx_end(), inse = IV->idx_end();
4371 exti != exte && insi != inse;
4372 ++exti, ++insi) {
4373 if (*insi != *exti)
4374 // The insert and extract both reference distinctly different elements.
4375 // This means the extract is not influenced by the insert, and we can
4376 // replace the aggregate operand of the extract with the aggregate
4377 // operand of the insert. i.e., replace
4378 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
4379 // %E = extractvalue { i32, { i32 } } %I, 0
4381 // %E = extractvalue { i32, { i32 } } %A, 0
4382 return ExtractValueInst::Create(IV->getAggregateOperand(),
4383 EV.idx_begin(), EV.idx_end());
4385 if (exti == exte && insi == inse)
4386 // Both iterators are at the end: Index lists are identical. Replace
4387 // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
4388 // %C = extractvalue { i32, { i32 } } %B, 1, 0
4390 return ReplaceInstUsesWith(EV, IV->getInsertedValueOperand());
4391 if (exti == exte) {
4392 // The extract list is a prefix of the insert list. i.e. replace
4393 // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
4394 // %E = extractvalue { i32, { i32 } } %I, 1
4396 // %X = extractvalue { i32, { i32 } } %A, 1
4397 // %E = insertvalue { i32 } %X, i32 42, 0
4398 // by switching the order of the insert and extract (though the
4399 // insertvalue should be left in, since it may have other uses).
4400 Value *NewEV = Builder->CreateExtractValue(IV->getAggregateOperand(),
4401 EV.idx_begin(), EV.idx_end());
4402 return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
4403 insi, inse);
4404 }
4405 if (insi == inse)
4406 // The insert list is a prefix of the extract list
4407 // We can simply remove the common indices from the extract and make it
4408 // operate on the inserted value instead of the insertvalue result.
4410 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
4411 // %E = extractvalue { i32, { i32 } } %I, 1, 0
4413 // %E extractvalue { i32 } { i32 42 }, 0
4414 return ExtractValueInst::Create(IV->getInsertedValueOperand(),
4415 exti, exte);
4416 }
4417 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Agg)) {
4418 // We're extracting from an intrinsic, see if we're the only user, which
4419 // allows us to simplify multiple result intrinsics to simpler things that
4420 // just get one value..
4421 if (II->hasOneUse()) {
4422 // Check if we're grabbing the overflow bit or the result of a 'with
4423 // overflow' intrinsic. If it's the latter we can remove the intrinsic
4424 // and replace it with a traditional binary instruction.
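// For example, if the only use of
//   %u = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
// is an extractvalue of element 0, the call can be replaced by a plain
// 'add i32 %a, %b'.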
4425 switch (II->getIntrinsicID()) {
4426 case Intrinsic::uadd_with_overflow:
4427 case Intrinsic::sadd_with_overflow:
4428 if (*EV.idx_begin() == 0) { // Normal result.
4429 Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
4430 II->replaceAllUsesWith(UndefValue::get(II->getType()));
4431 EraseInstFromFunction(*II);
4432 return BinaryOperator::CreateAdd(LHS, RHS);
4435 case Intrinsic::usub_with_overflow:
4436 case Intrinsic::ssub_with_overflow:
4437 if (*EV.idx_begin() == 0) { // Normal result.
4438 Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
4439 II->replaceAllUsesWith(UndefValue::get(II->getType()));
4440 EraseInstFromFunction(*II);
4441 return BinaryOperator::CreateSub(LHS, RHS);
4444 case Intrinsic::umul_with_overflow:
4445 case Intrinsic::smul_with_overflow:
4446 if (*EV.idx_begin() == 0) { // Normal result.
4447 Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
4448 II->replaceAllUsesWith(UndefValue::get(II->getType()));
4449 EraseInstFromFunction(*II);
4450 return BinaryOperator::CreateMul(LHS, RHS);
4458 // Can't simplify extracts from other values. Note that nested extracts are
4459 // already simplified implicitly by the above (extract (extract (insert))
4460 // will be translated into extract (insert (extract)) first and then just
4461 // the value inserted, if appropriate).
4468 /// TryToSinkInstruction - Try to move the specified instruction from its
4469 /// current block into the beginning of DestBlock, which can only happen if it's
4470 /// safe to move the instruction past all of the instructions between it and the
4471 /// end of its block.
4472 static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {
4473 assert(I->hasOneUse() && "Invariants didn't hold!");
4475 // Cannot move control-flow-involving instructions, volatile loads, vaarg, etc.
4476 if (isa<PHINode>(I) || I->mayHaveSideEffects() || isa<TerminatorInst>(I))
4477 return false;
4479 // Do not sink alloca instructions out of the entry block.
4480 if (isa<AllocaInst>(I) && I->getParent() ==
4481 &DestBlock->getParent()->getEntryBlock())
4482 return false;
4484 // We can only sink load instructions if there is nothing between the load and
4485 // the end of block that could change the value.
4486 if (I->mayReadFromMemory()) {
4487 for (BasicBlock::iterator Scan = I, E = I->getParent()->end();
4488 Scan != E; ++Scan)
4489 if (Scan->mayWriteToMemory())
4490 return false;
4493 BasicBlock::iterator InsertPos = DestBlock->getFirstNonPHI();
4495 I->moveBefore(InsertPos);
4501 /// AddReachableCodeToWorklist - Walk the function in depth-first order, adding
4502 /// all reachable code to the worklist.
4504 /// This has a couple of tricks to make the code faster and more powerful. In
4505 /// particular, we constant fold and DCE instructions as we go, to avoid adding
4506 /// them to the worklist (this significantly speeds up instcombine on code where
4507 /// many instructions are dead or constant). Additionally, if we find a branch
4508 /// whose condition is a known constant, we only visit the reachable successors.
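/// For example, the true destination of a 'br i1 false, label %a, label %b'
/// branch is never pushed onto the worklist, so instcombine does not spend
/// time on code that is trivially unreachable.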
4510 static bool AddReachableCodeToWorklist(BasicBlock *BB,
4511 SmallPtrSet<BasicBlock*, 64> &Visited,
4513 const TargetData *TD) {
4514 bool MadeIRChange = false;
4515 SmallVector<BasicBlock*, 256> Worklist;
4516 Worklist.push_back(BB);
4518 std::vector<Instruction*> InstrsForInstCombineWorklist;
4519 InstrsForInstCombineWorklist.reserve(128);
4521 SmallPtrSet<ConstantExpr*, 64> FoldedConstants;
4523 while (!Worklist.empty()) {
4524 BB = Worklist.back();
4525 Worklist.pop_back();
4527 // We have now visited this block! If we've already been here, ignore it.
4528 if (!Visited.insert(BB)) continue;
4530 for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
4531 Instruction *Inst = BBI++;
4533 // DCE instruction if trivially dead.
4534 if (isInstructionTriviallyDead(Inst)) {
4536 DEBUG(errs() << "IC: DCE: " << *Inst << '\n');
4537 Inst->eraseFromParent();
4541 // ConstantProp instruction if trivially constant.
4542 if (!Inst->use_empty() && isa<Constant>(Inst->getOperand(0)))
4543 if (Constant *C = ConstantFoldInstruction(Inst, TD)) {
4544 DEBUG(errs() << "IC: ConstFold to: " << *C << " from: "
4545 << *Inst << '\n');
4546 Inst->replaceAllUsesWith(C);
4548 Inst->eraseFromParent();
4555 // See if we can constant fold its operands.
4556 for (User::op_iterator i = Inst->op_begin(), e = Inst->op_end();
4558 ConstantExpr *CE = dyn_cast<ConstantExpr>(i);
4559 if (CE == 0) continue;
4561 // If we already folded this constant, don't try again.
4562 if (!FoldedConstants.insert(CE))
4565 Constant *NewC = ConstantFoldConstantExpression(CE, TD);
4566 if (NewC && NewC != CE) {
4567 *i = NewC;
4568 MadeIRChange = true;
4574 InstrsForInstCombineWorklist.push_back(Inst);
4577 // Recursively visit successors. If this is a branch or switch on a
4578 // constant, only visit the reachable successor.
4579 TerminatorInst *TI = BB->getTerminator();
4580 if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
4581 if (BI->isConditional() && isa<ConstantInt>(BI->getCondition())) {
4582 bool CondVal = cast<ConstantInt>(BI->getCondition())->getZExtValue();
4583 BasicBlock *ReachableBB = BI->getSuccessor(!CondVal);
4584 Worklist.push_back(ReachableBB);
4587 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
4588 if (ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
4589 // See if this is an explicit destination.
4590 for (unsigned i = 1, e = SI->getNumSuccessors(); i != e; ++i)
4591 if (SI->getCaseValue(i) == Cond) {
4592 BasicBlock *ReachableBB = SI->getSuccessor(i);
4593 Worklist.push_back(ReachableBB);
4597 // Otherwise it is the default destination.
4598 Worklist.push_back(SI->getSuccessor(0));
4603 for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
4604 Worklist.push_back(TI->getSuccessor(i));
4607 // Once we've found all of the instructions to add to instcombine's worklist,
4608 // add them in reverse order. This way instcombine will visit from the top
4609 // of the function down. This jives well with the way that it adds all uses
4610 // of instructions to the worklist after doing a transformation, thus avoiding
4611 // some N^2 behavior in pathological cases.
4612 IC.Worklist.AddInitialGroup(&InstrsForInstCombineWorklist[0],
4613 InstrsForInstCombineWorklist.size());
4615 return MadeIRChange;
4618 bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
4619 MadeIRChange = false;
4621 DEBUG(errs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
4622 << F.getNameStr() << "\n");
4625 // Do a depth-first traversal of the function, populate the worklist with
4626 // the reachable instructions. Ignore blocks that are not reachable. Keep
4627 // track of which blocks we visit.
4628 SmallPtrSet<BasicBlock*, 64> Visited;
4629 MadeIRChange |= AddReachableCodeToWorklist(F.begin(), Visited, *this, TD);
4631 // Do a quick scan over the function. If we find any blocks that are
4632 // unreachable, remove any instructions inside of them. This prevents
4633 // the instcombine code from having to deal with some bad special cases.
4634 for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
4635 if (!Visited.count(BB)) {
4636 Instruction *Term = BB->getTerminator();
4637 while (Term != BB->begin()) { // Remove instrs bottom-up
4638 BasicBlock::iterator I = Term; --I;
4640 DEBUG(errs() << "IC: DCE: " << *I << '\n');
4641 // A debug intrinsic shouldn't force another iteration if we weren't
4642 // going to do one without it.
4643 if (!isa<DbgInfoIntrinsic>(I)) {
4645 MadeIRChange = true;
4648 // If I is not of void type, then replaceAllUsesWith undef.
4649 // This allows ValueHandlers and custom metadata to adjust themselves.
4650 if (!I->getType()->isVoidTy())
4651 I->replaceAllUsesWith(UndefValue::get(I->getType()));
4652 I->eraseFromParent();
4657 while (!Worklist.isEmpty()) {
4658 Instruction *I = Worklist.RemoveOne();
4659 if (I == 0) continue; // skip null values.
4661 // Check to see if we can DCE the instruction.
4662 if (isInstructionTriviallyDead(I)) {
4663 DEBUG(errs() << "IC: DCE: " << *I << '\n');
4664 EraseInstFromFunction(*I);
4666 MadeIRChange = true;
4670 // Instruction isn't dead, see if we can constant propagate it.
4671 if (!I->use_empty() && isa<Constant>(I->getOperand(0)))
4672 if (Constant *C = ConstantFoldInstruction(I, TD)) {
4673 DEBUG(errs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n');
4675 // Add operands to the worklist.
4676 ReplaceInstUsesWith(*I, C);
4678 EraseInstFromFunction(*I);
4679 MadeIRChange = true;
4683 // See if we can trivially sink this instruction to a successor basic block.
4684 if (I->hasOneUse()) {
4685 BasicBlock *BB = I->getParent();
4686 Instruction *UserInst = cast<Instruction>(I->use_back());
4687 BasicBlock *UserParent;
4689 // Get the block the use occurs in.
4690 if (PHINode *PN = dyn_cast<PHINode>(UserInst))
4691 UserParent = PN->getIncomingBlock(I->use_begin().getUse());
4692 else
4693 UserParent = UserInst->getParent();
4695 if (UserParent != BB) {
4696 bool UserIsSuccessor = false;
4697 // See if the user is one of our successors.
4698 for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI)
4699 if (*SI == UserParent) {
4700 UserIsSuccessor = true;
4704 // If the user is one of our immediate successors, and if that successor
4705 // only has us as a predecessor (we'd have to split the critical edge
4706 // otherwise), we can keep going.
4707 if (UserIsSuccessor && UserParent->getSinglePredecessor())
4708 // Okay, the CFG is simple enough, try to sink this instruction.
4709 MadeIRChange |= TryToSinkInstruction(I, UserParent);
4713 // Now that we have an instruction, try combining it to simplify it.
4714 Builder->SetInsertPoint(I->getParent(), I);
4716 #ifndef NDEBUG
4717 std::string OrigI;
4718 #endif
4719 DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str(););
4720 DEBUG(errs() << "IC: Visiting: " << OrigI << '\n');
4722 if (Instruction *Result = visit(*I)) {
4724 // Should we replace the old instruction with a new one?
4726 DEBUG(errs() << "IC: Old = " << *I << '\n'
4727 << " New = " << *Result << '\n');
4729 // Everything uses the new instruction now.
4730 I->replaceAllUsesWith(Result);
4732 // Push the new instruction and any users onto the worklist.
4733 Worklist.Add(Result);
4734 Worklist.AddUsersToWorkList(*Result);
4736 // Move the name to the new instruction first.
4737 Result->takeName(I);
4739 // Insert the new instruction into the basic block...
4740 BasicBlock *InstParent = I->getParent();
4741 BasicBlock::iterator InsertPos = I;
4743 if (!isa<PHINode>(Result)) // If combining a PHI, don't insert
4744 while (isa<PHINode>(InsertPos)) // middle of a block of PHIs.
4745 ++InsertPos;
4747 InstParent->getInstList().insert(InsertPos, Result);
4749 EraseInstFromFunction(*I);
4752 DEBUG(errs() << "IC: Mod = " << OrigI << '\n'
4753 << " New = " << *I << '\n');
4756 // If the instruction was modified, it's possible that it is now dead.
4757 // if so, remove it.
4758 if (isInstructionTriviallyDead(I)) {
4759 EraseInstFromFunction(*I);
4762 Worklist.AddUsersToWorkList(*I);
4765 MadeIRChange = true;
4770 return MadeIRChange;
4774 bool InstCombiner::runOnFunction(Function &F) {
4775 MustPreserveLCSSA = mustPreserveAnalysisID(LCSSAID);
4776 TD = getAnalysisIfAvailable<TargetData>();
4779 /// Builder - This is an IRBuilder that automatically inserts new
4780 /// instructions into the worklist when they are created.
4781 IRBuilder<true, TargetFolder, InstCombineIRInserter>
4782 TheBuilder(F.getContext(), TargetFolder(TD),
4783 InstCombineIRInserter(Worklist));
4784 Builder = &TheBuilder;
4786 bool EverMadeChange = false;
4788 // Iterate while there is work to do.
4789 unsigned Iteration = 0;
4790 while (DoOneIteration(F, Iteration++))
4791 EverMadeChange = true;
4794 return EverMadeChange;
4797 FunctionPass *llvm::createInstructionCombiningPass() {
4798 return new InstCombiner();
4799 }