//===- InstructionCombining.cpp - Combine multiple instructions ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// InstructionCombining - Combine instructions to form fewer, simple
// instructions.  This pass does not modify the CFG.  This pass is where
// algebraic simplification happens.
//
// This pass combines things like:
//    %Y = add i32 %X, 1
//    %Z = add i32 %Y, 1
// into:
//    %Z = add i32 %X, 2
//
// This is a simple worklist driven algorithm.
//
// This pass guarantees that the following canonicalizations are performed on
// every instruction:
//    1. If a binary operator has a constant operand, it is moved to the RHS
//    2. Bitwise operators with constant operands are always grouped so that
//       shifts are performed first, then or's, then and's, then xor's.
//    3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
//    4. All cmp instructions on boolean values are replaced with logical ops
//    5. add X, X is represented as (X*2) => (X << 1)
//    6. Multiplies with a power-of-two constant argument are transformed into
//       shifts.
//
//===----------------------------------------------------------------------===//
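
// A couple of illustrative examples of the canonicalizations listed above
// (assumed IR, shown only for orientation):
//
//    add i32 1, %X   -->  add i32 %X, 1      ; rule 1: constant moves to RHS
//    mul i32 %X, 8   -->  shl i32 %X, 3      ; rule 6: power-of-two multiply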

#define DEBUG_TYPE "instcombine"
#include "llvm/Transforms/Scalar.h"
#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm-c/Initialization.h"
using namespace llvm;
using namespace llvm::PatternMatch;

STATISTIC(NumCombined , "Number of insts combined");
STATISTIC(NumConstProp, "Number of constant folds");
STATISTIC(NumDeadInst , "Number of dead inst eliminated");
STATISTIC(NumSunkInst , "Number of instructions sunk");
STATISTIC(NumExpand,    "Number of expansions");
STATISTIC(NumFactor ,   "Number of factorizations");
STATISTIC(NumReassoc ,  "Number of reassociations");

// Initialization Routines

void llvm::initializeInstCombine(PassRegistry &Registry) {
  initializeInstCombinerPass(Registry);
}

void LLVMInitializeInstCombine(LLVMPassRegistryRef R) {
  initializeInstCombine(*unwrap(R));
}

char InstCombiner::ID = 0;
INITIALIZE_PASS(InstCombiner, "instcombine",
                "Combine redundant instructions", false, false)

void InstCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
}

/// ShouldChangeType - Return true if it is desirable to convert a computation
/// from 'From' to 'To'.  We don't want to convert from a legal to an illegal
/// type for example, or from a smaller to a larger illegal type.
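/// For example, on a target whose TargetData declares i32 and i64 legal,
/// widening an i64 computation to i128 is rejected, while shrinking an
/// already-illegal i160 computation down to i64 is allowed.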
bool InstCombiner::ShouldChangeType(Type *From, Type *To) const {
  assert(From->isIntegerTy() && To->isIntegerTy());

  // If we don't have TD, we don't know if the source/dest are legal.
  if (!TD) return false;

  unsigned FromWidth = From->getPrimitiveSizeInBits();
  unsigned ToWidth = To->getPrimitiveSizeInBits();
  bool FromLegal = TD->isLegalInteger(FromWidth);
  bool ToLegal = TD->isLegalInteger(ToWidth);

  // If this is a legal integer from type, and the result would be an illegal
  // type, don't do the transformation.
  if (FromLegal && !ToLegal)
    return false;

  // Otherwise, if both are illegal, do not increase the size of the result. We
  // do allow things like i160 -> i64, but not i64 -> i160.
  if (!FromLegal && !ToLegal && ToWidth > FromWidth)
    return false;

  return true;
}

// Return true, if No Signed Wrap should be maintained for I.
// The No Signed Wrap flag can be kept if the operation "B (I.getOpcode) C",
// where both B and C should be ConstantInts, results in a constant that does
// not overflow. This function only handles the Add and Sub opcodes. For
// all other opcodes, the function conservatively returns false.
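// For example, reassociating "(X +nsw 1) +nsw 2" into "X +nsw 3" may keep the
// flag because 1 + 2 does not overflow, whereas a reassociation whose folded
// constants would overflow (e.g. 1 + INT_MAX) must drop it.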
static bool MaintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C) {
  OverflowingBinaryOperator *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  if (!OBO || !OBO->hasNoSignedWrap()) {
    return false;
  }

  // We reason about Add and Sub only.
  Instruction::BinaryOps Opcode = I.getOpcode();
  if (Opcode != Instruction::Add &&
      Opcode != Instruction::Sub) {
    return false;
  }

  ConstantInt *CB = dyn_cast<ConstantInt>(B);
  ConstantInt *CC = dyn_cast<ConstantInt>(C);
  if (!CB || !CC)
    return false;

  const APInt &BVal = CB->getValue();
  const APInt &CVal = CC->getValue();
  bool Overflow = false;

  if (Opcode == Instruction::Add) {
    BVal.sadd_ov(CVal, Overflow);
  } else {
    BVal.ssub_ov(CVal, Overflow);
  }

  return !Overflow;
}

/// SimplifyAssociativeOrCommutative - This performs a few simplifications for
/// operators which are associative or commutative:
//
//  Commutative operators:
//
//  1. Order operands such that they are listed from right (least complex) to
//     left (most complex).  This puts constants before unary operators before
//     binary operators.
//
//  Associative operators:
//
//  2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
//  3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
//
//  Associative and commutative operators:
//
//  4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
//  5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
//  6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
//     if C1 and C2 are constants.
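//
//  For example, transform 2 turns "(X + 41) + 1" into "X + 42" because the
//  inner "41 + 1" folds, and transform 6 turns "(X & 7) & (Y & 12)" into
//  "(X & Y) & (7 & 12)" so the two constants can be folded together.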
bool InstCombiner::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
  Instruction::BinaryOps Opcode = I.getOpcode();
  bool Changed = false;

  // Order operands such that they are listed from right (least complex) to
  // left (most complex).  This puts constants before unary operators before
  // binary operators.
  if (I.isCommutative() && getComplexity(I.getOperand(0)) <
      getComplexity(I.getOperand(1)))
    Changed = !I.swapOperands();

  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));

  if (I.isAssociative()) {
    // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
    if (Op0 && Op0->getOpcode() == Opcode) {
      Value *A = Op0->getOperand(0);
      Value *B = Op0->getOperand(1);
      Value *C = I.getOperand(1);

      // Does "B op C" simplify?
      if (Value *V = SimplifyBinOp(Opcode, B, C, TD)) {
        // It simplifies to V.  Form "A op V".

        // Conservatively clear the optional flags, since they may not be
        // preserved by the reassociation.
        if (MaintainNoSignedWrap(I, B, C) &&
            (!Op0 || (isa<BinaryOperator>(Op0) && Op0->hasNoSignedWrap()))) {
          // Note: this is only valid because SimplifyBinOp doesn't look at
          // the operands to Op0.
          I.clearSubclassOptionalData();
          I.setHasNoSignedWrap(true);
        } else
          I.clearSubclassOptionalData();

    // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
    if (Op1 && Op1->getOpcode() == Opcode) {
      Value *A = I.getOperand(0);
      Value *B = Op1->getOperand(0);
      Value *C = Op1->getOperand(1);

      // Does "A op B" simplify?
      if (Value *V = SimplifyBinOp(Opcode, A, B, TD)) {
        // It simplifies to V.  Form "V op C".

        // Conservatively clear the optional flags, since they may not be
        // preserved by the reassociation.
        I.clearSubclassOptionalData();

  if (I.isAssociative() && I.isCommutative()) {
    // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
    if (Op0 && Op0->getOpcode() == Opcode) {
      Value *A = Op0->getOperand(0);
      Value *B = Op0->getOperand(1);
      Value *C = I.getOperand(1);

      // Does "C op A" simplify?
      if (Value *V = SimplifyBinOp(Opcode, C, A, TD)) {
        // It simplifies to V.  Form "V op B".

        // Conservatively clear the optional flags, since they may not be
        // preserved by the reassociation.
        I.clearSubclassOptionalData();

    // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
    if (Op1 && Op1->getOpcode() == Opcode) {
      Value *A = I.getOperand(0);
      Value *B = Op1->getOperand(0);
      Value *C = Op1->getOperand(1);

      // Does "C op A" simplify?
      if (Value *V = SimplifyBinOp(Opcode, C, A, TD)) {
        // It simplifies to V.  Form "B op V".

        // Conservatively clear the optional flags, since they may not be
        // preserved by the reassociation.
        I.clearSubclassOptionalData();

    // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
    // if C1 and C2 are constants.
    if (Op0 && Op1 &&
        Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode &&
        isa<Constant>(Op0->getOperand(1)) &&
        isa<Constant>(Op1->getOperand(1)) &&
        Op0->hasOneUse() && Op1->hasOneUse()) {
      Value *A = Op0->getOperand(0);
      Constant *C1 = cast<Constant>(Op0->getOperand(1));
      Value *B = Op1->getOperand(0);
      Constant *C2 = cast<Constant>(Op1->getOperand(1));

      Constant *Folded = ConstantExpr::get(Opcode, C1, C2);
      BinaryOperator *New = BinaryOperator::Create(Opcode, A, B);
      InsertNewInstWith(New, I);
      I.setOperand(0, New);
      I.setOperand(1, Folded);
      // Conservatively clear the optional flags, since they may not be
      // preserved by the reassociation.
      I.clearSubclassOptionalData();

  // No further simplifications.

/// LeftDistributesOverRight - Whether "X LOp (Y ROp Z)" is always equal to
/// "(X LOp Y) ROp (X LOp Z)".
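/// For instance, LeftDistributesOverRight(And, Or) holds because
/// "X & (Y | Z)" always equals "(X & Y) | (X & Z)".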
static bool LeftDistributesOverRight(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {
  switch (LOp) {
  case Instruction::And:
    // And distributes over Or and Xor.
    switch (ROp) {
    case Instruction::Or:
    case Instruction::Xor: return true;
    default:               return false;
    }
  case Instruction::Mul:
    // Multiplication distributes over addition and subtraction.
    switch (ROp) {
    case Instruction::Add:
    case Instruction::Sub: return true;
    default:               return false;
    }
  case Instruction::Or:
    // Or distributes over And.
    switch (ROp) {
    case Instruction::And: return true;
    default:               return false;
    }
  default:
    return false;
  }
}

/// RightDistributesOverLeft - Whether "(X LOp Y) ROp Z" is always equal to
/// "(X ROp Z) LOp (Y ROp Z)".
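/// For instance, RightDistributesOverLeft(Add, Mul) holds because
/// "(X + Y) * Z" always equals "(X * Z) + (Y * Z)".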
static bool RightDistributesOverLeft(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {
  if (Instruction::isCommutative(ROp))
    return LeftDistributesOverRight(ROp, LOp);
  // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
  // but this requires knowing that the addition does not overflow and other
  // such subtleties.
  return false;
}

/// SimplifyUsingDistributiveLaws - This tries to simplify binary operations
/// which some other binary operation distributes over either by factorizing
/// out common terms (eg "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this
/// results in simplifications (eg: "A & (B | C) -> (A&B) | (A&C)" if this is
/// a win).  Returns the simplified value, or null if it didn't simplify.
Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
  Instruction::BinaryOps TopLevelOpcode = I.getOpcode(); // op

  if (Op0 && Op1 && Op0->getOpcode() == Op1->getOpcode()) {
    // The instruction has the form "(A op' B) op (C op' D)".  Try to factorize
    Value *A = Op0->getOperand(0), *B = Op0->getOperand(1);
    Value *C = Op1->getOperand(0), *D = Op1->getOperand(1);
    Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'

    // Does "X op' Y" always equal "Y op' X"?
    bool InnerCommutative = Instruction::isCommutative(InnerOpcode);

    // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
    if (LeftDistributesOverRight(InnerOpcode, TopLevelOpcode))
      // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
      // commutative case, "(A op' B) op (C op' A)"?
      if (A == C || (InnerCommutative && A == D)) {
        // Consider forming "A op' (B op D)".
        // If "B op D" simplifies then it can be formed with no cost.
        Value *V = SimplifyBinOp(TopLevelOpcode, B, D, TD);
        // If "B op D" doesn't simplify then only go on if both of the existing
        // operations "A op' B" and "C op' D" will be zapped as no longer used.
        if (!V && Op0->hasOneUse() && Op1->hasOneUse())
          V = Builder->CreateBinOp(TopLevelOpcode, B, D, Op1->getName());
          V = Builder->CreateBinOp(InnerOpcode, A, V);

    // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
    if (RightDistributesOverLeft(TopLevelOpcode, InnerOpcode))
      // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
      // commutative case, "(A op' B) op (B op' D)"?
      if (B == D || (InnerCommutative && B == C)) {
        // Consider forming "(A op C) op' B".
        // If "A op C" simplifies then it can be formed with no cost.
        Value *V = SimplifyBinOp(TopLevelOpcode, A, C, TD);
        // If "A op C" doesn't simplify then only go on if both of the existing
        // operations "A op' B" and "C op' D" will be zapped as no longer used.
        if (!V && Op0->hasOneUse() && Op1->hasOneUse())
          V = Builder->CreateBinOp(TopLevelOpcode, A, C, Op0->getName());
          V = Builder->CreateBinOp(InnerOpcode, V, B);

  if (Op0 && RightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
    // The instruction has the form "(A op' B) op C".  See if expanding it out
    // to "(A op C) op' (B op C)" results in simplifications.
    Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
    Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'

    // Do "A op C" and "B op C" both simplify?
    if (Value *L = SimplifyBinOp(TopLevelOpcode, A, C, TD))
      if (Value *R = SimplifyBinOp(TopLevelOpcode, B, C, TD)) {
        // They do!  Return "L op' R".

        // If "L op' R" equals "A op' B" then "L op' R" is just the LHS.
        if ((L == A && R == B) ||
            (Instruction::isCommutative(InnerOpcode) && L == B && R == A))
        // Otherwise return "L op' R" if it simplifies.
        if (Value *V = SimplifyBinOp(InnerOpcode, L, R, TD))
        // Otherwise, create a new instruction.
        C = Builder->CreateBinOp(InnerOpcode, L, R);

  if (Op1 && LeftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) {
    // The instruction has the form "A op (B op' C)".  See if expanding it out
    // to "(A op B) op' (A op C)" results in simplifications.
    Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
    Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'

    // Do "A op B" and "A op C" both simplify?
    if (Value *L = SimplifyBinOp(TopLevelOpcode, A, B, TD))
      if (Value *R = SimplifyBinOp(TopLevelOpcode, A, C, TD)) {
        // They do!  Return "L op' R".

        // If "L op' R" equals "B op' C" then "L op' R" is just the RHS.
        if ((L == B && R == C) ||
            (Instruction::isCommutative(InnerOpcode) && L == C && R == B))
        // Otherwise return "L op' R" if it simplifies.
        if (Value *V = SimplifyBinOp(InnerOpcode, L, R, TD))
        // Otherwise, create a new instruction.
        A = Builder->CreateBinOp(InnerOpcode, L, R);

// dyn_castNegVal - Given a 'sub' instruction, return the RHS of the
// instruction if the LHS is a constant zero (which is the 'negate' form).
//
Value *InstCombiner::dyn_castNegVal(Value *V) const {
  if (BinaryOperator::isNeg(V))
    return BinaryOperator::getNegArgument(V);

  // Constants can be considered to be negated values if they can be folded.
  if (ConstantInt *C = dyn_cast<ConstantInt>(V))
    return ConstantExpr::getNeg(C);

  if (ConstantVector *C = dyn_cast<ConstantVector>(V))
    if (C->getType()->getElementType()->isIntegerTy())
      return ConstantExpr::getNeg(C);

// dyn_castFNegVal - Given a 'fsub' instruction, return the RHS of the
// instruction if the LHS is a constant negative zero (which is the 'negate'
// form).
//
Value *InstCombiner::dyn_castFNegVal(Value *V) const {
  if (BinaryOperator::isFNeg(V))
    return BinaryOperator::getFNegArgument(V);

  // Constants can be considered to be negated values if they can be folded.
  if (ConstantFP *C = dyn_cast<ConstantFP>(V))
    return ConstantExpr::getFNeg(C);

  if (ConstantVector *C = dyn_cast<ConstantVector>(V))
    if (C->getType()->getElementType()->isFloatingPointTy())
      return ConstantExpr::getFNeg(C);

static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO,
                                             InstCombiner *IC) {
  if (CastInst *CI = dyn_cast<CastInst>(&I)) {
    return IC->Builder->CreateCast(CI->getOpcode(), SO, I.getType());
  }

  // Figure out if the constant is the left or the right argument.
  bool ConstIsRHS = isa<Constant>(I.getOperand(1));
  Constant *ConstOperand = cast<Constant>(I.getOperand(ConstIsRHS));

  if (Constant *SOC = dyn_cast<Constant>(SO)) {
    if (ConstIsRHS)
      return ConstantExpr::get(I.getOpcode(), SOC, ConstOperand);
    return ConstantExpr::get(I.getOpcode(), ConstOperand, SOC);
  }

  Value *Op0 = SO, *Op1 = ConstOperand;

  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I))
    return IC->Builder->CreateBinOp(BO->getOpcode(), Op0, Op1,
                                    SO->getName()+".op");
  if (ICmpInst *CI = dyn_cast<ICmpInst>(&I))
    return IC->Builder->CreateICmp(CI->getPredicate(), Op0, Op1,
                                   SO->getName()+".cmp");
  if (FCmpInst *CI = dyn_cast<FCmpInst>(&I))
    return IC->Builder->CreateFCmp(CI->getPredicate(), Op0, Op1,
                                   SO->getName()+".cmp");
  llvm_unreachable("Unknown binary instruction type!");
}

// FoldOpIntoSelect - Given an instruction with a select as one operand and a
// constant as the other operand, try to fold the binary operator into the
// select arguments.  This also works for Cast instructions, which obviously do
// not have a second operand.
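//
// For example, "add (select %cond, i32 1, i32 2), 4" folds to
// "select %cond, i32 5, i32 6" since both select arms are constants.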
Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) {
  // Don't modify shared select instructions.
  if (!SI->hasOneUse()) return 0;
  Value *TV = SI->getOperand(1);
  Value *FV = SI->getOperand(2);

  if (isa<Constant>(TV) || isa<Constant>(FV)) {
    // Bool selects with constant operands can be folded to logical ops.
    if (SI->getType()->isIntegerTy(1)) return 0;

    // If it's a bitcast involving vectors, make sure it has the same number of
    // elements on both sides.
    if (BitCastInst *BC = dyn_cast<BitCastInst>(&Op)) {
      VectorType *DestTy = dyn_cast<VectorType>(BC->getDestTy());
      VectorType *SrcTy = dyn_cast<VectorType>(BC->getSrcTy());

      // Verify that either both or neither are vectors.
      if ((SrcTy == NULL) != (DestTy == NULL)) return 0;
      // If vectors, verify that they have the same number of elements.
      if (SrcTy && SrcTy->getNumElements() != DestTy->getNumElements())

    Value *SelectTrueVal = FoldOperationIntoSelectOperand(Op, TV, this);
    Value *SelectFalseVal = FoldOperationIntoSelectOperand(Op, FV, this);

    return SelectInst::Create(SI->getCondition(),
                              SelectTrueVal, SelectFalseVal);

/// FoldOpIntoPhi - Given a binary operator, cast instruction, or select which
/// has a PHI node as operand #0, see if we can fold the instruction into the
/// PHI (which is only possible if all operands to the PHI are constants).
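///
/// For example, "add (phi [1, %bb1], [2, %bb2]), 10" becomes
/// "phi [11, %bb1], [12, %bb2]" when all incoming values are constants.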
Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
  PHINode *PN = cast<PHINode>(I.getOperand(0));
  unsigned NumPHIValues = PN->getNumIncomingValues();
  if (NumPHIValues == 0)

  // We normally only transform phis with a single use.  However, if a PHI has
  // multiple uses and they are all the same operation, we can fold *all* of the
  // uses into the PHI.
  if (!PN->hasOneUse()) {
    // Walk the use list for the instruction, comparing them to I.
    for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end();
      Instruction *User = cast<Instruction>(*UI);
      if (User != &I && !I.isIdenticalTo(User))

    // Otherwise, we can replace *all* users with the new PHI we form.

  // Check to see if all of the operands of the PHI are simple constants
  // (constantint/constantfp/undef).  If there is one non-constant value,
  // remember the BB it is in.  If there is more than one or if *it* is a PHI,
  // bail out.  We don't do arbitrary constant expressions here because moving
  // their computation can be expensive without a cost model.
  BasicBlock *NonConstBB = 0;
  for (unsigned i = 0; i != NumPHIValues; ++i) {
    Value *InVal = PN->getIncomingValue(i);
    if (isa<Constant>(InVal) && !isa<ConstantExpr>(InVal))

    if (isa<PHINode>(InVal)) return 0;  // Itself a phi.
    if (NonConstBB) return 0;  // More than one non-const value.

    NonConstBB = PN->getIncomingBlock(i);

    // If the InVal is an invoke at the end of the pred block, then we can't
    // insert a computation after it without breaking the edge.
    if (InvokeInst *II = dyn_cast<InvokeInst>(InVal))
      if (II->getParent() == NonConstBB)

    // If the incoming non-constant value is in I's block, we will remove one
    // instruction, but insert another equivalent one, leading to infinite
    if (NonConstBB == I.getParent())

  // If there is exactly one non-constant value, we can insert a copy of the
  // operation in that block.  However, if this is a critical edge, we would be
  // inserting the computation on some other paths (e.g. inside a loop).  Only
  // do this if the pred block is unconditionally branching into the phi block.
  if (NonConstBB != 0) {
    BranchInst *BI = dyn_cast<BranchInst>(NonConstBB->getTerminator());
    if (!BI || !BI->isUnconditional()) return 0;

  // Okay, we can do the transformation: create the new PHI node.
  PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues());
  InsertNewInstBefore(NewPN, *PN);

  // If we are going to have to insert a new computation, do so right before
  // the predecessor's terminator.
  Builder->SetInsertPoint(NonConstBB->getTerminator());

  // Next, add all of the operands to the PHI.
  if (SelectInst *SI = dyn_cast<SelectInst>(&I)) {
    // We only currently try to fold the condition of a select when it is a
    // phi, not the true/false values.
    Value *TrueV = SI->getTrueValue();
    Value *FalseV = SI->getFalseValue();
    BasicBlock *PhiTransBB = PN->getParent();
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      BasicBlock *ThisBB = PN->getIncomingBlock(i);
      Value *TrueVInPred = TrueV->DoPHITranslation(PhiTransBB, ThisBB);
      Value *FalseVInPred = FalseV->DoPHITranslation(PhiTransBB, ThisBB);
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
        InV = InC->isNullValue() ? FalseVInPred : TrueVInPred;
        InV = Builder->CreateSelect(PN->getIncomingValue(i),
                                    TrueVInPred, FalseVInPred, "phitmp");
      NewPN->addIncoming(InV, ThisBB);
  } else if (CmpInst *CI = dyn_cast<CmpInst>(&I)) {
    Constant *C = cast<Constant>(I.getOperand(1));
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
        InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C);
      else if (isa<ICmpInst>(CI))
        InV = Builder->CreateICmp(CI->getPredicate(), PN->getIncomingValue(i),
        InV = Builder->CreateFCmp(CI->getPredicate(), PN->getIncomingValue(i),
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
  } else if (I.getNumOperands() == 2) {
    Constant *C = cast<Constant>(I.getOperand(1));
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
        InV = ConstantExpr::get(I.getOpcode(), InC, C);
        InV = Builder->CreateBinOp(cast<BinaryOperator>(I).getOpcode(),
                                   PN->getIncomingValue(i), C, "phitmp");
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    CastInst *CI = cast<CastInst>(&I);
    Type *RetTy = CI->getType();
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
        InV = ConstantExpr::getCast(CI->getOpcode(), InC, RetTy);
        InV = Builder->CreateCast(CI->getOpcode(),
                                  PN->getIncomingValue(i), I.getType(),
                                  "phitmp");
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));

  for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end();
    Instruction *User = cast<Instruction>(*UI++);
    if (User == &I) continue;
    ReplaceInstUsesWith(*User, NewPN);
    EraseInstFromFunction(*User);
  return ReplaceInstUsesWith(I, NewPN);

/// FindElementAtOffset - Given a type and a constant offset, determine whether
/// or not there is a sequence of GEP indices into the type that will land us
/// at the specified offset.  If so, fill them into NewIndices and return the
/// resultant element type, otherwise return null.
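///
/// For example, with 4-byte i32 and no padding, an offset of 8 into the type
/// "{ i32, i32, i32 }" fills NewIndices with (0, 2) and returns i32, since the
/// third field starts exactly 8 bytes into the struct.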
Type *InstCombiner::FindElementAtOffset(Type *Ty, int64_t Offset,
                                        SmallVectorImpl<Value*> &NewIndices) {
  if (!Ty->isSized()) return 0;

  // Start with the index over the outer type.  Note that the type size
  // might be zero (even if the offset isn't zero) if the indexed type
  // is something like [0 x {int, int}]
  Type *IntPtrTy = TD->getIntPtrType(Ty->getContext());
  int64_t FirstIdx = 0;
  if (int64_t TySize = TD->getTypeAllocSize(Ty)) {
    FirstIdx = Offset/TySize;
    Offset -= FirstIdx*TySize;

    // Handle hosts where % returns negative instead of values [0..TySize).
    assert((uint64_t)Offset < (uint64_t)TySize && "Out of range offset");

  NewIndices.push_back(ConstantInt::get(IntPtrTy, FirstIdx));

  // Index into the types.  If we fail, set OrigBase to null.
    // Indexing into tail padding between struct/array elements.
    if (uint64_t(Offset*8) >= TD->getTypeSizeInBits(Ty))

    if (StructType *STy = dyn_cast<StructType>(Ty)) {
      const StructLayout *SL = TD->getStructLayout(STy);
      assert(Offset < (int64_t)SL->getSizeInBytes() &&
             "Offset must stay within the indexed type");

      unsigned Elt = SL->getElementContainingOffset(Offset);
      NewIndices.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
                                            Elt));

      Offset -= SL->getElementOffset(Elt);
      Ty = STy->getElementType(Elt);
    } else if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
      uint64_t EltSize = TD->getTypeAllocSize(AT->getElementType());
      assert(EltSize && "Cannot index into a zero-sized array");
      NewIndices.push_back(ConstantInt::get(IntPtrTy, Offset/EltSize));
      Ty = AT->getElementType();

    // Otherwise, we can't index into the middle of this atomic type, bail.

static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) {
  // If this GEP has only 0 indices, it is the same pointer as
  // Src. If Src is not a trivial GEP too, don't combine
  if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&

Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
  SmallVector<Value*, 8> Ops(GEP.op_begin(), GEP.op_end());

  if (Value *V = SimplifyGEPInst(Ops, TD))
    return ReplaceInstUsesWith(GEP, V);

  Value *PtrOp = GEP.getOperand(0);

  // Eliminate unneeded casts for indices, and replace indices which displace
  // by multiples of a zero size type with zero.
    bool MadeChange = false;
    Type *IntPtrTy = TD->getIntPtrType(GEP.getContext());

    gep_type_iterator GTI = gep_type_begin(GEP);
    for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end();
         I != E; ++I, ++GTI) {
      // Skip indices into struct types.
      SequentialType *SeqTy = dyn_cast<SequentialType>(*GTI);
      if (!SeqTy) continue;

      // If the element type has zero size then any index over it is equivalent
      // to an index of zero, so replace it with zero if it is not zero already.
      if (SeqTy->getElementType()->isSized() &&
          TD->getTypeAllocSize(SeqTy->getElementType()) == 0)
        if (!isa<Constant>(*I) || !cast<Constant>(*I)->isNullValue()) {
          *I = Constant::getNullValue(IntPtrTy);

      if ((*I)->getType() != IntPtrTy) {
        // If we are using a wider index than needed for this platform, shrink
        // it to what we need.  If narrower, sign-extend it to what we need.
        // This explicit cast can make subsequent optimizations more obvious.
        *I = Builder->CreateIntCast(*I, IntPtrTy, true);

    if (MadeChange) return &GEP;

  // Combine Indices - If the source pointer to this getelementptr instruction
  // is a getelementptr instruction, combine the indices of the two
  // getelementptr instructions into a single instruction.
  if (GEPOperator *Src = dyn_cast<GEPOperator>(PtrOp)) {
    if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))

    // Note that if our source is a gep chain itself that we wait for that
    // chain to be resolved before we perform this transformation.  This
    // avoids us creating a TON of code in some cases.
    if (GEPOperator *SrcGEP =
          dyn_cast<GEPOperator>(Src->getOperand(0)))
      if (SrcGEP->getNumOperands() == 2 && shouldMergeGEPs(*Src, *SrcGEP))
        return 0;   // Wait until our source is folded to completion.

    SmallVector<Value*, 8> Indices;

    // Find out whether the last index in the source GEP is a sequential idx.
    bool EndsWithSequential = false;
    for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
      EndsWithSequential = !(*I)->isStructTy();

    // Can we combine the two pointer arithmetics offsets?
    if (EndsWithSequential) {
      // Replace: gep (gep %P, long B), long A, ...
      // With:    T = long A+B; gep %P, T, ...
      Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
      Value *GO1 = GEP.getOperand(1);
      if (SO1 == Constant::getNullValue(SO1->getType())) {
      } else if (GO1 == Constant::getNullValue(GO1->getType())) {
        // If they aren't the same type, then the input hasn't been processed
        // by the loop above yet (which canonicalizes sequential index types to
        // intptr_t).  Just avoid transforming this until the input has been
        if (SO1->getType() != GO1->getType())
        Sum = Builder->CreateAdd(SO1, GO1, PtrOp->getName()+".sum");

      // Update the GEP in place if possible.
      if (Src->getNumOperands() == 2) {
        GEP.setOperand(0, Src->getOperand(0));
        GEP.setOperand(1, Sum);
      Indices.append(Src->op_begin()+1, Src->op_end()-1);
      Indices.push_back(Sum);
      Indices.append(GEP.op_begin()+2, GEP.op_end());
    } else if (isa<Constant>(*GEP.idx_begin()) &&
               cast<Constant>(*GEP.idx_begin())->isNullValue() &&
               Src->getNumOperands() != 1) {
      // Otherwise we can do the fold if the first index of the GEP is a zero
      Indices.append(Src->op_begin()+1, Src->op_end());
      Indices.append(GEP.idx_begin()+1, GEP.idx_end());

    if (!Indices.empty())
      return (GEP.isInBounds() && Src->isInBounds()) ?
        GetElementPtrInst::CreateInBounds(Src->getOperand(0), Indices,
        GetElementPtrInst::Create(Src->getOperand(0), Indices, GEP.getName());

  // Handle gep(bitcast x) and gep(gep x, 0, 0, 0).
  Value *StrippedPtr = PtrOp->stripPointerCasts();
  PointerType *StrippedPtrTy = cast<PointerType>(StrippedPtr->getType());
  if (StrippedPtr != PtrOp &&
      StrippedPtrTy->getAddressSpace() == GEP.getPointerAddressSpace()) {

    bool HasZeroPointerIndex = false;
    if (ConstantInt *C = dyn_cast<ConstantInt>(GEP.getOperand(1)))
      HasZeroPointerIndex = C->isZero();

    // Transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ...
    // into     : GEP [10 x i8]* X, i32 0, ...
    //
    // Likewise, transform: GEP (bitcast i8* X to [0 x i8]*), i32 0, ...
    //           into     : GEP i8* X, ...
    //
    // This occurs when the program declares an array extern like "int X[];"
    if (HasZeroPointerIndex) {
      PointerType *CPTy = cast<PointerType>(PtrOp->getType());
      if (ArrayType *CATy =
          dyn_cast<ArrayType>(CPTy->getElementType())) {
        // GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ?
        if (CATy->getElementType() == StrippedPtrTy->getElementType()) {
          SmallVector<Value*, 8> Idx(GEP.idx_begin()+1, GEP.idx_end());
          GetElementPtrInst *Res =
            GetElementPtrInst::Create(StrippedPtr, Idx, GEP.getName());
          Res->setIsInBounds(GEP.isInBounds());

        if (ArrayType *XATy =
            dyn_cast<ArrayType>(StrippedPtrTy->getElementType())) {
          // GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ?
          if (CATy->getElementType() == XATy->getElementType()) {
            // -> GEP [10 x i8]* X, i32 0, ...
            // At this point, we know that the cast source type is a pointer
            // to an array of the same type as the destination pointer
            // array.  Because the array type is never stepped over (there
            // is a leading zero) we can fold the cast into this GEP.
            GEP.setOperand(0, StrippedPtr);
    } else if (GEP.getNumOperands() == 2) {
      // Transform things like:
      // %t = getelementptr i32* bitcast ([2 x i32]* %str to i32*), i32 %V
      // into:  %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast
      Type *SrcElTy = StrippedPtrTy->getElementType();
      Type *ResElTy = cast<PointerType>(PtrOp->getType())->getElementType();
      if (TD && SrcElTy->isArrayTy() &&
          TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType()) ==
          TD->getTypeAllocSize(ResElTy)) {
        Idx[0] = Constant::getNullValue(Type::getInt32Ty(GEP.getContext()));
        Idx[1] = GEP.getOperand(1);
        Value *NewGEP = GEP.isInBounds() ?
          Builder->CreateInBoundsGEP(StrippedPtr, Idx, GEP.getName()) :
          Builder->CreateGEP(StrippedPtr, Idx, GEP.getName());
        // V and GEP are both pointer types --> BitCast
        return new BitCastInst(NewGEP, GEP.getType());

      // Transform things like:
      // getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp
      //   (where tmp = 8*tmp2) into:
      // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast
      if (TD && SrcElTy->isArrayTy() && ResElTy->isIntegerTy(8)) {
        uint64_t ArrayEltSize =
          TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType());

        // Check to see if "tmp" is a scale by a multiple of ArrayEltSize.  We
        // allow either a mul, shift, or constant here.
        ConstantInt *Scale = 0;
        if (ArrayEltSize == 1) {
          NewIdx = GEP.getOperand(1);
          Scale = ConstantInt::get(cast<IntegerType>(NewIdx->getType()), 1);
        } else if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP.getOperand(1))) {
          NewIdx = ConstantInt::get(CI->getType(), 1);
        } else if (Instruction *Inst = dyn_cast<Instruction>(GEP.getOperand(1))) {
          if (Inst->getOpcode() == Instruction::Shl &&
              isa<ConstantInt>(Inst->getOperand(1))) {
            ConstantInt *ShAmt = cast<ConstantInt>(Inst->getOperand(1));
            uint32_t ShAmtVal = ShAmt->getLimitedValue(64);
            Scale = ConstantInt::get(cast<IntegerType>(Inst->getType()),
            NewIdx = Inst->getOperand(0);
          } else if (Inst->getOpcode() == Instruction::Mul &&
                     isa<ConstantInt>(Inst->getOperand(1))) {
            Scale = cast<ConstantInt>(Inst->getOperand(1));
            NewIdx = Inst->getOperand(0);

        // If the index will be to exactly the right offset with the scale taken
        // out, perform the transformation. Note, we don't know whether Scale is
        // signed or not. We'll use unsigned version of division/modulo
        // operation after making sure Scale doesn't have the sign bit set.
        if (ArrayEltSize && Scale && Scale->getSExtValue() >= 0LL &&
            Scale->getZExtValue() % ArrayEltSize == 0) {
          Scale = ConstantInt::get(Scale->getType(),
                                   Scale->getZExtValue() / ArrayEltSize);
          if (Scale->getZExtValue() != 1) {
            Constant *C = ConstantExpr::getIntegerCast(Scale, NewIdx->getType(),
            NewIdx = Builder->CreateMul(NewIdx, C, "idxscale");

          // Insert the new GEP instruction.
          Idx[0] = Constant::getNullValue(Type::getInt32Ty(GEP.getContext()));
          Value *NewGEP = GEP.isInBounds() ?
            Builder->CreateInBoundsGEP(StrippedPtr, Idx, GEP.getName()) :
            Builder->CreateGEP(StrippedPtr, Idx, GEP.getName());
          // The NewGEP must be pointer typed, so must the old one -> BitCast
          return new BitCastInst(NewGEP, GEP.getType());

  /// See if we can simplify:
  ///   X = bitcast A* to B*
  ///   Y = gep X, <...constant indices...>
  /// into a gep of the original struct.  This is important for SROA and alias
  /// analysis of unions.  If "A" is also a bitcast, wait for A/X to be merged.
  if (BitCastInst *BCI = dyn_cast<BitCastInst>(PtrOp)) {
        !isa<BitCastInst>(BCI->getOperand(0)) && GEP.hasAllConstantIndices() &&
        StrippedPtrTy->getAddressSpace() == GEP.getPointerAddressSpace()) {

      // Determine how much the GEP moves the pointer.  We are guaranteed to get
      // a constant back from EmitGEPOffset.
      ConstantInt *OffsetV = cast<ConstantInt>(EmitGEPOffset(&GEP));
      int64_t Offset = OffsetV->getSExtValue();

      // If this GEP instruction doesn't move the pointer, just replace the GEP
      // with a bitcast of the real input to the dest type.
        // If the bitcast is of an allocation, and the allocation will be
        // converted to match the type of the cast, don't touch this.
        if (isa<AllocaInst>(BCI->getOperand(0)) ||
            isMalloc(BCI->getOperand(0))) {
          // See if the bitcast simplifies, if so, don't nuke this GEP yet.
          if (Instruction *I = visitBitCast(*BCI)) {
              BCI->getParent()->getInstList().insert(BCI, I);
              ReplaceInstUsesWith(*BCI, I);
        return new BitCastInst(BCI->getOperand(0), GEP.getType());

      // Otherwise, if the offset is non-zero, we need to find out if there is a
      // field at Offset in 'A's type.  If so, we can pull the cast through the
      SmallVector<Value*, 8> NewIndices;
      Type *InTy =
        cast<PointerType>(BCI->getOperand(0)->getType())->getElementType();
      if (FindElementAtOffset(InTy, Offset, NewIndices)) {
        Value *NGEP = GEP.isInBounds() ?
          Builder->CreateInBoundsGEP(BCI->getOperand(0), NewIndices) :
          Builder->CreateGEP(BCI->getOperand(0), NewIndices);

        if (NGEP->getType() == GEP.getType())
          return ReplaceInstUsesWith(GEP, NGEP);
        NGEP->takeName(&GEP);
        return new BitCastInst(NGEP, GEP.getType());

static bool IsOnlyNullComparedAndFreed(Value *V, SmallVectorImpl<WeakVH> &Users,
  for (Value::use_iterator UI = V->use_begin(), UE = V->use_end();
    if (isFreeCall(U)) {
    if (ICmpInst *ICI = dyn_cast<ICmpInst>(U)) {
      if (ICI->isEquality() && isa<ConstantPointerNull>(ICI->getOperand(1))) {
        Users.push_back(ICI);
    if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
      if (IsOnlyNullComparedAndFreed(BCI, Users, Depth+1)) {
        Users.push_back(BCI);
    if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      if (IsOnlyNullComparedAndFreed(GEPI, Users, Depth+1)) {
        Users.push_back(GEPI);
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
      if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
          II->getIntrinsicID() == Intrinsic::lifetime_end) {
        Users.push_back(II);

Instruction *InstCombiner::visitMalloc(Instruction &MI) {
  // If we have a malloc call which is only used in any amount of comparisons
  // to null and free calls, delete the calls and replace the comparisons with
  // true or false as appropriate.
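  //
  // For example, in "%p = malloc(n); if (%p == null) ...; free(%p)" where %p
  // has no other uses, the compare folds to false and both calls go away.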
  SmallVector<WeakVH, 64> Users;
  if (IsOnlyNullComparedAndFreed(&MI, Users)) {
    for (unsigned i = 0, e = Users.size(); i != e; ++i) {
      Instruction *I = cast_or_null<Instruction>(&*Users[i]);
      if (ICmpInst *C = dyn_cast<ICmpInst>(I)) {
        ReplaceInstUsesWith(*C,
                            ConstantInt::get(Type::getInt1Ty(C->getContext()),
                                             C->isFalseWhenEqual()));
      } else if (isa<BitCastInst>(I) || isa<GetElementPtrInst>(I)) {
        ReplaceInstUsesWith(*I, UndefValue::get(I->getType()));
      EraseInstFromFunction(*I);
    return EraseInstFromFunction(MI);

Instruction *InstCombiner::visitFree(CallInst &FI) {
  Value *Op = FI.getArgOperand(0);

  // free undef -> unreachable.
  if (isa<UndefValue>(Op)) {
    // Insert a new store to null because we cannot modify the CFG here.
    Builder->CreateStore(ConstantInt::getTrue(FI.getContext()),
                         UndefValue::get(Type::getInt1PtrTy(FI.getContext())));
    return EraseInstFromFunction(FI);

  // If we have 'free null' delete the instruction.  This can happen in stl
  // code when lots of inlining happens.
  if (isa<ConstantPointerNull>(Op))
    return EraseInstFromFunction(FI);

Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
  // Change br (not X), label True, label False to: br X, label False, True
  BasicBlock *TrueDest;
  BasicBlock *FalseDest;
  if (match(&BI, m_Br(m_Not(m_Value(X)), TrueDest, FalseDest)) &&
      !isa<Constant>(X)) {
    // Swap Destinations and condition...
    BI.setSuccessor(0, FalseDest);
    BI.setSuccessor(1, TrueDest);

  // Canonicalize fcmp_one -> fcmp_oeq
  FCmpInst::Predicate FPred; Value *Y;
  if (match(&BI, m_Br(m_FCmp(FPred, m_Value(X), m_Value(Y)),
                      TrueDest, FalseDest)) &&
      BI.getCondition()->hasOneUse())
    if (FPred == FCmpInst::FCMP_ONE || FPred == FCmpInst::FCMP_OLE ||
        FPred == FCmpInst::FCMP_OGE) {
      FCmpInst *Cond = cast<FCmpInst>(BI.getCondition());
      Cond->setPredicate(FCmpInst::getInversePredicate(FPred));

      // Swap Destinations and condition.
      BI.setSuccessor(0, FalseDest);
      BI.setSuccessor(1, TrueDest);

  // Canonicalize icmp_ne -> icmp_eq
  ICmpInst::Predicate IPred;
  if (match(&BI, m_Br(m_ICmp(IPred, m_Value(X), m_Value(Y)),
                      TrueDest, FalseDest)) &&
      BI.getCondition()->hasOneUse())
    if (IPred == ICmpInst::ICMP_NE  || IPred == ICmpInst::ICMP_ULE ||
        IPred == ICmpInst::ICMP_SLE || IPred == ICmpInst::ICMP_UGE ||
        IPred == ICmpInst::ICMP_SGE) {
      ICmpInst *Cond = cast<ICmpInst>(BI.getCondition());
      Cond->setPredicate(ICmpInst::getInversePredicate(IPred));
      // Swap Destinations and condition.
      BI.setSuccessor(0, FalseDest);
      BI.setSuccessor(1, TrueDest);

Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
  Value *Cond = SI.getCondition();
  if (Instruction *I = dyn_cast<Instruction>(Cond)) {
    if (I->getOpcode() == Instruction::Add)
      if (ConstantInt *AddRHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
        // change 'switch (X+4) case 1:' into 'switch (X) case -3'
        unsigned NumCases = SI.getNumCases();
        // Skip the first item since that's the default case.
        for (unsigned i = 1; i < NumCases; ++i) {
          ConstantInt *CaseVal = SI.getCaseValue(i);
          Constant *NewCaseVal = ConstantExpr::getSub(cast<Constant>(CaseVal),
          assert(isa<ConstantInt>(NewCaseVal) &&
                 "Result of expression should be constant");
          SI.setSuccessorValue(i, cast<ConstantInt>(NewCaseVal));
        SI.setCondition(I->getOperand(0));

Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
  Value *Agg = EV.getAggregateOperand();

  if (!EV.hasIndices())
    return ReplaceInstUsesWith(EV, Agg);

  if (Constant *C = dyn_cast<Constant>(Agg)) {
    if (isa<UndefValue>(C))
      return ReplaceInstUsesWith(EV, UndefValue::get(EV.getType()));

    if (isa<ConstantAggregateZero>(C))
      return ReplaceInstUsesWith(EV, Constant::getNullValue(EV.getType()));

    if (isa<ConstantArray>(C) || isa<ConstantStruct>(C)) {
      // Extract the element indexed by the first index out of the constant
      Value *V = C->getOperand(*EV.idx_begin());
      if (EV.getNumIndices() > 1)
        // Extract the remaining indices out of the constant indexed by the
        return ExtractValueInst::Create(V, EV.getIndices().slice(1));
      return ReplaceInstUsesWith(EV, V);

    return 0; // Can't handle other constants

  if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
    // We're extracting from an insertvalue instruction, compare the indices
    const unsigned *exti, *exte, *insi, *inse;
    for (exti = EV.idx_begin(), insi = IV->idx_begin(),
         exte = EV.idx_end(), inse = IV->idx_end();
         exti != exte && insi != inse;
        // The insert and extract both reference distinctly different elements.
        // This means the extract is not influenced by the insert, and we can
        // replace the aggregate operand of the extract with the aggregate
        // operand of the insert. i.e., replace
        // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
        // %E = extractvalue { i32, { i32 } } %I, 0
        // with
        // %E = extractvalue { i32, { i32 } } %A, 0
        return ExtractValueInst::Create(IV->getAggregateOperand(),

    if (exti == exte && insi == inse)
      // Both iterators are at the end: Index lists are identical. Replace
      // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
      // %C = extractvalue { i32, { i32 } } %B, 1, 0
      return ReplaceInstUsesWith(EV, IV->getInsertedValueOperand());

      // The extract list is a prefix of the insert list. i.e. replace
      // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
      // %E = extractvalue { i32, { i32 } } %I, 1
      // with
      // %X = extractvalue { i32, { i32 } } %A, 1
      // %E = insertvalue { i32 } %X, i32 42, 0
      // by switching the order of the insert and extract (though the
      // insertvalue should be left in, since it may have other uses).
      Value *NewEV = Builder->CreateExtractValue(IV->getAggregateOperand(),
      return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
                                     makeArrayRef(insi, inse));

      // The insert list is a prefix of the extract list
      // We can simply remove the common indices from the extract and make it
      // operate on the inserted value instead of the insertvalue result.
      // i.e., replace
      // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
      // %E = extractvalue { i32, { i32 } } %I, 1, 0
      // with
      // %E = extractvalue { i32 } { i32 42 }, 0
      return ExtractValueInst::Create(IV->getInsertedValueOperand(),
                                      makeArrayRef(exti, exte));

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Agg)) {
    // We're extracting from an intrinsic, see if we're the only user, which
    // allows us to simplify multiple result intrinsics to simpler things that
    // just get one value.
    if (II->hasOneUse()) {
      // Check if we're grabbing the overflow bit or the result of a 'with
      // overflow' intrinsic.  If it's the latter we can remove the intrinsic
      // and replace it with a traditional binary instruction.
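      //
      // For example, if only the first element of
      //   %res = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
      // is ever extracted, the call is replaced by a plain "add i32 %a, %b".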
      switch (II->getIntrinsicID()) {
      case Intrinsic::uadd_with_overflow:
      case Intrinsic::sadd_with_overflow:
        if (*EV.idx_begin() == 0) {  // Normal result.
          Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
          ReplaceInstUsesWith(*II, UndefValue::get(II->getType()));
          EraseInstFromFunction(*II);
          return BinaryOperator::CreateAdd(LHS, RHS);

        // If the normal result of the add is dead, and the RHS is a constant,
        // we can transform this into a range comparison.
        // overflow = uadd a, -4  -->  overflow = icmp ugt a, 3
        if (II->getIntrinsicID() == Intrinsic::uadd_with_overflow)
          if (ConstantInt *CI = dyn_cast<ConstantInt>(II->getArgOperand(1)))
            return new ICmpInst(ICmpInst::ICMP_UGT, II->getArgOperand(0),
                                ConstantExpr::getNot(CI));
      case Intrinsic::usub_with_overflow:
      case Intrinsic::ssub_with_overflow:
        if (*EV.idx_begin() == 0) {  // Normal result.
          Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
          ReplaceInstUsesWith(*II, UndefValue::get(II->getType()));
          EraseInstFromFunction(*II);
          return BinaryOperator::CreateSub(LHS, RHS);
      case Intrinsic::umul_with_overflow:
      case Intrinsic::smul_with_overflow:
        if (*EV.idx_begin() == 0) {  // Normal result.
          Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
          ReplaceInstUsesWith(*II, UndefValue::get(II->getType()));
          EraseInstFromFunction(*II);
          return BinaryOperator::CreateMul(LHS, RHS);

  if (LoadInst *L = dyn_cast<LoadInst>(Agg))
    // If the (non-volatile) load only has one use, we can rewrite this to a
    // load from a GEP. This reduces the size of the load.
    // FIXME: If a load is used only by extractvalue instructions then this
    //        could be done regardless of having multiple uses.
    if (L->isSimple() && L->hasOneUse()) {
      // extractvalue has integer indices, getelementptr has Value*s. Convert.
      SmallVector<Value*, 4> Indices;
      // Prefix an i32 0 since we need the first element.
      Indices.push_back(Builder->getInt32(0));
      for (ExtractValueInst::idx_iterator I = EV.idx_begin(), E = EV.idx_end();
        Indices.push_back(Builder->getInt32(*I));

      // We need to insert these at the location of the old load, not at that of
      // the extractvalue.
      Builder->SetInsertPoint(L->getParent(), L);
      Value *GEP = Builder->CreateInBoundsGEP(L->getPointerOperand(), Indices);
      // Returning the load directly will cause the main loop to insert it in
      // the wrong spot, so use ReplaceInstUsesWith().
      return ReplaceInstUsesWith(EV, Builder->CreateLoad(GEP));

  // We could simplify extracts from other values. Note that nested extracts may
  // already be simplified implicitly by the above: extract (extract (insert) )
  // will be translated into extract ( insert ( extract ) ) first and then just
  // the value inserted, if appropriate. Similarly for extracts from single-use
  // loads: extract (extract (load)) will be translated to extract (load (gep))
  // and if again single-use then via load (gep (gep)) to load (gep).
  // However, double extracts from e.g. function arguments or return values
  // aren't handled yet.

/// TryToSinkInstruction - Try to move the specified instruction from its
/// current block into the beginning of DestBlock, which can only happen if
/// it's safe to move the instruction past all of the instructions between it
/// and the end of its block.
static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {
  assert(I->hasOneUse() && "Invariants didn't hold!");

  // Cannot move control-flow-involving, volatile loads, vaarg, etc.
  if (isa<PHINode>(I) || isa<LandingPadInst>(I) || I->mayHaveSideEffects() ||
      isa<TerminatorInst>(I))

  // Do not sink alloca instructions out of the entry block.
  if (isa<AllocaInst>(I) && I->getParent() ==
        &DestBlock->getParent()->getEntryBlock())

  // We can only sink load instructions if there is nothing between the load
  // and the end of block that could change the value.
  if (I->mayReadFromMemory()) {
    for (BasicBlock::iterator Scan = I, E = I->getParent()->end();
      if (Scan->mayWriteToMemory())

  BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt();
  I->moveBefore(InsertPos);

/// AddReachableCodeToWorklist - Walk the function in depth-first order, adding
/// all reachable code to the worklist.
///
/// This has a couple of tricks to make the code faster and more powerful.  In
/// particular, we constant fold and DCE instructions as we go, to avoid adding
/// them to the worklist (this significantly speeds up instcombine on code
/// where many instructions are dead or constant).  Additionally, if we find a
/// branch whose condition is a known constant, we only visit the reachable
/// successors.
static bool AddReachableCodeToWorklist(BasicBlock *BB,
                                       SmallPtrSet<BasicBlock*, 64> &Visited,
                                       InstCombiner &IC,
                                       const TargetData *TD) {
  bool MadeIRChange = false;
  SmallVector<BasicBlock*, 256> Worklist;
  Worklist.push_back(BB);

  SmallVector<Instruction*, 128> InstrsForInstCombineWorklist;
  DenseMap<ConstantExpr*, Constant*> FoldedConstants;

    BB = Worklist.pop_back_val();

    // We have now visited this block!  If we've already been here, ignore it.
    if (!Visited.insert(BB)) continue;

    for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
      Instruction *Inst = BBI++;

      // DCE instruction if trivially dead.
      if (isInstructionTriviallyDead(Inst)) {
        DEBUG(errs() << "IC: DCE: " << *Inst << '\n');
        Inst->eraseFromParent();

      // ConstantProp instruction if trivially constant.
      if (!Inst->use_empty() && isa<Constant>(Inst->getOperand(0)))
        if (Constant *C = ConstantFoldInstruction(Inst, TD)) {
          DEBUG(errs() << "IC: ConstFold to: " << *C << " from: "
          Inst->replaceAllUsesWith(C);
          Inst->eraseFromParent();

        // See if we can constant fold its operands.
        for (User::op_iterator i = Inst->op_begin(), e = Inst->op_end();
          ConstantExpr *CE = dyn_cast<ConstantExpr>(i);
          if (CE == 0) continue;

          Constant*& FoldRes = FoldedConstants[CE];
            FoldRes = ConstantFoldConstantExpression(CE, TD);

          if (FoldRes != CE) {
            MadeIRChange = true;

      InstrsForInstCombineWorklist.push_back(Inst);

    // Recursively visit successors.  If this is a branch or switch on a
    // constant, only visit the reachable successor.
    TerminatorInst *TI = BB->getTerminator();
    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (BI->isConditional() && isa<ConstantInt>(BI->getCondition())) {
        bool CondVal = cast<ConstantInt>(BI->getCondition())->getZExtValue();
        BasicBlock *ReachableBB = BI->getSuccessor(!CondVal);
        Worklist.push_back(ReachableBB);
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
      if (ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
        // See if this is an explicit destination.
        for (unsigned i = 1, e = SI->getNumSuccessors(); i != e; ++i)
          if (SI->getCaseValue(i) == Cond) {
            BasicBlock *ReachableBB = SI->getSuccessor(i);
            Worklist.push_back(ReachableBB);

        // Otherwise it is the default destination.
        Worklist.push_back(SI->getSuccessor(0));

    for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
      Worklist.push_back(TI->getSuccessor(i));
  } while (!Worklist.empty());

  // Once we've found all of the instructions to add to instcombine's worklist,
  // add them in reverse order.  This way instcombine will visit from the top
  // of the function down.  This jives well with the way that it adds all uses
  // of instructions to the worklist after doing a transformation, thus
  // avoiding some N^2 behavior in pathological cases.
  IC.Worklist.AddInitialGroup(&InstrsForInstCombineWorklist[0],
                              InstrsForInstCombineWorklist.size());

  return MadeIRChange;

bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
  MadeIRChange = false;

  DEBUG(errs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
               << F.getNameStr() << "\n");

  // Do a depth-first traversal of the function, populate the worklist with
  // the reachable instructions.  Ignore blocks that are not reachable.  Keep
  // track of which blocks we visit.
  SmallPtrSet<BasicBlock*, 64> Visited;
  MadeIRChange |= AddReachableCodeToWorklist(F.begin(), Visited, *this, TD);

  // Do a quick scan over the function.  If we find any blocks that are
  // unreachable, remove any instructions inside of them.  This prevents
  // the instcombine code from having to deal with some bad special cases.
  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
    if (Visited.count(BB)) continue;

    // Delete the instructions backwards, as it has a reduced likelihood of
    // having to update as many def-use and use-def chains.
    Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
    while (EndInst != BB->begin()) {
      // Delete the next to last instruction.
      BasicBlock::iterator I = EndInst;
      Instruction *Inst = --I;
      if (!Inst->use_empty())
        Inst->replaceAllUsesWith(UndefValue::get(Inst->getType()));
      if (isa<LandingPadInst>(Inst)) {
      if (!isa<DbgInfoIntrinsic>(Inst)) {
        MadeIRChange = true;
      Inst->eraseFromParent();

  while (!Worklist.isEmpty()) {
    Instruction *I = Worklist.RemoveOne();
    if (I == 0) continue;  // skip null values.

    // Check to see if we can DCE the instruction.
    if (isInstructionTriviallyDead(I)) {
      DEBUG(errs() << "IC: DCE: " << *I << '\n');
      EraseInstFromFunction(*I);
      MadeIRChange = true;

    // Instruction isn't dead, see if we can constant propagate it.
    if (!I->use_empty() && isa<Constant>(I->getOperand(0)))
      if (Constant *C = ConstantFoldInstruction(I, TD)) {
        DEBUG(errs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n');

        // Add operands to the worklist.
        ReplaceInstUsesWith(*I, C);
        EraseInstFromFunction(*I);
        MadeIRChange = true;

    // See if we can trivially sink this instruction to a successor basic block.
    if (I->hasOneUse()) {
      BasicBlock *BB = I->getParent();
      Instruction *UserInst = cast<Instruction>(I->use_back());
      BasicBlock *UserParent;

      // Get the block the use occurs in.
      if (PHINode *PN = dyn_cast<PHINode>(UserInst))
        UserParent = PN->getIncomingBlock(I->use_begin().getUse());
        UserParent = UserInst->getParent();

      if (UserParent != BB) {
        bool UserIsSuccessor = false;
        // See if the user is one of our successors.
        for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI)
          if (*SI == UserParent) {
            UserIsSuccessor = true;

        // If the user is one of our immediate successors, and if that successor
        // only has us as a predecessor (we'd have to split the critical edge
        // otherwise), we can keep going.
        if (UserIsSuccessor && UserParent->getSinglePredecessor())
          // Okay, the CFG is simple enough, try to sink this instruction.
          MadeIRChange |= TryToSinkInstruction(I, UserParent);

    // Now that we have an instruction, try combining it to simplify it.
    Builder->SetInsertPoint(I->getParent(), I);
    Builder->SetCurrentDebugLocation(I->getDebugLoc());

    DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str(););
    DEBUG(errs() << "IC: Visiting: " << OrigI << '\n');

    if (Instruction *Result = visit(*I)) {
      // Should we replace the old instruction with a new one?
        DEBUG(errs() << "IC: Old = " << *I << '\n'
                     << "    New = " << *Result << '\n');

        if (!I->getDebugLoc().isUnknown())
          Result->setDebugLoc(I->getDebugLoc());
        // Everything uses the new instruction now.
        I->replaceAllUsesWith(Result);

        // Push the new instruction and any users onto the worklist.
        Worklist.Add(Result);
        Worklist.AddUsersToWorkList(*Result);

        // Move the name to the new instruction first.
        Result->takeName(I);

        // Insert the new instruction into the basic block...
        BasicBlock *InstParent = I->getParent();
        BasicBlock::iterator InsertPos = I;

        if (!isa<PHINode>(Result))        // If combining a PHI, don't insert
          while (isa<PHINode>(InsertPos)) // middle of a block of PHIs.

        InstParent->getInstList().insert(InsertPos, Result);

        EraseInstFromFunction(*I);

        DEBUG(errs() << "IC: Mod = " << OrigI << '\n'
                     << "    New = " << *I << '\n');

        // If the instruction was modified, it's possible that it is now dead.
        // If so, remove it.
        if (isInstructionTriviallyDead(I)) {
          EraseInstFromFunction(*I);
          Worklist.AddUsersToWorkList(*I);

      MadeIRChange = true;

  return MadeIRChange;

bool InstCombiner::runOnFunction(Function &F) {
  TD = getAnalysisIfAvailable<TargetData>();

  /// Builder - This is an IRBuilder that automatically inserts new
  /// instructions into the worklist when they are created.
  IRBuilder<true, TargetFolder, InstCombineIRInserter>
    TheBuilder(F.getContext(), TargetFolder(TD),
               InstCombineIRInserter(Worklist));
  Builder = &TheBuilder;

  bool EverMadeChange = false;

  // Lower dbg.declare intrinsics otherwise their value may be clobbered
  EverMadeChange = LowerDbgDeclare(F);

  // Iterate while there is work to do.
  unsigned Iteration = 0;
  while (DoOneIteration(F, Iteration++))
    EverMadeChange = true;

  return EverMadeChange;
}

FunctionPass *llvm::createInstructionCombiningPass() {
  return new InstCombiner();
}