1 //===- InstructionCombining.cpp - Combine multiple instructions -----------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // InstructionCombining - Combine instructions to form fewer, simple
11 // instructions. This pass does not modify the CFG. This pass is where
12 // algebraic simplification happens.
14 // This pass combines things like:
20 // This is a simple worklist driven algorithm.
22 // This pass guarantees that the following canonicalizations are performed on
24 // 1. If a binary operator has a constant operand, it is moved to the RHS
25 // 2. Bitwise operators with constant operands are always grouped so that
26 // shifts are performed first, then or's, then and's, then xor's.
27 // 3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
28 // 4. All cmp instructions on boolean values are replaced with logical ops
29 // 5. add X, X is represented as (X*2) => (X << 1)
30 // 6. Multiplies with a power-of-two constant argument are transformed into
34 //===----------------------------------------------------------------------===//
36 #define DEBUG_TYPE "instcombine"
37 #include "llvm/Transforms/Scalar.h"
38 #include "llvm/IntrinsicInst.h"
39 #include "llvm/LLVMContext.h"
40 #include "llvm/Pass.h"
41 #include "llvm/DerivedTypes.h"
42 #include "llvm/GlobalVariable.h"
43 #include "llvm/Analysis/ConstantFolding.h"
44 #include "llvm/Analysis/ValueTracking.h"
45 #include "llvm/Target/TargetData.h"
46 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
47 #include "llvm/Transforms/Utils/Local.h"
48 #include "llvm/Support/CallSite.h"
49 #include "llvm/Support/ConstantRange.h"
50 #include "llvm/Support/Debug.h"
51 #include "llvm/Support/ErrorHandling.h"
52 #include "llvm/Support/GetElementPtrTypeIterator.h"
53 #include "llvm/Support/InstVisitor.h"
54 #include "llvm/Support/MathExtras.h"
55 #include "llvm/Support/PatternMatch.h"
56 #include "llvm/Support/Compiler.h"
57 #include "llvm/ADT/DenseMap.h"
58 #include "llvm/ADT/SmallVector.h"
59 #include "llvm/ADT/SmallPtrSet.h"
60 #include "llvm/ADT/Statistic.h"
61 #include "llvm/ADT/STLExtras.h"
66 using namespace llvm::PatternMatch;
68 STATISTIC(NumCombined , "Number of insts combined");
69 STATISTIC(NumConstProp, "Number of constant folds");
70 STATISTIC(NumDeadInst , "Number of dead inst eliminated");
71 STATISTIC(NumDeadStore, "Number of dead stores eliminated");
72 STATISTIC(NumSunkInst , "Number of instructions sunk");
75 class VISIBILITY_HIDDEN InstCombiner
76 : public FunctionPass,
77 public InstVisitor<InstCombiner, Instruction*> {
78 // Worklist of all of the instructions that need to be simplified.
79 SmallVector<Instruction*, 256> Worklist;
80 DenseMap<Instruction*, unsigned> WorklistMap;
82 bool MustPreserveLCSSA;
84 static char ID; // Pass identification, replacement for typeid
85 InstCombiner() : FunctionPass(&ID) {}
87 LLVMContext *getContext() { return Context; }
89 /// AddToWorkList - Add the specified instruction to the worklist if it
90 /// isn't already in it.
91 void AddToWorkList(Instruction *I) {
92 if (WorklistMap.insert(std::make_pair(I, Worklist.size())).second)
93 Worklist.push_back(I);
96 // RemoveFromWorkList - remove I from the worklist if it exists.
97 void RemoveFromWorkList(Instruction *I) {
98 DenseMap<Instruction*, unsigned>::iterator It = WorklistMap.find(I);
99 if (It == WorklistMap.end()) return; // Not in worklist.
101 // Don't bother moving everything down, just null out the slot.
102 Worklist[It->second] = 0;
104 WorklistMap.erase(It);
107 Instruction *RemoveOneFromWorkList() {
108 Instruction *I = Worklist.back();
110 WorklistMap.erase(I);
115 /// AddUsersToWorkList - When an instruction is simplified, add all users of
116 /// the instruction to the work lists because they might get more simplified
119 void AddUsersToWorkList(Value &I) {
120 for (Value::use_iterator UI = I.use_begin(), UE = I.use_end();
122 AddToWorkList(cast<Instruction>(*UI));
125 /// AddUsesToWorkList - When an instruction is simplified, add operands to
126 /// the work lists because they might get more simplified now.
128 void AddUsesToWorkList(Instruction &I) {
129 for (User::op_iterator i = I.op_begin(), e = I.op_end(); i != e; ++i)
130 if (Instruction *Op = dyn_cast<Instruction>(*i))
134 /// AddSoonDeadInstToWorklist - The specified instruction is about to become
135 /// dead. Add all of its operands to the worklist, turning them into
136 /// undef's to reduce the number of uses of those instructions.
138 /// Return the specified operand before it is turned into an undef.
140 Value *AddSoonDeadInstToWorklist(Instruction &I, unsigned op) {
141 Value *R = I.getOperand(op);
143 for (User::op_iterator i = I.op_begin(), e = I.op_end(); i != e; ++i)
144 if (Instruction *Op = dyn_cast<Instruction>(*i)) {
146 // Set the operand to undef to drop the use.
147 *i = Context->getUndef(Op->getType());
154 virtual bool runOnFunction(Function &F);
156 bool DoOneIteration(Function &F, unsigned ItNum);
158 virtual void getAnalysisUsage(AnalysisUsage &AU) const {
159 AU.addRequired<TargetData>();
160 AU.addPreservedID(LCSSAID);
161 AU.setPreservesCFG();
164 TargetData &getTargetData() const { return *TD; }
166 // Visitation implementation - Implement instruction combining for different
167 // instruction types. The semantics are as follows:
169 // null - No change was made
170 // I - Change was made, I is still valid, I may be dead though
171 // otherwise - Change was made, replace I with returned instruction
173 Instruction *visitAdd(BinaryOperator &I);
174 Instruction *visitFAdd(BinaryOperator &I);
175 Instruction *visitSub(BinaryOperator &I);
176 Instruction *visitFSub(BinaryOperator &I);
177 Instruction *visitMul(BinaryOperator &I);
178 Instruction *visitFMul(BinaryOperator &I);
179 Instruction *visitURem(BinaryOperator &I);
180 Instruction *visitSRem(BinaryOperator &I);
181 Instruction *visitFRem(BinaryOperator &I);
182 bool SimplifyDivRemOfSelect(BinaryOperator &I);
183 Instruction *commonRemTransforms(BinaryOperator &I);
184 Instruction *commonIRemTransforms(BinaryOperator &I);
185 Instruction *commonDivTransforms(BinaryOperator &I);
186 Instruction *commonIDivTransforms(BinaryOperator &I);
187 Instruction *visitUDiv(BinaryOperator &I);
188 Instruction *visitSDiv(BinaryOperator &I);
189 Instruction *visitFDiv(BinaryOperator &I);
190 Instruction *FoldAndOfICmps(Instruction &I, ICmpInst *LHS, ICmpInst *RHS);
191 Instruction *visitAnd(BinaryOperator &I);
192 Instruction *FoldOrOfICmps(Instruction &I, ICmpInst *LHS, ICmpInst *RHS);
193 Instruction *FoldOrWithConstants(BinaryOperator &I, Value *Op,
194 Value *A, Value *B, Value *C);
195 Instruction *visitOr (BinaryOperator &I);
196 Instruction *visitXor(BinaryOperator &I);
197 Instruction *visitShl(BinaryOperator &I);
198 Instruction *visitAShr(BinaryOperator &I);
199 Instruction *visitLShr(BinaryOperator &I);
200 Instruction *commonShiftTransforms(BinaryOperator &I);
201 Instruction *FoldFCmp_IntToFP_Cst(FCmpInst &I, Instruction *LHSI,
203 Instruction *visitFCmpInst(FCmpInst &I);
204 Instruction *visitICmpInst(ICmpInst &I);
205 Instruction *visitICmpInstWithCastAndCast(ICmpInst &ICI);
206 Instruction *visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
209 Instruction *FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
210 ConstantInt *DivRHS);
212 Instruction *FoldGEPICmp(User *GEPLHS, Value *RHS,
213 ICmpInst::Predicate Cond, Instruction &I);
214 Instruction *FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
216 Instruction *commonCastTransforms(CastInst &CI);
217 Instruction *commonIntCastTransforms(CastInst &CI);
218 Instruction *commonPointerCastTransforms(CastInst &CI);
219 Instruction *visitTrunc(TruncInst &CI);
220 Instruction *visitZExt(ZExtInst &CI);
221 Instruction *visitSExt(SExtInst &CI);
222 Instruction *visitFPTrunc(FPTruncInst &CI);
223 Instruction *visitFPExt(CastInst &CI);
224 Instruction *visitFPToUI(FPToUIInst &FI);
225 Instruction *visitFPToSI(FPToSIInst &FI);
226 Instruction *visitUIToFP(CastInst &CI);
227 Instruction *visitSIToFP(CastInst &CI);
228 Instruction *visitPtrToInt(PtrToIntInst &CI);
229 Instruction *visitIntToPtr(IntToPtrInst &CI);
230 Instruction *visitBitCast(BitCastInst &CI);
231 Instruction *FoldSelectOpOp(SelectInst &SI, Instruction *TI,
233 Instruction *FoldSelectIntoOp(SelectInst &SI, Value*, Value*);
234 Instruction *visitSelectInst(SelectInst &SI);
235 Instruction *visitSelectInstWithICmp(SelectInst &SI, ICmpInst *ICI);
236 Instruction *visitCallInst(CallInst &CI);
237 Instruction *visitInvokeInst(InvokeInst &II);
238 Instruction *visitPHINode(PHINode &PN);
239 Instruction *visitGetElementPtrInst(GetElementPtrInst &GEP);
240 Instruction *visitAllocationInst(AllocationInst &AI);
241 Instruction *visitFreeInst(FreeInst &FI);
242 Instruction *visitLoadInst(LoadInst &LI);
243 Instruction *visitStoreInst(StoreInst &SI);
244 Instruction *visitBranchInst(BranchInst &BI);
245 Instruction *visitSwitchInst(SwitchInst &SI);
246 Instruction *visitInsertElementInst(InsertElementInst &IE);
247 Instruction *visitExtractElementInst(ExtractElementInst &EI);
248 Instruction *visitShuffleVectorInst(ShuffleVectorInst &SVI);
249 Instruction *visitExtractValueInst(ExtractValueInst &EV);
251 // visitInstruction - Specify what to return for unhandled instructions...
252 Instruction *visitInstruction(Instruction &I) { return 0; }
255 Instruction *visitCallSite(CallSite CS);
256 bool transformConstExprCastCall(CallSite CS);
257 Instruction *transformCallThroughTrampoline(CallSite CS);
258 Instruction *transformZExtICmp(ICmpInst *ICI, Instruction &CI,
259 bool DoXform = true);
260 bool WillNotOverflowSignedAdd(Value *LHS, Value *RHS);
261 DbgDeclareInst *hasOneUsePlusDeclare(Value *V);
265 // InsertNewInstBefore - insert an instruction New before instruction Old
266 // in the program. Add the new instruction to the worklist.
268 Instruction *InsertNewInstBefore(Instruction *New, Instruction &Old) {
269 assert(New && New->getParent() == 0 &&
270 "New instruction already inserted into a basic block!");
271 BasicBlock *BB = Old.getParent();
272 BB->getInstList().insert(&Old, New); // Insert inst
277 /// InsertCastBefore - Insert a cast of V to TY before the instruction POS.
278 /// This also adds the cast to the worklist. Finally, this returns the
280 Value *InsertCastBefore(Instruction::CastOps opc, Value *V, const Type *Ty,
282 if (V->getType() == Ty) return V;
284 if (Constant *CV = dyn_cast<Constant>(V))
285 return Context->getConstantExprCast(opc, CV, Ty);
287 Instruction *C = CastInst::Create(opc, V, Ty, V->getName(), &Pos);
292 Value *InsertBitCastBefore(Value *V, const Type *Ty, Instruction &Pos) {
293 return InsertCastBefore(Instruction::BitCast, V, Ty, Pos);
297 // ReplaceInstUsesWith - This method is to be used when an instruction is
298 // found to be dead, replacable with another preexisting expression. Here
299 // we add all uses of I to the worklist, replace all uses of I with the new
300 // value, then return I, so that the inst combiner will know that I was
303 Instruction *ReplaceInstUsesWith(Instruction &I, Value *V) {
304 AddUsersToWorkList(I); // Add all modified instrs to worklist
306 I.replaceAllUsesWith(V);
309 // If we are replacing the instruction with itself, this must be in a
310 // segment of unreachable code, so just clobber the instruction.
311 I.replaceAllUsesWith(Context->getUndef(I.getType()));
316 // EraseInstFromFunction - When dealing with an instruction that has side
317 // effects or produces a void value, we can't rely on DCE to delete the
318 // instruction. Instead, visit methods should return the value returned by
320 Instruction *EraseInstFromFunction(Instruction &I) {
321 assert(I.use_empty() && "Cannot erase instruction that is used!");
322 AddUsesToWorkList(I);
323 RemoveFromWorkList(&I);
325 return 0; // Don't do anything with FI
328 void ComputeMaskedBits(Value *V, const APInt &Mask, APInt &KnownZero,
329 APInt &KnownOne, unsigned Depth = 0) const {
330 return llvm::ComputeMaskedBits(V, Mask, KnownZero, KnownOne, TD, Depth);
333 bool MaskedValueIsZero(Value *V, const APInt &Mask,
334 unsigned Depth = 0) const {
335 return llvm::MaskedValueIsZero(V, Mask, TD, Depth);
337 unsigned ComputeNumSignBits(Value *Op, unsigned Depth = 0) const {
338 return llvm::ComputeNumSignBits(Op, TD, Depth);
343 /// SimplifyCommutative - This performs a few simplifications for
344 /// commutative operators.
345 bool SimplifyCommutative(BinaryOperator &I);
347 /// SimplifyCompare - This reorders the operands of a CmpInst to get them in
348 /// most-complex to least-complex order.
349 bool SimplifyCompare(CmpInst &I);
351 /// SimplifyDemandedUseBits - Attempts to replace V with a simpler value
352 /// based on the demanded bits.
353 Value *SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
354 APInt& KnownZero, APInt& KnownOne,
356 bool SimplifyDemandedBits(Use &U, APInt DemandedMask,
357 APInt& KnownZero, APInt& KnownOne,
360 /// SimplifyDemandedInstructionBits - Inst is an integer instruction that
361 /// SimplifyDemandedBits knows about. See if the instruction has any
362 /// properties that allow us to simplify its operands.
363 bool SimplifyDemandedInstructionBits(Instruction &Inst);
365 Value *SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
366 APInt& UndefElts, unsigned Depth = 0);
368 // FoldOpIntoPhi - Given a binary operator or cast instruction which has a
369 // PHI node as operand #0, see if we can fold the instruction into the PHI
370 // (which is only possible if all operands to the PHI are constants).
371 Instruction *FoldOpIntoPhi(Instruction &I);
373 // FoldPHIArgOpIntoPHI - If all operands to a PHI node are the same "unary"
374 // operator and they all are only used by the PHI, PHI together their
375 // inputs, and do the operation once, to the result of the PHI.
376 Instruction *FoldPHIArgOpIntoPHI(PHINode &PN);
377 Instruction *FoldPHIArgBinOpIntoPHI(PHINode &PN);
378 Instruction *FoldPHIArgGEPIntoPHI(PHINode &PN);
381 Instruction *OptAndOp(Instruction *Op, ConstantInt *OpRHS,
382 ConstantInt *AndRHS, BinaryOperator &TheAnd);
384 Value *FoldLogicalPlusAnd(Value *LHS, Value *RHS, ConstantInt *Mask,
385 bool isSub, Instruction &I);
386 Instruction *InsertRangeTest(Value *V, Constant *Lo, Constant *Hi,
387 bool isSigned, bool Inside, Instruction &IB);
388 Instruction *PromoteCastOfAllocation(BitCastInst &CI, AllocationInst &AI);
389 Instruction *MatchBSwap(BinaryOperator &I);
390 bool SimplifyStoreAtEndOfBlock(StoreInst &SI);
391 Instruction *SimplifyMemTransfer(MemIntrinsic *MI);
392 Instruction *SimplifyMemSet(MemSetInst *MI);
395 Value *EvaluateInDifferentType(Value *V, const Type *Ty, bool isSigned);
397 bool CanEvaluateInDifferentType(Value *V, const Type *Ty,
398 unsigned CastOpc, int &NumCastsRemoved);
399 unsigned GetOrEnforceKnownAlignment(Value *V,
400 unsigned PrefAlign = 0);
405 char InstCombiner::ID = 0;
406 static RegisterPass<InstCombiner>
407 X("instcombine", "Combine redundant instructions");
409 // getComplexity: Assign a complexity or rank value to LLVM Values...
410 // 0 -> undef, 1 -> Const, 2 -> Other, 3 -> Arg, 3 -> Unary, 4 -> OtherInst
411 static unsigned getComplexity(LLVMContext *Context, Value *V) {
412 if (isa<Instruction>(V)) {
413 if (BinaryOperator::isNeg(*Context, V) ||
414 BinaryOperator::isFNeg(*Context, V) ||
415 BinaryOperator::isNot(V))
419 if (isa<Argument>(V)) return 3;
420 return isa<Constant>(V) ? (isa<UndefValue>(V) ? 0 : 1) : 2;
423 // isOnlyUse - Return true if this instruction will be deleted if we stop using
425 static bool isOnlyUse(Value *V) {
426 return V->hasOneUse() || isa<Constant>(V);
429 // getPromotedType - Return the specified type promoted as it would be to pass
430 // though a va_arg area...
431 static const Type *getPromotedType(const Type *Ty) {
432 if (const IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
433 if (ITy->getBitWidth() < 32)
434 return Type::Int32Ty;
439 /// getBitCastOperand - If the specified operand is a CastInst, a constant
440 /// expression bitcast, or a GetElementPtrInst with all zero indices, return the
441 /// operand value, otherwise return null.
442 static Value *getBitCastOperand(Value *V) {
443 if (BitCastInst *I = dyn_cast<BitCastInst>(V))
445 return I->getOperand(0);
446 else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V)) {
447 // GetElementPtrInst?
448 if (GEP->hasAllZeroIndices())
449 return GEP->getOperand(0);
450 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
451 if (CE->getOpcode() == Instruction::BitCast)
452 // BitCast ConstantExp?
453 return CE->getOperand(0);
454 else if (CE->getOpcode() == Instruction::GetElementPtr) {
455 // GetElementPtr ConstantExp?
456 for (User::op_iterator I = CE->op_begin() + 1, E = CE->op_end();
458 ConstantInt *CI = dyn_cast<ConstantInt>(I);
459 if (!CI || !CI->isZero())
460 // Any non-zero indices? Not cast-like.
463 // All-zero indices? This is just like casting.
464 return CE->getOperand(0);
470 /// This function is a wrapper around CastInst::isEliminableCastPair. It
471 /// simply extracts arguments and returns what that function returns.
472 static Instruction::CastOps
473 isEliminableCastPair(
474 const CastInst *CI, ///< The first cast instruction
475 unsigned opcode, ///< The opcode of the second cast instruction
476 const Type *DstTy, ///< The target type for the second cast instruction
477 TargetData *TD ///< The target data for pointer size
480 const Type *SrcTy = CI->getOperand(0)->getType(); // A from above
481 const Type *MidTy = CI->getType(); // B from above
483 // Get the opcodes of the two Cast instructions
484 Instruction::CastOps firstOp = Instruction::CastOps(CI->getOpcode());
485 Instruction::CastOps secondOp = Instruction::CastOps(opcode);
487 unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy,
488 DstTy, TD->getIntPtrType());
490 // We don't want to form an inttoptr or ptrtoint that converts to an integer
491 // type that differs from the pointer size.
492 if ((Res == Instruction::IntToPtr && SrcTy != TD->getIntPtrType()) ||
493 (Res == Instruction::PtrToInt && DstTy != TD->getIntPtrType()))
496 return Instruction::CastOps(Res);
499 /// ValueRequiresCast - Return true if the cast from "V to Ty" actually results
500 /// in any code being generated. It does not require codegen if V is simple
501 /// enough or if the cast can be folded into other casts.
502 static bool ValueRequiresCast(Instruction::CastOps opcode, const Value *V,
503 const Type *Ty, TargetData *TD) {
504 if (V->getType() == Ty || isa<Constant>(V)) return false;
506 // If this is another cast that can be eliminated, it isn't codegen either.
507 if (const CastInst *CI = dyn_cast<CastInst>(V))
508 if (isEliminableCastPair(CI, opcode, Ty, TD))
513 // SimplifyCommutative - This performs a few simplifications for commutative
516 // 1. Order operands such that they are listed from right (least complex) to
517 // left (most complex). This puts constants before unary operators before
520 // 2. Transform: (op (op V, C1), C2) ==> (op V, (op C1, C2))
521 // 3. Transform: (op (op V1, C1), (op V2, C2)) ==> (op (op V1, V2), (op C1,C2))
523 bool InstCombiner::SimplifyCommutative(BinaryOperator &I) {
524 bool Changed = false;
525 if (getComplexity(Context, I.getOperand(0)) <
526 getComplexity(Context, I.getOperand(1)))
527 Changed = !I.swapOperands();
529 if (!I.isAssociative()) return Changed;
530 Instruction::BinaryOps Opcode = I.getOpcode();
531 if (BinaryOperator *Op = dyn_cast<BinaryOperator>(I.getOperand(0)))
532 if (Op->getOpcode() == Opcode && isa<Constant>(Op->getOperand(1))) {
533 if (isa<Constant>(I.getOperand(1))) {
534 Constant *Folded = Context->getConstantExpr(I.getOpcode(),
535 cast<Constant>(I.getOperand(1)),
536 cast<Constant>(Op->getOperand(1)));
537 I.setOperand(0, Op->getOperand(0));
538 I.setOperand(1, Folded);
540 } else if (BinaryOperator *Op1=dyn_cast<BinaryOperator>(I.getOperand(1)))
541 if (Op1->getOpcode() == Opcode && isa<Constant>(Op1->getOperand(1)) &&
542 isOnlyUse(Op) && isOnlyUse(Op1)) {
543 Constant *C1 = cast<Constant>(Op->getOperand(1));
544 Constant *C2 = cast<Constant>(Op1->getOperand(1));
546 // Fold (op (op V1, C1), (op V2, C2)) ==> (op (op V1, V2), (op C1,C2))
547 Constant *Folded = Context->getConstantExpr(I.getOpcode(), C1, C2);
548 Instruction *New = BinaryOperator::Create(Opcode, Op->getOperand(0),
552 I.setOperand(0, New);
553 I.setOperand(1, Folded);
560 /// SimplifyCompare - For a CmpInst this function just orders the operands
561 /// so that theyare listed from right (least complex) to left (most complex).
562 /// This puts constants before unary operators before binary operators.
563 bool InstCombiner::SimplifyCompare(CmpInst &I) {
564 if (getComplexity(Context, I.getOperand(0)) >=
565 getComplexity(Context, I.getOperand(1)))
568 // Compare instructions are not associative so there's nothing else we can do.
572 // dyn_castNegVal - Given a 'sub' instruction, return the RHS of the instruction
573 // if the LHS is a constant zero (which is the 'negate' form).
575 static inline Value *dyn_castNegVal(Value *V, LLVMContext *Context) {
576 if (BinaryOperator::isNeg(*Context, V))
577 return BinaryOperator::getNegArgument(V);
579 // Constants can be considered to be negated values if they can be folded.
580 if (ConstantInt *C = dyn_cast<ConstantInt>(V))
581 return Context->getConstantExprNeg(C);
583 if (ConstantVector *C = dyn_cast<ConstantVector>(V))
584 if (C->getType()->getElementType()->isInteger())
585 return Context->getConstantExprNeg(C);
590 // dyn_castFNegVal - Given a 'fsub' instruction, return the RHS of the
591 // instruction if the LHS is a constant negative zero (which is the 'negate'
594 static inline Value *dyn_castFNegVal(Value *V, LLVMContext *Context) {
595 if (BinaryOperator::isFNeg(*Context, V))
596 return BinaryOperator::getFNegArgument(V);
598 // Constants can be considered to be negated values if they can be folded.
599 if (ConstantFP *C = dyn_cast<ConstantFP>(V))
600 return Context->getConstantExprFNeg(C);
602 if (ConstantVector *C = dyn_cast<ConstantVector>(V))
603 if (C->getType()->getElementType()->isFloatingPoint())
604 return Context->getConstantExprFNeg(C);
609 static inline Value *dyn_castNotVal(Value *V, LLVMContext *Context) {
610 if (BinaryOperator::isNot(V))
611 return BinaryOperator::getNotArgument(V);
613 // Constants can be considered to be not'ed values...
614 if (ConstantInt *C = dyn_cast<ConstantInt>(V))
615 return Context->getConstantInt(~C->getValue());
619 // dyn_castFoldableMul - If this value is a multiply that can be folded into
620 // other computations (because it has a constant operand), return the
621 // non-constant operand of the multiply, and set CST to point to the multiplier.
622 // Otherwise, return null.
624 static inline Value *dyn_castFoldableMul(Value *V, ConstantInt *&CST,
625 LLVMContext *Context) {
626 if (V->hasOneUse() && V->getType()->isInteger())
627 if (Instruction *I = dyn_cast<Instruction>(V)) {
628 if (I->getOpcode() == Instruction::Mul)
629 if ((CST = dyn_cast<ConstantInt>(I->getOperand(1))))
630 return I->getOperand(0);
631 if (I->getOpcode() == Instruction::Shl)
632 if ((CST = dyn_cast<ConstantInt>(I->getOperand(1)))) {
633 // The multiplier is really 1 << CST.
634 uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
635 uint32_t CSTVal = CST->getLimitedValue(BitWidth);
636 CST = Context->getConstantInt(APInt(BitWidth, 1).shl(CSTVal));
637 return I->getOperand(0);
643 /// dyn_castGetElementPtr - If this is a getelementptr instruction or constant
644 /// expression, return it.
645 static User *dyn_castGetElementPtr(Value *V) {
646 if (isa<GetElementPtrInst>(V)) return cast<User>(V);
647 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
648 if (CE->getOpcode() == Instruction::GetElementPtr)
649 return cast<User>(V);
653 /// getOpcode - If this is an Instruction or a ConstantExpr, return the
654 /// opcode value. Otherwise return UserOp1.
655 static unsigned getOpcode(const Value *V) {
656 if (const Instruction *I = dyn_cast<Instruction>(V))
657 return I->getOpcode();
658 if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
659 return CE->getOpcode();
660 // Use UserOp1 to mean there's no opcode.
661 return Instruction::UserOp1;
664 /// AddOne - Add one to a ConstantInt
665 static Constant *AddOne(Constant *C, LLVMContext *Context) {
666 return Context->getConstantExprAdd(C,
667 Context->getConstantInt(C->getType(), 1));
669 /// SubOne - Subtract one from a ConstantInt
670 static Constant *SubOne(ConstantInt *C, LLVMContext *Context) {
671 return Context->getConstantExprSub(C,
672 Context->getConstantInt(C->getType(), 1));
674 /// MultiplyOverflows - True if the multiply can not be expressed in an int
676 static bool MultiplyOverflows(ConstantInt *C1, ConstantInt *C2, bool sign,
677 LLVMContext *Context) {
678 uint32_t W = C1->getBitWidth();
679 APInt LHSExt = C1->getValue(), RHSExt = C2->getValue();
688 APInt MulExt = LHSExt * RHSExt;
691 APInt Min = APInt::getSignedMinValue(W).sext(W * 2);
692 APInt Max = APInt::getSignedMaxValue(W).sext(W * 2);
693 return MulExt.slt(Min) || MulExt.sgt(Max);
695 return MulExt.ugt(APInt::getLowBitsSet(W * 2, W));
699 /// ShrinkDemandedConstant - Check to see if the specified operand of the
700 /// specified instruction is a constant integer. If so, check to see if there
701 /// are any bits set in the constant that are not demanded. If so, shrink the
702 /// constant and return true.
703 static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo,
704 APInt Demanded, LLVMContext *Context) {
705 assert(I && "No instruction?");
706 assert(OpNo < I->getNumOperands() && "Operand index too large");
708 // If the operand is not a constant integer, nothing to do.
709 ConstantInt *OpC = dyn_cast<ConstantInt>(I->getOperand(OpNo));
710 if (!OpC) return false;
712 // If there are no bits set that aren't demanded, nothing to do.
713 Demanded.zextOrTrunc(OpC->getValue().getBitWidth());
714 if ((~Demanded & OpC->getValue()) == 0)
717 // This instruction is producing bits that are not demanded. Shrink the RHS.
718 Demanded &= OpC->getValue();
719 I->setOperand(OpNo, Context->getConstantInt(Demanded));
723 // ComputeSignedMinMaxValuesFromKnownBits - Given a signed integer type and a
724 // set of known zero and one bits, compute the maximum and minimum values that
725 // could have the specified known zero and known one bits, returning them in
727 static void ComputeSignedMinMaxValuesFromKnownBits(const APInt& KnownZero,
728 const APInt& KnownOne,
729 APInt& Min, APInt& Max) {
730 assert(KnownZero.getBitWidth() == KnownOne.getBitWidth() &&
731 KnownZero.getBitWidth() == Min.getBitWidth() &&
732 KnownZero.getBitWidth() == Max.getBitWidth() &&
733 "KnownZero, KnownOne and Min, Max must have equal bitwidth.");
734 APInt UnknownBits = ~(KnownZero|KnownOne);
736 // The minimum value is when all unknown bits are zeros, EXCEPT for the sign
737 // bit if it is unknown.
739 Max = KnownOne|UnknownBits;
741 if (UnknownBits.isNegative()) { // Sign bit is unknown
742 Min.set(Min.getBitWidth()-1);
743 Max.clear(Max.getBitWidth()-1);
747 // ComputeUnsignedMinMaxValuesFromKnownBits - Given an unsigned integer type and
748 // a set of known zero and one bits, compute the maximum and minimum values that
749 // could have the specified known zero and known one bits, returning them in
751 static void ComputeUnsignedMinMaxValuesFromKnownBits(const APInt &KnownZero,
752 const APInt &KnownOne,
753 APInt &Min, APInt &Max) {
754 assert(KnownZero.getBitWidth() == KnownOne.getBitWidth() &&
755 KnownZero.getBitWidth() == Min.getBitWidth() &&
756 KnownZero.getBitWidth() == Max.getBitWidth() &&
757 "Ty, KnownZero, KnownOne and Min, Max must have equal bitwidth.");
758 APInt UnknownBits = ~(KnownZero|KnownOne);
760 // The minimum value is when the unknown bits are all zeros.
762 // The maximum value is when the unknown bits are all ones.
763 Max = KnownOne|UnknownBits;
766 /// SimplifyDemandedInstructionBits - Inst is an integer instruction that
767 /// SimplifyDemandedBits knows about. See if the instruction has any
768 /// properties that allow us to simplify its operands.
769 bool InstCombiner::SimplifyDemandedInstructionBits(Instruction &Inst) {
770 unsigned BitWidth = Inst.getType()->getScalarSizeInBits();
771 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
772 APInt DemandedMask(APInt::getAllOnesValue(BitWidth));
774 Value *V = SimplifyDemandedUseBits(&Inst, DemandedMask,
775 KnownZero, KnownOne, 0);
776 if (V == 0) return false;
777 if (V == &Inst) return true;
778 ReplaceInstUsesWith(Inst, V);
782 /// SimplifyDemandedBits - This form of SimplifyDemandedBits simplifies the
783 /// specified instruction operand if possible, updating it in place. It returns
784 /// true if it made any change and false otherwise.
785 bool InstCombiner::SimplifyDemandedBits(Use &U, APInt DemandedMask,
786 APInt &KnownZero, APInt &KnownOne,
788 Value *NewVal = SimplifyDemandedUseBits(U.get(), DemandedMask,
789 KnownZero, KnownOne, Depth);
790 if (NewVal == 0) return false;
796 /// SimplifyDemandedUseBits - This function attempts to replace V with a simpler
797 /// value based on the demanded bits. When this function is called, it is known
798 /// that only the bits set in DemandedMask of the result of V are ever used
799 /// downstream. Consequently, depending on the mask and V, it may be possible
800 /// to replace V with a constant or one of its operands. In such cases, this
801 /// function does the replacement and returns true. In all other cases, it
802 /// returns false after analyzing the expression and setting KnownOne and known
803 /// to be one in the expression. KnownZero contains all the bits that are known
804 /// to be zero in the expression. These are provided to potentially allow the
805 /// caller (which might recursively be SimplifyDemandedBits itself) to simplify
806 /// the expression. KnownOne and KnownZero always follow the invariant that
807 /// KnownOne & KnownZero == 0. That is, a bit can't be both 1 and 0. Note that
808 /// the bits in KnownOne and KnownZero may only be accurate for those bits set
809 /// in DemandedMask. Note also that the bitwidth of V, DemandedMask, KnownZero
810 /// and KnownOne must all be the same.
812 /// This returns null if it did not change anything and it permits no
813 /// simplification. This returns V itself if it did some simplification of V's
814 /// operands based on the information about what bits are demanded. This returns
815 /// some other non-null value if it found out that V is equal to another value
816 /// in the context where the specified bits are demanded, but not for all users.
817 Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
818 APInt &KnownZero, APInt &KnownOne,
820 assert(V != 0 && "Null pointer of Value???");
821 assert(Depth <= 6 && "Limit Search Depth");
822 uint32_t BitWidth = DemandedMask.getBitWidth();
823 const Type *VTy = V->getType();
824 assert((TD || !isa<PointerType>(VTy)) &&
825 "SimplifyDemandedBits needs to know bit widths!");
826 assert((!TD || TD->getTypeSizeInBits(VTy->getScalarType()) == BitWidth) &&
827 (!VTy->isIntOrIntVector() ||
828 VTy->getScalarSizeInBits() == BitWidth) &&
829 KnownZero.getBitWidth() == BitWidth &&
830 KnownOne.getBitWidth() == BitWidth &&
831 "Value *V, DemandedMask, KnownZero and KnownOne "
832 "must have same BitWidth");
833 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
834 // We know all of the bits for a constant!
835 KnownOne = CI->getValue() & DemandedMask;
836 KnownZero = ~KnownOne & DemandedMask;
839 if (isa<ConstantPointerNull>(V)) {
840 // We know all of the bits for a constant!
842 KnownZero = DemandedMask;
848 if (DemandedMask == 0) { // Not demanding any bits from V.
849 if (isa<UndefValue>(V))
851 return Context->getUndef(VTy);
854 if (Depth == 6) // Limit search depth.
857 APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
858 APInt &RHSKnownZero = KnownZero, &RHSKnownOne = KnownOne;
860 Instruction *I = dyn_cast<Instruction>(V);
862 ComputeMaskedBits(V, DemandedMask, RHSKnownZero, RHSKnownOne, Depth);
863 return 0; // Only analyze instructions.
866 // If there are multiple uses of this value and we aren't at the root, then
867 // we can't do any simplifications of the operands, because DemandedMask
868 // only reflects the bits demanded by *one* of the users.
869 if (Depth != 0 && !I->hasOneUse()) {
870 // Despite the fact that we can't simplify this instruction in all User's
871 // context, we can at least compute the knownzero/knownone bits, and we can
872 // do simplifications that apply to *just* the one user if we know that
873 // this instruction has a simpler value in that context.
874 if (I->getOpcode() == Instruction::And) {
875 // If either the LHS or the RHS are Zero, the result is zero.
876 ComputeMaskedBits(I->getOperand(1), DemandedMask,
877 RHSKnownZero, RHSKnownOne, Depth+1);
878 ComputeMaskedBits(I->getOperand(0), DemandedMask & ~RHSKnownZero,
879 LHSKnownZero, LHSKnownOne, Depth+1);
881 // If all of the demanded bits are known 1 on one side, return the other.
882 // These bits cannot contribute to the result of the 'and' in this
884 if ((DemandedMask & ~LHSKnownZero & RHSKnownOne) ==
885 (DemandedMask & ~LHSKnownZero))
886 return I->getOperand(0);
887 if ((DemandedMask & ~RHSKnownZero & LHSKnownOne) ==
888 (DemandedMask & ~RHSKnownZero))
889 return I->getOperand(1);
891 // If all of the demanded bits in the inputs are known zeros, return zero.
892 if ((DemandedMask & (RHSKnownZero|LHSKnownZero)) == DemandedMask)
893 return Context->getNullValue(VTy);
895 } else if (I->getOpcode() == Instruction::Or) {
896 // We can simplify (X|Y) -> X or Y in the user's context if we know that
897 // only bits from X or Y are demanded.
899 // If either the LHS or the RHS are One, the result is One.
900 ComputeMaskedBits(I->getOperand(1), DemandedMask,
901 RHSKnownZero, RHSKnownOne, Depth+1);
902 ComputeMaskedBits(I->getOperand(0), DemandedMask & ~RHSKnownOne,
903 LHSKnownZero, LHSKnownOne, Depth+1);
905 // If all of the demanded bits are known zero on one side, return the
906 // other. These bits cannot contribute to the result of the 'or' in this
908 if ((DemandedMask & ~LHSKnownOne & RHSKnownZero) ==
909 (DemandedMask & ~LHSKnownOne))
910 return I->getOperand(0);
911 if ((DemandedMask & ~RHSKnownOne & LHSKnownZero) ==
912 (DemandedMask & ~RHSKnownOne))
913 return I->getOperand(1);
915 // If all of the potentially set bits on one side are known to be set on
916 // the other side, just use the 'other' side.
917 if ((DemandedMask & (~RHSKnownZero) & LHSKnownOne) ==
918 (DemandedMask & (~RHSKnownZero)))
919 return I->getOperand(0);
920 if ((DemandedMask & (~LHSKnownZero) & RHSKnownOne) ==
921 (DemandedMask & (~LHSKnownZero)))
922 return I->getOperand(1);
925 // Compute the KnownZero/KnownOne bits to simplify things downstream.
926 ComputeMaskedBits(I, DemandedMask, KnownZero, KnownOne, Depth);
930 // If this is the root being simplified, allow it to have multiple uses,
931 // just set the DemandedMask to all bits so that we can try to simplify the
932 // operands. This allows visitTruncInst (for example) to simplify the
933 // operand of a trunc without duplicating all the logic below.
934 if (Depth == 0 && !V->hasOneUse())
935 DemandedMask = APInt::getAllOnesValue(BitWidth);
937 switch (I->getOpcode()) {
939 ComputeMaskedBits(I, DemandedMask, RHSKnownZero, RHSKnownOne, Depth);
941 case Instruction::And:
942 // If either the LHS or the RHS are Zero, the result is zero.
943 if (SimplifyDemandedBits(I->getOperandUse(1), DemandedMask,
944 RHSKnownZero, RHSKnownOne, Depth+1) ||
945 SimplifyDemandedBits(I->getOperandUse(0), DemandedMask & ~RHSKnownZero,
946 LHSKnownZero, LHSKnownOne, Depth+1))
948 assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
949 assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");
951 // If all of the demanded bits are known 1 on one side, return the other.
952 // These bits cannot contribute to the result of the 'and'.
953 if ((DemandedMask & ~LHSKnownZero & RHSKnownOne) ==
954 (DemandedMask & ~LHSKnownZero))
955 return I->getOperand(0);
956 if ((DemandedMask & ~RHSKnownZero & LHSKnownOne) ==
957 (DemandedMask & ~RHSKnownZero))
958 return I->getOperand(1);
960 // If all of the demanded bits in the inputs are known zeros, return zero.
961 if ((DemandedMask & (RHSKnownZero|LHSKnownZero)) == DemandedMask)
962 return Context->getNullValue(VTy);
964 // If the RHS is a constant, see if we can simplify it.
965 if (ShrinkDemandedConstant(I, 1, DemandedMask & ~LHSKnownZero, Context))
968 // Output known-1 bits are only known if set in both the LHS & RHS.
969 RHSKnownOne &= LHSKnownOne;
970 // Output known-0 are known to be clear if zero in either the LHS | RHS.
971 RHSKnownZero |= LHSKnownZero;
973 case Instruction::Or:
974 // If either the LHS or the RHS are One, the result is One.
975 if (SimplifyDemandedBits(I->getOperandUse(1), DemandedMask,
976 RHSKnownZero, RHSKnownOne, Depth+1) ||
977 SimplifyDemandedBits(I->getOperandUse(0), DemandedMask & ~RHSKnownOne,
978 LHSKnownZero, LHSKnownOne, Depth+1))
980 assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
981 assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");
983 // If all of the demanded bits are known zero on one side, return the other.
984 // These bits cannot contribute to the result of the 'or'.
985 if ((DemandedMask & ~LHSKnownOne & RHSKnownZero) ==
986 (DemandedMask & ~LHSKnownOne))
987 return I->getOperand(0);
988 if ((DemandedMask & ~RHSKnownOne & LHSKnownZero) ==
989 (DemandedMask & ~RHSKnownOne))
990 return I->getOperand(1);
992 // If all of the potentially set bits on one side are known to be set on
993 // the other side, just use the 'other' side.
994 if ((DemandedMask & (~RHSKnownZero) & LHSKnownOne) ==
995 (DemandedMask & (~RHSKnownZero)))
996 return I->getOperand(0);
997 if ((DemandedMask & (~LHSKnownZero) & RHSKnownOne) ==
998 (DemandedMask & (~LHSKnownZero)))
999 return I->getOperand(1);
1001 // If the RHS is a constant, see if we can simplify it.
1002 if (ShrinkDemandedConstant(I, 1, DemandedMask, Context))
1005 // Output known-0 bits are only known if clear in both the LHS & RHS.
1006 RHSKnownZero &= LHSKnownZero;
1007 // Output known-1 are known to be set if set in either the LHS | RHS.
1008 RHSKnownOne |= LHSKnownOne;
1010 case Instruction::Xor: {
1011 if (SimplifyDemandedBits(I->getOperandUse(1), DemandedMask,
1012 RHSKnownZero, RHSKnownOne, Depth+1) ||
1013 SimplifyDemandedBits(I->getOperandUse(0), DemandedMask,
1014 LHSKnownZero, LHSKnownOne, Depth+1))
1016 assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
1017 assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");
1019 // If all of the demanded bits are known zero on one side, return the other.
1020 // These bits cannot contribute to the result of the 'xor'.
1021 if ((DemandedMask & RHSKnownZero) == DemandedMask)
1022 return I->getOperand(0);
1023 if ((DemandedMask & LHSKnownZero) == DemandedMask)
1024 return I->getOperand(1);
1026 // Output known-0 bits are known if clear or set in both the LHS & RHS.
1027 APInt KnownZeroOut = (RHSKnownZero & LHSKnownZero) |
1028 (RHSKnownOne & LHSKnownOne);
1029 // Output known-1 are known to be set if set in only one of the LHS, RHS.
1030 APInt KnownOneOut = (RHSKnownZero & LHSKnownOne) |
1031 (RHSKnownOne & LHSKnownZero);
1033 // If all of the demanded bits are known to be zero on one side or the
1034 // other, turn this into an *inclusive* or.
1035 // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
1036 if ((DemandedMask & ~RHSKnownZero & ~LHSKnownZero) == 0) {
1038 BinaryOperator::CreateOr(I->getOperand(0), I->getOperand(1),
1040 return InsertNewInstBefore(Or, *I);
1043 // If all of the demanded bits on one side are known, and all of the set
1044 // bits on that side are also known to be set on the other side, turn this
1045 // into an AND, as we know the bits will be cleared.
1046 // e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
1047 if ((DemandedMask & (RHSKnownZero|RHSKnownOne)) == DemandedMask) {
1049 if ((RHSKnownOne & LHSKnownOne) == RHSKnownOne) {
1050 Constant *AndC = Context->getConstantInt(~RHSKnownOne & DemandedMask);
1052 BinaryOperator::CreateAnd(I->getOperand(0), AndC, "tmp");
1053 return InsertNewInstBefore(And, *I);
1057 // If the RHS is a constant, see if we can simplify it.
1058 // FIXME: for XOR, we prefer to force bits to 1 if they will make a -1.
1059 if (ShrinkDemandedConstant(I, 1, DemandedMask, Context))
1062 RHSKnownZero = KnownZeroOut;
1063 RHSKnownOne = KnownOneOut;
1066 case Instruction::Select:
1067 if (SimplifyDemandedBits(I->getOperandUse(2), DemandedMask,
1068 RHSKnownZero, RHSKnownOne, Depth+1) ||
1069 SimplifyDemandedBits(I->getOperandUse(1), DemandedMask,
1070 LHSKnownZero, LHSKnownOne, Depth+1))
1072 assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
1073 assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");
1075 // If the operands are constants, see if we can simplify them.
1076 if (ShrinkDemandedConstant(I, 1, DemandedMask, Context) ||
1077 ShrinkDemandedConstant(I, 2, DemandedMask, Context))
1080 // Only known if known in both the LHS and RHS.
1081 RHSKnownOne &= LHSKnownOne;
1082 RHSKnownZero &= LHSKnownZero;
1084 case Instruction::Trunc: {
1085 unsigned truncBf = I->getOperand(0)->getType()->getScalarSizeInBits();
1086 DemandedMask.zext(truncBf);
1087 RHSKnownZero.zext(truncBf);
1088 RHSKnownOne.zext(truncBf);
1089 if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMask,
1090 RHSKnownZero, RHSKnownOne, Depth+1))
1092 DemandedMask.trunc(BitWidth);
1093 RHSKnownZero.trunc(BitWidth);
1094 RHSKnownOne.trunc(BitWidth);
1095 assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
1098 case Instruction::BitCast:
1099 if (!I->getOperand(0)->getType()->isIntOrIntVector())
1100 return false; // vector->int or fp->int?
1102 if (const VectorType *DstVTy = dyn_cast<VectorType>(I->getType())) {
1103 if (const VectorType *SrcVTy =
1104 dyn_cast<VectorType>(I->getOperand(0)->getType())) {
1105 if (DstVTy->getNumElements() != SrcVTy->getNumElements())
1106 // Don't touch a bitcast between vectors of different element counts.
1109 // Don't touch a scalar-to-vector bitcast.
1111 } else if (isa<VectorType>(I->getOperand(0)->getType()))
1112 // Don't touch a vector-to-scalar bitcast.
1115 if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMask,
1116 RHSKnownZero, RHSKnownOne, Depth+1))
1118 assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
1120 case Instruction::ZExt: {
1121 // Compute the bits in the result that are not present in the input.
1122 unsigned SrcBitWidth =I->getOperand(0)->getType()->getScalarSizeInBits();
1124 DemandedMask.trunc(SrcBitWidth);
1125 RHSKnownZero.trunc(SrcBitWidth);
1126 RHSKnownOne.trunc(SrcBitWidth);
1127 if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMask,
1128 RHSKnownZero, RHSKnownOne, Depth+1))
1130 DemandedMask.zext(BitWidth);
1131 RHSKnownZero.zext(BitWidth);
1132 RHSKnownOne.zext(BitWidth);
1133 assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
1134 // The top bits are known to be zero.
1135 RHSKnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
1138 case Instruction::SExt: {
1139 // Compute the bits in the result that are not present in the input.
1140 unsigned SrcBitWidth =I->getOperand(0)->getType()->getScalarSizeInBits();
1142 APInt InputDemandedBits = DemandedMask &
1143 APInt::getLowBitsSet(BitWidth, SrcBitWidth);
1145 APInt NewBits(APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth));
1146 // If any of the sign extended bits are demanded, we know that the sign
1148 if ((NewBits & DemandedMask) != 0)
1149 InputDemandedBits.set(SrcBitWidth-1);
1151 InputDemandedBits.trunc(SrcBitWidth);
1152 RHSKnownZero.trunc(SrcBitWidth);
1153 RHSKnownOne.trunc(SrcBitWidth);
1154 if (SimplifyDemandedBits(I->getOperandUse(0), InputDemandedBits,
1155 RHSKnownZero, RHSKnownOne, Depth+1))
1157 InputDemandedBits.zext(BitWidth);
1158 RHSKnownZero.zext(BitWidth);
1159 RHSKnownOne.zext(BitWidth);
1160 assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
1162 // If the sign bit of the input is known set or clear, then we know the
1163 // top bits of the result.
1165 // If the input sign bit is known zero, or if the NewBits are not demanded
1166 // convert this into a zero extension.
1167 if (RHSKnownZero[SrcBitWidth-1] || (NewBits & ~DemandedMask) == NewBits) {
1168 // Convert to ZExt cast
1169 CastInst *NewCast = new ZExtInst(I->getOperand(0), VTy, I->getName());
1170 return InsertNewInstBefore(NewCast, *I);
1171 } else if (RHSKnownOne[SrcBitWidth-1]) { // Input sign bit known set
1172 RHSKnownOne |= NewBits;
1176 case Instruction::Add: {
1177 // Figure out what the input bits are. If the top bits of the and result
1178 // are not demanded, then the add doesn't demand them from its input
1180 unsigned NLZ = DemandedMask.countLeadingZeros();
1182 // If there is a constant on the RHS, there are a variety of xformations
1184 if (ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
1185 // If null, this should be simplified elsewhere. Some of the xforms here
1186 // won't work if the RHS is zero.
1190 // If the top bit of the output is demanded, demand everything from the
1191 // input. Otherwise, we demand all the input bits except NLZ top bits.
1192 APInt InDemandedBits(APInt::getLowBitsSet(BitWidth, BitWidth - NLZ));
1194 // Find information about known zero/one bits in the input.
1195 if (SimplifyDemandedBits(I->getOperandUse(0), InDemandedBits,
1196 LHSKnownZero, LHSKnownOne, Depth+1))
1199 // If the RHS of the add has bits set that can't affect the input, reduce
1201 if (ShrinkDemandedConstant(I, 1, InDemandedBits, Context))
1204 // Avoid excess work.
1205 if (LHSKnownZero == 0 && LHSKnownOne == 0)
1208 // Turn it into OR if input bits are zero.
1209 if ((LHSKnownZero & RHS->getValue()) == RHS->getValue()) {
1211 BinaryOperator::CreateOr(I->getOperand(0), I->getOperand(1),
1213 return InsertNewInstBefore(Or, *I);
1216 // We can say something about the output known-zero and known-one bits,
1217 // depending on potential carries from the input constant and the
1218 // unknowns. For example if the LHS is known to have at most the 0x0F0F0
1219 // bits set and the RHS constant is 0x01001, then we know we have a known
1220 // one mask of 0x00001 and a known zero mask of 0xE0F0E.
1222 // To compute this, we first compute the potential carry bits. These are
1223 // the bits which may be modified. I'm not aware of a better way to do
1225 const APInt &RHSVal = RHS->getValue();
1226 APInt CarryBits((~LHSKnownZero + RHSVal) ^ (~LHSKnownZero ^ RHSVal));
1228 // Now that we know which bits have carries, compute the known-1/0 sets.
1230 // Bits are known one if they are known zero in one operand and one in the
1231 // other, and there is no input carry.
1232 RHSKnownOne = ((LHSKnownZero & RHSVal) |
1233 (LHSKnownOne & ~RHSVal)) & ~CarryBits;
1235 // Bits are known zero if they are known zero in both operands and there
1236 // is no input carry.
1237 RHSKnownZero = LHSKnownZero & ~RHSVal & ~CarryBits;
1239 // If the high-bits of this ADD are not demanded, then it does not demand
1240 // the high bits of its LHS or RHS.
1241 if (DemandedMask[BitWidth-1] == 0) {
1242 // Right fill the mask of bits for this ADD to demand the most
1243 // significant bit and all those below it.
1244 APInt DemandedFromOps(APInt::getLowBitsSet(BitWidth, BitWidth-NLZ));
1245 if (SimplifyDemandedBits(I->getOperandUse(0), DemandedFromOps,
1246 LHSKnownZero, LHSKnownOne, Depth+1) ||
1247 SimplifyDemandedBits(I->getOperandUse(1), DemandedFromOps,
1248 LHSKnownZero, LHSKnownOne, Depth+1))
1254 case Instruction::Sub:
1255 // If the high-bits of this SUB are not demanded, then it does not demand
1256 // the high bits of its LHS or RHS.
1257 if (DemandedMask[BitWidth-1] == 0) {
1258 // Right fill the mask of bits for this SUB to demand the most
1259 // significant bit and all those below it.
1260 uint32_t NLZ = DemandedMask.countLeadingZeros();
1261 APInt DemandedFromOps(APInt::getLowBitsSet(BitWidth, BitWidth-NLZ));
1262 if (SimplifyDemandedBits(I->getOperandUse(0), DemandedFromOps,
1263 LHSKnownZero, LHSKnownOne, Depth+1) ||
1264 SimplifyDemandedBits(I->getOperandUse(1), DemandedFromOps,
1265 LHSKnownZero, LHSKnownOne, Depth+1))
1268 // Otherwise just hand the sub off to ComputeMaskedBits to fill in
1269 // the known zeros and ones.
1270 ComputeMaskedBits(V, DemandedMask, RHSKnownZero, RHSKnownOne, Depth);
1272 case Instruction::Shl:
1273 if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
1274 uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
1275 APInt DemandedMaskIn(DemandedMask.lshr(ShiftAmt));
1276 if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMaskIn,
1277 RHSKnownZero, RHSKnownOne, Depth+1))
1279 assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
1280 RHSKnownZero <<= ShiftAmt;
1281 RHSKnownOne <<= ShiftAmt;
1282 // low bits known zero.
1284 RHSKnownZero |= APInt::getLowBitsSet(BitWidth, ShiftAmt);
1287 case Instruction::LShr:
1288 // For a logical shift right
1289 if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
1290 uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
1292 // Unsigned shift right.
1293 APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
1294 if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMaskIn,
1295 RHSKnownZero, RHSKnownOne, Depth+1))
1297 assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
1298 RHSKnownZero = APIntOps::lshr(RHSKnownZero, ShiftAmt);
1299 RHSKnownOne = APIntOps::lshr(RHSKnownOne, ShiftAmt);
1301 // Compute the new bits that are at the top now.
1302 APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
1303 RHSKnownZero |= HighBits; // high bits known zero.
1307 case Instruction::AShr:
1308 // If this is an arithmetic shift right and only the low-bit is set, we can
1309 // always convert this into a logical shr, even if the shift amount is
1310 // variable. The low bit of the shift cannot be an input sign bit unless
1311 // the shift amount is >= the size of the datatype, which is undefined.
1312 if (DemandedMask == 1) {
1313 // Perform the logical shift right.
1314 Instruction *NewVal = BinaryOperator::CreateLShr(
1315 I->getOperand(0), I->getOperand(1), I->getName());
1316 return InsertNewInstBefore(NewVal, *I);
1319 // If the sign bit is the only bit demanded by this ashr, then there is no
1320 // need to do it, the shift doesn't change the high bit.
1321 if (DemandedMask.isSignBit())
1322 return I->getOperand(0);
1324 if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
1325 uint32_t ShiftAmt = SA->getLimitedValue(BitWidth);
1327 // Signed shift right.
1328 APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
1329 // If any of the "high bits" are demanded, we should set the sign bit as
1331 if (DemandedMask.countLeadingZeros() <= ShiftAmt)
1332 DemandedMaskIn.set(BitWidth-1);
1333 if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMaskIn,
1334 RHSKnownZero, RHSKnownOne, Depth+1))
1336 assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
1337 // Compute the new bits that are at the top now.
1338 APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
1339 RHSKnownZero = APIntOps::lshr(RHSKnownZero, ShiftAmt);
1340 RHSKnownOne = APIntOps::lshr(RHSKnownOne, ShiftAmt);
1342 // Handle the sign bits.
1343 APInt SignBit(APInt::getSignBit(BitWidth));
1344 // Adjust to where it is now in the mask.
1345 SignBit = APIntOps::lshr(SignBit, ShiftAmt);
1347 // If the input sign bit is known to be zero, or if none of the top bits
1348 // are demanded, turn this into an unsigned shift right.
1349 if (BitWidth <= ShiftAmt || RHSKnownZero[BitWidth-ShiftAmt-1] ||
1350 (HighBits & ~DemandedMask) == HighBits) {
1351 // Perform the logical shift right.
1352 Instruction *NewVal = BinaryOperator::CreateLShr(
1353 I->getOperand(0), SA, I->getName());
1354 return InsertNewInstBefore(NewVal, *I);
1355 } else if ((RHSKnownOne & SignBit) != 0) { // New bits are known one.
1356 RHSKnownOne |= HighBits;
1360 case Instruction::SRem:
1361 if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1362 APInt RA = Rem->getValue().abs();
1363 if (RA.isPowerOf2()) {
1364 if (DemandedMask.ult(RA)) // srem won't affect demanded bits
1365 return I->getOperand(0);
1367 APInt LowBits = RA - 1;
1368 APInt Mask2 = LowBits | APInt::getSignBit(BitWidth);
1369 if (SimplifyDemandedBits(I->getOperandUse(0), Mask2,
1370 LHSKnownZero, LHSKnownOne, Depth+1))
1373 if (LHSKnownZero[BitWidth-1] || ((LHSKnownZero & LowBits) == LowBits))
1374 LHSKnownZero |= ~LowBits;
1376 KnownZero |= LHSKnownZero & DemandedMask;
1378 assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
1382 case Instruction::URem: {
1383 APInt KnownZero2(BitWidth, 0), KnownOne2(BitWidth, 0);
1384 APInt AllOnes = APInt::getAllOnesValue(BitWidth);
1385 if (SimplifyDemandedBits(I->getOperandUse(0), AllOnes,
1386 KnownZero2, KnownOne2, Depth+1) ||
1387 SimplifyDemandedBits(I->getOperandUse(1), AllOnes,
1388 KnownZero2, KnownOne2, Depth+1))
1391 unsigned Leaders = KnownZero2.countLeadingOnes();
1392 Leaders = std::max(Leaders,
1393 KnownZero2.countLeadingOnes());
1394 KnownZero = APInt::getHighBitsSet(BitWidth, Leaders) & DemandedMask;
1397 case Instruction::Call:
1398 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1399 switch (II->getIntrinsicID()) {
1401 case Intrinsic::bswap: {
1402 // If the only bits demanded come from one byte of the bswap result,
1403 // just shift the input byte into position to eliminate the bswap.
1404 unsigned NLZ = DemandedMask.countLeadingZeros();
1405 unsigned NTZ = DemandedMask.countTrailingZeros();
1407 // Round NTZ down to the next byte. If we have 11 trailing zeros, then
1408 // we need all the bits down to bit 8. Likewise, round NLZ. If we
1409 // have 14 leading zeros, round to 8.
1412 // If we need exactly one byte, we can do this transformation.
1413 if (BitWidth-NLZ-NTZ == 8) {
1414 unsigned ResultBit = NTZ;
1415 unsigned InputBit = BitWidth-NTZ-8;
1417 // Replace this with either a left or right shift to get the byte into
1419 Instruction *NewVal;
1420 if (InputBit > ResultBit)
1421 NewVal = BinaryOperator::CreateLShr(I->getOperand(1),
1422 Context->getConstantInt(I->getType(), InputBit-ResultBit));
1424 NewVal = BinaryOperator::CreateShl(I->getOperand(1),
1425 Context->getConstantInt(I->getType(), ResultBit-InputBit));
1426 NewVal->takeName(I);
1427 return InsertNewInstBefore(NewVal, *I);
1430 // TODO: Could compute known zero/one bits based on the input.
1435 ComputeMaskedBits(V, DemandedMask, RHSKnownZero, RHSKnownOne, Depth);
1439 // If the client is only demanding bits that we know, return the known
1441 if ((DemandedMask & (RHSKnownZero|RHSKnownOne)) == DemandedMask) {
1442 Constant *C = Context->getConstantInt(RHSKnownOne);
1443 if (isa<PointerType>(V->getType()))
1444 C = Context->getConstantExprIntToPtr(C, V->getType());
1451 /// SimplifyDemandedVectorElts - The specified value produces a vector with
1452 /// any number of elements. DemandedElts contains the set of elements that are
1453 /// actually used by the caller. This method analyzes which elements of the
1454 /// operand are undef and returns that information in UndefElts.
1456 /// If the information about demanded elements can be used to simplify the
1457 /// operation, the operation is simplified, then the resultant value is
1458 /// returned. This returns null if no change was made.
1459 Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
1462 unsigned VWidth = cast<VectorType>(V->getType())->getNumElements();
1463 APInt EltMask(APInt::getAllOnesValue(VWidth));
1464 assert((DemandedElts & ~EltMask) == 0 && "Invalid DemandedElts!");
1466 if (isa<UndefValue>(V)) {
1467 // If the entire vector is undefined, just return this info.
1468 UndefElts = EltMask;
1470 } else if (DemandedElts == 0) { // If nothing is demanded, provide undef.
1471 UndefElts = EltMask;
1472 return Context->getUndef(V->getType());
1476 if (ConstantVector *CP = dyn_cast<ConstantVector>(V)) {
1477 const Type *EltTy = cast<VectorType>(V->getType())->getElementType();
1478 Constant *Undef = Context->getUndef(EltTy);
1480 std::vector<Constant*> Elts;
1481 for (unsigned i = 0; i != VWidth; ++i)
1482 if (!DemandedElts[i]) { // If not demanded, set to undef.
1483 Elts.push_back(Undef);
1485 } else if (isa<UndefValue>(CP->getOperand(i))) { // Already undef.
1486 Elts.push_back(Undef);
1488 } else { // Otherwise, defined.
1489 Elts.push_back(CP->getOperand(i));
1492 // If we changed the constant, return it.
1493 Constant *NewCP = Context->getConstantVector(Elts);
1494 return NewCP != CP ? NewCP : 0;
1495 } else if (isa<ConstantAggregateZero>(V)) {
1496 // Simplify the CAZ to a ConstantVector where the non-demanded elements are
1499 // Check if this is identity. If so, return 0 since we are not simplifying
1501 if (DemandedElts == ((1ULL << VWidth) -1))
1504 const Type *EltTy = cast<VectorType>(V->getType())->getElementType();
1505 Constant *Zero = Context->getNullValue(EltTy);
1506 Constant *Undef = Context->getUndef(EltTy);
1507 std::vector<Constant*> Elts;
1508 for (unsigned i = 0; i != VWidth; ++i) {
1509 Constant *Elt = DemandedElts[i] ? Zero : Undef;
1510 Elts.push_back(Elt);
1512 UndefElts = DemandedElts ^ EltMask;
1513 return Context->getConstantVector(Elts);
1516 // Limit search depth.
1520 // If multiple users are using the root value, proceed with
1521 // simplification conservatively assuming that all elements
1523 if (!V->hasOneUse()) {
1524 // Quit if we find multiple users of a non-root value though.
1525 // They'll be handled when it's their turn to be visited by
1526 // the main instcombine process.
1528 // TODO: Just compute the UndefElts information recursively.
1531 // Conservatively assume that all elements are needed.
1532 DemandedElts = EltMask;
1535 Instruction *I = dyn_cast<Instruction>(V);
1536 if (!I) return 0; // Only analyze instructions.
1538 bool MadeChange = false;
1539 APInt UndefElts2(VWidth, 0);
1541 switch (I->getOpcode()) {
1544 case Instruction::InsertElement: {
1545 // If this is a variable index, we don't know which element it overwrites,
1546 // so demand exactly the same input as we produce.
1547 ConstantInt *Idx = dyn_cast<ConstantInt>(I->getOperand(2));
1549 // Note that we can't propagate undef elt info, because we don't know
1550 // which elt is getting updated.
1551 TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts,
1552 UndefElts2, Depth+1);
1553 if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
1557 // If this is inserting an element that isn't demanded, remove this
1559 unsigned IdxNo = Idx->getZExtValue();
1560 if (IdxNo >= VWidth || !DemandedElts[IdxNo])
1561 return AddSoonDeadInstToWorklist(*I, 0);
1563 // Otherwise, the element inserted overwrites whatever was there, so the
1564 // input demanded set is simpler than the output set.
1565 APInt DemandedElts2 = DemandedElts;
1566 DemandedElts2.clear(IdxNo);
1567 TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts2,
1568 UndefElts, Depth+1);
1569 if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
1571 // The inserted element is defined.
1572 UndefElts.clear(IdxNo);
1575 case Instruction::ShuffleVector: {
1576 ShuffleVectorInst *Shuffle = cast<ShuffleVectorInst>(I);
1577 uint64_t LHSVWidth =
1578 cast<VectorType>(Shuffle->getOperand(0)->getType())->getNumElements();
1579 APInt LeftDemanded(LHSVWidth, 0), RightDemanded(LHSVWidth, 0);
1580 for (unsigned i = 0; i < VWidth; i++) {
1581 if (DemandedElts[i]) {
1582 unsigned MaskVal = Shuffle->getMaskValue(i);
1583 if (MaskVal != -1u) {
1584 assert(MaskVal < LHSVWidth * 2 &&
1585 "shufflevector mask index out of range!");
1586 if (MaskVal < LHSVWidth)
1587 LeftDemanded.set(MaskVal);
1589 RightDemanded.set(MaskVal - LHSVWidth);
1594 APInt UndefElts4(LHSVWidth, 0);
1595 TmpV = SimplifyDemandedVectorElts(I->getOperand(0), LeftDemanded,
1596 UndefElts4, Depth+1);
1597 if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
1599 APInt UndefElts3(LHSVWidth, 0);
1600 TmpV = SimplifyDemandedVectorElts(I->getOperand(1), RightDemanded,
1601 UndefElts3, Depth+1);
1602 if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }
1604 bool NewUndefElts = false;
1605 for (unsigned i = 0; i < VWidth; i++) {
1606 unsigned MaskVal = Shuffle->getMaskValue(i);
1607 if (MaskVal == -1u) {
1609 } else if (MaskVal < LHSVWidth) {
1610 if (UndefElts4[MaskVal]) {
1611 NewUndefElts = true;
1615 if (UndefElts3[MaskVal - LHSVWidth]) {
1616 NewUndefElts = true;
1623 // Add additional discovered undefs.
1624 std::vector<Constant*> Elts;
1625 for (unsigned i = 0; i < VWidth; ++i) {
1627 Elts.push_back(Context->getUndef(Type::Int32Ty));
1629 Elts.push_back(Context->getConstantInt(Type::Int32Ty,
1630 Shuffle->getMaskValue(i)));
1632 I->setOperand(2, Context->getConstantVector(Elts));
1637 case Instruction::BitCast: {
1638 // Vector->vector casts only.
1639 const VectorType *VTy = dyn_cast<VectorType>(I->getOperand(0)->getType());
1641 unsigned InVWidth = VTy->getNumElements();
1642 APInt InputDemandedElts(InVWidth, 0);
1645 if (VWidth == InVWidth) {
1646 // If we are converting from <4 x i32> -> <4 x f32>, we demand the same
1647 // elements as are demanded of us.
1649 InputDemandedElts = DemandedElts;
1650 } else if (VWidth > InVWidth) {
1654 // If there are more elements in the result than there are in the source,
1655 // then an input element is live if any of the corresponding output
1656 // elements are live.
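// For example (illustrative), for a <2 x i64> -> <4 x i32> bitcast the ratio
// is 2, so demanding result element 2 or 3 demands source element 1.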
1657 Ratio = VWidth/InVWidth;
1658 for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx) {
1659 if (DemandedElts[OutIdx])
1660 InputDemandedElts.set(OutIdx/Ratio);
1666 // If there are more elements in the source than there are in the result,
1667 // then an input element is live if the corresponding output element is
1669 Ratio = InVWidth/VWidth;
1670 for (unsigned InIdx = 0; InIdx != InVWidth; ++InIdx)
1671 if (DemandedElts[InIdx/Ratio])
1672 InputDemandedElts.set(InIdx);
1675 // div/rem demand all inputs, because they don't want divide by zero.
1676 TmpV = SimplifyDemandedVectorElts(I->getOperand(0), InputDemandedElts,
1677 UndefElts2, Depth+1);
1679 I->setOperand(0, TmpV);
1683 UndefElts = UndefElts2;
1684 if (VWidth > InVWidth) {
1685 LLVM_UNREACHABLE("Unimp");
1686 // If there are more elements in the result than there are in the source,
1687 // then an output element is undef if the corresponding input element is
1689 for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx)
1690 if (UndefElts2[OutIdx/Ratio])
1691 UndefElts.set(OutIdx);
1692 } else if (VWidth < InVWidth) {
1693 LLVM_UNREACHABLE("Unimp");
1694 // If there are more elements in the source than there are in the result,
1695 // then a result element is undef if all of the corresponding input
1696 // elements are undef.
1697 UndefElts = ~0ULL >> (64-VWidth); // Start out all undef.
1698 for (unsigned InIdx = 0; InIdx != InVWidth; ++InIdx)
1699 if (!UndefElts2[InIdx]) // Not undef?
1700 UndefElts.clear(InIdx/Ratio); // Clear undef bit.
1704 case Instruction::And:
1705 case Instruction::Or:
1706 case Instruction::Xor:
1707 case Instruction::Add:
1708 case Instruction::Sub:
1709 case Instruction::Mul:
1710 // div/rem demand all inputs, because they don't want divide by zero.
1711 TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts,
1712 UndefElts, Depth+1);
1713 if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
1714 TmpV = SimplifyDemandedVectorElts(I->getOperand(1), DemandedElts,
1715 UndefElts2, Depth+1);
1716 if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }
1718 // Output elements are undefined if both are undefined. Consider things
1719 // like undef&0. The result is known zero, not undef.
1720 UndefElts &= UndefElts2;
1723 case Instruction::Call: {
1724 IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
1726 switch (II->getIntrinsicID()) {
1729 // Binary vector operations that work column-wise. A dest element is a
1730 // function of the corresponding input elements from the two inputs.
1731 case Intrinsic::x86_sse_sub_ss:
1732 case Intrinsic::x86_sse_mul_ss:
1733 case Intrinsic::x86_sse_min_ss:
1734 case Intrinsic::x86_sse_max_ss:
1735 case Intrinsic::x86_sse2_sub_sd:
1736 case Intrinsic::x86_sse2_mul_sd:
1737 case Intrinsic::x86_sse2_min_sd:
1738 case Intrinsic::x86_sse2_max_sd:
1739 TmpV = SimplifyDemandedVectorElts(II->getOperand(1), DemandedElts,
1740 UndefElts, Depth+1);
1741 if (TmpV) { II->setOperand(1, TmpV); MadeChange = true; }
1742 TmpV = SimplifyDemandedVectorElts(II->getOperand(2), DemandedElts,
1743 UndefElts2, Depth+1);
1744 if (TmpV) { II->setOperand(2, TmpV); MadeChange = true; }
1746 // If only the low elt is demanded and this is a scalarizable intrinsic,
1747 // scalarize it now.
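// For example (illustrative), if only element 0 of x86_sse_sub_ss(A, B) is
// demanded, it can be computed as an fsub of the two low elements inserted
// into an undef vector, as the code below does.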
1748 if (DemandedElts == 1) {
1749 switch (II->getIntrinsicID()) {
1751 case Intrinsic::x86_sse_sub_ss:
1752 case Intrinsic::x86_sse_mul_ss:
1753 case Intrinsic::x86_sse2_sub_sd:
1754 case Intrinsic::x86_sse2_mul_sd:
1755 // TODO: Lower MIN/MAX/ABS/etc
1756 Value *LHS = II->getOperand(1);
1757 Value *RHS = II->getOperand(2);
1758 // Extract the low element of each operand as a scalar.
1759 LHS = InsertNewInstBefore(new ExtractElementInst(LHS, 0U,"tmp"), *II);
1760 RHS = InsertNewInstBefore(new ExtractElementInst(RHS, 0U,"tmp"), *II);
1762 switch (II->getIntrinsicID()) {
1763 default: LLVM_UNREACHABLE("Case stmts out of sync!");
1764 case Intrinsic::x86_sse_sub_ss:
1765 case Intrinsic::x86_sse2_sub_sd:
1766 TmpV = InsertNewInstBefore(BinaryOperator::CreateFSub(LHS, RHS,
1767 II->getName()), *II);
1769 case Intrinsic::x86_sse_mul_ss:
1770 case Intrinsic::x86_sse2_mul_sd:
1771 TmpV = InsertNewInstBefore(BinaryOperator::CreateFMul(LHS, RHS,
1772 II->getName()), *II);
1777 InsertElementInst::Create(
1778 Context->getUndef(II->getType()), TmpV, 0U, II->getName());
1779 InsertNewInstBefore(New, *II);
1780 AddSoonDeadInstToWorklist(*II, 0);
1785 // Output elements are undefined if both are undefined. Consider things
1786 // like undef&0. The result is known zero, not undef.
1787 UndefElts &= UndefElts2;
1793 return MadeChange ? I : 0;
1797 /// AssociativeOpt - Perform an optimization on an associative operator. This
1798 /// function is designed to check a chain of associative operators for a
1799 /// potential to apply a certain optimization. Since the optimization may be
1800 /// applicable if the expression was reassociated, this checks the chain, then
1801 /// reassociates the expression as necessary to expose the optimization
1802 /// opportunity. This makes use of a special Functor, which must define
1803 /// 'shouldApply' and 'apply' methods.
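///
/// For example (illustrative), visitAdd uses this with the AddRHS functor
/// below: given ((A + B) + X) + X, the chain is reassociated so the two X
/// values become adjacent and the functor folds them to (A + B) + (X << 1).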
1805 template<typename Functor>
1806 static Instruction *AssociativeOpt(BinaryOperator &Root, const Functor &F,
1807 LLVMContext *Context) {
1808 unsigned Opcode = Root.getOpcode();
1809 Value *LHS = Root.getOperand(0);
1811 // Quick check, see if the immediate LHS matches...
1812 if (F.shouldApply(LHS))
1813 return F.apply(Root);
1815 // Otherwise, if the LHS is not of the same opcode as the root, return.
1816 Instruction *LHSI = dyn_cast<Instruction>(LHS);
1817 while (LHSI && LHSI->getOpcode() == Opcode && LHSI->hasOneUse()) {
1818 // Should we apply this transform to the RHS?
1819 bool ShouldApply = F.shouldApply(LHSI->getOperand(1));
1821 // If not to the RHS, check to see if we should apply to the LHS...
1822 if (!ShouldApply && F.shouldApply(LHSI->getOperand(0))) {
1823 cast<BinaryOperator>(LHSI)->swapOperands(); // Make the LHS the RHS
1827 // If the functor wants to apply the optimization to the RHS of LHSI,
1828 // reassociate the expression from ((? op A) op B) to (? op (A op B))
1830 // Now all of the instructions are in the current basic block, go ahead
1831 // and perform the reassociation.
1832 Instruction *TmpLHSI = cast<Instruction>(Root.getOperand(0));
1834 // First move the selected RHS to the LHS of the root...
1835 Root.setOperand(0, LHSI->getOperand(1));
1837 // Make what used to be the LHS of the root be the user of the root...
1838 Value *ExtraOperand = TmpLHSI->getOperand(1);
1839 if (&Root == TmpLHSI) {
1840 Root.replaceAllUsesWith(Context->getNullValue(TmpLHSI->getType()));
1843 Root.replaceAllUsesWith(TmpLHSI); // Users now use TmpLHSI
1844 TmpLHSI->setOperand(1, &Root); // TmpLHSI now uses the root
1845 BasicBlock::iterator ARI = &Root; ++ARI;
1846 TmpLHSI->moveBefore(ARI); // Move TmpLHSI to after Root
1849 // Now propagate the ExtraOperand down the chain of instructions until we
1851 while (TmpLHSI != LHSI) {
1852 Instruction *NextLHSI = cast<Instruction>(TmpLHSI->getOperand(0));
1853 // Move the instruction to immediately before the chain we are
1854 // constructing to avoid breaking dominance properties.
1855 NextLHSI->moveBefore(ARI);
1858 Value *NextOp = NextLHSI->getOperand(1);
1859 NextLHSI->setOperand(1, ExtraOperand);
1861 ExtraOperand = NextOp;
1864 // Now that the instructions are reassociated, have the functor perform
1865 // the transformation...
1866 return F.apply(Root);
1869 LHSI = dyn_cast<Instruction>(LHSI->getOperand(0));
1876 // AddRHS - Implements: X + X --> X << 1
1879 LLVMContext *Context;
1880 AddRHS(Value *rhs, LLVMContext *C) : RHS(rhs), Context(C) {}
1881 bool shouldApply(Value *LHS) const { return LHS == RHS; }
1882 Instruction *apply(BinaryOperator &Add) const {
1883 return BinaryOperator::CreateShl(Add.getOperand(0),
1884 Context->getConstantInt(Add.getType(), 1));
1888 // AddMaskingAnd - Implements (A & C1)+(B & C2) --> (A & C1)|(B & C2)
1890 struct AddMaskingAnd {
1892 LLVMContext *Context;
1893 AddMaskingAnd(Constant *c, LLVMContext *C) : C2(c), Context(C) {}
1894 bool shouldApply(Value *LHS) const {
1896 return match(LHS, m_And(m_Value(), m_ConstantInt(C1)), *Context) &&
1897 Context->getConstantExprAnd(C1, C2)->isNullValue();
1899 Instruction *apply(BinaryOperator &Add) const {
1900 return BinaryOperator::CreateOr(Add.getOperand(0), Add.getOperand(1));
1906 static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO,
1908 LLVMContext *Context = IC->getContext();
1910 if (CastInst *CI = dyn_cast<CastInst>(&I)) {
1911 return IC->InsertCastBefore(CI->getOpcode(), SO, I.getType(), I);
1914 // Figure out if the constant is the left or the right argument.
1915 bool ConstIsRHS = isa<Constant>(I.getOperand(1));
1916 Constant *ConstOperand = cast<Constant>(I.getOperand(ConstIsRHS));
1918 if (Constant *SOC = dyn_cast<Constant>(SO)) {
1920 return Context->getConstantExpr(I.getOpcode(), SOC, ConstOperand);
1921 return Context->getConstantExpr(I.getOpcode(), ConstOperand, SOC);
1924 Value *Op0 = SO, *Op1 = ConstOperand;
1926 std::swap(Op0, Op1);
1928 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I))
1929 New = BinaryOperator::Create(BO->getOpcode(), Op0, Op1,SO->getName()+".op");
1930 else if (CmpInst *CI = dyn_cast<CmpInst>(&I))
1931 New = CmpInst::Create(*Context, CI->getOpcode(), CI->getPredicate(),
1932 Op0, Op1, SO->getName()+".cmp");
1934 LLVM_UNREACHABLE("Unknown binary instruction type!");
1936 return IC->InsertNewInstBefore(New, I);
1939 // FoldOpIntoSelect - Given an instruction with a select as one operand and a
1940 // constant as the other operand, try to fold the binary operator into the
1941 // select arguments. This also works for Cast instructions, which obviously do
1942 // not have a second operand.
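//
// For example (illustrative):
//   add (select %c, 1, 2), 4  -->  select %c, 5, 6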
1943 static Instruction *FoldOpIntoSelect(Instruction &Op, SelectInst *SI,
1945 // Don't modify shared select instructions
1946 if (!SI->hasOneUse()) return 0;
1947 Value *TV = SI->getOperand(1);
1948 Value *FV = SI->getOperand(2);
1950 if (isa<Constant>(TV) || isa<Constant>(FV)) {
1951 // Bool selects with constant operands can be folded to logical ops.
1952 if (SI->getType() == Type::Int1Ty) return 0;
1954 Value *SelectTrueVal = FoldOperationIntoSelectOperand(Op, TV, IC);
1955 Value *SelectFalseVal = FoldOperationIntoSelectOperand(Op, FV, IC);
1957 return SelectInst::Create(SI->getCondition(), SelectTrueVal,
1964 /// FoldOpIntoPhi - Given a binary operator or cast instruction which has a PHI
1965 /// node as operand #0, see if we can fold the instruction into the PHI (which
1966 /// is only possible if all operands to the PHI are constants).
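///
/// For example (illustrative):
///   add (phi [1, %bb0], [2, %bb1]), 8  -->  phi [9, %bb0], [10, %bb1]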
1967 Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
1968 PHINode *PN = cast<PHINode>(I.getOperand(0));
1969 unsigned NumPHIValues = PN->getNumIncomingValues();
1970 if (!PN->hasOneUse() || NumPHIValues == 0) return 0;
1972 // Check to see if all of the operands of the PHI are constants. If there is
1973 // one non-constant value, remember the BB it is in. If there is more than one
1974 // or if *it* is a PHI, bail out.
1975 BasicBlock *NonConstBB = 0;
1976 for (unsigned i = 0; i != NumPHIValues; ++i)
1977 if (!isa<Constant>(PN->getIncomingValue(i))) {
1978 if (NonConstBB) return 0; // More than one non-const value.
1979 if (isa<PHINode>(PN->getIncomingValue(i))) return 0; // Itself a phi.
1980 NonConstBB = PN->getIncomingBlock(i);
1982 // If the incoming non-constant value is in I's block, we have an infinite
1984 if (NonConstBB == I.getParent())
1988 // If there is exactly one non-constant value, we can insert a copy of the
1989 // operation in that block. However, if this is a critical edge, we would be
1990 // inserting the computation on some other paths (e.g. inside a loop). Only
1991 // do this if the pred block is unconditionally branching into the phi block.
1993 BranchInst *BI = dyn_cast<BranchInst>(NonConstBB->getTerminator());
1994 if (!BI || !BI->isUnconditional()) return 0;
1997 // Okay, we can do the transformation: create the new PHI node.
1998 PHINode *NewPN = PHINode::Create(I.getType(), "");
1999 NewPN->reserveOperandSpace(PN->getNumOperands()/2);
2000 InsertNewInstBefore(NewPN, *PN);
2001 NewPN->takeName(PN);
2003 // Next, add all of the operands to the PHI.
2004 if (I.getNumOperands() == 2) {
2005 Constant *C = cast<Constant>(I.getOperand(1));
2006 for (unsigned i = 0; i != NumPHIValues; ++i) {
2008 if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) {
2009 if (CmpInst *CI = dyn_cast<CmpInst>(&I))
2010 InV = Context->getConstantExprCompare(CI->getPredicate(), InC, C);
2012 InV = Context->getConstantExpr(I.getOpcode(), InC, C);
2014 assert(PN->getIncomingBlock(i) == NonConstBB);
2015 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I))
2016 InV = BinaryOperator::Create(BO->getOpcode(),
2017 PN->getIncomingValue(i), C, "phitmp",
2018 NonConstBB->getTerminator());
2019 else if (CmpInst *CI = dyn_cast<CmpInst>(&I))
2020 InV = CmpInst::Create(*Context, CI->getOpcode(),
2022 PN->getIncomingValue(i), C, "phitmp",
2023 NonConstBB->getTerminator());
2025 LLVM_UNREACHABLE("Unknown binop!");
2027 AddToWorkList(cast<Instruction>(InV));
2029 NewPN->addIncoming(InV, PN->getIncomingBlock(i));
2032 CastInst *CI = cast<CastInst>(&I);
2033 const Type *RetTy = CI->getType();
2034 for (unsigned i = 0; i != NumPHIValues; ++i) {
2036 if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) {
2037 InV = Context->getConstantExprCast(CI->getOpcode(), InC, RetTy);
2039 assert(PN->getIncomingBlock(i) == NonConstBB);
2040 InV = CastInst::Create(CI->getOpcode(), PN->getIncomingValue(i),
2041 I.getType(), "phitmp",
2042 NonConstBB->getTerminator());
2043 AddToWorkList(cast<Instruction>(InV));
2045 NewPN->addIncoming(InV, PN->getIncomingBlock(i));
2048 return ReplaceInstUsesWith(I, NewPN);
2052 /// WillNotOverflowSignedAdd - Return true if we can prove that:
2053 /// (sext (add LHS, RHS)) === (add (sext LHS), (sext RHS))
2054 /// This basically requires proving that the add in the original type would not
2055 /// overflow to change the sign bit or have a carry out.
2056 bool InstCombiner::WillNotOverflowSignedAdd(Value *LHS, Value *RHS) {
2057 // There are different heuristics we can use for this. Here are some simple
2060 // Add has the property that adding any two 2's complement numbers can only
2061 // have one carry bit which can change a sign. As such, if LHS and RHS each
2062 // have at least two sign bits, we know that the addition of the two values will
2063 // sign extend fine.
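// For example (illustrative), if LHS and RHS are both i32 values produced by
// sign-extending i16 values, each has at least 17 sign bits, so their i32 sum
// cannot overflow.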
2064 if (ComputeNumSignBits(LHS) > 1 && ComputeNumSignBits(RHS) > 1)
2068 // If one of the operands only has one non-zero bit, and if the other operand
2069 // has a known-zero bit in a more significant place than it (not including the
2070 // sign bit) the ripple may go up to and fill the zero, but won't change the
2071 // sign. For example, (X & ~4) + 1.
2079 Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
2080 bool Changed = SimplifyCommutative(I);
2081 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
2083 if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
2084 // X + undef -> undef
2085 if (isa<UndefValue>(RHS))
2086 return ReplaceInstUsesWith(I, RHS);
2089 if (RHSC->isNullValue())
2090 return ReplaceInstUsesWith(I, LHS);
2092 if (ConstantInt *CI = dyn_cast<ConstantInt>(RHSC)) {
2093 // X + (signbit) --> X ^ signbit
2094 const APInt& Val = CI->getValue();
2095 uint32_t BitWidth = Val.getBitWidth();
2096 if (Val == APInt::getSignBit(BitWidth))
2097 return BinaryOperator::CreateXor(LHS, RHS);
2099 // See if SimplifyDemandedBits can simplify this. This handles stuff like
2100 // (X & 254)+1 -> (X&254)|1
2101 if (SimplifyDemandedInstructionBits(I))
2104 // zext(i1) - 1 -> select i1, 0, -1
2105 if (ZExtInst *ZI = dyn_cast<ZExtInst>(LHS))
2106 if (CI->isAllOnesValue() &&
2107 ZI->getOperand(0)->getType() == Type::Int1Ty)
2108 return SelectInst::Create(ZI->getOperand(0),
2109 Context->getNullValue(I.getType()),
2110 Context->getConstantIntAllOnesValue(I.getType()));
2113 if (isa<PHINode>(LHS))
2114 if (Instruction *NV = FoldOpIntoPhi(I))
2117 ConstantInt *XorRHS = 0;
2119 if (isa<ConstantInt>(RHSC) &&
2120 match(LHS, m_Xor(m_Value(XorLHS), m_ConstantInt(XorRHS)), *Context)) {
2121 uint32_t TySizeBits = I.getType()->getScalarSizeInBits();
2122 const APInt& RHSVal = cast<ConstantInt>(RHSC)->getValue();
2124 uint32_t Size = TySizeBits / 2;
2125 APInt C0080Val(APInt(TySizeBits, 1ULL).shl(Size - 1));
2126 APInt CFF80Val(-C0080Val);
2128 if (TySizeBits > Size) {
2129 // If we have ADD(XOR(AND(X, 0xFF), 0x80), 0xF..F80), it's a sext.
2130 // If we have ADD(XOR(AND(X, 0xFF), 0xF..F80), 0x80), it's a sext.
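// For example (illustrative), in i32: ((X & 0xFF) ^ 0x80) + 0xFFFFFF80 is the
// sign extension of the low byte of X, since the add is just subtracting 0x80.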
2131 if ((RHSVal == CFF80Val && XorRHS->getValue() == C0080Val) ||
2132 (RHSVal == C0080Val && XorRHS->getValue() == CFF80Val)) {
2133 // This is a sign extend if the top bits are known zero.
2134 if (!MaskedValueIsZero(XorLHS,
2135 APInt::getHighBitsSet(TySizeBits, TySizeBits - Size)))
2136 Size = 0; // Not a sign ext, but can't be any others either.
2141 C0080Val = APIntOps::lshr(C0080Val, Size);
2142 CFF80Val = APIntOps::ashr(CFF80Val, Size);
2143 } while (Size >= 1);
2145 // FIXME: This shouldn't be necessary. When the backends can handle types
2146 // with funny bit widths then this switch statement should be removed. It
2147 // is just here to get the size of the "middle" type back up to something
2148 // that the back ends can handle.
2149 const Type *MiddleType = 0;
2152 case 32: MiddleType = Type::Int32Ty; break;
2153 case 16: MiddleType = Type::Int16Ty; break;
2154 case 8: MiddleType = Type::Int8Ty; break;
2157 Instruction *NewTrunc = new TruncInst(XorLHS, MiddleType, "sext");
2158 InsertNewInstBefore(NewTrunc, I);
2159 return new SExtInst(NewTrunc, I.getType(), I.getName());
2164 if (I.getType() == Type::Int1Ty)
2165 return BinaryOperator::CreateXor(LHS, RHS);
2168 if (I.getType()->isInteger()) {
2169 if (Instruction *Result = AssociativeOpt(I, AddRHS(RHS, Context), Context))
2172 if (Instruction *RHSI = dyn_cast<Instruction>(RHS)) {
2173 if (RHSI->getOpcode() == Instruction::Sub)
2174 if (LHS == RHSI->getOperand(1)) // A + (B - A) --> B
2175 return ReplaceInstUsesWith(I, RHSI->getOperand(0));
2177 if (Instruction *LHSI = dyn_cast<Instruction>(LHS)) {
2178 if (LHSI->getOpcode() == Instruction::Sub)
2179 if (RHS == LHSI->getOperand(1)) // (B - A) + A --> B
2180 return ReplaceInstUsesWith(I, LHSI->getOperand(0));
2185 // -A + -B --> -(A + B)
2186 if (Value *LHSV = dyn_castNegVal(LHS, Context)) {
2187 if (LHS->getType()->isIntOrIntVector()) {
2188 if (Value *RHSV = dyn_castNegVal(RHS, Context)) {
2189 Instruction *NewAdd = BinaryOperator::CreateAdd(LHSV, RHSV, "sum");
2190 InsertNewInstBefore(NewAdd, I);
2191 return BinaryOperator::CreateNeg(*Context, NewAdd);
2195 return BinaryOperator::CreateSub(RHS, LHSV);
2199 if (!isa<Constant>(RHS))
2200 if (Value *V = dyn_castNegVal(RHS, Context))
2201 return BinaryOperator::CreateSub(LHS, V);
2205 if (Value *X = dyn_castFoldableMul(LHS, C2, Context)) {
2206 if (X == RHS) // X*C + X --> X * (C+1)
2207 return BinaryOperator::CreateMul(RHS, AddOne(C2, Context));
2209 // X*C1 + X*C2 --> X * (C1+C2)
2211 if (X == dyn_castFoldableMul(RHS, C1, Context))
2212 return BinaryOperator::CreateMul(X, Context->getConstantExprAdd(C1, C2));
2215 // X + X*C --> X * (C+1)
2216 if (dyn_castFoldableMul(RHS, C2, Context) == LHS)
2217 return BinaryOperator::CreateMul(LHS, AddOne(C2, Context));
2219 // X + ~X --> -1 since ~X = -X-1
2220 if (dyn_castNotVal(LHS, Context) == RHS ||
2221 dyn_castNotVal(RHS, Context) == LHS)
2222 return ReplaceInstUsesWith(I, Context->getAllOnesValue(I.getType()));
2225 // (A & C1)+(B & C2) --> (A & C1)|(B & C2) iff C1&C2 == 0
2226 if (match(RHS, m_And(m_Value(), m_ConstantInt(C2)), *Context))
2227 if (Instruction *R = AssociativeOpt(I, AddMaskingAnd(C2, Context), Context))
2230 // A+B --> A|B iff A and B have no bits set in common.
2231 if (const IntegerType *IT = dyn_cast<IntegerType>(I.getType())) {
2232 APInt Mask = APInt::getAllOnesValue(IT->getBitWidth());
2233 APInt LHSKnownOne(IT->getBitWidth(), 0);
2234 APInt LHSKnownZero(IT->getBitWidth(), 0);
2235 ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
2236 if (LHSKnownZero != 0) {
2237 APInt RHSKnownOne(IT->getBitWidth(), 0);
2238 APInt RHSKnownZero(IT->getBitWidth(), 0);
2239 ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);
2241 // No bits in common -> bitwise or.
2242 if ((LHSKnownZero|RHSKnownZero).isAllOnesValue())
2243 return BinaryOperator::CreateOr(LHS, RHS);
2247 // W*X + Y*Z --> W * (X+Z) iff W == Y
2248 if (I.getType()->isIntOrIntVector()) {
2249 Value *W, *X, *Y, *Z;
2250 if (match(LHS, m_Mul(m_Value(W), m_Value(X)), *Context) &&
2251 match(RHS, m_Mul(m_Value(Y), m_Value(Z)), *Context)) {
2255 } else if (Y == X) {
2257 } else if (X == Z) {
2264 Value *NewAdd = InsertNewInstBefore(BinaryOperator::CreateAdd(X, Z,
2265 LHS->getName()), I);
2266 return BinaryOperator::CreateMul(W, NewAdd);
2271 if (ConstantInt *CRHS = dyn_cast<ConstantInt>(RHS)) {
2273 if (match(LHS, m_Not(m_Value(X)), *Context)) // ~X + C --> (C-1) - X
2274 return BinaryOperator::CreateSub(SubOne(CRHS, Context), X);
2276 // (X & FF00) + xx00 -> (X+xx00) & FF00
2277 if (LHS->hasOneUse() &&
2278 match(LHS, m_And(m_Value(X), m_ConstantInt(C2)), *Context)) {
2279 Constant *Anded = Context->getConstantExprAnd(CRHS, C2);
2280 if (Anded == CRHS) {
2281 // See if all bits from the first bit set in the Add RHS up are included
2282 // in the mask. First, get the rightmost bit.
2283 const APInt& AddRHSV = CRHS->getValue();
2285 // Form a mask of all bits from the lowest bit added through the top.
2286 APInt AddRHSHighBits(~((AddRHSV & -AddRHSV)-1));
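// For example (illustrative), if AddRHSV is 0x0100 then AddRHSV & -AddRHSV is
// 0x0100, so AddRHSHighBits becomes ~0x00FF, i.e. all bits from bit 8 upward.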
2288 // See if the and mask includes all of these bits.
2289 APInt AddRHSHighBitsAnd(AddRHSHighBits & C2->getValue());
2291 if (AddRHSHighBits == AddRHSHighBitsAnd) {
2292 // Okay, the xform is safe. Insert the new add pronto.
2293 Value *NewAdd = InsertNewInstBefore(BinaryOperator::CreateAdd(X, CRHS,
2294 LHS->getName()), I);
2295 return BinaryOperator::CreateAnd(NewAdd, C2);
2300 // Try to fold constant add into select arguments.
2301 if (SelectInst *SI = dyn_cast<SelectInst>(LHS))
2302 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
2306 // add (cast *A to intptrtype) B ->
2307 // cast (GEP (cast *A to i8*) B) --> intptrtype
2309 CastInst *CI = dyn_cast<CastInst>(LHS);
2312 CI = dyn_cast<CastInst>(RHS);
2315 if (CI && CI->getType()->isSized() &&
2316 (CI->getType()->getScalarSizeInBits() ==
2317 TD->getIntPtrType()->getPrimitiveSizeInBits())
2318 && isa<PointerType>(CI->getOperand(0)->getType())) {
2320 cast<PointerType>(CI->getOperand(0)->getType())->getAddressSpace();
2321 Value *I2 = InsertBitCastBefore(CI->getOperand(0),
2322 Context->getPointerType(Type::Int8Ty, AS), I);
2323 I2 = InsertNewInstBefore(GetElementPtrInst::Create(I2, Other, "ctg2"), I);
2324 return new PtrToIntInst(I2, CI->getType());
2328 // add (select X 0 (sub n A)) A --> select X A n
2330 SelectInst *SI = dyn_cast<SelectInst>(LHS);
2333 SI = dyn_cast<SelectInst>(RHS);
2336 if (SI && SI->hasOneUse()) {
2337 Value *TV = SI->getTrueValue();
2338 Value *FV = SI->getFalseValue();
2341 // Can we fold the add into the argument of the select?
2342 // We check both true and false select arguments for a matching subtract.
2343 if (match(FV, m_Zero(), *Context) &&
2344 match(TV, m_Sub(m_Value(N), m_Specific(A)), *Context))
2345 // Fold the add into the true select value.
2346 return SelectInst::Create(SI->getCondition(), N, A);
2347 if (match(TV, m_Zero(), *Context) &&
2348 match(FV, m_Sub(m_Value(N), m_Specific(A)), *Context))
2349 // Fold the add into the false select value.
2350 return SelectInst::Create(SI->getCondition(), A, N);
2354 // Check for (add (sext x), y), see if we can merge this into an
2355 // integer add followed by a sext.
2356 if (SExtInst *LHSConv = dyn_cast<SExtInst>(LHS)) {
2357 // (add (sext x), cst) --> (sext (add x, cst'))
2358 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS)) {
2360 Context->getConstantExprTrunc(RHSC, LHSConv->getOperand(0)->getType());
2361 if (LHSConv->hasOneUse() &&
2362 Context->getConstantExprSExt(CI, I.getType()) == RHSC &&
2363 WillNotOverflowSignedAdd(LHSConv->getOperand(0), CI)) {
2364 // Insert the new, smaller add.
2365 Instruction *NewAdd = BinaryOperator::CreateAdd(LHSConv->getOperand(0),
2367 InsertNewInstBefore(NewAdd, I);
2368 return new SExtInst(NewAdd, I.getType());
2372 // (add (sext x), (sext y)) --> (sext (add int x, y))
2373 if (SExtInst *RHSConv = dyn_cast<SExtInst>(RHS)) {
2374 // Only do this if x/y have the same type, if at least one of them has a
2375 // single use (so we don't increase the number of sexts), and if the
2376 // integer add will not overflow.
2377 if (LHSConv->getOperand(0)->getType()==RHSConv->getOperand(0)->getType()&&
2378 (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
2379 WillNotOverflowSignedAdd(LHSConv->getOperand(0),
2380 RHSConv->getOperand(0))) {
2381 // Insert the new integer add.
2382 Instruction *NewAdd = BinaryOperator::CreateAdd(LHSConv->getOperand(0),
2383 RHSConv->getOperand(0),
2385 InsertNewInstBefore(NewAdd, I);
2386 return new SExtInst(NewAdd, I.getType());
2391 return Changed ? &I : 0;
2394 Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
2395 bool Changed = SimplifyCommutative(I);
2396 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
2398 if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
2400 if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHSC)) {
2401 if (CFP->isExactlyValue(Context->getConstantFPNegativeZero
2402 (I.getType())->getValueAPF()))
2403 return ReplaceInstUsesWith(I, LHS);
2406 if (isa<PHINode>(LHS))
2407 if (Instruction *NV = FoldOpIntoPhi(I))
2412 // -A + -B --> -(A + B)
2413 if (Value *LHSV = dyn_castFNegVal(LHS, Context))
2414 return BinaryOperator::CreateFSub(RHS, LHSV);
2417 if (!isa<Constant>(RHS))
2418 if (Value *V = dyn_castFNegVal(RHS, Context))
2419 return BinaryOperator::CreateFSub(LHS, V);
2421 // Check for X+0.0. Simplify it to X if we know X is not -0.0.
2422 if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHS))
2423 if (CFP->getValueAPF().isPosZero() && CannotBeNegativeZero(LHS))
2424 return ReplaceInstUsesWith(I, LHS);
2426 // Check for (add double (sitofp x), y), see if we can merge this into an
2427 // integer add followed by a promotion.
2428 if (SIToFPInst *LHSConv = dyn_cast<SIToFPInst>(LHS)) {
2429 // (add double (sitofp x), fpcst) --> (sitofp (add int x, intcst))
2430 // ... if the constant fits in the integer value. This is useful for things
2431 // like (double)(x & 1234) + 4.0 -> (double)((X & 1234)+4) which no longer
2432 // requires a constant pool load, and generally allows the add to be better
2434 if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHS)) {
2436 Context->getConstantExprFPToSI(CFP, LHSConv->getOperand(0)->getType());
2437 if (LHSConv->hasOneUse() &&
2438 Context->getConstantExprSIToFP(CI, I.getType()) == CFP &&
2439 WillNotOverflowSignedAdd(LHSConv->getOperand(0), CI)) {
2440 // Insert the new integer add.
2441 Instruction *NewAdd = BinaryOperator::CreateAdd(LHSConv->getOperand(0),
2443 InsertNewInstBefore(NewAdd, I);
2444 return new SIToFPInst(NewAdd, I.getType());
2448 // (add double (sitofp x), (sitofp y)) --> (sitofp (add int x, y))
2449 if (SIToFPInst *RHSConv = dyn_cast<SIToFPInst>(RHS)) {
2450 // Only do this if x/y have the same type, if at least one of them has a
2451 // single use (so we don't increase the number of int->fp conversions),
2452 // and if the integer add will not overflow.
2453 if (LHSConv->getOperand(0)->getType()==RHSConv->getOperand(0)->getType()&&
2454 (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
2455 WillNotOverflowSignedAdd(LHSConv->getOperand(0),
2456 RHSConv->getOperand(0))) {
2457 // Insert the new integer add.
2458 Instruction *NewAdd = BinaryOperator::CreateAdd(LHSConv->getOperand(0),
2459 RHSConv->getOperand(0),
2461 InsertNewInstBefore(NewAdd, I);
2462 return new SIToFPInst(NewAdd, I.getType());
2467 return Changed ? &I : 0;
2470 Instruction *InstCombiner::visitSub(BinaryOperator &I) {
2471 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2473 if (Op0 == Op1) // sub X, X -> 0
2474 return ReplaceInstUsesWith(I, Context->getNullValue(I.getType()));
2476 // If this is a 'B = x-(-A)', change to B = x+A...
2477 if (Value *V = dyn_castNegVal(Op1, Context))
2478 return BinaryOperator::CreateAdd(Op0, V);
2480 if (isa<UndefValue>(Op0))
2481 return ReplaceInstUsesWith(I, Op0); // undef - X -> undef
2482 if (isa<UndefValue>(Op1))
2483 return ReplaceInstUsesWith(I, Op1); // X - undef -> undef
2485 if (ConstantInt *C = dyn_cast<ConstantInt>(Op0)) {
2486 // Replace (-1 - A) with (~A)...
2487 if (C->isAllOnesValue())
2488 return BinaryOperator::CreateNot(Op1);
2490 // C - ~X == X + (1+C)
2492 if (match(Op1, m_Not(m_Value(X)), *Context))
2493 return BinaryOperator::CreateAdd(X, AddOne(C, Context));
2495 // -(X >>u 31) -> (X >>s 31)
2496 // -(X >>s 31) -> (X >>u 31)
2498 if (BinaryOperator *SI = dyn_cast<BinaryOperator>(Op1)) {
2499 if (SI->getOpcode() == Instruction::LShr) {
2500 if (ConstantInt *CU = dyn_cast<ConstantInt>(SI->getOperand(1))) {
2501 // Check to see if we are shifting out everything but the sign bit.
2502 if (CU->getLimitedValue(SI->getType()->getPrimitiveSizeInBits()) ==
2503 SI->getType()->getPrimitiveSizeInBits()-1) {
2504 // Ok, the transformation is safe. Insert AShr.
2505 return BinaryOperator::Create(Instruction::AShr,
2506 SI->getOperand(0), CU, SI->getName());
2510 else if (SI->getOpcode() == Instruction::AShr) {
2511 if (ConstantInt *CU = dyn_cast<ConstantInt>(SI->getOperand(1))) {
2512 // Check to see if we are shifting out everything but the sign bit.
2513 if (CU->getLimitedValue(SI->getType()->getPrimitiveSizeInBits()) ==
2514 SI->getType()->getPrimitiveSizeInBits()-1) {
2515 // Ok, the transformation is safe. Insert LShr.
2516 return BinaryOperator::CreateLShr(
2517 SI->getOperand(0), CU, SI->getName());
2524 // Try to fold constant sub into select arguments.
2525 if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
2526 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
2530 if (I.getType() == Type::Int1Ty)
2531 return BinaryOperator::CreateXor(Op0, Op1);
2533 if (BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1)) {
2534 if (Op1I->getOpcode() == Instruction::Add) {
2535 if (Op1I->getOperand(0) == Op0) // X-(X+Y) == -Y
2536 return BinaryOperator::CreateNeg(*Context, Op1I->getOperand(1),
2538 else if (Op1I->getOperand(1) == Op0) // X-(Y+X) == -Y
2539 return BinaryOperator::CreateNeg(*Context, Op1I->getOperand(0),
2541 else if (ConstantInt *CI1 = dyn_cast<ConstantInt>(I.getOperand(0))) {
2542 if (ConstantInt *CI2 = dyn_cast<ConstantInt>(Op1I->getOperand(1)))
2543 // C1-(X+C2) --> (C1-C2)-X
2544 return BinaryOperator::CreateSub(
2545 Context->getConstantExprSub(CI1, CI2), Op1I->getOperand(0));
2549 if (Op1I->hasOneUse()) {
2550 // Replace (x - (y - z)) with (x + (z - y)) if the (y - z) subexpression
2551 // is not used by anyone else...
2553 if (Op1I->getOpcode() == Instruction::Sub) {
2554 // Swap the two operands of the subexpr...
2555 Value *IIOp0 = Op1I->getOperand(0), *IIOp1 = Op1I->getOperand(1);
2556 Op1I->setOperand(0, IIOp1);
2557 Op1I->setOperand(1, IIOp0);
2559 // Create the new top level add instruction...
2560 return BinaryOperator::CreateAdd(Op0, Op1);
2563 // Replace (A - (A & B)) with (A & ~B) if this is the only use of (A&B)...
2565 if (Op1I->getOpcode() == Instruction::And &&
2566 (Op1I->getOperand(0) == Op0 || Op1I->getOperand(1) == Op0)) {
2567 Value *OtherOp = Op1I->getOperand(Op1I->getOperand(0) == Op0);
2570 InsertNewInstBefore(BinaryOperator::CreateNot(OtherOp, "B.not"), I);
2571 return BinaryOperator::CreateAnd(Op0, NewNot);
2574 // 0 - (X sdiv C) -> (X sdiv -C)
2575 if (Op1I->getOpcode() == Instruction::SDiv)
2576 if (ConstantInt *CSI = dyn_cast<ConstantInt>(Op0))
2578 if (Constant *DivRHS = dyn_cast<Constant>(Op1I->getOperand(1)))
2579 return BinaryOperator::CreateSDiv(Op1I->getOperand(0),
2580 Context->getConstantExprNeg(DivRHS));
2582 // X - X*C --> X * (1-C)
2583 ConstantInt *C2 = 0;
2584 if (dyn_castFoldableMul(Op1I, C2, Context) == Op0) {
2586 Context->getConstantExprSub(Context->getConstantInt(I.getType(), 1),
2588 return BinaryOperator::CreateMul(Op0, CP1);
2593 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
2594 if (Op0I->getOpcode() == Instruction::Add) {
2595 if (Op0I->getOperand(0) == Op1) // (Y+X)-Y == X
2596 return ReplaceInstUsesWith(I, Op0I->getOperand(1));
2597 else if (Op0I->getOperand(1) == Op1) // (X+Y)-Y == X
2598 return ReplaceInstUsesWith(I, Op0I->getOperand(0));
2599 } else if (Op0I->getOpcode() == Instruction::Sub) {
2600 if (Op0I->getOperand(0) == Op1) // (X-Y)-X == -Y
2601 return BinaryOperator::CreateNeg(*Context, Op0I->getOperand(1),
2607 if (Value *X = dyn_castFoldableMul(Op0, C1, Context)) {
2608 if (X == Op1) // X*C - X --> X * (C-1)
2609 return BinaryOperator::CreateMul(Op1, SubOne(C1, Context));
2611 ConstantInt *C2; // X*C1 - X*C2 -> X * (C1-C2)
2612 if (X == dyn_castFoldableMul(Op1, C2, Context))
2613 return BinaryOperator::CreateMul(X, Context->getConstantExprSub(C1, C2));
2618 Instruction *InstCombiner::visitFSub(BinaryOperator &I) {
2619 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2621 // If this is a 'B = x-(-A)', change to B = x+A...
2622 if (Value *V = dyn_castFNegVal(Op1, Context))
2623 return BinaryOperator::CreateFAdd(Op0, V);
2625 if (BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1)) {
2626 if (Op1I->getOpcode() == Instruction::FAdd) {
2627 if (Op1I->getOperand(0) == Op0) // X-(X+Y) == -Y
2628 return BinaryOperator::CreateFNeg(*Context, Op1I->getOperand(1),
2630 else if (Op1I->getOperand(1) == Op0) // X-(Y+X) == -Y
2631 return BinaryOperator::CreateFNeg(*Context, Op1I->getOperand(0),
2639 /// isSignBitCheck - Given an exploded icmp instruction, return true if the
2640 /// comparison only checks the sign bit. If it only checks the sign bit, set
2641 /// TrueIfSigned if the result of the comparison is true when the input value is
2643 static bool isSignBitCheck(ICmpInst::Predicate pred, ConstantInt *RHS,
2644 bool &TrueIfSigned) {
2646 case ICmpInst::ICMP_SLT: // True if LHS s< 0
2647 TrueIfSigned = true;
2648 return RHS->isZero();
2649 case ICmpInst::ICMP_SLE: // True if LHS s<= RHS and RHS == -1
2650 TrueIfSigned = true;
2651 return RHS->isAllOnesValue();
2652 case ICmpInst::ICMP_SGT: // True if LHS s> -1
2653 TrueIfSigned = false;
2654 return RHS->isAllOnesValue();
2655 case ICmpInst::ICMP_UGT:
2656 // True if LHS u> RHS and RHS == high-bit-mask - 1
2657 TrueIfSigned = true;
2658 return RHS->getValue() ==
2659 APInt::getSignedMaxValue(RHS->getType()->getPrimitiveSizeInBits());
2660 case ICmpInst::ICMP_UGE:
2661 // True if LHS u>= RHS and RHS == high-bit-mask (2^7, 2^15, 2^31, etc)
2662 TrueIfSigned = true;
2663 return RHS->getValue().isSignBit();
2669 Instruction *InstCombiner::visitMul(BinaryOperator &I) {
2670 bool Changed = SimplifyCommutative(I);
2671 Value *Op0 = I.getOperand(0);
2673 // TODO: If Op1 is undef and Op0 is finite, return zero.
2674 if (!I.getType()->isFPOrFPVector() &&
2675 isa<UndefValue>(I.getOperand(1))) // undef * X -> 0
2676 return ReplaceInstUsesWith(I, Context->getNullValue(I.getType()));
2678 // Simplify mul instructions with a constant RHS...
2679 if (Constant *Op1 = dyn_cast<Constant>(I.getOperand(1))) {
2680 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
2682 // ((X << C1)*C2) == (X * (C2 << C1))
2683 if (BinaryOperator *SI = dyn_cast<BinaryOperator>(Op0))
2684 if (SI->getOpcode() == Instruction::Shl)
2685 if (Constant *ShOp = dyn_cast<Constant>(SI->getOperand(1)))
2686 return BinaryOperator::CreateMul(SI->getOperand(0),
2687 Context->getConstantExprShl(CI, ShOp));
2690 return ReplaceInstUsesWith(I, Op1); // X * 0 == 0
2691 if (CI->equalsInt(1)) // X * 1 == X
2692 return ReplaceInstUsesWith(I, Op0);
2693 if (CI->isAllOnesValue()) // X * -1 == 0 - X
2694 return BinaryOperator::CreateNeg(*Context, Op0, I.getName());
2696 const APInt& Val = cast<ConstantInt>(CI)->getValue();
2697 if (Val.isPowerOf2()) { // Replace X*(2^C) with X << C
2698 return BinaryOperator::CreateShl(Op0,
2699 Context->getConstantInt(Op0->getType(), Val.logBase2()));
2701 } else if (isa<VectorType>(Op1->getType())) {
2702 // TODO: If Op1 is all zeros and Op0 is all finite, return all zeros.
2704 if (ConstantVector *Op1V = dyn_cast<ConstantVector>(Op1)) {
2705 if (Op1V->isAllOnesValue()) // X * -1 == 0 - X
2706 return BinaryOperator::CreateNeg(*Context, Op0, I.getName());
2708 // As above, vector X*splat(1) -> X.
2709 if (Constant *Splat = Op1V->getSplatValue()) {
2710 if (ConstantInt *CI = dyn_cast<ConstantInt>(Splat))
2711 if (CI->equalsInt(1))
2712 return ReplaceInstUsesWith(I, Op0);
2717 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0))
2718 if (Op0I->getOpcode() == Instruction::Add && Op0I->hasOneUse() &&
2719 isa<ConstantInt>(Op0I->getOperand(1)) && isa<ConstantInt>(Op1)) {
2720 // Canonicalize (X+C1)*C2 -> X*C2+C1*C2.
2721 Instruction *Add = BinaryOperator::CreateMul(Op0I->getOperand(0),
2723 InsertNewInstBefore(Add, I);
2724 Value *C1C2 = Context->getConstantExprMul(Op1,
2725 cast<Constant>(Op0I->getOperand(1)));
2726 return BinaryOperator::CreateAdd(Add, C1C2);
2730 // Try to fold constant mul into select arguments.
2731 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
2732 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
2735 if (isa<PHINode>(Op0))
2736 if (Instruction *NV = FoldOpIntoPhi(I))
2740 if (Value *Op0v = dyn_castNegVal(Op0, Context)) // -X * -Y = X*Y
2741 if (Value *Op1v = dyn_castNegVal(I.getOperand(1), Context))
2742 return BinaryOperator::CreateMul(Op0v, Op1v);
2744 // (X / Y) * Y = X - (X % Y)
2745 // (X / Y) * -Y = (X % Y) - X
2747 Value *Op1 = I.getOperand(1);
2748 BinaryOperator *BO = dyn_cast<BinaryOperator>(Op0);
2750 (BO->getOpcode() != Instruction::UDiv &&
2751 BO->getOpcode() != Instruction::SDiv)) {
2753 BO = dyn_cast<BinaryOperator>(I.getOperand(1));
2755 Value *Neg = dyn_castNegVal(Op1, Context);
2756 if (BO && BO->hasOneUse() &&
2757 (BO->getOperand(1) == Op1 || BO->getOperand(1) == Neg) &&
2758 (BO->getOpcode() == Instruction::UDiv ||
2759 BO->getOpcode() == Instruction::SDiv)) {
2760 Value *Op0BO = BO->getOperand(0), *Op1BO = BO->getOperand(1);
2763 if (BO->getOpcode() == Instruction::UDiv)
2764 Rem = BinaryOperator::CreateURem(Op0BO, Op1BO);
2766 Rem = BinaryOperator::CreateSRem(Op0BO, Op1BO);
2768 InsertNewInstBefore(Rem, I);
2772 return BinaryOperator::CreateSub(Op0BO, Rem);
2774 return BinaryOperator::CreateSub(Rem, Op0BO);
2778 if (I.getType() == Type::Int1Ty)
2779 return BinaryOperator::CreateAnd(Op0, I.getOperand(1));
2781 // If one of the operands of the multiply is a cast from a boolean value, then
2782 // we know the bool is either zero or one, so this is a 'masking' multiply.
2783 // See if we can simplify things based on how the boolean was originally
2785 CastInst *BoolCast = 0;
2786 if (ZExtInst *CI = dyn_cast<ZExtInst>(Op0))
2787 if (CI->getOperand(0)->getType() == Type::Int1Ty)
2790 if (ZExtInst *CI = dyn_cast<ZExtInst>(I.getOperand(1)))
2791 if (CI->getOperand(0)->getType() == Type::Int1Ty)
2794 if (ICmpInst *SCI = dyn_cast<ICmpInst>(BoolCast->getOperand(0))) {
2795 Value *SCIOp0 = SCI->getOperand(0), *SCIOp1 = SCI->getOperand(1);
2796 const Type *SCOpTy = SCIOp0->getType();
2799 // If the icmp is true iff the sign bit of X is set, then convert this
2800 // multiply into a shift/and combination.
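// For example (illustrative):
//   (zext (icmp slt X, 0)) * Y  -->  (ashr X, bitwidth-1) & Y
// since the ashr is all-ones when X is negative and zero otherwise.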
2801 if (isa<ConstantInt>(SCIOp1) &&
2802 isSignBitCheck(SCI->getPredicate(), cast<ConstantInt>(SCIOp1), TIS) &&
2804 // Shift the X value right to turn it into "all signbits".
2805 Constant *Amt = Context->getConstantInt(SCIOp0->getType(),
2806 SCOpTy->getPrimitiveSizeInBits()-1);
2808 InsertNewInstBefore(
2809 BinaryOperator::Create(Instruction::AShr, SCIOp0, Amt,
2810 BoolCast->getOperand(0)->getName()+
2813 // If the multiply type is not the same as the source type, sign extend
2814 // or truncate to the multiply type.
2815 if (I.getType() != V->getType()) {
2816 uint32_t SrcBits = V->getType()->getPrimitiveSizeInBits();
2817 uint32_t DstBits = I.getType()->getPrimitiveSizeInBits();
2818 Instruction::CastOps opcode =
2819 (SrcBits == DstBits ? Instruction::BitCast :
2820 (SrcBits < DstBits ? Instruction::SExt : Instruction::Trunc));
2821 V = InsertCastBefore(opcode, V, I.getType(), I);
2824 Value *OtherOp = Op0 == BoolCast ? I.getOperand(1) : Op0;
2825 return BinaryOperator::CreateAnd(V, OtherOp);
2830 return Changed ? &I : 0;
2833 Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
2834 bool Changed = SimplifyCommutative(I);
2835 Value *Op0 = I.getOperand(0);
2837 // Simplify mul instructions with a constant RHS...
2838 if (Constant *Op1 = dyn_cast<Constant>(I.getOperand(1))) {
2839 if (ConstantFP *Op1F = dyn_cast<ConstantFP>(Op1)) {
2840 // "In IEEE floating point, x*1 is not equivalent to x for nans. However,
2841 // ANSI says we can drop signals, so we can do this anyway." (from GCC)
2842 if (Op1F->isExactlyValue(1.0))
2843 return ReplaceInstUsesWith(I, Op0); // Eliminate 'mul double %X, 1.0'
2844 } else if (isa<VectorType>(Op1->getType())) {
2845 if (ConstantVector *Op1V = dyn_cast<ConstantVector>(Op1)) {
2846 // As above, vector X*splat(1.0) -> X in all defined cases.
2847 if (Constant *Splat = Op1V->getSplatValue()) {
2848 if (ConstantFP *F = dyn_cast<ConstantFP>(Splat))
2849 if (F->isExactlyValue(1.0))
2850 return ReplaceInstUsesWith(I, Op0);
2855 // Try to fold constant mul into select arguments.
2856 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
2857 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
2860 if (isa<PHINode>(Op0))
2861 if (Instruction *NV = FoldOpIntoPhi(I))
2865 if (Value *Op0v = dyn_castFNegVal(Op0, Context)) // -X * -Y = X*Y
2866 if (Value *Op1v = dyn_castFNegVal(I.getOperand(1), Context))
2867 return BinaryOperator::CreateFMul(Op0v, Op1v);
2869 return Changed ? &I : 0;
2872 /// SimplifyDivRemOfSelect - Try to fold a divide or remainder of a select
2874 bool InstCombiner::SimplifyDivRemOfSelect(BinaryOperator &I) {
2875 SelectInst *SI = cast<SelectInst>(I.getOperand(1));
2877 // div/rem X, (Cond ? 0 : Y) -> div/rem X, Y
2878 int NonNullOperand = -1;
2879 if (Constant *ST = dyn_cast<Constant>(SI->getOperand(1)))
2880 if (ST->isNullValue())
2882 // div/rem X, (Cond ? Y : 0) -> div/rem X, Y
2883 if (Constant *ST = dyn_cast<Constant>(SI->getOperand(2)))
2884 if (ST->isNullValue())
2887 if (NonNullOperand == -1)
2890 Value *SelectCond = SI->getOperand(0);
2892 // Change the div/rem to use 'Y' instead of the select.
2893 I.setOperand(1, SI->getOperand(NonNullOperand));
2895 // Okay, we know we can replace the operand of the div/rem with 'Y' with no
2896 // problem. However, the select, or the condition of the select may have
2897 // multiple uses. Based on our knowledge that the operand must be non-zero,
2898 // propagate the known value for the select into other uses of it, and
2899 // propagate a known value of the condition into its other users.
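// For example (illustrative): given S = select %c, 0, %Y and a udiv whose
// divisor is S, other uses of S above the udiv in this block can be replaced
// with %Y, and uses of %c with false, since %c being true would mean a divide
// by zero.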
2901 // If the select and condition only have a single use, don't bother with this,
2903 if (SI->use_empty() && SelectCond->hasOneUse())
2906 // Scan the current block backward, looking for other uses of SI.
2907 BasicBlock::iterator BBI = &I, BBFront = I.getParent()->begin();
2909 while (BBI != BBFront) {
2911 // If we found a call to a function, we can't assume it will return, so
2912 // information from below it cannot be propagated above it.
2913 if (isa<CallInst>(BBI) && !isa<IntrinsicInst>(BBI))
2916 // Replace uses of the select or its condition with the known values.
2917 for (Instruction::op_iterator I = BBI->op_begin(), E = BBI->op_end();
2920 *I = SI->getOperand(NonNullOperand);
2922 } else if (*I == SelectCond) {
2923 *I = NonNullOperand == 1 ? Context->getConstantIntTrue() :
2924 Context->getConstantIntFalse();
2929 // If we are past the instruction, quit looking for it.
2932 if (&*BBI == SelectCond)
2935 // If we ran out of things to eliminate, break out of the loop.
2936 if (SelectCond == 0 && SI == 0)
2944 /// This function implements the transforms on div instructions that work
2945 /// regardless of the kind of div instruction it is (udiv, sdiv, or fdiv). It is
2946 /// used by the visitors to those instructions.
2947 /// @brief Transforms common to all three div instructions
2948 Instruction *InstCombiner::commonDivTransforms(BinaryOperator &I) {
2949 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2951 // undef / X -> 0 for integer.
2952 // undef / X -> undef for FP (the undef could be a snan).
2953 if (isa<UndefValue>(Op0)) {
2954 if (Op0->getType()->isFPOrFPVector())
2955 return ReplaceInstUsesWith(I, Op0);
2956 return ReplaceInstUsesWith(I, Context->getNullValue(I.getType()));
2959 // X / undef -> undef
2960 if (isa<UndefValue>(Op1))
2961 return ReplaceInstUsesWith(I, Op1);
2966 /// This function implements the transforms common to both integer division
2967 /// instructions (udiv and sdiv). It is called by the visitors to those integer
2968 /// division instructions.
2969 /// @brief Common integer divide transforms
2970 Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) {
2971 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2973 // (sdiv X, X) --> 1 (udiv X, X) --> 1
2975 if (const VectorType *Ty = dyn_cast<VectorType>(I.getType())) {
2976 Constant *CI = Context->getConstantInt(Ty->getElementType(), 1);
2977 std::vector<Constant*> Elts(Ty->getNumElements(), CI);
2978 return ReplaceInstUsesWith(I, Context->getConstantVector(Elts));
2981 Constant *CI = Context->getConstantInt(I.getType(), 1);
2982 return ReplaceInstUsesWith(I, CI);
2985 if (Instruction *Common = commonDivTransforms(I))
2988 // Handle cases involving: [su]div X, (select Cond, Y, Z)
2989 // This does not apply for fdiv.
2990 if (isa<SelectInst>(Op1) && SimplifyDivRemOfSelect(I))
2993 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
2995 if (RHS->equalsInt(1))
2996 return ReplaceInstUsesWith(I, Op0);
2998 // (X / C1) / C2 -> X / (C1*C2)
2999 if (Instruction *LHS = dyn_cast<Instruction>(Op0))
3000 if (Instruction::BinaryOps(LHS->getOpcode()) == I.getOpcode())
3001 if (ConstantInt *LHSRHS = dyn_cast<ConstantInt>(LHS->getOperand(1))) {
3002 if (MultiplyOverflows(RHS, LHSRHS,
3003 I.getOpcode()==Instruction::SDiv, Context))
3004 return ReplaceInstUsesWith(I, Context->getNullValue(I.getType()));
3006 return BinaryOperator::Create(I.getOpcode(), LHS->getOperand(0),
3007 Context->getConstantExprMul(RHS, LHSRHS));
3010 if (!RHS->isZero()) { // avoid X udiv 0
3011 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
3012 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
3014 if (isa<PHINode>(Op0))
3015 if (Instruction *NV = FoldOpIntoPhi(I))
3020 // 0 / X == 0, we don't need to preserve faults!
3021 if (ConstantInt *LHS = dyn_cast<ConstantInt>(Op0))
3022 if (LHS->equalsInt(0))
3023 return ReplaceInstUsesWith(I, Context->getNullValue(I.getType()));
3025 // It can't be division by zero, hence it must be division by one.
3026 if (I.getType() == Type::Int1Ty)
3027 return ReplaceInstUsesWith(I, Op0);
3029 if (ConstantVector *Op1V = dyn_cast<ConstantVector>(Op1)) {
3030 if (ConstantInt *X = cast_or_null<ConstantInt>(Op1V->getSplatValue()))
3033 return ReplaceInstUsesWith(I, Op0);
3039 Instruction *InstCombiner::visitUDiv(BinaryOperator &I) {
3040 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3042 // Handle the integer div common cases
3043 if (Instruction *Common = commonIDivTransforms(I))
3046 if (ConstantInt *C = dyn_cast<ConstantInt>(Op1)) {
3047 // X udiv 2^C -> X >> C
3048 // Check to see if this is an unsigned division with an exact power of 2,
3049 // if so, convert to a right shift.
3050 if (C->getValue().isPowerOf2()) // 0 not included in isPowerOf2
3051 return BinaryOperator::CreateLShr(Op0,
3052 Context->getConstantInt(Op0->getType(), C->getValue().logBase2()));
3054 // X udiv C, where C >= signbit
3055 if (C->getValue().isNegative()) {
3056 Value *IC = InsertNewInstBefore(new ICmpInst(*Context,
3057 ICmpInst::ICMP_ULT, Op0, C),
3059 return SelectInst::Create(IC, Context->getNullValue(I.getType()),
3060 Context->getConstantInt(I.getType(), 1));
3064 // X udiv (C1 << N), where C1 is "1<<C2" --> X >> (N+C2)
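// For example (illustrative): udiv X, (shl 4, %n)  -->  lshr X, (%n + 2)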
3065 if (BinaryOperator *RHSI = dyn_cast<BinaryOperator>(I.getOperand(1))) {
3066 if (RHSI->getOpcode() == Instruction::Shl &&
3067 isa<ConstantInt>(RHSI->getOperand(0))) {
3068 const APInt& C1 = cast<ConstantInt>(RHSI->getOperand(0))->getValue();
3069 if (C1.isPowerOf2()) {
3070 Value *N = RHSI->getOperand(1);
3071 const Type *NTy = N->getType();
3072 if (uint32_t C2 = C1.logBase2()) {
3073 Constant *C2V = Context->getConstantInt(NTy, C2);
3074 N = InsertNewInstBefore(BinaryOperator::CreateAdd(N, C2V, "tmp"), I);
3076 return BinaryOperator::CreateLShr(Op0, N);
3081 // udiv X, (Select Cond, C1, C2) --> Select Cond, (lshr X, log2(C1)), (lshr X, log2(C2))
3082 // where C1&C2 are powers of two.
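// For example (illustrative):
//   udiv X, (select %c, 8, 4)  -->  select %c, (lshr X, 3), (lshr X, 2)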
3083 if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
3084 if (ConstantInt *STO = dyn_cast<ConstantInt>(SI->getOperand(1)))
3085 if (ConstantInt *SFO = dyn_cast<ConstantInt>(SI->getOperand(2))) {
3086 const APInt &TVA = STO->getValue(), &FVA = SFO->getValue();
3087 if (TVA.isPowerOf2() && FVA.isPowerOf2()) {
3088 // Compute the shift amounts
3089 uint32_t TSA = TVA.logBase2(), FSA = FVA.logBase2();
3090 // Construct the "on true" case of the select
3091 Constant *TC = Context->getConstantInt(Op0->getType(), TSA);
3092 Instruction *TSI = BinaryOperator::CreateLShr(
3093 Op0, TC, SI->getName()+".t");
3094 TSI = InsertNewInstBefore(TSI, I);
3096 // Construct the "on false" case of the select
3097 Constant *FC = Context->getConstantInt(Op0->getType(), FSA);
3098 Instruction *FSI = BinaryOperator::CreateLShr(
3099 Op0, FC, SI->getName()+".f");
3100 FSI = InsertNewInstBefore(FSI, I);
3102 // construct the select instruction and return it.
3103 return SelectInst::Create(SI->getOperand(0), TSI, FSI, SI->getName());
3109 Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
3110 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3112 // Handle the integer div common cases
3113 if (Instruction *Common = commonIDivTransforms(I))
3116 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
3118 if (RHS->isAllOnesValue())
3119 return BinaryOperator::CreateNeg(*Context, Op0);
3122 // If the sign bits of both operands are zero (i.e. we can prove they are
3123 // unsigned inputs), turn this into a udiv.
3124 if (I.getType()->isInteger()) {
3125 APInt Mask(APInt::getSignBit(I.getType()->getPrimitiveSizeInBits()));
3126 if (MaskedValueIsZero(Op1, Mask) && MaskedValueIsZero(Op0, Mask)) {
3127 // X sdiv Y -> X udiv Y, iff X and Y don't have sign bit set
3128 return BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
3135 Instruction *InstCombiner::visitFDiv(BinaryOperator &I) {
3136 return commonDivTransforms(I);
3139 /// This function implements the transforms on rem instructions that work
3140 /// regardless of the kind of rem instruction it is (urem, srem, or frem). It
3141 /// is used by the visitors to those instructions.
3142 /// @brief Transforms common to all three rem instructions
3143 Instruction *InstCombiner::commonRemTransforms(BinaryOperator &I) {
3144 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3146 if (isa<UndefValue>(Op0)) { // undef % X -> 0
3147 if (I.getType()->isFPOrFPVector())
3148 return ReplaceInstUsesWith(I, Op0); // undef % X -> undef for FP (could be SNaN)
3149 return ReplaceInstUsesWith(I, Context->getNullValue(I.getType()));
3151 if (isa<UndefValue>(Op1))
3152 return ReplaceInstUsesWith(I, Op1); // X % undef -> undef
3154 // Handle cases involving: rem X, (select Cond, Y, Z)
3155 if (isa<SelectInst>(Op1) && SimplifyDivRemOfSelect(I))
3161 /// This function implements the transforms common to both integer remainder
3162 /// instructions (urem and srem). It is called by the visitors to those integer
3163 /// remainder instructions.
3164 /// @brief Common integer remainder transforms
3165 Instruction *InstCombiner::commonIRemTransforms(BinaryOperator &I) {
3166 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3168 if (Instruction *common = commonRemTransforms(I))
3171 // 0 % X == 0 for integer, we don't need to preserve faults!
3172 if (Constant *LHS = dyn_cast<Constant>(Op0))
3173 if (LHS->isNullValue())
3174 return ReplaceInstUsesWith(I, Context->getNullValue(I.getType()));
3176 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
3177 // X % 0 == undef, we don't need to preserve faults!
3178 if (RHS->equalsInt(0))
3179 return ReplaceInstUsesWith(I, Context->getUndef(I.getType()));
3181 if (RHS->equalsInt(1)) // X % 1 == 0
3182 return ReplaceInstUsesWith(I, Context->getNullValue(I.getType()));
3184 if (Instruction *Op0I = dyn_cast<Instruction>(Op0)) {
3185 if (SelectInst *SI = dyn_cast<SelectInst>(Op0I)) {
3186 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
3188 } else if (isa<PHINode>(Op0I)) {
3189 if (Instruction *NV = FoldOpIntoPhi(I))
3193 // See if we can fold away this rem instruction.
3194 if (SimplifyDemandedInstructionBits(I))
3202 Instruction *InstCombiner::visitURem(BinaryOperator &I) {
3203 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3205 if (Instruction *common = commonIRemTransforms(I))
3208 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
3209 // X urem 2^N --> X and (2^N - 1)
3210 // Check to see if this is an unsigned remainder with an exact power of 2,
3211 // if so, convert to a bitwise and.
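// For instance (illustrative): urem i32 %x, 8 --> and i32 %x, 7.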
3212 if (ConstantInt *C = dyn_cast<ConstantInt>(RHS))
3213 if (C->getValue().isPowerOf2())
3214 return BinaryOperator::CreateAnd(Op0, SubOne(C, Context));
3217 if (Instruction *RHSI = dyn_cast<Instruction>(I.getOperand(1))) {
3218 // Turn A % (C << N), where C is 2^k, into A & ((C << N)-1)
3219 if (RHSI->getOpcode() == Instruction::Shl &&
3220 isa<ConstantInt>(RHSI->getOperand(0))) {
3221 if (cast<ConstantInt>(RHSI->getOperand(0))->getValue().isPowerOf2()) {
3222 Constant *N1 = Context->getConstantIntAllOnesValue(I.getType());
3223 Value *Add = InsertNewInstBefore(BinaryOperator::CreateAdd(RHSI, N1,
3225 return BinaryOperator::CreateAnd(Op0, Add);
3230 // urem X, (select Cond, 2^C1, 2^C2) --> select Cond, (and X, 2^C1-1), (and X, 2^C2-1)
3231 // where the select operands are powers of two.
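// For instance (illustrative):
//   urem i32 %x, (select i1 %c, i32 16, i32 4) --> select i1 %c, (and i32 %x, 15), (and i32 %x, 3)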
3232 if (SelectInst *SI = dyn_cast<SelectInst>(Op1)) {
3233 if (ConstantInt *STO = dyn_cast<ConstantInt>(SI->getOperand(1)))
3234 if (ConstantInt *SFO = dyn_cast<ConstantInt>(SI->getOperand(2))) {
3235 // STO == 0 and SFO == 0 handled above.
3236 if ((STO->getValue().isPowerOf2()) &&
3237 (SFO->getValue().isPowerOf2())) {
3238 Value *TrueAnd = InsertNewInstBefore(
3239 BinaryOperator::CreateAnd(Op0, SubOne(STO, Context),
3240 SI->getName()+".t"), I);
3241 Value *FalseAnd = InsertNewInstBefore(
3242 BinaryOperator::CreateAnd(Op0, SubOne(SFO, Context),
3243 SI->getName()+".f"), I);
3244 return SelectInst::Create(SI->getOperand(0), TrueAnd, FalseAnd);
3252 Instruction *InstCombiner::visitSRem(BinaryOperator &I) {
3253 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3255 // Handle the integer rem common cases
3256 if (Instruction *common = commonIRemTransforms(I))
3259 if (Value *RHSNeg = dyn_castNegVal(Op1, Context))
3260 if (!isa<Constant>(RHSNeg) ||
3261 (isa<ConstantInt>(RHSNeg) &&
3262 cast<ConstantInt>(RHSNeg)->getValue().isStrictlyPositive())) {
3264 AddUsesToWorkList(I);
3265 I.setOperand(1, RHSNeg);
3269 // If the sign bits of both operands are zero (i.e. we can prove they are
3270 // unsigned inputs), turn this into a urem.
3271 if (I.getType()->isInteger()) {
3272 APInt Mask(APInt::getSignBit(I.getType()->getPrimitiveSizeInBits()));
3273 if (MaskedValueIsZero(Op1, Mask) && MaskedValueIsZero(Op0, Mask)) {
3274 // X srem Y -> X urem Y, iff X and Y don't have sign bit set
3275 return BinaryOperator::CreateURem(Op0, Op1, I.getName());
3279 // If it's a constant vector, flip any negative values positive.
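// For instance (illustrative): srem <2 x i32> %x, <i32 5, i32 -5> becomes
//   srem <2 x i32> %x, <i32 5, i32 5>.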
3280 if (ConstantVector *RHSV = dyn_cast<ConstantVector>(Op1)) {
3281 unsigned VWidth = RHSV->getNumOperands();
3283 bool hasNegative = false;
3284 for (unsigned i = 0; !hasNegative && i != VWidth; ++i)
3285 if (ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV->getOperand(i)))
3286 if (RHS->getValue().isNegative())
3290 std::vector<Constant *> Elts(VWidth);
3291 for (unsigned i = 0; i != VWidth; ++i) {
3292 if (ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV->getOperand(i))) {
3293 if (RHS->getValue().isNegative())
3294 Elts[i] = cast<ConstantInt>(Context->getConstantExprNeg(RHS));
3300 Constant *NewRHSV = Context->getConstantVector(Elts);
3301 if (NewRHSV != RHSV) {
3302 AddUsesToWorkList(I);
3303 I.setOperand(1, NewRHSV);
3312 Instruction *InstCombiner::visitFRem(BinaryOperator &I) {
3313 return commonRemTransforms(I);
3316 // isOneBitSet - Return true if there is exactly one bit set in the specified constant.
3318 static bool isOneBitSet(const ConstantInt *CI) {
3319 return CI->getValue().isPowerOf2();
3322 // isHighOnes - Return true if the constant is of the form 1+0+.
3323 // This is the same as lowones(~X).
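// For instance (illustrative): 0xFF000000 and 0x80000000 qualify, while 0x00FF0000 does not.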
3324 static bool isHighOnes(const ConstantInt *CI) {
3325 return (~CI->getValue() + 1).isPowerOf2();
3328 /// getICmpCode - Encode a icmp predicate into a three bit mask. These bits
3329 /// are carefully arranged to allow folding of expressions such as:
3331 /// (A < B) | (A > B) --> (A != B)
3333 /// Note that this is only valid if the first and second predicates have the
3334 /// same sign. It is illegal to do: (A u< B) | (A s> B)
3336 /// Three bits are used to represent the condition, as follows:
3341 /// <=>  Value  Definition
3342 /// 000     0   Always false
3343 /// 001     1   A >  B
3344 /// 010     2   A == B
3345 /// 011     3   A >= B
3346 /// 100     4   A <  B
3347 /// 101     5   A != B
3348 /// 110     6   A <= B
3349 /// 111     7   Always true
3351 static unsigned getICmpCode(const ICmpInst *ICI) {
3352 switch (ICI->getPredicate()) {
3354 case ICmpInst::ICMP_UGT: return 1; // 001
3355 case ICmpInst::ICMP_SGT: return 1; // 001
3356 case ICmpInst::ICMP_EQ: return 2; // 010
3357 case ICmpInst::ICMP_UGE: return 3; // 011
3358 case ICmpInst::ICMP_SGE: return 3; // 011
3359 case ICmpInst::ICMP_ULT: return 4; // 100
3360 case ICmpInst::ICMP_SLT: return 4; // 100
3361 case ICmpInst::ICMP_NE: return 5; // 101
3362 case ICmpInst::ICMP_ULE: return 6; // 110
3363 case ICmpInst::ICMP_SLE: return 6; // 110
3366 LLVM_UNREACHABLE("Invalid ICmp predicate!");
3371 /// getFCmpCode - Similar to getICmpCode but for FCmpInst. This encodes a fcmp
3372 /// predicate into a three bit mask. It also returns whether it is an ordered
3373 /// predicate by reference.
3374 static unsigned getFCmpCode(FCmpInst::Predicate CC, bool &isOrdered) {
3377 case FCmpInst::FCMP_ORD: isOrdered = true; return 0; // 000
3378 case FCmpInst::FCMP_UNO: return 0; // 000
3379 case FCmpInst::FCMP_OGT: isOrdered = true; return 1; // 001
3380 case FCmpInst::FCMP_UGT: return 1; // 001
3381 case FCmpInst::FCMP_OEQ: isOrdered = true; return 2; // 010
3382 case FCmpInst::FCMP_UEQ: return 2; // 010
3383 case FCmpInst::FCMP_OGE: isOrdered = true; return 3; // 011
3384 case FCmpInst::FCMP_UGE: return 3; // 011
3385 case FCmpInst::FCMP_OLT: isOrdered = true; return 4; // 100
3386 case FCmpInst::FCMP_ULT: return 4; // 100
3387 case FCmpInst::FCMP_ONE: isOrdered = true; return 5; // 101
3388 case FCmpInst::FCMP_UNE: return 5; // 101
3389 case FCmpInst::FCMP_OLE: isOrdered = true; return 6; // 110
3390 case FCmpInst::FCMP_ULE: return 6; // 110
3393 // Not expecting FCMP_FALSE or FCMP_TRUE.
3394 LLVM_UNREACHABLE("Unexpected FCmp predicate!");
3399 /// getICmpValue - This is the complement of getICmpCode, which turns an
3400 /// opcode and two operands into either a constant true or false, or a brand
3401 /// new ICmp instruction. The sign is passed in to determine which kind
3402 /// of predicate to use in the new icmp instruction.
3403 static Value *getICmpValue(bool sign, unsigned code, Value *LHS, Value *RHS,
3404 LLVMContext *Context) {
3406 default: LLVM_UNREACHABLE("Illegal ICmp code!");
3407 case 0: return Context->getConstantIntFalse();
3410 return new ICmpInst(*Context, ICmpInst::ICMP_SGT, LHS, RHS);
3412 return new ICmpInst(*Context, ICmpInst::ICMP_UGT, LHS, RHS);
3413 case 2: return new ICmpInst(*Context, ICmpInst::ICMP_EQ, LHS, RHS);
3416 return new ICmpInst(*Context, ICmpInst::ICMP_SGE, LHS, RHS);
3418 return new ICmpInst(*Context, ICmpInst::ICMP_UGE, LHS, RHS);
3421 return new ICmpInst(*Context, ICmpInst::ICMP_SLT, LHS, RHS);
3423 return new ICmpInst(*Context, ICmpInst::ICMP_ULT, LHS, RHS);
3424 case 5: return new ICmpInst(*Context, ICmpInst::ICMP_NE, LHS, RHS);
3427 return new ICmpInst(*Context, ICmpInst::ICMP_SLE, LHS, RHS);
3429 return new ICmpInst(*Context, ICmpInst::ICMP_ULE, LHS, RHS);
3430 case 7: return Context->getConstantIntTrue();
3434 /// getFCmpValue - This is the complement of getFCmpCode, which turns an
3435 /// opcode and two operands into either a constant true or a new FCmp instruction. isordered is passed
3436 /// in to determine which kind of predicate to use in the new fcmp instruction.
3437 static Value *getFCmpValue(bool isordered, unsigned code,
3438 Value *LHS, Value *RHS, LLVMContext *Context) {
3440 default: LLVM_UNREACHABLE("Illegal FCmp code!");
3443 return new FCmpInst(*Context, FCmpInst::FCMP_ORD, LHS, RHS);
3445 return new FCmpInst(*Context, FCmpInst::FCMP_UNO, LHS, RHS);
3448 return new FCmpInst(*Context, FCmpInst::FCMP_OGT, LHS, RHS);
3450 return new FCmpInst(*Context, FCmpInst::FCMP_UGT, LHS, RHS);
3453 return new FCmpInst(*Context, FCmpInst::FCMP_OEQ, LHS, RHS);
3455 return new FCmpInst(*Context, FCmpInst::FCMP_UEQ, LHS, RHS);
3458 return new FCmpInst(*Context, FCmpInst::FCMP_OGE, LHS, RHS);
3460 return new FCmpInst(*Context, FCmpInst::FCMP_UGE, LHS, RHS);
3463 return new FCmpInst(*Context, FCmpInst::FCMP_OLT, LHS, RHS);
3465 return new FCmpInst(*Context, FCmpInst::FCMP_ULT, LHS, RHS);
3468 return new FCmpInst(*Context, FCmpInst::FCMP_ONE, LHS, RHS);
3470 return new FCmpInst(*Context, FCmpInst::FCMP_UNE, LHS, RHS);
3473 return new FCmpInst(*Context, FCmpInst::FCMP_OLE, LHS, RHS);
3475 return new FCmpInst(*Context, FCmpInst::FCMP_ULE, LHS, RHS);
3476 case 7: return Context->getConstantIntTrue();
3480 /// PredicatesFoldable - Return true if both predicates match sign or if at
3481 /// least one of them is an equality comparison (which is signless).
3482 static bool PredicatesFoldable(ICmpInst::Predicate p1, ICmpInst::Predicate p2) {
3483 return (ICmpInst::isSignedPredicate(p1) == ICmpInst::isSignedPredicate(p2)) ||
3484 (ICmpInst::isSignedPredicate(p1) && ICmpInst::isEquality(p2)) ||
3485 (ICmpInst::isSignedPredicate(p2) && ICmpInst::isEquality(p1));
3489 // FoldICmpLogical - Implements (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B)
3490 struct FoldICmpLogical {
3493 ICmpInst::Predicate pred;
3494 FoldICmpLogical(InstCombiner &ic, ICmpInst *ICI)
3495 : IC(ic), LHS(ICI->getOperand(0)), RHS(ICI->getOperand(1)),
3496 pred(ICI->getPredicate()) {}
3497 bool shouldApply(Value *V) const {
3498 if (ICmpInst *ICI = dyn_cast<ICmpInst>(V))
3499 if (PredicatesFoldable(pred, ICI->getPredicate()))
3500 return ((ICI->getOperand(0) == LHS && ICI->getOperand(1) == RHS) ||
3501 (ICI->getOperand(0) == RHS && ICI->getOperand(1) == LHS));
3504 Instruction *apply(Instruction &Log) const {
3505 ICmpInst *ICI = cast<ICmpInst>(Log.getOperand(0));
3506 if (ICI->getOperand(0) != LHS) {
3507 assert(ICI->getOperand(1) == LHS);
3508 ICI->swapOperands(); // Swap the LHS and RHS of the ICmp
3511 ICmpInst *RHSICI = cast<ICmpInst>(Log.getOperand(1));
3512 unsigned LHSCode = getICmpCode(ICI);
3513 unsigned RHSCode = getICmpCode(RHSICI);
3515 switch (Log.getOpcode()) {
3516 case Instruction::And: Code = LHSCode & RHSCode; break;
3517 case Instruction::Or: Code = LHSCode | RHSCode; break;
3518 case Instruction::Xor: Code = LHSCode ^ RHSCode; break;
3519 default: LLVM_UNREACHABLE("Illegal logical opcode!"); return 0;
3522 bool isSigned = ICmpInst::isSignedPredicate(RHSICI->getPredicate()) ||
3523 ICmpInst::isSignedPredicate(ICI->getPredicate());
3525 Value *RV = getICmpValue(isSigned, Code, LHS, RHS, IC.getContext());
3526 if (Instruction *I = dyn_cast<Instruction>(RV))
3528 // Otherwise, it's a constant boolean value...
3529 return IC.ReplaceInstUsesWith(Log, RV);
3532 } // end anonymous namespace
3534 // OptAndOp - This handles expressions of the form ((val OP C1) & C2), where
3535 // the Op parameter is 'OP', OpRHS is 'C1', and AndRHS is 'C2'. Op is
3536 // guaranteed to be a binary operator.
3537 Instruction *InstCombiner::OptAndOp(Instruction *Op,
3539 ConstantInt *AndRHS,
3540 BinaryOperator &TheAnd) {
3541 Value *X = Op->getOperand(0);
3542 Constant *Together = 0;
3544 Together = Context->getConstantExprAnd(AndRHS, OpRHS);
3546 switch (Op->getOpcode()) {
3547 case Instruction::Xor:
3548 if (Op->hasOneUse()) {
3549 // (X ^ C1) & C2 --> (X & C2) ^ (C1&C2)
3550 Instruction *And = BinaryOperator::CreateAnd(X, AndRHS);
3551 InsertNewInstBefore(And, TheAnd);
3553 return BinaryOperator::CreateXor(And, Together);
3556 case Instruction::Or:
3557 if (Together == AndRHS) // (X | C) & C --> C
3558 return ReplaceInstUsesWith(TheAnd, AndRHS);
3560 if (Op->hasOneUse() && Together != OpRHS) {
3561 // (X | C1) & C2 --> (X | (C1&C2)) & C2
3562 Instruction *Or = BinaryOperator::CreateOr(X, Together);
3563 InsertNewInstBefore(Or, TheAnd);
3565 return BinaryOperator::CreateAnd(Or, AndRHS);
3568 case Instruction::Add:
3569 if (Op->hasOneUse()) {
3570 // Adding a one to a single bit bit-field should be turned into an XOR
3571 // of the bit. First thing to check is to see if this AND is with a
3572 // single bit constant.
3573 const APInt& AndRHSV = cast<ConstantInt>(AndRHS)->getValue();
3575 // If there is only one bit set...
3576 if (isOneBitSet(cast<ConstantInt>(AndRHS))) {
3577 // Ok, at this point, we know that we are masking the result of the
3578 // ADD down to exactly one bit. If the constant we are adding has
3579 // no bits set below this bit, then we can eliminate the ADD.
3580 const APInt& AddRHS = cast<ConstantInt>(OpRHS)->getValue();
3582 // Check to see if any bits below the one bit set in AndRHSV are set.
3583 if ((AddRHS & (AndRHSV-1)) == 0) {
3584 // If not, the only thing that can affect the output of the AND is
3585 // the bit specified by AndRHSV. If that bit is set, the effect of
3586 // the XOR is to toggle the bit. If it is clear, then the ADD has no effect.
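// For instance (illustrative): (X + 4) & 4 --> (X & 4) ^ 4, while (X + 8) & 4 --> X & 4.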
3588 if ((AddRHS & AndRHSV) == 0) { // Bit is not set, noop
3589 TheAnd.setOperand(0, X);
3592 // Pull the XOR out of the AND.
3593 Instruction *NewAnd = BinaryOperator::CreateAnd(X, AndRHS);
3594 InsertNewInstBefore(NewAnd, TheAnd);
3595 NewAnd->takeName(Op);
3596 return BinaryOperator::CreateXor(NewAnd, AndRHS);
3603 case Instruction::Shl: {
3604 // We know that the AND will not produce any of the bits shifted in, so if
3605 // the anded constant includes them, clear them now!
3607 uint32_t BitWidth = AndRHS->getType()->getBitWidth();
3608 uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
3609 APInt ShlMask(APInt::getHighBitsSet(BitWidth, BitWidth-OpRHSVal));
3610 ConstantInt *CI = Context->getConstantInt(AndRHS->getValue() & ShlMask);
3612 if (CI->getValue() == ShlMask) {
3613 // Masking out bits that the shift already masks
3614 return ReplaceInstUsesWith(TheAnd, Op); // No need for the and.
3615 } else if (CI != AndRHS) { // Reducing bits set in and.
3616 TheAnd.setOperand(1, CI);
3621 case Instruction::LShr:
3623 // We know that the AND will not produce any of the bits shifted in, so if
3624 // the anded constant includes them, clear them now! This only applies to
3625 // unsigned shifts, because a signed shr may bring in set bits!
3627 uint32_t BitWidth = AndRHS->getType()->getBitWidth();
3628 uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
3629 APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal));
3630 ConstantInt *CI = Context->getConstantInt(AndRHS->getValue() & ShrMask);
3632 if (CI->getValue() == ShrMask) {
3633 // Masking out bits that the shift already masks.
3634 return ReplaceInstUsesWith(TheAnd, Op);
3635 } else if (CI != AndRHS) {
3636 TheAnd.setOperand(1, CI); // Reduce bits set in and cst.
3641 case Instruction::AShr:
3643 // See if this is shifting in some sign extension, then masking it out
3645 if (Op->hasOneUse()) {
3646 uint32_t BitWidth = AndRHS->getType()->getBitWidth();
3647 uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
3648 APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal));
3649 Constant *C = Context->getConstantInt(AndRHS->getValue() & ShrMask);
3650 if (C == AndRHS) { // Masking out bits shifted in.
3651 // (Val ashr C1) & C2 -> (Val lshr C1) & C2
3652 // Make the argument unsigned.
3653 Value *ShVal = Op->getOperand(0);
3654 ShVal = InsertNewInstBefore(
3655 BinaryOperator::CreateLShr(ShVal, OpRHS,
3656 Op->getName()), TheAnd);
3657 return BinaryOperator::CreateAnd(ShVal, AndRHS, TheAnd.getName());
3666 /// InsertRangeTest - Emit a computation of: (V >= Lo && V < Hi) if Inside is
3667 /// true, otherwise (V < Lo || V >= Hi). In practice, we emit the more efficient
3668 /// (V-Lo) <u Hi-Lo. This method expects that Lo <= Hi. isSigned indicates
3669 /// whether to treat V, Lo, and Hi as signed or not. IB is the location to
3670 /// insert new instructions.
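/// For example (illustrative): with isSigned = false, Inside = true, Lo = 10 and
/// Hi = 20, this emits "%off = add i32 %V, -10" and returns "icmp ult i32 %off, 10".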
3671 Instruction *InstCombiner::InsertRangeTest(Value *V, Constant *Lo, Constant *Hi,
3672 bool isSigned, bool Inside,
3674 assert(cast<ConstantInt>(Context->getConstantExprICmp((isSigned ?
3675 ICmpInst::ICMP_SLE:ICmpInst::ICMP_ULE), Lo, Hi))->getZExtValue() &&
3676 "Lo is not <= Hi in range emission code!");
3679 if (Lo == Hi) // Trivially false.
3680 return new ICmpInst(*Context, ICmpInst::ICMP_NE, V, V);
3682 // V >= Min && V < Hi --> V < Hi
3683 if (cast<ConstantInt>(Lo)->isMinValue(isSigned)) {
3684 ICmpInst::Predicate pred = (isSigned ?
3685 ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT);
3686 return new ICmpInst(*Context, pred, V, Hi);
3689 // Emit V-Lo <u Hi-Lo
3690 Constant *NegLo = Context->getConstantExprNeg(Lo);
3691 Instruction *Add = BinaryOperator::CreateAdd(V, NegLo, V->getName()+".off");
3692 InsertNewInstBefore(Add, IB);
3693 Constant *UpperBound = Context->getConstantExprAdd(NegLo, Hi);
3694 return new ICmpInst(*Context, ICmpInst::ICMP_ULT, Add, UpperBound);
3697 if (Lo == Hi) // Trivially true.
3698 return new ICmpInst(*Context, ICmpInst::ICMP_EQ, V, V);
3700 // V < Min || V >= Hi -> V > Hi-1
3701 Hi = SubOne(cast<ConstantInt>(Hi), Context);
3702 if (cast<ConstantInt>(Lo)->isMinValue(isSigned)) {
3703 ICmpInst::Predicate pred = (isSigned ?
3704 ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT);
3705 return new ICmpInst(*Context, pred, V, Hi);
3708 // Emit V-Lo >u Hi-1-Lo
3709 // Note that Hi has already had one subtracted from it, above.
3710 ConstantInt *NegLo = cast<ConstantInt>(Context->getConstantExprNeg(Lo));
3711 Instruction *Add = BinaryOperator::CreateAdd(V, NegLo, V->getName()+".off");
3712 InsertNewInstBefore(Add, IB);
3713 Constant *LowerBound = Context->getConstantExprAdd(NegLo, Hi);
3714 return new ICmpInst(*Context, ICmpInst::ICMP_UGT, Add, LowerBound);
3717 // isRunOfOnes - Returns true iff Val consists of one contiguous run of 1s with
3718 // any number of 0s on either side. The 1s are allowed to wrap from LSB to
3719 // MSB, so 0x000FFF0, 0x0000FFFF, and 0xFF0000FF are all runs. 0x0F0F0000 is
3720 // not, since all 1s are not contiguous.
3721 static bool isRunOfOnes(ConstantInt *Val, uint32_t &MB, uint32_t &ME) {
3722 const APInt& V = Val->getValue();
3723 uint32_t BitWidth = Val->getType()->getBitWidth();
3724 if (!APIntOps::isShiftedMask(BitWidth, V)) return false;
3726 // look for the first zero bit after the run of ones
3727 MB = BitWidth - ((V - 1) ^ V).countLeadingZeros();
3728 // look for the first non-zero bit
3729 ME = V.getActiveBits();
3733 /// FoldLogicalPlusAnd - This is part of an expression (LHS +/- RHS) & Mask,
3734 /// where isSub determines whether the operator is a sub. If we can fold one of
3735 /// the following xforms:
3737 /// ((A & N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == Mask
3738 /// ((A | N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == 0
3739 /// ((A ^ N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == 0
3741 /// return (A +/- B).
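/// For example (illustrative), ((A & 255) + B) & 255 folds to (A + B) & 255,
/// since the inner mask is made redundant by the outer one.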
3743 Value *InstCombiner::FoldLogicalPlusAnd(Value *LHS, Value *RHS,
3744 ConstantInt *Mask, bool isSub,
3746 Instruction *LHSI = dyn_cast<Instruction>(LHS);
3747 if (!LHSI || LHSI->getNumOperands() != 2 ||
3748 !isa<ConstantInt>(LHSI->getOperand(1))) return 0;
3750 ConstantInt *N = cast<ConstantInt>(LHSI->getOperand(1));
3752 switch (LHSI->getOpcode()) {
3754 case Instruction::And:
3755 if (Context->getConstantExprAnd(N, Mask) == Mask) {
3756 // If the AndRHS is a power of two minus one (0+1+), this is simple.
3757 if ((Mask->getValue().countLeadingZeros() +
3758 Mask->getValue().countPopulation()) ==
3759 Mask->getValue().getBitWidth())
3762 // Otherwise, if Mask is 0+1+0+, and if B is known to have the low 0+
3763 // part, we don't need any explicit masks to take them out of A. If that
3764 // is all N is, ignore it.
3765 uint32_t MB = 0, ME = 0;
3766 if (isRunOfOnes(Mask, MB, ME)) { // begin/end bit of run, inclusive
3767 uint32_t BitWidth = cast<IntegerType>(RHS->getType())->getBitWidth();
3768 APInt Mask(APInt::getLowBitsSet(BitWidth, MB-1));
3769 if (MaskedValueIsZero(RHS, Mask))
3774 case Instruction::Or:
3775 case Instruction::Xor:
3776 // If the AndRHS is a power of two minus one (0+1+), and N&Mask == 0
3777 if ((Mask->getValue().countLeadingZeros() +
3778 Mask->getValue().countPopulation()) == Mask->getValue().getBitWidth()
3779 && Context->getConstantExprAnd(N, Mask)->isNullValue())
3786 New = BinaryOperator::CreateSub(LHSI->getOperand(0), RHS, "fold");
3788 New = BinaryOperator::CreateAdd(LHSI->getOperand(0), RHS, "fold");
3789 return InsertNewInstBefore(New, I);
3792 /// FoldAndOfICmps - Fold (icmp)&(icmp) if possible.
3793 Instruction *InstCombiner::FoldAndOfICmps(Instruction &I,
3794 ICmpInst *LHS, ICmpInst *RHS) {
3796 ConstantInt *LHSCst, *RHSCst;
3797 ICmpInst::Predicate LHSCC, RHSCC;
3799 // This only handles icmp of constants: (icmp1 A, C1) & (icmp2 B, C2).
3800 if (!match(LHS, m_ICmp(LHSCC, m_Value(Val),
3801 m_ConstantInt(LHSCst)), *Context) ||
3802 !match(RHS, m_ICmp(RHSCC, m_Value(Val2),
3803 m_ConstantInt(RHSCst)), *Context))
3806 // (icmp ult A, C) & (icmp ult B, C) --> (icmp ult (A|B), C)
3807 // where C is a power of 2
3808 if (LHSCst == RHSCst && LHSCC == RHSCC && LHSCC == ICmpInst::ICMP_ULT &&
3809 LHSCst->getValue().isPowerOf2()) {
3810 Instruction *NewOr = BinaryOperator::CreateOr(Val, Val2);
3811 InsertNewInstBefore(NewOr, I);
3812 return new ICmpInst(*Context, LHSCC, NewOr, LHSCst);
3815 // From here on, we only handle:
3816 // (icmp1 A, C1) & (icmp2 A, C2) --> something simpler.
3817 if (Val != Val2) return 0;
3819 // ICMP_[US][GL]E X, CST is folded to ICMP_[US][GL]T elsewhere.
3820 if (LHSCC == ICmpInst::ICMP_UGE || LHSCC == ICmpInst::ICMP_ULE ||
3821 RHSCC == ICmpInst::ICMP_UGE || RHSCC == ICmpInst::ICMP_ULE ||
3822 LHSCC == ICmpInst::ICMP_SGE || LHSCC == ICmpInst::ICMP_SLE ||
3823 RHSCC == ICmpInst::ICMP_SGE || RHSCC == ICmpInst::ICMP_SLE)
3826 // We can't fold (ugt x, C) & (sgt x, C2).
3827 if (!PredicatesFoldable(LHSCC, RHSCC))
3830 // Ensure that the larger constant is on the RHS.
3832 if (ICmpInst::isSignedPredicate(LHSCC) ||
3833 (ICmpInst::isEquality(LHSCC) &&
3834 ICmpInst::isSignedPredicate(RHSCC)))
3835 ShouldSwap = LHSCst->getValue().sgt(RHSCst->getValue());
3837 ShouldSwap = LHSCst->getValue().ugt(RHSCst->getValue());
3840 std::swap(LHS, RHS);
3841 std::swap(LHSCst, RHSCst);
3842 std::swap(LHSCC, RHSCC);
3845 // At this point, we know we have two icmp instructions
3846 // comparing a value against two constants and and'ing the result
3847 // together. Because of the above check, we know that we only have
3848 // icmp eq, icmp ne, icmp [su]lt, and icmp [su]gt here. We also know
3849 // (from the FoldICmpLogical check above), that the two constants
3850 // are not equal and that the larger constant is on the RHS
3851 assert(LHSCst != RHSCst && "Compares not folded above?");
3854 default: LLVM_UNREACHABLE("Unknown integer condition code!");
3855 case ICmpInst::ICMP_EQ:
3857 default: LLVM_UNREACHABLE("Unknown integer condition code!");
3858 case ICmpInst::ICMP_EQ: // (X == 13 & X == 15) -> false
3859 case ICmpInst::ICMP_UGT: // (X == 13 & X > 15) -> false
3860 case ICmpInst::ICMP_SGT: // (X == 13 & X > 15) -> false
3861 return ReplaceInstUsesWith(I, Context->getConstantIntFalse());
3862 case ICmpInst::ICMP_NE: // (X == 13 & X != 15) -> X == 13
3863 case ICmpInst::ICMP_ULT: // (X == 13 & X < 15) -> X == 13
3864 case ICmpInst::ICMP_SLT: // (X == 13 & X < 15) -> X == 13
3865 return ReplaceInstUsesWith(I, LHS);
3867 case ICmpInst::ICMP_NE:
3869 default: LLVM_UNREACHABLE("Unknown integer condition code!");
3870 case ICmpInst::ICMP_ULT:
3871 if (LHSCst == SubOne(RHSCst, Context)) // (X != 13 & X u< 14) -> X < 13
3872 return new ICmpInst(*Context, ICmpInst::ICMP_ULT, Val, LHSCst);
3873 break; // (X != 13 & X u< 15) -> no change
3874 case ICmpInst::ICMP_SLT:
3875 if (LHSCst == SubOne(RHSCst, Context)) // (X != 13 & X s< 14) -> X < 13
3876 return new ICmpInst(*Context, ICmpInst::ICMP_SLT, Val, LHSCst);
3877 break; // (X != 13 & X s< 15) -> no change
3878 case ICmpInst::ICMP_EQ: // (X != 13 & X == 15) -> X == 15
3879 case ICmpInst::ICMP_UGT: // (X != 13 & X u> 15) -> X u> 15
3880 case ICmpInst::ICMP_SGT: // (X != 13 & X s> 15) -> X s> 15
3881 return ReplaceInstUsesWith(I, RHS);
3882 case ICmpInst::ICMP_NE:
3883 if (LHSCst == SubOne(RHSCst, Context)){// (X != 13 & X != 14) -> X-13 >u 1
3884 Constant *AddCST = Context->getConstantExprNeg(LHSCst);
3885 Instruction *Add = BinaryOperator::CreateAdd(Val, AddCST,
3886 Val->getName()+".off");
3887 InsertNewInstBefore(Add, I);
3888 return new ICmpInst(*Context, ICmpInst::ICMP_UGT, Add,
3889 Context->getConstantInt(Add->getType(), 1));
3891 break; // (X != 13 & X != 15) -> no change
3894 case ICmpInst::ICMP_ULT:
3896 default: LLVM_UNREACHABLE("Unknown integer condition code!");
3897 case ICmpInst::ICMP_EQ: // (X u< 13 & X == 15) -> false
3898 case ICmpInst::ICMP_UGT: // (X u< 13 & X u> 15) -> false
3899 return ReplaceInstUsesWith(I, Context->getConstantIntFalse());
3900 case ICmpInst::ICMP_SGT: // (X u< 13 & X s> 15) -> no change
3902 case ICmpInst::ICMP_NE: // (X u< 13 & X != 15) -> X u< 13
3903 case ICmpInst::ICMP_ULT: // (X u< 13 & X u< 15) -> X u< 13
3904 return ReplaceInstUsesWith(I, LHS);
3905 case ICmpInst::ICMP_SLT: // (X u< 13 & X s< 15) -> no change
3909 case ICmpInst::ICMP_SLT:
3911 default: LLVM_UNREACHABLE("Unknown integer condition code!");
3912 case ICmpInst::ICMP_EQ: // (X s< 13 & X == 15) -> false
3913 case ICmpInst::ICMP_SGT: // (X s< 13 & X s> 15) -> false
3914 return ReplaceInstUsesWith(I, Context->getConstantIntFalse());
3915 case ICmpInst::ICMP_UGT: // (X s< 13 & X u> 15) -> no change
3917 case ICmpInst::ICMP_NE: // (X s< 13 & X != 15) -> X < 13
3918 case ICmpInst::ICMP_SLT: // (X s< 13 & X s< 15) -> X < 13
3919 return ReplaceInstUsesWith(I, LHS);
3920 case ICmpInst::ICMP_ULT: // (X s< 13 & X u< 15) -> no change
3924 case ICmpInst::ICMP_UGT:
3926 default: LLVM_UNREACHABLE("Unknown integer condition code!");
3927 case ICmpInst::ICMP_EQ: // (X u> 13 & X == 15) -> X == 15
3928 case ICmpInst::ICMP_UGT: // (X u> 13 & X u> 15) -> X u> 15
3929 return ReplaceInstUsesWith(I, RHS);
3930 case ICmpInst::ICMP_SGT: // (X u> 13 & X s> 15) -> no change
3932 case ICmpInst::ICMP_NE:
3933 if (RHSCst == AddOne(LHSCst, Context)) // (X u> 13 & X != 14) -> X u> 14
3934 return new ICmpInst(*Context, LHSCC, Val, RHSCst);
3935 break; // (X u> 13 & X != 15) -> no change
3936 case ICmpInst::ICMP_ULT: // (X u> 13 & X u< 15) -> (X-14) <u 1
3937 return InsertRangeTest(Val, AddOne(LHSCst, Context),
3938 RHSCst, false, true, I);
3939 case ICmpInst::ICMP_SLT: // (X u> 13 & X s< 15) -> no change
3943 case ICmpInst::ICMP_SGT:
3945 default: LLVM_UNREACHABLE("Unknown integer condition code!");
3946 case ICmpInst::ICMP_EQ: // (X s> 13 & X == 15) -> X == 15
3947 case ICmpInst::ICMP_SGT: // (X s> 13 & X s> 15) -> X s> 15
3948 return ReplaceInstUsesWith(I, RHS);
3949 case ICmpInst::ICMP_UGT: // (X s> 13 & X u> 15) -> no change
3951 case ICmpInst::ICMP_NE:
3952 if (RHSCst == AddOne(LHSCst, Context)) // (X s> 13 & X != 14) -> X s> 14
3953 return new ICmpInst(*Context, LHSCC, Val, RHSCst);
3954 break; // (X s> 13 & X != 15) -> no change
3955 case ICmpInst::ICMP_SLT: // (X s> 13 & X s< 15) -> (X-14) s< 1
3956 return InsertRangeTest(Val, AddOne(LHSCst, Context),
3957 RHSCst, true, true, I);
3958 case ICmpInst::ICMP_ULT: // (X s> 13 & X u< 15) -> no change
3968 Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
3969 bool Changed = SimplifyCommutative(I);
3970 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3972 if (isa<UndefValue>(Op1)) // X & undef -> 0
3973 return ReplaceInstUsesWith(I, Context->getNullValue(I.getType()));
3977 return ReplaceInstUsesWith(I, Op1);
3979 // See if we can simplify any instructions used by the instruction whose sole
3980 // purpose is to compute bits we don't care about.
3981 if (SimplifyDemandedInstructionBits(I))
3983 if (isa<VectorType>(I.getType())) {
3984 if (ConstantVector *CP = dyn_cast<ConstantVector>(Op1)) {
3985 if (CP->isAllOnesValue()) // X & <-1,-1> -> X
3986 return ReplaceInstUsesWith(I, I.getOperand(0));
3987 } else if (isa<ConstantAggregateZero>(Op1)) {
3988 return ReplaceInstUsesWith(I, Op1); // X & <0,0> -> <0,0>
3992 if (ConstantInt *AndRHS = dyn_cast<ConstantInt>(Op1)) {
3993 const APInt& AndRHSMask = AndRHS->getValue();
3994 APInt NotAndRHS(~AndRHSMask);
3996 // Optimize a variety of ((val OP C1) & C2) combinations...
3997 if (isa<BinaryOperator>(Op0)) {
3998 Instruction *Op0I = cast<Instruction>(Op0);
3999 Value *Op0LHS = Op0I->getOperand(0);
4000 Value *Op0RHS = Op0I->getOperand(1);
4001 switch (Op0I->getOpcode()) {
4002 case Instruction::Xor:
4003 case Instruction::Or:
4004 // If the mask is only needed on one incoming arm, push it up.
4005 if (Op0I->hasOneUse()) {
4006 if (MaskedValueIsZero(Op0LHS, NotAndRHS)) {
4007 // Not masking anything out for the LHS, move to RHS.
4008 Instruction *NewRHS = BinaryOperator::CreateAnd(Op0RHS, AndRHS,
4009 Op0RHS->getName()+".masked");
4010 InsertNewInstBefore(NewRHS, I);
4011 return BinaryOperator::Create(
4012 cast<BinaryOperator>(Op0I)->getOpcode(), Op0LHS, NewRHS);
4014 if (!isa<Constant>(Op0RHS) &&
4015 MaskedValueIsZero(Op0RHS, NotAndRHS)) {
4016 // Not masking anything out for the RHS, move to LHS.
4017 Instruction *NewLHS = BinaryOperator::CreateAnd(Op0LHS, AndRHS,
4018 Op0LHS->getName()+".masked");
4019 InsertNewInstBefore(NewLHS, I);
4020 return BinaryOperator::Create(
4021 cast<BinaryOperator>(Op0I)->getOpcode(), NewLHS, Op0RHS);
4026 case Instruction::Add:
4027 // ((A & N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == AndRHS.
4028 // ((A | N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == 0
4029 // ((A ^ N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == 0
4030 if (Value *V = FoldLogicalPlusAnd(Op0LHS, Op0RHS, AndRHS, false, I))
4031 return BinaryOperator::CreateAnd(V, AndRHS);
4032 if (Value *V = FoldLogicalPlusAnd(Op0RHS, Op0LHS, AndRHS, false, I))
4033 return BinaryOperator::CreateAnd(V, AndRHS); // Add commutes
4036 case Instruction::Sub:
4037 // ((A & N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == AndRHS.
4038 // ((A | N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == 0
4039 // ((A ^ N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == 0
4040 if (Value *V = FoldLogicalPlusAnd(Op0LHS, Op0RHS, AndRHS, true, I))
4041 return BinaryOperator::CreateAnd(V, AndRHS);
4043 // (A - N) & AndRHS -> -N & AndRHS iff A&AndRHS==0 and AndRHS
4044 // has 1's for all bits that the subtraction with A might affect.
4045 if (Op0I->hasOneUse()) {
4046 uint32_t BitWidth = AndRHSMask.getBitWidth();
4047 uint32_t Zeros = AndRHSMask.countLeadingZeros();
4048 APInt Mask = APInt::getLowBitsSet(BitWidth, BitWidth - Zeros);
4050 ConstantInt *A = dyn_cast<ConstantInt>(Op0LHS);
4051 if (!(A && A->isZero()) && // avoid infinite recursion.
4052 MaskedValueIsZero(Op0LHS, Mask)) {
4053 Instruction *NewNeg = BinaryOperator::CreateNeg(*Context, Op0RHS);
4054 InsertNewInstBefore(NewNeg, I);
4055 return BinaryOperator::CreateAnd(NewNeg, AndRHS);
4060 case Instruction::Shl:
4061 case Instruction::LShr:
4062 // (1 << x) & 1 --> zext(x == 0)
4063 // (1 >> x) & 1 --> zext(x == 0)
4064 if (AndRHSMask == 1 && Op0LHS == AndRHS) {
4065 Instruction *NewICmp = new ICmpInst(*Context, ICmpInst::ICMP_EQ,
4066 Op0RHS, Context->getNullValue(I.getType()));
4067 InsertNewInstBefore(NewICmp, I);
4068 return new ZExtInst(NewICmp, I.getType());
4073 if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1)))
4074 if (Instruction *Res = OptAndOp(Op0I, Op0CI, AndRHS, I))
4076 } else if (CastInst *CI = dyn_cast<CastInst>(Op0)) {
4077 // If this is an integer truncation or change from signed-to-unsigned, and
4078 // if the source is an and/or with immediate, transform it. This
4079 // frequently occurs for bitfield accesses.
4080 if (Instruction *CastOp = dyn_cast<Instruction>(CI->getOperand(0))) {
4081 if ((isa<TruncInst>(CI) || isa<BitCastInst>(CI)) &&
4082 CastOp->getNumOperands() == 2)
4083 if (ConstantInt *AndCI = dyn_cast<ConstantInt>(CastOp->getOperand(1))) {
4084 if (CastOp->getOpcode() == Instruction::And) {
4085 // Change: and (cast (and X, C1) to T), C2
4086 // into : and (cast X to T), trunc_or_bitcast(C1)&C2
4087 // This will fold the two constants together, which may allow
4088 // other simplifications.
4089 Instruction *NewCast = CastInst::CreateTruncOrBitCast(
4090 CastOp->getOperand(0), I.getType(),
4091 CastOp->getName()+".shrunk");
4092 NewCast = InsertNewInstBefore(NewCast, I);
4093 // trunc_or_bitcast(C1)&C2
4095 Context->getConstantExprTruncOrBitCast(AndCI,I.getType());
4096 C3 = Context->getConstantExprAnd(C3, AndRHS);
4097 return BinaryOperator::CreateAnd(NewCast, C3);
4098 } else if (CastOp->getOpcode() == Instruction::Or) {
4099 // Change: and (cast (or X, C1) to T), C2
4100 // into : trunc(C1)&C2 iff trunc(C1)&C2 == C2
4102 Context->getConstantExprTruncOrBitCast(AndCI,I.getType());
4103 if (Context->getConstantExprAnd(C3, AndRHS) == AndRHS)
4105 return ReplaceInstUsesWith(I, AndRHS);
4111 // Try to fold constant and into select arguments.
4112 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
4113 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
4115 if (isa<PHINode>(Op0))
4116 if (Instruction *NV = FoldOpIntoPhi(I))
4120 Value *Op0NotVal = dyn_castNotVal(Op0, Context);
4121 Value *Op1NotVal = dyn_castNotVal(Op1, Context);
4123 if (Op0NotVal == Op1 || Op1NotVal == Op0) // A & ~A == ~A & A == 0
4124 return ReplaceInstUsesWith(I, Context->getNullValue(I.getType()));
4126 // (~A & ~B) == (~(A | B)) - De Morgan's Law
4127 if (Op0NotVal && Op1NotVal && isOnlyUse(Op0) && isOnlyUse(Op1)) {
4128 Instruction *Or = BinaryOperator::CreateOr(Op0NotVal, Op1NotVal,
4129 I.getName()+".demorgan");
4130 InsertNewInstBefore(Or, I);
4131 return BinaryOperator::CreateNot(Or);
4135 Value *A = 0, *B = 0, *C = 0, *D = 0;
4136 if (match(Op0, m_Or(m_Value(A), m_Value(B)), *Context)) {
4137 if (A == Op1 || B == Op1) // (A | ?) & A --> A
4138 return ReplaceInstUsesWith(I, Op1);
4140 // (A|B) & ~(A&B) -> A^B
4141 if (match(Op1, m_Not(m_And(m_Value(C), m_Value(D))), *Context)) {
4142 if ((A == C && B == D) || (A == D && B == C))
4143 return BinaryOperator::CreateXor(A, B);
4147 if (match(Op1, m_Or(m_Value(A), m_Value(B)), *Context)) {
4148 if (A == Op0 || B == Op0) // A & (A | ?) --> A
4149 return ReplaceInstUsesWith(I, Op0);
4151 // ~(A&B) & (A|B) -> A^B
4152 if (match(Op0, m_Not(m_And(m_Value(C), m_Value(D))), *Context)) {
4153 if ((A == C && B == D) || (A == D && B == C))
4154 return BinaryOperator::CreateXor(A, B);
4158 if (Op0->hasOneUse() &&
4159 match(Op0, m_Xor(m_Value(A), m_Value(B)), *Context)) {
4160 if (A == Op1) { // (A^B)&A -> A&(A^B)
4161 I.swapOperands(); // Simplify below
4162 std::swap(Op0, Op1);
4163 } else if (B == Op1) { // (A^B)&B -> B&(B^A)
4164 cast<BinaryOperator>(Op0)->swapOperands();
4165 I.swapOperands(); // Simplify below
4166 std::swap(Op0, Op1);
4170 if (Op1->hasOneUse() &&
4171 match(Op1, m_Xor(m_Value(A), m_Value(B)), *Context)) {
4172 if (B == Op0) { // B&(A^B) -> B&(B^A)
4173 cast<BinaryOperator>(Op1)->swapOperands();
4176 if (A == Op0) { // A&(A^B) -> A & ~B
4177 Instruction *NotB = BinaryOperator::CreateNot(B, "tmp");
4178 InsertNewInstBefore(NotB, I);
4179 return BinaryOperator::CreateAnd(A, NotB);
4183 // (A&((~A)|B)) -> A&B
4184 if (match(Op0, m_Or(m_Not(m_Specific(Op1)), m_Value(A)), *Context) ||
4185 match(Op0, m_Or(m_Value(A), m_Not(m_Specific(Op1))), *Context))
4186 return BinaryOperator::CreateAnd(A, Op1);
4187 if (match(Op1, m_Or(m_Not(m_Specific(Op0)), m_Value(A)), *Context) ||
4188 match(Op1, m_Or(m_Value(A), m_Not(m_Specific(Op0))), *Context))
4189 return BinaryOperator::CreateAnd(A, Op0);
4192 if (ICmpInst *RHS = dyn_cast<ICmpInst>(Op1)) {
4193 // (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B)
4194 if (Instruction *R = AssociativeOpt(I, FoldICmpLogical(*this, RHS),Context))
4197 if (ICmpInst *LHS = dyn_cast<ICmpInst>(Op0))
4198 if (Instruction *Res = FoldAndOfICmps(I, LHS, RHS))
4202 // fold (and (cast A), (cast B)) -> (cast (and A, B))
4203 if (CastInst *Op0C = dyn_cast<CastInst>(Op0))
4204 if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
4205 if (Op0C->getOpcode() == Op1C->getOpcode()) { // same cast kind ?
4206 const Type *SrcTy = Op0C->getOperand(0)->getType();
4207 if (SrcTy == Op1C->getOperand(0)->getType() && SrcTy->isInteger() &&
4208 // Only do this if the casts both really cause code to be generated.
4209 ValueRequiresCast(Op0C->getOpcode(), Op0C->getOperand(0),
4211 ValueRequiresCast(Op1C->getOpcode(), Op1C->getOperand(0),
4213 Instruction *NewOp = BinaryOperator::CreateAnd(Op0C->getOperand(0),
4214 Op1C->getOperand(0),
4216 InsertNewInstBefore(NewOp, I);
4217 return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
4221 // (X >> Z) & (Y >> Z) -> (X&Y) >> Z for all shifts.
4222 if (BinaryOperator *SI1 = dyn_cast<BinaryOperator>(Op1)) {
4223 if (BinaryOperator *SI0 = dyn_cast<BinaryOperator>(Op0))
4224 if (SI0->isShift() && SI0->getOpcode() == SI1->getOpcode() &&
4225 SI0->getOperand(1) == SI1->getOperand(1) &&
4226 (SI0->hasOneUse() || SI1->hasOneUse())) {
4227 Instruction *NewOp =
4228 InsertNewInstBefore(BinaryOperator::CreateAnd(SI0->getOperand(0),
4230 SI0->getName()), I);
4231 return BinaryOperator::Create(SI1->getOpcode(), NewOp,
4232 SI1->getOperand(1));
4236 // If and'ing two fcmp, try to combine them into one.
4237 if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0))) {
4238 if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1))) {
4239 if (LHS->getPredicate() == FCmpInst::FCMP_ORD &&
4240 RHS->getPredicate() == FCmpInst::FCMP_ORD) {
4241 // (fcmp ord x, c) & (fcmp ord y, c) -> (fcmp ord x, y)
4242 if (ConstantFP *LHSC = dyn_cast<ConstantFP>(LHS->getOperand(1)))
4243 if (ConstantFP *RHSC = dyn_cast<ConstantFP>(RHS->getOperand(1))) {
4244 // If either of the constants is a NaN, then the whole thing returns false.
4246 if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN())
4247 return ReplaceInstUsesWith(I, Context->getConstantIntFalse());
4248 return new FCmpInst(*Context, FCmpInst::FCMP_ORD,
4249 LHS->getOperand(0), RHS->getOperand(0));
4252 Value *Op0LHS, *Op0RHS, *Op1LHS, *Op1RHS;
4253 FCmpInst::Predicate Op0CC, Op1CC;
4254 if (match(Op0, m_FCmp(Op0CC, m_Value(Op0LHS),
4255 m_Value(Op0RHS)), *Context) &&
4256 match(Op1, m_FCmp(Op1CC, m_Value(Op1LHS),
4257 m_Value(Op1RHS)), *Context)) {
4258 if (Op0LHS == Op1RHS && Op0RHS == Op1LHS) {
4259 // Swap RHS operands to match LHS.
4260 Op1CC = FCmpInst::getSwappedPredicate(Op1CC);
4261 std::swap(Op1LHS, Op1RHS);
4263 if (Op0LHS == Op1LHS && Op0RHS == Op1RHS) {
4264 // Simplify (fcmp cc0 x, y) & (fcmp cc1 x, y).
4266 return new FCmpInst(*Context, (FCmpInst::Predicate)Op0CC,
4268 else if (Op0CC == FCmpInst::FCMP_FALSE ||
4269 Op1CC == FCmpInst::FCMP_FALSE)
4270 return ReplaceInstUsesWith(I, Context->getConstantIntFalse());
4271 else if (Op0CC == FCmpInst::FCMP_TRUE)
4272 return ReplaceInstUsesWith(I, Op1);
4273 else if (Op1CC == FCmpInst::FCMP_TRUE)
4274 return ReplaceInstUsesWith(I, Op0);
4277 unsigned Op0Pred = getFCmpCode(Op0CC, Op0Ordered);
4278 unsigned Op1Pred = getFCmpCode(Op1CC, Op1Ordered);
4280 std::swap(Op0, Op1);
4281 std::swap(Op0Pred, Op1Pred);
4282 std::swap(Op0Ordered, Op1Ordered);
4285 // uno && ueq -> uno && (uno || eq) -> ueq
4286 // ord && olt -> ord && (ord && lt) -> olt
4287 if (Op0Ordered == Op1Ordered)
4288 return ReplaceInstUsesWith(I, Op1);
4289 // uno && oeq -> uno && (ord && eq) -> false
4290 // uno && ord -> false
4292 return ReplaceInstUsesWith(I, Context->getConstantIntFalse());
4293 // ord && ueq -> ord && (uno || eq) -> oeq
4294 return cast<Instruction>(getFCmpValue(true, Op1Pred,
4295 Op0LHS, Op0RHS, Context));
4303 return Changed ? &I : 0;
4306 /// CollectBSwapParts - Analyze the specified subexpression and see if it is
4307 /// capable of providing pieces of a bswap. The subexpression provides pieces
4308 /// of a bswap if it is proven that each of the non-zero bytes in the output of
4309 /// the expression came from the corresponding "byte swapped" byte in some other
4310 /// value. For example, if the current subexpression is "(shl i32 %X, 24)" then
4311 /// we know that the expression deposits the low byte of %X into the high byte
4312 /// of the bswap result and that all other bytes are zero. This expression is
4313 /// accepted, the high byte of ByteValues is set to X to indicate a correct match.
4316 /// This function returns true if the match was unsuccessful and false if it succeeded.
4317 /// On entry to the function the "OverallLeftShift" is a signed integer value
4318 /// indicating the number of bytes that the subexpression is later shifted. For
4319 /// example, if the expression is later right shifted by 16 bits, the
4320 /// OverallLeftShift value would be -2 on entry. This is used to specify which
4321 /// byte of ByteValues is actually being set.
4323 /// Similarly, ByteMask is a bitmask where a bit is clear if its corresponding
4324 /// byte is masked to zero by a user. For example, in (X & 255), X will be
4325 /// processed with a bytemask of 1. Because bytemask is 32-bits, this limits
4326 /// this function to working on up to 32-byte (256 bit) values. ByteMask is
4327 /// always in the local (OverallLeftShift) coordinate space.
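/// A typical i32 idiom whose pieces this collects (illustrative) is:
///   (X << 24) | ((X & 0xFF00) << 8) | ((X >> 8) & 0xFF00) | (X >> 24)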
4329 static bool CollectBSwapParts(Value *V, int OverallLeftShift, uint32_t ByteMask,
4330 SmallVector<Value*, 8> &ByteValues) {
4331 if (Instruction *I = dyn_cast<Instruction>(V)) {
4332 // If this is an or instruction, it may be an inner node of the bswap.
4333 if (I->getOpcode() == Instruction::Or) {
4334 return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
4336 CollectBSwapParts(I->getOperand(1), OverallLeftShift, ByteMask,
4340 // If this is a logical shift by a constant multiple of 8, recurse with
4341 // OverallLeftShift and ByteMask adjusted.
4342 if (I->isLogicalShift() && isa<ConstantInt>(I->getOperand(1))) {
4344 cast<ConstantInt>(I->getOperand(1))->getLimitedValue(~0U);
4345 // Ensure the shift amount is defined and of a byte value.
4346 if ((ShAmt & 7) || (ShAmt > 8*ByteValues.size()))
4349 unsigned ByteShift = ShAmt >> 3;
4350 if (I->getOpcode() == Instruction::Shl) {
4351 // X << 2 -> collect(X, +2)
4352 OverallLeftShift += ByteShift;
4353 ByteMask >>= ByteShift;
4355 // X >>u 2 -> collect(X, -2)
4356 OverallLeftShift -= ByteShift;
4357 ByteMask <<= ByteShift;
4358 ByteMask &= (~0U >> (32-ByteValues.size()));
4361 if (OverallLeftShift >= (int)ByteValues.size()) return true;
4362 if (OverallLeftShift <= -(int)ByteValues.size()) return true;
4364 return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
4368 // If this is a logical 'and' with a mask that clears bytes, clear the
4369 // corresponding bytes in ByteMask.
4370 if (I->getOpcode() == Instruction::And &&
4371 isa<ConstantInt>(I->getOperand(1))) {
4372 // Scan every byte of the and mask, seeing if the byte is either 0 or 255.
4373 unsigned NumBytes = ByteValues.size();
4374 APInt Byte(I->getType()->getPrimitiveSizeInBits(), 255);
4375 const APInt &AndMask = cast<ConstantInt>(I->getOperand(1))->getValue();
4377 for (unsigned i = 0; i != NumBytes; ++i, Byte <<= 8) {
4378 // If this byte is masked out by a later operation, we don't care what
4380 if ((ByteMask & (1 << i)) == 0)
4383 // If the AndMask is all zeros for this byte, clear the bit.
4384 APInt MaskB = AndMask & Byte;
4386 ByteMask &= ~(1U << i);
4390 // If the AndMask is not all ones for this byte, it's not a bytezap.
4394 // Otherwise, this byte is kept.
4397 return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
4402 // Okay, we got to something that isn't a shift, 'or' or 'and'. This must be
4403 // the input value to the bswap. Some observations: 1) if more than one byte
4404 // is demanded from this input, then it could not be successfully assembled
4405 // into a byteswap. At least one of the two bytes would not be aligned with
4406 // their ultimate destination.
4407 if (!isPowerOf2_32(ByteMask)) return true;
4408 unsigned InputByteNo = CountTrailingZeros_32(ByteMask);
4410 // 2) The input and ultimate destinations must line up: if byte 3 of an i32
4411 // is demanded, it needs to go into byte 0 of the result. This means that the
4412 // byte needs to be shifted until it lands in the right byte bucket. The
4413 // shift amount depends on the position: if the byte is coming from the high
4414 // part of the value (e.g. byte 3) then it must be shifted right. If from the
4415 // low part, it must be shifted left.
4416 unsigned DestByteNo = InputByteNo + OverallLeftShift;
4417 if (InputByteNo < ByteValues.size()/2) {
4418 if (ByteValues.size()-1-DestByteNo != InputByteNo)
4421 if (ByteValues.size()-1-DestByteNo != InputByteNo)
4425 // If the destination byte value is already defined, the values are or'd
4426 // together, which isn't a bswap (unless it's an or of the same bits).
4427 if (ByteValues[DestByteNo] && ByteValues[DestByteNo] != V)
4429 ByteValues[DestByteNo] = V;
4433 /// MatchBSwap - Given an OR instruction, check to see if this is a bswap idiom.
4434 /// If so, insert the new bswap intrinsic and return it.
4435 Instruction *InstCombiner::MatchBSwap(BinaryOperator &I) {
4436 const IntegerType *ITy = dyn_cast<IntegerType>(I.getType());
4437 if (!ITy || ITy->getBitWidth() % 16 ||
4438 // ByteMask only allows up to 32-byte values.
4439 ITy->getBitWidth() > 32*8)
4440 return 0; // Can only bswap pairs of bytes. Can't do vectors.
4442 /// ByteValues - For each byte of the result, we keep track of which value
4443 /// defines each byte.
4444 SmallVector<Value*, 8> ByteValues;
4445 ByteValues.resize(ITy->getBitWidth()/8);
4447 // Try to find all the pieces corresponding to the bswap.
4448 uint32_t ByteMask = ~0U >> (32-ByteValues.size());
4449 if (CollectBSwapParts(&I, 0, ByteMask, ByteValues))
4452 // Check to see if all of the bytes come from the same value.
4453 Value *V = ByteValues[0];
4454 if (V == 0) return 0; // Didn't find a byte? Must be zero.
4456 // Check to make sure that all of the bytes come from the same value.
4457 for (unsigned i = 1, e = ByteValues.size(); i != e; ++i)
4458 if (ByteValues[i] != V)
4460 const Type *Tys[] = { ITy };
4461 Module *M = I.getParent()->getParent()->getParent();
4462 Function *F = Intrinsic::getDeclaration(M, Intrinsic::bswap, Tys, 1);
4463 return CallInst::Create(F, V);
4466 /// MatchSelectFromAndOr - We have an expression of the form (A&C)|(B&D). Check
4467 /// if A is (cond?-1:0) and either B or D is ~(cond?-1:0) or (cond?0:-1), then
4468 /// we can simplify this expression to "cond ? C : B" or "cond ? C : D".
4469 static Instruction *MatchSelectFromAndOr(Value *A, Value *B,
4471 LLVMContext *Context) {
4472 // If A is not a select of -1/0, this cannot match.
4474 if (!match(A, m_SelectCst<-1, 0>(m_Value(Cond)), *Context))
4477 // ((cond?-1:0)&C) | (B&(cond?0:-1)) -> cond ? C : B.
4478 if (match(D, m_SelectCst<0, -1>(m_Specific(Cond)), *Context))
4479 return SelectInst::Create(Cond, C, B);
4480 if (match(D, m_Not(m_SelectCst<-1, 0>(m_Specific(Cond))), *Context))
4481 return SelectInst::Create(Cond, C, B);
4482 // ((cond?-1:0)&C) | ((cond?0:-1)&D) -> cond ? C : D.
4483 if (match(B, m_SelectCst<0, -1>(m_Specific(Cond)), *Context))
4484 return SelectInst::Create(Cond, C, D);
4485 if (match(B, m_Not(m_SelectCst<-1, 0>(m_Specific(Cond))), *Context))
4486 return SelectInst::Create(Cond, C, D);
4490 /// FoldOrOfICmps - Fold (icmp)|(icmp) if possible.
4491 Instruction *InstCombiner::FoldOrOfICmps(Instruction &I,
4492 ICmpInst *LHS, ICmpInst *RHS) {
4494 ConstantInt *LHSCst, *RHSCst;
4495 ICmpInst::Predicate LHSCC, RHSCC;
4497 // This only handles icmp of constants: (icmp1 A, C1) | (icmp2 B, C2).
4498 if (!match(LHS, m_ICmp(LHSCC, m_Value(Val),
4499 m_ConstantInt(LHSCst)), *Context) ||
4500 !match(RHS, m_ICmp(RHSCC, m_Value(Val2),
4501 m_ConstantInt(RHSCst)), *Context))
4504 // From here on, we only handle:
4505 // (icmp1 A, C1) | (icmp2 A, C2) --> something simpler.
4506 if (Val != Val2) return 0;
4508 // ICMP_[US][GL]E X, CST is folded to ICMP_[US][GL]T elsewhere.
4509 if (LHSCC == ICmpInst::ICMP_UGE || LHSCC == ICmpInst::ICMP_ULE ||
4510 RHSCC == ICmpInst::ICMP_UGE || RHSCC == ICmpInst::ICMP_ULE ||
4511 LHSCC == ICmpInst::ICMP_SGE || LHSCC == ICmpInst::ICMP_SLE ||
4512 RHSCC == ICmpInst::ICMP_SGE || RHSCC == ICmpInst::ICMP_SLE)
4515 // We can't fold (ugt x, C) | (sgt x, C2).
4516 if (!PredicatesFoldable(LHSCC, RHSCC))
4519 // Ensure that the larger constant is on the RHS.
4521 if (ICmpInst::isSignedPredicate(LHSCC) ||
4522 (ICmpInst::isEquality(LHSCC) &&
4523 ICmpInst::isSignedPredicate(RHSCC)))
4524 ShouldSwap = LHSCst->getValue().sgt(RHSCst->getValue());
4526 ShouldSwap = LHSCst->getValue().ugt(RHSCst->getValue());
4529 std::swap(LHS, RHS);
4530 std::swap(LHSCst, RHSCst);
4531 std::swap(LHSCC, RHSCC);
4534 // At this point, we know we have two icmp instructions
4535 // comparing a value against two constants and or'ing the result
4536 // together. Because of the above check, we know that we only have
4537 // ICMP_EQ, ICMP_NE, ICMP_LT, and ICMP_GT here. We also know (from the
4538 // FoldICmpLogical check above), that the two constants are not equal and that the larger constant is on the RHS.
4540 assert(LHSCst != RHSCst && "Compares not folded above?");
4543 default: LLVM_UNREACHABLE("Unknown integer condition code!");
4544 case ICmpInst::ICMP_EQ:
4546 default: LLVM_UNREACHABLE("Unknown integer condition code!");
4547 case ICmpInst::ICMP_EQ:
4548 if (LHSCst == SubOne(RHSCst, Context)) {
4549 // (X == 13 | X == 14) -> X-13 <u 2
4550 Constant *AddCST = Context->getConstantExprNeg(LHSCst);
4551 Instruction *Add = BinaryOperator::CreateAdd(Val, AddCST,
4552 Val->getName()+".off");
4553 InsertNewInstBefore(Add, I);
4554 AddCST = Context->getConstantExprSub(AddOne(RHSCst, Context), LHSCst);
4555 return new ICmpInst(*Context, ICmpInst::ICMP_ULT, Add, AddCST);
4557 break; // (X == 13 | X == 15) -> no change
4558 case ICmpInst::ICMP_UGT: // (X == 13 | X u> 14) -> no change
4559 case ICmpInst::ICMP_SGT: // (X == 13 | X s> 14) -> no change
4561 case ICmpInst::ICMP_NE: // (X == 13 | X != 15) -> X != 15
4562 case ICmpInst::ICMP_ULT: // (X == 13 | X u< 15) -> X u< 15
4563 case ICmpInst::ICMP_SLT: // (X == 13 | X s< 15) -> X s< 15
4564 return ReplaceInstUsesWith(I, RHS);
4567 case ICmpInst::ICMP_NE:
4569 default: LLVM_UNREACHABLE("Unknown integer condition code!");
4570 case ICmpInst::ICMP_EQ: // (X != 13 | X == 15) -> X != 13
4571 case ICmpInst::ICMP_UGT: // (X != 13 | X u> 15) -> X != 13
4572 case ICmpInst::ICMP_SGT: // (X != 13 | X s> 15) -> X != 13
4573 return ReplaceInstUsesWith(I, LHS);
4574 case ICmpInst::ICMP_NE: // (X != 13 | X != 15) -> true
4575 case ICmpInst::ICMP_ULT: // (X != 13 | X u< 15) -> true
4576 case ICmpInst::ICMP_SLT: // (X != 13 | X s< 15) -> true
4577 return ReplaceInstUsesWith(I, Context->getConstantIntTrue());
4580 case ICmpInst::ICMP_ULT:
4582 default: LLVM_UNREACHABLE("Unknown integer condition code!");
4583 case ICmpInst::ICMP_EQ: // (X u< 13 | X == 14) -> no change
4585 case ICmpInst::ICMP_UGT: // (X u< 13 | X u> 15) -> (X-13) u> 2
4586 // If RHSCst is [us]MAXINT, it is always false. Not handling
4587 // this can cause overflow.
4588 if (RHSCst->isMaxValue(false))
4589 return ReplaceInstUsesWith(I, LHS);
4590 return InsertRangeTest(Val, LHSCst, AddOne(RHSCst, Context),
4592 case ICmpInst::ICMP_SGT: // (X u< 13 | X s> 15) -> no change
4594 case ICmpInst::ICMP_NE: // (X u< 13 | X != 15) -> X != 15
4595 case ICmpInst::ICMP_ULT: // (X u< 13 | X u< 15) -> X u< 15
4596 return ReplaceInstUsesWith(I, RHS);
4597 case ICmpInst::ICMP_SLT: // (X u< 13 | X s< 15) -> no change
4601 case ICmpInst::ICMP_SLT:
4603 default: LLVM_UNREACHABLE("Unknown integer condition code!");
4604 case ICmpInst::ICMP_EQ: // (X s< 13 | X == 14) -> no change
4606 case ICmpInst::ICMP_SGT: // (X s< 13 | X s> 15) -> (X-13) s> 2
4607 // If RHSCst is [us]MAXINT, it is always false. Not handling
4608 // this can cause overflow.
4609 if (RHSCst->isMaxValue(true))
4610 return ReplaceInstUsesWith(I, LHS);
4611 return InsertRangeTest(Val, LHSCst, AddOne(RHSCst, Context),
4613 case ICmpInst::ICMP_UGT: // (X s< 13 | X u> 15) -> no change
4615 case ICmpInst::ICMP_NE: // (X s< 13 | X != 15) -> X != 15
4616 case ICmpInst::ICMP_SLT: // (X s< 13 | X s< 15) -> X s< 15
4617 return ReplaceInstUsesWith(I, RHS);
4618 case ICmpInst::ICMP_ULT: // (X s< 13 | X u< 15) -> no change
4622 case ICmpInst::ICMP_UGT:
4624 default: LLVM_UNREACHABLE("Unknown integer condition code!");
4625 case ICmpInst::ICMP_EQ: // (X u> 13 | X == 15) -> X u> 13
4626 case ICmpInst::ICMP_UGT: // (X u> 13 | X u> 15) -> X u> 13
4627 return ReplaceInstUsesWith(I, LHS);
4628 case ICmpInst::ICMP_SGT: // (X u> 13 | X s> 15) -> no change
4630 case ICmpInst::ICMP_NE: // (X u> 13 | X != 15) -> true
4631 case ICmpInst::ICMP_ULT: // (X u> 13 | X u< 15) -> true
4632 return ReplaceInstUsesWith(I, Context->getConstantIntTrue());
4633 case ICmpInst::ICMP_SLT: // (X u> 13 | X s< 15) -> no change
4637 case ICmpInst::ICMP_SGT:
4639 default: LLVM_UNREACHABLE("Unknown integer condition code!");
4640 case ICmpInst::ICMP_EQ: // (X s> 13 | X == 15) -> X > 13
4641 case ICmpInst::ICMP_SGT: // (X s> 13 | X s> 15) -> X > 13
4642 return ReplaceInstUsesWith(I, LHS);
4643 case ICmpInst::ICMP_UGT: // (X s> 13 | X u> 15) -> no change
4645 case ICmpInst::ICMP_NE: // (X s> 13 | X != 15) -> true
4646 case ICmpInst::ICMP_SLT: // (X s> 13 | X s< 15) -> true
4647 return ReplaceInstUsesWith(I, Context->getConstantIntTrue());
4648 case ICmpInst::ICMP_ULT: // (X s> 13 | X u< 15) -> no change
4656 /// FoldOrWithConstants - This helper function folds:
4658 /// ((A | B) & C1) | (B & C2)
4664 /// into (A & C1) | B, when the XOR of the two constants is "all ones" (-1).
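/// For example (illustrative, i8): ((A | B) & 0xF0) | (B & 0x0F) folds to (A & 0xF0) | B.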
4665 Instruction *InstCombiner::FoldOrWithConstants(BinaryOperator &I, Value *Op,
4666 Value *A, Value *B, Value *C) {
4667 ConstantInt *CI1 = dyn_cast<ConstantInt>(C);
4671 ConstantInt *CI2 = 0;
4672 if (!match(Op, m_And(m_Value(V1), m_ConstantInt(CI2)), *Context)) return 0;
4674 APInt Xor = CI1->getValue() ^ CI2->getValue();
4675 if (!Xor.isAllOnesValue()) return 0;
4677 if (V1 == A || V1 == B) {
4678 Instruction *NewOp =
4679 InsertNewInstBefore(BinaryOperator::CreateAnd((V1 == A) ? B : A, CI1), I);
4680 return BinaryOperator::CreateOr(NewOp, V1);
4686 Instruction *InstCombiner::visitOr(BinaryOperator &I) {
4687 bool Changed = SimplifyCommutative(I);
4688 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
4690 if (isa<UndefValue>(Op1)) // X | undef -> -1
4691 return ReplaceInstUsesWith(I, Context->getAllOnesValue(I.getType()));
4695 return ReplaceInstUsesWith(I, Op0);
4697 // See if we can simplify any instructions used by the instruction whose sole
4698 // purpose is to compute bits we don't care about.
4699 if (SimplifyDemandedInstructionBits(I))
4701 if (isa<VectorType>(I.getType())) {
4702 if (isa<ConstantAggregateZero>(Op1)) {
4703 return ReplaceInstUsesWith(I, Op0); // X | <0,0> -> X
4704 } else if (ConstantVector *CP = dyn_cast<ConstantVector>(Op1)) {
4705 if (CP->isAllOnesValue()) // X | <-1,-1> -> <-1,-1>
4706 return ReplaceInstUsesWith(I, I.getOperand(1));
4711 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
4712 ConstantInt *C1 = 0; Value *X = 0;
4713 // (X & C1) | C2 --> (X | C2) & (C1|C2)
4714 if (match(Op0, m_And(m_Value(X), m_ConstantInt(C1)), *Context) &&
4716 Instruction *Or = BinaryOperator::CreateOr(X, RHS);
4717 InsertNewInstBefore(Or, I);
4719 return BinaryOperator::CreateAnd(Or,
4720 Context->getConstantInt(RHS->getValue() | C1->getValue()));
4723 // (X ^ C1) | C2 --> (X | C2) ^ (C1&~C2)
4724 if (match(Op0, m_Xor(m_Value(X), m_ConstantInt(C1)), *Context) &&
4726 Instruction *Or = BinaryOperator::CreateOr(X, RHS);
4727 InsertNewInstBefore(Or, I);
4729 return BinaryOperator::CreateXor(Or,
4730 Context->getConstantInt(C1->getValue() & ~RHS->getValue()));
4733 // Try to fold constant and into select arguments.
4734 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
4735 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
4737 if (isa<PHINode>(Op0))
4738 if (Instruction *NV = FoldOpIntoPhi(I))
4742 Value *A = 0, *B = 0;
4743 ConstantInt *C1 = 0, *C2 = 0;
4745 if (match(Op0, m_And(m_Value(A), m_Value(B)), *Context))
4746 if (A == Op1 || B == Op1) // (A & ?) | A --> A
4747 return ReplaceInstUsesWith(I, Op1);
4748 if (match(Op1, m_And(m_Value(A), m_Value(B)), *Context))
4749 if (A == Op0 || B == Op0) // A | (A & ?) --> A
4750 return ReplaceInstUsesWith(I, Op0);
4752 // (A | B) | C and A | (B | C) -> bswap if possible.
4753 // (A >> B) | (C << D) and (A << B) | (C >> D) -> bswap if possible.
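// e.g. on i16, (X >> 8) | (X << 8) (with a logical right shift) is a byte swap.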
4754 if (match(Op0, m_Or(m_Value(), m_Value()), *Context) ||
4755 match(Op1, m_Or(m_Value(), m_Value()), *Context) ||
4756 (match(Op0, m_Shift(m_Value(), m_Value()), *Context) &&
4757 match(Op1, m_Shift(m_Value(), m_Value()), *Context))) {
4758 if (Instruction *BSwap = MatchBSwap(I))
4762 // (X^C)|Y -> (X|Y)^C iff Y&C == 0
4763 if (Op0->hasOneUse() &&
4764 match(Op0, m_Xor(m_Value(A), m_ConstantInt(C1)), *Context) &&
4765 MaskedValueIsZero(Op1, C1->getValue())) {
4766 Instruction *NOr = BinaryOperator::CreateOr(A, Op1);
4767 InsertNewInstBefore(NOr, I);
4769 return BinaryOperator::CreateXor(NOr, C1);
4772 // Y|(X^C) -> (X|Y)^C iff Y&C == 0
4773 if (Op1->hasOneUse() &&
4774 match(Op1, m_Xor(m_Value(A), m_ConstantInt(C1)), *Context) &&
4775 MaskedValueIsZero(Op0, C1->getValue())) {
4776 Instruction *NOr = BinaryOperator::CreateOr(A, Op0);
4777 InsertNewInstBefore(NOr, I);
4779 return BinaryOperator::CreateXor(NOr, C1);
4783 Value *C = 0, *D = 0;
4784 if (match(Op0, m_And(m_Value(A), m_Value(C)), *Context) &&
4785 match(Op1, m_And(m_Value(B), m_Value(D)), *Context)) {
4786 Value *V1 = 0, *V2 = 0, *V3 = 0;
4787 C1 = dyn_cast<ConstantInt>(C);
4788 C2 = dyn_cast<ConstantInt>(D);
4789 if (C1 && C2) { // (A & C1)|(B & C2)
4790 // If we have: ((V + N) & C1) | (V & C2)
4791 // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
4792 // replace with V+N.
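// e.g. ((V + 256) & -256) | (V & 255)  -->  V + 256
// (here C2 = 255, C1 = ~C2 = -256, N = 256, and N & C2 == 0).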
4793 if (C1->getValue() == ~C2->getValue()) {
4794 if ((C2->getValue() & (C2->getValue()+1)) == 0 && // C2 == 0+1+
4795 match(A, m_Add(m_Value(V1), m_Value(V2)), *Context)) {
4796 // Add commutes, try both ways.
4797 if (V1 == B && MaskedValueIsZero(V2, C2->getValue()))
4798 return ReplaceInstUsesWith(I, A);
4799 if (V2 == B && MaskedValueIsZero(V1, C2->getValue()))
4800 return ReplaceInstUsesWith(I, A);
4802 // Or commutes, try both ways.
4803 if ((C1->getValue() & (C1->getValue()+1)) == 0 &&
4804 match(B, m_Add(m_Value(V1), m_Value(V2)), *Context)) {
4805 // Add commutes, try both ways.
4806 if (V1 == A && MaskedValueIsZero(V2, C1->getValue()))
4807 return ReplaceInstUsesWith(I, B);
4808 if (V2 == A && MaskedValueIsZero(V1, C1->getValue()))
4809 return ReplaceInstUsesWith(I, B);
4812 V1 = 0; V2 = 0; V3 = 0;
4815 // Check to see if we have any common things being and'ed. If so, find the
4816 // terms for V1 & (V2|V3).
4817 if (isOnlyUse(Op0) || isOnlyUse(Op1)) {
4818 if (A == B) // (A & C)|(A & D) == A & (C|D)
4819 V1 = A, V2 = C, V3 = D;
4820 else if (A == D) // (A & C)|(B & A) == A & (B|C)
4821 V1 = A, V2 = B, V3 = C;
4822 else if (C == B) // (A & C)|(C & D) == C & (A|D)
4823 V1 = C, V2 = A, V3 = D;
4824 else if (C == D) // (A & C)|(B & C) == C & (A|B)
4825 V1 = C, V2 = A, V3 = B;
4829 Value *Or = InsertNewInstBefore(BinaryOperator::CreateOr(V2, V3, "tmp"), I);
4830 return BinaryOperator::CreateAnd(V1, Or);
4834 // (A & (C0?-1:0)) | (B & ~(C0?-1:0)) -> C0 ? A : B, and commuted variants
4835 if (Instruction *Match = MatchSelectFromAndOr(A, B, C, D, Context))
4837 if (Instruction *Match = MatchSelectFromAndOr(B, A, D, C, Context))
4839 if (Instruction *Match = MatchSelectFromAndOr(C, B, A, D, Context))
4841 if (Instruction *Match = MatchSelectFromAndOr(D, A, B, C, Context))
4844 // ((A&~B)|(~A&B)) -> A^B
4845 if ((match(C, m_Not(m_Specific(D)), *Context) &&
4846 match(B, m_Not(m_Specific(A)), *Context)))
4847 return BinaryOperator::CreateXor(A, D);
4848 // ((~B&A)|(~A&B)) -> A^B
4849 if ((match(A, m_Not(m_Specific(D)), *Context) &&
4850 match(B, m_Not(m_Specific(C)), *Context)))
4851 return BinaryOperator::CreateXor(C, D);
4852 // ((A&~B)|(B&~A)) -> A^B
4853 if ((match(C, m_Not(m_Specific(B)), *Context) &&
4854 match(D, m_Not(m_Specific(A)), *Context)))
4855 return BinaryOperator::CreateXor(A, B);
4856 // ((~B&A)|(B&~A)) -> A^B
4857 if ((match(A, m_Not(m_Specific(B)), *Context) &&
4858 match(D, m_Not(m_Specific(C)), *Context)))
4859 return BinaryOperator::CreateXor(C, B);
4862 // (X >> Z) | (Y >> Z) -> (X|Y) >> Z for all shifts.
4863 if (BinaryOperator *SI1 = dyn_cast<BinaryOperator>(Op1)) {
4864 if (BinaryOperator *SI0 = dyn_cast<BinaryOperator>(Op0))
4865 if (SI0->isShift() && SI0->getOpcode() == SI1->getOpcode() &&
4866 SI0->getOperand(1) == SI1->getOperand(1) &&
4867 (SI0->hasOneUse() || SI1->hasOneUse())) {
4868 Instruction *NewOp =
4869 InsertNewInstBefore(BinaryOperator::CreateOr(SI0->getOperand(0),
4871 SI0->getName()), I);
4872 return BinaryOperator::Create(SI1->getOpcode(), NewOp,
4873 SI1->getOperand(1));
4877 // ((A|B)&1)|(B&-2) -> (A&1) | B
4878 if (match(Op0, m_And(m_Or(m_Value(A), m_Value(B)), m_Value(C)), *Context) ||
4879 match(Op0, m_And(m_Value(C), m_Or(m_Value(A), m_Value(B))), *Context)) {
4880 Instruction *Ret = FoldOrWithConstants(I, Op1, A, B, C);
4881 if (Ret) return Ret;
4883 // (B&-2)|((A|B)&1) -> (A&1) | B
4884 if (match(Op1, m_And(m_Or(m_Value(A), m_Value(B)), m_Value(C)), *Context) ||
4885 match(Op1, m_And(m_Value(C), m_Or(m_Value(A), m_Value(B))), *Context)) {
4886 Instruction *Ret = FoldOrWithConstants(I, Op0, A, B, C);
4887 if (Ret) return Ret;
4890 if (match(Op0, m_Not(m_Value(A)), *Context)) { // ~A | Op1
4891 if (A == Op1) // ~A | A == -1
4892 return ReplaceInstUsesWith(I, Context->getAllOnesValue(I.getType()));
4896 // Note, A is still live here!
4897 if (match(Op1, m_Not(m_Value(B)), *Context)) { // Op0 | ~B
4899 return ReplaceInstUsesWith(I, Context->getAllOnesValue(I.getType()));
4901 // (~A | ~B) == (~(A & B)) - De Morgan's Law
4902 if (A && isOnlyUse(Op0) && isOnlyUse(Op1)) {
4903 Value *And = InsertNewInstBefore(BinaryOperator::CreateAnd(A, B,
4904 I.getName()+".demorgan"), I);
4905 return BinaryOperator::CreateNot(And);
4909 // (icmp1 A, B) | (icmp2 A, B) --> (icmp3 A, B)
4910 if (ICmpInst *RHS = dyn_cast<ICmpInst>(I.getOperand(1))) {
4911 if (Instruction *R = AssociativeOpt(I, FoldICmpLogical(*this, RHS),Context))
4914 if (ICmpInst *LHS = dyn_cast<ICmpInst>(I.getOperand(0)))
4915 if (Instruction *Res = FoldOrOfICmps(I, LHS, RHS))
4919 // fold (or (cast A), (cast B)) -> (cast (or A, B))
4920 if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
4921 if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
4922 if (Op0C->getOpcode() == Op1C->getOpcode()) {// same cast kind ?
4923 if (!isa<ICmpInst>(Op0C->getOperand(0)) ||
4924 !isa<ICmpInst>(Op1C->getOperand(0))) {
4925 const Type *SrcTy = Op0C->getOperand(0)->getType();
4926 if (SrcTy == Op1C->getOperand(0)->getType() && SrcTy->isInteger() &&
4927 // Only do this if the casts both really cause code to be generated.
4929 ValueRequiresCast(Op0C->getOpcode(), Op0C->getOperand(0),
4931 ValueRequiresCast(Op1C->getOpcode(), Op1C->getOperand(0),
4933 Instruction *NewOp = BinaryOperator::CreateOr(Op0C->getOperand(0),
4934 Op1C->getOperand(0),
4936 InsertNewInstBefore(NewOp, I);
4937 return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
4944 // (fcmp uno x, c) | (fcmp uno y, c) -> (fcmp uno x, y)
4945 if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0))) {
4946 if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1))) {
4947 if (LHS->getPredicate() == FCmpInst::FCMP_UNO &&
4948 RHS->getPredicate() == FCmpInst::FCMP_UNO &&
4949 LHS->getOperand(0)->getType() == RHS->getOperand(0)->getType()) {
4950 if (ConstantFP *LHSC = dyn_cast<ConstantFP>(LHS->getOperand(1)))
4951 if (ConstantFP *RHSC = dyn_cast<ConstantFP>(RHS->getOperand(1))) {
4952 // If either of the constants is a nan, then the whole thing returns true.
4954 if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN())
4955 return ReplaceInstUsesWith(I, Context->getConstantIntTrue());
4957 // Otherwise, no need to compare the two constants; compare the two FP values instead.
4959 return new FCmpInst(*Context, FCmpInst::FCMP_UNO,
4960 LHS->getOperand(0), RHS->getOperand(0));
4963 Value *Op0LHS, *Op0RHS, *Op1LHS, *Op1RHS;
4964 FCmpInst::Predicate Op0CC, Op1CC;
4965 if (match(Op0, m_FCmp(Op0CC, m_Value(Op0LHS),
4966 m_Value(Op0RHS)), *Context) &&
4967 match(Op1, m_FCmp(Op1CC, m_Value(Op1LHS),
4968 m_Value(Op1RHS)), *Context)) {
4969 if (Op0LHS == Op1RHS && Op0RHS == Op1LHS) {
4970 // Swap RHS operands to match LHS.
4971 Op1CC = FCmpInst::getSwappedPredicate(Op1CC);
4972 std::swap(Op1LHS, Op1RHS);
4974 if (Op0LHS == Op1LHS && Op0RHS == Op1RHS) {
4975 // Simplify (fcmp cc0 x, y) | (fcmp cc1 x, y).
4977 return new FCmpInst(*Context, (FCmpInst::Predicate)Op0CC,
4979 else if (Op0CC == FCmpInst::FCMP_TRUE ||
4980 Op1CC == FCmpInst::FCMP_TRUE)
4981 return ReplaceInstUsesWith(I, Context->getConstantIntTrue());
4982 else if (Op0CC == FCmpInst::FCMP_FALSE)
4983 return ReplaceInstUsesWith(I, Op1);
4984 else if (Op1CC == FCmpInst::FCMP_FALSE)
4985 return ReplaceInstUsesWith(I, Op0);
4988 unsigned Op0Pred = getFCmpCode(Op0CC, Op0Ordered);
4989 unsigned Op1Pred = getFCmpCode(Op1CC, Op1Ordered);
4990 if (Op0Ordered == Op1Ordered) {
4991 // If both are ordered or unordered, return a new fcmp with
4992 // or'ed predicates.
4993 Value *RV = getFCmpValue(Op0Ordered, Op0Pred|Op1Pred,
4994 Op0LHS, Op0RHS, Context);
4995 if (Instruction *I = dyn_cast<Instruction>(RV))
4997 // Otherwise, it's a constant boolean value...
4998 return ReplaceInstUsesWith(I, RV);
5006 return Changed ? &I : 0;
5011 // XorSelf - Implements: X ^ X --> 0
5014 XorSelf(Value *rhs) : RHS(rhs) {}
5015 bool shouldApply(Value *LHS) const { return LHS == RHS; }
5016 Instruction *apply(BinaryOperator &Xor) const {
5023 Instruction *InstCombiner::visitXor(BinaryOperator &I) {
5024 bool Changed = SimplifyCommutative(I);
5025 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
5027 if (isa<UndefValue>(Op1)) {
5028 if (isa<UndefValue>(Op0))
5029 // Handle undef ^ undef -> 0 special case. This is a common idiom.
5031 return ReplaceInstUsesWith(I, Context->getNullValue(I.getType()));
5032 return ReplaceInstUsesWith(I, Op1); // X ^ undef -> undef
5035 // xor X, X = 0, even if X is nested in a sequence of Xor's.
5036 if (Instruction *Result = AssociativeOpt(I, XorSelf(Op1), Context)) {
5037 assert(Result == &I && "AssociativeOpt didn't work?"); Result=Result;
5038 return ReplaceInstUsesWith(I, Context->getNullValue(I.getType()));
5041 // See if we can simplify any instructions used by the instruction whose sole
5042 // purpose is to compute bits we don't care about.
5043 if (SimplifyDemandedInstructionBits(I))
5045 if (isa<VectorType>(I.getType()))
5046 if (isa<ConstantAggregateZero>(Op1))
5047 return ReplaceInstUsesWith(I, Op0); // X ^ <0,0> -> X
5049 // Is this a ~ operation?
5050 if (Value *NotOp = dyn_castNotVal(&I, Context)) {
5051 // ~(~X & Y) --> (X | ~Y) - De Morgan's Law
5052 // ~(~X | Y) === (X & ~Y) - De Morgan's Law
5053 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(NotOp)) {
5054 if (Op0I->getOpcode() == Instruction::And ||
5055 Op0I->getOpcode() == Instruction::Or) {
5056 if (dyn_castNotVal(Op0I->getOperand(1), Context)) Op0I->swapOperands();
5057 if (Value *Op0NotVal = dyn_castNotVal(Op0I->getOperand(0), Context)) {
5059 BinaryOperator::CreateNot(Op0I->getOperand(1),
5060 Op0I->getOperand(1)->getName()+".not");
5061 InsertNewInstBefore(NotY, I);
5062 if (Op0I->getOpcode() == Instruction::And)
5063 return BinaryOperator::CreateOr(Op0NotVal, NotY);
5065 return BinaryOperator::CreateAnd(Op0NotVal, NotY);
5072 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
5073 if (RHS == Context->getConstantIntTrue() && Op0->hasOneUse()) {
5074 // xor (cmp A, B), true = not (cmp A, B) = !cmp A, B
5075 if (ICmpInst *ICI = dyn_cast<ICmpInst>(Op0))
5076 return new ICmpInst(*Context, ICI->getInversePredicate(),
5077 ICI->getOperand(0), ICI->getOperand(1));
5079 if (FCmpInst *FCI = dyn_cast<FCmpInst>(Op0))
5080 return new FCmpInst(*Context, FCI->getInversePredicate(),
5081 FCI->getOperand(0), FCI->getOperand(1));
5084 // fold (xor(zext(cmp)), 1) and (xor(sext(cmp)), -1) to ext(!cmp).
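// e.g. xor (zext i1 (icmp eq X, Y) to i32), 1  -->  zext i1 (icmp ne X, Y) to i32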
5085 if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
5086 if (CmpInst *CI = dyn_cast<CmpInst>(Op0C->getOperand(0))) {
5087 if (CI->hasOneUse() && Op0C->hasOneUse()) {
5088 Instruction::CastOps Opcode = Op0C->getOpcode();
5089 if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) {
5090 if (RHS == Context->getConstantExprCast(Opcode,
5091 Context->getConstantIntTrue(),
5092 Op0C->getDestTy())) {
5093 Instruction *NewCI = InsertNewInstBefore(CmpInst::Create(
5095 CI->getOpcode(), CI->getInversePredicate(),
5096 CI->getOperand(0), CI->getOperand(1)), I);
5097 NewCI->takeName(CI);
5098 return CastInst::Create(Opcode, NewCI, Op0C->getType());
5105 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
5106 // ~(c-X) == X-c-1 == X+(-c-1)
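// e.g. ~(5 - X) == X - 6 == X + (-6)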
5107 if (Op0I->getOpcode() == Instruction::Sub && RHS->isAllOnesValue())
5108 if (Constant *Op0I0C = dyn_cast<Constant>(Op0I->getOperand(0))) {
5109 Constant *NegOp0I0C = Context->getConstantExprNeg(Op0I0C);
5110 Constant *ConstantRHS = Context->getConstantExprSub(NegOp0I0C,
5111 Context->getConstantInt(I.getType(), 1));
5112 return BinaryOperator::CreateAdd(Op0I->getOperand(1), ConstantRHS);
5115 if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
5116 if (Op0I->getOpcode() == Instruction::Add) {
5118 // ~(X+c) --> (-c-1)-X
5118 if (RHS->isAllOnesValue()) {
5119 Constant *NegOp0CI = Context->getConstantExprNeg(Op0CI);
5120 return BinaryOperator::CreateSub(
5121 Context->getConstantExprSub(NegOp0CI,
5122 Context->getConstantInt(I.getType(), 1)),
5123 Op0I->getOperand(0));
5124 } else if (RHS->getValue().isSignBit()) {
5125 // (X + C) ^ signbit -> (X + C + signbit)
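// (Adding the sign bit and xor'ing with it are the same operation, since any
// carry out of the top bit is discarded; e.g. on i8, Y ^ 0x80 == Y + 0x80.)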
5127 Context->getConstantInt(RHS->getValue() + Op0CI->getValue());
5128 return BinaryOperator::CreateAdd(Op0I->getOperand(0), C);
5131 } else if (Op0I->getOpcode() == Instruction::Or) {
5132 // (X|C1)^C2 -> X^(C1|C2) iff X&C1 == 0
5133 if (MaskedValueIsZero(Op0I->getOperand(0), Op0CI->getValue())) {
5134 Constant *NewRHS = Context->getConstantExprOr(Op0CI, RHS);
5135 // Anything in both C1 and C2 is known to be zero, remove it from
5137 Constant *CommonBits = Context->getConstantExprAnd(Op0CI, RHS);
5138 NewRHS = Context->getConstantExprAnd(NewRHS,
5139 Context->getConstantExprNot(CommonBits));
5140 AddToWorkList(Op0I);
5141 I.setOperand(0, Op0I->getOperand(0));
5142 I.setOperand(1, NewRHS);
5149 // Try to fold constant and into select arguments.
5150 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
5151 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
5153 if (isa<PHINode>(Op0))
5154 if (Instruction *NV = FoldOpIntoPhi(I))
5158 if (Value *X = dyn_castNotVal(Op0, Context)) // ~A ^ A == -1
5160 return ReplaceInstUsesWith(I, Context->getAllOnesValue(I.getType()));
5162 if (Value *X = dyn_castNotVal(Op1, Context)) // A ^ ~A == -1
5164 return ReplaceInstUsesWith(I, Context->getAllOnesValue(I.getType()));
5167 BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1);
5170 if (match(Op1I, m_Or(m_Value(A), m_Value(B)), *Context)) {
5171 if (A == Op0) { // B^(B|A) == (A|B)^B
5172 Op1I->swapOperands();
5174 std::swap(Op0, Op1);
5175 } else if (B == Op0) { // B^(A|B) == (A|B)^B
5176 I.swapOperands(); // Simplified below.
5177 std::swap(Op0, Op1);
5179 } else if (match(Op1I, m_Xor(m_Specific(Op0), m_Value(B)), *Context)) {
5180 return ReplaceInstUsesWith(I, B); // A^(A^B) == B
5181 } else if (match(Op1I, m_Xor(m_Value(A), m_Specific(Op0)), *Context)) {
5182 return ReplaceInstUsesWith(I, A); // A^(B^A) == B
5183 } else if (match(Op1I, m_And(m_Value(A), m_Value(B)), *Context) &&
5185 if (A == Op0) { // A^(A&B) -> A^(B&A)
5186 Op1I->swapOperands();
5189 if (B == Op0) { // A^(B&A) -> (B&A)^A
5190 I.swapOperands(); // Simplified below.
5191 std::swap(Op0, Op1);
5196 BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0);
5199 if (match(Op0I, m_Or(m_Value(A), m_Value(B)), *Context) &&
5200 Op0I->hasOneUse()) {
5201 if (A == Op1) // (B|A)^B == (A|B)^B
5203 if (B == Op1) { // (A|B)^B == A & ~B
5205 InsertNewInstBefore(BinaryOperator::CreateNot(Op1, "tmp"), I);
5206 return BinaryOperator::CreateAnd(A, NotB);
5208 } else if (match(Op0I, m_Xor(m_Specific(Op1), m_Value(B)), *Context)) {
5209 return ReplaceInstUsesWith(I, B); // (A^B)^A == B
5210 } else if (match(Op0I, m_Xor(m_Value(A), m_Specific(Op1)), *Context)) {
5211 return ReplaceInstUsesWith(I, A); // (B^A)^A == B
5212 } else if (match(Op0I, m_And(m_Value(A), m_Value(B)), *Context) &&
5214 if (A == Op1) // (A&B)^A -> (B&A)^A
5216 if (B == Op1 && // (B&A)^A == ~B & A
5217 !isa<ConstantInt>(Op1)) { // Canonical form is (B&C)^C
5219 InsertNewInstBefore(BinaryOperator::CreateNot(A, "tmp"), I);
5220 return BinaryOperator::CreateAnd(N, Op1);
5225 // (X >> Z) ^ (Y >> Z) -> (X^Y) >> Z for all shifts.
5226 if (Op0I && Op1I && Op0I->isShift() &&
5227 Op0I->getOpcode() == Op1I->getOpcode() &&
5228 Op0I->getOperand(1) == Op1I->getOperand(1) &&
5229 (Op0I->hasOneUse() || Op1I->hasOneUse())) {
5230 Instruction *NewOp =
5231 InsertNewInstBefore(BinaryOperator::CreateXor(Op0I->getOperand(0),
5232 Op1I->getOperand(0),
5233 Op0I->getName()), I);
5234 return BinaryOperator::Create(Op1I->getOpcode(), NewOp,
5235 Op1I->getOperand(1));
5239 Value *A, *B, *C, *D;
5240 // (A & B)^(A | B) -> A ^ B
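// (Bits where A and B agree cancel in the xor; bits where they differ are set
// in A|B but clear in A&B, so exactly the differing bits survive.)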
5241 if (match(Op0I, m_And(m_Value(A), m_Value(B)), *Context) &&
5242 match(Op1I, m_Or(m_Value(C), m_Value(D)), *Context)) {
5243 if ((A == C && B == D) || (A == D && B == C))
5244 return BinaryOperator::CreateXor(A, B);
5246 // (A | B)^(A & B) -> A ^ B
5247 if (match(Op0I, m_Or(m_Value(A), m_Value(B)), *Context) &&
5248 match(Op1I, m_And(m_Value(C), m_Value(D)), *Context)) {
5249 if ((A == C && B == D) || (A == D && B == C))
5250 return BinaryOperator::CreateXor(A, B);
5254 if ((Op0I->hasOneUse() || Op1I->hasOneUse()) &&
5255 match(Op0I, m_And(m_Value(A), m_Value(B)), *Context) &&
5256 match(Op1I, m_And(m_Value(C), m_Value(D)), *Context)) {
5257 // (X & Y)^(X & Z) -> (Y^Z) & X
5258 Value *X = 0, *Y = 0, *Z = 0;
5260 X = A, Y = B, Z = D;
5262 X = A, Y = B, Z = C;
5264 X = B, Y = A, Z = D;
5266 X = B, Y = A, Z = C;
5269 Instruction *NewOp =
5270 InsertNewInstBefore(BinaryOperator::CreateXor(Y, Z, Op0->getName()), I);
5271 return BinaryOperator::CreateAnd(NewOp, X);
5276 // (icmp1 A, B) ^ (icmp2 A, B) --> (icmp3 A, B)
5277 if (ICmpInst *RHS = dyn_cast<ICmpInst>(I.getOperand(1)))
5278 if (Instruction *R = AssociativeOpt(I, FoldICmpLogical(*this, RHS),Context))
5281 // fold (xor (cast A), (cast B)) -> (cast (xor A, B))
5282 if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
5283 if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
5284 if (Op0C->getOpcode() == Op1C->getOpcode()) { // same cast kind?
5285 const Type *SrcTy = Op0C->getOperand(0)->getType();
5286 if (SrcTy == Op1C->getOperand(0)->getType() && SrcTy->isInteger() &&
5287 // Only do this if the casts both really cause code to be generated.
5288 ValueRequiresCast(Op0C->getOpcode(), Op0C->getOperand(0),
5290 ValueRequiresCast(Op1C->getOpcode(), Op1C->getOperand(0),
5292 Instruction *NewOp = BinaryOperator::CreateXor(Op0C->getOperand(0),
5293 Op1C->getOperand(0),
5295 InsertNewInstBefore(NewOp, I);
5296 return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
5301 return Changed ? &I : 0;
5304 static ConstantInt *ExtractElement(Constant *V, Constant *Idx,
5305 LLVMContext *Context) {
5306 return cast<ConstantInt>(Context->getConstantExprExtractElement(V, Idx));
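
/// HasAddOverflow - Return true if In1+In2 overflowed. For a signed add,
/// adding a negative In2 overflows iff the wrapped result is greater than In1,
/// and adding a non-negative In2 overflows iff it is less than In1; for an
/// unsigned add, overflow occurred iff the result is less than In1.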
5309 static bool HasAddOverflow(ConstantInt *Result,
5310 ConstantInt *In1, ConstantInt *In2,
5313 if (In2->getValue().isNegative())
5314 return Result->getValue().sgt(In1->getValue());
5316 return Result->getValue().slt(In1->getValue());
5318 return Result->getValue().ult(In1->getValue());
5321 /// AddWithOverflow - Compute Result = In1+In2, returning true if the result
5322 /// overflowed for this type.
5323 static bool AddWithOverflow(Constant *&Result, Constant *In1,
5324 Constant *In2, LLVMContext *Context,
5325 bool IsSigned = false) {
5326 Result = Context->getConstantExprAdd(In1, In2);
5328 if (const VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
5329 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
5330 Constant *Idx = Context->getConstantInt(Type::Int32Ty, i);
5331 if (HasAddOverflow(ExtractElement(Result, Idx, Context),
5332 ExtractElement(In1, Idx, Context),
5333 ExtractElement(In2, Idx, Context),
5340 return HasAddOverflow(cast<ConstantInt>(Result),
5341 cast<ConstantInt>(In1), cast<ConstantInt>(In2),
5345 static bool HasSubOverflow(ConstantInt *Result,
5346 ConstantInt *In1, ConstantInt *In2,
5349 if (In2->getValue().isNegative())
5350 return Result->getValue().slt(In1->getValue());
5352 return Result->getValue().sgt(In1->getValue());
5354 return Result->getValue().ugt(In1->getValue());
5357 /// SubWithOverflow - Compute Result = In1-In2, returning true if the result
5358 /// overflowed for this type.
5359 static bool SubWithOverflow(Constant *&Result, Constant *In1,
5360 Constant *In2, LLVMContext *Context,
5361 bool IsSigned = false) {
5362 Result = Context->getConstantExprSub(In1, In2);
5364 if (const VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
5365 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
5366 Constant *Idx = Context->getConstantInt(Type::Int32Ty, i);
5367 if (HasSubOverflow(ExtractElement(Result, Idx, Context),
5368 ExtractElement(In1, Idx, Context),
5369 ExtractElement(In2, Idx, Context),
5376 return HasSubOverflow(cast<ConstantInt>(Result),
5377 cast<ConstantInt>(In1), cast<ConstantInt>(In2),
5381 /// EmitGEPOffset - Given a getelementptr instruction/constantexpr, emit the
5382 /// code necessary to compute the offset from the base pointer (without adding
5383 /// in the base pointer). Return the result as a signed integer of intptr size.
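///
/// For example (assuming a target where i32 has a 4-byte alloc size), for
///   getelementptr i32* %P, i64 %i
/// this emits the equivalent of "mul i64 %i, 4" as the offset.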
5384 static Value *EmitGEPOffset(User *GEP, Instruction &I, InstCombiner &IC) {
5385 TargetData &TD = IC.getTargetData();
5386 gep_type_iterator GTI = gep_type_begin(GEP);
5387 const Type *IntPtrTy = TD.getIntPtrType();
5388 LLVMContext *Context = IC.getContext();
5389 Value *Result = Context->getNullValue(IntPtrTy);
5391 // Build a mask for high order bits.
5392 unsigned IntPtrWidth = TD.getPointerSizeInBits();
5393 uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);
5395 for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
5398 uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
5399 if (ConstantInt *OpC = dyn_cast<ConstantInt>(Op)) {
5400 if (OpC->isZero()) continue;
5402 // Handle a struct index, which adds its field offset to the pointer.
5403 if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
5404 Size = TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
5406 if (ConstantInt *RC = dyn_cast<ConstantInt>(Result))
5408 Context->getConstantInt(RC->getValue() + APInt(IntPtrWidth, Size));
5410 Result = IC.InsertNewInstBefore(
5411 BinaryOperator::CreateAdd(Result,
5412 Context->getConstantInt(IntPtrTy, Size),
5413 GEP->getName()+".offs"), I);
5417 Constant *Scale = Context->getConstantInt(IntPtrTy, Size);
5419 Context->getConstantExprIntegerCast(OpC, IntPtrTy, true /*SExt*/);
5420 Scale = Context->getConstantExprMul(OC, Scale);
5421 if (Constant *RC = dyn_cast<Constant>(Result))
5422 Result = Context->getConstantExprAdd(RC, Scale);
5424 // Emit an add instruction.
5425 Result = IC.InsertNewInstBefore(
5426 BinaryOperator::CreateAdd(Result, Scale,
5427 GEP->getName()+".offs"), I);
5431 // Convert to correct type.
5432 if (Op->getType() != IntPtrTy) {
5433 if (Constant *OpC = dyn_cast<Constant>(Op))
5434 Op = Context->getConstantExprIntegerCast(OpC, IntPtrTy, true);
5436 Op = IC.InsertNewInstBefore(CastInst::CreateIntegerCast(Op, IntPtrTy,
5438 Op->getName()+".c"), I);
5441 Constant *Scale = Context->getConstantInt(IntPtrTy, Size);
5442 if (Constant *OpC = dyn_cast<Constant>(Op))
5443 Op = Context->getConstantExprMul(OpC, Scale);
5444 else // We'll let instcombine(mul) convert this to a shl if possible.
5445 Op = IC.InsertNewInstBefore(BinaryOperator::CreateMul(Op, Scale,
5446 GEP->getName()+".idx"), I);
5449 // Emit an add instruction.
5450 if (isa<Constant>(Op) && isa<Constant>(Result))
5451 Result = Context->getConstantExprAdd(cast<Constant>(Op),
5452 cast<Constant>(Result));
5454 Result = IC.InsertNewInstBefore(BinaryOperator::CreateAdd(Op, Result,
5455 GEP->getName()+".offs"), I);
5461 /// EvaluateGEPOffsetExpression - Return a value that can be used to compare
5462 /// the *offset* implied by GEP to zero. For example, if we have &A[i], we want
5463 /// to return 'i' for "icmp ne i, 0". Note that, in general, indices can be
5464 /// complex, and scales are involved. The above expression would also be legal
5465 /// to codegen as "icmp ne (i*4), 0" (assuming A is a pointer to i32). This
5466 /// latter form is less amenable to optimization though, and we are allowed to
5467 /// generate the first by knowing that pointer arithmetic doesn't overflow.
5469 /// If we can't emit an optimized form for this expression, this returns null.
5471 static Value *EvaluateGEPOffsetExpression(User *GEP, Instruction &I,
5473 TargetData &TD = IC.getTargetData();
5474 gep_type_iterator GTI = gep_type_begin(GEP);
5476 // Check to see if this gep only has a single variable index. If so, and if
5477 // any constant indices are a multiple of its scale, then we can compute this
5478 // in terms of the scale of the variable index. For example, if the GEP
5479 // implies an offset of "12 + i*4", then we can codegen this as "3 + i",
5480 // because the expression will cross zero at the same point.
5481 unsigned i, e = GEP->getNumOperands();
5483 for (i = 1; i != e; ++i, ++GTI) {
5484 if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
5485 // Compute the aggregate offset of constant indices.
5486 if (CI->isZero()) continue;
5488 // Handle a struct index, which adds its field offset to the pointer.
5489 if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
5490 Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
5492 uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
5493 Offset += Size*CI->getSExtValue();
5496 // Found our variable index.
5501 // If there are no variable indices, we must have a constant offset, just
5502 // evaluate it the general way.
5503 if (i == e) return 0;
5505 Value *VariableIdx = GEP->getOperand(i);
5506 // Determine the scale factor of the variable element. For example, this is
5507 // 4 if the variable index is into an array of i32.
5508 uint64_t VariableScale = TD.getTypeAllocSize(GTI.getIndexedType());
5510 // Verify that there are no other variable indices. If there are, emit it the hard way.
5511 for (++i, ++GTI; i != e; ++i, ++GTI) {
5512 ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i));
5515 // Compute the aggregate offset of constant indices.
5516 if (CI->isZero()) continue;
5518 // Handle a struct index, which adds its field offset to the pointer.
5519 if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
5520 Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
5522 uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
5523 Offset += Size*CI->getSExtValue();
5527 // Okay, we know we have a single variable index, which must be a
5528 // pointer/array/vector index. If there is no offset, life is simple, return the variable index.
5530 unsigned IntPtrWidth = TD.getPointerSizeInBits();
5532 // Cast to intptrty in case a truncation occurs. If an extension is needed,
5533 // we don't need to bother extending: the extension won't affect where the
5534 // computation crosses zero.
5535 if (VariableIdx->getType()->getPrimitiveSizeInBits() > IntPtrWidth)
5536 VariableIdx = new TruncInst(VariableIdx, TD.getIntPtrType(),
5537 VariableIdx->getNameStart(), &I);
5541 // Otherwise, there is an index. The computation we will do will be modulo
5542 // the pointer size, so get it.
5543 uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);
5545 Offset &= PtrSizeMask;
5546 VariableScale &= PtrSizeMask;
5548 // To do this transformation, any constant index must be a multiple of the
5549 // variable scale factor. For example, we can evaluate "12 + 4*i" as "3 + i",
5550 // but we can't evaluate "10 + 3*i" in terms of i. Check that the offset is a
5551 // multiple of the variable scale.
5552 int64_t NewOffs = Offset / (int64_t)VariableScale;
5553 if (Offset != NewOffs*(int64_t)VariableScale)
5556 // Okay, we can do this evaluation. Start by converting the index to intptr.
5557 const Type *IntPtrTy = TD.getIntPtrType();
5558 if (VariableIdx->getType() != IntPtrTy)
5559 VariableIdx = CastInst::CreateIntegerCast(VariableIdx, IntPtrTy,
5561 VariableIdx->getNameStart(), &I);
5562 Constant *OffsetVal = IC.getContext()->getConstantInt(IntPtrTy, NewOffs);
5563 return BinaryOperator::CreateAdd(VariableIdx, OffsetVal, "offset", &I);
5567 /// FoldGEPICmp - Fold comparisons between a GEP instruction and something
5568 /// else. At this point we know that the GEP is on the LHS of the comparison.
5569 Instruction *InstCombiner::FoldGEPICmp(User *GEPLHS, Value *RHS,
5570 ICmpInst::Predicate Cond,
5572 assert(dyn_castGetElementPtr(GEPLHS) && "LHS is not a getelementptr!");
5574 // Look through bitcasts.
5575 if (BitCastInst *BCI = dyn_cast<BitCastInst>(RHS))
5576 RHS = BCI->getOperand(0);
5578 Value *PtrBase = GEPLHS->getOperand(0);
5579 if (PtrBase == RHS) {
5580 // ((gep Ptr, OFFSET) cmp Ptr) ---> (OFFSET cmp 0).
5581 // This transformation (ignoring the base and scales) is valid because we
5582 // know pointers can't overflow. See if we can output an optimized form.
5583 Value *Offset = EvaluateGEPOffsetExpression(GEPLHS, I, *this);
5585 // If not, synthesize the offset the hard way.
5587 Offset = EmitGEPOffset(GEPLHS, I, *this);
5588 return new ICmpInst(*Context, ICmpInst::getSignedPredicate(Cond), Offset,
5589 Context->getNullValue(Offset->getType()));
5590 } else if (User *GEPRHS = dyn_castGetElementPtr(RHS)) {
5591 // If the base pointers are different, but the indices are the same, just
5592 // compare the base pointer.
5593 if (PtrBase != GEPRHS->getOperand(0)) {
5594 bool IndicesTheSame = GEPLHS->getNumOperands()==GEPRHS->getNumOperands();
5595 IndicesTheSame &= GEPLHS->getOperand(0)->getType() ==
5596 GEPRHS->getOperand(0)->getType();
5598 for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
5599 if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
5600 IndicesTheSame = false;
5604 // If all indices are the same, just compare the base pointers.
5606 return new ICmpInst(*Context, ICmpInst::getSignedPredicate(Cond),
5607 GEPLHS->getOperand(0), GEPRHS->getOperand(0));
5609 // Otherwise, the base pointers are different and the indices are
5610 // different, bail out.
5614 // If one of the GEPs has all zero indices, recurse.
5615 bool AllZeros = true;
5616 for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
5617 if (!isa<Constant>(GEPLHS->getOperand(i)) ||
5618 !cast<Constant>(GEPLHS->getOperand(i))->isNullValue()) {
5623 return FoldGEPICmp(GEPRHS, GEPLHS->getOperand(0),
5624 ICmpInst::getSwappedPredicate(Cond), I);
5626 // If the other GEP has all zero indices, recurse.
5628 for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
5629 if (!isa<Constant>(GEPRHS->getOperand(i)) ||
5630 !cast<Constant>(GEPRHS->getOperand(i))->isNullValue()) {
5635 return FoldGEPICmp(GEPLHS, GEPRHS->getOperand(0), Cond, I);
5637 if (GEPLHS->getNumOperands() == GEPRHS->getNumOperands()) {
5638 // If the GEPs only differ by one index, compare it.
5639 unsigned NumDifferences = 0; // Keep track of # differences.
5640 unsigned DiffOperand = 0; // The operand that differs.
5641 for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
5642 if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
5643 if (GEPLHS->getOperand(i)->getType()->getPrimitiveSizeInBits() !=
5644 GEPRHS->getOperand(i)->getType()->getPrimitiveSizeInBits()) {
5645 // Irreconcilable differences.
5649 if (NumDifferences++) break;
5654 if (NumDifferences == 0) // SAME GEP?
5655 return ReplaceInstUsesWith(I, // No comparison is needed here.
5656 Context->getConstantInt(Type::Int1Ty,
5657 ICmpInst::isTrueWhenEqual(Cond)));
5659 else if (NumDifferences == 1) {
5660 Value *LHSV = GEPLHS->getOperand(DiffOperand);
5661 Value *RHSV = GEPRHS->getOperand(DiffOperand);
5662 // Make sure we do a signed comparison here.
5663 return new ICmpInst(*Context,
5664 ICmpInst::getSignedPredicate(Cond), LHSV, RHSV);
5668 // Only lower this if the icmp is the only user of the GEP or if we expect
5669 // the result to fold to a constant!
5670 if ((isa<ConstantExpr>(GEPLHS) || GEPLHS->hasOneUse()) &&
5671 (isa<ConstantExpr>(GEPRHS) || GEPRHS->hasOneUse())) {
5672 // ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2)) ---> (OFFSET1 cmp OFFSET2)
5673 Value *L = EmitGEPOffset(GEPLHS, I, *this);
5674 Value *R = EmitGEPOffset(GEPRHS, I, *this);
5675 return new ICmpInst(*Context, ICmpInst::getSignedPredicate(Cond), L, R);
5681 /// FoldFCmp_IntToFP_Cst - Fold fcmp ([us]itofp x, cst) if possible.
5683 Instruction *InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst &I,
5686 if (!isa<ConstantFP>(RHSC)) return 0;
5687 const APFloat &RHS = cast<ConstantFP>(RHSC)->getValueAPF();
5689 // Get the width of the mantissa. We don't want to hack on conversions that
5690 // might lose information from the integer, e.g. "i64 -> float"
5691 int MantissaWidth = LHSI->getType()->getFPMantissaWidth();
5692 if (MantissaWidth == -1) return 0; // Unknown.
5694 // Check to see that the input is converted from an integer type that is small
5695 // enough that it preserves all bits. TODO: check here for "known" sign bits.
5696 // This would allow us to handle (fptosi (x >>s 62) to float) if x is i64 f.e.
5697 unsigned InputSize = LHSI->getOperand(0)->getType()->getScalarSizeInBits();
5699 // If this is a uitofp instruction, we need an extra bit to hold the sign.
5700 bool LHSUnsigned = isa<UIToFPInst>(LHSI);
5704 // If the conversion would lose info, don't hack on this.
5705 if ((int)InputSize > MantissaWidth)
5708 // Otherwise, we can potentially simplify the comparison. We know that it
5709 // will always come through as an integer value and we know the constant is
5710 // not a NAN (it would have been previously simplified).
5711 assert(!RHS.isNaN() && "NaN comparison not already folded!");
5713 ICmpInst::Predicate Pred;
5714 switch (I.getPredicate()) {
5715 default: LLVM_UNREACHABLE("Unexpected predicate!");
5716 case FCmpInst::FCMP_UEQ:
5717 case FCmpInst::FCMP_OEQ:
5718 Pred = ICmpInst::ICMP_EQ;
5720 case FCmpInst::FCMP_UGT:
5721 case FCmpInst::FCMP_OGT:
5722 Pred = LHSUnsigned ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_SGT;
5724 case FCmpInst::FCMP_UGE:
5725 case FCmpInst::FCMP_OGE:
5726 Pred = LHSUnsigned ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_SGE;
5728 case FCmpInst::FCMP_ULT:
5729 case FCmpInst::FCMP_OLT:
5730 Pred = LHSUnsigned ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_SLT;
5732 case FCmpInst::FCMP_ULE:
5733 case FCmpInst::FCMP_OLE:
5734 Pred = LHSUnsigned ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_SLE;
5736 case FCmpInst::FCMP_UNE:
5737 case FCmpInst::FCMP_ONE:
5738 Pred = ICmpInst::ICMP_NE;
5740 case FCmpInst::FCMP_ORD:
5741 return ReplaceInstUsesWith(I, Context->getConstantIntTrue());
5742 case FCmpInst::FCMP_UNO:
5743 return ReplaceInstUsesWith(I, Context->getConstantIntFalse());
5746 const IntegerType *IntTy = cast<IntegerType>(LHSI->getOperand(0)->getType());
5748 // Now we know that the APFloat is a normal number, zero or inf.
5750 // See if the FP constant is too large for the integer. For example,
5751 // comparing an i8 to 300.0.
5752 unsigned IntWidth = IntTy->getScalarSizeInBits();
5755 // If the RHS value is > SignedMax, fold the comparison. This handles +INF
5756 // and large values.
5757 APFloat SMax(RHS.getSemantics(), APFloat::fcZero, false);
5758 SMax.convertFromAPInt(APInt::getSignedMaxValue(IntWidth), true,
5759 APFloat::rmNearestTiesToEven);
5760 if (SMax.compare(RHS) == APFloat::cmpLessThan) { // smax < 13123.0
5761 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SLT ||
5762 Pred == ICmpInst::ICMP_SLE)
5763 return ReplaceInstUsesWith(I, Context->getConstantIntTrue());
5764 return ReplaceInstUsesWith(I, Context->getConstantIntFalse());
5767 // If the RHS value is > UnsignedMax, fold the comparison. This handles
5768 // +INF and large values.
5769 APFloat UMax(RHS.getSemantics(), APFloat::fcZero, false);
5770 UMax.convertFromAPInt(APInt::getMaxValue(IntWidth), false,
5771 APFloat::rmNearestTiesToEven);
5772 if (UMax.compare(RHS) == APFloat::cmpLessThan) { // umax < 13123.0
5773 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_ULT ||
5774 Pred == ICmpInst::ICMP_ULE)
5775 return ReplaceInstUsesWith(I, Context->getConstantIntTrue());
5776 return ReplaceInstUsesWith(I, Context->getConstantIntFalse());
5781 // See if the RHS value is < SignedMin.
5782 APFloat SMin(RHS.getSemantics(), APFloat::fcZero, false);
5783 SMin.convertFromAPInt(APInt::getSignedMinValue(IntWidth), true,
5784 APFloat::rmNearestTiesToEven);
5785 if (SMin.compare(RHS) == APFloat::cmpGreaterThan) { // smin > 12312.0
5786 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT ||
5787 Pred == ICmpInst::ICMP_SGE)
5788 return ReplaceInstUsesWith(I, Context->getConstantIntTrue());
5789 return ReplaceInstUsesWith(I, Context->getConstantIntFalse());
5793 // Okay, now we know that the FP constant fits in the range [SMIN, SMAX] or
5794 // [0, UMAX], but it may still be fractional. See if it is fractional by
5795 // casting the FP value to the integer value and back, checking for equality.
5796 // Don't do this for zero, because -0.0 is not fractional.
5797 Constant *RHSInt = LHSUnsigned
5798 ? Context->getConstantExprFPToUI(RHSC, IntTy)
5799 : Context->getConstantExprFPToSI(RHSC, IntTy);
5800 if (!RHS.isZero()) {
5801 bool Equal = LHSUnsigned
5802 ? Context->getConstantExprUIToFP(RHSInt, RHSC->getType()) == RHSC
5803 : Context->getConstantExprSIToFP(RHSInt, RHSC->getType()) == RHSC;
5805 // If we had a comparison against a fractional value, we have to adjust
5806 // the compare predicate and sometimes the value. RHSC is rounded towards
5807 // zero at this point.
5809 default: LLVM_UNREACHABLE("Unexpected integer comparison!");
5810 case ICmpInst::ICMP_NE: // (float)int != 4.4 --> true
5811 return ReplaceInstUsesWith(I, Context->getConstantIntTrue());
5812 case ICmpInst::ICMP_EQ: // (float)int == 4.4 --> false
5813 return ReplaceInstUsesWith(I, Context->getConstantIntFalse());
5814 case ICmpInst::ICMP_ULE:
5815 // (float)int <= 4.4 --> int <= 4
5816 // (float)int <= -4.4 --> false
5817 if (RHS.isNegative())
5818 return ReplaceInstUsesWith(I, Context->getConstantIntFalse());
5820 case ICmpInst::ICMP_SLE:
5821 // (float)int <= 4.4 --> int <= 4
5822 // (float)int <= -4.4 --> int < -4
5823 if (RHS.isNegative())
5824 Pred = ICmpInst::ICMP_SLT;
5826 case ICmpInst::ICMP_ULT:
5827 // (float)int < -4.4 --> false
5828 // (float)int < 4.4 --> int <= 4
5829 if (RHS.isNegative())
5830 return ReplaceInstUsesWith(I, Context->getConstantIntFalse());
5831 Pred = ICmpInst::ICMP_ULE;
5833 case ICmpInst::ICMP_SLT:
5834 // (float)int < -4.4 --> int < -4
5835 // (float)int < 4.4 --> int <= 4
5836 if (!RHS.isNegative())
5837 Pred = ICmpInst::ICMP_SLE;
5839 case ICmpInst::ICMP_UGT:
5840 // (float)int > 4.4 --> int > 4
5841 // (float)int > -4.4 --> true
5842 if (RHS.isNegative())
5843 return ReplaceInstUsesWith(I, Context->getConstantIntTrue());
5845 case ICmpInst::ICMP_SGT:
5846 // (float)int > 4.4 --> int > 4
5847 // (float)int > -4.4 --> int >= -4
5848 if (RHS.isNegative())
5849 Pred = ICmpInst::ICMP_SGE;
5851 case ICmpInst::ICMP_UGE:
5852 // (float)int >= -4.4 --> true
5853 // (float)int >= 4.4 --> int > 4
5854 if (RHS.isNegative())
5855 return ReplaceInstUsesWith(I, Context->getConstantIntTrue());
5856 Pred = ICmpInst::ICMP_UGT;
5858 case ICmpInst::ICMP_SGE:
5859 // (float)int >= -4.4 --> int >= -4
5860 // (float)int >= 4.4 --> int > 4
5861 if (!RHS.isNegative())
5862 Pred = ICmpInst::ICMP_SGT;
5868 // Lower this FP comparison into an appropriate integer version of the comparison.
5870 return new ICmpInst(*Context, Pred, LHSI->getOperand(0), RHSInt);
5873 Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
5874 bool Changed = SimplifyCompare(I);
5875 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
5877 // Fold trivial predicates.
5878 if (I.getPredicate() == FCmpInst::FCMP_FALSE)
5879 return ReplaceInstUsesWith(I, Context->getConstantIntFalse());
5880 if (I.getPredicate() == FCmpInst::FCMP_TRUE)
5881 return ReplaceInstUsesWith(I, Context->getConstantIntTrue());
5883 // Simplify 'fcmp pred X, X'
5885 switch (I.getPredicate()) {
5886 default: LLVM_UNREACHABLE("Unknown predicate!");
5887 case FCmpInst::FCMP_UEQ: // True if unordered or equal
5888 case FCmpInst::FCMP_UGE: // True if unordered, greater than, or equal
5889 case FCmpInst::FCMP_ULE: // True if unordered, less than, or equal
5890 return ReplaceInstUsesWith(I, Context->getConstantIntTrue());
5891 case FCmpInst::FCMP_OGT: // True if ordered and greater than
5892 case FCmpInst::FCMP_OLT: // True if ordered and less than
5893 case FCmpInst::FCMP_ONE: // True if ordered and operands are unequal
5894 return ReplaceInstUsesWith(I, Context->getConstantIntFalse());
5896 case FCmpInst::FCMP_UNO: // True if unordered: isnan(X) | isnan(Y)
5897 case FCmpInst::FCMP_ULT: // True if unordered or less than
5898 case FCmpInst::FCMP_UGT: // True if unordered or greater than
5899 case FCmpInst::FCMP_UNE: // True if unordered or not equal
5900 // Canonicalize these to be 'fcmp uno %X, 0.0'.
5901 I.setPredicate(FCmpInst::FCMP_UNO);
5902 I.setOperand(1, Context->getNullValue(Op0->getType()));
5905 case FCmpInst::FCMP_ORD: // True if ordered (no nans)
5906 case FCmpInst::FCMP_OEQ: // True if ordered and equal
5907 case FCmpInst::FCMP_OGE: // True if ordered and greater than or equal
5908 case FCmpInst::FCMP_OLE: // True if ordered and less than or equal
5909 // Canonicalize these to be 'fcmp ord %X, 0.0'.
5910 I.setPredicate(FCmpInst::FCMP_ORD);
5911 I.setOperand(1, Context->getNullValue(Op0->getType()));
5916 if (isa<UndefValue>(Op1)) // fcmp pred X, undef -> undef
5917 return ReplaceInstUsesWith(I, Context->getUndef(Type::Int1Ty));
5919 // Handle fcmp with constant RHS
5920 if (Constant *RHSC = dyn_cast<Constant>(Op1)) {
5921 // If the constant is a nan, see if we can fold the comparison based on it.
5922 if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHSC)) {
5923 if (CFP->getValueAPF().isNaN()) {
5924 if (FCmpInst::isOrdered(I.getPredicate())) // True if ordered and...
5925 return ReplaceInstUsesWith(I, Context->getConstantIntFalse());
5926 assert(FCmpInst::isUnordered(I.getPredicate()) &&
5927 "Comparison must be either ordered or unordered!");
5928 // True if unordered.
5929 return ReplaceInstUsesWith(I, Context->getConstantIntTrue());
5933 if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
5934 switch (LHSI->getOpcode()) {
5935 case Instruction::PHI:
5936 // Only fold fcmp into the PHI if the phi and fcmp are in the same
5937 // block. If in the same block, we're encouraging jump threading. If
5938 // not, we are just pessimizing the code by making an i1 phi.
5939 if (LHSI->getParent() == I.getParent())
5940 if (Instruction *NV = FoldOpIntoPhi(I))
5943 case Instruction::SIToFP:
5944 case Instruction::UIToFP:
5945 if (Instruction *NV = FoldFCmp_IntToFP_Cst(I, LHSI, RHSC))
5948 case Instruction::Select:
5949 // If either operand of the select is a constant, we can fold the
5950 // comparison into the select arms, which will cause one to be
5951 // constant folded and the select turned into a bitwise or.
5952 Value *Op1 = 0, *Op2 = 0;
5953 if (LHSI->hasOneUse()) {
5954 if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1))) {
5955 // Fold the known value into the constant operand.
5956 Op1 = Context->getConstantExprCompare(I.getPredicate(), C, RHSC);
5957 // Insert a new FCmp of the other select operand.
5958 Op2 = InsertNewInstBefore(new FCmpInst(*Context, I.getPredicate(),
5959 LHSI->getOperand(2), RHSC,
5961 } else if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2))) {
5962 // Fold the known value into the constant operand.
5963 Op2 = Context->getConstantExprCompare(I.getPredicate(), C, RHSC);
5964 // Insert a new FCmp of the other select operand.
5965 Op1 = InsertNewInstBefore(new FCmpInst(*Context, I.getPredicate(),
5966 LHSI->getOperand(1), RHSC,
5972 return SelectInst::Create(LHSI->getOperand(0), Op1, Op2);
5977 return Changed ? &I : 0;
5980 Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
5981 bool Changed = SimplifyCompare(I);
5982 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
5983 const Type *Ty = Op0->getType();
5987 return ReplaceInstUsesWith(I, Context->getConstantInt(Type::Int1Ty,
5988 I.isTrueWhenEqual()));
5990 if (isa<UndefValue>(Op1)) // X icmp undef -> undef
5991 return ReplaceInstUsesWith(I, Context->getUndef(Type::Int1Ty));
5993 // icmp <global/alloca*/null>, <global/alloca*/null> - Global/Stack value
5994 // addresses never equal each other! We already know that Op0 != Op1.
5995 if ((isa<GlobalValue>(Op0) || isa<AllocaInst>(Op0) ||
5996 isa<ConstantPointerNull>(Op0)) &&
5997 (isa<GlobalValue>(Op1) || isa<AllocaInst>(Op1) ||
5998 isa<ConstantPointerNull>(Op1)))
5999 return ReplaceInstUsesWith(I, Context->getConstantInt(Type::Int1Ty,
6000 !I.isTrueWhenEqual()));
6002 // icmp's with boolean values can always be turned into bitwise operations
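// For example, "icmp ult i1 A, B" is true only when A is 0 and B is 1, which
// is exactly (~A) & B; the remaining predicates reduce the same way.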
6003 if (Ty == Type::Int1Ty) {
6004 switch (I.getPredicate()) {
6005 default: LLVM_UNREACHABLE("Invalid icmp instruction!");
6006 case ICmpInst::ICMP_EQ: { // icmp eq i1 A, B -> ~(A^B)
6007 Instruction *Xor = BinaryOperator::CreateXor(Op0, Op1, I.getName()+"tmp");
6008 InsertNewInstBefore(Xor, I);
6009 return BinaryOperator::CreateNot(Xor);
6011 case ICmpInst::ICMP_NE: // icmp ne i1 A, B -> A^B
6012 return BinaryOperator::CreateXor(Op0, Op1);
6014 case ICmpInst::ICMP_UGT:
6015 std::swap(Op0, Op1); // Change icmp ugt -> icmp ult
6017 case ICmpInst::ICMP_ULT:{ // icmp ult i1 A, B -> ~A & B
6018 Instruction *Not = BinaryOperator::CreateNot(Op0, I.getName()+"tmp");
6019 InsertNewInstBefore(Not, I);
6020 return BinaryOperator::CreateAnd(Not, Op1);
6022 case ICmpInst::ICMP_SGT:
6023 std::swap(Op0, Op1); // Change icmp sgt -> icmp slt
6025 case ICmpInst::ICMP_SLT: { // icmp slt i1 A, B -> A & ~B
6026 Instruction *Not = BinaryOperator::CreateNot(Op1, I.getName()+"tmp");
6027 InsertNewInstBefore(Not, I);
6028 return BinaryOperator::CreateAnd(Not, Op0);
6030 case ICmpInst::ICMP_UGE:
6031 std::swap(Op0, Op1); // Change icmp uge -> icmp ule
6033 case ICmpInst::ICMP_ULE: { // icmp ule i1 A, B -> ~A | B
6034 Instruction *Not = BinaryOperator::CreateNot(Op0, I.getName()+"tmp");
6035 InsertNewInstBefore(Not, I);
6036 return BinaryOperator::CreateOr(Not, Op1);
6038 case ICmpInst::ICMP_SGE:
6039 std::swap(Op0, Op1); // Change icmp sge -> icmp sle
6041 case ICmpInst::ICMP_SLE: { // icmp sle i1 A, B -> A | ~B
6042 Instruction *Not = BinaryOperator::CreateNot(Op1, I.getName()+"tmp");
6043 InsertNewInstBefore(Not, I);
6044 return BinaryOperator::CreateOr(Not, Op0);
6049 unsigned BitWidth = 0;
6051 BitWidth = TD->getTypeSizeInBits(Ty->getScalarType());
6052 else if (Ty->isIntOrIntVector())
6053 BitWidth = Ty->getScalarSizeInBits();
6055 bool isSignBit = false;
6057 // See if we are doing a comparison with a constant.
6058 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
6059 Value *A = 0, *B = 0;
6061 // (icmp ne/eq (sub A B) 0) -> (icmp ne/eq A, B)
6062 if (I.isEquality() && CI->isNullValue() &&
6063 match(Op0, m_Sub(m_Value(A), m_Value(B)), *Context)) {
6064 // (icmp cond A B) if cond is equality
6065 return new ICmpInst(*Context, I.getPredicate(), A, B);
6068 // If we have an icmp le or icmp ge instruction, turn it into the
6069 // appropriate icmp lt or icmp gt instruction. This allows us to rely on
6070 // them being folded in the code below.
6071 switch (I.getPredicate()) {
6073 case ICmpInst::ICMP_ULE:
6074 if (CI->isMaxValue(false)) // A <=u MAX -> TRUE
6075 return ReplaceInstUsesWith(I, Context->getConstantIntTrue());
6076 return new ICmpInst(*Context, ICmpInst::ICMP_ULT, Op0,
6077 AddOne(CI, Context));
6078 case ICmpInst::ICMP_SLE:
6079 if (CI->isMaxValue(true)) // A <=s MAX -> TRUE
6080 return ReplaceInstUsesWith(I, Context->getConstantIntTrue());
6081 return new ICmpInst(*Context, ICmpInst::ICMP_SLT, Op0,
6082 AddOne(CI, Context));
6083 case ICmpInst::ICMP_UGE:
6084 if (CI->isMinValue(false)) // A >=u MIN -> TRUE
6085 return ReplaceInstUsesWith(I, Context->getConstantIntTrue());
6086 return new ICmpInst(*Context, ICmpInst::ICMP_UGT, Op0,
6087 SubOne(CI, Context));
6088 case ICmpInst::ICMP_SGE:
6089 if (CI->isMinValue(true)) // A >=s MIN -> TRUE
6090 return ReplaceInstUsesWith(I, Context->getConstantIntTrue());
6091 return new ICmpInst(*Context, ICmpInst::ICMP_SGT, Op0,
6092 SubOne(CI, Context));
6095 // If this comparison is a normal comparison, it demands all
6096 // bits; if it is a sign bit comparison, it only demands the sign bit.
6098 isSignBit = isSignBitCheck(I.getPredicate(), CI, UnusedBit);
6101 // See if we can fold the comparison based on range information we can get
6102 // by checking whether bits are known to be zero or one in the input.
6103 if (BitWidth != 0) {
6104 APInt Op0KnownZero(BitWidth, 0), Op0KnownOne(BitWidth, 0);
6105 APInt Op1KnownZero(BitWidth, 0), Op1KnownOne(BitWidth, 0);
6107 if (SimplifyDemandedBits(I.getOperandUse(0),
6108 isSignBit ? APInt::getSignBit(BitWidth)
6109 : APInt::getAllOnesValue(BitWidth),
6110 Op0KnownZero, Op0KnownOne, 0))
6112 if (SimplifyDemandedBits(I.getOperandUse(1),
6113 APInt::getAllOnesValue(BitWidth),
6114 Op1KnownZero, Op1KnownOne, 0))
6117 // Given the known and unknown bits, compute a range that the LHS could be
6118 // in. Compute the Min, Max and RHS values based on the known bits. For the
6119 // EQ and NE we use unsigned values.
6120 APInt Op0Min(BitWidth, 0), Op0Max(BitWidth, 0);
6121 APInt Op1Min(BitWidth, 0), Op1Max(BitWidth, 0);
6122 if (ICmpInst::isSignedPredicate(I.getPredicate())) {
6123 ComputeSignedMinMaxValuesFromKnownBits(Op0KnownZero, Op0KnownOne,
6125 ComputeSignedMinMaxValuesFromKnownBits(Op1KnownZero, Op1KnownOne,
6128 ComputeUnsignedMinMaxValuesFromKnownBits(Op0KnownZero, Op0KnownOne,
6130 ComputeUnsignedMinMaxValuesFromKnownBits(Op1KnownZero, Op1KnownOne,
6134 // If Min and Max are known to be the same, then SimplifyDemandedBits
6135 // figured out that the LHS is a constant. Just constant fold this now so
6136 // that code below can assume that Min != Max.
6137 if (!isa<Constant>(Op0) && Op0Min == Op0Max)
6138 return new ICmpInst(*Context, I.getPredicate(),
6139 Context->getConstantInt(Op0Min), Op1);
6140 if (!isa<Constant>(Op1) && Op1Min == Op1Max)
6141 return new ICmpInst(*Context, I.getPredicate(), Op0,
6142 Context->getConstantInt(Op1Min));
6144 // Based on the range information we know about the LHS, see if we can
6145 // simplify this comparison. For example, (x&4) < 8 is always true.
6146 switch (I.getPredicate()) {
6147 default: LLVM_UNREACHABLE("Unknown icmp opcode!");
6148 case ICmpInst::ICMP_EQ:
6149 if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max))
6150 return ReplaceInstUsesWith(I, Context->getConstantIntFalse());
6152 case ICmpInst::ICMP_NE:
6153 if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max))
6154 return ReplaceInstUsesWith(I, Context->getConstantIntTrue());
6156 case ICmpInst::ICMP_ULT:
6157 if (Op0Max.ult(Op1Min)) // A <u B -> true if max(A) < min(B)
6158 return ReplaceInstUsesWith(I, Context->getConstantIntTrue());
6159 if (Op0Min.uge(Op1Max)) // A <u B -> false if min(A) >= max(B)
6160 return ReplaceInstUsesWith(I, Context->getConstantIntFalse());
6161 if (Op1Min == Op0Max) // A <u B -> A != B if max(A) == min(B)
6162 return new ICmpInst(*Context, ICmpInst::ICMP_NE, Op0, Op1);
6163 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
6164 if (Op1Max == Op0Min+1) // A <u C -> A == C-1 if min(A)+1 == C
6165 return new ICmpInst(*Context, ICmpInst::ICMP_EQ, Op0,
6166 SubOne(CI, Context));
6168 // (x <u 2147483648) -> (x >s -1) -> true if sign bit clear
6169 if (CI->isMinValue(true))
6170 return new ICmpInst(*Context, ICmpInst::ICMP_SGT, Op0,
6171 Context->getConstantIntAllOnesValue(Op0->getType()));
6174 case ICmpInst::ICMP_UGT:
6175 if (Op0Min.ugt(Op1Max)) // A >u B -> true if min(A) > max(B)
6176 return ReplaceInstUsesWith(I, Context->getConstantIntTrue());
6177 if (Op0Max.ule(Op1Min)) // A >u B -> false if max(A) <= min(B)
6178 return ReplaceInstUsesWith(I, Context->getConstantIntFalse());
6180 if (Op1Max == Op0Min) // A >u B -> A != B if min(A) == max(B)
6181 return new ICmpInst(*Context, ICmpInst::ICMP_NE, Op0, Op1);
6182 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
6183 if (Op1Min == Op0Max-1) // A >u C -> A == C+1 if max(A)-1 == C
6184 return new ICmpInst(*Context, ICmpInst::ICMP_EQ, Op0,
6185 AddOne(CI, Context));
6187 // (x >u 2147483647) -> (x <s 0) -> true if sign bit set
6188 if (CI->isMaxValue(true))
6189 return new ICmpInst(*Context, ICmpInst::ICMP_SLT, Op0,
6190 Context->getNullValue(Op0->getType()));
6193 case ICmpInst::ICMP_SLT:
6194 if (Op0Max.slt(Op1Min)) // A <s B -> true if max(A) < min(B)
6195 return ReplaceInstUsesWith(I, Context->getConstantIntTrue());
6196 if (Op0Min.sge(Op1Max)) // A <s B -> false if min(A) >= max(B)
6197 return ReplaceInstUsesWith(I, Context->getConstantIntFalse());
6198 if (Op1Min == Op0Max) // A <s B -> A != B if max(A) == min(B)
6199 return new ICmpInst(*Context, ICmpInst::ICMP_NE, Op0, Op1);
6200 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
6201 if (Op1Max == Op0Min+1) // A <s C -> A == C-1 if min(A)+1 == C
6202 return new ICmpInst(*Context, ICmpInst::ICMP_EQ, Op0,
6203 SubOne(CI, Context));
6206 case ICmpInst::ICMP_SGT:
6207 if (Op0Min.sgt(Op1Max)) // A >s B -> true if min(A) > max(B)
6208 return ReplaceInstUsesWith(I, Context->getConstantIntTrue());
6209 if (Op0Max.sle(Op1Min)) // A >s B -> false if max(A) <= min(B)
6210 return ReplaceInstUsesWith(I, Context->getConstantIntFalse());
6212 if (Op1Max == Op0Min) // A >s B -> A != B if min(A) == max(B)
6213 return new ICmpInst(*Context, ICmpInst::ICMP_NE, Op0, Op1);
6214 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
6215 if (Op1Min == Op0Max-1) // A >s C -> A == C+1 if max(A)-1 == C
6216 return new ICmpInst(*Context, ICmpInst::ICMP_EQ, Op0,
6217 AddOne(CI, Context));
6220 case ICmpInst::ICMP_SGE:
6221 assert(!isa<ConstantInt>(Op1) && "ICMP_SGE with ConstantInt not folded!");
6222 if (Op0Min.sge(Op1Max)) // A >=s B -> true if min(A) >= max(B)
6223 return ReplaceInstUsesWith(I, Context->getConstantIntTrue());
6224 if (Op0Max.slt(Op1Min)) // A >=s B -> false if max(A) < min(B)
6225 return ReplaceInstUsesWith(I, Context->getConstantIntFalse());
6227 case ICmpInst::ICMP_SLE:
6228 assert(!isa<ConstantInt>(Op1) && "ICMP_SLE with ConstantInt not folded!");
6229 if (Op0Max.sle(Op1Min)) // A <=s B -> true if max(A) <= min(B)
6230 return ReplaceInstUsesWith(I, Context->getConstantIntTrue());
6231 if (Op0Min.sgt(Op1Max)) // A <=s B -> false if min(A) > max(B)
6232 return ReplaceInstUsesWith(I, Context->getConstantIntFalse());
6234 case ICmpInst::ICMP_UGE:
6235 assert(!isa<ConstantInt>(Op1) && "ICMP_UGE with ConstantInt not folded!");
6236 if (Op0Min.uge(Op1Max)) // A >=u B -> true if min(A) >= max(B)
6237 return ReplaceInstUsesWith(I, Context->getConstantIntTrue());
6238 if (Op0Max.ult(Op1Min)) // A >=u B -> false if max(A) < min(B)
6239 return ReplaceInstUsesWith(I, Context->getConstantIntFalse());
6241 case ICmpInst::ICMP_ULE:
6242 assert(!isa<ConstantInt>(Op1) && "ICMP_ULE with ConstantInt not folded!");
6243 if (Op0Max.ule(Op1Min)) // A <=u B -> true if max(A) <= min(B)
6244 return ReplaceInstUsesWith(I, Context->getConstantIntTrue());
6245 if (Op0Min.ugt(Op1Max)) // A <=u B -> false if min(A) > max(B)
6246 return ReplaceInstUsesWith(I, Context->getConstantIntFalse());
6250 // Turn a signed comparison into an unsigned one if both operands
6251 // are known to have the same sign.
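// (When both sign bits are known clear, or both known set, the signed and
// unsigned orderings of the two values coincide.)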
6252 if (I.isSignedPredicate() &&
6253 ((Op0KnownZero.isNegative() && Op1KnownZero.isNegative()) ||
6254 (Op0KnownOne.isNegative() && Op1KnownOne.isNegative())))
6255 return new ICmpInst(*Context, I.getUnsignedPredicate(), Op0, Op1);
6258 // Test if the ICmpInst instruction is used exclusively by a select as
6259 // part of a minimum or maximum operation. If so, refrain from doing
6260 // any other folding. This helps out other analyses which understand
6261 // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
6262 // and CodeGen. And in this case, at least one of the comparison
6263 // operands has at least one user besides the compare (the select),
6264 // which would often largely negate the benefit of folding anyway.
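     // For instance, '%m = select (icmp slt %a, %b), %a, %b' is a canonical signed
     // minimum; rewriting its icmp here would only obscure that idiom.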
6266 if (SelectInst *SI = dyn_cast<SelectInst>(*I.use_begin()))
6267 if ((SI->getOperand(1) == Op0 && SI->getOperand(2) == Op1) ||
6268 (SI->getOperand(2) == Op0 && SI->getOperand(1) == Op1))
6271 // See if we are doing a comparison between a constant and an instruction that
6272 // can be folded into the comparison.
6273 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
6274 // Since the RHS is a ConstantInt (CI), if the left hand side is an
6275 // instruction, see if that instruction also has constants so that the
6276 // instruction can be folded into the icmp
6277 if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
6278 if (Instruction *Res = visitICmpInstWithInstAndIntCst(I, LHSI, CI))
6282 // Handle icmp with constant (but not simple integer constant) RHS
6283 if (Constant *RHSC = dyn_cast<Constant>(Op1)) {
6284 if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
6285 switch (LHSI->getOpcode()) {
6286 case Instruction::GetElementPtr:
6287 if (RHSC->isNullValue()) {
6288 // icmp pred GEP (P, int 0, int 0, int 0), null -> icmp pred P, null
6289 bool isAllZeros = true;
6290 for (unsigned i = 1, e = LHSI->getNumOperands(); i != e; ++i)
6291 if (!isa<Constant>(LHSI->getOperand(i)) ||
6292 !cast<Constant>(LHSI->getOperand(i))->isNullValue()) {
6297 return new ICmpInst(*Context, I.getPredicate(), LHSI->getOperand(0),
6298 Context->getNullValue(LHSI->getOperand(0)->getType()));
6302 case Instruction::PHI:
6303 // Only fold icmp into the PHI if the phi and icmp are in the same
6304 // block. If in the same block, we're encouraging jump threading. If
6305 // not, we are just pessimizing the code by making an i1 phi.
6306 if (LHSI->getParent() == I.getParent())
6307 if (Instruction *NV = FoldOpIntoPhi(I))
6310 case Instruction::Select: {
6311 // If either operand of the select is a constant, we can fold the
6312 // comparison into the select arms, which will cause one to be
6313 // constant folded and the select turned into a bitwise or.
6314 Value *Op1 = 0, *Op2 = 0;
6315 if (LHSI->hasOneUse()) {
6316 if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1))) {
6317 // Fold the known value into the constant operand.
6318 Op1 = Context->getConstantExprICmp(I.getPredicate(), C, RHSC);
6319 // Insert a new ICmp of the other select operand.
6320 Op2 = InsertNewInstBefore(new ICmpInst(*Context, I.getPredicate(),
6321 LHSI->getOperand(2), RHSC,
6323 } else if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2))) {
6324 // Fold the known value into the constant operand.
6325 Op2 = Context->getConstantExprICmp(I.getPredicate(), C, RHSC);
6326 // Insert a new ICmp of the other select operand.
6327 Op1 = InsertNewInstBefore(new ICmpInst(*Context, I.getPredicate(),
6328 LHSI->getOperand(1), RHSC,
6334 return SelectInst::Create(LHSI->getOperand(0), Op1, Op2);
6337 case Instruction::Malloc:
6338 // If we have (malloc != null), and if the malloc has a single use, we
6339 // can assume it is successful and remove the malloc.
6340 if (LHSI->hasOneUse() && isa<ConstantPointerNull>(RHSC)) {
6341 AddToWorkList(LHSI);
6342 return ReplaceInstUsesWith(I, Context->getConstantInt(Type::Int1Ty,
6343 !I.isTrueWhenEqual()));
6349 // If we can optimize a 'icmp GEP, P' or 'icmp P, GEP', do so now.
6350 if (User *GEP = dyn_castGetElementPtr(Op0))
6351 if (Instruction *NI = FoldGEPICmp(GEP, Op1, I.getPredicate(), I))
6353 if (User *GEP = dyn_castGetElementPtr(Op1))
6354 if (Instruction *NI = FoldGEPICmp(GEP, Op0,
6355 ICmpInst::getSwappedPredicate(I.getPredicate()), I))
6358 // Test to see if the operands of the icmp are casted versions of other
6359 // values. If the ptr->ptr cast can be stripped off both arguments, we do so now.
6361 if (BitCastInst *CI = dyn_cast<BitCastInst>(Op0)) {
6362 if (isa<PointerType>(Op0->getType()) &&
6363 (isa<Constant>(Op1) || isa<BitCastInst>(Op1))) {
6364 // We keep moving the cast from the left operand over to the right
6365 // operand, where it can often be eliminated completely.
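     // For instance, 'icmp eq (bitcast i32* %p to i8*), (bitcast i32* %q to i8*)'
     // can become 'icmp eq i32* %p, %q' once both casts are stripped.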
6366 Op0 = CI->getOperand(0);
6368 // If operand #1 is a bitcast instruction, it must also be a ptr->ptr cast
6369 // so eliminate it as well.
6370 if (BitCastInst *CI2 = dyn_cast<BitCastInst>(Op1))
6371 Op1 = CI2->getOperand(0);
6373 // If Op1 is a constant, we can fold the cast into the constant.
6374 if (Op0->getType() != Op1->getType()) {
6375 if (Constant *Op1C = dyn_cast<Constant>(Op1)) {
6376 Op1 = Context->getConstantExprBitCast(Op1C, Op0->getType());
6378 // Otherwise, cast the RHS right before the icmp
6379 Op1 = InsertBitCastBefore(Op1, Op0->getType(), I);
6382 return new ICmpInst(*Context, I.getPredicate(), Op0, Op1);
6386 if (isa<CastInst>(Op0)) {
6387 // Handle the special case of: icmp (cast bool to X), <cst>
6388 // This comes up when you have code like "int X = A < B; if (X) ...".
6391 // For generality, we handle any zero-extension of any operand comparison
6392 // with a constant or another cast from the same type.
6393 if (isa<ConstantInt>(Op1) || isa<CastInst>(Op1))
6394 if (Instruction *R = visitICmpInstWithCastAndCast(I))
6398 // See if it's the same type of instruction on the left and right.
6399 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
6400 if (BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1)) {
6401 if (Op0I->getOpcode() == Op1I->getOpcode() && Op0I->hasOneUse() &&
6402 Op1I->hasOneUse() && Op0I->getOperand(1) == Op1I->getOperand(1)) {
6403 switch (Op0I->getOpcode()) {
6405 case Instruction::Add:
6406 case Instruction::Sub:
6407 case Instruction::Xor:
6408 if (I.isEquality()) // a+x icmp eq/ne b+x --> a icmp b
6409 return new ICmpInst(*Context, I.getPredicate(), Op0I->getOperand(0),
6410 Op1I->getOperand(0));
6411 // icmp u/s (a ^ signbit), (b ^ signbit) --> icmp s/u a, b
6412 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
6413 if (CI->getValue().isSignBit()) {
6414 ICmpInst::Predicate Pred = I.isSignedPredicate()
6415 ? I.getUnsignedPredicate()
6416 : I.getSignedPredicate();
6417 return new ICmpInst(*Context, Pred, Op0I->getOperand(0),
6418 Op1I->getOperand(0));
6421 if (CI->getValue().isMaxSignedValue()) {
6422 ICmpInst::Predicate Pred = I.isSignedPredicate()
6423 ? I.getUnsignedPredicate()
6424 : I.getSignedPredicate();
6425 Pred = I.getSwappedPredicate(Pred);
6426 return new ICmpInst(*Context, Pred, Op0I->getOperand(0),
6427 Op1I->getOperand(0));
6431 case Instruction::Mul:
6432 if (!I.isEquality())
6435 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
6436 // a * Cst icmp eq/ne b * Cst --> a & Mask icmp b & Mask
6437 // Mask = -1 >> count-trailing-zeros(Cst).
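     // For instance, on i32, 'a*6 == b*6' (6 has one trailing zero bit) becomes
     // '(a & 0x7FFFFFFF) == (b & 0x7FFFFFFF)', comparing only the low 31 bits.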
6438 if (!CI->isZero() && !CI->isOne()) {
6439 const APInt &AP = CI->getValue();
6440 ConstantInt *Mask = Context->getConstantInt(
6441 APInt::getLowBitsSet(AP.getBitWidth(),
6442 AP.getBitWidth() -
6443 AP.countTrailingZeros()));
6444 Instruction *And1 = BinaryOperator::CreateAnd(Op0I->getOperand(0),
6446 Instruction *And2 = BinaryOperator::CreateAnd(Op1I->getOperand(0),
6448 InsertNewInstBefore(And1, I);
6449 InsertNewInstBefore(And2, I);
6450 return new ICmpInst(*Context, I.getPredicate(), And1, And2);
6459 // ~x < ~y --> y < x
6461 if (match(Op0, m_Not(m_Value(A)), *Context) &&
6462 match(Op1, m_Not(m_Value(B)), *Context))
6463 return new ICmpInst(*Context, I.getPredicate(), B, A);
6466 if (I.isEquality()) {
6467 Value *A, *B, *C, *D;
6469 // -x == -y --> x == y
6470 if (match(Op0, m_Neg(m_Value(A)), *Context) &&
6471 match(Op1, m_Neg(m_Value(B)), *Context))
6472 return new ICmpInst(*Context, I.getPredicate(), A, B);
6474 if (match(Op0, m_Xor(m_Value(A), m_Value(B)), *Context)) {
6475 if (A == Op1 || B == Op1) { // (A^B) == A -> B == 0
6476 Value *OtherVal = A == Op1 ? B : A;
6477 return new ICmpInst(*Context, I.getPredicate(), OtherVal,
6478 Context->getNullValue(A->getType()));
6481 if (match(Op1, m_Xor(m_Value(C), m_Value(D)), *Context)) {
6482 // A^c1 == C^c2 --> A == C^(c1^c2)
6483 ConstantInt *C1, *C2;
6484 if (match(B, m_ConstantInt(C1), *Context) &&
6485 match(D, m_ConstantInt(C2), *Context) && Op1->hasOneUse()) {
6487 Context->getConstantInt(C1->getValue() ^ C2->getValue());
6488 Instruction *Xor = BinaryOperator::CreateXor(C, NC, "tmp");
6489 return new ICmpInst(*Context, I.getPredicate(), A,
6490 InsertNewInstBefore(Xor, I));
6493 // A^B == A^D -> B == D
6494 if (A == C) return new ICmpInst(*Context, I.getPredicate(), B, D);
6495 if (A == D) return new ICmpInst(*Context, I.getPredicate(), B, C);
6496 if (B == C) return new ICmpInst(*Context, I.getPredicate(), A, D);
6497 if (B == D) return new ICmpInst(*Context, I.getPredicate(), A, C);
6501 if (match(Op1, m_Xor(m_Value(A), m_Value(B)), *Context) &&
6502 (A == Op0 || B == Op0)) {
6503 // A == (A^B) -> B == 0
6504 Value *OtherVal = A == Op0 ? B : A;
6505 return new ICmpInst(*Context, I.getPredicate(), OtherVal,
6506 Context->getNullValue(A->getType()));
6509 // (A-B) == A -> B == 0
6510 if (match(Op0, m_Sub(m_Specific(Op1), m_Value(B)), *Context))
6511 return new ICmpInst(*Context, I.getPredicate(), B,
6512 Context->getNullValue(B->getType()));
6514 // A == (A-B) -> B == 0
6515 if (match(Op1, m_Sub(m_Specific(Op0), m_Value(B)), *Context))
6516 return new ICmpInst(*Context, I.getPredicate(), B,
6517 Context->getNullValue(B->getType()));
6519 // (X&Z) == (Y&Z) -> (X^Y) & Z == 0
6520 if (Op0->hasOneUse() && Op1->hasOneUse() &&
6521 match(Op0, m_And(m_Value(A), m_Value(B)), *Context) &&
6522 match(Op1, m_And(m_Value(C), m_Value(D)), *Context)) {
6523 Value *X = 0, *Y = 0, *Z = 0;
6526 X = B; Y = D; Z = A;
6527 } else if (A == D) {
6528 X = B; Y = C; Z = A;
6529 } else if (B == C) {
6530 X = A; Y = D; Z = B;
6531 } else if (B == D) {
6532 X = A; Y = C; Z = B;
6535 if (X) { // Build (X^Y) & Z
6536 Op1 = InsertNewInstBefore(BinaryOperator::CreateXor(X, Y, "tmp"), I);
6537 Op1 = InsertNewInstBefore(BinaryOperator::CreateAnd(Op1, Z, "tmp"), I);
6538 I.setOperand(0, Op1);
6539 I.setOperand(1, Context->getNullValue(Op1->getType()));
6544 return Changed ? &I : 0;
6548 /// FoldICmpDivCst - Fold "icmp pred ([su]div X, DivRHS), CmpRHS" where DivRHS
6549 /// and CmpRHS are both known to be integer constants.
6550 Instruction *InstCombiner::FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
6551 ConstantInt *DivRHS) {
6552 ConstantInt *CmpRHS = cast<ConstantInt>(ICI.getOperand(1));
6553 const APInt &CmpRHSV = CmpRHS->getValue();
6555 // FIXME: If the operand types don't match the type of the divide
6556 // then don't attempt this transform. The code below doesn't have the
6557 // logic to deal with a signed divide and an unsigned compare (and
6558 // vice versa). This is because (x /s C1) <s C2 produces different
6559 // results than (x /s C1) <u C2 or (x /u C1) <s C2 or even
6560 // (x /u C1) <u C2. Simply casting the operands and result won't
6561 // work. :( The if statement below tests that condition and bails if it occurs.
6563 bool DivIsSigned = DivI->getOpcode() == Instruction::SDiv;
6564 if (!ICI.isEquality() && DivIsSigned != ICI.isSignedPredicate())
6566 if (DivRHS->isZero())
6567 return 0; // The ProdOV computation fails on divide by zero.
6568 if (DivIsSigned && DivRHS->isAllOnesValue())
6569 return 0; // The overflow computation also screws up here
6570 if (DivRHS->isOne())
6571 return 0; // Not worth bothering, and eliminates some funny cases
6574 // Compute Prod = CI * DivRHS. We are essentially solving an equation
6575 // of form X/C1=C2. We solve for X by multiplying C1 (DivRHS) and
6576 // C2 (CI). By solving for X we can turn this into a range check
6577 // instead of computing a divide.
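     // For instance, 'X /u 5 == 3' gives Prod = 15, and the equality holds exactly
     // when X lies in the half-open interval [15, 20).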
6578 Constant *Prod = Context->getConstantExprMul(CmpRHS, DivRHS);
6580 // Determine if the product overflows by seeing if the product is
6581 // not equal to the divide. Make sure we do the same kind of divide
6582 // as in the LHS instruction that we're folding.
6583 bool ProdOV = (DivIsSigned ? Context->getConstantExprSDiv(Prod, DivRHS) :
6584 Context->getConstantExprUDiv(Prod, DivRHS)) != CmpRHS;
6586 // Get the ICmp opcode
6587 ICmpInst::Predicate Pred = ICI.getPredicate();
6589 // Figure out the interval that is being checked. For example, a comparison
6590 // like "X /u 5 == 0" is really checking that X is in the interval [0, 5).
6591 // Compute this interval based on the constants involved and the signedness of
6592 // the compare/divide. This computes a half-open interval, keeping track of
6593 // whether either value in the interval overflows. After analysis each
6594 // overflow variable is set to 0 if its corresponding bound variable is valid,
6595 // -1 if overflowed off the bottom end, or +1 if overflowed off the top end.
6596 int LoOverflow = 0, HiOverflow = 0;
6597 Constant *LoBound = 0, *HiBound = 0;
6599 if (!DivIsSigned) { // udiv
6600 // e.g. X/5 op 3 --> [15, 20)
6602 HiOverflow = LoOverflow = ProdOV;
6604 HiOverflow = AddWithOverflow(HiBound, LoBound, DivRHS, Context, false);
6605 } else if (DivRHS->getValue().isStrictlyPositive()) { // Divisor is > 0.
6606 if (CmpRHSV == 0) { // (X / pos) op 0
6607 // Can't overflow. e.g. X/2 op 0 --> [-1, 2)
6608 LoBound = cast<ConstantInt>(Context->getConstantExprNeg(SubOne(DivRHS,
6611 } else if (CmpRHSV.isStrictlyPositive()) { // (X / pos) op pos
6612 LoBound = Prod; // e.g. X/5 op 3 --> [15, 20)
6613 HiOverflow = LoOverflow = ProdOV;
6615 HiOverflow = AddWithOverflow(HiBound, Prod, DivRHS, Context, true);
6616 } else { // (X / pos) op neg
6617 // e.g. X/5 op -3 --> [-15-4, -15+1) --> [-19, -14)
6618 HiBound = AddOne(Prod, Context);
6619 LoOverflow = HiOverflow = ProdOV ? -1 : 0;
6621 ConstantInt* DivNeg =
6622 cast<ConstantInt>(Context->getConstantExprNeg(DivRHS));
6623 LoOverflow = AddWithOverflow(LoBound, HiBound, DivNeg, Context,
6627 } else if (DivRHS->getValue().isNegative()) { // Divisor is < 0.
6628 if (CmpRHSV == 0) { // (X / neg) op 0
6629 // e.g. X/-5 op 0 --> [-4, 5)
6630 LoBound = AddOne(DivRHS, Context);
6631 HiBound = cast<ConstantInt>(Context->getConstantExprNeg(DivRHS));
6632 if (HiBound == DivRHS) { // -INTMIN = INTMIN
6633 HiOverflow = 1; // [INTMIN+1, overflow)
6634 HiBound = 0; // e.g. X/INTMIN = 0 --> X > INTMIN
6636 } else if (CmpRHSV.isStrictlyPositive()) { // (X / neg) op pos
6637 // e.g. X/-5 op 3 --> [-19, -14)
6638 HiBound = AddOne(Prod, Context);
6639 HiOverflow = LoOverflow = ProdOV ? -1 : 0;
6641 LoOverflow = AddWithOverflow(LoBound, HiBound,
6642 DivRHS, Context, true) ? -1 : 0;
6643 } else { // (X / neg) op neg
6644 LoBound = Prod; // e.g. X/-5 op -3 --> [15, 20)
6645 LoOverflow = HiOverflow = ProdOV;
6647 HiOverflow = SubWithOverflow(HiBound, Prod, DivRHS, Context, true);
6650 // Dividing by a negative swaps the condition. LT <-> GT
6651 Pred = ICmpInst::getSwappedPredicate(Pred);
6654 Value *X = DivI->getOperand(0);
6656 default: LLVM_UNREACHABLE("Unhandled icmp opcode!");
6657 case ICmpInst::ICMP_EQ:
6658 if (LoOverflow && HiOverflow)
6659 return ReplaceInstUsesWith(ICI, Context->getConstantIntFalse());
6660 else if (HiOverflow)
6661 return new ICmpInst(*Context, DivIsSigned ? ICmpInst::ICMP_SGE :
6662 ICmpInst::ICMP_UGE, X, LoBound);
6663 else if (LoOverflow)
6664 return new ICmpInst(*Context, DivIsSigned ? ICmpInst::ICMP_SLT :
6665 ICmpInst::ICMP_ULT, X, HiBound);
6667 return InsertRangeTest(X, LoBound, HiBound, DivIsSigned, true, ICI);
6668 case ICmpInst::ICMP_NE:
6669 if (LoOverflow && HiOverflow)
6670 return ReplaceInstUsesWith(ICI, Context->getConstantIntTrue());
6671 else if (HiOverflow)
6672 return new ICmpInst(*Context, DivIsSigned ? ICmpInst::ICMP_SLT :
6673 ICmpInst::ICMP_ULT, X, LoBound);
6674 else if (LoOverflow)
6675 return new ICmpInst(*Context, DivIsSigned ? ICmpInst::ICMP_SGE :
6676 ICmpInst::ICMP_UGE, X, HiBound);
6678 return InsertRangeTest(X, LoBound, HiBound, DivIsSigned, false, ICI);
6679 case ICmpInst::ICMP_ULT:
6680 case ICmpInst::ICMP_SLT:
6681 if (LoOverflow == +1) // Low bound is greater than input range.
6682 return ReplaceInstUsesWith(ICI, Context->getConstantIntTrue());
6683 if (LoOverflow == -1) // Low bound is less than input range.
6684 return ReplaceInstUsesWith(ICI, Context->getConstantIntFalse());
6685 return new ICmpInst(*Context, Pred, X, LoBound);
6686 case ICmpInst::ICMP_UGT:
6687 case ICmpInst::ICMP_SGT:
6688 if (HiOverflow == +1) // High bound greater than input range.
6689 return ReplaceInstUsesWith(ICI, Context->getConstantIntFalse());
6690 else if (HiOverflow == -1) // High bound less than input range.
6691 return ReplaceInstUsesWith(ICI, Context->getConstantIntTrue());
6692 if (Pred == ICmpInst::ICMP_UGT)
6693 return new ICmpInst(*Context, ICmpInst::ICMP_UGE, X, HiBound);
6695 return new ICmpInst(*Context, ICmpInst::ICMP_SGE, X, HiBound);
6700 /// visitICmpInstWithInstAndIntCst - Handle "icmp (instr, intcst)".
6702 Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
6705 const APInt &RHSV = RHS->getValue();
6707 switch (LHSI->getOpcode()) {
6708 case Instruction::Trunc:
6709 if (ICI.isEquality() && LHSI->hasOneUse()) {
6710 // Simplify icmp eq (trunc x to i8), 42 -> icmp eq x, 42|highbits if all
6711 // of the high bits truncated out of x are known.
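     // For instance, if the 24 bits truncated from x are known to be 0x000001,
     // then 'icmp eq (trunc i32 %x to i8), 5' becomes 'icmp eq i32 %x, 0x105'.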
6712 unsigned DstBits = LHSI->getType()->getPrimitiveSizeInBits(),
6713 SrcBits = LHSI->getOperand(0)->getType()->getPrimitiveSizeInBits();
6714 APInt Mask(APInt::getHighBitsSet(SrcBits, SrcBits-DstBits));
6715 APInt KnownZero(SrcBits, 0), KnownOne(SrcBits, 0);
6716 ComputeMaskedBits(LHSI->getOperand(0), Mask, KnownZero, KnownOne);
6718 // If all the high bits are known, we can do this xform.
6719 if ((KnownZero|KnownOne).countLeadingOnes() >= SrcBits-DstBits) {
6720 // Pull in the high bits from known-ones set.
6721 APInt NewRHS(RHS->getValue());
6722 NewRHS.zext(SrcBits);
6723 NewRHS |= KnownOne;
6724 return new ICmpInst(*Context, ICI.getPredicate(), LHSI->getOperand(0),
6725 Context->getConstantInt(NewRHS));
6730 case Instruction::Xor: // (icmp pred (xor X, XorCST), CI)
6731 if (ConstantInt *XorCST = dyn_cast<ConstantInt>(LHSI->getOperand(1))) {
6732 // If this is a comparison that tests the signbit (X < 0) or (X > -1), fold the xor into the comparison.
6734 if ((ICI.getPredicate() == ICmpInst::ICMP_SLT && RHSV == 0) ||
6735 (ICI.getPredicate() == ICmpInst::ICMP_SGT && RHSV.isAllOnesValue())) {
6736 Value *CompareVal = LHSI->getOperand(0);
6738 // If the sign bit of the XorCST is not set, there is no change to
6739 // the operation, just stop using the Xor.
6740 if (!XorCST->getValue().isNegative()) {
6741 ICI.setOperand(0, CompareVal);
6742 AddToWorkList(LHSI);
6746 // Was the old condition true if the operand is positive?
6747 bool isTrueIfPositive = ICI.getPredicate() == ICmpInst::ICMP_SGT;
6749 // If so, the new one isn't.
6750 isTrueIfPositive ^= true;
6752 if (isTrueIfPositive)
6753 return new ICmpInst(*Context, ICmpInst::ICMP_SGT, CompareVal,
6754 SubOne(RHS, Context));
6756 return new ICmpInst(*Context, ICmpInst::ICMP_SLT, CompareVal,
6757 AddOne(RHS, Context));
6760 if (LHSI->hasOneUse()) {
6761 // (icmp u/s (xor A SignBit), C) -> (icmp s/u A, (xor C SignBit))
6762 if (!ICI.isEquality() && XorCST->getValue().isSignBit()) {
6763 const APInt &SignBit = XorCST->getValue();
6764 ICmpInst::Predicate Pred = ICI.isSignedPredicate()
6765 ? ICI.getUnsignedPredicate()
6766 : ICI.getSignedPredicate();
6767 return new ICmpInst(*Context, Pred, LHSI->getOperand(0),
6768 Context->getConstantInt(RHSV ^ SignBit));
6771 // (icmp u/s (xor A ~SignBit), C) -> (icmp s/u (xor C ~SignBit), A)
6772 if (!ICI.isEquality() && XorCST->getValue().isMaxSignedValue()) {
6773 const APInt &NotSignBit = XorCST->getValue();
6774 ICmpInst::Predicate Pred = ICI.isSignedPredicate()
6775 ? ICI.getUnsignedPredicate()
6776 : ICI.getSignedPredicate();
6777 Pred = ICI.getSwappedPredicate(Pred);
6778 return new ICmpInst(*Context, Pred, LHSI->getOperand(0),
6779 Context->getConstantInt(RHSV ^ NotSignBit));
6784 case Instruction::And: // (icmp pred (and X, AndCST), RHS)
6785 if (LHSI->hasOneUse() && isa<ConstantInt>(LHSI->getOperand(1)) &&
6786 LHSI->getOperand(0)->hasOneUse()) {
6787 ConstantInt *AndCST = cast<ConstantInt>(LHSI->getOperand(1));
6789 // If the LHS is an AND of a truncating cast, we can widen the
6790 // and/compare to be the input width without changing the value
6791 // produced, eliminating a cast.
6792 if (TruncInst *Cast = dyn_cast<TruncInst>(LHSI->getOperand(0))) {
6793 // We can do this transformation if either the AND constant does not
6794 // have its sign bit set or if it is an equality comparison.
6795 // Extending a relational comparison when we're checking the sign
6796 // bit would not work.
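     // For instance, 'icmp ult (and (trunc i32 %x to i8), 15), 9' can be widened
     // to 'icmp ult (and i32 %x, 15), 9', removing the trunc.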
6797 if (Cast->hasOneUse() &&
6798 (ICI.isEquality() ||
6799 (AndCST->getValue().isNonNegative() && RHSV.isNonNegative()))) {
6801 cast<IntegerType>(Cast->getOperand(0)->getType())->getBitWidth();
6802 APInt NewCST = AndCST->getValue();
6803 NewCST.zext(BitWidth);
6805 NewCI.zext(BitWidth);
6806 Instruction *NewAnd =
6807 BinaryOperator::CreateAnd(Cast->getOperand(0),
6808 Context->getConstantInt(NewCST),LHSI->getName());
6809 InsertNewInstBefore(NewAnd, ICI);
6810 return new ICmpInst(*Context, ICI.getPredicate(), NewAnd,
6811 Context->getConstantInt(NewCI));
6815 // If this is: (X >> C1) & C2 != C3 (where any shift and any compare
6816 // could exist), turn it into (X & (C2 << C1)) != (C3 << C1). This
6817 // happens a LOT in code produced by the C front-end, for bitfield access.
6819 BinaryOperator *Shift = dyn_cast<BinaryOperator>(LHSI->getOperand(0));
6820 if (Shift && !Shift->isShift())
6824 ShAmt = Shift ? dyn_cast<ConstantInt>(Shift->getOperand(1)) : 0;
6825 const Type *Ty = Shift ? Shift->getType() : 0; // Type of the shift.
6826 const Type *AndTy = AndCST->getType(); // Type of the and.
6828 // We can fold this as long as we can't shift unknown bits
6829 // into the mask. This can only happen with signed shift
6830 // rights, as they sign-extend.
6832 bool CanFold = Shift->isLogicalShift();
6834 // To test for the bad case of the signed shr, see if any
6835 // of the bits shifted in could be tested after the mask.
6836 uint32_t TyBits = Ty->getPrimitiveSizeInBits();
6837 int ShAmtVal = TyBits - ShAmt->getLimitedValue(TyBits);
6839 uint32_t BitWidth = AndTy->getPrimitiveSizeInBits();
6840 if ((APInt::getHighBitsSet(BitWidth, BitWidth-ShAmtVal) &
6841 AndCST->getValue()) == 0)
6847 if (Shift->getOpcode() == Instruction::Shl)
6848 NewCst = Context->getConstantExprLShr(RHS, ShAmt);
6850 NewCst = Context->getConstantExprShl(RHS, ShAmt);
6852 // Check to see if we are shifting out any of the bits being compared.
6854 if (Context->getConstantExpr(Shift->getOpcode(),
6855 NewCst, ShAmt) != RHS) {
6856 // If we shifted bits out, the fold is not going to work out.
6857 // As a special case, check to see if this means that the
6858 // result is always true or false now.
6859 if (ICI.getPredicate() == ICmpInst::ICMP_EQ)
6860 return ReplaceInstUsesWith(ICI, Context->getConstantIntFalse());
6861 if (ICI.getPredicate() == ICmpInst::ICMP_NE)
6862 return ReplaceInstUsesWith(ICI, Context->getConstantIntTrue());
6864 ICI.setOperand(1, NewCst);
6865 Constant *NewAndCST;
6866 if (Shift->getOpcode() == Instruction::Shl)
6867 NewAndCST = Context->getConstantExprLShr(AndCST, ShAmt);
6869 NewAndCST = Context->getConstantExprShl(AndCST, ShAmt);
6870 LHSI->setOperand(1, NewAndCST);
6871 LHSI->setOperand(0, Shift->getOperand(0));
6872 AddToWorkList(Shift); // Shift is dead.
6873 AddUsesToWorkList(ICI);
6879 // Turn ((X >> Y) & C) == 0 into (X & (C << Y)) == 0. The latter is
6880 // preferable because it allows the C<<Y expression to be hoisted out
6881 // of a loop if Y is invariant and X is not.
6882 if (Shift && Shift->hasOneUse() && RHSV == 0 &&
6883 ICI.isEquality() && !Shift->isArithmeticShift() &&
6884 !isa<Constant>(Shift->getOperand(0))) {
6887 if (Shift->getOpcode() == Instruction::LShr) {
6888 NS = BinaryOperator::CreateShl(AndCST,
6889 Shift->getOperand(1), "tmp");
6891 // Insert a logical shift.
6892 NS = BinaryOperator::CreateLShr(AndCST,
6893 Shift->getOperand(1), "tmp");
6895 InsertNewInstBefore(cast<Instruction>(NS), ICI);
6897 // Compute X & (C << Y).
6898 Instruction *NewAnd =
6899 BinaryOperator::CreateAnd(Shift->getOperand(0), NS, LHSI->getName());
6900 InsertNewInstBefore(NewAnd, ICI);
6902 ICI.setOperand(0, NewAnd);
6908 case Instruction::Shl: { // (icmp pred (shl X, ShAmt), CI)
6909 ConstantInt *ShAmt = dyn_cast<ConstantInt>(LHSI->getOperand(1));
6912 uint32_t TypeBits = RHSV.getBitWidth();
6914 // Check that the shift amount is in range. If not, don't perform
6915 // undefined shifts. When the shift is visited it will be simplified.
6917 if (ShAmt->uge(TypeBits))
6920 if (ICI.isEquality()) {
6921 // If we are comparing against bits always shifted out, the
6922 // comparison cannot succeed.
6924 Context->getConstantExprShl(Context->getConstantExprLShr(RHS, ShAmt),
6926 if (Comp != RHS) {// Comparing against a bit that we know is zero.
6927 bool IsICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;
6928 Constant *Cst = Context->getConstantInt(Type::Int1Ty, IsICMP_NE);
6929 return ReplaceInstUsesWith(ICI, Cst);
6932 if (LHSI->hasOneUse()) {
6933 // Otherwise strength reduce the shift into an and.
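     // For instance, 'icmp eq (shl i32 %x, 3), 40' becomes
     // 'icmp eq (and i32 %x, 0x1FFFFFFF), 5'.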
6934 uint32_t ShAmtVal = (uint32_t)ShAmt->getLimitedValue(TypeBits);
6936 Context->getConstantInt(APInt::getLowBitsSet(TypeBits,
6937 TypeBits-ShAmtVal));
6940 BinaryOperator::CreateAnd(LHSI->getOperand(0),
6941 Mask, LHSI->getName()+".mask");
6942 Value *And = InsertNewInstBefore(AndI, ICI);
6943 return new ICmpInst(*Context, ICI.getPredicate(), And,
6944 Context->getConstantInt(RHSV.lshr(ShAmtVal)));
6948 // Otherwise, if this is a comparison of the sign bit, simplify to and/test.
6949 bool TrueIfSigned = false;
6950 if (LHSI->hasOneUse() &&
6951 isSignBitCheck(ICI.getPredicate(), RHS, TrueIfSigned)) {
6952 // (X << 31) <s 0 --> (X&1) != 0
6953 Constant *Mask = Context->getConstantInt(APInt(TypeBits, 1) <<
6954 (TypeBits-ShAmt->getZExtValue()-1));
6956 BinaryOperator::CreateAnd(LHSI->getOperand(0),
6957 Mask, LHSI->getName()+".mask");
6958 Value *And = InsertNewInstBefore(AndI, ICI);
6960 return new ICmpInst(*Context,
6961 TrueIfSigned ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ,
6962 And, Context->getNullValue(And->getType()));
6967 case Instruction::LShr: // (icmp pred (shr X, ShAmt), CI)
6968 case Instruction::AShr: {
6969 // Only handle equality comparisons of shift-by-constant.
6970 ConstantInt *ShAmt = dyn_cast<ConstantInt>(LHSI->getOperand(1));
6971 if (!ShAmt || !ICI.isEquality()) break;
6973 // Check that the shift amount is in range. If not, don't perform
6974 // undefined shifts. When the shift is visited it will be simplified.
6976 uint32_t TypeBits = RHSV.getBitWidth();
6977 if (ShAmt->uge(TypeBits))
6980 uint32_t ShAmtVal = (uint32_t)ShAmt->getLimitedValue(TypeBits);
6982 // If we are comparing against bits always shifted out, the
6983 // comparison cannot succeed.
6984 APInt Comp = RHSV << ShAmtVal;
6985 if (LHSI->getOpcode() == Instruction::LShr)
6986 Comp = Comp.lshr(ShAmtVal);
6988 Comp = Comp.ashr(ShAmtVal);
6990 if (Comp != RHSV) { // Comparing against a bit that we know is zero.
6991 bool IsICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;
6992 Constant *Cst = Context->getConstantInt(Type::Int1Ty, IsICMP_NE);
6993 return ReplaceInstUsesWith(ICI, Cst);
6996 // Otherwise, check to see if the bits shifted out are known to be zero.
6997 // If so, we can compare against the unshifted value:
6998 // (X & 4) >> 1 == 2 --> (X & 4) == 4.
6999 if (LHSI->hasOneUse() &&
7000 MaskedValueIsZero(LHSI->getOperand(0),
7001 APInt::getLowBitsSet(Comp.getBitWidth(), ShAmtVal))) {
7002 return new ICmpInst(*Context, ICI.getPredicate(), LHSI->getOperand(0),
7003 Context->getConstantExprShl(RHS, ShAmt));
7006 if (LHSI->hasOneUse()) {
7007 // Otherwise strength reduce the shift into an and.
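     // For instance, 'icmp eq (lshr i32 %x, 2), 5' becomes
     // 'icmp eq (and i32 %x, 0xFFFFFFFC), 20'.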
7008 APInt Val(APInt::getHighBitsSet(TypeBits, TypeBits - ShAmtVal));
7009 Constant *Mask = Context->getConstantInt(Val);
7012 BinaryOperator::CreateAnd(LHSI->getOperand(0),
7013 Mask, LHSI->getName()+".mask");
7014 Value *And = InsertNewInstBefore(AndI, ICI);
7015 return new ICmpInst(*Context, ICI.getPredicate(), And,
7016 Context->getConstantExprShl(RHS, ShAmt));
7021 case Instruction::SDiv:
7022 case Instruction::UDiv:
7023 // Fold: icmp pred ([us]div X, C1), C2 -> range test
7024 // Fold this div into the comparison, producing a range check.
7025 // Determine, based on the divide type, what the range is being
7026 // checked. If there is an overflow on the low or high side, remember
7027 // it, otherwise compute the range [low, hi) bounding the new value.
7028 // See: InsertRangeTest above for the kinds of replacements possible.
7029 if (ConstantInt *DivRHS = dyn_cast<ConstantInt>(LHSI->getOperand(1)))
7030 if (Instruction *R = FoldICmpDivCst(ICI, cast<BinaryOperator>(LHSI),
7035 case Instruction::Add:
7036 // Fold: icmp pred (add X, C1), C2
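     // For instance, 'icmp ult (add i32 %x, 4), 4' becomes 'icmp uge i32 %x, -4',
     // since x+4 <u 4 exactly when x is one of the top four unsigned values.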
7038 if (!ICI.isEquality()) {
7039 ConstantInt *LHSC = dyn_cast<ConstantInt>(LHSI->getOperand(1));
7041 const APInt &LHSV = LHSC->getValue();
7043 ConstantRange CR = ICI.makeConstantRange(ICI.getPredicate(), RHSV)
7044 .subtract(LHSV);
7046 if (ICI.isSignedPredicate()) {
7047 if (CR.getLower().isSignBit()) {
7048 return new ICmpInst(*Context, ICmpInst::ICMP_SLT, LHSI->getOperand(0),
7049 Context->getConstantInt(CR.getUpper()));
7050 } else if (CR.getUpper().isSignBit()) {
7051 return new ICmpInst(*Context, ICmpInst::ICMP_SGE, LHSI->getOperand(0),
7052 Context->getConstantInt(CR.getLower()));
7055 if (CR.getLower().isMinValue()) {
7056 return new ICmpInst(*Context, ICmpInst::ICMP_ULT, LHSI->getOperand(0),
7057 Context->getConstantInt(CR.getUpper()));
7058 } else if (CR.getUpper().isMinValue()) {
7059 return new ICmpInst(*Context, ICmpInst::ICMP_UGE, LHSI->getOperand(0),
7060 Context->getConstantInt(CR.getLower()));
7067 // Simplify icmp_eq and icmp_ne instructions with integer constant RHS.
7068 if (ICI.isEquality()) {
7069 bool isICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;
7071 // If the first operand is (add|sub|and|or|xor|rem) with a constant, and
7072 // the second operand is a constant, simplify a bit.
7073 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(LHSI)) {
7074 switch (BO->getOpcode()) {
7075 case Instruction::SRem:
7076 // If we have a signed (X % (2^c)) == 0, turn it into an unsigned one.
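     // For instance, '(X srem 8) == 0' becomes '(X urem 8) == 0', which can later
     // simplify to a mask test on the low three bits.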
7077 if (RHSV == 0 && isa<ConstantInt>(BO->getOperand(1)) &&BO->hasOneUse()){
7078 const APInt &V = cast<ConstantInt>(BO->getOperand(1))->getValue();
7079 if (V.sgt(APInt(V.getBitWidth(), 1)) && V.isPowerOf2()) {
7080 Instruction *NewRem =
7081 BinaryOperator::CreateURem(BO->getOperand(0), BO->getOperand(1),
7083 InsertNewInstBefore(NewRem, ICI);
7084 return new ICmpInst(*Context, ICI.getPredicate(), NewRem,
7085 Context->getNullValue(BO->getType()));
7089 case Instruction::Add:
7090 // Replace ((add A, B) != C) with (A != C-B) if B & C are constants.
7091 if (ConstantInt *BOp1C = dyn_cast<ConstantInt>(BO->getOperand(1))) {
7092 if (BO->hasOneUse())
7093 return new ICmpInst(*Context, ICI.getPredicate(), BO->getOperand(0),
7094 Context->getConstantExprSub(RHS, BOp1C));
7095 } else if (RHSV == 0) {
7096 // Replace ((add A, B) != 0) with (A != -B) if A or B is
7097 // efficiently invertible, or if the add has just this one use.
7098 Value *BOp0 = BO->getOperand(0), *BOp1 = BO->getOperand(1);
7100 if (Value *NegVal = dyn_castNegVal(BOp1, Context))
7101 return new ICmpInst(*Context, ICI.getPredicate(), BOp0, NegVal);
7102 else if (Value *NegVal = dyn_castNegVal(BOp0, Context))
7103 return new ICmpInst(*Context, ICI.getPredicate(), NegVal, BOp1);
7104 else if (BO->hasOneUse()) {
7105 Instruction *Neg = BinaryOperator::CreateNeg(*Context, BOp1);
7106 InsertNewInstBefore(Neg, ICI);
7108 return new ICmpInst(*Context, ICI.getPredicate(), BOp0, Neg);
7112 case Instruction::Xor:
7113 // For the xor case, we can xor two constants together, eliminating
7114 // the explicit xor.
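     // For instance, '(X ^ 5) == 12' becomes 'X == 9', since 12 ^ 5 == 9.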
7115 if (Constant *BOC = dyn_cast<Constant>(BO->getOperand(1)))
7116 return new ICmpInst(*Context, ICI.getPredicate(), BO->getOperand(0),
7117 Context->getConstantExprXor(RHS, BOC));
7120 case Instruction::Sub:
7121 // Replace (([sub|xor] A, B) != 0) with (A != B)
7123 return new ICmpInst(*Context, ICI.getPredicate(), BO->getOperand(0),
7127 case Instruction::Or:
7128 // If bits are being or'd in that are not present in the constant we
7129 // are comparing against, then the comparison could never succeed!
7130 if (Constant *BOC = dyn_cast<Constant>(BO->getOperand(1))) {
7131 Constant *NotCI = Context->getConstantExprNot(RHS);
7132 if (!Context->getConstantExprAnd(BOC, NotCI)->isNullValue())
7133 return ReplaceInstUsesWith(ICI,
7134 Context->getConstantInt(Type::Int1Ty,
7139 case Instruction::And:
7140 if (ConstantInt *BOC = dyn_cast<ConstantInt>(BO->getOperand(1))) {
7141 // If bits are being compared against that are and'd out, then the
7142 // comparison can never succeed!
7143 if ((RHSV & ~BOC->getValue()) != 0)
7144 return ReplaceInstUsesWith(ICI,
7145 Context->getConstantInt(Type::Int1Ty,
7148 // If we have ((X & C) == C) and C is a power of 2, turn it into ((X & C) != 0).
7149 if (RHS == BOC && RHSV.isPowerOf2())
7150 return new ICmpInst(*Context, isICMP_NE ? ICmpInst::ICMP_EQ :
7151 ICmpInst::ICMP_NE, LHSI,
7152 Context->getNullValue(RHS->getType()));
7154 // Replace ((and X, (1 << size(X)-1)) != 0) with (X s< 0)
7155 if (BOC->getValue().isSignBit()) {
7156 Value *X = BO->getOperand(0);
7157 Constant *Zero = Context->getNullValue(X->getType());
7158 ICmpInst::Predicate pred = isICMP_NE ?
7159 ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGE;
7160 return new ICmpInst(*Context, pred, X, Zero);
7163 // ((X & ~7) == 0) --> X < 8
7164 if (RHSV == 0 && isHighOnes(BOC)) {
7165 Value *X = BO->getOperand(0);
7166 Constant *NegX = Context->getConstantExprNeg(BOC);
7167 ICmpInst::Predicate pred = isICMP_NE ?
7168 ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT;
7169 return new ICmpInst(*Context, pred, X, NegX);
7174 } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(LHSI)) {
7175 // Handle icmp {eq|ne} <intrinsic>, intcst.
7176 if (II->getIntrinsicID() == Intrinsic::bswap) {
7178 ICI.setOperand(0, II->getOperand(1));
7179 ICI.setOperand(1, Context->getConstantInt(RHSV.byteSwap()));
7187 /// visitICmpInstWithCastAndCast - Handle icmp (cast x to y), (cast/cst).
7188 /// We only handle extending casts so far.
7190 Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) {
7191 const CastInst *LHSCI = cast<CastInst>(ICI.getOperand(0));
7192 Value *LHSCIOp = LHSCI->getOperand(0);
7193 const Type *SrcTy = LHSCIOp->getType();
7194 const Type *DestTy = LHSCI->getType();
7197 // Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
7198 // integer type is the same size as the pointer type.
7199 if (LHSCI->getOpcode() == Instruction::PtrToInt &&
7200 getTargetData().getPointerSizeInBits() ==
7201 cast<IntegerType>(DestTy)->getBitWidth()) {
7203 if (Constant *RHSC = dyn_cast<Constant>(ICI.getOperand(1))) {
7204 RHSOp = Context->getConstantExprIntToPtr(RHSC, SrcTy);
7205 } else if (PtrToIntInst *RHSC = dyn_cast<PtrToIntInst>(ICI.getOperand(1))) {
7206 RHSOp = RHSC->getOperand(0);
7207 // If the pointer types don't match, insert a bitcast.
7208 if (LHSCIOp->getType() != RHSOp->getType())
7209 RHSOp = InsertBitCastBefore(RHSOp, LHSCIOp->getType(), ICI);
7213 return new ICmpInst(*Context, ICI.getPredicate(), LHSCIOp, RHSOp);
7216 // The code below only handles extension cast instructions, so far.
7218 if (LHSCI->getOpcode() != Instruction::ZExt &&
7219 LHSCI->getOpcode() != Instruction::SExt)
7222 bool isSignedExt = LHSCI->getOpcode() == Instruction::SExt;
7223 bool isSignedCmp = ICI.isSignedPredicate();
7225 if (CastInst *CI = dyn_cast<CastInst>(ICI.getOperand(1))) {
7226 // Not an extension from the same type?
7227 RHSCIOp = CI->getOperand(0);
7228 if (RHSCIOp->getType() != LHSCIOp->getType())
7231 // If the signedness of the two casts doesn't agree (i.e. one is a sext
7232 // and the other is a zext), then we can't handle this.
7233 if (CI->getOpcode() != LHSCI->getOpcode())
7236 // Deal with equality cases early.
7237 if (ICI.isEquality())
7238 return new ICmpInst(*Context, ICI.getPredicate(), LHSCIOp, RHSCIOp);
7240 // A signed comparison of sign extended values simplifies into a
7241 // signed comparison.
7242 if (isSignedCmp && isSignedExt)
7243 return new ICmpInst(*Context, ICI.getPredicate(), LHSCIOp, RHSCIOp);
7245 // The other three cases all fold into an unsigned comparison.
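     // For instance, 'icmp slt (zext i8 %a to i32), (zext i8 %b to i32)' becomes
     // 'icmp ult i8 %a, %b'.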
7246 return new ICmpInst(*Context, ICI.getUnsignedPredicate(), LHSCIOp, RHSCIOp);
7249 // If we aren't dealing with a constant on the RHS, exit early
7250 ConstantInt *CI = dyn_cast<ConstantInt>(ICI.getOperand(1));
7254 // Compute the constant that would happen if we truncated to SrcTy then
7255 // reextended to DestTy.
7256 Constant *Res1 = Context->getConstantExprTrunc(CI, SrcTy);
7257 Constant *Res2 = Context->getConstantExprCast(LHSCI->getOpcode(),
7260 // If the re-extended constant didn't change...
7262 // Make sure that sign of the Cmp and the sign of the Cast are the same.
7263 // For example, we might have:
7264 // %A = sext i16 %X to i32
7265 // %B = icmp ugt i32 %A, 1330
7266 // It is incorrect to transform this into
7267 // %B = icmp ugt i16 %X, 1330
7268 // because %A may have negative value.
7270 // However, we allow this when the compare is EQ/NE, because they are signless.
7272 if (isSignedExt == isSignedCmp || ICI.isEquality())
7273 return new ICmpInst(*Context, ICI.getPredicate(), LHSCIOp, Res1);
7277 // The re-extended constant changed so the constant cannot be represented
7278 // in the shorter type. Consequently, we cannot emit a simple comparison.
7280 // First, handle some easy cases. We know the result cannot be equal at this
7281 // point so handle the ICI.isEquality() cases
7282 if (ICI.getPredicate() == ICmpInst::ICMP_EQ)
7283 return ReplaceInstUsesWith(ICI, Context->getConstantIntFalse());
7284 if (ICI.getPredicate() == ICmpInst::ICMP_NE)
7285 return ReplaceInstUsesWith(ICI, Context->getConstantIntTrue());
7287 // Evaluate the comparison for LT (we invert for GT below). LE and GE cases
7288 // should have been folded away previously and not enter in here.
7291 // We're performing a signed comparison.
7292 if (cast<ConstantInt>(CI)->getValue().isNegative())
7293 Result = Context->getConstantIntFalse(); // X < (small) --> false
7295 Result = Context->getConstantIntTrue(); // X < (large) --> true
7297 // We're performing an unsigned comparison.
7299 // We're performing an unsigned comp with a sign extended value.
7300 // This is true if the input is >= 0. [aka >s -1]
7301 Constant *NegOne = Context->getConstantIntAllOnesValue(SrcTy);
7302 Result = InsertNewInstBefore(new ICmpInst(*Context, ICmpInst::ICMP_SGT,
7303 LHSCIOp, NegOne, ICI.getName()), ICI);
7305 // Unsigned extend & unsigned compare -> always true.
7306 Result = Context->getConstantIntTrue();
7310 // Finally, return the value computed.
7311 if (ICI.getPredicate() == ICmpInst::ICMP_ULT ||
7312 ICI.getPredicate() == ICmpInst::ICMP_SLT)
7313 return ReplaceInstUsesWith(ICI, Result);
7315 assert((ICI.getPredicate()==ICmpInst::ICMP_UGT ||
7316 ICI.getPredicate()==ICmpInst::ICMP_SGT) &&
7317 "ICmp should be folded!");
7318 if (Constant *CI = dyn_cast<Constant>(Result))
7319 return ReplaceInstUsesWith(ICI, Context->getConstantExprNot(CI));
7320 return BinaryOperator::CreateNot(Result);
7323 Instruction *InstCombiner::visitShl(BinaryOperator &I) {
7324 return commonShiftTransforms(I);
7327 Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
7328 return commonShiftTransforms(I);
7331 Instruction *InstCombiner::visitAShr(BinaryOperator &I) {
7332 if (Instruction *R = commonShiftTransforms(I))
7335 Value *Op0 = I.getOperand(0);
7337 // ashr int -1, X = -1 (for any arithmetic shift rights of ~0)
7338 if (ConstantInt *CSI = dyn_cast<ConstantInt>(Op0))
7339 if (CSI->isAllOnesValue())
7340 return ReplaceInstUsesWith(I, CSI);
7342 // See if we can turn a signed shr into an unsigned shr.
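     // For instance, if the sign bit of %x is known to be zero, 'ashr i32 %x, 4'
     // computes the same value as 'lshr i32 %x, 4'.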
7343 if (MaskedValueIsZero(Op0,
7344 APInt::getSignBit(I.getType()->getScalarSizeInBits())))
7345 return BinaryOperator::CreateLShr(Op0, I.getOperand(1));
7347 // Arithmetic shifting an all-sign-bit value is a no-op.
7348 unsigned NumSignBits = ComputeNumSignBits(Op0);
7349 if (NumSignBits == Op0->getType()->getScalarSizeInBits())
7350 return ReplaceInstUsesWith(I, Op0);
7355 Instruction *InstCombiner::commonShiftTransforms(BinaryOperator &I) {
7356 assert(I.getOperand(1)->getType() == I.getOperand(0)->getType());
7357 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
7359 // shl X, 0 == X and shr X, 0 == X
7360 // shl 0, X == 0 and shr 0, X == 0
7361 if (Op1 == Context->getNullValue(Op1->getType()) ||
7362 Op0 == Context->getNullValue(Op0->getType()))
7363 return ReplaceInstUsesWith(I, Op0);
7365 if (isa<UndefValue>(Op0)) {
7366 if (I.getOpcode() == Instruction::AShr) // undef >>s X -> undef
7367 return ReplaceInstUsesWith(I, Op0);
7368 else // undef << X -> 0, undef >>u X -> 0
7369 return ReplaceInstUsesWith(I, Context->getNullValue(I.getType()));
7371 if (isa<UndefValue>(Op1)) {
7372 if (I.getOpcode() == Instruction::AShr) // X >>s undef -> X
7373 return ReplaceInstUsesWith(I, Op0);
7374 else // X << undef, X >>u undef -> 0
7375 return ReplaceInstUsesWith(I, Context->getNullValue(I.getType()));
7378 // See if we can fold away this shift.
7379 if (SimplifyDemandedInstructionBits(I))
7382 // Try to fold constant and into select arguments.
7383 if (isa<Constant>(Op0))
7384 if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
7385 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
7388 if (ConstantInt *CUI = dyn_cast<ConstantInt>(Op1))
7389 if (Instruction *Res = FoldShiftByConstant(Op0, CUI, I))
7394 Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
7395 BinaryOperator &I) {
7396 bool isLeftShift = I.getOpcode() == Instruction::Shl;
7398 // See if we can simplify any instructions used by the instruction whose sole
7399 // purpose is to compute bits we don't care about.
7400 uint32_t TypeBits = Op0->getType()->getScalarSizeInBits();
7402 // shl i32 X, 32 = 0 and srl i8 Y, 9 = 0, ... but don't eliminate an over-wide
7403 // signed shift; its amount is clamped instead.
7405 if (Op1->uge(TypeBits)) {
7406 if (I.getOpcode() != Instruction::AShr)
7407 return ReplaceInstUsesWith(I, Context->getNullValue(Op0->getType()));
7409 I.setOperand(1, Context->getConstantInt(I.getType(), TypeBits-1));
7414 // ((X*C1) << C2) == (X * (C1 << C2))
7415 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Op0))
7416 if (BO->getOpcode() == Instruction::Mul && isLeftShift)
7417 if (Constant *BOOp = dyn_cast<Constant>(BO->getOperand(1)))
7418 return BinaryOperator::CreateMul(BO->getOperand(0),
7419 Context->getConstantExprShl(BOOp, Op1));
7421 // Try to fold constant and into select arguments.
7422 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
7423 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
7425 if (isa<PHINode>(Op0))
7426 if (Instruction *NV = FoldOpIntoPhi(I))
7429 // Fold shift2(trunc(shift1(x,c1)), c2) -> trunc(shift2(shift1(x,c1),c2))
7430 if (TruncInst *TI = dyn_cast<TruncInst>(Op0)) {
7431 Instruction *TrOp = dyn_cast<Instruction>(TI->getOperand(0));
7432 // If 'shift2' is an ashr, we would have to get the sign bit into a funny
7433 // place. Don't try to do this transformation in this case. Also, we
7434 // require that the input operand is a shift-by-constant so that we have
7435 // confidence that the shifts will get folded together. We could do this
7436 // xform in more cases, but it is unlikely to be profitable.
7437 if (TrOp && I.isLogicalShift() && TrOp->isShift() &&
7438 isa<ConstantInt>(TrOp->getOperand(1))) {
7439 // Okay, we'll do this xform. Make the shift of shift.
7440 Constant *ShAmt = Context->getConstantExprZExt(Op1, TrOp->getType());
7441 Instruction *NSh = BinaryOperator::Create(I.getOpcode(), TrOp, ShAmt,
7443 InsertNewInstBefore(NSh, I); // (shift2 (shift1 & 0x00FF), c2)
7445 // For logical shifts, the truncation has the effect of making the high
7446 // part of the register be zeros. Emulate this by inserting an AND to
7447 // clear the top bits as needed. This 'and' will usually be zapped by
7448 // other xforms later if dead.
7449 unsigned SrcSize = TrOp->getType()->getScalarSizeInBits();
7450 unsigned DstSize = TI->getType()->getScalarSizeInBits();
7451 APInt MaskV(APInt::getLowBitsSet(SrcSize, DstSize));
7453 // The mask we constructed says what the trunc would do if occurring
7454 // between the shifts. We want to know the effect *after* the second
7455 // shift. We know that it is a logical shift by a constant, so adjust the
7456 // mask as appropriate.
7457 if (I.getOpcode() == Instruction::Shl)
7458 MaskV <<= Op1->getZExtValue();
7460 assert(I.getOpcode() == Instruction::LShr && "Unknown logical shift");
7461 MaskV = MaskV.lshr(Op1->getZExtValue());
7465 BinaryOperator::CreateAnd(NSh, Context->getConstantInt(MaskV),
7467 InsertNewInstBefore(And, I); // shift1 & 0x00FF
7469 // Return the value truncated to the interesting size.
7470 return new TruncInst(And, I.getType());
7474 if (Op0->hasOneUse()) {
7475 if (BinaryOperator *Op0BO = dyn_cast<BinaryOperator>(Op0)) {
7476 // Turn ((X >> C) + Y) << C -> (X + (Y << C)) & (~0 << C)
7479 switch (Op0BO->getOpcode()) {
7481 case Instruction::Add:
7482 case Instruction::And:
7483 case Instruction::Or:
7484 case Instruction::Xor: {
7485 // These operators commute.
7486 // Turn (Y + (X >> C)) << C -> (X + (Y << C)) & (~0 << C)
7487 if (isLeftShift && Op0BO->getOperand(1)->hasOneUse() &&
7488 match(Op0BO->getOperand(1), m_Shr(m_Value(V1),
7489 m_Specific(Op1)), *Context)){
7490 Instruction *YS = BinaryOperator::CreateShl(
7491 Op0BO->getOperand(0), Op1,
7493 InsertNewInstBefore(YS, I); // (Y << C)
7495 BinaryOperator::Create(Op0BO->getOpcode(), YS, V1,
7496 Op0BO->getOperand(1)->getName());
7497 InsertNewInstBefore(X, I); // (X + (Y << C))
7498 uint32_t Op1Val = Op1->getLimitedValue(TypeBits);
7499 return BinaryOperator::CreateAnd(X, Context->getConstantInt(
7500 APInt::getHighBitsSet(TypeBits, TypeBits-Op1Val)));
7503 // Turn (Y + ((X >> C) & CC)) << C -> ((X & (CC << C)) + (Y << C))
7504 Value *Op0BOOp1 = Op0BO->getOperand(1);
7505 if (isLeftShift && Op0BOOp1->hasOneUse() &&
7507 m_And(m_Shr(m_Value(V1), m_Specific(Op1)),
7508 m_ConstantInt(CC)), *Context) &&
7509 cast<BinaryOperator>(Op0BOOp1)->getOperand(0)->hasOneUse()) {
7510 Instruction *YS = BinaryOperator::CreateShl(
7511 Op0BO->getOperand(0), Op1,
7513 InsertNewInstBefore(YS, I); // (Y << C)
7515 BinaryOperator::CreateAnd(V1,
7516 Context->getConstantExprShl(CC, Op1),
7517 V1->getName()+".mask");
7518 InsertNewInstBefore(XM, I); // X & (CC << C)
7520 return BinaryOperator::Create(Op0BO->getOpcode(), YS, XM);
7525 case Instruction::Sub: {
7526 // Turn ((X >> C) - Y) << C -> (X - (Y << C)) & (~0 << C)
7527 if (isLeftShift && Op0BO->getOperand(0)->hasOneUse() &&
7528 match(Op0BO->getOperand(0), m_Shr(m_Value(V1),
7529 m_Specific(Op1)), *Context)){
7530 Instruction *YS = BinaryOperator::CreateShl(
7531 Op0BO->getOperand(1), Op1,
7533 InsertNewInstBefore(YS, I); // (Y << C)
7535 BinaryOperator::Create(Op0BO->getOpcode(), V1, YS,
7536 Op0BO->getOperand(0)->getName());
7537 InsertNewInstBefore(X, I); // (X + (Y << C))
7538 uint32_t Op1Val = Op1->getLimitedValue(TypeBits);
7539 return BinaryOperator::CreateAnd(X, Context->getConstantInt(
7540 APInt::getHighBitsSet(TypeBits, TypeBits-Op1Val)));
7543 // Turn (((X >> C)&CC) - Y) << C -> ((X & (CC << C)) - (Y << C))
7544 if (isLeftShift && Op0BO->getOperand(0)->hasOneUse() &&
7545 match(Op0BO->getOperand(0),
7546 m_And(m_Shr(m_Value(V1), m_Value(V2)),
7547 m_ConstantInt(CC)), *Context) && V2 == Op1 &&
7548 cast<BinaryOperator>(Op0BO->getOperand(0))
7549 ->getOperand(0)->hasOneUse()) {
7550 Instruction *YS = BinaryOperator::CreateShl(
7551 Op0BO->getOperand(1), Op1,
7553 InsertNewInstBefore(YS, I); // (Y << C)
7555 BinaryOperator::CreateAnd(V1,
7556 Context->getConstantExprShl(CC, Op1),
7557 V1->getName()+".mask");
7558 InsertNewInstBefore(XM, I); // X & (CC << C)
7560 return BinaryOperator::Create(Op0BO->getOpcode(), XM, YS);
7568 // If the operand is a bitwise operator with a constant RHS, and the
7569 // shift is the only use, we can pull it out of the shift.
7570 if (ConstantInt *Op0C = dyn_cast<ConstantInt>(Op0BO->getOperand(1))) {
7571 bool isValid = true; // Valid only for And, Or, Xor
7572 bool highBitSet = false; // Transform if high bit of constant set?
7574 switch (Op0BO->getOpcode()) {
7575 default: isValid = false; break; // Do not perform transform!
7576 case Instruction::Add:
7577 isValid = isLeftShift;
7579 case Instruction::Or:
7580 case Instruction::Xor:
7583 case Instruction::And:
7588 // If this is a signed shift right, and the high bit is modified
7589 // by the logical operation, do not perform the transformation.
7590 // The highBitSet boolean indicates the value of the high bit of
7591 // the constant which would cause it to be modified for this operation.
7594 if (isValid && I.getOpcode() == Instruction::AShr)
7595 isValid = Op0C->getValue()[TypeBits-1] == highBitSet;
7598 Constant *NewRHS = Context->getConstantExpr(I.getOpcode(), Op0C, Op1);
7600 Instruction *NewShift =
7601 BinaryOperator::Create(I.getOpcode(), Op0BO->getOperand(0), Op1);
7602 InsertNewInstBefore(NewShift, I);
7603 NewShift->takeName(Op0BO);
7605 return BinaryOperator::Create(Op0BO->getOpcode(), NewShift,
7612 // Find out if this is a shift of a shift by a constant.
7613 BinaryOperator *ShiftOp = dyn_cast<BinaryOperator>(Op0);
7614 if (ShiftOp && !ShiftOp->isShift())
7617 if (ShiftOp && isa<ConstantInt>(ShiftOp->getOperand(1))) {
7618 ConstantInt *ShiftAmt1C = cast<ConstantInt>(ShiftOp->getOperand(1));
7619 uint32_t ShiftAmt1 = ShiftAmt1C->getLimitedValue(TypeBits);
7620 uint32_t ShiftAmt2 = Op1->getLimitedValue(TypeBits);
7621 assert(ShiftAmt2 != 0 && "Should have been simplified earlier");
7622 if (ShiftAmt1 == 0) return 0; // Will be simplified in the future.
7623 Value *X = ShiftOp->getOperand(0);
7625 uint32_t AmtSum = ShiftAmt1+ShiftAmt2; // Fold into one big shift.
7627 const IntegerType *Ty = cast<IntegerType>(I.getType());
7629 // Check for (X << c1) << c2 and (X >> c1) >> c2
7630 if (I.getOpcode() == ShiftOp->getOpcode()) {
7631 // If this is an oversized composite shift, then unsigned shifts get 0 and ashr saturates.
7633 if (AmtSum >= TypeBits) {
7634 if (I.getOpcode() != Instruction::AShr)
7635 return ReplaceInstUsesWith(I, Context->getNullValue(I.getType()));
7636 AmtSum = TypeBits-1; // Saturate to 31 for i32 ashr.
7639 return BinaryOperator::Create(I.getOpcode(), X,
7640 Context->getConstantInt(Ty, AmtSum));
7641 } else if (ShiftOp->getOpcode() == Instruction::LShr &&
7642 I.getOpcode() == Instruction::AShr) {
7643 if (AmtSum >= TypeBits)
7644 return ReplaceInstUsesWith(I, Context->getNullValue(I.getType()));
7646 // ((X >>u C1) >>s C2) -> (X >>u (C1+C2)) since C1 != 0.
7647 return BinaryOperator::CreateLShr(X, Context->getConstantInt(Ty, AmtSum));
7648 } else if (ShiftOp->getOpcode() == Instruction::AShr &&
7649 I.getOpcode() == Instruction::LShr) {
7650 // ((X >>s C1) >>u C2) -> ((X >>s (C1+C2)) & mask) since C1 != 0.
7651 if (AmtSum >= TypeBits)
7652 AmtSum = TypeBits-1;
7654 Instruction *Shift =
7655 BinaryOperator::CreateAShr(X, Context->getConstantInt(Ty, AmtSum));
7656 InsertNewInstBefore(Shift, I);
7658 APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt2));
7659 return BinaryOperator::CreateAnd(Shift, Context->getConstantInt(Mask));
7662 // Okay, if we get here, one shift must be left, and the other shift must be
7663 // right. See if the amounts are equal.
7664 if (ShiftAmt1 == ShiftAmt2) {
7665 // If we have ((X >>? C) << C), turn this into X & (-1 << C).
7666 if (I.getOpcode() == Instruction::Shl) {
7667 APInt Mask(APInt::getHighBitsSet(TypeBits, TypeBits - ShiftAmt1));
7668 return BinaryOperator::CreateAnd(X, Context->getConstantInt(Mask));
7670 // If we have ((X << C) >>u C), turn this into X & (-1 >>u C).
7671 if (I.getOpcode() == Instruction::LShr) {
7672 APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt1));
7673 return BinaryOperator::CreateAnd(X, Context->getConstantInt(Mask));
7675 // We can simplify ((X << C) >>s C) into a trunc + sext.
7676 // NOTE: we could do this for any C, but that would make 'unusual' integer
7677 // types. For now, just stick to ones well-supported by the code generators.
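     // For instance, on i32, '(X << 24) >>s 24' becomes 'sext (trunc X to i8) to i32'.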
7679 const Type *SExtType = 0;
7680 switch (Ty->getBitWidth() - ShiftAmt1) {
7687 SExtType = Context->getIntegerType(Ty->getBitWidth() - ShiftAmt1);
7692 Instruction *NewTrunc = new TruncInst(X, SExtType, "sext");
7693 InsertNewInstBefore(NewTrunc, I);
7694 return new SExtInst(NewTrunc, Ty);
7696 // Otherwise, we can't handle it yet.
7697 } else if (ShiftAmt1 < ShiftAmt2) {
7698 uint32_t ShiftDiff = ShiftAmt2-ShiftAmt1;
7700 // (X >>? C1) << C2 --> X << (C2-C1) & (-1 << C2)
7701 if (I.getOpcode() == Instruction::Shl) {
7702 assert(ShiftOp->getOpcode() == Instruction::LShr ||
7703 ShiftOp->getOpcode() == Instruction::AShr);
7704 Instruction *Shift =
7705 BinaryOperator::CreateShl(X, Context->getConstantInt(Ty, ShiftDiff));
7706 InsertNewInstBefore(Shift, I);
7708 APInt Mask(APInt::getHighBitsSet(TypeBits, TypeBits - ShiftAmt2));
7709 return BinaryOperator::CreateAnd(Shift, Context->getConstantInt(Mask));
7712 // (X << C1) >>u C2 --> X >>u (C2-C1) & (-1 >> C2)
7713 if (I.getOpcode() == Instruction::LShr) {
7714 assert(ShiftOp->getOpcode() == Instruction::Shl);
7715 Instruction *Shift =
7716 BinaryOperator::CreateLShr(X, Context->getConstantInt(Ty, ShiftDiff));
7717 InsertNewInstBefore(Shift, I);
7719 APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt2));
7720 return BinaryOperator::CreateAnd(Shift, Context->getConstantInt(Mask));
7723 // We can't handle (X << C1) >>s C2, it shifts arbitrary bits in.
7725 assert(ShiftAmt2 < ShiftAmt1);
7726 uint32_t ShiftDiff = ShiftAmt1-ShiftAmt2;
7728 // (X >>? C1) << C2 --> X >>? (C1-C2) & (-1 << C2)
7729 if (I.getOpcode() == Instruction::Shl) {
7730 assert(ShiftOp->getOpcode() == Instruction::LShr ||
7731 ShiftOp->getOpcode() == Instruction::AShr);
7732 Instruction *Shift =
7733 BinaryOperator::Create(ShiftOp->getOpcode(), X,
7734 Context->getConstantInt(Ty, ShiftDiff));
7735 InsertNewInstBefore(Shift, I);
7737 APInt Mask(APInt::getHighBitsSet(TypeBits, TypeBits - ShiftAmt2));
7738 return BinaryOperator::CreateAnd(Shift, Context->getConstantInt(Mask));
7741 // (X << C1) >>u C2 --> X << (C1-C2) & (-1 >> C2)
7742 if (I.getOpcode() == Instruction::LShr) {
7743 assert(ShiftOp->getOpcode() == Instruction::Shl);
7744 Instruction *Shift =
7745 BinaryOperator::CreateShl(X, Context->getConstantInt(Ty, ShiftDiff));
7746 InsertNewInstBefore(Shift, I);
7748 APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt2));
7749 return BinaryOperator::CreateAnd(Shift, Context->getConstantInt(Mask));
7752 // We can't handle (X << C1) >>a C2, it shifts arbitrary bits in.
7759 /// DecomposeSimpleLinearExpr - Analyze 'Val', seeing if it is a simple linear
7760 /// expression. If so, decompose it, returning some value X, such that Val is X*Scale+Offset.
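/// For instance, '(X << 2) + 12' decomposes into X with Scale = 4 and Offset = 12.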
7763 static Value *DecomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
7764 int &Offset, LLVMContext *Context) {
7765 assert(Val->getType() == Type::Int32Ty && "Unexpected allocation size type!");
7766 if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
7767 Offset = CI->getZExtValue();
7769 return Context->getConstantInt(Type::Int32Ty, 0);
7770 } else if (BinaryOperator *I = dyn_cast<BinaryOperator>(Val)) {
7771 if (ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
7772 if (I->getOpcode() == Instruction::Shl) {
7773 // This is a value scaled by '1 << the shift amt'.
7774 Scale = 1U << RHS->getZExtValue();
7776 return I->getOperand(0);
7777 } else if (I->getOpcode() == Instruction::Mul) {
7778 // This value is scaled by 'RHS'.
7779 Scale = RHS->getZExtValue();
7781 return I->getOperand(0);
7782 } else if (I->getOpcode() == Instruction::Add) {
7783 // We have X+C. Check to see if we really have (X*C2)+C1,
7784 // where C1 is divisible by C2.
7787 DecomposeSimpleLinearExpr(I->getOperand(0), SubScale,
7789 Offset += RHS->getZExtValue();
7796 // Otherwise, we can't look past this.
7803 /// PromoteCastOfAllocation - If we find a cast of an allocation instruction,
7804 /// try to eliminate the cast by moving the type information into the alloc.
7805 Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
7806 AllocationInst &AI) {
7807 const PointerType *PTy = cast<PointerType>(CI.getType());
7809 // Remove any uses of AI that are dead.
7810 assert(!CI.use_empty() && "Dead instructions should be removed earlier!");
7812 for (Value::use_iterator UI = AI.use_begin(), E = AI.use_end(); UI != E; ) {
7813 Instruction *User = cast<Instruction>(*UI++);
7814 if (isInstructionTriviallyDead(User)) {
7815 while (UI != E && *UI == User)
7816 ++UI; // If this instruction uses AI more than once, don't break UI.
7819 DOUT << "IC: DCE: " << *User;
7820 EraseInstFromFunction(*User);
7824 // Get the type really allocated and the type casted to.
7825 const Type *AllocElTy = AI.getAllocatedType();
7826 const Type *CastElTy = PTy->getElementType();
7827 if (!AllocElTy->isSized() || !CastElTy->isSized()) return 0;
7829 unsigned AllocElTyAlign = TD->getABITypeAlignment(AllocElTy);
7830 unsigned CastElTyAlign = TD->getABITypeAlignment(CastElTy);
7831 if (CastElTyAlign < AllocElTyAlign) return 0;
7833 // If the allocation has multiple uses, only promote it if we are strictly
7834 // increasing the alignment of the resultant allocation. If we keep it the
7835 // same, we open the door to infinite loops of various kinds. (A reference
7836 // from a dbg.declare doesn't count as a use for this purpose.)
7837 if (!AI.hasOneUse() && !hasOneUsePlusDeclare(&AI) &&
7838 CastElTyAlign == AllocElTyAlign) return 0;
7840 uint64_t AllocElTySize = TD->getTypeAllocSize(AllocElTy);
7841 uint64_t CastElTySize = TD->getTypeAllocSize(CastElTy);
7842 if (CastElTySize == 0 || AllocElTySize == 0) return 0;
7844 // See if we can satisfy the modulus by pulling a scale out of the array size argument.
7846 unsigned ArraySizeScale;
7847 int ArrayOffset;
7848 Value *NumElements = // See if the array size is a decomposable linear expr.
7849 DecomposeSimpleLinearExpr(AI.getOperand(0), ArraySizeScale,
7850 ArrayOffset, Context);
7852 // If we can now satisfy the modulus, by using a non-1 scale, we really can do the transform.
7854 if ((AllocElTySize*ArraySizeScale) % CastElTySize != 0 ||
7855 (AllocElTySize*ArrayOffset ) % CastElTySize != 0) return 0;
7857 unsigned Scale = (AllocElTySize*ArraySizeScale)/CastElTySize;
7862 // If the allocation size is constant, form a constant mul expression
7863 Amt = Context->getConstantInt(Type::Int32Ty, Scale);
7864 if (isa<ConstantInt>(NumElements))
7865 Amt = Context->getConstantExprMul(cast<ConstantInt>(NumElements),
7866 cast<ConstantInt>(Amt));
7867 // otherwise multiply the amount and the number of elements
7869 Instruction *Tmp = BinaryOperator::CreateMul(Amt, NumElements, "tmp");
7870 Amt = InsertNewInstBefore(Tmp, AI);
7874 if (int Offset = (AllocElTySize*ArrayOffset)/CastElTySize) {
7875 Value *Off = Context->getConstantInt(Type::Int32Ty, Offset, true);
7876 Instruction *Tmp = BinaryOperator::CreateAdd(Amt, Off, "tmp");
7877 Amt = InsertNewInstBefore(Tmp, AI);
7880 AllocationInst *New;
7881 if (isa<MallocInst>(AI))
7882 New = new MallocInst(CastElTy, Amt, AI.getAlignment());
7883 else
7884 New = new AllocaInst(CastElTy, Amt, AI.getAlignment());
7885 InsertNewInstBefore(New, AI);
7888 // If the allocation has one real use plus a dbg.declare, just remove the dbg.declare.
7890 if (DbgDeclareInst *DI = hasOneUsePlusDeclare(&AI)) {
7891 EraseInstFromFunction(*DI);
7893 // If the allocation has multiple real uses, insert a cast and change all
7894 // things that used it to use the new cast. This will also hack on CI, but it will eventually be removed.
7896 else if (!AI.hasOneUse()) {
7897 AddUsesToWorkList(AI);
7898 // New is the allocation instruction, pointer typed. AI is the original
7899 // allocation instruction, also pointer typed. Thus, cast to use is BitCast.
7900 CastInst *NewCast = new BitCastInst(New, AI.getType(), "tmpcast");
7901 InsertNewInstBefore(NewCast, AI);
7902 AI.replaceAllUsesWith(NewCast);
7904 return ReplaceInstUsesWith(CI, New);
7907 /// CanEvaluateInDifferentType - Return true if we can take the specified value
7908 /// and return it as type Ty without inserting any new casts and without
7909 /// changing the computed value. This is used by code that tries to decide
7910 /// whether promoting or shrinking integer operations to wider or smaller types
7911 /// will allow us to eliminate a truncate or extend.
7913 /// This is a truncation operation if Ty is smaller than V->getType(), or an
7914 /// extension operation if Ty is larger.
7916 /// If CastOpc is a truncation, then Ty will be a type smaller than V. We
7917 /// should return true if trunc(V) can be computed by computing V in the smaller
7918 /// type. If V is an instruction, then trunc(inst(x,y)) can be computed as
7919 /// inst(trunc(x),trunc(y)), which only makes sense if x and y can be
7920 /// efficiently truncated.
7922 /// If CastOpc is a sext or zext, we are asking if the low bits of the value can
7923 /// be computed in a larger type, which is then and'd or sext_in_reg'd to get
7924 /// the final result.
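// Hedged example of the query this answers (not from the original source):
// for V = add i32 (zext i16 %a to i32), 42 with Ty = i16 and CastOpc = trunc,
// the answer is true: the zext cancels against the truncation and the constant
// truncates freely, so EvaluateInDifferentType can build add i16 %a, 42.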
7925 bool InstCombiner::CanEvaluateInDifferentType(Value *V, const Type *Ty,
7927 int &NumCastsRemoved){
7928 // We can always evaluate constants in another type.
7929 if (isa<Constant>(V))
7932 Instruction *I = dyn_cast<Instruction>(V);
7933 if (!I) return false;
7935 const Type *OrigTy = V->getType();
7937 // If this is an extension or truncate, we can often eliminate it.
7938 if (isa<TruncInst>(I) || isa<ZExtInst>(I) || isa<SExtInst>(I)) {
7939 // If this is a cast from the destination type, we can trivially eliminate
7940 // it, and this will remove a cast overall.
7941 if (I->getOperand(0)->getType() == Ty) {
7942 // If the first operand is itself a cast, and is eliminable, do not count
7943 // this as an eliminable cast. We would prefer to eliminate those two casts first.
7945 if (!isa<CastInst>(I->getOperand(0)) && I->hasOneUse())
7951 // We can't extend or shrink something that has multiple uses: doing so would
7952 // require duplicating the instruction in general, which isn't profitable.
7953 if (!I->hasOneUse()) return false;
7955 unsigned Opc = I->getOpcode();
7957 case Instruction::Add:
7958 case Instruction::Sub:
7959 case Instruction::Mul:
7960 case Instruction::And:
7961 case Instruction::Or:
7962 case Instruction::Xor:
7963 // These operators can all arbitrarily be extended or truncated.
7964 return CanEvaluateInDifferentType(I->getOperand(0), Ty, CastOpc,
7966 CanEvaluateInDifferentType(I->getOperand(1), Ty, CastOpc,
7969 case Instruction::Shl:
7970 // If we are truncating the result of this SHL, and if it's a shift of a
7971 // constant amount, we can always perform a SHL in a smaller type.
7972 if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
7973 uint32_t BitWidth = Ty->getScalarSizeInBits();
7974 if (BitWidth < OrigTy->getScalarSizeInBits() &&
7975 CI->getLimitedValue(BitWidth) < BitWidth)
7976 return CanEvaluateInDifferentType(I->getOperand(0), Ty, CastOpc,
7980 case Instruction::LShr:
7981 // If this is a truncate of a logical shr, we can truncate it to a smaller
7982 // lshr iff we know that the bits we would otherwise be shifting in are already zero.
7984 if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
7985 uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
7986 uint32_t BitWidth = Ty->getScalarSizeInBits();
7987 if (BitWidth < OrigBitWidth &&
7988 MaskedValueIsZero(I->getOperand(0),
7989 APInt::getHighBitsSet(OrigBitWidth, OrigBitWidth-BitWidth)) &&
7990 CI->getLimitedValue(BitWidth) < BitWidth) {
7991 return CanEvaluateInDifferentType(I->getOperand(0), Ty, CastOpc,
7996 case Instruction::ZExt:
7997 case Instruction::SExt:
7998 case Instruction::Trunc:
7999 // If this is the same kind of case as our original (e.g. zext+zext), we
8000 // can safely replace it. Note that replacing it does not reduce the number
8001 // of casts in the input.
8005 // sext (zext ty1), ty2 -> zext ty2
8006 if (CastOpc == Instruction::SExt && Opc == Instruction::ZExt)
8009 case Instruction::Select: {
8010 SelectInst *SI = cast<SelectInst>(I);
8011 return CanEvaluateInDifferentType(SI->getTrueValue(), Ty, CastOpc,
8013 CanEvaluateInDifferentType(SI->getFalseValue(), Ty, CastOpc,
8016 case Instruction::PHI: {
8017 // We can change a phi if we can change all operands.
8018 PHINode *PN = cast<PHINode>(I);
8019 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
8020 if (!CanEvaluateInDifferentType(PN->getIncomingValue(i), Ty, CastOpc,
8026 // TODO: Can handle more cases here.
8033 /// EvaluateInDifferentType - Given an expression that
8034 /// CanEvaluateInDifferentType returns true for, actually insert the code to
8035 /// evaluate the expression.
8036 Value *InstCombiner::EvaluateInDifferentType(Value *V, const Type *Ty,
8038 if (Constant *C = dyn_cast<Constant>(V))
8039 return Context->getConstantExprIntegerCast(C, Ty,
8040 isSigned /*Sext or ZExt*/);
8042 // Otherwise, it must be an instruction.
8043 Instruction *I = cast<Instruction>(V);
8044 Instruction *Res = 0;
8045 unsigned Opc = I->getOpcode();
8047 case Instruction::Add:
8048 case Instruction::Sub:
8049 case Instruction::Mul:
8050 case Instruction::And:
8051 case Instruction::Or:
8052 case Instruction::Xor:
8053 case Instruction::AShr:
8054 case Instruction::LShr:
8055 case Instruction::Shl: {
8056 Value *LHS = EvaluateInDifferentType(I->getOperand(0), Ty, isSigned);
8057 Value *RHS = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
8058 Res = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS);
8061 case Instruction::Trunc:
8062 case Instruction::ZExt:
8063 case Instruction::SExt:
8064 // If the source type of the cast is the type we're trying for then we can
8065 // just return the source. There's no need to insert it because it is not new.
8067 if (I->getOperand(0)->getType() == Ty)
8068 return I->getOperand(0);
8070 // Otherwise, must be the same type of cast, so just reinsert a new one.
8071 Res = CastInst::Create(cast<CastInst>(I)->getOpcode(), I->getOperand(0),
8074 case Instruction::Select: {
8075 Value *True = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
8076 Value *False = EvaluateInDifferentType(I->getOperand(2), Ty, isSigned);
8077 Res = SelectInst::Create(I->getOperand(0), True, False);
8080 case Instruction::PHI: {
8081 PHINode *OPN = cast<PHINode>(I);
8082 PHINode *NPN = PHINode::Create(Ty);
8083 for (unsigned i = 0, e = OPN->getNumIncomingValues(); i != e; ++i) {
8084 Value *V =EvaluateInDifferentType(OPN->getIncomingValue(i), Ty, isSigned);
8085 NPN->addIncoming(V, OPN->getIncomingBlock(i));
8091 // TODO: Can handle more cases here.
8092 LLVM_UNREACHABLE("Unreachable!");
8097 return InsertNewInstBefore(Res, *I);
8100 /// @brief Implement the transforms common to all CastInst visitors.
8101 Instruction *InstCombiner::commonCastTransforms(CastInst &CI) {
8102 Value *Src = CI.getOperand(0);
8104 // Many cases of "cast of a cast" are eliminable. If it's eliminable we just
8105 // eliminate it now.
8106 if (CastInst *CSrc = dyn_cast<CastInst>(Src)) { // A->B->C cast
8107 if (Instruction::CastOps opc =
8108 isEliminableCastPair(CSrc, CI.getOpcode(), CI.getType(), TD)) {
8109 // The first cast (CSrc) is eliminable so we need to fix up or replace
8110 // the second cast (CI). CSrc will then have a good chance of being dead.
8111 return CastInst::Create(opc, CSrc->getOperand(0), CI.getType());
8115 // If we are casting a select then fold the cast into the select
8116 if (SelectInst *SI = dyn_cast<SelectInst>(Src))
8117 if (Instruction *NV = FoldOpIntoSelect(CI, SI, this))
8120 // If we are casting a PHI then fold the cast into the PHI
8121 if (isa<PHINode>(Src))
8122 if (Instruction *NV = FoldOpIntoPhi(CI))
8128 /// FindElementAtOffset - Given a type and a constant offset, determine whether
8129 /// or not there is a sequence of GEP indices into the type that will land us at
8130 /// the specified offset. If so, fill them into NewIndices and return the
8131 /// resultant element type, otherwise return null.
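// Worked example (illustrative only, assuming 64-bit pointers): for
// Ty = { i32, [4 x i8] } and Offset = 6 this produces the indices
// [i64 0, i32 1, i64 2] and returns i8, since byte 6 is element 2 of the
// inner array, which itself starts at byte 4 of the struct.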
8132 static const Type *FindElementAtOffset(const Type *Ty, int64_t Offset,
8133 SmallVectorImpl<Value*> &NewIndices,
8134 const TargetData *TD,
8135 LLVMContext *Context) {
8136 if (!Ty->isSized()) return 0;
8138 // Start with the index over the outer type. Note that the type size
8139 // might be zero (even if the offset isn't zero) if the indexed type
8140 // is something like [0 x {int, int}]
8141 const Type *IntPtrTy = TD->getIntPtrType();
8142 int64_t FirstIdx = 0;
8143 if (int64_t TySize = TD->getTypeAllocSize(Ty)) {
8144 FirstIdx = Offset/TySize;
8145 Offset -= FirstIdx*TySize;
8147 // Handle hosts where % returns negative instead of values [0..TySize).
8151 assert(Offset >= 0);
8153 assert((uint64_t)Offset < (uint64_t)TySize && "Out of range offset");
8156 NewIndices.push_back(Context->getConstantInt(IntPtrTy, FirstIdx));
8158 // Index into the types. If we fail, set OrigBase to null.
8160 // Indexing into tail padding between struct/array elements.
8161 if (uint64_t(Offset*8) >= TD->getTypeSizeInBits(Ty))
8164 if (const StructType *STy = dyn_cast<StructType>(Ty)) {
8165 const StructLayout *SL = TD->getStructLayout(STy);
8166 assert(Offset < (int64_t)SL->getSizeInBytes() &&
8167 "Offset must stay within the indexed type");
8169 unsigned Elt = SL->getElementContainingOffset(Offset);
8170 NewIndices.push_back(Context->getConstantInt(Type::Int32Ty, Elt));
8172 Offset -= SL->getElementOffset(Elt);
8173 Ty = STy->getElementType(Elt);
8174 } else if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
8175 uint64_t EltSize = TD->getTypeAllocSize(AT->getElementType());
8176 assert(EltSize && "Cannot index into a zero-sized array");
8177 NewIndices.push_back(Context->getConstantInt(IntPtrTy,Offset/EltSize));
8179 Ty = AT->getElementType();
8181 // Otherwise, we can't index into the middle of this atomic type, bail.
8189 /// @brief Implement the transforms for cast of pointer (bitcast/ptrtoint)
8190 Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
8191 Value *Src = CI.getOperand(0);
8193 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Src)) {
8194 // If casting the result of a getelementptr instruction with no offset, turn
8195 // this into a cast of the original pointer!
8196 if (GEP->hasAllZeroIndices()) {
8197 // Changing the cast operand is usually not a good idea but it is safe
8198 // here because the pointer operand is being replaced with another
8199 // pointer operand so the opcode doesn't need to change.
8201 CI.setOperand(0, GEP->getOperand(0));
8205 // If the GEP has a single use, and the base pointer is a bitcast, and the
8206 // GEP computes a constant offset, see if we can convert these three
8207 // instructions into fewer. This typically happens with unions and other
8208 // non-type-safe code.
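// Hypothetical instance (not from the original source): with %U of type
// { i32, i32 }*, the sequence
//   %p = bitcast { i32, i32 }* %U to i8*
//   %q = getelementptr i8* %p, i32 4
//   %r = bitcast i8* %q to i32*
// collapses to a single typed GEP selecting field 1 of %U, and the trailing
// bitcast disappears because the types already match.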
8209 if (GEP->hasOneUse() && isa<BitCastInst>(GEP->getOperand(0))) {
8210 if (GEP->hasAllConstantIndices()) {
8211 // We are guaranteed to get a constant from EmitGEPOffset.
8212 ConstantInt *OffsetV =
8213 cast<ConstantInt>(EmitGEPOffset(GEP, CI, *this));
8214 int64_t Offset = OffsetV->getSExtValue();
8216 // Get the base pointer input of the bitcast, and the type it points to.
8217 Value *OrigBase = cast<BitCastInst>(GEP->getOperand(0))->getOperand(0);
8218 const Type *GEPIdxTy =
8219 cast<PointerType>(OrigBase->getType())->getElementType();
8220 SmallVector<Value*, 8> NewIndices;
8221 if (FindElementAtOffset(GEPIdxTy, Offset, NewIndices, TD, Context)) {
8222 // If we were able to index down into an element, create the GEP
8223 // and bitcast the result. This eliminates one bitcast, potentially two.
8225 Instruction *NGEP = GetElementPtrInst::Create(OrigBase,
8227 NewIndices.end(), "");
8228 InsertNewInstBefore(NGEP, CI);
8229 NGEP->takeName(GEP);
8231 if (isa<BitCastInst>(CI))
8232 return new BitCastInst(NGEP, CI.getType());
8233 assert(isa<PtrToIntInst>(CI));
8234 return new PtrToIntInst(NGEP, CI.getType());
8240 return commonCastTransforms(CI);
8243 /// isSafeIntegerType - Return true if this is a basic integer type, not a crazy
8244 /// type like i42. We don't want to introduce operations on random non-legal
8245 /// integer types where they don't already exist in the code. In the future,
8246 /// we should consider making this based off target-data, so that 32-bit targets
8247 /// won't get i64 operations etc.
8248 static bool isSafeIntegerType(const Type *Ty) {
8249 switch (Ty->getPrimitiveSizeInBits()) {
8260 /// Only the TRUNC, ZEXT, and SEXT casts reach this function; it implements
8261 /// the common transforms for all those cases.
8262 /// @brief Implement the transforms common to CastInst with integer operands
8263 Instruction *InstCombiner::commonIntCastTransforms(CastInst &CI) {
8264 if (Instruction *Result = commonCastTransforms(CI))
8267 Value *Src = CI.getOperand(0);
8268 const Type *SrcTy = Src->getType();
8269 const Type *DestTy = CI.getType();
8270 uint32_t SrcBitSize = SrcTy->getScalarSizeInBits();
8271 uint32_t DestBitSize = DestTy->getScalarSizeInBits();
8273 // See if we can simplify any instructions used by the LHS whose sole
8274 // purpose is to compute bits we don't care about.
8275 if (SimplifyDemandedInstructionBits(CI))
8278 // If the source isn't an instruction or has more than one use then we
8279 // can't do anything more.
8280 Instruction *SrcI = dyn_cast<Instruction>(Src);
8281 if (!SrcI || !Src->hasOneUse())
8284 // Attempt to propagate the cast into the instruction for int->int casts.
8285 int NumCastsRemoved = 0;
8286 if (!isa<BitCastInst>(CI) &&
8287 // Only do this if the dest type is a simple type, don't convert the
8288 // expression tree to something weird like i93 unless the source is also
8290 (isSafeIntegerType(DestTy->getScalarType()) ||
8291 !isSafeIntegerType(SrcI->getType()->getScalarType())) &&
8292 CanEvaluateInDifferentType(SrcI, DestTy,
8293 CI.getOpcode(), NumCastsRemoved)) {
8294 // If this cast is a truncate, evaluating in a different type always
8295 // eliminates the cast, so it is always a win. If this is a zero-extension,
8296 // we need to do an AND to maintain the clear top-part of the computation,
8297 // so we require that the input have eliminated at least one cast. If this
8298 // is a sign extension, we insert two new casts (to do the extension) so we
8299 // require that two casts have been eliminated.
8300 bool DoXForm = false;
8301 bool JustReplace = false;
8302 switch (CI.getOpcode()) {
8304 // All the others use floating point so we shouldn't actually
8305 // get here because of the check above.
8306 LLVM_UNREACHABLE("Unknown cast type");
8307 case Instruction::Trunc:
8308 DoXForm = true;
8309 break;
8310 case Instruction::ZExt: {
8311 DoXForm = NumCastsRemoved >= 1;
8312 if (!DoXForm && 0) {
8313 // If it's unnecessary to issue an AND to clear the high bits, it's
8314 // always profitable to do this xform.
8315 Value *TryRes = EvaluateInDifferentType(SrcI, DestTy, false);
8316 APInt Mask(APInt::getBitsSet(DestBitSize, SrcBitSize, DestBitSize));
8317 if (MaskedValueIsZero(TryRes, Mask))
8318 return ReplaceInstUsesWith(CI, TryRes);
8320 if (Instruction *TryI = dyn_cast<Instruction>(TryRes))
8321 if (TryI->use_empty())
8322 EraseInstFromFunction(*TryI);
8326 case Instruction::SExt: {
8327 DoXForm = NumCastsRemoved >= 2;
8328 if (!DoXForm && !isa<TruncInst>(SrcI) && 0) {
8329 // If we do not have to emit the truncate + sext pair, then it's always
8330 // profitable to do this xform.
8332 // It's not safe to eliminate the trunc + sext pair if one of the
8333 // eliminated casts is a truncate, e.g.
8334 // t2 = trunc i32 t1 to i16
8335 // t3 = sext i16 t2 to i32
8338 Value *TryRes = EvaluateInDifferentType(SrcI, DestTy, true);
8339 unsigned NumSignBits = ComputeNumSignBits(TryRes);
8340 if (NumSignBits > (DestBitSize - SrcBitSize))
8341 return ReplaceInstUsesWith(CI, TryRes);
8343 if (Instruction *TryI = dyn_cast<Instruction>(TryRes))
8344 if (TryI->use_empty())
8345 EraseInstFromFunction(*TryI);
8352 DOUT << "ICE: EvaluateInDifferentType converting expression type to avoid"
8354 Value *Res = EvaluateInDifferentType(SrcI, DestTy,
8355 CI.getOpcode() == Instruction::SExt);
8357 // Just replace this cast with the result.
8358 return ReplaceInstUsesWith(CI, Res);
8360 assert(Res->getType() == DestTy);
8361 switch (CI.getOpcode()) {
8362 default: LLVM_UNREACHABLE("Unknown cast type!");
8363 case Instruction::Trunc:
8364 case Instruction::BitCast:
8365 // Just replace this cast with the result.
8366 return ReplaceInstUsesWith(CI, Res);
8367 case Instruction::ZExt: {
8368 assert(SrcBitSize < DestBitSize && "Not a zext?");
8370 // If the high bits are already zero, just replace this cast with the result.
8372 APInt Mask(APInt::getBitsSet(DestBitSize, SrcBitSize, DestBitSize));
8373 if (MaskedValueIsZero(Res, Mask))
8374 return ReplaceInstUsesWith(CI, Res);
8376 // We need to emit an AND to clear the high bits.
8377 Constant *C = Context->getConstantInt(APInt::getLowBitsSet(DestBitSize,
8379 return BinaryOperator::CreateAnd(Res, C);
8381 case Instruction::SExt: {
8382 // If the high bits are already filled with sign bit, just replace this
8383 // cast with the result.
8384 unsigned NumSignBits = ComputeNumSignBits(Res);
8385 if (NumSignBits > (DestBitSize - SrcBitSize))
8386 return ReplaceInstUsesWith(CI, Res);
8388 // We need to emit a cast to truncate, then a cast to sext.
8389 return CastInst::Create(Instruction::SExt,
8390 InsertCastBefore(Instruction::Trunc, Res, Src->getType(),
8397 Value *Op0 = SrcI->getNumOperands() > 0 ? SrcI->getOperand(0) : 0;
8398 Value *Op1 = SrcI->getNumOperands() > 1 ? SrcI->getOperand(1) : 0;
8400 switch (SrcI->getOpcode()) {
8401 case Instruction::Add:
8402 case Instruction::Mul:
8403 case Instruction::And:
8404 case Instruction::Or:
8405 case Instruction::Xor:
8406 // If we are discarding information, rewrite.
8407 if (DestBitSize <= SrcBitSize && DestBitSize != 1) {
8408 // Don't insert two casts if they cannot be eliminated. We allow
8409 // two casts to be inserted if the sizes are the same. This could
8410 // only be converting signedness, which is a noop.
8411 if (DestBitSize == SrcBitSize ||
8412 !ValueRequiresCast(CI.getOpcode(), Op1, DestTy,TD) ||
8413 !ValueRequiresCast(CI.getOpcode(), Op0, DestTy, TD)) {
8414 Instruction::CastOps opcode = CI.getOpcode();
8415 Value *Op0c = InsertCastBefore(opcode, Op0, DestTy, *SrcI);
8416 Value *Op1c = InsertCastBefore(opcode, Op1, DestTy, *SrcI);
8417 return BinaryOperator::Create(
8418 cast<BinaryOperator>(SrcI)->getOpcode(), Op0c, Op1c);
8422 // cast (xor bool X, true) to int --> xor (cast bool X to int), 1
8423 if (isa<ZExtInst>(CI) && SrcBitSize == 1 &&
8424 SrcI->getOpcode() == Instruction::Xor &&
8425 Op1 == Context->getConstantIntTrue() &&
8426 (!Op0->hasOneUse() || !isa<CmpInst>(Op0))) {
8427 Value *New = InsertCastBefore(Instruction::ZExt, Op0, DestTy, CI);
8428 return BinaryOperator::CreateXor(New,
8429 Context->getConstantInt(CI.getType(), 1));
8432 case Instruction::SDiv:
8433 case Instruction::UDiv:
8434 case Instruction::SRem:
8435 case Instruction::URem:
8436 // If we are just changing the sign, rewrite.
8437 if (DestBitSize == SrcBitSize) {
8438 // Don't insert two casts if they cannot be eliminated. We allow
8439 // two casts to be inserted if the sizes are the same. This could
8440 // only be converting signedness, which is a noop.
8441 if (!ValueRequiresCast(CI.getOpcode(), Op1, DestTy, TD) ||
8442 !ValueRequiresCast(CI.getOpcode(), Op0, DestTy, TD)) {
8443 Value *Op0c = InsertCastBefore(Instruction::BitCast,
8444 Op0, DestTy, *SrcI);
8445 Value *Op1c = InsertCastBefore(Instruction::BitCast,
8446 Op1, DestTy, *SrcI);
8447 return BinaryOperator::Create(
8448 cast<BinaryOperator>(SrcI)->getOpcode(), Op0c, Op1c);
8453 case Instruction::Shl:
8454 // Allow changing the sign of the source operand. Do not allow
8455 // changing the size of the shift, UNLESS the shift amount is a
8456 // constant. We must not change variable sized shifts to a smaller
8457 // size, because it is undefined to shift more bits out than exist in the value.
8459 if (DestBitSize == SrcBitSize ||
8460 (DestBitSize < SrcBitSize && isa<Constant>(Op1))) {
8461 Instruction::CastOps opcode = (DestBitSize == SrcBitSize ?
8462 Instruction::BitCast : Instruction::Trunc);
8463 Value *Op0c = InsertCastBefore(opcode, Op0, DestTy, *SrcI);
8464 Value *Op1c = InsertCastBefore(opcode, Op1, DestTy, *SrcI);
8465 return BinaryOperator::CreateShl(Op0c, Op1c);
8468 case Instruction::AShr:
8469 // If this is a signed shr, and if all bits shifted in are about to be
8470 // truncated off, turn it into an unsigned shr to allow greater simplification.
8472 if (DestBitSize < SrcBitSize &&
8473 isa<ConstantInt>(Op1)) {
8474 uint32_t ShiftAmt = cast<ConstantInt>(Op1)->getLimitedValue(SrcBitSize);
8475 if (SrcBitSize > ShiftAmt && SrcBitSize-ShiftAmt >= DestBitSize) {
8476 // Insert the new logical shift right.
8477 return BinaryOperator::CreateLShr(Op0, Op1);
8485 Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
8486 if (Instruction *Result = commonIntCastTransforms(CI))
8489 Value *Src = CI.getOperand(0);
8490 const Type *Ty = CI.getType();
8491 uint32_t DestBitWidth = Ty->getScalarSizeInBits();
8492 uint32_t SrcBitWidth = Src->getType()->getScalarSizeInBits();
8494 // Canonicalize trunc x to i1 -> (icmp ne (and x, 1), 0)
8495 if (DestBitWidth == 1 &&
8496 isa<VectorType>(Ty) == isa<VectorType>(Src->getType())) {
8497 Constant *One = Context->getConstantInt(Src->getType(), 1);
8498 Src = InsertNewInstBefore(BinaryOperator::CreateAnd(Src, One, "tmp"), CI);
8499 Value *Zero = Context->getNullValue(Src->getType());
8500 return new ICmpInst(*Context, ICmpInst::ICMP_NE, Src, Zero);
8503 // Optimize trunc(lshr(), c) to pull the shift through the truncate.
8504 ConstantInt *ShAmtV = 0;
8506 if (Src->hasOneUse() &&
8507 match(Src, m_LShr(m_Value(ShiftOp), m_ConstantInt(ShAmtV)), *Context)) {
8508 uint32_t ShAmt = ShAmtV->getLimitedValue(SrcBitWidth);
8510 // Get a mask for the bits shifting in.
8511 APInt Mask(APInt::getLowBitsSet(SrcBitWidth, ShAmt).shl(DestBitWidth));
8512 if (MaskedValueIsZero(ShiftOp, Mask)) {
8513 if (ShAmt >= DestBitWidth) // All zeros.
8514 return ReplaceInstUsesWith(CI, Context->getNullValue(Ty));
8516 // Okay, we can shrink this. Truncate the input, then return a new shift.
8518 Value *V1 = InsertCastBefore(Instruction::Trunc, ShiftOp, Ty, CI);
8519 Value *V2 = Context->getConstantExprTrunc(ShAmtV, Ty);
8520 return BinaryOperator::CreateLShr(V1, V2);
8527 /// transformZExtICmp - Transform (zext icmp) to bitwise / integer operations
8528 /// in order to eliminate the icmp.
8529 Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI,
8531 // If we are just checking for an icmp eq of a single bit and zext'ing it
8532 // to an integer, then shift the bit to the appropriate place and then
8533 // cast to integer to avoid the comparison.
8534 if (ConstantInt *Op1C = dyn_cast<ConstantInt>(ICI->getOperand(1))) {
8535 const APInt &Op1CV = Op1C->getValue();
8537 // zext (x <s 0) to i32 --> x>>u31 true if signbit set.
8538 // zext (x >s -1) to i32 --> (x>>u31)^1 true if signbit clear.
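// For instance (illustrative, not from the original source):
//   %c = icmp slt i32 %x, 0
//   %r = zext i1 %c to i32
// becomes
//   %r = lshr i32 %x, 31        ; 1 iff the sign bit of %x is set
// and the ">s -1" form additionally xor's the result with 1 to invert it.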
8539 if ((ICI->getPredicate() == ICmpInst::ICMP_SLT && Op1CV == 0) ||
8540 (ICI->getPredicate() == ICmpInst::ICMP_SGT &&Op1CV.isAllOnesValue())) {
8541 if (!DoXform) return ICI;
8543 Value *In = ICI->getOperand(0);
8544 Value *Sh = Context->getConstantInt(In->getType(),
8545 In->getType()->getScalarSizeInBits()-1);
8546 In = InsertNewInstBefore(BinaryOperator::CreateLShr(In, Sh,
8547 In->getName()+".lobit"),
8549 if (In->getType() != CI.getType())
8550 In = CastInst::CreateIntegerCast(In, CI.getType(),
8551 false/*ZExt*/, "tmp", &CI);
8553 if (ICI->getPredicate() == ICmpInst::ICMP_SGT) {
8554 Constant *One = Context->getConstantInt(In->getType(), 1);
8555 In = InsertNewInstBefore(BinaryOperator::CreateXor(In, One,
8556 In->getName()+".not"),
8560 return ReplaceInstUsesWith(CI, In);
8565 // zext (X == 0) to i32 --> X^1 iff X has only the low bit set.
8566 // zext (X == 0) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
8567 // zext (X == 1) to i32 --> X iff X has only the low bit set.
8568 // zext (X == 2) to i32 --> X>>1 iff X has only the 2nd bit set.
8569 // zext (X != 0) to i32 --> X iff X has only the low bit set.
8570 // zext (X != 0) to i32 --> X>>1 iff X has only the 2nd bit set.
8571 // zext (X != 1) to i32 --> X^1 iff X has only the low bit set.
8572 // zext (X != 2) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
8573 if ((Op1CV == 0 || Op1CV.isPowerOf2()) &&
8574 // This only works for EQ and NE
8575 ICI->isEquality()) {
8576 // If Op1C is some other power of two, convert:
8577 uint32_t BitWidth = Op1C->getType()->getBitWidth();
8578 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
8579 APInt TypeMask(APInt::getAllOnesValue(BitWidth));
8580 ComputeMaskedBits(ICI->getOperand(0), TypeMask, KnownZero, KnownOne);
8582 APInt KnownZeroMask(~KnownZero);
8583 if (KnownZeroMask.isPowerOf2()) { // Exactly 1 possible 1?
8584 if (!DoXform) return ICI;
8586 bool isNE = ICI->getPredicate() == ICmpInst::ICMP_NE;
8587 if (Op1CV != 0 && (Op1CV != KnownZeroMask)) {
8588 // (X&4) == 2 --> false
8589 // (X&4) != 2 --> true
8590 Constant *Res = Context->getConstantInt(Type::Int1Ty, isNE);
8591 Res = Context->getConstantExprZExt(Res, CI.getType());
8592 return ReplaceInstUsesWith(CI, Res);
8595 uint32_t ShiftAmt = KnownZeroMask.logBase2();
8596 Value *In = ICI->getOperand(0);
8598 // Perform a logical shr by shiftamt.
8599 // Insert the shift to put the result in the low bit.
8600 In = InsertNewInstBefore(BinaryOperator::CreateLShr(In,
8601 Context->getConstantInt(In->getType(), ShiftAmt),
8602 In->getName()+".lobit"), CI);
8605 if ((Op1CV != 0) == isNE) { // Toggle the low bit.
8606 Constant *One = Context->getConstantInt(In->getType(), 1);
8607 In = BinaryOperator::CreateXor(In, One, "tmp");
8608 InsertNewInstBefore(cast<Instruction>(In), CI);
8611 if (CI.getType() == In->getType())
8612 return ReplaceInstUsesWith(CI, In);
8614 return CastInst::CreateIntegerCast(In, CI.getType(), false/*ZExt*/);
8622 Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
8623 // If one of the common conversions will work, do it.
8624 if (Instruction *Result = commonIntCastTransforms(CI))
8627 Value *Src = CI.getOperand(0);
8629 // If this is a TRUNC followed by a ZEXT then we are dealing with integral
8630 // types and if the sizes are just right we can convert this into a logical
8631 // 'and' which will be much cheaper than the pair of casts.
8632 if (TruncInst *CSrc = dyn_cast<TruncInst>(Src)) { // A->B->C cast
8633 // Get the sizes of the types involved. We know that the intermediate type
8634 // will be smaller than A or C, but don't know the relation between A and C.
8635 Value *A = CSrc->getOperand(0);
8636 unsigned SrcSize = A->getType()->getScalarSizeInBits();
8637 unsigned MidSize = CSrc->getType()->getScalarSizeInBits();
8638 unsigned DstSize = CI.getType()->getScalarSizeInBits();
8639 // If we're actually extending zero bits, then if
8640 // SrcSize < DstSize: zext(a & mask)
8641 // SrcSize == DstSize: a & mask
8642 // SrcSize > DstSize: trunc(a) & mask
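// E.g. (illustrative only) %t = trunc i32 %a to i16 followed by
// %z = zext i16 %t to i32 is the SrcSize == DstSize case and collapses to
// %z = and i32 %a, 65535.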
8643 if (SrcSize < DstSize) {
8644 APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
8645 Constant *AndConst = Context->getConstantInt(A->getType(), AndValue);
8647 BinaryOperator::CreateAnd(A, AndConst, CSrc->getName()+".mask");
8648 InsertNewInstBefore(And, CI);
8649 return new ZExtInst(And, CI.getType());
8650 } else if (SrcSize == DstSize) {
8651 APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
8652 return BinaryOperator::CreateAnd(A, Context->getConstantInt(A->getType(),
8654 } else if (SrcSize > DstSize) {
8655 Instruction *Trunc = new TruncInst(A, CI.getType(), "tmp");
8656 InsertNewInstBefore(Trunc, CI);
8657 APInt AndValue(APInt::getLowBitsSet(DstSize, MidSize));
8658 return BinaryOperator::CreateAnd(Trunc,
8659 Context->getConstantInt(Trunc->getType(),
8664 if (ICmpInst *ICI = dyn_cast<ICmpInst>(Src))
8665 return transformZExtICmp(ICI, CI);
8667 BinaryOperator *SrcI = dyn_cast<BinaryOperator>(Src);
8668 if (SrcI && SrcI->getOpcode() == Instruction::Or) {
8669 // zext (or icmp, icmp) --> or (zext icmp), (zext icmp) if at least one
8670 // of the (zext icmp) will be transformed.
8671 ICmpInst *LHS = dyn_cast<ICmpInst>(SrcI->getOperand(0));
8672 ICmpInst *RHS = dyn_cast<ICmpInst>(SrcI->getOperand(1));
8673 if (LHS && RHS && LHS->hasOneUse() && RHS->hasOneUse() &&
8674 (transformZExtICmp(LHS, CI, false) ||
8675 transformZExtICmp(RHS, CI, false))) {
8676 Value *LCast = InsertCastBefore(Instruction::ZExt, LHS, CI.getType(), CI);
8677 Value *RCast = InsertCastBefore(Instruction::ZExt, RHS, CI.getType(), CI);
8678 return BinaryOperator::Create(Instruction::Or, LCast, RCast);
8682 // zext(trunc(t) & C) -> (t & zext(C)).
8683 if (SrcI && SrcI->getOpcode() == Instruction::And && SrcI->hasOneUse())
8684 if (ConstantInt *C = dyn_cast<ConstantInt>(SrcI->getOperand(1)))
8685 if (TruncInst *TI = dyn_cast<TruncInst>(SrcI->getOperand(0))) {
8686 Value *TI0 = TI->getOperand(0);
8687 if (TI0->getType() == CI.getType())
8689 BinaryOperator::CreateAnd(TI0,
8690 Context->getConstantExprZExt(C, CI.getType()));
8693 // zext((trunc(t) & C) ^ C) -> ((t & zext(C)) ^ zext(C)).
8694 if (SrcI && SrcI->getOpcode() == Instruction::Xor && SrcI->hasOneUse())
8695 if (ConstantInt *C = dyn_cast<ConstantInt>(SrcI->getOperand(1)))
8696 if (BinaryOperator *And = dyn_cast<BinaryOperator>(SrcI->getOperand(0)))
8697 if (And->getOpcode() == Instruction::And && And->hasOneUse() &&
8698 And->getOperand(1) == C)
8699 if (TruncInst *TI = dyn_cast<TruncInst>(And->getOperand(0))) {
8700 Value *TI0 = TI->getOperand(0);
8701 if (TI0->getType() == CI.getType()) {
8702 Constant *ZC = Context->getConstantExprZExt(C, CI.getType());
8703 Instruction *NewAnd = BinaryOperator::CreateAnd(TI0, ZC, "tmp");
8704 InsertNewInstBefore(NewAnd, *And);
8705 return BinaryOperator::CreateXor(NewAnd, ZC);
8712 Instruction *InstCombiner::visitSExt(SExtInst &CI) {
8713 if (Instruction *I = commonIntCastTransforms(CI))
8716 Value *Src = CI.getOperand(0);
8718 // Canonicalize sign-extend from i1 to a select.
8719 if (Src->getType() == Type::Int1Ty)
8720 return SelectInst::Create(Src,
8721 Context->getConstantIntAllOnesValue(CI.getType()),
8722 Context->getNullValue(CI.getType()));
8724 // See if the value being truncated is already sign extended. If so, just
8725 // eliminate the trunc/sext pair.
8726 if (getOpcode(Src) == Instruction::Trunc) {
8727 Value *Op = cast<User>(Src)->getOperand(0);
8728 unsigned OpBits = Op->getType()->getScalarSizeInBits();
8729 unsigned MidBits = Src->getType()->getScalarSizeInBits();
8730 unsigned DestBits = CI.getType()->getScalarSizeInBits();
8731 unsigned NumSignBits = ComputeNumSignBits(Op);
8733 if (OpBits == DestBits) {
8734 // Op is i32, Mid is i8, and Dest is i32. If Op has more than 24 sign
8735 // bits, it is already ready.
8736 if (NumSignBits > DestBits-MidBits)
8737 return ReplaceInstUsesWith(CI, Op);
8738 } else if (OpBits < DestBits) {
8739 // Op is i32, Mid is i8, and Dest is i64. If Op has more than 24 sign
8740 // bits, just sext from i32.
8741 if (NumSignBits > OpBits-MidBits)
8742 return new SExtInst(Op, CI.getType(), "tmp");
8744 // Op is i64, Mid is i8, and Dest is i32. If Op has more than 56 sign
8745 // bits, just truncate to i32.
8746 if (NumSignBits > OpBits-MidBits)
8747 return new TruncInst(Op, CI.getType(), "tmp");
8751 // If the input is a shl/ashr pair of a same constant, then this is a sign
8752 // extension from a smaller value. If we could trust arbitrary bitwidth
8753 // integers, we could turn this into a truncate to the smaller bit and then
8754 // use a sext for the whole extension. Since we don't, look deeper and check
8755 // for a truncate. If the source and dest are the same type, eliminate the
8756 // trunc and extend and just do shifts. For example, turn:
8757 // %a = trunc i32 %i to i8
8758 // %b = shl i8 %a, 6
8759 // %c = ashr i8 %b, 6
8760 // %d = sext i8 %c to i32
8761 // into:
8762 //   %a = shl i32 %i, 30
8763 // %d = ashr i32 %a, 30
8765 ConstantInt *BA = 0, *CA = 0;
8766 if (match(Src, m_AShr(m_Shl(m_Value(A), m_ConstantInt(BA)),
8767 m_ConstantInt(CA)), *Context) &&
8768 BA == CA && isa<TruncInst>(A)) {
8769 Value *I = cast<TruncInst>(A)->getOperand(0);
8770 if (I->getType() == CI.getType()) {
8771 unsigned MidSize = Src->getType()->getScalarSizeInBits();
8772 unsigned SrcDstSize = CI.getType()->getScalarSizeInBits();
8773 unsigned ShAmt = CA->getZExtValue()+SrcDstSize-MidSize;
8774 Constant *ShAmtV = Context->getConstantInt(CI.getType(), ShAmt);
8775 I = InsertNewInstBefore(BinaryOperator::CreateShl(I, ShAmtV,
8777 return BinaryOperator::CreateAShr(I, ShAmtV);
8784 /// FitsInFPType - Return a Constant* for the specified FP constant if it fits
8785 /// in the specified FP type without changing its value.
8786 static Constant *FitsInFPType(ConstantFP *CFP, const fltSemantics &Sem,
8787 LLVMContext *Context) {
8789 APFloat F = CFP->getValueAPF();
8790 (void)F.convert(Sem, APFloat::rmNearestTiesToEven, &losesInfo);
8792 return Context->getConstantFP(F);
8796 /// LookThroughFPExtensions - If this is an fp extension instruction, look
8797 /// through it until we get the source value.
8798 static Value *LookThroughFPExtensions(Value *V, LLVMContext *Context) {
8799 if (Instruction *I = dyn_cast<Instruction>(V))
8800 if (I->getOpcode() == Instruction::FPExt)
8801 return LookThroughFPExtensions(I->getOperand(0), Context);
8803 // If this value is a constant, return the constant in the smallest FP type
8804 // that can accurately represent it. This allows us to turn
8805 // (float)((double)X+2.0) into x+2.0f.
8806 if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
8807 if (CFP->getType() == Type::PPC_FP128Ty)
8808 return V; // No constant folding of this.
8809 // See if the value can be truncated to float and then reextended.
8810 if (Value *V = FitsInFPType(CFP, APFloat::IEEEsingle, Context))
8812 if (CFP->getType() == Type::DoubleTy)
8813 return V; // Won't shrink.
8814 if (Value *V = FitsInFPType(CFP, APFloat::IEEEdouble, Context))
8816 // Don't try to shrink to various long double types.
8822 Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
8823 if (Instruction *I = commonCastTransforms(CI))
8826 // If we have fptrunc(fadd (fpextend x), (fpextend y)), where x and y are
8827 // smaller than the destination type, we can eliminate the truncate by doing
8828 // the add as the smaller type. This applies to fadd/fsub/fmul/fdiv as well as
8829 // many builtins (sqrt, etc).
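// Sketch of the intended rewrite (illustrative, not from the original source):
//   %xd = fpext float %x to double
//   %yd = fpext float %y to double
//   %s  = fadd double %xd, %yd
//   %r  = fptrunc double %s to float
// becomes %r = fadd float %x, %y, since both source values fit in float.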
8830 BinaryOperator *OpI = dyn_cast<BinaryOperator>(CI.getOperand(0));
8831 if (OpI && OpI->hasOneUse()) {
8832 switch (OpI->getOpcode()) {
8834 case Instruction::FAdd:
8835 case Instruction::FSub:
8836 case Instruction::FMul:
8837 case Instruction::FDiv:
8838 case Instruction::FRem:
8839 const Type *SrcTy = OpI->getType();
8840 Value *LHSTrunc = LookThroughFPExtensions(OpI->getOperand(0), Context);
8841 Value *RHSTrunc = LookThroughFPExtensions(OpI->getOperand(1), Context);
8842 if (LHSTrunc->getType() != SrcTy &&
8843 RHSTrunc->getType() != SrcTy) {
8844 unsigned DstSize = CI.getType()->getScalarSizeInBits();
8845 // If the source types were both smaller than the destination type of
8846 // the cast, do this xform.
8847 if (LHSTrunc->getType()->getScalarSizeInBits() <= DstSize &&
8848 RHSTrunc->getType()->getScalarSizeInBits() <= DstSize) {
8849 LHSTrunc = InsertCastBefore(Instruction::FPExt, LHSTrunc,
8851 RHSTrunc = InsertCastBefore(Instruction::FPExt, RHSTrunc,
8853 return BinaryOperator::Create(OpI->getOpcode(), LHSTrunc, RHSTrunc);
8862 Instruction *InstCombiner::visitFPExt(CastInst &CI) {
8863 return commonCastTransforms(CI);
8866 Instruction *InstCombiner::visitFPToUI(FPToUIInst &FI) {
8867 Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
8869 return commonCastTransforms(FI);
8871 // fptoui(uitofp(X)) --> X
8872 // fptoui(sitofp(X)) --> X
8873 // This is safe if the intermediate type has enough bits in its mantissa to
8874 // accurately represent all values of X. For example, do not do this with
8875 // i64->float->i64. This is also safe for sitofp case, because any negative
8876 // 'X' value would cause an undefined result for the fptoui.
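// For example (illustrative only): %f = uitofp i16 %x to float followed by
// %r = fptoui float %f to i16 simplifies to %x, because float's 24-bit
// mantissa represents every i16 value exactly.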
8877 if ((isa<UIToFPInst>(OpI) || isa<SIToFPInst>(OpI)) &&
8878 OpI->getOperand(0)->getType() == FI.getType() &&
8879 (int)FI.getType()->getScalarSizeInBits() < /*extra bit for sign */
8880 OpI->getType()->getFPMantissaWidth())
8881 return ReplaceInstUsesWith(FI, OpI->getOperand(0));
8883 return commonCastTransforms(FI);
8886 Instruction *InstCombiner::visitFPToSI(FPToSIInst &FI) {
8887 Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
8889 return commonCastTransforms(FI);
8891 // fptosi(sitofp(X)) --> X
8892 // fptosi(uitofp(X)) --> X
8893 // This is safe if the intermediate type has enough bits in its mantissa to
8894 // accurately represent all values of X. For example, do not do this with
8895 // i64->float->i64. This is also safe for sitofp case, because any negative
8896 // 'X' value would cause an undefined result for the fptoui.
8897 if ((isa<UIToFPInst>(OpI) || isa<SIToFPInst>(OpI)) &&
8898 OpI->getOperand(0)->getType() == FI.getType() &&
8899 (int)FI.getType()->getScalarSizeInBits() <=
8900 OpI->getType()->getFPMantissaWidth())
8901 return ReplaceInstUsesWith(FI, OpI->getOperand(0));
8903 return commonCastTransforms(FI);
8906 Instruction *InstCombiner::visitUIToFP(CastInst &CI) {
8907 return commonCastTransforms(CI);
8910 Instruction *InstCombiner::visitSIToFP(CastInst &CI) {
8911 return commonCastTransforms(CI);
8914 Instruction *InstCombiner::visitPtrToInt(PtrToIntInst &CI) {
8915 // If the destination integer type is smaller than the intptr_t type for
8916 // this target, do a ptrtoint to intptr_t then do a trunc. This allows the
8917 // trunc to be exposed to other transforms. Don't do this for extending
8918 // ptrtoint's, because we don't know if the target sign or zero extends its pointers.
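// Illustrative only, assuming a target with 64-bit pointers:
//   %r = ptrtoint i8* %p to i32
// becomes
//   %t = ptrtoint i8* %p to i64
//   %r = trunc i64 %t to i32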
8920 if (CI.getType()->getScalarSizeInBits() < TD->getPointerSizeInBits()) {
8921 Value *P = InsertNewInstBefore(new PtrToIntInst(CI.getOperand(0),
8922 TD->getIntPtrType(),
8924 return new TruncInst(P, CI.getType());
8927 return commonPointerCastTransforms(CI);
8930 Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
8931 // If the source integer type is larger than the intptr_t type for
8932 // this target, do a trunc to the intptr_t type, then inttoptr of it. This
8933 // allows the trunc to be exposed to other transforms. Don't do this for
8934 // extending inttoptr's, because we don't know if the target sign or zero
8935 // extends to pointers.
8936 if (CI.getOperand(0)->getType()->getScalarSizeInBits() >
8937 TD->getPointerSizeInBits()) {
8938 Value *P = InsertNewInstBefore(new TruncInst(CI.getOperand(0),
8939 TD->getIntPtrType(),
8941 return new IntToPtrInst(P, CI.getType());
8944 if (Instruction *I = commonCastTransforms(CI))
8947 const Type *DestPointee = cast<PointerType>(CI.getType())->getElementType();
8948 if (!DestPointee->isSized()) return 0;
8950 // If this is inttoptr(add (ptrtoint x), cst), try to turn this into a GEP.
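// Hypothetical instance (not from the original source), assuming 64-bit
// pointers and a 4-byte i32:
//   %i = ptrtoint i32* %p to i64
//   %j = add i64 %i, 8
//   %q = inttoptr i64 %j to i32*
// becomes %q = getelementptr i32* %p, i64 2.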
8953 if (match(CI.getOperand(0), m_Add(m_Cast<PtrToIntInst>(m_Value(X)),
8954 m_ConstantInt(Cst)), *Context)) {
8955 // If the source and destination operands have the same type, see if this
8956 // is a single-index GEP.
8957 if (X->getType() == CI.getType()) {
8958 // Get the size of the pointee type.
8959 uint64_t Size = TD->getTypeAllocSize(DestPointee);
8961 // Convert the constant to intptr type.
8962 APInt Offset = Cst->getValue();
8963 Offset.sextOrTrunc(TD->getPointerSizeInBits());
8965 // If Offset is evenly divisible by Size, we can do this xform.
8966 if (Size && !APIntOps::srem(Offset, APInt(Offset.getBitWidth(), Size))){
8967 Offset = APIntOps::sdiv(Offset, APInt(Offset.getBitWidth(), Size));
8968 return GetElementPtrInst::Create(X, Context->getConstantInt(Offset));
8971 // TODO: Could handle other cases, e.g. where add is indexing into field of
8973 } else if (CI.getOperand(0)->hasOneUse() &&
8974 match(CI.getOperand(0), m_Add(m_Value(X),
8975 m_ConstantInt(Cst)), *Context)) {
8976 // Otherwise, if this is inttoptr(add x, cst), try to turn this into an
8977 // "inttoptr+GEP" instead of "add+intptr".
8979 // Get the size of the pointee type.
8980 uint64_t Size = TD->getTypeAllocSize(DestPointee);
8982 // Convert the constant to intptr type.
8983 APInt Offset = Cst->getValue();
8984 Offset.sextOrTrunc(TD->getPointerSizeInBits());
8986 // If Offset is evenly divisible by Size, we can do this xform.
8987 if (Size && !APIntOps::srem(Offset, APInt(Offset.getBitWidth(), Size))){
8988 Offset = APIntOps::sdiv(Offset, APInt(Offset.getBitWidth(), Size));
8990 Instruction *P = InsertNewInstBefore(new IntToPtrInst(X, CI.getType(),
8992 return GetElementPtrInst::Create(P,
8993 Context->getConstantInt(Offset), "tmp");
8999 Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
9000 // If the operands are integer typed then apply the integer transforms,
9001 // otherwise just apply the common ones.
9002 Value *Src = CI.getOperand(0);
9003 const Type *SrcTy = Src->getType();
9004 const Type *DestTy = CI.getType();
9006 if (isa<PointerType>(SrcTy)) {
9007 if (Instruction *I = commonPointerCastTransforms(CI))
9010 if (Instruction *Result = commonCastTransforms(CI))
9015 // Get rid of casts from one type to the same type. These are useless and can
9016 // be replaced by the operand.
9017 if (DestTy == Src->getType())
9018 return ReplaceInstUsesWith(CI, Src);
9020 if (const PointerType *DstPTy = dyn_cast<PointerType>(DestTy)) {
9021 const PointerType *SrcPTy = cast<PointerType>(SrcTy);
9022 const Type *DstElTy = DstPTy->getElementType();
9023 const Type *SrcElTy = SrcPTy->getElementType();
9025 // If the address spaces don't match, don't eliminate the bitcast, which is
9026 // required for changing types.
9027 if (SrcPTy->getAddressSpace() != DstPTy->getAddressSpace())
9030 // If we are casting a malloc or alloca to a pointer to a type of the same
9031 // size, rewrite the allocation instruction to allocate the "right" type.
9032 if (AllocationInst *AI = dyn_cast<AllocationInst>(Src))
9033 if (Instruction *V = PromoteCastOfAllocation(CI, *AI))
9036 // If the source and destination are pointers, and this cast is equivalent
9037 // to a getelementptr X, 0, 0, 0... turn it into the appropriate gep.
9038 // This can enhance SROA and other transforms that want type-safe pointers.
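// A possible instance (illustrative only):
//   %p = bitcast { i32, float }* %s to i32*
// becomes
//   %p = getelementptr { i32, float }* %s, i32 0, i32 0
// because the first field of the struct starts at the same address.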
9039 Constant *ZeroUInt = Context->getNullValue(Type::Int32Ty);
9040 unsigned NumZeros = 0;
9041 while (SrcElTy != DstElTy &&
9042 isa<CompositeType>(SrcElTy) && !isa<PointerType>(SrcElTy) &&
9043 SrcElTy->getNumContainedTypes() /* not "{}" */) {
9044 SrcElTy = cast<CompositeType>(SrcElTy)->getTypeAtIndex(ZeroUInt);
9048 // If we found a path from the src to dest, create the getelementptr now.
9049 if (SrcElTy == DstElTy) {
9050 SmallVector<Value*, 8> Idxs(NumZeros+1, ZeroUInt);
9051 return GetElementPtrInst::Create(Src, Idxs.begin(), Idxs.end(), "",
9052 ((Instruction*) NULL));
9056 if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(Src)) {
9057 if (SVI->hasOneUse()) {
9058 // Okay, we have (bitconvert (shuffle ..)). Check to see if this is
9059 // a bitconvert to a vector with the same # elts.
9060 if (isa<VectorType>(DestTy) &&
9061 cast<VectorType>(DestTy)->getNumElements() ==
9062 SVI->getType()->getNumElements() &&
9063 SVI->getType()->getNumElements() ==
9064 cast<VectorType>(SVI->getOperand(0)->getType())->getNumElements()) {
9066 // If either of the operands is a cast from CI.getType(), then
9067 // evaluating the shuffle in the casted destination's type will allow
9068 // us to eliminate at least one cast.
9069 if (((Tmp = dyn_cast<CastInst>(SVI->getOperand(0))) &&
9070 Tmp->getOperand(0)->getType() == DestTy) ||
9071 ((Tmp = dyn_cast<CastInst>(SVI->getOperand(1))) &&
9072 Tmp->getOperand(0)->getType() == DestTy)) {
9073 Value *LHS = InsertCastBefore(Instruction::BitCast,
9074 SVI->getOperand(0), DestTy, CI);
9075 Value *RHS = InsertCastBefore(Instruction::BitCast,
9076 SVI->getOperand(1), DestTy, CI);
9077 // Return a new shuffle vector. Use the same element ID's, as we
9078 // know the vector types match #elts.
9079 return new ShuffleVectorInst(LHS, RHS, SVI->getOperand(2));
9087 /// GetSelectFoldableOperands - We want to turn code that looks like this:
9088 ///   %C = or %A, %B
9089 ///   %D = select %cond, %C, %A
9090 /// into:
9091 ///   %C = select %cond, %B, 0
9092 ///   %D = or %A, %C
9094 /// Assuming that the specified instruction is an operand to the select, return
9095 /// a bitmask indicating which operands of this instruction are foldable if they
9096 /// equal the other incoming value of the select.
9098 static unsigned GetSelectFoldableOperands(Instruction *I) {
9099 switch (I->getOpcode()) {
9100 case Instruction::Add:
9101 case Instruction::Mul:
9102 case Instruction::And:
9103 case Instruction::Or:
9104 case Instruction::Xor:
9105 return 3; // Can fold through either operand.
9106 case Instruction::Sub: // Can only fold on the amount subtracted.
9107 case Instruction::Shl: // Can only fold on the shift amount.
9108 case Instruction::LShr:
9109 case Instruction::AShr:
9110 return 1;
9111 default:
9112 return 0; // Cannot fold
9116 /// GetSelectFoldableConstant - For the same transformation as the previous
9117 /// function, return the identity constant that goes into the select.
9118 static Constant *GetSelectFoldableConstant(Instruction *I,
9119 LLVMContext *Context) {
9120 switch (I->getOpcode()) {
9121 default: LLVM_UNREACHABLE("This cannot happen!");
9122 case Instruction::Add:
9123 case Instruction::Sub:
9124 case Instruction::Or:
9125 case Instruction::Xor:
9126 case Instruction::Shl:
9127 case Instruction::LShr:
9128 case Instruction::AShr:
9129 return Context->getNullValue(I->getType());
9130 case Instruction::And:
9131 return Context->getAllOnesValue(I->getType());
9132 case Instruction::Mul:
9133 return Context->getConstantInt(I->getType(), 1);
9137 /// FoldSelectOpOp - Here we have (select c, TI, FI), and we know that TI and FI
9138 /// have the same opcode and only one use each. Try to simplify this.
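// For example (illustrative, not from the original source), when TI and FI
// share their first operand:
//   select i1 %c, (add i32 %x, %y), (add i32 %x, %z)
// folds to
//   add i32 %x, (select i1 %c, i32 %y, i32 %z)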
9139 Instruction *InstCombiner::FoldSelectOpOp(SelectInst &SI, Instruction *TI,
9141 if (TI->getNumOperands() == 1) {
9142 // If this is a non-volatile load or a cast from the same type, merge.
9145 if (TI->getOperand(0)->getType() != FI->getOperand(0)->getType())
9148 return 0; // unknown unary op.
9151 // Fold this by inserting a select from the input values.
9152 SelectInst *NewSI = SelectInst::Create(SI.getCondition(), TI->getOperand(0),
9153 FI->getOperand(0), SI.getName()+".v");
9154 InsertNewInstBefore(NewSI, SI);
9155 return CastInst::Create(Instruction::CastOps(TI->getOpcode()), NewSI,
9159 // Only handle binary operators here.
9160 if (!isa<BinaryOperator>(TI))
9163 // Figure out if the operations have any operands in common.
9164 Value *MatchOp, *OtherOpT, *OtherOpF;
9166 if (TI->getOperand(0) == FI->getOperand(0)) {
9167 MatchOp = TI->getOperand(0);
9168 OtherOpT = TI->getOperand(1);
9169 OtherOpF = FI->getOperand(1);
9170 MatchIsOpZero = true;
9171 } else if (TI->getOperand(1) == FI->getOperand(1)) {
9172 MatchOp = TI->getOperand(1);
9173 OtherOpT = TI->getOperand(0);
9174 OtherOpF = FI->getOperand(0);
9175 MatchIsOpZero = false;
9176 } else if (!TI->isCommutative()) {
9178 } else if (TI->getOperand(0) == FI->getOperand(1)) {
9179 MatchOp = TI->getOperand(0);
9180 OtherOpT = TI->getOperand(1);
9181 OtherOpF = FI->getOperand(0);
9182 MatchIsOpZero = true;
9183 } else if (TI->getOperand(1) == FI->getOperand(0)) {
9184 MatchOp = TI->getOperand(1);
9185 OtherOpT = TI->getOperand(0);
9186 OtherOpF = FI->getOperand(1);
9187 MatchIsOpZero = true;
9192 // If we reach here, they do have operations in common.
9193 SelectInst *NewSI = SelectInst::Create(SI.getCondition(), OtherOpT,
9194 OtherOpF, SI.getName()+".v");
9195 InsertNewInstBefore(NewSI, SI);
9197 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(TI)) {
9199 return BinaryOperator::Create(BO->getOpcode(), MatchOp, NewSI);
9201 return BinaryOperator::Create(BO->getOpcode(), NewSI, MatchOp);
9203 LLVM_UNREACHABLE("Shouldn't get here");
9207 static bool isSelect01(Constant *C1, Constant *C2) {
9208 ConstantInt *C1I = dyn_cast<ConstantInt>(C1);
9211 ConstantInt *C2I = dyn_cast<ConstantInt>(C2);
9214 return (C1I->isZero() || C1I->isOne()) && (C2I->isZero() || C2I->isOne());
9217 /// FoldSelectIntoOp - Try to fold the select into one of the operands to
9218 /// facilitate further optimization.
9219 Instruction *InstCombiner::FoldSelectIntoOp(SelectInst &SI, Value *TrueVal,
9221 // See the comment above GetSelectFoldableOperands for a description of the
9222 // transformation we are doing here.
9223 if (Instruction *TVI = dyn_cast<Instruction>(TrueVal)) {
9224 if (TVI->hasOneUse() && TVI->getNumOperands() == 2 &&
9225 !isa<Constant>(FalseVal)) {
9226 if (unsigned SFO = GetSelectFoldableOperands(TVI)) {
9227 unsigned OpToFold = 0;
9228 if ((SFO & 1) && FalseVal == TVI->getOperand(0)) {
9230 } else if ((SFO & 2) && FalseVal == TVI->getOperand(1)) {
9235 Constant *C = GetSelectFoldableConstant(TVI, Context);
9236 Value *OOp = TVI->getOperand(2-OpToFold);
9237 // Avoid creating a select between 2 constants unless it's selecting between 0 and 1.
9239 if (!isa<Constant>(OOp) || isSelect01(C, cast<Constant>(OOp))) {
9240 Instruction *NewSel = SelectInst::Create(SI.getCondition(), OOp, C);
9241 InsertNewInstBefore(NewSel, SI);
9242 NewSel->takeName(TVI);
9243 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(TVI))
9244 return BinaryOperator::Create(BO->getOpcode(), FalseVal, NewSel);
9245 LLVM_UNREACHABLE("Unknown instruction!!");
9252 if (Instruction *FVI = dyn_cast<Instruction>(FalseVal)) {
9253 if (FVI->hasOneUse() && FVI->getNumOperands() == 2 &&
9254 !isa<Constant>(TrueVal)) {
9255 if (unsigned SFO = GetSelectFoldableOperands(FVI)) {
9256 unsigned OpToFold = 0;
9257 if ((SFO & 1) && TrueVal == FVI->getOperand(0)) {
9259 } else if ((SFO & 2) && TrueVal == FVI->getOperand(1)) {
9264 Constant *C = GetSelectFoldableConstant(FVI, Context);
9265 Value *OOp = FVI->getOperand(2-OpToFold);
9266 // Avoid creating a select between 2 constants unless it's selecting between 0 and 1.
9268 if (!isa<Constant>(OOp) || isSelect01(C, cast<Constant>(OOp))) {
9269 Instruction *NewSel = SelectInst::Create(SI.getCondition(), C, OOp);
9270 InsertNewInstBefore(NewSel, SI);
9271 NewSel->takeName(FVI);
9272 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FVI))
9273 return BinaryOperator::Create(BO->getOpcode(), TrueVal, NewSel);
9274 LLVM_UNREACHABLE("Unknown instruction!!");
9284 /// visitSelectInstWithICmp - Visit a SelectInst that has an
9285 /// ICmpInst as its first operand.
9287 Instruction *InstCombiner::visitSelectInstWithICmp(SelectInst &SI,
9289 bool Changed = false;
9290 ICmpInst::Predicate Pred = ICI->getPredicate();
9291 Value *CmpLHS = ICI->getOperand(0);
9292 Value *CmpRHS = ICI->getOperand(1);
9293 Value *TrueVal = SI.getTrueValue();
9294 Value *FalseVal = SI.getFalseValue();
9296 // Check cases where the comparison is with a constant that
9297 // can be adjusted to fit the min/max idiom. We may edit ICI in
9298 // place here, so make sure the select is the only user.
9299 if (ICI->hasOneUse())
9300 if (ConstantInt *CI = dyn_cast<ConstantInt>(CmpRHS)) {
9303 case ICmpInst::ICMP_ULT:
9304 case ICmpInst::ICMP_SLT: {
9305 // X < MIN ? T : F --> F
9306 if (CI->isMinValue(Pred == ICmpInst::ICMP_SLT))
9307 return ReplaceInstUsesWith(SI, FalseVal);
9308 // X < C ? X : C-1 --> X > C-1 ? C-1 : X
9309 Constant *AdjustedRHS = SubOne(CI, Context);
9310 if ((CmpLHS == TrueVal && AdjustedRHS == FalseVal) ||
9311 (CmpLHS == FalseVal && AdjustedRHS == TrueVal)) {
9312 Pred = ICmpInst::getSwappedPredicate(Pred);
9313 CmpRHS = AdjustedRHS;
9314 std::swap(FalseVal, TrueVal);
9315 ICI->setPredicate(Pred);
9316 ICI->setOperand(1, CmpRHS);
9317 SI.setOperand(1, TrueVal);
9318 SI.setOperand(2, FalseVal);
9323 case ICmpInst::ICMP_UGT:
9324 case ICmpInst::ICMP_SGT: {
9325 // X > MAX ? T : F --> F
9326 if (CI->isMaxValue(Pred == ICmpInst::ICMP_SGT))
9327 return ReplaceInstUsesWith(SI, FalseVal);
9328 // X > C ? X : C+1 --> X < C+1 ? C+1 : X
9329 Constant *AdjustedRHS = AddOne(CI, Context);
9330 if ((CmpLHS == TrueVal && AdjustedRHS == FalseVal) ||
9331 (CmpLHS == FalseVal && AdjustedRHS == TrueVal)) {
9332 Pred = ICmpInst::getSwappedPredicate(Pred);
9333 CmpRHS = AdjustedRHS;
9334 std::swap(FalseVal, TrueVal);
9335 ICI->setPredicate(Pred);
9336 ICI->setOperand(1, CmpRHS);
9337 SI.setOperand(1, TrueVal);
9338 SI.setOperand(2, FalseVal);
9345 // (x <s 0) ? -1 : 0 -> ashr x, 31 -> all ones if signed
9346 // (x >s -1) ? -1 : 0 -> ashr x, 31 -> all ones if not signed
9347 CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
9348 if (match(TrueVal, m_ConstantInt<-1>(), *Context) &&
9349 match(FalseVal, m_ConstantInt<0>(), *Context))
9350 Pred = ICI->getPredicate();
9351 else if (match(TrueVal, m_ConstantInt<0>(), *Context) &&
9352 match(FalseVal, m_ConstantInt<-1>(), *Context))
9353 Pred = CmpInst::getInversePredicate(ICI->getPredicate());
9355 if (Pred != CmpInst::BAD_ICMP_PREDICATE) {
9356 // If we are just checking for an icmp eq of a single bit and extending it
9357 // to an integer, then shift the bit to the appropriate place and then
9358 // cast to integer to avoid the comparison.
9359 const APInt &Op1CV = CI->getValue();
9361 // sext (x <s 0) to i32 --> x>>s31 true if signbit set.
9362 // sext (x >s -1) to i32 --> (x>>s31)^-1 true if signbit clear.
9363 if ((Pred == ICmpInst::ICMP_SLT && Op1CV == 0) ||
9364 (Pred == ICmpInst::ICMP_SGT && Op1CV.isAllOnesValue())) {
9365 Value *In = ICI->getOperand(0);
9366 Value *Sh = Context->getConstantInt(In->getType(),
9367 In->getType()->getScalarSizeInBits()-1);
9368 In = InsertNewInstBefore(BinaryOperator::CreateAShr(In, Sh,
9369 In->getName()+".lobit"), *ICI);
9371 if (In->getType() != SI.getType())
9372 In = CastInst::CreateIntegerCast(In, SI.getType(),
9373 true/*SExt*/, "tmp", ICI);
9375 if (Pred == ICmpInst::ICMP_SGT)
9376 In = InsertNewInstBefore(BinaryOperator::CreateNot(In,
9377 In->getName()+".not"), *ICI);
9379 return ReplaceInstUsesWith(SI, In);
9384 if (CmpLHS == TrueVal && CmpRHS == FalseVal) {
9385 // Transform (X == Y) ? X : Y -> Y
9386 if (Pred == ICmpInst::ICMP_EQ)
9387 return ReplaceInstUsesWith(SI, FalseVal);
9388 // Transform (X != Y) ? X : Y -> X
9389 if (Pred == ICmpInst::ICMP_NE)
9390 return ReplaceInstUsesWith(SI, TrueVal);
9391 /// NOTE: if we wanted to, this is where to detect integer MIN/MAX
9393 } else if (CmpLHS == FalseVal && CmpRHS == TrueVal) {
9394 // Transform (X == Y) ? Y : X -> X
9395 if (Pred == ICmpInst::ICMP_EQ)
9396 return ReplaceInstUsesWith(SI, FalseVal);
9397 // Transform (X != Y) ? Y : X -> Y
9398 if (Pred == ICmpInst::ICMP_NE)
9399 return ReplaceInstUsesWith(SI, TrueVal);
9400 /// NOTE: if we wanted to, this is where to detect integer MIN/MAX
9403 /// NOTE: if we wanted to, this is where to detect integer ABS
9405 return Changed ? &SI : 0;
9408 Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
9409 Value *CondVal = SI.getCondition();
9410 Value *TrueVal = SI.getTrueValue();
9411 Value *FalseVal = SI.getFalseValue();
9413 // select true, X, Y -> X
9414 // select false, X, Y -> Y
9415 if (ConstantInt *C = dyn_cast<ConstantInt>(CondVal))
9416 return ReplaceInstUsesWith(SI, C->getZExtValue() ? TrueVal : FalseVal);
9418 // select C, X, X -> X
9419 if (TrueVal == FalseVal)
9420 return ReplaceInstUsesWith(SI, TrueVal);
9422 if (isa<UndefValue>(TrueVal)) // select C, undef, X -> X
9423 return ReplaceInstUsesWith(SI, FalseVal);
9424 if (isa<UndefValue>(FalseVal)) // select C, X, undef -> X
9425 return ReplaceInstUsesWith(SI, TrueVal);
9426 if (isa<UndefValue>(CondVal)) { // select undef, X, Y -> X or Y
9427 if (isa<Constant>(TrueVal))
9428 return ReplaceInstUsesWith(SI, TrueVal);
9430 return ReplaceInstUsesWith(SI, FalseVal);
9433 if (SI.getType() == Type::Int1Ty) {
9434 if (ConstantInt *C = dyn_cast<ConstantInt>(TrueVal)) {
9435 if (C->getZExtValue()) {
9436 // Change: A = select B, true, C --> A = or B, C
9437 return BinaryOperator::CreateOr(CondVal, FalseVal);
9439 // Change: A = select B, false, C --> A = and !B, C
9441 Value *NotCond = InsertNewInstBefore(BinaryOperator::CreateNot(CondVal,
9442 "not."+CondVal->getName()), SI);
9443 return BinaryOperator::CreateAnd(NotCond, FalseVal);
9445 } else if (ConstantInt *C = dyn_cast<ConstantInt>(FalseVal)) {
9446 if (C->getZExtValue() == false) {
9447 // Change: A = select B, C, false --> A = and B, C
9448 return BinaryOperator::CreateAnd(CondVal, TrueVal);
9450 // Change: A = select B, C, true --> A = or !B, C
9452 Value *NotCond = InsertNewInstBefore(BinaryOperator::CreateNot(CondVal,
9453 "not."+CondVal->getName()), SI);
9454 return BinaryOperator::CreateOr(NotCond, TrueVal);
9458 // select a, b, a -> a&b
9459 // select a, a, b -> a|b
9460 if (CondVal == TrueVal)
9461 return BinaryOperator::CreateOr(CondVal, FalseVal);
9462 else if (CondVal == FalseVal)
9463 return BinaryOperator::CreateAnd(CondVal, TrueVal);
9466 // Selecting between two integer constants?
9467 if (ConstantInt *TrueValC = dyn_cast<ConstantInt>(TrueVal))
9468 if (ConstantInt *FalseValC = dyn_cast<ConstantInt>(FalseVal)) {
9469 // select C, 1, 0 -> zext C to int
9470 if (FalseValC->isZero() && TrueValC->getValue() == 1) {
9471 return CastInst::Create(Instruction::ZExt, CondVal, SI.getType());
9472 } else if (TrueValC->isZero() && FalseValC->getValue() == 1) {
9473 // select C, 0, 1 -> zext !C to int
9475 Value *NotCond = InsertNewInstBefore(BinaryOperator::CreateNot(CondVal,
9476 "not."+CondVal->getName()), SI);
9477 return CastInst::Create(Instruction::ZExt, NotCond, SI.getType());
9480 if (ICmpInst *IC = dyn_cast<ICmpInst>(SI.getCondition())) {
9481 // If one of the constants is zero (we know they can't both be) and we
9482 // have an icmp instruction with zero, and we have an 'and' with the
9483 // non-constant value, eliminate this whole mess. This corresponds to
9484 // cases like this: ((X & 27) ? 27 : 0)
9485 if (TrueValC->isZero() || FalseValC->isZero())
9486 if (IC->isEquality() && isa<ConstantInt>(IC->getOperand(1)) &&
9487 cast<Constant>(IC->getOperand(1))->isNullValue())
9488 if (Instruction *ICA = dyn_cast<Instruction>(IC->getOperand(0)))
9489 if (ICA->getOpcode() == Instruction::And &&
9490 isa<ConstantInt>(ICA->getOperand(1)) &&
9491 (ICA->getOperand(1) == TrueValC ||
9492 ICA->getOperand(1) == FalseValC) &&
9493 isOneBitSet(cast<ConstantInt>(ICA->getOperand(1)))) {
9494 // Okay, now we know that everything is set up, we just don't
9495 // know whether we have a icmp_ne or icmp_eq and whether the
9496 // true or false val is the zero.
9497 bool ShouldNotVal = !TrueValC->isZero();
9498 ShouldNotVal ^= IC->getPredicate() == ICmpInst::ICMP_NE;
Value *V = ICA;
if (ShouldNotVal)
9501 V = InsertNewInstBefore(BinaryOperator::Create(
9502 Instruction::Xor, V, ICA->getOperand(1)), SI);
9503 return ReplaceInstUsesWith(SI, V);
9508 // See if we are selecting two values based on a comparison of the two values.
9509 if (FCmpInst *FCI = dyn_cast<FCmpInst>(CondVal)) {
9510 if (FCI->getOperand(0) == TrueVal && FCI->getOperand(1) == FalseVal) {
9511 // Transform (X == Y) ? X : Y -> Y
9512 if (FCI->getPredicate() == FCmpInst::FCMP_OEQ) {
9513 // This is not safe in general for floating point:
9514 // consider X== -0, Y== +0.
9515 // It becomes safe if either operand is a nonzero constant.
9516 ConstantFP *CFPt, *CFPf;
9517 if (((CFPt = dyn_cast<ConstantFP>(TrueVal)) &&
9518 !CFPt->getValueAPF().isZero()) ||
9519 ((CFPf = dyn_cast<ConstantFP>(FalseVal)) &&
9520 !CFPf->getValueAPF().isZero()))
9521 return ReplaceInstUsesWith(SI, FalseVal);
9523 // Transform (X != Y) ? X : Y -> X
9524 if (FCI->getPredicate() == FCmpInst::FCMP_ONE)
9525 return ReplaceInstUsesWith(SI, TrueVal);
9526 // NOTE: if we wanted to, this is where to detect MIN/MAX
9528 } else if (FCI->getOperand(0) == FalseVal && FCI->getOperand(1) == TrueVal){
9529 // Transform (X == Y) ? Y : X -> X
9530 if (FCI->getPredicate() == FCmpInst::FCMP_OEQ) {
9531 // This is not safe in general for floating point:
9532 // consider X== -0, Y== +0.
9533 // It becomes safe if either operand is a nonzero constant.
9534 ConstantFP *CFPt, *CFPf;
9535 if (((CFPt = dyn_cast<ConstantFP>(TrueVal)) &&
9536 !CFPt->getValueAPF().isZero()) ||
9537 ((CFPf = dyn_cast<ConstantFP>(FalseVal)) &&
9538 !CFPf->getValueAPF().isZero()))
9539 return ReplaceInstUsesWith(SI, FalseVal);
9541 // Transform (X != Y) ? Y : X -> Y
9542 if (FCI->getPredicate() == FCmpInst::FCMP_ONE)
9543 return ReplaceInstUsesWith(SI, TrueVal);
9544 // NOTE: if we wanted to, this is where to detect MIN/MAX
9546 // NOTE: if we wanted to, this is where to detect ABS
9549 // See if we are selecting two values based on a comparison of the two values.
9550 if (ICmpInst *ICI = dyn_cast<ICmpInst>(CondVal))
9551 if (Instruction *Result = visitSelectInstWithICmp(SI, ICI))
9554 if (Instruction *TI = dyn_cast<Instruction>(TrueVal))
9555 if (Instruction *FI = dyn_cast<Instruction>(FalseVal))
9556 if (TI->hasOneUse() && FI->hasOneUse()) {
9557 Instruction *AddOp = 0, *SubOp = 0;
9559 // Turn (select C, (op X, Y), (op X, Z)) -> (op X, (select C, Y, Z))
9560 if (TI->getOpcode() == FI->getOpcode())
9561 if (Instruction *IV = FoldSelectOpOp(SI, TI, FI))
return IV;
9564 // Turn select C, (X+Y), (X-Y) --> (X+(select C, Y, (-Y))). This is
9565 // even legal for FP.
9566 if ((TI->getOpcode() == Instruction::Sub &&
9567 FI->getOpcode() == Instruction::Add) ||
9568 (TI->getOpcode() == Instruction::FSub &&
9569 FI->getOpcode() == Instruction::FAdd)) {
9570 AddOp = FI; SubOp = TI;
9571 } else if ((FI->getOpcode() == Instruction::Sub &&
9572 TI->getOpcode() == Instruction::Add) ||
9573 (FI->getOpcode() == Instruction::FSub &&
9574 TI->getOpcode() == Instruction::FAdd)) {
9575 AddOp = TI; SubOp = FI;
9579 Value *OtherAddOp = 0;
9580 if (SubOp->getOperand(0) == AddOp->getOperand(0)) {
9581 OtherAddOp = AddOp->getOperand(1);
9582 } else if (SubOp->getOperand(0) == AddOp->getOperand(1)) {
9583 OtherAddOp = AddOp->getOperand(0);
9587 // So at this point we know we have (Y -> OtherAddOp):
9588 // select C, (add X, Y), (sub X, Z)
9589 Value *NegVal; // Compute -Z
9590 if (Constant *C = dyn_cast<Constant>(SubOp->getOperand(1))) {
9591 NegVal = Context->getConstantExprNeg(C);
} else {
9593 NegVal = InsertNewInstBefore(
9594 BinaryOperator::CreateNeg(*Context, SubOp->getOperand(1),
"tmp"), SI);
}
9598 Value *NewTrueOp = OtherAddOp;
9599 Value *NewFalseOp = NegVal;
if (AddOp != TI)
9601 std::swap(NewTrueOp, NewFalseOp);
9602 Instruction *NewSel =
9603 SelectInst::Create(CondVal, NewTrueOp,
9604 NewFalseOp, SI.getName() + ".p");
9606 NewSel = InsertNewInstBefore(NewSel, SI);
9607 return BinaryOperator::CreateAdd(SubOp->getOperand(0), NewSel);
9612 // See if we can fold the select into one of our operands.
9613 if (SI.getType()->isInteger()) {
9614 Instruction *FoldI = FoldSelectIntoOp(SI, TrueVal, FalseVal);
if (FoldI)
return FoldI;
}
9619 if (BinaryOperator::isNot(CondVal)) {
9620 SI.setOperand(0, BinaryOperator::getNotArgument(CondVal));
9621 SI.setOperand(1, FalseVal);
9622 SI.setOperand(2, TrueVal);
9629 /// EnforceKnownAlignment - If the specified pointer points to an object that
9630 /// we control, modify the object's alignment to PrefAlign. This isn't
9631 /// often possible though. If alignment is important, a more reliable approach
9632 /// is to simply align all global variables and allocation instructions to
9633 /// their preferred alignment from the beginning.
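// Illustrative sketch (hypothetical IR, not from the original source): given
//   @G = internal global [16 x i8] zeroinitializer, align 1
// a call EnforceKnownAlignment(@G, /*Align=*/1, /*PrefAlign=*/16) may bump @G to
// "align 16" and return 16, but only because @G is defined in this module; for an
// external declaration the existing alignment is all that can be reported.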
9635 static unsigned EnforceKnownAlignment(Value *V,
9636 unsigned Align, unsigned PrefAlign) {
9638 User *U = dyn_cast<User>(V);
9639 if (!U) return Align;
9641 switch (getOpcode(U)) {
9643 case Instruction::BitCast:
9644 return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
9645 case Instruction::GetElementPtr: {
9646 // If all indexes are zero, it is just the alignment of the base pointer.
9647 bool AllZeroOperands = true;
9648 for (User::op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e; ++i)
9649 if (!isa<Constant>(*i) ||
9650 !cast<Constant>(*i)->isNullValue()) {
9651 AllZeroOperands = false;
9655 if (AllZeroOperands) {
9656 // Treat this like a bitcast.
9657 return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
9663 if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
9664 // If there is a large requested alignment and we can, bump up the alignment
9666 if (!GV->isDeclaration()) {
9667 if (GV->getAlignment() >= PrefAlign)
9668 Align = GV->getAlignment();
9670 GV->setAlignment(PrefAlign);
9674 } else if (AllocationInst *AI = dyn_cast<AllocationInst>(V)) {
9675 // If there is a requested alignment and if this is an alloca, round up. We
9676 // don't do this for malloc, because some systems can't respect the request.
9677 if (isa<AllocaInst>(AI)) {
9678 if (AI->getAlignment() >= PrefAlign)
9679 Align = AI->getAlignment();
9681 AI->setAlignment(PrefAlign);
9690 /// GetOrEnforceKnownAlignment - If the specified pointer has an alignment that
9691 /// we can determine, return it, otherwise return 0. If PrefAlign is specified,
9692 /// and it is more than the alignment of the ultimate object, see if we can
9693 /// increase the alignment of the ultimate object, making this check succeed.
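// Note on the computation below: the number of low bits known to be zero in the
// pointer's value is a provable alignment, e.g. three known-zero low bits mean the
// pointer is at least 8-byte aligned; that is what countTrailingOnes on the
// KnownZero mask and the shift compute.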
9694 unsigned InstCombiner::GetOrEnforceKnownAlignment(Value *V,
9695 unsigned PrefAlign) {
9696 unsigned BitWidth = TD ? TD->getTypeSizeInBits(V->getType()) :
9697 sizeof(PrefAlign) * CHAR_BIT;
9698 APInt Mask = APInt::getAllOnesValue(BitWidth);
9699 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
9700 ComputeMaskedBits(V, Mask, KnownZero, KnownOne);
9701 unsigned TrailZ = KnownZero.countTrailingOnes();
9702 unsigned Align = 1u << std::min(BitWidth - 1, TrailZ);
9704 if (PrefAlign > Align)
9705 Align = EnforceKnownAlignment(V, Align, PrefAlign);
9707 // We don't need to make any adjustment.
return Align;
}
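// Example of the transform performed by SimplifyMemTransfer below (an illustrative
// sketch with hypothetical IR, assuming both pointers are known 4-byte aligned):
//   call void @llvm.memcpy.i32(i8* %d, i8* %s, i32 4, i32 4)
// becomes roughly
//   %v = load i32* %s, align 4
//   store i32 %v, i32* %d, align 4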
9711 Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
9712 unsigned DstAlign = GetOrEnforceKnownAlignment(MI->getOperand(1));
9713 unsigned SrcAlign = GetOrEnforceKnownAlignment(MI->getOperand(2));
9714 unsigned MinAlign = std::min(DstAlign, SrcAlign);
9715 unsigned CopyAlign = MI->getAlignment();
9717 if (CopyAlign < MinAlign) {
9718 MI->setAlignment(Context->getConstantInt(MI->getAlignmentType(),
MinAlign, false));
return MI;
}
9723 // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with a load/store.
9725 ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getOperand(3));
9726 if (MemOpLength == 0) return 0;
9728 // Source and destination pointer types are always "i8*" for intrinsic. See
9729 // if the size is something we can handle with a single primitive load/store.
9730 // A single load+store correctly handles overlapping memory in the memmove case.
9732 unsigned Size = MemOpLength->getZExtValue();
9733 if (Size == 0) return MI; // Delete this mem transfer.
9735 if (Size > 8 || (Size&(Size-1)))
9736 return 0; // If not 1/2/4/8 bytes, exit.
9738 // Use an integer load+store unless we can find something better.
9740 const Type *NewPtrTy = Context->getPointerTypeUnqual(Context->getIntegerType(Size<<3));
9742 // Memcpy forces the use of i8* for the source and destination. That means
9743 // that if you're using memcpy to move one double around, you'll get a cast
9744 // from double* to i8*. We'd much rather use a double load+store rather than
9745 // an i64 load+store, here because this improves the odds that the source or
9746 // dest address will be promotable. See if we can find a better type than the
9747 // integer datatype.
9748 if (Value *Op = getBitCastOperand(MI->getOperand(1))) {
9749 const Type *SrcETy = cast<PointerType>(Op->getType())->getElementType();
9750 if (SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
9751 // The SrcETy might be something like {{{double}}} or [1 x double]. Rip
9752 // down through these levels if so.
9753 while (!SrcETy->isSingleValueType()) {
9754 if (const StructType *STy = dyn_cast<StructType>(SrcETy)) {
9755 if (STy->getNumElements() == 1)
9756 SrcETy = STy->getElementType(0);
9759 } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
9760 if (ATy->getNumElements() == 1)
9761 SrcETy = ATy->getElementType();
9768 if (SrcETy->isSingleValueType())
9769 NewPtrTy = Context->getPointerTypeUnqual(SrcETy);
9774 // If the memcpy/memmove provides better alignment info than we can
// analyze, use it.
9776 SrcAlign = std::max(SrcAlign, CopyAlign);
9777 DstAlign = std::max(DstAlign, CopyAlign);
9779 Value *Src = InsertBitCastBefore(MI->getOperand(2), NewPtrTy, *MI);
9780 Value *Dest = InsertBitCastBefore(MI->getOperand(1), NewPtrTy, *MI);
9781 Instruction *L = new LoadInst(Src, "tmp", false, SrcAlign);
9782 InsertNewInstBefore(L, *MI);
9783 InsertNewInstBefore(new StoreInst(L, Dest, false, DstAlign), *MI);
9785 // Set the size of the copy to 0, it will be deleted on the next iteration.
9786 MI->setOperand(3, Context->getNullValue(MemOpLength->getType()));
return MI;
}
9790 Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
9791 unsigned Alignment = GetOrEnforceKnownAlignment(MI->getDest());
9792 if (MI->getAlignment() < Alignment) {
9793 MI->setAlignment(Context->getConstantInt(MI->getAlignmentType(),
Alignment, false));
return MI;
}
9798 // Extract the length and alignment and fill if they are constant.
9799 ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
9800 ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
9801 if (!LenC || !FillC || FillC->getType() != Type::Int8Ty)
return 0;
9803 uint64_t Len = LenC->getZExtValue();
9804 Alignment = MI->getAlignment();
9806 // If the length is zero, this is a no-op
9807 if (Len == 0) return MI; // memset(d,c,0,a) -> noop
9809 // memset(s,c,n) -> store s, c (for n=1,2,4,8)
9810 if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
9811 const Type *ITy = Context->getIntegerType(Len*8); // n=1 -> i8.
9813 Value *Dest = MI->getDest();
9814 Dest = InsertBitCastBefore(Dest, Context->getPointerTypeUnqual(ITy), *MI);
9816 // Alignment 0 is identity for alignment 1 for memset, but not store.
9817 if (Alignment == 0) Alignment = 1;
9819 // Extract the fill value and store.
9820 uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
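// Multiplying the byte by 0x0101010101010101 replicates it into every byte of the
// wider integer, e.g. a fill byte of 0xAB yields 0xABABABAB for an i32 store.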
9821 InsertNewInstBefore(new StoreInst(Context->getConstantInt(ITy, Fill),
9822 Dest, false, Alignment), *MI);
9824 // Set the size of the copy to 0, it will be deleted on the next iteration.
9825 MI->setLength(Context->getNullValue(LenC->getType()));
return MI;
}

return 0;
}
9833 /// visitCallInst - CallInst simplification. This mostly only handles folding
9834 /// of intrinsic instructions. For normal calls, it allows visitCallSite to do
9835 /// the heavy lifting.
9837 Instruction *InstCombiner::visitCallInst(CallInst &CI) {
9838 // If the caller function is nounwind, mark the call as nounwind, even if the
// call itself can't.
9840 if (CI.getParent()->getParent()->doesNotThrow() &&
9841 !CI.doesNotThrow()) {
9842 CI.setDoesNotThrow();
9848 IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
9849 if (!II) return visitCallSite(&CI);
9851 // Intrinsics cannot occur in an invoke, so handle them here instead of in
// visitCallSite.
9853 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
9854 bool Changed = false;
9856 // memmove/cpy/set of zero bytes is a noop.
9857 if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
9858 if (NumBytes->isNullValue()) return EraseInstFromFunction(CI);
9860 if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
9861 if (CI->getZExtValue() == 1) {
9862 // Replace the instruction with just byte operations. We would
9863 // transform other cases to loads/stores, but we don't know if
9864 // alignment is sufficient.
9868 // If we have a memmove and the source operation is a constant global,
9869 // then the source and dest pointers can't alias, so we can change this
9870 // into a call to memcpy.
9871 if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
9872 if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
9873 if (GVSrc->isConstant()) {
9874 Module *M = CI.getParent()->getParent()->getParent();
9875 Intrinsic::ID MemCpyID = Intrinsic::memcpy;
const Type *Tys[1];
9877 Tys[0] = CI.getOperand(3)->getType();
CI.setOperand(0,
9879 Intrinsic::getDeclaration(M, MemCpyID, Tys, 1));
Changed = true;
9883 // memmove(x,x,size) -> noop.
9884 if (MMI->getSource() == MMI->getDest())
9885 return EraseInstFromFunction(CI);
9888 // If we can determine a pointer alignment that is bigger than currently
9889 // set, update the alignment.
9890 if (isa<MemTransferInst>(MI)) {
9891 if (Instruction *I = SimplifyMemTransfer(MI))
return I;
9893 } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
9894 if (Instruction *I = SimplifyMemSet(MSI))
return I;
}
9898 if (Changed) return II;
9901 switch (II->getIntrinsicID()) {
9903 case Intrinsic::bswap:
9904 // bswap(bswap(x)) -> x
9905 if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getOperand(1)))
9906 if (Operand->getIntrinsicID() == Intrinsic::bswap)
9907 return ReplaceInstUsesWith(CI, Operand->getOperand(1));
9909 case Intrinsic::ppc_altivec_lvx:
9910 case Intrinsic::ppc_altivec_lvxl:
9911 case Intrinsic::x86_sse_loadu_ps:
9912 case Intrinsic::x86_sse2_loadu_pd:
9913 case Intrinsic::x86_sse2_loadu_dq:
9914 // Turn PPC lvx -> load if the pointer is known aligned.
9915 // Turn X86 loadups -> load if the pointer is known aligned.
9916 if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) {
9917 Value *Ptr = InsertBitCastBefore(II->getOperand(1),
9918 Context->getPointerTypeUnqual(II->getType()), CI);
9920 return new LoadInst(Ptr);
9923 case Intrinsic::ppc_altivec_stvx:
9924 case Intrinsic::ppc_altivec_stvxl:
9925 // Turn stvx -> store if the pointer is known aligned.
9926 if (GetOrEnforceKnownAlignment(II->getOperand(2), 16) >= 16) {
9927 const Type *OpPtrTy =
9928 Context->getPointerTypeUnqual(II->getOperand(1)->getType());
9929 Value *Ptr = InsertBitCastBefore(II->getOperand(2), OpPtrTy, CI);
9930 return new StoreInst(II->getOperand(1), Ptr);
9933 case Intrinsic::x86_sse_storeu_ps:
9934 case Intrinsic::x86_sse2_storeu_pd:
9935 case Intrinsic::x86_sse2_storeu_dq:
9936 // Turn X86 storeu -> store if the pointer is known aligned.
9937 if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) {
9938 const Type *OpPtrTy =
9939 Context->getPointerTypeUnqual(II->getOperand(2)->getType());
9940 Value *Ptr = InsertBitCastBefore(II->getOperand(1), OpPtrTy, CI);
9941 return new StoreInst(II->getOperand(2), Ptr);
9945 case Intrinsic::x86_sse_cvttss2si: {
9946 // These intrinsics only demand the 0th element of their input vector. If
9947 // we can simplify the input based on that, do so now.
unsigned VWidth =
9949 cast<VectorType>(II->getOperand(1)->getType())->getNumElements();
9950 APInt DemandedElts(VWidth, 1);
9951 APInt UndefElts(VWidth, 0);
9952 if (Value *V = SimplifyDemandedVectorElts(II->getOperand(1), DemandedElts,
UndefElts)) {
9954 II->setOperand(1, V);
return II;
}
break;
}
9960 case Intrinsic::ppc_altivec_vperm:
9961 // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
9962 if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getOperand(3))) {
9963 assert(Mask->getNumOperands() == 16 && "Bad type for intrinsic!");
9965 // Check that all of the elements are integer constants or undefs.
9966 bool AllEltsOk = true;
9967 for (unsigned i = 0; i != 16; ++i) {
9968 if (!isa<ConstantInt>(Mask->getOperand(i)) &&
9969 !isa<UndefValue>(Mask->getOperand(i))) {
AllEltsOk = false;
break;
}
}

if (AllEltsOk) {
9976 // Cast the input vectors to byte vectors.
9977 Value *Op0 =InsertBitCastBefore(II->getOperand(1),Mask->getType(),CI);
9978 Value *Op1 =InsertBitCastBefore(II->getOperand(2),Mask->getType(),CI);
9979 Value *Result = Context->getUndef(Op0->getType());
9981 // Only extract each element once.
9982 Value *ExtractedElts[32];
9983 memset(ExtractedElts, 0, sizeof(ExtractedElts));
9985 for (unsigned i = 0; i != 16; ++i) {
9986 if (isa<UndefValue>(Mask->getOperand(i)))
continue;
9988 unsigned Idx=cast<ConstantInt>(Mask->getOperand(i))->getZExtValue();
9989 Idx &= 31; // Match the hardware behavior.
9991 if (ExtractedElts[Idx] == 0) {
9993 Instruction *Elt = new ExtractElementInst(Idx < 16 ? Op0 : Op1, Idx&15, "tmp");
9994 InsertNewInstBefore(Elt, CI);
9995 ExtractedElts[Idx] = Elt;
9998 // Insert this value into the result vector.
9999 Result = InsertElementInst::Create(Result, ExtractedElts[Idx], i, "tmp");
10001 InsertNewInstBefore(cast<Instruction>(Result), CI);
10003 return CastInst::Create(Instruction::BitCast, Result, CI.getType());
10008 case Intrinsic::stackrestore: {
10009 // If the save is right next to the restore, remove the restore. This can
10010 // happen when variable allocas are DCE'd.
10011 if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getOperand(1))) {
10012 if (SS->getIntrinsicID() == Intrinsic::stacksave) {
10013 BasicBlock::iterator BI = SS;
10015 return EraseInstFromFunction(CI);
10019 // Scan down this block to see if there is another stack restore in the
10020 // same block without an intervening call/alloca.
10021 BasicBlock::iterator BI = II;
10022 TerminatorInst *TI = II->getParent()->getTerminator();
10023 bool CannotRemove = false;
10024 for (++BI; &*BI != TI; ++BI) {
10025 if (isa<AllocaInst>(BI)) {
10026 CannotRemove = true;
10029 if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
10030 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
10031 // If there is a stackrestore below this one, remove this one.
10032 if (II->getIntrinsicID() == Intrinsic::stackrestore)
10033 return EraseInstFromFunction(CI);
10034 // Otherwise, ignore the intrinsic.
} else {
10036 // If we found a non-intrinsic call, we can't remove the stack
// restore.
10038 CannotRemove = true;
break;
}
10044 // If the stack restore is in a return/unwind block and if there are no
10045 // allocas or calls between the restore and the return, nuke the restore.
10046 if (!CannotRemove && (isa<ReturnInst>(TI) || isa<UnwindInst>(TI)))
10047 return EraseInstFromFunction(CI);
10052 return visitCallSite(II);
10055 // InvokeInst simplification
10057 Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
10058 return visitCallSite(&II);
10061 /// isSafeToEliminateVarargsCast - If this cast does not affect the value
10062 /// passed through the varargs area, we can eliminate the use of the cast.
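// Illustrative note: a lossless pointer bitcast on an ordinary vararg argument can
// always be dropped; for a 'byval' argument it can only be dropped when the source
// and destination pointee types have the same allocation size, which is what the
// checks below verify.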
10063 static bool isSafeToEliminateVarargsCast(const CallSite CS,
10064 const CastInst * const CI,
10065 const TargetData * const TD,
const unsigned ix) {
10067 if (!CI->isLosslessCast())
return false;
10070 // The size of ByVal arguments is derived from the type, so we
10071 // can't change to a type with a different size. If the size were
10072 // passed explicitly we could avoid this check.
10073 if (!CS.paramHasAttr(ix, Attribute::ByVal))
10076 const Type* SrcTy =
10077 cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
10078 const Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
10079 if (!SrcTy->isSized() || !DstTy->isSized())
return false;
10081 if (TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
return false;
return true;
}
10086 // visitCallSite - Improvements for call and invoke instructions.
10088 Instruction *InstCombiner::visitCallSite(CallSite CS) {
10089 bool Changed = false;
10091 // If the callee is a constexpr cast of a function, attempt to move the cast
10092 // to the arguments of the call/invoke.
10093 if (transformConstExprCastCall(CS)) return 0;
10095 Value *Callee = CS.getCalledValue();
10097 if (Function *CalleeF = dyn_cast<Function>(Callee))
10098 if (CalleeF->getCallingConv() != CS.getCallingConv()) {
10099 Instruction *OldCall = CS.getInstruction();
10100 // If the call and callee calling conventions don't match, this call must
10101 // be unreachable, as the call is undefined.
10102 new StoreInst(Context->getConstantIntTrue(),
10103 Context->getUndef(Context->getPointerTypeUnqual(Type::Int1Ty)),
OldCall);
10105 if (!OldCall->use_empty())
10106 OldCall->replaceAllUsesWith(Context->getUndef(OldCall->getType()));
10107 if (isa<CallInst>(OldCall)) // Not worth removing an invoke here.
10108 return EraseInstFromFunction(*OldCall);
10112 if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
10113 // This instruction is not reachable, just remove it. We insert a store to
10114 // undef so that we know that this code is not reachable, despite the fact
10115 // that we can't modify the CFG here.
10116 new StoreInst(Context->getConstantIntTrue(),
10117 Context->getUndef(Context->getPointerTypeUnqual(Type::Int1Ty)),
10118 CS.getInstruction());
10120 if (!CS.getInstruction()->use_empty())
10121 CS.getInstruction()->
10122 replaceAllUsesWith(Context->getUndef(CS.getInstruction()->getType()));
10124 if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
10125 // Don't break the CFG, insert a dummy cond branch.
10126 BranchInst::Create(II->getNormalDest(), II->getUnwindDest(),
10127 Context->getConstantIntTrue(), II);
10129 return EraseInstFromFunction(*CS.getInstruction());
10132 if (BitCastInst *BC = dyn_cast<BitCastInst>(Callee))
10133 if (IntrinsicInst *In = dyn_cast<IntrinsicInst>(BC->getOperand(0)))
10134 if (In->getIntrinsicID() == Intrinsic::init_trampoline)
10135 return transformCallThroughTrampoline(CS);
10137 const PointerType *PTy = cast<PointerType>(Callee->getType());
10138 const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
10139 if (FTy->isVarArg()) {
10140 int ix = FTy->getNumParams() + (isa<InvokeInst>(Callee) ? 3 : 1);
10141 // See if we can optimize any arguments passed through the varargs area of
10143 for (CallSite::arg_iterator I = CS.arg_begin()+FTy->getNumParams(),
10144 E = CS.arg_end(); I != E; ++I, ++ix) {
10145 CastInst *CI = dyn_cast<CastInst>(*I);
10146 if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
10147 *I = CI->getOperand(0);
10153 if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
10154 // Inline asm calls cannot throw - mark them 'nounwind'.
10155 CS.setDoesNotThrow();
10159 return Changed ? CS.getInstruction() : 0;
10162 // transformConstExprCastCall - If the callee is a constexpr cast of a function,
10163 // attempt to move the cast to the arguments of the call/invoke.
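// Illustrative sketch (hypothetical IR): a call through a bitcast callee such as
//   %r = call i8* bitcast (i8* (i8*)* @f to i8* (i32*)*)(i32* %p)
// can become a bitcast of %p to i8* followed by a direct call to @f, with the
// result cast back if its type changed.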
10165 bool InstCombiner::transformConstExprCastCall(CallSite CS) {
10166 if (!isa<ConstantExpr>(CS.getCalledValue())) return false;
10167 ConstantExpr *CE = cast<ConstantExpr>(CS.getCalledValue());
10168 if (CE->getOpcode() != Instruction::BitCast ||
10169 !isa<Function>(CE->getOperand(0)))
10171 Function *Callee = cast<Function>(CE->getOperand(0));
10172 Instruction *Caller = CS.getInstruction();
10173 const AttrListPtr &CallerPAL = CS.getAttributes();
10175 // Okay, this is a cast from a function to a different type. Unless doing so
10176 // would cause a type conversion of one of our arguments, change this call to
10177 // be a direct call with arguments casted to the appropriate types.
10179 const FunctionType *FT = Callee->getFunctionType();
10180 const Type *OldRetTy = Caller->getType();
10181 const Type *NewRetTy = FT->getReturnType();
10183 if (isa<StructType>(NewRetTy))
10184 return false; // TODO: Handle multiple return values.
10186 // Check to see if we are changing the return type...
10187 if (OldRetTy != NewRetTy) {
10188 if (Callee->isDeclaration() &&
10189 // Conversion is ok if changing from one pointer type to another or from
10190 // a pointer to an integer of the same size.
10191 !((isa<PointerType>(OldRetTy) || OldRetTy == TD->getIntPtrType()) &&
10192 (isa<PointerType>(NewRetTy) || NewRetTy == TD->getIntPtrType())))
10193 return false; // Cannot transform this return value.
10195 if (!Caller->use_empty() &&
10196 // void -> non-void is handled specially
10197 NewRetTy != Type::VoidTy && !CastInst::isCastable(NewRetTy, OldRetTy))
10198 return false; // Cannot transform this return value.
10200 if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
10201 Attributes RAttrs = CallerPAL.getRetAttributes();
10202 if (RAttrs & Attribute::typeIncompatible(NewRetTy))
10203 return false; // Attribute not compatible with transformed value.
10206 // If the callsite is an invoke instruction, and the return value is used by
10207 // a PHI node in a successor, we cannot change the return type of the call
10208 // because there is no place to put the cast instruction (without breaking
10209 // the critical edge). Bail out in this case.
10210 if (!Caller->use_empty())
10211 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
10212 for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
UI != E; ++UI)
10214 if (PHINode *PN = dyn_cast<PHINode>(*UI))
10215 if (PN->getParent() == II->getNormalDest() ||
10216 PN->getParent() == II->getUnwindDest())
10220 unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
10221 unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);
10223 CallSite::arg_iterator AI = CS.arg_begin();
10224 for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
10225 const Type *ParamTy = FT->getParamType(i);
10226 const Type *ActTy = (*AI)->getType();
10228 if (!CastInst::isCastable(ActTy, ParamTy))
10229 return false; // Cannot transform this parameter value.
10231 if (CallerPAL.getParamAttributes(i + 1)
10232 & Attribute::typeIncompatible(ParamTy))
10233 return false; // Attribute not compatible with transformed value.
10235 // Converting from one pointer type to another or between a pointer and an
10236 // integer of the same size is safe even if we do not have a body.
10237 bool isConvertible = ActTy == ParamTy ||
10238 ((isa<PointerType>(ParamTy) || ParamTy == TD->getIntPtrType()) &&
10239 (isa<PointerType>(ActTy) || ActTy == TD->getIntPtrType()));
10240 if (Callee->isDeclaration() && !isConvertible) return false;
10243 if (FT->getNumParams() < NumActualArgs && !FT->isVarArg() &&
10244 Callee->isDeclaration())
10245 return false; // Do not delete arguments unless we have a function body.
10247 if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
10248 !CallerPAL.isEmpty())
10249 // In this case we have more arguments than the new function type, but we
10250 // won't be dropping them. Check that these extra arguments have attributes
10251 // that are compatible with being a vararg call argument.
10252 for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
10253 if (CallerPAL.getSlot(i - 1).Index <= FT->getNumParams())
10255 Attributes PAttrs = CallerPAL.getSlot(i - 1).Attrs;
10256 if (PAttrs & Attribute::VarArgsIncompatible)
10260 // Okay, we decided that this is a safe thing to do: go ahead and start
10261 // inserting cast instructions as necessary...
10262 std::vector<Value*> Args;
10263 Args.reserve(NumActualArgs);
10264 SmallVector<AttributeWithIndex, 8> attrVec;
10265 attrVec.reserve(NumCommonArgs);
10267 // Get any return attributes.
10268 Attributes RAttrs = CallerPAL.getRetAttributes();
10270 // If the return value is not being used, the type may not be compatible
10271 // with the existing attributes. Wipe out any problematic attributes.
10272 RAttrs &= ~Attribute::typeIncompatible(NewRetTy);
10274 // Add the new return attributes.
if (RAttrs)
10276 attrVec.push_back(AttributeWithIndex::get(0, RAttrs));
10278 AI = CS.arg_begin();
10279 for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
10280 const Type *ParamTy = FT->getParamType(i);
10281 if ((*AI)->getType() == ParamTy) {
10282 Args.push_back(*AI);
} else {
10284 Instruction::CastOps opcode = CastInst::getCastOpcode(*AI,
10285 false, ParamTy, false);
10286 CastInst *NewCast = CastInst::Create(opcode, *AI, ParamTy, "tmp");
10287 Args.push_back(InsertNewInstBefore(NewCast, *Caller));
}
10290 // Add any parameter attributes.
10291 if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
10292 attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
10295 // If the function takes more arguments than the call was taking, add them
// now (null values are used for the missing parameters).
10297 for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
10298 Args.push_back(Context->getNullValue(FT->getParamType(i)));
10300 // If we are removing arguments to the function, emit an obnoxious warning...
10301 if (FT->getNumParams() < NumActualArgs) {
10302 if (!FT->isVarArg()) {
10303 cerr << "WARNING: While resolving call to function '"
10304 << Callee->getName() << "' arguments were dropped!\n";
} else {
10306 // Add all of the arguments in their promoted form to the arg list...
10307 for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
10308 const Type *PTy = getPromotedType((*AI)->getType());
10309 if (PTy != (*AI)->getType()) {
10310 // Must promote to pass through va_arg area!
10311 Instruction::CastOps opcode = CastInst::getCastOpcode(*AI, false,
PTy, false);
10313 Instruction *Cast = CastInst::Create(opcode, *AI, PTy, "tmp");
10314 InsertNewInstBefore(Cast, *Caller);
10315 Args.push_back(Cast);
} else {
10317 Args.push_back(*AI);
}
10320 // Add any parameter attributes.
10321 if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
10322 attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
10327 if (Attributes FnAttrs = CallerPAL.getFnAttributes())
10328 attrVec.push_back(AttributeWithIndex::get(~0, FnAttrs));
10330 if (NewRetTy == Type::VoidTy)
10331 Caller->setName(""); // Void type should not have a name.
10333 const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec.begin(),attrVec.end());
Instruction *NC;
10336 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
10337 NC = InvokeInst::Create(Callee, II->getNormalDest(), II->getUnwindDest(),
10338 Args.begin(), Args.end(),
10339 Caller->getName(), Caller);
10340 cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
10341 cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
10343 NC = CallInst::Create(Callee, Args.begin(), Args.end(),
10344 Caller->getName(), Caller);
10345 CallInst *CI = cast<CallInst>(Caller);
10346 if (CI->isTailCall())
10347 cast<CallInst>(NC)->setTailCall();
10348 cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
10349 cast<CallInst>(NC)->setAttributes(NewCallerPAL);
10352 // Insert a cast of the return type as necessary.
Value *NV = NC;
10354 if (OldRetTy != NV->getType() && !Caller->use_empty()) {
10355 if (NV->getType() != Type::VoidTy) {
10356 Instruction::CastOps opcode = CastInst::getCastOpcode(NC, false,
OldRetTy, false);
10358 NV = NC = CastInst::Create(opcode, NC, OldRetTy, "tmp");
10360 // If this is an invoke instruction, we should insert it after the first
10361 // non-phi, instruction in the normal successor block.
10362 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
10363 BasicBlock::iterator I = II->getNormalDest()->getFirstNonPHI();
10364 InsertNewInstBefore(NC, *I);
} else {
10366 // Otherwise, it's a call, just insert cast right after the call instr
10367 InsertNewInstBefore(NC, *Caller);
}
10369 AddUsersToWorkList(*Caller);
} else {
10371 NV = Context->getUndef(Caller->getType());
}
}
10375 if (Caller->getType() != Type::VoidTy && !Caller->use_empty())
10376 Caller->replaceAllUsesWith(NV);
10377 Caller->eraseFromParent();
10378 RemoveFromWorkList(Caller);
return true;
}
10382 // transformCallThroughTrampoline - Turn a call to a function created by the
10383 // init_trampoline intrinsic into a direct call to the underlying function.
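// Illustrative note: a trampoline produced by llvm.init_trampoline wraps a target
// function and a 'nest' value; calling through it is equivalent to calling the
// target directly with the nest value spliced in as the parameter marked 'nest',
// which is the rewrite performed below.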
10385 Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) {
10386 Value *Callee = CS.getCalledValue();
10387 const PointerType *PTy = cast<PointerType>(Callee->getType());
10388 const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
10389 const AttrListPtr &Attrs = CS.getAttributes();
10391 // If the call already has the 'nest' attribute somewhere then give up -
10392 // otherwise 'nest' would occur twice after splicing in the chain.
10393 if (Attrs.hasAttrSomewhere(Attribute::Nest))
10396 IntrinsicInst *Tramp =
10397 cast<IntrinsicInst>(cast<BitCastInst>(Callee)->getOperand(0));
10399 Function *NestF = cast<Function>(Tramp->getOperand(2)->stripPointerCasts());
10400 const PointerType *NestFPTy = cast<PointerType>(NestF->getType());
10401 const FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());
10403 const AttrListPtr &NestAttrs = NestF->getAttributes();
10404 if (!NestAttrs.isEmpty()) {
10405 unsigned NestIdx = 1;
10406 const Type *NestTy = 0;
10407 Attributes NestAttr = Attribute::None;
10409 // Look for a parameter marked with the 'nest' attribute.
10410 for (FunctionType::param_iterator I = NestFTy->param_begin(),
10411 E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
10412 if (NestAttrs.paramHasAttr(NestIdx, Attribute::Nest)) {
10413 // Record the parameter type and any other attributes.
NestTy = *I;
10415 NestAttr = NestAttrs.getParamAttributes(NestIdx);
break;
}

if (NestTy) {
10420 Instruction *Caller = CS.getInstruction();
10421 std::vector<Value*> NewArgs;
10422 NewArgs.reserve(unsigned(CS.arg_end()-CS.arg_begin())+1);
10424 SmallVector<AttributeWithIndex, 8> NewAttrs;
10425 NewAttrs.reserve(Attrs.getNumSlots() + 1);
10427 // Insert the nest argument into the call argument list, which may
10428 // mean appending it. Likewise for attributes.
10430 // Add any result attributes.
10431 if (Attributes Attr = Attrs.getRetAttributes())
10432 NewAttrs.push_back(AttributeWithIndex::get(0, Attr));
{
unsigned Idx = 1;
10436 CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
do {
10438 if (Idx == NestIdx) {
10439 // Add the chain argument and attributes.
10440 Value *NestVal = Tramp->getOperand(3);
10441 if (NestVal->getType() != NestTy)
10442 NestVal = new BitCastInst(NestVal, NestTy, "nest", Caller);
10443 NewArgs.push_back(NestVal);
10444 NewAttrs.push_back(AttributeWithIndex::get(NestIdx, NestAttr));
}

if (I == E)
break;

10450 // Add the original argument and attributes.
10451 NewArgs.push_back(*I);
10452 if (Attributes Attr = Attrs.getParamAttributes(Idx))
NewAttrs.push_back
10454 (AttributeWithIndex::get(Idx + (Idx >= NestIdx), Attr));

++Idx, ++I;
} while (1);
}
10460 // Add any function attributes.
10461 if (Attributes Attr = Attrs.getFnAttributes())
10462 NewAttrs.push_back(AttributeWithIndex::get(~0, Attr));
10464 // The trampoline may have been bitcast to a bogus type (FTy).
10465 // Handle this by synthesizing a new function type, equal to FTy
10466 // with the chain parameter inserted.
10468 std::vector<const Type*> NewTypes;
10469 NewTypes.reserve(FTy->getNumParams()+1);
10471 // Insert the chain's type into the list of parameter types, which may
10472 // mean appending it.
{
unsigned Idx = 1;
10475 FunctionType::param_iterator I = FTy->param_begin(),
10476 E = FTy->param_end();
do {
10479 if (Idx == NestIdx)
10480 // Add the chain's type.
10481 NewTypes.push_back(NestTy);
if (I == E)
break;

10486 // Add the original type.
10487 NewTypes.push_back(*I);

++Idx, ++I;
} while (1);
}
10493 // Replace the trampoline call with a direct call. Let the generic
10494 // code sort out any function type mismatches.
10495 FunctionType *NewFTy =
10496 Context->getFunctionType(FTy->getReturnType(), NewTypes,
FTy->isVarArg());
10498 Constant *NewCallee =
10499 NestF->getType() == Context->getPointerTypeUnqual(NewFTy) ?
10500 NestF : Context->getConstantExprBitCast(NestF,
10501 Context->getPointerTypeUnqual(NewFTy));
10502 const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs.begin(),NewAttrs.end());
10504 Instruction *NewCaller;
10505 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
10506 NewCaller = InvokeInst::Create(NewCallee,
10507 II->getNormalDest(), II->getUnwindDest(),
10508 NewArgs.begin(), NewArgs.end(),
10509 Caller->getName(), Caller);
10510 cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
10511 cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
10513 NewCaller = CallInst::Create(NewCallee, NewArgs.begin(), NewArgs.end(),
10514 Caller->getName(), Caller);
10515 if (cast<CallInst>(Caller)->isTailCall())
10516 cast<CallInst>(NewCaller)->setTailCall();
10517 cast<CallInst>(NewCaller)->
10518 setCallingConv(cast<CallInst>(Caller)->getCallingConv());
10519 cast<CallInst>(NewCaller)->setAttributes(NewPAL);
10521 if (Caller->getType() != Type::VoidTy && !Caller->use_empty())
10522 Caller->replaceAllUsesWith(NewCaller);
10523 Caller->eraseFromParent();
10524 RemoveFromWorkList(Caller);
return 0;
}
}
10529 // Replace the trampoline call with a direct call. Since there is no 'nest'
10530 // parameter, there is no need to adjust the argument list. Let the generic
10531 // code sort out any function type mismatches.
10532 Constant *NewCallee =
10533 NestF->getType() == PTy ? NestF :
10534 Context->getConstantExprBitCast(NestF, PTy);
10535 CS.setCalledFunction(NewCallee);
10536 return CS.getInstruction();
10539 /// FoldPHIArgBinOpIntoPHI - If we have something like phi [add (a,b), add(c,d)]
10540 /// and if a/b/c/d and the add's all have a single use, turn this into two phi's
10541 /// and a single binop.
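// Illustrative sketch (hypothetical IR): instead of a phi of two adds,
//   %r = phi i32 [ %s1, %bb1 ], [ %s2, %bb2 ]   ; %s1 = add i32 %a1, %b1, etc.
// we build
//   %x = phi i32 [ %a1, %bb1 ], [ %a2, %bb2 ]
//   %y = phi i32 [ %b1, %bb1 ], [ %b2, %bb2 ]
//   %r = add i32 %x, %y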
10542 Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) {
10543 Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
10544 assert(isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst));
10545 unsigned Opc = FirstInst->getOpcode();
10546 Value *LHSVal = FirstInst->getOperand(0);
10547 Value *RHSVal = FirstInst->getOperand(1);
10549 const Type *LHSType = LHSVal->getType();
10550 const Type *RHSType = RHSVal->getType();
10552 // Scan to see if all operands are the same opcode, all have one use, and all
10553 // kill their operands (i.e. the operands have one use).
10554 for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
10555 Instruction *I = dyn_cast<Instruction>(PN.getIncomingValue(i));
10556 if (!I || I->getOpcode() != Opc || !I->hasOneUse() ||
10557 // Verify type of the LHS matches so we don't fold cmp's of different
10558 // types or GEP's with different index types.
10559 I->getOperand(0)->getType() != LHSType ||
10560 I->getOperand(1)->getType() != RHSType)
10563 // If they are CmpInst instructions, check their predicates
10564 if (Opc == Instruction::ICmp || Opc == Instruction::FCmp)
10565 if (cast<CmpInst>(I)->getPredicate() !=
10566 cast<CmpInst>(FirstInst)->getPredicate())
10569 // Keep track of which operand needs a phi node.
10570 if (I->getOperand(0) != LHSVal) LHSVal = 0;
10571 if (I->getOperand(1) != RHSVal) RHSVal = 0;
10574 // Otherwise, this is safe to transform!
10576 Value *InLHS = FirstInst->getOperand(0);
10577 Value *InRHS = FirstInst->getOperand(1);
10578 PHINode *NewLHS = 0, *NewRHS = 0;
if (LHSVal == 0) {
10580 NewLHS = PHINode::Create(LHSType,
10581 FirstInst->getOperand(0)->getName() + ".pn");
10582 NewLHS->reserveOperandSpace(PN.getNumOperands()/2);
10583 NewLHS->addIncoming(InLHS, PN.getIncomingBlock(0));
10584 InsertNewInstBefore(NewLHS, PN);
LHSVal = NewLHS;
}
if (RHSVal == 0) {
10589 NewRHS = PHINode::Create(RHSType,
10590 FirstInst->getOperand(1)->getName() + ".pn");
10591 NewRHS->reserveOperandSpace(PN.getNumOperands()/2);
10592 NewRHS->addIncoming(InRHS, PN.getIncomingBlock(0));
10593 InsertNewInstBefore(NewRHS, PN);
RHSVal = NewRHS;
}
10597 // Add all operands to the new PHIs.
10598 if (NewLHS || NewRHS) {
10599 for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
10600 Instruction *InInst = cast<Instruction>(PN.getIncomingValue(i));
10602 Value *NewInLHS = InInst->getOperand(0);
10603 NewLHS->addIncoming(NewInLHS, PN.getIncomingBlock(i));
10606 Value *NewInRHS = InInst->getOperand(1);
10607 NewRHS->addIncoming(NewInRHS, PN.getIncomingBlock(i));
10612 if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst))
10613 return BinaryOperator::Create(BinOp->getOpcode(), LHSVal, RHSVal);
10614 CmpInst *CIOp = cast<CmpInst>(FirstInst);
10615 return CmpInst::Create(*Context, CIOp->getOpcode(), CIOp->getPredicate(),
LHSVal, RHSVal);
}
10619 Instruction *InstCombiner::FoldPHIArgGEPIntoPHI(PHINode &PN) {
10620 GetElementPtrInst *FirstInst =cast<GetElementPtrInst>(PN.getIncomingValue(0));
10622 SmallVector<Value*, 16> FixedOperands(FirstInst->op_begin(),
10623 FirstInst->op_end());
10624 // This is true if all GEP bases are allocas and if all indices into them are
// constants.
10626 bool AllBasePointersAreAllocas = true;
10628 // Scan to see if all operands are the same opcode, all have one use, and all
10629 // kill their operands (i.e. the operands have one use).
10630 for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
10631 GetElementPtrInst *GEP= dyn_cast<GetElementPtrInst>(PN.getIncomingValue(i));
10632 if (!GEP || !GEP->hasOneUse() || GEP->getType() != FirstInst->getType() ||
10633 GEP->getNumOperands() != FirstInst->getNumOperands())
10636 // Keep track of whether or not all GEPs are of alloca pointers.
10637 if (AllBasePointersAreAllocas &&
10638 (!isa<AllocaInst>(GEP->getOperand(0)) ||
10639 !GEP->hasAllConstantIndices()))
10640 AllBasePointersAreAllocas = false;
10642 // Compare the operand lists.
10643 for (unsigned op = 0, e = FirstInst->getNumOperands(); op != e; ++op) {
10644 if (FirstInst->getOperand(op) == GEP->getOperand(op))
continue;
10647 // Don't merge two GEPs when two operands differ (introducing phi nodes)
10648 // if one of the PHIs has a constant for the index. The index may be
10649 // substantially cheaper to compute for the constants, so making it a
10650 // variable index could pessimize the path. This also handles the case
10651 // for struct indices, which must always be constant.
10652 if (isa<ConstantInt>(FirstInst->getOperand(op)) ||
10653 isa<ConstantInt>(GEP->getOperand(op)))
return 0;
10656 if (FirstInst->getOperand(op)->getType() != GEP->getOperand(op)->getType())
return 0;
10658 FixedOperands[op] = 0; // Needs a PHI.
10662 // If all of the base pointers of the PHI'd GEPs are from allocas, don't
10663 // bother doing this transformation. At best, this will just save a bit of
10664 // offset calculation, but all the predecessors will have to materialize the
10665 // stack address into a register anyway. We'd actually rather *clone* the
10666 // load up into the predecessors so that we have a load of a gep of an alloca,
10667 // which can usually all be folded into the load.
10668 if (AllBasePointersAreAllocas)
10671 // Otherwise, this is safe to transform. Insert PHI nodes for each operand
10672 // that is variable.
10673 SmallVector<PHINode*, 16> OperandPhis(FixedOperands.size());
10675 bool HasAnyPHIs = false;
10676 for (unsigned i = 0, e = FixedOperands.size(); i != e; ++i) {
10677 if (FixedOperands[i]) continue; // operand doesn't need a phi.
10678 Value *FirstOp = FirstInst->getOperand(i);
10679 PHINode *NewPN = PHINode::Create(FirstOp->getType(),
10680 FirstOp->getName()+".pn");
10681 InsertNewInstBefore(NewPN, PN);
10683 NewPN->reserveOperandSpace(e);
10684 NewPN->addIncoming(FirstOp, PN.getIncomingBlock(0));
10685 OperandPhis[i] = NewPN;
10686 FixedOperands[i] = NewPN;
10691 // Add all operands to the new PHIs.
10693 for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
10694 GetElementPtrInst *InGEP =cast<GetElementPtrInst>(PN.getIncomingValue(i));
10695 BasicBlock *InBB = PN.getIncomingBlock(i);
10697 for (unsigned op = 0, e = OperandPhis.size(); op != e; ++op)
10698 if (PHINode *OpPhi = OperandPhis[op])
10699 OpPhi->addIncoming(InGEP->getOperand(op), InBB);
10703 Value *Base = FixedOperands[0];
10704 return GetElementPtrInst::Create(Base, FixedOperands.begin()+1,
10705 FixedOperands.end());
10709 /// isSafeAndProfitableToSinkLoad - Return true if we know that it is safe to
10710 /// sink the load out of the block that defines it. This means that it must be
10711 /// obvious the value of the load is not changed from the point of the load to
10712 /// the end of the block it is in.
10714 /// Finally, it is safe, but not profitable, to sink a load targeting a
10715 /// non-address-taken alloca. Doing so will cause us to not promote the alloca
/// to a register.
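// Clarifying note: "sinking" the load means it will be re-created next to the PHI
// that uses it, so it is only safe when nothing between the load and the end of
// its block may write to memory, which is what the scan below checks.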
10717 static bool isSafeAndProfitableToSinkLoad(LoadInst *L) {
10718 BasicBlock::iterator BBI = L, E = L->getParent()->end();
10720 for (++BBI; BBI != E; ++BBI)
10721 if (BBI->mayWriteToMemory())
return false;
10724 // Check for non-address taken alloca. If not address-taken already, it isn't
10725 // profitable to do this xform.
10726 if (AllocaInst *AI = dyn_cast<AllocaInst>(L->getOperand(0))) {
10727 bool isAddressTaken = false;
10728 for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
UI != E; ++UI) {
10730 if (isa<LoadInst>(UI)) continue;
10731 if (StoreInst *SI = dyn_cast<StoreInst>(*UI)) {
10732 // If storing TO the alloca, then the address isn't taken.
10733 if (SI->getOperand(1) == AI) continue;
10735 isAddressTaken = true;
10739 if (!isAddressTaken && AI->isStaticAlloca())
10743 // If this load is a load from a GEP with a constant offset from an alloca,
10744 // then we don't want to sink it. In its present form, it will be
10745 // load [constant stack offset]. Sinking it will cause us to have to
10746 // materialize the stack addresses in each predecessor in a register only to
10747 // do a shared load from register in the successor.
10748 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(L->getOperand(0)))
10749 if (AllocaInst *AI = dyn_cast<AllocaInst>(GEP->getOperand(0)))
10750 if (AI->isStaticAlloca() && GEP->hasAllConstantIndices())
10757 // FoldPHIArgOpIntoPHI - If all operands to a PHI node are the same "unary"
10758 // operator and they all are only used by the PHI, PHI together their
10759 // inputs, and do the operation once, to the result of the PHI.
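// Illustrative sketch (hypothetical IR): a phi of two identical casts, e.g.
//   phi i64 [ %e1, %bb1 ], [ %e2, %bb2 ]   ; where %e1/%e2 are "sext i32 ... to i64"
// becomes a phi of the i32 inputs followed by a single sext of the new phi.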
10760 Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
10761 Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
10763 // Scan the instruction, looking for input operations that can be folded away.
10764 // If all input operands to the phi are the same instruction (e.g. a cast from
10765 // the same type or "+42") we can pull the operation through the PHI, reducing
10766 // code size and simplifying code.
10767 Constant *ConstantOp = 0;
10768 const Type *CastSrcTy = 0;
10769 bool isVolatile = false;
10770 if (isa<CastInst>(FirstInst)) {
10771 CastSrcTy = FirstInst->getOperand(0)->getType();
10772 } else if (isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst)) {
10773 // Can fold binop, compare or shift here if the RHS is a constant,
10774 // otherwise call FoldPHIArgBinOpIntoPHI.
10775 ConstantOp = dyn_cast<Constant>(FirstInst->getOperand(1));
10776 if (ConstantOp == 0)
10777 return FoldPHIArgBinOpIntoPHI(PN);
10778 } else if (LoadInst *LI = dyn_cast<LoadInst>(FirstInst)) {
10779 isVolatile = LI->isVolatile();
10780 // We can't sink the load if the loaded value could be modified between the
10781 // load and the PHI.
10782 if (LI->getParent() != PN.getIncomingBlock(0) ||
10783 !isSafeAndProfitableToSinkLoad(LI))
10786 // If the PHI is of volatile loads and the load block has multiple
10787 // successors, sinking it would remove a load of the volatile value from
10788 // the path through the other successor.
if (isVolatile &&
10790 LI->getParent()->getTerminator()->getNumSuccessors() != 1)
return 0;
10793 } else if (isa<GetElementPtrInst>(FirstInst)) {
10794 return FoldPHIArgGEPIntoPHI(PN);
10796 return 0; // Cannot fold this operation.
10799 // Check to see if all arguments are the same operation.
10800 for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
10801 if (!isa<Instruction>(PN.getIncomingValue(i))) return 0;
10802 Instruction *I = cast<Instruction>(PN.getIncomingValue(i));
10803 if (!I->hasOneUse() || !I->isSameOperationAs(FirstInst))
return 0;

if (CastSrcTy) {
10806 if (I->getOperand(0)->getType() != CastSrcTy)
10807 return 0; // Cast operation must match.
10808 } else if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
10809 // We can't sink the load if the loaded value could be modified between
10810 // the load and the PHI.
10811 if (LI->isVolatile() != isVolatile ||
10812 LI->getParent() != PN.getIncomingBlock(i) ||
10813 !isSafeAndProfitableToSinkLoad(LI))
10816 // If the PHI is of volatile loads and the load block has multiple
10817 // successors, sinking it would remove a load of the volatile value from
10818 // the path through the other successor.
if (isVolatile &&
10820 LI->getParent()->getTerminator()->getNumSuccessors() != 1)
return 0;
10823 } else if (I->getOperand(1) != ConstantOp) {
10828 // Okay, they are all the same operation. Create a new PHI node of the
10829 // correct type, and PHI together all of the LHS's of the instructions.
10830 PHINode *NewPN = PHINode::Create(FirstInst->getOperand(0)->getType(),
10831 PN.getName()+".in");
10832 NewPN->reserveOperandSpace(PN.getNumOperands()/2);
10834 Value *InVal = FirstInst->getOperand(0);
10835 NewPN->addIncoming(InVal, PN.getIncomingBlock(0));
10837 // Add all operands to the new PHI.
10838 for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
10839 Value *NewInVal = cast<Instruction>(PN.getIncomingValue(i))->getOperand(0);
10840 if (NewInVal != InVal)
InVal = 0;
10842 NewPN->addIncoming(NewInVal, PN.getIncomingBlock(i));
}

Value *PhiVal;
if (InVal) {
10847 // The new PHI unions all of the same values together. This is really
10848 // common, so we handle it intelligently here for compile-time speed.
PhiVal = InVal;
delete NewPN;
} else {
10852 InsertNewInstBefore(NewPN, PN);
PhiVal = NewPN;
}
10856 // Insert and return the new operation.
10857 if (CastInst* FirstCI = dyn_cast<CastInst>(FirstInst))
10858 return CastInst::Create(FirstCI->getOpcode(), PhiVal, PN.getType());
10859 if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst))
10860 return BinaryOperator::Create(BinOp->getOpcode(), PhiVal, ConstantOp);
10861 if (CmpInst *CIOp = dyn_cast<CmpInst>(FirstInst))
10862 return CmpInst::Create(*Context, CIOp->getOpcode(), CIOp->getPredicate(),
10863 PhiVal, ConstantOp);
10864 assert(isa<LoadInst>(FirstInst) && "Unknown operation");
10866 // If this was a volatile load that we are merging, make sure to loop through
10867 // and mark all the input loads as non-volatile. If we don't do this, we will
10868 // insert a new volatile load and the old ones will not be deletable.
if (isVolatile)
10870 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
10871 cast<LoadInst>(PN.getIncomingValue(i))->setVolatile(false);
10873 return new LoadInst(PhiVal, "", isVolatile);
10876 /// DeadPHICycle - Return true if this PHI node is only used by a PHI node cycle
/// that is dead.
10878 static bool DeadPHICycle(PHINode *PN,
10879 SmallPtrSet<PHINode*, 16> &PotentiallyDeadPHIs) {
10880 if (PN->use_empty()) return true;
10881 if (!PN->hasOneUse()) return false;
10883 // Remember this node, and if we find the cycle, return.
10884 if (!PotentiallyDeadPHIs.insert(PN))
10887 // Don't scan crazily complex things.
10888 if (PotentiallyDeadPHIs.size() == 16)
10891 if (PHINode *PU = dyn_cast<PHINode>(PN->use_back()))
10892 return DeadPHICycle(PU, PotentiallyDeadPHIs);
10897 /// PHIsEqualValue - Return true if this phi node is always equal to
10898 /// NonPhiInVal. This happens with mutually cyclic phi nodes like:
10899 /// z = some value; x = phi (y, z); y = phi (x, z)
10900 static bool PHIsEqualValue(PHINode *PN, Value *NonPhiInVal,
10901 SmallPtrSet<PHINode*, 16> &ValueEqualPHIs) {
10902 // See if we already saw this PHI node.
10903 if (!ValueEqualPHIs.insert(PN))
10906 // Don't scan crazily complex things.
10907 if (ValueEqualPHIs.size() == 16)
10910 // Scan the operands to see if they are either phi nodes or are equal to
// the value.
10912 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
10913 Value *Op = PN->getIncomingValue(i);
10914 if (PHINode *OpPN = dyn_cast<PHINode>(Op)) {
10915 if (!PHIsEqualValue(OpPN, NonPhiInVal, ValueEqualPHIs))
return false;
10917 } else if (Op != NonPhiInVal)
return false;
}

return true;
}
10925 // PHINode simplification
10927 Instruction *InstCombiner::visitPHINode(PHINode &PN) {
10928 // If LCSSA is around, don't mess with Phi nodes
10929 if (MustPreserveLCSSA) return 0;
10931 if (Value *V = PN.hasConstantValue())
10932 return ReplaceInstUsesWith(PN, V);
10934 // If all PHI operands are the same operation, pull them through the PHI,
10935 // reducing code size.
10936 if (isa<Instruction>(PN.getIncomingValue(0)) &&
10937 isa<Instruction>(PN.getIncomingValue(1)) &&
10938 cast<Instruction>(PN.getIncomingValue(0))->getOpcode() ==
10939 cast<Instruction>(PN.getIncomingValue(1))->getOpcode() &&
10940       // FIXME: The hasOneUse check will fail for PHIs whose incoming value is
10941       // used more than once.
10942 PN.getIncomingValue(0)->hasOneUse())
10943 if (Instruction *Result = FoldPHIArgOpIntoPHI(PN))
10946 // If this is a trivial cycle in the PHI node graph, remove it. Basically, if
10947 // this PHI only has a single use (a PHI), and if that PHI only has one use (a
10948 // PHI)... break the cycle.
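  // For illustration (hypothetical IR; value and block names are made up), a
  // dead PHI cycle looks like:
  //   %x = phi i32 [ 0, %entry ], [ %y, %latch ]    ; only user of %x is %y
  //   %y = phi i32 [ %x, %header ], [ 1, %other ]   ; only user of %y is %x
  // Neither value escapes the cycle, so %x can be replaced with undef and the
  // whole cycle becomes dead.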
10949 if (PN.hasOneUse()) {
10950 Instruction *PHIUser = cast<Instruction>(PN.use_back());
10951 if (PHINode *PU = dyn_cast<PHINode>(PHIUser)) {
10952 SmallPtrSet<PHINode*, 16> PotentiallyDeadPHIs;
10953 PotentiallyDeadPHIs.insert(&PN);
10954 if (DeadPHICycle(PU, PotentiallyDeadPHIs))
10955 return ReplaceInstUsesWith(PN, Context->getUndef(PN.getType()));
10958 // If this phi has a single use, and if that use just computes a value for
10959 // the next iteration of a loop, delete the phi. This occurs with unused
10960 // induction variables, e.g. "for (int j = 0; ; ++j);". Detecting this
10961 // common case here is good because the only other things that catch this
10962     // are induction variable analysis (sometimes) and ADCE, which is only run late in the pipeline.
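    // For illustration (hypothetical IR; names are made up), the unused
    // induction variable pattern is:
    //   %j = phi i32 [ 0, %entry ], [ %j.next, %loop ]
    //   %j.next = add i32 %j, 1     ; %j.next is only used by %j
    // Since the two values only feed each other, %j can be replaced with undef.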
10964 if (PHIUser->hasOneUse() &&
10965 (isa<BinaryOperator>(PHIUser) || isa<GetElementPtrInst>(PHIUser)) &&
10966 PHIUser->use_back() == &PN) {
10967 return ReplaceInstUsesWith(PN, Context->getUndef(PN.getType()));
10971 // We sometimes end up with phi cycles that non-obviously end up being the
10972 // same value, for example:
10973 // z = some value; x = phi (y, z); y = phi (x, z)
10974 // where the phi nodes don't necessarily need to be in the same block. Do a
10975   // quick check to see if the PHI node only contains a single non-phi value; if
10976 // so, scan to see if the phi cycle is actually equal to that value.
10978 unsigned InValNo = 0, NumOperandVals = PN.getNumIncomingValues();
10979 // Scan for the first non-phi operand.
10980 while (InValNo != NumOperandVals &&
10981 isa<PHINode>(PN.getIncomingValue(InValNo)))
10984 if (InValNo != NumOperandVals) {
10985 Value *NonPhiInVal = PN.getOperand(InValNo);
10987 // Scan the rest of the operands to see if there are any conflicts, if so
10988 // there is no need to recursively scan other phis.
10989 for (++InValNo; InValNo != NumOperandVals; ++InValNo) {
10990 Value *OpVal = PN.getIncomingValue(InValNo);
10991 if (OpVal != NonPhiInVal && !isa<PHINode>(OpVal))
10995 // If we scanned over all operands, then we have one unique value plus
10996     // phi values. Scan PHI nodes to see if they all merge in each other or the value.
10998 if (InValNo == NumOperandVals) {
10999 SmallPtrSet<PHINode*, 16> ValueEqualPHIs;
11000 if (PHIsEqualValue(&PN, NonPhiInVal, ValueEqualPHIs))
11001 return ReplaceInstUsesWith(PN, NonPhiInVal);
11008 static Value *InsertCastToIntPtrTy(Value *V, const Type *DTy,
11009 Instruction *InsertPoint,
11010 InstCombiner *IC) {
11011 unsigned PtrSize = DTy->getScalarSizeInBits();
11012 unsigned VTySize = V->getType()->getScalarSizeInBits();
11013 // We must cast correctly to the pointer type. Ensure that we
11014 // sign extend the integer value if it is smaller as this is
11015 // used for address computation.
11016 Instruction::CastOps opcode =
11017 (VTySize < PtrSize ? Instruction::SExt :
11018 (VTySize == PtrSize ? Instruction::BitCast : Instruction::Trunc));
11019 return IC->InsertCastBefore(opcode, V, DTy, *InsertPoint);
11023 Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
11024 Value *PtrOp = GEP.getOperand(0);
11025 // Is it 'getelementptr %P, i32 0' or 'getelementptr %P'
11026 // If so, eliminate the noop.
11027 if (GEP.getNumOperands() == 1)
11028 return ReplaceInstUsesWith(GEP, PtrOp);
11030 if (isa<UndefValue>(GEP.getOperand(0)))
11031 return ReplaceInstUsesWith(GEP, Context->getUndef(GEP.getType()));
11033 bool HasZeroPointerIndex = false;
11034 if (Constant *C = dyn_cast<Constant>(GEP.getOperand(1)))
11035 HasZeroPointerIndex = C->isNullValue();
11037 if (GEP.getNumOperands() == 2 && HasZeroPointerIndex)
11038 return ReplaceInstUsesWith(GEP, PtrOp);
11040 // Eliminate unneeded casts for indices.
11041 bool MadeChange = false;
11043 gep_type_iterator GTI = gep_type_begin(GEP);
11044 for (User::op_iterator i = GEP.op_begin() + 1, e = GEP.op_end();
11045 i != e; ++i, ++GTI) {
11046 if (isa<SequentialType>(*GTI)) {
11047 if (CastInst *CI = dyn_cast<CastInst>(*i)) {
11048 if (CI->getOpcode() == Instruction::ZExt ||
11049 CI->getOpcode() == Instruction::SExt) {
11050 const Type *SrcTy = CI->getOperand(0)->getType();
11051 // We can eliminate a cast from i32 to i64 iff the target
11052 // is a 32-bit pointer target.
11053 if (SrcTy->getScalarSizeInBits() >= TD->getPointerSizeInBits()) {
11055 *i = CI->getOperand(0);
11059 // If we are using a wider index than needed for this platform, shrink it
11060 // to what we need. If narrower, sign-extend it to what we need.
11061 // If the incoming value needs a cast instruction,
11062       // insert it. This explicit cast can make subsequent optimizations more obvious.
11065 if (TD->getTypeSizeInBits(Op->getType()) > TD->getPointerSizeInBits()) {
11066 if (Constant *C = dyn_cast<Constant>(Op)) {
11067 *i = Context->getConstantExprTrunc(C, TD->getIntPtrType());
11070 Op = InsertCastBefore(Instruction::Trunc, Op, TD->getIntPtrType(),
11075 } else if (TD->getTypeSizeInBits(Op->getType()) < TD->getPointerSizeInBits()) {
11076 if (Constant *C = dyn_cast<Constant>(Op)) {
11077 *i = Context->getConstantExprSExt(C, TD->getIntPtrType());
11080 Op = InsertCastBefore(Instruction::SExt, Op, TD->getIntPtrType(),
11088 if (MadeChange) return &GEP;
11090 // Combine Indices - If the source pointer to this getelementptr instruction
11091 // is a getelementptr instruction, combine the indices of the two
11092 // getelementptr instructions into a single instruction.
11094 SmallVector<Value*, 8> SrcGEPOperands;
11095 if (User *Src = dyn_castGetElementPtr(PtrOp))
11096 SrcGEPOperands.append(Src->op_begin(), Src->op_end());
11098 if (!SrcGEPOperands.empty()) {
11099     // Note that if our source is a gep chain itself, we wait for that
11100 // chain to be resolved before we perform this transformation. This
11101 // avoids us creating a TON of code in some cases.
11103 if (isa<GetElementPtrInst>(SrcGEPOperands[0]) &&
11104 cast<Instruction>(SrcGEPOperands[0])->getNumOperands() == 2)
11105 return 0; // Wait until our source is folded to completion.
11107 SmallVector<Value*, 8> Indices;
11109 // Find out whether the last index in the source GEP is a sequential idx.
11110 bool EndsWithSequential = false;
11111 for (gep_type_iterator I = gep_type_begin(*cast<User>(PtrOp)),
11112 E = gep_type_end(*cast<User>(PtrOp)); I != E; ++I)
11113 EndsWithSequential = !isa<StructType>(*I);
11115     // Can we combine the two pointer arithmetic offsets?
11116 if (EndsWithSequential) {
11117 // Replace: gep (gep %P, long B), long A, ...
11118 // With: T = long A+B; gep %P, T, ...
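      // For illustration (hypothetical IR; names are made up):
      //   %p1 = getelementptr i32* %P, i64 %B
      //   %p2 = getelementptr i32* %p1, i64 %A
      // becomes
      //   %sum = add i64 %B, %A
      //   %p2 = getelementptr i32* %P, i64 %sum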
11120 Value *Sum, *SO1 = SrcGEPOperands.back(), *GO1 = GEP.getOperand(1);
11121 if (SO1 == Context->getNullValue(SO1->getType())) {
11123 } else if (GO1 == Context->getNullValue(GO1->getType())) {
11126 // If they aren't the same type, convert both to an integer of the
11127 // target's pointer size.
11128 if (SO1->getType() != GO1->getType()) {
11129 if (Constant *SO1C = dyn_cast<Constant>(SO1)) {
11131 Context->getConstantExprIntegerCast(SO1C, GO1->getType(), true);
11132 } else if (Constant *GO1C = dyn_cast<Constant>(GO1)) {
11134 Context->getConstantExprIntegerCast(GO1C, SO1->getType(), true);
11136 unsigned PS = TD->getPointerSizeInBits();
11137 if (TD->getTypeSizeInBits(SO1->getType()) == PS) {
11138 // Convert GO1 to SO1's type.
11139 GO1 = InsertCastToIntPtrTy(GO1, SO1->getType(), &GEP, this);
11141 } else if (TD->getTypeSizeInBits(GO1->getType()) == PS) {
11142 // Convert SO1 to GO1's type.
11143 SO1 = InsertCastToIntPtrTy(SO1, GO1->getType(), &GEP, this);
11145 const Type *PT = TD->getIntPtrType();
11146 SO1 = InsertCastToIntPtrTy(SO1, PT, &GEP, this);
11147 GO1 = InsertCastToIntPtrTy(GO1, PT, &GEP, this);
11151 if (isa<Constant>(SO1) && isa<Constant>(GO1))
11152 Sum = Context->getConstantExprAdd(cast<Constant>(SO1),
11153 cast<Constant>(GO1));
11155 Sum = BinaryOperator::CreateAdd(SO1, GO1, PtrOp->getName()+".sum");
11156 InsertNewInstBefore(cast<Instruction>(Sum), GEP);
11160 // Recycle the GEP we already have if possible.
11161 if (SrcGEPOperands.size() == 2) {
11162 GEP.setOperand(0, SrcGEPOperands[0]);
11163 GEP.setOperand(1, Sum);
11166 Indices.insert(Indices.end(), SrcGEPOperands.begin()+1,
11167 SrcGEPOperands.end()-1);
11168 Indices.push_back(Sum);
11169 Indices.insert(Indices.end(), GEP.op_begin()+2, GEP.op_end());
11171 } else if (isa<Constant>(*GEP.idx_begin()) &&
11172 cast<Constant>(*GEP.idx_begin())->isNullValue() &&
11173 SrcGEPOperands.size() != 1) {
11174 // Otherwise we can do the fold if the first index of the GEP is a zero
11175 Indices.insert(Indices.end(), SrcGEPOperands.begin()+1,
11176 SrcGEPOperands.end());
11177 Indices.insert(Indices.end(), GEP.idx_begin()+1, GEP.idx_end());
11180 if (!Indices.empty())
11181 return GetElementPtrInst::Create(SrcGEPOperands[0], Indices.begin(),
11182 Indices.end(), GEP.getName());
11184 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(PtrOp)) {
11185 // GEP of global variable. If all of the indices for this GEP are
11186 // constants, we can promote this to a constexpr instead of an instruction.
11188 // Scan for nonconstants...
11189 SmallVector<Constant*, 8> Indices;
11190 User::op_iterator I = GEP.idx_begin(), E = GEP.idx_end();
11191 for (; I != E && isa<Constant>(*I); ++I)
11192 Indices.push_back(cast<Constant>(*I));
11194 if (I == E) { // If they are all constants...
11195 Constant *CE = Context->getConstantExprGetElementPtr(GV,
11196 &Indices[0],Indices.size());
11198 // Replace all uses of the GEP with the new constexpr...
11199 return ReplaceInstUsesWith(GEP, CE);
11201 } else if (Value *X = getBitCastOperand(PtrOp)) { // Is the operand a cast?
11202 if (!isa<PointerType>(X->getType())) {
11203 // Not interesting. Source pointer must be a cast from pointer.
11204 } else if (HasZeroPointerIndex) {
11205 // transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ...
11206 // into : GEP [10 x i8]* X, i32 0, ...
11208 // Likewise, transform: GEP (bitcast i8* X to [0 x i8]*), i32 0, ...
11209 // into : GEP i8* X, ...
11211 // This occurs when the program declares an array extern like "int X[];"
11212 const PointerType *CPTy = cast<PointerType>(PtrOp->getType());
11213 const PointerType *XTy = cast<PointerType>(X->getType());
11214 if (const ArrayType *CATy =
11215 dyn_cast<ArrayType>(CPTy->getElementType())) {
11216 // GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ?
11217 if (CATy->getElementType() == XTy->getElementType()) {
11218 // -> GEP i8* X, ...
11219 SmallVector<Value*, 8> Indices(GEP.idx_begin()+1, GEP.idx_end());
11220 return GetElementPtrInst::Create(X, Indices.begin(), Indices.end(),
11222 } else if (const ArrayType *XATy =
11223 dyn_cast<ArrayType>(XTy->getElementType())) {
11224 // GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ?
11225 if (CATy->getElementType() == XATy->getElementType()) {
11226 // -> GEP [10 x i8]* X, i32 0, ...
11227 // At this point, we know that the cast source type is a pointer
11228 // to an array of the same type as the destination pointer
11229 // array. Because the array type is never stepped over (there
11230 // is a leading zero) we can fold the cast into this GEP.
11231 GEP.setOperand(0, X);
11236 } else if (GEP.getNumOperands() == 2) {
11237 // Transform things like:
11238 // %t = getelementptr i32* bitcast ([2 x i32]* %str to i32*), i32 %V
11239 // into: %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast
11240 const Type *SrcElTy = cast<PointerType>(X->getType())->getElementType();
11241 const Type *ResElTy=cast<PointerType>(PtrOp->getType())->getElementType();
11242 if (isa<ArrayType>(SrcElTy) &&
11243 TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType()) ==
11244 TD->getTypeAllocSize(ResElTy)) {
11246 Idx[0] = Context->getNullValue(Type::Int32Ty);
11247 Idx[1] = GEP.getOperand(1);
11248 Value *V = InsertNewInstBefore(
11249 GetElementPtrInst::Create(X, Idx, Idx + 2, GEP.getName()), GEP);
11250 // V and GEP are both pointer types --> BitCast
11251 return new BitCastInst(V, GEP.getType());
11254 // Transform things like:
11255 // getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp
11256 // (where tmp = 8*tmp2) into:
11257 // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast
11259 if (isa<ArrayType>(SrcElTy) && ResElTy == Type::Int8Ty) {
11260 uint64_t ArrayEltSize =
11261 TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType());
11263 // Check to see if "tmp" is a scale by a multiple of ArrayEltSize. We
11264 // allow either a mul, shift, or constant here.
11266 ConstantInt *Scale = 0;
11267 if (ArrayEltSize == 1) {
11268 NewIdx = GEP.getOperand(1);
11270 Context->getConstantInt(cast<IntegerType>(NewIdx->getType()), 1);
11271 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP.getOperand(1))) {
11272 NewIdx = Context->getConstantInt(CI->getType(), 1);
11274 } else if (Instruction *Inst =dyn_cast<Instruction>(GEP.getOperand(1))){
11275 if (Inst->getOpcode() == Instruction::Shl &&
11276 isa<ConstantInt>(Inst->getOperand(1))) {
11277 ConstantInt *ShAmt = cast<ConstantInt>(Inst->getOperand(1));
11278 uint32_t ShAmtVal = ShAmt->getLimitedValue(64);
11279 Scale = Context->getConstantInt(cast<IntegerType>(Inst->getType()),
11281 NewIdx = Inst->getOperand(0);
11282 } else if (Inst->getOpcode() == Instruction::Mul &&
11283 isa<ConstantInt>(Inst->getOperand(1))) {
11284 Scale = cast<ConstantInt>(Inst->getOperand(1));
11285 NewIdx = Inst->getOperand(0);
11289 // If the index will be to exactly the right offset with the scale taken
11290 // out, perform the transformation. Note, we don't know whether Scale is
11291 // signed or not. We'll use unsigned version of division/modulo
11292 // operation after making sure Scale doesn't have the sign bit set.
11293 if (ArrayEltSize && Scale && Scale->getSExtValue() >= 0LL &&
11294 Scale->getZExtValue() % ArrayEltSize == 0) {
11295 Scale = Context->getConstantInt(Scale->getType(),
11296 Scale->getZExtValue() / ArrayEltSize);
11297 if (Scale->getZExtValue() != 1) {
11299 Context->getConstantExprIntegerCast(Scale, NewIdx->getType(),
11301 Instruction *Sc = BinaryOperator::CreateMul(NewIdx, C, "idxscale");
11302 NewIdx = InsertNewInstBefore(Sc, GEP);
11305 // Insert the new GEP instruction.
11307 Idx[0] = Context->getNullValue(Type::Int32Ty);
11309 Instruction *NewGEP =
11310 GetElementPtrInst::Create(X, Idx, Idx + 2, GEP.getName());
11311 NewGEP = InsertNewInstBefore(NewGEP, GEP);
11312 // The NewGEP must be pointer typed, so must the old one -> BitCast
11313 return new BitCastInst(NewGEP, GEP.getType());
11319 /// See if we can simplify:
11320 /// X = bitcast A to B*
11321 /// Y = gep X, <...constant indices...>
11322 /// into a gep of the original struct. This is important for SROA and alias
11323 /// analysis of unions. If "A" is also a bitcast, wait for A/X to be merged.
11324 if (BitCastInst *BCI = dyn_cast<BitCastInst>(PtrOp)) {
11325 if (!isa<BitCastInst>(BCI->getOperand(0)) && GEP.hasAllConstantIndices()) {
11326 // Determine how much the GEP moves the pointer. We are guaranteed to get
11327 // a constant back from EmitGEPOffset.
11328 ConstantInt *OffsetV =
11329 cast<ConstantInt>(EmitGEPOffset(&GEP, GEP, *this));
11330 int64_t Offset = OffsetV->getSExtValue();
11332 // If this GEP instruction doesn't move the pointer, just replace the GEP
11333 // with a bitcast of the real input to the dest type.
11335 // If the bitcast is of an allocation, and the allocation will be
11336 // converted to match the type of the cast, don't touch this.
11337 if (isa<AllocationInst>(BCI->getOperand(0))) {
11338 // See if the bitcast simplifies, if so, don't nuke this GEP yet.
11339 if (Instruction *I = visitBitCast(*BCI)) {
11342 BCI->getParent()->getInstList().insert(BCI, I);
11343 ReplaceInstUsesWith(*BCI, I);
11348 return new BitCastInst(BCI->getOperand(0), GEP.getType());
11351 // Otherwise, if the offset is non-zero, we need to find out if there is a
11352     // field at Offset in 'A's type. If so, we can pull the cast through the GEP.
11354 SmallVector<Value*, 8> NewIndices;
11356 cast<PointerType>(BCI->getOperand(0)->getType())->getElementType();
11357 if (FindElementAtOffset(InTy, Offset, NewIndices, TD, Context)) {
11358 Instruction *NGEP =
11359 GetElementPtrInst::Create(BCI->getOperand(0), NewIndices.begin(),
11361 if (NGEP->getType() == GEP.getType()) return NGEP;
11362 InsertNewInstBefore(NGEP, GEP);
11363 NGEP->takeName(&GEP);
11364 return new BitCastInst(NGEP, GEP.getType());
11372 Instruction *InstCombiner::visitAllocationInst(AllocationInst &AI) {
11373 // Convert: malloc Ty, C - where C is a constant != 1 into: malloc [C x Ty], 1
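  // For illustration (hypothetical IR; names are made up):
  //   %p = malloc i32, i32 4
  // becomes
  //   %a = malloc [4 x i32]
  //   %p = getelementptr [4 x i32]* %a, i32 0, i32 0
  // which exposes the whole allocation as a single typed object.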
11374 if (AI.isArrayAllocation()) { // Check C != 1
11375 if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
11376 const Type *NewTy =
11377 Context->getArrayType(AI.getAllocatedType(), C->getZExtValue());
11378 AllocationInst *New = 0;
11380 // Create and insert the replacement instruction...
11381 if (isa<MallocInst>(AI))
11382 New = new MallocInst(NewTy, 0, AI.getAlignment(), AI.getName());
11384 assert(isa<AllocaInst>(AI) && "Unknown type of allocation inst!");
11385 New = new AllocaInst(NewTy, 0, AI.getAlignment(), AI.getName());
11388 InsertNewInstBefore(New, AI);
11390 // Scan to the end of the allocation instructions, to skip over a block of
11391 // allocas if possible...also skip interleaved debug info
11393 BasicBlock::iterator It = New;
11394 while (isa<AllocationInst>(*It) || isa<DbgInfoIntrinsic>(*It)) ++It;
11396 // Now that I is pointing to the first non-allocation-inst in the block,
11397 // insert our getelementptr instruction...
11399 Value *NullIdx = Context->getNullValue(Type::Int32Ty);
11403 Value *V = GetElementPtrInst::Create(New, Idx, Idx + 2,
11404 New->getName()+".sub", It);
11406 // Now make everything use the getelementptr instead of the original
11408 return ReplaceInstUsesWith(AI, V);
11409 } else if (isa<UndefValue>(AI.getArraySize())) {
11410 return ReplaceInstUsesWith(AI, Context->getNullValue(AI.getType()));
11414 if (isa<AllocaInst>(AI) && AI.getAllocatedType()->isSized()) {
11415 // If alloca'ing a zero byte object, replace the alloca with a null pointer.
11416 // Note that we only do this for alloca's, because malloc should allocate
11417 // and return a unique pointer, even for a zero byte allocation.
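    // For illustration (hypothetical IR): "%p = alloca {}" and
    // "%p = alloca [0 x i32]" both allocate zero bytes, so %p can be replaced
    // with a null pointer of the same type.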
11418 if (TD->getTypeAllocSize(AI.getAllocatedType()) == 0)
11419 return ReplaceInstUsesWith(AI, Context->getNullValue(AI.getType()));
11421 // If the alignment is 0 (unspecified), assign it the preferred alignment.
11422 if (AI.getAlignment() == 0)
11423 AI.setAlignment(TD->getPrefTypeAlignment(AI.getAllocatedType()));
11429 Instruction *InstCombiner::visitFreeInst(FreeInst &FI) {
11430 Value *Op = FI.getOperand(0);
11432 // free undef -> unreachable.
11433 if (isa<UndefValue>(Op)) {
11434 // Insert a new store to null because we cannot modify the CFG here.
11435 new StoreInst(Context->getConstantIntTrue(),
11436 Context->getUndef(Context->getPointerTypeUnqual(Type::Int1Ty)), &FI);
11437 return EraseInstFromFunction(FI);
11440 // If we have 'free null' delete the instruction. This can happen in stl code
11441 // when lots of inlining happens.
11442 if (isa<ConstantPointerNull>(Op))
11443 return EraseInstFromFunction(FI);
11445 // Change free <ty>* (cast <ty2>* X to <ty>*) into free <ty2>* X
11446 if (BitCastInst *CI = dyn_cast<BitCastInst>(Op)) {
11447 FI.setOperand(0, CI->getOperand(0));
11451 // Change free (gep X, 0,0,0,0) into free(X)
11452 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
11453 if (GEPI->hasAllZeroIndices()) {
11454 AddToWorkList(GEPI);
11455 FI.setOperand(0, GEPI->getOperand(0));
11460 // Change free(malloc) into nothing, if the malloc has a single use.
11461 if (MallocInst *MI = dyn_cast<MallocInst>(Op))
11462 if (MI->hasOneUse()) {
11463 EraseInstFromFunction(FI);
11464 return EraseInstFromFunction(*MI);
11471 /// InstCombineLoadCast - Fold 'load (cast P)' -> 'cast (load P)' when possible.
11472 static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
11473 const TargetData *TD) {
11474 User *CI = cast<User>(LI.getOperand(0));
11475 Value *CastOp = CI->getOperand(0);
11476 LLVMContext *Context = IC.getContext();
11479 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(CI)) {
11480     // Instead of loading a constant C string, use the corresponding integer
11481     // value directly if the string is short enough.
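    // For illustration (hypothetical IR, little-endian target):
    //   @s = constant [4 x i8] c"abc\00"
    //   %v = load i32* bitcast ([4 x i8]* @s to i32*)
    // folds to the constant i32 6513249 (0x00636261, the string bytes read as
    // a little-endian i32).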
11483 if (GetConstantStringInfo(CE->getOperand(0), Str) && !Str.empty()) {
11484 unsigned len = Str.length();
11485 const Type *Ty = cast<PointerType>(CE->getType())->getElementType();
11486 unsigned numBits = Ty->getPrimitiveSizeInBits();
11487 // Replace LI with immediate integer store.
11488 if ((numBits >> 3) == len + 1) {
11489 APInt StrVal(numBits, 0);
11490 APInt SingleChar(numBits, 0);
11491 if (TD->isLittleEndian()) {
11492 for (signed i = len-1; i >= 0; i--) {
11493 SingleChar = (uint64_t) Str[i] & UCHAR_MAX;
11494 StrVal = (StrVal << 8) | SingleChar;
11497 for (unsigned i = 0; i < len; i++) {
11498 SingleChar = (uint64_t) Str[i] & UCHAR_MAX;
11499 StrVal = (StrVal << 8) | SingleChar;
11501 // Append NULL at the end.
11503 StrVal = (StrVal << 8) | SingleChar;
11505 Value *NL = Context->getConstantInt(StrVal);
11506 return IC.ReplaceInstUsesWith(LI, NL);
11512 const PointerType *DestTy = cast<PointerType>(CI->getType());
11513 const Type *DestPTy = DestTy->getElementType();
11514 if (const PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType())) {
11516 // If the address spaces don't match, don't eliminate the cast.
11517 if (DestTy->getAddressSpace() != SrcTy->getAddressSpace())
11520 const Type *SrcPTy = SrcTy->getElementType();
11522 if (DestPTy->isInteger() || isa<PointerType>(DestPTy) ||
11523 isa<VectorType>(DestPTy)) {
11524 // If the source is an array, the code below will not succeed. Check to
11525       // see if a trivial 'gep P, 0, 0' will help matters. Only do this for constants.
11527 if (const ArrayType *ASrcTy = dyn_cast<ArrayType>(SrcPTy))
11528 if (Constant *CSrc = dyn_cast<Constant>(CastOp))
11529 if (ASrcTy->getNumElements() != 0) {
11531 Idxs[0] = Idxs[1] = Context->getNullValue(Type::Int32Ty);
11532 CastOp = Context->getConstantExprGetElementPtr(CSrc, Idxs, 2);
11533 SrcTy = cast<PointerType>(CastOp->getType());
11534 SrcPTy = SrcTy->getElementType();
11537 if ((SrcPTy->isInteger() || isa<PointerType>(SrcPTy) ||
11538 isa<VectorType>(SrcPTy)) &&
11539 // Do not allow turning this into a load of an integer, which is then
11540 // casted to a pointer, this pessimizes pointer analysis a lot.
11541 (isa<PointerType>(SrcPTy) == isa<PointerType>(LI.getType())) &&
11542 IC.getTargetData().getTypeSizeInBits(SrcPTy) ==
11543 IC.getTargetData().getTypeSizeInBits(DestPTy)) {
11545 // Okay, we are casting from one integer or pointer type to another of
11546 // the same size. Instead of casting the pointer before the load, cast
11547 // the result of the loaded value.
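        // For illustration (hypothetical IR; names are made up, same address
        // space assumed):
        //   %q = bitcast i32** %p to i8**
        //   %v = load i8** %q
        // becomes
        //   %t = load i32** %p
        //   %v = bitcast i32* %t to i8*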
11548 Value *NewLoad = IC.InsertNewInstBefore(new LoadInst(CastOp,
11550 LI.isVolatile()),LI);
11551 // Now cast the result of the load.
11552 return new BitCastInst(NewLoad, LI.getType());
11559 Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
11560 Value *Op = LI.getOperand(0);
11562 // Attempt to improve the alignment.
11563 unsigned KnownAlign =
11564 GetOrEnforceKnownAlignment(Op, TD->getPrefTypeAlignment(LI.getType()));
11566 (LI.getAlignment() == 0 ? TD->getABITypeAlignment(LI.getType()) :
11567 LI.getAlignment()))
11568 LI.setAlignment(KnownAlign);
11570 // load (cast X) --> cast (load X) iff safe
11571 if (isa<CastInst>(Op))
11572 if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
11575 // None of the following transforms are legal for volatile loads.
11576 if (LI.isVolatile()) return 0;
11578 // Do really simple store-to-load forwarding and load CSE, to catch cases
11579   // where there are several consecutive memory accesses to the same location,
11580 // separated by a few arithmetic operations.
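  // For illustration (hypothetical IR; names are made up):
  //   store i32 %v, i32* %p
  //   %x = add i32 %v, 1          ; no intervening write to %p
  //   %y = load i32* %p
  // The load can simply be replaced with %v.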
11581 BasicBlock::iterator BBI = &LI;
11582 if (Value *AvailableVal = FindAvailableLoadedValue(Op, LI.getParent(), BBI,6))
11583 return ReplaceInstUsesWith(LI, AvailableVal);
11585 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
11586 const Value *GEPI0 = GEPI->getOperand(0);
11587 // TODO: Consider a target hook for valid address spaces for this xform.
11588 if (isa<ConstantPointerNull>(GEPI0) &&
11589 cast<PointerType>(GEPI0->getType())->getAddressSpace() == 0) {
11590 // Insert a new store to null instruction before the load to indicate
11591 // that this code is not reachable. We do this instead of inserting
11592 // an unreachable instruction directly because we cannot modify the
11594 new StoreInst(Context->getUndef(LI.getType()),
11595 Context->getNullValue(Op->getType()), &LI);
11596 return ReplaceInstUsesWith(LI, Context->getUndef(LI.getType()));
11600 if (Constant *C = dyn_cast<Constant>(Op)) {
11601 // load null/undef -> undef
11602 // TODO: Consider a target hook for valid address spaces for this xform.
11603 if (isa<UndefValue>(C) || (C->isNullValue() &&
11604 cast<PointerType>(Op->getType())->getAddressSpace() == 0)) {
11605 // Insert a new store to null instruction before the load to indicate that
11606 // this code is not reachable. We do this instead of inserting an
11607 // unreachable instruction directly because we cannot modify the CFG.
11608 new StoreInst(Context->getUndef(LI.getType()),
11609 Context->getNullValue(Op->getType()), &LI);
11610 return ReplaceInstUsesWith(LI, Context->getUndef(LI.getType()));
11613 // Instcombine load (constant global) into the value loaded.
11614 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op))
11615 if (GV->isConstant() && GV->hasDefinitiveInitializer())
11616 return ReplaceInstUsesWith(LI, GV->getInitializer());
11618 // Instcombine load (constantexpr_GEP global, 0, ...) into the value loaded.
11619 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op)) {
11620 if (CE->getOpcode() == Instruction::GetElementPtr) {
11621 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(CE->getOperand(0)))
11622 if (GV->isConstant() && GV->hasDefinitiveInitializer())
11624 ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE,
11626 return ReplaceInstUsesWith(LI, V);
11627 if (CE->getOperand(0)->isNullValue()) {
11628 // Insert a new store to null instruction before the load to indicate
11629 // that this code is not reachable. We do this instead of inserting
11630 // an unreachable instruction directly because we cannot modify the
11632 new StoreInst(Context->getUndef(LI.getType()),
11633 Context->getNullValue(Op->getType()), &LI);
11634 return ReplaceInstUsesWith(LI, Context->getUndef(LI.getType()));
11637 } else if (CE->isCast()) {
11638 if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
11644 // If this load comes from anywhere in a constant global, and if the global
11645 // is all undef or zero, we know what it loads.
11646 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op->getUnderlyingObject())){
11647 if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
11648 if (GV->getInitializer()->isNullValue())
11649 return ReplaceInstUsesWith(LI, Context->getNullValue(LI.getType()));
11650 else if (isa<UndefValue>(GV->getInitializer()))
11651 return ReplaceInstUsesWith(LI, Context->getUndef(LI.getType()));
11655 if (Op->hasOneUse()) {
11656 // Change select and PHI nodes to select values instead of addresses: this
11657     // helps alias analysis out a lot, allows many other simplifications, and
11658 // exposes redundancy in the code.
11660 // Note that we cannot do the transformation unless we know that the
11661 // introduced loads cannot trap! Something like this is valid as long as
11662 // the condition is always false: load (select bool %C, int* null, int* %G),
11663 // but it would not be valid if we transformed it to load from null
11664 // unconditionally.
11666 if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
11667 // load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
11668 if (isSafeToLoadUnconditionally(SI->getOperand(1), SI) &&
11669 isSafeToLoadUnconditionally(SI->getOperand(2), SI)) {
11670 Value *V1 = InsertNewInstBefore(new LoadInst(SI->getOperand(1),
11671 SI->getOperand(1)->getName()+".val"), LI);
11672 Value *V2 = InsertNewInstBefore(new LoadInst(SI->getOperand(2),
11673 SI->getOperand(2)->getName()+".val"), LI);
11674 return SelectInst::Create(SI->getCondition(), V1, V2);
11677 // load (select (cond, null, P)) -> load P
11678 if (Constant *C = dyn_cast<Constant>(SI->getOperand(1)))
11679 if (C->isNullValue()) {
11680 LI.setOperand(0, SI->getOperand(2));
11684 // load (select (cond, P, null)) -> load P
11685 if (Constant *C = dyn_cast<Constant>(SI->getOperand(2)))
11686 if (C->isNullValue()) {
11687 LI.setOperand(0, SI->getOperand(1));
11695 /// InstCombineStoreToCast - Fold store V, (cast P) -> store (cast V), P
11696 /// when possible. This makes it generally easy to do alias analysis and/or
11697 /// SROA/mem2reg of the memory object.
11698 static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
11699 User *CI = cast<User>(SI.getOperand(1));
11700 Value *CastOp = CI->getOperand(0);
11701 LLVMContext *Context = IC.getContext();
11703 const Type *DestPTy = cast<PointerType>(CI->getType())->getElementType();
11704 const PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType());
11705 if (SrcTy == 0) return 0;
11707 const Type *SrcPTy = SrcTy->getElementType();
11709 if (!DestPTy->isInteger() && !isa<PointerType>(DestPTy))
11712 /// NewGEPIndices - If SrcPTy is an aggregate type, we can emit a "noop gep"
11713 /// to its first element. This allows us to handle things like:
11714 /// store i32 xxx, (bitcast {foo*, float}* %P to i32*)
11715 /// on 32-bit hosts.
11716 SmallVector<Value*, 4> NewGEPIndices;
11718 // If the source is an array, the code below will not succeed. Check to
11719   // see if a trivial 'gep P, 0, 0' will help matters. Only do this for constants.
11721 if (isa<ArrayType>(SrcPTy) || isa<StructType>(SrcPTy)) {
11722 // Index through pointer.
11723 Constant *Zero = Context->getNullValue(Type::Int32Ty);
11724 NewGEPIndices.push_back(Zero);
11727 if (const StructType *STy = dyn_cast<StructType>(SrcPTy)) {
11728 if (!STy->getNumElements()) /* Struct can be empty {} */
11730 NewGEPIndices.push_back(Zero);
11731 SrcPTy = STy->getElementType(0);
11732 } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcPTy)) {
11733 NewGEPIndices.push_back(Zero);
11734 SrcPTy = ATy->getElementType();
11740 SrcTy = Context->getPointerType(SrcPTy, SrcTy->getAddressSpace());
11743 if (!SrcPTy->isInteger() && !isa<PointerType>(SrcPTy))
11746 // If the pointers point into different address spaces or if they point to
11747 // values with different sizes, we can't do the transformation.
11748 if (SrcTy->getAddressSpace() !=
11749 cast<PointerType>(CI->getType())->getAddressSpace() ||
11750 IC.getTargetData().getTypeSizeInBits(SrcPTy) !=
11751 IC.getTargetData().getTypeSizeInBits(DestPTy))
11754 // Okay, we are casting from one integer or pointer type to another of
11755 // the same size. Instead of casting the pointer before
11756 // the store, cast the value to be stored.
11758 Value *SIOp0 = SI.getOperand(0);
11759 Instruction::CastOps opcode = Instruction::BitCast;
11760 const Type* CastSrcTy = SIOp0->getType();
11761 const Type* CastDstTy = SrcPTy;
11762 if (isa<PointerType>(CastDstTy)) {
11763 if (CastSrcTy->isInteger())
11764 opcode = Instruction::IntToPtr;
11765 } else if (isa<IntegerType>(CastDstTy)) {
11766 if (isa<PointerType>(SIOp0->getType()))
11767 opcode = Instruction::PtrToInt;
11770 // SIOp0 is a pointer to aggregate and this is a store to the first field,
11771 // emit a GEP to index into its first field.
11772 if (!NewGEPIndices.empty()) {
11773 if (Constant *C = dyn_cast<Constant>(CastOp))
11774 CastOp = Context->getConstantExprGetElementPtr(C, &NewGEPIndices[0],
11775 NewGEPIndices.size());
11777 CastOp = IC.InsertNewInstBefore(
11778 GetElementPtrInst::Create(CastOp, NewGEPIndices.begin(),
11779 NewGEPIndices.end()), SI);
11782 if (Constant *C = dyn_cast<Constant>(SIOp0))
11783 NewCast = Context->getConstantExprCast(opcode, C, CastDstTy);
11785 NewCast = IC.InsertNewInstBefore(
11786 CastInst::Create(opcode, SIOp0, CastDstTy, SIOp0->getName()+".c"),
11788 return new StoreInst(NewCast, CastOp);
11791 /// equivalentAddressValues - Test if A and B will obviously have the same
11792 /// value. This includes recognizing that %t0 and %t1 will have the same
11793 /// value in code like this:
11794 /// %t0 = getelementptr \@a, 0, 3
11795 /// store i32 0, i32* %t0
11796 /// %t1 = getelementptr \@a, 0, 3
11797 /// %t2 = load i32* %t1
11799 static bool equivalentAddressValues(Value *A, Value *B) {
11800 // Test if the values are trivially equivalent.
11801 if (A == B) return true;
11803   // Test if the values come from identical arithmetic instructions.
11804 if (isa<BinaryOperator>(A) ||
11805 isa<CastInst>(A) ||
11807 isa<GetElementPtrInst>(A))
11808 if (Instruction *BI = dyn_cast<Instruction>(B))
11809 if (cast<Instruction>(A)->isIdenticalTo(BI))
11812 // Otherwise they may not be equivalent.
11816 // If this instruction has two uses, one of which is a llvm.dbg.declare,
11817 // return the llvm.dbg.declare.
11818 DbgDeclareInst *InstCombiner::hasOneUsePlusDeclare(Value *V) {
11819 if (!V->hasNUses(2))
11821 for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
11823 if (DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(UI))
11825 if (isa<BitCastInst>(UI) && UI->hasOneUse()) {
11826 if (DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(UI->use_begin()))
11833 Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
11834 Value *Val = SI.getOperand(0);
11835 Value *Ptr = SI.getOperand(1);
11837 if (isa<UndefValue>(Ptr)) { // store X, undef -> noop (even if volatile)
11838 EraseInstFromFunction(SI);
11843 // If the RHS is an alloca with a single use, zapify the store, making the
11845   // If the RHS is an alloca with two uses, the other one being a
11846 // llvm.dbg.declare, zapify the store and the declare, making the
11847 // alloca dead. We must do this to prevent declare's from affecting
11849 if (!SI.isVolatile()) {
11850 if (Ptr->hasOneUse()) {
11851 if (isa<AllocaInst>(Ptr)) {
11852 EraseInstFromFunction(SI);
11856 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
11857 if (isa<AllocaInst>(GEP->getOperand(0))) {
11858 if (GEP->getOperand(0)->hasOneUse()) {
11859 EraseInstFromFunction(SI);
11863 if (DbgDeclareInst *DI = hasOneUsePlusDeclare(GEP->getOperand(0))) {
11864 EraseInstFromFunction(*DI);
11865 EraseInstFromFunction(SI);
11872 if (DbgDeclareInst *DI = hasOneUsePlusDeclare(Ptr)) {
11873 EraseInstFromFunction(*DI);
11874 EraseInstFromFunction(SI);
11880 // Attempt to improve the alignment.
11881 unsigned KnownAlign =
11882 GetOrEnforceKnownAlignment(Ptr, TD->getPrefTypeAlignment(Val->getType()));
11884 (SI.getAlignment() == 0 ? TD->getABITypeAlignment(Val->getType()) :
11885 SI.getAlignment()))
11886 SI.setAlignment(KnownAlign);
11888 // Do really simple DSE, to catch cases where there are several consecutive
11889 // stores to the same location, separated by a few arithmetic operations. This
11890 // situation often occurs with bitfield accesses.
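  // For illustration (hypothetical IR; names are made up):
  //   store i32 %a, i32* %p
  //   %t = or i32 %a, 255         ; no intervening load or other memory access
  //   store i32 %t, i32* %p
  // The first store is dead and can be deleted.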
11891 BasicBlock::iterator BBI = &SI;
11892 for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
11895     // Don't count debug info directives, lest they affect codegen,
11896     // and skip pointer-to-pointer bitcasts, which are NOPs.
11897 // It is necessary for correctness to skip those that feed into a
11898 // llvm.dbg.declare, as these are not present when debugging is off.
11899 if (isa<DbgInfoIntrinsic>(BBI) ||
11900 (isa<BitCastInst>(BBI) && isa<PointerType>(BBI->getType()))) {
11905 if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
11906 // Prev store isn't volatile, and stores to the same location?
11907 if (!PrevSI->isVolatile() &&equivalentAddressValues(PrevSI->getOperand(1),
11908 SI.getOperand(1))) {
11911 EraseInstFromFunction(*PrevSI);
11917 // If this is a load, we have to stop. However, if the loaded value is from
11918 // the pointer we're loading and is producing the pointer we're storing,
11919 // then *this* store is dead (X = load P; store X -> P).
11920 if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
11921 if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
11922 !SI.isVolatile()) {
11923 EraseInstFromFunction(SI);
11927 // Otherwise, this is a load from some other location. Stores before it
11928 // may not be dead.
11932 // Don't skip over loads or things that can modify memory.
11933 if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
11938 if (SI.isVolatile()) return 0; // Don't hack volatile stores.
11940 // store X, null -> turns into 'unreachable' in SimplifyCFG
11941 if (isa<ConstantPointerNull>(Ptr) &&
11942 cast<PointerType>(Ptr->getType())->getAddressSpace() == 0) {
11943 if (!isa<UndefValue>(Val)) {
11944 SI.setOperand(0, Context->getUndef(Val->getType()));
11945 if (Instruction *U = dyn_cast<Instruction>(Val))
11946 AddToWorkList(U); // Dropped a use.
11949 return 0; // Do not modify these!
11952 // store undef, Ptr -> noop
11953 if (isa<UndefValue>(Val)) {
11954 EraseInstFromFunction(SI);
11959 // If the pointer destination is a cast, see if we can fold the cast into the
11961 if (isa<CastInst>(Ptr))
11962 if (Instruction *Res = InstCombineStoreToCast(*this, SI))
11964 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
11966 if (Instruction *Res = InstCombineStoreToCast(*this, SI))
11970 // If this store is the last instruction in the basic block (possibly
11971 // excepting debug info instructions and the pointer bitcasts that feed
11972 // into them), and if the block ends with an unconditional branch, try
11973 // to move it to the successor block.
11977 } while (isa<DbgInfoIntrinsic>(BBI) ||
11978 (isa<BitCastInst>(BBI) && isa<PointerType>(BBI->getType())));
11979 if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
11980 if (BI->isUnconditional())
11981 if (SimplifyStoreAtEndOfBlock(SI))
11982 return 0; // xform done!
11987 /// SimplifyStoreAtEndOfBlock - Turn things like:
11988 /// if () { *P = v1; } else { *P = v2 }
11989 /// into a phi node with a store in the successor.
11991 /// Simplify things like:
11992 /// *P = v1; if () { *P = v2; }
11993 /// into a phi node with a store in the successor.
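/// For illustration (hypothetical IR; block and value names are made up), the
/// if/then/else form
///   then:                             else:
///     store i32 1, i32* %P              store i32 2, i32* %P
///     br label %merge                   br label %merge
/// becomes, in %merge:
///   %storemerge = phi i32 [ 1, %then ], [ 2, %else ]
///   store i32 %storemerge, i32* %P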
11995 bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
11996 BasicBlock *StoreBB = SI.getParent();
11998 // Check to see if the successor block has exactly two incoming edges. If
11999 // so, see if the other predecessor contains a store to the same location.
12000 // if so, insert a PHI node (if needed) and move the stores down.
12001 BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);
12003 // Determine whether Dest has exactly two predecessors and, if so, compute
12004 // the other predecessor.
12005 pred_iterator PI = pred_begin(DestBB);
12006 BasicBlock *OtherBB = 0;
12007 if (*PI != StoreBB)
12010 if (PI == pred_end(DestBB))
12013 if (*PI != StoreBB) {
12018 if (++PI != pred_end(DestBB))
12021 // Bail out if all the relevant blocks aren't distinct (this can happen,
12022 // for example, if SI is in an infinite loop)
12023 if (StoreBB == DestBB || OtherBB == DestBB)
12026 // Verify that the other block ends in a branch and is not otherwise empty.
12027 BasicBlock::iterator BBI = OtherBB->getTerminator();
12028 BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
12029 if (!OtherBr || BBI == OtherBB->begin())
12032 // If the other block ends in an unconditional branch, check for the 'if then
12033   // else' case: there is an instruction before the branch.
12034 StoreInst *OtherStore = 0;
12035 if (OtherBr->isUnconditional()) {
12037 // Skip over debugging info.
12038 while (isa<DbgInfoIntrinsic>(BBI) ||
12039 (isa<BitCastInst>(BBI) && isa<PointerType>(BBI->getType()))) {
12040 if (BBI==OtherBB->begin())
12044 // If this isn't a store, or isn't a store to the same location, bail out.
12045 OtherStore = dyn_cast<StoreInst>(BBI);
12046 if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1))
12049 // Otherwise, the other block ended with a conditional branch. If one of the
12050 // destinations is StoreBB, then we have the if/then case.
12051 if (OtherBr->getSuccessor(0) != StoreBB &&
12052 OtherBr->getSuccessor(1) != StoreBB)
12055 // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
12056 // if/then triangle. See if there is a store to the same ptr as SI that
12057 // lives in OtherBB.
12059 // Check to see if we find the matching store.
12060 if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
12061 if (OtherStore->getOperand(1) != SI.getOperand(1))
12065 // If we find something that may be using or overwriting the stored
12066 // value, or if we run out of instructions, we can't do the xform.
12067 if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
12068 BBI == OtherBB->begin())
12072 // In order to eliminate the store in OtherBr, we have to
12073   // make sure nothing reads or overwrites the stored value in StoreBB.
12075 for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
12076 // FIXME: This should really be AA driven.
12077 if (I->mayReadFromMemory() || I->mayWriteToMemory())
12082 // Insert a PHI node now if we need it.
12083 Value *MergedVal = OtherStore->getOperand(0);
12084 if (MergedVal != SI.getOperand(0)) {
12085 PHINode *PN = PHINode::Create(MergedVal->getType(), "storemerge");
12086 PN->reserveOperandSpace(2);
12087 PN->addIncoming(SI.getOperand(0), SI.getParent());
12088 PN->addIncoming(OtherStore->getOperand(0), OtherBB);
12089 MergedVal = InsertNewInstBefore(PN, DestBB->front());
12092   // Advance to a place where it is safe to insert the new store and insert it.
12094 BBI = DestBB->getFirstNonPHI();
12095 InsertNewInstBefore(new StoreInst(MergedVal, SI.getOperand(1),
12096 OtherStore->isVolatile()), *BBI);
12098 // Nuke the old stores.
12099 EraseInstFromFunction(SI);
12100 EraseInstFromFunction(*OtherStore);
12106 Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
12107 // Change br (not X), label True, label False to: br X, label False, True
12109 BasicBlock *TrueDest;
12110 BasicBlock *FalseDest;
12111 if (match(&BI, m_Br(m_Not(m_Value(X)), TrueDest, FalseDest), *Context) &&
12112 !isa<Constant>(X)) {
12113 // Swap Destinations and condition...
12114 BI.setCondition(X);
12115 BI.setSuccessor(0, FalseDest);
12116 BI.setSuccessor(1, TrueDest);
12120   // Canonicalize fcmp_one -> fcmp_oeq
12121 FCmpInst::Predicate FPred; Value *Y;
12122 if (match(&BI, m_Br(m_FCmp(FPred, m_Value(X), m_Value(Y)),
12123 TrueDest, FalseDest), *Context))
12124 if ((FPred == FCmpInst::FCMP_ONE || FPred == FCmpInst::FCMP_OLE ||
12125 FPred == FCmpInst::FCMP_OGE) && BI.getCondition()->hasOneUse()) {
12126 FCmpInst *I = cast<FCmpInst>(BI.getCondition());
12127 FCmpInst::Predicate NewPred = FCmpInst::getInversePredicate(FPred);
12128 Instruction *NewSCC = new FCmpInst(I, NewPred, X, Y, "");
12129 NewSCC->takeName(I);
12130 // Swap Destinations and condition...
12131 BI.setCondition(NewSCC);
12132 BI.setSuccessor(0, FalseDest);
12133 BI.setSuccessor(1, TrueDest);
12134 RemoveFromWorkList(I);
12135 I->eraseFromParent();
12136 AddToWorkList(NewSCC);
12140   // Canonicalize icmp_ne -> icmp_eq
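  // For illustration (hypothetical IR; names are made up):
  //   %c = icmp ne i32 %a, %b
  //   br i1 %c, label %T, label %F
  // becomes
  //   %c = icmp eq i32 %a, %b
  //   br i1 %c, label %F, label %T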
12141 ICmpInst::Predicate IPred;
12142 if (match(&BI, m_Br(m_ICmp(IPred, m_Value(X), m_Value(Y)),
12143 TrueDest, FalseDest), *Context))
12144 if ((IPred == ICmpInst::ICMP_NE || IPred == ICmpInst::ICMP_ULE ||
12145 IPred == ICmpInst::ICMP_SLE || IPred == ICmpInst::ICMP_UGE ||
12146 IPred == ICmpInst::ICMP_SGE) && BI.getCondition()->hasOneUse()) {
12147 ICmpInst *I = cast<ICmpInst>(BI.getCondition());
12148 ICmpInst::Predicate NewPred = ICmpInst::getInversePredicate(IPred);
12149 Instruction *NewSCC = new ICmpInst(I, NewPred, X, Y, "");
12150 NewSCC->takeName(I);
12151 // Swap Destinations and condition...
12152 BI.setCondition(NewSCC);
12153 BI.setSuccessor(0, FalseDest);
12154 BI.setSuccessor(1, TrueDest);
12155 RemoveFromWorkList(I);
12156       I->eraseFromParent();
12157 AddToWorkList(NewSCC);
12164 Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
12165 Value *Cond = SI.getCondition();
12166 if (Instruction *I = dyn_cast<Instruction>(Cond)) {
12167 if (I->getOpcode() == Instruction::Add)
12168 if (ConstantInt *AddRHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
12169 // change 'switch (X+4) case 1:' into 'switch (X) case -3'
12170 for (unsigned i = 2, e = SI.getNumOperands(); i != e; i += 2)
12172 Context->getConstantExprSub(cast<Constant>(SI.getOperand(i)),
12174 SI.setOperand(0, I->getOperand(0));
12182 Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
12183 Value *Agg = EV.getAggregateOperand();
12185 if (!EV.hasIndices())
12186 return ReplaceInstUsesWith(EV, Agg);
12188 if (Constant *C = dyn_cast<Constant>(Agg)) {
12189 if (isa<UndefValue>(C))
12190 return ReplaceInstUsesWith(EV, Context->getUndef(EV.getType()));
12192 if (isa<ConstantAggregateZero>(C))
12193 return ReplaceInstUsesWith(EV, Context->getNullValue(EV.getType()));
12195 if (isa<ConstantArray>(C) || isa<ConstantStruct>(C)) {
12196 // Extract the element indexed by the first index out of the constant
12197 Value *V = C->getOperand(*EV.idx_begin());
12198 if (EV.getNumIndices() > 1)
12199         // Extract the remaining indices out of the constant indexed by the first index.
12201 return ExtractValueInst::Create(V, EV.idx_begin() + 1, EV.idx_end());
12203 return ReplaceInstUsesWith(EV, V);
12205 return 0; // Can't handle other constants
12207 if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
12208 // We're extracting from an insertvalue instruction, compare the indices
12209 const unsigned *exti, *exte, *insi, *inse;
12210 for (exti = EV.idx_begin(), insi = IV->idx_begin(),
12211 exte = EV.idx_end(), inse = IV->idx_end();
12212 exti != exte && insi != inse;
12214 if (*insi != *exti)
12215 // The insert and extract both reference distinctly different elements.
12216 // This means the extract is not influenced by the insert, and we can
12217 // replace the aggregate operand of the extract with the aggregate
12218 // operand of the insert. i.e., replace
12219 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
12220 // %E = extractvalue { i32, { i32 } } %I, 0
12222 // %E = extractvalue { i32, { i32 } } %A, 0
12223 return ExtractValueInst::Create(IV->getAggregateOperand(),
12224 EV.idx_begin(), EV.idx_end());
12226 if (exti == exte && insi == inse)
12227 // Both iterators are at the end: Index lists are identical. Replace
12228 // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
12229 // %C = extractvalue { i32, { i32 } } %B, 1, 0
12231 return ReplaceInstUsesWith(EV, IV->getInsertedValueOperand());
12232 if (exti == exte) {
12233 // The extract list is a prefix of the insert list. i.e. replace
12234 // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
12235 // %E = extractvalue { i32, { i32 } } %I, 1
12237 // %X = extractvalue { i32, { i32 } } %A, 1
12238 // %E = insertvalue { i32 } %X, i32 42, 0
12239 // by switching the order of the insert and extract (though the
12240 // insertvalue should be left in, since it may have other uses).
12241 Value *NewEV = InsertNewInstBefore(
12242 ExtractValueInst::Create(IV->getAggregateOperand(),
12243 EV.idx_begin(), EV.idx_end()),
12245 return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
12249 // The insert list is a prefix of the extract list
12250 // We can simply remove the common indices from the extract and make it
12251 // operate on the inserted value instead of the insertvalue result.
12253 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
12254 // %E = extractvalue { i32, { i32 } } %I, 1, 0
12256     //   %E = extractvalue { i32 } { i32 42 }, 0
12257 return ExtractValueInst::Create(IV->getInsertedValueOperand(),
12260 // Can't simplify extracts from other values. Note that nested extracts are
12261   // already simplified implicitly by the above (extract ( extract (insert) )
12262 // will be translated into extract ( insert ( extract ) ) first and then just
12263 // the value inserted, if appropriate).
12267 /// CheapToScalarize - Return true if the value is cheaper to scalarize than it
12268 /// is to leave as a vector operation.
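/// For illustration (hypothetical IR): extracting a constant index from
///   %s = add <4 x i32> %v, <i32 1, i32 1, i32 1, i32 1>
/// is considered cheap (when %s has no other uses), because the vector add
/// can be replaced by a single scalar add of the extracted element.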
12269 static bool CheapToScalarize(Value *V, bool isConstant) {
12270 if (isa<ConstantAggregateZero>(V))
12272 if (ConstantVector *C = dyn_cast<ConstantVector>(V)) {
12273 if (isConstant) return true;
12274 // If all elts are the same, we can extract.
12275 Constant *Op0 = C->getOperand(0);
12276 for (unsigned i = 1; i < C->getNumOperands(); ++i)
12277 if (C->getOperand(i) != Op0)
12281 Instruction *I = dyn_cast<Instruction>(V);
12282 if (!I) return false;
12284 // Insert element gets simplified to the inserted element or is deleted if
12285   // this is a constant-idx extractelement and it's a constant-idx insertelt.
12286 if (I->getOpcode() == Instruction::InsertElement && isConstant &&
12287 isa<ConstantInt>(I->getOperand(2)))
12289 if (I->getOpcode() == Instruction::Load && I->hasOneUse())
12291 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I))
12292 if (BO->hasOneUse() &&
12293 (CheapToScalarize(BO->getOperand(0), isConstant) ||
12294 CheapToScalarize(BO->getOperand(1), isConstant)))
12296 if (CmpInst *CI = dyn_cast<CmpInst>(I))
12297 if (CI->hasOneUse() &&
12298 (CheapToScalarize(CI->getOperand(0), isConstant) ||
12299 CheapToScalarize(CI->getOperand(1), isConstant)))
12305 /// Read and decode a shufflevector mask.
12307 /// It turns undef elements into values that are larger than the number of
12308 /// elements in the input.
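/// For illustration: with two 4-element input vectors, a mask of
///   <i32 0, i32 5, i32 undef, i32 1>
/// decodes to {0, 5, 8, 1}, where 8 (== 2*NElts) marks the undef lane.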
12309 static std::vector<unsigned> getShuffleMask(const ShuffleVectorInst *SVI) {
12310 unsigned NElts = SVI->getType()->getNumElements();
12311 if (isa<ConstantAggregateZero>(SVI->getOperand(2)))
12312 return std::vector<unsigned>(NElts, 0);
12313 if (isa<UndefValue>(SVI->getOperand(2)))
12314 return std::vector<unsigned>(NElts, 2*NElts);
12316 std::vector<unsigned> Result;
12317 const ConstantVector *CP = cast<ConstantVector>(SVI->getOperand(2));
12318 for (User::const_op_iterator i = CP->op_begin(), e = CP->op_end(); i!=e; ++i)
12319 if (isa<UndefValue>(*i))
12320       Result.push_back(NElts*2); // undef -> out-of-range value
12322 Result.push_back(cast<ConstantInt>(*i)->getZExtValue());
12326 /// FindScalarElement - Given a vector and an element number, see if the scalar
12327 /// value is already around as a register, for example if it were inserted then
12328 /// extracted from the vector.
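/// For illustration (hypothetical IR; names are made up):
///   %v1 = insertelement <4 x float> %v0, float %x, i32 2
///   %e  = extractelement <4 x float> %v1, i32 2
/// The scalar for element 2 of %v1 is simply %x, so no extract is needed.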
12329 static Value *FindScalarElement(Value *V, unsigned EltNo,
12330 LLVMContext *Context) {
12331 assert(isa<VectorType>(V->getType()) && "Not looking at a vector?");
12332 const VectorType *PTy = cast<VectorType>(V->getType());
12333 unsigned Width = PTy->getNumElements();
12334 if (EltNo >= Width) // Out of range access.
12335 return Context->getUndef(PTy->getElementType());
12337 if (isa<UndefValue>(V))
12338 return Context->getUndef(PTy->getElementType());
12339 else if (isa<ConstantAggregateZero>(V))
12340 return Context->getNullValue(PTy->getElementType());
12341 else if (ConstantVector *CP = dyn_cast<ConstantVector>(V))
12342 return CP->getOperand(EltNo);
12343 else if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
12344 // If this is an insert to a variable element, we don't know what it is.
12345 if (!isa<ConstantInt>(III->getOperand(2)))
12347 unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue();
12349     // If this is an insert to the element we are looking for, return the inserted value.
12351 if (EltNo == IIElt)
12352 return III->getOperand(1);
12354     // Otherwise, the insertelement doesn't modify the value; recurse on its vector operand.
12356 return FindScalarElement(III->getOperand(0), EltNo, Context);
12357 } else if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V)) {
12358 unsigned LHSWidth =
12359 cast<VectorType>(SVI->getOperand(0)->getType())->getNumElements();
12360 unsigned InEl = getShuffleMask(SVI)[EltNo];
12361 if (InEl < LHSWidth)
12362 return FindScalarElement(SVI->getOperand(0), InEl, Context);
12363 else if (InEl < LHSWidth*2)
12364 return FindScalarElement(SVI->getOperand(1), InEl - LHSWidth, Context);
12366 return Context->getUndef(PTy->getElementType());
12369 // Otherwise, we don't know.
12373 Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
12374 // If vector val is undef, replace extract with scalar undef.
12375 if (isa<UndefValue>(EI.getOperand(0)))
12376 return ReplaceInstUsesWith(EI, Context->getUndef(EI.getType()));
12378 // If vector val is constant 0, replace extract with scalar 0.
12379 if (isa<ConstantAggregateZero>(EI.getOperand(0)))
12380 return ReplaceInstUsesWith(EI, Context->getNullValue(EI.getType()));
12382 if (ConstantVector *C = dyn_cast<ConstantVector>(EI.getOperand(0))) {
12383 // If vector val is constant with all elements the same, replace EI with
12384 // that element. When the elements are not identical, we cannot replace yet
12385 // (we do that below, but only when the index is constant).
12386 Constant *op0 = C->getOperand(0);
12387 for (unsigned i = 1; i < C->getNumOperands(); ++i)
12388 if (C->getOperand(i) != op0) {
12393 return ReplaceInstUsesWith(EI, op0);
12396 // If extracting a specified index from the vector, see if we can recursively
12397 // find a previously computed scalar that was inserted into the vector.
12398 if (ConstantInt *IdxC = dyn_cast<ConstantInt>(EI.getOperand(1))) {
12399 unsigned IndexVal = IdxC->getZExtValue();
12400 unsigned VectorWidth =
12401 cast<VectorType>(EI.getOperand(0)->getType())->getNumElements();
12403 // If this is extracting an invalid index, turn this into undef, to avoid
12404 // crashing the code below.
12405 if (IndexVal >= VectorWidth)
12406 return ReplaceInstUsesWith(EI, Context->getUndef(EI.getType()));
12408 // This instruction only demands the single element from the input vector.
12409 // If the input vector has a single use, simplify it based on this use
12411 if (EI.getOperand(0)->hasOneUse() && VectorWidth != 1) {
12412 APInt UndefElts(VectorWidth, 0);
12413 APInt DemandedMask(VectorWidth, 1 << IndexVal);
12414 if (Value *V = SimplifyDemandedVectorElts(EI.getOperand(0),
12415 DemandedMask, UndefElts)) {
12416 EI.setOperand(0, V);
12421 if (Value *Elt = FindScalarElement(EI.getOperand(0), IndexVal, Context))
12422 return ReplaceInstUsesWith(EI, Elt);
12424     // If this extractelement is directly using a bitcast from a vector of
12425 // the same number of elements, see if we can find the source element from
12426 // it. In this case, we will end up needing to bitcast the scalars.
12427 if (BitCastInst *BCI = dyn_cast<BitCastInst>(EI.getOperand(0))) {
12428 if (const VectorType *VT =
12429 dyn_cast<VectorType>(BCI->getOperand(0)->getType()))
12430 if (VT->getNumElements() == VectorWidth)
12431 if (Value *Elt = FindScalarElement(BCI->getOperand(0),
12432 IndexVal, Context))
12433 return new BitCastInst(Elt, EI.getType());
12437 if (Instruction *I = dyn_cast<Instruction>(EI.getOperand(0))) {
12438 if (I->hasOneUse()) {
12439 // Push extractelement into predecessor operation if legal and
12440 // profitable to do so
12441 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
12442 bool isConstantElt = isa<ConstantInt>(EI.getOperand(1));
12443 if (CheapToScalarize(BO, isConstantElt)) {
12444 ExtractElementInst *newEI0 =
12445 new ExtractElementInst(BO->getOperand(0), EI.getOperand(1),
12446 EI.getName()+".lhs");
12447 ExtractElementInst *newEI1 =
12448 new ExtractElementInst(BO->getOperand(1), EI.getOperand(1),
12449 EI.getName()+".rhs");
12450 InsertNewInstBefore(newEI0, EI);
12451 InsertNewInstBefore(newEI1, EI);
12452 return BinaryOperator::Create(BO->getOpcode(), newEI0, newEI1);
12454 } else if (isa<LoadInst>(I)) {
12456 cast<PointerType>(I->getOperand(0)->getType())->getAddressSpace();
12457 Value *Ptr = InsertBitCastBefore(I->getOperand(0),
12458 Context->getPointerType(EI.getType(), AS),EI);
12459 GetElementPtrInst *GEP =
12460 GetElementPtrInst::Create(Ptr, EI.getOperand(1), I->getName()+".gep");
12461 InsertNewInstBefore(GEP, EI);
12462 return new LoadInst(GEP);
12465 if (InsertElementInst *IE = dyn_cast<InsertElementInst>(I)) {
12466 // Extracting the inserted element?
12467 if (IE->getOperand(2) == EI.getOperand(1))
12468 return ReplaceInstUsesWith(EI, IE->getOperand(1));
12469 // If the inserted and extracted elements are constants, they must not
12470 // be the same value, extract from the pre-inserted value instead.
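// e.g. (illustrative IR):
//   %v1 = insertelement <4 x i32> %v0, i32 %s, i32 1
//   %e  = extractelement <4 x i32> %v1, i32 3
// the indices differ, so %e is rewritten to extract element 3 of %v0.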
12471 if (isa<Constant>(IE->getOperand(2)) &&
12472 isa<Constant>(EI.getOperand(1))) {
12473 AddUsesToWorkList(EI);
12474 EI.setOperand(0, IE->getOperand(0));
12477 } else if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I)) {
12478 // If this is extracting an element from a shufflevector, figure out where
12479 // it came from and extract from the appropriate input element instead.
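// e.g. (illustrative IR):
//   %s = shufflevector <4 x i32> %a, <4 x i32> %b,
//                      <4 x i32> <i32 6, i32 1, i32 0, i32 3>
//   %e = extractelement <4 x i32> %s, i32 0
// mask element 0 is 6, i.e. element 6-4 = 2 of %b, so this becomes
//   %e = extractelement <4 x i32> %b, i32 2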
12480 if (ConstantInt *Elt = dyn_cast<ConstantInt>(EI.getOperand(1))) {
12481 unsigned SrcIdx = getShuffleMask(SVI)[Elt->getZExtValue()];
12483 unsigned LHSWidth =
12484 cast<VectorType>(SVI->getOperand(0)->getType())->getNumElements();
12486 if (SrcIdx < LHSWidth)
12487 Src = SVI->getOperand(0);
12488 else if (SrcIdx < LHSWidth*2) {
12489 SrcIdx -= LHSWidth;
12490 Src = SVI->getOperand(1);
12492 } else return ReplaceInstUsesWith(EI, Context->getUndef(EI.getType()));
12494 return new ExtractElementInst(Src, SrcIdx);
12501 /// CollectSingleShuffleElements - If V is a shuffle of values that ONLY returns
12502 /// elements from either LHS or RHS, return the shuffle mask and true.
12503 /// Otherwise, return false.
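///
/// For example (illustrative), with LHS = %A and RHS = %B, a value built as
///   %t = insertelement <2 x i32> %A, i32 %b1, i32 0
/// where %b1 = extractelement <2 x i32> %B, i32 1, yields Mask = <3, 1> and
/// returns true: element 0 of %t comes from %B and element 1 from %A.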
12504 static bool CollectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
12505 std::vector<Constant*> &Mask,
12506 LLVMContext *Context) {
12507 assert(V->getType() == LHS->getType() && V->getType() == RHS->getType() &&
12508 "Invalid CollectSingleShuffleElements");
12509 unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
12511 if (isa<UndefValue>(V)) {
12512 Mask.assign(NumElts, Context->getUndef(Type::Int32Ty));
12514 } else if (V == LHS) {
12515 for (unsigned i = 0; i != NumElts; ++i)
12516 Mask.push_back(Context->getConstantInt(Type::Int32Ty, i));
12518 } else if (V == RHS) {
12519 for (unsigned i = 0; i != NumElts; ++i)
12520 Mask.push_back(Context->getConstantInt(Type::Int32Ty, i+NumElts));
12522 } else if (InsertElementInst *IEI = dyn_cast<InsertElementInst>(V)) {
12523 // If this is an insert of an extract from some other vector, include it.
12524 Value *VecOp = IEI->getOperand(0);
12525 Value *ScalarOp = IEI->getOperand(1);
12526 Value *IdxOp = IEI->getOperand(2);
12528 if (!isa<ConstantInt>(IdxOp))
12530 unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();
12532 if (isa<UndefValue>(ScalarOp)) { // inserting undef into vector.
12533 // Okay, we can handle this if the vector we are inserting into is
12534 // transitively ok.
12535 if (CollectSingleShuffleElements(VecOp, LHS, RHS, Mask, Context)) {
12536 // If so, update the mask to reflect the inserted undef.
12537 Mask[InsertedIdx] = Context->getUndef(Type::Int32Ty);
12540 } else if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)){
12541 if (isa<ConstantInt>(EI->getOperand(1)) &&
12542 EI->getOperand(0)->getType() == V->getType()) {
12543 unsigned ExtractedIdx =
12544 cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
12546 // This must be extracting from either LHS or RHS.
12547 if (EI->getOperand(0) == LHS || EI->getOperand(0) == RHS) {
12548 // Okay, we can handle this if the vector we are inserting into is
12549 // transitively ok.
12550 if (CollectSingleShuffleElements(VecOp, LHS, RHS, Mask, Context)) {
12551 // If so, update the mask to reflect the inserted value.
12552 if (EI->getOperand(0) == LHS) {
12553 Mask[InsertedIdx % NumElts] =
12554 Context->getConstantInt(Type::Int32Ty, ExtractedIdx);
12556 assert(EI->getOperand(0) == RHS);
12557 Mask[InsertedIdx % NumElts] =
12558 Context->getConstantInt(Type::Int32Ty, ExtractedIdx+NumElts);
12567 // TODO: Handle shufflevector here!
12572 /// CollectShuffleElements - We are building a shuffle of V, using RHS as the
12573 /// RHS of the shuffle instruction, if it is not null. Return a shuffle mask
12574 /// that computes V and the LHS value of the shuffle.
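///
/// For example (illustrative), if V is
///   %V = insertelement <2 x i32> %A, i32 %b0, i32 1
/// where %b0 = extractelement <2 x i32> %B, i32 0, and RHS is null on entry,
/// this sets RHS to %B, fills Mask with <0, 2>, and returns %A as the LHS.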
12575 static Value *CollectShuffleElements(Value *V, std::vector<Constant*> &Mask,
12576 Value *&RHS, LLVMContext *Context) {
12577 assert(isa<VectorType>(V->getType()) &&
12578 (RHS == 0 || V->getType() == RHS->getType()) &&
12579 "Invalid shuffle!");
12580 unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
12582 if (isa<UndefValue>(V)) {
12583 Mask.assign(NumElts, Context->getUndef(Type::Int32Ty));
12585 } else if (isa<ConstantAggregateZero>(V)) {
12586 Mask.assign(NumElts, Context->getConstantInt(Type::Int32Ty, 0));
12588 } else if (InsertElementInst *IEI = dyn_cast<InsertElementInst>(V)) {
12589 // If this is an insert of an extract from some other vector, include it.
12590 Value *VecOp = IEI->getOperand(0);
12591 Value *ScalarOp = IEI->getOperand(1);
12592 Value *IdxOp = IEI->getOperand(2);
12594 if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)) {
12595 if (isa<ConstantInt>(EI->getOperand(1)) && isa<ConstantInt>(IdxOp) &&
12596 EI->getOperand(0)->getType() == V->getType()) {
12597 unsigned ExtractedIdx =
12598 cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
12599 unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();
12601 // Either the extracted from or inserted into vector must be RHSVec,
12602 // otherwise we'd end up with a shuffle of three inputs.
12603 if (EI->getOperand(0) == RHS || RHS == 0) {
12604 RHS = EI->getOperand(0);
12605 Value *V = CollectShuffleElements(VecOp, Mask, RHS, Context);
12606 Mask[InsertedIdx % NumElts] =
12607 Context->getConstantInt(Type::Int32Ty, NumElts+ExtractedIdx);
12611 if (VecOp == RHS) {
12612 Value *V = CollectShuffleElements(EI->getOperand(0), Mask, RHS, Context);
12614 // Everything but the extracted element is replaced with the RHS.
12615 for (unsigned i = 0; i != NumElts; ++i) {
12616 if (i != InsertedIdx)
12617 Mask[i] = Context->getConstantInt(Type::Int32Ty, NumElts+i);
12622 // If this insertelement is a chain that comes from exactly these two
12623 // vectors, return the vector and the effective shuffle.
12624 if (CollectSingleShuffleElements(IEI, EI->getOperand(0), RHS, Mask, Context))
12626 return EI->getOperand(0);
12631 // TODO: Handle shufflevector here!
12633 // Otherwise, can't do anything fancy. Return an identity vector.
12634 for (unsigned i = 0; i != NumElts; ++i)
12635 Mask.push_back(Context->getConstantInt(Type::Int32Ty, i));
12639 Instruction *InstCombiner::visitInsertElementInst(InsertElementInst &IE) {
12640 Value *VecOp = IE.getOperand(0);
12641 Value *ScalarOp = IE.getOperand(1);
12642 Value *IdxOp = IE.getOperand(2);
12644 // If we are inserting an undef value, or inserting into an undefined place, this insert can be removed.
12645 if (isa<UndefValue>(ScalarOp) || isa<UndefValue>(IdxOp))
12646 ReplaceInstUsesWith(IE, VecOp);
12648 // If the inserted element was extracted from some other vector, and if the
12649 // indexes are constant, try to turn this into a shufflevector operation.
12650 if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)) {
12651 if (isa<ConstantInt>(EI->getOperand(1)) && isa<ConstantInt>(IdxOp) &&
12652 EI->getOperand(0)->getType() == IE.getType()) {
12653 unsigned NumVectorElts = IE.getType()->getNumElements();
12654 unsigned ExtractedIdx =
12655 cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
12656 unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();
12658 if (ExtractedIdx >= NumVectorElts) // Out of range extract.
12659 return ReplaceInstUsesWith(IE, VecOp);
12661 if (InsertedIdx >= NumVectorElts) // Out of range insert.
12662 return ReplaceInstUsesWith(IE, Context->getUndef(IE.getType()));
12664 // If we are extracting a value from a vector, then inserting it right
12665 // back into the same place, just use the input vector.
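// e.g. (illustrative IR):
//   %s = extractelement <4 x i32> %v, i32 2
//   %r = insertelement <4 x i32> %v, i32 %s, i32 2
// %r is just %v again.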
12666 if (EI->getOperand(0) == VecOp && ExtractedIdx == InsertedIdx)
12667 return ReplaceInstUsesWith(IE, VecOp);
12669 // We could theoretically do this for ANY input. However, doing so could
12670 // turn chains of insertelement instructions into a chain of shufflevector
12671 // instructions, and right now we do not merge shufflevectors. As such,
12672 // only do this in a situation where it is clear that there is benefit.
12673 if (isa<UndefValue>(VecOp) || isa<ConstantAggregateZero>(VecOp)) {
12674 // Turn this into shuffle(EIOp0, VecOp, Mask). The result has all of
12675 // the values of VecOp, except the one read from EIOp0.
12676 // Build a new shuffle mask.
12677 std::vector<Constant*> Mask;
12678 if (isa<UndefValue>(VecOp))
12679 Mask.assign(NumVectorElts, Context->getUndef(Type::Int32Ty));
12681 assert(isa<ConstantAggregateZero>(VecOp) && "Unknown thing");
12682 Mask.assign(NumVectorElts, Context->getConstantInt(Type::Int32Ty,
12685 Mask[InsertedIdx] =
12686 Context->getConstantInt(Type::Int32Ty, ExtractedIdx);
12687 return new ShuffleVectorInst(EI->getOperand(0), VecOp,
12688 Context->getConstantVector(Mask));
12691 // If this insertelement isn't used by some other insertelement, turn it
12692 // (and any insertelements it points to), into one big shuffle.
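// e.g. (illustrative IR), a chain built entirely from extracts of two vectors:
//   %a0 = extractelement <2 x float> %A, i32 0
//   %b1 = extractelement <2 x float> %B, i32 1
//   %i0 = insertelement <2 x float> undef, float %a0, i32 0
//   %i1 = insertelement <2 x float> %i0, float %b1, i32 1
// becomes: shufflevector <2 x float> %A, <2 x float> %B, <2 x i32> <i32 0, i32 3>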
12693 if (!IE.hasOneUse() || !isa<InsertElementInst>(IE.use_back())) {
12694 std::vector<Constant*> Mask;
12696 Value *RHS = 0; Value *LHS = CollectShuffleElements(&IE, Mask, RHS, Context);
12697 if (RHS == 0) RHS = Context->getUndef(LHS->getType());
12698 // We now have a shuffle of LHS, RHS, Mask.
12699 return new ShuffleVectorInst(LHS, RHS,
12700 Context->getConstantVector(Mask));
12705 unsigned VWidth = cast<VectorType>(VecOp->getType())->getNumElements();
12706 APInt UndefElts(VWidth, 0);
12707 APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
12708 if (SimplifyDemandedVectorElts(&IE, AllOnesEltMask, UndefElts))
12715 Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
12716 Value *LHS = SVI.getOperand(0);
12717 Value *RHS = SVI.getOperand(1);
12718 std::vector<unsigned> Mask = getShuffleMask(&SVI);
12720 bool MadeChange = false;
12722 // Undefined shuffle mask -> undefined value.
12723 if (isa<UndefValue>(SVI.getOperand(2)))
12724 return ReplaceInstUsesWith(SVI, Context->getUndef(SVI.getType()));
12726 unsigned VWidth = cast<VectorType>(SVI.getType())->getNumElements();
12728 if (VWidth != cast<VectorType>(LHS->getType())->getNumElements())
12731 APInt UndefElts(VWidth, 0);
12732 APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
12733 if (SimplifyDemandedVectorElts(&SVI, AllOnesEltMask, UndefElts)) {
12734 LHS = SVI.getOperand(0);
12735 RHS = SVI.getOperand(1);
12739 // Canonicalize shuffle(x ,x,mask) -> shuffle(x, undef,mask')
12740 // Canonicalize shuffle(undef,x,mask) -> shuffle(x, undef,mask').
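// e.g. (illustrative IR):
//   shufflevector <4 x i32> %x, <4 x i32> %x, <4 x i32> <i32 3, i32 5, i32 6, i32 0>
// is canonicalized to
//   shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 3, i32 1, i32 2, i32 0>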
12741 if (LHS == RHS || isa<UndefValue>(LHS)) {
12742 if (isa<UndefValue>(LHS) && LHS == RHS) {
12743 // shuffle(undef,undef,mask) -> undef.
12744 return ReplaceInstUsesWith(SVI, LHS);
12747 // Remap any references to RHS to use LHS.
12748 std::vector<Constant*> Elts;
12749 for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
12750 if (Mask[i] >= 2*e)
12751 Elts.push_back(Context->getUndef(Type::Int32Ty));
12753 if ((Mask[i] >= e && isa<UndefValue>(RHS)) ||
12754 (Mask[i] < e && isa<UndefValue>(LHS))) {
12755 Mask[i] = 2*e; // Turn into undef.
12756 Elts.push_back(Context->getUndef(Type::Int32Ty));
12758 Mask[i] = Mask[i] % e; // Force to LHS.
12759 Elts.push_back(Context->getConstantInt(Type::Int32Ty, Mask[i]));
12763 SVI.setOperand(0, SVI.getOperand(1));
12764 SVI.setOperand(1, Context->getUndef(RHS->getType()));
12765 SVI.setOperand(2, Context->getConstantVector(Elts));
12766 LHS = SVI.getOperand(0);
12767 RHS = SVI.getOperand(1);
12771 // Analyze the shuffle: is the LHS or the RHS an identity shuffle?
12772 bool isLHSID = true, isRHSID = true;
12774 for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
12775 if (Mask[i] >= e*2) continue; // Ignore undef values.
12776 // Is this an identity shuffle of the LHS value?
12777 isLHSID &= (Mask[i] == i);
12779 // Is this an identity shuffle of the RHS value?
12780 isRHSID &= (Mask[i]-e == i);
12783 // Eliminate identity shuffles.
12784 if (isLHSID) return ReplaceInstUsesWith(SVI, LHS);
12785 if (isRHSID) return ReplaceInstUsesWith(SVI, RHS);
12787 // If the LHS is a shufflevector itself, see if we can combine it with this
12788 // one without producing an unusual shuffle. Here we are really conservative:
12789 // we are absolutely afraid of producing a shuffle mask not in the input
12790 // program, because the code gen may not be smart enough to turn a merged
12791 // shuffle into two specific shuffles: it may produce worse code. As such,
12792 // we only merge two shuffles if the result is one of the two input shuffle
12793 // masks. In this case, merging the shuffles just removes one instruction,
12794 // which we know is safe. This is good for things like turning:
12795 // (splat(splat)) -> splat.
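// e.g. (illustrative IR):
//   %s1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> zeroinitializer
//   %s2 = shufflevector <4 x float> %s1, <4 x float> undef, <4 x i32> zeroinitializer
// the merged mask is still <0, 0, 0, 0>, so %s2 can be a single splat of %v.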
12796 if (ShuffleVectorInst *LHSSVI = dyn_cast<ShuffleVectorInst>(LHS)) {
12797 if (isa<UndefValue>(RHS)) {
12798 std::vector<unsigned> LHSMask = getShuffleMask(LHSSVI);
12800 std::vector<unsigned> NewMask;
12801 for (unsigned i = 0, e = Mask.size(); i != e; ++i)
12802 if (Mask[i] >= 2*e)
12803 NewMask.push_back(2*e);
12805 else NewMask.push_back(LHSMask[Mask[i]]);
12807 // If the result mask is equal to the src shuffle or this shuffle mask, do
12808 // the replacement.
12809 if (NewMask == LHSMask || NewMask == Mask) {
12810 unsigned LHSInNElts =
12811 cast<VectorType>(LHSSVI->getOperand(0)->getType())->getNumElements();
12812 std::vector<Constant*> Elts;
12813 for (unsigned i = 0, e = NewMask.size(); i != e; ++i) {
12814 if (NewMask[i] >= LHSInNElts*2) {
12815 Elts.push_back(Context->getUndef(Type::Int32Ty));
12817 } else Elts.push_back(Context->getConstantInt(Type::Int32Ty, NewMask[i]));
12820 return new ShuffleVectorInst(LHSSVI->getOperand(0),
12821 LHSSVI->getOperand(1),
12822 Context->getConstantVector(Elts));
12827 return MadeChange ? &SVI : 0;
12833 /// TryToSinkInstruction - Try to move the specified instruction from its
12834 /// current block into the beginning of DestBlock, which can only happen if it's
12835 /// safe to move the instruction past all of the instructions between it and the
12836 /// end of its block.
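///
/// For example, a load whose single use is in the unique successor of its
/// block can be moved to the start of that block, provided no instruction
/// between the load and the end of its block may write to memory.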
12837 static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {
12838 assert(I->hasOneUse() && "Invariants didn't hold!");
12840 // Cannot sink control-flow-involving instructions, volatile loads, vaarg, etc.
12841 if (isa<PHINode>(I) || I->mayHaveSideEffects() || isa<TerminatorInst>(I))
12844 // Do not sink alloca instructions out of the entry block.
12845 if (isa<AllocaInst>(I) && I->getParent() ==
12846 &DestBlock->getParent()->getEntryBlock())
12849 // We can only sink load instructions if there is nothing between the load and
12850 // the end of block that could change the value.
12851 if (I->mayReadFromMemory()) {
12852 for (BasicBlock::iterator Scan = I, E = I->getParent()->end(); Scan != E; ++Scan)
12854 if (Scan->mayWriteToMemory())
12858 BasicBlock::iterator InsertPos = DestBlock->getFirstNonPHI();
12860 CopyPrecedingStopPoint(I, InsertPos);
12861 I->moveBefore(InsertPos);
12867 /// AddReachableCodeToWorklist - Walk the function in depth-first order, adding
12868 /// all reachable code to the worklist.
12870 /// This has a couple of tricks to make the code faster and more powerful. In
12871 /// particular, we constant fold and DCE instructions as we go, to avoid adding
12872 /// them to the worklist (this significantly speeds up instcombine on code where
12873 /// many instructions are dead or constant). Additionally, if we find a branch
12874 /// whose condition is a known constant, we only visit the reachable successors.
12876 static void AddReachableCodeToWorklist(BasicBlock *BB,
12877 SmallPtrSet<BasicBlock*, 64> &Visited,
12879 const TargetData *TD) {
12880 SmallVector<BasicBlock*, 256> Worklist;
12881 Worklist.push_back(BB);
12883 while (!Worklist.empty()) {
12884 BB = Worklist.back();
12885 Worklist.pop_back();
12887 // We have now visited this block! If we've already been here, ignore it.
12888 if (!Visited.insert(BB)) continue;
12890 DbgInfoIntrinsic *DBI_Prev = NULL;
12891 for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
12892 Instruction *Inst = BBI++;
12894 // DCE instruction if trivially dead.
12895 if (isInstructionTriviallyDead(Inst)) {
12897 DOUT << "IC: DCE: " << *Inst;
12898 Inst->eraseFromParent();
12902 // ConstantProp instruction if trivially constant.
12903 if (Constant *C = ConstantFoldInstruction(Inst, BB->getContext(), TD)) {
12904 DOUT << "IC: ConstFold to: " << *C << " from: " << *Inst;
12905 Inst->replaceAllUsesWith(C);
12907 Inst->eraseFromParent();
12911 // If there are two consecutive llvm.dbg.stoppoint calls then
12912 // it is likely that the optimizer deleted code in between these two intrinsics.
12914 DbgInfoIntrinsic *DBI_Next = dyn_cast<DbgInfoIntrinsic>(Inst);
12917 if (DBI_Next && DBI_Prev && DBI_Prev->getIntrinsicID() == llvm::Intrinsic::dbg_stoppoint
12918 && DBI_Next->getIntrinsicID() == llvm::Intrinsic::dbg_stoppoint) {
12919 IC.RemoveFromWorkList(DBI_Prev);
12920 DBI_Prev->eraseFromParent();
12922 DBI_Prev = DBI_Next;
12927 IC.AddToWorkList(Inst);
12930 // Recursively visit successors. If this is a branch or switch on a
12931 // constant, only visit the reachable successor.
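// e.g. for "br i1 true, label %T, label %F", only %T is pushed onto the
// worklist here; %F is only visited if it is reachable along some other edge.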
12932 TerminatorInst *TI = BB->getTerminator();
12933 if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
12934 if (BI->isConditional() && isa<ConstantInt>(BI->getCondition())) {
12935 bool CondVal = cast<ConstantInt>(BI->getCondition())->getZExtValue();
12936 BasicBlock *ReachableBB = BI->getSuccessor(!CondVal);
12937 Worklist.push_back(ReachableBB);
12940 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
12941 if (ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
12942 // See if this is an explicit destination.
12943 for (unsigned i = 1, e = SI->getNumSuccessors(); i != e; ++i)
12944 if (SI->getCaseValue(i) == Cond) {
12945 BasicBlock *ReachableBB = SI->getSuccessor(i);
12946 Worklist.push_back(ReachableBB);
12950 // Otherwise it is the default destination.
12951 Worklist.push_back(SI->getSuccessor(0));
12956 for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
12957 Worklist.push_back(TI->getSuccessor(i));
12961 bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
12962 bool Changed = false;
12963 TD = &getAnalysis<TargetData>();
12965 DEBUG(DOUT << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
12966 << F.getNameStr() << "\n");
12969 // Do a depth-first traversal of the function, populate the worklist with
12970 // the reachable instructions. Ignore blocks that are not reachable. Keep
12971 // track of which blocks we visit.
12972 SmallPtrSet<BasicBlock*, 64> Visited;
12973 AddReachableCodeToWorklist(F.begin(), Visited, *this, TD);
12975 // Do a quick scan over the function. If we find any blocks that are
12976 // unreachable, remove any instructions inside of them. This prevents
12977 // the instcombine code from having to deal with some bad special cases.
12978 for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
12979 if (!Visited.count(BB)) {
12980 Instruction *Term = BB->getTerminator();
12981 while (Term != BB->begin()) { // Remove instrs bottom-up
12982 BasicBlock::iterator I = Term; --I;
12984 DOUT << "IC: DCE: " << *I;
12985 // A debug intrinsic shouldn't force another iteration if we weren't
12986 // going to do one without it.
12987 if (!isa<DbgInfoIntrinsic>(I)) {
12991 if (!I->use_empty())
12992 I->replaceAllUsesWith(Context->getUndef(I->getType()));
12993 I->eraseFromParent();
12998 while (!Worklist.empty()) {
12999 Instruction *I = RemoveOneFromWorkList();
13000 if (I == 0) continue; // skip null values.
13002 // Check to see if we can DCE the instruction.
13003 if (isInstructionTriviallyDead(I)) {
13004 // Add operands to the worklist.
13005 if (I->getNumOperands() < 4)
13006 AddUsesToWorkList(*I);
13009 DOUT << "IC: DCE: " << *I;
13011 I->eraseFromParent();
13012 RemoveFromWorkList(I);
13017 // Instruction isn't dead, see if we can constant propagate it.
13018 if (Constant *C = ConstantFoldInstruction(I, F.getContext(), TD)) {
13019 DOUT << "IC: ConstFold to: " << *C << " from: " << *I;
13021 // Add operands to the worklist.
13022 AddUsesToWorkList(*I);
13023 ReplaceInstUsesWith(*I, C);
13026 I->eraseFromParent();
13027 RemoveFromWorkList(I);
13033 (I->getType()->getTypeID() == Type::VoidTyID ||
13034 I->isTrapping())) {
13035 // See if we can constant fold its operands.
13036 for (User::op_iterator i = I->op_begin(), e = I->op_end(); i != e; ++i)
13037 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(i))
13038 if (Constant *NewC = ConstantFoldConstantExpression(CE,
13039 F.getContext(), TD))
13046 // See if we can trivially sink this instruction to a successor basic block.
13047 if (I->hasOneUse()) {
13048 BasicBlock *BB = I->getParent();
13049 BasicBlock *UserParent = cast<Instruction>(I->use_back())->getParent();
13050 if (UserParent != BB) {
13051 bool UserIsSuccessor = false;
13052 // See if the user is one of our successors.
13053 for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI)
13054 if (*SI == UserParent) {
13055 UserIsSuccessor = true;
13059 // If the user is one of our immediate successors, and if that successor
13060 // only has us as a predecessor (we'd have to split the critical edge
13061 // otherwise), we can keep going.
13062 if (UserIsSuccessor && !isa<PHINode>(I->use_back()) &&
13063 next(pred_begin(UserParent)) == pred_end(UserParent))
13064 // Okay, the CFG is simple enough, try to sink this instruction.
13065 Changed |= TryToSinkInstruction(I, UserParent);
13069 // Now that we have an instruction, try combining it to simplify it...
13073 DEBUG(std::ostringstream SS; I->print(SS); OrigI = SS.str(););
13074 if (Instruction *Result = visit(*I)) {
13076 // Should we replace the old instruction with a new one?
13078 DOUT << "IC: Old = " << *I
13079 << " New = " << *Result;
13081 // Everything uses the new instruction now.
13082 I->replaceAllUsesWith(Result);
13084 // Push the new instruction and any users onto the worklist.
13085 AddToWorkList(Result);
13086 AddUsersToWorkList(*Result);
13088 // Move the name to the new instruction first.
13089 Result->takeName(I);
13091 // Insert the new instruction into the basic block...
13092 BasicBlock *InstParent = I->getParent();
13093 BasicBlock::iterator InsertPos = I;
13095 if (!isa<PHINode>(Result)) // If combining a PHI, don't insert
13096 while (isa<PHINode>(InsertPos)) ++InsertPos; // middle of a block of PHIs.
13099 InstParent->getInstList().insert(InsertPos, Result);
13101 // Make sure that we reprocess all operands now that we reduced their use counts.
13103 AddUsesToWorkList(*I);
13105 // Instructions can end up on the worklist more than once. Make sure
13106 // we do not process an instruction that has been deleted.
13107 RemoveFromWorkList(I);
13109 // Erase the old instruction.
13110 InstParent->getInstList().erase(I);
13113 DOUT << "IC: Mod = " << OrigI
13114 << " New = " << *I;
13117 // If the instruction was modified, it's possible that it is now dead.
13118 // If so, remove it.
13119 if (isInstructionTriviallyDead(I)) {
13120 // Make sure we process all operands now that we are reducing their use counts.
13122 AddUsesToWorkList(*I);
13124 // Instructions may end up in the worklist more than once. Erase all
13125 // occurrences of this instruction.
13126 RemoveFromWorkList(I);
13127 I->eraseFromParent();
13130 AddUsersToWorkList(*I);
13137 assert(WorklistMap.empty() && "Worklist empty, but map not?");
13139 // Do an explicit clear, this shrinks the map if needed.
13140 WorklistMap.clear();
13145 bool InstCombiner::runOnFunction(Function &F) {
13146 MustPreserveLCSSA = mustPreserveAnalysisID(LCSSAID);
13148 bool EverMadeChange = false;
13150 // Iterate while there is work to do.
13151 unsigned Iteration = 0;
13152 while (DoOneIteration(F, Iteration++))
13153 EverMadeChange = true;
13154 return EverMadeChange;
13157 FunctionPass *llvm::createInstructionCombiningPass() {
13158 return new InstCombiner();