1 //===- InstructionCombining.cpp - Combine multiple instructions -----------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // InstructionCombining - Combine instructions to form fewer, simple
11 // instructions. This pass does not modify the CFG. This pass is where
12 // algebraic simplification happens.
// This pass combines things like:
//    %Y = add i32 %X, 1
//    %Z = add i32 %Y, 1
// into:
//    %Z = add i32 %X, 2
//
20 // This is a simple worklist driven algorithm.
// This pass guarantees that the following canonicalizations are performed on
// every instruction:
24 // 1. If a binary operator has a constant operand, it is moved to the RHS
25 // 2. Bitwise operators with constant operands are always grouped so that
26 // shifts are performed first, then or's, then and's, then xor's.
27 // 3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
28 // 4. All cmp instructions on boolean values are replaced with logical ops
29 // 5. add X, X is represented as (X*2) => (X << 1)
// 6. Multiplies with a power-of-two constant argument are transformed into
//    shifts.
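//
//    For example (illustrative only), rules 5 and 6 yield rewrites such as:
//      %a = add i32 %x, %x    ; becomes  %a = shl i32 %x, 1
//      %m = mul i32 %x, 8     ; becomes  %m = shl i32 %x, 3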
34 //===----------------------------------------------------------------------===//
36 #define DEBUG_TYPE "instcombine"
37 #include "llvm/Transforms/Scalar.h"
38 #include "llvm/IntrinsicInst.h"
39 #include "llvm/LLVMContext.h"
40 #include "llvm/Pass.h"
41 #include "llvm/DerivedTypes.h"
42 #include "llvm/GlobalVariable.h"
43 #include "llvm/Operator.h"
44 #include "llvm/Analysis/ConstantFolding.h"
45 #include "llvm/Analysis/InstructionSimplify.h"
46 #include "llvm/Analysis/MemoryBuiltins.h"
47 #include "llvm/Analysis/ValueTracking.h"
48 #include "llvm/Target/TargetData.h"
49 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
50 #include "llvm/Transforms/Utils/Local.h"
51 #include "llvm/Support/CallSite.h"
52 #include "llvm/Support/ConstantRange.h"
53 #include "llvm/Support/Debug.h"
54 #include "llvm/Support/ErrorHandling.h"
55 #include "llvm/Support/GetElementPtrTypeIterator.h"
56 #include "llvm/Support/InstVisitor.h"
57 #include "llvm/Support/IRBuilder.h"
58 #include "llvm/Support/MathExtras.h"
59 #include "llvm/Support/PatternMatch.h"
60 #include "llvm/Support/TargetFolder.h"
61 #include "llvm/Support/raw_ostream.h"
62 #include "llvm/ADT/DenseMap.h"
63 #include "llvm/ADT/SmallVector.h"
64 #include "llvm/ADT/SmallPtrSet.h"
65 #include "llvm/ADT/Statistic.h"
66 #include "llvm/ADT/STLExtras.h"
using namespace llvm;
using namespace llvm::PatternMatch;
72 STATISTIC(NumCombined , "Number of insts combined");
73 STATISTIC(NumConstProp, "Number of constant folds");
74 STATISTIC(NumDeadInst , "Number of dead inst eliminated");
75 STATISTIC(NumDeadStore, "Number of dead stores eliminated");
76 STATISTIC(NumSunkInst , "Number of instructions sunk");
78 /// SelectPatternFlavor - We can match a variety of different patterns for
79 /// select operations.
enum SelectPatternFlavor {
SPF_UNKNOWN = 0,
SPF_SMIN, SPF_UMIN,
SPF_SMAX, SPF_UMAX
};
/// InstCombineWorklist - This is the worklist management logic for
/// InstCombine.
90 class InstCombineWorklist {
91 SmallVector<Instruction*, 256> Worklist;
92 DenseMap<Instruction*, unsigned> WorklistMap;
94 void operator=(const InstCombineWorklist&RHS); // DO NOT IMPLEMENT
95 InstCombineWorklist(const InstCombineWorklist&); // DO NOT IMPLEMENT
public:
InstCombineWorklist() {}
99 bool isEmpty() const { return Worklist.empty(); }
/// Add - Add the specified instruction to the worklist if it isn't already
/// in it.
103 void Add(Instruction *I) {
104 if (WorklistMap.insert(std::make_pair(I, Worklist.size())).second) {
105 DEBUG(errs() << "IC: ADD: " << *I << '\n');
106 Worklist.push_back(I);
110 void AddValue(Value *V) {
if (Instruction *I = dyn_cast<Instruction>(V))
Add(I);
}
/// AddInitialGroup - Add the specified batch of stuff in reverse order,
/// which should only be done when the worklist is empty and when the group
117 /// has no duplicates.
118 void AddInitialGroup(Instruction *const *List, unsigned NumEntries) {
119 assert(Worklist.empty() && "Worklist must be empty to add initial group");
120 Worklist.reserve(NumEntries+16);
121 DEBUG(errs() << "IC: ADDING: " << NumEntries << " instrs to worklist\n");
122 for (; NumEntries; --NumEntries) {
123 Instruction *I = List[NumEntries-1];
124 WorklistMap.insert(std::make_pair(I, Worklist.size()));
125 Worklist.push_back(I);
129 // Remove - remove I from the worklist if it exists.
130 void Remove(Instruction *I) {
131 DenseMap<Instruction*, unsigned>::iterator It = WorklistMap.find(I);
132 if (It == WorklistMap.end()) return; // Not in worklist.
134 // Don't bother moving everything down, just null out the slot.
135 Worklist[It->second] = 0;
137 WorklistMap.erase(It);
Instruction *RemoveOne() {
Instruction *I = Worklist.back();
Worklist.pop_back();
WorklistMap.erase(I);
return I;
}
/// AddUsersToWorkList - When an instruction is simplified, add all users of
/// the instruction to the worklist because they might get more simplified
/// now.
151 void AddUsersToWorkList(Instruction &I) {
for (Value::use_iterator UI = I.use_begin(), UE = I.use_end();
UI != UE; ++UI)
Add(cast<Instruction>(*UI));
158 /// Zap - check that the worklist is empty and nuke the backing store for
159 /// the map if it is large.
void Zap() {
assert(WorklistMap.empty() && "Worklist empty, but map not?");
// Do an explicit clear, this shrinks the map if needed.
WorklistMap.clear();
}
167 } // end anonymous namespace.
171 /// InstCombineIRInserter - This is an IRBuilder insertion helper that works
172 /// just like the normal insertion helper, but also adds any new instructions
173 /// to the instcombine worklist.
174 class InstCombineIRInserter : public IRBuilderDefaultInserter<true> {
175 InstCombineWorklist &Worklist;
177 InstCombineIRInserter(InstCombineWorklist &WL) : Worklist(WL) {}
179 void InsertHelper(Instruction *I, const Twine &Name,
180 BasicBlock *BB, BasicBlock::iterator InsertPt) const {
IRBuilderDefaultInserter<true>::InsertHelper(I, Name, BB, InsertPt);
Worklist.Add(I);
}
185 } // end anonymous namespace
189 class InstCombiner : public FunctionPass,
190 public InstVisitor<InstCombiner, Instruction*> {
TargetData *TD;
bool MustPreserveLCSSA;
public:
195 /// Worklist - All of the instructions that need to be simplified.
196 InstCombineWorklist Worklist;
198 /// Builder - This is an IRBuilder that automatically inserts new
199 /// instructions into the worklist when they are created.
typedef IRBuilder<true, TargetFolder, InstCombineIRInserter> BuilderTy;
BuilderTy *Builder;
203 static char ID; // Pass identification, replacement for typeid
204 InstCombiner() : FunctionPass(&ID), TD(0), Builder(0) {}
206 LLVMContext *Context;
207 LLVMContext *getContext() const { return Context; }
210 virtual bool runOnFunction(Function &F);
212 bool DoOneIteration(Function &F, unsigned ItNum);
214 virtual void getAnalysisUsage(AnalysisUsage &AU) const {
215 AU.addPreservedID(LCSSAID);
216 AU.setPreservesCFG();
219 TargetData *getTargetData() const { return TD; }
221 // Visitation implementation - Implement instruction combining for different
222 // instruction types. The semantics are as follows:
224 // null - No change was made
225 // I - Change was made, I is still valid, I may be dead though
226 // otherwise - Change was made, replace I with returned instruction
228 Instruction *visitAdd(BinaryOperator &I);
229 Instruction *visitFAdd(BinaryOperator &I);
230 Value *OptimizePointerDifference(Value *LHS, Value *RHS, const Type *Ty);
231 Instruction *visitSub(BinaryOperator &I);
232 Instruction *visitFSub(BinaryOperator &I);
233 Instruction *visitMul(BinaryOperator &I);
234 Instruction *visitFMul(BinaryOperator &I);
235 Instruction *visitURem(BinaryOperator &I);
236 Instruction *visitSRem(BinaryOperator &I);
237 Instruction *visitFRem(BinaryOperator &I);
238 bool SimplifyDivRemOfSelect(BinaryOperator &I);
239 Instruction *commonRemTransforms(BinaryOperator &I);
240 Instruction *commonIRemTransforms(BinaryOperator &I);
241 Instruction *commonDivTransforms(BinaryOperator &I);
242 Instruction *commonIDivTransforms(BinaryOperator &I);
243 Instruction *visitUDiv(BinaryOperator &I);
244 Instruction *visitSDiv(BinaryOperator &I);
245 Instruction *visitFDiv(BinaryOperator &I);
246 Instruction *FoldAndOfICmps(Instruction &I, ICmpInst *LHS, ICmpInst *RHS);
247 Instruction *FoldAndOfFCmps(Instruction &I, FCmpInst *LHS, FCmpInst *RHS);
248 Instruction *visitAnd(BinaryOperator &I);
249 Instruction *FoldOrOfICmps(Instruction &I, ICmpInst *LHS, ICmpInst *RHS);
250 Instruction *FoldOrOfFCmps(Instruction &I, FCmpInst *LHS, FCmpInst *RHS);
251 Instruction *FoldOrWithConstants(BinaryOperator &I, Value *Op,
252 Value *A, Value *B, Value *C);
253 Instruction *visitOr (BinaryOperator &I);
254 Instruction *visitXor(BinaryOperator &I);
255 Instruction *visitShl(BinaryOperator &I);
256 Instruction *visitAShr(BinaryOperator &I);
257 Instruction *visitLShr(BinaryOperator &I);
258 Instruction *commonShiftTransforms(BinaryOperator &I);
259 Instruction *FoldFCmp_IntToFP_Cst(FCmpInst &I, Instruction *LHSI,
261 Instruction *FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
262 GlobalVariable *GV, CmpInst &ICI,
263 ConstantInt *AndCst = 0);
264 Instruction *visitFCmpInst(FCmpInst &I);
265 Instruction *visitICmpInst(ICmpInst &I);
266 Instruction *visitICmpInstWithCastAndCast(ICmpInst &ICI);
267 Instruction *visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
270 Instruction *FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
271 ConstantInt *DivRHS);
272 Instruction *FoldICmpAddOpCst(ICmpInst &ICI, Value *X, ConstantInt *CI,
273 ICmpInst::Predicate Pred, Value *TheAdd);
274 Instruction *FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
275 ICmpInst::Predicate Cond, Instruction &I);
276 Instruction *FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
278 Instruction *commonCastTransforms(CastInst &CI);
279 Instruction *commonIntCastTransforms(CastInst &CI);
280 Instruction *commonPointerCastTransforms(CastInst &CI);
281 Instruction *visitTrunc(TruncInst &CI);
282 Instruction *visitZExt(ZExtInst &CI);
283 Instruction *visitSExt(SExtInst &CI);
284 Instruction *visitFPTrunc(FPTruncInst &CI);
285 Instruction *visitFPExt(CastInst &CI);
286 Instruction *visitFPToUI(FPToUIInst &FI);
287 Instruction *visitFPToSI(FPToSIInst &FI);
288 Instruction *visitUIToFP(CastInst &CI);
289 Instruction *visitSIToFP(CastInst &CI);
290 Instruction *visitPtrToInt(PtrToIntInst &CI);
291 Instruction *visitIntToPtr(IntToPtrInst &CI);
292 Instruction *visitBitCast(BitCastInst &CI);
293 Instruction *FoldSelectOpOp(SelectInst &SI, Instruction *TI,
295 Instruction *FoldSelectIntoOp(SelectInst &SI, Value*, Value*);
296 Instruction *FoldSPFofSPF(Instruction *Inner, SelectPatternFlavor SPF1,
297 Value *A, Value *B, Instruction &Outer,
298 SelectPatternFlavor SPF2, Value *C);
299 Instruction *visitSelectInst(SelectInst &SI);
300 Instruction *visitSelectInstWithICmp(SelectInst &SI, ICmpInst *ICI);
301 Instruction *visitCallInst(CallInst &CI);
302 Instruction *visitInvokeInst(InvokeInst &II);
304 Instruction *SliceUpIllegalIntegerPHI(PHINode &PN);
305 Instruction *visitPHINode(PHINode &PN);
306 Instruction *visitGetElementPtrInst(GetElementPtrInst &GEP);
307 Instruction *visitAllocaInst(AllocaInst &AI);
308 Instruction *visitFree(Instruction &FI);
309 Instruction *visitLoadInst(LoadInst &LI);
310 Instruction *visitStoreInst(StoreInst &SI);
311 Instruction *visitBranchInst(BranchInst &BI);
312 Instruction *visitSwitchInst(SwitchInst &SI);
313 Instruction *visitInsertElementInst(InsertElementInst &IE);
314 Instruction *visitExtractElementInst(ExtractElementInst &EI);
315 Instruction *visitShuffleVectorInst(ShuffleVectorInst &SVI);
316 Instruction *visitExtractValueInst(ExtractValueInst &EV);
318 // visitInstruction - Specify what to return for unhandled instructions...
319 Instruction *visitInstruction(Instruction &I) { return 0; }
322 Instruction *visitCallSite(CallSite CS);
323 bool transformConstExprCastCall(CallSite CS);
324 Instruction *transformCallThroughTrampoline(CallSite CS);
325 Instruction *transformZExtICmp(ICmpInst *ICI, Instruction &CI,
326 bool DoXform = true);
327 bool WillNotOverflowSignedAdd(Value *LHS, Value *RHS);
328 DbgDeclareInst *hasOneUsePlusDeclare(Value *V);
332 // InsertNewInstBefore - insert an instruction New before instruction Old
333 // in the program. Add the new instruction to the worklist.
335 Instruction *InsertNewInstBefore(Instruction *New, Instruction &Old) {
336 assert(New && New->getParent() == 0 &&
337 "New instruction already inserted into a basic block!");
338 BasicBlock *BB = Old.getParent();
BB->getInstList().insert(&Old, New);  // Insert inst
Worklist.Add(New);
return New;
}
// ReplaceInstUsesWith - This method is to be used when an instruction is
// found to be dead, replaceable with another preexisting expression. Here
// we add all uses of I to the worklist, replace all uses of I with the new
// value, then return I, so that the inst combiner will know that I was
// modified.
350 Instruction *ReplaceInstUsesWith(Instruction &I, Value *V) {
351 Worklist.AddUsersToWorkList(I); // Add all modified instrs to worklist.
353 // If we are replacing the instruction with itself, this must be in a
354 // segment of unreachable code, so just clobber the instruction.
if (&I == V)
V = UndefValue::get(I.getType());

I.replaceAllUsesWith(V);
return &I;
}
// EraseInstFromFunction - When dealing with an instruction that has side
// effects or produces a void value, we can't rely on DCE to delete the
// instruction. Instead, visit methods should return the value returned by
// this function.
366 Instruction *EraseInstFromFunction(Instruction &I) {
367 DEBUG(errs() << "IC: ERASE " << I << '\n');
369 assert(I.use_empty() && "Cannot erase instruction that is used!");
// Make sure that we reprocess all operands now that we reduced their
// use counts.
372 if (I.getNumOperands() < 8) {
373 for (User::op_iterator i = I.op_begin(), e = I.op_end(); i != e; ++i)
374 if (Instruction *Op = dyn_cast<Instruction>(*i))
return 0;  // Don't do anything with I.
383 void ComputeMaskedBits(Value *V, const APInt &Mask, APInt &KnownZero,
384 APInt &KnownOne, unsigned Depth = 0) const {
385 return llvm::ComputeMaskedBits(V, Mask, KnownZero, KnownOne, TD, Depth);
388 bool MaskedValueIsZero(Value *V, const APInt &Mask,
389 unsigned Depth = 0) const {
390 return llvm::MaskedValueIsZero(V, Mask, TD, Depth);
392 unsigned ComputeNumSignBits(Value *Op, unsigned Depth = 0) const {
393 return llvm::ComputeNumSignBits(Op, TD, Depth);
398 /// SimplifyCommutative - This performs a few simplifications for
399 /// commutative operators.
400 bool SimplifyCommutative(BinaryOperator &I);
402 /// SimplifyDemandedUseBits - Attempts to replace V with a simpler value
403 /// based on the demanded bits.
404 Value *SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
405 APInt& KnownZero, APInt& KnownOne,
407 bool SimplifyDemandedBits(Use &U, APInt DemandedMask,
408 APInt& KnownZero, APInt& KnownOne,
411 /// SimplifyDemandedInstructionBits - Inst is an integer instruction that
412 /// SimplifyDemandedBits knows about. See if the instruction has any
413 /// properties that allow us to simplify its operands.
414 bool SimplifyDemandedInstructionBits(Instruction &Inst);
416 Value *SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
417 APInt& UndefElts, unsigned Depth = 0);
// FoldOpIntoPhi - Given a binary operator, cast instruction, or select
// which has a PHI node as operand #0, see if we can fold the instruction
// into the PHI (which is only possible if all operands to the PHI are
// constants).
// If AllowAggressive is true, FoldOpIntoPhi will allow certain transforms
// that would normally be unprofitable because they strongly encourage jump
// threading.
427 Instruction *FoldOpIntoPhi(Instruction &I, bool AllowAggressive = false);
429 // FoldPHIArgOpIntoPHI - If all operands to a PHI node are the same "unary"
430 // operator and they all are only used by the PHI, PHI together their
431 // inputs, and do the operation once, to the result of the PHI.
432 Instruction *FoldPHIArgOpIntoPHI(PHINode &PN);
433 Instruction *FoldPHIArgBinOpIntoPHI(PHINode &PN);
434 Instruction *FoldPHIArgGEPIntoPHI(PHINode &PN);
435 Instruction *FoldPHIArgLoadIntoPHI(PHINode &PN);
438 Instruction *OptAndOp(Instruction *Op, ConstantInt *OpRHS,
439 ConstantInt *AndRHS, BinaryOperator &TheAnd);
441 Value *FoldLogicalPlusAnd(Value *LHS, Value *RHS, ConstantInt *Mask,
442 bool isSub, Instruction &I);
443 Instruction *InsertRangeTest(Value *V, Constant *Lo, Constant *Hi,
444 bool isSigned, bool Inside, Instruction &IB);
445 Instruction *PromoteCastOfAllocation(BitCastInst &CI, AllocaInst &AI);
446 Instruction *MatchBSwap(BinaryOperator &I);
447 bool SimplifyStoreAtEndOfBlock(StoreInst &SI);
448 Instruction *SimplifyMemTransfer(MemIntrinsic *MI);
449 Instruction *SimplifyMemSet(MemSetInst *MI);
452 Value *EvaluateInDifferentType(Value *V, const Type *Ty, bool isSigned);
454 bool CanEvaluateInDifferentType(Value *V, const Type *Ty,
455 unsigned CastOpc, int &NumCastsRemoved);
456 unsigned GetOrEnforceKnownAlignment(Value *V,
457 unsigned PrefAlign = 0);
460 } // end anonymous namespace
462 char InstCombiner::ID = 0;
463 static RegisterPass<InstCombiner>
464 X("instcombine", "Combine redundant instructions");
466 // getComplexity: Assign a complexity or rank value to LLVM Values...
467 // 0 -> undef, 1 -> Const, 2 -> Other, 3 -> Arg, 3 -> Unary, 4 -> OtherInst
468 static unsigned getComplexity(Value *V) {
469 if (isa<Instruction>(V)) {
if (BinaryOperator::isNeg(V) ||
BinaryOperator::isFNeg(V) ||
BinaryOperator::isNot(V))
return 3;              // Unary operators.
return 4;              // Other instructions.
}
476 if (isa<Argument>(V)) return 3;
477 return isa<Constant>(V) ? (isa<UndefValue>(V) ? 0 : 1) : 2;
// isOnlyUse - Return true if this instruction will be deleted if we stop
// using it.
482 static bool isOnlyUse(Value *V) {
483 return V->hasOneUse() || isa<Constant>(V);
// getPromotedType - Return the specified type promoted as it would be to pass
// through a va_arg area.
488 static const Type *getPromotedType(const Type *Ty) {
489 if (const IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
490 if (ITy->getBitWidth() < 32)
491 return Type::getInt32Ty(Ty->getContext());
496 /// ShouldChangeType - Return true if it is desirable to convert a computation
497 /// from 'From' to 'To'. We don't want to convert from a legal to an illegal
/// type for example, or from a smaller to a larger illegal type.
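///
/// For example, on a hypothetical target whose TargetData marks i8/i16/i32/i64
/// as legal, converting i32 -> i17 is rejected (legal to illegal), while
/// i128 -> i64 is allowed (illustrative; the legality set comes from the
/// target description, not from this file).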
499 static bool ShouldChangeType(const Type *From, const Type *To,
500 const TargetData *TD) {
501 assert(isa<IntegerType>(From) && isa<IntegerType>(To));
503 // If we don't have TD, we don't know if the source/dest are legal.
504 if (!TD) return false;
506 unsigned FromWidth = From->getPrimitiveSizeInBits();
507 unsigned ToWidth = To->getPrimitiveSizeInBits();
508 bool FromLegal = TD->isLegalInteger(FromWidth);
509 bool ToLegal = TD->isLegalInteger(ToWidth);
511 // If this is a legal integer from type, and the result would be an illegal
512 // type, don't do the transformation.
513 if (FromLegal && !ToLegal)
516 // Otherwise, if both are illegal, do not increase the size of the result. We
517 // do allow things like i160 -> i64, but not i64 -> i160.
518 if (!FromLegal && !ToLegal && ToWidth > FromWidth)
524 /// getBitCastOperand - If the specified operand is a CastInst, a constant
/// expression bitcast, or a GetElementPtrInst with all zero indices, return the
/// operand value, otherwise return null.
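///
/// For example, for "%q = bitcast i8* %p to i32*" or a GEP of %p whose indices
/// are all zero, this returns %p (illustrative IR, not taken from a test).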
527 static Value *getBitCastOperand(Value *V) {
528 if (Operator *O = dyn_cast<Operator>(V)) {
529 if (O->getOpcode() == Instruction::BitCast)
530 return O->getOperand(0);
531 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V))
532 if (GEP->hasAllZeroIndices())
533 return GEP->getPointerOperand();
538 /// This function is a wrapper around CastInst::isEliminableCastPair. It
539 /// simply extracts arguments and returns what that function returns.
540 static Instruction::CastOps
541 isEliminableCastPair(
542 const CastInst *CI, ///< The first cast instruction
543 unsigned opcode, ///< The opcode of the second cast instruction
544 const Type *DstTy, ///< The target type for the second cast instruction
545 TargetData *TD ///< The target data for pointer size
548 const Type *SrcTy = CI->getOperand(0)->getType(); // A from above
549 const Type *MidTy = CI->getType(); // B from above
551 // Get the opcodes of the two Cast instructions
552 Instruction::CastOps firstOp = Instruction::CastOps(CI->getOpcode());
553 Instruction::CastOps secondOp = Instruction::CastOps(opcode);
555 unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy,
557 TD ? TD->getIntPtrType(CI->getContext()) : 0);
559 // We don't want to form an inttoptr or ptrtoint that converts to an integer
560 // type that differs from the pointer size.
561 if ((Res == Instruction::IntToPtr &&
562 (!TD || SrcTy != TD->getIntPtrType(CI->getContext()))) ||
563 (Res == Instruction::PtrToInt &&
564 (!TD || DstTy != TD->getIntPtrType(CI->getContext()))))
567 return Instruction::CastOps(Res);
570 /// ValueRequiresCast - Return true if the cast from "V to Ty" actually results
571 /// in any code being generated. It does not require codegen if V is simple
572 /// enough or if the cast can be folded into other casts.
573 static bool ValueRequiresCast(Instruction::CastOps opcode, const Value *V,
574 const Type *Ty, TargetData *TD) {
575 if (V->getType() == Ty || isa<Constant>(V)) return false;
577 // If this is another cast that can be eliminated, it isn't codegen either.
578 if (const CastInst *CI = dyn_cast<CastInst>(V))
579 if (isEliminableCastPair(CI, opcode, Ty, TD))
// SimplifyCommutative - This performs a few simplifications for commutative
// operators:
//
// 1. Order operands such that they are listed from right (least complex) to
//    left (most complex). This puts constants before unary operators before
//    binary operators.
//
591 // 2. Transform: (op (op V, C1), C2) ==> (op V, (op C1, C2))
592 // 3. Transform: (op (op V1, C1), (op V2, C2)) ==> (op (op V1, V2), (op C1,C2))
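//
// For example (illustrative): (add (add X, 1), 2) becomes (add X, 3) by rule 2,
// and (add (add X, C1), (add Y, C2)) becomes (add (add X, Y), C1+C2) by rule 3.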
594 bool InstCombiner::SimplifyCommutative(BinaryOperator &I) {
595 bool Changed = false;
596 if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1)))
597 Changed = !I.swapOperands();
599 if (!I.isAssociative()) return Changed;
600 Instruction::BinaryOps Opcode = I.getOpcode();
601 if (BinaryOperator *Op = dyn_cast<BinaryOperator>(I.getOperand(0)))
602 if (Op->getOpcode() == Opcode && isa<Constant>(Op->getOperand(1))) {
603 if (isa<Constant>(I.getOperand(1))) {
604 Constant *Folded = ConstantExpr::get(I.getOpcode(),
605 cast<Constant>(I.getOperand(1)),
606 cast<Constant>(Op->getOperand(1)));
607 I.setOperand(0, Op->getOperand(0));
608 I.setOperand(1, Folded);
610 } else if (BinaryOperator *Op1=dyn_cast<BinaryOperator>(I.getOperand(1)))
611 if (Op1->getOpcode() == Opcode && isa<Constant>(Op1->getOperand(1)) &&
612 isOnlyUse(Op) && isOnlyUse(Op1)) {
613 Constant *C1 = cast<Constant>(Op->getOperand(1));
614 Constant *C2 = cast<Constant>(Op1->getOperand(1));
616 // Fold (op (op V1, C1), (op V2, C2)) ==> (op (op V1, V2), (op C1,C2))
617 Constant *Folded = ConstantExpr::get(I.getOpcode(), C1, C2);
618 Instruction *New = BinaryOperator::Create(Opcode, Op->getOperand(0),
622 I.setOperand(0, New);
623 I.setOperand(1, Folded);
630 // dyn_castNegVal - Given a 'sub' instruction, return the RHS of the instruction
631 // if the LHS is a constant zero (which is the 'negate' form).
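// For example, given "%n = sub i32 0, %x" this returns %x (illustrative).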
633 static inline Value *dyn_castNegVal(Value *V) {
634 if (BinaryOperator::isNeg(V))
635 return BinaryOperator::getNegArgument(V);
637 // Constants can be considered to be negated values if they can be folded.
638 if (ConstantInt *C = dyn_cast<ConstantInt>(V))
639 return ConstantExpr::getNeg(C);
641 if (ConstantVector *C = dyn_cast<ConstantVector>(V))
642 if (C->getType()->getElementType()->isInteger())
643 return ConstantExpr::getNeg(C);
// dyn_castFNegVal - Given a 'fsub' instruction, return the RHS of the
// instruction if the LHS is a constant negative zero (which is the 'negate'
// form).
652 static inline Value *dyn_castFNegVal(Value *V) {
653 if (BinaryOperator::isFNeg(V))
654 return BinaryOperator::getFNegArgument(V);
656 // Constants can be considered to be negated values if they can be folded.
657 if (ConstantFP *C = dyn_cast<ConstantFP>(V))
658 return ConstantExpr::getFNeg(C);
660 if (ConstantVector *C = dyn_cast<ConstantVector>(V))
661 if (C->getType()->getElementType()->isFloatingPoint())
662 return ConstantExpr::getFNeg(C);
667 /// MatchSelectPattern - Pattern match integer [SU]MIN, [SU]MAX, and ABS idioms,
668 /// returning the kind and providing the out parameter results if we
669 /// successfully match.
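///
/// For example (illustrative):
///   %c = icmp sgt i32 %a, %b
///   %s = select i1 %c, i32 %a, i32 %b   ; matched as SPF_SMAX, LHS=%a, RHS=%b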
670 static SelectPatternFlavor
671 MatchSelectPattern(Value *V, Value *&LHS, Value *&RHS) {
672 SelectInst *SI = dyn_cast<SelectInst>(V);
673 if (SI == 0) return SPF_UNKNOWN;
675 ICmpInst *ICI = dyn_cast<ICmpInst>(SI->getCondition());
676 if (ICI == 0) return SPF_UNKNOWN;
678 LHS = ICI->getOperand(0);
679 RHS = ICI->getOperand(1);
681 // (icmp X, Y) ? X : Y
682 if (SI->getTrueValue() == ICI->getOperand(0) &&
683 SI->getFalseValue() == ICI->getOperand(1)) {
684 switch (ICI->getPredicate()) {
685 default: return SPF_UNKNOWN; // Equality.
686 case ICmpInst::ICMP_UGT:
687 case ICmpInst::ICMP_UGE: return SPF_UMAX;
688 case ICmpInst::ICMP_SGT:
689 case ICmpInst::ICMP_SGE: return SPF_SMAX;
690 case ICmpInst::ICMP_ULT:
691 case ICmpInst::ICMP_ULE: return SPF_UMIN;
692 case ICmpInst::ICMP_SLT:
693 case ICmpInst::ICMP_SLE: return SPF_SMIN;
697 // (icmp X, Y) ? Y : X
698 if (SI->getTrueValue() == ICI->getOperand(1) &&
699 SI->getFalseValue() == ICI->getOperand(0)) {
700 switch (ICI->getPredicate()) {
701 default: return SPF_UNKNOWN; // Equality.
702 case ICmpInst::ICMP_UGT:
703 case ICmpInst::ICMP_UGE: return SPF_UMIN;
704 case ICmpInst::ICMP_SGT:
705 case ICmpInst::ICMP_SGE: return SPF_SMIN;
706 case ICmpInst::ICMP_ULT:
707 case ICmpInst::ICMP_ULE: return SPF_UMAX;
708 case ICmpInst::ICMP_SLT:
709 case ICmpInst::ICMP_SLE: return SPF_SMAX;
713 // TODO: (X > 4) ? X : 5 --> (X >= 5) ? X : 5 --> MAX(X, 5)
718 /// isFreeToInvert - Return true if the specified value is free to invert (apply
719 /// ~ to). This happens in cases where the ~ can be eliminated.
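/// For example, "xor (icmp eq i32 %a, %b), true" can be rewritten as
/// "icmp ne i32 %a, %b" when the compare has a single use, so the compare
/// counts as free to invert (illustrative).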
static inline bool isFreeToInvert(Value *V) {
if (BinaryOperator::isNot(V))
return true;
// Constants can be considered to be not'ed values.
if (isa<ConstantInt>(V))
return true;
// Compares can be inverted if they have a single use.
if (CmpInst *CI = dyn_cast<CmpInst>(V))
return CI->hasOneUse();
return false;
}
736 static inline Value *dyn_castNotVal(Value *V) {
737 // If this is not(not(x)) don't return that this is a not: we want the two
738 // not's to be folded first.
if (BinaryOperator::isNot(V)) {
Value *Operand = BinaryOperator::getNotArgument(V);
if (!isFreeToInvert(Operand))
return Operand;
}
745 // Constants can be considered to be not'ed values...
746 if (ConstantInt *C = dyn_cast<ConstantInt>(V))
747 return ConstantInt::get(C->getType(), ~C->getValue());
753 // dyn_castFoldableMul - If this value is a multiply that can be folded into
754 // other computations (because it has a constant operand), return the
755 // non-constant operand of the multiply, and set CST to point to the multiplier.
756 // Otherwise, return null.
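// For example, given "%t = shl i32 %x, 3" (assuming %t has a single use) this
// returns %x and sets CST to 8, since the shift is a multiply by 1 << 3
// (illustrative).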
758 static inline Value *dyn_castFoldableMul(Value *V, ConstantInt *&CST) {
759 if (V->hasOneUse() && V->getType()->isInteger())
760 if (Instruction *I = dyn_cast<Instruction>(V)) {
761 if (I->getOpcode() == Instruction::Mul)
762 if ((CST = dyn_cast<ConstantInt>(I->getOperand(1))))
763 return I->getOperand(0);
764 if (I->getOpcode() == Instruction::Shl)
765 if ((CST = dyn_cast<ConstantInt>(I->getOperand(1)))) {
766 // The multiplier is really 1 << CST.
767 uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
768 uint32_t CSTVal = CST->getLimitedValue(BitWidth);
769 CST = ConstantInt::get(V->getType()->getContext(),
770 APInt(BitWidth, 1).shl(CSTVal));
771 return I->getOperand(0);
777 /// AddOne - Add one to a ConstantInt
778 static Constant *AddOne(Constant *C) {
779 return ConstantExpr::getAdd(C,
780 ConstantInt::get(C->getType(), 1));
782 /// SubOne - Subtract one from a ConstantInt
783 static Constant *SubOne(ConstantInt *C) {
784 return ConstantExpr::getSub(C,
785 ConstantInt::get(C->getType(), 1));
/// MultiplyOverflows - True if the multiply cannot be expressed in an int of
/// this size.
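/// For example, with i8 operands and sign set, 100 * 2 = 200 exceeds the
/// signed i8 maximum of 127, so this returns true (illustrative).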
789 static bool MultiplyOverflows(ConstantInt *C1, ConstantInt *C2, bool sign) {
790 uint32_t W = C1->getBitWidth();
791 APInt LHSExt = C1->getValue(), RHSExt = C2->getValue();
800 APInt MulExt = LHSExt * RHSExt;
if (!sign)
return MulExt.ugt(APInt::getLowBitsSet(W * 2, W));
805 APInt Min = APInt::getSignedMinValue(W).sext(W * 2);
806 APInt Max = APInt::getSignedMaxValue(W).sext(W * 2);
807 return MulExt.slt(Min) || MulExt.sgt(Max);
811 /// ShrinkDemandedConstant - Check to see if the specified operand of the
812 /// specified instruction is a constant integer. If so, check to see if there
813 /// are any bits set in the constant that are not demanded. If so, shrink the
814 /// constant and return true.
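/// For example, if I is "and i32 %x, 255" and only the low four bits are
/// demanded, the constant is shrunk to 15 (illustrative).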
815 static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo,
817 assert(I && "No instruction?");
818 assert(OpNo < I->getNumOperands() && "Operand index too large");
820 // If the operand is not a constant integer, nothing to do.
821 ConstantInt *OpC = dyn_cast<ConstantInt>(I->getOperand(OpNo));
822 if (!OpC) return false;
824 // If there are no bits set that aren't demanded, nothing to do.
825 Demanded.zextOrTrunc(OpC->getValue().getBitWidth());
826 if ((~Demanded & OpC->getValue()) == 0)
829 // This instruction is producing bits that are not demanded. Shrink the RHS.
830 Demanded &= OpC->getValue();
831 I->setOperand(OpNo, ConstantInt::get(OpC->getType(), Demanded));
835 // ComputeSignedMinMaxValuesFromKnownBits - Given a signed integer type and a
836 // set of known zero and one bits, compute the maximum and minimum values that
// could have the specified known zero and known one bits, returning them in
// Min and Max.
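// For example, for a 4-bit value with KnownZero = 0100 and KnownOne = 0001 the
// unknown bits are 1010, including the sign bit, so Min = 1001 (-7) and
// Max = 0011 (3) (illustrative).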
839 static void ComputeSignedMinMaxValuesFromKnownBits(const APInt& KnownZero,
840 const APInt& KnownOne,
841 APInt& Min, APInt& Max) {
842 assert(KnownZero.getBitWidth() == KnownOne.getBitWidth() &&
843 KnownZero.getBitWidth() == Min.getBitWidth() &&
844 KnownZero.getBitWidth() == Max.getBitWidth() &&
845 "KnownZero, KnownOne and Min, Max must have equal bitwidth.");
846 APInt UnknownBits = ~(KnownZero|KnownOne);
848 // The minimum value is when all unknown bits are zeros, EXCEPT for the sign
849 // bit if it is unknown.
Min = KnownOne;
Max = KnownOne|UnknownBits;
853 if (UnknownBits.isNegative()) { // Sign bit is unknown
854 Min.set(Min.getBitWidth()-1);
855 Max.clear(Max.getBitWidth()-1);
859 // ComputeUnsignedMinMaxValuesFromKnownBits - Given an unsigned integer type and
860 // a set of known zero and one bits, compute the maximum and minimum values that
// could have the specified known zero and known one bits, returning them in
// Min and Max.
863 static void ComputeUnsignedMinMaxValuesFromKnownBits(const APInt &KnownZero,
864 const APInt &KnownOne,
865 APInt &Min, APInt &Max) {
866 assert(KnownZero.getBitWidth() == KnownOne.getBitWidth() &&
867 KnownZero.getBitWidth() == Min.getBitWidth() &&
868 KnownZero.getBitWidth() == Max.getBitWidth() &&
869 "Ty, KnownZero, KnownOne and Min, Max must have equal bitwidth.");
870 APInt UnknownBits = ~(KnownZero|KnownOne);
// The minimum value is when the unknown bits are all zeros.
Min = KnownOne;
// The maximum value is when the unknown bits are all ones.
Max = KnownOne|UnknownBits;
878 /// SimplifyDemandedInstructionBits - Inst is an integer instruction that
879 /// SimplifyDemandedBits knows about. See if the instruction has any
880 /// properties that allow us to simplify its operands.
881 bool InstCombiner::SimplifyDemandedInstructionBits(Instruction &Inst) {
882 unsigned BitWidth = Inst.getType()->getScalarSizeInBits();
883 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
884 APInt DemandedMask(APInt::getAllOnesValue(BitWidth));
886 Value *V = SimplifyDemandedUseBits(&Inst, DemandedMask,
887 KnownZero, KnownOne, 0);
888 if (V == 0) return false;
889 if (V == &Inst) return true;
ReplaceInstUsesWith(Inst, V);
return true;
}
894 /// SimplifyDemandedBits - This form of SimplifyDemandedBits simplifies the
895 /// specified instruction operand if possible, updating it in place. It returns
896 /// true if it made any change and false otherwise.
897 bool InstCombiner::SimplifyDemandedBits(Use &U, APInt DemandedMask,
898 APInt &KnownZero, APInt &KnownOne,
900 Value *NewVal = SimplifyDemandedUseBits(U.get(), DemandedMask,
901 KnownZero, KnownOne, Depth);
if (NewVal == 0) return false;
U = NewVal;
return true;
}
908 /// SimplifyDemandedUseBits - This function attempts to replace V with a simpler
909 /// value based on the demanded bits. When this function is called, it is known
910 /// that only the bits set in DemandedMask of the result of V are ever used
911 /// downstream. Consequently, depending on the mask and V, it may be possible
912 /// to replace V with a constant or one of its operands. In such cases, this
913 /// function does the replacement and returns true. In all other cases, it
/// returns false after analyzing the expression and setting KnownOne to the
/// bits that are known to be one in the expression, and KnownZero to the bits
/// that are known to be zero in the expression. These are provided to allow the
917 /// caller (which might recursively be SimplifyDemandedBits itself) to simplify
918 /// the expression. KnownOne and KnownZero always follow the invariant that
919 /// KnownOne & KnownZero == 0. That is, a bit can't be both 1 and 0. Note that
920 /// the bits in KnownOne and KnownZero may only be accurate for those bits set
921 /// in DemandedMask. Note also that the bitwidth of V, DemandedMask, KnownZero
922 /// and KnownOne must all be the same.
924 /// This returns null if it did not change anything and it permits no
925 /// simplification. This returns V itself if it did some simplification of V's
926 /// operands based on the information about what bits are demanded. This returns
927 /// some other non-null value if it found out that V is equal to another value
928 /// in the context where the specified bits are demanded, but not for all users.
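///
/// For example, if V is "or i32 %x, 255" and DemandedMask is 0xFF, every
/// demanded bit is known to be one, so V can be replaced by the constant 255;
/// if DemandedMask were 0xFF00 instead, the function could return %x, since
/// the constant contributes nothing to those bits (illustrative).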
929 Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
930 APInt &KnownZero, APInt &KnownOne,
932 assert(V != 0 && "Null pointer of Value???");
933 assert(Depth <= 6 && "Limit Search Depth");
934 uint32_t BitWidth = DemandedMask.getBitWidth();
935 const Type *VTy = V->getType();
936 assert((TD || !isa<PointerType>(VTy)) &&
937 "SimplifyDemandedBits needs to know bit widths!");
938 assert((!TD || TD->getTypeSizeInBits(VTy->getScalarType()) == BitWidth) &&
939 (!VTy->isIntOrIntVector() ||
940 VTy->getScalarSizeInBits() == BitWidth) &&
941 KnownZero.getBitWidth() == BitWidth &&
942 KnownOne.getBitWidth() == BitWidth &&
943 "Value *V, DemandedMask, KnownZero and KnownOne "
944 "must have same BitWidth");
945 if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
946 // We know all of the bits for a constant!
947 KnownOne = CI->getValue() & DemandedMask;
948 KnownZero = ~KnownOne & DemandedMask;
951 if (isa<ConstantPointerNull>(V)) {
952 // We know all of the bits for a constant!
954 KnownZero = DemandedMask;
960 if (DemandedMask == 0) { // Not demanding any bits from V.
if (isa<UndefValue>(V))
return 0;
return UndefValue::get(VTy);
}

if (Depth == 6)        // Limit search depth.
return 0;
969 APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
970 APInt &RHSKnownZero = KnownZero, &RHSKnownOne = KnownOne;
Instruction *I = dyn_cast<Instruction>(V);
if (!I) {
ComputeMaskedBits(V, DemandedMask, RHSKnownZero, RHSKnownOne, Depth);
return 0;        // Only analyze instructions.
}
978 // If there are multiple uses of this value and we aren't at the root, then
979 // we can't do any simplifications of the operands, because DemandedMask
980 // only reflects the bits demanded by *one* of the users.
981 if (Depth != 0 && !I->hasOneUse()) {
// Despite the fact that we can't simplify this instruction in every user's
983 // context, we can at least compute the knownzero/knownone bits, and we can
984 // do simplifications that apply to *just* the one user if we know that
985 // this instruction has a simpler value in that context.
986 if (I->getOpcode() == Instruction::And) {
987 // If either the LHS or the RHS are Zero, the result is zero.
988 ComputeMaskedBits(I->getOperand(1), DemandedMask,
989 RHSKnownZero, RHSKnownOne, Depth+1);
990 ComputeMaskedBits(I->getOperand(0), DemandedMask & ~RHSKnownZero,
991 LHSKnownZero, LHSKnownOne, Depth+1);
// If all of the demanded bits are known 1 on one side, return the other.
// These bits cannot contribute to the result of the 'and' in this
// context.
996 if ((DemandedMask & ~LHSKnownZero & RHSKnownOne) ==
997 (DemandedMask & ~LHSKnownZero))
998 return I->getOperand(0);
999 if ((DemandedMask & ~RHSKnownZero & LHSKnownOne) ==
1000 (DemandedMask & ~RHSKnownZero))
1001 return I->getOperand(1);
1003 // If all of the demanded bits in the inputs are known zeros, return zero.
1004 if ((DemandedMask & (RHSKnownZero|LHSKnownZero)) == DemandedMask)
1005 return Constant::getNullValue(VTy);
1007 } else if (I->getOpcode() == Instruction::Or) {
1008 // We can simplify (X|Y) -> X or Y in the user's context if we know that
1009 // only bits from X or Y are demanded.
1011 // If either the LHS or the RHS are One, the result is One.
1012 ComputeMaskedBits(I->getOperand(1), DemandedMask,
1013 RHSKnownZero, RHSKnownOne, Depth+1);
1014 ComputeMaskedBits(I->getOperand(0), DemandedMask & ~RHSKnownOne,
1015 LHSKnownZero, LHSKnownOne, Depth+1);
// If all of the demanded bits are known zero on one side, return the
// other. These bits cannot contribute to the result of the 'or' in this
// context.
1020 if ((DemandedMask & ~LHSKnownOne & RHSKnownZero) ==
1021 (DemandedMask & ~LHSKnownOne))
1022 return I->getOperand(0);
1023 if ((DemandedMask & ~RHSKnownOne & LHSKnownZero) ==
1024 (DemandedMask & ~RHSKnownOne))
1025 return I->getOperand(1);
1027 // If all of the potentially set bits on one side are known to be set on
1028 // the other side, just use the 'other' side.
1029 if ((DemandedMask & (~RHSKnownZero) & LHSKnownOne) ==
1030 (DemandedMask & (~RHSKnownZero)))
1031 return I->getOperand(0);
1032 if ((DemandedMask & (~LHSKnownZero) & RHSKnownOne) ==
1033 (DemandedMask & (~LHSKnownZero)))
1034 return I->getOperand(1);
1037 // Compute the KnownZero/KnownOne bits to simplify things downstream.
1038 ComputeMaskedBits(I, DemandedMask, KnownZero, KnownOne, Depth);
1042 // If this is the root being simplified, allow it to have multiple uses,
1043 // just set the DemandedMask to all bits so that we can try to simplify the
1044 // operands. This allows visitTruncInst (for example) to simplify the
1045 // operand of a trunc without duplicating all the logic below.
1046 if (Depth == 0 && !V->hasOneUse())
1047 DemandedMask = APInt::getAllOnesValue(BitWidth);
1049 switch (I->getOpcode()) {
default:
ComputeMaskedBits(I, DemandedMask, RHSKnownZero, RHSKnownOne, Depth);
break;
1053 case Instruction::And:
1054 // If either the LHS or the RHS are Zero, the result is zero.
1055 if (SimplifyDemandedBits(I->getOperandUse(1), DemandedMask,
1056 RHSKnownZero, RHSKnownOne, Depth+1) ||
1057 SimplifyDemandedBits(I->getOperandUse(0), DemandedMask & ~RHSKnownZero,
1058 LHSKnownZero, LHSKnownOne, Depth+1))
1060 assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
1061 assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");
1063 // If all of the demanded bits are known 1 on one side, return the other.
1064 // These bits cannot contribute to the result of the 'and'.
1065 if ((DemandedMask & ~LHSKnownZero & RHSKnownOne) ==
1066 (DemandedMask & ~LHSKnownZero))
1067 return I->getOperand(0);
1068 if ((DemandedMask & ~RHSKnownZero & LHSKnownOne) ==
1069 (DemandedMask & ~RHSKnownZero))
1070 return I->getOperand(1);
1072 // If all of the demanded bits in the inputs are known zeros, return zero.
1073 if ((DemandedMask & (RHSKnownZero|LHSKnownZero)) == DemandedMask)
1074 return Constant::getNullValue(VTy);
1076 // If the RHS is a constant, see if we can simplify it.
1077 if (ShrinkDemandedConstant(I, 1, DemandedMask & ~LHSKnownZero))
1080 // Output known-1 bits are only known if set in both the LHS & RHS.
1081 RHSKnownOne &= LHSKnownOne;
// Output known-0 bits are known to be clear if zero in either the LHS or RHS.
1083 RHSKnownZero |= LHSKnownZero;
1085 case Instruction::Or:
1086 // If either the LHS or the RHS are One, the result is One.
1087 if (SimplifyDemandedBits(I->getOperandUse(1), DemandedMask,
1088 RHSKnownZero, RHSKnownOne, Depth+1) ||
1089 SimplifyDemandedBits(I->getOperandUse(0), DemandedMask & ~RHSKnownOne,
1090 LHSKnownZero, LHSKnownOne, Depth+1))
1092 assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
1093 assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");
1095 // If all of the demanded bits are known zero on one side, return the other.
1096 // These bits cannot contribute to the result of the 'or'.
1097 if ((DemandedMask & ~LHSKnownOne & RHSKnownZero) ==
1098 (DemandedMask & ~LHSKnownOne))
1099 return I->getOperand(0);
1100 if ((DemandedMask & ~RHSKnownOne & LHSKnownZero) ==
1101 (DemandedMask & ~RHSKnownOne))
1102 return I->getOperand(1);
1104 // If all of the potentially set bits on one side are known to be set on
1105 // the other side, just use the 'other' side.
1106 if ((DemandedMask & (~RHSKnownZero) & LHSKnownOne) ==
1107 (DemandedMask & (~RHSKnownZero)))
1108 return I->getOperand(0);
1109 if ((DemandedMask & (~LHSKnownZero) & RHSKnownOne) ==
1110 (DemandedMask & (~LHSKnownZero)))
1111 return I->getOperand(1);
1113 // If the RHS is a constant, see if we can simplify it.
1114 if (ShrinkDemandedConstant(I, 1, DemandedMask))
1117 // Output known-0 bits are only known if clear in both the LHS & RHS.
1118 RHSKnownZero &= LHSKnownZero;
// Output known-1 bits are known to be set if set in either the LHS or RHS.
1120 RHSKnownOne |= LHSKnownOne;
1122 case Instruction::Xor: {
1123 if (SimplifyDemandedBits(I->getOperandUse(1), DemandedMask,
1124 RHSKnownZero, RHSKnownOne, Depth+1) ||
1125 SimplifyDemandedBits(I->getOperandUse(0), DemandedMask,
1126 LHSKnownZero, LHSKnownOne, Depth+1))
1128 assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
1129 assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");
1131 // If all of the demanded bits are known zero on one side, return the other.
1132 // These bits cannot contribute to the result of the 'xor'.
1133 if ((DemandedMask & RHSKnownZero) == DemandedMask)
1134 return I->getOperand(0);
1135 if ((DemandedMask & LHSKnownZero) == DemandedMask)
1136 return I->getOperand(1);
1138 // Output known-0 bits are known if clear or set in both the LHS & RHS.
1139 APInt KnownZeroOut = (RHSKnownZero & LHSKnownZero) |
1140 (RHSKnownOne & LHSKnownOne);
// Output known-1 bits are known to be set if set in only one of the LHS, RHS.
1142 APInt KnownOneOut = (RHSKnownZero & LHSKnownOne) |
1143 (RHSKnownOne & LHSKnownZero);
1145 // If all of the demanded bits are known to be zero on one side or the
1146 // other, turn this into an *inclusive* or.
1147 // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
1148 if ((DemandedMask & ~RHSKnownZero & ~LHSKnownZero) == 0) {
1150 BinaryOperator::CreateOr(I->getOperand(0), I->getOperand(1),
1152 return InsertNewInstBefore(Or, *I);
1155 // If all of the demanded bits on one side are known, and all of the set
1156 // bits on that side are also known to be set on the other side, turn this
1157 // into an AND, as we know the bits will be cleared.
1158 // e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
1159 if ((DemandedMask & (RHSKnownZero|RHSKnownOne)) == DemandedMask) {
1161 if ((RHSKnownOne & LHSKnownOne) == RHSKnownOne) {
1162 Constant *AndC = Constant::getIntegerValue(VTy,
1163 ~RHSKnownOne & DemandedMask);
1165 BinaryOperator::CreateAnd(I->getOperand(0), AndC, "tmp");
1166 return InsertNewInstBefore(And, *I);
1170 // If the RHS is a constant, see if we can simplify it.
1171 // FIXME: for XOR, we prefer to force bits to 1 if they will make a -1.
1172 if (ShrinkDemandedConstant(I, 1, DemandedMask))
1175 // If our LHS is an 'and' and if it has one use, and if any of the bits we
1176 // are flipping are known to be set, then the xor is just resetting those
1177 // bits to zero. We can just knock out bits from the 'and' and the 'xor',
1178 // simplifying both of them.
1179 if (Instruction *LHSInst = dyn_cast<Instruction>(I->getOperand(0)))
1180 if (LHSInst->getOpcode() == Instruction::And && LHSInst->hasOneUse() &&
1181 isa<ConstantInt>(I->getOperand(1)) &&
1182 isa<ConstantInt>(LHSInst->getOperand(1)) &&
1183 (LHSKnownOne & RHSKnownOne & DemandedMask) != 0) {
1184 ConstantInt *AndRHS = cast<ConstantInt>(LHSInst->getOperand(1));
1185 ConstantInt *XorRHS = cast<ConstantInt>(I->getOperand(1));
1186 APInt NewMask = ~(LHSKnownOne & RHSKnownOne & DemandedMask);
1189 ConstantInt::get(I->getType(), NewMask & AndRHS->getValue());
1190 Instruction *NewAnd =
1191 BinaryOperator::CreateAnd(I->getOperand(0), AndC, "tmp");
1192 InsertNewInstBefore(NewAnd, *I);
1195 ConstantInt::get(I->getType(), NewMask & XorRHS->getValue());
1196 Instruction *NewXor =
1197 BinaryOperator::CreateXor(NewAnd, XorC, "tmp");
1198 return InsertNewInstBefore(NewXor, *I);
1202 RHSKnownZero = KnownZeroOut;
1203 RHSKnownOne = KnownOneOut;
1206 case Instruction::Select:
1207 if (SimplifyDemandedBits(I->getOperandUse(2), DemandedMask,
1208 RHSKnownZero, RHSKnownOne, Depth+1) ||
1209 SimplifyDemandedBits(I->getOperandUse(1), DemandedMask,
1210 LHSKnownZero, LHSKnownOne, Depth+1))
1212 assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
1213 assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");
1215 // If the operands are constants, see if we can simplify them.
1216 if (ShrinkDemandedConstant(I, 1, DemandedMask) ||
1217 ShrinkDemandedConstant(I, 2, DemandedMask))
1220 // Only known if known in both the LHS and RHS.
1221 RHSKnownOne &= LHSKnownOne;
1222 RHSKnownZero &= LHSKnownZero;
1224 case Instruction::Trunc: {
1225 unsigned truncBf = I->getOperand(0)->getType()->getScalarSizeInBits();
1226 DemandedMask.zext(truncBf);
1227 RHSKnownZero.zext(truncBf);
1228 RHSKnownOne.zext(truncBf);
1229 if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMask,
1230 RHSKnownZero, RHSKnownOne, Depth+1))
1232 DemandedMask.trunc(BitWidth);
1233 RHSKnownZero.trunc(BitWidth);
1234 RHSKnownOne.trunc(BitWidth);
1235 assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
1238 case Instruction::BitCast:
1239 if (!I->getOperand(0)->getType()->isIntOrIntVector())
1240 return false; // vector->int or fp->int?
1242 if (const VectorType *DstVTy = dyn_cast<VectorType>(I->getType())) {
1243 if (const VectorType *SrcVTy =
1244 dyn_cast<VectorType>(I->getOperand(0)->getType())) {
1245 if (DstVTy->getNumElements() != SrcVTy->getNumElements())
1246 // Don't touch a bitcast between vectors of different element counts.
1249 // Don't touch a scalar-to-vector bitcast.
1251 } else if (isa<VectorType>(I->getOperand(0)->getType()))
1252 // Don't touch a vector-to-scalar bitcast.
1255 if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMask,
1256 RHSKnownZero, RHSKnownOne, Depth+1))
1258 assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
1260 case Instruction::ZExt: {
1261 // Compute the bits in the result that are not present in the input.
1262 unsigned SrcBitWidth =I->getOperand(0)->getType()->getScalarSizeInBits();
1264 DemandedMask.trunc(SrcBitWidth);
1265 RHSKnownZero.trunc(SrcBitWidth);
1266 RHSKnownOne.trunc(SrcBitWidth);
1267 if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMask,
1268 RHSKnownZero, RHSKnownOne, Depth+1))
1270 DemandedMask.zext(BitWidth);
1271 RHSKnownZero.zext(BitWidth);
1272 RHSKnownOne.zext(BitWidth);
1273 assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
1274 // The top bits are known to be zero.
1275 RHSKnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth);
1278 case Instruction::SExt: {
1279 // Compute the bits in the result that are not present in the input.
1280 unsigned SrcBitWidth =I->getOperand(0)->getType()->getScalarSizeInBits();
1282 APInt InputDemandedBits = DemandedMask &
1283 APInt::getLowBitsSet(BitWidth, SrcBitWidth);
1285 APInt NewBits(APInt::getHighBitsSet(BitWidth, BitWidth - SrcBitWidth));
// If any of the sign extended bits are demanded, we know that the sign
// bit is demanded.
1288 if ((NewBits & DemandedMask) != 0)
1289 InputDemandedBits.set(SrcBitWidth-1);
1291 InputDemandedBits.trunc(SrcBitWidth);
1292 RHSKnownZero.trunc(SrcBitWidth);
1293 RHSKnownOne.trunc(SrcBitWidth);
1294 if (SimplifyDemandedBits(I->getOperandUse(0), InputDemandedBits,
1295 RHSKnownZero, RHSKnownOne, Depth+1))
1297 InputDemandedBits.zext(BitWidth);
1298 RHSKnownZero.zext(BitWidth);
1299 RHSKnownOne.zext(BitWidth);
1300 assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
1302 // If the sign bit of the input is known set or clear, then we know the
1303 // top bits of the result.
// If the input sign bit is known zero, or if the NewBits are not demanded,
// convert this into a zero extension.
1307 if (RHSKnownZero[SrcBitWidth-1] || (NewBits & ~DemandedMask) == NewBits) {
1308 // Convert to ZExt cast
1309 CastInst *NewCast = new ZExtInst(I->getOperand(0), VTy, I->getName());
1310 return InsertNewInstBefore(NewCast, *I);
1311 } else if (RHSKnownOne[SrcBitWidth-1]) { // Input sign bit known set
1312 RHSKnownOne |= NewBits;
1316 case Instruction::Add: {
// Figure out what the input bits are. If the top bits of the add result
// are not demanded, then the add doesn't demand them from its input
// either.
1320 unsigned NLZ = DemandedMask.countLeadingZeros();
1322 // If there is a constant on the RHS, there are a variety of xformations
1324 if (ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
1325 // If null, this should be simplified elsewhere. Some of the xforms here
// won't work if the RHS is zero.
if (RHS->isZero())
break;
1330 // If the top bit of the output is demanded, demand everything from the
1331 // input. Otherwise, we demand all the input bits except NLZ top bits.
1332 APInt InDemandedBits(APInt::getLowBitsSet(BitWidth, BitWidth - NLZ));
1334 // Find information about known zero/one bits in the input.
1335 if (SimplifyDemandedBits(I->getOperandUse(0), InDemandedBits,
1336 LHSKnownZero, LHSKnownOne, Depth+1))
1339 // If the RHS of the add has bits set that can't affect the input, reduce
1341 if (ShrinkDemandedConstant(I, 1, InDemandedBits))
1344 // Avoid excess work.
if (LHSKnownZero == 0 && LHSKnownOne == 0)
break;
1348 // Turn it into OR if input bits are zero.
1349 if ((LHSKnownZero & RHS->getValue()) == RHS->getValue()) {
1351 BinaryOperator::CreateOr(I->getOperand(0), I->getOperand(1),
1353 return InsertNewInstBefore(Or, *I);
1356 // We can say something about the output known-zero and known-one bits,
1357 // depending on potential carries from the input constant and the
1358 // unknowns. For example if the LHS is known to have at most the 0x0F0F0
1359 // bits set and the RHS constant is 0x01001, then we know we have a known
1360 // one mask of 0x00001 and a known zero mask of 0xE0F0E.
1362 // To compute this, we first compute the potential carry bits. These are
1363 // the bits which may be modified. I'm not aware of a better way to do
1365 const APInt &RHSVal = RHS->getValue();
1366 APInt CarryBits((~LHSKnownZero + RHSVal) ^ (~LHSKnownZero ^ RHSVal));
1368 // Now that we know which bits have carries, compute the known-1/0 sets.
1370 // Bits are known one if they are known zero in one operand and one in the
1371 // other, and there is no input carry.
1372 RHSKnownOne = ((LHSKnownZero & RHSVal) |
1373 (LHSKnownOne & ~RHSVal)) & ~CarryBits;
1375 // Bits are known zero if they are known zero in both operands and there
1376 // is no input carry.
1377 RHSKnownZero = LHSKnownZero & ~RHSVal & ~CarryBits;
1379 // If the high-bits of this ADD are not demanded, then it does not demand
1380 // the high bits of its LHS or RHS.
1381 if (DemandedMask[BitWidth-1] == 0) {
1382 // Right fill the mask of bits for this ADD to demand the most
1383 // significant bit and all those below it.
1384 APInt DemandedFromOps(APInt::getLowBitsSet(BitWidth, BitWidth-NLZ));
1385 if (SimplifyDemandedBits(I->getOperandUse(0), DemandedFromOps,
1386 LHSKnownZero, LHSKnownOne, Depth+1) ||
1387 SimplifyDemandedBits(I->getOperandUse(1), DemandedFromOps,
1388 LHSKnownZero, LHSKnownOne, Depth+1))
1394 case Instruction::Sub:
1395 // If the high-bits of this SUB are not demanded, then it does not demand
1396 // the high bits of its LHS or RHS.
1397 if (DemandedMask[BitWidth-1] == 0) {
1398 // Right fill the mask of bits for this SUB to demand the most
1399 // significant bit and all those below it.
1400 uint32_t NLZ = DemandedMask.countLeadingZeros();
1401 APInt DemandedFromOps(APInt::getLowBitsSet(BitWidth, BitWidth-NLZ));
1402 if (SimplifyDemandedBits(I->getOperandUse(0), DemandedFromOps,
1403 LHSKnownZero, LHSKnownOne, Depth+1) ||
1404 SimplifyDemandedBits(I->getOperandUse(1), DemandedFromOps,
1405 LHSKnownZero, LHSKnownOne, Depth+1))
1408 // Otherwise just hand the sub off to ComputeMaskedBits to fill in
1409 // the known zeros and ones.
1410 ComputeMaskedBits(V, DemandedMask, RHSKnownZero, RHSKnownOne, Depth);
1412 case Instruction::Shl:
1413 if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
1414 uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
1415 APInt DemandedMaskIn(DemandedMask.lshr(ShiftAmt));
1416 if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMaskIn,
1417 RHSKnownZero, RHSKnownOne, Depth+1))
1419 assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
1420 RHSKnownZero <<= ShiftAmt;
1421 RHSKnownOne <<= ShiftAmt;
1422 // low bits known zero.
1424 RHSKnownZero |= APInt::getLowBitsSet(BitWidth, ShiftAmt);
1427 case Instruction::LShr:
1428 // For a logical shift right
1429 if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
1430 uint64_t ShiftAmt = SA->getLimitedValue(BitWidth);
1432 // Unsigned shift right.
1433 APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
1434 if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMaskIn,
1435 RHSKnownZero, RHSKnownOne, Depth+1))
1437 assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
1438 RHSKnownZero = APIntOps::lshr(RHSKnownZero, ShiftAmt);
1439 RHSKnownOne = APIntOps::lshr(RHSKnownOne, ShiftAmt);
1441 // Compute the new bits that are at the top now.
1442 APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
1443 RHSKnownZero |= HighBits; // high bits known zero.
1447 case Instruction::AShr:
1448 // If this is an arithmetic shift right and only the low-bit is set, we can
1449 // always convert this into a logical shr, even if the shift amount is
1450 // variable. The low bit of the shift cannot be an input sign bit unless
1451 // the shift amount is >= the size of the datatype, which is undefined.
1452 if (DemandedMask == 1) {
1453 // Perform the logical shift right.
1454 Instruction *NewVal = BinaryOperator::CreateLShr(
1455 I->getOperand(0), I->getOperand(1), I->getName());
1456 return InsertNewInstBefore(NewVal, *I);
1459 // If the sign bit is the only bit demanded by this ashr, then there is no
1460 // need to do it, the shift doesn't change the high bit.
1461 if (DemandedMask.isSignBit())
1462 return I->getOperand(0);
1464 if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
1465 uint32_t ShiftAmt = SA->getLimitedValue(BitWidth);
1467 // Signed shift right.
1468 APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
// If any of the "high bits" are demanded, we should set the sign bit as
// demanded.
1471 if (DemandedMask.countLeadingZeros() <= ShiftAmt)
1472 DemandedMaskIn.set(BitWidth-1);
1473 if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMaskIn,
1474 RHSKnownZero, RHSKnownOne, Depth+1))
1476 assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
1477 // Compute the new bits that are at the top now.
1478 APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
1479 RHSKnownZero = APIntOps::lshr(RHSKnownZero, ShiftAmt);
1480 RHSKnownOne = APIntOps::lshr(RHSKnownOne, ShiftAmt);
1482 // Handle the sign bits.
1483 APInt SignBit(APInt::getSignBit(BitWidth));
1484 // Adjust to where it is now in the mask.
1485 SignBit = APIntOps::lshr(SignBit, ShiftAmt);
1487 // If the input sign bit is known to be zero, or if none of the top bits
1488 // are demanded, turn this into an unsigned shift right.
1489 if (BitWidth <= ShiftAmt || RHSKnownZero[BitWidth-ShiftAmt-1] ||
1490 (HighBits & ~DemandedMask) == HighBits) {
1491 // Perform the logical shift right.
1492 Instruction *NewVal = BinaryOperator::CreateLShr(
1493 I->getOperand(0), SA, I->getName());
1494 return InsertNewInstBefore(NewVal, *I);
1495 } else if ((RHSKnownOne & SignBit) != 0) { // New bits are known one.
1496 RHSKnownOne |= HighBits;
1500 case Instruction::SRem:
1501 if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
1502 APInt RA = Rem->getValue().abs();
1503 if (RA.isPowerOf2()) {
1504 if (DemandedMask.ult(RA)) // srem won't affect demanded bits
1505 return I->getOperand(0);
1507 APInt LowBits = RA - 1;
1508 APInt Mask2 = LowBits | APInt::getSignBit(BitWidth);
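// For example, with a 32-bit operand and RA == 8: LowBits == 7, so Mask2 ==
// 0x80000007. An srem by 8 depends only on the low three bits and the sign of
// the dividend, so only those bits of operand 0 are demanded here.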
1509 if (SimplifyDemandedBits(I->getOperandUse(0), Mask2,
1510 LHSKnownZero, LHSKnownOne, Depth+1))
1511 return I;
1513 if (LHSKnownZero[BitWidth-1] || ((LHSKnownZero & LowBits) == LowBits))
1514 LHSKnownZero |= ~LowBits;
1516 KnownZero |= LHSKnownZero & DemandedMask;
1518 assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
1522 case Instruction::URem: {
1523 APInt KnownZero2(BitWidth, 0), KnownOne2(BitWidth, 0);
1524 APInt AllOnes = APInt::getAllOnesValue(BitWidth);
1525 if (SimplifyDemandedBits(I->getOperandUse(0), AllOnes,
1526 KnownZero2, KnownOne2, Depth+1) ||
1527 SimplifyDemandedBits(I->getOperandUse(1), AllOnes,
1528 KnownZero2, KnownOne2, Depth+1))
1529 return I;
1531 unsigned Leaders = KnownZero2.countLeadingOnes();
1532 Leaders = std::max(Leaders,
1533 KnownZero2.countLeadingOnes());
1534 KnownZero = APInt::getHighBitsSet(BitWidth, Leaders) & DemandedMask;
1537 case Instruction::Call:
1538 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1539 switch (II->getIntrinsicID()) {
1541 case Intrinsic::bswap: {
1542 // If the only bits demanded come from one byte of the bswap result,
1543 // just shift the input byte into position to eliminate the bswap.
1544 unsigned NLZ = DemandedMask.countLeadingZeros();
1545 unsigned NTZ = DemandedMask.countTrailingZeros();
1547 // Round NTZ down to the next byte. If we have 11 trailing zeros, then
1548 // we need all the bits down to bit 8. Likewise, round NLZ. If we
1549 // have 14 leading zeros, round to 8.
1550 NLZ &= ~7u;
1551 NTZ &= ~7u;
1552 // If we need exactly one byte, we can do this transformation.
1553 if (BitWidth-NLZ-NTZ == 8) {
1554 unsigned ResultBit = NTZ;
1555 unsigned InputBit = BitWidth-NTZ-8;
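// Worked example: for an i32 bswap where only bits 8..15 of the result are
// demanded, NLZ rounds to 16 and NTZ to 8, so ResultBit == 8 and
// InputBit == 16; a single lshr by 8 moves the needed input byte into place.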
1557 // Replace this with either a left or right shift to get the byte into
1559 Instruction *NewVal;
1560 if (InputBit > ResultBit)
1561 NewVal = BinaryOperator::CreateLShr(I->getOperand(1),
1562 ConstantInt::get(I->getType(), InputBit-ResultBit));
1563 else
1564 NewVal = BinaryOperator::CreateShl(I->getOperand(1),
1565 ConstantInt::get(I->getType(), ResultBit-InputBit));
1566 NewVal->takeName(I);
1567 return InsertNewInstBefore(NewVal, *I);
1570 // TODO: Could compute known zero/one bits based on the input.
1575 ComputeMaskedBits(V, DemandedMask, RHSKnownZero, RHSKnownOne, Depth);
1579 // If the client is only demanding bits that we know, return the known
1581 if ((DemandedMask & (RHSKnownZero|RHSKnownOne)) == DemandedMask)
1582 return Constant::getIntegerValue(VTy, RHSKnownOne);
1587 /// SimplifyDemandedVectorElts - The specified value produces a vector with
1588 /// any number of elements. DemandedElts contains the set of elements that are
1589 /// actually used by the caller. This method analyzes which elements of the
1590 /// operand are undef and returns that information in UndefElts.
1592 /// If the information about demanded elements can be used to simplify the
1593 /// operation, the operation is simplified, then the resultant value is
1594 /// returned. This returns null if no change was made.
1595 Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
1598 unsigned VWidth = cast<VectorType>(V->getType())->getNumElements();
1599 APInt EltMask(APInt::getAllOnesValue(VWidth));
1600 assert((DemandedElts & ~EltMask) == 0 && "Invalid DemandedElts!");
1602 if (isa<UndefValue>(V)) {
1603 // If the entire vector is undefined, just return this info.
1604 UndefElts = EltMask;
1605 return 0;
1606 } else if (DemandedElts == 0) { // If nothing is demanded, provide undef.
1607 UndefElts = EltMask;
1608 return UndefValue::get(V->getType());
1612 if (ConstantVector *CP = dyn_cast<ConstantVector>(V)) {
1613 const Type *EltTy = cast<VectorType>(V->getType())->getElementType();
1614 Constant *Undef = UndefValue::get(EltTy);
1616 std::vector<Constant*> Elts;
1617 for (unsigned i = 0; i != VWidth; ++i)
1618 if (!DemandedElts[i]) { // If not demanded, set to undef.
1619 Elts.push_back(Undef);
1621 } else if (isa<UndefValue>(CP->getOperand(i))) { // Already undef.
1622 Elts.push_back(Undef);
1624 } else { // Otherwise, defined.
1625 Elts.push_back(CP->getOperand(i));
1628 // If we changed the constant, return it.
1629 Constant *NewCP = ConstantVector::get(Elts);
1630 return NewCP != CP ? NewCP : 0;
1631 } else if (isa<ConstantAggregateZero>(V)) {
1632 // Simplify the CAZ to a ConstantVector where the non-demanded elements are
1633 // set to undef.
1635 // Check if this is identity. If so, return 0 since we are not simplifying
1636 // anything.
1637 if (DemandedElts == ((1ULL << VWidth) -1))
1640 const Type *EltTy = cast<VectorType>(V->getType())->getElementType();
1641 Constant *Zero = Constant::getNullValue(EltTy);
1642 Constant *Undef = UndefValue::get(EltTy);
1643 std::vector<Constant*> Elts;
1644 for (unsigned i = 0; i != VWidth; ++i) {
1645 Constant *Elt = DemandedElts[i] ? Zero : Undef;
1646 Elts.push_back(Elt);
1648 UndefElts = DemandedElts ^ EltMask;
1649 return ConstantVector::get(Elts);
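// For instance, a <4 x i32> zeroinitializer with DemandedElts == 0b0011 is
// rebuilt as <i32 0, i32 0, i32 undef, i32 undef>, and the two high lanes are
// reported back to the caller as undef.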
1652 // Limit search depth.
1656 // If multiple users are using the root value, proceed with
1657 // simplification conservatively assuming that all elements
1659 if (!V->hasOneUse()) {
1660 // Quit if we find multiple users of a non-root value though.
1661 // They'll be handled when it's their turn to be visited by
1662 // the main instcombine process.
1664 // TODO: Just compute the UndefElts information recursively.
1667 // Conservatively assume that all elements are needed.
1668 DemandedElts = EltMask;
1671 Instruction *I = dyn_cast<Instruction>(V);
1672 if (!I) return 0; // Only analyze instructions.
1674 bool MadeChange = false;
1675 APInt UndefElts2(VWidth, 0);
1677 switch (I->getOpcode()) {
1680 case Instruction::InsertElement: {
1681 // If this is a variable index, we don't know which element it overwrites,
1682 // so demand exactly the same input as we produce.
1683 ConstantInt *Idx = dyn_cast<ConstantInt>(I->getOperand(2));
1685 // Note that we can't propagate undef elt info, because we don't know
1686 // which elt is getting updated.
1687 TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts,
1688 UndefElts2, Depth+1);
1689 if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
1693 // If this is inserting an element that isn't demanded, remove this
1694 // insertelement.
1695 unsigned IdxNo = Idx->getZExtValue();
1696 if (IdxNo >= VWidth || !DemandedElts[IdxNo]) {
1698 return I->getOperand(0);
1701 // Otherwise, the element inserted overwrites whatever was there, so the
1702 // input demanded set is simpler than the output set.
1703 APInt DemandedElts2 = DemandedElts;
1704 DemandedElts2.clear(IdxNo);
1705 TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts2,
1706 UndefElts, Depth+1);
1707 if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
1709 // The inserted element is defined.
1710 UndefElts.clear(IdxNo);
1713 case Instruction::ShuffleVector: {
1714 ShuffleVectorInst *Shuffle = cast<ShuffleVectorInst>(I);
1715 uint64_t LHSVWidth =
1716 cast<VectorType>(Shuffle->getOperand(0)->getType())->getNumElements();
1717 APInt LeftDemanded(LHSVWidth, 0), RightDemanded(LHSVWidth, 0);
1718 for (unsigned i = 0; i < VWidth; i++) {
1719 if (DemandedElts[i]) {
1720 unsigned MaskVal = Shuffle->getMaskValue(i);
1721 if (MaskVal != -1u) {
1722 assert(MaskVal < LHSVWidth * 2 &&
1723 "shufflevector mask index out of range!");
1724 if (MaskVal < LHSVWidth)
1725 LeftDemanded.set(MaskVal);
1726 else
1727 RightDemanded.set(MaskVal - LHSVWidth);
1732 APInt UndefElts4(LHSVWidth, 0);
1733 TmpV = SimplifyDemandedVectorElts(I->getOperand(0), LeftDemanded,
1734 UndefElts4, Depth+1);
1735 if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
1737 APInt UndefElts3(LHSVWidth, 0);
1738 TmpV = SimplifyDemandedVectorElts(I->getOperand(1), RightDemanded,
1739 UndefElts3, Depth+1);
1740 if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }
1742 bool NewUndefElts = false;
1743 for (unsigned i = 0; i < VWidth; i++) {
1744 unsigned MaskVal = Shuffle->getMaskValue(i);
1745 if (MaskVal == -1u) {
1747 } else if (MaskVal < LHSVWidth) {
1748 if (UndefElts4[MaskVal]) {
1749 NewUndefElts = true;
1753 if (UndefElts3[MaskVal - LHSVWidth]) {
1754 NewUndefElts = true;
1761 // Add additional discovered undefs.
1762 std::vector<Constant*> Elts;
1763 for (unsigned i = 0; i < VWidth; ++i) {
1764 if (UndefElts[i])
1765 Elts.push_back(UndefValue::get(Type::getInt32Ty(*Context)));
1766 else
1767 Elts.push_back(ConstantInt::get(Type::getInt32Ty(*Context),
1768 Shuffle->getMaskValue(i)));
1770 I->setOperand(2, ConstantVector::get(Elts));
1775 case Instruction::BitCast: {
1776 // Vector->vector casts only.
1777 const VectorType *VTy = dyn_cast<VectorType>(I->getOperand(0)->getType());
1779 unsigned InVWidth = VTy->getNumElements();
1780 APInt InputDemandedElts(InVWidth, 0);
1783 if (VWidth == InVWidth) {
1784 // If we are converting from <4 x i32> -> <4 x f32>, we demand the same
1785 // elements as are demanded of us.
1787 InputDemandedElts = DemandedElts;
1788 } else if (VWidth > InVWidth) {
1792 // If there are more elements in the result than there are in the source,
1793 // then an input element is live if any of the corresponding output
1794 // elements are live.
1795 Ratio = VWidth/InVWidth;
1796 for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx) {
1797 if (DemandedElts[OutIdx])
1798 InputDemandedElts.set(OutIdx/Ratio);
1804 // If there are more elements in the source than there are in the result,
1805 // then an input element is live if the corresponding output element is
1807 Ratio = InVWidth/VWidth;
1808 for (unsigned InIdx = 0; InIdx != InVWidth; ++InIdx)
1809 if (DemandedElts[InIdx/Ratio])
1810 InputDemandedElts.set(InIdx);
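// Example of the expanding case: for a bitcast from <2 x i64> to <4 x i32>,
// Ratio == 2 and demanding result lane 3 demands source lane 1, since result
// lanes 2 and 3 are both carved out of source element 1.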
1813 // Simplify the input vector based on the elements of it that we actually demand.
1814 TmpV = SimplifyDemandedVectorElts(I->getOperand(0), InputDemandedElts,
1815 UndefElts2, Depth+1);
1817 I->setOperand(0, TmpV);
1821 UndefElts = UndefElts2;
1822 if (VWidth > InVWidth) {
1823 llvm_unreachable("Unimp");
1824 // If there are more elements in the result than there are in the source,
1825 // then an output element is undef if the corresponding input element is
1827 for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx)
1828 if (UndefElts2[OutIdx/Ratio])
1829 UndefElts.set(OutIdx);
1830 } else if (VWidth < InVWidth) {
1831 llvm_unreachable("Unimp");
1832 // If there are more elements in the source than there are in the result,
1833 // then a result element is undef if all of the corresponding input
1834 // elements are undef.
1835 UndefElts = ~0ULL >> (64-VWidth); // Start out all undef.
1836 for (unsigned InIdx = 0; InIdx != InVWidth; ++InIdx)
1837 if (!UndefElts2[InIdx]) // Not undef?
1838 UndefElts.clear(InIdx/Ratio); // Clear undef bit.
1842 case Instruction::And:
1843 case Instruction::Or:
1844 case Instruction::Xor:
1845 case Instruction::Add:
1846 case Instruction::Sub:
1847 case Instruction::Mul:
1848 // Note that div/rem are not handled here: they demand all of their inputs, because we must not introduce a divide-by-zero in a lane the caller does not demand.
1849 TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts,
1850 UndefElts, Depth+1);
1851 if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
1852 TmpV = SimplifyDemandedVectorElts(I->getOperand(1), DemandedElts,
1853 UndefElts2, Depth+1);
1854 if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }
1856 // Output elements are undefined if both are undefined. Consider things
1857 // like undef&0. The result is known zero, not undef.
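// For example, if only lane 0 of 'add <4 x i32> %a, %b' is demanded, both
// operands are simplified with that single-lane mask, and a result lane is
// only considered undef when it is undef in *both* inputs.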
1858 UndefElts &= UndefElts2;
1861 case Instruction::Call: {
1862 IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
1864 switch (II->getIntrinsicID()) {
1867 // Binary vector operations that work column-wise. A dest element is a
1868 // function of the corresponding input elements from the two inputs.
1869 case Intrinsic::x86_sse_sub_ss:
1870 case Intrinsic::x86_sse_mul_ss:
1871 case Intrinsic::x86_sse_min_ss:
1872 case Intrinsic::x86_sse_max_ss:
1873 case Intrinsic::x86_sse2_sub_sd:
1874 case Intrinsic::x86_sse2_mul_sd:
1875 case Intrinsic::x86_sse2_min_sd:
1876 case Intrinsic::x86_sse2_max_sd:
1877 TmpV = SimplifyDemandedVectorElts(II->getOperand(1), DemandedElts,
1878 UndefElts, Depth+1);
1879 if (TmpV) { II->setOperand(1, TmpV); MadeChange = true; }
1880 TmpV = SimplifyDemandedVectorElts(II->getOperand(2), DemandedElts,
1881 UndefElts2, Depth+1);
1882 if (TmpV) { II->setOperand(2, TmpV); MadeChange = true; }
1884 // If only the low elt is demanded and this is a scalarizable intrinsic,
1885 // scalarize it now.
1886 if (DemandedElts == 1) {
1887 switch (II->getIntrinsicID()) {
1889 case Intrinsic::x86_sse_sub_ss:
1890 case Intrinsic::x86_sse_mul_ss:
1891 case Intrinsic::x86_sse2_sub_sd:
1892 case Intrinsic::x86_sse2_mul_sd:
1893 // TODO: Lower MIN/MAX/ABS/etc
1894 Value *LHS = II->getOperand(1);
1895 Value *RHS = II->getOperand(2);
1896 // Extract the element as scalars.
1897 LHS = InsertNewInstBefore(ExtractElementInst::Create(LHS,
1898 ConstantInt::get(Type::getInt32Ty(*Context), 0U, false), "tmp"), *II);
1899 RHS = InsertNewInstBefore(ExtractElementInst::Create(RHS,
1900 ConstantInt::get(Type::getInt32Ty(*Context), 0U, false), "tmp"), *II);
1902 switch (II->getIntrinsicID()) {
1903 default: llvm_unreachable("Case stmts out of sync!");
1904 case Intrinsic::x86_sse_sub_ss:
1905 case Intrinsic::x86_sse2_sub_sd:
1906 TmpV = InsertNewInstBefore(BinaryOperator::CreateFSub(LHS, RHS,
1907 II->getName()), *II);
1909 case Intrinsic::x86_sse_mul_ss:
1910 case Intrinsic::x86_sse2_mul_sd:
1911 TmpV = InsertNewInstBefore(BinaryOperator::CreateFMul(LHS, RHS,
1912 II->getName()), *II);
1917 InsertElementInst::Create(
1918 UndefValue::get(II->getType()), TmpV,
1919 ConstantInt::get(Type::getInt32Ty(*Context), 0U, false), II->getName());
1920 InsertNewInstBefore(New, *II);
1925 // Output elements are undefined if both are undefined. Consider things
1926 // like undef&0. The result is known zero, not undef.
1927 UndefElts &= UndefElts2;
1933 return MadeChange ? I : 0;
1937 /// AssociativeOpt - Perform an optimization on an associative operator. This
1938 /// function is designed to check a chain of associative operators for a
1939 /// potential to apply a certain optimization. Since the optimization may be
1940 /// applicable if the expression was reassociated, this checks the chain, then
1941 /// reassociates the expression as necessary to expose the optimization
1942 /// opportunity. This makes use of a special Functor, which must define
1943 /// 'shouldApply' and 'apply' methods.
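// For a concrete Functor, see AddRHS below: its shouldApply matches an operand
// equal to the add's RHS, and its apply rewrites X + X into X << 1.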
1945 template<typename Functor>
1946 static Instruction *AssociativeOpt(BinaryOperator &Root, const Functor &F) {
1947 unsigned Opcode = Root.getOpcode();
1948 Value *LHS = Root.getOperand(0);
1950 // Quick check, see if the immediate LHS matches...
1951 if (F.shouldApply(LHS))
1952 return F.apply(Root);
1954 // Otherwise, if the LHS is not of the same opcode as the root, return.
1955 Instruction *LHSI = dyn_cast<Instruction>(LHS);
1956 while (LHSI && LHSI->getOpcode() == Opcode && LHSI->hasOneUse()) {
1957 // Should we apply this transform to the RHS?
1958 bool ShouldApply = F.shouldApply(LHSI->getOperand(1));
1960 // If not to the RHS, check to see if we should apply to the LHS...
1961 if (!ShouldApply && F.shouldApply(LHSI->getOperand(0))) {
1962 cast<BinaryOperator>(LHSI)->swapOperands(); // Make the LHS the RHS
1966 // If the functor wants to apply the optimization to the RHS of LHSI,
1967 // reassociate the expression from ((? op A) op B) to (? op (A op B))
1969 // Now all of the instructions are in the current basic block, go ahead
1970 // and perform the reassociation.
1971 Instruction *TmpLHSI = cast<Instruction>(Root.getOperand(0));
1973 // First move the selected RHS to the LHS of the root...
1974 Root.setOperand(0, LHSI->getOperand(1));
1976 // Make what used to be the LHS of the root be the user of the root...
1977 Value *ExtraOperand = TmpLHSI->getOperand(1);
1978 if (&Root == TmpLHSI) {
1979 Root.replaceAllUsesWith(Constant::getNullValue(TmpLHSI->getType()));
1982 Root.replaceAllUsesWith(TmpLHSI); // Users now use TmpLHSI
1983 TmpLHSI->setOperand(1, &Root); // TmpLHSI now uses the root
1984 BasicBlock::iterator ARI = &Root; ++ARI;
1985 TmpLHSI->moveBefore(ARI); // Move TmpLHSI to after Root
1988 // Now propagate the ExtraOperand down the chain of instructions until we
1989 // get to LHSI.
1990 while (TmpLHSI != LHSI) {
1991 Instruction *NextLHSI = cast<Instruction>(TmpLHSI->getOperand(0));
1992 // Move the instruction to immediately before the chain we are
1993 // constructing to avoid breaking dominance properties.
1994 NextLHSI->moveBefore(ARI);
1997 Value *NextOp = NextLHSI->getOperand(1);
1998 NextLHSI->setOperand(1, ExtraOperand);
2000 ExtraOperand = NextOp;
2003 // Now that the instructions are reassociated, have the functor perform
2004 // the transformation...
2005 return F.apply(Root);
2008 LHSI = dyn_cast<Instruction>(LHSI->getOperand(0));
2015 // AddRHS - Implements: X + X --> X << 1
2018 explicit AddRHS(Value *rhs) : RHS(rhs) {}
2019 bool shouldApply(Value *LHS) const { return LHS == RHS; }
2020 Instruction *apply(BinaryOperator &Add) const {
2021 return BinaryOperator::CreateShl(Add.getOperand(0),
2022 ConstantInt::get(Add.getType(), 1));
2026 // AddMaskingAnd - Implements (A & C1)+(B & C2) --> (A & C1)|(B & C2)
2028 struct AddMaskingAnd {
2030 explicit AddMaskingAnd(Constant *c) : C2(c) {}
2031 bool shouldApply(Value *LHS) const {
2033 return match(LHS, m_And(m_Value(), m_ConstantInt(C1))) &&
2034 ConstantExpr::getAnd(C1, C2)->isNullValue();
2036 Instruction *apply(BinaryOperator &Add) const {
2037 return BinaryOperator::CreateOr(Add.getOperand(0), Add.getOperand(1));
2043 static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO,
2045 if (CastInst *CI = dyn_cast<CastInst>(&I))
2046 return IC->Builder->CreateCast(CI->getOpcode(), SO, I.getType());
2048 // Figure out if the constant is the left or the right argument.
2049 bool ConstIsRHS = isa<Constant>(I.getOperand(1));
2050 Constant *ConstOperand = cast<Constant>(I.getOperand(ConstIsRHS));
2052 if (Constant *SOC = dyn_cast<Constant>(SO)) {
2054 return ConstantExpr::get(I.getOpcode(), SOC, ConstOperand);
2055 return ConstantExpr::get(I.getOpcode(), ConstOperand, SOC);
2058 Value *Op0 = SO, *Op1 = ConstOperand;
2060 std::swap(Op0, Op1);
2062 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I))
2063 return IC->Builder->CreateBinOp(BO->getOpcode(), Op0, Op1,
2064 SO->getName()+".op");
2065 if (ICmpInst *CI = dyn_cast<ICmpInst>(&I))
2066 return IC->Builder->CreateICmp(CI->getPredicate(), Op0, Op1,
2067 SO->getName()+".cmp");
2068 if (FCmpInst *CI = dyn_cast<FCmpInst>(&I))
2069 return IC->Builder->CreateFCmp(CI->getPredicate(), Op0, Op1,
2070 SO->getName()+".cmp");
2071 llvm_unreachable("Unknown binary instruction type!");
2074 // FoldOpIntoSelect - Given an instruction with a select as one operand and a
2075 // constant as the other operand, try to fold the binary operator into the
2076 // select arguments. This also works for Cast instructions, which obviously do
2077 // not have a second operand.
2078 static Instruction *FoldOpIntoSelect(Instruction &Op, SelectInst *SI,
2080 // Don't modify shared select instructions
2081 if (!SI->hasOneUse()) return 0;
2082 Value *TV = SI->getOperand(1);
2083 Value *FV = SI->getOperand(2);
2085 if (isa<Constant>(TV) || isa<Constant>(FV)) {
2086 // Bool selects with constant operands can be folded to logical ops.
2087 if (SI->getType() == Type::getInt1Ty(*IC->getContext())) return 0;
2089 Value *SelectTrueVal = FoldOperationIntoSelectOperand(Op, TV, IC);
2090 Value *SelectFalseVal = FoldOperationIntoSelectOperand(Op, FV, IC);
2092 return SelectInst::Create(SI->getCondition(), SelectTrueVal,
2099 /// FoldOpIntoPhi - Given a binary operator, cast instruction, or select which
2100 /// has a PHI node as operand #0, see if we can fold the instruction into the
2101 /// PHI (which is only possible if all operands to the PHI are constants).
2103 /// If AllowAggressive is true, FoldOpIntoPhi will allow certain transforms
2104 /// that would normally be unprofitable because they strongly encourage jump
2105 /// threading.
2106 Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I,
2107 bool AllowAggressive) {
2108 AllowAggressive = false;
2109 PHINode *PN = cast<PHINode>(I.getOperand(0));
2110 unsigned NumPHIValues = PN->getNumIncomingValues();
2111 if (NumPHIValues == 0 ||
2112 // We normally only transform phis with a single use, unless we're trying
2113 // hard to make jump threading happen.
2114 (!PN->hasOneUse() && !AllowAggressive))
2118 // Check to see if all of the operands of the PHI are simple constants
2119 // (constantint/constantfp/undef). If there is one non-constant value,
2120 // remember the BB it is in. If there is more than one or if *it* is a PHI,
2121 // bail out. We don't do arbitrary constant expressions here because moving
2122 // their computation can be expensive without a cost model.
2123 BasicBlock *NonConstBB = 0;
2124 for (unsigned i = 0; i != NumPHIValues; ++i)
2125 if (!isa<Constant>(PN->getIncomingValue(i)) ||
2126 isa<ConstantExpr>(PN->getIncomingValue(i))) {
2127 if (NonConstBB) return 0; // More than one non-const value.
2128 if (isa<PHINode>(PN->getIncomingValue(i))) return 0; // Itself a phi.
2129 NonConstBB = PN->getIncomingBlock(i);
2131 // If the incoming non-constant value is in I's block, we have an infinite
2132 // loop, so bail out.
2133 if (NonConstBB == I.getParent())
2137 // If there is exactly one non-constant value, we can insert a copy of the
2138 // operation in that block. However, if this is a critical edge, we would be
2139 // inserting the computation on some other paths (e.g. inside a loop). Only
2140 // do this if the pred block is unconditionally branching into the phi block.
2141 if (NonConstBB != 0 && !AllowAggressive) {
2142 BranchInst *BI = dyn_cast<BranchInst>(NonConstBB->getTerminator());
2143 if (!BI || !BI->isUnconditional()) return 0;
2146 // Okay, we can do the transformation: create the new PHI node.
2147 PHINode *NewPN = PHINode::Create(I.getType(), "");
2148 NewPN->reserveOperandSpace(PN->getNumOperands()/2);
2149 InsertNewInstBefore(NewPN, *PN);
2150 NewPN->takeName(PN);
2152 // Next, add all of the operands to the PHI.
2153 if (SelectInst *SI = dyn_cast<SelectInst>(&I)) {
2154 // We only currently try to fold the condition of a select when it is a phi,
2155 // not the true/false values.
2156 Value *TrueV = SI->getTrueValue();
2157 Value *FalseV = SI->getFalseValue();
2158 BasicBlock *PhiTransBB = PN->getParent();
2159 for (unsigned i = 0; i != NumPHIValues; ++i) {
2160 BasicBlock *ThisBB = PN->getIncomingBlock(i);
2161 Value *TrueVInPred = TrueV->DoPHITranslation(PhiTransBB, ThisBB);
2162 Value *FalseVInPred = FalseV->DoPHITranslation(PhiTransBB, ThisBB);
2164 if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) {
2165 InV = InC->isNullValue() ? FalseVInPred : TrueVInPred;
2166 } else {
2167 assert(PN->getIncomingBlock(i) == NonConstBB);
2168 InV = SelectInst::Create(PN->getIncomingValue(i), TrueVInPred,
2170 "phitmp", NonConstBB->getTerminator());
2171 Worklist.Add(cast<Instruction>(InV));
2173 NewPN->addIncoming(InV, ThisBB);
2175 } else if (I.getNumOperands() == 2) {
2176 Constant *C = cast<Constant>(I.getOperand(1));
2177 for (unsigned i = 0; i != NumPHIValues; ++i) {
2179 if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) {
2180 if (CmpInst *CI = dyn_cast<CmpInst>(&I))
2181 InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C);
2183 InV = ConstantExpr::get(I.getOpcode(), InC, C);
2184 } else {
2185 assert(PN->getIncomingBlock(i) == NonConstBB);
2186 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I))
2187 InV = BinaryOperator::Create(BO->getOpcode(),
2188 PN->getIncomingValue(i), C, "phitmp",
2189 NonConstBB->getTerminator());
2190 else if (CmpInst *CI = dyn_cast<CmpInst>(&I))
2191 InV = CmpInst::Create(CI->getOpcode(),
2193 PN->getIncomingValue(i), C, "phitmp",
2194 NonConstBB->getTerminator());
2196 llvm_unreachable("Unknown binop!");
2198 Worklist.Add(cast<Instruction>(InV));
2200 NewPN->addIncoming(InV, PN->getIncomingBlock(i));
2203 CastInst *CI = cast<CastInst>(&I);
2204 const Type *RetTy = CI->getType();
2205 for (unsigned i = 0; i != NumPHIValues; ++i) {
2207 if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) {
2208 InV = ConstantExpr::getCast(CI->getOpcode(), InC, RetTy);
2209 } else {
2210 assert(PN->getIncomingBlock(i) == NonConstBB);
2211 InV = CastInst::Create(CI->getOpcode(), PN->getIncomingValue(i),
2212 I.getType(), "phitmp",
2213 NonConstBB->getTerminator());
2214 Worklist.Add(cast<Instruction>(InV));
2216 NewPN->addIncoming(InV, PN->getIncomingBlock(i));
2219 return ReplaceInstUsesWith(I, NewPN);
2223 /// WillNotOverflowSignedAdd - Return true if we can prove that:
2224 /// (sext (add LHS, RHS)) === (add (sext LHS), (sext RHS))
2225 /// This basically requires proving that the add in the original type would not
2226 /// overflow to change the sign bit or have a carry out.
2227 bool InstCombiner::WillNotOverflowSignedAdd(Value *LHS, Value *RHS) {
2228 // There are different heuristics we can use for this. Here are some simple
2229 // ones.
2231 // Add has the property that adding any two 2's complement numbers can only
2232 // have one carry bit which can change a sign. As such, if LHS and RHS each
2233 // have at least two sign bits, we know that the addition of the two values
2234 // will sign extend fine.
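// For example, two i8 values that each have at least two sign bits lie in
// [-64, 63], so their sum lies in [-128, 126] and still fits in i8 without
// overflowing or changing the sign bit.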
2235 if (ComputeNumSignBits(LHS) > 1 && ComputeNumSignBits(RHS) > 1)
2236 return true;
2239 // If one of the operands only has one non-zero bit, and if the other operand
2240 // has a known-zero bit in a more significant place than it (not including the
2241 // sign bit) the ripple may go up to and fill the zero, but won't change the
2242 // sign. For example, (X & ~4) + 1.
2250 Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
2251 bool Changed = SimplifyCommutative(I);
2252 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
2254 if (Value *V = SimplifyAddInst(LHS, RHS, I.hasNoSignedWrap(),
2255 I.hasNoUnsignedWrap(), TD))
2256 return ReplaceInstUsesWith(I, V);
2259 if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
2260 if (ConstantInt *CI = dyn_cast<ConstantInt>(RHSC)) {
2261 // X + (signbit) --> X ^ signbit
2262 const APInt& Val = CI->getValue();
2263 uint32_t BitWidth = Val.getBitWidth();
2264 if (Val == APInt::getSignBit(BitWidth))
2265 return BinaryOperator::CreateXor(LHS, RHS);
2267 // See if SimplifyDemandedBits can simplify this. This handles stuff like
2268 // (X & 254)+1 -> (X&254)|1
2269 if (SimplifyDemandedInstructionBits(I))
2272 // zext(bool) + C -> bool ? C + 1 : C
2273 if (ZExtInst *ZI = dyn_cast<ZExtInst>(LHS))
2274 if (ZI->getSrcTy() == Type::getInt1Ty(*Context))
2275 return SelectInst::Create(ZI->getOperand(0), AddOne(CI), CI);
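// e.g. 'add i32 (zext i1 %b to i32), 41' becomes
// 'select i1 %b, i32 42, i32 41'.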
2278 if (isa<PHINode>(LHS))
2279 if (Instruction *NV = FoldOpIntoPhi(I))
2282 ConstantInt *XorRHS = 0;
2284 if (isa<ConstantInt>(RHSC) &&
2285 match(LHS, m_Xor(m_Value(XorLHS), m_ConstantInt(XorRHS)))) {
2286 uint32_t TySizeBits = I.getType()->getScalarSizeInBits();
2287 const APInt& RHSVal = cast<ConstantInt>(RHSC)->getValue();
2289 uint32_t Size = TySizeBits / 2;
2290 APInt C0080Val(APInt(TySizeBits, 1ULL).shl(Size - 1));
2291 APInt CFF80Val(-C0080Val);
2293 if (TySizeBits > Size) {
2294 // If we have ADD(XOR(AND(X, 0xFF), 0x80), 0xF..F80), it's a sext.
2295 // If we have ADD(XOR(AND(X, 0xFF), 0xF..F80), 0x80), it's a sext.
2296 if ((RHSVal == CFF80Val && XorRHS->getValue() == C0080Val) ||
2297 (RHSVal == C0080Val && XorRHS->getValue() == CFF80Val)) {
2298 // This is a sign extend if the top bits are known zero.
2299 if (!MaskedValueIsZero(XorLHS,
2300 APInt::getHighBitsSet(TySizeBits, TySizeBits - Size)))
2301 Size = 0; // Not a sign ext, but can't be any others either.
2306 C0080Val = APIntOps::lshr(C0080Val, Size);
2307 CFF80Val = APIntOps::ashr(CFF80Val, Size);
2308 } while (Size >= 1);
2310 // FIXME: This shouldn't be necessary. When the backends can handle types
2311 // with funny bit widths then this switch statement should be removed. It
2312 // is just here to get the size of the "middle" type back up to something
2313 // that the back ends can handle.
2314 const Type *MiddleType = 0;
2317 case 32: MiddleType = Type::getInt32Ty(*Context); break;
2318 case 16: MiddleType = Type::getInt16Ty(*Context); break;
2319 case 8: MiddleType = Type::getInt8Ty(*Context); break;
2322 Value *NewTrunc = Builder->CreateTrunc(XorLHS, MiddleType, "sext");
2323 return new SExtInst(NewTrunc, I.getType(), I.getName());
2328 if (I.getType() == Type::getInt1Ty(*Context))
2329 return BinaryOperator::CreateXor(LHS, RHS);
2332 if (I.getType()->isInteger()) {
2333 if (Instruction *Result = AssociativeOpt(I, AddRHS(RHS)))
2336 if (Instruction *RHSI = dyn_cast<Instruction>(RHS)) {
2337 if (RHSI->getOpcode() == Instruction::Sub)
2338 if (LHS == RHSI->getOperand(1)) // A + (B - A) --> B
2339 return ReplaceInstUsesWith(I, RHSI->getOperand(0));
2341 if (Instruction *LHSI = dyn_cast<Instruction>(LHS)) {
2342 if (LHSI->getOpcode() == Instruction::Sub)
2343 if (RHS == LHSI->getOperand(1)) // (B - A) + A --> B
2344 return ReplaceInstUsesWith(I, LHSI->getOperand(0));
2349 // -A + -B --> -(A + B)
2350 if (Value *LHSV = dyn_castNegVal(LHS)) {
2351 if (LHS->getType()->isIntOrIntVector()) {
2352 if (Value *RHSV = dyn_castNegVal(RHS)) {
2353 Value *NewAdd = Builder->CreateAdd(LHSV, RHSV, "sum");
2354 return BinaryOperator::CreateNeg(NewAdd);
2358 return BinaryOperator::CreateSub(RHS, LHSV);
2362 if (!isa<Constant>(RHS))
2363 if (Value *V = dyn_castNegVal(RHS))
2364 return BinaryOperator::CreateSub(LHS, V);
2368 if (Value *X = dyn_castFoldableMul(LHS, C2)) {
2369 if (X == RHS) // X*C + X --> X * (C+1)
2370 return BinaryOperator::CreateMul(RHS, AddOne(C2));
2372 // X*C1 + X*C2 --> X * (C1+C2)
2374 if (X == dyn_castFoldableMul(RHS, C1))
2375 return BinaryOperator::CreateMul(X, ConstantExpr::getAdd(C1, C2));
2378 // X + X*C --> X * (C+1)
2379 if (dyn_castFoldableMul(RHS, C2) == LHS)
2380 return BinaryOperator::CreateMul(LHS, AddOne(C2));
2382 // X + ~X --> -1 since ~X = -X-1
2383 if (dyn_castNotVal(LHS) == RHS ||
2384 dyn_castNotVal(RHS) == LHS)
2385 return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));
2388 // (A & C1)+(B & C2) --> (A & C1)|(B & C2) iff C1&C2 == 0
2389 if (match(RHS, m_And(m_Value(), m_ConstantInt(C2))))
2390 if (Instruction *R = AssociativeOpt(I, AddMaskingAnd(C2)))
2393 // A+B --> A|B iff A and B have no bits set in common.
2394 if (const IntegerType *IT = dyn_cast<IntegerType>(I.getType())) {
2395 APInt Mask = APInt::getAllOnesValue(IT->getBitWidth());
2396 APInt LHSKnownOne(IT->getBitWidth(), 0);
2397 APInt LHSKnownZero(IT->getBitWidth(), 0);
2398 ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
2399 if (LHSKnownZero != 0) {
2400 APInt RHSKnownOne(IT->getBitWidth(), 0);
2401 APInt RHSKnownZero(IT->getBitWidth(), 0);
2402 ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);
2404 // No bits in common -> bitwise or.
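// For example, (X & 0xF0) + (Y & 0x0F) can never generate a carry between
// the two operands, so it is rewritten as (X & 0xF0) | (Y & 0x0F).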
2405 if ((LHSKnownZero|RHSKnownZero).isAllOnesValue())
2406 return BinaryOperator::CreateOr(LHS, RHS);
2410 // W*X + Y*Z --> W * (X+Z) iff W == Y
2411 if (I.getType()->isIntOrIntVector()) {
2412 Value *W, *X, *Y, *Z;
2413 if (match(LHS, m_Mul(m_Value(W), m_Value(X))) &&
2414 match(RHS, m_Mul(m_Value(Y), m_Value(Z)))) {
2418 } else if (Y == X) {
2420 } else if (X == Z) {
2427 Value *NewAdd = Builder->CreateAdd(X, Z, LHS->getName());
2428 return BinaryOperator::CreateMul(W, NewAdd);
2433 if (ConstantInt *CRHS = dyn_cast<ConstantInt>(RHS)) {
2435 if (match(LHS, m_Not(m_Value(X)))) // ~X + C --> (C-1) - X
2436 return BinaryOperator::CreateSub(SubOne(CRHS), X);
2438 // (X & FF00) + xx00 -> (X+xx00) & FF00
2439 if (LHS->hasOneUse() &&
2440 match(LHS, m_And(m_Value(X), m_ConstantInt(C2)))) {
2441 Constant *Anded = ConstantExpr::getAnd(CRHS, C2);
2442 if (Anded == CRHS) {
2443 // See if all bits from the first bit set in the Add RHS up are included
2444 // in the mask. First, get the rightmost bit.
2445 const APInt& AddRHSV = CRHS->getValue();
2447 // Form a mask of all bits from the lowest bit added through the top.
2448 APInt AddRHSHighBits(~((AddRHSV & -AddRHSV)-1));
2450 // See if the and mask includes all of these bits.
2451 APInt AddRHSHighBitsAnd(AddRHSHighBits & C2->getValue());
2453 if (AddRHSHighBits == AddRHSHighBitsAnd) {
2454 // Okay, the xform is safe. Insert the new add pronto.
2455 Value *NewAdd = Builder->CreateAdd(X, CRHS, LHS->getName());
2456 return BinaryOperator::CreateAnd(NewAdd, C2);
2461 // Try to fold constant add into select arguments.
2462 if (SelectInst *SI = dyn_cast<SelectInst>(LHS))
2463 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
2467 // add (select X 0 (sub n A)) A --> select X A n
2469 SelectInst *SI = dyn_cast<SelectInst>(LHS);
2472 SI = dyn_cast<SelectInst>(RHS);
2475 if (SI && SI->hasOneUse()) {
2476 Value *TV = SI->getTrueValue();
2477 Value *FV = SI->getFalseValue();
2480 // Can we fold the add into the argument of the select?
2481 // We check both true and false select arguments for a matching subtract.
2482 if (match(FV, m_Zero()) &&
2483 match(TV, m_Sub(m_Value(N), m_Specific(A))))
2484 // Fold the add into the true select value.
2485 return SelectInst::Create(SI->getCondition(), N, A);
2486 if (match(TV, m_Zero()) &&
2487 match(FV, m_Sub(m_Value(N), m_Specific(A))))
2488 // Fold the add into the false select value.
2489 return SelectInst::Create(SI->getCondition(), A, N);
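// For example, 'add (select i1 %c, i32 0, i32 (sub i32 %n, %a)), %a'
// becomes 'select i1 %c, i32 %a, i32 %n'.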
2493 // Check for (add (sext x), y), see if we can merge this into an
2494 // integer add followed by a sext.
2495 if (SExtInst *LHSConv = dyn_cast<SExtInst>(LHS)) {
2496 // (add (sext x), cst) --> (sext (add x, cst'))
2497 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS)) {
2499 ConstantExpr::getTrunc(RHSC, LHSConv->getOperand(0)->getType());
2500 if (LHSConv->hasOneUse() &&
2501 ConstantExpr::getSExt(CI, I.getType()) == RHSC &&
2502 WillNotOverflowSignedAdd(LHSConv->getOperand(0), CI)) {
2503 // Insert the new, smaller add.
2504 Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0),
2506 return new SExtInst(NewAdd, I.getType());
2510 // (add (sext x), (sext y)) --> (sext (add int x, y))
2511 if (SExtInst *RHSConv = dyn_cast<SExtInst>(RHS)) {
2512 // Only do this if x/y have the same type, if at least one of them has a
2513 // single use (so we don't increase the number of sexts), and if the
2514 // integer add will not overflow.
2515 if (LHSConv->getOperand(0)->getType()==RHSConv->getOperand(0)->getType()&&
2516 (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
2517 WillNotOverflowSignedAdd(LHSConv->getOperand(0),
2518 RHSConv->getOperand(0))) {
2519 // Insert the new integer add.
2520 Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0),
2521 RHSConv->getOperand(0), "addconv");
2522 return new SExtInst(NewAdd, I.getType());
2527 return Changed ? &I : 0;
2530 Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
2531 bool Changed = SimplifyCommutative(I);
2532 Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
2534 if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
2536 if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHSC)) {
2537 if (CFP->isExactlyValue(ConstantFP::getNegativeZero
2538 (I.getType())->getValueAPF()))
2539 return ReplaceInstUsesWith(I, LHS);
2542 if (isa<PHINode>(LHS))
2543 if (Instruction *NV = FoldOpIntoPhi(I))
2548 // -A + -B --> -(A + B)
2549 if (Value *LHSV = dyn_castFNegVal(LHS))
2550 return BinaryOperator::CreateFSub(RHS, LHSV);
2553 if (!isa<Constant>(RHS))
2554 if (Value *V = dyn_castFNegVal(RHS))
2555 return BinaryOperator::CreateFSub(LHS, V);
2557 // Check for X+0.0. Simplify it to X if we know X is not -0.0.
2558 if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHS))
2559 if (CFP->getValueAPF().isPosZero() && CannotBeNegativeZero(LHS))
2560 return ReplaceInstUsesWith(I, LHS);
2562 // Check for (add double (sitofp x), y), see if we can merge this into an
2563 // integer add followed by a promotion.
2564 if (SIToFPInst *LHSConv = dyn_cast<SIToFPInst>(LHS)) {
2565 // (add double (sitofp x), fpcst) --> (sitofp (add int x, intcst))
2566 // ... if the constant fits in the integer value. This is useful for things
2567 // like (double)(x & 1234) + 4.0 -> (double)((X & 1234)+4) which no longer
2568 // requires a constant pool load, and generally allows the add to be better
2569 // codegen'd.
2570 if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHS)) {
2572 ConstantExpr::getFPToSI(CFP, LHSConv->getOperand(0)->getType());
2573 if (LHSConv->hasOneUse() &&
2574 ConstantExpr::getSIToFP(CI, I.getType()) == CFP &&
2575 WillNotOverflowSignedAdd(LHSConv->getOperand(0), CI)) {
2576 // Insert the new integer add.
2577 Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0),
2579 return new SIToFPInst(NewAdd, I.getType());
2583 // (add double (sitofp x), (sitofp y)) --> (sitofp (add int x, y))
2584 if (SIToFPInst *RHSConv = dyn_cast<SIToFPInst>(RHS)) {
2585 // Only do this if x/y have the same type, if at least one of them has a
2586 // single use (so we don't increase the number of int->fp conversions),
2587 // and if the integer add will not overflow.
2588 if (LHSConv->getOperand(0)->getType()==RHSConv->getOperand(0)->getType()&&
2589 (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
2590 WillNotOverflowSignedAdd(LHSConv->getOperand(0),
2591 RHSConv->getOperand(0))) {
2592 // Insert the new integer add.
2593 Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0),
2594 RHSConv->getOperand(0),"addconv");
2595 return new SIToFPInst(NewAdd, I.getType());
2600 return Changed ? &I : 0;
2604 /// EmitGEPOffset - Given a getelementptr instruction/constantexpr, emit the
2605 /// code necessary to compute the offset from the base pointer (without adding
2606 /// in the base pointer). Return the result as a signed integer of intptr size.
2607 static Value *EmitGEPOffset(User *GEP, InstCombiner &IC) {
2608 TargetData &TD = *IC.getTargetData();
2609 gep_type_iterator GTI = gep_type_begin(GEP);
2610 const Type *IntPtrTy = TD.getIntPtrType(GEP->getContext());
2611 Value *Result = Constant::getNullValue(IntPtrTy);
2613 // Build a mask for high order bits.
2614 unsigned IntPtrWidth = TD.getPointerSizeInBits();
2615 uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);
2617 for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
2620 uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
2621 if (ConstantInt *OpC = dyn_cast<ConstantInt>(Op)) {
2622 if (OpC->isZero()) continue;
2624 // Handle a struct index, which adds its field offset to the pointer.
2625 if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
2626 Size = TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
2628 Result = IC.Builder->CreateAdd(Result,
2629 ConstantInt::get(IntPtrTy, Size),
2630 GEP->getName()+".offs");
2634 Constant *Scale = ConstantInt::get(IntPtrTy, Size);
2636 ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/);
2637 Scale = ConstantExpr::getMul(OC, Scale);
2638 // Emit an add instruction.
2639 Result = IC.Builder->CreateAdd(Result, Scale, GEP->getName()+".offs");
2642 // Convert to correct type.
2643 if (Op->getType() != IntPtrTy)
2644 Op = IC.Builder->CreateIntCast(Op, IntPtrTy, true, Op->getName()+".c");
2646 Constant *Scale = ConstantInt::get(IntPtrTy, Size);
2647 // We'll let instcombine(mul) convert this to a shl if possible.
2648 Op = IC.Builder->CreateMul(Op, Scale, GEP->getName()+".idx");
2651 // Emit an add instruction.
2652 Result = IC.Builder->CreateAdd(Op, Result, GEP->getName()+".offs");
2658 /// EvaluateGEPOffsetExpression - Return a value that can be used to compare
2659 /// the *offset* implied by a GEP to zero. For example, if we have &A[i], we
2660 /// want to return 'i' for "icmp ne i, 0". Note that, in general, indices can
2661 /// be complex, and scales are involved. The above expression would also be
2662 /// legal to codegen as "icmp ne (i*4), 0" (assuming A is a pointer to i32).
2663 /// This latter form is less amenable to optimization though, and we are allowed
2664 /// to generate the first by knowing that pointer arithmetic doesn't overflow.
2666 /// If we can't emit an optimized form for this expression, this returns null.
2668 static Value *EvaluateGEPOffsetExpression(User *GEP, Instruction &I,
2670 TargetData &TD = *IC.getTargetData();
2671 gep_type_iterator GTI = gep_type_begin(GEP);
2673 // Check to see if this gep only has a single variable index. If so, and if
2674 // any constant indices are a multiple of its scale, then we can compute this
2675 // in terms of the scale of the variable index. For example, if the GEP
2676 // implies an offset of "12 + i*4", then we can codegen this as "3 + i",
2677 // because the expression will cross zero at the same point.
2678 unsigned i, e = GEP->getNumOperands();
2680 for (i = 1; i != e; ++i, ++GTI) {
2681 if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
2682 // Compute the aggregate offset of constant indices.
2683 if (CI->isZero()) continue;
2685 // Handle a struct index, which adds its field offset to the pointer.
2686 if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
2687 Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
2689 uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
2690 Offset += Size*CI->getSExtValue();
2693 // Found our variable index.
2698 // If there are no variable indices, we must have a constant offset, just
2699 // evaluate it the general way.
2700 if (i == e) return 0;
2702 Value *VariableIdx = GEP->getOperand(i);
2703 // Determine the scale factor of the variable element. For example, this is
2704 // 4 if the variable index is into an array of i32.
2705 uint64_t VariableScale = TD.getTypeAllocSize(GTI.getIndexedType());
2707 // Verify that there are no other variable indices. If so, emit the hard way.
2708 for (++i, ++GTI; i != e; ++i, ++GTI) {
2709 ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i));
2712 // Compute the aggregate offset of constant indices.
2713 if (CI->isZero()) continue;
2715 // Handle a struct index, which adds its field offset to the pointer.
2716 if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
2717 Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
2719 uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
2720 Offset += Size*CI->getSExtValue();
2724 // Okay, we know we have a single variable index, which must be a
2725 // pointer/array/vector index. If there is no offset, life is simple, return
2726 // the variable.
2727 unsigned IntPtrWidth = TD.getPointerSizeInBits();
2729 // Cast to the intptr type in case a truncation occurs. If an extension is needed,
2730 // we don't need to bother extending: the extension won't affect where the
2731 // computation crosses zero.
2732 if (VariableIdx->getType()->getPrimitiveSizeInBits() > IntPtrWidth)
2733 VariableIdx = new TruncInst(VariableIdx,
2734 TD.getIntPtrType(VariableIdx->getContext()),
2735 VariableIdx->getName(), &I);
2739 // Otherwise, there is an index. The computation we will do will be modulo
2740 // the pointer size, so get it.
2741 uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);
2743 Offset &= PtrSizeMask;
2744 VariableScale &= PtrSizeMask;
2746 // To do this transformation, any constant index must be a multiple of the
2747 // variable scale factor. For example, we can evaluate "12 + 4*i" as "3 + i",
2748 // but we can't evaluate "10 + 3*i" in terms of i. Check that the offset is a
2749 // multiple of the variable scale.
2750 int64_t NewOffs = Offset / (int64_t)VariableScale;
2751 if (Offset != NewOffs*(int64_t)VariableScale)
2754 // Okay, we can do this evaluation. Start by converting the index to intptr.
2755 const Type *IntPtrTy = TD.getIntPtrType(VariableIdx->getContext());
2756 if (VariableIdx->getType() != IntPtrTy)
2757 VariableIdx = CastInst::CreateIntegerCast(VariableIdx, IntPtrTy,
2759 VariableIdx->getName(), &I);
2760 Constant *OffsetVal = ConstantInt::get(IntPtrTy, NewOffs);
2761 return BinaryOperator::CreateAdd(VariableIdx, OffsetVal, "offset", &I);
2765 /// Optimize pointer differences in the same array into a size. Consider:
2766 /// &A[10] - &A[0]: we should compile this to "10". LHS/RHS are the pointer
2767 /// operands to the ptrtoint instructions for the LHS/RHS of the subtract.
2769 Value *InstCombiner::OptimizePointerDifference(Value *LHS, Value *RHS,
2771 assert(TD && "Must have target data info for this");
2773 // If LHS is a gep based on RHS or RHS is a gep based on LHS, we can optimize
2776 GetElementPtrInst *GEP = 0;
2777 ConstantExpr *CstGEP = 0;
2779 // TODO: Could also optimize &A[i] - &A[j] -> "i-j", and "&A.foo[i] - &A.foo".
2780 // For now we require one side to be the base pointer "A" or a constant
2781 // expression derived from it.
2782 if (GetElementPtrInst *LHSGEP = dyn_cast<GetElementPtrInst>(LHS)) {
2784 if (LHSGEP->getOperand(0) == RHS) {
2787 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(RHS)) {
2788 // (gep X, ...) - (ce_gep X, ...)
2789 if (CE->getOpcode() == Instruction::GetElementPtr &&
2790 LHSGEP->getOperand(0) == CE->getOperand(0)) {
2798 if (GetElementPtrInst *RHSGEP = dyn_cast<GetElementPtrInst>(RHS)) {
2800 if (RHSGEP->getOperand(0) == LHS) {
2803 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(LHS)) {
2804 // (ce_gep X, ...) - (gep X, ...)
2805 if (CE->getOpcode() == Instruction::GetElementPtr &&
2806 RHSGEP->getOperand(0) == CE->getOperand(0)) {
2817 // Emit the offset of the GEP as an intptr_t.
2818 Value *Result = EmitGEPOffset(GEP, *this);
2820 // If we had a constant expression GEP on the other side offsetting the
2821 // pointer, subtract it from the offset we have.
2823 Value *CstOffset = EmitGEPOffset(CstGEP, *this);
2824 Result = Builder->CreateSub(Result, CstOffset);
2828 // If we have p - gep(p, ...) then we have to negate the result.
2830 Result = Builder->CreateNeg(Result, "diff.neg");
2832 return Builder->CreateIntCast(Result, Ty, true);
2836 Instruction *InstCombiner::visitSub(BinaryOperator &I) {
2837 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2839 if (Op0 == Op1) // sub X, X -> 0
2840 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
2842 // If this is a 'B = x-(-A)', change to B = x+A. This preserves NSW/NUW.
2843 if (Value *V = dyn_castNegVal(Op1)) {
2844 BinaryOperator *Res = BinaryOperator::CreateAdd(Op0, V);
2845 Res->setHasNoSignedWrap(I.hasNoSignedWrap());
2846 Res->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
2850 if (isa<UndefValue>(Op0))
2851 return ReplaceInstUsesWith(I, Op0); // undef - X -> undef
2852 if (isa<UndefValue>(Op1))
2853 return ReplaceInstUsesWith(I, Op1); // X - undef -> undef
2854 if (I.getType() == Type::getInt1Ty(*Context))
2855 return BinaryOperator::CreateXor(Op0, Op1);
2857 if (ConstantInt *C = dyn_cast<ConstantInt>(Op0)) {
2858 // Replace (-1 - A) with (~A).
2859 if (C->isAllOnesValue())
2860 return BinaryOperator::CreateNot(Op1);
2862 // C - ~X == X + (1+C)
2864 if (match(Op1, m_Not(m_Value(X))))
2865 return BinaryOperator::CreateAdd(X, AddOne(C));
2867 // -(X >>u 31) -> (X >>s 31)
2868 // -(X >>s 31) -> (X >>u 31)
2869 if (C->isZero()) {
2870 if (BinaryOperator *SI = dyn_cast<BinaryOperator>(Op1)) {
2871 if (SI->getOpcode() == Instruction::LShr) {
2872 if (ConstantInt *CU = dyn_cast<ConstantInt>(SI->getOperand(1))) {
2873 // Check to see if we are shifting out everything but the sign bit.
2874 if (CU->getLimitedValue(SI->getType()->getPrimitiveSizeInBits()) ==
2875 SI->getType()->getPrimitiveSizeInBits()-1) {
2876 // Ok, the transformation is safe. Insert AShr.
2877 return BinaryOperator::Create(Instruction::AShr,
2878 SI->getOperand(0), CU, SI->getName());
2881 } else if (SI->getOpcode() == Instruction::AShr) {
2882 if (ConstantInt *CU = dyn_cast<ConstantInt>(SI->getOperand(1))) {
2883 // Check to see if we are shifting out everything but the sign bit.
2884 if (CU->getLimitedValue(SI->getType()->getPrimitiveSizeInBits()) ==
2885 SI->getType()->getPrimitiveSizeInBits()-1) {
2886 // Ok, the transformation is safe. Insert LShr.
2887 return BinaryOperator::CreateLShr(
2888 SI->getOperand(0), CU, SI->getName());
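// Both rewrites rely on the shift producing only 0 or the sign: e.g. for i32,
// 0 - (X >>u 31) and X >>s 31 are both 0 when X is non-negative and -1 when
// X is negative.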
2895 // Try to fold constant sub into select arguments.
2896 if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
2897 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
2900 // C - zext(bool) -> bool ? C - 1 : C
2901 if (ZExtInst *ZI = dyn_cast<ZExtInst>(Op1))
2902 if (ZI->getSrcTy() == Type::getInt1Ty(*Context))
2903 return SelectInst::Create(ZI->getOperand(0), SubOne(C), C);
2906 if (BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1)) {
2907 if (Op1I->getOpcode() == Instruction::Add) {
2908 if (Op1I->getOperand(0) == Op0) // X-(X+Y) == -Y
2909 return BinaryOperator::CreateNeg(Op1I->getOperand(1),
2911 else if (Op1I->getOperand(1) == Op0) // X-(Y+X) == -Y
2912 return BinaryOperator::CreateNeg(Op1I->getOperand(0),
2914 else if (ConstantInt *CI1 = dyn_cast<ConstantInt>(I.getOperand(0))) {
2915 if (ConstantInt *CI2 = dyn_cast<ConstantInt>(Op1I->getOperand(1)))
2916 // C1-(X+C2) --> (C1-C2)-X
2917 return BinaryOperator::CreateSub(
2918 ConstantExpr::getSub(CI1, CI2), Op1I->getOperand(0));
2922 if (Op1I->hasOneUse()) {
2923 // Replace (x - (y - z)) with (x + (z - y)) if the (y - z) subexpression
2924 // is not used by anyone else...
2926 if (Op1I->getOpcode() == Instruction::Sub) {
2927 // Swap the two operands of the subexpr...
2928 Value *IIOp0 = Op1I->getOperand(0), *IIOp1 = Op1I->getOperand(1);
2929 Op1I->setOperand(0, IIOp1);
2930 Op1I->setOperand(1, IIOp0);
2932 // Create the new top level add instruction...
2933 return BinaryOperator::CreateAdd(Op0, Op1);
2936 // Replace (A - (A & B)) with (A & ~B) if this is the only use of (A&B)...
2938 if (Op1I->getOpcode() == Instruction::And &&
2939 (Op1I->getOperand(0) == Op0 || Op1I->getOperand(1) == Op0)) {
2940 Value *OtherOp = Op1I->getOperand(Op1I->getOperand(0) == Op0);
2942 Value *NewNot = Builder->CreateNot(OtherOp, "B.not");
2943 return BinaryOperator::CreateAnd(Op0, NewNot);
2946 // 0 - (X sdiv C) -> (X sdiv -C)
2947 if (Op1I->getOpcode() == Instruction::SDiv)
2948 if (ConstantInt *CSI = dyn_cast<ConstantInt>(Op0))
2950 if (Constant *DivRHS = dyn_cast<Constant>(Op1I->getOperand(1)))
2951 return BinaryOperator::CreateSDiv(Op1I->getOperand(0),
2952 ConstantExpr::getNeg(DivRHS));
2954 // X - X*C --> X * (1-C)
2955 ConstantInt *C2 = 0;
2956 if (dyn_castFoldableMul(Op1I, C2) == Op0) {
2958 ConstantExpr::getSub(ConstantInt::get(I.getType(), 1),
2960 return BinaryOperator::CreateMul(Op0, CP1);
2965 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
2966 if (Op0I->getOpcode() == Instruction::Add) {
2967 if (Op0I->getOperand(0) == Op1) // (Y+X)-Y == X
2968 return ReplaceInstUsesWith(I, Op0I->getOperand(1));
2969 else if (Op0I->getOperand(1) == Op1) // (X+Y)-Y == X
2970 return ReplaceInstUsesWith(I, Op0I->getOperand(0));
2971 } else if (Op0I->getOpcode() == Instruction::Sub) {
2972 if (Op0I->getOperand(0) == Op1) // (X-Y)-X == -Y
2973 return BinaryOperator::CreateNeg(Op0I->getOperand(1),
2979 if (Value *X = dyn_castFoldableMul(Op0, C1)) {
2980 if (X == Op1) // X*C - X --> X * (C-1)
2981 return BinaryOperator::CreateMul(Op1, SubOne(C1));
2983 ConstantInt *C2; // X*C1 - X*C2 -> X * (C1-C2)
2984 if (X == dyn_castFoldableMul(Op1, C2))
2985 return BinaryOperator::CreateMul(X, ConstantExpr::getSub(C1, C2));
2988 // Optimize pointer differences in the same array into a size. Consider:
2989 // &A[10] - &A[0]: we should compile this to "10".
2991 Value *LHSOp, *RHSOp;
2992 if (match(Op0, m_PtrToInt(m_Value(LHSOp))) &&
2993 match(Op1, m_PtrToInt(m_Value(RHSOp))))
2994 if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType()))
2995 return ReplaceInstUsesWith(I, Res);
2997 // trunc(p)-trunc(q) -> trunc(p-q)
2998 if (match(Op0, m_Trunc(m_PtrToInt(m_Value(LHSOp)))) &&
2999 match(Op1, m_Trunc(m_PtrToInt(m_Value(RHSOp)))))
3000 if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType()))
3001 return ReplaceInstUsesWith(I, Res);
3007 Instruction *InstCombiner::visitFSub(BinaryOperator &I) {
3008 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3010 // If this is a 'B = x-(-A)', change to B = x+A...
3011 if (Value *V = dyn_castFNegVal(Op1))
3012 return BinaryOperator::CreateFAdd(Op0, V);
3014 if (BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1)) {
3015 if (Op1I->getOpcode() == Instruction::FAdd) {
3016 if (Op1I->getOperand(0) == Op0) // X-(X+Y) == -Y
3017 return BinaryOperator::CreateFNeg(Op1I->getOperand(1),
3019 else if (Op1I->getOperand(1) == Op0) // X-(Y+X) == -Y
3020 return BinaryOperator::CreateFNeg(Op1I->getOperand(0),
3028 /// isSignBitCheck - Given an exploded icmp instruction, return true if the
3029 /// comparison only checks the sign bit. If it only checks the sign bit, set
3030 /// TrueIfSigned if the result of the comparison is true when the input value is
3031 /// negative.
3032 static bool isSignBitCheck(ICmpInst::Predicate pred, ConstantInt *RHS,
3033 bool &TrueIfSigned) {
3035 case ICmpInst::ICMP_SLT: // True if LHS s< 0
3036 TrueIfSigned = true;
3037 return RHS->isZero();
3038 case ICmpInst::ICMP_SLE: // True if LHS s<= RHS and RHS == -1
3039 TrueIfSigned = true;
3040 return RHS->isAllOnesValue();
3041 case ICmpInst::ICMP_SGT: // True if LHS s> -1
3042 TrueIfSigned = false;
3043 return RHS->isAllOnesValue();
3044 case ICmpInst::ICMP_UGT:
3045 // True if LHS u> RHS and RHS == high-bit-mask - 1
3046 TrueIfSigned = true;
3047 return RHS->getValue() ==
3048 APInt::getSignedMaxValue(RHS->getType()->getPrimitiveSizeInBits());
3049 case ICmpInst::ICMP_UGE:
3050 // True if LHS u>= RHS and RHS == high-bit-mask (2^7, 2^15, 2^31, etc)
3051 TrueIfSigned = true;
3052 return RHS->getValue().isSignBit();
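// For example, with i8 operands, 'icmp ugt %x, 127' and 'icmp slt %x, 0' are
// both pure sign-bit tests (TrueIfSigned == true in each case).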
3058 Instruction *InstCombiner::visitMul(BinaryOperator &I) {
3059 bool Changed = SimplifyCommutative(I);
3060 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3062 if (isa<UndefValue>(Op1)) // undef * X -> 0
3063 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
3065 // Simplify mul instructions with a constant RHS.
3066 if (Constant *Op1C = dyn_cast<Constant>(Op1)) {
3067 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1C)) {
3069 // ((X << C1)*C2) == (X * (C2 << C1))
3070 if (BinaryOperator *SI = dyn_cast<BinaryOperator>(Op0))
3071 if (SI->getOpcode() == Instruction::Shl)
3072 if (Constant *ShOp = dyn_cast<Constant>(SI->getOperand(1)))
3073 return BinaryOperator::CreateMul(SI->getOperand(0),
3074 ConstantExpr::getShl(CI, ShOp));
3076 if (CI->isZero())
3077 return ReplaceInstUsesWith(I, Op1C); // X * 0 == 0
3078 if (CI->equalsInt(1)) // X * 1 == X
3079 return ReplaceInstUsesWith(I, Op0);
3080 if (CI->isAllOnesValue()) // X * -1 == 0 - X
3081 return BinaryOperator::CreateNeg(Op0, I.getName());
3083 const APInt& Val = cast<ConstantInt>(CI)->getValue();
3084 if (Val.isPowerOf2()) { // Replace X*(2^C) with X << C
3085 return BinaryOperator::CreateShl(Op0,
3086 ConstantInt::get(Op0->getType(), Val.logBase2()));
3088 } else if (isa<VectorType>(Op1C->getType())) {
3089 if (Op1C->isNullValue())
3090 return ReplaceInstUsesWith(I, Op1C);
3092 if (ConstantVector *Op1V = dyn_cast<ConstantVector>(Op1C)) {
3093 if (Op1V->isAllOnesValue()) // X * -1 == 0 - X
3094 return BinaryOperator::CreateNeg(Op0, I.getName());
3096 // As above, vector X*splat(1) -> X in all defined cases.
3097 if (Constant *Splat = Op1V->getSplatValue()) {
3098 if (ConstantInt *CI = dyn_cast<ConstantInt>(Splat))
3099 if (CI->equalsInt(1))
3100 return ReplaceInstUsesWith(I, Op0);
3105 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0))
3106 if (Op0I->getOpcode() == Instruction::Add && Op0I->hasOneUse() &&
3107 isa<ConstantInt>(Op0I->getOperand(1)) && isa<ConstantInt>(Op1C)) {
3108 // Canonicalize (X+C1)*C2 -> X*C2+C1*C2.
3109 Value *Add = Builder->CreateMul(Op0I->getOperand(0), Op1C, "tmp");
3110 Value *C1C2 = Builder->CreateMul(Op1C, Op0I->getOperand(1));
3111 return BinaryOperator::CreateAdd(Add, C1C2);
3115 // Try to fold constant mul into select arguments.
3116 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
3117 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
3120 if (isa<PHINode>(Op0))
3121 if (Instruction *NV = FoldOpIntoPhi(I))
3125 if (Value *Op0v = dyn_castNegVal(Op0)) // -X * -Y = X*Y
3126 if (Value *Op1v = dyn_castNegVal(Op1))
3127 return BinaryOperator::CreateMul(Op0v, Op1v);
3129 // (X / Y) * Y = X - (X % Y)
3130 // (X / Y) * -Y = (X % Y) - X
3133 BinaryOperator *BO = dyn_cast<BinaryOperator>(Op0);
3135 (BO->getOpcode() != Instruction::UDiv &&
3136 BO->getOpcode() != Instruction::SDiv)) {
3138 BO = dyn_cast<BinaryOperator>(Op1);
3140 Value *Neg = dyn_castNegVal(Op1C);
3141 if (BO && BO->hasOneUse() &&
3142 (BO->getOperand(1) == Op1C || BO->getOperand(1) == Neg) &&
3143 (BO->getOpcode() == Instruction::UDiv ||
3144 BO->getOpcode() == Instruction::SDiv)) {
3145 Value *Op0BO = BO->getOperand(0), *Op1BO = BO->getOperand(1);
3147 // If the division is exact, X % Y is zero.
3148 if (SDivOperator *SDiv = dyn_cast<SDivOperator>(BO))
3149 if (SDiv->isExact()) {
3150 if (Op1BO == Op1C)
3151 return ReplaceInstUsesWith(I, Op0BO);
3152 return BinaryOperator::CreateNeg(Op0BO);
3153 }
3155 Value *Rem;
3156 if (BO->getOpcode() == Instruction::UDiv)
3157 Rem = Builder->CreateURem(Op0BO, Op1BO);
3158 else
3159 Rem = Builder->CreateSRem(Op0BO, Op1BO);
3160 Rem->takeName(BO);
3162 if (Op1BO == Op1C)
3163 return BinaryOperator::CreateSub(Op0BO, Rem);
3164 return BinaryOperator::CreateSub(Rem, Op0BO);
3168 /// i1 mul -> i1 and.
3169 if (I.getType() == Type::getInt1Ty(*Context))
3170 return BinaryOperator::CreateAnd(Op0, Op1);
3172 // X*(1 << Y) --> X << Y
3173 // (1 << Y)*X --> X << Y
3176 if (match(Op0, m_Shl(m_One(), m_Value(Y))))
3177 return BinaryOperator::CreateShl(Op1, Y);
3178 if (match(Op1, m_Shl(m_One(), m_Value(Y))))
3179 return BinaryOperator::CreateShl(Op0, Y);
3182 // If one of the operands of the multiply is a cast from a boolean value, then
3183 // we know the bool is either zero or one, so this is a 'masking' multiply.
3184 // X * Y (where Y is 0 or 1) -> X & (0-Y)
3185 if (!isa<VectorType>(I.getType())) {
3186 // -2 is "-1 << 1" so it is all bits set except the low one.
3187 APInt Negative2(I.getType()->getPrimitiveSizeInBits(), (uint64_t)-2, true);
3189 Value *BoolCast = 0, *OtherOp = 0;
3190 if (MaskedValueIsZero(Op0, Negative2))
3191 BoolCast = Op0, OtherOp = Op1;
3192 else if (MaskedValueIsZero(Op1, Negative2))
3193 BoolCast = Op1, OtherOp = Op0;
3195 if (BoolCast) {
3196 Value *V = Builder->CreateSub(Constant::getNullValue(I.getType()),
3197 BoolCast, "tmp");
3198 return BinaryOperator::CreateAnd(V, OtherOp);
3202 return Changed ? &I : 0;
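// Worked examples for the folds above (illustrative only, not from the
// original source; %x, %y are placeholder values):
//   mul i32 %x, 8                --> shl i32 %x, 3        (power-of-two constant)
//   mul i32 %x, -1               --> sub i32 0, %x        (negation)
//   mul i1 %a, %b                --> and i1 %a, %b        (i1 multiply)
//   mul i32 %x, (shl i32 1, %y)  --> shl i32 %x, %y       (multiply by 1 << Y)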
3205 Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
3206 bool Changed = SimplifyCommutative(I);
3207 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3209 // Simplify mul instructions with a constant RHS...
3210 if (Constant *Op1C = dyn_cast<Constant>(Op1)) {
3211 if (ConstantFP *Op1F = dyn_cast<ConstantFP>(Op1C)) {
3212 // "In IEEE floating point, x*1 is not equivalent to x for nans. However,
3213 // ANSI says we can drop signals, so we can do this anyway." (from GCC)
3214 if (Op1F->isExactlyValue(1.0))
3215 return ReplaceInstUsesWith(I, Op0); // Eliminate 'mul double %X, 1.0'
3216 } else if (isa<VectorType>(Op1C->getType())) {
3217 if (ConstantVector *Op1V = dyn_cast<ConstantVector>(Op1C)) {
3218 // As above, vector X*splat(1.0) -> X in all defined cases.
3219 if (Constant *Splat = Op1V->getSplatValue()) {
3220 if (ConstantFP *F = dyn_cast<ConstantFP>(Splat))
3221 if (F->isExactlyValue(1.0))
3222 return ReplaceInstUsesWith(I, Op0);
3227 // Try to fold constant mul into select arguments.
3228 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
3229 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
3230 return R;
3232 if (isa<PHINode>(Op0))
3233 if (Instruction *NV = FoldOpIntoPhi(I))
3234 return NV;
3237 if (Value *Op0v = dyn_castFNegVal(Op0)) // -X * -Y = X*Y
3238 if (Value *Op1v = dyn_castFNegVal(Op1))
3239 return BinaryOperator::CreateFMul(Op0v, Op1v);
3241 return Changed ? &I : 0;
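// Illustrative example (not from the original source): "fmul double %x, 1.0"
// is replaced by %x here. As the GCC-derived comment above notes, this may
// drop a signal from a signaling NaN, which is treated as acceptable.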
3244 /// SimplifyDivRemOfSelect - Try to fold a divide or remainder of a select
3245 /// instruction.
3246 bool InstCombiner::SimplifyDivRemOfSelect(BinaryOperator &I) {
3247 SelectInst *SI = cast<SelectInst>(I.getOperand(1));
3249 // div/rem X, (Cond ? 0 : Y) -> div/rem X, Y
3250 int NonNullOperand = -1;
3251 if (Constant *ST = dyn_cast<Constant>(SI->getOperand(1)))
3252 if (ST->isNullValue())
3253 NonNullOperand = 2;
3254 // div/rem X, (Cond ? Y : 0) -> div/rem X, Y
3255 if (Constant *ST = dyn_cast<Constant>(SI->getOperand(2)))
3256 if (ST->isNullValue())
3257 NonNullOperand = 1;
3259 if (NonNullOperand == -1)
3260 return false;
3262 Value *SelectCond = SI->getOperand(0);
3264 // Change the div/rem to use 'Y' instead of the select.
3265 I.setOperand(1, SI->getOperand(NonNullOperand));
3267 // Okay, we know we can replace the operand of the div/rem with 'Y' with no
3268 // problem. However, the select, or the condition of the select, may have
3269 // multiple uses. Based on our knowledge that the operand must be non-zero,
3270 // propagate the known value for the select into other uses of it, and
3271 // propagate a known value of the condition into its other users.
3273 // If the select and condition only have a single use, don't bother with this,
3274 // early exit.
3275 if (SI->use_empty() && SelectCond->hasOneUse())
3276 return true;
3278 // Scan the current block backward, looking for other uses of SI.
3279 BasicBlock::iterator BBI = &I, BBFront = I.getParent()->begin();
3281 while (BBI != BBFront) {
3282 --BBI;
3283 // If we found a call to a function, we can't assume it will return, so
3284 // information from below it cannot be propagated above it.
3285 if (isa<CallInst>(BBI) && !isa<IntrinsicInst>(BBI))
3286 break;
3288 // Replace uses of the select or its condition with the known values.
3289 for (Instruction::op_iterator I = BBI->op_begin(), E = BBI->op_end();
3292 *I = SI->getOperand(NonNullOperand);
3294 } else if (*I == SelectCond) {
3295 *I = NonNullOperand == 1 ? ConstantInt::getTrue(*Context) :
3296 ConstantInt::getFalse(*Context);
3301 // If we passed the instruction, quit looking for it.
3302 if (&*BBI == SI)
3303 SI = 0;
3304 if (&*BBI == SelectCond)
3305 SelectCond = 0;
3307 // If we ran out of things to eliminate, break out of the loop.
3308 if (SelectCond == 0 && SI == 0)
3316 /// This function implements the transforms on div instructions that work
3317 /// regardless of the kind of div instruction it is (udiv, sdiv, or fdiv). It is
3318 /// used by the visitors to those instructions.
3319 /// @brief Transforms common to all three div instructions
3320 Instruction *InstCombiner::commonDivTransforms(BinaryOperator &I) {
3321 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3323 // undef / X -> 0 for integer.
3324 // undef / X -> undef for FP (the undef could be a snan).
3325 if (isa<UndefValue>(Op0)) {
3326 if (Op0->getType()->isFPOrFPVector())
3327 return ReplaceInstUsesWith(I, Op0);
3328 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
3331 // X / undef -> undef
3332 if (isa<UndefValue>(Op1))
3333 return ReplaceInstUsesWith(I, Op1);
3338 /// This function implements the transforms common to both integer division
3339 /// instructions (udiv and sdiv). It is called by the visitors to those integer
3340 /// division instructions.
3341 /// @brief Common integer divide transforms
3342 Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) {
3343 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3345 // (sdiv X, X) --> 1, (udiv X, X) --> 1
3346 if (Op0 == Op1) {
3347 if (const VectorType *Ty = dyn_cast<VectorType>(I.getType())) {
3348 Constant *CI = ConstantInt::get(Ty->getElementType(), 1);
3349 std::vector<Constant*> Elts(Ty->getNumElements(), CI);
3350 return ReplaceInstUsesWith(I, ConstantVector::get(Elts));
3353 Constant *CI = ConstantInt::get(I.getType(), 1);
3354 return ReplaceInstUsesWith(I, CI);
3357 if (Instruction *Common = commonDivTransforms(I))
3358 return Common;
3360 // Handle cases involving: [su]div X, (select Cond, Y, Z)
3361 // This does not apply for fdiv.
3362 if (isa<SelectInst>(Op1) && SimplifyDivRemOfSelect(I))
3365 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
3367 if (RHS->equalsInt(1))
3368 return ReplaceInstUsesWith(I, Op0);
3370 // (X / C1) / C2 -> X / (C1*C2)
3371 if (Instruction *LHS = dyn_cast<Instruction>(Op0))
3372 if (Instruction::BinaryOps(LHS->getOpcode()) == I.getOpcode())
3373 if (ConstantInt *LHSRHS = dyn_cast<ConstantInt>(LHS->getOperand(1))) {
3374 if (MultiplyOverflows(RHS, LHSRHS,
3375 I.getOpcode()==Instruction::SDiv))
3376 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
3378 return BinaryOperator::Create(I.getOpcode(), LHS->getOperand(0),
3379 ConstantExpr::getMul(RHS, LHSRHS));
3382 if (!RHS->isZero()) { // avoid X udiv 0
3383 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
3384 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
3385 return R;
3386 if (isa<PHINode>(Op0))
3387 if (Instruction *NV = FoldOpIntoPhi(I))
3388 return NV;
3392 // 0 / X == 0, we don't need to preserve faults!
3393 if (ConstantInt *LHS = dyn_cast<ConstantInt>(Op0))
3394 if (LHS->equalsInt(0))
3395 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
3397 // It can't be division by zero, hence it must be division by one.
3398 if (I.getType() == Type::getInt1Ty(*Context))
3399 return ReplaceInstUsesWith(I, Op0);
3401 if (ConstantVector *Op1V = dyn_cast<ConstantVector>(Op1)) {
3402 if (ConstantInt *X = cast_or_null<ConstantInt>(Op1V->getSplatValue()))
3404 if (X->isOne()) // div X, 1 == X
3405 return ReplaceInstUsesWith(I, Op0);
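// Worked example for the constant folds above (illustrative only):
//   %t = udiv i32 %x, 4
//   %r = udiv i32 %t, 8      --> %r = udiv i32 %x, 32
// If C1*C2 would overflow the type, MultiplyOverflows fires and the whole
// expression is folded to 0 instead.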
3411 Instruction *InstCombiner::visitUDiv(BinaryOperator &I) {
3412 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3414 // Handle the integer div common cases
3415 if (Instruction *Common = commonIDivTransforms(I))
3416 return Common;
3418 if (ConstantInt *C = dyn_cast<ConstantInt>(Op1)) {
3419 // X udiv 2^C -> X >> C
3420 // Check to see if this is an unsigned division with an exact power of 2,
3421 // if so, convert to a right shift.
3422 if (C->getValue().isPowerOf2()) // 0 not included in isPowerOf2
3423 return BinaryOperator::CreateLShr(Op0,
3424 ConstantInt::get(Op0->getType(), C->getValue().logBase2()));
3426 // X udiv C, where C >= signbit
3427 if (C->getValue().isNegative()) {
3428 Value *IC = Builder->CreateICmpULT( Op0, C);
3429 return SelectInst::Create(IC, Constant::getNullValue(I.getType()),
3430 ConstantInt::get(I.getType(), 1));
3434 // X udiv (C1 << N), where C1 is "1<<C2" --> X >> (N+C2)
3435 if (BinaryOperator *RHSI = dyn_cast<BinaryOperator>(I.getOperand(1))) {
3436 if (RHSI->getOpcode() == Instruction::Shl &&
3437 isa<ConstantInt>(RHSI->getOperand(0))) {
3438 const APInt& C1 = cast<ConstantInt>(RHSI->getOperand(0))->getValue();
3439 if (C1.isPowerOf2()) {
3440 Value *N = RHSI->getOperand(1);
3441 const Type *NTy = N->getType();
3442 if (uint32_t C2 = C1.logBase2())
3443 N = Builder->CreateAdd(N, ConstantInt::get(NTy, C2), "tmp");
3444 return BinaryOperator::CreateLShr(Op0, N);
3449 // udiv X, (Select Cond, C1, C2) --> Select Cond, (shr X, C1), (shr X, C2)
3450 // where C1&C2 are powers of two.
3451 if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
3452 if (ConstantInt *STO = dyn_cast<ConstantInt>(SI->getOperand(1)))
3453 if (ConstantInt *SFO = dyn_cast<ConstantInt>(SI->getOperand(2))) {
3454 const APInt &TVA = STO->getValue(), &FVA = SFO->getValue();
3455 if (TVA.isPowerOf2() && FVA.isPowerOf2()) {
3456 // Compute the shift amounts
3457 uint32_t TSA = TVA.logBase2(), FSA = FVA.logBase2();
3458 // Construct the "on true" case of the select
3459 Constant *TC = ConstantInt::get(Op0->getType(), TSA);
3460 Value *TSI = Builder->CreateLShr(Op0, TC, SI->getName()+".t");
3462 // Construct the "on false" case of the select
3463 Constant *FC = ConstantInt::get(Op0->getType(), FSA);
3464 Value *FSI = Builder->CreateLShr(Op0, FC, SI->getName()+".f");
3466 // construct the select instruction and return it.
3467 return SelectInst::Create(SI->getOperand(0), TSI, FSI, SI->getName());
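// Worked examples for the udiv folds above (illustrative only):
//   udiv i32 %x, 16                --> lshr i32 %x, 4
//   udiv i32 %x, (shl i32 4, %n)   --> lshr i32 %x, (%n + 2)
//   udiv i32 %x, (select i1 %c, i32 8, i32 32)
//       --> select i1 %c, (lshr i32 %x, 3), (lshr i32 %x, 5)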
3473 Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
3474 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3476 // Handle the integer div common cases
3477 if (Instruction *Common = commonIDivTransforms(I))
3478 return Common;
3480 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
3482 if (RHS->isAllOnesValue())
3483 return BinaryOperator::CreateNeg(Op0);
3485 // sdiv X, C --> ashr X, log2(C)
3486 if (cast<SDivOperator>(&I)->isExact() &&
3487 RHS->getValue().isNonNegative() &&
3488 RHS->getValue().isPowerOf2()) {
3489 Value *ShAmt = llvm::ConstantInt::get(RHS->getType(),
3490 RHS->getValue().exactLogBase2());
3491 return BinaryOperator::CreateAShr(Op0, ShAmt, I.getName());
3494 // -X/C --> X/-C provided the negation doesn't overflow.
3495 if (SubOperator *Sub = dyn_cast<SubOperator>(Op0))
3496 if (isa<Constant>(Sub->getOperand(0)) &&
3497 cast<Constant>(Sub->getOperand(0))->isNullValue() &&
3498 Sub->hasNoSignedWrap())
3499 return BinaryOperator::CreateSDiv(Sub->getOperand(1),
3500 ConstantExpr::getNeg(RHS));
3503 // If the sign bits of both operands are zero (i.e. we can prove they are
3504 // unsigned inputs), turn this into a udiv.
3505 if (I.getType()->isInteger()) {
3506 APInt Mask(APInt::getSignBit(I.getType()->getPrimitiveSizeInBits()));
3507 if (MaskedValueIsZero(Op0, Mask)) {
3508 if (MaskedValueIsZero(Op1, Mask)) {
3509 // X sdiv Y -> X udiv Y, iff X and Y don't have sign bit set
3510 return BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
3512 ConstantInt *ShiftedInt;
3513 if (match(Op1, m_Shl(m_ConstantInt(ShiftedInt), m_Value())) &&
3514 ShiftedInt->getValue().isPowerOf2()) {
3515 // X sdiv (1 << Y) -> X udiv (1 << Y) ( -> X u>> Y)
3516 // Safe because the only negative value (1 << Y) can take on is
3517 // INT_MIN, and X sdiv INT_MIN == X udiv INT_MIN == 0 if X doesn't have
3518 // the sign bit set.
3519 return BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
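// Illustrative examples for the sdiv folds above (not from the original source):
//   sdiv i32 %x, -1        --> sub i32 0, %x
//   sdiv exact i32 %x, 8   --> ashr i32 %x, 3
//   sdiv i32 %x, %y        --> udiv i32 %x, %y, when the sign bits of both
//                              %x and %y are known to be zero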
3527 Instruction *InstCombiner::visitFDiv(BinaryOperator &I) {
3528 return commonDivTransforms(I);
3531 /// This function implements the transforms on rem instructions that work
3532 /// regardless of the kind of rem instruction it is (urem, srem, or frem). It
3533 /// is used by the visitors to those instructions.
3534 /// @brief Transforms common to all three rem instructions
3535 Instruction *InstCombiner::commonRemTransforms(BinaryOperator &I) {
3536 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3538 if (isa<UndefValue>(Op0)) { // undef % X -> 0
3539 if (I.getType()->isFPOrFPVector())
3540 return ReplaceInstUsesWith(I, Op0); // undef % X -> undef for FP (undef could be an SNaN)
3541 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
3543 if (isa<UndefValue>(Op1))
3544 return ReplaceInstUsesWith(I, Op1); // X % undef -> undef
3546 // Handle cases involving: rem X, (select Cond, Y, Z)
3547 if (isa<SelectInst>(Op1) && SimplifyDivRemOfSelect(I))
3553 /// This function implements the transforms common to both integer remainder
3554 /// instructions (urem and srem). It is called by the visitors to those integer
3555 /// remainder instructions.
3556 /// @brief Common integer remainder transforms
3557 Instruction *InstCombiner::commonIRemTransforms(BinaryOperator &I) {
3558 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3560 if (Instruction *common = commonRemTransforms(I))
3561 return common;
3563 // 0 % X == 0 for integer, we don't need to preserve faults!
3564 if (Constant *LHS = dyn_cast<Constant>(Op0))
3565 if (LHS->isNullValue())
3566 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
3568 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
3569 // X % 0 == undef, we don't need to preserve faults!
3570 if (RHS->equalsInt(0))
3571 return ReplaceInstUsesWith(I, UndefValue::get(I.getType()));
3573 if (RHS->equalsInt(1)) // X % 1 == 0
3574 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
3576 if (Instruction *Op0I = dyn_cast<Instruction>(Op0)) {
3577 if (SelectInst *SI = dyn_cast<SelectInst>(Op0I)) {
3578 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
3579 return R;
3580 } else if (isa<PHINode>(Op0I)) {
3581 if (Instruction *NV = FoldOpIntoPhi(I))
3582 return NV;
3585 // See if we can fold away this rem instruction.
3586 if (SimplifyDemandedInstructionBits(I))
3594 Instruction *InstCombiner::visitURem(BinaryOperator &I) {
3595 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3597 if (Instruction *common = commonIRemTransforms(I))
3598 return common;
3600 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
3601 // X urem 2^C -> X & (2^C - 1)
3602 // Check to see if this is an unsigned remainder with an exact power of 2,
3603 // if so, convert to a bitwise and.
3604 if (ConstantInt *C = dyn_cast<ConstantInt>(RHS))
3605 if (C->getValue().isPowerOf2())
3606 return BinaryOperator::CreateAnd(Op0, SubOne(C));
3609 if (Instruction *RHSI = dyn_cast<Instruction>(I.getOperand(1))) {
3610 // Turn A % (C << N), where C is 2^k, into A & ((C << N)-1)
3611 if (RHSI->getOpcode() == Instruction::Shl &&
3612 isa<ConstantInt>(RHSI->getOperand(0))) {
3613 if (cast<ConstantInt>(RHSI->getOperand(0))->getValue().isPowerOf2()) {
3614 Constant *N1 = Constant::getAllOnesValue(I.getType());
3615 Value *Add = Builder->CreateAdd(RHSI, N1, "tmp");
3616 return BinaryOperator::CreateAnd(Op0, Add);
3621 // urem X, (select Cond, 2^C1, 2^C2) --> select Cond, (and X, C1), (and X, C2)
3622 // where C1&C2 are powers of two.
3623 if (SelectInst *SI = dyn_cast<SelectInst>(Op1)) {
3624 if (ConstantInt *STO = dyn_cast<ConstantInt>(SI->getOperand(1)))
3625 if (ConstantInt *SFO = dyn_cast<ConstantInt>(SI->getOperand(2))) {
3626 // STO == 0 and SFO == 0 handled above.
3627 if ((STO->getValue().isPowerOf2()) &&
3628 (SFO->getValue().isPowerOf2())) {
3629 Value *TrueAnd = Builder->CreateAnd(Op0, SubOne(STO),
3630 SI->getName()+".t");
3631 Value *FalseAnd = Builder->CreateAnd(Op0, SubOne(SFO),
3632 SI->getName()+".f");
3633 return SelectInst::Create(SI->getOperand(0), TrueAnd, FalseAnd);
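// Worked examples for the urem folds above (illustrative only):
//   urem i32 %x, 8                 --> and i32 %x, 7
//   urem i32 %x, (shl i32 4, %n)   --> and i32 %x, ((4 << %n) - 1)
//   urem i32 %x, (select i1 %c, i32 16, i32 4)
//       --> select i1 %c, (and i32 %x, 15), (and i32 %x, 3)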
3641 Instruction *InstCombiner::visitSRem(BinaryOperator &I) {
3642 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3644 // Handle the integer rem common cases
3645 if (Instruction *Common = commonIRemTransforms(I))
3646 return Common;
3648 if (Value *RHSNeg = dyn_castNegVal(Op1))
3649 if (!isa<Constant>(RHSNeg) ||
3650 (isa<ConstantInt>(RHSNeg) &&
3651 cast<ConstantInt>(RHSNeg)->getValue().isStrictlyPositive())) {
3652 // X % -Y -> X % Y
3653 Worklist.AddValue(I.getOperand(1));
3654 I.setOperand(1, RHSNeg);
3655 return &I;
3658 // If the sign bits of both operands are zero (i.e. we can prove they are
3659 // unsigned inputs), turn this into a urem.
3660 if (I.getType()->isInteger()) {
3661 APInt Mask(APInt::getSignBit(I.getType()->getPrimitiveSizeInBits()));
3662 if (MaskedValueIsZero(Op1, Mask) && MaskedValueIsZero(Op0, Mask)) {
3663 // X srem Y -> X urem Y, iff X and Y don't have sign bit set
3664 return BinaryOperator::CreateURem(Op0, Op1, I.getName());
3668 // If it's a constant vector, flip any negative values positive.
3669 if (ConstantVector *RHSV = dyn_cast<ConstantVector>(Op1)) {
3670 unsigned VWidth = RHSV->getNumOperands();
3672 bool hasNegative = false;
3673 for (unsigned i = 0; !hasNegative && i != VWidth; ++i)
3674 if (ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV->getOperand(i)))
3675 if (RHS->getValue().isNegative())
3676 hasNegative = true;
3678 if (hasNegative) {
3679 std::vector<Constant *> Elts(VWidth);
3680 for (unsigned i = 0; i != VWidth; ++i) {
3681 if (ConstantInt *RHS = dyn_cast<ConstantInt>(RHSV->getOperand(i))) {
3682 if (RHS->getValue().isNegative())
3683 Elts[i] = cast<ConstantInt>(ConstantExpr::getNeg(RHS));
3689 Constant *NewRHSV = ConstantVector::get(Elts);
3690 if (NewRHSV != RHSV) {
3691 Worklist.AddValue(I.getOperand(1));
3692 I.setOperand(1, NewRHSV);
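// Illustrative note (not from the original source): the remainder of a signed
// division takes the sign of the dividend, so the divisor's sign is irrelevant;
// e.g. "srem i32 %x, -8" computes the same value as "srem i32 %x, 8", which is
// why negative constants are flipped positive above.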
3701 Instruction *InstCombiner::visitFRem(BinaryOperator &I) {
3702 return commonRemTransforms(I);
3705 // isOneBitSet - Return true if there is exactly one bit set in the specified
3706 // constant.
3707 static bool isOneBitSet(const ConstantInt *CI) {
3708 return CI->getValue().isPowerOf2();
3711 // isHighOnes - Return true if the constant is of the form 1+0+.
3712 // This is the same as lowones(~X).
3713 static bool isHighOnes(const ConstantInt *CI) {
3714 return (~CI->getValue() + 1).isPowerOf2();
3717 /// getICmpCode - Encode a icmp predicate into a three bit mask. These bits
3718 /// are carefully arranged to allow folding of expressions such as:
3720 /// (A < B) | (A > B) --> (A != B)
3722 /// Note that this is only valid if the first and second predicates have the
3723 /// same sign. It is illegal to do: (A u< B) | (A s> B)
3725 /// Three bits are used to represent the condition, as follows:
3726 /// bit 0: A > B
3727 /// bit 1: A == B
3728 /// bit 2: A < B
3730 /// <=> Value Definition
3731 /// 000 0 Always false
3732 /// 001 1 A > B
3733 /// 010 2 A == B
3734 /// 011 3 A >= B
3735 /// 100 4 A < B
3736 /// 101 5 A != B
3737 /// 110 6 A <= B
3738 /// 111 7 Always true
3740 static unsigned getICmpCode(const ICmpInst *ICI) {
3741 switch (ICI->getPredicate()) {
3743 case ICmpInst::ICMP_UGT: return 1; // 001
3744 case ICmpInst::ICMP_SGT: return 1; // 001
3745 case ICmpInst::ICMP_EQ: return 2; // 010
3746 case ICmpInst::ICMP_UGE: return 3; // 011
3747 case ICmpInst::ICMP_SGE: return 3; // 011
3748 case ICmpInst::ICMP_ULT: return 4; // 100
3749 case ICmpInst::ICMP_SLT: return 4; // 100
3750 case ICmpInst::ICMP_NE: return 5; // 101
3751 case ICmpInst::ICMP_ULE: return 6; // 110
3752 case ICmpInst::ICMP_SLE: return 6; // 110
3755 llvm_unreachable("Invalid ICmp predicate!");
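// Worked example of the encoding (illustrative only): folding
// (icmp slt A, B) | (icmp sgt A, B) ORs the codes 100 and 001 to give 101,
// which decodes back to icmp ne A, B.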
3760 /// getFCmpCode - Similar to getICmpCode but for FCmpInst. This encodes a fcmp
3761 /// predicate into a three bit mask. It also returns whether it is an ordered
3762 /// predicate by reference.
3763 static unsigned getFCmpCode(FCmpInst::Predicate CC, bool &isOrdered) {
3764 isOrdered = false;
3765 switch (CC) {
3766 case FCmpInst::FCMP_ORD: isOrdered = true; return 0; // 000
3767 case FCmpInst::FCMP_UNO: return 0; // 000
3768 case FCmpInst::FCMP_OGT: isOrdered = true; return 1; // 001
3769 case FCmpInst::FCMP_UGT: return 1; // 001
3770 case FCmpInst::FCMP_OEQ: isOrdered = true; return 2; // 010
3771 case FCmpInst::FCMP_UEQ: return 2; // 010
3772 case FCmpInst::FCMP_OGE: isOrdered = true; return 3; // 011
3773 case FCmpInst::FCMP_UGE: return 3; // 011
3774 case FCmpInst::FCMP_OLT: isOrdered = true; return 4; // 100
3775 case FCmpInst::FCMP_ULT: return 4; // 100
3776 case FCmpInst::FCMP_ONE: isOrdered = true; return 5; // 101
3777 case FCmpInst::FCMP_UNE: return 5; // 101
3778 case FCmpInst::FCMP_OLE: isOrdered = true; return 6; // 110
3779 case FCmpInst::FCMP_ULE: return 6; // 110
3782 // Not expecting FCMP_FALSE and FCMP_TRUE;
3783 llvm_unreachable("Unexpected FCmp predicate!");
3788 /// getICmpValue - This is the complement of getICmpCode, which turns an
3789 /// opcode and two operands into either a constant true or false, or a brand
3790 /// new ICmp instruction. The sign is passed in to determine which kind
3791 /// of predicate to use in the new icmp instruction.
3792 static Value *getICmpValue(bool sign, unsigned code, Value *LHS, Value *RHS,
3793 LLVMContext *Context) {
3795 default: llvm_unreachable("Illegal ICmp code!");
3796 case 0: return ConstantInt::getFalse(*Context);
3799 return new ICmpInst(ICmpInst::ICMP_SGT, LHS, RHS);
3801 return new ICmpInst(ICmpInst::ICMP_UGT, LHS, RHS);
3802 case 2: return new ICmpInst(ICmpInst::ICMP_EQ, LHS, RHS);
3805 return new ICmpInst(ICmpInst::ICMP_SGE, LHS, RHS);
3807 return new ICmpInst(ICmpInst::ICMP_UGE, LHS, RHS);
3810 return new ICmpInst(ICmpInst::ICMP_SLT, LHS, RHS);
3812 return new ICmpInst(ICmpInst::ICMP_ULT, LHS, RHS);
3813 case 5: return new ICmpInst(ICmpInst::ICMP_NE, LHS, RHS);
3816 return new ICmpInst(ICmpInst::ICMP_SLE, LHS, RHS);
3818 return new ICmpInst(ICmpInst::ICMP_ULE, LHS, RHS);
3819 case 7: return ConstantInt::getTrue(*Context);
3823 /// getFCmpValue - This is the complement of getFCmpCode, which turns an
3824 /// opcode and two operands into either a constant or a new FCmp instruction.
3825 /// isordered is passed in to determine which kind of predicate to use in the new fcmp instruction.
3826 static Value *getFCmpValue(bool isordered, unsigned code,
3827 Value *LHS, Value *RHS, LLVMContext *Context) {
3829 default: llvm_unreachable("Illegal FCmp code!");
3832 return new FCmpInst(FCmpInst::FCMP_ORD, LHS, RHS);
3834 return new FCmpInst(FCmpInst::FCMP_UNO, LHS, RHS);
3837 return new FCmpInst(FCmpInst::FCMP_OGT, LHS, RHS);
3839 return new FCmpInst(FCmpInst::FCMP_UGT, LHS, RHS);
3842 return new FCmpInst(FCmpInst::FCMP_OEQ, LHS, RHS);
3844 return new FCmpInst(FCmpInst::FCMP_UEQ, LHS, RHS);
3847 return new FCmpInst(FCmpInst::FCMP_OGE, LHS, RHS);
3849 return new FCmpInst(FCmpInst::FCMP_UGE, LHS, RHS);
3852 return new FCmpInst(FCmpInst::FCMP_OLT, LHS, RHS);
3854 return new FCmpInst(FCmpInst::FCMP_ULT, LHS, RHS);
3857 return new FCmpInst(FCmpInst::FCMP_ONE, LHS, RHS);
3859 return new FCmpInst(FCmpInst::FCMP_UNE, LHS, RHS);
3862 return new FCmpInst(FCmpInst::FCMP_OLE, LHS, RHS);
3864 return new FCmpInst(FCmpInst::FCMP_ULE, LHS, RHS);
3865 case 7: return ConstantInt::getTrue(*Context);
3869 /// PredicatesFoldable - Return true if both predicates match sign or if at
3870 /// least one of them is an equality comparison (which is signless).
3871 static bool PredicatesFoldable(ICmpInst::Predicate p1, ICmpInst::Predicate p2) {
3872 return (CmpInst::isSigned(p1) == CmpInst::isSigned(p2)) ||
3873 (CmpInst::isSigned(p1) && ICmpInst::isEquality(p2)) ||
3874 (CmpInst::isSigned(p2) && ICmpInst::isEquality(p1));
3878 // FoldICmpLogical - Implements (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B)
3879 struct FoldICmpLogical {
3882 ICmpInst::Predicate pred;
3883 FoldICmpLogical(InstCombiner &ic, ICmpInst *ICI)
3884 : IC(ic), LHS(ICI->getOperand(0)), RHS(ICI->getOperand(1)),
3885 pred(ICI->getPredicate()) {}
3886 bool shouldApply(Value *V) const {
3887 if (ICmpInst *ICI = dyn_cast<ICmpInst>(V))
3888 if (PredicatesFoldable(pred, ICI->getPredicate()))
3889 return ((ICI->getOperand(0) == LHS && ICI->getOperand(1) == RHS) ||
3890 (ICI->getOperand(0) == RHS && ICI->getOperand(1) == LHS));
3893 Instruction *apply(Instruction &Log) const {
3894 ICmpInst *ICI = cast<ICmpInst>(Log.getOperand(0));
3895 if (ICI->getOperand(0) != LHS) {
3896 assert(ICI->getOperand(1) == LHS);
3897 ICI->swapOperands(); // Swap the LHS and RHS of the ICmp
3900 ICmpInst *RHSICI = cast<ICmpInst>(Log.getOperand(1));
3901 unsigned LHSCode = getICmpCode(ICI);
3902 unsigned RHSCode = getICmpCode(RHSICI);
3904 switch (Log.getOpcode()) {
3905 case Instruction::And: Code = LHSCode & RHSCode; break;
3906 case Instruction::Or: Code = LHSCode | RHSCode; break;
3907 case Instruction::Xor: Code = LHSCode ^ RHSCode; break;
3908 default: llvm_unreachable("Illegal logical opcode!"); return 0;
3911 bool isSigned = RHSICI->isSigned() || ICI->isSigned();
3912 Value *RV = getICmpValue(isSigned, Code, LHS, RHS, IC.getContext());
3913 if (Instruction *I = dyn_cast<Instruction>(RV))
3915 // Otherwise, it's a constant boolean value...
3916 return IC.ReplaceInstUsesWith(Log, RV);
3919 } // end anonymous namespace
3921 // OptAndOp - This handles expressions of the form ((val OP C1) & C2). Where
3922 // the Op parameter is 'OP', OpRHS is 'C1', and AndRHS is 'C2'. Op is
3923 // guaranteed to be a binary operator.
3924 Instruction *InstCombiner::OptAndOp(Instruction *Op,
3926 ConstantInt *AndRHS,
3927 BinaryOperator &TheAnd) {
3928 Value *X = Op->getOperand(0);
3929 Constant *Together = 0;
3931 Together = ConstantExpr::getAnd(AndRHS, OpRHS);
3933 switch (Op->getOpcode()) {
3934 case Instruction::Xor:
3935 if (Op->hasOneUse()) {
3936 // (X ^ C1) & C2 --> (X & C2) ^ (C1&C2)
3937 Value *And = Builder->CreateAnd(X, AndRHS);
3939 return BinaryOperator::CreateXor(And, Together);
3942 case Instruction::Or:
3943 if (Together == AndRHS) // (X | C) & C --> C
3944 return ReplaceInstUsesWith(TheAnd, AndRHS);
3946 if (Op->hasOneUse() && Together != OpRHS) {
3947 // (X | C1) & C2 --> (X | (C1&C2)) & C2
3948 Value *Or = Builder->CreateOr(X, Together);
3950 return BinaryOperator::CreateAnd(Or, AndRHS);
3953 case Instruction::Add:
3954 if (Op->hasOneUse()) {
3955 // Adding one to a single-bit bit-field should be turned into an XOR
3956 // of that bit. The first thing to check is whether this AND is with a
3957 // single-bit constant.
3958 const APInt& AndRHSV = cast<ConstantInt>(AndRHS)->getValue();
3960 // If there is only one bit set...
3961 if (isOneBitSet(cast<ConstantInt>(AndRHS))) {
3962 // Ok, at this point, we know that we are masking the result of the
3963 // ADD down to exactly one bit. If the constant we are adding has
3964 // no bits set below this bit, then we can eliminate the ADD.
3965 const APInt& AddRHS = cast<ConstantInt>(OpRHS)->getValue();
3967 // Check to see if any bits below the one bit set in AndRHSV are set.
3968 if ((AddRHS & (AndRHSV-1)) == 0) {
3969 // If not, the only thing that can affect the output of the AND is
3970 // the bit specified by AndRHSV. If that bit is set, the effect of
3971 // the XOR is to toggle the bit. If it is clear, then the ADD has
3972 // no effect.
3973 if ((AddRHS & AndRHSV) == 0) { // Bit is not set, noop
3974 TheAnd.setOperand(0, X);
3977 // Pull the XOR out of the AND.
3978 Value *NewAnd = Builder->CreateAnd(X, AndRHS);
3979 NewAnd->takeName(Op);
3980 return BinaryOperator::CreateXor(NewAnd, AndRHS);
3987 case Instruction::Shl: {
3988 // We know that the AND will not produce any of the bits shifted in, so if
3989 // the anded constant includes them, clear them now!
3991 uint32_t BitWidth = AndRHS->getType()->getBitWidth();
3992 uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
3993 APInt ShlMask(APInt::getHighBitsSet(BitWidth, BitWidth-OpRHSVal));
3994 ConstantInt *CI = ConstantInt::get(*Context, AndRHS->getValue() & ShlMask);
3996 if (CI->getValue() == ShlMask) {
3997 // Masking out bits that the shift already masks
3998 return ReplaceInstUsesWith(TheAnd, Op); // No need for the and.
3999 } else if (CI != AndRHS) { // Reducing bits set in and.
4000 TheAnd.setOperand(1, CI);
4005 case Instruction::LShr:
4007 // We know that the AND will not produce any of the bits shifted in, so if
4008 // the anded constant includes them, clear them now! This only applies to
4009 // unsigned shifts, because a signed shr may bring in set bits!
4011 uint32_t BitWidth = AndRHS->getType()->getBitWidth();
4012 uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
4013 APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal));
4014 ConstantInt *CI = ConstantInt::get(*Context, AndRHS->getValue() & ShrMask);
4016 if (CI->getValue() == ShrMask) {
4017 // Masking out bits that the shift already masks.
4018 return ReplaceInstUsesWith(TheAnd, Op);
4019 } else if (CI != AndRHS) {
4020 TheAnd.setOperand(1, CI); // Reduce bits set in and cst.
4025 case Instruction::AShr:
4027 // See if this is shifting in some sign extension, then masking it out
4028 // with an and.
4029 if (Op->hasOneUse()) {
4030 uint32_t BitWidth = AndRHS->getType()->getBitWidth();
4031 uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
4032 APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal));
4033 Constant *C = ConstantInt::get(*Context, AndRHS->getValue() & ShrMask);
4034 if (C == AndRHS) { // Masking out bits shifted in.
4035 // (Val ashr C1) & C2 -> (Val lshr C1) & C2
4036 // Make the argument unsigned.
4037 Value *ShVal = Op->getOperand(0);
4038 ShVal = Builder->CreateLShr(ShVal, OpRHS, Op->getName());
4039 return BinaryOperator::CreateAnd(ShVal, AndRHS, TheAnd.getName());
4048 /// InsertRangeTest - Emit a computation of: (V >= Lo && V < Hi) if Inside is
4049 /// true, otherwise (V < Lo || V >= Hi). In practice, we emit the more efficient
4050 /// (V-Lo) <u Hi-Lo. This method expects that Lo <= Hi. isSigned indicates
4051 /// whether to treat V, Lo, and Hi as signed or not. IB is the location to
4052 /// insert new instructions.
4053 Instruction *InstCombiner::InsertRangeTest(Value *V, Constant *Lo, Constant *Hi,
4054 bool isSigned, bool Inside,
4055 Instruction &IB) {
4056 assert(cast<ConstantInt>(ConstantExpr::getICmp((isSigned ?
4057 ICmpInst::ICMP_SLE:ICmpInst::ICMP_ULE), Lo, Hi))->getZExtValue() &&
4058 "Lo is not <= Hi in range emission code!");
4060 if (Inside) {
4061 if (Lo == Hi) // Trivially false.
4062 return new ICmpInst(ICmpInst::ICMP_NE, V, V);
4064 // V >= Min && V < Hi --> V < Hi
4065 if (cast<ConstantInt>(Lo)->isMinValue(isSigned)) {
4066 ICmpInst::Predicate pred = (isSigned ?
4067 ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT);
4068 return new ICmpInst(pred, V, Hi);
4071 // Emit V-Lo <u Hi-Lo
4072 Constant *NegLo = ConstantExpr::getNeg(Lo);
4073 Value *Add = Builder->CreateAdd(V, NegLo, V->getName()+".off");
4074 Constant *UpperBound = ConstantExpr::getAdd(NegLo, Hi);
4075 return new ICmpInst(ICmpInst::ICMP_ULT, Add, UpperBound);
4078 if (Lo == Hi) // Trivially true.
4079 return new ICmpInst(ICmpInst::ICMP_EQ, V, V);
4081 // V < Min || V >= Hi -> V > Hi-1
4082 Hi = SubOne(cast<ConstantInt>(Hi));
4083 if (cast<ConstantInt>(Lo)->isMinValue(isSigned)) {
4084 ICmpInst::Predicate pred = (isSigned ?
4085 ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT);
4086 return new ICmpInst(pred, V, Hi);
4089 // Emit V-Lo >u Hi-1-Lo
4090 // Note that Hi has already had one subtracted from it, above.
4091 ConstantInt *NegLo = cast<ConstantInt>(ConstantExpr::getNeg(Lo));
4092 Value *Add = Builder->CreateAdd(V, NegLo, V->getName()+".off");
4093 Constant *LowerBound = ConstantExpr::getAdd(NegLo, Hi);
4094 return new ICmpInst(ICmpInst::ICMP_UGT, Add, LowerBound);
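// Worked example (illustrative only): with Lo = 5, Hi = 10, isSigned = false,
// and Inside = true, the emitted test for "5 <= V && V < 10" is
//   %off = add i32 %V, -5
//   %in  = icmp ult i32 %off, 5
// i.e. a single unsigned comparison against Hi-Lo.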
4097 // isRunOfOnes - Returns true iff Val consists of one contiguous run of 1s with
4098 // any number of 0s on either side. The 1s are allowed to wrap from LSB to
4099 // MSB, so 0x000FFF0, 0x0000FFFF, and 0xFF0000FF are all runs. 0x0F0F0000 is
4100 // not, since all 1s are not contiguous.
4101 static bool isRunOfOnes(ConstantInt *Val, uint32_t &MB, uint32_t &ME) {
4102 const APInt& V = Val->getValue();
4103 uint32_t BitWidth = Val->getType()->getBitWidth();
4104 if (!APIntOps::isShiftedMask(BitWidth, V)) return false;
4106 // look for the first zero bit after the run of ones
4107 MB = BitWidth - ((V - 1) ^ V).countLeadingZeros();
4108 // look for the first non-zero bit
4109 ME = V.getActiveBits();
4113 /// FoldLogicalPlusAnd - This is part of an expression (LHS +/- RHS) & Mask,
4114 /// where isSub determines whether the operator is a sub. If we can fold one of
4115 /// the following xforms:
4117 /// ((A & N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == Mask
4118 /// ((A | N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == 0
4119 /// ((A ^ N) +/- B) & Mask -> (A +/- B) & Mask iff N&Mask == 0
4121 /// return (A +/- B).
4123 Value *InstCombiner::FoldLogicalPlusAnd(Value *LHS, Value *RHS,
4124 ConstantInt *Mask, bool isSub,
4126 Instruction *LHSI = dyn_cast<Instruction>(LHS);
4127 if (!LHSI || LHSI->getNumOperands() != 2 ||
4128 !isa<ConstantInt>(LHSI->getOperand(1))) return 0;
4130 ConstantInt *N = cast<ConstantInt>(LHSI->getOperand(1));
4132 switch (LHSI->getOpcode()) {
4134 case Instruction::And:
4135 if (ConstantExpr::getAnd(N, Mask) == Mask) {
4136 // If the AndRHS is a power of two minus one (0+1+), this is simple.
4137 if ((Mask->getValue().countLeadingZeros() +
4138 Mask->getValue().countPopulation()) ==
4139 Mask->getValue().getBitWidth())
4142 // Otherwise, if Mask is 0+1+0+, and if B is known to have the low 0+
4143 // part, we don't need any explicit masks to take them out of A. If that
4144 // is all N is, ignore it.
4145 uint32_t MB = 0, ME = 0;
4146 if (isRunOfOnes(Mask, MB, ME)) { // begin/end bit of run, inclusive
4147 uint32_t BitWidth = cast<IntegerType>(RHS->getType())->getBitWidth();
4148 APInt Mask(APInt::getLowBitsSet(BitWidth, MB-1));
4149 if (MaskedValueIsZero(RHS, Mask))
4154 case Instruction::Or:
4155 case Instruction::Xor:
4156 // If the AndRHS is a power of two minus one (0+1+), and N&Mask == 0
4157 if ((Mask->getValue().countLeadingZeros() +
4158 Mask->getValue().countPopulation()) == Mask->getValue().getBitWidth()
4159 && ConstantExpr::getAnd(N, Mask)->isNullValue())
4165 return Builder->CreateSub(LHSI->getOperand(0), RHS, "fold");
4166 return Builder->CreateAdd(LHSI->getOperand(0), RHS, "fold");
4169 /// FoldAndOfICmps - Fold (icmp)&(icmp) if possible.
4170 Instruction *InstCombiner::FoldAndOfICmps(Instruction &I,
4171 ICmpInst *LHS, ICmpInst *RHS) {
4173 ConstantInt *LHSCst, *RHSCst;
4174 ICmpInst::Predicate LHSCC, RHSCC;
4176 // This only handles icmp of constants: (icmp1 A, C1) & (icmp2 B, C2).
4177 if (!match(LHS, m_ICmp(LHSCC, m_Value(Val),
4178 m_ConstantInt(LHSCst))) ||
4179 !match(RHS, m_ICmp(RHSCC, m_Value(Val2),
4180 m_ConstantInt(RHSCst))))
4183 if (LHSCst == RHSCst && LHSCC == RHSCC) {
4184 // (icmp ult A, C) & (icmp ult B, C) --> (icmp ult (A|B), C)
4185 // where C is a power of 2
4186 if (LHSCC == ICmpInst::ICMP_ULT &&
4187 LHSCst->getValue().isPowerOf2()) {
4188 Value *NewOr = Builder->CreateOr(Val, Val2);
4189 return new ICmpInst(LHSCC, NewOr, LHSCst);
4192 // (icmp eq A, 0) & (icmp eq B, 0) --> (icmp eq (A|B), 0)
4193 if (LHSCC == ICmpInst::ICMP_EQ && LHSCst->isZero()) {
4194 Value *NewOr = Builder->CreateOr(Val, Val2);
4195 return new ICmpInst(LHSCC, NewOr, LHSCst);
4199 // From here on, we only handle:
4200 // (icmp1 A, C1) & (icmp2 A, C2) --> something simpler.
4201 if (Val != Val2) return 0;
4203 // ICMP_[US][GL]E X, CST is folded to ICMP_[US][GL]T elsewhere.
4204 if (LHSCC == ICmpInst::ICMP_UGE || LHSCC == ICmpInst::ICMP_ULE ||
4205 RHSCC == ICmpInst::ICMP_UGE || RHSCC == ICmpInst::ICMP_ULE ||
4206 LHSCC == ICmpInst::ICMP_SGE || LHSCC == ICmpInst::ICMP_SLE ||
4207 RHSCC == ICmpInst::ICMP_SGE || RHSCC == ICmpInst::ICMP_SLE)
4210 // We can't fold (ugt x, C) & (sgt x, C2).
4211 if (!PredicatesFoldable(LHSCC, RHSCC))
4214 // Ensure that the larger constant is on the RHS.
4215 bool ShouldSwap;
4216 if (CmpInst::isSigned(LHSCC) ||
4217 (ICmpInst::isEquality(LHSCC) &&
4218 CmpInst::isSigned(RHSCC)))
4219 ShouldSwap = LHSCst->getValue().sgt(RHSCst->getValue());
4220 else
4221 ShouldSwap = LHSCst->getValue().ugt(RHSCst->getValue());
4223 if (ShouldSwap) {
4224 std::swap(LHS, RHS);
4225 std::swap(LHSCst, RHSCst);
4226 std::swap(LHSCC, RHSCC);
4229 // At this point, we know we have two icmp instructions
4230 // comparing a value against two constants and and'ing the result
4231 // together. Because of the above check, we know that we only have
4232 // icmp eq, icmp ne, icmp [su]lt, and icmp [su]gt here. We also know
4233 // (from the FoldICmpLogical check above) that the two constants
4234 // are not equal and that the larger constant is on the RHS.
4235 assert(LHSCst != RHSCst && "Compares not folded above?");
4237 switch (LHSCC) {
4238 default: llvm_unreachable("Unknown integer condition code!");
4239 case ICmpInst::ICMP_EQ:
4240 switch (RHSCC) {
4241 default: llvm_unreachable("Unknown integer condition code!");
4242 case ICmpInst::ICMP_EQ: // (X == 13 & X == 15) -> false
4243 case ICmpInst::ICMP_UGT: // (X == 13 & X > 15) -> false
4244 case ICmpInst::ICMP_SGT: // (X == 13 & X > 15) -> false
4245 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
4246 case ICmpInst::ICMP_NE: // (X == 13 & X != 15) -> X == 13
4247 case ICmpInst::ICMP_ULT: // (X == 13 & X < 15) -> X == 13
4248 case ICmpInst::ICMP_SLT: // (X == 13 & X < 15) -> X == 13
4249 return ReplaceInstUsesWith(I, LHS);
4251 case ICmpInst::ICMP_NE:
4252 switch (RHSCC) {
4253 default: llvm_unreachable("Unknown integer condition code!");
4254 case ICmpInst::ICMP_ULT:
4255 if (LHSCst == SubOne(RHSCst)) // (X != 13 & X u< 14) -> X < 13
4256 return new ICmpInst(ICmpInst::ICMP_ULT, Val, LHSCst);
4257 break; // (X != 13 & X u< 15) -> no change
4258 case ICmpInst::ICMP_SLT:
4259 if (LHSCst == SubOne(RHSCst)) // (X != 13 & X s< 14) -> X < 13
4260 return new ICmpInst(ICmpInst::ICMP_SLT, Val, LHSCst);
4261 break; // (X != 13 & X s< 15) -> no change
4262 case ICmpInst::ICMP_EQ: // (X != 13 & X == 15) -> X == 15
4263 case ICmpInst::ICMP_UGT: // (X != 13 & X u> 15) -> X u> 15
4264 case ICmpInst::ICMP_SGT: // (X != 13 & X s> 15) -> X s> 15
4265 return ReplaceInstUsesWith(I, RHS);
4266 case ICmpInst::ICMP_NE:
4267 if (LHSCst == SubOne(RHSCst)){// (X != 13 & X != 14) -> X-13 >u 1
4268 Constant *AddCST = ConstantExpr::getNeg(LHSCst);
4269 Value *Add = Builder->CreateAdd(Val, AddCST, Val->getName()+".off");
4270 return new ICmpInst(ICmpInst::ICMP_UGT, Add,
4271 ConstantInt::get(Add->getType(), 1));
4273 break; // (X != 13 & X != 15) -> no change
4276 case ICmpInst::ICMP_ULT:
4277 switch (RHSCC) {
4278 default: llvm_unreachable("Unknown integer condition code!");
4279 case ICmpInst::ICMP_EQ: // (X u< 13 & X == 15) -> false
4280 case ICmpInst::ICMP_UGT: // (X u< 13 & X u> 15) -> false
4281 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
4282 case ICmpInst::ICMP_SGT: // (X u< 13 & X s> 15) -> no change
4284 case ICmpInst::ICMP_NE: // (X u< 13 & X != 15) -> X u< 13
4285 case ICmpInst::ICMP_ULT: // (X u< 13 & X u< 15) -> X u< 13
4286 return ReplaceInstUsesWith(I, LHS);
4287 case ICmpInst::ICMP_SLT: // (X u< 13 & X s< 15) -> no change
4291 case ICmpInst::ICMP_SLT:
4292 switch (RHSCC) {
4293 default: llvm_unreachable("Unknown integer condition code!");
4294 case ICmpInst::ICMP_EQ: // (X s< 13 & X == 15) -> false
4295 case ICmpInst::ICMP_SGT: // (X s< 13 & X s> 15) -> false
4296 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
4297 case ICmpInst::ICMP_UGT: // (X s< 13 & X u> 15) -> no change
4299 case ICmpInst::ICMP_NE: // (X s< 13 & X != 15) -> X < 13
4300 case ICmpInst::ICMP_SLT: // (X s< 13 & X s< 15) -> X < 13
4301 return ReplaceInstUsesWith(I, LHS);
4302 case ICmpInst::ICMP_ULT: // (X s< 13 & X u< 15) -> no change
4306 case ICmpInst::ICMP_UGT:
4307 switch (RHSCC) {
4308 default: llvm_unreachable("Unknown integer condition code!");
4309 case ICmpInst::ICMP_EQ: // (X u> 13 & X == 15) -> X == 15
4310 case ICmpInst::ICMP_UGT: // (X u> 13 & X u> 15) -> X u> 15
4311 return ReplaceInstUsesWith(I, RHS);
4312 case ICmpInst::ICMP_SGT: // (X u> 13 & X s> 15) -> no change
4314 case ICmpInst::ICMP_NE:
4315 if (RHSCst == AddOne(LHSCst)) // (X u> 13 & X != 14) -> X u> 14
4316 return new ICmpInst(LHSCC, Val, RHSCst);
4317 break; // (X u> 13 & X != 15) -> no change
4318 case ICmpInst::ICMP_ULT: // (X u> 13 & X u< 15) -> (X-14) <u 1
4319 return InsertRangeTest(Val, AddOne(LHSCst),
4320 RHSCst, false, true, I);
4321 case ICmpInst::ICMP_SLT: // (X u> 13 & X s< 15) -> no change
4325 case ICmpInst::ICMP_SGT:
4326 switch (RHSCC) {
4327 default: llvm_unreachable("Unknown integer condition code!");
4328 case ICmpInst::ICMP_EQ: // (X s> 13 & X == 15) -> X == 15
4329 case ICmpInst::ICMP_SGT: // (X s> 13 & X s> 15) -> X s> 15
4330 return ReplaceInstUsesWith(I, RHS);
4331 case ICmpInst::ICMP_UGT: // (X s> 13 & X u> 15) -> no change
4333 case ICmpInst::ICMP_NE:
4334 if (RHSCst == AddOne(LHSCst)) // (X s> 13 & X != 14) -> X s> 14
4335 return new ICmpInst(LHSCC, Val, RHSCst);
4336 break; // (X s> 13 & X != 15) -> no change
4337 case ICmpInst::ICMP_SLT: // (X s> 13 & X s< 15) -> (X-14) s< 1
4338 return InsertRangeTest(Val, AddOne(LHSCst),
4339 RHSCst, true, true, I);
4340 case ICmpInst::ICMP_ULT: // (X s> 13 & X u< 15) -> no change
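// Worked check of one of the folds above (illustrative only):
// (X != 13 & X != 14) becomes ((X - 13) u> 1). For X == 13 the difference is
// 0, for X == 14 it is 1, so the test is false exactly for those two values
// and true (via unsigned wraparound) for everything else.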
4349 Instruction *InstCombiner::FoldAndOfFCmps(Instruction &I, FCmpInst *LHS,
4352 if (LHS->getPredicate() == FCmpInst::FCMP_ORD &&
4353 RHS->getPredicate() == FCmpInst::FCMP_ORD) {
4354 // (fcmp ord x, c) & (fcmp ord y, c) -> (fcmp ord x, y)
4355 if (ConstantFP *LHSC = dyn_cast<ConstantFP>(LHS->getOperand(1)))
4356 if (ConstantFP *RHSC = dyn_cast<ConstantFP>(RHS->getOperand(1))) {
4357 // If either of the constants are nans, then the whole thing returns
4359 if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN())
4360 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
4361 return new FCmpInst(FCmpInst::FCMP_ORD,
4362 LHS->getOperand(0), RHS->getOperand(0));
4365 // Handle vector zeros. This occurs because the canonical form of
4366 // "fcmp ord x,x" is "fcmp ord x, 0".
4367 if (isa<ConstantAggregateZero>(LHS->getOperand(1)) &&
4368 isa<ConstantAggregateZero>(RHS->getOperand(1)))
4369 return new FCmpInst(FCmpInst::FCMP_ORD,
4370 LHS->getOperand(0), RHS->getOperand(0));
4374 Value *Op0LHS = LHS->getOperand(0), *Op0RHS = LHS->getOperand(1);
4375 Value *Op1LHS = RHS->getOperand(0), *Op1RHS = RHS->getOperand(1);
4376 FCmpInst::Predicate Op0CC = LHS->getPredicate(), Op1CC = RHS->getPredicate();
4379 if (Op0LHS == Op1RHS && Op0RHS == Op1LHS) {
4380 // Swap RHS operands to match LHS.
4381 Op1CC = FCmpInst::getSwappedPredicate(Op1CC);
4382 std::swap(Op1LHS, Op1RHS);
4385 if (Op0LHS == Op1LHS && Op0RHS == Op1RHS) {
4386 // Simplify (fcmp cc0 x, y) & (fcmp cc1 x, y).
4388 return new FCmpInst((FCmpInst::Predicate)Op0CC, Op0LHS, Op0RHS);
4390 if (Op0CC == FCmpInst::FCMP_FALSE || Op1CC == FCmpInst::FCMP_FALSE)
4391 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
4392 if (Op0CC == FCmpInst::FCMP_TRUE)
4393 return ReplaceInstUsesWith(I, RHS);
4394 if (Op1CC == FCmpInst::FCMP_TRUE)
4395 return ReplaceInstUsesWith(I, LHS);
4399 unsigned Op0Pred = getFCmpCode(Op0CC, Op0Ordered);
4400 unsigned Op1Pred = getFCmpCode(Op1CC, Op1Ordered);
4402 std::swap(LHS, RHS);
4403 std::swap(Op0Pred, Op1Pred);
4404 std::swap(Op0Ordered, Op1Ordered);
4407 // uno && ueq -> uno && (uno || eq) -> ueq
4408 // ord && olt -> ord && (ord && lt) -> olt
4409 if (Op0Ordered == Op1Ordered)
4410 return ReplaceInstUsesWith(I, RHS);
4412 // uno && oeq -> uno && (ord && eq) -> false
4413 // uno && ord -> false
4415 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
4416 // ord && ueq -> ord && (uno || eq) -> oeq
4417 return cast<Instruction>(getFCmpValue(true, Op1Pred,
4418 Op0LHS, Op0RHS, Context));
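// Illustrative example (not from the original source):
//   (fcmp ord double %x, 0.0) & (fcmp ord double %y, 0.0)
//       --> fcmp ord double %x, %y
// Both forms are true exactly when neither %x nor %y is a NaN.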
4426 Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
4427 bool Changed = SimplifyCommutative(I);
4428 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
4430 if (Value *V = SimplifyAndInst(Op0, Op1, TD))
4431 return ReplaceInstUsesWith(I, V);
4433 // See if we can simplify any instructions used by the instruction whose sole
4434 // purpose is to compute bits we don't care about.
4435 if (SimplifyDemandedInstructionBits(I))
4438 if (ConstantInt *AndRHS = dyn_cast<ConstantInt>(Op1)) {
4439 const APInt &AndRHSMask = AndRHS->getValue();
4440 APInt NotAndRHS(~AndRHSMask);
4442 // Optimize a variety of ((val OP C1) & C2) combinations...
4443 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
4444 Value *Op0LHS = Op0I->getOperand(0);
4445 Value *Op0RHS = Op0I->getOperand(1);
4446 switch (Op0I->getOpcode()) {
4448 case Instruction::Xor:
4449 case Instruction::Or:
4450 // If the mask is only needed on one incoming arm, push it up.
4451 if (!Op0I->hasOneUse()) break;
4453 if (MaskedValueIsZero(Op0LHS, NotAndRHS)) {
4454 // Not masking anything out for the LHS, move to RHS.
4455 Value *NewRHS = Builder->CreateAnd(Op0RHS, AndRHS,
4456 Op0RHS->getName()+".masked");
4457 return BinaryOperator::Create(Op0I->getOpcode(), Op0LHS, NewRHS);
4459 if (!isa<Constant>(Op0RHS) &&
4460 MaskedValueIsZero(Op0RHS, NotAndRHS)) {
4461 // Not masking anything out for the RHS, move to LHS.
4462 Value *NewLHS = Builder->CreateAnd(Op0LHS, AndRHS,
4463 Op0LHS->getName()+".masked");
4464 return BinaryOperator::Create(Op0I->getOpcode(), NewLHS, Op0RHS);
4468 case Instruction::Add:
4469 // ((A & N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == AndRHS.
4470 // ((A | N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == 0
4471 // ((A ^ N) + B) & AndRHS -> (A + B) & AndRHS iff N&AndRHS == 0
4472 if (Value *V = FoldLogicalPlusAnd(Op0LHS, Op0RHS, AndRHS, false, I))
4473 return BinaryOperator::CreateAnd(V, AndRHS);
4474 if (Value *V = FoldLogicalPlusAnd(Op0RHS, Op0LHS, AndRHS, false, I))
4475 return BinaryOperator::CreateAnd(V, AndRHS); // Add commutes
4478 case Instruction::Sub:
4479 // ((A & N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == AndRHS.
4480 // ((A | N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == 0
4481 // ((A ^ N) - B) & AndRHS -> (A - B) & AndRHS iff N&AndRHS == 0
4482 if (Value *V = FoldLogicalPlusAnd(Op0LHS, Op0RHS, AndRHS, true, I))
4483 return BinaryOperator::CreateAnd(V, AndRHS);
4485 // (A - N) & AndRHS -> -N & AndRHS iff A&AndRHS==0 and AndRHS
4486 // has 1's for all bits that the subtraction with A might affect.
4487 if (Op0I->hasOneUse()) {
4488 uint32_t BitWidth = AndRHSMask.getBitWidth();
4489 uint32_t Zeros = AndRHSMask.countLeadingZeros();
4490 APInt Mask = APInt::getLowBitsSet(BitWidth, BitWidth - Zeros);
4492 ConstantInt *A = dyn_cast<ConstantInt>(Op0LHS);
4493 if (!(A && A->isZero()) && // avoid infinite recursion.
4494 MaskedValueIsZero(Op0LHS, Mask)) {
4495 Value *NewNeg = Builder->CreateNeg(Op0RHS);
4496 return BinaryOperator::CreateAnd(NewNeg, AndRHS);
4501 case Instruction::Shl:
4502 case Instruction::LShr:
4503 // (1 << x) & 1 --> zext(x == 0)
4504 // (1 >> x) & 1 --> zext(x == 0)
4505 if (AndRHSMask == 1 && Op0LHS == AndRHS) {
4507 Builder->CreateICmpEQ(Op0RHS, Constant::getNullValue(I.getType()));
4508 return new ZExtInst(NewICmp, I.getType());
4513 if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1)))
4514 if (Instruction *Res = OptAndOp(Op0I, Op0CI, AndRHS, I))
4515 return Res;
4516 } else if (CastInst *CI = dyn_cast<CastInst>(Op0)) {
4517 // If this is an integer truncation or change from signed-to-unsigned, and
4518 // if the source is an and/or with immediate, transform it. This
4519 // frequently occurs for bitfield accesses.
4520 if (Instruction *CastOp = dyn_cast<Instruction>(CI->getOperand(0))) {
4521 if ((isa<TruncInst>(CI) || isa<BitCastInst>(CI)) &&
4522 CastOp->getNumOperands() == 2)
4523 if (ConstantInt *AndCI =dyn_cast<ConstantInt>(CastOp->getOperand(1))){
4524 if (CastOp->getOpcode() == Instruction::And) {
4525 // Change: and (cast (and X, C1) to T), C2
4526 // into : and (cast X to T), trunc_or_bitcast(C1)&C2
4527 // This will fold the two constants together, which may allow
4528 // other simplifications.
4529 Value *NewCast = Builder->CreateTruncOrBitCast(
4530 CastOp->getOperand(0), I.getType(),
4531 CastOp->getName()+".shrunk");
4532 // trunc_or_bitcast(C1)&C2
4533 Constant *C3 = ConstantExpr::getTruncOrBitCast(AndCI,I.getType());
4534 C3 = ConstantExpr::getAnd(C3, AndRHS);
4535 return BinaryOperator::CreateAnd(NewCast, C3);
4536 } else if (CastOp->getOpcode() == Instruction::Or) {
4537 // Change: and (cast (or X, C1) to T), C2
4538 // into : trunc(C1)&C2 iff trunc(C1)&C2 == C2
4539 Constant *C3 = ConstantExpr::getTruncOrBitCast(AndCI,I.getType());
4540 if (ConstantExpr::getAnd(C3, AndRHS) == AndRHS)
4542 return ReplaceInstUsesWith(I, AndRHS);
4548 // Try to fold constant and into select arguments.
4549 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
4550 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
4551 return R;
4552 if (isa<PHINode>(Op0))
4553 if (Instruction *NV = FoldOpIntoPhi(I))
4554 return NV;
4558 // (~A & ~B) == (~(A | B)) - De Morgan's Law
4559 if (Value *Op0NotVal = dyn_castNotVal(Op0))
4560 if (Value *Op1NotVal = dyn_castNotVal(Op1))
4561 if (Op0->hasOneUse() && Op1->hasOneUse()) {
4562 Value *Or = Builder->CreateOr(Op0NotVal, Op1NotVal,
4563 I.getName()+".demorgan");
4564 return BinaryOperator::CreateNot(Or);
4568 Value *A = 0, *B = 0, *C = 0, *D = 0;
4569 // (A|B) & ~(A&B) -> A^B
4570 if (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
4571 match(Op1, m_Not(m_And(m_Value(C), m_Value(D)))) &&
4572 ((A == C && B == D) || (A == D && B == C)))
4573 return BinaryOperator::CreateXor(A, B);
4575 // ~(A&B) & (A|B) -> A^B
4576 if (match(Op1, m_Or(m_Value(A), m_Value(B))) &&
4577 match(Op0, m_Not(m_And(m_Value(C), m_Value(D)))) &&
4578 ((A == C && B == D) || (A == D && B == C)))
4579 return BinaryOperator::CreateXor(A, B);
4581 if (Op0->hasOneUse() &&
4582 match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
4583 if (A == Op1) { // (A^B)&A -> A&(A^B)
4584 I.swapOperands(); // Simplify below
4585 std::swap(Op0, Op1);
4586 } else if (B == Op1) { // (A^B)&B -> B&(B^A)
4587 cast<BinaryOperator>(Op0)->swapOperands();
4588 I.swapOperands(); // Simplify below
4589 std::swap(Op0, Op1);
4593 if (Op1->hasOneUse() &&
4594 match(Op1, m_Xor(m_Value(A), m_Value(B)))) {
4595 if (B == Op0) { // B&(A^B) -> B&(B^A)
4596 cast<BinaryOperator>(Op1)->swapOperands();
4599 if (A == Op0) // A&(A^B) -> A & ~B
4600 return BinaryOperator::CreateAnd(A, Builder->CreateNot(B, "tmp"));
4603 // (A&((~A)|B)) -> A&B
4604 if (match(Op0, m_Or(m_Not(m_Specific(Op1)), m_Value(A))) ||
4605 match(Op0, m_Or(m_Value(A), m_Not(m_Specific(Op1)))))
4606 return BinaryOperator::CreateAnd(A, Op1);
4607 if (match(Op1, m_Or(m_Not(m_Specific(Op0)), m_Value(A))) ||
4608 match(Op1, m_Or(m_Value(A), m_Not(m_Specific(Op0)))))
4609 return BinaryOperator::CreateAnd(A, Op0);
4612 if (ICmpInst *RHS = dyn_cast<ICmpInst>(Op1)) {
4613 // (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B)
4614 if (Instruction *R = AssociativeOpt(I, FoldICmpLogical(*this, RHS)))
4615 return R;
4617 if (ICmpInst *LHS = dyn_cast<ICmpInst>(Op0))
4618 if (Instruction *Res = FoldAndOfICmps(I, LHS, RHS))
4619 return Res;
4622 // fold (and (cast A), (cast B)) -> (cast (and A, B))
4623 if (CastInst *Op0C = dyn_cast<CastInst>(Op0))
4624 if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
4625 if (Op0C->getOpcode() == Op1C->getOpcode()) { // same cast kind ?
4626 const Type *SrcTy = Op0C->getOperand(0)->getType();
4627 if (SrcTy == Op1C->getOperand(0)->getType() &&
4628 SrcTy->isIntOrIntVector() &&
4629 // Only do this if the casts both really cause code to be generated.
4630 ValueRequiresCast(Op0C->getOpcode(), Op0C->getOperand(0),
4632 ValueRequiresCast(Op1C->getOpcode(), Op1C->getOperand(0),
4634 Value *NewOp = Builder->CreateAnd(Op0C->getOperand(0),
4635 Op1C->getOperand(0), I.getName());
4636 return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
4640 // (X >> Z) & (Y >> Z) -> (X&Y) >> Z for all shifts.
4641 if (BinaryOperator *SI1 = dyn_cast<BinaryOperator>(Op1)) {
4642 if (BinaryOperator *SI0 = dyn_cast<BinaryOperator>(Op0))
4643 if (SI0->isShift() && SI0->getOpcode() == SI1->getOpcode() &&
4644 SI0->getOperand(1) == SI1->getOperand(1) &&
4645 (SI0->hasOneUse() || SI1->hasOneUse())) {
4647 Builder->CreateAnd(SI0->getOperand(0), SI1->getOperand(0),
4649 return BinaryOperator::Create(SI1->getOpcode(), NewOp,
4650 SI1->getOperand(1));
4654 // If and'ing two fcmp, try combine them into one.
4655 if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0))) {
4656 if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
4657 if (Instruction *Res = FoldAndOfFCmps(I, LHS, RHS))
4658 return Res;
4661 return Changed ? &I : 0;
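// Illustrative examples for some of the and-folds above (not from the
// original source):
//   and (xor %a, -1), (xor %b, -1)            --> xor (or %a, %b), -1   (De Morgan)
//   and (or %a, %b), (xor (and %a, %b), -1)   --> xor %a, %b
//   and i32 (shl i32 1, %x), 1                --> zext (icmp eq i32 %x, 0) to i32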
4664 /// CollectBSwapParts - Analyze the specified subexpression and see if it is
4665 /// capable of providing pieces of a bswap. The subexpression provides pieces
4666 /// of a bswap if it is proven that each of the non-zero bytes in the output of
4667 /// the expression came from the corresponding "byte swapped" byte in some other
4668 /// value. For example, if the current subexpression is "(shl i32 %X, 24)" then
4669 /// we know that the expression deposits the low byte of %X into the high byte
4670 /// of the bswap result and that all other bytes are zero. This expression is
4671 /// accepted, the high byte of ByteValues is set to X to indicate a correct
4672 /// match.
4674 /// This function returns true if the match was unsuccessful and false if it succeeded.
4675 /// On entry to the function the "OverallLeftShift" is a signed integer value
4676 /// indicating the number of bytes that the subexpression is later shifted. For
4677 /// example, if the expression is later right shifted by 16 bits, the
4678 /// OverallLeftShift value would be -2 on entry. This is used to specify which
4679 /// byte of ByteValues is actually being set.
4681 /// Similarly, ByteMask is a bitmask where a bit is clear if its corresponding
4682 /// byte is masked to zero by a user. For example, in (X & 255), X will be
4683 /// processed with a bytemask of 1. Because bytemask is 32-bits, this limits
4684 /// this function to working on up to 32-byte (256 bit) values. ByteMask is
4685 /// always in the local (OverallLeftShift) coordinate space.
4687 static bool CollectBSwapParts(Value *V, int OverallLeftShift, uint32_t ByteMask,
4688 SmallVector<Value*, 8> &ByteValues) {
4689 if (Instruction *I = dyn_cast<Instruction>(V)) {
4690 // If this is an or instruction, it may be an inner node of the bswap.
4691 if (I->getOpcode() == Instruction::Or) {
4692 return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
4694 CollectBSwapParts(I->getOperand(1), OverallLeftShift, ByteMask,
4698 // If this is a logical shift by a constant multiple of 8, recurse with
4699 // OverallLeftShift and ByteMask adjusted.
4700 if (I->isLogicalShift() && isa<ConstantInt>(I->getOperand(1))) {
4702 cast<ConstantInt>(I->getOperand(1))->getLimitedValue(~0U);
4703 // Ensure the shift amount is defined and of a byte value.
4704 if ((ShAmt & 7) || (ShAmt > 8*ByteValues.size()))
4707 unsigned ByteShift = ShAmt >> 3;
4708 if (I->getOpcode() == Instruction::Shl) {
4709 // X << 2 -> collect(X, +2)
4710 OverallLeftShift += ByteShift;
4711 ByteMask >>= ByteShift;
4713 // X >>u 2 -> collect(X, -2)
4714 OverallLeftShift -= ByteShift;
4715 ByteMask <<= ByteShift;
4716 ByteMask &= (~0U >> (32-ByteValues.size()));
4719 if (OverallLeftShift >= (int)ByteValues.size()) return true;
4720 if (OverallLeftShift <= -(int)ByteValues.size()) return true;
4722 return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
4726 // If this is a logical 'and' with a mask that clears bytes, clear the
4727 // corresponding bytes in ByteMask.
4728 if (I->getOpcode() == Instruction::And &&
4729 isa<ConstantInt>(I->getOperand(1))) {
4730 // Scan every byte of the and mask, seeing if the byte is either 0 or 255.
4731 unsigned NumBytes = ByteValues.size();
4732 APInt Byte(I->getType()->getPrimitiveSizeInBits(), 255);
4733 const APInt &AndMask = cast<ConstantInt>(I->getOperand(1))->getValue();
4735 for (unsigned i = 0; i != NumBytes; ++i, Byte <<= 8) {
4736 // If this byte is masked out by a later operation, we don't care what
4738 if ((ByteMask & (1 << i)) == 0)
4741 // If the AndMask is all zeros for this byte, clear the bit.
4742 APInt MaskB = AndMask & Byte;
4744 ByteMask &= ~(1U << i);
4748 // If the AndMask is not all ones for this byte, it's not a bytezap.
4752 // Otherwise, this byte is kept.
4755 return CollectBSwapParts(I->getOperand(0), OverallLeftShift, ByteMask,
4760 // Okay, we got to something that isn't a shift, 'or' or 'and'. This must be
4761 // the input value to the bswap. Some observations: 1) if more than one byte
4762 // is demanded from this input, then it could not be successfully assembled
4763 // into a byteswap. At least one of the two bytes would not be aligned with
4764 // their ultimate destination.
4765 if (!isPowerOf2_32(ByteMask)) return true;
4766 unsigned InputByteNo = CountTrailingZeros_32(ByteMask);
4768 // 2) The input and ultimate destinations must line up: if byte 3 of an i32
4769 // is demanded, it needs to go into byte 0 of the result. This means that the
4770 // byte needs to be shifted until it lands in the right byte bucket. The
4771 // shift amount depends on the position: if the byte is coming from the high
4772 // part of the value (e.g. byte 3) then it must be shifted right. If from the
4773 // low part, it must be shifted left.
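// E.g. for a 4-byte value, input byte 3 may only land in destination byte 0
// (3 == 4-1-0) and input byte 0 only in destination byte 3; any other pairing
// cannot be part of a bswap and is rejected below.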
4774 unsigned DestByteNo = InputByteNo + OverallLeftShift;
4775 if (InputByteNo < ByteValues.size()/2) {
4776 if (ByteValues.size()-1-DestByteNo != InputByteNo)
4779 if (ByteValues.size()-1-DestByteNo != InputByteNo)
4783 // If the destination byte value is already defined, the values are or'd
4784 // together, which isn't a bswap (unless it's an or of the same bits).
4785 if (ByteValues[DestByteNo] && ByteValues[DestByteNo] != V)
4787 ByteValues[DestByteNo] = V;
4791 /// MatchBSwap - Given an OR instruction, check to see if this is a bswap idiom.
4792 /// If so, insert the new bswap intrinsic and return it.
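/// For illustration (a sketch with made-up value names), the i32 idiom
///   %t0 = shl i32 %x, 24                 ; byte 0 -> byte 3
///   %t1 = and i32 %x, 65280              ; keep byte 1 (0x0000FF00)
///   %t2 = shl i32 %t1, 8                 ; byte 1 -> byte 2
///   %t3 = lshr i32 %x, 8
///   %t4 = and i32 %t3, 65280             ; byte 2 -> byte 1
///   %t5 = lshr i32 %x, 24                ; byte 3 -> byte 0
///   %hi = or i32 %t0, %t2
///   %lo = or i32 %t4, %t5
///   %r  = or i32 %hi, %lo
/// is replaced by a single "call i32 @llvm.bswap.i32(i32 %x)".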
4793 Instruction *InstCombiner::MatchBSwap(BinaryOperator &I) {
4794 const IntegerType *ITy = dyn_cast<IntegerType>(I.getType());
4795 if (!ITy || ITy->getBitWidth() % 16 ||
4796 // ByteMask only allows up to 32-byte values.
4797 ITy->getBitWidth() > 32*8)
4798 return 0; // Can only bswap pairs of bytes. Can't do vectors.
4800 /// ByteValues - For each byte of the result, we keep track of which value
4801 /// defines each byte.
4802 SmallVector<Value*, 8> ByteValues;
4803 ByteValues.resize(ITy->getBitWidth()/8);
4805 // Try to find all the pieces corresponding to the bswap.
4806 uint32_t ByteMask = ~0U >> (32-ByteValues.size());
4807 if (CollectBSwapParts(&I, 0, ByteMask, ByteValues))
4810 // Check to see if all of the bytes come from the same value.
4811 Value *V = ByteValues[0];
4812 if (V == 0) return 0; // Didn't find a byte? Must be zero.
4814 // Check to make sure that all of the bytes come from the same value.
4815 for (unsigned i = 1, e = ByteValues.size(); i != e; ++i)
4816 if (ByteValues[i] != V)
4818 const Type *Tys[] = { ITy };
4819 Module *M = I.getParent()->getParent()->getParent();
4820 Function *F = Intrinsic::getDeclaration(M, Intrinsic::bswap, Tys, 1);
4821 return CallInst::Create(F, V);
4824 /// MatchSelectFromAndOr - We have an expression of the form (A&C)|(B&D). Check
4825 /// if A is (cond?-1:0) and either B or D is ~(cond?-1:0) or (cond?0:-1), then
4826 /// we can simplify this expression to "cond ? C : B" or "cond ? C : D".
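/// For example (illustrative): with A = (select i1 %c, i32 -1, i32 0) and
/// D = (select i1 %c, i32 0, i32 -1), the expression (A & %x) | (%y & D)
/// simplifies to "select i1 %c, i32 %x, i32 %y".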
4827 static Instruction *MatchSelectFromAndOr(Value *A, Value *B,
4829 LLVMContext *Context) {
4830 // If A is not a select of -1/0, this cannot match.
4832 if (!match(A, m_SelectCst<-1, 0>(m_Value(Cond))))
4835 // ((cond?-1:0)&C) | (B&(cond?0:-1)) -> cond ? C : B.
4836 if (match(D, m_SelectCst<0, -1>(m_Specific(Cond))))
4837 return SelectInst::Create(Cond, C, B);
4838 if (match(D, m_Not(m_SelectCst<-1, 0>(m_Specific(Cond)))))
4839 return SelectInst::Create(Cond, C, B);
4840 // ((cond?-1:0)&C) | ((cond?0:-1)&D) -> cond ? C : D.
4841 if (match(B, m_SelectCst<0, -1>(m_Specific(Cond))))
4842 return SelectInst::Create(Cond, C, D);
4843 if (match(B, m_Not(m_SelectCst<-1, 0>(m_Specific(Cond)))))
4844 return SelectInst::Create(Cond, C, D);
4848 /// FoldOrOfICmps - Fold (icmp)|(icmp) if possible.
4849 Instruction *InstCombiner::FoldOrOfICmps(Instruction &I,
4850 ICmpInst *LHS, ICmpInst *RHS) {
4852 ConstantInt *LHSCst, *RHSCst;
4853 ICmpInst::Predicate LHSCC, RHSCC;
4855 // This only handles icmp of constants: (icmp1 A, C1) | (icmp2 B, C2).
4856 if (!match(LHS, m_ICmp(LHSCC, m_Value(Val), m_ConstantInt(LHSCst))) ||
4857 !match(RHS, m_ICmp(RHSCC, m_Value(Val2), m_ConstantInt(RHSCst))))
4861 // (icmp ne A, 0) | (icmp ne B, 0) --> (icmp ne (A|B), 0)
4862 if (LHSCst == RHSCst && LHSCC == RHSCC &&
4863 LHSCC == ICmpInst::ICMP_NE && LHSCst->isZero()) {
4864 Value *NewOr = Builder->CreateOr(Val, Val2);
4865 return new ICmpInst(LHSCC, NewOr, LHSCst);
4868 // From here on, we only handle:
4869 // (icmp1 A, C1) | (icmp2 A, C2) --> something simpler.
4870 if (Val != Val2) return 0;
4872 // ICMP_[US][GL]E X, CST is folded to ICMP_[US][GL]T elsewhere.
4873 if (LHSCC == ICmpInst::ICMP_UGE || LHSCC == ICmpInst::ICMP_ULE ||
4874 RHSCC == ICmpInst::ICMP_UGE || RHSCC == ICmpInst::ICMP_ULE ||
4875 LHSCC == ICmpInst::ICMP_SGE || LHSCC == ICmpInst::ICMP_SLE ||
4876 RHSCC == ICmpInst::ICMP_SGE || RHSCC == ICmpInst::ICMP_SLE)
4879 // We can't fold (ugt x, C) | (sgt x, C2).
4880 if (!PredicatesFoldable(LHSCC, RHSCC))
4883 // Ensure that the larger constant is on the RHS.
4885 if (CmpInst::isSigned(LHSCC) ||
4886 (ICmpInst::isEquality(LHSCC) &&
4887 CmpInst::isSigned(RHSCC)))
4888 ShouldSwap = LHSCst->getValue().sgt(RHSCst->getValue());
4890 ShouldSwap = LHSCst->getValue().ugt(RHSCst->getValue());
4893 std::swap(LHS, RHS);
4894 std::swap(LHSCst, RHSCst);
4895 std::swap(LHSCC, RHSCC);
4898 // At this point, we know we have two icmp instructions
4899 // comparing a value against two constants and or'ing the result
4900 // together. Because of the above check, we know that we only have
4901 // ICMP_EQ, ICMP_NE, ICMP_LT, and ICMP_GT here. We also know (from the
4902 // FoldICmpLogical check above), that the two constants are not equal.
4904 assert(LHSCst != RHSCst && "Compares not folded above?");
4907 default: llvm_unreachable("Unknown integer condition code!");
4908 case ICmpInst::ICMP_EQ:
4910 default: llvm_unreachable("Unknown integer condition code!");
4911 case ICmpInst::ICMP_EQ:
4912 if (LHSCst == SubOne(RHSCst)) {
4913 // (X == 13 | X == 14) -> X-13 <u 2
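// E.g. AddCST becomes -13, Add is X-13, and the new bound is (14+1)-13 == 2,
// so the pair of equalities collapses to "X-13 u< 2" (X-13 is 0 or 1).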
4914 Constant *AddCST = ConstantExpr::getNeg(LHSCst);
4915 Value *Add = Builder->CreateAdd(Val, AddCST, Val->getName()+".off");
4916 AddCST = ConstantExpr::getSub(AddOne(RHSCst), LHSCst);
4917 return new ICmpInst(ICmpInst::ICMP_ULT, Add, AddCST);
4919 break; // (X == 13 | X == 15) -> no change
4920 case ICmpInst::ICMP_UGT: // (X == 13 | X u> 14) -> no change
4921 case ICmpInst::ICMP_SGT: // (X == 13 | X s> 14) -> no change
4923 case ICmpInst::ICMP_NE: // (X == 13 | X != 15) -> X != 15
4924 case ICmpInst::ICMP_ULT: // (X == 13 | X u< 15) -> X u< 15
4925 case ICmpInst::ICMP_SLT: // (X == 13 | X s< 15) -> X s< 15
4926 return ReplaceInstUsesWith(I, RHS);
4929 case ICmpInst::ICMP_NE:
4931 default: llvm_unreachable("Unknown integer condition code!");
4932 case ICmpInst::ICMP_EQ: // (X != 13 | X == 15) -> X != 13
4933 case ICmpInst::ICMP_UGT: // (X != 13 | X u> 15) -> X != 13
4934 case ICmpInst::ICMP_SGT: // (X != 13 | X s> 15) -> X != 13
4935 return ReplaceInstUsesWith(I, LHS);
4936 case ICmpInst::ICMP_NE: // (X != 13 | X != 15) -> true
4937 case ICmpInst::ICMP_ULT: // (X != 13 | X u< 15) -> true
4938 case ICmpInst::ICMP_SLT: // (X != 13 | X s< 15) -> true
4939 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
4942 case ICmpInst::ICMP_ULT:
4944 default: llvm_unreachable("Unknown integer condition code!");
4945 case ICmpInst::ICMP_EQ: // (X u< 13 | X == 14) -> no change
4947 case ICmpInst::ICMP_UGT: // (X u< 13 | X u> 15) -> (X-13) u> 2
4948 // If RHSCst is [us]MAXINT, it is always false. Not handling
4949 // this can cause overflow.
4950 if (RHSCst->isMaxValue(false))
4951 return ReplaceInstUsesWith(I, LHS);
4952 return InsertRangeTest(Val, LHSCst, AddOne(RHSCst),
4954 case ICmpInst::ICMP_SGT: // (X u< 13 | X s> 15) -> no change
4956 case ICmpInst::ICMP_NE: // (X u< 13 | X != 15) -> X != 15
4957 case ICmpInst::ICMP_ULT: // (X u< 13 | X u< 15) -> X u< 15
4958 return ReplaceInstUsesWith(I, RHS);
4959 case ICmpInst::ICMP_SLT: // (X u< 13 | X s< 15) -> no change
4963 case ICmpInst::ICMP_SLT:
4965 default: llvm_unreachable("Unknown integer condition code!");
4966 case ICmpInst::ICMP_EQ: // (X s< 13 | X == 14) -> no change
4968 case ICmpInst::ICMP_SGT: // (X s< 13 | X s> 15) -> (X-13) s> 2
4969 // If RHSCst is [us]MAXINT, it is always false. Not handling
4970 // this can cause overflow.
4971 if (RHSCst->isMaxValue(true))
4972 return ReplaceInstUsesWith(I, LHS);
4973 return InsertRangeTest(Val, LHSCst, AddOne(RHSCst),
4975 case ICmpInst::ICMP_UGT: // (X s< 13 | X u> 15) -> no change
4977 case ICmpInst::ICMP_NE: // (X s< 13 | X != 15) -> X != 15
4978 case ICmpInst::ICMP_SLT: // (X s< 13 | X s< 15) -> X s< 15
4979 return ReplaceInstUsesWith(I, RHS);
4980 case ICmpInst::ICMP_ULT: // (X s< 13 | X u< 15) -> no change
4984 case ICmpInst::ICMP_UGT:
4986 default: llvm_unreachable("Unknown integer condition code!");
4987 case ICmpInst::ICMP_EQ: // (X u> 13 | X == 15) -> X u> 13
4988 case ICmpInst::ICMP_UGT: // (X u> 13 | X u> 15) -> X u> 13
4989 return ReplaceInstUsesWith(I, LHS);
4990 case ICmpInst::ICMP_SGT: // (X u> 13 | X s> 15) -> no change
4992 case ICmpInst::ICMP_NE: // (X u> 13 | X != 15) -> true
4993 case ICmpInst::ICMP_ULT: // (X u> 13 | X u< 15) -> true
4994 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
4995 case ICmpInst::ICMP_SLT: // (X u> 13 | X s< 15) -> no change
4999 case ICmpInst::ICMP_SGT:
5001 default: llvm_unreachable("Unknown integer condition code!");
5002 case ICmpInst::ICMP_EQ: // (X s> 13 | X == 15) -> X s> 13
5003 case ICmpInst::ICMP_SGT: // (X s> 13 | X s> 15) -> X s> 13
5004 return ReplaceInstUsesWith(I, LHS);
5005 case ICmpInst::ICMP_UGT: // (X s> 13 | X u> 15) -> no change
5007 case ICmpInst::ICMP_NE: // (X s> 13 | X != 15) -> true
5008 case ICmpInst::ICMP_SLT: // (X s> 13 | X s< 15) -> true
5009 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
5010 case ICmpInst::ICMP_ULT: // (X s> 13 | X u< 15) -> no change
5018 Instruction *InstCombiner::FoldOrOfFCmps(Instruction &I, FCmpInst *LHS,
5020 if (LHS->getPredicate() == FCmpInst::FCMP_UNO &&
5021 RHS->getPredicate() == FCmpInst::FCMP_UNO &&
5022 LHS->getOperand(0)->getType() == RHS->getOperand(0)->getType()) {
5023 if (ConstantFP *LHSC = dyn_cast<ConstantFP>(LHS->getOperand(1)))
5024 if (ConstantFP *RHSC = dyn_cast<ConstantFP>(RHS->getOperand(1))) {
5025 // If either of the constants is a NaN, then the whole thing returns true.
5027 if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN())
5028 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
5030 // Otherwise, no need to compare the two constants; just compare the two variables.
5032 return new FCmpInst(FCmpInst::FCMP_UNO,
5033 LHS->getOperand(0), RHS->getOperand(0));
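// E.g. (illustrative): "(fcmp uno double %x, 1.0) | (fcmp uno double %y, 2.0)"
// has two non-NaN constants, so it collapses to "fcmp uno double %x, %y".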
5036 // Handle vector zeros. This occurs because the canonical form of
5037 // "fcmp uno x,x" is "fcmp uno x, 0".
5038 if (isa<ConstantAggregateZero>(LHS->getOperand(1)) &&
5039 isa<ConstantAggregateZero>(RHS->getOperand(1)))
5040 return new FCmpInst(FCmpInst::FCMP_UNO,
5041 LHS->getOperand(0), RHS->getOperand(0));
5046 Value *Op0LHS = LHS->getOperand(0), *Op0RHS = LHS->getOperand(1);
5047 Value *Op1LHS = RHS->getOperand(0), *Op1RHS = RHS->getOperand(1);
5048 FCmpInst::Predicate Op0CC = LHS->getPredicate(), Op1CC = RHS->getPredicate();
5050 if (Op0LHS == Op1RHS && Op0RHS == Op1LHS) {
5051 // Swap RHS operands to match LHS.
5052 Op1CC = FCmpInst::getSwappedPredicate(Op1CC);
5053 std::swap(Op1LHS, Op1RHS);
5055 if (Op0LHS == Op1LHS && Op0RHS == Op1RHS) {
5056 // Simplify (fcmp cc0 x, y) | (fcmp cc1 x, y).
5058 return new FCmpInst((FCmpInst::Predicate)Op0CC,
5060 if (Op0CC == FCmpInst::FCMP_TRUE || Op1CC == FCmpInst::FCMP_TRUE)
5061 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
5062 if (Op0CC == FCmpInst::FCMP_FALSE)
5063 return ReplaceInstUsesWith(I, RHS);
5064 if (Op1CC == FCmpInst::FCMP_FALSE)
5065 return ReplaceInstUsesWith(I, LHS);
5068 unsigned Op0Pred = getFCmpCode(Op0CC, Op0Ordered);
5069 unsigned Op1Pred = getFCmpCode(Op1CC, Op1Ordered);
5070 if (Op0Ordered == Op1Ordered) {
5071 // If both are ordered or unordered, return a new fcmp with
5072 // or'ed predicates.
5073 Value *RV = getFCmpValue(Op0Ordered, Op0Pred|Op1Pred,
5074 Op0LHS, Op0RHS, Context);
5075 if (Instruction *I = dyn_cast<Instruction>(RV))
5077 // Otherwise, it's a constant boolean value...
5078 return ReplaceInstUsesWith(I, RV);
5084 /// FoldOrWithConstants - This helper function folds:
5086 /// ((A | B) & C1) | (B & C2)
5092 /// into (A & C1) | B, when the XOR of the two constants is "all ones" (-1).
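/// For example (illustrative, i8): ((A | B) & 1) | (B & -2) has C1 = 1 and
/// C2 = -2; since 1 ^ -2 == -1, it is rewritten to (A & 1) | B.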
5093 Instruction *InstCombiner::FoldOrWithConstants(BinaryOperator &I, Value *Op,
5094 Value *A, Value *B, Value *C) {
5095 ConstantInt *CI1 = dyn_cast<ConstantInt>(C);
5099 ConstantInt *CI2 = 0;
5100 if (!match(Op, m_And(m_Value(V1), m_ConstantInt(CI2)))) return 0;
5102 APInt Xor = CI1->getValue() ^ CI2->getValue();
5103 if (!Xor.isAllOnesValue()) return 0;
5105 if (V1 == A || V1 == B) {
5106 Value *NewOp = Builder->CreateAnd((V1 == A) ? B : A, CI1);
5107 return BinaryOperator::CreateOr(NewOp, V1);
5113 Instruction *InstCombiner::visitOr(BinaryOperator &I) {
5114 bool Changed = SimplifyCommutative(I);
5115 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
5117 if (Value *V = SimplifyOrInst(Op0, Op1, TD))
5118 return ReplaceInstUsesWith(I, V);
5121 // See if we can simplify any instructions used by the instruction whose sole
5122 // purpose is to compute bits we don't care about.
5123 if (SimplifyDemandedInstructionBits(I))
5126 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
5127 ConstantInt *C1 = 0; Value *X = 0;
5128 // (X & C1) | C2 --> (X | C2) & (C1|C2)
5129 if (match(Op0, m_And(m_Value(X), m_ConstantInt(C1))) &&
5131 Value *Or = Builder->CreateOr(X, RHS);
5133 return BinaryOperator::CreateAnd(Or,
5134 ConstantInt::get(*Context, RHS->getValue() | C1->getValue()));
5137 // (X ^ C1) | C2 --> (X | C2) ^ (C1&~C2)
5138 if (match(Op0, m_Xor(m_Value(X), m_ConstantInt(C1))) &&
5140 Value *Or = Builder->CreateOr(X, RHS);
5142 return BinaryOperator::CreateXor(Or,
5143 ConstantInt::get(*Context, C1->getValue() & ~RHS->getValue()));
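// For illustration (i8): ((X & 0x0F) | 0x33) --> (X | 0x33) & 0x3F, and
// ((X ^ 0x0F) | 0x33) --> (X | 0x33) ^ 0x0C, dropping the bits of C1 that
// C2 already forces to one.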
5146 // Try to fold constant and into select arguments.
5147 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
5148 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
5150 if (isa<PHINode>(Op0))
5151 if (Instruction *NV = FoldOpIntoPhi(I))
5155 Value *A = 0, *B = 0;
5156 ConstantInt *C1 = 0, *C2 = 0;
5158 // (A | B) | C and A | (B | C) -> bswap if possible.
5159 // (A >> B) | (C << D) and (A << B) | (C >> D) -> bswap if possible.
5160 if (match(Op0, m_Or(m_Value(), m_Value())) ||
5161 match(Op1, m_Or(m_Value(), m_Value())) ||
5162 (match(Op0, m_Shift(m_Value(), m_Value())) &&
5163 match(Op1, m_Shift(m_Value(), m_Value())))) {
5164 if (Instruction *BSwap = MatchBSwap(I))
5168 // (X^C)|Y -> (X|Y)^C iff Y&C == 0
5169 if (Op0->hasOneUse() &&
5170 match(Op0, m_Xor(m_Value(A), m_ConstantInt(C1))) &&
5171 MaskedValueIsZero(Op1, C1->getValue())) {
5172 Value *NOr = Builder->CreateOr(A, Op1);
5174 return BinaryOperator::CreateXor(NOr, C1);
5177 // Y|(X^C) -> (X|Y)^C iff Y&C == 0
5178 if (Op1->hasOneUse() &&
5179 match(Op1, m_Xor(m_Value(A), m_ConstantInt(C1))) &&
5180 MaskedValueIsZero(Op0, C1->getValue())) {
5181 Value *NOr = Builder->CreateOr(A, Op0);
5183 return BinaryOperator::CreateXor(NOr, C1);
5187 Value *C = 0, *D = 0;
5188 if (match(Op0, m_And(m_Value(A), m_Value(C))) &&
5189 match(Op1, m_And(m_Value(B), m_Value(D)))) {
5190 Value *V1 = 0, *V2 = 0, *V3 = 0;
5191 C1 = dyn_cast<ConstantInt>(C);
5192 C2 = dyn_cast<ConstantInt>(D);
5193 if (C1 && C2) { // (A & C1)|(B & C2)
5194 // If we have: ((V + N) & C1) | (V & C2)
5195 // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
5196 // replace with V+N.
5197 if (C1->getValue() == ~C2->getValue()) {
5198 if ((C2->getValue() & (C2->getValue()+1)) == 0 && // C2 == 0+1+
5199 match(A, m_Add(m_Value(V1), m_Value(V2)))) {
5200 // Add commutes, try both ways.
5201 if (V1 == B && MaskedValueIsZero(V2, C2->getValue()))
5202 return ReplaceInstUsesWith(I, A);
5203 if (V2 == B && MaskedValueIsZero(V1, C2->getValue()))
5204 return ReplaceInstUsesWith(I, A);
5206 // Or commutes, try both ways.
5207 if ((C1->getValue() & (C1->getValue()+1)) == 0 &&
5208 match(B, m_Add(m_Value(V1), m_Value(V2)))) {
5209 // Add commutes, try both ways.
5210 if (V1 == A && MaskedValueIsZero(V2, C1->getValue()))
5211 return ReplaceInstUsesWith(I, B);
5212 if (V2 == A && MaskedValueIsZero(V1, C1->getValue()))
5213 return ReplaceInstUsesWith(I, B);
5217 // ((V | N) & C1) | (V & C2) --> (V|N) & (C1|C2)
5218 // iff (C1&C2) == 0 and (N&~C1) == 0
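// E.g. (illustrative, i8): ((V | N) & 0xF0) | (V & 0x0F) with (N & 0x0F) == 0
// becomes (V | N) & 0xFF, i.e. just V | N.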
5219 if ((C1->getValue() & C2->getValue()) == 0) {
5220 if (match(A, m_Or(m_Value(V1), m_Value(V2))) &&
5221 ((V1 == B && MaskedValueIsZero(V2, ~C1->getValue())) || // (V|N)
5222 (V2 == B && MaskedValueIsZero(V1, ~C1->getValue())))) // (N|V)
5223 return BinaryOperator::CreateAnd(A,
5224 ConstantInt::get(A->getContext(),
5225 C1->getValue()|C2->getValue()));
5226 // Or commutes, try both ways.
5227 if (match(B, m_Or(m_Value(V1), m_Value(V2))) &&
5228 ((V1 == A && MaskedValueIsZero(V2, ~C2->getValue())) || // (V|N)
5229 (V2 == A && MaskedValueIsZero(V1, ~C2->getValue())))) // (N|V)
5230 return BinaryOperator::CreateAnd(B,
5231 ConstantInt::get(B->getContext(),
5232 C1->getValue()|C2->getValue()));
5236 // Check to see if we have any common things being and'ed. If so, find the
5237 // terms for V1 & (V2|V3).
5238 if (isOnlyUse(Op0) || isOnlyUse(Op1)) {
5240 if (A == B) // (A & C)|(A & D) == A & (C|D)
5241 V1 = A, V2 = C, V3 = D;
5242 else if (A == D) // (A & C)|(B & A) == A & (B|C)
5243 V1 = A, V2 = B, V3 = C;
5244 else if (C == B) // (A & C)|(C & D) == C & (A|D)
5245 V1 = C, V2 = A, V3 = D;
5246 else if (C == D) // (A & C)|(B & C) == C & (A|B)
5247 V1 = C, V2 = A, V3 = B;
5250 Value *Or = Builder->CreateOr(V2, V3, "tmp");
5251 return BinaryOperator::CreateAnd(V1, Or);
5255 // (A & (C0?-1:0)) | (B & ~(C0?-1:0)) -> C0 ? A : B, and commuted variants
5256 if (Instruction *Match = MatchSelectFromAndOr(A, B, C, D, Context))
5258 if (Instruction *Match = MatchSelectFromAndOr(B, A, D, C, Context))
5260 if (Instruction *Match = MatchSelectFromAndOr(C, B, A, D, Context))
5262 if (Instruction *Match = MatchSelectFromAndOr(D, A, B, C, Context))
5265 // ((A&~B)|(~A&B)) -> A^B
5266 if ((match(C, m_Not(m_Specific(D))) &&
5267 match(B, m_Not(m_Specific(A)))))
5268 return BinaryOperator::CreateXor(A, D);
5269 // ((~B&A)|(~A&B)) -> A^B
5270 if ((match(A, m_Not(m_Specific(D))) &&
5271 match(B, m_Not(m_Specific(C)))))
5272 return BinaryOperator::CreateXor(C, D);
5273 // ((A&~B)|(B&~A)) -> A^B
5274 if ((match(C, m_Not(m_Specific(B))) &&
5275 match(D, m_Not(m_Specific(A)))))
5276 return BinaryOperator::CreateXor(A, B);
5277 // ((~B&A)|(B&~A)) -> A^B
5278 if ((match(A, m_Not(m_Specific(B))) &&
5279 match(D, m_Not(m_Specific(C)))))
5280 return BinaryOperator::CreateXor(C, B);
5283 // (X >> Z) | (Y >> Z) -> (X|Y) >> Z for all shifts.
5284 if (BinaryOperator *SI1 = dyn_cast<BinaryOperator>(Op1)) {
5285 if (BinaryOperator *SI0 = dyn_cast<BinaryOperator>(Op0))
5286 if (SI0->isShift() && SI0->getOpcode() == SI1->getOpcode() &&
5287 SI0->getOperand(1) == SI1->getOperand(1) &&
5288 (SI0->hasOneUse() || SI1->hasOneUse())) {
5289 Value *NewOp = Builder->CreateOr(SI0->getOperand(0), SI1->getOperand(0),
5291 return BinaryOperator::Create(SI1->getOpcode(), NewOp,
5292 SI1->getOperand(1));
5296 // ((A|B)&1)|(B&-2) -> (A&1) | B
5297 if (match(Op0, m_And(m_Or(m_Value(A), m_Value(B)), m_Value(C))) ||
5298 match(Op0, m_And(m_Value(C), m_Or(m_Value(A), m_Value(B))))) {
5299 Instruction *Ret = FoldOrWithConstants(I, Op1, A, B, C);
5300 if (Ret) return Ret;
5302 // (B&-2)|((A|B)&1) -> (A&1) | B
5303 if (match(Op1, m_And(m_Or(m_Value(A), m_Value(B)), m_Value(C))) ||
5304 match(Op1, m_And(m_Value(C), m_Or(m_Value(A), m_Value(B))))) {
5305 Instruction *Ret = FoldOrWithConstants(I, Op0, A, B, C);
5306 if (Ret) return Ret;
5309 // (~A | ~B) == (~(A & B)) - De Morgan's Law
5310 if (Value *Op0NotVal = dyn_castNotVal(Op0))
5311 if (Value *Op1NotVal = dyn_castNotVal(Op1))
5312 if (Op0->hasOneUse() && Op1->hasOneUse()) {
5313 Value *And = Builder->CreateAnd(Op0NotVal, Op1NotVal,
5314 I.getName()+".demorgan");
5315 return BinaryOperator::CreateNot(And);
5318 // (icmp1 A, B) | (icmp2 A, B) --> (icmp3 A, B)
5319 if (ICmpInst *RHS = dyn_cast<ICmpInst>(I.getOperand(1))) {
5320 if (Instruction *R = AssociativeOpt(I, FoldICmpLogical(*this, RHS)))
5323 if (ICmpInst *LHS = dyn_cast<ICmpInst>(I.getOperand(0)))
5324 if (Instruction *Res = FoldOrOfICmps(I, LHS, RHS))
5328 // fold (or (cast A), (cast B)) -> (cast (or A, B))
5329 if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
5330 if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
5331 if (Op0C->getOpcode() == Op1C->getOpcode()) {// same cast kind ?
5332 if (!isa<ICmpInst>(Op0C->getOperand(0)) ||
5333 !isa<ICmpInst>(Op1C->getOperand(0))) {
5334 const Type *SrcTy = Op0C->getOperand(0)->getType();
5335 if (SrcTy == Op1C->getOperand(0)->getType() &&
5336 SrcTy->isIntOrIntVector() &&
5337 // Only do this if the casts both really cause code to be
5339 ValueRequiresCast(Op0C->getOpcode(), Op0C->getOperand(0),
5341 ValueRequiresCast(Op1C->getOpcode(), Op1C->getOperand(0),
5343 Value *NewOp = Builder->CreateOr(Op0C->getOperand(0),
5344 Op1C->getOperand(0), I.getName());
5345 return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
5352 // (fcmp uno x, c) | (fcmp uno y, c) -> (fcmp uno x, y)
5353 if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0))) {
5354 if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
5355 if (Instruction *Res = FoldOrOfFCmps(I, LHS, RHS))
5359 return Changed ? &I : 0;
5364 // XorSelf - Implements: X ^ X --> 0
5367 XorSelf(Value *rhs) : RHS(rhs) {}
5368 bool shouldApply(Value *LHS) const { return LHS == RHS; }
5369 Instruction *apply(BinaryOperator &Xor) const {
5376 Instruction *InstCombiner::visitXor(BinaryOperator &I) {
5377 bool Changed = SimplifyCommutative(I);
5378 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
5380 if (isa<UndefValue>(Op1)) {
5381 if (isa<UndefValue>(Op0))
5382 // Handle undef ^ undef -> 0 special case. This is a common idiom.
5384 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
5385 return ReplaceInstUsesWith(I, Op1); // X ^ undef -> undef
5388 // xor X, X = 0, even if X is nested in a sequence of Xor's.
5389 if (Instruction *Result = AssociativeOpt(I, XorSelf(Op1))) {
5390 assert(Result == &I && "AssociativeOpt didn't work?"); Result=Result;
5391 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
5394 // See if we can simplify any instructions used by the instruction whose sole
5395 // purpose is to compute bits we don't care about.
5396 if (SimplifyDemandedInstructionBits(I))
5398 if (isa<VectorType>(I.getType()))
5399 if (isa<ConstantAggregateZero>(Op1))
5400 return ReplaceInstUsesWith(I, Op0); // X ^ <0,0> -> X
5402 // Is this a ~ operation?
5403 if (Value *NotOp = dyn_castNotVal(&I)) {
5404 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(NotOp)) {
5405 if (Op0I->getOpcode() == Instruction::And ||
5406 Op0I->getOpcode() == Instruction::Or) {
5407 // ~(~X & Y) --> (X | ~Y) - De Morgan's Law
5408 // ~(~X | Y) === (X & ~Y) - De Morgan's Law
5409 if (dyn_castNotVal(Op0I->getOperand(1)))
5410 Op0I->swapOperands();
5411 if (Value *Op0NotVal = dyn_castNotVal(Op0I->getOperand(0))) {
5413 Builder->CreateNot(Op0I->getOperand(1),
5414 Op0I->getOperand(1)->getName()+".not");
5415 if (Op0I->getOpcode() == Instruction::And)
5416 return BinaryOperator::CreateOr(Op0NotVal, NotY);
5417 return BinaryOperator::CreateAnd(Op0NotVal, NotY);
5420 // ~(X & Y) --> (~X | ~Y) - De Morgan's Law
5421 // ~(X | Y) === (~X & ~Y) - De Morgan's Law
5422 if (isFreeToInvert(Op0I->getOperand(0)) &&
5423 isFreeToInvert(Op0I->getOperand(1))) {
5425 Builder->CreateNot(Op0I->getOperand(0), "notlhs");
5427 Builder->CreateNot(Op0I->getOperand(1), "notrhs");
5428 if (Op0I->getOpcode() == Instruction::And)
5429 return BinaryOperator::CreateOr(NotX, NotY);
5430 return BinaryOperator::CreateAnd(NotX, NotY);
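// E.g. (illustrative): ~(%a & %b), i.e. "xor (and %a, %b), -1", becomes
// "or (xor %a, -1), (xor %b, -1)" when both operands are free to invert.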
5437 if (ConstantInt *RHS = dyn_cast<ConstantInt>(Op1)) {
5438 if (RHS->isOne() && Op0->hasOneUse()) {
5439 // xor (cmp A, B), true = not (cmp A, B) = !cmp A, B
5440 if (ICmpInst *ICI = dyn_cast<ICmpInst>(Op0))
5441 return new ICmpInst(ICI->getInversePredicate(),
5442 ICI->getOperand(0), ICI->getOperand(1));
5444 if (FCmpInst *FCI = dyn_cast<FCmpInst>(Op0))
5445 return new FCmpInst(FCI->getInversePredicate(),
5446 FCI->getOperand(0), FCI->getOperand(1));
5449 // fold (xor(zext(cmp)), 1) and (xor(sext(cmp)), -1) to ext(!cmp).
5450 if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
5451 if (CmpInst *CI = dyn_cast<CmpInst>(Op0C->getOperand(0))) {
5452 if (CI->hasOneUse() && Op0C->hasOneUse()) {
5453 Instruction::CastOps Opcode = Op0C->getOpcode();
5454 if ((Opcode == Instruction::ZExt || Opcode == Instruction::SExt) &&
5455 (RHS == ConstantExpr::getCast(Opcode,
5456 ConstantInt::getTrue(*Context),
5457 Op0C->getDestTy()))) {
5458 CI->setPredicate(CI->getInversePredicate());
5459 return CastInst::Create(Opcode, CI, Op0C->getType());
5465 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
5466 // ~(c-X) == X-c-1 == X+(-c-1)
5467 if (Op0I->getOpcode() == Instruction::Sub && RHS->isAllOnesValue())
5468 if (Constant *Op0I0C = dyn_cast<Constant>(Op0I->getOperand(0))) {
5469 Constant *NegOp0I0C = ConstantExpr::getNeg(Op0I0C);
5470 Constant *ConstantRHS = ConstantExpr::getSub(NegOp0I0C,
5471 ConstantInt::get(I.getType(), 1));
5472 return BinaryOperator::CreateAdd(Op0I->getOperand(1), ConstantRHS);
5475 if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
5476 if (Op0I->getOpcode() == Instruction::Add) {
5477 // ~(X+c) --> (-c-1)-X
5478 if (RHS->isAllOnesValue()) {
5479 Constant *NegOp0CI = ConstantExpr::getNeg(Op0CI);
5480 return BinaryOperator::CreateSub(
5481 ConstantExpr::getSub(NegOp0CI,
5482 ConstantInt::get(I.getType(), 1)),
5483 Op0I->getOperand(0));
5484 } else if (RHS->getValue().isSignBit()) {
5485 // (X + C) ^ signbit -> (X + C + signbit)
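// E.g. (illustrative, i8): (X + 5) ^ 0x80 becomes X + 0x85, since xor'ing
// the sign bit is the same as adding 0x80 modulo 256.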
5486 Constant *C = ConstantInt::get(*Context,
5487 RHS->getValue() + Op0CI->getValue());
5488 return BinaryOperator::CreateAdd(Op0I->getOperand(0), C);
5491 } else if (Op0I->getOpcode() == Instruction::Or) {
5492 // (X|C1)^C2 -> X^(C1|C2) iff X&~C1 == 0
5493 if (MaskedValueIsZero(Op0I->getOperand(0), Op0CI->getValue())) {
5494 Constant *NewRHS = ConstantExpr::getOr(Op0CI, RHS);
5495 // Anything in both C1 and C2 is known to be zero, remove it from
5497 Constant *CommonBits = ConstantExpr::getAnd(Op0CI, RHS);
5498 NewRHS = ConstantExpr::getAnd(NewRHS,
5499 ConstantExpr::getNot(CommonBits));
5501 I.setOperand(0, Op0I->getOperand(0));
5502 I.setOperand(1, NewRHS);
5509 // Try to fold constant and into select arguments.
5510 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
5511 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
5513 if (isa<PHINode>(Op0))
5514 if (Instruction *NV = FoldOpIntoPhi(I))
5518 if (Value *X = dyn_castNotVal(Op0)) // ~A ^ A == -1
5520 return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));
5522 if (Value *X = dyn_castNotVal(Op1)) // A ^ ~A == -1
5524 return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType()));
5527 BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1);
5530 if (match(Op1I, m_Or(m_Value(A), m_Value(B)))) {
5531 if (A == Op0) { // B^(B|A) == (A|B)^B
5532 Op1I->swapOperands();
5534 std::swap(Op0, Op1);
5535 } else if (B == Op0) { // B^(A|B) == (A|B)^B
5536 I.swapOperands(); // Simplified below.
5537 std::swap(Op0, Op1);
5539 } else if (match(Op1I, m_Xor(m_Specific(Op0), m_Value(B)))) {
5540 return ReplaceInstUsesWith(I, B); // A^(A^B) == B
5541 } else if (match(Op1I, m_Xor(m_Value(A), m_Specific(Op0)))) {
5542 return ReplaceInstUsesWith(I, A); // A^(B^A) == B
5543 } else if (match(Op1I, m_And(m_Value(A), m_Value(B))) &&
5545 if (A == Op0) { // A^(A&B) -> A^(B&A)
5546 Op1I->swapOperands();
5549 if (B == Op0) { // A^(B&A) -> (B&A)^A
5550 I.swapOperands(); // Simplified below.
5551 std::swap(Op0, Op1);
5556 BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0);
5559 if (match(Op0I, m_Or(m_Value(A), m_Value(B))) &&
5560 Op0I->hasOneUse()) {
5561 if (A == Op1) // (B|A)^B == (A|B)^B
5563 if (B == Op1) // (A|B)^B == A & ~B
5564 return BinaryOperator::CreateAnd(A, Builder->CreateNot(Op1, "tmp"));
5565 } else if (match(Op0I, m_Xor(m_Specific(Op1), m_Value(B)))) {
5566 return ReplaceInstUsesWith(I, B); // (A^B)^A == B
5567 } else if (match(Op0I, m_Xor(m_Value(A), m_Specific(Op1)))) {
5568 return ReplaceInstUsesWith(I, A); // (B^A)^A == B
5569 } else if (match(Op0I, m_And(m_Value(A), m_Value(B))) &&
5571 if (A == Op1) // (A&B)^A -> (B&A)^A
5573 if (B == Op1 && // (B&A)^A == ~B & A
5574 !isa<ConstantInt>(Op1)) { // Canonical form is (B&C)^C
5575 return BinaryOperator::CreateAnd(Builder->CreateNot(A, "tmp"), Op1);
5580 // (X >> Z) ^ (Y >> Z) -> (X^Y) >> Z for all shifts.
5581 if (Op0I && Op1I && Op0I->isShift() &&
5582 Op0I->getOpcode() == Op1I->getOpcode() &&
5583 Op0I->getOperand(1) == Op1I->getOperand(1) &&
5584 (Op0I->hasOneUse() || Op1I->hasOneUse())) {
5586 Builder->CreateXor(Op0I->getOperand(0), Op1I->getOperand(0),
5588 return BinaryOperator::Create(Op1I->getOpcode(), NewOp,
5589 Op1I->getOperand(1));
5593 Value *A, *B, *C, *D;
5594 // (A & B)^(A | B) -> A ^ B
5595 if (match(Op0I, m_And(m_Value(A), m_Value(B))) &&
5596 match(Op1I, m_Or(m_Value(C), m_Value(D)))) {
5597 if ((A == C && B == D) || (A == D && B == C))
5598 return BinaryOperator::CreateXor(A, B);
5600 // (A | B)^(A & B) -> A ^ B
5601 if (match(Op0I, m_Or(m_Value(A), m_Value(B))) &&
5602 match(Op1I, m_And(m_Value(C), m_Value(D)))) {
5603 if ((A == C && B == D) || (A == D && B == C))
5604 return BinaryOperator::CreateXor(A, B);
5608 if ((Op0I->hasOneUse() || Op1I->hasOneUse()) &&
5609 match(Op0I, m_And(m_Value(A), m_Value(B))) &&
5610 match(Op1I, m_And(m_Value(C), m_Value(D)))) {
5611 // (X & Y)^(X & Z) -> (Y^Z) & X
5612 Value *X = 0, *Y = 0, *Z = 0;
5614 X = A, Y = B, Z = D;
5616 X = A, Y = B, Z = C;
5618 X = B, Y = A, Z = D;
5620 X = B, Y = A, Z = C;
5623 Value *NewOp = Builder->CreateXor(Y, Z, Op0->getName());
5624 return BinaryOperator::CreateAnd(NewOp, X);
5629 // (icmp1 A, B) ^ (icmp2 A, B) --> (icmp3 A, B)
5630 if (ICmpInst *RHS = dyn_cast<ICmpInst>(I.getOperand(1)))
5631 if (Instruction *R = AssociativeOpt(I, FoldICmpLogical(*this, RHS)))
5634 // fold (xor (cast A), (cast B)) -> (cast (xor A, B))
5635 if (CastInst *Op0C = dyn_cast<CastInst>(Op0)) {
5636 if (CastInst *Op1C = dyn_cast<CastInst>(Op1))
5637 if (Op0C->getOpcode() == Op1C->getOpcode()) { // same cast kind?
5638 const Type *SrcTy = Op0C->getOperand(0)->getType();
5639 if (SrcTy == Op1C->getOperand(0)->getType() && SrcTy->isInteger() &&
5640 // Only do this if the casts both really cause code to be generated.
5641 ValueRequiresCast(Op0C->getOpcode(), Op0C->getOperand(0),
5643 ValueRequiresCast(Op1C->getOpcode(), Op1C->getOperand(0),
5645 Value *NewOp = Builder->CreateXor(Op0C->getOperand(0),
5646 Op1C->getOperand(0), I.getName());
5647 return CastInst::Create(Op0C->getOpcode(), NewOp, I.getType());
5652 return Changed ? &I : 0;
5655 static ConstantInt *ExtractElement(Constant *V, Constant *Idx,
5656 LLVMContext *Context) {
5657 return cast<ConstantInt>(ConstantExpr::getExtractElement(V, Idx));
5660 static bool HasAddOverflow(ConstantInt *Result,
5661 ConstantInt *In1, ConstantInt *In2,
5664 if (In2->getValue().isNegative())
5665 return Result->getValue().sgt(In1->getValue());
5667 return Result->getValue().slt(In1->getValue());
5669 return Result->getValue().ult(In1->getValue());
5672 /// AddWithOverflow - Compute Result = In1+In2, returning true if the result
5673 /// overflowed for this type.
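/// For example (illustrative, signed i8): 100 + 50 wraps to -106; In2 (50) is
/// non-negative but the result is signed-less-than In1 (100), so the overflow
/// is reported. In the unsigned case the sum overflows iff it is less than In1.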
5674 static bool AddWithOverflow(Constant *&Result, Constant *In1,
5675 Constant *In2, LLVMContext *Context,
5676 bool IsSigned = false) {
5677 Result = ConstantExpr::getAdd(In1, In2);
5679 if (const VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
5680 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
5681 Constant *Idx = ConstantInt::get(Type::getInt32Ty(*Context), i);
5682 if (HasAddOverflow(ExtractElement(Result, Idx, Context),
5683 ExtractElement(In1, Idx, Context),
5684 ExtractElement(In2, Idx, Context),
5691 return HasAddOverflow(cast<ConstantInt>(Result),
5692 cast<ConstantInt>(In1), cast<ConstantInt>(In2),
5696 static bool HasSubOverflow(ConstantInt *Result,
5697 ConstantInt *In1, ConstantInt *In2,
5700 if (In2->getValue().isNegative())
5701 return Result->getValue().slt(In1->getValue());
5703 return Result->getValue().sgt(In1->getValue());
5705 return Result->getValue().ugt(In1->getValue());
5708 /// SubWithOverflow - Compute Result = In1-In2, returning true if the result
5709 /// overflowed for this type.
5710 static bool SubWithOverflow(Constant *&Result, Constant *In1,
5711 Constant *In2, LLVMContext *Context,
5712 bool IsSigned = false) {
5713 Result = ConstantExpr::getSub(In1, In2);
5715 if (const VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
5716 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
5717 Constant *Idx = ConstantInt::get(Type::getInt32Ty(*Context), i);
5718 if (HasSubOverflow(ExtractElement(Result, Idx, Context),
5719 ExtractElement(In1, Idx, Context),
5720 ExtractElement(In2, Idx, Context),
5727 return HasSubOverflow(cast<ConstantInt>(Result),
5728 cast<ConstantInt>(In1), cast<ConstantInt>(In2),
5733 /// FoldGEPICmp - Fold comparisons between a GEP instruction and something
5734 /// else. At this point we know that the GEP is on the LHS of the comparison.
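/// For example (illustrative, when target data is available):
/// "icmp ult (gep inbounds i8* %p, i64 %i), %p" reduces to "icmp slt i64 %i, 0",
/// and two GEPs off the same base that differ in exactly one index reduce to a
/// signed compare of that index.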
5735 Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
5736 ICmpInst::Predicate Cond,
5738 // Look through bitcasts.
5739 if (BitCastInst *BCI = dyn_cast<BitCastInst>(RHS))
5740 RHS = BCI->getOperand(0);
5742 Value *PtrBase = GEPLHS->getOperand(0);
5743 if (TD && PtrBase == RHS && GEPLHS->isInBounds()) {
5744 // ((gep Ptr, OFFSET) cmp Ptr) ---> (OFFSET cmp 0).
5745 // This transformation (ignoring the base and scales) is valid because we
5746 // know pointers can't overflow since the gep is inbounds. See if we can
5747 // output an optimized form.
5748 Value *Offset = EvaluateGEPOffsetExpression(GEPLHS, I, *this);
5750 // If not, synthesize the offset the hard way.
5752 Offset = EmitGEPOffset(GEPLHS, *this);
5753 return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Offset,
5754 Constant::getNullValue(Offset->getType()));
5755 } else if (GEPOperator *GEPRHS = dyn_cast<GEPOperator>(RHS)) {
5756 // If the base pointers are different, but the indices are the same, just
5757 // compare the base pointer.
5758 if (PtrBase != GEPRHS->getOperand(0)) {
5759 bool IndicesTheSame = GEPLHS->getNumOperands()==GEPRHS->getNumOperands();
5760 IndicesTheSame &= GEPLHS->getOperand(0)->getType() ==
5761 GEPRHS->getOperand(0)->getType();
5763 for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
5764 if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
5765 IndicesTheSame = false;
5769 // If all indices are the same, just compare the base pointers.
5771 return new ICmpInst(ICmpInst::getSignedPredicate(Cond),
5772 GEPLHS->getOperand(0), GEPRHS->getOperand(0));
5774 // Otherwise, the base pointers are different and the indices are
5775 // different, bail out.
5779 // If one of the GEPs has all zero indices, recurse.
5780 bool AllZeros = true;
5781 for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
5782 if (!isa<Constant>(GEPLHS->getOperand(i)) ||
5783 !cast<Constant>(GEPLHS->getOperand(i))->isNullValue()) {
5788 return FoldGEPICmp(GEPRHS, GEPLHS->getOperand(0),
5789 ICmpInst::getSwappedPredicate(Cond), I);
5791 // If the other GEP has all zero indices, recurse.
5793 for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
5794 if (!isa<Constant>(GEPRHS->getOperand(i)) ||
5795 !cast<Constant>(GEPRHS->getOperand(i))->isNullValue()) {
5800 return FoldGEPICmp(GEPLHS, GEPRHS->getOperand(0), Cond, I);
5802 if (GEPLHS->getNumOperands() == GEPRHS->getNumOperands()) {
5803 // If the GEPs only differ by one index, compare it.
5804 unsigned NumDifferences = 0; // Keep track of # differences.
5805 unsigned DiffOperand = 0; // The operand that differs.
5806 for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
5807 if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
5808 if (GEPLHS->getOperand(i)->getType()->getPrimitiveSizeInBits() !=
5809 GEPRHS->getOperand(i)->getType()->getPrimitiveSizeInBits()) {
5810 // Irreconcilable differences.
5814 if (NumDifferences++) break;
5819 if (NumDifferences == 0) // SAME GEP?
5820 return ReplaceInstUsesWith(I, // No comparison is needed here.
5821 ConstantInt::get(Type::getInt1Ty(*Context),
5822 ICmpInst::isTrueWhenEqual(Cond)));
5824 else if (NumDifferences == 1) {
5825 Value *LHSV = GEPLHS->getOperand(DiffOperand);
5826 Value *RHSV = GEPRHS->getOperand(DiffOperand);
5827 // Make sure we do a signed comparison here.
5828 return new ICmpInst(ICmpInst::getSignedPredicate(Cond), LHSV, RHSV);
5832 // Only lower this if the icmp is the only user of the GEP or if we expect
5833 // the result to fold to a constant!
5835 (isa<ConstantExpr>(GEPLHS) || GEPLHS->hasOneUse()) &&
5836 (isa<ConstantExpr>(GEPRHS) || GEPRHS->hasOneUse())) {
5837 // ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2) ---> (OFFSET1 cmp OFFSET2)
5838 Value *L = EmitGEPOffset(GEPLHS, *this);
5839 Value *R = EmitGEPOffset(GEPRHS, *this);
5840 return new ICmpInst(ICmpInst::getSignedPredicate(Cond), L, R);
5846 /// FoldFCmp_IntToFP_Cst - Fold fcmp ([us]itofp x, cst) if possible.
5848 Instruction *InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst &I,
5851 if (!isa<ConstantFP>(RHSC)) return 0;
5852 const APFloat &RHS = cast<ConstantFP>(RHSC)->getValueAPF();
5854 // Get the width of the mantissa. We don't want to hack on conversions that
5855 // might lose information from the integer, e.g. "i64 -> float"
5856 int MantissaWidth = LHSI->getType()->getFPMantissaWidth();
5857 if (MantissaWidth == -1) return 0; // Unknown.
5859 // Check to see that the input is converted from an integer type that is small
5860 // enough to preserve all of its bits. TODO: check here for "known" sign bits.
5861 // This would allow us to handle (fptosi (x >>s 62) to float) if x is i64 f.e.
5862 unsigned InputSize = LHSI->getOperand(0)->getType()->getScalarSizeInBits();
5864 // If this is a uitofp instruction, we need an extra bit to hold the sign.
5865 bool LHSUnsigned = isa<UIToFPInst>(LHSI);
5869 // If the conversion would lose info, don't hack on this.
5870 if ((int)InputSize > MantissaWidth)
5873 // Otherwise, we can potentially simplify the comparison. We know that it
5874 // will always come through as an integer value and we know the constant is
5875 // not a NAN (it would have been previously simplified).
5876 assert(!RHS.isNaN() && "NaN comparison not already folded!");
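// For illustration: "fcmp olt (sitofp i8 %x to float), 300.0" is always true
// because every i8 is below 300, while "fcmp olt (sitofp i32 %x to double),
// 4.4" becomes "icmp sle i32 %x, 4" once the fractional constant is rounded.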
5878 ICmpInst::Predicate Pred;
5879 switch (I.getPredicate()) {
5880 default: llvm_unreachable("Unexpected predicate!");
5881 case FCmpInst::FCMP_UEQ:
5882 case FCmpInst::FCMP_OEQ:
5883 Pred = ICmpInst::ICMP_EQ;
5885 case FCmpInst::FCMP_UGT:
5886 case FCmpInst::FCMP_OGT:
5887 Pred = LHSUnsigned ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_SGT;
5889 case FCmpInst::FCMP_UGE:
5890 case FCmpInst::FCMP_OGE:
5891 Pred = LHSUnsigned ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_SGE;
5893 case FCmpInst::FCMP_ULT:
5894 case FCmpInst::FCMP_OLT:
5895 Pred = LHSUnsigned ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_SLT;
5897 case FCmpInst::FCMP_ULE:
5898 case FCmpInst::FCMP_OLE:
5899 Pred = LHSUnsigned ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_SLE;
5901 case FCmpInst::FCMP_UNE:
5902 case FCmpInst::FCMP_ONE:
5903 Pred = ICmpInst::ICMP_NE;
5905 case FCmpInst::FCMP_ORD:
5906 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
5907 case FCmpInst::FCMP_UNO:
5908 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
5911 const IntegerType *IntTy = cast<IntegerType>(LHSI->getOperand(0)->getType());
5913 // Now we know that the APFloat is a normal number, zero or inf.
5915 // See if the FP constant is too large for the integer. For example,
5916 // comparing an i8 to 300.0.
5917 unsigned IntWidth = IntTy->getScalarSizeInBits();
5920 // If the RHS value is > SignedMax, fold the comparison. This handles +INF
5921 // and large values.
5922 APFloat SMax(RHS.getSemantics(), APFloat::fcZero, false);
5923 SMax.convertFromAPInt(APInt::getSignedMaxValue(IntWidth), true,
5924 APFloat::rmNearestTiesToEven);
5925 if (SMax.compare(RHS) == APFloat::cmpLessThan) { // smax < 13123.0
5926 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SLT ||
5927 Pred == ICmpInst::ICMP_SLE)
5928 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
5929 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
5932 // If the RHS value is > UnsignedMax, fold the comparison. This handles
5933 // +INF and large values.
5934 APFloat UMax(RHS.getSemantics(), APFloat::fcZero, false);
5935 UMax.convertFromAPInt(APInt::getMaxValue(IntWidth), false,
5936 APFloat::rmNearestTiesToEven);
5937 if (UMax.compare(RHS) == APFloat::cmpLessThan) { // umax < 13123.0
5938 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_ULT ||
5939 Pred == ICmpInst::ICMP_ULE)
5940 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
5941 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
5946 // See if the RHS value is < SignedMin.
5947 APFloat SMin(RHS.getSemantics(), APFloat::fcZero, false);
5948 SMin.convertFromAPInt(APInt::getSignedMinValue(IntWidth), true,
5949 APFloat::rmNearestTiesToEven);
5950 if (SMin.compare(RHS) == APFloat::cmpGreaterThan) { // smin > 12312.0
5951 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT ||
5952 Pred == ICmpInst::ICMP_SGE)
5953 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
5954 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
5958 // Okay, now we know that the FP constant fits in the range [SMIN, SMAX] or
5959 // [0, UMAX], but it may still be fractional. See if it is fractional by
5960 // casting the FP value to the integer value and back, checking for equality.
5961 // Don't do this for zero, because -0.0 is not fractional.
5962 Constant *RHSInt = LHSUnsigned
5963 ? ConstantExpr::getFPToUI(RHSC, IntTy)
5964 : ConstantExpr::getFPToSI(RHSC, IntTy);
5965 if (!RHS.isZero()) {
5966 bool Equal = LHSUnsigned
5967 ? ConstantExpr::getUIToFP(RHSInt, RHSC->getType()) == RHSC
5968 : ConstantExpr::getSIToFP(RHSInt, RHSC->getType()) == RHSC;
5970 // If we had a comparison against a fractional value, we have to adjust
5971 // the compare predicate and sometimes the value. RHSC is rounded towards
5972 // zero at this point.
5974 default: llvm_unreachable("Unexpected integer comparison!");
5975 case ICmpInst::ICMP_NE: // (float)int != 4.4 --> true
5976 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
5977 case ICmpInst::ICMP_EQ: // (float)int == 4.4 --> false
5978 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
5979 case ICmpInst::ICMP_ULE:
5980 // (float)int <= 4.4 --> int <= 4
5981 // (float)int <= -4.4 --> false
5982 if (RHS.isNegative())
5983 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
5985 case ICmpInst::ICMP_SLE:
5986 // (float)int <= 4.4 --> int <= 4
5987 // (float)int <= -4.4 --> int < -4
5988 if (RHS.isNegative())
5989 Pred = ICmpInst::ICMP_SLT;
5991 case ICmpInst::ICMP_ULT:
5992 // (float)int < -4.4 --> false
5993 // (float)int < 4.4 --> int <= 4
5994 if (RHS.isNegative())
5995 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
5996 Pred = ICmpInst::ICMP_ULE;
5998 case ICmpInst::ICMP_SLT:
5999 // (float)int < -4.4 --> int < -4
6000 // (float)int < 4.4 --> int <= 4
6001 if (!RHS.isNegative())
6002 Pred = ICmpInst::ICMP_SLE;
6004 case ICmpInst::ICMP_UGT:
6005 // (float)int > 4.4 --> int > 4
6006 // (float)int > -4.4 --> true
6007 if (RHS.isNegative())
6008 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
6010 case ICmpInst::ICMP_SGT:
6011 // (float)int > 4.4 --> int > 4
6012 // (float)int > -4.4 --> int >= -4
6013 if (RHS.isNegative())
6014 Pred = ICmpInst::ICMP_SGE;
6016 case ICmpInst::ICMP_UGE:
6017 // (float)int >= -4.4 --> true
6018 // (float)int >= 4.4 --> int > 4
6019 if (!RHS.isNegative())
6020 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
6021 Pred = ICmpInst::ICMP_UGT;
6023 case ICmpInst::ICMP_SGE:
6024 // (float)int >= -4.4 --> int >= -4
6025 // (float)int >= 4.4 --> int > 4
6026 if (!RHS.isNegative())
6027 Pred = ICmpInst::ICMP_SGT;
6033 // Lower this FP comparison into an appropriate integer version of the
6035 return new ICmpInst(Pred, LHSI->getOperand(0), RHSInt);
6038 /// FoldCmpLoadFromIndexedGlobal - Called when we see this pattern:
6039 /// cmp pred (load (gep GV, ...)), cmpcst
6040 /// where GV is a global variable with a constant initializer. Try to simplify
6041 /// this into some simple computation that does not need the load. For example
6042 /// we can optimize "icmp eq (load (gep "foo", 0, i)), 0" into "icmp eq i, 3".
6044 /// If AndCst is non-null, then the loaded value is masked with that constant
6045 /// before doing the comparison. This handles cases like "A[i]&4 == 0".
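/// For example (illustrative): with @A = constant [4 x i32] [i32 9, i32 7,
/// i32 9, i32 9], the compare "icmp eq (load (gep @A, 0, i)), 7" only holds
/// for i == 1, so the load and compare become "icmp eq i, 1".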
6046 Instruction *InstCombiner::
6047 FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
6048 CmpInst &ICI, ConstantInt *AndCst) {
6049 ConstantArray *Init = dyn_cast<ConstantArray>(GV->getInitializer());
6050 if (Init == 0 || Init->getNumOperands() > 1024) return 0;
6052 // There are many forms of this optimization we can handle, for now, just do
6053 // the simple index into a single-dimensional array.
6055 // Require: GEP GV, 0, i {{, constant indices}}
6056 if (GEP->getNumOperands() < 3 ||
6057 !isa<ConstantInt>(GEP->getOperand(1)) ||
6058 !cast<ConstantInt>(GEP->getOperand(1))->isZero() ||
6059 isa<Constant>(GEP->getOperand(2)))
6062 // Check that indices after the variable are constants and in-range for the
6063 // type they index. Collect the indices. This is typically for arrays of structs.
6065 SmallVector<unsigned, 4> LaterIndices;
6067 const Type *EltTy = cast<ArrayType>(Init->getType())->getElementType();
6068 for (unsigned i = 3, e = GEP->getNumOperands(); i != e; ++i) {
6069 ConstantInt *Idx = dyn_cast<ConstantInt>(GEP->getOperand(i));
6070 if (Idx == 0) return 0; // Variable index.
6072 uint64_t IdxVal = Idx->getZExtValue();
6073 if ((unsigned)IdxVal != IdxVal) return 0; // Too large array index.
6075 if (const StructType *STy = dyn_cast<StructType>(EltTy))
6076 EltTy = STy->getElementType(IdxVal);
6077 else if (const ArrayType *ATy = dyn_cast<ArrayType>(EltTy)) {
6078 if (IdxVal >= ATy->getNumElements()) return 0;
6079 EltTy = ATy->getElementType();
6081 return 0; // Unknown type.
6084 LaterIndices.push_back(IdxVal);
6087 enum { Overdefined = -3, Undefined = -2 };
6089 // Variables for our state machines.
6091 // FirstTrueElement/SecondTrueElement - Used to emit a comparison of the form
6092 // "i == 47 | i == 87", where 47 is the first index the condition is true for,
6093 // and 87 is the second (and last) index. FirstTrueElement is -2 when
6094 // undefined, otherwise set to the first true element. SecondTrueElement is
6095 // -2 when undefined, -3 when overdefined and >= 0 when that index is true.
6096 int FirstTrueElement = Undefined, SecondTrueElement = Undefined;
6098 // FirstFalseElement/SecondFalseElement - Used to emit a comparison of the
6099 // form "i != 47 & i != 87". Same state transitions as for true elements.
6100 int FirstFalseElement = Undefined, SecondFalseElement = Undefined;
6102 /// TrueRangeEnd/FalseRangeEnd - In conjunction with First*Element, these
6103 /// define a state machine that triggers for ranges of values that the index
6104 /// is true or false for. This triggers on things like "abbbbc"[i] == 'b'.
6105 /// This is -2 when undefined, -3 when overdefined, and otherwise the last
6106 /// index in the range (inclusive). We use -2 for undefined here because we
6107 /// use relative comparisons and don't want 0-1 to match -1.
6108 int TrueRangeEnd = Undefined, FalseRangeEnd = Undefined;
6110 // MagicBitvector - This is a magic bitvector where we set a bit if the
6111 // comparison is true for element 'i'. If there are 64 elements or less in
6112 // the array, this will fully represent all the comparison results.
6113 uint64_t MagicBitvector = 0;
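// E.g. (illustrative): for an 8-element array where the compare holds at
// indices 1, 3 and 6, MagicBitvector is 0x4A and the fallback below emits
// "((0x4A >> i) & 1) != 0".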
6116 // Scan the array and see if one of our patterns matches.
6117 Constant *CompareRHS = cast<Constant>(ICI.getOperand(1));
6118 for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
6119 Constant *Elt = Init->getOperand(i);
6121 // If this is indexing an array of structures, get the structure element.
6122 if (!LaterIndices.empty())
6123 Elt = ConstantExpr::getExtractValue(Elt, LaterIndices.data(),
6124 LaterIndices.size());
6126 // If the element is masked, handle it.
6127 if (AndCst) Elt = ConstantExpr::getAnd(Elt, AndCst);
6129 // Find out if the comparison would be true or false for the i'th element.
6130 Constant *C = ConstantFoldCompareInstOperands(ICI.getPredicate(), Elt,
6132 // If the result is undef for this element, ignore it.
6133 if (isa<UndefValue>(C)) {
6134 // Extend range state machines to cover this element in case there is an
6135 // undef in the middle of the range.
6136 if (TrueRangeEnd == (int)i-1)
6138 if (FalseRangeEnd == (int)i-1)
6143 // If we can't compute the result for any of the elements, we have to give
6144 // up evaluating the entire conditional.
6145 if (!isa<ConstantInt>(C)) return 0;
6147 // Otherwise, we know if the comparison is true or false for this element,
6148 // update our state machines.
6149 bool IsTrueForElt = !cast<ConstantInt>(C)->isZero();
6151 // State machine for single/double/range index comparison.
6153 // Update the TrueElement state machine.
6154 if (FirstTrueElement == Undefined)
6155 FirstTrueElement = TrueRangeEnd = i; // First true element.
6157 // Update double-compare state machine.
6158 if (SecondTrueElement == Undefined)
6159 SecondTrueElement = i;
6161 SecondTrueElement = Overdefined;
6163 // Update range state machine.
6164 if (TrueRangeEnd == (int)i-1)
6167 TrueRangeEnd = Overdefined;
6170 // Update the FalseElement state machine.
6171 if (FirstFalseElement == Undefined)
6172 FirstFalseElement = FalseRangeEnd = i; // First false element.
6174 // Update double-compare state machine.
6175 if (SecondFalseElement == Undefined)
6176 SecondFalseElement = i;
6178 SecondFalseElement = Overdefined;
6180 // Update range state machine.
6181 if (FalseRangeEnd == (int)i-1)
6184 FalseRangeEnd = Overdefined;
6189 // If this element is in range, update our magic bitvector.
6190 if (i < 64 && IsTrueForElt)
6191 MagicBitvector |= 1ULL << i;
6193 // If all of our states become overdefined, bail out early. Since the
6194 // predicate is expensive, only check it every 8 elements. This is only
6195 // really useful for really huge arrays.
6196 if ((i & 7) == 0 && i >= 64 && SecondTrueElement == Overdefined &&
6197 SecondFalseElement == Overdefined && TrueRangeEnd == Overdefined &&
6198 FalseRangeEnd == Overdefined)
6202 // Now that we've scanned the entire array, emit our new comparison(s). We
6203 // order the state machines in complexity of the generated code.
6204 Value *Idx = GEP->getOperand(2);
6207 // If the comparison is only true for one or two elements, emit direct
6209 if (SecondTrueElement != Overdefined) {
6210 // None true -> false.
6211 if (FirstTrueElement == Undefined)
6212 return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(*Context));
6214 Value *FirstTrueIdx = ConstantInt::get(Idx->getType(), FirstTrueElement);
6216 // True for one element -> 'i == 47'.
6217 if (SecondTrueElement == Undefined)
6218 return new ICmpInst(ICmpInst::ICMP_EQ, Idx, FirstTrueIdx);
6220 // True for two elements -> 'i == 47 | i == 72'.
6221 Value *C1 = Builder->CreateICmpEQ(Idx, FirstTrueIdx);
6222 Value *SecondTrueIdx = ConstantInt::get(Idx->getType(), SecondTrueElement);
6223 Value *C2 = Builder->CreateICmpEQ(Idx, SecondTrueIdx);
6224 return BinaryOperator::CreateOr(C1, C2);
6227 // If the comparison is only false for one or two elements, emit direct
6229 if (SecondFalseElement != Overdefined) {
6230 // None false -> true.
6231 if (FirstFalseElement == Undefined)
6232 return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(*Context));
6234 Value *FirstFalseIdx = ConstantInt::get(Idx->getType(), FirstFalseElement);
6236 // False for one element -> 'i != 47'.
6237 if (SecondFalseElement == Undefined)
6238 return new ICmpInst(ICmpInst::ICMP_NE, Idx, FirstFalseIdx);
6240 // False for two elements -> 'i != 47 & i != 72'.
6241 Value *C1 = Builder->CreateICmpNE(Idx, FirstFalseIdx);
6242 Value *SecondFalseIdx = ConstantInt::get(Idx->getType(),SecondFalseElement);
6243 Value *C2 = Builder->CreateICmpNE(Idx, SecondFalseIdx);
6244 return BinaryOperator::CreateAnd(C1, C2);
6247 // If the comparison can be replaced with a range comparison for the elements
6248 // where it is true, emit the range check.
6249 if (TrueRangeEnd != Overdefined) {
6250 assert(TrueRangeEnd != FirstTrueElement && "Should emit single compare");
6252 // Generate (i-FirstTrue) <u (TrueRangeEnd-FirstTrue+1).
6253 if (FirstTrueElement) {
6254 Value *Offs = ConstantInt::get(Idx->getType(), -FirstTrueElement);
6255 Idx = Builder->CreateAdd(Idx, Offs);
6258 Value *End = ConstantInt::get(Idx->getType(),
6259 TrueRangeEnd-FirstTrueElement+1);
6260 return new ICmpInst(ICmpInst::ICMP_ULT, Idx, End);
6263 // False range check.
6264 if (FalseRangeEnd != Overdefined) {
6265 assert(FalseRangeEnd != FirstFalseElement && "Should emit single compare");
6266 // Generate (i-FirstFalse) >u (FalseRangeEnd-FirstFalse).
6267 if (FirstFalseElement) {
6268 Value *Offs = ConstantInt::get(Idx->getType(), -FirstFalseElement);
6269 Idx = Builder->CreateAdd(Idx, Offs);
6272 Value *End = ConstantInt::get(Idx->getType(),
6273 FalseRangeEnd-FirstFalseElement);
6274 return new ICmpInst(ICmpInst::ICMP_UGT, Idx, End);
6278 // If a 32-bit or 64-bit magic bitvector captures the entire comparison state
6279 // of this load, replace it with computation that does:
6280 // ((magic_cst >> i) & 1) != 0
6281 if (Init->getNumOperands() <= 32 ||
6282 (TD && Init->getNumOperands() <= 64 && TD->isLegalInteger(64))) {
6284 if (Init->getNumOperands() <= 32)
6285 Ty = Type::getInt32Ty(Init->getContext());
6287 Ty = Type::getInt64Ty(Init->getContext());
6288 Value *V = Builder->CreateIntCast(Idx, Ty, false);
6289 V = Builder->CreateLShr(ConstantInt::get(Ty, MagicBitvector), V);
6290 V = Builder->CreateAnd(ConstantInt::get(Ty, 1), V);
6291 return new ICmpInst(ICmpInst::ICMP_NE, V, ConstantInt::get(Ty, 0));
6298 Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
6299 bool Changed = false;
6301 /// Orders the operands of the compare so that they are listed from most
6302 /// complex to least complex. This puts binary operators before unary operators,
6303 /// and unary operators before constants, so constants end up on the RHS.
6304 if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1))) {
6309 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
6311 if (Value *V = SimplifyFCmpInst(I.getPredicate(), Op0, Op1, TD))
6312 return ReplaceInstUsesWith(I, V);
6314 // Simplify 'fcmp pred X, X'
6316 switch (I.getPredicate()) {
6317 default: llvm_unreachable("Unknown predicate!");
6318 case FCmpInst::FCMP_UNO: // True if unordered: isnan(X) | isnan(Y)
6319 case FCmpInst::FCMP_ULT: // True if unordered or less than
6320 case FCmpInst::FCMP_UGT: // True if unordered or greater than
6321 case FCmpInst::FCMP_UNE: // True if unordered or not equal
6322 // Canonicalize these to be 'fcmp uno %X, 0.0'.
6323 I.setPredicate(FCmpInst::FCMP_UNO);
6324 I.setOperand(1, Constant::getNullValue(Op0->getType()));
6327 case FCmpInst::FCMP_ORD: // True if ordered (no nans)
6328 case FCmpInst::FCMP_OEQ: // True if ordered and equal
6329 case FCmpInst::FCMP_OGE: // True if ordered and greater than or equal
6330 case FCmpInst::FCMP_OLE: // True if ordered and less than or equal
6331 // Canonicalize these to be 'fcmp ord %X, 0.0'.
6332 I.setPredicate(FCmpInst::FCMP_ORD);
6333 I.setOperand(1, Constant::getNullValue(Op0->getType()));
6338 // Handle fcmp with constant RHS
6339 if (Constant *RHSC = dyn_cast<Constant>(Op1)) {
6340 if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
6341 switch (LHSI->getOpcode()) {
6342 case Instruction::PHI:
6343 // Only fold fcmp into the PHI if the phi and fcmp are in the same
6344 // block. If in the same block, we're encouraging jump threading. If
6345 // not, we are just pessimizing the code by making an i1 phi.
6346 if (LHSI->getParent() == I.getParent())
6347 if (Instruction *NV = FoldOpIntoPhi(I, true))
6350 case Instruction::SIToFP:
6351 case Instruction::UIToFP:
6352 if (Instruction *NV = FoldFCmp_IntToFP_Cst(I, LHSI, RHSC))
6355 case Instruction::Select: {
6356 // If either operand of the select is a constant, we can fold the
6357 // comparison into the select arms, which will cause one to be
6358 // constant folded and the select turned into a bitwise or.
6359 Value *Op1 = 0, *Op2 = 0;
6360 if (LHSI->hasOneUse()) {
6361 if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1))) {
6362 // Fold the known value into the constant operand.
6363 Op1 = ConstantExpr::getCompare(I.getPredicate(), C, RHSC);
6364 // Insert a new FCmp of the other select operand.
6365 Op2 = Builder->CreateFCmp(I.getPredicate(),
6366 LHSI->getOperand(2), RHSC, I.getName());
6367 } else if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2))) {
6368 // Fold the known value into the constant operand.
6369 Op2 = ConstantExpr::getCompare(I.getPredicate(), C, RHSC);
6370 // Insert a new FCmp of the other select operand.
6371 Op1 = Builder->CreateFCmp(I.getPredicate(), LHSI->getOperand(1),
6377 return SelectInst::Create(LHSI->getOperand(0), Op1, Op2);
6380 case Instruction::Load:
6381 if (GetElementPtrInst *GEP =
6382 dyn_cast<GetElementPtrInst>(LHSI->getOperand(0))) {
6383 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
6384 if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
6385 !cast<LoadInst>(LHSI)->isVolatile())
6386 if (Instruction *Res = FoldCmpLoadFromIndexedGlobal(GEP, GV, I))
6393 return Changed ? &I : 0;
6396 Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
6397 bool Changed = false;
6399 /// Orders the operands of the compare so that they are listed from most
6400 /// complex to least complex. Constants rank below unary operators, which rank
6401 /// below binary operators, so constants end up on the RHS of the compare.
6402 if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1))) {
6407 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
6409 if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1, TD))
6410 return ReplaceInstUsesWith(I, V);
6412 const Type *Ty = Op0->getType();
6414 // icmp's with boolean values can always be turned into bitwise operations
6415 if (Ty == Type::getInt1Ty(*Context)) {
6416 switch (I.getPredicate()) {
6417 default: llvm_unreachable("Invalid icmp instruction!");
6418 case ICmpInst::ICMP_EQ: { // icmp eq i1 A, B -> ~(A^B)
6419 Value *Xor = Builder->CreateXor(Op0, Op1, I.getName()+"tmp");
6420 return BinaryOperator::CreateNot(Xor);
6422 case ICmpInst::ICMP_NE: // icmp ne i1 A, B -> A^B
6423 return BinaryOperator::CreateXor(Op0, Op1);
6425 case ICmpInst::ICMP_UGT:
6426 std::swap(Op0, Op1); // Change icmp ugt -> icmp ult
6428 case ICmpInst::ICMP_ULT:{ // icmp ult i1 A, B -> ~A & B
6429 Value *Not = Builder->CreateNot(Op0, I.getName()+"tmp");
6430 return BinaryOperator::CreateAnd(Not, Op1);
6432 case ICmpInst::ICMP_SGT:
6433 std::swap(Op0, Op1); // Change icmp sgt -> icmp slt
6435 case ICmpInst::ICMP_SLT: { // icmp slt i1 A, B -> A & ~B
6436 Value *Not = Builder->CreateNot(Op1, I.getName()+"tmp");
6437 return BinaryOperator::CreateAnd(Not, Op0);
6439 case ICmpInst::ICMP_UGE:
6440 std::swap(Op0, Op1); // Change icmp uge -> icmp ule
6442 case ICmpInst::ICMP_ULE: { // icmp ule i1 A, B -> ~A | B
6443 Value *Not = Builder->CreateNot(Op0, I.getName()+"tmp");
6444 return BinaryOperator::CreateOr(Not, Op1);
6446 case ICmpInst::ICMP_SGE:
6447 std::swap(Op0, Op1); // Change icmp sge -> icmp sle
6449 case ICmpInst::ICMP_SLE: { // icmp sle i1 A, B -> A | ~B
6450 Value *Not = Builder->CreateNot(Op1, I.getName()+"tmp");
6451 return BinaryOperator::CreateOr(Not, Op0);
6456 unsigned BitWidth = 0;
6458 BitWidth = TD->getTypeSizeInBits(Ty->getScalarType());
6459 else if (Ty->isIntOrIntVector())
6460 BitWidth = Ty->getScalarSizeInBits();
6462 bool isSignBit = false;
6464 // See if we are doing a comparison with a constant.
6465 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
6466 Value *A = 0, *B = 0;
6468 // (icmp ne/eq (sub A B) 0) -> (icmp ne/eq A, B)
6469 if (I.isEquality() && CI->isZero() &&
6470 match(Op0, m_Sub(m_Value(A), m_Value(B)))) {
6471 // (icmp cond A B) if cond is equality
6472 return new ICmpInst(I.getPredicate(), A, B);
6475 // If we have an icmp le or icmp ge instruction, turn it into the
6476 // appropriate icmp lt or icmp gt instruction. This allows us to rely on
6477 // them being folded in the code below. The SimplifyICmpInst code has
6478 // already handled the edge cases for us, so we just assert on them.
6479 switch (I.getPredicate()) {
6481 case ICmpInst::ICMP_ULE:
6482 assert(!CI->isMaxValue(false)); // A <=u MAX -> TRUE
6483 return new ICmpInst(ICmpInst::ICMP_ULT, Op0,
6485 case ICmpInst::ICMP_SLE:
6486 assert(!CI->isMaxValue(true)); // A <=s MAX -> TRUE
6487 return new ICmpInst(ICmpInst::ICMP_SLT, Op0,
6489 case ICmpInst::ICMP_UGE:
6490 assert(!CI->isMinValue(false)); // A >=u MIN -> TRUE
6491 return new ICmpInst(ICmpInst::ICMP_UGT, Op0,
6493 case ICmpInst::ICMP_SGE:
6494 assert(!CI->isMinValue(true)); // A >=s MIN -> TRUE
6495 return new ICmpInst(ICmpInst::ICMP_SGT, Op0,
6499 // If this comparison is a normal comparison, it demands all
6500 // bits; if it is a sign bit comparison, it only demands the sign bit.
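// For example, 'icmp slt X, 0' and 'icmp sgt X, -1' only test the sign bit
// of X, so only that bit needs to be demanded from the operand.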
6502 isSignBit = isSignBitCheck(I.getPredicate(), CI, UnusedBit);
6505 // See if we can fold the comparison based on range information we can get
6506 // by checking whether bits are known to be zero or one in the input.
6507 if (BitWidth != 0) {
6508 APInt Op0KnownZero(BitWidth, 0), Op0KnownOne(BitWidth, 0);
6509 APInt Op1KnownZero(BitWidth, 0), Op1KnownOne(BitWidth, 0);
6511 if (SimplifyDemandedBits(I.getOperandUse(0),
6512 isSignBit ? APInt::getSignBit(BitWidth)
6513 : APInt::getAllOnesValue(BitWidth),
6514 Op0KnownZero, Op0KnownOne, 0))
6516 if (SimplifyDemandedBits(I.getOperandUse(1),
6517 APInt::getAllOnesValue(BitWidth),
6518 Op1KnownZero, Op1KnownOne, 0))
6521 // Given the known and unknown bits, compute a range that the LHS could be
6522 // in. Compute the Min, Max and RHS values based on the known bits. For the
6523 // EQ and NE we use unsigned values.
6524 APInt Op0Min(BitWidth, 0), Op0Max(BitWidth, 0);
6525 APInt Op1Min(BitWidth, 0), Op1Max(BitWidth, 0);
6527 ComputeSignedMinMaxValuesFromKnownBits(Op0KnownZero, Op0KnownOne,
6529 ComputeSignedMinMaxValuesFromKnownBits(Op1KnownZero, Op1KnownOne,
6532 ComputeUnsignedMinMaxValuesFromKnownBits(Op0KnownZero, Op0KnownOne,
6534 ComputeUnsignedMinMaxValuesFromKnownBits(Op1KnownZero, Op1KnownOne,
6538 // If Min and Max are known to be the same, then SimplifyDemandedBits
6539 // figured out that the LHS is a constant. Just constant fold this now so
6540 // that code below can assume that Min != Max.
6541 if (!isa<Constant>(Op0) && Op0Min == Op0Max)
6542 return new ICmpInst(I.getPredicate(),
6543 ConstantInt::get(*Context, Op0Min), Op1);
6544 if (!isa<Constant>(Op1) && Op1Min == Op1Max)
6545 return new ICmpInst(I.getPredicate(), Op0,
6546 ConstantInt::get(*Context, Op1Min));
6548 // Based on the range information we know about the LHS, see if we can
6549 // simplify this comparison. For example, (x&4) < 8 is always true.
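// In the (x&4) < 8 example, every bit of x&4 except bit 2 is known to be
// zero, so Op0Max is 4; 4 is below 8 in both the signed and unsigned cases,
// so the comparison folds to true.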
6550 switch (I.getPredicate()) {
6551 default: llvm_unreachable("Unknown icmp opcode!");
6552 case ICmpInst::ICMP_EQ:
6553 if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max))
6554 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
6556 case ICmpInst::ICMP_NE:
6557 if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max))
6558 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
6560 case ICmpInst::ICMP_ULT:
6561 if (Op0Max.ult(Op1Min)) // A <u B -> true if max(A) < min(B)
6562 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
6563 if (Op0Min.uge(Op1Max)) // A <u B -> false if min(A) >= max(B)
6564 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
6565 if (Op1Min == Op0Max) // A <u B -> A != B if max(A) == min(B)
6566 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
6567 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
6568 if (Op1Max == Op0Min+1) // A <u C -> A == C-1 if min(A)+1 == C
6569 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
6572 // (x <u 2147483648) -> (x >s -1) -> true if sign bit clear
6573 if (CI->isMinValue(true))
6574 return new ICmpInst(ICmpInst::ICMP_SGT, Op0,
6575 Constant::getAllOnesValue(Op0->getType()));
6578 case ICmpInst::ICMP_UGT:
6579 if (Op0Min.ugt(Op1Max)) // A >u B -> true if min(A) > max(B)
6580 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
6581 if (Op0Max.ule(Op1Min)) // A >u B -> false if max(A) <= min(B)
6582 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
6584 if (Op1Max == Op0Min) // A >u B -> A != B if min(A) == max(B)
6585 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
6586 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
6587 if (Op1Min == Op0Max-1) // A >u C -> A == C+1 if max(A)-1 == C
6588 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
6591 // (x >u 2147483647) -> (x <s 0) -> true if sign bit set
6592 if (CI->isMaxValue(true))
6593 return new ICmpInst(ICmpInst::ICMP_SLT, Op0,
6594 Constant::getNullValue(Op0->getType()));
6597 case ICmpInst::ICMP_SLT:
6598 if (Op0Max.slt(Op1Min)) // A <s B -> true if max(A) < min(B)
6599 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
6600 if (Op0Min.sge(Op1Max)) // A <s B -> false if min(A) >= max(B)
6601 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
6602 if (Op1Min == Op0Max) // A <s B -> A != B if max(A) == min(B)
6603 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
6604 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
6605 if (Op1Max == Op0Min+1) // A <s C -> A == C-1 if min(A)+1 == C
6606 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
6610 case ICmpInst::ICMP_SGT:
6611 if (Op0Min.sgt(Op1Max)) // A >s B -> true if min(A) > max(B)
6612 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
6613 if (Op0Max.sle(Op1Min)) // A >s B -> false if max(A) <= min(B)
6614 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
6616 if (Op1Max == Op0Min) // A >s B -> A != B if min(A) == max(B)
6617 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
6618 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
6619 if (Op1Min == Op0Max-1) // A >s C -> A == C+1 if max(A)-1 == C
6620 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
6624 case ICmpInst::ICMP_SGE:
6625 assert(!isa<ConstantInt>(Op1) && "ICMP_SGE with ConstantInt not folded!");
6626 if (Op0Min.sge(Op1Max)) // A >=s B -> true if min(A) >= max(B)
6627 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
6628 if (Op0Max.slt(Op1Min)) // A >=s B -> false if max(A) < min(B)
6629 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
6631 case ICmpInst::ICMP_SLE:
6632 assert(!isa<ConstantInt>(Op1) && "ICMP_SLE with ConstantInt not folded!");
6633 if (Op0Max.sle(Op1Min)) // A <=s B -> true if max(A) <= min(B)
6634 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
6635 if (Op0Min.sgt(Op1Max)) // A <=s B -> false if min(A) > max(B)
6636 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
6638 case ICmpInst::ICMP_UGE:
6639 assert(!isa<ConstantInt>(Op1) && "ICMP_UGE with ConstantInt not folded!");
6640 if (Op0Min.uge(Op1Max)) // A >=u B -> true if min(A) >= max(B)
6641 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
6642 if (Op0Max.ult(Op1Min)) // A >=u B -> false if max(A) < min(B)
6643 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
6645 case ICmpInst::ICMP_ULE:
6646 assert(!isa<ConstantInt>(Op1) && "ICMP_ULE with ConstantInt not folded!");
6647 if (Op0Max.ule(Op1Min)) // A <=u B -> true if max(A) <= min(B)
6648 return ReplaceInstUsesWith(I, ConstantInt::getTrue(*Context));
6649 if (Op0Min.ugt(Op1Max)) // A <=u B -> false if min(A) > max(B)
6650 return ReplaceInstUsesWith(I, ConstantInt::getFalse(*Context));
6654 // Turn a signed comparison into an unsigned one if both operands
6655 // are known to have the same sign.
6657 ((Op0KnownZero.isNegative() && Op1KnownZero.isNegative()) ||
6658 (Op0KnownOne.isNegative() && Op1KnownOne.isNegative())))
6659 return new ICmpInst(I.getUnsignedPredicate(), Op0, Op1);
6662 // Test if the ICmpInst instruction is used exclusively by a select as
6663 // part of a minimum or maximum operation. If so, refrain from doing
6664 // any other folding. This helps out other analyses which understand
6665 // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
6666 // and CodeGen. And in this case, at least one of the comparison
6667 // operands has at least one user besides the compare (the select),
6668 // which would often largely negate the benefit of folding anyway.
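// A typical minimum idiom that is left alone here looks like:
//   %cmp = icmp slt i32 %a, %b
//   %min = select i1 %cmp, i32 %a, i32 %b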
6670 if (SelectInst *SI = dyn_cast<SelectInst>(*I.use_begin()))
6671 if ((SI->getOperand(1) == Op0 && SI->getOperand(2) == Op1) ||
6672 (SI->getOperand(2) == Op0 && SI->getOperand(1) == Op1))
6675 // See if we are doing a comparison between a constant and an instruction that
6676 // can be folded into the comparison.
6677 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
6678 // Since the RHS is a ConstantInt (CI), if the left hand side is an
6679 // instruction, see if that instruction also has constants so that the
6680 // instruction can be folded into the icmp
6681 if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
6682 if (Instruction *Res = visitICmpInstWithInstAndIntCst(I, LHSI, CI))
6686 // Handle icmp with constant (but not simple integer constant) RHS
6687 if (Constant *RHSC = dyn_cast<Constant>(Op1)) {
6688 if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
6689 switch (LHSI->getOpcode()) {
6690 case Instruction::GetElementPtr:
6691 // icmp pred GEP (P, int 0, int 0, int 0), null -> icmp pred P, null
6692 if (RHSC->isNullValue() &&
6693 cast<GetElementPtrInst>(LHSI)->hasAllZeroIndices())
6694 return new ICmpInst(I.getPredicate(), LHSI->getOperand(0),
6695 Constant::getNullValue(LHSI->getOperand(0)->getType()));
6697 case Instruction::PHI:
6698 // Only fold icmp into the PHI if the phi and icmp are in the same
6699 // block. If in the same block, we're encouraging jump threading. If
6700 // not, we are just pessimizing the code by making an i1 phi.
6701 if (LHSI->getParent() == I.getParent())
6702 if (Instruction *NV = FoldOpIntoPhi(I, true))
6705 case Instruction::Select: {
6706 // If either operand of the select is a constant, we can fold the
6707 // comparison into the select arms, which will cause one to be
6708 // constant folded and the select turned into a bitwise or.
6709 Value *Op1 = 0, *Op2 = 0;
6710 if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1)))
6711 Op1 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
6712 if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2)))
6713 Op2 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
6715 // We only want to perform this transformation if it will not lead to
6716 // additional code. This is true if either both sides of the select
6717 // fold to a constant (in which case the icmp is replaced with a select
6718 // which will usually simplify) or this is the only user of the
6719 // select (in which case we are trading a select+icmp for a simpler select+icmp).
6721 if ((Op1 && Op2) || (LHSI->hasOneUse() && (Op1 || Op2))) {
6723 Op1 = Builder->CreateICmp(I.getPredicate(), LHSI->getOperand(1),
6726 Op2 = Builder->CreateICmp(I.getPredicate(), LHSI->getOperand(2),
6728 return SelectInst::Create(LHSI->getOperand(0), Op1, Op2);
6732 case Instruction::Call:
6733 // If we have (malloc != null), and if the malloc has a single use, we
6734 // can assume it is successful and remove the malloc.
6735 if (isMalloc(LHSI) && LHSI->hasOneUse() &&
6736 isa<ConstantPointerNull>(RHSC)) {
6737 // Need to explicitly erase malloc call here, instead of adding it to
6738 // Worklist, because it won't get DCE'd from the Worklist since
6739 // isInstructionTriviallyDead() returns false for function calls.
6740 // It is OK to replace LHSI/MallocCall with Undef because the
6741 // instruction that uses it will be erased via Worklist.
6742 if (extractMallocCall(LHSI)) {
6743 LHSI->replaceAllUsesWith(UndefValue::get(LHSI->getType()));
6744 EraseInstFromFunction(*LHSI);
6745 return ReplaceInstUsesWith(I,
6746 ConstantInt::get(Type::getInt1Ty(*Context),
6747 !I.isTrueWhenEqual()));
6749 if (CallInst* MallocCall = extractMallocCallFromBitCast(LHSI))
6750 if (MallocCall->hasOneUse()) {
6751 MallocCall->replaceAllUsesWith(
6752 UndefValue::get(MallocCall->getType()));
6753 EraseInstFromFunction(*MallocCall);
6754 Worklist.Add(LHSI); // The malloc's bitcast use.
6755 return ReplaceInstUsesWith(I,
6756 ConstantInt::get(Type::getInt1Ty(*Context),
6757 !I.isTrueWhenEqual()));
6761 case Instruction::IntToPtr:
6762 // icmp pred inttoptr(X), null -> icmp pred X, 0
6763 if (RHSC->isNullValue() && TD &&
6764 TD->getIntPtrType(RHSC->getContext()) ==
6765 LHSI->getOperand(0)->getType())
6766 return new ICmpInst(I.getPredicate(), LHSI->getOperand(0),
6767 Constant::getNullValue(LHSI->getOperand(0)->getType()));
6770 case Instruction::Load:
6771 // Try to optimize things like "A[i] > 4" to index computations.
6772 if (GetElementPtrInst *GEP =
6773 dyn_cast<GetElementPtrInst>(LHSI->getOperand(0))) {
6774 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
6775 if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
6776 !cast<LoadInst>(LHSI)->isVolatile())
6777 if (Instruction *Res = FoldCmpLoadFromIndexedGlobal(GEP, GV, I))
6784 // If we can optimize an 'icmp GEP, P' or 'icmp P, GEP', do so now.
6785 if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op0))
6786 if (Instruction *NI = FoldGEPICmp(GEP, Op1, I.getPredicate(), I))
6788 if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op1))
6789 if (Instruction *NI = FoldGEPICmp(GEP, Op0,
6790 ICmpInst::getSwappedPredicate(I.getPredicate()), I))
6793 // Test to see if the operands of the icmp are casted versions of other
6794 // values. If the ptr->ptr cast can be stripped off both arguments, we do so
6796 if (BitCastInst *CI = dyn_cast<BitCastInst>(Op0)) {
6797 if (isa<PointerType>(Op0->getType()) &&
6798 (isa<Constant>(Op1) || isa<BitCastInst>(Op1))) {
6799 // We keep moving the cast from the left operand over to the right
6800 // operand, where it can often be eliminated completely.
6801 Op0 = CI->getOperand(0);
6803 // If operand #1 is a bitcast instruction, it must also be a ptr->ptr cast
6804 // so eliminate it as well.
6805 if (BitCastInst *CI2 = dyn_cast<BitCastInst>(Op1))
6806 Op1 = CI2->getOperand(0);
6808 // If Op1 is a constant, we can fold the cast into the constant.
6809 if (Op0->getType() != Op1->getType()) {
6810 if (Constant *Op1C = dyn_cast<Constant>(Op1)) {
6811 Op1 = ConstantExpr::getBitCast(Op1C, Op0->getType());
6813 // Otherwise, cast the RHS right before the icmp
6814 Op1 = Builder->CreateBitCast(Op1, Op0->getType());
6817 return new ICmpInst(I.getPredicate(), Op0, Op1);
6821 if (isa<CastInst>(Op0)) {
6822 // Handle the special case of: icmp (cast bool to X), <cst>
6823 // This comes up when you have code like
6826 // For generality, we handle any zero-extension of any operand comparison
6827 // with a constant or another cast from the same type.
6828 if (isa<Constant>(Op1) || isa<CastInst>(Op1))
6829 if (Instruction *R = visitICmpInstWithCastAndCast(I))
6833 // See if it's the same type of instruction on the left and right.
6834 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
6835 if (BinaryOperator *Op1I = dyn_cast<BinaryOperator>(Op1)) {
6836 if (Op0I->getOpcode() == Op1I->getOpcode() && Op0I->hasOneUse() &&
6837 Op1I->hasOneUse() && Op0I->getOperand(1) == Op1I->getOperand(1)) {
6838 switch (Op0I->getOpcode()) {
6840 case Instruction::Add:
6841 case Instruction::Sub:
6842 case Instruction::Xor:
6843 if (I.isEquality()) // a+x icmp eq/ne b+x --> a icmp b
6844 return new ICmpInst(I.getPredicate(), Op0I->getOperand(0),
6845 Op1I->getOperand(0));
6846 // icmp u/s (a ^ signbit), (b ^ signbit) --> icmp s/u a, b
6847 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
6848 if (CI->getValue().isSignBit()) {
6849 ICmpInst::Predicate Pred = I.isSigned()
6850 ? I.getUnsignedPredicate()
6851 : I.getSignedPredicate();
6852 return new ICmpInst(Pred, Op0I->getOperand(0),
6853 Op1I->getOperand(0));
6856 if (CI->getValue().isMaxSignedValue()) {
6857 ICmpInst::Predicate Pred = I.isSigned()
6858 ? I.getUnsignedPredicate()
6859 : I.getSignedPredicate();
6860 Pred = I.getSwappedPredicate(Pred);
6861 return new ICmpInst(Pred, Op0I->getOperand(0),
6862 Op1I->getOperand(0));
6866 case Instruction::Mul:
6867 if (!I.isEquality())
6870 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
6871 // a * Cst icmp eq/ne b * Cst --> a & Mask icmp b & Mask
6872 // Mask = -1 >> count-trailing-zeros(Cst).
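// For example, with i8 operands and Cst = 12 (two trailing zero bits),
// Mask is 63, so "a*12 == b*12" becomes "(a & 63) == (b & 63)".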
6873 if (!CI->isZero() && !CI->isOne()) {
6874 const APInt &AP = CI->getValue();
6875 ConstantInt *Mask = ConstantInt::get(*Context,
6876 APInt::getLowBitsSet(AP.getBitWidth(),
6878 AP.countTrailingZeros()));
6879 Value *And1 = Builder->CreateAnd(Op0I->getOperand(0), Mask);
6880 Value *And2 = Builder->CreateAnd(Op1I->getOperand(0), Mask);
6881 return new ICmpInst(I.getPredicate(), And1, And2);
6890 // ~x < ~y --> y < x
6892 if (match(Op0, m_Not(m_Value(A))) &&
6893 match(Op1, m_Not(m_Value(B))))
6894 return new ICmpInst(I.getPredicate(), B, A);
6897 if (I.isEquality()) {
6898 Value *A, *B, *C, *D;
6900 // -x == -y --> x == y
6901 if (match(Op0, m_Neg(m_Value(A))) &&
6902 match(Op1, m_Neg(m_Value(B))))
6903 return new ICmpInst(I.getPredicate(), A, B);
6905 if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
6906 if (A == Op1 || B == Op1) { // (A^B) == A -> B == 0
6907 Value *OtherVal = A == Op1 ? B : A;
6908 return new ICmpInst(I.getPredicate(), OtherVal,
6909 Constant::getNullValue(A->getType()));
6912 if (match(Op1, m_Xor(m_Value(C), m_Value(D)))) {
6913 // A^c1 == C^c2 --> A == C^(c1^c2)
6914 ConstantInt *C1, *C2;
6915 if (match(B, m_ConstantInt(C1)) &&
6916 match(D, m_ConstantInt(C2)) && Op1->hasOneUse()) {
6918 ConstantInt::get(*Context, C1->getValue() ^ C2->getValue());
6919 Value *Xor = Builder->CreateXor(C, NC, "tmp");
6920 return new ICmpInst(I.getPredicate(), A, Xor);
6923 // A^B == A^D -> B == D
6924 if (A == C) return new ICmpInst(I.getPredicate(), B, D);
6925 if (A == D) return new ICmpInst(I.getPredicate(), B, C);
6926 if (B == C) return new ICmpInst(I.getPredicate(), A, D);
6927 if (B == D) return new ICmpInst(I.getPredicate(), A, C);
6931 if (match(Op1, m_Xor(m_Value(A), m_Value(B))) &&
6932 (A == Op0 || B == Op0)) {
6933 // A == (A^B) -> B == 0
6934 Value *OtherVal = A == Op0 ? B : A;
6935 return new ICmpInst(I.getPredicate(), OtherVal,
6936 Constant::getNullValue(A->getType()));
6939 // (A-B) == A -> B == 0
6940 if (match(Op0, m_Sub(m_Specific(Op1), m_Value(B))))
6941 return new ICmpInst(I.getPredicate(), B,
6942 Constant::getNullValue(B->getType()));
6944 // A == (A-B) -> B == 0
6945 if (match(Op1, m_Sub(m_Specific(Op0), m_Value(B))))
6946 return new ICmpInst(I.getPredicate(), B,
6947 Constant::getNullValue(B->getType()));
6949 // (X&Z) == (Y&Z) -> (X^Y) & Z == 0
6950 if (Op0->hasOneUse() && Op1->hasOneUse() &&
6951 match(Op0, m_And(m_Value(A), m_Value(B))) &&
6952 match(Op1, m_And(m_Value(C), m_Value(D)))) {
6953 Value *X = 0, *Y = 0, *Z = 0;
6956 X = B; Y = D; Z = A;
6957 } else if (A == D) {
6958 X = B; Y = C; Z = A;
6959 } else if (B == C) {
6960 X = A; Y = D; Z = B;
6961 } else if (B == D) {
6962 X = A; Y = C; Z = B;
6965 if (X) { // Build (X^Y) & Z
6966 Op1 = Builder->CreateXor(X, Y, "tmp");
6967 Op1 = Builder->CreateAnd(Op1, Z, "tmp");
6968 I.setOperand(0, Op1);
6969 I.setOperand(1, Constant::getNullValue(Op1->getType()));
6976 Value *X; ConstantInt *Cst;
6978 if (match(Op0, m_Add(m_Value(X), m_ConstantInt(Cst))) && Op1 == X)
6979 return FoldICmpAddOpCst(I, X, Cst, I.getPredicate(), Op0);
6982 if (match(Op1, m_Add(m_Value(X), m_ConstantInt(Cst))) && Op0 == X)
6983 return FoldICmpAddOpCst(I, X, Cst, I.getSwappedPredicate(), Op1);
6985 return Changed ? &I : 0;
6988 /// FoldICmpAddOpCst - Fold "icmp pred (X+CI), X".
6989 Instruction *InstCombiner::FoldICmpAddOpCst(ICmpInst &ICI,
6990 Value *X, ConstantInt *CI,
6991 ICmpInst::Predicate Pred,
6993 // If we have X+0, exit early (simplifying logic below) and let it get folded
6994 // elsewhere. icmp X+0, X -> icmp X, X
6996 bool isTrue = ICmpInst::isTrueWhenEqual(Pred);
6997 return ReplaceInstUsesWith(ICI, ConstantInt::get(ICI.getType(), isTrue));
7000 // (X+4) == X -> false.
7001 if (Pred == ICmpInst::ICMP_EQ)
7002 return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(X->getContext()));
7004 // (X+4) != X -> true.
7005 if (Pred == ICmpInst::ICMP_NE)
7006 return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(X->getContext()));
7008 // If this is an instruction (as opposed to constantexpr) get NUW/NSW info.
7009 bool isNUW = false, isNSW = false;
7010 if (BinaryOperator *Add = dyn_cast<BinaryOperator>(TheAdd)) {
7011 isNUW = Add->hasNoUnsignedWrap();
7012 isNSW = Add->hasNoSignedWrap();
7015 // From this point on, we know that (X+C <= X) --> (X+C < X) because C != 0,
7016 // so the values can never be equal. Similarly for all other "or equals" operators.
7019 // (X+1) <u X --> X >u (MAXUINT-1) --> X != 255
7020 // (X+2) <u X --> X >u (MAXUINT-2) --> X > 253
7021 // (X+MAXUINT) <u X --> X >u (MAXUINT-MAXUINT) --> X != 0
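// (The concrete right-hand values in the examples in this function assume an
// i8 operand, where MAXUINT is 255 and MAXSINT is 127.)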
7022 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
7023 // If this is an NUW add, then this is always false.
7025 return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(X->getContext()));
7027 Value *R = ConstantExpr::getSub(ConstantInt::get(CI->getType(), -1ULL), CI);
7028 return new ICmpInst(ICmpInst::ICMP_UGT, X, R);
7031 // (X+1) >u X --> X <u (0-1) --> X != 255
7032 // (X+2) >u X --> X <u (0-2) --> X <u 254
7033 // (X+MAXUINT) >u X --> X <u (0-MAXUINT) --> X <u 1 --> X == 0
7034 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
7035 // If this is an NUW add, then this is always true.
7037 return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(X->getContext()));
7038 return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantExpr::getNeg(CI));
7041 unsigned BitWidth = CI->getType()->getPrimitiveSizeInBits();
7042 ConstantInt *SMax = ConstantInt::get(X->getContext(),
7043 APInt::getSignedMaxValue(BitWidth));
7045 // (X+ 1) <s X --> X >s (MAXSINT-1) --> X == 127
7046 // (X+ 2) <s X --> X >s (MAXSINT-2) --> X >s 125
7047 // (X+MAXSINT) <s X --> X >s (MAXSINT-MAXSINT) --> X >s 0
7048 // (X+MINSINT) <s X --> X >s (MAXSINT-MINSINT) --> X >s -1
7049 // (X+ -2) <s X --> X >s (MAXSINT- -2) --> X >s 126
7050 // (X+ -1) <s X --> X >s (MAXSINT- -1) --> X != 127
7051 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
7052 // If this is an NSW add, then we have two cases: if the constant is
7053 // positive, then this is always false; if negative, this is always true.
7055 bool isTrue = CI->getValue().isNegative();
7056 return ReplaceInstUsesWith(ICI, ConstantInt::get(ICI.getType(), isTrue));
7059 return new ICmpInst(ICmpInst::ICMP_SGT, X, ConstantExpr::getSub(SMax, CI));
7062 // (X+ 1) >s X --> X <s (MAXSINT-(1-1)) --> X != 127
7063 // (X+ 2) >s X --> X <s (MAXSINT-(2-1)) --> X <s 126
7064 // (X+MAXSINT) >s X --> X <s (MAXSINT-(MAXSINT-1)) --> X <s 1
7065 // (X+MINSINT) >s X --> X <s (MAXSINT-(MINSINT-1)) --> X <s -2
7066 // (X+ -2) >s X --> X <s (MAXSINT-(-2-1)) --> X <s -126
7067 // (X+ -1) >s X --> X <s (MAXSINT-(-1-1)) --> X == -128
7069 // If this is an NSW add, then we have two cases: if the constant is
7070 // positive, then this is always true; if negative, this is always false.
7072 bool isTrue = !CI->getValue().isNegative();
7073 return ReplaceInstUsesWith(ICI, ConstantInt::get(ICI.getType(), isTrue));
7076 assert(Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE);
7077 Constant *C = ConstantInt::get(X->getContext(), CI->getValue()-1);
7078 return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantExpr::getSub(SMax, C));
7081 /// FoldICmpDivCst - Fold "icmp pred ([su]div X, DivRHS), CmpRHS" where DivRHS
7082 /// and CmpRHS are both known to be integer constants.
7083 Instruction *InstCombiner::FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
7084 ConstantInt *DivRHS) {
7085 ConstantInt *CmpRHS = cast<ConstantInt>(ICI.getOperand(1));
7086 const APInt &CmpRHSV = CmpRHS->getValue();
7088 // FIXME: If the operand types don't match the type of the divide
7089 // then don't attempt this transform. The code below doesn't have the
7090 // logic to deal with a signed divide and an unsigned compare (and
7091 // vice versa). This is because (x /s C1) <s C2 produces different
7092 // results than (x /s C1) <u C2 or (x /u C1) <s C2 or even
7093 // (x /u C1) <u C2. Simply casting the operands and result won't
7094 // work. :( The if statement below tests that condition and bails
7096 bool DivIsSigned = DivI->getOpcode() == Instruction::SDiv;
7097 if (!ICI.isEquality() && DivIsSigned != ICI.isSigned())
7099 if (DivRHS->isZero())
7100 return 0; // The ProdOV computation fails on divide by zero.
7101 if (DivIsSigned && DivRHS->isAllOnesValue())
7102 return 0; // The overflow computation also screws up here
7103 if (DivRHS->isOne())
7104 return 0; // Not worth bothering, and eliminates some funny cases
7107 // Compute Prod = CI * DivRHS. We are essentially solving an equation
7108 // of form X/C1=C2. We solve for X by multiplying C1 (DivRHS) and
7109 // C2 (CI). By solving for X we can turn this into a range check
7110 // instead of computing a divide.
7111 Constant *Prod = ConstantExpr::getMul(CmpRHS, DivRHS);
7113 // Determine if the product overflows by seeing if the product is
7114 // not equal to the divide. Make sure we do the same kind of divide
7115 // as in the LHS instruction that we're folding.
7116 bool ProdOV = (DivIsSigned ? ConstantExpr::getSDiv(Prod, DivRHS) :
7117 ConstantExpr::getUDiv(Prod, DivRHS)) != CmpRHS;
7119 // Get the ICmp opcode
7120 ICmpInst::Predicate Pred = ICI.getPredicate();
7122 // Figure out the interval that is being checked. For example, a comparison
7123 // like "X /u 5 == 0" is really checking that X is in the interval [0, 5).
7124 // Compute this interval based on the constants involved and the signedness of
7125 // the compare/divide. This computes a half-open interval, keeping track of
7126 // whether either value in the interval overflows. After analysis each
7127 // overflow variable is set to 0 if its corresponding bound variable is valid,
7128 // -1 if overflowed off the bottom end, or +1 if overflowed off the top end.
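// For example, "X /u 5 ==u 3" holds exactly when X is in [15, 20), so the
// equality below becomes a range check of X against that interval.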
7129 int LoOverflow = 0, HiOverflow = 0;
7130 Constant *LoBound = 0, *HiBound = 0;
7132 if (!DivIsSigned) { // udiv
7133 // e.g. X/5 op 3 --> [15, 20)
7135 HiOverflow = LoOverflow = ProdOV;
7137 HiOverflow = AddWithOverflow(HiBound, LoBound, DivRHS, Context, false);
7138 } else if (DivRHS->getValue().isStrictlyPositive()) { // Divisor is > 0.
7139 if (CmpRHSV == 0) { // (X / pos) op 0
7140 // Can't overflow. e.g. X/2 op 0 --> [-1, 2)
7141 LoBound = cast<ConstantInt>(ConstantExpr::getNeg(SubOne(DivRHS)));
7143 } else if (CmpRHSV.isStrictlyPositive()) { // (X / pos) op pos
7144 LoBound = Prod; // e.g. X/5 op 3 --> [15, 20)
7145 HiOverflow = LoOverflow = ProdOV;
7147 HiOverflow = AddWithOverflow(HiBound, Prod, DivRHS, Context, true);
7148 } else { // (X / pos) op neg
7149 // e.g. X/5 op -3 --> [-15-4, -15+1) --> [-19, -14)
7150 HiBound = AddOne(Prod);
7151 LoOverflow = HiOverflow = ProdOV ? -1 : 0;
7153 ConstantInt* DivNeg =
7154 cast<ConstantInt>(ConstantExpr::getNeg(DivRHS));
7155 LoOverflow = AddWithOverflow(LoBound, HiBound, DivNeg, Context,
7159 } else if (DivRHS->getValue().isNegative()) { // Divisor is < 0.
7160 if (CmpRHSV == 0) { // (X / neg) op 0
7161 // e.g. X/-5 op 0 --> [-4, 5)
7162 LoBound = AddOne(DivRHS);
7163 HiBound = cast<ConstantInt>(ConstantExpr::getNeg(DivRHS));
7164 if (HiBound == DivRHS) { // -INTMIN = INTMIN
7165 HiOverflow = 1; // [INTMIN+1, overflow)
7166 HiBound = 0; // e.g. X/INTMIN = 0 --> X > INTMIN
7168 } else if (CmpRHSV.isStrictlyPositive()) { // (X / neg) op pos
7169 // e.g. X/-5 op 3 --> [-19, -14)
7170 HiBound = AddOne(Prod);
7171 HiOverflow = LoOverflow = ProdOV ? -1 : 0;
7173 LoOverflow = AddWithOverflow(LoBound, HiBound,
7174 DivRHS, Context, true) ? -1 : 0;
7175 } else { // (X / neg) op neg
7176 LoBound = Prod; // e.g. X/-5 op -3 --> [15, 20)
7177 LoOverflow = HiOverflow = ProdOV;
7179 HiOverflow = SubWithOverflow(HiBound, Prod, DivRHS, Context, true);
7182 // Dividing by a negative swaps the condition. LT <-> GT
7183 Pred = ICmpInst::getSwappedPredicate(Pred);
7186 Value *X = DivI->getOperand(0);
7188 default: llvm_unreachable("Unhandled icmp opcode!");
7189 case ICmpInst::ICMP_EQ:
7190 if (LoOverflow && HiOverflow)
7191 return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(*Context));
7192 else if (HiOverflow)
7193 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
7194 ICmpInst::ICMP_UGE, X, LoBound);
7195 else if (LoOverflow)
7196 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
7197 ICmpInst::ICMP_ULT, X, HiBound);
7199 return InsertRangeTest(X, LoBound, HiBound, DivIsSigned, true, ICI);
7200 case ICmpInst::ICMP_NE:
7201 if (LoOverflow && HiOverflow)
7202 return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(*Context));
7203 else if (HiOverflow)
7204 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
7205 ICmpInst::ICMP_ULT, X, LoBound);
7206 else if (LoOverflow)
7207 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
7208 ICmpInst::ICMP_UGE, X, HiBound);
7210 return InsertRangeTest(X, LoBound, HiBound, DivIsSigned, false, ICI);
7211 case ICmpInst::ICMP_ULT:
7212 case ICmpInst::ICMP_SLT:
7213 if (LoOverflow == +1) // Low bound is greater than input range.
7214 return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(*Context));
7215 if (LoOverflow == -1) // Low bound is less than input range.
7216 return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(*Context));
7217 return new ICmpInst(Pred, X, LoBound);
7218 case ICmpInst::ICMP_UGT:
7219 case ICmpInst::ICMP_SGT:
7220 if (HiOverflow == +1) // High bound greater than input range.
7221 return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(*Context));
7222 else if (HiOverflow == -1) // High bound less than input range.
7223 return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(*Context));
7224 if (Pred == ICmpInst::ICMP_UGT)
7225 return new ICmpInst(ICmpInst::ICMP_UGE, X, HiBound);
7227 return new ICmpInst(ICmpInst::ICMP_SGE, X, HiBound);
7232 /// visitICmpInstWithInstAndIntCst - Handle "icmp (instr, intcst)".
7234 Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
7237 const APInt &RHSV = RHS->getValue();
7239 switch (LHSI->getOpcode()) {
7240 case Instruction::Trunc:
7241 if (ICI.isEquality() && LHSI->hasOneUse()) {
7242 // Simplify icmp eq (trunc x to i8), 42 -> icmp eq x, 42|highbits if all
7243 // of the high bits truncated out of x are known.
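// For example, if the 24 bits truncated out of a 32-bit x are all known to
// be zero, "icmp eq (trunc i32 %x to i8), 42" can become "icmp eq i32 %x, 42".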
7244 unsigned DstBits = LHSI->getType()->getPrimitiveSizeInBits(),
7245 SrcBits = LHSI->getOperand(0)->getType()->getPrimitiveSizeInBits();
7246 APInt Mask(APInt::getHighBitsSet(SrcBits, SrcBits-DstBits));
7247 APInt KnownZero(SrcBits, 0), KnownOne(SrcBits, 0);
7248 ComputeMaskedBits(LHSI->getOperand(0), Mask, KnownZero, KnownOne);
7250 // If all the high bits are known, we can do this xform.
7251 if ((KnownZero|KnownOne).countLeadingOnes() >= SrcBits-DstBits) {
7252 // Pull in the high bits from known-ones set.
7253 APInt NewRHS(RHS->getValue());
7254 NewRHS.zext(SrcBits);
7256 return new ICmpInst(ICI.getPredicate(), LHSI->getOperand(0),
7257 ConstantInt::get(*Context, NewRHS));
7262 case Instruction::Xor: // (icmp pred (xor X, XorCST), CI)
7263 if (ConstantInt *XorCST = dyn_cast<ConstantInt>(LHSI->getOperand(1))) {
7264 // If this is a comparison that tests the signbit (X < 0) or (X > -1), fold the xor into the comparison.
7266 if ((ICI.getPredicate() == ICmpInst::ICMP_SLT && RHSV == 0) ||
7267 (ICI.getPredicate() == ICmpInst::ICMP_SGT && RHSV.isAllOnesValue())) {
7268 Value *CompareVal = LHSI->getOperand(0);
7270 // If the sign bit of the XorCST is not set, there is no change to
7271 // the operation, just stop using the Xor.
7272 if (!XorCST->getValue().isNegative()) {
7273 ICI.setOperand(0, CompareVal);
7278 // Was the old condition true if the operand is positive?
7279 bool isTrueIfPositive = ICI.getPredicate() == ICmpInst::ICMP_SGT;
7281 // If so, the new one isn't.
7282 isTrueIfPositive ^= true;
7284 if (isTrueIfPositive)
7285 return new ICmpInst(ICmpInst::ICMP_SGT, CompareVal,
7288 return new ICmpInst(ICmpInst::ICMP_SLT, CompareVal,
7292 if (LHSI->hasOneUse()) {
7293 // (icmp u/s (xor A SignBit), C) -> (icmp s/u A, (xor C SignBit))
7294 if (!ICI.isEquality() && XorCST->getValue().isSignBit()) {
7295 const APInt &SignBit = XorCST->getValue();
7296 ICmpInst::Predicate Pred = ICI.isSigned()
7297 ? ICI.getUnsignedPredicate()
7298 : ICI.getSignedPredicate();
7299 return new ICmpInst(Pred, LHSI->getOperand(0),
7300 ConstantInt::get(*Context, RHSV ^ SignBit));
7303 // (icmp u/s (xor A ~SignBit), C) -> (icmp s/u (xor C ~SignBit), A)
7304 if (!ICI.isEquality() && XorCST->getValue().isMaxSignedValue()) {
7305 const APInt &NotSignBit = XorCST->getValue();
7306 ICmpInst::Predicate Pred = ICI.isSigned()
7307 ? ICI.getUnsignedPredicate()
7308 : ICI.getSignedPredicate();
7309 Pred = ICI.getSwappedPredicate(Pred);
7310 return new ICmpInst(Pred, LHSI->getOperand(0),
7311 ConstantInt::get(*Context, RHSV ^ NotSignBit));
7316 case Instruction::And: // (icmp pred (and X, AndCST), RHS)
7317 if (LHSI->hasOneUse() && isa<ConstantInt>(LHSI->getOperand(1)) &&
7318 LHSI->getOperand(0)->hasOneUse()) {
7319 ConstantInt *AndCST = cast<ConstantInt>(LHSI->getOperand(1));
7321 // If the LHS is an AND of a truncating cast, we can widen the
7322 // and/compare to be the input width without changing the value
7323 // produced, eliminating a cast.
7324 if (TruncInst *Cast = dyn_cast<TruncInst>(LHSI->getOperand(0))) {
7325 // We can do this transformation if either the AND constant does not
7326 // have its sign bit set or if it is an equality comparison.
7327 // Extending a relational comparison when we're checking the sign
7328 // bit would not work.
7329 if (Cast->hasOneUse() &&
7330 (ICI.isEquality() ||
7331 (AndCST->getValue().isNonNegative() && RHSV.isNonNegative()))) {
7333 cast<IntegerType>(Cast->getOperand(0)->getType())->getBitWidth();
7334 APInt NewCST = AndCST->getValue();
7335 NewCST.zext(BitWidth);
7337 NewCI.zext(BitWidth);
7339 Builder->CreateAnd(Cast->getOperand(0),
7340 ConstantInt::get(*Context, NewCST), LHSI->getName());
7341 return new ICmpInst(ICI.getPredicate(), NewAnd,
7342 ConstantInt::get(*Context, NewCI));
7346 // If this is: (X >> C1) & C2 != C3 (where any shift and any compare
7347 // could exist), turn it into (X & (C2 << C1)) != (C3 << C1). This
7348 // happens a LOT in code produced by the C front-end, for bitfield accesses.
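// For example, "((x >> 3) & 7) == 2" becomes "(x & (7 << 3)) == (2 << 3)",
// i.e. "(x & 56) == 16".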
7350 BinaryOperator *Shift = dyn_cast<BinaryOperator>(LHSI->getOperand(0));
7351 if (Shift && !Shift->isShift())
7355 ShAmt = Shift ? dyn_cast<ConstantInt>(Shift->getOperand(1)) : 0;
7356 const Type *Ty = Shift ? Shift->getType() : 0; // Type of the shift.
7357 const Type *AndTy = AndCST->getType(); // Type of the and.
7359 // We can fold this as long as we can't shift unknown bits
7360 // into the mask. This can only happen with signed shift
7361 // rights, as they sign-extend.
7363 bool CanFold = Shift->isLogicalShift();
7365 // To test for the bad case of the signed shr, see if any
7366 // of the bits shifted in could be tested after the mask.
7367 uint32_t TyBits = Ty->getPrimitiveSizeInBits();
7368 int ShAmtVal = TyBits - ShAmt->getLimitedValue(TyBits);
7370 uint32_t BitWidth = AndTy->getPrimitiveSizeInBits();
7371 if ((APInt::getHighBitsSet(BitWidth, BitWidth-ShAmtVal) &
7372 AndCST->getValue()) == 0)
7378 if (Shift->getOpcode() == Instruction::Shl)
7379 NewCst = ConstantExpr::getLShr(RHS, ShAmt);
7381 NewCst = ConstantExpr::getShl(RHS, ShAmt);
7383 // Check to see if we are shifting out any of the bits being
7385 if (ConstantExpr::get(Shift->getOpcode(),
7386 NewCst, ShAmt) != RHS) {
7387 // If we shifted bits out, the fold is not going to work out.
7388 // As a special case, check to see if this means that the
7389 // result is always true or false now.
7390 if (ICI.getPredicate() == ICmpInst::ICMP_EQ)
7391 return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(*Context));
7392 if (ICI.getPredicate() == ICmpInst::ICMP_NE)
7393 return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(*Context));
7395 ICI.setOperand(1, NewCst);
7396 Constant *NewAndCST;
7397 if (Shift->getOpcode() == Instruction::Shl)
7398 NewAndCST = ConstantExpr::getLShr(AndCST, ShAmt);
7400 NewAndCST = ConstantExpr::getShl(AndCST, ShAmt);
7401 LHSI->setOperand(1, NewAndCST);
7402 LHSI->setOperand(0, Shift->getOperand(0));
7403 Worklist.Add(Shift); // Shift is dead.
7409 // Turn ((X >> Y) & C) == 0 into (X & (C << Y)) == 0. The latter is
7410 // preferable because it allows the C<<Y expression to be hoisted out
7411 // of a loop if Y is invariant and X is not.
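// For example, "((x >> n) & 1) == 0" becomes "(x & (1 << n)) == 0"; if n is
// loop-invariant, the "1 << n" expression can then be hoisted out of the loop.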
7412 if (Shift && Shift->hasOneUse() && RHSV == 0 &&
7413 ICI.isEquality() && !Shift->isArithmeticShift() &&
7414 !isa<Constant>(Shift->getOperand(0))) {
7417 if (Shift->getOpcode() == Instruction::LShr) {
7418 NS = Builder->CreateShl(AndCST, Shift->getOperand(1), "tmp");
7420 // Insert a logical shift.
7421 NS = Builder->CreateLShr(AndCST, Shift->getOperand(1), "tmp");
7424 // Compute X & (C << Y).
7426 Builder->CreateAnd(Shift->getOperand(0), NS, LHSI->getName());
7428 ICI.setOperand(0, NewAnd);
7433 // Try to optimize things like "A[i]&42 == 0" to index computations.
7434 if (LoadInst *LI = dyn_cast<LoadInst>(LHSI->getOperand(0))) {
7435 if (GetElementPtrInst *GEP =
7436 dyn_cast<GetElementPtrInst>(LI->getOperand(0)))
7437 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
7438 if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
7439 !LI->isVolatile() && isa<ConstantInt>(LHSI->getOperand(1))) {
7440 ConstantInt *C = cast<ConstantInt>(LHSI->getOperand(1));
7441 if (Instruction *Res = FoldCmpLoadFromIndexedGlobal(GEP, GV,ICI, C))
7447 case Instruction::Or: {
7448 if (!ICI.isEquality() || !RHS->isNullValue() || !LHSI->hasOneUse())
7451 if (match(LHSI, m_Or(m_PtrToInt(m_Value(P)), m_PtrToInt(m_Value(Q))))) {
7452 // Simplify icmp eq (or (ptrtoint P), (ptrtoint Q)), 0
7453 // -> and (icmp eq P, null), (icmp eq Q, null).
7455 Value *ICIP = Builder->CreateICmp(ICI.getPredicate(), P,
7456 Constant::getNullValue(P->getType()));
7457 Value *ICIQ = Builder->CreateICmp(ICI.getPredicate(), Q,
7458 Constant::getNullValue(Q->getType()));
7460 if (ICI.getPredicate() == ICmpInst::ICMP_EQ)
7461 Op = BinaryOperator::CreateAnd(ICIP, ICIQ);
7463 Op = BinaryOperator::CreateOr(ICIP, ICIQ);
7469 case Instruction::Shl: { // (icmp pred (shl X, ShAmt), CI)
7470 ConstantInt *ShAmt = dyn_cast<ConstantInt>(LHSI->getOperand(1));
7473 uint32_t TypeBits = RHSV.getBitWidth();
7475 // Check that the shift amount is in range. If not, don't perform
7476 // undefined shifts. When the shift is visited it will be
7478 if (ShAmt->uge(TypeBits))
7481 if (ICI.isEquality()) {
7482 // If we are comparing against bits always shifted out, the
7483 // comparison cannot succeed.
7485 ConstantExpr::getShl(ConstantExpr::getLShr(RHS, ShAmt),
7487 if (Comp != RHS) {// Comparing against a bit that we know is zero.
7488 bool IsICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;
7489 Constant *Cst = ConstantInt::get(Type::getInt1Ty(*Context), IsICMP_NE);
7490 return ReplaceInstUsesWith(ICI, Cst);
7493 if (LHSI->hasOneUse()) {
7494 // Otherwise strength reduce the shift into an and.
7495 uint32_t ShAmtVal = (uint32_t)ShAmt->getLimitedValue(TypeBits);
7497 ConstantInt::get(*Context, APInt::getLowBitsSet(TypeBits,
7498 TypeBits-ShAmtVal));
7501 Builder->CreateAnd(LHSI->getOperand(0),Mask, LHSI->getName()+".mask");
7502 return new ICmpInst(ICI.getPredicate(), And,
7503 ConstantInt::get(*Context, RHSV.lshr(ShAmtVal)));
7507 // Otherwise, if this is a comparison of the sign bit, simplify to and/test.
7508 bool TrueIfSigned = false;
7509 if (LHSI->hasOneUse() &&
7510 isSignBitCheck(ICI.getPredicate(), RHS, TrueIfSigned)) {
7511 // (X << 31) <s 0 --> (X&1) != 0
7512 Constant *Mask = ConstantInt::get(*Context, APInt(TypeBits, 1) <<
7513 (TypeBits-ShAmt->getZExtValue()-1));
7515 Builder->CreateAnd(LHSI->getOperand(0), Mask, LHSI->getName()+".mask");
7516 return new ICmpInst(TrueIfSigned ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ,
7517 And, Constant::getNullValue(And->getType()));
7522 case Instruction::LShr: // (icmp pred (shr X, ShAmt), CI)
7523 case Instruction::AShr: {
7524 // Only handle equality comparisons of shift-by-constant.
7525 ConstantInt *ShAmt = dyn_cast<ConstantInt>(LHSI->getOperand(1));
7526 if (!ShAmt || !ICI.isEquality()) break;
7528 // Check that the shift amount is in range. If not, don't perform
7529 // undefined shifts. When the shift is visited it will be
7531 uint32_t TypeBits = RHSV.getBitWidth();
7532 if (ShAmt->uge(TypeBits))
7535 uint32_t ShAmtVal = (uint32_t)ShAmt->getLimitedValue(TypeBits);
7537 // If we are comparing against bits always shifted out, the
7538 // comparison cannot succeed.
7539 APInt Comp = RHSV << ShAmtVal;
7540 if (LHSI->getOpcode() == Instruction::LShr)
7541 Comp = Comp.lshr(ShAmtVal);
7543 Comp = Comp.ashr(ShAmtVal);
7545 if (Comp != RHSV) { // Comparing against a bit that we know is zero.
7546 bool IsICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;
7547 Constant *Cst = ConstantInt::get(Type::getInt1Ty(*Context), IsICMP_NE);
7548 return ReplaceInstUsesWith(ICI, Cst);
7551 // Otherwise, check to see if the bits shifted out are known to be zero.
7552 // If so, we can compare against the unshifted value:
7553 // (X & 4) >> 1 == 2 --> (X & 4) == 4.
7554 if (LHSI->hasOneUse() &&
7555 MaskedValueIsZero(LHSI->getOperand(0),
7556 APInt::getLowBitsSet(Comp.getBitWidth(), ShAmtVal))) {
7557 return new ICmpInst(ICI.getPredicate(), LHSI->getOperand(0),
7558 ConstantExpr::getShl(RHS, ShAmt));
7561 if (LHSI->hasOneUse()) {
7562 // Otherwise strength reduce the shift into an and.
7563 APInt Val(APInt::getHighBitsSet(TypeBits, TypeBits - ShAmtVal));
7564 Constant *Mask = ConstantInt::get(*Context, Val);
7566 Value *And = Builder->CreateAnd(LHSI->getOperand(0),
7567 Mask, LHSI->getName()+".mask");
7568 return new ICmpInst(ICI.getPredicate(), And,
7569 ConstantExpr::getShl(RHS, ShAmt));
7574 case Instruction::SDiv:
7575 case Instruction::UDiv:
7576 // Fold: icmp pred ([us]div X, C1), C2 -> range test
7577 // Fold this div into the comparison, producing a range check.
7578 // Determine, based on the divide type, what range is being
7579 // checked. If there is an overflow on the low or high side, remember
7580 // it, otherwise compute the range [low, hi) bounding the new value.
7581 // See: InsertRangeTest above for the kinds of replacements possible.
7582 if (ConstantInt *DivRHS = dyn_cast<ConstantInt>(LHSI->getOperand(1)))
7583 if (Instruction *R = FoldICmpDivCst(ICI, cast<BinaryOperator>(LHSI),
7588 case Instruction::Add:
7589 // Fold: icmp pred (add X, C1), C2
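// For example, with i8 operands, "icmp ugt (add X, 5), 4" holds exactly when
// X <u 251; the range computed below lets the unsigned case emit that simpler
// compare.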
7590 if (!ICI.isEquality()) {
7591 ConstantInt *LHSC = dyn_cast<ConstantInt>(LHSI->getOperand(1));
7593 const APInt &LHSV = LHSC->getValue();
7595 ConstantRange CR = ICI.makeConstantRange(ICI.getPredicate(), RHSV)
7598 if (ICI.isSigned()) {
7599 if (CR.getLower().isSignBit()) {
7600 return new ICmpInst(ICmpInst::ICMP_SLT, LHSI->getOperand(0),
7601 ConstantInt::get(*Context, CR.getUpper()));
7602 } else if (CR.getUpper().isSignBit()) {
7603 return new ICmpInst(ICmpInst::ICMP_SGE, LHSI->getOperand(0),
7604 ConstantInt::get(*Context, CR.getLower()));
7607 if (CR.getLower().isMinValue()) {
7608 return new ICmpInst(ICmpInst::ICMP_ULT, LHSI->getOperand(0),
7609 ConstantInt::get(*Context, CR.getUpper()));
7610 } else if (CR.getUpper().isMinValue()) {
7611 return new ICmpInst(ICmpInst::ICMP_UGE, LHSI->getOperand(0),
7612 ConstantInt::get(*Context, CR.getLower()));
7619 // Simplify icmp_eq and icmp_ne instructions with integer constant RHS.
7620 if (ICI.isEquality()) {
7621 bool isICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;
7623 // If the first operand is (add|sub|and|or|xor|rem) with a constant, and
7624 // the second operand is a constant, simplify a bit.
7625 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(LHSI)) {
7626 switch (BO->getOpcode()) {
7627 case Instruction::SRem:
7628 // If we have a signed (X % (2^c)) == 0, turn it into an unsigned one.
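// For example, "(X srem 8) == 0" becomes "(X urem 8) == 0"; both hold exactly
// when the low three bits of X are zero.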
7629 if (RHSV == 0 && isa<ConstantInt>(BO->getOperand(1)) &&BO->hasOneUse()){
7630 const APInt &V = cast<ConstantInt>(BO->getOperand(1))->getValue();
7631 if (V.sgt(APInt(V.getBitWidth(), 1)) && V.isPowerOf2()) {
7633 Builder->CreateURem(BO->getOperand(0), BO->getOperand(1),
7635 return new ICmpInst(ICI.getPredicate(), NewRem,
7636 Constant::getNullValue(BO->getType()));
7640 case Instruction::Add:
7641 // Replace ((add A, B) != C) with (A != C-B) if B & C are constants.
7642 if (ConstantInt *BOp1C = dyn_cast<ConstantInt>(BO->getOperand(1))) {
7643 if (BO->hasOneUse())
7644 return new ICmpInst(ICI.getPredicate(), BO->getOperand(0),
7645 ConstantExpr::getSub(RHS, BOp1C));
7646 } else if (RHSV == 0) {
7647 // Replace ((add A, B) != 0) with (A != -B) if A or B is
7648 // efficiently invertible, or if the add has just this one use.
7649 Value *BOp0 = BO->getOperand(0), *BOp1 = BO->getOperand(1);
7651 if (Value *NegVal = dyn_castNegVal(BOp1))
7652 return new ICmpInst(ICI.getPredicate(), BOp0, NegVal);
7653 else if (Value *NegVal = dyn_castNegVal(BOp0))
7654 return new ICmpInst(ICI.getPredicate(), NegVal, BOp1);
7655 else if (BO->hasOneUse()) {
7656 Value *Neg = Builder->CreateNeg(BOp1);
7658 return new ICmpInst(ICI.getPredicate(), BOp0, Neg);
7662 case Instruction::Xor:
7663 // For the xor case, we can xor two constants together, eliminating
7664 // the explicit xor.
7665 if (Constant *BOC = dyn_cast<Constant>(BO->getOperand(1)))
7666 return new ICmpInst(ICI.getPredicate(), BO->getOperand(0),
7667 ConstantExpr::getXor(RHS, BOC));
7670 case Instruction::Sub:
7671 // Replace (([sub|xor] A, B) != 0) with (A != B)
7673 return new ICmpInst(ICI.getPredicate(), BO->getOperand(0),
7677 case Instruction::Or:
7678 // If bits are being or'd in that are not present in the constant we
7679 // are comparing against, then the comparison could never succeed!
7680 if (Constant *BOC = dyn_cast<Constant>(BO->getOperand(1))) {
7681 Constant *NotCI = ConstantExpr::getNot(RHS);
7682 if (!ConstantExpr::getAnd(BOC, NotCI)->isNullValue())
7683 return ReplaceInstUsesWith(ICI,
7684 ConstantInt::get(Type::getInt1Ty(*Context),
7689 case Instruction::And:
7690 if (ConstantInt *BOC = dyn_cast<ConstantInt>(BO->getOperand(1))) {
7691 // If bits are being compared against that are and'd out, then the
7692 // comparison can never succeed!
7693 if ((RHSV & ~BOC->getValue()) != 0)
7694 return ReplaceInstUsesWith(ICI,
7695 ConstantInt::get(Type::getInt1Ty(*Context),
7698 // If we have ((X & C) == C), turn it into ((X & C) != 0).
7699 if (RHS == BOC && RHSV.isPowerOf2())
7700 return new ICmpInst(isICMP_NE ? ICmpInst::ICMP_EQ :
7701 ICmpInst::ICMP_NE, LHSI,
7702 Constant::getNullValue(RHS->getType()));
7704 // Replace ((and X, (1 << size(X)-1)) != 0) with X s< 0
7705 if (BOC->getValue().isSignBit()) {
7706 Value *X = BO->getOperand(0);
7707 Constant *Zero = Constant::getNullValue(X->getType());
7708 ICmpInst::Predicate pred = isICMP_NE ?
7709 ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGE;
7710 return new ICmpInst(pred, X, Zero);
7713 // ((X & ~7) == 0) --> X < 8
7714 if (RHSV == 0 && isHighOnes(BOC)) {
7715 Value *X = BO->getOperand(0);
7716 Constant *NegX = ConstantExpr::getNeg(BOC);
7717 ICmpInst::Predicate pred = isICMP_NE ?
7718 ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT;
7719 return new ICmpInst(pred, X, NegX);
7724 } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(LHSI)) {
7725 // Handle icmp {eq|ne} <intrinsic>, intcst.
7726 if (II->getIntrinsicID() == Intrinsic::bswap) {
7728 ICI.setOperand(0, II->getOperand(1));
7729 ICI.setOperand(1, ConstantInt::get(*Context, RHSV.byteSwap()));
7737 /// visitICmpInstWithCastAndCast - Handle icmp (cast x to y), (cast/cst).
7738 /// We only handle extending casts so far.
7740 Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) {
7741 const CastInst *LHSCI = cast<CastInst>(ICI.getOperand(0));
7742 Value *LHSCIOp = LHSCI->getOperand(0);
7743 const Type *SrcTy = LHSCIOp->getType();
7744 const Type *DestTy = LHSCI->getType();
7747 // Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
7748 // integer type is the same size as the pointer type.
7749 if (TD && LHSCI->getOpcode() == Instruction::PtrToInt &&
7750 TD->getPointerSizeInBits() ==
7751 cast<IntegerType>(DestTy)->getBitWidth()) {
7753 if (Constant *RHSC = dyn_cast<Constant>(ICI.getOperand(1))) {
7754 RHSOp = ConstantExpr::getIntToPtr(RHSC, SrcTy);
7755 } else if (PtrToIntInst *RHSC = dyn_cast<PtrToIntInst>(ICI.getOperand(1))) {
7756 RHSOp = RHSC->getOperand(0);
7757 // If the pointer types don't match, insert a bitcast.
7758 if (LHSCIOp->getType() != RHSOp->getType())
7759 RHSOp = Builder->CreateBitCast(RHSOp, LHSCIOp->getType());
7763 return new ICmpInst(ICI.getPredicate(), LHSCIOp, RHSOp);
7766 // The code below only handles extension cast instructions, so far.
7768 if (LHSCI->getOpcode() != Instruction::ZExt &&
7769 LHSCI->getOpcode() != Instruction::SExt)
7772 bool isSignedExt = LHSCI->getOpcode() == Instruction::SExt;
7773 bool isSignedCmp = ICI.isSigned();
7775 if (CastInst *CI = dyn_cast<CastInst>(ICI.getOperand(1))) {
7776 // Not an extension from the same type?
7777 RHSCIOp = CI->getOperand(0);
7778 if (RHSCIOp->getType() != LHSCIOp->getType())
7781 // If the signedness of the two casts doesn't agree (i.e. one is a sext
7782 // and the other is a zext), then we can't handle this.
7783 if (CI->getOpcode() != LHSCI->getOpcode())
7786 // Deal with equality cases early.
7787 if (ICI.isEquality())
7788 return new ICmpInst(ICI.getPredicate(), LHSCIOp, RHSCIOp);
7790 // A signed comparison of sign extended values simplifies into a
7791 // signed comparison.
7792 if (isSignedCmp && isSignedExt)
7793 return new ICmpInst(ICI.getPredicate(), LHSCIOp, RHSCIOp);
7795 // The other three cases all fold into an unsigned comparison.
7796 return new ICmpInst(ICI.getUnsignedPredicate(), LHSCIOp, RHSCIOp);
7799 // If we aren't dealing with a constant on the RHS, exit early
7800 ConstantInt *CI = dyn_cast<ConstantInt>(ICI.getOperand(1));
7804 // Compute the constant that would result if we truncated to SrcTy then
7805 // re-extended to DestTy.
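// For example, for "icmp slt (sext i8 %x to i32), 1000", truncating 1000 to
// i8 gives -24 and sign-extending -24 back to i32 gives -24, not 1000, so the
// constant is not representable in i8 and the narrowing fold below does not
// apply.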
7806 Constant *Res1 = ConstantExpr::getTrunc(CI, SrcTy);
7807 Constant *Res2 = ConstantExpr::getCast(LHSCI->getOpcode(),
7810 // If the re-extended constant didn't change...
7812 // Deal with equality cases early.
7813 if (ICI.isEquality())
7814 return new ICmpInst(ICI.getPredicate(), LHSCIOp, Res1);
7816 // A signed comparison of sign extended values simplifies into a
7817 // signed comparison.
7818 if (isSignedExt && isSignedCmp)
7819 return new ICmpInst(ICI.getPredicate(), LHSCIOp, Res1);
7821 // The other three cases all fold into an unsigned comparison.
7822 return new ICmpInst(ICI.getUnsignedPredicate(), LHSCIOp, Res1);
7825 // The re-extended constant changed so the constant cannot be represented
7826 // in the shorter type. Consequently, we cannot emit a simple comparison.
7828 // First, handle some easy cases. We know the result cannot be equal at this
7829 // point so handle the ICI.isEquality() cases
7830 if (ICI.getPredicate() == ICmpInst::ICMP_EQ)
7831 return ReplaceInstUsesWith(ICI, ConstantInt::getFalse(*Context));
7832 if (ICI.getPredicate() == ICmpInst::ICMP_NE)
7833 return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(*Context));
7835 // Evaluate the comparison for LT (we invert for GT below). LE and GE cases
7836 // should have been folded away previously and should not reach here.
7839 // We're performing a signed comparison.
7840 if (cast<ConstantInt>(CI)->getValue().isNegative())
7841 Result = ConstantInt::getFalse(*Context); // X < (small) --> false
7843 Result = ConstantInt::getTrue(*Context); // X < (large) --> true
7845 // We're performing an unsigned comparison.
7847 // We're performing an unsigned comp with a sign extended value.
7848 // This is true if the input is >= 0. [aka >s -1]
7849 Constant *NegOne = Constant::getAllOnesValue(SrcTy);
7850 Result = Builder->CreateICmpSGT(LHSCIOp, NegOne, ICI.getName());
7852 // Unsigned extend & unsigned compare -> always true.
7853 Result = ConstantInt::getTrue(*Context);
7857 // Finally, return the value computed.
7858 if (ICI.getPredicate() == ICmpInst::ICMP_ULT ||
7859 ICI.getPredicate() == ICmpInst::ICMP_SLT)
7860 return ReplaceInstUsesWith(ICI, Result);
7862 assert((ICI.getPredicate()==ICmpInst::ICMP_UGT ||
7863 ICI.getPredicate()==ICmpInst::ICMP_SGT) &&
7864 "ICmp should be folded!");
7865 if (Constant *CI = dyn_cast<Constant>(Result))
7866 return ReplaceInstUsesWith(ICI, ConstantExpr::getNot(CI));
7867 return BinaryOperator::CreateNot(Result);
7870 Instruction *InstCombiner::visitShl(BinaryOperator &I) {
7871 return commonShiftTransforms(I);
7874 Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
7875 return commonShiftTransforms(I);
7878 Instruction *InstCombiner::visitAShr(BinaryOperator &I) {
7879 if (Instruction *R = commonShiftTransforms(I))
7882 Value *Op0 = I.getOperand(0);
7884 // ashr int -1, X = -1 (for any arithmetic shift rights of ~0)
7885 if (ConstantInt *CSI = dyn_cast<ConstantInt>(Op0))
7886 if (CSI->isAllOnesValue())
7887 return ReplaceInstUsesWith(I, CSI);
7889 // See if we can turn a signed shr into an unsigned shr.
7890 if (MaskedValueIsZero(Op0,
7891 APInt::getSignBit(I.getType()->getScalarSizeInBits())))
7892 return BinaryOperator::CreateLShr(Op0, I.getOperand(1));
7894 // Arithmetic shifting an all-sign-bit value is a no-op.
7895 unsigned NumSignBits = ComputeNumSignBits(Op0);
7896 if (NumSignBits == Op0->getType()->getScalarSizeInBits())
7897 return ReplaceInstUsesWith(I, Op0);
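// For example (hypothetical IR): if the sign bit of %x is known to be zero,
// e.g. because %x = and i32 %a, 255, then
//   %s = ashr i32 %x, %n
// is rewritten to
//   %s = lshr i32 %x, %n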
7902 Instruction *InstCombiner::commonShiftTransforms(BinaryOperator &I) {
7903 assert(I.getOperand(1)->getType() == I.getOperand(0)->getType());
7904 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
7906 // shl X, 0 == X and shr X, 0 == X
7907 // shl 0, X == 0 and shr 0, X == 0
7908 if (Op1 == Constant::getNullValue(Op1->getType()) ||
7909 Op0 == Constant::getNullValue(Op0->getType()))
7910 return ReplaceInstUsesWith(I, Op0);
7912 if (isa<UndefValue>(Op0)) {
7913 if (I.getOpcode() == Instruction::AShr) // undef >>s X -> undef
7914 return ReplaceInstUsesWith(I, Op0);
7915 else // undef << X -> 0, undef >>u X -> 0
7916 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
7918 if (isa<UndefValue>(Op1)) {
7919 if (I.getOpcode() == Instruction::AShr) // X >>s undef -> X
7920 return ReplaceInstUsesWith(I, Op0);
7921 else // X << undef, X >>u undef -> 0
7922 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
7925 // See if we can fold away this shift.
7926 if (SimplifyDemandedInstructionBits(I))
7929 // Try to fold constant and into select arguments.
7930 if (isa<Constant>(Op0))
7931 if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
7932 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
7935 if (ConstantInt *CUI = dyn_cast<ConstantInt>(Op1))
7936 if (Instruction *Res = FoldShiftByConstant(Op0, CUI, I))
7941 Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
7942 BinaryOperator &I) {
7943 bool isLeftShift = I.getOpcode() == Instruction::Shl;
7945 // See if we can simplify any instructions used by the instruction whose sole
7946 // purpose is to compute bits we don't care about.
7947 uint32_t TypeBits = Op0->getType()->getScalarSizeInBits();
7949 // shl i32 X, 32 = 0 and srl i8 Y, 9 = 0, ... just don't eliminate
7952 if (Op1->uge(TypeBits)) {
7953 if (I.getOpcode() != Instruction::AShr)
7954 return ReplaceInstUsesWith(I, Constant::getNullValue(Op0->getType()));
7956 I.setOperand(1, ConstantInt::get(I.getType(), TypeBits-1));
7961 // ((X*C1) << C2) == (X * (C1 << C2))
7962 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(Op0))
7963 if (BO->getOpcode() == Instruction::Mul && isLeftShift)
7964 if (Constant *BOOp = dyn_cast<Constant>(BO->getOperand(1)))
7965 return BinaryOperator::CreateMul(BO->getOperand(0),
7966 ConstantExpr::getShl(BOOp, Op1));
7968 // Try to fold constant and into select arguments.
7969 if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
7970 if (Instruction *R = FoldOpIntoSelect(I, SI, this))
7972 if (isa<PHINode>(Op0))
7973 if (Instruction *NV = FoldOpIntoPhi(I))
7976 // Fold shift2(trunc(shift1(x,c1)), c2) -> trunc(shift2(shift1(x,c1),c2))
7977 if (TruncInst *TI = dyn_cast<TruncInst>(Op0)) {
7978 Instruction *TrOp = dyn_cast<Instruction>(TI->getOperand(0));
7979 // If 'shift2' is an ashr, we would have to get the sign bit into a funny
7980 // place. Don't try to do this transformation in this case. Also, we
7981 // require that the input operand is a shift-by-constant so that we have
7982 // confidence that the shifts will get folded together. We could do this
7983 // xform in more cases, but it is unlikely to be profitable.
7984 if (TrOp && I.isLogicalShift() && TrOp->isShift() &&
7985 isa<ConstantInt>(TrOp->getOperand(1))) {
7986 // Okay, we'll do this xform. Make the shift of shift.
7987 Constant *ShAmt = ConstantExpr::getZExt(Op1, TrOp->getType());
7988 // (shift2 (shift1 & 0x00FF), c2)
7989 Value *NSh = Builder->CreateBinOp(I.getOpcode(), TrOp, ShAmt,I.getName());
7991 // For logical shifts, the truncation has the effect of making the high
7992 // part of the register be zeros. Emulate this by inserting an AND to
7993 // clear the top bits as needed. This 'and' will usually be zapped by
7994 // other xforms later if dead.
7995 unsigned SrcSize = TrOp->getType()->getScalarSizeInBits();
7996 unsigned DstSize = TI->getType()->getScalarSizeInBits();
7997 APInt MaskV(APInt::getLowBitsSet(SrcSize, DstSize));
7999 // The mask we constructed says what the trunc would do if occurring
8000 // between the shifts. We want to know the effect *after* the second
8001 // shift. We know that it is a logical shift by a constant, so adjust the
8002 // mask as appropriate.
8003 if (I.getOpcode() == Instruction::Shl)
8004 MaskV <<= Op1->getZExtValue();
8006 assert(I.getOpcode() == Instruction::LShr && "Unknown logical shift");
8007 MaskV = MaskV.lshr(Op1->getZExtValue());
8011 Value *And = Builder->CreateAnd(NSh, ConstantInt::get(*Context, MaskV),
8014 // Return the value truncated to the interesting size.
8015 return new TruncInst(And, I.getType());
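// Illustrative instance of the trunc-of-shift fold above (hypothetical IR):
//   %s1 = lshr i32 %x, 4
//   %t  = trunc i32 %s1 to i16
//   %s2 = lshr i16 %t, 2
// becomes
//   %n  = lshr i32 %s1, 2
//   %m  = and i32 %n, 16383      ; low 16 bits of the trunc, shifted right by 2
//   %s2 = trunc i32 %m to i16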
8019 if (Op0->hasOneUse()) {
8020 if (BinaryOperator *Op0BO = dyn_cast<BinaryOperator>(Op0)) {
8021 // Turn ((X >> C) + Y) << C -> (X + (Y << C)) & (~0 << C)
8024 switch (Op0BO->getOpcode()) {
8026 case Instruction::Add:
8027 case Instruction::And:
8028 case Instruction::Or:
8029 case Instruction::Xor: {
8030 // These operators commute.
8031 // Turn (Y + (X >> C)) << C -> (X + (Y << C)) & (~0 << C)
8032 if (isLeftShift && Op0BO->getOperand(1)->hasOneUse() &&
8033 match(Op0BO->getOperand(1), m_Shr(m_Value(V1),
8034 m_Specific(Op1)))) {
8035 Value *YS = // (Y << C)
8036 Builder->CreateShl(Op0BO->getOperand(0), Op1, Op0BO->getName());
8038 Value *X = Builder->CreateBinOp(Op0BO->getOpcode(), YS, V1,
8039 Op0BO->getOperand(1)->getName());
8040 uint32_t Op1Val = Op1->getLimitedValue(TypeBits);
8041 return BinaryOperator::CreateAnd(X, ConstantInt::get(*Context,
8042 APInt::getHighBitsSet(TypeBits, TypeBits-Op1Val)));
8045 // Turn (Y + ((X >> C) & CC)) << C -> ((X & (CC << C)) + (Y << C))
8046 Value *Op0BOOp1 = Op0BO->getOperand(1);
8047 if (isLeftShift && Op0BOOp1->hasOneUse() &&
8049 m_And(m_Shr(m_Value(V1), m_Specific(Op1)),
8050 m_ConstantInt(CC))) &&
8051 cast<BinaryOperator>(Op0BOOp1)->getOperand(0)->hasOneUse()) {
8052 Value *YS = // (Y << C)
8053 Builder->CreateShl(Op0BO->getOperand(0), Op1,
8056 Value *XM = Builder->CreateAnd(V1, ConstantExpr::getShl(CC, Op1),
8057 V1->getName()+".mask");
8058 return BinaryOperator::Create(Op0BO->getOpcode(), YS, XM);
8063 case Instruction::Sub: {
8064 // Turn ((X >> C) + Y) << C -> (X + (Y << C)) & (~0 << C)
8065 if (isLeftShift && Op0BO->getOperand(0)->hasOneUse() &&
8066 match(Op0BO->getOperand(0), m_Shr(m_Value(V1),
8067 m_Specific(Op1)))) {
8068 Value *YS = // (Y << C)
8069 Builder->CreateShl(Op0BO->getOperand(1), Op1, Op0BO->getName());
8071 Value *X = Builder->CreateBinOp(Op0BO->getOpcode(), V1, YS,
8072 Op0BO->getOperand(0)->getName());
8073 uint32_t Op1Val = Op1->getLimitedValue(TypeBits);
8074 return BinaryOperator::CreateAnd(X, ConstantInt::get(*Context,
8075 APInt::getHighBitsSet(TypeBits, TypeBits-Op1Val)));
8078 // Turn (((X >> C)&CC) + Y) << C -> (X + (Y << C)) & (CC << C)
8079 if (isLeftShift && Op0BO->getOperand(0)->hasOneUse() &&
8080 match(Op0BO->getOperand(0),
8081 m_And(m_Shr(m_Value(V1), m_Value(V2)),
8082 m_ConstantInt(CC))) && V2 == Op1 &&
8083 cast<BinaryOperator>(Op0BO->getOperand(0))
8084 ->getOperand(0)->hasOneUse()) {
8085 Value *YS = // (Y << C)
8086 Builder->CreateShl(Op0BO->getOperand(1), Op1, Op0BO->getName());
8088 Value *XM = Builder->CreateAnd(V1, ConstantExpr::getShl(CC, Op1),
8089 V1->getName()+".mask");
8091 return BinaryOperator::Create(Op0BO->getOpcode(), XM, YS);
8099 // If the operand is a bitwise operator with a constant RHS, and the
8100 // shift is the only use, we can pull it out of the shift.
8101 if (ConstantInt *Op0C = dyn_cast<ConstantInt>(Op0BO->getOperand(1))) {
8102 bool isValid = true; // Valid only for And, Or, Xor
8103 bool highBitSet = false; // Transform if high bit of constant set?
8105 switch (Op0BO->getOpcode()) {
8106 default: isValid = false; break; // Do not perform transform!
8107 case Instruction::Add:
8108 isValid = isLeftShift;
8110 case Instruction::Or:
8111 case Instruction::Xor:
8114 case Instruction::And:
8119 // If this is a signed shift right, and the high bit is modified
8120 // by the logical operation, do not perform the transformation.
8121 // The highBitSet boolean indicates the value of the high bit of
8122 // the constant which would cause it to be modified for this
8125 if (isValid && I.getOpcode() == Instruction::AShr)
8126 isValid = Op0C->getValue()[TypeBits-1] == highBitSet;
8129 Constant *NewRHS = ConstantExpr::get(I.getOpcode(), Op0C, Op1);
8132 Builder->CreateBinOp(I.getOpcode(), Op0BO->getOperand(0), Op1);
8133 NewShift->takeName(Op0BO);
8135 return BinaryOperator::Create(Op0BO->getOpcode(), NewShift,
8142 // Find out if this is a shift of a shift by a constant.
8143 BinaryOperator *ShiftOp = dyn_cast<BinaryOperator>(Op0);
8144 if (ShiftOp && !ShiftOp->isShift())
8147 if (ShiftOp && isa<ConstantInt>(ShiftOp->getOperand(1))) {
8148 ConstantInt *ShiftAmt1C = cast<ConstantInt>(ShiftOp->getOperand(1));
8149 uint32_t ShiftAmt1 = ShiftAmt1C->getLimitedValue(TypeBits);
8150 uint32_t ShiftAmt2 = Op1->getLimitedValue(TypeBits);
8151 assert(ShiftAmt2 != 0 && "Should have been simplified earlier");
8152 if (ShiftAmt1 == 0) return 0; // Will be simplified in the future.
8153 Value *X = ShiftOp->getOperand(0);
8155 uint32_t AmtSum = ShiftAmt1+ShiftAmt2; // Fold into one big shift.
8157 const IntegerType *Ty = cast<IntegerType>(I.getType());
8159 // Check for (X << c1) << c2 and (X >> c1) >> c2
8160 if (I.getOpcode() == ShiftOp->getOpcode()) {
8161 // If this is an oversized composite shift, unsigned shifts become zero and ashr saturates.
8163 if (AmtSum >= TypeBits) {
8164 if (I.getOpcode() != Instruction::AShr)
8165 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
8166 AmtSum = TypeBits-1; // Saturate to 31 for i32 ashr.
8169 return BinaryOperator::Create(I.getOpcode(), X,
8170 ConstantInt::get(Ty, AmtSum));
8173 if (ShiftOp->getOpcode() == Instruction::LShr &&
8174 I.getOpcode() == Instruction::AShr) {
8175 if (AmtSum >= TypeBits)
8176 return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
8178 // ((X >>u C1) >>s C2) -> (X >>u (C1+C2)) since C1 != 0.
8179 return BinaryOperator::CreateLShr(X, ConstantInt::get(Ty, AmtSum));
8182 if (ShiftOp->getOpcode() == Instruction::AShr &&
8183 I.getOpcode() == Instruction::LShr) {
8184 // ((X >>s C1) >>u C2) -> ((X >>s (C1+C2)) & mask) since C1 != 0.
8185 if (AmtSum >= TypeBits)
8186 AmtSum = TypeBits-1;
8188 Value *Shift = Builder->CreateAShr(X, ConstantInt::get(Ty, AmtSum));
8190 APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt2));
8191 return BinaryOperator::CreateAnd(Shift, ConstantInt::get(*Context, Mask));
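// For example (hypothetical IR), with C1 = 8 and C2 = 4 on i32:
//   %a = ashr i32 %x, 8
//   %b = lshr i32 %a, 4
// becomes
//   %s = ashr i32 %x, 12
//   %b = and i32 %s, 268435455    ; keep the low 32-4 = 28 bits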
8194 // Okay, if we get here, one shift must be left, and the other shift must be
8195 // right. See if the amounts are equal.
8196 if (ShiftAmt1 == ShiftAmt2) {
8197 // If we have ((X >>? C) << C), turn this into X & (-1 << C).
8198 if (I.getOpcode() == Instruction::Shl) {
8199 APInt Mask(APInt::getHighBitsSet(TypeBits, TypeBits - ShiftAmt1));
8200 return BinaryOperator::CreateAnd(X, ConstantInt::get(*Context, Mask));
8202 // If we have ((X << C) >>u C), turn this into X & (-1 >>u C).
8203 if (I.getOpcode() == Instruction::LShr) {
8204 APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt1));
8205 return BinaryOperator::CreateAnd(X, ConstantInt::get(*Context, Mask));
8207 // We can simplify ((X << C) >>s C) into a trunc + sext.
8208 // NOTE: we could do this for any C, but that would make 'unusual' integer
8209 // types. For now, just stick to ones well-supported by the code generator.
8211 const Type *SExtType = 0;
8212 switch (Ty->getBitWidth() - ShiftAmt1) {
8219 SExtType = IntegerType::get(*Context, Ty->getBitWidth() - ShiftAmt1);
8224 return new SExtInst(Builder->CreateTrunc(X, SExtType, "sext"), Ty);
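// For example (hypothetical IR), with C = 24 on i32:
//   %a = shl i32 %x, 24
//   %b = ashr i32 %a, 24
// becomes
//   %t = trunc i32 %x to i8
//   %b = sext i8 %t to i32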
8225 // Otherwise, we can't handle it yet.
8226 } else if (ShiftAmt1 < ShiftAmt2) {
8227 uint32_t ShiftDiff = ShiftAmt2-ShiftAmt1;
8229 // (X >>? C1) << C2 --> X << (C2-C1) & (-1 << C2)
8230 if (I.getOpcode() == Instruction::Shl) {
8231 assert(ShiftOp->getOpcode() == Instruction::LShr ||
8232 ShiftOp->getOpcode() == Instruction::AShr);
8233 Value *Shift = Builder->CreateShl(X, ConstantInt::get(Ty, ShiftDiff));
8235 APInt Mask(APInt::getHighBitsSet(TypeBits, TypeBits - ShiftAmt2));
8236 return BinaryOperator::CreateAnd(Shift,
8237 ConstantInt::get(*Context, Mask));
8240 // (X << C1) >>u C2 --> X >>u (C2-C1) & (-1 >> C2)
8241 if (I.getOpcode() == Instruction::LShr) {
8242 assert(ShiftOp->getOpcode() == Instruction::Shl);
8243 Value *Shift = Builder->CreateLShr(X, ConstantInt::get(Ty, ShiftDiff));
8245 APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt2));
8246 return BinaryOperator::CreateAnd(Shift,
8247 ConstantInt::get(*Context, Mask));
8250 // We can't handle (X << C1) >>s C2, it shifts arbitrary bits in.
8252 assert(ShiftAmt2 < ShiftAmt1);
8253 uint32_t ShiftDiff = ShiftAmt1-ShiftAmt2;
8255 // (X >>? C1) << C2 --> X >>? (C1-C2) & (-1 << C2)
8256 if (I.getOpcode() == Instruction::Shl) {
8257 assert(ShiftOp->getOpcode() == Instruction::LShr ||
8258 ShiftOp->getOpcode() == Instruction::AShr);
8259 Value *Shift = Builder->CreateBinOp(ShiftOp->getOpcode(), X,
8260 ConstantInt::get(Ty, ShiftDiff));
8262 APInt Mask(APInt::getHighBitsSet(TypeBits, TypeBits - ShiftAmt2));
8263 return BinaryOperator::CreateAnd(Shift,
8264 ConstantInt::get(*Context, Mask));
8267 // (X << C1) >>u C2 --> X << (C1-C2) & (-1 >> C2)
8268 if (I.getOpcode() == Instruction::LShr) {
8269 assert(ShiftOp->getOpcode() == Instruction::Shl);
8270 Value *Shift = Builder->CreateShl(X, ConstantInt::get(Ty, ShiftDiff));
8272 APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt2));
8273 return BinaryOperator::CreateAnd(Shift,
8274 ConstantInt::get(*Context, Mask));
8277 // We can't handle (X << C1) >>a C2, it shifts arbitrary bits in.
8284 /// DecomposeSimpleLinearExpr - Analyze 'Val', seeing if it is a simple linear
8285 /// expression. If so, decompose it, returning some value X, such that Val is
8288 static Value *DecomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
8289 int &Offset, LLVMContext *Context) {
8290 assert(Val->getType() == Type::getInt32Ty(*Context) &&
8291 "Unexpected allocation size type!");
8292 if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
8293 Offset = CI->getZExtValue();
8295 return ConstantInt::get(Type::getInt32Ty(*Context), 0);
8296 } else if (BinaryOperator *I = dyn_cast<BinaryOperator>(Val)) {
8297 if (ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
8298 if (I->getOpcode() == Instruction::Shl) {
8299 // This is a value scaled by '1 << the shift amt'.
8300 Scale = 1U << RHS->getZExtValue();
8302 return I->getOperand(0);
8303 } else if (I->getOpcode() == Instruction::Mul) {
8304 // This value is scaled by 'RHS'.
8305 Scale = RHS->getZExtValue();
8307 return I->getOperand(0);
8308 } else if (I->getOpcode() == Instruction::Add) {
8309 // We have X+C. Check to see if we really have (X*C2)+C1,
8310 // where C1 is divisible by C2.
8313 DecomposeSimpleLinearExpr(I->getOperand(0), SubScale,
8315 Offset += RHS->getZExtValue();
8322 // Otherwise, we can't look past this.
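// For example (hypothetical IR): decomposing
//   %m = mul i32 %n, 4
//   %v = add i32 %m, 8
// yields X = %n with Scale = 4 and Offset = 8; a shl by 2 is treated the same
// way as a multiply by 4.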
8329 /// PromoteCastOfAllocation - If we find a cast of an allocation instruction,
8330 /// try to eliminate the cast by moving the type information into the alloc.
8331 Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
8333 const PointerType *PTy = cast<PointerType>(CI.getType());
8335 BuilderTy AllocaBuilder(*Builder);
8336 AllocaBuilder.SetInsertPoint(AI.getParent(), &AI);
8338 // Remove any uses of AI that are dead.
8339 assert(!CI.use_empty() && "Dead instructions should be removed earlier!");
8341 for (Value::use_iterator UI = AI.use_begin(), E = AI.use_end(); UI != E; ) {
8342 Instruction *User = cast<Instruction>(*UI++);
8343 if (isInstructionTriviallyDead(User)) {
8344 while (UI != E && *UI == User)
8345 ++UI; // If this instruction uses AI more than once, don't break UI.
8348 DEBUG(errs() << "IC: DCE: " << *User << '\n');
8349 EraseInstFromFunction(*User);
8353 // This requires TargetData to get the alloca alignment and size information.
8356 // Get the type really allocated and the type casted to.
8357 const Type *AllocElTy = AI.getAllocatedType();
8358 const Type *CastElTy = PTy->getElementType();
8359 if (!AllocElTy->isSized() || !CastElTy->isSized()) return 0;
8361 unsigned AllocElTyAlign = TD->getABITypeAlignment(AllocElTy);
8362 unsigned CastElTyAlign = TD->getABITypeAlignment(CastElTy);
8363 if (CastElTyAlign < AllocElTyAlign) return 0;
8365 // If the allocation has multiple uses, only promote it if we are strictly
8366 // increasing the alignment of the resultant allocation. If we keep it the
8367 // same, we open the door to infinite loops of various kinds. (A reference
8368 // from a dbg.declare doesn't count as a use for this purpose.)
8369 if (!AI.hasOneUse() && !hasOneUsePlusDeclare(&AI) &&
8370 CastElTyAlign == AllocElTyAlign) return 0;
8372 uint64_t AllocElTySize = TD->getTypeAllocSize(AllocElTy);
8373 uint64_t CastElTySize = TD->getTypeAllocSize(CastElTy);
8374 if (CastElTySize == 0 || AllocElTySize == 0) return 0;
8376 // See if we can satisfy the modulus by pulling a scale out of the array
8378 unsigned ArraySizeScale;
8380 Value *NumElements = // See if the array size is a decomposable linear expr.
8381 DecomposeSimpleLinearExpr(AI.getOperand(0), ArraySizeScale,
8382 ArrayOffset, Context);
8384 // If we can now satisfy the modulus, by using a non-1 scale, we really can
8386 if ((AllocElTySize*ArraySizeScale) % CastElTySize != 0 ||
8387 (AllocElTySize*ArrayOffset ) % CastElTySize != 0) return 0;
8389 unsigned Scale = (AllocElTySize*ArraySizeScale)/CastElTySize;
8394 Amt = ConstantInt::get(Type::getInt32Ty(*Context), Scale);
8395 // Insert before the alloca, not before the cast.
8396 Amt = AllocaBuilder.CreateMul(Amt, NumElements, "tmp");
8399 if (int Offset = (AllocElTySize*ArrayOffset)/CastElTySize) {
8400 Value *Off = ConstantInt::get(Type::getInt32Ty(*Context), Offset, true);
8401 Amt = AllocaBuilder.CreateAdd(Amt, Off, "tmp");
8404 AllocaInst *New = AllocaBuilder.CreateAlloca(CastElTy, Amt);
8405 New->setAlignment(AI.getAlignment());
8408 // If the allocation has one real use plus a dbg.declare, just remove the
8410 if (DbgDeclareInst *DI = hasOneUsePlusDeclare(&AI)) {
8411 EraseInstFromFunction(*DI);
8413 // If the allocation has multiple real uses, insert a cast and change all
8414 // things that used it to use the new cast. This will also hack on CI, but it
8416 else if (!AI.hasOneUse()) {
8417 // New is the allocation instruction, pointer typed. AI is the original
8418 // allocation instruction, also pointer typed. Thus, cast to use is BitCast.
8419 Value *NewCast = AllocaBuilder.CreateBitCast(New, AI.getType(), "tmpcast");
8420 AI.replaceAllUsesWith(NewCast);
8422 return ReplaceInstUsesWith(CI, New);
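// For example (hypothetical IR, assuming TargetData reports i64 as 8-byte
// aligned and i32 as 4-byte aligned):
//   %a = alloca i32, i32 4
//   %p = bitcast i32* %a to i64*
// is rewritten so that the allocation itself has the casted-to type:
//   %a = alloca i64, i32 2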
8425 /// CanEvaluateInDifferentType - Return true if we can take the specified value
8426 /// and return it as type Ty without inserting any new casts and without
8427 /// changing the computed value. This is used by code that tries to decide
8428 /// whether promoting or shrinking integer operations to wider or smaller types
8429 /// will allow us to eliminate a truncate or extend.
8431 /// This is a truncation operation if Ty is smaller than V->getType(), or an
8432 /// extension operation if Ty is larger.
8434 /// If CastOpc is a truncation, then Ty will be a type smaller than V. We
8435 /// should return true if trunc(V) can be computed by computing V in the smaller
8436 /// type. If V is an instruction, then trunc(inst(x,y)) can be computed as
8437 /// inst(trunc(x),trunc(y)), which only makes sense if x and y can be
8438 /// efficiently truncated.
8440 /// If CastOpc is a sext or zext, we are asking if the low bits of the value can
8441 /// be computed in a larger type, which is then and'd or sext_in_reg'd to get
8442 /// the final result.
8443 bool InstCombiner::CanEvaluateInDifferentType(Value *V, const Type *Ty,
8445 int &NumCastsRemoved){
8446 // We can always evaluate constants in another type.
8447 if (isa<Constant>(V))
8450 Instruction *I = dyn_cast<Instruction>(V);
8451 if (!I) return false;
8453 const Type *OrigTy = V->getType();
8455 // If this is an extension or truncate, we can often eliminate it.
8456 if (isa<TruncInst>(I) || isa<ZExtInst>(I) || isa<SExtInst>(I)) {
8457 // If this is a cast from the destination type, we can trivially eliminate
8458 // it, and this will remove a cast overall.
8459 if (I->getOperand(0)->getType() == Ty) {
8460 // If the first operand is itself a cast, and is eliminable, do not count
8461 // this as an eliminable cast. We would prefer to eliminate those two
8463 if (!isa<CastInst>(I->getOperand(0)) && I->hasOneUse())
8469 // We can't extend or shrink something that has multiple uses: doing so would
8470 // require duplicating the instruction in general, which isn't profitable.
8471 if (!I->hasOneUse()) return false;
8473 unsigned Opc = I->getOpcode();
8475 case Instruction::Add:
8476 case Instruction::Sub:
8477 case Instruction::Mul:
8478 case Instruction::And:
8479 case Instruction::Or:
8480 case Instruction::Xor:
8481 // These operators can all arbitrarily be extended or truncated.
8482 return CanEvaluateInDifferentType(I->getOperand(0), Ty, CastOpc,
8484 CanEvaluateInDifferentType(I->getOperand(1), Ty, CastOpc,
8487 case Instruction::UDiv:
8488 case Instruction::URem: {
8489 // UDiv and URem can be truncated if all the truncated bits are zero.
8490 uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
8491 uint32_t BitWidth = Ty->getScalarSizeInBits();
8492 if (BitWidth < OrigBitWidth) {
8493 APInt Mask = APInt::getHighBitsSet(OrigBitWidth, OrigBitWidth-BitWidth);
8494 if (MaskedValueIsZero(I->getOperand(0), Mask) &&
8495 MaskedValueIsZero(I->getOperand(1), Mask)) {
8496 return CanEvaluateInDifferentType(I->getOperand(0), Ty, CastOpc,
8498 CanEvaluateInDifferentType(I->getOperand(1), Ty, CastOpc,
8504 case Instruction::Shl:
8505 // If we are truncating the result of this SHL, and if it's a shift of a
8506 // constant amount, we can always perform a SHL in a smaller type.
8507 if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
8508 uint32_t BitWidth = Ty->getScalarSizeInBits();
8509 if (BitWidth < OrigTy->getScalarSizeInBits() &&
8510 CI->getLimitedValue(BitWidth) < BitWidth)
8511 return CanEvaluateInDifferentType(I->getOperand(0), Ty, CastOpc,
8515 case Instruction::LShr:
8516 // If this is a truncate of a logical shr, we can truncate it to a smaller
8517 // lshr iff we know that the bits we would otherwise be shifting in are
8519 if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
8520 uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
8521 uint32_t BitWidth = Ty->getScalarSizeInBits();
8522 if (BitWidth < OrigBitWidth &&
8523 MaskedValueIsZero(I->getOperand(0),
8524 APInt::getHighBitsSet(OrigBitWidth, OrigBitWidth-BitWidth)) &&
8525 CI->getLimitedValue(BitWidth) < BitWidth) {
8526 return CanEvaluateInDifferentType(I->getOperand(0), Ty, CastOpc,
8531 case Instruction::ZExt:
8532 case Instruction::SExt:
8533 case Instruction::Trunc:
8534 // If this is the same kind of case as our original (e.g. zext+zext), we
8535 // can safely replace it. Note that replacing it does not reduce the number
8536 // of casts in the input.
8540 // sext (zext ty1), ty2 -> zext ty2
8541 if (CastOpc == Instruction::SExt && Opc == Instruction::ZExt)
8544 case Instruction::Select: {
8545 SelectInst *SI = cast<SelectInst>(I);
8546 return CanEvaluateInDifferentType(SI->getTrueValue(), Ty, CastOpc,
8548 CanEvaluateInDifferentType(SI->getFalseValue(), Ty, CastOpc,
8551 case Instruction::PHI: {
8552 // We can change a phi if we can change all operands.
8553 PHINode *PN = cast<PHINode>(I);
8554 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
8555 if (!CanEvaluateInDifferentType(PN->getIncomingValue(i), Ty, CastOpc,
8561 // TODO: Can handle more cases here.
8568 /// EvaluateInDifferentType - Given an expression that
8569 /// CanEvaluateInDifferentType returns true for, actually insert the code to
8570 /// evaluate the expression.
8571 Value *InstCombiner::EvaluateInDifferentType(Value *V, const Type *Ty,
8573 if (Constant *C = dyn_cast<Constant>(V))
8574 return ConstantExpr::getIntegerCast(C, Ty, isSigned /*Sext or ZExt*/);
8576 // Otherwise, it must be an instruction.
8577 Instruction *I = cast<Instruction>(V);
8578 Instruction *Res = 0;
8579 unsigned Opc = I->getOpcode();
8581 case Instruction::Add:
8582 case Instruction::Sub:
8583 case Instruction::Mul:
8584 case Instruction::And:
8585 case Instruction::Or:
8586 case Instruction::Xor:
8587 case Instruction::AShr:
8588 case Instruction::LShr:
8589 case Instruction::Shl:
8590 case Instruction::UDiv:
8591 case Instruction::URem: {
8592 Value *LHS = EvaluateInDifferentType(I->getOperand(0), Ty, isSigned);
8593 Value *RHS = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
8594 Res = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS);
8597 case Instruction::Trunc:
8598 case Instruction::ZExt:
8599 case Instruction::SExt:
8600 // If the source type of the cast is the type we're trying for then we can
8601 // just return the source. There's no need to insert it because it is not
8603 if (I->getOperand(0)->getType() == Ty)
8604 return I->getOperand(0);
8606 // Otherwise, must be the same type of cast, so just reinsert a new one.
8607 Res = CastInst::Create(cast<CastInst>(I)->getOpcode(), I->getOperand(0),Ty);
8609 case Instruction::Select: {
8610 Value *True = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
8611 Value *False = EvaluateInDifferentType(I->getOperand(2), Ty, isSigned);
8612 Res = SelectInst::Create(I->getOperand(0), True, False);
8615 case Instruction::PHI: {
8616 PHINode *OPN = cast<PHINode>(I);
8617 PHINode *NPN = PHINode::Create(Ty);
8618 for (unsigned i = 0, e = OPN->getNumIncomingValues(); i != e; ++i) {
8619 Value *V =EvaluateInDifferentType(OPN->getIncomingValue(i), Ty, isSigned);
8620 NPN->addIncoming(V, OPN->getIncomingBlock(i));
8626 // TODO: Can handle more cases here.
8627 llvm_unreachable("Unreachable!");
8632 return InsertNewInstBefore(Res, *I);
8635 /// @brief Implement the transforms common to all CastInst visitors.
8636 Instruction *InstCombiner::commonCastTransforms(CastInst &CI) {
8637 Value *Src = CI.getOperand(0);
8639 // Many cases of "cast of a cast" are eliminable. If it's eliminable we just
8640 // eliminate it now.
8641 if (CastInst *CSrc = dyn_cast<CastInst>(Src)) { // A->B->C cast
8642 if (Instruction::CastOps opc =
8643 isEliminableCastPair(CSrc, CI.getOpcode(), CI.getType(), TD)) {
8644 // The first cast (CSrc) is eliminable so we need to fix up or replace
8645 // the second cast (CI). CSrc will then have a good chance of being dead.
8646 return CastInst::Create(opc, CSrc->getOperand(0), CI.getType());
8650 // If we are casting a select then fold the cast into the select
8651 if (SelectInst *SI = dyn_cast<SelectInst>(Src))
8652 if (Instruction *NV = FoldOpIntoSelect(CI, SI, this))
8655 // If we are casting a PHI then fold the cast into the PHI
8656 if (isa<PHINode>(Src)) {
8657 // We don't do this if it would create a PHI node with an illegal type when
8658 // the current type is legal.
8659 if (!isa<IntegerType>(Src->getType()) ||
8660 !isa<IntegerType>(CI.getType()) ||
8661 ShouldChangeType(CI.getType(), Src->getType(), TD))
8662 if (Instruction *NV = FoldOpIntoPhi(CI))
8669 /// FindElementAtOffset - Given a type and a constant offset, determine whether
8670 /// or not there is a sequence of GEP indices into the type that will land us at
8671 /// the specified offset. If so, fill them into NewIndices and return the
8672 /// resultant element type, otherwise return null.
8673 static const Type *FindElementAtOffset(const Type *Ty, int64_t Offset,
8674 SmallVectorImpl<Value*> &NewIndices,
8675 const TargetData *TD,
8676 LLVMContext *Context) {
8678 if (!Ty->isSized()) return 0;
8680 // Start with the index over the outer type. Note that the type size
8681 // might be zero (even if the offset isn't zero) if the indexed type
8682 // is something like [0 x {int, int}]
8683 const Type *IntPtrTy = TD->getIntPtrType(*Context);
8684 int64_t FirstIdx = 0;
8685 if (int64_t TySize = TD->getTypeAllocSize(Ty)) {
8686 FirstIdx = Offset/TySize;
8687 Offset -= FirstIdx*TySize;
8689 // Handle hosts where % returns negative instead of values [0..TySize).
8693 assert(Offset >= 0);
8695 assert((uint64_t)Offset < (uint64_t)TySize && "Out of range offset");
8698 NewIndices.push_back(ConstantInt::get(IntPtrTy, FirstIdx));
8700 // Index into the types. If we fail, set OrigBase to null.
8702 // Indexing into tail padding between struct/array elements.
8703 if (uint64_t(Offset*8) >= TD->getTypeSizeInBits(Ty))
8706 if (const StructType *STy = dyn_cast<StructType>(Ty)) {
8707 const StructLayout *SL = TD->getStructLayout(STy);
8708 assert(Offset < (int64_t)SL->getSizeInBytes() &&
8709 "Offset must stay within the indexed type");
8711 unsigned Elt = SL->getElementContainingOffset(Offset);
8712 NewIndices.push_back(ConstantInt::get(Type::getInt32Ty(*Context), Elt));
8714 Offset -= SL->getElementOffset(Elt);
8715 Ty = STy->getElementType(Elt);
8716 } else if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
8717 uint64_t EltSize = TD->getTypeAllocSize(AT->getElementType());
8718 assert(EltSize && "Cannot index into a zero-sized array");
8719 NewIndices.push_back(ConstantInt::get(IntPtrTy,Offset/EltSize));
8721 Ty = AT->getElementType();
8723 // Otherwise, we can't index into the middle of this atomic type, bail.
8731 /// @brief Implement the transforms for cast of pointer (bitcast/ptrtoint)
8732 Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
8733 Value *Src = CI.getOperand(0);
8735 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Src)) {
8736 // If casting the result of a getelementptr instruction with no offset, turn
8737 // this into a cast of the original pointer!
8738 if (GEP->hasAllZeroIndices()) {
8739 // Changing the cast operand is usually not a good idea but it is safe
8740 // here because the pointer operand is being replaced with another
8741 // pointer operand so the opcode doesn't need to change.
8743 CI.setOperand(0, GEP->getOperand(0));
8747 // If the GEP has a single use, and the base pointer is a bitcast, and the
8748 // GEP computes a constant offset, see if we can convert these three
8749 // instructions into fewer. This typically happens with unions and other
8750 // non-type-safe code.
8751 if (TD && GEP->hasOneUse() && isa<BitCastInst>(GEP->getOperand(0))) {
8752 if (GEP->hasAllConstantIndices()) {
8753 // We are guaranteed to get a constant from EmitGEPOffset.
8754 ConstantInt *OffsetV = cast<ConstantInt>(EmitGEPOffset(GEP, *this));
8755 int64_t Offset = OffsetV->getSExtValue();
8757 // Get the base pointer input of the bitcast, and the type it points to.
8758 Value *OrigBase = cast<BitCastInst>(GEP->getOperand(0))->getOperand(0);
8759 const Type *GEPIdxTy =
8760 cast<PointerType>(OrigBase->getType())->getElementType();
8761 SmallVector<Value*, 8> NewIndices;
8762 if (FindElementAtOffset(GEPIdxTy, Offset, NewIndices, TD, Context)) {
8763 // If we were able to index down into an element, create the GEP
8764 // and bitcast the result. This eliminates one bitcast, potentially
8766 Value *NGEP = cast<GEPOperator>(GEP)->isInBounds() ?
8767 Builder->CreateInBoundsGEP(OrigBase,
8768 NewIndices.begin(), NewIndices.end()) :
8769 Builder->CreateGEP(OrigBase, NewIndices.begin(), NewIndices.end());
8770 NGEP->takeName(GEP);
8772 if (isa<BitCastInst>(CI))
8773 return new BitCastInst(NGEP, CI.getType());
8774 assert(isa<PtrToIntInst>(CI));
8775 return new PtrToIntInst(NGEP, CI.getType());
8781 return commonCastTransforms(CI);
8784 /// commonIntCastTransforms - This function implements the common transforms
8785 /// for trunc, zext, and sext.
8786 Instruction *InstCombiner::commonIntCastTransforms(CastInst &CI) {
8787 if (Instruction *Result = commonCastTransforms(CI))
8790 Value *Src = CI.getOperand(0);
8791 const Type *SrcTy = Src->getType();
8792 const Type *DestTy = CI.getType();
8793 uint32_t SrcBitSize = SrcTy->getScalarSizeInBits();
8794 uint32_t DestBitSize = DestTy->getScalarSizeInBits();
8796 // See if we can simplify any instructions used by the LHS whose sole
8797 // purpose is to compute bits we don't care about.
8798 if (SimplifyDemandedInstructionBits(CI))
8801 // If the source isn't an instruction or has more than one use then we
8802 // can't do anything more.
8803 Instruction *SrcI = dyn_cast<Instruction>(Src);
8804 if (!SrcI || !Src->hasOneUse())
8807 // Attempt to propagate the cast into the instruction for int->int casts.
8808 int NumCastsRemoved = 0;
8809 // Only do this if the dest type is a simple type, don't convert the
8810 // expression tree to something weird like i93 unless the source is also strange.
8812 if ((isa<VectorType>(DestTy) ||
8813 ShouldChangeType(SrcI->getType(), DestTy, TD)) &&
8814 CanEvaluateInDifferentType(SrcI, DestTy,
8815 CI.getOpcode(), NumCastsRemoved)) {
8816 // If this cast is a truncate, evaluating in a different type always
8817 // eliminates the cast, so it is always a win. If this is a zero-extension,
8818 // we need to do an AND to maintain the clear top-part of the computation,
8819 // so we require that the input have eliminated at least one cast. If this
8820 // is a sign extension, we insert two new casts (to do the extension) so we
8821 // require that two casts have been eliminated.
8822 bool DoXForm = false;
8823 bool JustReplace = false;
8824 switch (CI.getOpcode()) {
8826 // All the others use floating point so we shouldn't actually
8827 // get here because of the check above.
8828 llvm_unreachable("Unknown cast type");
8829 case Instruction::Trunc:
8832 case Instruction::ZExt: {
8833 DoXForm = NumCastsRemoved >= 1;
8835 if (!DoXForm && 0) {
8836 // If it's unnecessary to issue an AND to clear the high bits, it's
8837 // always profitable to do this xform.
8838 Value *TryRes = EvaluateInDifferentType(SrcI, DestTy, false);
8839 APInt Mask(APInt::getBitsSet(DestBitSize, SrcBitSize, DestBitSize));
8840 if (MaskedValueIsZero(TryRes, Mask))
8841 return ReplaceInstUsesWith(CI, TryRes);
8843 if (Instruction *TryI = dyn_cast<Instruction>(TryRes))
8844 if (TryI->use_empty())
8845 EraseInstFromFunction(*TryI);
8849 case Instruction::SExt: {
8850 DoXForm = NumCastsRemoved >= 2;
8851 if (!DoXForm && !isa<TruncInst>(SrcI) && 0) {
8852 // If we do not have to emit the truncate + sext pair, then it's always
8853 // profitable to do this xform.
8855 // It's not safe to eliminate the trunc + sext pair if one of the
8856 // eliminated cast is a truncate. e.g.
8857 // t2 = trunc i32 t1 to i16
8858 // t3 = sext i16 t2 to i32
8861 Value *TryRes = EvaluateInDifferentType(SrcI, DestTy, true);
8862 unsigned NumSignBits = ComputeNumSignBits(TryRes);
8863 if (NumSignBits > (DestBitSize - SrcBitSize))
8864 return ReplaceInstUsesWith(CI, TryRes);
8866 if (Instruction *TryI = dyn_cast<Instruction>(TryRes))
8867 if (TryI->use_empty())
8868 EraseInstFromFunction(*TryI);
8875 DEBUG(errs() << "ICE: EvaluateInDifferentType converting expression type"
8876 " to avoid cast: " << CI);
8877 Value *Res = EvaluateInDifferentType(SrcI, DestTy,
8878 CI.getOpcode() == Instruction::SExt);
8880 // Just replace this cast with the result.
8881 return ReplaceInstUsesWith(CI, Res);
8883 assert(Res->getType() == DestTy);
8884 switch (CI.getOpcode()) {
8885 default: llvm_unreachable("Unknown cast type!");
8886 case Instruction::Trunc:
8887 // Just replace this cast with the result.
8888 return ReplaceInstUsesWith(CI, Res);
8889 case Instruction::ZExt: {
8890 assert(SrcBitSize < DestBitSize && "Not a zext?");
8892 // If the high bits are already zero, just replace this cast with the
8894 APInt Mask(APInt::getBitsSet(DestBitSize, SrcBitSize, DestBitSize));
8895 if (MaskedValueIsZero(Res, Mask))
8896 return ReplaceInstUsesWith(CI, Res);
8898 // We need to emit an AND to clear the high bits.
8899 Constant *C = ConstantInt::get(*Context,
8900 APInt::getLowBitsSet(DestBitSize, SrcBitSize));
8901 return BinaryOperator::CreateAnd(Res, C);
8903 case Instruction::SExt: {
8904 // If the high bits are already filled with sign bit, just replace this
8905 // cast with the result.
8906 unsigned NumSignBits = ComputeNumSignBits(Res);
8907 if (NumSignBits > (DestBitSize - SrcBitSize))
8908 return ReplaceInstUsesWith(CI, Res);
8910 // We need to emit a cast to truncate, then a cast to sext.
8911 return new SExtInst(Builder->CreateTrunc(Res, Src->getType()), DestTy);
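// For example (hypothetical IR, each intermediate having a single use), a
// truncate of an expression built from narrow values is evaluated directly
// in the narrow type:
//   %xw = zext i8 %x to i32
//   %yw = zext i8 %y to i32
//   %a  = add i32 %xw, %yw
//   %t  = trunc i32 %a to i8
// becomes
//   %t  = add i8 %x, %y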
8917 Value *Op0 = SrcI->getNumOperands() > 0 ? SrcI->getOperand(0) : 0;
8918 Value *Op1 = SrcI->getNumOperands() > 1 ? SrcI->getOperand(1) : 0;
8920 switch (SrcI->getOpcode()) {
8921 case Instruction::Add:
8922 case Instruction::Mul:
8923 case Instruction::And:
8924 case Instruction::Or:
8925 case Instruction::Xor:
8926 // If we are discarding information, rewrite.
8927 if (DestBitSize < SrcBitSize && DestBitSize != 1) {
8928 // Don't insert two casts unless at least one can be eliminated.
8929 if (!ValueRequiresCast(CI.getOpcode(), Op1, DestTy, TD) ||
8930 !ValueRequiresCast(CI.getOpcode(), Op0, DestTy, TD)) {
8931 Value *Op0c = Builder->CreateTrunc(Op0, DestTy, Op0->getName());
8932 Value *Op1c = Builder->CreateTrunc(Op1, DestTy, Op1->getName());
8933 return BinaryOperator::Create(
8934 cast<BinaryOperator>(SrcI)->getOpcode(), Op0c, Op1c);
8938 // cast (xor bool X, true) to int --> xor (cast bool X to int), 1
8939 if (isa<ZExtInst>(CI) && SrcBitSize == 1 &&
8940 SrcI->getOpcode() == Instruction::Xor &&
8941 Op1 == ConstantInt::getTrue(*Context) &&
8942 (!Op0->hasOneUse() || !isa<CmpInst>(Op0))) {
8943 Value *New = Builder->CreateZExt(Op0, DestTy, Op0->getName());
8944 return BinaryOperator::CreateXor(New,
8945 ConstantInt::get(CI.getType(), 1));
8949 case Instruction::Shl: {
8950 // Canonicalize trunc inside shl, if we can.
8951 ConstantInt *CI = dyn_cast<ConstantInt>(Op1);
8952 if (CI && DestBitSize < SrcBitSize &&
8953 CI->getLimitedValue(DestBitSize) < DestBitSize) {
8954 Value *Op0c = Builder->CreateTrunc(Op0, DestTy, Op0->getName());
8955 Value *Op1c = Builder->CreateTrunc(Op1, DestTy, Op1->getName());
8956 return BinaryOperator::CreateShl(Op0c, Op1c);
8964 Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
8965 if (Instruction *Result = commonIntCastTransforms(CI))
8968 Value *Src = CI.getOperand(0);
8969 const Type *Ty = CI.getType();
8970 uint32_t DestBitWidth = Ty->getScalarSizeInBits();
8971 uint32_t SrcBitWidth = Src->getType()->getScalarSizeInBits();
8973 // Canonicalize trunc x to i1 -> (icmp ne (and x, 1), 0)
8974 if (DestBitWidth == 1) {
8975 Constant *One = ConstantInt::get(Src->getType(), 1);
8976 Src = Builder->CreateAnd(Src, One, "tmp");
8977 Value *Zero = Constant::getNullValue(Src->getType());
8978 return new ICmpInst(ICmpInst::ICMP_NE, Src, Zero);
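// For example (hypothetical IR):
//   %t = trunc i32 %x to i1
// becomes
//   %a = and i32 %x, 1
//   %t = icmp ne i32 %a, 0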
8981 // Optimize trunc(lshr(), c) to pull the shift through the truncate.
8982 ConstantInt *ShAmtV = 0;
8984 if (Src->hasOneUse() &&
8985 match(Src, m_LShr(m_Value(ShiftOp), m_ConstantInt(ShAmtV)))) {
8986 uint32_t ShAmt = ShAmtV->getLimitedValue(SrcBitWidth);
8988 // Get a mask for the bits shifting in.
8989 APInt Mask(APInt::getLowBitsSet(SrcBitWidth, ShAmt).shl(DestBitWidth));
8990 if (MaskedValueIsZero(ShiftOp, Mask)) {
8991 if (ShAmt >= DestBitWidth) // All zeros.
8992 return ReplaceInstUsesWith(CI, Constant::getNullValue(Ty));
8994 // Okay, we can shrink this. Truncate the input, then return a new
8996 Value *V1 = Builder->CreateTrunc(ShiftOp, Ty, ShiftOp->getName());
8997 Value *V2 = ConstantExpr::getTrunc(ShAmtV, Ty);
8998 return BinaryOperator::CreateLShr(V1, V2);
9005 /// transformZExtICmp - Transform (zext icmp) to bitwise / integer operations
9006 /// in order to eliminate the icmp.
9007 Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI,
9009 // If we are just checking for a icmp eq of a single bit and zext'ing it
9010 // to an integer, then shift the bit to the appropriate place and then
9011 // cast to integer to avoid the comparison.
9012 if (ConstantInt *Op1C = dyn_cast<ConstantInt>(ICI->getOperand(1))) {
9013 const APInt &Op1CV = Op1C->getValue();
9015 // zext (x <s 0) to i32 --> x>>u31 true if signbit set.
9016 // zext (x >s -1) to i32 --> (x>>u31)^1 true if signbit clear.
9017 if ((ICI->getPredicate() == ICmpInst::ICMP_SLT && Op1CV == 0) ||
9018 (ICI->getPredicate() == ICmpInst::ICMP_SGT &&Op1CV.isAllOnesValue())) {
9019 if (!DoXform) return ICI;
9021 Value *In = ICI->getOperand(0);
9022 Value *Sh = ConstantInt::get(In->getType(),
9023 In->getType()->getScalarSizeInBits()-1);
9024 In = Builder->CreateLShr(In, Sh, In->getName()+".lobit");
9025 if (In->getType() != CI.getType())
9026 In = Builder->CreateIntCast(In, CI.getType(), false/*ZExt*/, "tmp");
9028 if (ICI->getPredicate() == ICmpInst::ICMP_SGT) {
9029 Constant *One = ConstantInt::get(In->getType(), 1);
9030 In = Builder->CreateXor(In, One, In->getName()+".not");
9033 return ReplaceInstUsesWith(CI, In);
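// For example (hypothetical IR):
//   %c = icmp slt i32 %x, 0
//   %z = zext i1 %c to i32
// becomes
//   %z = lshr i32 %x, 31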
9038 // zext (X == 0) to i32 --> X^1 iff X has only the low bit set.
9039 // zext (X == 0) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
9040 // zext (X == 1) to i32 --> X iff X has only the low bit set.
9041 // zext (X == 2) to i32 --> X>>1 iff X has only the 2nd bit set.
9042 // zext (X != 0) to i32 --> X iff X has only the low bit set.
9043 // zext (X != 0) to i32 --> X>>1 iff X has only the 2nd bit set.
9044 // zext (X != 1) to i32 --> X^1 iff X has only the low bit set.
9045 // zext (X != 2) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
9046 if ((Op1CV == 0 || Op1CV.isPowerOf2()) &&
9047 // This only works for EQ and NE
9048 ICI->isEquality()) {
9049 // If Op1C is some other power of two, convert:
9050 uint32_t BitWidth = Op1C->getType()->getBitWidth();
9051 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
9052 APInt TypeMask(APInt::getAllOnesValue(BitWidth));
9053 ComputeMaskedBits(ICI->getOperand(0), TypeMask, KnownZero, KnownOne);
9055 APInt KnownZeroMask(~KnownZero);
9056 if (KnownZeroMask.isPowerOf2()) { // Exactly 1 possible 1?
9057 if (!DoXform) return ICI;
9059 bool isNE = ICI->getPredicate() == ICmpInst::ICMP_NE;
9060 if (Op1CV != 0 && (Op1CV != KnownZeroMask)) {
9061 // (X&4) == 2 --> false
9062 // (X&4) != 2 --> true
9063 Constant *Res = ConstantInt::get(Type::getInt1Ty(*Context), isNE);
9064 Res = ConstantExpr::getZExt(Res, CI.getType());
9065 return ReplaceInstUsesWith(CI, Res);
9068 uint32_t ShiftAmt = KnownZeroMask.logBase2();
9069 Value *In = ICI->getOperand(0);
9071 // Perform a logical shr by shiftamt.
9072 // Insert the shift to put the result in the low bit.
9073 In = Builder->CreateLShr(In, ConstantInt::get(In->getType(),ShiftAmt),
9074 In->getName()+".lobit");
9077 if ((Op1CV != 0) == isNE) { // Toggle the low bit.
9078 Constant *One = ConstantInt::get(In->getType(), 1);
9079 In = Builder->CreateXor(In, One, "tmp");
9082 if (CI.getType() == In->getType())
9083 return ReplaceInstUsesWith(CI, In);
9085 return CastInst::CreateIntegerCast(In, CI.getType(), false/*ZExt*/);
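// For example (hypothetical IR): if only bit 3 of %x can be set, e.g. because
// %x = and i32 %a, 8, then
//   %c = icmp ne i32 %x, 0
//   %z = zext i1 %c to i32
// becomes
//   %z = lshr i32 %x, 3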
9090 // icmp ne A, B is equal to xor A, B when A and B only really have one bit.
9091 // It is also profitable to transform icmp eq into not(xor(A, B)) because that
9092 // may lead to additional simplifications.
9093 if (ICI->isEquality() && CI.getType() == ICI->getOperand(0)->getType()) {
9094 if (const IntegerType *ITy = dyn_cast<IntegerType>(CI.getType())) {
9095 uint32_t BitWidth = ITy->getBitWidth();
9096 Value *LHS = ICI->getOperand(0);
9097 Value *RHS = ICI->getOperand(1);
9099 APInt KnownZeroLHS(BitWidth, 0), KnownOneLHS(BitWidth, 0);
9100 APInt KnownZeroRHS(BitWidth, 0), KnownOneRHS(BitWidth, 0);
9101 APInt TypeMask(APInt::getAllOnesValue(BitWidth));
9102 ComputeMaskedBits(LHS, TypeMask, KnownZeroLHS, KnownOneLHS);
9103 ComputeMaskedBits(RHS, TypeMask, KnownZeroRHS, KnownOneRHS);
9105 if (KnownZeroLHS == KnownZeroRHS && KnownOneLHS == KnownOneRHS) {
9106 APInt KnownBits = KnownZeroLHS | KnownOneLHS;
9107 APInt UnknownBit = ~KnownBits;
9108 if (UnknownBit.countPopulation() == 1) {
9109 if (!DoXform) return ICI;
9111 Value *Result = Builder->CreateXor(LHS, RHS);
9113 // Mask off any bits that are set and won't be shifted away.
9114 if (KnownOneLHS.uge(UnknownBit))
9115 Result = Builder->CreateAnd(Result,
9116 ConstantInt::get(ITy, UnknownBit));
9118 // Shift the bit we're testing down to the lsb.
9119 Result = Builder->CreateLShr(
9120 Result, ConstantInt::get(ITy, UnknownBit.countTrailingZeros()));
9122 if (ICI->getPredicate() == ICmpInst::ICMP_EQ)
9123 Result = Builder->CreateXor(Result, ConstantInt::get(ITy, 1));
9124 Result->takeName(ICI);
9125 return ReplaceInstUsesWith(CI, Result);
9134 Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
9135 // If one of the common conversion will work, do it.
9136 if (Instruction *Result = commonIntCastTransforms(CI))
9139 Value *Src = CI.getOperand(0);
9141 // If this is a TRUNC followed by a ZEXT then we are dealing with integral
9142 // types and if the sizes are just right we can convert this into a logical
9143 // 'and' which will be much cheaper than the pair of casts.
9144 if (TruncInst *CSrc = dyn_cast<TruncInst>(Src)) { // A->B->C cast
9145 // Get the sizes of the types involved. We know that the intermediate type
9146 // will be smaller than A or C, but don't know the relation between A and C.
9147 Value *A = CSrc->getOperand(0);
9148 unsigned SrcSize = A->getType()->getScalarSizeInBits();
9149 unsigned MidSize = CSrc->getType()->getScalarSizeInBits();
9150 unsigned DstSize = CI.getType()->getScalarSizeInBits();
9151 // If we're actually extending zero bits, then if
9152 // SrcSize < DstSize: zext(a & mask)
9153 // SrcSize == DstSize: a & mask
9154 // SrcSize > DstSize: trunc(a) & mask
9155 if (SrcSize < DstSize) {
9156 APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
9157 Constant *AndConst = ConstantInt::get(A->getType(), AndValue);
9158 Value *And = Builder->CreateAnd(A, AndConst, CSrc->getName()+".mask");
9159 return new ZExtInst(And, CI.getType());
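// For example (hypothetical IR), with an i16 source, i8 middle, and i32 dest:
//   %t = trunc i16 %a to i8
//   %z = zext i8 %t to i32
// becomes
//   %m = and i16 %a, 255
//   %z = zext i16 %m to i32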
9162 if (SrcSize == DstSize) {
9163 APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
9164 return BinaryOperator::CreateAnd(A, ConstantInt::get(A->getType(),
9167 if (SrcSize > DstSize) {
9168 Value *Trunc = Builder->CreateTrunc(A, CI.getType(), "tmp");
9169 APInt AndValue(APInt::getLowBitsSet(DstSize, MidSize));
9170 return BinaryOperator::CreateAnd(Trunc,
9171 ConstantInt::get(Trunc->getType(),
9176 if (ICmpInst *ICI = dyn_cast<ICmpInst>(Src))
9177 return transformZExtICmp(ICI, CI);
9179 BinaryOperator *SrcI = dyn_cast<BinaryOperator>(Src);
9180 if (SrcI && SrcI->getOpcode() == Instruction::Or) {
9181 // zext (or icmp, icmp) --> or (zext icmp), (zext icmp) if at least one
9182 // of the (zext icmp) will be transformed.
9183 ICmpInst *LHS = dyn_cast<ICmpInst>(SrcI->getOperand(0));
9184 ICmpInst *RHS = dyn_cast<ICmpInst>(SrcI->getOperand(1));
9185 if (LHS && RHS && LHS->hasOneUse() && RHS->hasOneUse() &&
9186 (transformZExtICmp(LHS, CI, false) ||
9187 transformZExtICmp(RHS, CI, false))) {
9188 Value *LCast = Builder->CreateZExt(LHS, CI.getType(), LHS->getName());
9189 Value *RCast = Builder->CreateZExt(RHS, CI.getType(), RHS->getName());
9190 return BinaryOperator::Create(Instruction::Or, LCast, RCast);
9194 // zext(trunc(t) & C) -> (t & zext(C)).
9195 if (SrcI && SrcI->getOpcode() == Instruction::And && SrcI->hasOneUse())
9196 if (ConstantInt *C = dyn_cast<ConstantInt>(SrcI->getOperand(1)))
9197 if (TruncInst *TI = dyn_cast<TruncInst>(SrcI->getOperand(0))) {
9198 Value *TI0 = TI->getOperand(0);
9199 if (TI0->getType() == CI.getType())
9201 BinaryOperator::CreateAnd(TI0,
9202 ConstantExpr::getZExt(C, CI.getType()));
9205 // zext((trunc(t) & C) ^ C) -> ((t & zext(C)) ^ zext(C)).
9206 if (SrcI && SrcI->getOpcode() == Instruction::Xor && SrcI->hasOneUse())
9207 if (ConstantInt *C = dyn_cast<ConstantInt>(SrcI->getOperand(1)))
9208 if (BinaryOperator *And = dyn_cast<BinaryOperator>(SrcI->getOperand(0)))
9209 if (And->getOpcode() == Instruction::And && And->hasOneUse() &&
9210 And->getOperand(1) == C)
9211 if (TruncInst *TI = dyn_cast<TruncInst>(And->getOperand(0))) {
9212 Value *TI0 = TI->getOperand(0);
9213 if (TI0->getType() == CI.getType()) {
9214 Constant *ZC = ConstantExpr::getZExt(C, CI.getType());
9215 Value *NewAnd = Builder->CreateAnd(TI0, ZC, "tmp");
9216 return BinaryOperator::CreateXor(NewAnd, ZC);
9223 Instruction *InstCombiner::visitSExt(SExtInst &CI) {
9224 if (Instruction *I = commonIntCastTransforms(CI))
9227 Value *Src = CI.getOperand(0);
9229 // Canonicalize sign-extend from i1 to a select.
9230 if (Src->getType() == Type::getInt1Ty(*Context))
9231 return SelectInst::Create(Src,
9232 Constant::getAllOnesValue(CI.getType()),
9233 Constant::getNullValue(CI.getType()));
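// For example (hypothetical IR):
//   %r = sext i1 %b to i32
// becomes
//   %r = select i1 %b, i32 -1, i32 0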
9235 // See if the value being truncated is already sign extended. If so, just
9236 // eliminate the trunc/sext pair.
9237 if (Operator::getOpcode(Src) == Instruction::Trunc) {
9238 Value *Op = cast<User>(Src)->getOperand(0);
9239 unsigned OpBits = Op->getType()->getScalarSizeInBits();
9240 unsigned MidBits = Src->getType()->getScalarSizeInBits();
9241 unsigned DestBits = CI.getType()->getScalarSizeInBits();
9242 unsigned NumSignBits = ComputeNumSignBits(Op);
9244 if (OpBits == DestBits) {
9245 // Op is i32, Mid is i8, and Dest is i32. If Op has more than 24 sign
9247 // bits, it is already sign extended; just use Op directly.
9247 if (NumSignBits > DestBits-MidBits)
9248 return ReplaceInstUsesWith(CI, Op);
9249 } else if (OpBits < DestBits) {
9250 // Op is i32, Mid is i8, and Dest is i64. If Op has more than 24 sign
9251 // bits, just sext from i32.
9252 if (NumSignBits > OpBits-MidBits)
9253 return new SExtInst(Op, CI.getType(), "tmp");
9255 // Op is i64, Mid is i8, and Dest is i32. If Op has more than 56 sign
9256 // bits, just truncate to i32.
9257 if (NumSignBits > OpBits-MidBits)
9258 return new TruncInst(Op, CI.getType(), "tmp");
9262 // If the input is a shl/ashr pair of a same constant, then this is a sign
9263 // extension from a smaller value. If we could trust arbitrary bitwidth
9264 // integers, we could turn this into a truncate to the smaller bit and then
9265 // use a sext for the whole extension. Since we don't, look deeper and check
9266 // for a truncate. If the source and dest are the same type, eliminate the
9267 // trunc and extend and just do shifts. For example, turn:
9268 // %a = trunc i32 %i to i8
9269 // %b = shl i8 %a, 6
9270 // %c = ashr i8 %b, 6
9271 // %d = sext i8 %c to i32
9273 // %a = shl i32 %i, 30
9274 // %d = ashr i32 %a, 30
9276 ConstantInt *BA = 0, *CA = 0;
9277 if (match(Src, m_AShr(m_Shl(m_Value(A), m_ConstantInt(BA)),
9278 m_ConstantInt(CA))) &&
9279 BA == CA && isa<TruncInst>(A)) {
9280 Value *I = cast<TruncInst>(A)->getOperand(0);
9281 if (I->getType() == CI.getType()) {
9282 unsigned MidSize = Src->getType()->getScalarSizeInBits();
9283 unsigned SrcDstSize = CI.getType()->getScalarSizeInBits();
9284 unsigned ShAmt = CA->getZExtValue()+SrcDstSize-MidSize;
9285 Constant *ShAmtV = ConstantInt::get(CI.getType(), ShAmt);
9286 I = Builder->CreateShl(I, ShAmtV, CI.getName());
9287 return BinaryOperator::CreateAShr(I, ShAmtV);
9294 /// FitsInFPType - Return a Constant* for the specified FP constant if it fits
9295 /// in the specified FP type without changing its value.
9296 static Constant *FitsInFPType(ConstantFP *CFP, const fltSemantics &Sem,
9297 LLVMContext *Context) {
9299 APFloat F = CFP->getValueAPF();
9300 (void)F.convert(Sem, APFloat::rmNearestTiesToEven, &losesInfo);
9302 return ConstantFP::get(*Context, F);
9306 /// LookThroughFPExtensions - If this is an fp extension instruction, look
9307 /// through it until we get the source value.
9308 static Value *LookThroughFPExtensions(Value *V, LLVMContext *Context) {
9309 if (Instruction *I = dyn_cast<Instruction>(V))
9310 if (I->getOpcode() == Instruction::FPExt)
9311 return LookThroughFPExtensions(I->getOperand(0), Context);
9313 // If this value is a constant, return the constant in the smallest FP type
9314 // that can accurately represent it. This allows us to turn
9315 // (float)((double)X+2.0) into x+2.0f.
9316 if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
9317 if (CFP->getType() == Type::getPPC_FP128Ty(*Context))
9318 return V; // No constant folding of this.
9319 // See if the value can be truncated to float and then reextended.
9320 if (Value *V = FitsInFPType(CFP, APFloat::IEEEsingle, Context))
9322 if (CFP->getType() == Type::getDoubleTy(*Context))
9323 return V; // Won't shrink.
9324 if (Value *V = FitsInFPType(CFP, APFloat::IEEEdouble, Context))
9326 // Don't try to shrink to various long double types.
9332 Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
9333 if (Instruction *I = commonCastTransforms(CI))
9336 // If we have fptrunc(fadd (fpextend x), (fpextend y)), where x and y are
9337 // smaller than the destination type, we can eliminate the truncate by doing
9338 // the add as the smaller type. This applies to fadd/fsub/fmul/fdiv as well as
9339 // many builtins (sqrt, etc).
9340 BinaryOperator *OpI = dyn_cast<BinaryOperator>(CI.getOperand(0));
9341 if (OpI && OpI->hasOneUse()) {
9342 switch (OpI->getOpcode()) {
9344 case Instruction::FAdd:
9345 case Instruction::FSub:
9346 case Instruction::FMul:
9347 case Instruction::FDiv:
9348 case Instruction::FRem:
9349 const Type *SrcTy = OpI->getType();
9350 Value *LHSTrunc = LookThroughFPExtensions(OpI->getOperand(0), Context);
9351 Value *RHSTrunc = LookThroughFPExtensions(OpI->getOperand(1), Context);
9352 if (LHSTrunc->getType() != SrcTy &&
9353 RHSTrunc->getType() != SrcTy) {
9354 unsigned DstSize = CI.getType()->getScalarSizeInBits();
9355 // If the source types were both smaller than the destination type of
9356 // the cast, do this xform.
9357 if (LHSTrunc->getType()->getScalarSizeInBits() <= DstSize &&
9358 RHSTrunc->getType()->getScalarSizeInBits() <= DstSize) {
9359 LHSTrunc = Builder->CreateFPExt(LHSTrunc, CI.getType());
9360 RHSTrunc = Builder->CreateFPExt(RHSTrunc, CI.getType());
9361 return BinaryOperator::Create(OpI->getOpcode(), LHSTrunc, RHSTrunc);
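// For example (hypothetical IR), with float operands extended to double:
//   %xd = fpext float %x to double
//   %yd = fpext float %y to double
//   %s  = fadd double %xd, %yd
//   %r  = fptrunc double %s to float
// becomes
//   %r  = fadd float %x, %y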
9370 Instruction *InstCombiner::visitFPExt(CastInst &CI) {
9371 return commonCastTransforms(CI);
9374 Instruction *InstCombiner::visitFPToUI(FPToUIInst &FI) {
9375 Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
9377 return commonCastTransforms(FI);
9379 // fptoui(uitofp(X)) --> X
9380 // fptoui(sitofp(X)) --> X
9381 // This is safe if the intermediate type has enough bits in its mantissa to
9382 // accurately represent all values of X. For example, do not do this with
9383 // i64->float->i64. This is also safe for the sitofp case, because any negative
9384 // 'X' value would cause an undefined result for the fptoui.
9385 if ((isa<UIToFPInst>(OpI) || isa<SIToFPInst>(OpI)) &&
9386 OpI->getOperand(0)->getType() == FI.getType() &&
9387 (int)FI.getType()->getScalarSizeInBits() < /*extra bit for sign */
9388 OpI->getType()->getFPMantissaWidth())
9389 return ReplaceInstUsesWith(FI, OpI->getOperand(0));
9391 return commonCastTransforms(FI);
9394 Instruction *InstCombiner::visitFPToSI(FPToSIInst &FI) {
9395 Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
9397 return commonCastTransforms(FI);
9399 // fptosi(sitofp(X)) --> X
9400 // fptosi(uitofp(X)) --> X
9401 // This is safe if the intermediate type has enough bits in its mantissa to
9402 // accurately represent all values of X. For example, do not do this with
9403 // i64->float->i64. The uitofp case is also safe: any 'X' too large for the
9404 // signed result would make the original fptosi undefined anyway.
9405 if ((isa<UIToFPInst>(OpI) || isa<SIToFPInst>(OpI)) &&
9406 OpI->getOperand(0)->getType() == FI.getType() &&
9407 (int)FI.getType()->getScalarSizeInBits() <=
9408 OpI->getType()->getFPMantissaWidth())
9409 return ReplaceInstUsesWith(FI, OpI->getOperand(0));
9411 return commonCastTransforms(FI);
9414 Instruction *InstCombiner::visitUIToFP(CastInst &CI) {
9415 return commonCastTransforms(CI);
9418 Instruction *InstCombiner::visitSIToFP(CastInst &CI) {
9419 return commonCastTransforms(CI);
9422 Instruction *InstCombiner::visitPtrToInt(PtrToIntInst &CI) {
9423 // If the destination integer type is smaller than the intptr_t type for
9424 // this target, do a ptrtoint to intptr_t then do a trunc. This allows the
9425 // trunc to be exposed to other transforms. Don't do this for extending
9426 // ptrtoint's, because we don't know if the target sign or zero extends its
9429 CI.getType()->getScalarSizeInBits() < TD->getPointerSizeInBits()) {
9430 Value *P = Builder->CreatePtrToInt(CI.getOperand(0),
9431 TD->getIntPtrType(CI.getContext()),
9433 return new TruncInst(P, CI.getType());
9436 return commonPointerCastTransforms(CI);
9439 Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
9440 // If the source integer type is larger than the intptr_t type for
9441 // this target, do a trunc to the intptr_t type, then inttoptr of it. This
9442 // allows the trunc to be exposed to other transforms. Don't do this for
9443 // extending inttoptr's, because we don't know if the target sign or zero
9444 // extends to pointers.
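// Sketch, again assuming 64-bit pointers:
//   inttoptr i128 %x to i8* --> inttoptr (trunc i128 %x to i64) to i8*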
9445 if (TD && CI.getOperand(0)->getType()->getScalarSizeInBits() >
9446 TD->getPointerSizeInBits()) {
9447 Value *P = Builder->CreateTrunc(CI.getOperand(0),
9448 TD->getIntPtrType(CI.getContext()), "tmp");
9449 return new IntToPtrInst(P, CI.getType());
9452 if (Instruction *I = commonCastTransforms(CI))
9458 Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
9459 // If the operands are integer typed then apply the integer transforms,
9460 // otherwise just apply the common ones.
9461 Value *Src = CI.getOperand(0);
9462 const Type *SrcTy = Src->getType();
9463 const Type *DestTy = CI.getType();
9465 if (isa<PointerType>(SrcTy)) {
9466 if (Instruction *I = commonPointerCastTransforms(CI))
9469 if (Instruction *Result = commonCastTransforms(CI))
9474 // Get rid of casts from one type to the same type. These are useless and can
9475 // be replaced by the operand.
9476 if (DestTy == Src->getType())
9477 return ReplaceInstUsesWith(CI, Src);
9479 if (const PointerType *DstPTy = dyn_cast<PointerType>(DestTy)) {
9480 const PointerType *SrcPTy = cast<PointerType>(SrcTy);
9481 const Type *DstElTy = DstPTy->getElementType();
9482 const Type *SrcElTy = SrcPTy->getElementType();
9484 // If the address spaces don't match, don't eliminate the bitcast, which is
9485 // required for changing types.
9486 if (SrcPTy->getAddressSpace() != DstPTy->getAddressSpace())
9489 // If we are casting an alloca to a pointer to a type of the same
9490 // size, rewrite the allocation instruction to allocate the "right" type.
9491 // There is no need to modify malloc calls because it is their bitcast that
9492 // needs to be cleaned up.
9493 if (AllocaInst *AI = dyn_cast<AllocaInst>(Src))
9494 if (Instruction *V = PromoteCastOfAllocation(CI, *AI))
9497 // If the source and destination are pointers, and this cast is equivalent
9498 // to a getelementptr X, 0, 0, 0... turn it into the appropriate gep.
9499 // This can enhance SROA and other transforms that want type-safe pointers.
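// Sketch: with %p of type { { double } }*, the bitcast below becomes a gep:
//   bitcast { { double } }* %p to double*
//   --> getelementptr inbounds { { double } }* %p, i32 0, i32 0, i32 0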
9500 Constant *ZeroUInt = Constant::getNullValue(Type::getInt32Ty(*Context));
9501 unsigned NumZeros = 0;
9502 while (SrcElTy != DstElTy &&
9503 isa<CompositeType>(SrcElTy) && !isa<PointerType>(SrcElTy) &&
9504 SrcElTy->getNumContainedTypes() /* not "{}" */) {
9505 SrcElTy = cast<CompositeType>(SrcElTy)->getTypeAtIndex(ZeroUInt);
9509 // If we found a path from the src to dest, create the getelementptr now.
9510 if (SrcElTy == DstElTy) {
9511 SmallVector<Value*, 8> Idxs(NumZeros+1, ZeroUInt);
9512 return GetElementPtrInst::CreateInBounds(Src, Idxs.begin(), Idxs.end(), "",
9513 ((Instruction*) NULL));
9517 if (const VectorType *DestVTy = dyn_cast<VectorType>(DestTy)) {
9518 if (DestVTy->getNumElements() == 1) {
9519 if (!isa<VectorType>(SrcTy)) {
9520 Value *Elem = Builder->CreateBitCast(Src, DestVTy->getElementType());
9521 return InsertElementInst::Create(UndefValue::get(DestTy), Elem,
9522 Constant::getNullValue(Type::getInt32Ty(*Context)));
9524 // FIXME: Canonicalize bitcast(insertelement) -> insertelement(bitcast)
9528 if (const VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy)) {
9529 if (SrcVTy->getNumElements() == 1) {
9530 if (!isa<VectorType>(DestTy)) {
9532 Builder->CreateExtractElement(Src,
9533 Constant::getNullValue(Type::getInt32Ty(*Context)));
9534 return CastInst::Create(Instruction::BitCast, Elem, DestTy);
9539 if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(Src)) {
9540 if (SVI->hasOneUse()) {
9541 // Okay, we have (bitconvert (shuffle ..)). Check to see if this is
9542 // a bitconvert to a vector with the same # elts.
9543 if (isa<VectorType>(DestTy) &&
9544 cast<VectorType>(DestTy)->getNumElements() ==
9545 SVI->getType()->getNumElements() &&
9546 SVI->getType()->getNumElements() ==
9547 cast<VectorType>(SVI->getOperand(0)->getType())->getNumElements()) {
9549 // If either of the operands is a cast from CI.getType(), then
9550 // evaluating the shuffle in the casted destination's type will allow
9551 // us to eliminate at least one cast.
9552 if (((Tmp = dyn_cast<CastInst>(SVI->getOperand(0))) &&
9553 Tmp->getOperand(0)->getType() == DestTy) ||
9554 ((Tmp = dyn_cast<CastInst>(SVI->getOperand(1))) &&
9555 Tmp->getOperand(0)->getType() == DestTy)) {
9556 Value *LHS = Builder->CreateBitCast(SVI->getOperand(0), DestTy);
9557 Value *RHS = Builder->CreateBitCast(SVI->getOperand(1), DestTy);
9558 // Return a new shuffle vector. Use the same element IDs, as we
9559 // know the vector types match #elts.
9560 return new ShuffleVectorInst(LHS, RHS, SVI->getOperand(2));
9568 /// GetSelectFoldableOperands - We want to turn code that looks like this:
///   %C = or %A, %B
9570 ///   %D = select %cond, %C, %A
/// into:
9572 ///   %C = select %cond, %B, 0
///   %D = or %A, %C
9575 /// Assuming that the specified instruction is an operand to the select, return
9576 /// a bitmask indicating which operands of this instruction are foldable if they
9577 /// equal the other incoming value of the select.
9579 static unsigned GetSelectFoldableOperands(Instruction *I) {
9580 switch (I->getOpcode()) {
9581 case Instruction::Add:
9582 case Instruction::Mul:
9583 case Instruction::And:
9584 case Instruction::Or:
9585 case Instruction::Xor:
9586 return 3; // Can fold through either operand.
9587 case Instruction::Sub: // Can only fold on the amount subtracted.
9588 case Instruction::Shl: // Can only fold on the shift amount.
9589 case Instruction::LShr:
9590 case Instruction::AShr:
return 1; // Can only fold through the first operand.
default:
9593 return 0; // Cannot fold
9597 /// GetSelectFoldableConstant - For the same transformation as the previous
9598 /// function, return the identity constant that goes into the select.
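/// For example (sketch):
///   select %c, (add %X, %B), %X  -->  add %X, (select %c, %B, 0)
///   select %c, (and %X, %B), %X  -->  and %X, (select %c, %B, -1)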
9599 static Constant *GetSelectFoldableConstant(Instruction *I,
9600 LLVMContext *Context) {
9601 switch (I->getOpcode()) {
9602 default: llvm_unreachable("This cannot happen!");
9603 case Instruction::Add:
9604 case Instruction::Sub:
9605 case Instruction::Or:
9606 case Instruction::Xor:
9607 case Instruction::Shl:
9608 case Instruction::LShr:
9609 case Instruction::AShr:
9610 return Constant::getNullValue(I->getType());
9611 case Instruction::And:
9612 return Constant::getAllOnesValue(I->getType());
9613 case Instruction::Mul:
9614 return ConstantInt::get(I->getType(), 1);
9618 /// FoldSelectOpOp - Here we have (select c, TI, FI), and we know that TI and FI
9619 /// have the same opcode and only one use each. Try to simplify this.
9620 Instruction *InstCombiner::FoldSelectOpOp(SelectInst &SI, Instruction *TI,
9622 if (TI->getNumOperands() == 1) {
9623 // If this is a non-volatile load or a cast from the same type, fold it by selecting between the inputs and applying the operation once.
9626 if (TI->getOperand(0)->getType() != FI->getOperand(0)->getType())
9629 return 0; // unknown unary op.
9632 // Fold this by inserting a select from the input values.
9633 SelectInst *NewSI = SelectInst::Create(SI.getCondition(), TI->getOperand(0),
9634 FI->getOperand(0), SI.getName()+".v");
9635 InsertNewInstBefore(NewSI, SI);
9636 return CastInst::Create(Instruction::CastOps(TI->getOpcode()), NewSI,
9640 // Only handle binary operators here.
9641 if (!isa<BinaryOperator>(TI))
9644 // Figure out if the operations have any operands in common.
9645 Value *MatchOp, *OtherOpT, *OtherOpF;
9647 if (TI->getOperand(0) == FI->getOperand(0)) {
9648 MatchOp = TI->getOperand(0);
9649 OtherOpT = TI->getOperand(1);
9650 OtherOpF = FI->getOperand(1);
9651 MatchIsOpZero = true;
9652 } else if (TI->getOperand(1) == FI->getOperand(1)) {
9653 MatchOp = TI->getOperand(1);
9654 OtherOpT = TI->getOperand(0);
9655 OtherOpF = FI->getOperand(0);
9656 MatchIsOpZero = false;
9657 } else if (!TI->isCommutative()) {
9659 } else if (TI->getOperand(0) == FI->getOperand(1)) {
9660 MatchOp = TI->getOperand(0);
9661 OtherOpT = TI->getOperand(1);
9662 OtherOpF = FI->getOperand(0);
9663 MatchIsOpZero = true;
9664 } else if (TI->getOperand(1) == FI->getOperand(0)) {
9665 MatchOp = TI->getOperand(1);
9666 OtherOpT = TI->getOperand(0);
9667 OtherOpF = FI->getOperand(1);
9668 MatchIsOpZero = true;
9673 // If we reach here, they do have operations in common.
9674 SelectInst *NewSI = SelectInst::Create(SI.getCondition(), OtherOpT,
9675 OtherOpF, SI.getName()+".v");
9676 InsertNewInstBefore(NewSI, SI);
9678 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(TI)) {
9680 return BinaryOperator::Create(BO->getOpcode(), MatchOp, NewSI);
9682 return BinaryOperator::Create(BO->getOpcode(), NewSI, MatchOp);
9684 llvm_unreachable("Shouldn't get here");
9688 static bool isSelect01(Constant *C1, Constant *C2) {
9689 ConstantInt *C1I = dyn_cast<ConstantInt>(C1);
9692 ConstantInt *C2I = dyn_cast<ConstantInt>(C2);
9695 return (C1I->isZero() || C1I->isOne()) && (C2I->isZero() || C2I->isOne());
9698 /// FoldSelectIntoOp - Try to fold the select into one of the operands to
9699 /// facilitate further optimization.
9700 Instruction *InstCombiner::FoldSelectIntoOp(SelectInst &SI, Value *TrueVal,
9702 // See the comment above GetSelectFoldableOperands for a description of the
9703 // transformation we are doing here.
9704 if (Instruction *TVI = dyn_cast<Instruction>(TrueVal)) {
9705 if (TVI->hasOneUse() && TVI->getNumOperands() == 2 &&
9706 !isa<Constant>(FalseVal)) {
9707 if (unsigned SFO = GetSelectFoldableOperands(TVI)) {
9708 unsigned OpToFold = 0;
9709 if ((SFO & 1) && FalseVal == TVI->getOperand(0)) {
9711 } else if ((SFO & 2) && FalseVal == TVI->getOperand(1)) {
9716 Constant *C = GetSelectFoldableConstant(TVI, Context);
9717 Value *OOp = TVI->getOperand(2-OpToFold);
9718 // Avoid creating a select between 2 constants unless it's selecting between 0 and 1 (see isSelect01).
9720 if (!isa<Constant>(OOp) || isSelect01(C, cast<Constant>(OOp))) {
9721 Instruction *NewSel = SelectInst::Create(SI.getCondition(), OOp, C);
9722 InsertNewInstBefore(NewSel, SI);
9723 NewSel->takeName(TVI);
9724 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(TVI))
9725 return BinaryOperator::Create(BO->getOpcode(), FalseVal, NewSel);
9726 llvm_unreachable("Unknown instruction!!");
9733 if (Instruction *FVI = dyn_cast<Instruction>(FalseVal)) {
9734 if (FVI->hasOneUse() && FVI->getNumOperands() == 2 &&
9735 !isa<Constant>(TrueVal)) {
9736 if (unsigned SFO = GetSelectFoldableOperands(FVI)) {
9737 unsigned OpToFold = 0;
9738 if ((SFO & 1) && TrueVal == FVI->getOperand(0)) {
9740 } else if ((SFO & 2) && TrueVal == FVI->getOperand(1)) {
9745 Constant *C = GetSelectFoldableConstant(FVI, Context);
9746 Value *OOp = FVI->getOperand(2-OpToFold);
9747 // Avoid creating a select between 2 constants unless it's selecting between 0 and 1 (see isSelect01).
9749 if (!isa<Constant>(OOp) || isSelect01(C, cast<Constant>(OOp))) {
9750 Instruction *NewSel = SelectInst::Create(SI.getCondition(), C, OOp);
9751 InsertNewInstBefore(NewSel, SI);
9752 NewSel->takeName(FVI);
9753 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FVI))
9754 return BinaryOperator::Create(BO->getOpcode(), TrueVal, NewSel);
9755 llvm_unreachable("Unknown instruction!!");
9765 /// visitSelectInstWithICmp - Visit a SelectInst that has an
9766 /// ICmpInst as its first operand.
9768 Instruction *InstCombiner::visitSelectInstWithICmp(SelectInst &SI,
9770 bool Changed = false;
9771 ICmpInst::Predicate Pred = ICI->getPredicate();
9772 Value *CmpLHS = ICI->getOperand(0);
9773 Value *CmpRHS = ICI->getOperand(1);
9774 Value *TrueVal = SI.getTrueValue();
9775 Value *FalseVal = SI.getFalseValue();
9777 // Check cases where the comparison is with a constant that
9778 // can be adjusted to fit the min/max idiom. We may edit ICI in
9779 // place here, so make sure the select is the only user.
9780 if (ICI->hasOneUse())
9781 if (ConstantInt *CI = dyn_cast<ConstantInt>(CmpRHS)) {
9784 case ICmpInst::ICMP_ULT:
9785 case ICmpInst::ICMP_SLT: {
9786 // X < MIN ? T : F --> F
9787 if (CI->isMinValue(Pred == ICmpInst::ICMP_SLT))
9788 return ReplaceInstUsesWith(SI, FalseVal);
9789 // X < C ? X : C-1 --> X > C-1 ? C-1 : X
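// e.g. with C == 10: %x <s 10 ? %x : 9 --> %x >s 9 ? 9 : %x
// (both compute the signed minimum of %x and 9; the second is the canonical
// min/max shape).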
9790 Constant *AdjustedRHS = SubOne(CI);
9791 if ((CmpLHS == TrueVal && AdjustedRHS == FalseVal) ||
9792 (CmpLHS == FalseVal && AdjustedRHS == TrueVal)) {
9793 Pred = ICmpInst::getSwappedPredicate(Pred);
9794 CmpRHS = AdjustedRHS;
9795 std::swap(FalseVal, TrueVal);
9796 ICI->setPredicate(Pred);
9797 ICI->setOperand(1, CmpRHS);
9798 SI.setOperand(1, TrueVal);
9799 SI.setOperand(2, FalseVal);
9804 case ICmpInst::ICMP_UGT:
9805 case ICmpInst::ICMP_SGT: {
9806 // X > MAX ? T : F --> F
9807 if (CI->isMaxValue(Pred == ICmpInst::ICMP_SGT))
9808 return ReplaceInstUsesWith(SI, FalseVal);
9809 // X > C ? X : C+1 --> X < C+1 ? C+1 : X
9810 Constant *AdjustedRHS = AddOne(CI);
9811 if ((CmpLHS == TrueVal && AdjustedRHS == FalseVal) ||
9812 (CmpLHS == FalseVal && AdjustedRHS == TrueVal)) {
9813 Pred = ICmpInst::getSwappedPredicate(Pred);
9814 CmpRHS = AdjustedRHS;
9815 std::swap(FalseVal, TrueVal);
9816 ICI->setPredicate(Pred);
9817 ICI->setOperand(1, CmpRHS);
9818 SI.setOperand(1, TrueVal);
9819 SI.setOperand(2, FalseVal);
9826 // (x <s 0) ? -1 : 0 -> ashr x, 31       (all ones when x is negative)
9827 // (x >s -1) ? -1 : 0 -> not (ashr x, 31) (all ones when x is non-negative)
9828 CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
9829 if (match(TrueVal, m_ConstantInt<-1>()) &&
9830 match(FalseVal, m_ConstantInt<0>()))
9831 Pred = ICI->getPredicate();
9832 else if (match(TrueVal, m_ConstantInt<0>()) &&
9833 match(FalseVal, m_ConstantInt<-1>()))
9834 Pred = CmpInst::getInversePredicate(ICI->getPredicate());
9836 if (Pred != CmpInst::BAD_ICMP_PREDICATE) {
9837 // If we are selecting all-ones or zero based on the sign of a value, we can
9838 // replace the select with an arithmetic shift that smears the sign bit across
9839 // the result (inverted for the non-negative case), avoiding the comparison.
9840 const APInt &Op1CV = CI->getValue();
9842 // sext (x <s 0) to i32 --> x>>s31 true if signbit set.
9843 // sext (x >s -1) to i32 --> (x>>s31)^-1 true if signbit clear.
9844 if ((Pred == ICmpInst::ICMP_SLT && Op1CV == 0) ||
9845 (Pred == ICmpInst::ICMP_SGT && Op1CV.isAllOnesValue())) {
9846 Value *In = ICI->getOperand(0);
9847 Value *Sh = ConstantInt::get(In->getType(),
9848 In->getType()->getScalarSizeInBits()-1);
9849 In = InsertNewInstBefore(BinaryOperator::CreateAShr(In, Sh,
9850 In->getName()+".lobit"),
9852 if (In->getType() != SI.getType())
9853 In = CastInst::CreateIntegerCast(In, SI.getType(),
9854 true/*SExt*/, "tmp", ICI);
9856 if (Pred == ICmpInst::ICMP_SGT)
9857 In = InsertNewInstBefore(BinaryOperator::CreateNot(In,
9858 In->getName()+".not"), *ICI);
9860 return ReplaceInstUsesWith(SI, In);
9865 if (CmpLHS == TrueVal && CmpRHS == FalseVal) {
9866 // Transform (X == Y) ? X : Y -> Y
9867 if (Pred == ICmpInst::ICMP_EQ)
9868 return ReplaceInstUsesWith(SI, FalseVal);
9869 // Transform (X != Y) ? X : Y -> X
9870 if (Pred == ICmpInst::ICMP_NE)
9871 return ReplaceInstUsesWith(SI, TrueVal);
9872 /// NOTE: if we wanted to, this is where to detect integer MIN/MAX
9874 } else if (CmpLHS == FalseVal && CmpRHS == TrueVal) {
9875 // Transform (X == Y) ? Y : X -> X
9876 if (Pred == ICmpInst::ICMP_EQ)
9877 return ReplaceInstUsesWith(SI, FalseVal);
9878 // Transform (X != Y) ? Y : X -> Y
9879 if (Pred == ICmpInst::ICMP_NE)
9880 return ReplaceInstUsesWith(SI, TrueVal);
9881 /// NOTE: if we wanted to, this is where to detect integer MIN/MAX
9883 return Changed ? &SI : 0;
9887 /// CanSelectOperandBeMappingIntoPredBlock - SI is a select whose condition is a
9888 /// PHI node (but the two may be in different blocks). See if the true/false
9889 /// values (V) are live in all of the predecessor blocks of the PHI. For
9890 /// example, cases like this cannot be mapped:
9892 /// X = phi [ C1, BB1], [C2, BB2]
9894 /// Z = select X, Y, 0
9896 /// because Y is not live in BB1/BB2.
9898 static bool CanSelectOperandBeMappingIntoPredBlock(const Value *V,
9899 const SelectInst &SI) {
9900 // If the value is a non-instruction value like a constant or argument, it
9901 // can always be mapped.
9902 const Instruction *I = dyn_cast<Instruction>(V);
9903 if (I == 0) return true;
9905 // If V is a PHI node defined in the same block as the condition PHI, we can
9906 // map the arguments.
9907 const PHINode *CondPHI = cast<PHINode>(SI.getCondition());
9909 if (const PHINode *VP = dyn_cast<PHINode>(I))
9910 if (VP->getParent() == CondPHI->getParent())
9913 // Otherwise, if the PHI and select are defined in the same block and if V is
9914 // defined in a different block, then we can transform it.
9915 if (SI.getParent() == CondPHI->getParent() &&
9916 I->getParent() != CondPHI->getParent())
9919 // Otherwise we have a 'hard' case and we can't tell without doing more
9920 // detailed dominator based analysis, punt.
9924 /// FoldSPFofSPF - We have an SPF (e.g. a min or max) of an SPF of the form:
9925 /// SPF2(SPF1(A, B), C)
9926 Instruction *InstCombiner::FoldSPFofSPF(Instruction *Inner,
9927 SelectPatternFlavor SPF1,
9930 SelectPatternFlavor SPF2, Value *C) {
9931 if (C == A || C == B) {
9932 // MAX(MAX(A, B), B) -> MAX(A, B)
9933 // MIN(MIN(a, b), a) -> MIN(a, b)
9935 return ReplaceInstUsesWith(Outer, Inner);
9937 // MAX(MIN(a, b), a) -> a
9938 // MIN(MAX(a, b), a) -> a
9939 if ((SPF1 == SPF_SMIN && SPF2 == SPF_SMAX) ||
9940 (SPF1 == SPF_SMAX && SPF2 == SPF_SMIN) ||
9941 (SPF1 == SPF_UMIN && SPF2 == SPF_UMAX) ||
9942 (SPF1 == SPF_UMAX && SPF2 == SPF_UMIN))
9943 return ReplaceInstUsesWith(Outer, C);
9946 // TODO: MIN(MIN(A, 23), 97)
9953 Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
9954 Value *CondVal = SI.getCondition();
9955 Value *TrueVal = SI.getTrueValue();
9956 Value *FalseVal = SI.getFalseValue();
9958 // select true, X, Y -> X
9959 // select false, X, Y -> Y
9960 if (ConstantInt *C = dyn_cast<ConstantInt>(CondVal))
9961 return ReplaceInstUsesWith(SI, C->getZExtValue() ? TrueVal : FalseVal);
9963 // select C, X, X -> X
9964 if (TrueVal == FalseVal)
9965 return ReplaceInstUsesWith(SI, TrueVal);
9967 if (isa<UndefValue>(TrueVal)) // select C, undef, X -> X
9968 return ReplaceInstUsesWith(SI, FalseVal);
9969 if (isa<UndefValue>(FalseVal)) // select C, X, undef -> X
9970 return ReplaceInstUsesWith(SI, TrueVal);
9971 if (isa<UndefValue>(CondVal)) { // select undef, X, Y -> X or Y
9972 if (isa<Constant>(TrueVal))
9973 return ReplaceInstUsesWith(SI, TrueVal);
9975 return ReplaceInstUsesWith(SI, FalseVal);
9978 if (SI.getType() == Type::getInt1Ty(*Context)) {
9979 if (ConstantInt *C = dyn_cast<ConstantInt>(TrueVal)) {
9980 if (C->getZExtValue()) {
9981 // Change: A = select B, true, C --> A = or B, C
9982 return BinaryOperator::CreateOr(CondVal, FalseVal);
9984 // Change: A = select B, false, C --> A = and !B, C
9986 InsertNewInstBefore(BinaryOperator::CreateNot(CondVal,
9987 "not."+CondVal->getName()), SI);
9988 return BinaryOperator::CreateAnd(NotCond, FalseVal);
9990 } else if (ConstantInt *C = dyn_cast<ConstantInt>(FalseVal)) {
9991 if (C->getZExtValue() == false) {
9992 // Change: A = select B, C, false --> A = and B, C
9993 return BinaryOperator::CreateAnd(CondVal, TrueVal);
9995 // Change: A = select B, C, true --> A = or !B, C
9997 InsertNewInstBefore(BinaryOperator::CreateNot(CondVal,
9998 "not."+CondVal->getName()), SI);
9999 return BinaryOperator::CreateOr(NotCond, TrueVal);
10003 // select a, b, a -> a&b
10004 // select a, a, b -> a|b
10005 if (CondVal == TrueVal)
10006 return BinaryOperator::CreateOr(CondVal, FalseVal);
10007 else if (CondVal == FalseVal)
10008 return BinaryOperator::CreateAnd(CondVal, TrueVal);
10011 // Selecting between two integer constants?
10012 if (ConstantInt *TrueValC = dyn_cast<ConstantInt>(TrueVal))
10013 if (ConstantInt *FalseValC = dyn_cast<ConstantInt>(FalseVal)) {
10014 // select C, 1, 0 -> zext C to int
10015 if (FalseValC->isZero() && TrueValC->getValue() == 1) {
10016 return CastInst::Create(Instruction::ZExt, CondVal, SI.getType());
10017 } else if (TrueValC->isZero() && FalseValC->getValue() == 1) {
10018 // select C, 0, 1 -> zext !C to int
10020 InsertNewInstBefore(BinaryOperator::CreateNot(CondVal,
10021 "not."+CondVal->getName()), SI);
10022 return CastInst::Create(Instruction::ZExt, NotCond, SI.getType());
10025 if (ICmpInst *IC = dyn_cast<ICmpInst>(SI.getCondition())) {
10026 // If one of the constants is zero (we know they can't both be) and we
10027 // have an icmp instruction with zero, and we have an 'and' with the
10028 // non-constant value, eliminate this whole mess. This corresponds to
10029 // cases like this: ((X & 8) ? 8 : 0), where the 'and' mask is a single bit.
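// Two sketches of the fold, using a single-bit mask:
//   ((X & 8) == 0) ? 0 : 8 --> X & 8
//   ((X & 8) == 0) ? 8 : 0 --> (X & 8) ^ 8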
10030 if (TrueValC->isZero() || FalseValC->isZero())
10031 if (IC->isEquality() && isa<ConstantInt>(IC->getOperand(1)) &&
10032 cast<Constant>(IC->getOperand(1))->isNullValue())
10033 if (Instruction *ICA = dyn_cast<Instruction>(IC->getOperand(0)))
10034 if (ICA->getOpcode() == Instruction::And &&
10035 isa<ConstantInt>(ICA->getOperand(1)) &&
10036 (ICA->getOperand(1) == TrueValC ||
10037 ICA->getOperand(1) == FalseValC) &&
10038 isOneBitSet(cast<ConstantInt>(ICA->getOperand(1)))) {
10039 // Okay, now we know that everything is set up, we just don't
10040 // know whether we have a icmp_ne or icmp_eq and whether the
10041 // true or false val is the zero.
10042 bool ShouldNotVal = !TrueValC->isZero();
10043 ShouldNotVal ^= IC->getPredicate() == ICmpInst::ICMP_NE;
10046 V = InsertNewInstBefore(BinaryOperator::Create(
10047 Instruction::Xor, V, ICA->getOperand(1)), SI);
10048 return ReplaceInstUsesWith(SI, V);
10053 // See if we are selecting two values based on a comparison of the two values.
10054 if (FCmpInst *FCI = dyn_cast<FCmpInst>(CondVal)) {
10055 if (FCI->getOperand(0) == TrueVal && FCI->getOperand(1) == FalseVal) {
10056 // Transform (X == Y) ? X : Y -> Y
10057 if (FCI->getPredicate() == FCmpInst::FCMP_OEQ) {
10058 // This is not safe in general for floating point:
10059 // consider X== -0, Y== +0.
10060 // It becomes safe if either operand is a nonzero constant.
10061 ConstantFP *CFPt, *CFPf;
10062 if (((CFPt = dyn_cast<ConstantFP>(TrueVal)) &&
10063 !CFPt->getValueAPF().isZero()) ||
10064 ((CFPf = dyn_cast<ConstantFP>(FalseVal)) &&
10065 !CFPf->getValueAPF().isZero()))
10066 return ReplaceInstUsesWith(SI, FalseVal);
10068 // Transform (X != Y) ? X : Y -> X
10069 if (FCI->getPredicate() == FCmpInst::FCMP_ONE)
10070 return ReplaceInstUsesWith(SI, TrueVal);
10071 // NOTE: if we wanted to, this is where to detect MIN/MAX
10073 } else if (FCI->getOperand(0) == FalseVal && FCI->getOperand(1) == TrueVal){
10074 // Transform (X == Y) ? Y : X -> X
10075 if (FCI->getPredicate() == FCmpInst::FCMP_OEQ) {
10076 // This is not safe in general for floating point:
10077 // consider X== -0, Y== +0.
10078 // It becomes safe if either operand is a nonzero constant.
10079 ConstantFP *CFPt, *CFPf;
10080 if (((CFPt = dyn_cast<ConstantFP>(TrueVal)) &&
10081 !CFPt->getValueAPF().isZero()) ||
10082 ((CFPf = dyn_cast<ConstantFP>(FalseVal)) &&
10083 !CFPf->getValueAPF().isZero()))
10084 return ReplaceInstUsesWith(SI, FalseVal);
10086 // Transform (X != Y) ? Y : X -> Y
10087 if (FCI->getPredicate() == FCmpInst::FCMP_ONE)
10088 return ReplaceInstUsesWith(SI, TrueVal);
10089 // NOTE: if we wanted to, this is where to detect MIN/MAX
10091 // NOTE: if we wanted to, this is where to detect ABS
10094 // See if we are selecting two values based on a comparison of the two values.
10095 if (ICmpInst *ICI = dyn_cast<ICmpInst>(CondVal))
10096 if (Instruction *Result = visitSelectInstWithICmp(SI, ICI))
10099 if (Instruction *TI = dyn_cast<Instruction>(TrueVal))
10100 if (Instruction *FI = dyn_cast<Instruction>(FalseVal))
10101 if (TI->hasOneUse() && FI->hasOneUse()) {
10102 Instruction *AddOp = 0, *SubOp = 0;
10104 // Turn (select C, (op X, Y), (op X, Z)) -> (op X, (select C, Y, Z))
10105 if (TI->getOpcode() == FI->getOpcode())
10106 if (Instruction *IV = FoldSelectOpOp(SI, TI, FI))
10109 // Turn select C, (X+Y), (X-Y) --> (X+(select C, Y, (-Y))). This is
10110 // even legal for FP.
10111 if ((TI->getOpcode() == Instruction::Sub &&
10112 FI->getOpcode() == Instruction::Add) ||
10113 (TI->getOpcode() == Instruction::FSub &&
10114 FI->getOpcode() == Instruction::FAdd)) {
10115 AddOp = FI; SubOp = TI;
10116 } else if ((FI->getOpcode() == Instruction::Sub &&
10117 TI->getOpcode() == Instruction::Add) ||
10118 (FI->getOpcode() == Instruction::FSub &&
10119 TI->getOpcode() == Instruction::FAdd)) {
10120 AddOp = TI; SubOp = FI;
10124 Value *OtherAddOp = 0;
10125 if (SubOp->getOperand(0) == AddOp->getOperand(0)) {
10126 OtherAddOp = AddOp->getOperand(1);
10127 } else if (SubOp->getOperand(0) == AddOp->getOperand(1)) {
10128 OtherAddOp = AddOp->getOperand(0);
10132 // So at this point we know we have (Y -> OtherAddOp):
10133 // select C, (add X, Y), (sub X, Z)
10134 Value *NegVal; // Compute -Z
10135 if (Constant *C = dyn_cast<Constant>(SubOp->getOperand(1))) {
10136 NegVal = ConstantExpr::getNeg(C);
10138 NegVal = InsertNewInstBefore(
10139 BinaryOperator::CreateNeg(SubOp->getOperand(1),
10143 Value *NewTrueOp = OtherAddOp;
10144 Value *NewFalseOp = NegVal;
10146 std::swap(NewTrueOp, NewFalseOp);
10147 Instruction *NewSel =
10148 SelectInst::Create(CondVal, NewTrueOp,
10149 NewFalseOp, SI.getName() + ".p");
10151 NewSel = InsertNewInstBefore(NewSel, SI);
10152 return BinaryOperator::CreateAdd(SubOp->getOperand(0), NewSel);
10157 // See if we can fold the select into one of our operands.
10158 if (SI.getType()->isInteger()) {
10159 if (Instruction *FoldI = FoldSelectIntoOp(SI, TrueVal, FalseVal))
10162 // MAX(MAX(a, b), a) -> MAX(a, b)
10163 // MIN(MIN(a, b), a) -> MIN(a, b)
10164 // MAX(MIN(a, b), a) -> a
10165 // MIN(MAX(a, b), a) -> a
10166 Value *LHS, *RHS, *LHS2, *RHS2;
10167 if (SelectPatternFlavor SPF = MatchSelectPattern(&SI, LHS, RHS)) {
10168 if (SelectPatternFlavor SPF2 = MatchSelectPattern(LHS, LHS2, RHS2))
10169 if (Instruction *R = FoldSPFofSPF(cast<Instruction>(LHS),SPF2,LHS2,RHS2,
10172 if (SelectPatternFlavor SPF2 = MatchSelectPattern(RHS, LHS2, RHS2))
10173 if (Instruction *R = FoldSPFofSPF(cast<Instruction>(RHS),SPF2,LHS2,RHS2,
10179 // ABS(-X) -> ABS(X)
10180 // ABS(ABS(X)) -> ABS(X)
10183 // See if we can fold the select into a phi node if the condition is a select.
10184 if (isa<PHINode>(SI.getCondition()))
10185 // The true/false values have to be live in the PHI predecessor's blocks.
10186 if (CanSelectOperandBeMappingIntoPredBlock(TrueVal, SI) &&
10187 CanSelectOperandBeMappingIntoPredBlock(FalseVal, SI))
10188 if (Instruction *NV = FoldOpIntoPhi(SI))
10191 if (BinaryOperator::isNot(CondVal)) {
10192 SI.setOperand(0, BinaryOperator::getNotArgument(CondVal));
10193 SI.setOperand(1, FalseVal);
10194 SI.setOperand(2, TrueVal);
10201 /// EnforceKnownAlignment - If the specified pointer points to an object that
10202 /// we control, modify the object's alignment to PrefAlign. This isn't
10203 /// often possible though. If alignment is important, a more reliable approach
10204 /// is to simply align all global variables and allocation instructions to
10205 /// their preferred alignment from the beginning.
10207 static unsigned EnforceKnownAlignment(Value *V,
10208 unsigned Align, unsigned PrefAlign) {
10210 User *U = dyn_cast<User>(V);
10211 if (!U) return Align;
10213 switch (Operator::getOpcode(U)) {
10215 case Instruction::BitCast:
10216 return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
10217 case Instruction::GetElementPtr: {
10218 // If all indexes are zero, it is just the alignment of the base pointer.
10219 bool AllZeroOperands = true;
10220 for (User::op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e; ++i)
10221 if (!isa<Constant>(*i) ||
10222 !cast<Constant>(*i)->isNullValue()) {
10223 AllZeroOperands = false;
10227 if (AllZeroOperands) {
10228 // Treat this like a bitcast.
10229 return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
10235 if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
10236 // If there is a large requested alignment and we can, bump up the alignment of the global.
10238 if (!GV->isDeclaration()) {
10239 if (GV->getAlignment() >= PrefAlign)
10240 Align = GV->getAlignment();
10242 GV->setAlignment(PrefAlign);
10246 } else if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
10247 // If there is a requested alignment and if this is an alloca, round up.
10248 if (AI->getAlignment() >= PrefAlign)
10249 Align = AI->getAlignment();
10251 AI->setAlignment(PrefAlign);
10259 /// GetOrEnforceKnownAlignment - If the specified pointer has an alignment that
10260 /// we can determine, return it, otherwise return 0. If PrefAlign is specified,
10261 /// and it is more than the alignment of the ultimate object, see if we can
10262 /// increase the alignment of the ultimate object, making this check succeed.
10263 unsigned InstCombiner::GetOrEnforceKnownAlignment(Value *V,
10264 unsigned PrefAlign) {
10265 unsigned BitWidth = TD ? TD->getTypeSizeInBits(V->getType()) :
10266 sizeof(PrefAlign) * CHAR_BIT;
10267 APInt Mask = APInt::getAllOnesValue(BitWidth);
10268 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
10269 ComputeMaskedBits(V, Mask, KnownZero, KnownOne);
10270 unsigned TrailZ = KnownZero.countTrailingOnes();
10271 unsigned Align = 1u << std::min(BitWidth - 1, TrailZ);
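// e.g. if ComputeMaskedBits proves the low four bits of the pointer value are
// zero (TrailZ == 4), the object is known to be at least 1u << 4 == 16 byte
// aligned.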
10273 if (PrefAlign > Align)
10274 Align = EnforceKnownAlignment(V, Align, PrefAlign);
10276 // We don't need to make any adjustment.
10280 Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
10281 unsigned DstAlign = GetOrEnforceKnownAlignment(MI->getOperand(1));
10282 unsigned SrcAlign = GetOrEnforceKnownAlignment(MI->getOperand(2));
10283 unsigned MinAlign = std::min(DstAlign, SrcAlign);
10284 unsigned CopyAlign = MI->getAlignment();
10286 if (CopyAlign < MinAlign) {
10287 MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
10292 // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with a load/store pair.
10294 ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getOperand(3));
10295 if (MemOpLength == 0) return 0;
10297 // Source and destination pointer types are always "i8*" for intrinsic. See
10298 // if the size is something we can handle with a single primitive load/store.
10299 // A single load+store correctly handles overlapping memory in the memmove case.
10301 unsigned Size = MemOpLength->getZExtValue();
10302 if (Size == 0) return MI; // Delete this mem transfer.
10304 if (Size > 8 || (Size&(Size-1)))
10305 return 0; // If not 1/2/4/8 bytes, exit.
10307 // Use an integer load+store unless we can find something better.
10309 PointerType::getUnqual(IntegerType::get(*Context, Size<<3));
10311 // Memcpy forces the use of i8* for the source and destination. That means
10312 // that if you're using memcpy to move one double around, you'll get a cast
10313 // from double* to i8*. We'd much rather use a double load+store rather than
10314 // an i64 load+store, here because this improves the odds that the source or
10315 // dest address will be promotable. See if we can find a better type than the
10316 // integer datatype.
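// Sketch: for a length-8 copy whose destination is really a double* that was
// bitcast to i8* for the call, prefer
//   %v = load double* %src ; store double %v, double* %dst
// over an i64 load+store, since the double form is more likely to be promoted.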
10317 if (Value *Op = getBitCastOperand(MI->getOperand(1))) {
10318 const Type *SrcETy = cast<PointerType>(Op->getType())->getElementType();
10319 if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
10320 // The SrcETy might be something like {{{double}}} or [1 x double]. Rip
10321 // down through these levels if so.
10322 while (!SrcETy->isSingleValueType()) {
10323 if (const StructType *STy = dyn_cast<StructType>(SrcETy)) {
10324 if (STy->getNumElements() == 1)
10325 SrcETy = STy->getElementType(0);
10328 } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
10329 if (ATy->getNumElements() == 1)
10330 SrcETy = ATy->getElementType();
10337 if (SrcETy->isSingleValueType())
10338 NewPtrTy = PointerType::getUnqual(SrcETy);
10343 // If the memcpy/memmove provides better alignment info than we can infer, use it.
10345 SrcAlign = std::max(SrcAlign, CopyAlign);
10346 DstAlign = std::max(DstAlign, CopyAlign);
10348 Value *Src = Builder->CreateBitCast(MI->getOperand(2), NewPtrTy);
10349 Value *Dest = Builder->CreateBitCast(MI->getOperand(1), NewPtrTy);
10350 Instruction *L = new LoadInst(Src, "tmp", false, SrcAlign);
10351 InsertNewInstBefore(L, *MI);
10352 InsertNewInstBefore(new StoreInst(L, Dest, false, DstAlign), *MI);
10354 // Set the size of the copy to 0, it will be deleted on the next iteration.
10355 MI->setOperand(3, Constant::getNullValue(MemOpLength->getType()));
10359 Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
10360 unsigned Alignment = GetOrEnforceKnownAlignment(MI->getDest());
10361 if (MI->getAlignment() < Alignment) {
10362 MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
10363 Alignment, false));
10367 // Extract the length and alignment and fill if they are constant.
10368 ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
10369 ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
10370 if (!LenC || !FillC || FillC->getType() != Type::getInt8Ty(*Context))
10372 uint64_t Len = LenC->getZExtValue();
10373 Alignment = MI->getAlignment();
10375 // If the length is zero, this is a no-op
10376 if (Len == 0) return MI; // memset(d,c,0,a) -> noop
10378 // memset(s,c,n) -> store s, c (for n=1,2,4,8)
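// e.g. memset(%p, 42, 4) becomes (sketch):
//   store i32 0x2A2A2A2A, i32* %p.cast    ; %p.cast = bitcast of %p to i32*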
10379 if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
10380 const Type *ITy = IntegerType::get(*Context, Len*8); // n=1 -> i8.
10382 Value *Dest = MI->getDest();
10383 Dest = Builder->CreateBitCast(Dest, PointerType::getUnqual(ITy));
10385 // Alignment 0 is identity for alignment 1 for memset, but not store.
10386 if (Alignment == 0) Alignment = 1;
10388 // Extract the fill value and store.
10389 uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
10390 InsertNewInstBefore(new StoreInst(ConstantInt::get(ITy, Fill),
10391 Dest, false, Alignment), *MI);
10393 // Set the size of the copy to 0, it will be deleted on the next iteration.
10394 MI->setLength(Constant::getNullValue(LenC->getType()));
10402 /// visitCallInst - CallInst simplification. This mostly only handles folding
10403 /// of intrinsic instructions. For normal calls, it allows visitCallSite to do
10404 /// the heavy lifting.
10406 Instruction *InstCombiner::visitCallInst(CallInst &CI) {
10407 if (isFreeCall(&CI))
10408 return visitFree(CI);
10410 // If the caller function is nounwind, mark the call as nounwind, even if the callee isn't.
10412 if (CI.getParent()->getParent()->doesNotThrow() &&
10413 !CI.doesNotThrow()) {
10414 CI.setDoesNotThrow();
10418 IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
10419 if (!II) return visitCallSite(&CI);
10421 // Intrinsics cannot occur in an invoke, so handle them here instead of in
10423 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
10424 bool Changed = false;
10426 // memmove/cpy/set of zero bytes is a noop.
10427 if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
10428 if (NumBytes->isNullValue()) return EraseInstFromFunction(CI);
10430 if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
10431 if (CI->getZExtValue() == 1) {
10432 // Replace the instruction with just byte operations. We would
10433 // transform other cases to loads/stores, but we don't know if
10434 // alignment is sufficient.
10438 // If we have a memmove and the source operand is a constant global,
10439 // then the source and dest pointers can't alias, so we can change this
10440 // into a call to memcpy.
10441 if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
10442 if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
10443 if (GVSrc->isConstant()) {
10444 Module *M = CI.getParent()->getParent()->getParent();
10445 Intrinsic::ID MemCpyID = Intrinsic::memcpy;
10446 const Type *Tys[1];
10447 Tys[0] = CI.getOperand(3)->getType();
10449 Intrinsic::getDeclaration(M, MemCpyID, Tys, 1));
10454 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
10455 // memmove(x,x,size) -> noop.
10456 if (MTI->getSource() == MTI->getDest())
10457 return EraseInstFromFunction(CI);
10460 // If we can determine a pointer alignment that is bigger than currently
10461 // set, update the alignment.
10462 if (isa<MemTransferInst>(MI)) {
10463 if (Instruction *I = SimplifyMemTransfer(MI))
10465 } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
10466 if (Instruction *I = SimplifyMemSet(MSI))
10470 if (Changed) return II;
10473 switch (II->getIntrinsicID()) {
10475 case Intrinsic::bswap:
10476 // bswap(bswap(x)) -> x
10477 if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getOperand(1)))
10478 if (Operand->getIntrinsicID() == Intrinsic::bswap)
10479 return ReplaceInstUsesWith(CI, Operand->getOperand(1));
10481 // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
10482 if (TruncInst *TI = dyn_cast<TruncInst>(II->getOperand(1))) {
10483 if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(TI->getOperand(0)))
10484 if (Operand->getIntrinsicID() == Intrinsic::bswap) {
10485 unsigned C = Operand->getType()->getPrimitiveSizeInBits() -
10486 TI->getType()->getPrimitiveSizeInBits();
10487 Value *CV = ConstantInt::get(Operand->getType(), C);
10488 Value *V = Builder->CreateLShr(Operand->getOperand(1), CV);
10489 return new TruncInst(V, TI->getType());
10494 case Intrinsic::powi:
10495 if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getOperand(2))) {
10496 // powi(x, 0) -> 1.0
10497 if (Power->isZero())
10498 return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
10500 if (Power->isOne())
10501 return ReplaceInstUsesWith(CI, II->getOperand(1));
10502 // powi(x, -1) -> 1/x
10503 if (Power->isAllOnesValue())
10504 return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
10505 II->getOperand(1));
10509 case Intrinsic::uadd_with_overflow: {
10510 Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
10511 const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
10512 uint32_t BitWidth = IT->getBitWidth();
10513 APInt Mask = APInt::getSignBit(BitWidth);
10514 APInt LHSKnownZero(BitWidth, 0);
10515 APInt LHSKnownOne(BitWidth, 0);
10516 ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
10517 bool LHSKnownNegative = LHSKnownOne[BitWidth - 1];
10518 bool LHSKnownPositive = LHSKnownZero[BitWidth - 1];
10520 if (LHSKnownNegative || LHSKnownPositive) {
10521 APInt RHSKnownZero(BitWidth, 0);
10522 APInt RHSKnownOne(BitWidth, 0);
10523 ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);
10524 bool RHSKnownNegative = RHSKnownOne[BitWidth - 1];
10525 bool RHSKnownPositive = RHSKnownZero[BitWidth - 1];
10526 if (LHSKnownNegative && RHSKnownNegative) {
10527 // The sign bit is set in both cases: this MUST overflow.
10528 // Create a simple add instruction, and insert it into the struct.
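// e.g. for i8 operands both known to be >= 0x80, the unsigned sum is >= 0x100
// and must wrap, so the overflow flag is a constant 'true'.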
10529 Instruction *Add = BinaryOperator::CreateAdd(LHS, RHS, "", &CI);
10532 UndefValue::get(LHS->getType()), ConstantInt::getTrue(*Context)
10534 Constant *Struct = ConstantStruct::get(*Context, V, 2, false);
10535 return InsertValueInst::Create(Struct, Add, 0);
10538 if (LHSKnownPositive && RHSKnownPositive) {
10539 // The sign bit is clear in both cases: this CANNOT overflow.
10540 // Create a simple add instruction, and insert it into the struct.
10541 Instruction *Add = BinaryOperator::CreateNUWAdd(LHS, RHS, "", &CI);
10544 UndefValue::get(LHS->getType()), ConstantInt::getFalse(*Context)
10546 Constant *Struct = ConstantStruct::get(*Context, V, 2, false);
10547 return InsertValueInst::Create(Struct, Add, 0);
10551 // FALL THROUGH uadd into sadd
10552 case Intrinsic::sadd_with_overflow:
10553 // Canonicalize constants into the RHS.
10554 if (isa<Constant>(II->getOperand(1)) &&
10555 !isa<Constant>(II->getOperand(2))) {
10556 Value *LHS = II->getOperand(1);
10557 II->setOperand(1, II->getOperand(2));
10558 II->setOperand(2, LHS);
10562 // X + undef -> undef
10563 if (isa<UndefValue>(II->getOperand(2)))
10564 return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));
10566 if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getOperand(2))) {
10567 // X + 0 -> {X, false}
10568 if (RHS->isZero()) {
10570 UndefValue::get(II->getOperand(0)->getType()),
10571 ConstantInt::getFalse(*Context)
10573 Constant *Struct = ConstantStruct::get(*Context, V, 2, false);
10574 return InsertValueInst::Create(Struct, II->getOperand(1), 0);
10578 case Intrinsic::usub_with_overflow:
10579 case Intrinsic::ssub_with_overflow:
10580 // undef - X -> undef
10581 // X - undef -> undef
10582 if (isa<UndefValue>(II->getOperand(1)) ||
10583 isa<UndefValue>(II->getOperand(2)))
10584 return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));
10586 if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getOperand(2))) {
10587 // X - 0 -> {X, false}
10588 if (RHS->isZero()) {
10590 UndefValue::get(II->getOperand(1)->getType()),
10591 ConstantInt::getFalse(*Context)
10593 Constant *Struct = ConstantStruct::get(*Context, V, 2, false);
10594 return InsertValueInst::Create(Struct, II->getOperand(1), 0);
10598 case Intrinsic::umul_with_overflow:
10599 case Intrinsic::smul_with_overflow:
10600 // Canonicalize constants into the RHS.
10601 if (isa<Constant>(II->getOperand(1)) &&
10602 !isa<Constant>(II->getOperand(2))) {
10603 Value *LHS = II->getOperand(1);
10604 II->setOperand(1, II->getOperand(2));
10605 II->setOperand(2, LHS);
10609 // X * undef -> undef
10610 if (isa<UndefValue>(II->getOperand(2)))
10611 return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));
10613 if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getOperand(2))) {
10614 // X*0 -> {0, false}
10615 if (RHSI->isZero())
10616 return ReplaceInstUsesWith(CI, Constant::getNullValue(II->getType()));
10618 // X * 1 -> {X, false}
10619 if (RHSI->equalsInt(1)) {
10621 UndefValue::get(II->getOperand(1)->getType()),
10622 ConstantInt::getFalse(*Context)
10624 Constant *Struct = ConstantStruct::get(*Context, V, 2, false);
10625 return InsertValueInst::Create(Struct, II->getOperand(1), 0);
10629 case Intrinsic::ppc_altivec_lvx:
10630 case Intrinsic::ppc_altivec_lvxl:
10631 case Intrinsic::x86_sse_loadu_ps:
10632 case Intrinsic::x86_sse2_loadu_pd:
10633 case Intrinsic::x86_sse2_loadu_dq:
10634 // Turn PPC lvx -> load if the pointer is known aligned.
10635 // Turn X86 loadups -> load if the pointer is known aligned.
10636 if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) {
10637 Value *Ptr = Builder->CreateBitCast(II->getOperand(1),
10638 PointerType::getUnqual(II->getType()));
10639 return new LoadInst(Ptr);
10642 case Intrinsic::ppc_altivec_stvx:
10643 case Intrinsic::ppc_altivec_stvxl:
10644 // Turn stvx -> store if the pointer is known aligned.
10645 if (GetOrEnforceKnownAlignment(II->getOperand(2), 16) >= 16) {
10646 const Type *OpPtrTy =
10647 PointerType::getUnqual(II->getOperand(1)->getType());
10648 Value *Ptr = Builder->CreateBitCast(II->getOperand(2), OpPtrTy);
10649 return new StoreInst(II->getOperand(1), Ptr);
10652 case Intrinsic::x86_sse_storeu_ps:
10653 case Intrinsic::x86_sse2_storeu_pd:
10654 case Intrinsic::x86_sse2_storeu_dq:
10655 // Turn X86 storeu -> store if the pointer is known aligned.
10656 if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) {
10657 const Type *OpPtrTy =
10658 PointerType::getUnqual(II->getOperand(2)->getType());
10659 Value *Ptr = Builder->CreateBitCast(II->getOperand(1), OpPtrTy);
10660 return new StoreInst(II->getOperand(2), Ptr);
10664 case Intrinsic::x86_sse_cvttss2si: {
10665 // These intrinsics only demand the 0th element of their input vector. If
10666 // we can simplify the input based on that, do so now.
10668 cast<VectorType>(II->getOperand(1)->getType())->getNumElements();
10669 APInt DemandedElts(VWidth, 1);
10670 APInt UndefElts(VWidth, 0);
10671 if (Value *V = SimplifyDemandedVectorElts(II->getOperand(1), DemandedElts,
10673 II->setOperand(1, V);
10679 case Intrinsic::ppc_altivec_vperm:
10680 // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
10681 if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getOperand(3))) {
10682 assert(Mask->getNumOperands() == 16 && "Bad type for intrinsic!");
10684 // Check that all of the elements are integer constants or undefs.
10685 bool AllEltsOk = true;
10686 for (unsigned i = 0; i != 16; ++i) {
10687 if (!isa<ConstantInt>(Mask->getOperand(i)) &&
10688 !isa<UndefValue>(Mask->getOperand(i))) {
10695 // Cast the input vectors to byte vectors.
10696 Value *Op0 = Builder->CreateBitCast(II->getOperand(1), Mask->getType());
10697 Value *Op1 = Builder->CreateBitCast(II->getOperand(2), Mask->getType());
10698 Value *Result = UndefValue::get(Op0->getType());
10700 // Only extract each element once.
10701 Value *ExtractedElts[32];
10702 memset(ExtractedElts, 0, sizeof(ExtractedElts));
10704 for (unsigned i = 0; i != 16; ++i) {
10705 if (isa<UndefValue>(Mask->getOperand(i)))
10707 unsigned Idx=cast<ConstantInt>(Mask->getOperand(i))->getZExtValue();
10708 Idx &= 31; // Match the hardware behavior.
10710 if (ExtractedElts[Idx] == 0) {
10711 ExtractedElts[Idx] =
10712 Builder->CreateExtractElement(Idx < 16 ? Op0 : Op1,
10713 ConstantInt::get(Type::getInt32Ty(*Context), Idx&15, false),
10717 // Insert this value into the result vector.
10718 Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
10719 ConstantInt::get(Type::getInt32Ty(*Context), i, false),
10722 return CastInst::Create(Instruction::BitCast, Result, CI.getType());
10727 case Intrinsic::stackrestore: {
10728 // If the save is right next to the restore, remove the restore. This can
10729 // happen when variable allocas are DCE'd.
10730 if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getOperand(1))) {
10731 if (SS->getIntrinsicID() == Intrinsic::stacksave) {
10732 BasicBlock::iterator BI = SS;
10734 return EraseInstFromFunction(CI);
10738 // Scan down this block to see if there is another stack restore in the
10739 // same block without an intervening call/alloca.
10740 BasicBlock::iterator BI = II;
10741 TerminatorInst *TI = II->getParent()->getTerminator();
10742 bool CannotRemove = false;
10743 for (++BI; &*BI != TI; ++BI) {
10744 if (isa<AllocaInst>(BI) || isMalloc(BI)) {
10745 CannotRemove = true;
10748 if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
10749 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
10750 // If there is a stackrestore below this one, remove this one.
10751 if (II->getIntrinsicID() == Intrinsic::stackrestore)
10752 return EraseInstFromFunction(CI);
10753 // Otherwise, ignore the intrinsic.
10755 // If we found a non-intrinsic call, we can't remove the stack restore.
10757 CannotRemove = true;
10763 // If the stack restore is in a return/unwind block and if there are no
10764 // allocas or calls between the restore and the return, nuke the restore.
10765 if (!CannotRemove && (isa<ReturnInst>(TI) || isa<UnwindInst>(TI)))
10766 return EraseInstFromFunction(CI);
10771 return visitCallSite(II);
10774 // InvokeInst simplification
10776 Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
10777 return visitCallSite(&II);
10780 /// isSafeToEliminateVarargsCast - If this cast does not affect the value
10781 /// passed through the varargs area, we can eliminate the use of the cast.
10782 static bool isSafeToEliminateVarargsCast(const CallSite CS,
10783 const CastInst * const CI,
10784 const TargetData * const TD,
10786 if (!CI->isLosslessCast())
10789 // The size of ByVal arguments is derived from the type, so we
10790 // can't change to a type with a different size. If the size were
10791 // passed explicitly we could avoid this check.
10792 if (!CS.paramHasAttr(ix, Attribute::ByVal))
10795 const Type* SrcTy =
10796 cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
10797 const Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
10798 if (!SrcTy->isSized() || !DstTy->isSized())
10800 if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
10805 // visitCallSite - Improvements for call and invoke instructions.
10807 Instruction *InstCombiner::visitCallSite(CallSite CS) {
10808 bool Changed = false;
10810 // If the callee is a constexpr cast of a function, attempt to move the cast
10811 // to the arguments of the call/invoke.
10812 if (transformConstExprCastCall(CS)) return 0;
10814 Value *Callee = CS.getCalledValue();
10816 if (Function *CalleeF = dyn_cast<Function>(Callee))
10817 if (CalleeF->getCallingConv() != CS.getCallingConv()) {
10818 Instruction *OldCall = CS.getInstruction();
10819 // If the call and callee calling conventions don't match, this call must
10820 // be unreachable, as the call is undefined.
10821 new StoreInst(ConstantInt::getTrue(*Context),
10822 UndefValue::get(Type::getInt1PtrTy(*Context)),
10824 // If OldCall does not return void then replaceAllUsesWith undef.
10825 // This allows ValueHandlers and custom metadata to adjust themselves.
10826 if (!OldCall->getType()->isVoidTy())
10827 OldCall->replaceAllUsesWith(UndefValue::get(OldCall->getType()));
10828 if (isa<CallInst>(OldCall)) // Not worth removing an invoke here.
10829 return EraseInstFromFunction(*OldCall);
10833 if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
10834 // This instruction is not reachable, just remove it. We insert a store to
10835 // undef so that we know that this code is not reachable, despite the fact
10836 // that we can't modify the CFG here.
10837 new StoreInst(ConstantInt::getTrue(*Context),
10838 UndefValue::get(Type::getInt1PtrTy(*Context)),
10839 CS.getInstruction());
10841 // If CS does not return void then replaceAllUsesWith undef.
10842 // This allows ValueHandlers and custom metadata to adjust themselves.
10843 if (!CS.getInstruction()->getType()->isVoidTy())
10844 CS.getInstruction()->
10845 replaceAllUsesWith(UndefValue::get(CS.getInstruction()->getType()));
10847 if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
10848 // Don't break the CFG, insert a dummy cond branch.
10849 BranchInst::Create(II->getNormalDest(), II->getUnwindDest(),
10850 ConstantInt::getTrue(*Context), II);
10852 return EraseInstFromFunction(*CS.getInstruction());
10855 if (BitCastInst *BC = dyn_cast<BitCastInst>(Callee))
10856 if (IntrinsicInst *In = dyn_cast<IntrinsicInst>(BC->getOperand(0)))
10857 if (In->getIntrinsicID() == Intrinsic::init_trampoline)
10858 return transformCallThroughTrampoline(CS);
10860 const PointerType *PTy = cast<PointerType>(Callee->getType());
10861 const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
10862 if (FTy->isVarArg()) {
10863 int ix = FTy->getNumParams() + (isa<InvokeInst>(Callee) ? 3 : 1);
10864 // See if we can optimize any arguments passed through the varargs area of the call.
10866 for (CallSite::arg_iterator I = CS.arg_begin()+FTy->getNumParams(),
10867 E = CS.arg_end(); I != E; ++I, ++ix) {
10868 CastInst *CI = dyn_cast<CastInst>(*I);
10869 if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
10870 *I = CI->getOperand(0);
10876 if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
10877 // Inline asm calls cannot throw - mark them 'nounwind'.
10878 CS.setDoesNotThrow();
10882 return Changed ? CS.getInstruction() : 0;
10885 // transformConstExprCastCall - If the callee is a constexpr cast of a function,
10886 // attempt to move the cast to the arguments of the call/invoke.
10888 bool InstCombiner::transformConstExprCastCall(CallSite CS) {
10889 if (!isa<ConstantExpr>(CS.getCalledValue())) return false;
10890 ConstantExpr *CE = cast<ConstantExpr>(CS.getCalledValue());
10891 if (CE->getOpcode() != Instruction::BitCast ||
10892 !isa<Function>(CE->getOperand(0)))
10894 Function *Callee = cast<Function>(CE->getOperand(0));
10895 Instruction *Caller = CS.getInstruction();
10896 const AttrListPtr &CallerPAL = CS.getAttributes();
10898 // Okay, this is a cast from a function to a different type. Unless doing so
10899 // would cause a type conversion of one of our arguments, change this call to
10900 // be a direct call with arguments casted to the appropriate types.
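// Sketch of the rewrite:
//   call void bitcast (void (i8*)* @f to void (i32*)*)(i32* %p)
// becomes
//   %q = bitcast i32* %p to i8*
//   call void @f(i8* %q)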
10902 const FunctionType *FT = Callee->getFunctionType();
10903 const Type *OldRetTy = Caller->getType();
10904 const Type *NewRetTy = FT->getReturnType();
10906 if (isa<StructType>(NewRetTy))
10907 return false; // TODO: Handle multiple return values.
10909 // Check to see if we are changing the return type...
10910 if (OldRetTy != NewRetTy) {
10911 if (Callee->isDeclaration() &&
10912 // Conversion is ok if changing from one pointer type to another or from
10913 // a pointer to an integer of the same size.
10914 !((isa<PointerType>(OldRetTy) || !TD ||
10915 OldRetTy == TD->getIntPtrType(Caller->getContext())) &&
10916 (isa<PointerType>(NewRetTy) || !TD ||
10917 NewRetTy == TD->getIntPtrType(Caller->getContext()))))
10918 return false; // Cannot transform this return value.
10920 if (!Caller->use_empty() &&
10921 // void -> non-void is handled specially
10922 !NewRetTy->isVoidTy() && !CastInst::isCastable(NewRetTy, OldRetTy))
10923 return false; // Cannot transform this return value.
10925 if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
10926 Attributes RAttrs = CallerPAL.getRetAttributes();
10927 if (RAttrs & Attribute::typeIncompatible(NewRetTy))
10928 return false; // Attribute not compatible with transformed value.
10931 // If the callsite is an invoke instruction, and the return value is used by
10932 // a PHI node in a successor, we cannot change the return type of the call
10933 // because there is no place to put the cast instruction (without breaking
10934 // the critical edge). Bail out in this case.
10935 if (!Caller->use_empty())
10936 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
10937 for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
10939 if (PHINode *PN = dyn_cast<PHINode>(*UI))
10940 if (PN->getParent() == II->getNormalDest() ||
10941 PN->getParent() == II->getUnwindDest())
10945 unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
10946 unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);
10948 CallSite::arg_iterator AI = CS.arg_begin();
10949 for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
10950 const Type *ParamTy = FT->getParamType(i);
10951 const Type *ActTy = (*AI)->getType();
10953 if (!CastInst::isCastable(ActTy, ParamTy))
10954 return false; // Cannot transform this parameter value.
10956 if (CallerPAL.getParamAttributes(i + 1)
10957 & Attribute::typeIncompatible(ParamTy))
10958 return false; // Attribute not compatible with transformed value.
10960 // Converting from one pointer type to another or between a pointer and an
10961 // integer of the same size is safe even if we do not have a body.
10962 bool isConvertible = ActTy == ParamTy ||
10963 (TD && ((isa<PointerType>(ParamTy) ||
10964 ParamTy == TD->getIntPtrType(Caller->getContext())) &&
10965 (isa<PointerType>(ActTy) ||
10966 ActTy == TD->getIntPtrType(Caller->getContext()))));
10967 if (Callee->isDeclaration() && !isConvertible) return false;
10970 if (FT->getNumParams() < NumActualArgs && !FT->isVarArg() &&
10971 Callee->isDeclaration())
10972 return false; // Do not delete arguments unless we have a function body.
10974 if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
10975 !CallerPAL.isEmpty())
10976 // In this case we have more arguments than the new function type, but we
10977 // won't be dropping them. Check that these extra arguments have attributes
10978 // that are compatible with being a vararg call argument.
10979 for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
10980 if (CallerPAL.getSlot(i - 1).Index <= FT->getNumParams())
10982 Attributes PAttrs = CallerPAL.getSlot(i - 1).Attrs;
10983 if (PAttrs & Attribute::VarArgsIncompatible)
10987 // Okay, we decided that this is a safe thing to do: go ahead and start
10988 // inserting cast instructions as necessary...
10989 std::vector<Value*> Args;
10990 Args.reserve(NumActualArgs);
10991 SmallVector<AttributeWithIndex, 8> attrVec;
10992 attrVec.reserve(NumCommonArgs);
10994 // Get any return attributes.
10995 Attributes RAttrs = CallerPAL.getRetAttributes();
10997 // If the return value is not being used, the type may not be compatible
10998 // with the existing attributes. Wipe out any problematic attributes.
10999 RAttrs &= ~Attribute::typeIncompatible(NewRetTy);
11001 // Add the new return attributes.
11003 attrVec.push_back(AttributeWithIndex::get(0, RAttrs));
11005 AI = CS.arg_begin();
11006 for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
11007 const Type *ParamTy = FT->getParamType(i);
11008 if ((*AI)->getType() == ParamTy) {
11009 Args.push_back(*AI);
11011 Instruction::CastOps opcode = CastInst::getCastOpcode(*AI,
11012 false, ParamTy, false);
11013 Args.push_back(Builder->CreateCast(opcode, *AI, ParamTy, "tmp"));
11016 // Add any parameter attributes.
11017 if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
11018 attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
11021 // If the function takes more arguments than the call was taking, add them now.
11023 for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
11024 Args.push_back(Constant::getNullValue(FT->getParamType(i)));
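// Illustrative example (hypothetical signature): if the prototype is
// void f(i32, i8*) but the call site only supplied the i32, the missing i8*
// slot is filled with a null pointer, the zero value of that parameter type.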
11026 // If we are removing arguments to the function, emit an obnoxious warning.
11027 if (FT->getNumParams() < NumActualArgs) {
11028 if (!FT->isVarArg()) {
11029 errs() << "WARNING: While resolving call to function '"
11030 << Callee->getName() << "' arguments were dropped!\n";
11032 // Add all of the arguments in their promoted form to the arg list.
11033 for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
11034 const Type *PTy = getPromotedType((*AI)->getType());
11035 if (PTy != (*AI)->getType()) {
11036 // Must promote to pass through va_arg area!
11037 Instruction::CastOps opcode =
11038 CastInst::getCastOpcode(*AI, false, PTy, false);
11039 Args.push_back(Builder->CreateCast(opcode, *AI, PTy, "tmp"));
11041 Args.push_back(*AI);
11044 // Add any parameter attributes.
11045 if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
11046 attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
11051 if (Attributes FnAttrs = CallerPAL.getFnAttributes())
11052 attrVec.push_back(AttributeWithIndex::get(~0, FnAttrs));
11054 if (NewRetTy->isVoidTy())
11055 Caller->setName(""); // Void type should not have a name.
11057 const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec.begin(),
11061 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
11062 NC = InvokeInst::Create(Callee, II->getNormalDest(), II->getUnwindDest(),
11063 Args.begin(), Args.end(),
11064 Caller->getName(), Caller);
11065 cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
11066 cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
11068 NC = CallInst::Create(Callee, Args.begin(), Args.end(),
11069 Caller->getName(), Caller);
11070 CallInst *CI = cast<CallInst>(Caller);
11071 if (CI->isTailCall())
11072 cast<CallInst>(NC)->setTailCall();
11073 cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
11074 cast<CallInst>(NC)->setAttributes(NewCallerPAL);
11077 // Insert a cast of the return type as necessary.
11079 if (OldRetTy != NV->getType() && !Caller->use_empty()) {
11080 if (!NV->getType()->isVoidTy()) {
11081 Instruction::CastOps opcode = CastInst::getCastOpcode(NC, false,
11083 NV = NC = CastInst::Create(opcode, NC, OldRetTy, "tmp");
11085 // If this is an invoke instruction, we should insert it after the first
11086 // non-PHI instruction in the normal successor block.
11087 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
11088 BasicBlock::iterator I = II->getNormalDest()->getFirstNonPHI();
11089 InsertNewInstBefore(NC, *I);
11091 // Otherwise, it's a call; just insert the cast right after the call instruction.
11092 InsertNewInstBefore(NC, *Caller);
11094 Worklist.AddUsersToWorkList(*Caller);
11096 NV = UndefValue::get(Caller->getType());
11101 if (!Caller->use_empty())
11102 Caller->replaceAllUsesWith(NV);
11104 EraseInstFromFunction(*Caller);
11108 // transformCallThroughTrampoline - Turn a call to a function created by the
11109 // init_trampoline intrinsic into a direct call to the underlying function.
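// Illustrative sketch (hypothetical IR, not a real test case): if %tramp was
// produced by llvm.init.trampoline over a function @f, one of whose
// parameters carries the 'nest' attribute (the first, in this sketch), then
// an indirect call through a bitcast of %tramp, e.g.
//   call void %fp(i32 %x)
// can become a direct call with the chain value spliced back into the
// argument list:
//   call void @f(i8* nest %env, i32 %x)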
11111 Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) {
11112 Value *Callee = CS.getCalledValue();
11113 const PointerType *PTy = cast<PointerType>(Callee->getType());
11114 const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
11115 const AttrListPtr &Attrs = CS.getAttributes();
11117 // If the call already has the 'nest' attribute somewhere then give up -
11118 // otherwise 'nest' would occur twice after splicing in the chain.
11119 if (Attrs.hasAttrSomewhere(Attribute::Nest))
11122 IntrinsicInst *Tramp =
11123 cast<IntrinsicInst>(cast<BitCastInst>(Callee)->getOperand(0));
11125 Function *NestF = cast<Function>(Tramp->getOperand(2)->stripPointerCasts());
11126 const PointerType *NestFPTy = cast<PointerType>(NestF->getType());
11127 const FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());
11129 const AttrListPtr &NestAttrs = NestF->getAttributes();
11130 if (!NestAttrs.isEmpty()) {
11131 unsigned NestIdx = 1;
11132 const Type *NestTy = 0;
11133 Attributes NestAttr = Attribute::None;
11135 // Look for a parameter marked with the 'nest' attribute.
11136 for (FunctionType::param_iterator I = NestFTy->param_begin(),
11137 E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
11138 if (NestAttrs.paramHasAttr(NestIdx, Attribute::Nest)) {
11139 // Record the parameter type and any other attributes.
11141 NestAttr = NestAttrs.getParamAttributes(NestIdx);
11146 Instruction *Caller = CS.getInstruction();
11147 std::vector<Value*> NewArgs;
11148 NewArgs.reserve(unsigned(CS.arg_end()-CS.arg_begin())+1);
11150 SmallVector<AttributeWithIndex, 8> NewAttrs;
11151 NewAttrs.reserve(Attrs.getNumSlots() + 1);
11153 // Insert the nest argument into the call argument list, which may
11154 // mean appending it. Likewise for attributes.
11156 // Add any result attributes.
11157 if (Attributes Attr = Attrs.getRetAttributes())
11158 NewAttrs.push_back(AttributeWithIndex::get(0, Attr));
11162 CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
11164 if (Idx == NestIdx) {
11165 // Add the chain argument and attributes.
11166 Value *NestVal = Tramp->getOperand(3);
11167 if (NestVal->getType() != NestTy)
11168 NestVal = new BitCastInst(NestVal, NestTy, "nest", Caller);
11169 NewArgs.push_back(NestVal);
11170 NewAttrs.push_back(AttributeWithIndex::get(NestIdx, NestAttr));
11176 // Add the original argument and attributes.
11177 NewArgs.push_back(*I);
11178 if (Attributes Attr = Attrs.getParamAttributes(Idx))
11180 (AttributeWithIndex::get(Idx + (Idx >= NestIdx), Attr));
11186 // Add any function attributes.
11187 if (Attributes Attr = Attrs.getFnAttributes())
11188 NewAttrs.push_back(AttributeWithIndex::get(~0, Attr));
11190 // The trampoline may have been bitcast to a bogus type (FTy).
11191 // Handle this by synthesizing a new function type, equal to FTy
11192 // with the chain parameter inserted.
11194 std::vector<const Type*> NewTypes;
11195 NewTypes.reserve(FTy->getNumParams()+1);
11197 // Insert the chain's type into the list of parameter types, which may
11198 // mean appending it.
11201 FunctionType::param_iterator I = FTy->param_begin(),
11202 E = FTy->param_end();
11205 if (Idx == NestIdx)
11206 // Add the chain's type.
11207 NewTypes.push_back(NestTy);
11212 // Add the original type.
11213 NewTypes.push_back(*I);
11219 // Replace the trampoline call with a direct call. Let the generic
11220 // code sort out any function type mismatches.
11221 FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
11223 Constant *NewCallee =
11224 NestF->getType() == PointerType::getUnqual(NewFTy) ?
11225 NestF : ConstantExpr::getBitCast(NestF,
11226 PointerType::getUnqual(NewFTy));
11227 const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs.begin(),
11230 Instruction *NewCaller;
11231 if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
11232 NewCaller = InvokeInst::Create(NewCallee,
11233 II->getNormalDest(), II->getUnwindDest(),
11234 NewArgs.begin(), NewArgs.end(),
11235 Caller->getName(), Caller);
11236 cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
11237 cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
11239 NewCaller = CallInst::Create(NewCallee, NewArgs.begin(), NewArgs.end(),
11240 Caller->getName(), Caller);
11241 if (cast<CallInst>(Caller)->isTailCall())
11242 cast<CallInst>(NewCaller)->setTailCall();
11243 cast<CallInst>(NewCaller)->
11244 setCallingConv(cast<CallInst>(Caller)->getCallingConv());
11245 cast<CallInst>(NewCaller)->setAttributes(NewPAL);
11247 if (!Caller->getType()->isVoidTy())
11248 Caller->replaceAllUsesWith(NewCaller);
11249 Caller->eraseFromParent();
11250 Worklist.Remove(Caller);
11255 // Replace the trampoline call with a direct call. Since there is no 'nest'
11256 // parameter, there is no need to adjust the argument list. Let the generic
11257 // code sort out any function type mismatches.
11258 Constant *NewCallee =
11259 NestF->getType() == PTy ? NestF :
11260 ConstantExpr::getBitCast(NestF, PTy);
11261 CS.setCalledFunction(NewCallee);
11262 return CS.getInstruction();
11265 /// FoldPHIArgBinOpIntoPHI - If we have something like phi [add (a,b), add(a,c)]
11266 /// and if a/b/c and the add's all have a single use, turn this into a phi
11267 /// and a single binop.
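/// An illustrative example (hypothetical IR):
///   %a1 = add i32 %x, %b                            ; in %pred1, single use
///   %a2 = add i32 %x, %c                            ; in %pred2, single use
///   %r  = phi i32 [ %a1, %pred1 ], [ %a2, %pred2 ]
/// becomes
///   %p  = phi i32 [ %b, %pred1 ], [ %c, %pred2 ]
///   %r  = add i32 %x, %p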
11268 Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) {
11269 Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
11270 assert(isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst));
11271 unsigned Opc = FirstInst->getOpcode();
11272 Value *LHSVal = FirstInst->getOperand(0);
11273 Value *RHSVal = FirstInst->getOperand(1);
11275 const Type *LHSType = LHSVal->getType();
11276 const Type *RHSType = RHSVal->getType();
11278 // Scan to see if all operands are the same opcode, and all have one use.
11279 for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
11280 Instruction *I = dyn_cast<Instruction>(PN.getIncomingValue(i));
11281 if (!I || I->getOpcode() != Opc || !I->hasOneUse() ||
11282 // Verify type of the LHS matches so we don't fold cmp's of different
11283 // types or GEP's with different index types.
11284 I->getOperand(0)->getType() != LHSType ||
11285 I->getOperand(1)->getType() != RHSType)
11288 // If they are CmpInst instructions, check their predicates
11289 if (Opc == Instruction::ICmp || Opc == Instruction::FCmp)
11290 if (cast<CmpInst>(I)->getPredicate() !=
11291 cast<CmpInst>(FirstInst)->getPredicate())
11294 // Keep track of which operand needs a phi node.
11295 if (I->getOperand(0) != LHSVal) LHSVal = 0;
11296 if (I->getOperand(1) != RHSVal) RHSVal = 0;
11299 // If both LHS and RHS would need a PHI, don't do this transformation,
11300 // because it would increase the number of PHIs entering the block,
11301 // which leads to higher register pressure. This is especially
11302 // bad when the PHIs are in the header of a loop.
11303 if (!LHSVal && !RHSVal)
11306 // Otherwise, this is safe to transform!
11308 Value *InLHS = FirstInst->getOperand(0);
11309 Value *InRHS = FirstInst->getOperand(1);
11310 PHINode *NewLHS = 0, *NewRHS = 0;
11312 NewLHS = PHINode::Create(LHSType,
11313 FirstInst->getOperand(0)->getName() + ".pn");
11314 NewLHS->reserveOperandSpace(PN.getNumOperands()/2);
11315 NewLHS->addIncoming(InLHS, PN.getIncomingBlock(0));
11316 InsertNewInstBefore(NewLHS, PN);
11321 NewRHS = PHINode::Create(RHSType,
11322 FirstInst->getOperand(1)->getName() + ".pn");
11323 NewRHS->reserveOperandSpace(PN.getNumOperands()/2);
11324 NewRHS->addIncoming(InRHS, PN.getIncomingBlock(0));
11325 InsertNewInstBefore(NewRHS, PN);
11329 // Add all operands to the new PHIs.
11330 if (NewLHS || NewRHS) {
11331 for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
11332 Instruction *InInst = cast<Instruction>(PN.getIncomingValue(i));
11334 Value *NewInLHS = InInst->getOperand(0);
11335 NewLHS->addIncoming(NewInLHS, PN.getIncomingBlock(i));
11338 Value *NewInRHS = InInst->getOperand(1);
11339 NewRHS->addIncoming(NewInRHS, PN.getIncomingBlock(i));
11344 if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst))
11345 return BinaryOperator::Create(BinOp->getOpcode(), LHSVal, RHSVal);
11346 CmpInst *CIOp = cast<CmpInst>(FirstInst);
11347 return CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
11351 Instruction *InstCombiner::FoldPHIArgGEPIntoPHI(PHINode &PN) {
11352 GetElementPtrInst *FirstInst =cast<GetElementPtrInst>(PN.getIncomingValue(0));
11354 SmallVector<Value*, 16> FixedOperands(FirstInst->op_begin(),
11355 FirstInst->op_end());
11356 // This is true if all GEP bases are allocas and if all indices into them are constants.
11358 bool AllBasePointersAreAllocas = true;
11360 // We don't want to replace this phi if the replacement would require
11361 // more than one phi, which leads to higher register pressure. This is
11362 // especially bad when the PHIs are in the header of a loop.
11363 bool NeededPhi = false;
11365 // Scan to see if all operands are the same opcode, and all have one use.
11366 for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
11367 GetElementPtrInst *GEP= dyn_cast<GetElementPtrInst>(PN.getIncomingValue(i));
11368 if (!GEP || !GEP->hasOneUse() || GEP->getType() != FirstInst->getType() ||
11369 GEP->getNumOperands() != FirstInst->getNumOperands())
11372 // Keep track of whether or not all GEPs are of alloca pointers.
11373 if (AllBasePointersAreAllocas &&
11374 (!isa<AllocaInst>(GEP->getOperand(0)) ||
11375 !GEP->hasAllConstantIndices()))
11376 AllBasePointersAreAllocas = false;
11378 // Compare the operand lists.
11379 for (unsigned op = 0, e = FirstInst->getNumOperands(); op != e; ++op) {
11380 if (FirstInst->getOperand(op) == GEP->getOperand(op))
11383 // Don't merge two GEPs when two operands differ (introducing phi nodes)
11384 // if one of the PHIs has a constant for the index. The index may be
11385 // substantially cheaper to compute for the constants, so making it a
11386 // variable index could pessimize the path. This also handles the case
11387 // for struct indices, which must always be constant.
11388 if (isa<ConstantInt>(FirstInst->getOperand(op)) ||
11389 isa<ConstantInt>(GEP->getOperand(op)))
11392 if (FirstInst->getOperand(op)->getType() !=GEP->getOperand(op)->getType())
11395 // If we already needed a PHI for an earlier operand, and another operand
11396 // also requires a PHI, we'd be introducing more PHIs than we're
11397 // eliminating, which increases register pressure on entry to the PHI's block.
11402 FixedOperands[op] = 0; // Needs a PHI.
11407 // If all of the base pointers of the PHI'd GEPs are from allocas, don't
11408 // bother doing this transformation. At best, this will just save a bit of
11409 // offset calculation, but all the predecessors will have to materialize the
11410 // stack address into a register anyway. We'd actually rather *clone* the
11411 // load up into the predecessors so that we have a load of a gep of an alloca,
11412 // which can usually all be folded into the load.
11413 if (AllBasePointersAreAllocas)
11416 // Otherwise, this is safe to transform. Insert PHI nodes for each operand
11417 // that is variable.
11418 SmallVector<PHINode*, 16> OperandPhis(FixedOperands.size());
11420 bool HasAnyPHIs = false;
11421 for (unsigned i = 0, e = FixedOperands.size(); i != e; ++i) {
11422 if (FixedOperands[i]) continue; // operand doesn't need a phi.
11423 Value *FirstOp = FirstInst->getOperand(i);
11424 PHINode *NewPN = PHINode::Create(FirstOp->getType(),
11425 FirstOp->getName()+".pn");
11426 InsertNewInstBefore(NewPN, PN);
11428 NewPN->reserveOperandSpace(e);
11429 NewPN->addIncoming(FirstOp, PN.getIncomingBlock(0));
11430 OperandPhis[i] = NewPN;
11431 FixedOperands[i] = NewPN;
11436 // Add all operands to the new PHIs.
11438 for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
11439 GetElementPtrInst *InGEP =cast<GetElementPtrInst>(PN.getIncomingValue(i));
11440 BasicBlock *InBB = PN.getIncomingBlock(i);
11442 for (unsigned op = 0, e = OperandPhis.size(); op != e; ++op)
11443 if (PHINode *OpPhi = OperandPhis[op])
11444 OpPhi->addIncoming(InGEP->getOperand(op), InBB);
11448 Value *Base = FixedOperands[0];
11449 return cast<GEPOperator>(FirstInst)->isInBounds() ?
11450 GetElementPtrInst::CreateInBounds(Base, FixedOperands.begin()+1,
11451 FixedOperands.end()) :
11452 GetElementPtrInst::Create(Base, FixedOperands.begin()+1,
11453 FixedOperands.end());
11457 /// isSafeAndProfitableToSinkLoad - Return true if we know that it is safe to
11458 /// sink the load out of the block that defines it. This means that it must be
11459 /// obvious the value of the load is not changed from the point of the load to
11460 /// the end of the block it is in.
11462 /// Finally, it is safe, but not profitable, to sink a load targeting a
11463 /// non-address-taken alloca. Doing so will cause us to not promote the alloca to a register.
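/// (Illustrative reasoning: sinking such loads typically means phi'ing the
/// alloca address from several predecessors, and once the address flows
/// through a phi the alloca no longer looks like a simple load/store-only
/// object to mem2reg, so the promotion would be lost.)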
11465 static bool isSafeAndProfitableToSinkLoad(LoadInst *L) {
11466 BasicBlock::iterator BBI = L, E = L->getParent()->end();
11468 for (++BBI; BBI != E; ++BBI)
11469 if (BBI->mayWriteToMemory())
11472 // Check for non-address taken alloca. If not address-taken already, it isn't
11473 // profitable to do this xform.
11474 if (AllocaInst *AI = dyn_cast<AllocaInst>(L->getOperand(0))) {
11475 bool isAddressTaken = false;
11476 for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
11478 if (isa<LoadInst>(UI)) continue;
11479 if (StoreInst *SI = dyn_cast<StoreInst>(*UI)) {
11480 // If storing TO the alloca, then the address isn't taken.
11481 if (SI->getOperand(1) == AI) continue;
11483 isAddressTaken = true;
11487 if (!isAddressTaken && AI->isStaticAlloca())
11491 // If this load is a load from a GEP with a constant offset from an alloca,
11492 // then we don't want to sink it. In its present form, it will be
11493 // load [constant stack offset]. Sinking it will cause us to have to
11494 // materialize the stack addresses in each predecessor in a register only to
11495 // do a shared load from register in the successor.
11496 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(L->getOperand(0)))
11497 if (AllocaInst *AI = dyn_cast<AllocaInst>(GEP->getOperand(0)))
11498 if (AI->isStaticAlloca() && GEP->hasAllConstantIndices())
11504 Instruction *InstCombiner::FoldPHIArgLoadIntoPHI(PHINode &PN) {
11505 LoadInst *FirstLI = cast<LoadInst>(PN.getIncomingValue(0));
11507 // When processing loads, we need to propagate two bits of information to the
11508 // sunk load: whether it is volatile, and what its alignment is. We currently
11509 // don't sink loads when some have their alignment specified and some don't.
11510 // visitLoadInst will propagate an alignment onto the load when TD is around,
11511 // and if TD isn't around, we can't handle the mixed case.
11512 bool isVolatile = FirstLI->isVolatile();
11513 unsigned LoadAlignment = FirstLI->getAlignment();
11515 // We can't sink the load if the loaded value could be modified between the
11516 // load and the PHI.
11517 if (FirstLI->getParent() != PN.getIncomingBlock(0) ||
11518 !isSafeAndProfitableToSinkLoad(FirstLI))
11521 // If the PHI is of volatile loads and the load block has multiple
11522 // successors, sinking it would remove a load of the volatile value from
11523 // the path through the other successor.
11525 FirstLI->getParent()->getTerminator()->getNumSuccessors() != 1)
11528 // Check to see if all arguments are the same operation.
11529 for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
11530 LoadInst *LI = dyn_cast<LoadInst>(PN.getIncomingValue(i));
11531 if (!LI || !LI->hasOneUse())
11534 // We can't sink the load if the loaded value could be modified between
11535 // the load and the PHI.
11536 if (LI->isVolatile() != isVolatile ||
11537 LI->getParent() != PN.getIncomingBlock(i) ||
11538 !isSafeAndProfitableToSinkLoad(LI))
11541 // If some of the loads have an alignment specified but not all of them,
11542 // we can't do the transformation.
11543 if ((LoadAlignment != 0) != (LI->getAlignment() != 0))
11546 LoadAlignment = std::min(LoadAlignment, LI->getAlignment());
11548 // If the PHI is of volatile loads and the load block has multiple
11549 // successors, sinking it would remove a load of the volatile value from
11550 // the path through the other successor.
11552 LI->getParent()->getTerminator()->getNumSuccessors() != 1)
11556 // Okay, they are all the same operation. Create a new PHI node of the
11557 // correct type, and PHI together all of the LHS's of the instructions.
11558 PHINode *NewPN = PHINode::Create(FirstLI->getOperand(0)->getType(),
11559 PN.getName()+".in");
11560 NewPN->reserveOperandSpace(PN.getNumOperands()/2);
11562 Value *InVal = FirstLI->getOperand(0);
11563 NewPN->addIncoming(InVal, PN.getIncomingBlock(0));
11565 // Add all operands to the new PHI.
11566 for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
11567 Value *NewInVal = cast<LoadInst>(PN.getIncomingValue(i))->getOperand(0);
11568 if (NewInVal != InVal)
11570 NewPN->addIncoming(NewInVal, PN.getIncomingBlock(i));
11575 // The new PHI unions all of the same values together. This is really
11576 // common, so we handle it intelligently here for compile-time speed.
11580 InsertNewInstBefore(NewPN, PN);
11584 // If this was a volatile load that we are merging, make sure to loop through
11585 // and mark all the input loads as non-volatile. If we don't do this, we will
11586 // insert a new volatile load and the old ones will not be deletable.
11588 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
11589 cast<LoadInst>(PN.getIncomingValue(i))->setVolatile(false);
11591 return new LoadInst(PhiVal, "", isVolatile, LoadAlignment);
11596 /// FoldPHIArgOpIntoPHI - If all operands to a PHI node are the same "unary"
11597 /// operator and they all are only used by the PHI, PHI together their
11598 /// inputs, and do the operation once, to the result of the PHI.
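/// An illustrative example (hypothetical IR): a phi of two single-use casts
///   %s1 = sext i8 %x to i32                          ; in %pred1
///   %s2 = sext i8 %y to i32                          ; in %pred2
///   %r  = phi i32 [ %s1, %pred1 ], [ %s2, %pred2 ]
/// can become
///   %p  = phi i8 [ %x, %pred1 ], [ %y, %pred2 ]
///   %r  = sext i8 %p to i32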
11599 Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
11600 Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
11602 if (isa<GetElementPtrInst>(FirstInst))
11603 return FoldPHIArgGEPIntoPHI(PN);
11604 if (isa<LoadInst>(FirstInst))
11605 return FoldPHIArgLoadIntoPHI(PN);
11607 // Scan the instruction, looking for input operations that can be folded away.
11608 // If all input operands to the phi are the same instruction (e.g. a cast from
11609 // the same type or "+42") we can pull the operation through the PHI, reducing
11610 // code size and simplifying code.
11611 Constant *ConstantOp = 0;
11612 const Type *CastSrcTy = 0;
11614 if (isa<CastInst>(FirstInst)) {
11615 CastSrcTy = FirstInst->getOperand(0)->getType();
11617 // Be careful about transforming integer PHIs. We don't want to pessimize
11618 // the code by turning an i32 into an i1293.
11619 if (isa<IntegerType>(PN.getType()) && isa<IntegerType>(CastSrcTy)) {
11620 if (!ShouldChangeType(PN.getType(), CastSrcTy, TD))
11623 } else if (isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst)) {
11624 // Can fold binop, compare or shift here if the RHS is a constant,
11625 // otherwise call FoldPHIArgBinOpIntoPHI.
11626 ConstantOp = dyn_cast<Constant>(FirstInst->getOperand(1));
11627 if (ConstantOp == 0)
11628 return FoldPHIArgBinOpIntoPHI(PN);
11630 return 0; // Cannot fold this operation.
11633 // Check to see if all arguments are the same operation.
11634 for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
11635 Instruction *I = dyn_cast<Instruction>(PN.getIncomingValue(i));
11636 if (I == 0 || !I->hasOneUse() || !I->isSameOperationAs(FirstInst))
11639 if (I->getOperand(0)->getType() != CastSrcTy)
11640 return 0; // Cast operation must match.
11641 } else if (I->getOperand(1) != ConstantOp) {
11646 // Okay, they are all the same operation. Create a new PHI node of the
11647 // correct type, and PHI together all of the LHS's of the instructions.
11648 PHINode *NewPN = PHINode::Create(FirstInst->getOperand(0)->getType(),
11649 PN.getName()+".in");
11650 NewPN->reserveOperandSpace(PN.getNumOperands()/2);
11652 Value *InVal = FirstInst->getOperand(0);
11653 NewPN->addIncoming(InVal, PN.getIncomingBlock(0));
11655 // Add all operands to the new PHI.
11656 for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
11657 Value *NewInVal = cast<Instruction>(PN.getIncomingValue(i))->getOperand(0);
11658 if (NewInVal != InVal)
11660 NewPN->addIncoming(NewInVal, PN.getIncomingBlock(i));
11665 // The new PHI unions all of the same values together. This is really
11666 // common, so we handle it intelligently here for compile-time speed.
11670 InsertNewInstBefore(NewPN, PN);
11674 // Insert and return the new operation.
11675 if (CastInst *FirstCI = dyn_cast<CastInst>(FirstInst))
11676 return CastInst::Create(FirstCI->getOpcode(), PhiVal, PN.getType());
11678 if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst))
11679 return BinaryOperator::Create(BinOp->getOpcode(), PhiVal, ConstantOp);
11681 CmpInst *CIOp = cast<CmpInst>(FirstInst);
11682 return CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
11683 PhiVal, ConstantOp);
11686 /// DeadPHICycle - Return true if this PHI node is only used by a PHI node cycle that is dead.
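/// An illustrative example: %a = phi i32 [ %b, ... ] and %b = phi i32 [ %a, ... ]
/// form such a cycle when neither phi has any other user.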
11688 static bool DeadPHICycle(PHINode *PN,
11689 SmallPtrSet<PHINode*, 16> &PotentiallyDeadPHIs) {
11690 if (PN->use_empty()) return true;
11691 if (!PN->hasOneUse()) return false;
11693 // Remember this node, and if we find the cycle, return.
11694 if (!PotentiallyDeadPHIs.insert(PN))
11697 // Don't scan crazily complex things.
11698 if (PotentiallyDeadPHIs.size() == 16)
11701 if (PHINode *PU = dyn_cast<PHINode>(PN->use_back()))
11702 return DeadPHICycle(PU, PotentiallyDeadPHIs);
11707 /// PHIsEqualValue - Return true if this phi node is always equal to
11708 /// NonPhiInVal. This happens with mutually cyclic phi nodes like:
11709 /// z = some value; x = phi (y, z); y = phi (x, z)
11710 static bool PHIsEqualValue(PHINode *PN, Value *NonPhiInVal,
11711 SmallPtrSet<PHINode*, 16> &ValueEqualPHIs) {
11712 // See if we already saw this PHI node.
11713 if (!ValueEqualPHIs.insert(PN))
11716 // Don't scan crazily complex things.
11717 if (ValueEqualPHIs.size() == 16)
11720 // Scan the operands to see if they are either phi nodes or are equal to the value.
11722 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
11723 Value *Op = PN->getIncomingValue(i);
11724 if (PHINode *OpPN = dyn_cast<PHINode>(Op)) {
11725 if (!PHIsEqualValue(OpPN, NonPhiInVal, ValueEqualPHIs))
11727 } else if (Op != NonPhiInVal)
11736 struct PHIUsageRecord {
11737 unsigned PHIId; // The ID # of the PHI (something deterministic to sort on)
11738 unsigned Shift; // The amount shifted.
11739 Instruction *Inst; // The trunc instruction.
11741 PHIUsageRecord(unsigned pn, unsigned Sh, Instruction *User)
11742 : PHIId(pn), Shift(Sh), Inst(User) {}
11744 bool operator<(const PHIUsageRecord &RHS) const {
11745 if (PHIId < RHS.PHIId) return true;
11746 if (PHIId > RHS.PHIId) return false;
11747 if (Shift < RHS.Shift) return true;
11748 if (Shift > RHS.Shift) return false;
11749 return Inst->getType()->getPrimitiveSizeInBits() <
11750 RHS.Inst->getType()->getPrimitiveSizeInBits();
11754 struct LoweredPHIRecord {
11755 PHINode *PN; // The PHI that was lowered.
11756 unsigned Shift; // The amount shifted.
11757 unsigned Width; // The width extracted.
11759 LoweredPHIRecord(PHINode *pn, unsigned Sh, const Type *Ty)
11760 : PN(pn), Shift(Sh), Width(Ty->getPrimitiveSizeInBits()) {}
11762 // Ctor form used by DenseMap.
11763 LoweredPHIRecord(PHINode *pn, unsigned Sh)
11764 : PN(pn), Shift(Sh), Width(0) {}
11770 struct DenseMapInfo<LoweredPHIRecord> {
11771 static inline LoweredPHIRecord getEmptyKey() {
11772 return LoweredPHIRecord(0, 0);
11774 static inline LoweredPHIRecord getTombstoneKey() {
11775 return LoweredPHIRecord(0, 1);
11777 static unsigned getHashValue(const LoweredPHIRecord &Val) {
11778 return DenseMapInfo<PHINode*>::getHashValue(Val.PN) ^ (Val.Shift>>3) ^
11781 static bool isEqual(const LoweredPHIRecord &LHS,
11782 const LoweredPHIRecord &RHS) {
11783 return LHS.PN == RHS.PN && LHS.Shift == RHS.Shift &&
11784 LHS.Width == RHS.Width;
11788 struct isPodLike<LoweredPHIRecord> { static const bool value = true; };
11792 /// SliceUpIllegalIntegerPHI - This is an integer PHI and we know that it has an
11793 /// illegal type: see if it is only used by trunc or trunc(lshr) operations. If
11794 /// so, we split the PHI into the various pieces being extracted. This sort of
11795 /// thing is introduced when SROA promotes an aggregate to large integer values.
11797 /// TODO: The user of the trunc may be a bitcast to float/double/vector or an
11798 /// inttoptr. We should produce new PHIs in the right type.
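/// An illustrative sketch (hypothetical IR on a target where i64 is illegal):
/// an i64 phi whose only uses are
///   %lo = trunc i64 %p to i32
///   %t  = lshr i64 %p, 32
///   %hi = trunc i64 %t to i32
/// can be rewritten as two separate i32 phis, one feeding %lo and one feeding
/// %hi, with the lshr/trunc work done in the predecessors instead.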
11800 Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) {
11801 // PHIUsers - Keep track of all of the truncated values extracted from a set
11802 // of PHIs, along with their offset. These are the things we want to rewrite.
11803 SmallVector<PHIUsageRecord, 16> PHIUsers;
11805 // PHIs are often mutually cyclic, so we keep track of the whole set of PHI
11806 // nodes that values are extracted from.  PHIsInspected is a set we use to
11807 // avoid revisiting PHIs, and PHIsToSlice is an ordered list of PHIs whose
11808 // uses we still need to check (to ensure they are all extracts).
11809 SmallVector<PHINode*, 8> PHIsToSlice;
11810 SmallPtrSet<PHINode*, 8> PHIsInspected;
11812 PHIsToSlice.push_back(&FirstPhi);
11813 PHIsInspected.insert(&FirstPhi);
11815 for (unsigned PHIId = 0; PHIId != PHIsToSlice.size(); ++PHIId) {
11816 PHINode *PN = PHIsToSlice[PHIId];
11818 // Scan the input list of the PHI. If any input is an invoke, and if the
11819 // input is defined in the predecessor, then we won't be able to split the
11820 // critical edge which is required to insert a truncate. Because of this, we have to bail out.
11822 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
11823 InvokeInst *II = dyn_cast<InvokeInst>(PN->getIncomingValue(i));
11824 if (II == 0) continue;
11825 if (II->getParent() != PN->getIncomingBlock(i))
11828 // If we have a phi, and if it's directly in the predecessor, then we have
11829 // a critical edge where we need to put the truncate. Since we can't
11830 // split the edge in instcombine, we have to bail out.
11835 for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end();
11837 Instruction *User = cast<Instruction>(*UI);
11839 // If the user is a PHI, inspect its uses recursively.
11840 if (PHINode *UserPN = dyn_cast<PHINode>(User)) {
11841 if (PHIsInspected.insert(UserPN))
11842 PHIsToSlice.push_back(UserPN);
11846 // Truncates are always ok.
11847 if (isa<TruncInst>(User)) {
11848 PHIUsers.push_back(PHIUsageRecord(PHIId, 0, User));
11852 // Otherwise it must be a lshr which can only be used by one trunc.
11853 if (User->getOpcode() != Instruction::LShr ||
11854 !User->hasOneUse() || !isa<TruncInst>(User->use_back()) ||
11855 !isa<ConstantInt>(User->getOperand(1)))
11858 unsigned Shift = cast<ConstantInt>(User->getOperand(1))->getZExtValue();
11859 PHIUsers.push_back(PHIUsageRecord(PHIId, Shift, User->use_back()));
11863 // If we have no users, they must all be self uses; just nuke the PHI.
11864 if (PHIUsers.empty())
11865 return ReplaceInstUsesWith(FirstPhi, UndefValue::get(FirstPhi.getType()));
11867 // If this phi node is transformable, create new PHIs for all the pieces
11868 // extracted out of it. First, sort the users by their offset and size.
11869 array_pod_sort(PHIUsers.begin(), PHIUsers.end());
11871 DEBUG(errs() << "SLICING UP PHI: " << FirstPhi << '\n';
11872 for (unsigned i = 1, e = PHIsToSlice.size(); i != e; ++i)
11873 errs() << "AND USER PHI #" << i << ": " << *PHIsToSlice[i] <<'\n';
11876 // PredValues - This is a temporary used when rewriting PHI nodes. It is
11877 // hoisted out here to avoid construction/destruction thrashing.
11878 DenseMap<BasicBlock*, Value*> PredValues;
11880 // ExtractedVals - Each new PHI we introduce is saved here so we don't
11881 // introduce redundant PHIs.
11882 DenseMap<LoweredPHIRecord, PHINode*> ExtractedVals;
11884 for (unsigned UserI = 0, UserE = PHIUsers.size(); UserI != UserE; ++UserI) {
11885 unsigned PHIId = PHIUsers[UserI].PHIId;
11886 PHINode *PN = PHIsToSlice[PHIId];
11887 unsigned Offset = PHIUsers[UserI].Shift;
11888 const Type *Ty = PHIUsers[UserI].Inst->getType();
11892 // If we've already lowered a user like this, reuse the previously lowered value.
11894 if ((EltPHI = ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)]) == 0) {
11896 // Otherwise, create the new PHI node for this user.
11897 EltPHI = PHINode::Create(Ty, PN->getName()+".off"+Twine(Offset), PN);
11898 assert(EltPHI->getType() != PN->getType() &&
11899 "Truncate didn't shrink phi?");
11901 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
11902 BasicBlock *Pred = PN->getIncomingBlock(i);
11903 Value *&PredVal = PredValues[Pred];
11905 // If we already have a value for this predecessor, reuse it.
11907 EltPHI->addIncoming(PredVal, Pred);
11911 // Handle the PHI self-reuse case.
11912 Value *InVal = PN->getIncomingValue(i);
11915 EltPHI->addIncoming(PredVal, Pred);
11919 if (PHINode *InPHI = dyn_cast<PHINode>(PN)) {
11920 // If the incoming value was a PHI, and if it was one of the PHIs we
11921 // already rewrote, just use the lowered value.
11922 if (Value *Res = ExtractedVals[LoweredPHIRecord(InPHI, Offset, Ty)]) {
11924 EltPHI->addIncoming(PredVal, Pred);
11929 // Otherwise, do an extract in the predecessor.
11930 Builder->SetInsertPoint(Pred, Pred->getTerminator());
11931 Value *Res = InVal;
11933 Res = Builder->CreateLShr(Res, ConstantInt::get(InVal->getType(),
11934 Offset), "extract");
11935 Res = Builder->CreateTrunc(Res, Ty, "extract.t");
11937 EltPHI->addIncoming(Res, Pred);
11939 // If the incoming value was a PHI, and if it was one of the PHIs we are
11940 // rewriting, we will ultimately delete the code we inserted. This
11941 // means we need to revisit that PHI to make sure we extract out the needed piece.
11943 if (PHINode *OldInVal = dyn_cast<PHINode>(PN->getIncomingValue(i)))
11944 if (PHIsInspected.count(OldInVal)) {
11945 unsigned RefPHIId = std::find(PHIsToSlice.begin(),PHIsToSlice.end(),
11946 OldInVal)-PHIsToSlice.begin();
11947 PHIUsers.push_back(PHIUsageRecord(RefPHIId, Offset,
11948 cast<Instruction>(Res)));
11952 PredValues.clear();
11954 DEBUG(errs() << " Made element PHI for offset " << Offset << ": "
11955 << *EltPHI << '\n');
11956 ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)] = EltPHI;
11959 // Replace the use of this piece with the PHI node.
11960 ReplaceInstUsesWith(*PHIUsers[UserI].Inst, EltPHI);
11963 // Replace all the remaining uses of the PHI nodes (self uses and the lshrs) with undefs.
11965 Value *Undef = UndefValue::get(FirstPhi.getType());
11966 for (unsigned i = 1, e = PHIsToSlice.size(); i != e; ++i)
11967 ReplaceInstUsesWith(*PHIsToSlice[i], Undef);
11968 return ReplaceInstUsesWith(FirstPhi, Undef);
11971 // PHINode simplification
11973 Instruction *InstCombiner::visitPHINode(PHINode &PN) {
11974 // If LCSSA is around, don't mess with Phi nodes
11975 if (MustPreserveLCSSA) return 0;
11977 if (Value *V = PN.hasConstantValue())
11978 return ReplaceInstUsesWith(PN, V);
11980 // If all PHI operands are the same operation, pull them through the PHI,
11981 // reducing code size.
11982 if (isa<Instruction>(PN.getIncomingValue(0)) &&
11983 isa<Instruction>(PN.getIncomingValue(1)) &&
11984 cast<Instruction>(PN.getIncomingValue(0))->getOpcode() ==
11985 cast<Instruction>(PN.getIncomingValue(1))->getOpcode() &&
11986 // FIXME: The hasOneUse check will fail for PHIs that use the value more
11987 // than once, even when the PHI itself is the only user.
11988 PN.getIncomingValue(0)->hasOneUse())
11989 if (Instruction *Result = FoldPHIArgOpIntoPHI(PN))
11992 // If this is a trivial cycle in the PHI node graph, remove it. Basically, if
11993 // this PHI only has a single use (a PHI), and if that PHI only has one use (a
11994 // PHI)... break the cycle.
11995 if (PN.hasOneUse()) {
11996 Instruction *PHIUser = cast<Instruction>(PN.use_back());
11997 if (PHINode *PU = dyn_cast<PHINode>(PHIUser)) {
11998 SmallPtrSet<PHINode*, 16> PotentiallyDeadPHIs;
11999 PotentiallyDeadPHIs.insert(&PN);
12000 if (DeadPHICycle(PU, PotentiallyDeadPHIs))
12001 return ReplaceInstUsesWith(PN, UndefValue::get(PN.getType()));
12004 // If this phi has a single use, and if that use just computes a value for
12005 // the next iteration of a loop, delete the phi. This occurs with unused
12006 // induction variables, e.g. "for (int j = 0; ; ++j);". Detecting this
12007 // common case here is good because the only other things that catch this
12008 // are induction variable analysis (sometimes) and ADCE, which is only run late.
12010 if (PHIUser->hasOneUse() &&
12011 (isa<BinaryOperator>(PHIUser) || isa<GetElementPtrInst>(PHIUser)) &&
12012 PHIUser->use_back() == &PN) {
12013 return ReplaceInstUsesWith(PN, UndefValue::get(PN.getType()));
12017 // We sometimes end up with phi cycles that non-obviously end up being the
12018 // same value, for example:
12019 // z = some value; x = phi (y, z); y = phi (x, z)
12020 // where the phi nodes don't necessarily need to be in the same block. Do a
12021 // quick check to see if the PHI node only contains a single non-phi value, if
12022 // so, scan to see if the phi cycle is actually equal to that value.
12024 unsigned InValNo = 0, NumOperandVals = PN.getNumIncomingValues();
12025 // Scan for the first non-phi operand.
12026 while (InValNo != NumOperandVals &&
12027 isa<PHINode>(PN.getIncomingValue(InValNo)))
12030 if (InValNo != NumOperandVals) {
12031 Value *NonPhiInVal = PN.getOperand(InValNo);
12033 // Scan the rest of the operands to see if there are any conflicts, if so
12034 // there is no need to recursively scan other phis.
12035 for (++InValNo; InValNo != NumOperandVals; ++InValNo) {
12036 Value *OpVal = PN.getIncomingValue(InValNo);
12037 if (OpVal != NonPhiInVal && !isa<PHINode>(OpVal))
12041 // If we scanned over all operands, then we have one unique value plus
12042 // phi values. Scan PHI nodes to see if they all merge in each other or the value.
12044 if (InValNo == NumOperandVals) {
12045 SmallPtrSet<PHINode*, 16> ValueEqualPHIs;
12046 if (PHIsEqualValue(&PN, NonPhiInVal, ValueEqualPHIs))
12047 return ReplaceInstUsesWith(PN, NonPhiInVal);
12052 // If there are multiple PHIs, sort their operands so that they all list
12053 // the blocks in the same order. This will help identical PHIs be eliminated
12054 // by other passes. Other passes shouldn't depend on this for correctness however.
12056 PHINode *FirstPN = cast<PHINode>(PN.getParent()->begin());
12057 if (&PN != FirstPN)
12058 for (unsigned i = 0, e = FirstPN->getNumIncomingValues(); i != e; ++i) {
12059 BasicBlock *BBA = PN.getIncomingBlock(i);
12060 BasicBlock *BBB = FirstPN->getIncomingBlock(i);
12062 Value *VA = PN.getIncomingValue(i);
12063 unsigned j = PN.getBasicBlockIndex(BBB);
12064 Value *VB = PN.getIncomingValue(j);
12065 PN.setIncomingBlock(i, BBB);
12066 PN.setIncomingValue(i, VB);
12067 PN.setIncomingBlock(j, BBA);
12068 PN.setIncomingValue(j, VA);
12069 // NOTE: Instcombine normally would want us to "return &PN" if we
12070 // modified any of the operands of an instruction. However, since we
12071 // aren't adding or removing uses (just rearranging them) we don't do
12072 // this in this case.
12076 // If this is an integer PHI and we know that it has an illegal type, see if
12077 // it is only used by trunc or trunc(lshr) operations. If so, we split the
12078 // PHI into the various pieces being extracted. This sort of thing is
12079 // introduced when SROA promotes an aggregate to a single large integer type.
12080 if (isa<IntegerType>(PN.getType()) && TD &&
12081 !TD->isLegalInteger(PN.getType()->getPrimitiveSizeInBits()))
12082 if (Instruction *Res = SliceUpIllegalIntegerPHI(PN))
12088 Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
12089 SmallVector<Value*, 8> Ops(GEP.op_begin(), GEP.op_end());
12091 if (Value *V = SimplifyGEPInst(&Ops[0], Ops.size(), TD))
12092 return ReplaceInstUsesWith(GEP, V);
12094 Value *PtrOp = GEP.getOperand(0);
12096 if (isa<UndefValue>(GEP.getOperand(0)))
12097 return ReplaceInstUsesWith(GEP, UndefValue::get(GEP.getType()));
12099 // Eliminate unneeded casts for indices.
12101 bool MadeChange = false;
12102 unsigned PtrSize = TD->getPointerSizeInBits();
12104 gep_type_iterator GTI = gep_type_begin(GEP);
12105 for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end();
12106 I != E; ++I, ++GTI) {
12107 if (!isa<SequentialType>(*GTI)) continue;
12109 // If we are using a wider index than needed for this platform, shrink it
12110 // to what we need. If narrower, sign-extend it to what we need. This
12111 // explicit cast can make subsequent optimizations more obvious.
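// Illustrative example (hypothetical IR): on a target with 64-bit pointers,
// "getelementptr i32* %p, i32 %i" gets its index sign-extended to i64 here,
// so later GEP folds see indices of one uniform width.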
12112 unsigned OpBits = cast<IntegerType>((*I)->getType())->getBitWidth();
12113 if (OpBits == PtrSize)
12116 *I = Builder->CreateIntCast(*I, TD->getIntPtrType(GEP.getContext()),true);
12119 if (MadeChange) return &GEP;
12122 // Combine Indices - If the source pointer to this getelementptr instruction
12123 // is a getelementptr instruction, combine the indices of the two
12124 // getelementptr instructions into a single instruction.
12126 if (GEPOperator *Src = dyn_cast<GEPOperator>(PtrOp)) {
12127 // Note that if our source is a gep chain itself, we wait for that
12128 // chain to be resolved before we perform this transformation. This
12129 // avoids us creating a TON of code in some cases.
12131 if (GetElementPtrInst *SrcGEP =
12132 dyn_cast<GetElementPtrInst>(Src->getOperand(0)))
12133 if (SrcGEP->getNumOperands() == 2)
12134 return 0; // Wait until our source is folded to completion.
12136 SmallVector<Value*, 8> Indices;
12138 // Find out whether the last index in the source GEP is a sequential idx.
12139 bool EndsWithSequential = false;
12140 for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
12142 EndsWithSequential = !isa<StructType>(*I);
12144 // Can we combine the two pointer arithmetic offsets?
12145 if (EndsWithSequential) {
12146 // Replace: gep (gep %P, long B), long A, ...
12147 // With: T = long A+B; gep %P, T, ...
12150 Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
12151 Value *GO1 = GEP.getOperand(1);
12152 if (SO1 == Constant::getNullValue(SO1->getType())) {
12154 } else if (GO1 == Constant::getNullValue(GO1->getType())) {
12157 // If they aren't the same type, then the input hasn't been processed
12158 // by the loop above yet (which canonicalizes sequential index types to
12159 // intptr_t). Just avoid transforming this until the input has been processed.
12161 if (SO1->getType() != GO1->getType())
12163 Sum = Builder->CreateAdd(SO1, GO1, PtrOp->getName()+".sum");
12166 // Update the GEP in place if possible.
12167 if (Src->getNumOperands() == 2) {
12168 GEP.setOperand(0, Src->getOperand(0));
12169 GEP.setOperand(1, Sum);
12172 Indices.append(Src->op_begin()+1, Src->op_end()-1);
12173 Indices.push_back(Sum);
12174 Indices.append(GEP.op_begin()+2, GEP.op_end());
12175 } else if (isa<Constant>(*GEP.idx_begin()) &&
12176 cast<Constant>(*GEP.idx_begin())->isNullValue() &&
12177 Src->getNumOperands() != 1) {
12178 // Otherwise we can do the fold if the first index of the GEP is a zero
12179 Indices.append(Src->op_begin()+1, Src->op_end());
12180 Indices.append(GEP.idx_begin()+1, GEP.idx_end());
12183 if (!Indices.empty())
12184 return (cast<GEPOperator>(&GEP)->isInBounds() &&
12185 Src->isInBounds()) ?
12186 GetElementPtrInst::CreateInBounds(Src->getOperand(0), Indices.begin(),
12187 Indices.end(), GEP.getName()) :
12188 GetElementPtrInst::Create(Src->getOperand(0), Indices.begin(),
12189 Indices.end(), GEP.getName());
12192 // Handle gep(bitcast x) and gep(gep x, 0, 0, 0).
12193 if (Value *X = getBitCastOperand(PtrOp)) {
12194 assert(isa<PointerType>(X->getType()) && "Must be cast from pointer");
12196 // If the input bitcast is actually "bitcast(bitcast(x))", then we don't
12197 // want to change the gep until the bitcasts are eliminated.
12198 if (getBitCastOperand(X)) {
12199 Worklist.AddValue(PtrOp);
12203 bool HasZeroPointerIndex = false;
12204 if (ConstantInt *C = dyn_cast<ConstantInt>(GEP.getOperand(1)))
12205 HasZeroPointerIndex = C->isZero();
12207 // Transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ...
12208 // into : GEP [10 x i8]* X, i32 0, ...
12210 // Likewise, transform: GEP (bitcast i8* X to [0 x i8]*), i32 0, ...
12211 // into : GEP i8* X, ...
12213 // This occurs when the program declares an array extern like "int X[];"
12214 if (HasZeroPointerIndex) {
12215 const PointerType *CPTy = cast<PointerType>(PtrOp->getType());
12216 const PointerType *XTy = cast<PointerType>(X->getType());
12217 if (const ArrayType *CATy =
12218 dyn_cast<ArrayType>(CPTy->getElementType())) {
12219 // GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ?
12220 if (CATy->getElementType() == XTy->getElementType()) {
12221 // -> GEP i8* X, ...
12222 SmallVector<Value*, 8> Indices(GEP.idx_begin()+1, GEP.idx_end());
12223 return cast<GEPOperator>(&GEP)->isInBounds() ?
12224 GetElementPtrInst::CreateInBounds(X, Indices.begin(), Indices.end(),
12226 GetElementPtrInst::Create(X, Indices.begin(), Indices.end(),
12230 if (const ArrayType *XATy = dyn_cast<ArrayType>(XTy->getElementType())){
12231 // GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ?
12232 if (CATy->getElementType() == XATy->getElementType()) {
12233 // -> GEP [10 x i8]* X, i32 0, ...
12234 // At this point, we know that the cast source type is a pointer
12235 // to an array of the same type as the destination pointer
12236 // array. Because the array type is never stepped over (there
12237 // is a leading zero) we can fold the cast into this GEP.
12238 GEP.setOperand(0, X);
12243 } else if (GEP.getNumOperands() == 2) {
12244 // Transform things like:
12245 // %t = getelementptr i32* bitcast ([2 x i32]* %str to i32*), i32 %V
12246 // into: %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast
12247 const Type *SrcElTy = cast<PointerType>(X->getType())->getElementType();
12248 const Type *ResElTy=cast<PointerType>(PtrOp->getType())->getElementType();
12249 if (TD && isa<ArrayType>(SrcElTy) &&
12250 TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType()) ==
12251 TD->getTypeAllocSize(ResElTy)) {
12253 Idx[0] = Constant::getNullValue(Type::getInt32Ty(*Context));
12254 Idx[1] = GEP.getOperand(1);
12255 Value *NewGEP = cast<GEPOperator>(&GEP)->isInBounds() ?
12256 Builder->CreateInBoundsGEP(X, Idx, Idx + 2, GEP.getName()) :
12257 Builder->CreateGEP(X, Idx, Idx + 2, GEP.getName());
12258 // V and GEP are both pointer types --> BitCast
12259 return new BitCastInst(NewGEP, GEP.getType());
12262 // Transform things like:
12263 // getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp
12264 // (where tmp = 8*tmp2) into:
12265 // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast
12267 if (TD && isa<ArrayType>(SrcElTy) && ResElTy == Type::getInt8Ty(*Context)) {
12268 uint64_t ArrayEltSize =
12269 TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType());
12271 // Check to see if "tmp" is a scale by a multiple of ArrayEltSize. We
12272 // allow either a mul, shift, or constant here.
12274 ConstantInt *Scale = 0;
12275 if (ArrayEltSize == 1) {
12276 NewIdx = GEP.getOperand(1);
12277 Scale = ConstantInt::get(cast<IntegerType>(NewIdx->getType()), 1);
12278 } else if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP.getOperand(1))) {
12279 NewIdx = ConstantInt::get(CI->getType(), 1);
12281 } else if (Instruction *Inst =dyn_cast<Instruction>(GEP.getOperand(1))){
12282 if (Inst->getOpcode() == Instruction::Shl &&
12283 isa<ConstantInt>(Inst->getOperand(1))) {
12284 ConstantInt *ShAmt = cast<ConstantInt>(Inst->getOperand(1));
12285 uint32_t ShAmtVal = ShAmt->getLimitedValue(64);
12286 Scale = ConstantInt::get(cast<IntegerType>(Inst->getType()),
12288 NewIdx = Inst->getOperand(0);
12289 } else if (Inst->getOpcode() == Instruction::Mul &&
12290 isa<ConstantInt>(Inst->getOperand(1))) {
12291 Scale = cast<ConstantInt>(Inst->getOperand(1));
12292 NewIdx = Inst->getOperand(0);
12296 // If the index will be to exactly the right offset with the scale taken
12297 // out, perform the transformation. Note, we don't know whether Scale is
12298 // signed or not. We'll use the unsigned version of the division/modulo
12299 // operation after making sure Scale doesn't have the sign bit set.
12300 if (ArrayEltSize && Scale && Scale->getSExtValue() >= 0LL &&
12301 Scale->getZExtValue() % ArrayEltSize == 0) {
12302 Scale = ConstantInt::get(Scale->getType(),
12303 Scale->getZExtValue() / ArrayEltSize);
12304 if (Scale->getZExtValue() != 1) {
12305 Constant *C = ConstantExpr::getIntegerCast(Scale, NewIdx->getType(),
12307 NewIdx = Builder->CreateMul(NewIdx, C, "idxscale");
12310 // Insert the new GEP instruction.
12312 Idx[0] = Constant::getNullValue(Type::getInt32Ty(*Context));
12314 Value *NewGEP = cast<GEPOperator>(&GEP)->isInBounds() ?
12315 Builder->CreateInBoundsGEP(X, Idx, Idx + 2, GEP.getName()) :
12316 Builder->CreateGEP(X, Idx, Idx + 2, GEP.getName());
12317 // The NewGEP must be pointer typed, so must the old one -> BitCast
12318 return new BitCastInst(NewGEP, GEP.getType());
12324 /// See if we can simplify:
12325 /// X = bitcast A* to B*
12326 /// Y = gep X, <...constant indices...>
12327 /// into a gep of the original struct. This is important for SROA and alias
12328 /// analysis of unions. If "A" is also a bitcast, wait for A/X to be merged.
12329 if (BitCastInst *BCI = dyn_cast<BitCastInst>(PtrOp)) {
12331 !isa<BitCastInst>(BCI->getOperand(0)) && GEP.hasAllConstantIndices()) {
12332 // Determine how much the GEP moves the pointer. We are guaranteed to get
12333 // a constant back from EmitGEPOffset.
12334 ConstantInt *OffsetV = cast<ConstantInt>(EmitGEPOffset(&GEP, *this));
12335 int64_t Offset = OffsetV->getSExtValue();
12337 // If this GEP instruction doesn't move the pointer, just replace the GEP
12338 // with a bitcast of the real input to the dest type.
12340 // If the bitcast is of an allocation, and the allocation will be
12341 // converted to match the type of the cast, don't touch this.
12342 if (isa<AllocaInst>(BCI->getOperand(0)) ||
12343 isMalloc(BCI->getOperand(0))) {
12344 // See if the bitcast simplifies, if so, don't nuke this GEP yet.
12345 if (Instruction *I = visitBitCast(*BCI)) {
12348 BCI->getParent()->getInstList().insert(BCI, I);
12349 ReplaceInstUsesWith(*BCI, I);
12354 return new BitCastInst(BCI->getOperand(0), GEP.getType());
12357 // Otherwise, if the offset is non-zero, we need to find out if there is a
12358 // field at Offset in 'A's type. If so, we can pull the cast through the GEP.
12360 SmallVector<Value*, 8> NewIndices;
12362 cast<PointerType>(BCI->getOperand(0)->getType())->getElementType();
12363 if (FindElementAtOffset(InTy, Offset, NewIndices, TD, Context)) {
12364 Value *NGEP = cast<GEPOperator>(&GEP)->isInBounds() ?
12365 Builder->CreateInBoundsGEP(BCI->getOperand(0), NewIndices.begin(),
12366 NewIndices.end()) :
12367 Builder->CreateGEP(BCI->getOperand(0), NewIndices.begin(),
12370 if (NGEP->getType() == GEP.getType())
12371 return ReplaceInstUsesWith(GEP, NGEP);
12372 NGEP->takeName(&GEP);
12373 return new BitCastInst(NGEP, GEP.getType());
12381 Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
12382 // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
12383 if (AI.isArrayAllocation()) { // Check C != 1
12384 if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
12385 const Type *NewTy =
12386 ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
12387 assert(isa<AllocaInst>(AI) && "Unknown type of allocation inst!");
12388 AllocaInst *New = Builder->CreateAlloca(NewTy, 0, AI.getName());
12389 New->setAlignment(AI.getAlignment());
12391 // Scan to the end of the allocation instructions, to skip over a block of
12392 // allocas if possible...also skip interleaved debug info
12394 BasicBlock::iterator It = New;
12395 while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It)) ++It;
12397 // Now that It is pointing to the first non-allocation-inst in the block,
12398 // insert our getelementptr instruction...
12400 Value *NullIdx = Constant::getNullValue(Type::getInt32Ty(*Context));
12404 Value *V = GetElementPtrInst::CreateInBounds(New, Idx, Idx + 2,
12405 New->getName()+".sub", It);
12407 // Now make everything use the getelementptr instead of the original allocation.
12409 return ReplaceInstUsesWith(AI, V);
12410 } else if (isa<UndefValue>(AI.getArraySize())) {
12411 return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
12415 if (TD && isa<AllocaInst>(AI) && AI.getAllocatedType()->isSized()) {
12416 // If alloca'ing a zero byte object, replace the alloca with a null pointer.
12417 // Note that we only do this for alloca's, because malloc should allocate
12418 // and return a unique pointer, even for a zero byte allocation.
12419 if (TD->getTypeAllocSize(AI.getAllocatedType()) == 0)
12420 return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
12422 // If the alignment is 0 (unspecified), assign it the preferred alignment.
12423 if (AI.getAlignment() == 0)
12424 AI.setAlignment(TD->getPrefTypeAlignment(AI.getAllocatedType()));
12430 Instruction *InstCombiner::visitFree(Instruction &FI) {
12431 Value *Op = FI.getOperand(1);
12433 // free undef -> unreachable.
12434 if (isa<UndefValue>(Op)) {
12435 // Leave a store through an undef pointer behind, because we cannot modify the CFG here.
12436 new StoreInst(ConstantInt::getTrue(*Context),
12437 UndefValue::get(Type::getInt1PtrTy(*Context)), &FI);
12438 return EraseInstFromFunction(FI);
12441 // If we have 'free null' delete the instruction. This can happen in stl code
12442 // when lots of inlining happens.
12443 if (isa<ConstantPointerNull>(Op))
12444 return EraseInstFromFunction(FI);
12446 // If we have a malloc call whose only use is a free call, delete both.
12447 if (isMalloc(Op)) {
12448 if (CallInst* CI = extractMallocCallFromBitCast(Op)) {
12449 if (Op->hasOneUse() && CI->hasOneUse()) {
12450 EraseInstFromFunction(FI);
12451 EraseInstFromFunction(*CI);
12452 return EraseInstFromFunction(*cast<Instruction>(Op));
12455 // Op is a call to malloc
12456 if (Op->hasOneUse()) {
12457 EraseInstFromFunction(FI);
12458 return EraseInstFromFunction(*cast<Instruction>(Op));
12466 /// InstCombineLoadCast - Fold 'load (cast P)' -> 'cast (load P)' when possible.
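/// An illustrative example (hypothetical IR, same-sized element types):
///   %Q = bitcast <2 x i32>* %P to i64*
///   %v = load i64* %Q
/// becomes
///   %t = load <2 x i32>* %P
///   %v = bitcast <2 x i32> %t to i64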
12467 static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
12468 const TargetData *TD) {
12469 User *CI = cast<User>(LI.getOperand(0));
12470 Value *CastOp = CI->getOperand(0);
12471 LLVMContext *Context = IC.getContext();
12473 const PointerType *DestTy = cast<PointerType>(CI->getType());
12474 const Type *DestPTy = DestTy->getElementType();
12475 if (const PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType())) {
12477 // If the address spaces don't match, don't eliminate the cast.
12478 if (DestTy->getAddressSpace() != SrcTy->getAddressSpace())
12481 const Type *SrcPTy = SrcTy->getElementType();
12483 if (DestPTy->isInteger() || isa<PointerType>(DestPTy) ||
12484 isa<VectorType>(DestPTy)) {
12485 // If the source is an array, the code below will not succeed. Check to
12486 // see if a trivial 'gep P, 0, 0' will help matters. Only do this for constants.
12488 if (const ArrayType *ASrcTy = dyn_cast<ArrayType>(SrcPTy))
12489 if (Constant *CSrc = dyn_cast<Constant>(CastOp))
12490 if (ASrcTy->getNumElements() != 0) {
12492 Idxs[0] = Constant::getNullValue(Type::getInt32Ty(*Context));
12494 CastOp = ConstantExpr::getGetElementPtr(CSrc, Idxs, 2);
12495 SrcTy = cast<PointerType>(CastOp->getType());
12496 SrcPTy = SrcTy->getElementType();
12499 if (IC.getTargetData() &&
12500 (SrcPTy->isInteger() || isa<PointerType>(SrcPTy) ||
12501 isa<VectorType>(SrcPTy)) &&
12502 // Do not allow turning this into a load of an integer, which is then
12503 // cast to a pointer; this pessimizes pointer analysis a lot.
12504 (isa<PointerType>(SrcPTy) == isa<PointerType>(LI.getType())) &&
12505 IC.getTargetData()->getTypeSizeInBits(SrcPTy) ==
12506 IC.getTargetData()->getTypeSizeInBits(DestPTy)) {
12508 // Okay, we are casting from one integer or pointer type to another of
12509 // the same size. Instead of casting the pointer before the load, cast
12510 // the loaded value afterwards.
12512 IC.Builder->CreateLoad(CastOp, LI.isVolatile(), CI->getName());
12513 // Now cast the result of the load.
12514 return new BitCastInst(NewLoad, LI.getType());
12521 Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
12522 Value *Op = LI.getOperand(0);
12524 // Attempt to improve the alignment.
12526 unsigned KnownAlign =
12527 GetOrEnforceKnownAlignment(Op, TD->getPrefTypeAlignment(LI.getType()));
12529 (LI.getAlignment() == 0 ? TD->getABITypeAlignment(LI.getType()) :
12530 LI.getAlignment()))
12531 LI.setAlignment(KnownAlign);
12534 // load (cast X) --> cast (load X) iff safe.
12535 if (isa<CastInst>(Op))
12536 if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
12539 // None of the following transforms are legal for volatile loads.
12540 if (LI.isVolatile()) return 0;
12542 // Do really simple store-to-load forwarding and load CSE, to catch cases
12543 // where there are several consecutive memory accesses to the same location,
12544 // separated by a few arithmetic operations.
12545 BasicBlock::iterator BBI = &LI;
12546 if (Value *AvailableVal = FindAvailableLoadedValue(Op, LI.getParent(), BBI,6))
12547 return ReplaceInstUsesWith(LI, AvailableVal);
12549 // load(gep null, ...) -> unreachable
12550 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
12551 const Value *GEPI0 = GEPI->getOperand(0);
12552 // TODO: Consider a target hook for valid address spaces for this xform.
12553 if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0){
12554 // Insert a new store to null instruction before the load to indicate
12555 // that this code is not reachable. We do this instead of inserting
12556 // an unreachable instruction directly because we cannot modify the
12557 // CFG.
12558 new StoreInst(UndefValue::get(LI.getType()),
12559 Constant::getNullValue(Op->getType()), &LI);
12560 return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
12564 // load null/undef -> unreachable
12565 // TODO: Consider a target hook for valid address spaces for this xform.
12566 if (isa<UndefValue>(Op) ||
12567 (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0)) {
12568 // Insert a new store to null instruction before the load to indicate that
12569 // this code is not reachable. We do this instead of inserting an
12570 // unreachable instruction directly because we cannot modify the CFG.
12571 new StoreInst(UndefValue::get(LI.getType()),
12572 Constant::getNullValue(Op->getType()), &LI);
12573 return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
12576 // Instcombine load (constantexpr_cast global) -> cast (load global)
12577 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op))
12579 if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
12582 if (Op->hasOneUse()) {
12583 // Change select and PHI nodes to select values instead of addresses: this
12584 // helps alias analysis out a lot, allows many others simplifications, and
12585 // exposes redundancy in the code.
12587 // Note that we cannot do the transformation unless we know that the
12588 // introduced loads cannot trap! Something like this is valid as long as
12589 // the condition is always false: load (select bool %C, int* null, int* %G),
12590 // but it would not be valid if we transformed it to load from null
12591 // unconditionally.
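// As a hypothetical illustration of the transform below (IR not taken from
// the original comments):
//   %a = select i1 %c, i32* %p, i32* %q
//   %v = load i32* %a
// becomes, when it is safe to load from both %p and %q unconditionally:
//   %v1 = load i32* %p
//   %v2 = load i32* %q
//   %v  = select i1 %c, i32 %v1, i32 %v2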
12593 if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
12594 // load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
12595 if (isSafeToLoadUnconditionally(SI->getOperand(1), SI) &&
12596 isSafeToLoadUnconditionally(SI->getOperand(2), SI)) {
12597 Value *V1 = Builder->CreateLoad(SI->getOperand(1),
12598 SI->getOperand(1)->getName()+".val");
12599 Value *V2 = Builder->CreateLoad(SI->getOperand(2),
12600 SI->getOperand(2)->getName()+".val");
12601 return SelectInst::Create(SI->getCondition(), V1, V2);
12604 // load (select (cond, null, P)) -> load P
12605 if (Constant *C = dyn_cast<Constant>(SI->getOperand(1)))
12606 if (C->isNullValue()) {
12607 LI.setOperand(0, SI->getOperand(2));
12611 // load (select (cond, P, null)) -> load P
12612 if (Constant *C = dyn_cast<Constant>(SI->getOperand(2)))
12613 if (C->isNullValue()) {
12614 LI.setOperand(0, SI->getOperand(1));
12622 /// InstCombineStoreToCast - Fold store V, (cast P) -> store (cast V), P
12623 /// when possible. This makes it generally easy to do alias analysis and/or
12624 /// SROA/mem2reg of the memory object.
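/// As an illustrative sketch (example IR added here, not from the original
/// comment), on a target where i8* and i32 have the same size:
///   %c = bitcast i32* %P to i8**
///   store i8* %q, i8** %c
/// becomes a cast of the stored value instead of the pointer:
///   %q.c = ptrtoint i8* %q to i32
///   store i32 %q.c, i32* %P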
12625 static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
12626 User *CI = cast<User>(SI.getOperand(1));
12627 Value *CastOp = CI->getOperand(0);
12629 const Type *DestPTy = cast<PointerType>(CI->getType())->getElementType();
12630 const PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType());
12631 if (SrcTy == 0) return 0;
12633 const Type *SrcPTy = SrcTy->getElementType();
12635 if (!DestPTy->isInteger() && !isa<PointerType>(DestPTy))
12638 /// NewGEPIndices - If SrcPTy is an aggregate type, we can emit a "noop gep"
12639 /// to its first element. This allows us to handle things like:
12640 /// store i32 xxx, (bitcast {foo*, float}* %P to i32*)
12641 /// on 32-bit hosts.
12642 SmallVector<Value*, 4> NewGEPIndices;
12644 // If the source is an array, the code below will not succeed. Check to
12645 // see if a trivial 'gep P, 0, 0' will help matters. Only do this for
12646 // aggregate (array or struct) pointee types.
12647 if (isa<ArrayType>(SrcPTy) || isa<StructType>(SrcPTy)) {
12648 // Index through pointer.
12649 Constant *Zero = Constant::getNullValue(Type::getInt32Ty(*IC.getContext()));
12650 NewGEPIndices.push_back(Zero);
12653 if (const StructType *STy = dyn_cast<StructType>(SrcPTy)) {
12654 if (!STy->getNumElements()) /* Struct can be empty {} */
12655 break;
12656 NewGEPIndices.push_back(Zero);
12657 SrcPTy = STy->getElementType(0);
12658 } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcPTy)) {
12659 NewGEPIndices.push_back(Zero);
12660 SrcPTy = ATy->getElementType();
12666 SrcTy = PointerType::get(SrcPTy, SrcTy->getAddressSpace());
12669 if (!SrcPTy->isInteger() && !isa<PointerType>(SrcPTy))
12672 // If the pointers point into different address spaces or if they point to
12673 // values with different sizes, we can't do the transformation.
12674 if (!IC.getTargetData() ||
12675 SrcTy->getAddressSpace() !=
12676 cast<PointerType>(CI->getType())->getAddressSpace() ||
12677 IC.getTargetData()->getTypeSizeInBits(SrcPTy) !=
12678 IC.getTargetData()->getTypeSizeInBits(DestPTy))
12681 // Okay, we are casting from one integer or pointer type to another of
12682 // the same size. Instead of casting the pointer before
12683 // the store, cast the value to be stored.
12685 Value *SIOp0 = SI.getOperand(0);
12686 Instruction::CastOps opcode = Instruction::BitCast;
12687 const Type* CastSrcTy = SIOp0->getType();
12688 const Type* CastDstTy = SrcPTy;
12689 if (isa<PointerType>(CastDstTy)) {
12690 if (CastSrcTy->isInteger())
12691 opcode = Instruction::IntToPtr;
12692 } else if (isa<IntegerType>(CastDstTy)) {
12693 if (isa<PointerType>(SIOp0->getType()))
12694 opcode = Instruction::PtrToInt;
12697 // SIOp0 is a pointer to aggregate and this is a store to the first field,
12698 // emit a GEP to index into its first field.
12699 if (!NewGEPIndices.empty())
12700 CastOp = IC.Builder->CreateInBoundsGEP(CastOp, NewGEPIndices.begin(),
12701 NewGEPIndices.end());
12703 Value *NewCast = IC.Builder->CreateCast(opcode, SIOp0, CastDstTy,
12704 SIOp0->getName()+".c");
12705 return new StoreInst(NewCast, CastOp);
12708 /// equivalentAddressValues - Test if A and B will obviously have the same
12709 /// value. This includes recognizing that %t0 and %t1 will have the same
12710 /// value in code like this:
12711 /// %t0 = getelementptr \@a, 0, 3
12712 /// store i32 0, i32* %t0
12713 /// %t1 = getelementptr \@a, 0, 3
12714 /// %t2 = load i32* %t1
12716 static bool equivalentAddressValues(Value *A, Value *B) {
12717 // Test if the values are trivially equivalent.
12718 if (A == B) return true;
12720 // Test if the values come from identical arithmetic instructions.
12721 // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
12722 // it's only used to compare two uses within the same basic block, which
12723 // means that they'll always either have the same value or one of them
12724 // will have an undefined value.
12725 if (isa<BinaryOperator>(A) ||
12726 isa<CastInst>(A) ||
12728 isa<GetElementPtrInst>(A))
12729 if (Instruction *BI = dyn_cast<Instruction>(B))
12730 if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
12731 return true;
12733 // Otherwise they may not be equivalent.
12734 return false;
12737 // If this instruction has two uses, one of which is a llvm.dbg.declare,
12738 // return the llvm.dbg.declare.
12739 DbgDeclareInst *InstCombiner::hasOneUsePlusDeclare(Value *V) {
12740 if (!V->hasNUses(2))
12742 for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
12744 if (DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(UI))
12746 if (isa<BitCastInst>(UI) && UI->hasOneUse()) {
12747 if (DbgDeclareInst *DI = dyn_cast<DbgDeclareInst>(UI->use_begin()))
12754 Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
12755 Value *Val = SI.getOperand(0);
12756 Value *Ptr = SI.getOperand(1);
12758 // If the RHS is an alloca with a single use, zapify the store, making the
12759 // alloca dead.
12760 // If the RHS is an alloca with two uses, the other one being a
12761 // llvm.dbg.declare, zapify the store and the declare, making the
12762 // alloca dead. We must do this to prevent declares from affecting
12763 // codegen.
12764 if (!SI.isVolatile()) {
12765 if (Ptr->hasOneUse()) {
12766 if (isa<AllocaInst>(Ptr)) {
12767 EraseInstFromFunction(SI);
12771 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
12772 if (isa<AllocaInst>(GEP->getOperand(0))) {
12773 if (GEP->getOperand(0)->hasOneUse()) {
12774 EraseInstFromFunction(SI);
12778 if (DbgDeclareInst *DI = hasOneUsePlusDeclare(GEP->getOperand(0))) {
12779 EraseInstFromFunction(*DI);
12780 EraseInstFromFunction(SI);
12787 if (DbgDeclareInst *DI = hasOneUsePlusDeclare(Ptr)) {
12788 EraseInstFromFunction(*DI);
12789 EraseInstFromFunction(SI);
12795 // Attempt to improve the alignment.
12797 unsigned KnownAlign =
12798 GetOrEnforceKnownAlignment(Ptr, TD->getPrefTypeAlignment(Val->getType()));
12799 if (KnownAlign >
12800 (SI.getAlignment() == 0 ? TD->getABITypeAlignment(Val->getType()) :
12801 SI.getAlignment()))
12802 SI.setAlignment(KnownAlign);
12805 // Do really simple DSE, to catch cases where there are several consecutive
12806 // stores to the same location, separated by a few arithmetic operations. This
12807 // situation often occurs with bitfield accesses.
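// For example (illustrative IR, not from the original source), in:
//   store i32 1, i32* %p
//   %x = add i32 %y, %z
//   store i32 2, i32* %p
// the first store is dead and can be erased, because nothing between the two
// stores reads or writes %p.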
12808 BasicBlock::iterator BBI = &SI;
12809 for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
12812 // Don't count debug info directives, lest they affect codegen,
12813 // and we skip pointer-to-pointer bitcasts, which are NOPs.
12814 // It is necessary for correctness to skip those that feed into a
12815 // llvm.dbg.declare, as these are not present when debugging is off.
12816 if (isa<DbgInfoIntrinsic>(BBI) ||
12817 (isa<BitCastInst>(BBI) && isa<PointerType>(BBI->getType()))) {
12822 if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
12823 // Prev store isn't volatile, and stores to the same location?
12824 if (!PrevSI->isVolatile() && equivalentAddressValues(PrevSI->getOperand(1),
12825 SI.getOperand(1))) {
12828 EraseInstFromFunction(*PrevSI);
12834 // If this is a load, we have to stop. However, if the loaded value is from
12835 // the pointer we're loading and is producing the pointer we're storing,
12836 // then *this* store is dead (X = load P; store X -> P).
12837 if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
12838 if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
12839 !SI.isVolatile()) {
12840 EraseInstFromFunction(SI);
12844 // Otherwise, this is a load from some other location. Stores before it
12845 // may not be dead.
12849 // Don't skip over loads or things that can modify memory.
12850 if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
12855 if (SI.isVolatile()) return 0; // Don't hack volatile stores.
12857 // store X, null -> turns into 'unreachable' in SimplifyCFG
12858 if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
12859 if (!isa<UndefValue>(Val)) {
12860 SI.setOperand(0, UndefValue::get(Val->getType()));
12861 if (Instruction *U = dyn_cast<Instruction>(Val))
12862 Worklist.Add(U); // Dropped a use.
12865 return 0; // Do not modify these!
12868 // store undef, Ptr -> noop
12869 if (isa<UndefValue>(Val)) {
12870 EraseInstFromFunction(SI);
12875 // If the pointer destination is a cast, see if we can fold the cast into the
12877 if (isa<CastInst>(Ptr))
12878 if (Instruction *Res = InstCombineStoreToCast(*this, SI))
12880 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
12882 if (Instruction *Res = InstCombineStoreToCast(*this, SI))
12886 // If this store is the last instruction in the basic block (possibly
12887 // excepting debug info instructions and the pointer bitcasts that feed
12888 // into them), and if the block ends with an unconditional branch, try
12889 // to move it to the successor block.
12890 BBI = &SI;
12891 do {
12892 ++BBI;
12893 } while (isa<DbgInfoIntrinsic>(BBI) ||
12894 (isa<BitCastInst>(BBI) && isa<PointerType>(BBI->getType())));
12895 if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
12896 if (BI->isUnconditional())
12897 if (SimplifyStoreAtEndOfBlock(SI))
12898 return 0; // xform done!
12903 /// SimplifyStoreAtEndOfBlock - Turn things like:
12904 /// if () { *P = v1; } else { *P = v2 }
12905 /// into a phi node with a store in the successor.
12907 /// Simplify things like:
12908 /// *P = v1; if () { *P = v2; }
12909 /// into a phi node with a store in the successor.
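/// A rough IR-level sketch of the 'if/then/else' case (block and value names
/// are hypothetical):
///   then:
///     store i32 %v1, i32* %P
///     br label %merge
///   else:
///     store i32 %v2, i32* %P
///     br label %merge
/// is turned into a single store fed by a phi in the successor:
///   merge:
///     %storemerge = phi i32 [ %v1, %then ], [ %v2, %else ]
///     store i32 %storemerge, i32* %P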
12911 bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
12912 BasicBlock *StoreBB = SI.getParent();
12914 // Check to see if the successor block has exactly two incoming edges. If
12915 // so, see if the other predecessor contains a store to the same location.
12916 // if so, insert a PHI node (if needed) and move the stores down.
12917 BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);
12919 // Determine whether Dest has exactly two predecessors and, if so, compute
12920 // the other predecessor.
12921 pred_iterator PI = pred_begin(DestBB);
12922 BasicBlock *OtherBB = 0;
12923 if (*PI != StoreBB)
12926 if (PI == pred_end(DestBB))
12929 if (*PI != StoreBB) {
12934 if (++PI != pred_end(DestBB))
12937 // Bail out if all the relevant blocks aren't distinct (this can happen,
12938 // for example, if SI is in an infinite loop)
12939 if (StoreBB == DestBB || OtherBB == DestBB)
12942 // Verify that the other block ends in a branch and is not otherwise empty.
12943 BasicBlock::iterator BBI = OtherBB->getTerminator();
12944 BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
12945 if (!OtherBr || BBI == OtherBB->begin())
12948 // If the other block ends in an unconditional branch, check for the 'if then
12949 // else' case: there is an instruction before the branch.
12950 StoreInst *OtherStore = 0;
12951 if (OtherBr->isUnconditional()) {
12953 // Skip over debugging info.
12954 while (isa<DbgInfoIntrinsic>(BBI) ||
12955 (isa<BitCastInst>(BBI) && isa<PointerType>(BBI->getType()))) {
12956 if (BBI==OtherBB->begin())
12960 // If this isn't a store, isn't a store to the same location, or if the
12961 // alignments differ, bail out.
12962 OtherStore = dyn_cast<StoreInst>(BBI);
12963 if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
12964 OtherStore->getAlignment() != SI.getAlignment())
12967 // Otherwise, the other block ended with a conditional branch. If one of the
12968 // destinations is StoreBB, then we have the if/then case.
12969 if (OtherBr->getSuccessor(0) != StoreBB &&
12970 OtherBr->getSuccessor(1) != StoreBB)
12973 // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
12974 // if/then triangle. See if there is a store to the same ptr as SI that
12975 // lives in OtherBB.
12977 // Check to see if we find the matching store.
12978 if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
12979 if (OtherStore->getOperand(1) != SI.getOperand(1) ||
12980 OtherStore->getAlignment() != SI.getAlignment())
12984 // If we find something that may be using or overwriting the stored
12985 // value, or if we run out of instructions, we can't do the xform.
12986 if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
12987 BBI == OtherBB->begin())
12991 // In order to eliminate the store in OtherBB, we have to
12992 // make sure nothing reads or overwrites the stored value in
12993 // StoreBB.
12994 for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
12995 // FIXME: This should really be AA driven.
12996 if (I->mayReadFromMemory() || I->mayWriteToMemory())
13001 // Insert a PHI node now if we need it.
13002 Value *MergedVal = OtherStore->getOperand(0);
13003 if (MergedVal != SI.getOperand(0)) {
13004 PHINode *PN = PHINode::Create(MergedVal->getType(), "storemerge");
13005 PN->reserveOperandSpace(2);
13006 PN->addIncoming(SI.getOperand(0), SI.getParent());
13007 PN->addIncoming(OtherStore->getOperand(0), OtherBB);
13008 MergedVal = InsertNewInstBefore(PN, DestBB->front());
13011 // Advance to a place where it is safe to insert the new store and
13013 BBI = DestBB->getFirstNonPHI();
13014 InsertNewInstBefore(new StoreInst(MergedVal, SI.getOperand(1),
13015 OtherStore->isVolatile(),
13016 SI.getAlignment()), *BBI);
13018 // Nuke the old stores.
13019 EraseInstFromFunction(SI);
13020 EraseInstFromFunction(*OtherStore);
13026 Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
13027 // Change br (not X), label True, label False to: br X, label False, True
13028 Value *X = 0;
13029 BasicBlock *TrueDest;
13030 BasicBlock *FalseDest;
13031 if (match(&BI, m_Br(m_Not(m_Value(X)), TrueDest, FalseDest)) &&
13032 !isa<Constant>(X)) {
13033 // Swap Destinations and condition...
13034 BI.setCondition(X);
13035 BI.setSuccessor(0, FalseDest);
13036 BI.setSuccessor(1, TrueDest);
13040 // Canonicalize fcmp_one -> fcmp_oeq
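// For instance (an illustrative sketch, assuming the condition has one use):
//   %c = fcmp one double %a, %b
//   br i1 %c, label %T, label %F
// is rewritten with the inverse predicate and swapped successors:
//   %c = fcmp ueq double %a, %b
//   br i1 %c, label %F, label %T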
13041 FCmpInst::Predicate FPred; Value *Y;
13042 if (match(&BI, m_Br(m_FCmp(FPred, m_Value(X), m_Value(Y)),
13043 TrueDest, FalseDest)) &&
13044 BI.getCondition()->hasOneUse())
13045 if (FPred == FCmpInst::FCMP_ONE || FPred == FCmpInst::FCMP_OLE ||
13046 FPred == FCmpInst::FCMP_OGE) {
13047 FCmpInst *Cond = cast<FCmpInst>(BI.getCondition());
13048 Cond->setPredicate(FCmpInst::getInversePredicate(FPred));
13050 // Swap Destinations and condition.
13051 BI.setSuccessor(0, FalseDest);
13052 BI.setSuccessor(1, TrueDest);
13053 Worklist.Add(Cond);
13057 // Canonicalize icmp_ne -> icmp_eq
13058 ICmpInst::Predicate IPred;
13059 if (match(&BI, m_Br(m_ICmp(IPred, m_Value(X), m_Value(Y)),
13060 TrueDest, FalseDest)) &&
13061 BI.getCondition()->hasOneUse())
13062 if (IPred == ICmpInst::ICMP_NE || IPred == ICmpInst::ICMP_ULE ||
13063 IPred == ICmpInst::ICMP_SLE || IPred == ICmpInst::ICMP_UGE ||
13064 IPred == ICmpInst::ICMP_SGE) {
13065 ICmpInst *Cond = cast<ICmpInst>(BI.getCondition());
13066 Cond->setPredicate(ICmpInst::getInversePredicate(IPred));
13067 // Swap Destinations and condition.
13068 BI.setSuccessor(0, FalseDest);
13069 BI.setSuccessor(1, TrueDest);
13070 Worklist.Add(Cond);
13077 Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
13078 Value *Cond = SI.getCondition();
13079 if (Instruction *I = dyn_cast<Instruction>(Cond)) {
13080 if (I->getOpcode() == Instruction::Add)
13081 if (ConstantInt *AddRHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
13082 // change 'switch (X+4) case 1:' into 'switch (X) case -3'
13083 for (unsigned i = 2, e = SI.getNumOperands(); i != e; i += 2)
13084 SI.setOperand(i,
13085 ConstantExpr::getSub(cast<Constant>(SI.getOperand(i)),
13086 AddRHS));
13087 SI.setOperand(0, I->getOperand(0));
13095 Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
13096 Value *Agg = EV.getAggregateOperand();
13098 if (!EV.hasIndices())
13099 return ReplaceInstUsesWith(EV, Agg);
13101 if (Constant *C = dyn_cast<Constant>(Agg)) {
13102 if (isa<UndefValue>(C))
13103 return ReplaceInstUsesWith(EV, UndefValue::get(EV.getType()));
13105 if (isa<ConstantAggregateZero>(C))
13106 return ReplaceInstUsesWith(EV, Constant::getNullValue(EV.getType()));
13108 if (isa<ConstantArray>(C) || isa<ConstantStruct>(C)) {
13109 // Extract the element indexed by the first index out of the constant
13110 Value *V = C->getOperand(*EV.idx_begin());
13111 if (EV.getNumIndices() > 1)
13112 // Extract the remaining indices out of the constant indexed by the
13113 // first index.
13114 return ExtractValueInst::Create(V, EV.idx_begin() + 1, EV.idx_end());
13116 return ReplaceInstUsesWith(EV, V);
13118 return 0; // Can't handle other constants
13120 if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
13121 // We're extracting from an insertvalue instruction, compare the indices
13122 const unsigned *exti, *exte, *insi, *inse;
13123 for (exti = EV.idx_begin(), insi = IV->idx_begin(),
13124 exte = EV.idx_end(), inse = IV->idx_end();
13125 exti != exte && insi != inse;
13127 if (*insi != *exti)
13128 // The insert and extract both reference distinctly different elements.
13129 // This means the extract is not influenced by the insert, and we can
13130 // replace the aggregate operand of the extract with the aggregate
13131 // operand of the insert. i.e., replace
13132 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
13133 // %E = extractvalue { i32, { i32 } } %I, 0
13135 // %E = extractvalue { i32, { i32 } } %A, 0
13136 return ExtractValueInst::Create(IV->getAggregateOperand(),
13137 EV.idx_begin(), EV.idx_end());
13139 if (exti == exte && insi == inse)
13140 // Both iterators are at the end: Index lists are identical. Replace
13141 // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
13142 // %C = extractvalue { i32, { i32 } } %B, 1, 0
13144 return ReplaceInstUsesWith(EV, IV->getInsertedValueOperand());
13145 if (exti == exte) {
13146 // The extract list is a prefix of the insert list. i.e. replace
13147 // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0
13148 // %E = extractvalue { i32, { i32 } } %I, 1
13150 // %X = extractvalue { i32, { i32 } } %A, 1
13151 // %E = insertvalue { i32 } %X, i32 42, 0
13152 // by switching the order of the insert and extract (though the
13153 // insertvalue should be left in, since it may have other uses).
13154 Value *NewEV = Builder->CreateExtractValue(IV->getAggregateOperand(),
13155 EV.idx_begin(), EV.idx_end());
13156 return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
13160 // The insert list is a prefix of the extract list
13161 // We can simply remove the common indices from the extract and make it
13162 // operate on the inserted value instead of the insertvalue result.
13164 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1
13165 // %E = extractvalue { i32, { i32 } } %I, 1, 0
13167 // %E = extractvalue { i32 } { i32 42 }, 0
13168 return ExtractValueInst::Create(IV->getInsertedValueOperand(),
13171 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Agg)) {
13172 // We're extracting from an intrinsic, see if we're the only user, which
13173 // allows us to simplify multiple result intrinsics to simpler things that
13174 // just get one value.
13175 if (II->hasOneUse()) {
13176 // Check if we're grabbing the overflow bit or the result of a 'with
13177 // overflow' intrinsic. If it's the latter we can remove the intrinsic
13178 // and replace it with a traditional binary instruction.
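// Illustrative sketch (hypothetical values, not from the original comments):
// if the only use of
//   %s = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
// is '%r = extractvalue {i32, i1} %s, 0', then the intrinsic is removed and
// the extract becomes a plain
//   %r = add i32 %a, %b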
13179 switch (II->getIntrinsicID()) {
13180 case Intrinsic::uadd_with_overflow:
13181 case Intrinsic::sadd_with_overflow:
13182 if (*EV.idx_begin() == 0) { // Normal result.
13183 Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
13184 II->replaceAllUsesWith(UndefValue::get(II->getType()));
13185 EraseInstFromFunction(*II);
13186 return BinaryOperator::CreateAdd(LHS, RHS);
13189 case Intrinsic::usub_with_overflow:
13190 case Intrinsic::ssub_with_overflow:
13191 if (*EV.idx_begin() == 0) { // Normal result.
13192 Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
13193 II->replaceAllUsesWith(UndefValue::get(II->getType()));
13194 EraseInstFromFunction(*II);
13195 return BinaryOperator::CreateSub(LHS, RHS);
13198 case Intrinsic::umul_with_overflow:
13199 case Intrinsic::smul_with_overflow:
13200 if (*EV.idx_begin() == 0) { // Normal result.
13201 Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
13202 II->replaceAllUsesWith(UndefValue::get(II->getType()));
13203 EraseInstFromFunction(*II);
13204 return BinaryOperator::CreateMul(LHS, RHS);
13212 // Can't simplify extracts from other values. Note that nested extracts are
13213 // already simplified implicitly by the above (extract ( extract (insert) )
13214 // will be translated into extract ( insert ( extract ) ) first and then just
13215 // the value inserted, if appropriate).
13219 /// CheapToScalarize - Return true if the value is cheaper to scalarize than it
13220 /// is to leave as a vector operation.
13221 static bool CheapToScalarize(Value *V, bool isConstant) {
13222 if (isa<ConstantAggregateZero>(V))
13224 if (ConstantVector *C = dyn_cast<ConstantVector>(V)) {
13225 if (isConstant) return true;
13226 // If all elts are the same, we can extract.
13227 Constant *Op0 = C->getOperand(0);
13228 for (unsigned i = 1; i < C->getNumOperands(); ++i)
13229 if (C->getOperand(i) != Op0)
13233 Instruction *I = dyn_cast<Instruction>(V);
13234 if (!I) return false;
13236 // Insert element gets simplified to the inserted element or is deleted if
13237 // this is a constant idx extractelement and it's a constant idx insertelt.
13238 if (I->getOpcode() == Instruction::InsertElement && isConstant &&
13239 isa<ConstantInt>(I->getOperand(2)))
13241 if (I->getOpcode() == Instruction::Load && I->hasOneUse())
13243 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I))
13244 if (BO->hasOneUse() &&
13245 (CheapToScalarize(BO->getOperand(0), isConstant) ||
13246 CheapToScalarize(BO->getOperand(1), isConstant)))
13248 if (CmpInst *CI = dyn_cast<CmpInst>(I))
13249 if (CI->hasOneUse() &&
13250 (CheapToScalarize(CI->getOperand(0), isConstant) ||
13251 CheapToScalarize(CI->getOperand(1), isConstant)))
13257 /// Read and decode a shufflevector mask.
13259 /// It turns undef elements into values that are larger than the number of
13260 /// elements in the input.
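/// A small worked example (added for illustration): for a 4-element shuffle
/// whose mask is <i32 0, i32 5, i32 undef, i32 2>, this returns {0, 5, 8, 2},
/// where 8 (== 2*NElts) stands in for the undef element.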
13261 static std::vector<unsigned> getShuffleMask(const ShuffleVectorInst *SVI) {
13262 unsigned NElts = SVI->getType()->getNumElements();
13263 if (isa<ConstantAggregateZero>(SVI->getOperand(2)))
13264 return std::vector<unsigned>(NElts, 0);
13265 if (isa<UndefValue>(SVI->getOperand(2)))
13266 return std::vector<unsigned>(NElts, 2*NElts);
13268 std::vector<unsigned> Result;
13269 const ConstantVector *CP = cast<ConstantVector>(SVI->getOperand(2));
13270 for (User::const_op_iterator i = CP->op_begin(), e = CP->op_end(); i!=e; ++i)
13271 if (isa<UndefValue>(*i))
13272 Result.push_back(NElts*2); // undef -> 2*NElts (out of range)
13274 Result.push_back(cast<ConstantInt>(*i)->getZExtValue());
13278 /// FindScalarElement - Given a vector and an element number, see if the scalar
13279 /// value is already around as a register, for example if it were inserted then
13280 /// extracted from the vector.
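/// For example (illustrative IR, not part of the original comment), given
///   %v1 = insertelement <4 x i32> %v0, i32 %x, i32 2
/// a request for element 2 of %v1 returns %x directly, while a request for any
/// other constant index recurses into %v0.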
13281 static Value *FindScalarElement(Value *V, unsigned EltNo,
13282 LLVMContext *Context) {
13283 assert(isa<VectorType>(V->getType()) && "Not looking at a vector?");
13284 const VectorType *PTy = cast<VectorType>(V->getType());
13285 unsigned Width = PTy->getNumElements();
13286 if (EltNo >= Width) // Out of range access.
13287 return UndefValue::get(PTy->getElementType());
13289 if (isa<UndefValue>(V))
13290 return UndefValue::get(PTy->getElementType());
13291 else if (isa<ConstantAggregateZero>(V))
13292 return Constant::getNullValue(PTy->getElementType());
13293 else if (ConstantVector *CP = dyn_cast<ConstantVector>(V))
13294 return CP->getOperand(EltNo);
13295 else if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
13296 // If this is an insert to a variable element, we don't know what it is.
13297 if (!isa<ConstantInt>(III->getOperand(2)))
13298 return 0;
13299 unsigned IIElt = cast<ConstantInt>(III->getOperand(2))->getZExtValue();
13301 // If this is an insert to the element we are looking for, return the
13303 if (EltNo == IIElt)
13304 return III->getOperand(1);
13306 // Otherwise, the insertelement doesn't modify the value, recurse on its
13308 return FindScalarElement(III->getOperand(0), EltNo, Context);
13309 } else if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V)) {
13310 unsigned LHSWidth =
13311 cast<VectorType>(SVI->getOperand(0)->getType())->getNumElements();
13312 unsigned InEl = getShuffleMask(SVI)[EltNo];
13313 if (InEl < LHSWidth)
13314 return FindScalarElement(SVI->getOperand(0), InEl, Context);
13315 else if (InEl < LHSWidth*2)
13316 return FindScalarElement(SVI->getOperand(1), InEl - LHSWidth, Context);
13318 return UndefValue::get(PTy->getElementType());
13321 // Otherwise, we don't know.
13325 Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
13326 // If vector val is undef, replace extract with scalar undef.
13327 if (isa<UndefValue>(EI.getOperand(0)))
13328 return ReplaceInstUsesWith(EI, UndefValue::get(EI.getType()));
13330 // If vector val is constant 0, replace extract with scalar 0.
13331 if (isa<ConstantAggregateZero>(EI.getOperand(0)))
13332 return ReplaceInstUsesWith(EI, Constant::getNullValue(EI.getType()));
13334 if (ConstantVector *C = dyn_cast<ConstantVector>(EI.getOperand(0))) {
13335 // If vector val is constant with all elements the same, replace EI with
13336 // that element. When the elements are not identical, we cannot replace yet
13337 // (we do that below, but only when the index is constant).
13338 Constant *op0 = C->getOperand(0);
13339 for (unsigned i = 1; i != C->getNumOperands(); ++i)
13340 if (C->getOperand(i) != op0) {
13345 return ReplaceInstUsesWith(EI, op0);
13348 // If extracting a specified index from the vector, see if we can recursively
13349 // find a previously computed scalar that was inserted into the vector.
13350 if (ConstantInt *IdxC = dyn_cast<ConstantInt>(EI.getOperand(1))) {
13351 unsigned IndexVal = IdxC->getZExtValue();
13352 unsigned VectorWidth = EI.getVectorOperandType()->getNumElements();
13354 // If this is extracting an invalid index, turn this into undef, to avoid
13355 // crashing the code below.
13356 if (IndexVal >= VectorWidth)
13357 return ReplaceInstUsesWith(EI, UndefValue::get(EI.getType()));
13359 // This instruction only demands the single element from the input vector.
13360 // If the input vector has a single use, simplify it based on this use
13362 if (EI.getOperand(0)->hasOneUse() && VectorWidth != 1) {
13363 APInt UndefElts(VectorWidth, 0);
13364 APInt DemandedMask(VectorWidth, 1 << IndexVal);
13365 if (Value *V = SimplifyDemandedVectorElts(EI.getOperand(0),
13366 DemandedMask, UndefElts)) {
13367 EI.setOperand(0, V);
13372 if (Value *Elt = FindScalarElement(EI.getOperand(0), IndexVal, Context))
13373 return ReplaceInstUsesWith(EI, Elt);
13375 // If this extractelement is directly using a bitcast from a vector of
13376 // the same number of elements, see if we can find the source element from
13377 // it. In this case, we will end up needing to bitcast the scalars.
13378 if (BitCastInst *BCI = dyn_cast<BitCastInst>(EI.getOperand(0))) {
13379 if (const VectorType *VT =
13380 dyn_cast<VectorType>(BCI->getOperand(0)->getType()))
13381 if (VT->getNumElements() == VectorWidth)
13382 if (Value *Elt = FindScalarElement(BCI->getOperand(0),
13383 IndexVal, Context))
13384 return new BitCastInst(Elt, EI.getType());
13388 if (Instruction *I = dyn_cast<Instruction>(EI.getOperand(0))) {
13389 // Push extractelement into predecessor operation if legal and
13390 // profitable to do so
13391 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
13392 if (I->hasOneUse() &&
13393 CheapToScalarize(BO, isa<ConstantInt>(EI.getOperand(1)))) {
13395 Builder->CreateExtractElement(BO->getOperand(0), EI.getOperand(1),
13396 EI.getName()+".lhs");
13398 Builder->CreateExtractElement(BO->getOperand(1), EI.getOperand(1),
13399 EI.getName()+".rhs");
13400 return BinaryOperator::Create(BO->getOpcode(), newEI0, newEI1);
13402 } else if (InsertElementInst *IE = dyn_cast<InsertElementInst>(I)) {
13403 // Extracting the inserted element?
13404 if (IE->getOperand(2) == EI.getOperand(1))
13405 return ReplaceInstUsesWith(EI, IE->getOperand(1));
13406 // If the inserted and extracted elements are constants, they must not
13407 // be the same value, extract from the pre-inserted value instead.
13408 if (isa<Constant>(IE->getOperand(2)) && isa<Constant>(EI.getOperand(1))) {
13409 Worklist.AddValue(EI.getOperand(0));
13410 EI.setOperand(0, IE->getOperand(0));
13413 } else if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I)) {
13414 // If this is extracting an element from a shufflevector, figure out where
13415 // it came from and extract from the appropriate input element instead.
13416 if (ConstantInt *Elt = dyn_cast<ConstantInt>(EI.getOperand(1))) {
13417 unsigned SrcIdx = getShuffleMask(SVI)[Elt->getZExtValue()];
13418 Value *Src;
13419 unsigned LHSWidth =
13420 cast<VectorType>(SVI->getOperand(0)->getType())->getNumElements();
13422 if (SrcIdx < LHSWidth)
13423 Src = SVI->getOperand(0);
13424 else if (SrcIdx < LHSWidth*2) {
13425 SrcIdx -= LHSWidth;
13426 Src = SVI->getOperand(1);
13428 return ReplaceInstUsesWith(EI, UndefValue::get(EI.getType()));
13430 return ExtractElementInst::Create(Src,
13431 ConstantInt::get(Type::getInt32Ty(*Context), SrcIdx,
13435 // FIXME: Canonicalize extractelement(bitcast) -> bitcast(extractelement)
13440 /// CollectSingleShuffleElements - If V is a shuffle of values that ONLY returns
13441 /// elements from either LHS or RHS, return the shuffle mask and true.
13442 /// Otherwise, return false.
13443 static bool CollectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
13444 std::vector<Constant*> &Mask,
13445 LLVMContext *Context) {
13446 assert(V->getType() == LHS->getType() && V->getType() == RHS->getType() &&
13447 "Invalid CollectSingleShuffleElements");
13448 unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
13450 if (isa<UndefValue>(V)) {
13451 Mask.assign(NumElts, UndefValue::get(Type::getInt32Ty(*Context)));
13453 } else if (V == LHS) {
13454 for (unsigned i = 0; i != NumElts; ++i)
13455 Mask.push_back(ConstantInt::get(Type::getInt32Ty(*Context), i));
13457 } else if (V == RHS) {
13458 for (unsigned i = 0; i != NumElts; ++i)
13459 Mask.push_back(ConstantInt::get(Type::getInt32Ty(*Context), i+NumElts));
13461 } else if (InsertElementInst *IEI = dyn_cast<InsertElementInst>(V)) {
13462 // If this is an insert of an extract from some other vector, include it.
13463 Value *VecOp = IEI->getOperand(0);
13464 Value *ScalarOp = IEI->getOperand(1);
13465 Value *IdxOp = IEI->getOperand(2);
13467 if (!isa<ConstantInt>(IdxOp))
13468 return false;
13469 unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();
13471 if (isa<UndefValue>(ScalarOp)) { // inserting undef into vector.
13472 // Okay, we can handle this if the vector we are inserting into is
13473 // transitively ok.
13474 if (CollectSingleShuffleElements(VecOp, LHS, RHS, Mask, Context)) {
13475 // If so, update the mask to reflect the inserted undef.
13476 Mask[InsertedIdx] = UndefValue::get(Type::getInt32Ty(*Context));
13479 } else if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)){
13480 if (isa<ConstantInt>(EI->getOperand(1)) &&
13481 EI->getOperand(0)->getType() == V->getType()) {
13482 unsigned ExtractedIdx =
13483 cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
13485 // This must be extracting from either LHS or RHS.
13486 if (EI->getOperand(0) == LHS || EI->getOperand(0) == RHS) {
13487 // Okay, we can handle this if the vector we are inserting into is
13488 // transitively ok.
13489 if (CollectSingleShuffleElements(VecOp, LHS, RHS, Mask, Context)) {
13490 // If so, update the mask to reflect the inserted value.
13491 if (EI->getOperand(0) == LHS) {
13492 Mask[InsertedIdx % NumElts] =
13493 ConstantInt::get(Type::getInt32Ty(*Context), ExtractedIdx);
13495 assert(EI->getOperand(0) == RHS);
13496 Mask[InsertedIdx % NumElts] =
13497 ConstantInt::get(Type::getInt32Ty(*Context), ExtractedIdx+NumElts);
13506 // TODO: Handle shufflevector here!
13511 /// CollectShuffleElements - We are building a shuffle of V, using RHS as the
13512 /// RHS of the shuffle instruction, if it is not null. Return a shuffle mask
13513 /// that computes V and the LHS value of the shuffle.
13514 static Value *CollectShuffleElements(Value *V, std::vector<Constant*> &Mask,
13515 Value *&RHS, LLVMContext *Context) {
13516 assert(isa<VectorType>(V->getType()) &&
13517 (RHS == 0 || V->getType() == RHS->getType()) &&
13518 "Invalid shuffle!");
13519 unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
13521 if (isa<UndefValue>(V)) {
13522 Mask.assign(NumElts, UndefValue::get(Type::getInt32Ty(*Context)));
13524 } else if (isa<ConstantAggregateZero>(V)) {
13525 Mask.assign(NumElts, ConstantInt::get(Type::getInt32Ty(*Context), 0));
13527 } else if (InsertElementInst *IEI = dyn_cast<InsertElementInst>(V)) {
13528 // If this is an insert of an extract from some other vector, include it.
13529 Value *VecOp = IEI->getOperand(0);
13530 Value *ScalarOp = IEI->getOperand(1);
13531 Value *IdxOp = IEI->getOperand(2);
13533 if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)) {
13534 if (isa<ConstantInt>(EI->getOperand(1)) && isa<ConstantInt>(IdxOp) &&
13535 EI->getOperand(0)->getType() == V->getType()) {
13536 unsigned ExtractedIdx =
13537 cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
13538 unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();
13540 // Either the extracted from or inserted into vector must be RHSVec,
13541 // otherwise we'd end up with a shuffle of three inputs.
13542 if (EI->getOperand(0) == RHS || RHS == 0) {
13543 RHS = EI->getOperand(0);
13544 Value *V = CollectShuffleElements(VecOp, Mask, RHS, Context);
13545 Mask[InsertedIdx % NumElts] =
13546 ConstantInt::get(Type::getInt32Ty(*Context), NumElts+ExtractedIdx);
13550 if (VecOp == RHS) {
13551 Value *V = CollectShuffleElements(EI->getOperand(0), Mask,
13553 // Everything but the extracted element is replaced with the RHS.
13554 for (unsigned i = 0; i != NumElts; ++i) {
13555 if (i != InsertedIdx)
13556 Mask[i] = ConstantInt::get(Type::getInt32Ty(*Context), NumElts+i);
13561 // If this insertelement is a chain that comes from exactly these two
13562 // vectors, return the vector and the effective shuffle.
13563 if (CollectSingleShuffleElements(IEI, EI->getOperand(0), RHS, Mask,
13565 return EI->getOperand(0);
13570 // TODO: Handle shufflevector here!
13572 // Otherwise, can't do anything fancy. Return an identity vector.
13573 for (unsigned i = 0; i != NumElts; ++i)
13574 Mask.push_back(ConstantInt::get(Type::getInt32Ty(*Context), i));
13578 Instruction *InstCombiner::visitInsertElementInst(InsertElementInst &IE) {
13579 Value *VecOp = IE.getOperand(0);
13580 Value *ScalarOp = IE.getOperand(1);
13581 Value *IdxOp = IE.getOperand(2);
13583 // Inserting an undef value, or inserting into an undefined place: remove this.
13584 if (isa<UndefValue>(ScalarOp) || isa<UndefValue>(IdxOp))
13585 ReplaceInstUsesWith(IE, VecOp);
13587 // If the inserted element was extracted from some other vector, and if the
13588 // indexes are constant, try to turn this into a shufflevector operation.
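// Illustrative sketch (hypothetical values): a pattern such as
//   %e = extractelement <4 x i32> %a, i32 0
//   %r = insertelement <4 x i32> %b, i32 %e, i32 3
// can be expressed as a single shuffle of %b and %a:
//   %r = shufflevector <4 x i32> %b, <4 x i32> %a,
//                      <4 x i32> <i32 0, i32 1, i32 2, i32 4>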
13589 if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)) {
13590 if (isa<ConstantInt>(EI->getOperand(1)) && isa<ConstantInt>(IdxOp) &&
13591 EI->getOperand(0)->getType() == IE.getType()) {
13592 unsigned NumVectorElts = IE.getType()->getNumElements();
13593 unsigned ExtractedIdx =
13594 cast<ConstantInt>(EI->getOperand(1))->getZExtValue();
13595 unsigned InsertedIdx = cast<ConstantInt>(IdxOp)->getZExtValue();
13597 if (ExtractedIdx >= NumVectorElts) // Out of range extract.
13598 return ReplaceInstUsesWith(IE, VecOp);
13600 if (InsertedIdx >= NumVectorElts) // Out of range insert.
13601 return ReplaceInstUsesWith(IE, UndefValue::get(IE.getType()));
13603 // If we are extracting a value from a vector, then inserting it right
13604 // back into the same place, just use the input vector.
13605 if (EI->getOperand(0) == VecOp && ExtractedIdx == InsertedIdx)
13606 return ReplaceInstUsesWith(IE, VecOp);
13608 // If this insertelement isn't used by some other insertelement, turn it
13609 // (and any insertelements it points to) into one big shuffle.
13610 if (!IE.hasOneUse() || !isa<InsertElementInst>(IE.use_back())) {
13611 std::vector<Constant*> Mask;
13612 Value *RHS = 0;
13613 Value *LHS = CollectShuffleElements(&IE, Mask, RHS, Context);
13614 if (RHS == 0) RHS = UndefValue::get(LHS->getType());
13615 // We now have a shuffle of LHS, RHS, Mask.
13616 return new ShuffleVectorInst(LHS, RHS,
13617 ConstantVector::get(Mask));
13622 unsigned VWidth = cast<VectorType>(VecOp->getType())->getNumElements();
13623 APInt UndefElts(VWidth, 0);
13624 APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
13625 if (SimplifyDemandedVectorElts(&IE, AllOnesEltMask, UndefElts))
13632 Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
13633 Value *LHS = SVI.getOperand(0);
13634 Value *RHS = SVI.getOperand(1);
13635 std::vector<unsigned> Mask = getShuffleMask(&SVI);
13637 bool MadeChange = false;
13639 // Undefined shuffle mask -> undefined value.
13640 if (isa<UndefValue>(SVI.getOperand(2)))
13641 return ReplaceInstUsesWith(SVI, UndefValue::get(SVI.getType()));
13643 unsigned VWidth = cast<VectorType>(SVI.getType())->getNumElements();
13645 if (VWidth != cast<VectorType>(LHS->getType())->getNumElements())
13646 return 0;
13648 APInt UndefElts(VWidth, 0);
13649 APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
13650 if (SimplifyDemandedVectorElts(&SVI, AllOnesEltMask, UndefElts)) {
13651 LHS = SVI.getOperand(0);
13652 RHS = SVI.getOperand(1);
13656 // Canonicalize shuffle(x ,x,mask) -> shuffle(x, undef,mask')
13657 // Canonicalize shuffle(undef,x,mask) -> shuffle(x, undef,mask').
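// For example (illustrative): shufflevector %x, %x, <i32 0, i32 5, i32 2, i32 7>
// becomes shufflevector %x, undef, <i32 0, i32 1, i32 2, i32 3>, since mask
// indices >= 4 simply refer back to %x.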
13658 if (LHS == RHS || isa<UndefValue>(LHS)) {
13659 if (isa<UndefValue>(LHS) && LHS == RHS) {
13660 // shuffle(undef,undef,mask) -> undef.
13661 return ReplaceInstUsesWith(SVI, LHS);
13664 // Remap any references to RHS to use LHS.
13665 std::vector<Constant*> Elts;
13666 for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
13667 if (Mask[i] >= 2*e)
13668 Elts.push_back(UndefValue::get(Type::getInt32Ty(*Context)));
13670 if ((Mask[i] >= e && isa<UndefValue>(RHS)) ||
13671 (Mask[i] < e && isa<UndefValue>(LHS))) {
13672 Mask[i] = 2*e; // Turn into undef.
13673 Elts.push_back(UndefValue::get(Type::getInt32Ty(*Context)));
13675 Mask[i] = Mask[i] % e; // Force to LHS.
13676 Elts.push_back(ConstantInt::get(Type::getInt32Ty(*Context), Mask[i]));
13680 SVI.setOperand(0, SVI.getOperand(1));
13681 SVI.setOperand(1, UndefValue::get(RHS->getType()));
13682 SVI.setOperand(2, ConstantVector::get(Elts));
13683 LHS = SVI.getOperand(0);
13684 RHS = SVI.getOperand(1);
13688 // Analyze the shuffle: is the LHS or RHS an identity shuffle?
13689 bool isLHSID = true, isRHSID = true;
13691 for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
13692 if (Mask[i] >= e*2) continue; // Ignore undef values.
13693 // Is this an identity shuffle of the LHS value?
13694 isLHSID &= (Mask[i] == i);
13696 // Is this an identity shuffle of the RHS value?
13697 isRHSID &= (Mask[i]-e == i);
13700 // Eliminate identity shuffles.
13701 if (isLHSID) return ReplaceInstUsesWith(SVI, LHS);
13702 if (isRHSID) return ReplaceInstUsesWith(SVI, RHS);
13704 // If the LHS is a shufflevector itself, see if we can combine it with this
13705 // one without producing an unusual shuffle. Here we are really conservative:
13706 // we are absolutely afraid of producing a shuffle mask not in the input
13707 // program, because the code gen may not be smart enough to turn a merged
13708 // shuffle into two specific shuffles: it may produce worse code. As such,
13709 // we only merge two shuffles if the result is one of the two input shuffle
13710 // masks. In this case, merging the shuffles just removes one instruction,
13711 // which we know is safe. This is good for things like turning:
13712 // (splat(splat)) -> splat.
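// A concrete illustration (hypothetical values): with
//   %s = shufflevector <4 x i32> %v, <4 x i32> undef, <4 x i32> zeroinitializer
//   %t = shufflevector <4 x i32> %s, <4 x i32> undef, <4 x i32> zeroinitializer
// the composed mask is identical to the first splat mask, so %t can be
// rewritten to shuffle %v directly and the inner shuffle becomes redundant.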
13713 if (ShuffleVectorInst *LHSSVI = dyn_cast<ShuffleVectorInst>(LHS)) {
13714 if (isa<UndefValue>(RHS)) {
13715 std::vector<unsigned> LHSMask = getShuffleMask(LHSSVI);
13717 if (LHSMask.size() == Mask.size()) {
13718 std::vector<unsigned> NewMask;
13719 for (unsigned i = 0, e = Mask.size(); i != e; ++i)
13720 if (Mask[i] >= e)
13721 NewMask.push_back(2*e);
13722 else
13723 NewMask.push_back(LHSMask[Mask[i]]);
13725 // If the result mask is equal to the src shuffle or this
13726 // shuffle mask, do the replacement.
13727 if (NewMask == LHSMask || NewMask == Mask) {
13728 unsigned LHSInNElts =
13729 cast<VectorType>(LHSSVI->getOperand(0)->getType())->
13731 std::vector<Constant*> Elts;
13732 for (unsigned i = 0, e = NewMask.size(); i != e; ++i) {
13733 if (NewMask[i] >= LHSInNElts*2) {
13734 Elts.push_back(UndefValue::get(Type::getInt32Ty(*Context)));
13736 Elts.push_back(ConstantInt::get(Type::getInt32Ty(*Context),
13740 return new ShuffleVectorInst(LHSSVI->getOperand(0),
13741 LHSSVI->getOperand(1),
13742 ConstantVector::get(Elts));
13748 return MadeChange ? &SVI : 0;
13754 /// TryToSinkInstruction - Try to move the specified instruction from its
13755 /// current block into the beginning of DestBlock, which can only happen if it's
13756 /// safe to move the instruction past all of the instructions between it and the
13757 /// end of its block.
13758 static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {
13759 assert(I->hasOneUse() && "Invariants didn't hold!");
13761 // Cannot move control-flow-involving, volatile loads, vaarg, etc.
13762 if (isa<PHINode>(I) || I->mayHaveSideEffects() || isa<TerminatorInst>(I))
13765 // Do not sink alloca instructions out of the entry block.
13766 if (isa<AllocaInst>(I) && I->getParent() ==
13767 &DestBlock->getParent()->getEntryBlock())
13770 // We can only sink load instructions if there is nothing between the load and
13771 // the end of block that could change the value.
13772 if (I->mayReadFromMemory()) {
13773 for (BasicBlock::iterator Scan = I, E = I->getParent()->end();
13775 if (Scan->mayWriteToMemory())
13779 BasicBlock::iterator InsertPos = DestBlock->getFirstNonPHI();
13781 CopyPrecedingStopPoint(I, InsertPos);
13782 I->moveBefore(InsertPos);
13788 /// AddReachableCodeToWorklist - Walk the function in depth-first order, adding
13789 /// all reachable code to the worklist.
13791 /// This has a couple of tricks to make the code faster and more powerful. In
13792 /// particular, we constant fold and DCE instructions as we go, to avoid adding
13793 /// them to the worklist (this significantly speeds up instcombine on code where
13794 /// many instructions are dead or constant). Additionally, if we find a branch
13795 /// whose condition is a known constant, we only visit the reachable successors.
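/// For instance (illustrative): given 'br i1 true, label %A, label %B', only
/// %A is pushed onto the block worklist; if %B has no other predecessors it is
/// left unvisited and its instructions are cleaned up later in DoOneIteration.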
13797 static bool AddReachableCodeToWorklist(BasicBlock *BB,
13798 SmallPtrSet<BasicBlock*, 64> &Visited,
13800 const TargetData *TD) {
13801 bool MadeIRChange = false;
13802 SmallVector<BasicBlock*, 256> Worklist;
13803 Worklist.push_back(BB);
13805 std::vector<Instruction*> InstrsForInstCombineWorklist;
13806 InstrsForInstCombineWorklist.reserve(128);
13808 SmallPtrSet<ConstantExpr*, 64> FoldedConstants;
13810 while (!Worklist.empty()) {
13811 BB = Worklist.back();
13812 Worklist.pop_back();
13814 // We have now visited this block! If we've already been here, ignore it.
13815 if (!Visited.insert(BB)) continue;
13817 for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
13818 Instruction *Inst = BBI++;
13820 // DCE instruction if trivially dead.
13821 if (isInstructionTriviallyDead(Inst)) {
13823 DEBUG(errs() << "IC: DCE: " << *Inst << '\n');
13824 Inst->eraseFromParent();
13828 // ConstantProp instruction if trivially constant.
13829 if (!Inst->use_empty() && isa<Constant>(Inst->getOperand(0)))
13830 if (Constant *C = ConstantFoldInstruction(Inst, TD)) {
13831 DEBUG(errs() << "IC: ConstFold to: " << *C << " from: "
13833 Inst->replaceAllUsesWith(C);
13835 Inst->eraseFromParent();
13842 // See if we can constant fold its operands.
13843 for (User::op_iterator i = Inst->op_begin(), e = Inst->op_end();
13845 ConstantExpr *CE = dyn_cast<ConstantExpr>(i);
13846 if (CE == 0) continue;
13848 // If we already folded this constant, don't try again.
13849 if (!FoldedConstants.insert(CE))
13850 continue;
13852 Constant *NewC = ConstantFoldConstantExpression(CE, TD);
13853 if (NewC && NewC != CE) {
13855 MadeIRChange = true;
13861 InstrsForInstCombineWorklist.push_back(Inst);
13864 // Recursively visit successors. If this is a branch or switch on a
13865 // constant, only visit the reachable successor.
13866 TerminatorInst *TI = BB->getTerminator();
13867 if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
13868 if (BI->isConditional() && isa<ConstantInt>(BI->getCondition())) {
13869 bool CondVal = cast<ConstantInt>(BI->getCondition())->getZExtValue();
13870 BasicBlock *ReachableBB = BI->getSuccessor(!CondVal);
13871 Worklist.push_back(ReachableBB);
13874 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
13875 if (ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
13876 // See if this is an explicit destination.
13877 for (unsigned i = 1, e = SI->getNumSuccessors(); i != e; ++i)
13878 if (SI->getCaseValue(i) == Cond) {
13879 BasicBlock *ReachableBB = SI->getSuccessor(i);
13880 Worklist.push_back(ReachableBB);
13884 // Otherwise it is the default destination.
13885 Worklist.push_back(SI->getSuccessor(0));
13890 for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
13891 Worklist.push_back(TI->getSuccessor(i));
13894 // Once we've found all of the instructions to add to instcombine's worklist,
13895 // add them in reverse order. This way instcombine will visit from the top
13896 // of the function down. This jives well with the way that it adds all uses
13897 // of instructions to the worklist after doing a transformation, thus avoiding
13898 // some N^2 behavior in pathological cases.
13899 IC.Worklist.AddInitialGroup(&InstrsForInstCombineWorklist[0],
13900 InstrsForInstCombineWorklist.size());
13902 return MadeIRChange;
13905 bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
13906 MadeIRChange = false;
13908 DEBUG(errs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
13909 << F.getNameStr() << "\n");
13912 // Do a depth-first traversal of the function, populate the worklist with
13913 // the reachable instructions. Ignore blocks that are not reachable. Keep
13914 // track of which blocks we visit.
13915 SmallPtrSet<BasicBlock*, 64> Visited;
13916 MadeIRChange |= AddReachableCodeToWorklist(F.begin(), Visited, *this, TD);
13918 // Do a quick scan over the function. If we find any blocks that are
13919 // unreachable, remove any instructions inside of them. This prevents
13920 // the instcombine code from having to deal with some bad special cases.
13921 for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
13922 if (!Visited.count(BB)) {
13923 Instruction *Term = BB->getTerminator();
13924 while (Term != BB->begin()) { // Remove instrs bottom-up
13925 BasicBlock::iterator I = Term; --I;
13927 DEBUG(errs() << "IC: DCE: " << *I << '\n');
13928 // A debug intrinsic shouldn't force another iteration if we weren't
13929 // going to do one without it.
13930 if (!isa<DbgInfoIntrinsic>(I)) {
13932 MadeIRChange = true;
13935 // If I is not void type then replaceAllUsesWith undef.
13936 // This allows ValueHandlers and custom metadata to adjust itself.
13937 if (!I->getType()->isVoidTy())
13938 I->replaceAllUsesWith(UndefValue::get(I->getType()));
13939 I->eraseFromParent();
13944 while (!Worklist.isEmpty()) {
13945 Instruction *I = Worklist.RemoveOne();
13946 if (I == 0) continue; // skip null values.
13948 // Check to see if we can DCE the instruction.
13949 if (isInstructionTriviallyDead(I)) {
13950 DEBUG(errs() << "IC: DCE: " << *I << '\n');
13951 EraseInstFromFunction(*I);
13953 MadeIRChange = true;
13957 // Instruction isn't dead, see if we can constant propagate it.
13958 if (!I->use_empty() && isa<Constant>(I->getOperand(0)))
13959 if (Constant *C = ConstantFoldInstruction(I, TD)) {
13960 DEBUG(errs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n');
13962 // Add operands to the worklist.
13963 ReplaceInstUsesWith(*I, C);
13965 EraseInstFromFunction(*I);
13966 MadeIRChange = true;
13970 // See if we can trivially sink this instruction to a successor basic block.
13971 if (I->hasOneUse()) {
13972 BasicBlock *BB = I->getParent();
13973 Instruction *UserInst = cast<Instruction>(I->use_back());
13974 BasicBlock *UserParent;
13976 // Get the block the use occurs in.
13977 if (PHINode *PN = dyn_cast<PHINode>(UserInst))
13978 UserParent = PN->getIncomingBlock(I->use_begin().getUse());
13979 else
13980 UserParent = UserInst->getParent();
13982 if (UserParent != BB) {
13983 bool UserIsSuccessor = false;
13984 // See if the user is one of our successors.
13985 for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI)
13986 if (*SI == UserParent) {
13987 UserIsSuccessor = true;
13991 // If the user is one of our immediate successors, and if that successor
13992 // only has us as a predecessor (we'd have to split the critical edge
13993 // otherwise), we can keep going.
13994 if (UserIsSuccessor && UserParent->getSinglePredecessor())
13995 // Okay, the CFG is simple enough, try to sink this instruction.
13996 MadeIRChange |= TryToSinkInstruction(I, UserParent);
14000 // Now that we have an instruction, try combining it to simplify it.
14001 Builder->SetInsertPoint(I->getParent(), I);
14006 DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str(););
14007 DEBUG(errs() << "IC: Visiting: " << OrigI << '\n');
14009 if (Instruction *Result = visit(*I)) {
14011 // Should we replace the old instruction with a new one?
14013 DEBUG(errs() << "IC: Old = " << *I << '\n'
14014 << " New = " << *Result << '\n');
14016 // Everything uses the new instruction now.
14017 I->replaceAllUsesWith(Result);
14019 // Push the new instruction and any users onto the worklist.
14020 Worklist.Add(Result);
14021 Worklist.AddUsersToWorkList(*Result);
14023 // Move the name to the new instruction first.
14024 Result->takeName(I);
14026 // Insert the new instruction into the basic block...
14027 BasicBlock *InstParent = I->getParent();
14028 BasicBlock::iterator InsertPos = I;
14030 if (!isa<PHINode>(Result)) // If combining a PHI, don't insert
14031 while (isa<PHINode>(InsertPos)) // middle of a block of PHIs.
14032 ++InsertPos;
14034 InstParent->getInstList().insert(InsertPos, Result);
14036 EraseInstFromFunction(*I);
14039 DEBUG(errs() << "IC: Mod = " << OrigI << '\n'
14040 << " New = " << *I << '\n');
14043 // If the instruction was modified, it's possible that it is now dead.
14044 // if so, remove it.
14045 if (isInstructionTriviallyDead(I)) {
14046 EraseInstFromFunction(*I);
14049 Worklist.AddUsersToWorkList(*I);
14052 MadeIRChange = true;
14057 return MadeIRChange;
14061 bool InstCombiner::runOnFunction(Function &F) {
14062 MustPreserveLCSSA = mustPreserveAnalysisID(LCSSAID);
14063 Context = &F.getContext();
14064 TD = getAnalysisIfAvailable<TargetData>();
14067 /// Builder - This is an IRBuilder that automatically inserts new
14068 /// instructions into the worklist when they are created.
14069 IRBuilder<true, TargetFolder, InstCombineIRInserter>
14070 TheBuilder(F.getContext(), TargetFolder(TD),
14071 InstCombineIRInserter(Worklist));
14072 Builder = &TheBuilder;
14074 bool EverMadeChange = false;
14076 // Iterate while there is work to do.
14077 unsigned Iteration = 0;
14078 while (DoOneIteration(F, Iteration++))
14079 EverMadeChange = true;
14082 return EverMadeChange;
14085 FunctionPass *llvm::createInstructionCombiningPass() {
14086 return new InstCombiner();