X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTransforms%2FScalar%2FInstructionCombining.cpp;h=4a777b331b9dd6256a1ee146f0122045d997aece;hb=eed707b1e6097aac2bb6b3d47271f6300ace7f2e;hp=b2a6864194075dc74e5a5b4f0dfab9b7a9d7f0ed;hpb=a891518c4dfd7dddc2f88be0100f8204e738afb3;p=oota-llvm.git diff --git a/lib/Transforms/Scalar/InstructionCombining.cpp b/lib/Transforms/Scalar/InstructionCombining.cpp index b2a68641940..4a777b331b9 100644 --- a/lib/Transforms/Scalar/InstructionCombining.cpp +++ b/lib/Transforms/Scalar/InstructionCombining.cpp @@ -36,9 +36,11 @@ #define DEBUG_TYPE "instcombine" #include "llvm/Transforms/Scalar.h" #include "llvm/IntrinsicInst.h" +#include "llvm/LLVMContext.h" #include "llvm/Pass.h" #include "llvm/DerivedTypes.h" #include "llvm/GlobalVariable.h" +#include "llvm/Operator.h" #include "llvm/Analysis/ConstantFolding.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/Target/TargetData.h" @@ -47,6 +49,7 @@ #include "llvm/Support/CallSite.h" #include "llvm/Support/ConstantRange.h" #include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" #include "llvm/Support/GetElementPtrTypeIterator.h" #include "llvm/Support/InstVisitor.h" #include "llvm/Support/MathExtras.h" @@ -82,6 +85,9 @@ namespace { static char ID; // Pass identification, replacement for typeid InstCombiner() : FunctionPass(&ID) {} + LLVMContext *Context; + LLVMContext *getContext() const { return Context; } + /// AddToWorkList - Add the specified instruction to the worklist if it /// isn't already in it. void AddToWorkList(Instruction *I) { @@ -140,7 +146,7 @@ namespace { if (Instruction *Op = dyn_cast(*i)) { AddToWorkList(Op); // Set the operand to undef to drop the use. - *i = UndefValue::get(Op->getType()); + *i = Context->getUndef(Op->getType()); } return R; @@ -152,12 +158,11 @@ namespace { bool DoOneIteration(Function &F, unsigned ItNum); virtual void getAnalysisUsage(AnalysisUsage &AU) const { - AU.addRequired(); AU.addPreservedID(LCSSAID); AU.setPreservesCFG(); } - TargetData &getTargetData() const { return *TD; } + TargetData *getTargetData() const { return TD; } // Visitation implementation - Implement instruction combining for different // instruction types. 
The semantics are as follows: @@ -167,8 +172,11 @@ namespace { // otherwise - Change was made, replace I with returned instruction // Instruction *visitAdd(BinaryOperator &I); + Instruction *visitFAdd(BinaryOperator &I); Instruction *visitSub(BinaryOperator &I); + Instruction *visitFSub(BinaryOperator &I); Instruction *visitMul(BinaryOperator &I); + Instruction *visitFMul(BinaryOperator &I); Instruction *visitURem(BinaryOperator &I); Instruction *visitSRem(BinaryOperator &I); Instruction *visitFRem(BinaryOperator &I); @@ -181,8 +189,10 @@ namespace { Instruction *visitSDiv(BinaryOperator &I); Instruction *visitFDiv(BinaryOperator &I); Instruction *FoldAndOfICmps(Instruction &I, ICmpInst *LHS, ICmpInst *RHS); + Instruction *FoldAndOfFCmps(Instruction &I, FCmpInst *LHS, FCmpInst *RHS); Instruction *visitAnd(BinaryOperator &I); Instruction *FoldOrOfICmps(Instruction &I, ICmpInst *LHS, ICmpInst *RHS); + Instruction *FoldOrOfFCmps(Instruction &I, FCmpInst *LHS, FCmpInst *RHS); Instruction *FoldOrWithConstants(BinaryOperator &I, Value *Op, Value *A, Value *B, Value *C); Instruction *visitOr (BinaryOperator &I); @@ -218,11 +228,12 @@ namespace { Instruction *visitFPToSI(FPToSIInst &FI); Instruction *visitUIToFP(CastInst &CI); Instruction *visitSIToFP(CastInst &CI); - Instruction *visitPtrToInt(CastInst &CI); + Instruction *visitPtrToInt(PtrToIntInst &CI); Instruction *visitIntToPtr(IntToPtrInst &CI); Instruction *visitBitCast(BitCastInst &CI); Instruction *FoldSelectOpOp(SelectInst &SI, Instruction *TI, Instruction *FI); + Instruction *FoldSelectIntoOp(SelectInst &SI, Value*, Value*); Instruction *visitSelectInst(SelectInst &SI); Instruction *visitSelectInstWithICmp(SelectInst &SI, ICmpInst *ICI); Instruction *visitCallInst(CallInst &CI); @@ -274,7 +285,7 @@ namespace { if (V->getType() == Ty) return V; if (Constant *CV = dyn_cast(V)) - return ConstantExpr::getCast(opc, CV, Ty); + return Context->getConstantExprCast(opc, CV, Ty); Instruction *C = CastInst::Create(opc, V, Ty, V->getName(), &Pos); AddToWorkList(C); @@ -300,7 +311,7 @@ namespace { } else { // If we are replacing the instruction with itself, this must be in a // segment of unreachable code, so just clobber the instruction. - I.replaceAllUsesWith(UndefValue::get(I.getType())); + I.replaceAllUsesWith(Context->getUndef(I.getType())); return &I; } } @@ -386,7 +397,7 @@ namespace { Value *EvaluateInDifferentType(Value *V, const Type *Ty, bool isSigned); - bool CanEvaluateInDifferentType(Value *V, const IntegerType *Ty, + bool CanEvaluateInDifferentType(Value *V, const Type *Ty, unsigned CastOpc, int &NumCastsRemoved); unsigned GetOrEnforceKnownAlignment(Value *V, unsigned PrefAlign = 0); @@ -400,9 +411,11 @@ X("instcombine", "Combine redundant instructions"); // getComplexity: Assign a complexity or rank value to LLVM Values... // 0 -> undef, 1 -> Const, 2 -> Other, 3 -> Arg, 3 -> Unary, 4 -> OtherInst -static unsigned getComplexity(Value *V) { +static unsigned getComplexity(LLVMContext *Context, Value *V) { if (isa(V)) { - if (BinaryOperator::isNeg(V) || BinaryOperator::isNot(V)) + if (BinaryOperator::isNeg(V) || + BinaryOperator::isFNeg(V) || + BinaryOperator::isNot(V)) return 3; return 4; } @@ -430,29 +443,12 @@ static const Type *getPromotedType(const Type *Ty) { /// expression bitcast, or a GetElementPtrInst with all zero indices, return the /// operand value, otherwise return null. static Value *getBitCastOperand(Value *V) { - if (BitCastInst *I = dyn_cast(V)) - // BitCastInst? 
- return I->getOperand(0); - else if (GetElementPtrInst *GEP = dyn_cast(V)) { - // GetElementPtrInst? - if (GEP->hasAllZeroIndices()) - return GEP->getOperand(0); - } else if (ConstantExpr *CE = dyn_cast(V)) { - if (CE->getOpcode() == Instruction::BitCast) - // BitCast ConstantExp? - return CE->getOperand(0); - else if (CE->getOpcode() == Instruction::GetElementPtr) { - // GetElementPtr ConstantExp? - for (User::op_iterator I = CE->op_begin() + 1, E = CE->op_end(); - I != E; ++I) { - ConstantInt *CI = dyn_cast(I); - if (!CI || !CI->isZero()) - // Any non-zero indices? Not cast-like. - return 0; - } - // All-zero indices? This is just like casting. - return CE->getOperand(0); - } + if (Operator *O = dyn_cast(V)) { + if (O->getOpcode() == Instruction::BitCast) + return O->getOperand(0); + if (GEPOperator *GEP = dyn_cast(V)) + if (GEP->hasAllZeroIndices()) + return GEP->getPointerOperand(); } return 0; } @@ -466,7 +462,7 @@ isEliminableCastPair( const Type *DstTy, ///< The target type for the second cast instruction TargetData *TD ///< The target data for pointer size ) { - + const Type *SrcTy = CI->getOperand(0)->getType(); // A from above const Type *MidTy = CI->getType(); // B from above @@ -474,9 +470,17 @@ isEliminableCastPair( Instruction::CastOps firstOp = Instruction::CastOps(CI->getOpcode()); Instruction::CastOps secondOp = Instruction::CastOps(opcode); - return Instruction::CastOps( - CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy, - DstTy, TD->getIntPtrType())); + unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy, + DstTy, + TD ? TD->getIntPtrType() : 0); + + // We don't want to form an inttoptr or ptrtoint that converts to an integer + // type that differs from the pointer size. + if ((Res == Instruction::IntToPtr && SrcTy != TD->getIntPtrType()) || + (Res == Instruction::PtrToInt && DstTy != TD->getIntPtrType())) + Res = 0; + + return Instruction::CastOps(Res); } /// ValueRequiresCast - Return true if the cast from "V to Ty" actually results @@ -488,7 +492,7 @@ static bool ValueRequiresCast(Instruction::CastOps opcode, const Value *V, // If this is another cast that can be eliminated, it isn't codegen either. 
if (const CastInst *CI = dyn_cast(V)) - if (isEliminableCastPair(CI, opcode, Ty, TD)) + if (isEliminableCastPair(CI, opcode, Ty, TD)) return false; return true; } @@ -505,7 +509,8 @@ static bool ValueRequiresCast(Instruction::CastOps opcode, const Value *V, // bool InstCombiner::SimplifyCommutative(BinaryOperator &I) { bool Changed = false; - if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1))) + if (getComplexity(Context, I.getOperand(0)) < + getComplexity(Context, I.getOperand(1))) Changed = !I.swapOperands(); if (!I.isAssociative()) return Changed; @@ -513,7 +518,7 @@ bool InstCombiner::SimplifyCommutative(BinaryOperator &I) { if (BinaryOperator *Op = dyn_cast(I.getOperand(0))) if (Op->getOpcode() == Opcode && isa(Op->getOperand(1))) { if (isa(I.getOperand(1))) { - Constant *Folded = ConstantExpr::get(I.getOpcode(), + Constant *Folded = Context->getConstantExpr(I.getOpcode(), cast(I.getOperand(1)), cast(Op->getOperand(1))); I.setOperand(0, Op->getOperand(0)); @@ -526,7 +531,7 @@ bool InstCombiner::SimplifyCommutative(BinaryOperator &I) { Constant *C2 = cast(Op1->getOperand(1)); // Fold (op (op V1, C1), (op V2, C2)) ==> (op (op V1, V2), (op C1,C2)) - Constant *Folded = ConstantExpr::get(I.getOpcode(), C1, C2); + Constant *Folded = Context->getConstantExpr(I.getOpcode(), C1, C2); Instruction *New = BinaryOperator::Create(Opcode, Op->getOperand(0), Op1->getOperand(0), Op1->getName(), &I); @@ -543,7 +548,8 @@ bool InstCombiner::SimplifyCommutative(BinaryOperator &I) { /// so that theyare listed from right (least complex) to left (most complex). /// This puts constants before unary operators before binary operators. bool InstCombiner::SimplifyCompare(CmpInst &I) { - if (getComplexity(I.getOperand(0)) >= getComplexity(I.getOperand(1))) + if (getComplexity(Context, I.getOperand(0)) >= + getComplexity(Context, I.getOperand(1))) return false; I.swapOperands(); // Compare instructions are not associative so there's nothing else we can do. @@ -553,28 +559,47 @@ bool InstCombiner::SimplifyCompare(CmpInst &I) { // dyn_castNegVal - Given a 'sub' instruction, return the RHS of the instruction // if the LHS is a constant zero (which is the 'negate' form). // -static inline Value *dyn_castNegVal(Value *V) { +static inline Value *dyn_castNegVal(Value *V, LLVMContext *Context) { if (BinaryOperator::isNeg(V)) return BinaryOperator::getNegArgument(V); // Constants can be considered to be negated values if they can be folded. if (ConstantInt *C = dyn_cast(V)) - return ConstantExpr::getNeg(C); + return Context->getConstantExprNeg(C); if (ConstantVector *C = dyn_cast(V)) if (C->getType()->getElementType()->isInteger()) - return ConstantExpr::getNeg(C); + return Context->getConstantExprNeg(C); + + return 0; +} + +// dyn_castFNegVal - Given a 'fsub' instruction, return the RHS of the +// instruction if the LHS is a constant negative zero (which is the 'negate' +// form). +// +static inline Value *dyn_castFNegVal(Value *V, LLVMContext *Context) { + if (BinaryOperator::isFNeg(V)) + return BinaryOperator::getFNegArgument(V); + + // Constants can be considered to be negated values if they can be folded. 
+ if (ConstantFP *C = dyn_cast(V)) + return Context->getConstantExprFNeg(C); + + if (ConstantVector *C = dyn_cast(V)) + if (C->getType()->getElementType()->isFloatingPoint()) + return Context->getConstantExprFNeg(C); return 0; } -static inline Value *dyn_castNotVal(Value *V) { +static inline Value *dyn_castNotVal(Value *V, LLVMContext *Context) { if (BinaryOperator::isNot(V)) return BinaryOperator::getNotArgument(V); // Constants can be considered to be not'ed values... if (ConstantInt *C = dyn_cast(V)) - return ConstantInt::get(~C->getValue()); + return ConstantInt::get(*Context, ~C->getValue()); return 0; } @@ -583,7 +608,8 @@ static inline Value *dyn_castNotVal(Value *V) { // non-constant operand of the multiply, and set CST to point to the multiplier. // Otherwise, return null. // -static inline Value *dyn_castFoldableMul(Value *V, ConstantInt *&CST) { +static inline Value *dyn_castFoldableMul(Value *V, ConstantInt *&CST, + LLVMContext *Context) { if (V->hasOneUse() && V->getType()->isInteger()) if (Instruction *I = dyn_cast(V)) { if (I->getOpcode() == Instruction::Mul) @@ -594,7 +620,7 @@ static inline Value *dyn_castFoldableMul(Value *V, ConstantInt *&CST) { // The multiplier is really 1 << CST. uint32_t BitWidth = cast(V->getType())->getBitWidth(); uint32_t CSTVal = CST->getLimitedValue(BitWidth); - CST = ConstantInt::get(APInt(BitWidth, 1).shl(CSTVal)); + CST = ConstantInt::get(*Context, APInt(BitWidth, 1).shl(CSTVal)); return I->getOperand(0); } } @@ -611,46 +637,20 @@ static User *dyn_castGetElementPtr(Value *V) { return false; } -/// getOpcode - If this is an Instruction or a ConstantExpr, return the -/// opcode value. Otherwise return UserOp1. -static unsigned getOpcode(const Value *V) { - if (const Instruction *I = dyn_cast(V)) - return I->getOpcode(); - if (const ConstantExpr *CE = dyn_cast(V)) - return CE->getOpcode(); - // Use UserOp1 to mean there's no opcode. - return Instruction::UserOp1; -} - /// AddOne - Add one to a ConstantInt -static ConstantInt *AddOne(ConstantInt *C) { - APInt Val(C->getValue()); - return ConstantInt::get(++Val); +static Constant *AddOne(Constant *C, LLVMContext *Context) { + return Context->getConstantExprAdd(C, + ConstantInt::get(C->getType(), 1)); } /// SubOne - Subtract one from a ConstantInt -static ConstantInt *SubOne(ConstantInt *C) { - APInt Val(C->getValue()); - return ConstantInt::get(--Val); -} -/// Add - Add two ConstantInts together -static ConstantInt *Add(ConstantInt *C1, ConstantInt *C2) { - return ConstantInt::get(C1->getValue() + C2->getValue()); -} -/// And - Bitwise AND two ConstantInts together -static ConstantInt *And(ConstantInt *C1, ConstantInt *C2) { - return ConstantInt::get(C1->getValue() & C2->getValue()); -} -/// Subtract - Subtract one ConstantInt from another -static ConstantInt *Subtract(ConstantInt *C1, ConstantInt *C2) { - return ConstantInt::get(C1->getValue() - C2->getValue()); -} -/// Multiply - Multiply two ConstantInts together -static ConstantInt *Multiply(ConstantInt *C1, ConstantInt *C2) { - return ConstantInt::get(C1->getValue() * C2->getValue()); +static Constant *SubOne(ConstantInt *C, LLVMContext *Context) { + return Context->getConstantExprSub(C, + ConstantInt::get(C->getType(), 1)); } /// MultiplyOverflows - True if the multiply can not be expressed in an int /// this size. 
-static bool MultiplyOverflows(ConstantInt *C1, ConstantInt *C2, bool sign) { +static bool MultiplyOverflows(ConstantInt *C1, ConstantInt *C2, bool sign, + LLVMContext *Context) { uint32_t W = C1->getBitWidth(); APInt LHSExt = C1->getValue(), RHSExt = C2->getValue(); if (sign) { @@ -677,7 +677,7 @@ static bool MultiplyOverflows(ConstantInt *C1, ConstantInt *C2, bool sign) { /// are any bits set in the constant that are not demanded. If so, shrink the /// constant and return true. static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo, - APInt Demanded) { + APInt Demanded, LLVMContext *Context) { assert(I && "No instruction?"); assert(OpNo < I->getNumOperands() && "Operand index too large"); @@ -692,7 +692,7 @@ static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo, // This instruction is producing bits that are not demanded. Shrink the RHS. Demanded &= OpC->getValue(); - I->setOperand(OpNo, ConstantInt::get(Demanded)); + I->setOperand(OpNo, ConstantInt::get(*Context, Demanded)); return true; } @@ -700,15 +700,13 @@ static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo, // set of known zero and one bits, compute the maximum and minimum values that // could have the specified known zero and known one bits, returning them in // min/max. -static void ComputeSignedMinMaxValuesFromKnownBits(const Type *Ty, - const APInt& KnownZero, +static void ComputeSignedMinMaxValuesFromKnownBits(const APInt& KnownZero, const APInt& KnownOne, APInt& Min, APInt& Max) { - uint32_t BitWidth = cast(Ty)->getBitWidth(); - assert(KnownZero.getBitWidth() == BitWidth && - KnownOne.getBitWidth() == BitWidth && - Min.getBitWidth() == BitWidth && Max.getBitWidth() == BitWidth && - "Ty, KnownZero, KnownOne and Min, Max must have equal bitwidth."); + assert(KnownZero.getBitWidth() == KnownOne.getBitWidth() && + KnownZero.getBitWidth() == Min.getBitWidth() && + KnownZero.getBitWidth() == Max.getBitWidth() && + "KnownZero, KnownOne and Min, Max must have equal bitwidth."); APInt UnknownBits = ~(KnownZero|KnownOne); // The minimum value is when all unknown bits are zeros, EXCEPT for the sign @@ -716,9 +714,9 @@ static void ComputeSignedMinMaxValuesFromKnownBits(const Type *Ty, Min = KnownOne; Max = KnownOne|UnknownBits; - if (UnknownBits[BitWidth-1]) { // Sign bit is unknown - Min.set(BitWidth-1); - Max.clear(BitWidth-1); + if (UnknownBits.isNegative()) { // Sign bit is unknown + Min.set(Min.getBitWidth()-1); + Max.clear(Max.getBitWidth()-1); } } @@ -726,14 +724,12 @@ static void ComputeSignedMinMaxValuesFromKnownBits(const Type *Ty, // a set of known zero and one bits, compute the maximum and minimum values that // could have the specified known zero and known one bits, returning them in // min/max. 
-static void ComputeUnsignedMinMaxValuesFromKnownBits(const Type *Ty, - const APInt &KnownZero, +static void ComputeUnsignedMinMaxValuesFromKnownBits(const APInt &KnownZero, const APInt &KnownOne, APInt &Min, APInt &Max) { - uint32_t BitWidth = cast(Ty)->getBitWidth(); BitWidth = BitWidth; - assert(KnownZero.getBitWidth() == BitWidth && - KnownOne.getBitWidth() == BitWidth && - Min.getBitWidth() == BitWidth && Max.getBitWidth() && + assert(KnownZero.getBitWidth() == KnownOne.getBitWidth() && + KnownZero.getBitWidth() == Min.getBitWidth() && + KnownZero.getBitWidth() == Max.getBitWidth() && "Ty, KnownZero, KnownOne and Min, Max must have equal bitwidth."); APInt UnknownBits = ~(KnownZero|KnownOne); @@ -747,7 +743,7 @@ static void ComputeUnsignedMinMaxValuesFromKnownBits(const Type *Ty, /// SimplifyDemandedBits knows about. See if the instruction has any /// properties that allow us to simplify its operands. bool InstCombiner::SimplifyDemandedInstructionBits(Instruction &Inst) { - unsigned BitWidth = cast(Inst.getType())->getBitWidth(); + unsigned BitWidth = Inst.getType()->getScalarSizeInBits(); APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0); APInt DemandedMask(APInt::getAllOnesValue(BitWidth)); @@ -800,36 +796,49 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask, assert(V != 0 && "Null pointer of Value???"); assert(Depth <= 6 && "Limit Search Depth"); uint32_t BitWidth = DemandedMask.getBitWidth(); - const IntegerType *VTy = cast(V->getType()); - assert(VTy->getBitWidth() == BitWidth && - KnownZero.getBitWidth() == BitWidth && + const Type *VTy = V->getType(); + assert((TD || !isa(VTy)) && + "SimplifyDemandedBits needs to know bit widths!"); + assert((!TD || TD->getTypeSizeInBits(VTy->getScalarType()) == BitWidth) && + (!VTy->isIntOrIntVector() || + VTy->getScalarSizeInBits() == BitWidth) && + KnownZero.getBitWidth() == BitWidth && KnownOne.getBitWidth() == BitWidth && - "Value *V, DemandedMask, KnownZero and KnownOne \ - must have same BitWidth"); + "Value *V, DemandedMask, KnownZero and KnownOne " + "must have same BitWidth"); if (ConstantInt *CI = dyn_cast(V)) { // We know all of the bits for a constant! KnownOne = CI->getValue() & DemandedMask; KnownZero = ~KnownOne & DemandedMask; return 0; } - + if (isa(V)) { + // We know all of the bits for a constant! + KnownOne.clear(); + KnownZero = DemandedMask; + return 0; + } + KnownZero.clear(); KnownOne.clear(); if (DemandedMask == 0) { // Not demanding any bits from V. if (isa(V)) return 0; - return UndefValue::get(VTy); + return Context->getUndef(VTy); } if (Depth == 6) // Limit search depth. return 0; - Instruction *I = dyn_cast(V); - if (!I) return 0; // Only analyze instructions. - APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0); APInt &RHSKnownZero = KnownZero, &RHSKnownOne = KnownOne; + Instruction *I = dyn_cast(V); + if (!I) { + ComputeMaskedBits(V, DemandedMask, RHSKnownZero, RHSKnownOne, Depth); + return 0; // Only analyze instructions. + } + // If there are multiple uses of this value and we aren't at the root, then // we can't do any simplifications of the operands, because DemandedMask // only reflects the bits demanded by *one* of the users. @@ -857,7 +866,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask, // If all of the demanded bits in the inputs are known zeros, return zero. 
if ((DemandedMask & (RHSKnownZero|LHSKnownZero)) == DemandedMask) - return Constant::getNullValue(VTy); + return Context->getNullValue(VTy); } else if (I->getOpcode() == Instruction::Or) { // We can simplify (X|Y) -> X or Y in the user's context if we know that @@ -926,10 +935,10 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask, // If all of the demanded bits in the inputs are known zeros, return zero. if ((DemandedMask & (RHSKnownZero|LHSKnownZero)) == DemandedMask) - return Constant::getNullValue(VTy); + return Context->getNullValue(VTy); // If the RHS is a constant, see if we can simplify it. - if (ShrinkDemandedConstant(I, 1, DemandedMask & ~LHSKnownZero)) + if (ShrinkDemandedConstant(I, 1, DemandedMask & ~LHSKnownZero, Context)) return I; // Output known-1 bits are only known if set in both the LHS & RHS. @@ -966,7 +975,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask, return I->getOperand(1); // If the RHS is a constant, see if we can simplify it. - if (ShrinkDemandedConstant(I, 1, DemandedMask)) + if (ShrinkDemandedConstant(I, 1, DemandedMask, Context)) return I; // Output known-0 bits are only known if clear in both the LHS & RHS. @@ -1014,7 +1023,8 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask, if ((DemandedMask & (RHSKnownZero|RHSKnownOne)) == DemandedMask) { // all known if ((RHSKnownOne & LHSKnownOne) == RHSKnownOne) { - Constant *AndC = ConstantInt::get(~RHSKnownOne & DemandedMask); + Constant *AndC = ConstantInt::get(*Context, + ~RHSKnownOne & DemandedMask); Instruction *And = BinaryOperator::CreateAnd(I->getOperand(0), AndC, "tmp"); return InsertNewInstBefore(And, *I); @@ -1023,7 +1033,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask, // If the RHS is a constant, see if we can simplify it. // FIXME: for XOR, we prefer to force bits to 1 if they will make a -1. - if (ShrinkDemandedConstant(I, 1, DemandedMask)) + if (ShrinkDemandedConstant(I, 1, DemandedMask, Context)) return I; RHSKnownZero = KnownZeroOut; @@ -1040,8 +1050,8 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask, assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?"); // If the operands are constants, see if we can simplify them. - if (ShrinkDemandedConstant(I, 1, DemandedMask) || - ShrinkDemandedConstant(I, 2, DemandedMask)) + if (ShrinkDemandedConstant(I, 1, DemandedMask, Context) || + ShrinkDemandedConstant(I, 2, DemandedMask, Context)) return I; // Only known if known in both the LHS and RHS. @@ -1049,7 +1059,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask, RHSKnownZero &= LHSKnownZero; break; case Instruction::Trunc: { - unsigned truncBf = I->getOperand(0)->getType()->getPrimitiveSizeInBits(); + unsigned truncBf = I->getOperand(0)->getType()->getScalarSizeInBits(); DemandedMask.zext(truncBf); RHSKnownZero.zext(truncBf); RHSKnownOne.zext(truncBf); @@ -1063,8 +1073,22 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask, break; } case Instruction::BitCast: - if (!I->getOperand(0)->getType()->isInteger()) + if (!I->getOperand(0)->getType()->isIntOrIntVector()) return false; // vector->int or fp->int? + + if (const VectorType *DstVTy = dyn_cast(I->getType())) { + if (const VectorType *SrcVTy = + dyn_cast(I->getOperand(0)->getType())) { + if (DstVTy->getNumElements() != SrcVTy->getNumElements()) + // Don't touch a bitcast between vectors of different element counts. 
+ return false; + } else + // Don't touch a scalar-to-vector bitcast. + return false; + } else if (isa(I->getOperand(0)->getType())) + // Don't touch a vector-to-scalar bitcast. + return false; + if (SimplifyDemandedBits(I->getOperandUse(0), DemandedMask, RHSKnownZero, RHSKnownOne, Depth+1)) return I; @@ -1072,7 +1096,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask, break; case Instruction::ZExt: { // Compute the bits in the result that are not present in the input. - unsigned SrcBitWidth =I->getOperand(0)->getType()->getPrimitiveSizeInBits(); + unsigned SrcBitWidth =I->getOperand(0)->getType()->getScalarSizeInBits(); DemandedMask.trunc(SrcBitWidth); RHSKnownZero.trunc(SrcBitWidth); @@ -1090,7 +1114,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask, } case Instruction::SExt: { // Compute the bits in the result that are not present in the input. - unsigned SrcBitWidth =I->getOperand(0)->getType()->getPrimitiveSizeInBits(); + unsigned SrcBitWidth =I->getOperand(0)->getType()->getScalarSizeInBits(); APInt InputDemandedBits = DemandedMask & APInt::getLowBitsSet(BitWidth, SrcBitWidth); @@ -1151,7 +1175,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask, // If the RHS of the add has bits set that can't affect the input, reduce // the constant. - if (ShrinkDemandedConstant(I, 1, InDemandedBits)) + if (ShrinkDemandedConstant(I, 1, InDemandedBits, Context)) return I; // Avoid excess work. @@ -1314,7 +1338,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask, if (ConstantInt *Rem = dyn_cast(I->getOperand(1))) { APInt RA = Rem->getValue().abs(); if (RA.isPowerOf2()) { - if (DemandedMask.ule(RA)) // srem won't affect demanded bits + if (DemandedMask.ult(RA)) // srem won't affect demanded bits return I->getOperand(0); APInt LowBits = RA - 1; @@ -1391,8 +1415,12 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask, // If the client is only demanding bits that we know, return the known // constant. - if ((DemandedMask & (RHSKnownZero|RHSKnownOne)) == DemandedMask) - return ConstantInt::get(RHSKnownOne); + if ((DemandedMask & (RHSKnownZero|RHSKnownOne)) == DemandedMask) { + Constant *C = ConstantInt::get(*Context, RHSKnownOne); + if (isa(V->getType())) + C = Context->getConstantExprIntToPtr(C, V->getType()); + return C; + } return false; } @@ -1418,13 +1446,13 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, return 0; } else if (DemandedElts == 0) { // If nothing is demanded, provide undef. UndefElts = EltMask; - return UndefValue::get(V->getType()); + return Context->getUndef(V->getType()); } UndefElts = 0; if (ConstantVector *CP = dyn_cast(V)) { const Type *EltTy = cast(V->getType())->getElementType(); - Constant *Undef = UndefValue::get(EltTy); + Constant *Undef = Context->getUndef(EltTy); std::vector Elts; for (unsigned i = 0; i != VWidth; ++i) @@ -1439,7 +1467,7 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, } // If we changed the constant, return it. - Constant *NewCP = ConstantVector::get(Elts); + Constant *NewCP = Context->getConstantVector(Elts); return NewCP != CP ? 
NewCP : 0; } else if (isa(V)) { // Simplify the CAZ to a ConstantVector where the non-demanded elements are @@ -1451,20 +1479,20 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, return 0; const Type *EltTy = cast(V->getType())->getElementType(); - Constant *Zero = Constant::getNullValue(EltTy); - Constant *Undef = UndefValue::get(EltTy); + Constant *Zero = Context->getNullValue(EltTy); + Constant *Undef = Context->getUndef(EltTy); std::vector Elts; for (unsigned i = 0; i != VWidth; ++i) { Constant *Elt = DemandedElts[i] ? Zero : Undef; Elts.push_back(Elt); } UndefElts = DemandedElts ^ EltMask; - return ConstantVector::get(Elts); + return Context->getConstantVector(Elts); } // Limit search depth. if (Depth == 10) - return false; + return 0; // If multiple users are using the root value, procede with // simplification conservatively assuming that all elements @@ -1475,14 +1503,14 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, // the main instcombine process. if (Depth != 0) // TODO: Just compute the UndefElts information recursively. - return false; + return 0; // Conservatively assume that all elements are needed. DemandedElts = EltMask; } Instruction *I = dyn_cast(V); - if (!I) return false; // Only analyze instructions. + if (!I) return 0; // Only analyze instructions. bool MadeChange = false; APInt UndefElts2(VWidth, 0); @@ -1573,12 +1601,12 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, std::vector Elts; for (unsigned i = 0; i < VWidth; ++i) { if (UndefElts[i]) - Elts.push_back(UndefValue::get(Type::Int32Ty)); + Elts.push_back(Context->getUndef(Type::Int32Ty)); else Elts.push_back(ConstantInt::get(Type::Int32Ty, Shuffle->getMaskValue(i))); } - I->setOperand(2, ConstantVector::get(Elts)); + I->setOperand(2, Context->getConstantVector(Elts)); MadeChange = true; } break; @@ -1631,7 +1659,7 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, UndefElts = UndefElts2; if (VWidth > InVWidth) { - assert(0 && "Unimp"); + llvm_unreachable("Unimp"); // If there are more elements in the result than there are in the source, // then an output element is undef if the corresponding input element is // undef. @@ -1639,7 +1667,7 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, if (UndefElts2[OutIdx/Ratio]) UndefElts.set(OutIdx); } else if (VWidth < InVWidth) { - assert(0 && "Unimp"); + llvm_unreachable("Unimp"); // If there are more elements in the source than there are in the result, // then a result element is undef if all of the corresponding input // elements are undef. @@ -1705,26 +1733,29 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, Value *LHS = II->getOperand(1); Value *RHS = II->getOperand(2); // Extract the element as scalars. 
- LHS = InsertNewInstBefore(new ExtractElementInst(LHS, 0U,"tmp"), *II); - RHS = InsertNewInstBefore(new ExtractElementInst(RHS, 0U,"tmp"), *II); + LHS = InsertNewInstBefore(new ExtractElementInst(LHS, + ConstantInt::get(Type::Int32Ty, 0U, false), "tmp"), *II); + RHS = InsertNewInstBefore(new ExtractElementInst(RHS, + ConstantInt::get(Type::Int32Ty, 0U, false), "tmp"), *II); switch (II->getIntrinsicID()) { - default: assert(0 && "Case stmts out of sync!"); + default: llvm_unreachable("Case stmts out of sync!"); case Intrinsic::x86_sse_sub_ss: case Intrinsic::x86_sse2_sub_sd: - TmpV = InsertNewInstBefore(BinaryOperator::CreateSub(LHS, RHS, + TmpV = InsertNewInstBefore(BinaryOperator::CreateFSub(LHS, RHS, II->getName()), *II); break; case Intrinsic::x86_sse_mul_ss: case Intrinsic::x86_sse2_mul_sd: - TmpV = InsertNewInstBefore(BinaryOperator::CreateMul(LHS, RHS, + TmpV = InsertNewInstBefore(BinaryOperator::CreateFMul(LHS, RHS, II->getName()), *II); break; } Instruction *New = - InsertElementInst::Create(UndefValue::get(II->getType()), TmpV, 0U, - II->getName()); + InsertElementInst::Create( + Context->getUndef(II->getType()), TmpV, + ConstantInt::get(Type::Int32Ty, 0U, false), II->getName()); InsertNewInstBefore(New, *II); AddSoonDeadInstToWorklist(*II, 0); return New; @@ -1752,7 +1783,8 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, /// 'shouldApply' and 'apply' methods. /// template -static Instruction *AssociativeOpt(BinaryOperator &Root, const Functor &F) { +static Instruction *AssociativeOpt(BinaryOperator &Root, const Functor &F, + LLVMContext *Context) { unsigned Opcode = Root.getOpcode(); Value *LHS = Root.getOperand(0); @@ -1785,7 +1817,7 @@ static Instruction *AssociativeOpt(BinaryOperator &Root, const Functor &F) { // Make what used to be the LHS of the root be the user of the root... 
Value *ExtraOperand = TmpLHSI->getOperand(1); if (&Root == TmpLHSI) { - Root.replaceAllUsesWith(Constant::getNullValue(TmpLHSI->getType())); + Root.replaceAllUsesWith(Context->getNullValue(TmpLHSI->getType())); return 0; } Root.replaceAllUsesWith(TmpLHSI); // Users now use TmpLHSI @@ -1824,7 +1856,8 @@ namespace { // AddRHS - Implements: X + X --> X << 1 struct AddRHS { Value *RHS; - AddRHS(Value *rhs) : RHS(rhs) {} + LLVMContext *Context; + AddRHS(Value *rhs, LLVMContext *C) : RHS(rhs), Context(C) {} bool shouldApply(Value *LHS) const { return LHS == RHS; } Instruction *apply(BinaryOperator &Add) const { return BinaryOperator::CreateShl(Add.getOperand(0), @@ -1836,11 +1869,12 @@ struct AddRHS { // iff C1&C2 == 0 struct AddMaskingAnd { Constant *C2; - AddMaskingAnd(Constant *c) : C2(c) {} + LLVMContext *Context; + AddMaskingAnd(Constant *c, LLVMContext *C) : C2(c), Context(C) {} bool shouldApply(Value *LHS) const { ConstantInt *C1; - return match(LHS, m_And(m_Value(), m_ConstantInt(C1))) && - ConstantExpr::getAnd(C1, C2)->isNullValue(); + return match(LHS, m_And(m_Value(), m_ConstantInt(C1)), *Context) && + Context->getConstantExprAnd(C1, C2)->isNullValue(); } Instruction *apply(BinaryOperator &Add) const { return BinaryOperator::CreateOr(Add.getOperand(0), Add.getOperand(1)); @@ -1851,6 +1885,8 @@ struct AddMaskingAnd { static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO, InstCombiner *IC) { + LLVMContext *Context = IC->getContext(); + if (CastInst *CI = dyn_cast(&I)) { return IC->InsertCastBefore(CI->getOpcode(), SO, I.getType(), I); } @@ -1861,8 +1897,8 @@ static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO, if (Constant *SOC = dyn_cast(SO)) { if (ConstIsRHS) - return ConstantExpr::get(I.getOpcode(), SOC, ConstOperand); - return ConstantExpr::get(I.getOpcode(), ConstOperand, SOC); + return Context->getConstantExpr(I.getOpcode(), SOC, ConstOperand); + return Context->getConstantExpr(I.getOpcode(), ConstOperand, SOC); } Value *Op0 = SO, *Op1 = ConstOperand; @@ -1872,11 +1908,10 @@ static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO, if (BinaryOperator *BO = dyn_cast(&I)) New = BinaryOperator::Create(BO->getOpcode(), Op0, Op1,SO->getName()+".op"); else if (CmpInst *CI = dyn_cast(&I)) - New = CmpInst::Create(CI->getOpcode(), CI->getPredicate(), Op0, Op1, - SO->getName()+".cmp"); + New = CmpInst::Create(*Context, CI->getOpcode(), CI->getPredicate(), + Op0, Op1, SO->getName()+".cmp"); else { - assert(0 && "Unknown binary instruction type!"); - abort(); + llvm_unreachable("Unknown binary instruction type!"); } return IC->InsertNewInstBefore(New, I); } @@ -1952,9 +1987,9 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) { Value *InV = 0; if (Constant *InC = dyn_cast(PN->getIncomingValue(i))) { if (CmpInst *CI = dyn_cast(&I)) - InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C); + InV = Context->getConstantExprCompare(CI->getPredicate(), InC, C); else - InV = ConstantExpr::get(I.getOpcode(), InC, C); + InV = Context->getConstantExpr(I.getOpcode(), InC, C); } else { assert(PN->getIncomingBlock(i) == NonConstBB); if (BinaryOperator *BO = dyn_cast(&I)) @@ -1962,12 +1997,12 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) { PN->getIncomingValue(i), C, "phitmp", NonConstBB->getTerminator()); else if (CmpInst *CI = dyn_cast(&I)) - InV = CmpInst::Create(CI->getOpcode(), + InV = CmpInst::Create(*Context, CI->getOpcode(), CI->getPredicate(), PN->getIncomingValue(i), C, "phitmp", NonConstBB->getTerminator()); else - 
assert(0 && "Unknown binop!"); + llvm_unreachable("Unknown binop!"); AddToWorkList(cast(InV)); } @@ -1979,7 +2014,7 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) { for (unsigned i = 0; i != NumPHIValues; ++i) { Value *InV; if (Constant *InC = dyn_cast(PN->getIncomingValue(i))) { - InV = ConstantExpr::getCast(CI->getOpcode(), InC, RetTy); + InV = Context->getConstantExprCast(CI->getOpcode(), InC, RetTy); } else { assert(PN->getIncomingBlock(i) == NonConstBB); InV = CastInst::Create(CI->getOpcode(), PN->getIncomingValue(i), @@ -2031,14 +2066,8 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) { return ReplaceInstUsesWith(I, RHS); // X + 0 --> X - if (!I.getType()->isFPOrFPVector()) { // NOTE: -0 + +0 = +0. - if (RHSC->isNullValue()) - return ReplaceInstUsesWith(I, LHS); - } else if (ConstantFP *CFP = dyn_cast(RHSC)) { - if (CFP->isExactlyValue(ConstantFP::getNegativeZero - (I.getType())->getValueAPF())) - return ReplaceInstUsesWith(I, LHS); - } + if (RHSC->isNullValue()) + return ReplaceInstUsesWith(I, LHS); if (ConstantInt *CI = dyn_cast(RHSC)) { // X + (signbit) --> X ^ signbit @@ -2049,16 +2078,13 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) { // See if SimplifyDemandedBits can simplify this. This handles stuff like // (X & 254)+1 -> (X&254)|1 - if (!isa(I.getType()) && SimplifyDemandedInstructionBits(I)) + if (SimplifyDemandedInstructionBits(I)) return &I; - // zext(i1) - 1 -> select i1, 0, -1 + // zext(bool) + C -> bool ? C + 1 : C if (ZExtInst *ZI = dyn_cast(LHS)) - if (CI->isAllOnesValue() && - ZI->getOperand(0)->getType() == Type::Int1Ty) - return SelectInst::Create(ZI->getOperand(0), - Constant::getNullValue(I.getType()), - ConstantInt::getAllOnesValue(I.getType())); + if (ZI->getSrcTy() == Type::Int1Ty) + return SelectInst::Create(ZI->getOperand(0), AddOne(CI, Context), CI); } if (isa(LHS)) @@ -2068,8 +2094,8 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) { ConstantInt *XorRHS = 0; Value *XorLHS = 0; if (isa(RHSC) && - match(LHS, m_Xor(m_Value(XorLHS), m_ConstantInt(XorRHS)))) { - uint32_t TySizeBits = I.getType()->getPrimitiveSizeInBits(); + match(LHS, m_Xor(m_Value(XorLHS), m_ConstantInt(XorRHS)), *Context)) { + uint32_t TySizeBits = I.getType()->getScalarSizeInBits(); const APInt& RHSVal = cast(RHSC)->getValue(); uint32_t Size = TySizeBits / 2; @@ -2117,7 +2143,8 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) { // X + X --> X << 1 if (I.getType()->isInteger()) { - if (Instruction *Result = AssociativeOpt(I, AddRHS(RHS))) return Result; + if (Instruction *Result = AssociativeOpt(I, AddRHS(RHS, Context), Context)) + return Result; if (Instruction *RHSI = dyn_cast(RHS)) { if (RHSI->getOpcode() == Instruction::Sub) @@ -2133,12 +2160,12 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) { // -A + B --> B - A // -A + -B --> -(A + B) - if (Value *LHSV = dyn_castNegVal(LHS)) { + if (Value *LHSV = dyn_castNegVal(LHS, Context)) { if (LHS->getType()->isIntOrIntVector()) { - if (Value *RHSV = dyn_castNegVal(RHS)) { + if (Value *RHSV = dyn_castNegVal(RHS, Context)) { Instruction *NewAdd = BinaryOperator::CreateAdd(LHSV, RHSV, "sum"); InsertNewInstBefore(NewAdd, I); - return BinaryOperator::CreateNeg(NewAdd); + return BinaryOperator::CreateNeg(*Context, NewAdd); } } @@ -2147,33 +2174,34 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) { // A + -B --> A - B if (!isa(RHS)) - if (Value *V = dyn_castNegVal(RHS)) + if (Value *V = dyn_castNegVal(RHS, Context)) return BinaryOperator::CreateSub(LHS, V); ConstantInt *C2; - if 
(Value *X = dyn_castFoldableMul(LHS, C2)) { + if (Value *X = dyn_castFoldableMul(LHS, C2, Context)) { if (X == RHS) // X*C + X --> X * (C+1) - return BinaryOperator::CreateMul(RHS, AddOne(C2)); + return BinaryOperator::CreateMul(RHS, AddOne(C2, Context)); // X*C1 + X*C2 --> X * (C1+C2) ConstantInt *C1; - if (X == dyn_castFoldableMul(RHS, C1)) - return BinaryOperator::CreateMul(X, Add(C1, C2)); + if (X == dyn_castFoldableMul(RHS, C1, Context)) + return BinaryOperator::CreateMul(X, Context->getConstantExprAdd(C1, C2)); } // X + X*C --> X * (C+1) - if (dyn_castFoldableMul(RHS, C2) == LHS) - return BinaryOperator::CreateMul(LHS, AddOne(C2)); + if (dyn_castFoldableMul(RHS, C2, Context) == LHS) + return BinaryOperator::CreateMul(LHS, AddOne(C2, Context)); // X + ~X --> -1 since ~X = -X-1 - if (dyn_castNotVal(LHS) == RHS || dyn_castNotVal(RHS) == LHS) - return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType())); + if (dyn_castNotVal(LHS, Context) == RHS || + dyn_castNotVal(RHS, Context) == LHS) + return ReplaceInstUsesWith(I, Context->getAllOnesValue(I.getType())); // (A & C1)+(B & C2) --> (A & C1)|(B & C2) iff C1&C2 == 0 - if (match(RHS, m_And(m_Value(), m_ConstantInt(C2)))) - if (Instruction *R = AssociativeOpt(I, AddMaskingAnd(C2))) + if (match(RHS, m_And(m_Value(), m_ConstantInt(C2)), *Context)) + if (Instruction *R = AssociativeOpt(I, AddMaskingAnd(C2, Context), Context)) return R; // A+B --> A|B iff A and B have no bits set in common. @@ -2196,8 +2224,8 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) { // W*X + Y*Z --> W * (X+Z) iff W == Y if (I.getType()->isIntOrIntVector()) { Value *W, *X, *Y, *Z; - if (match(LHS, m_Mul(m_Value(W), m_Value(X))) && - match(RHS, m_Mul(m_Value(Y), m_Value(Z)))) { + if (match(LHS, m_Mul(m_Value(W), m_Value(X)), *Context) && + match(RHS, m_Mul(m_Value(Y), m_Value(Z)), *Context)) { if (W != Y) { if (W == Z) { std::swap(Y, Z); @@ -2219,12 +2247,13 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) { if (ConstantInt *CRHS = dyn_cast(RHS)) { Value *X = 0; - if (match(LHS, m_Not(m_Value(X)))) // ~X + C --> (C-1) - X - return BinaryOperator::CreateSub(SubOne(CRHS), X); + if (match(LHS, m_Not(m_Value(X)), *Context)) // ~X + C --> (C-1) - X + return BinaryOperator::CreateSub(SubOne(CRHS, Context), X); // (X & FF00) + xx00 -> (X+xx00) & FF00 - if (LHS->hasOneUse() && match(LHS, m_And(m_Value(X), m_ConstantInt(C2)))) { - Constant *Anded = And(CRHS, C2); + if (LHS->hasOneUse() && + match(LHS, m_And(m_Value(X), m_ConstantInt(C2)), *Context)) { + Constant *Anded = Context->getConstantExprAnd(CRHS, C2); if (Anded == CRHS) { // See if all bits from the first bit set in the Add RHS up are included // in the mask. First, get the rightmost bit. 
@@ -2251,28 +2280,6 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) { return R; } - // add (cast *A to intptrtype) B -> - // cast (GEP (cast *A to sbyte*) B) --> intptrtype - { - CastInst *CI = dyn_cast(LHS); - Value *Other = RHS; - if (!CI) { - CI = dyn_cast(RHS); - Other = LHS; - } - if (CI && CI->getType()->isSized() && - (CI->getType()->getPrimitiveSizeInBits() == - TD->getIntPtrType()->getPrimitiveSizeInBits()) - && isa(CI->getOperand(0)->getType())) { - unsigned AS = - cast(CI->getOperand(0)->getType())->getAddressSpace(); - Value *I2 = InsertBitCastBefore(CI->getOperand(0), - PointerType::get(Type::Int8Ty, AS), I); - I2 = InsertNewInstBefore(GetElementPtrInst::Create(I2, Other, "ctg2"), I); - return new PtrToIntInst(I2, CI->getType()); - } - } - // add (select X 0 (sub n A)) A --> select X A n { SelectInst *SI = dyn_cast(LHS); @@ -2288,19 +2295,16 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) { // Can we fold the add into the argument of the select? // We check both true and false select arguments for a matching subtract. - if (match(FV, m_Zero()) && match(TV, m_Sub(m_Value(N), m_Specific(A)))) + if (match(FV, m_Zero(), *Context) && + match(TV, m_Sub(m_Value(N), m_Specific(A)), *Context)) // Fold the add into the true select value. return SelectInst::Create(SI->getCondition(), N, A); - if (match(TV, m_Zero()) && match(FV, m_Sub(m_Value(N), m_Specific(A)))) + if (match(TV, m_Zero(), *Context) && + match(FV, m_Sub(m_Value(N), m_Specific(A)), *Context)) // Fold the add into the false select value. return SelectInst::Create(SI->getCondition(), A, N); } } - - // Check for X+0.0. Simplify it to X if we know X is not -0.0. - if (ConstantFP *CFP = dyn_cast(RHS)) - if (CFP->getValueAPF().isPosZero() && CannotBeNegativeZero(LHS)) - return ReplaceInstUsesWith(I, LHS); // Check for (add (sext x), y), see if we can merge this into an // integer add followed by a sext. @@ -2308,9 +2312,9 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) { // (add (sext x), cst) --> (sext (add x, cst')) if (ConstantInt *RHSC = dyn_cast(RHS)) { Constant *CI = - ConstantExpr::getTrunc(RHSC, LHSConv->getOperand(0)->getType()); + Context->getConstantExprTrunc(RHSC, LHSConv->getOperand(0)->getType()); if (LHSConv->hasOneUse() && - ConstantExpr::getSExt(CI, I.getType()) == RHSC && + Context->getConstantExprSExt(CI, I.getType()) == RHSC && WillNotOverflowSignedAdd(LHSConv->getOperand(0), CI)) { // Insert the new, smaller add. Instruction *NewAdd = BinaryOperator::CreateAdd(LHSConv->getOperand(0), @@ -2338,7 +2342,42 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) { } } } - + + return Changed ? &I : 0; +} + +Instruction *InstCombiner::visitFAdd(BinaryOperator &I) { + bool Changed = SimplifyCommutative(I); + Value *LHS = I.getOperand(0), *RHS = I.getOperand(1); + + if (Constant *RHSC = dyn_cast(RHS)) { + // X + 0 --> X + if (ConstantFP *CFP = dyn_cast(RHSC)) { + if (CFP->isExactlyValue(Context->getConstantFPNegativeZero + (I.getType())->getValueAPF())) + return ReplaceInstUsesWith(I, LHS); + } + + if (isa(LHS)) + if (Instruction *NV = FoldOpIntoPhi(I)) + return NV; + } + + // -A + B --> B - A + // -A + -B --> -(A + B) + if (Value *LHSV = dyn_castFNegVal(LHS, Context)) + return BinaryOperator::CreateFSub(RHS, LHSV); + + // A + -B --> A - B + if (!isa(RHS)) + if (Value *V = dyn_castFNegVal(RHS, Context)) + return BinaryOperator::CreateFSub(LHS, V); + + // Check for X+0.0. Simplify it to X if we know X is not -0.0. 
+ if (ConstantFP *CFP = dyn_cast(RHS)) + if (CFP->getValueAPF().isPosZero() && CannotBeNegativeZero(LHS)) + return ReplaceInstUsesWith(I, LHS); + // Check for (add double (sitofp x), y), see if we can merge this into an // integer add followed by a promotion. if (SIToFPInst *LHSConv = dyn_cast(LHS)) { @@ -2349,9 +2388,9 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) { // instcombined. if (ConstantFP *CFP = dyn_cast(RHS)) { Constant *CI = - ConstantExpr::getFPToSI(CFP, LHSConv->getOperand(0)->getType()); + Context->getConstantExprFPToSI(CFP, LHSConv->getOperand(0)->getType()); if (LHSConv->hasOneUse() && - ConstantExpr::getSIToFP(CI, I.getType()) == CFP && + Context->getConstantExprSIToFP(CI, I.getType()) == CFP && WillNotOverflowSignedAdd(LHSConv->getOperand(0), CI)) { // Insert the new integer add. Instruction *NewAdd = BinaryOperator::CreateAdd(LHSConv->getOperand(0), @@ -2386,12 +2425,11 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) { Instruction *InstCombiner::visitSub(BinaryOperator &I) { Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); - if (Op0 == Op1 && // sub X, X -> 0 - !I.getType()->isFPOrFPVector()) - return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType())); + if (Op0 == Op1) // sub X, X -> 0 + return ReplaceInstUsesWith(I, Context->getNullValue(I.getType())); // If this is a 'B = x-(-A)', change to B = x+A... - if (Value *V = dyn_castNegVal(Op1)) + if (Value *V = dyn_castNegVal(Op1, Context)) return BinaryOperator::CreateAdd(Op0, V); if (isa(Op0)) @@ -2402,12 +2440,12 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) { if (ConstantInt *C = dyn_cast(Op0)) { // Replace (-1 - A) with (~A)... if (C->isAllOnesValue()) - return BinaryOperator::CreateNot(Op1); + return BinaryOperator::CreateNot(*Context, Op1); // C - ~X == X + (1+C) Value *X = 0; - if (match(Op1, m_Not(m_Value(X)))) - return BinaryOperator::CreateAdd(X, AddOne(C)); + if (match(Op1, m_Not(m_Value(X)), *Context)) + return BinaryOperator::CreateAdd(X, AddOne(C, Context)); // -(X >>u 31) -> (X >>s 31) // -(X >>s 31) -> (X >>u 31) @@ -2442,23 +2480,29 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) { if (SelectInst *SI = dyn_cast(Op1)) if (Instruction *R = FoldOpIntoSelect(I, SI, this)) return R; + + // C - zext(bool) -> bool ? 
C - 1 : C + if (ZExtInst *ZI = dyn_cast(Op1)) + if (ZI->getSrcTy() == Type::Int1Ty) + return SelectInst::Create(ZI->getOperand(0), SubOne(C, Context), C); } if (I.getType() == Type::Int1Ty) return BinaryOperator::CreateXor(Op0, Op1); if (BinaryOperator *Op1I = dyn_cast(Op1)) { - if (Op1I->getOpcode() == Instruction::Add && - !Op0->getType()->isFPOrFPVector()) { + if (Op1I->getOpcode() == Instruction::Add) { if (Op1I->getOperand(0) == Op0) // X-(X+Y) == -Y - return BinaryOperator::CreateNeg(Op1I->getOperand(1), I.getName()); + return BinaryOperator::CreateNeg(*Context, Op1I->getOperand(1), + I.getName()); else if (Op1I->getOperand(1) == Op0) // X-(Y+X) == -Y - return BinaryOperator::CreateNeg(Op1I->getOperand(0), I.getName()); + return BinaryOperator::CreateNeg(*Context, Op1I->getOperand(0), + I.getName()); else if (ConstantInt *CI1 = dyn_cast(I.getOperand(0))) { if (ConstantInt *CI2 = dyn_cast(Op1I->getOperand(1))) // C1-(X+C2) --> (C1-C2)-X - return BinaryOperator::CreateSub(Subtract(CI1, CI2), - Op1I->getOperand(0)); + return BinaryOperator::CreateSub( + Context->getConstantExprSub(CI1, CI2), Op1I->getOperand(0)); } } @@ -2466,8 +2510,7 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) { // Replace (x - (y - z)) with (x + (z - y)) if the (y - z) subexpression // is not used by anyone else... // - if (Op1I->getOpcode() == Instruction::Sub && - !Op1I->getType()->isFPOrFPVector()) { + if (Op1I->getOpcode() == Instruction::Sub) { // Swap the two operands of the subexpr... Value *IIOp0 = Op1I->getOperand(0), *IIOp1 = Op1I->getOperand(1); Op1I->setOperand(0, IIOp1); @@ -2484,7 +2527,8 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) { Value *OtherOp = Op1I->getOperand(Op1I->getOperand(0) == Op0); Value *NewNot = - InsertNewInstBefore(BinaryOperator::CreateNot(OtherOp, "B.not"), I); + InsertNewInstBefore(BinaryOperator::CreateNot(*Context, + OtherOp, "B.not"), I); return BinaryOperator::CreateAnd(Op0, NewNot); } @@ -2494,39 +2538,62 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) { if (CSI->isZero()) if (Constant *DivRHS = dyn_cast(Op1I->getOperand(1))) return BinaryOperator::CreateSDiv(Op1I->getOperand(0), - ConstantExpr::getNeg(DivRHS)); + Context->getConstantExprNeg(DivRHS)); // X - X*C --> X * (1-C) ConstantInt *C2 = 0; - if (dyn_castFoldableMul(Op1I, C2) == Op0) { - Constant *CP1 = Subtract(ConstantInt::get(I.getType(), 1), C2); + if (dyn_castFoldableMul(Op1I, C2, Context) == Op0) { + Constant *CP1 = + Context->getConstantExprSub(ConstantInt::get(I.getType(), 1), + C2); return BinaryOperator::CreateMul(Op0, CP1); } } } - if (!Op0->getType()->isFPOrFPVector()) - if (BinaryOperator *Op0I = dyn_cast(Op0)) { - if (Op0I->getOpcode() == Instruction::Add) { - if (Op0I->getOperand(0) == Op1) // (Y+X)-Y == X - return ReplaceInstUsesWith(I, Op0I->getOperand(1)); - else if (Op0I->getOperand(1) == Op1) // (X+Y)-Y == X - return ReplaceInstUsesWith(I, Op0I->getOperand(0)); - } else if (Op0I->getOpcode() == Instruction::Sub) { - if (Op0I->getOperand(0) == Op1) // (X-Y)-X == -Y - return BinaryOperator::CreateNeg(Op0I->getOperand(1), I.getName()); - } + if (BinaryOperator *Op0I = dyn_cast(Op0)) { + if (Op0I->getOpcode() == Instruction::Add) { + if (Op0I->getOperand(0) == Op1) // (Y+X)-Y == X + return ReplaceInstUsesWith(I, Op0I->getOperand(1)); + else if (Op0I->getOperand(1) == Op1) // (X+Y)-Y == X + return ReplaceInstUsesWith(I, Op0I->getOperand(0)); + } else if (Op0I->getOpcode() == Instruction::Sub) { + if (Op0I->getOperand(0) == Op1) // (X-Y)-X == -Y + return 
BinaryOperator::CreateNeg(*Context, Op0I->getOperand(1), + I.getName()); } + } ConstantInt *C1; - if (Value *X = dyn_castFoldableMul(Op0, C1)) { + if (Value *X = dyn_castFoldableMul(Op0, C1, Context)) { if (X == Op1) // X*C - X --> X * (C-1) - return BinaryOperator::CreateMul(Op1, SubOne(C1)); + return BinaryOperator::CreateMul(Op1, SubOne(C1, Context)); ConstantInt *C2; // X*C1 - X*C2 -> X * (C1-C2) - if (X == dyn_castFoldableMul(Op1, C2)) - return BinaryOperator::CreateMul(X, Subtract(C1, C2)); + if (X == dyn_castFoldableMul(Op1, C2, Context)) + return BinaryOperator::CreateMul(X, Context->getConstantExprSub(C1, C2)); + } + return 0; +} + +Instruction *InstCombiner::visitFSub(BinaryOperator &I) { + Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); + + // If this is a 'B = x-(-A)', change to B = x+A... + if (Value *V = dyn_castFNegVal(Op1, Context)) + return BinaryOperator::CreateFAdd(Op0, V); + + if (BinaryOperator *Op1I = dyn_cast(Op1)) { + if (Op1I->getOpcode() == Instruction::FAdd) { + if (Op1I->getOperand(0) == Op0) // X-(X+Y) == -Y + return BinaryOperator::CreateFNeg(*Context, Op1I->getOperand(1), + I.getName()); + else if (Op1I->getOperand(1) == Op0) // X-(Y+X) == -Y + return BinaryOperator::CreateFNeg(*Context, Op1I->getOperand(0), + I.getName()); + } } + return 0; } @@ -2565,7 +2632,7 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) { Value *Op0 = I.getOperand(0); if (isa(I.getOperand(1))) // undef * X -> 0 - return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType())); + return ReplaceInstUsesWith(I, Context->getNullValue(I.getType())); // Simplify mul instructions with a constant RHS... if (Constant *Op1 = dyn_cast(I.getOperand(1))) { @@ -2576,41 +2643,30 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) { if (SI->getOpcode() == Instruction::Shl) if (Constant *ShOp = dyn_cast(SI->getOperand(1))) return BinaryOperator::CreateMul(SI->getOperand(0), - ConstantExpr::getShl(CI, ShOp)); + Context->getConstantExprShl(CI, ShOp)); if (CI->isZero()) return ReplaceInstUsesWith(I, Op1); // X * 0 == 0 if (CI->equalsInt(1)) // X * 1 == X return ReplaceInstUsesWith(I, Op0); if (CI->isAllOnesValue()) // X * -1 == 0 - X - return BinaryOperator::CreateNeg(Op0, I.getName()); + return BinaryOperator::CreateNeg(*Context, Op0, I.getName()); const APInt& Val = cast(CI)->getValue(); if (Val.isPowerOf2()) { // Replace X*(2^C) with X << C return BinaryOperator::CreateShl(Op0, ConstantInt::get(Op0->getType(), Val.logBase2())); } - } else if (ConstantFP *Op1F = dyn_cast(Op1)) { - if (Op1F->isNullValue()) - return ReplaceInstUsesWith(I, Op1); - - // "In IEEE floating point, x*1 is not equivalent to x for nans. However, - // ANSI says we can drop signals, so we can do this anyway." (from GCC) - if (Op1F->isExactlyValue(1.0)) - return ReplaceInstUsesWith(I, Op0); // Eliminate 'mul double %X, 1.0' } else if (isa(Op1->getType())) { - if (isa(Op1)) + if (Op1->isNullValue()) return ReplaceInstUsesWith(I, Op1); if (ConstantVector *Op1V = dyn_cast(Op1)) { if (Op1V->isAllOnesValue()) // X * -1 == 0 - X - return BinaryOperator::CreateNeg(Op0, I.getName()); + return BinaryOperator::CreateNeg(*Context, Op0, I.getName()); // As above, vector X*splat(1.0) -> X in all defined cases. 
if (Constant *Splat = Op1V->getSplatValue()) { - if (ConstantFP *F = dyn_cast(Splat)) - if (F->isExactlyValue(1.0)) - return ReplaceInstUsesWith(I, Op0); if (ConstantInt *CI = dyn_cast(Splat)) if (CI->equalsInt(1)) return ReplaceInstUsesWith(I, Op0); @@ -2625,7 +2681,7 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) { Instruction *Add = BinaryOperator::CreateMul(Op0I->getOperand(0), Op1, "tmp"); InsertNewInstBefore(Add, I); - Value *C1C2 = ConstantExpr::getMul(Op1, + Value *C1C2 = Context->getConstantExprMul(Op1, cast(Op0I->getOperand(1))); return BinaryOperator::CreateAdd(Add, C1C2); @@ -2641,8 +2697,8 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) { return NV; } - if (Value *Op0v = dyn_castNegVal(Op0)) // -X * -Y = X*Y - if (Value *Op1v = dyn_castNegVal(I.getOperand(1))) + if (Value *Op0v = dyn_castNegVal(Op0, Context)) // -X * -Y = X*Y + if (Value *Op1v = dyn_castNegVal(I.getOperand(1), Context)) return BinaryOperator::CreateMul(Op0v, Op1v); // (X / Y) * Y = X - (X % Y) @@ -2656,7 +2712,7 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) { Op1 = Op0; BO = dyn_cast(I.getOperand(1)); } - Value *Neg = dyn_castNegVal(Op1); + Value *Neg = dyn_castNegVal(Op1, Context); if (BO && BO->hasOneUse() && (BO->getOperand(1) == Op1 || BO->getOperand(1) == Neg) && (BO->getOpcode() == Instruction::UDiv || @@ -2734,6 +2790,45 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) { return Changed ? &I : 0; } +Instruction *InstCombiner::visitFMul(BinaryOperator &I) { + bool Changed = SimplifyCommutative(I); + Value *Op0 = I.getOperand(0); + + // Simplify mul instructions with a constant RHS... + if (Constant *Op1 = dyn_cast(I.getOperand(1))) { + if (ConstantFP *Op1F = dyn_cast(Op1)) { + // "In IEEE floating point, x*1 is not equivalent to x for nans. However, + // ANSI says we can drop signals, so we can do this anyway." (from GCC) + if (Op1F->isExactlyValue(1.0)) + return ReplaceInstUsesWith(I, Op0); // Eliminate 'mul double %X, 1.0' + } else if (isa(Op1->getType())) { + if (ConstantVector *Op1V = dyn_cast(Op1)) { + // As above, vector X*splat(1.0) -> X in all defined cases. + if (Constant *Splat = Op1V->getSplatValue()) { + if (ConstantFP *F = dyn_cast(Splat)) + if (F->isExactlyValue(1.0)) + return ReplaceInstUsesWith(I, Op0); + } + } + } + + // Try to fold constant mul into select arguments. + if (SelectInst *SI = dyn_cast(Op0)) + if (Instruction *R = FoldOpIntoSelect(I, SI, this)) + return R; + + if (isa(Op0)) + if (Instruction *NV = FoldOpIntoPhi(I)) + return NV; + } + + if (Value *Op0v = dyn_castFNegVal(Op0, Context)) // -X * -Y = X*Y + if (Value *Op1v = dyn_castFNegVal(I.getOperand(1), Context)) + return BinaryOperator::CreateFMul(Op0v, Op1v); + + return Changed ? &I : 0; +} + /// SimplifyDivRemOfSelect - Try to fold a divide or remainder of a select /// instruction. bool InstCombiner::SimplifyDivRemOfSelect(BinaryOperator &I) { @@ -2785,8 +2880,8 @@ bool InstCombiner::SimplifyDivRemOfSelect(BinaryOperator &I) { *I = SI->getOperand(NonNullOperand); AddToWorkList(BBI); } else if (*I == SelectCond) { - *I = NonNullOperand == 1 ? ConstantInt::getTrue() : - ConstantInt::getFalse(); + *I = NonNullOperand == 1 ? 
Context->getTrue() : + Context->getFalse(); AddToWorkList(BBI); } } @@ -2818,7 +2913,7 @@ Instruction *InstCombiner::commonDivTransforms(BinaryOperator &I) { if (isa(Op0)) { if (Op0->getType()->isFPOrFPVector()) return ReplaceInstUsesWith(I, Op0); - return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType())); + return ReplaceInstUsesWith(I, Context->getNullValue(I.getType())); } // X / undef -> undef @@ -2838,12 +2933,12 @@ Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) { // (sdiv X, X) --> 1 (udiv X, X) --> 1 if (Op0 == Op1) { if (const VectorType *Ty = dyn_cast(I.getType())) { - ConstantInt *CI = ConstantInt::get(Ty->getElementType(), 1); + Constant *CI = ConstantInt::get(Ty->getElementType(), 1); std::vector Elts(Ty->getNumElements(), CI); - return ReplaceInstUsesWith(I, ConstantVector::get(Elts)); + return ReplaceInstUsesWith(I, Context->getConstantVector(Elts)); } - ConstantInt *CI = ConstantInt::get(I.getType(), 1); + Constant *CI = ConstantInt::get(I.getType(), 1); return ReplaceInstUsesWith(I, CI); } @@ -2864,11 +2959,12 @@ Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) { if (Instruction *LHS = dyn_cast(Op0)) if (Instruction::BinaryOps(LHS->getOpcode()) == I.getOpcode()) if (ConstantInt *LHSRHS = dyn_cast(LHS->getOperand(1))) { - if (MultiplyOverflows(RHS, LHSRHS, I.getOpcode()==Instruction::SDiv)) - return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType())); + if (MultiplyOverflows(RHS, LHSRHS, + I.getOpcode()==Instruction::SDiv, Context)) + return ReplaceInstUsesWith(I, Context->getNullValue(I.getType())); else return BinaryOperator::Create(I.getOpcode(), LHS->getOperand(0), - Multiply(RHS, LHSRHS)); + Context->getConstantExprMul(RHS, LHSRHS)); } if (!RHS->isZero()) { // avoid X udiv 0 @@ -2884,7 +2980,7 @@ Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) { // 0 / X == 0, we don't need to preserve faults! if (ConstantInt *LHS = dyn_cast(Op0)) if (LHS->equalsInt(0)) - return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType())); + return ReplaceInstUsesWith(I, Context->getNullValue(I.getType())); // It can't be division by zero, hence it must be division by one. if (I.getType() == Type::Int1Ty) @@ -2913,13 +3009,14 @@ Instruction *InstCombiner::visitUDiv(BinaryOperator &I) { // if so, convert to a right shift. if (C->getValue().isPowerOf2()) // 0 not included in isPowerOf2 return BinaryOperator::CreateLShr(Op0, - ConstantInt::get(Op0->getType(), C->getValue().logBase2())); + ConstantInt::get(Op0->getType(), C->getValue().logBase2())); // X udiv C, where C >= signbit if (C->getValue().isNegative()) { - Value *IC = InsertNewInstBefore(new ICmpInst(ICmpInst::ICMP_ULT, Op0, C), + Value *IC = InsertNewInstBefore(new ICmpInst(*Context, + ICmpInst::ICMP_ULT, Op0, C), I); - return SelectInst::Create(IC, Constant::getNullValue(I.getType()), + return SelectInst::Create(IC, Context->getNullValue(I.getType()), ConstantInt::get(I.getType(), 1)); } } @@ -2979,18 +3076,29 @@ Instruction *InstCombiner::visitSDiv(BinaryOperator &I) { if (ConstantInt *RHS = dyn_cast(Op1)) { // sdiv X, -1 == -X if (RHS->isAllOnesValue()) - return BinaryOperator::CreateNeg(Op0); + return BinaryOperator::CreateNeg(*Context, Op0); } // If the sign bits of both operands are zero (i.e. we can prove they are // unsigned inputs), turn this into a udiv. 
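Aside (not part of the patch): a small standalone sketch of the two udiv folds above, assuming uint32_t models an i32 udiv. Division by a power of two is a logical shift, and division by a constant with the sign bit set can only produce 0 or 1, which is what the emitted icmp/select encodes.

#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t X : {0u, 1u, 12345u, 0x80000000u, 0xFFFFFFFFu}) {
    for (unsigned k = 0; k < 32; ++k)
      assert(X / (1u << k) == (X >> k));   // X udiv 2^C --> X lshr C

    const uint32_t C = 0x80000001u;        // constant with sign bit set
    assert(X / C == (X < C ? 0u : 1u));    // select(X u< C, 0, 1)
  }
  return 0;
}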
if (I.getType()->isInteger()) { APInt Mask(APInt::getSignBit(I.getType()->getPrimitiveSizeInBits())); - if (MaskedValueIsZero(Op1, Mask) && MaskedValueIsZero(Op0, Mask)) { - // X sdiv Y -> X udiv Y, iff X and Y don't have sign bit set - return BinaryOperator::CreateUDiv(Op0, Op1, I.getName()); + if (MaskedValueIsZero(Op0, Mask)) { + if (MaskedValueIsZero(Op1, Mask)) { + // X sdiv Y -> X udiv Y, iff X and Y don't have sign bit set + return BinaryOperator::CreateUDiv(Op0, Op1, I.getName()); + } + ConstantInt *ShiftedInt; + if (match(Op1, m_Shl(m_ConstantInt(ShiftedInt), m_Value()), *Context) && + ShiftedInt->getValue().isPowerOf2()) { + // X sdiv (1 << Y) -> X udiv (1 << Y) ( -> X u>> Y) + // Safe because the only negative value (1 << Y) can take on is + // INT_MIN, and X sdiv INT_MIN == X udiv INT_MIN == 0 if X doesn't have + // the sign bit set. + return BinaryOperator::CreateUDiv(Op0, Op1, I.getName()); + } } - } + } return 0; } @@ -3009,7 +3117,7 @@ Instruction *InstCombiner::commonRemTransforms(BinaryOperator &I) { if (isa(Op0)) { // undef % X -> 0 if (I.getType()->isFPOrFPVector()) return ReplaceInstUsesWith(I, Op0); // X % undef -> undef (could be SNaN) - return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType())); + return ReplaceInstUsesWith(I, Context->getNullValue(I.getType())); } if (isa(Op1)) return ReplaceInstUsesWith(I, Op1); // X % undef -> undef @@ -3034,15 +3142,15 @@ Instruction *InstCombiner::commonIRemTransforms(BinaryOperator &I) { // 0 % X == 0 for integer, we don't need to preserve faults! if (Constant *LHS = dyn_cast(Op0)) if (LHS->isNullValue()) - return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType())); + return ReplaceInstUsesWith(I, Context->getNullValue(I.getType())); if (ConstantInt *RHS = dyn_cast(Op1)) { // X % 0 == undef, we don't need to preserve faults! if (RHS->equalsInt(0)) - return ReplaceInstUsesWith(I, UndefValue::get(I.getType())); + return ReplaceInstUsesWith(I, Context->getUndef(I.getType())); if (RHS->equalsInt(1)) // X % 1 == 0 - return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType())); + return ReplaceInstUsesWith(I, Context->getNullValue(I.getType())); if (Instruction *Op0I = dyn_cast(Op0)) { if (SelectInst *SI = dyn_cast(Op0I)) { @@ -3074,7 +3182,7 @@ Instruction *InstCombiner::visitURem(BinaryOperator &I) { // if so, convert to a bitwise and. 
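Aside (not part of the patch): a quick standalone check of the sdiv-to-udiv rewrite above. When the sign bit of both operands is known zero, signed and unsigned division of the same 32-bit pattern agree; uint32_t/int32_t are used here purely as a model of i32.

#include <cassert>
#include <cstdint>

int main() {
  // Every value below has the sign bit clear.
  for (uint32_t a : {0u, 1u, 7u, 1000u, 0x7FFFFFFFu})
    for (uint32_t b : {1u, 2u, 3u, 64u, 0x7FFFFFFFu}) {
      int32_t  sq = static_cast<int32_t>(a) / static_cast<int32_t>(b);
      uint32_t uq = a / b;
      assert(static_cast<uint32_t>(sq) == uq);  // X sdiv Y == X udiv Y
    }
  return 0;
}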
if (ConstantInt *C = dyn_cast(RHS)) if (C->getValue().isPowerOf2()) - return BinaryOperator::CreateAnd(Op0, SubOne(C)); + return BinaryOperator::CreateAnd(Op0, SubOne(C, Context)); } if (Instruction *RHSI = dyn_cast(I.getOperand(1))) { @@ -3082,7 +3190,7 @@ Instruction *InstCombiner::visitURem(BinaryOperator &I) { if (RHSI->getOpcode() == Instruction::Shl && isa(RHSI->getOperand(0))) { if (cast(RHSI->getOperand(0))->getValue().isPowerOf2()) { - Constant *N1 = ConstantInt::getAllOnesValue(I.getType()); + Constant *N1 = Context->getAllOnesValue(I.getType()); Value *Add = InsertNewInstBefore(BinaryOperator::CreateAdd(RHSI, N1, "tmp"), I); return BinaryOperator::CreateAnd(Op0, Add); @@ -3099,9 +3207,11 @@ Instruction *InstCombiner::visitURem(BinaryOperator &I) { if ((STO->getValue().isPowerOf2()) && (SFO->getValue().isPowerOf2())) { Value *TrueAnd = InsertNewInstBefore( - BinaryOperator::CreateAnd(Op0, SubOne(STO), SI->getName()+".t"), I); + BinaryOperator::CreateAnd(Op0, SubOne(STO, Context), + SI->getName()+".t"), I); Value *FalseAnd = InsertNewInstBefore( - BinaryOperator::CreateAnd(Op0, SubOne(SFO), SI->getName()+".f"), I); + BinaryOperator::CreateAnd(Op0, SubOne(SFO, Context), + SI->getName()+".f"), I); return SelectInst::Create(SI->getOperand(0), TrueAnd, FalseAnd); } } @@ -3117,7 +3227,7 @@ Instruction *InstCombiner::visitSRem(BinaryOperator &I) { if (Instruction *common = commonIRemTransforms(I)) return common; - if (Value *RHSNeg = dyn_castNegVal(Op1)) + if (Value *RHSNeg = dyn_castNegVal(Op1, Context)) if (!isa(RHSNeg) || (isa(RHSNeg) && cast(RHSNeg)->getValue().isStrictlyPositive())) { @@ -3152,13 +3262,13 @@ Instruction *InstCombiner::visitSRem(BinaryOperator &I) { for (unsigned i = 0; i != VWidth; ++i) { if (ConstantInt *RHS = dyn_cast(RHSV->getOperand(i))) { if (RHS->getValue().isNegative()) - Elts[i] = cast(ConstantExpr::getNeg(RHS)); + Elts[i] = cast(Context->getConstantExprNeg(RHS)); else Elts[i] = RHS; } } - Constant *NewRHSV = ConstantVector::get(Elts); + Constant *NewRHSV = Context->getConstantVector(Elts); if (NewRHSV != RHSV) { AddUsesToWorkList(I); I.setOperand(1, NewRHSV); @@ -3224,7 +3334,7 @@ static unsigned getICmpCode(const ICmpInst *ICI) { case ICmpInst::ICMP_SLE: return 6; // 110 // True -> 7 default: - assert(0 && "Invalid ICmp predicate!"); + llvm_unreachable("Invalid ICmp predicate!"); return 0; } } @@ -3252,7 +3362,7 @@ static unsigned getFCmpCode(FCmpInst::Predicate CC, bool &isOrdered) { // True -> 7 default: // Not expecting FCMP_FALSE and FCMP_TRUE; - assert(0 && "Unexpected FCmp predicate!"); + llvm_unreachable("Unexpected FCmp predicate!"); return 0; } } @@ -3261,33 +3371,34 @@ static unsigned getFCmpCode(FCmpInst::Predicate CC, bool &isOrdered) { /// opcode and two operands into either a constant true or false, or a brand /// new ICmp instruction. The sign is passed in to determine which kind /// of predicate to use in the new icmp instruction. 
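Aside (not part of the patch): the urem fold above (power-of-two divisor becomes a mask of C-1) is a standard identity; a self-contained check with uint32_t in place of i32:

#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t x : {0u, 1u, 13u, 255u, 0xDEADBEEFu, 0xFFFFFFFFu})
    for (unsigned k = 0; k < 32; ++k) {
      uint32_t pow2 = 1u << k;
      assert(x % pow2 == (x & (pow2 - 1)));  // X urem 2^k --> X & (2^k - 1)
    }
  return 0;
}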
-static Value *getICmpValue(bool sign, unsigned code, Value *LHS, Value *RHS) { +static Value *getICmpValue(bool sign, unsigned code, Value *LHS, Value *RHS, + LLVMContext *Context) { switch (code) { - default: assert(0 && "Illegal ICmp code!"); - case 0: return ConstantInt::getFalse(); + default: llvm_unreachable("Illegal ICmp code!"); + case 0: return Context->getFalse(); case 1: if (sign) - return new ICmpInst(ICmpInst::ICMP_SGT, LHS, RHS); + return new ICmpInst(*Context, ICmpInst::ICMP_SGT, LHS, RHS); else - return new ICmpInst(ICmpInst::ICMP_UGT, LHS, RHS); - case 2: return new ICmpInst(ICmpInst::ICMP_EQ, LHS, RHS); + return new ICmpInst(*Context, ICmpInst::ICMP_UGT, LHS, RHS); + case 2: return new ICmpInst(*Context, ICmpInst::ICMP_EQ, LHS, RHS); case 3: if (sign) - return new ICmpInst(ICmpInst::ICMP_SGE, LHS, RHS); + return new ICmpInst(*Context, ICmpInst::ICMP_SGE, LHS, RHS); else - return new ICmpInst(ICmpInst::ICMP_UGE, LHS, RHS); + return new ICmpInst(*Context, ICmpInst::ICMP_UGE, LHS, RHS); case 4: if (sign) - return new ICmpInst(ICmpInst::ICMP_SLT, LHS, RHS); + return new ICmpInst(*Context, ICmpInst::ICMP_SLT, LHS, RHS); else - return new ICmpInst(ICmpInst::ICMP_ULT, LHS, RHS); - case 5: return new ICmpInst(ICmpInst::ICMP_NE, LHS, RHS); + return new ICmpInst(*Context, ICmpInst::ICMP_ULT, LHS, RHS); + case 5: return new ICmpInst(*Context, ICmpInst::ICMP_NE, LHS, RHS); case 6: if (sign) - return new ICmpInst(ICmpInst::ICMP_SLE, LHS, RHS); + return new ICmpInst(*Context, ICmpInst::ICMP_SLE, LHS, RHS); else - return new ICmpInst(ICmpInst::ICMP_ULE, LHS, RHS); - case 7: return ConstantInt::getTrue(); + return new ICmpInst(*Context, ICmpInst::ICMP_ULE, LHS, RHS); + case 7: return Context->getTrue(); } } @@ -3295,45 +3406,45 @@ static Value *getICmpValue(bool sign, unsigned code, Value *LHS, Value *RHS) { /// opcode and two operands into either a FCmp instruction. isordered is passed /// in to determine which kind of predicate to use in the new fcmp instruction. 
static Value *getFCmpValue(bool isordered, unsigned code, - Value *LHS, Value *RHS) { + Value *LHS, Value *RHS, LLVMContext *Context) { switch (code) { - default: assert(0 && "Illegal FCmp code!"); + default: llvm_unreachable("Illegal FCmp code!"); case 0: if (isordered) - return new FCmpInst(FCmpInst::FCMP_ORD, LHS, RHS); + return new FCmpInst(*Context, FCmpInst::FCMP_ORD, LHS, RHS); else - return new FCmpInst(FCmpInst::FCMP_UNO, LHS, RHS); + return new FCmpInst(*Context, FCmpInst::FCMP_UNO, LHS, RHS); case 1: if (isordered) - return new FCmpInst(FCmpInst::FCMP_OGT, LHS, RHS); + return new FCmpInst(*Context, FCmpInst::FCMP_OGT, LHS, RHS); else - return new FCmpInst(FCmpInst::FCMP_UGT, LHS, RHS); + return new FCmpInst(*Context, FCmpInst::FCMP_UGT, LHS, RHS); case 2: if (isordered) - return new FCmpInst(FCmpInst::FCMP_OEQ, LHS, RHS); + return new FCmpInst(*Context, FCmpInst::FCMP_OEQ, LHS, RHS); else - return new FCmpInst(FCmpInst::FCMP_UEQ, LHS, RHS); + return new FCmpInst(*Context, FCmpInst::FCMP_UEQ, LHS, RHS); case 3: if (isordered) - return new FCmpInst(FCmpInst::FCMP_OGE, LHS, RHS); + return new FCmpInst(*Context, FCmpInst::FCMP_OGE, LHS, RHS); else - return new FCmpInst(FCmpInst::FCMP_UGE, LHS, RHS); + return new FCmpInst(*Context, FCmpInst::FCMP_UGE, LHS, RHS); case 4: if (isordered) - return new FCmpInst(FCmpInst::FCMP_OLT, LHS, RHS); + return new FCmpInst(*Context, FCmpInst::FCMP_OLT, LHS, RHS); else - return new FCmpInst(FCmpInst::FCMP_ULT, LHS, RHS); + return new FCmpInst(*Context, FCmpInst::FCMP_ULT, LHS, RHS); case 5: if (isordered) - return new FCmpInst(FCmpInst::FCMP_ONE, LHS, RHS); + return new FCmpInst(*Context, FCmpInst::FCMP_ONE, LHS, RHS); else - return new FCmpInst(FCmpInst::FCMP_UNE, LHS, RHS); + return new FCmpInst(*Context, FCmpInst::FCMP_UNE, LHS, RHS); case 6: if (isordered) - return new FCmpInst(FCmpInst::FCMP_OLE, LHS, RHS); + return new FCmpInst(*Context, FCmpInst::FCMP_OLE, LHS, RHS); else - return new FCmpInst(FCmpInst::FCMP_ULE, LHS, RHS); - case 7: return ConstantInt::getTrue(); + return new FCmpInst(*Context, FCmpInst::FCMP_ULE, LHS, RHS); + case 7: return Context->getTrue(); } } @@ -3376,13 +3487,13 @@ struct FoldICmpLogical { case Instruction::And: Code = LHSCode & RHSCode; break; case Instruction::Or: Code = LHSCode | RHSCode; break; case Instruction::Xor: Code = LHSCode ^ RHSCode; break; - default: assert(0 && "Illegal logical opcode!"); return 0; + default: llvm_unreachable("Illegal logical opcode!"); return 0; } bool isSigned = ICmpInst::isSignedPredicate(RHSICI->getPredicate()) || ICmpInst::isSignedPredicate(ICI->getPredicate()); - Value *RV = getICmpValue(isSigned, Code, LHS, RHS); + Value *RV = getICmpValue(isSigned, Code, LHS, RHS, IC.getContext()); if (Instruction *I = dyn_cast(RV)) return I; // Otherwise, it's a constant boolean value... 
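Aside (not part of the patch): the FoldICmpLogical code above combines two compares of the same operands by AND/OR/XOR-ing their 3-bit outcome codes from getICmpCode (1 = greater, 2 = equal, 4 = less). The sketch below models that encoding directly: since exactly one outcome bit is set for any operand pair, the bitwise operation on the masks matches the logical operation on the compare results.

#include <cassert>

// Truth of the predicate whose outcome-mask is m, evaluated on (a, b).
static bool pred(unsigned m, int a, int b) {
  unsigned outcome = a < b ? 4u : (a == b ? 2u : 1u);  // exactly one bit set
  return (outcome & m) != 0;
}

int main() {
  for (unsigned m1 = 0; m1 < 8; ++m1)
    for (unsigned m2 = 0; m2 < 8; ++m2)
      for (int a = -2; a <= 2; ++a)
        for (int b = -2; b <= 2; ++b) {
          assert((pred(m1, a, b) && pred(m2, a, b)) == pred(m1 & m2, a, b));
          assert((pred(m1, a, b) || pred(m2, a, b)) == pred(m1 | m2, a, b));
          assert((pred(m1, a, b) != pred(m2, a, b)) == pred(m1 ^ m2, a, b));
        }
  return 0;
}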
@@ -3401,7 +3512,7 @@ Instruction *InstCombiner::OptAndOp(Instruction *Op, Value *X = Op->getOperand(0); Constant *Together = 0; if (!Op->isShift()) - Together = And(AndRHS, OpRHS); + Together = Context->getConstantExprAnd(AndRHS, OpRHS); switch (Op->getOpcode()) { case Instruction::Xor: @@ -3467,7 +3578,7 @@ Instruction *InstCombiner::OptAndOp(Instruction *Op, uint32_t BitWidth = AndRHS->getType()->getBitWidth(); uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth); APInt ShlMask(APInt::getHighBitsSet(BitWidth, BitWidth-OpRHSVal)); - ConstantInt *CI = ConstantInt::get(AndRHS->getValue() & ShlMask); + ConstantInt *CI = ConstantInt::get(*Context, AndRHS->getValue() & ShlMask); if (CI->getValue() == ShlMask) { // Masking out bits that the shift already masks @@ -3487,7 +3598,7 @@ Instruction *InstCombiner::OptAndOp(Instruction *Op, uint32_t BitWidth = AndRHS->getType()->getBitWidth(); uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth); APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal)); - ConstantInt *CI = ConstantInt::get(AndRHS->getValue() & ShrMask); + ConstantInt *CI = ConstantInt::get(*Context, AndRHS->getValue() & ShrMask); if (CI->getValue() == ShrMask) { // Masking out bits that the shift already masks. @@ -3506,7 +3617,7 @@ Instruction *InstCombiner::OptAndOp(Instruction *Op, uint32_t BitWidth = AndRHS->getType()->getBitWidth(); uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth); APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal)); - Constant *C = ConstantInt::get(AndRHS->getValue() & ShrMask); + Constant *C = ConstantInt::get(*Context, AndRHS->getValue() & ShrMask); if (C == AndRHS) { // Masking out bits shifted in. // (Val ashr C1) & C2 -> (Val lshr C1) & C2 // Make the argument unsigned. @@ -3531,47 +3642,47 @@ Instruction *InstCombiner::OptAndOp(Instruction *Op, Instruction *InstCombiner::InsertRangeTest(Value *V, Constant *Lo, Constant *Hi, bool isSigned, bool Inside, Instruction &IB) { - assert(cast(ConstantExpr::getICmp((isSigned ? + assert(cast(Context->getConstantExprICmp((isSigned ? ICmpInst::ICMP_SLE:ICmpInst::ICMP_ULE), Lo, Hi))->getZExtValue() && "Lo is not <= Hi in range emission code!"); if (Inside) { if (Lo == Hi) // Trivially false. - return new ICmpInst(ICmpInst::ICMP_NE, V, V); + return new ICmpInst(*Context, ICmpInst::ICMP_NE, V, V); // V >= Min && V < Hi --> V < Hi if (cast(Lo)->isMinValue(isSigned)) { ICmpInst::Predicate pred = (isSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT); - return new ICmpInst(pred, V, Hi); + return new ICmpInst(*Context, pred, V, Hi); } // Emit V-Lo getConstantExprNeg(Lo); Instruction *Add = BinaryOperator::CreateAdd(V, NegLo, V->getName()+".off"); InsertNewInstBefore(Add, IB); - Constant *UpperBound = ConstantExpr::getAdd(NegLo, Hi); - return new ICmpInst(ICmpInst::ICMP_ULT, Add, UpperBound); + Constant *UpperBound = Context->getConstantExprAdd(NegLo, Hi); + return new ICmpInst(*Context, ICmpInst::ICMP_ULT, Add, UpperBound); } if (Lo == Hi) // Trivially true. - return new ICmpInst(ICmpInst::ICMP_EQ, V, V); + return new ICmpInst(*Context, ICmpInst::ICMP_EQ, V, V); // V < Min || V >= Hi -> V > Hi-1 - Hi = SubOne(cast(Hi)); + Hi = SubOne(cast(Hi), Context); if (cast(Lo)->isMinValue(isSigned)) { ICmpInst::Predicate pred = (isSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT); - return new ICmpInst(pred, V, Hi); + return new ICmpInst(*Context, pred, V, Hi); } // Emit V-Lo >u Hi-1-Lo // Note that Hi has already had one subtracted from it, above. 
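Aside (not part of the patch): InsertRangeTest above emits the classic single-compare range check. A standalone check of the identity it relies on, with uint32_t wraparound standing in for the unsigned icmp:

// Lo <= V && V < Hi   <=>   (V - Lo) u<  (Hi - Lo)
#include <cassert>
#include <cstdint>

int main() {
  const uint32_t Lo = 13, Hi = 200;
  for (uint32_t V : {0u, 12u, 13u, 14u, 199u, 200u, 201u, 0xFFFFFFFFu}) {
    bool inside = (V >= Lo) && (V < Hi);
    bool folded = (V - Lo) < (Hi - Lo);  // wraps for V < Lo, as intended
    assert(inside == folded);
  }
  return 0;
}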
- ConstantInt *NegLo = cast(ConstantExpr::getNeg(Lo)); + ConstantInt *NegLo = cast(Context->getConstantExprNeg(Lo)); Instruction *Add = BinaryOperator::CreateAdd(V, NegLo, V->getName()+".off"); InsertNewInstBefore(Add, IB); - Constant *LowerBound = ConstantExpr::getAdd(NegLo, Hi); - return new ICmpInst(ICmpInst::ICMP_UGT, Add, LowerBound); + Constant *LowerBound = Context->getConstantExprAdd(NegLo, Hi); + return new ICmpInst(*Context, ICmpInst::ICMP_UGT, Add, LowerBound); } // isRunOfOnes - Returns true iff Val consists of one contiguous run of 1s with @@ -3612,7 +3723,7 @@ Value *InstCombiner::FoldLogicalPlusAnd(Value *LHS, Value *RHS, switch (LHSI->getOpcode()) { default: return 0; case Instruction::And: - if (And(N, Mask) == Mask) { + if (Context->getConstantExprAnd(N, Mask) == Mask) { // If the AndRHS is a power of two minus one (0+1+), this is simple. if ((Mask->getValue().countLeadingZeros() + Mask->getValue().countPopulation()) == @@ -3636,7 +3747,7 @@ Value *InstCombiner::FoldLogicalPlusAnd(Value *LHS, Value *RHS, // If the AndRHS is a power of two minus one (0+1+), and N&Mask == 0 if ((Mask->getValue().countLeadingZeros() + Mask->getValue().countPopulation()) == Mask->getValue().getBitWidth() - && And(N, Mask)->isZero()) + && Context->getConstantExprAnd(N, Mask)->isNullValue()) break; return 0; } @@ -3657,8 +3768,10 @@ Instruction *InstCombiner::FoldAndOfICmps(Instruction &I, ICmpInst::Predicate LHSCC, RHSCC; // This only handles icmp of constants: (icmp1 A, C1) & (icmp2 B, C2). - if (!match(LHS, m_ICmp(LHSCC, m_Value(Val), m_ConstantInt(LHSCst))) || - !match(RHS, m_ICmp(RHSCC, m_Value(Val2), m_ConstantInt(RHSCst)))) + if (!match(LHS, m_ICmp(LHSCC, m_Value(Val), + m_ConstantInt(LHSCst)), *Context) || + !match(RHS, m_ICmp(RHSCC, m_Value(Val2), + m_ConstantInt(RHSCst)), *Context)) return 0; // (icmp ult A, C) & (icmp ult B, C) --> (icmp ult (A|B), C) @@ -3667,7 +3780,7 @@ Instruction *InstCombiner::FoldAndOfICmps(Instruction &I, LHSCst->getValue().isPowerOf2()) { Instruction *NewOr = BinaryOperator::CreateOr(Val, Val2); InsertNewInstBefore(NewOr, I); - return new ICmpInst(LHSCC, NewOr, LHSCst); + return new ICmpInst(*Context, LHSCC, NewOr, LHSCst); } // From here on, we only handle: @@ -3709,14 +3822,14 @@ Instruction *InstCombiner::FoldAndOfICmps(Instruction &I, assert(LHSCst != RHSCst && "Compares not folded above?"); switch (LHSCC) { - default: assert(0 && "Unknown integer condition code!"); + default: llvm_unreachable("Unknown integer condition code!"); case ICmpInst::ICMP_EQ: switch (RHSCC) { - default: assert(0 && "Unknown integer condition code!"); + default: llvm_unreachable("Unknown integer condition code!"); case ICmpInst::ICMP_EQ: // (X == 13 & X == 15) -> false case ICmpInst::ICMP_UGT: // (X == 13 & X > 15) -> false case ICmpInst::ICMP_SGT: // (X == 13 & X > 15) -> false - return ReplaceInstUsesWith(I, ConstantInt::getFalse()); + return ReplaceInstUsesWith(I, Context->getFalse()); case ICmpInst::ICMP_NE: // (X == 13 & X != 15) -> X == 13 case ICmpInst::ICMP_ULT: // (X == 13 & X < 15) -> X == 13 case ICmpInst::ICMP_SLT: // (X == 13 & X < 15) -> X == 13 @@ -3724,26 +3837,26 @@ Instruction *InstCombiner::FoldAndOfICmps(Instruction &I, } case ICmpInst::ICMP_NE: switch (RHSCC) { - default: assert(0 && "Unknown integer condition code!"); + default: llvm_unreachable("Unknown integer condition code!"); case ICmpInst::ICMP_ULT: - if (LHSCst == SubOne(RHSCst)) // (X != 13 & X u< 14) -> X < 13 - return new ICmpInst(ICmpInst::ICMP_ULT, Val, LHSCst); + if (LHSCst == SubOne(RHSCst, 
Context)) // (X != 13 & X u< 14) -> X < 13 + return new ICmpInst(*Context, ICmpInst::ICMP_ULT, Val, LHSCst); break; // (X != 13 & X u< 15) -> no change case ICmpInst::ICMP_SLT: - if (LHSCst == SubOne(RHSCst)) // (X != 13 & X s< 14) -> X < 13 - return new ICmpInst(ICmpInst::ICMP_SLT, Val, LHSCst); + if (LHSCst == SubOne(RHSCst, Context)) // (X != 13 & X s< 14) -> X < 13 + return new ICmpInst(*Context, ICmpInst::ICMP_SLT, Val, LHSCst); break; // (X != 13 & X s< 15) -> no change case ICmpInst::ICMP_EQ: // (X != 13 & X == 15) -> X == 15 case ICmpInst::ICMP_UGT: // (X != 13 & X u> 15) -> X u> 15 case ICmpInst::ICMP_SGT: // (X != 13 & X s> 15) -> X s> 15 return ReplaceInstUsesWith(I, RHS); case ICmpInst::ICMP_NE: - if (LHSCst == SubOne(RHSCst)){// (X != 13 & X != 14) -> X-13 >u 1 - Constant *AddCST = ConstantExpr::getNeg(LHSCst); + if (LHSCst == SubOne(RHSCst, Context)){// (X != 13 & X != 14) -> X-13 >u 1 + Constant *AddCST = Context->getConstantExprNeg(LHSCst); Instruction *Add = BinaryOperator::CreateAdd(Val, AddCST, Val->getName()+".off"); InsertNewInstBefore(Add, I); - return new ICmpInst(ICmpInst::ICMP_UGT, Add, + return new ICmpInst(*Context, ICmpInst::ICMP_UGT, Add, ConstantInt::get(Add->getType(), 1)); } break; // (X != 13 & X != 15) -> no change @@ -3751,10 +3864,10 @@ Instruction *InstCombiner::FoldAndOfICmps(Instruction &I, break; case ICmpInst::ICMP_ULT: switch (RHSCC) { - default: assert(0 && "Unknown integer condition code!"); + default: llvm_unreachable("Unknown integer condition code!"); case ICmpInst::ICMP_EQ: // (X u< 13 & X == 15) -> false case ICmpInst::ICMP_UGT: // (X u< 13 & X u> 15) -> false - return ReplaceInstUsesWith(I, ConstantInt::getFalse()); + return ReplaceInstUsesWith(I, Context->getFalse()); case ICmpInst::ICMP_SGT: // (X u< 13 & X s> 15) -> no change break; case ICmpInst::ICMP_NE: // (X u< 13 & X != 15) -> X u< 13 @@ -3766,10 +3879,10 @@ Instruction *InstCombiner::FoldAndOfICmps(Instruction &I, break; case ICmpInst::ICMP_SLT: switch (RHSCC) { - default: assert(0 && "Unknown integer condition code!"); + default: llvm_unreachable("Unknown integer condition code!"); case ICmpInst::ICMP_EQ: // (X s< 13 & X == 15) -> false case ICmpInst::ICMP_SGT: // (X s< 13 & X s> 15) -> false - return ReplaceInstUsesWith(I, ConstantInt::getFalse()); + return ReplaceInstUsesWith(I, Context->getFalse()); case ICmpInst::ICMP_UGT: // (X s< 13 & X u> 15) -> no change break; case ICmpInst::ICMP_NE: // (X s< 13 & X != 15) -> X < 13 @@ -3781,36 +3894,38 @@ Instruction *InstCombiner::FoldAndOfICmps(Instruction &I, break; case ICmpInst::ICMP_UGT: switch (RHSCC) { - default: assert(0 && "Unknown integer condition code!"); + default: llvm_unreachable("Unknown integer condition code!"); case ICmpInst::ICMP_EQ: // (X u> 13 & X == 15) -> X == 15 case ICmpInst::ICMP_UGT: // (X u> 13 & X u> 15) -> X u> 15 return ReplaceInstUsesWith(I, RHS); case ICmpInst::ICMP_SGT: // (X u> 13 & X s> 15) -> no change break; case ICmpInst::ICMP_NE: - if (RHSCst == AddOne(LHSCst)) // (X u> 13 & X != 14) -> X u> 14 - return new ICmpInst(LHSCC, Val, RHSCst); + if (RHSCst == AddOne(LHSCst, Context)) // (X u> 13 & X != 14) -> X u> 14 + return new ICmpInst(*Context, LHSCC, Val, RHSCst); break; // (X u> 13 & X != 15) -> no change case ICmpInst::ICMP_ULT: // (X u> 13 & X u< 15) -> (X-14) 13 & X s< 15) -> no change break; } break; case ICmpInst::ICMP_SGT: switch (RHSCC) { - default: assert(0 && "Unknown integer condition code!"); + default: llvm_unreachable("Unknown integer condition code!"); case ICmpInst::ICMP_EQ: // (X 
s> 13 & X == 15) -> X == 15 case ICmpInst::ICMP_SGT: // (X s> 13 & X s> 15) -> X s> 15 return ReplaceInstUsesWith(I, RHS); case ICmpInst::ICMP_UGT: // (X s> 13 & X u> 15) -> no change break; case ICmpInst::ICMP_NE: - if (RHSCst == AddOne(LHSCst)) // (X s> 13 & X != 14) -> X s> 14 - return new ICmpInst(LHSCC, Val, RHSCst); + if (RHSCst == AddOne(LHSCst, Context)) // (X s> 13 & X != 14) -> X s> 14 + return new ICmpInst(*Context, LHSCC, Val, RHSCst); break; // (X s> 13 & X != 15) -> no change case ICmpInst::ICMP_SLT: // (X s> 13 & X s< 15) -> (X-14) s< 1 - return InsertRangeTest(Val, AddOne(LHSCst), RHSCst, true, true, I); + return InsertRangeTest(Val, AddOne(LHSCst, Context), + RHSCst, true, true, I); case ICmpInst::ICMP_ULT: // (X s> 13 & X u< 15) -> no change break; } @@ -3820,13 +3935,89 @@ Instruction *InstCombiner::FoldAndOfICmps(Instruction &I, return 0; } +Instruction *InstCombiner::FoldAndOfFCmps(Instruction &I, FCmpInst *LHS, + FCmpInst *RHS) { + + if (LHS->getPredicate() == FCmpInst::FCMP_ORD && + RHS->getPredicate() == FCmpInst::FCMP_ORD) { + // (fcmp ord x, c) & (fcmp ord y, c) -> (fcmp ord x, y) + if (ConstantFP *LHSC = dyn_cast(LHS->getOperand(1))) + if (ConstantFP *RHSC = dyn_cast(RHS->getOperand(1))) { + // If either of the constants are nans, then the whole thing returns + // false. + if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN()) + return ReplaceInstUsesWith(I, Context->getFalse()); + return new FCmpInst(*Context, FCmpInst::FCMP_ORD, + LHS->getOperand(0), RHS->getOperand(0)); + } + + // Handle vector zeros. This occurs because the canonical form of + // "fcmp ord x,x" is "fcmp ord x, 0". + if (isa(LHS->getOperand(1)) && + isa(RHS->getOperand(1))) + return new FCmpInst(*Context, FCmpInst::FCMP_ORD, + LHS->getOperand(0), RHS->getOperand(0)); + return 0; + } + + Value *Op0LHS = LHS->getOperand(0), *Op0RHS = LHS->getOperand(1); + Value *Op1LHS = RHS->getOperand(0), *Op1RHS = RHS->getOperand(1); + FCmpInst::Predicate Op0CC = LHS->getPredicate(), Op1CC = RHS->getPredicate(); + + + if (Op0LHS == Op1RHS && Op0RHS == Op1LHS) { + // Swap RHS operands to match LHS. + Op1CC = FCmpInst::getSwappedPredicate(Op1CC); + std::swap(Op1LHS, Op1RHS); + } + + if (Op0LHS == Op1LHS && Op0RHS == Op1RHS) { + // Simplify (fcmp cc0 x, y) & (fcmp cc1 x, y). 
+ if (Op0CC == Op1CC) + return new FCmpInst(*Context, (FCmpInst::Predicate)Op0CC, Op0LHS, Op0RHS); + + if (Op0CC == FCmpInst::FCMP_FALSE || Op1CC == FCmpInst::FCMP_FALSE) + return ReplaceInstUsesWith(I, Context->getFalse()); + if (Op0CC == FCmpInst::FCMP_TRUE) + return ReplaceInstUsesWith(I, RHS); + if (Op1CC == FCmpInst::FCMP_TRUE) + return ReplaceInstUsesWith(I, LHS); + + bool Op0Ordered; + bool Op1Ordered; + unsigned Op0Pred = getFCmpCode(Op0CC, Op0Ordered); + unsigned Op1Pred = getFCmpCode(Op1CC, Op1Ordered); + if (Op1Pred == 0) { + std::swap(LHS, RHS); + std::swap(Op0Pred, Op1Pred); + std::swap(Op0Ordered, Op1Ordered); + } + if (Op0Pred == 0) { + // uno && ueq -> uno && (uno || eq) -> ueq + // ord && olt -> ord && (ord && lt) -> olt + if (Op0Ordered == Op1Ordered) + return ReplaceInstUsesWith(I, RHS); + + // uno && oeq -> uno && (ord && eq) -> false + // uno && ord -> false + if (!Op0Ordered) + return ReplaceInstUsesWith(I, Context->getFalse()); + // ord && ueq -> ord && (uno || eq) -> oeq + return cast(getFCmpValue(true, Op1Pred, + Op0LHS, Op0RHS, Context)); + } + } + + return 0; +} + Instruction *InstCombiner::visitAnd(BinaryOperator &I) { bool Changed = SimplifyCommutative(I); Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); if (isa(Op1)) // X & undef -> 0 - return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType())); + return ReplaceInstUsesWith(I, Context->getNullValue(I.getType())); // and X, X = X if (Op0 == Op1) @@ -3834,10 +4025,9 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) { // See if we can simplify any instructions used by the instruction whose sole // purpose is to compute bits we don't care about. - if (!isa(I.getType())) { - if (SimplifyDemandedInstructionBits(I)) - return &I; - } else { + if (SimplifyDemandedInstructionBits(I)) + return &I; + if (isa(I.getType())) { if (ConstantVector *CP = dyn_cast(Op1)) { if (CP->isAllOnesValue()) // X & <-1,-1> -> X return ReplaceInstUsesWith(I, I.getOperand(0)); @@ -3845,7 +4035,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) { return ReplaceInstUsesWith(I, Op1); // X & <0,0> -> <0,0> } } - + if (ConstantInt *AndRHS = dyn_cast(Op1)) { const APInt& AndRHSMask = AndRHS->getValue(); APInt NotAndRHS(~AndRHSMask); @@ -3907,7 +4097,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) { ConstantInt *A = dyn_cast(Op0LHS); if (!(A && A->isZero()) && // avoid infinite recursion. 
MaskedValueIsZero(Op0LHS, Mask)) { - Instruction *NewNeg = BinaryOperator::CreateNeg(Op0RHS); + Instruction *NewNeg = BinaryOperator::CreateNeg(*Context, Op0RHS); InsertNewInstBefore(NewNeg, I); return BinaryOperator::CreateAnd(NewNeg, AndRHS); } @@ -3919,8 +4109,8 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) { // (1 << x) & 1 --> zext(x == 0) // (1 >> x) & 1 --> zext(x == 0) if (AndRHSMask == 1 && Op0LHS == AndRHS) { - Instruction *NewICmp = new ICmpInst(ICmpInst::ICMP_EQ, Op0RHS, - Constant::getNullValue(I.getType())); + Instruction *NewICmp = new ICmpInst(*Context, ICmpInst::ICMP_EQ, + Op0RHS, Context->getNullValue(I.getType())); InsertNewInstBefore(NewICmp, I); return new ZExtInst(NewICmp, I.getType()); } @@ -3948,14 +4138,17 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) { CastOp->getName()+".shrunk"); NewCast = InsertNewInstBefore(NewCast, I); // trunc_or_bitcast(C1)&C2 - Constant *C3 = ConstantExpr::getTruncOrBitCast(AndCI,I.getType()); - C3 = ConstantExpr::getAnd(C3, AndRHS); + Constant *C3 = + Context->getConstantExprTruncOrBitCast(AndCI,I.getType()); + C3 = Context->getConstantExprAnd(C3, AndRHS); return BinaryOperator::CreateAnd(NewCast, C3); } else if (CastOp->getOpcode() == Instruction::Or) { // Change: and (cast (or X, C1) to T), C2 // into : trunc(C1)&C2 iff trunc(C1)&C2 == C2 - Constant *C3 = ConstantExpr::getTruncOrBitCast(AndCI,I.getType()); - if (ConstantExpr::getAnd(C3, AndRHS) == AndRHS) // trunc(C1)&C2 + Constant *C3 = + Context->getConstantExprTruncOrBitCast(AndCI,I.getType()); + if (Context->getConstantExprAnd(C3, AndRHS) == AndRHS) + // trunc(C1)&C2 return ReplaceInstUsesWith(I, AndRHS); } } @@ -3971,46 +4164,46 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) { return NV; } - Value *Op0NotVal = dyn_castNotVal(Op0); - Value *Op1NotVal = dyn_castNotVal(Op1); + Value *Op0NotVal = dyn_castNotVal(Op0, Context); + Value *Op1NotVal = dyn_castNotVal(Op1, Context); if (Op0NotVal == Op1 || Op1NotVal == Op0) // A & ~A == ~A & A == 0 - return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType())); + return ReplaceInstUsesWith(I, Context->getNullValue(I.getType())); // (~A & ~B) == (~(A | B)) - De Morgan's Law if (Op0NotVal && Op1NotVal && isOnlyUse(Op0) && isOnlyUse(Op1)) { Instruction *Or = BinaryOperator::CreateOr(Op0NotVal, Op1NotVal, I.getName()+".demorgan"); InsertNewInstBefore(Or, I); - return BinaryOperator::CreateNot(Or); + return BinaryOperator::CreateNot(*Context, Or); } { Value *A = 0, *B = 0, *C = 0, *D = 0; - if (match(Op0, m_Or(m_Value(A), m_Value(B)))) { + if (match(Op0, m_Or(m_Value(A), m_Value(B)), *Context)) { if (A == Op1 || B == Op1) // (A | ?) & A --> A return ReplaceInstUsesWith(I, Op1); // (A|B) & ~(A&B) -> A^B - if (match(Op1, m_Not(m_And(m_Value(C), m_Value(D))))) { + if (match(Op1, m_Not(m_And(m_Value(C), m_Value(D))), *Context)) { if ((A == C && B == D) || (A == D && B == C)) return BinaryOperator::CreateXor(A, B); } } - if (match(Op1, m_Or(m_Value(A), m_Value(B)))) { + if (match(Op1, m_Or(m_Value(A), m_Value(B)), *Context)) { if (A == Op0 || B == Op0) // A & (A | ?) 
--> A return ReplaceInstUsesWith(I, Op0); // ~(A&B) & (A|B) -> A^B - if (match(Op0, m_Not(m_And(m_Value(C), m_Value(D))))) { + if (match(Op0, m_Not(m_And(m_Value(C), m_Value(D))), *Context)) { if ((A == C && B == D) || (A == D && B == C)) return BinaryOperator::CreateXor(A, B); } } if (Op0->hasOneUse() && - match(Op0, m_Xor(m_Value(A), m_Value(B)))) { + match(Op0, m_Xor(m_Value(A), m_Value(B)), *Context)) { if (A == Op1) { // (A^B)&A -> A&(A^B) I.swapOperands(); // Simplify below std::swap(Op0, Op1); @@ -4022,30 +4215,30 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) { } if (Op1->hasOneUse() && - match(Op1, m_Xor(m_Value(A), m_Value(B)))) { + match(Op1, m_Xor(m_Value(A), m_Value(B)), *Context)) { if (B == Op0) { // B&(A^B) -> B&(B^A) cast(Op1)->swapOperands(); std::swap(A, B); } if (A == Op0) { // A&(A^B) -> A & ~B - Instruction *NotB = BinaryOperator::CreateNot(B, "tmp"); + Instruction *NotB = BinaryOperator::CreateNot(*Context, B, "tmp"); InsertNewInstBefore(NotB, I); return BinaryOperator::CreateAnd(A, NotB); } } // (A&((~A)|B)) -> A&B - if (match(Op0, m_Or(m_Not(m_Specific(Op1)), m_Value(A))) || - match(Op0, m_Or(m_Value(A), m_Not(m_Specific(Op1))))) + if (match(Op0, m_Or(m_Not(m_Specific(Op1)), m_Value(A)), *Context) || + match(Op0, m_Or(m_Value(A), m_Not(m_Specific(Op1))), *Context)) return BinaryOperator::CreateAnd(A, Op1); - if (match(Op1, m_Or(m_Not(m_Specific(Op0)), m_Value(A))) || - match(Op1, m_Or(m_Value(A), m_Not(m_Specific(Op0))))) + if (match(Op1, m_Or(m_Not(m_Specific(Op0)), m_Value(A)), *Context) || + match(Op1, m_Or(m_Value(A), m_Not(m_Specific(Op0))), *Context)) return BinaryOperator::CreateAnd(A, Op0); } if (ICmpInst *RHS = dyn_cast(Op1)) { // (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B) - if (Instruction *R = AssociativeOpt(I, FoldICmpLogical(*this, RHS))) + if (Instruction *R = AssociativeOpt(I, FoldICmpLogical(*this, RHS),Context)) return R; if (ICmpInst *LHS = dyn_cast(Op0)) @@ -4058,7 +4251,8 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) { if (CastInst *Op1C = dyn_cast(Op1)) if (Op0C->getOpcode() == Op1C->getOpcode()) { // same cast kind ? const Type *SrcTy = Op0C->getOperand(0)->getType(); - if (SrcTy == Op1C->getOperand(0)->getType() && SrcTy->isInteger() && + if (SrcTy == Op1C->getOperand(0)->getType() && + SrcTy->isIntOrIntVector() && // Only do this if the casts both really cause code to be generated. ValueRequiresCast(Op0C->getOpcode(), Op0C->getOperand(0), I.getType(), TD) && @@ -4089,66 +4283,9 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) { // If and'ing two fcmp, try combine them into one. if (FCmpInst *LHS = dyn_cast(I.getOperand(0))) { - if (FCmpInst *RHS = dyn_cast(I.getOperand(1))) { - if (LHS->getPredicate() == FCmpInst::FCMP_ORD && - RHS->getPredicate() == FCmpInst::FCMP_ORD) { - // (fcmp ord x, c) & (fcmp ord y, c) -> (fcmp ord x, y) - if (ConstantFP *LHSC = dyn_cast(LHS->getOperand(1))) - if (ConstantFP *RHSC = dyn_cast(RHS->getOperand(1))) { - // If either of the constants are nans, then the whole thing returns - // false. 
- if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN()) - return ReplaceInstUsesWith(I, ConstantInt::getFalse()); - return new FCmpInst(FCmpInst::FCMP_ORD, LHS->getOperand(0), - RHS->getOperand(0)); - } - } else { - Value *Op0LHS, *Op0RHS, *Op1LHS, *Op1RHS; - FCmpInst::Predicate Op0CC, Op1CC; - if (match(Op0, m_FCmp(Op0CC, m_Value(Op0LHS), m_Value(Op0RHS))) && - match(Op1, m_FCmp(Op1CC, m_Value(Op1LHS), m_Value(Op1RHS)))) { - if (Op0LHS == Op1RHS && Op0RHS == Op1LHS) { - // Swap RHS operands to match LHS. - Op1CC = FCmpInst::getSwappedPredicate(Op1CC); - std::swap(Op1LHS, Op1RHS); - } - if (Op0LHS == Op1LHS && Op0RHS == Op1RHS) { - // Simplify (fcmp cc0 x, y) & (fcmp cc1 x, y). - if (Op0CC == Op1CC) - return new FCmpInst((FCmpInst::Predicate)Op0CC, Op0LHS, Op0RHS); - else if (Op0CC == FCmpInst::FCMP_FALSE || - Op1CC == FCmpInst::FCMP_FALSE) - return ReplaceInstUsesWith(I, ConstantInt::getFalse()); - else if (Op0CC == FCmpInst::FCMP_TRUE) - return ReplaceInstUsesWith(I, Op1); - else if (Op1CC == FCmpInst::FCMP_TRUE) - return ReplaceInstUsesWith(I, Op0); - bool Op0Ordered; - bool Op1Ordered; - unsigned Op0Pred = getFCmpCode(Op0CC, Op0Ordered); - unsigned Op1Pred = getFCmpCode(Op1CC, Op1Ordered); - if (Op1Pred == 0) { - std::swap(Op0, Op1); - std::swap(Op0Pred, Op1Pred); - std::swap(Op0Ordered, Op1Ordered); - } - if (Op0Pred == 0) { - // uno && ueq -> uno && (uno || eq) -> ueq - // ord && olt -> ord && (ord && lt) -> olt - if (Op0Ordered == Op1Ordered) - return ReplaceInstUsesWith(I, Op1); - // uno && oeq -> uno && (ord && eq) -> false - // uno && ord -> false - if (!Op0Ordered) - return ReplaceInstUsesWith(I, ConstantInt::getFalse()); - // ord && ueq -> ord && (uno || eq) -> oeq - return cast(getFCmpValue(true, Op1Pred, - Op0LHS, Op0RHS)); - } - } - } - } - } + if (FCmpInst *RHS = dyn_cast(I.getOperand(1))) + if (Instruction *Res = FoldAndOfFCmps(I, LHS, RHS)) + return Res; } return Changed ? &I : 0; @@ -4318,21 +4455,22 @@ Instruction *InstCombiner::MatchBSwap(BinaryOperator &I) { /// If A is (cond?-1:0) and either B or D is ~(cond?-1,0) or (cond?0,-1), then /// we can simplify this expression to "cond ? C : D or B". static Instruction *MatchSelectFromAndOr(Value *A, Value *B, - Value *C, Value *D) { + Value *C, Value *D, + LLVMContext *Context) { // If A is not a select of -1/0, this cannot match. Value *Cond = 0; - if (!match(A, m_SelectCst<-1, 0>(m_Value(Cond)))) + if (!match(A, m_SelectCst<-1, 0>(m_Value(Cond)), *Context)) return 0; // ((cond?-1:0)&C) | (B&(cond?0:-1)) -> cond ? C : B. - if (match(D, m_SelectCst<0, -1>(m_Specific(Cond)))) + if (match(D, m_SelectCst<0, -1>(m_Specific(Cond)), *Context)) return SelectInst::Create(Cond, C, B); - if (match(D, m_Not(m_SelectCst<-1, 0>(m_Specific(Cond))))) + if (match(D, m_Not(m_SelectCst<-1, 0>(m_Specific(Cond))), *Context)) return SelectInst::Create(Cond, C, B); // ((cond?-1:0)&C) | ((cond?0:-1)&D) -> cond ? C : D. - if (match(B, m_SelectCst<0, -1>(m_Specific(Cond)))) + if (match(B, m_SelectCst<0, -1>(m_Specific(Cond)), *Context)) return SelectInst::Create(Cond, C, D); - if (match(B, m_Not(m_SelectCst<-1, 0>(m_Specific(Cond))))) + if (match(B, m_Not(m_SelectCst<-1, 0>(m_Specific(Cond))), *Context)) return SelectInst::Create(Cond, C, D); return 0; } @@ -4345,8 +4483,10 @@ Instruction *InstCombiner::FoldOrOfICmps(Instruction &I, ICmpInst::Predicate LHSCC, RHSCC; // This only handles icmp of constants: (icmp1 A, C1) | (icmp2 B, C2). 
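Aside (not part of the patch): the fcmp facts used when and'ing two compares (both in the removed inline code above and in the new FoldAndOfFCmps helper) can be checked directly with std::isnan. The helpers below are an illustrative model of the ord/uno/oeq/ueq/olt predicates, not LLVM API.

#include <cassert>
#include <cmath>
#include <limits>

static bool ord(double a, double b) { return !std::isnan(a) && !std::isnan(b); }
static bool uno(double a, double b) { return std::isnan(a) || std::isnan(b); }
static bool oeq(double a, double b) { return ord(a, b) && a == b; }
static bool ueq(double a, double b) { return uno(a, b) || a == b; }
static bool olt(double a, double b) { return ord(a, b) && a < b; }

int main() {
  const double NaN = std::numeric_limits<double>::quiet_NaN();
  const double vals[] = {-1.0, 0.0, 2.5, NaN};
  for (double x : vals)
    for (double y : vals) {
      // (fcmp ord x, C) & (fcmp ord y, C) -> (fcmp ord x, y), C not a NaN.
      assert((ord(x, 1.0) && ord(y, 1.0)) == ord(x, y));
      assert((uno(x, y) && oeq(x, y)) == false);      // uno && oeq -> false
      assert((uno(x, y) && ord(x, y)) == false);      // uno && ord -> false
      assert((ord(x, y) && olt(x, y)) == olt(x, y));  // ord && olt -> olt
      assert((ord(x, y) && ueq(x, y)) == oeq(x, y));  // ord && ueq -> oeq
    }
  return 0;
}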
- if (!match(LHS, m_ICmp(LHSCC, m_Value(Val), m_ConstantInt(LHSCst))) || - !match(RHS, m_ICmp(RHSCC, m_Value(Val2), m_ConstantInt(RHSCst)))) + if (!match(LHS, m_ICmp(LHSCC, m_Value(Val), + m_ConstantInt(LHSCst)), *Context) || + !match(RHS, m_ICmp(RHSCC, m_Value(Val2), + m_ConstantInt(RHSCst)), *Context)) return 0; // From here on, we only handle: @@ -4388,18 +4528,19 @@ Instruction *InstCombiner::FoldOrOfICmps(Instruction &I, assert(LHSCst != RHSCst && "Compares not folded above?"); switch (LHSCC) { - default: assert(0 && "Unknown integer condition code!"); + default: llvm_unreachable("Unknown integer condition code!"); case ICmpInst::ICMP_EQ: switch (RHSCC) { - default: assert(0 && "Unknown integer condition code!"); + default: llvm_unreachable("Unknown integer condition code!"); case ICmpInst::ICMP_EQ: - if (LHSCst == SubOne(RHSCst)) { // (X == 13 | X == 14) -> X-13 X-13 getConstantExprNeg(LHSCst); Instruction *Add = BinaryOperator::CreateAdd(Val, AddCST, Val->getName()+".off"); InsertNewInstBefore(Add, I); - AddCST = Subtract(AddOne(RHSCst), LHSCst); - return new ICmpInst(ICmpInst::ICMP_ULT, Add, AddCST); + AddCST = Context->getConstantExprSub(AddOne(RHSCst, Context), LHSCst); + return new ICmpInst(*Context, ICmpInst::ICMP_ULT, Add, AddCST); } break; // (X == 13 | X == 15) -> no change case ICmpInst::ICMP_UGT: // (X == 13 | X u> 14) -> no change @@ -4413,7 +4554,7 @@ Instruction *InstCombiner::FoldOrOfICmps(Instruction &I, break; case ICmpInst::ICMP_NE: switch (RHSCC) { - default: assert(0 && "Unknown integer condition code!"); + default: llvm_unreachable("Unknown integer condition code!"); case ICmpInst::ICMP_EQ: // (X != 13 | X == 15) -> X != 13 case ICmpInst::ICMP_UGT: // (X != 13 | X u> 15) -> X != 13 case ICmpInst::ICMP_SGT: // (X != 13 | X s> 15) -> X != 13 @@ -4421,12 +4562,12 @@ Instruction *InstCombiner::FoldOrOfICmps(Instruction &I, case ICmpInst::ICMP_NE: // (X != 13 | X != 15) -> true case ICmpInst::ICMP_ULT: // (X != 13 | X u< 15) -> true case ICmpInst::ICMP_SLT: // (X != 13 | X s< 15) -> true - return ReplaceInstUsesWith(I, ConstantInt::getTrue()); + return ReplaceInstUsesWith(I, Context->getTrue()); } break; case ICmpInst::ICMP_ULT: switch (RHSCC) { - default: assert(0 && "Unknown integer condition code!"); + default: llvm_unreachable("Unknown integer condition code!"); case ICmpInst::ICMP_EQ: // (X u< 13 | X == 14) -> no change break; case ICmpInst::ICMP_UGT: // (X u< 13 | X u> 15) -> (X-13) u> 2 @@ -4434,7 +4575,8 @@ Instruction *InstCombiner::FoldOrOfICmps(Instruction &I, // this can cause overflow. if (RHSCst->isMaxValue(false)) return ReplaceInstUsesWith(I, LHS); - return InsertRangeTest(Val, LHSCst, AddOne(RHSCst), false, false, I); + return InsertRangeTest(Val, LHSCst, AddOne(RHSCst, Context), + false, false, I); case ICmpInst::ICMP_SGT: // (X u< 13 | X s> 15) -> no change break; case ICmpInst::ICMP_NE: // (X u< 13 | X != 15) -> X != 15 @@ -4446,7 +4588,7 @@ Instruction *InstCombiner::FoldOrOfICmps(Instruction &I, break; case ICmpInst::ICMP_SLT: switch (RHSCC) { - default: assert(0 && "Unknown integer condition code!"); + default: llvm_unreachable("Unknown integer condition code!"); case ICmpInst::ICMP_EQ: // (X s< 13 | X == 14) -> no change break; case ICmpInst::ICMP_SGT: // (X s< 13 | X s> 15) -> (X-13) s> 2 @@ -4454,7 +4596,8 @@ Instruction *InstCombiner::FoldOrOfICmps(Instruction &I, // this can cause overflow. 
if (RHSCst->isMaxValue(true)) return ReplaceInstUsesWith(I, LHS); - return InsertRangeTest(Val, LHSCst, AddOne(RHSCst), true, false, I); + return InsertRangeTest(Val, LHSCst, AddOne(RHSCst, Context), + true, false, I); case ICmpInst::ICMP_UGT: // (X s< 13 | X u> 15) -> no change break; case ICmpInst::ICMP_NE: // (X s< 13 | X != 15) -> X != 15 @@ -4466,7 +4609,7 @@ Instruction *InstCombiner::FoldOrOfICmps(Instruction &I, break; case ICmpInst::ICMP_UGT: switch (RHSCC) { - default: assert(0 && "Unknown integer condition code!"); + default: llvm_unreachable("Unknown integer condition code!"); case ICmpInst::ICMP_EQ: // (X u> 13 | X == 15) -> X u> 13 case ICmpInst::ICMP_UGT: // (X u> 13 | X u> 15) -> X u> 13 return ReplaceInstUsesWith(I, LHS); @@ -4474,14 +4617,14 @@ Instruction *InstCombiner::FoldOrOfICmps(Instruction &I, break; case ICmpInst::ICMP_NE: // (X u> 13 | X != 15) -> true case ICmpInst::ICMP_ULT: // (X u> 13 | X u< 15) -> true - return ReplaceInstUsesWith(I, ConstantInt::getTrue()); + return ReplaceInstUsesWith(I, Context->getTrue()); case ICmpInst::ICMP_SLT: // (X u> 13 | X s< 15) -> no change break; } break; case ICmpInst::ICMP_SGT: switch (RHSCC) { - default: assert(0 && "Unknown integer condition code!"); + default: llvm_unreachable("Unknown integer condition code!"); case ICmpInst::ICMP_EQ: // (X s> 13 | X == 15) -> X > 13 case ICmpInst::ICMP_SGT: // (X s> 13 | X s> 15) -> X > 13 return ReplaceInstUsesWith(I, LHS); @@ -4489,7 +4632,7 @@ Instruction *InstCombiner::FoldOrOfICmps(Instruction &I, break; case ICmpInst::ICMP_NE: // (X s> 13 | X != 15) -> true case ICmpInst::ICMP_SLT: // (X s> 13 | X s< 15) -> true - return ReplaceInstUsesWith(I, ConstantInt::getTrue()); + return ReplaceInstUsesWith(I, Context->getTrue()); case ICmpInst::ICMP_ULT: // (X s> 13 | X u< 15) -> no change break; } @@ -4498,6 +4641,72 @@ Instruction *InstCombiner::FoldOrOfICmps(Instruction &I, return 0; } +Instruction *InstCombiner::FoldOrOfFCmps(Instruction &I, FCmpInst *LHS, + FCmpInst *RHS) { + if (LHS->getPredicate() == FCmpInst::FCMP_UNO && + RHS->getPredicate() == FCmpInst::FCMP_UNO && + LHS->getOperand(0)->getType() == RHS->getOperand(0)->getType()) { + if (ConstantFP *LHSC = dyn_cast(LHS->getOperand(1))) + if (ConstantFP *RHSC = dyn_cast(RHS->getOperand(1))) { + // If either of the constants are nans, then the whole thing returns + // true. + if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN()) + return ReplaceInstUsesWith(I, Context->getTrue()); + + // Otherwise, no need to compare the two constants, compare the + // rest. + return new FCmpInst(*Context, FCmpInst::FCMP_UNO, + LHS->getOperand(0), RHS->getOperand(0)); + } + + // Handle vector zeros. This occurs because the canonical form of + // "fcmp uno x,x" is "fcmp uno x, 0". + if (isa(LHS->getOperand(1)) && + isa(RHS->getOperand(1))) + return new FCmpInst(*Context, FCmpInst::FCMP_UNO, + LHS->getOperand(0), RHS->getOperand(0)); + + return 0; + } + + Value *Op0LHS = LHS->getOperand(0), *Op0RHS = LHS->getOperand(1); + Value *Op1LHS = RHS->getOperand(0), *Op1RHS = RHS->getOperand(1); + FCmpInst::Predicate Op0CC = LHS->getPredicate(), Op1CC = RHS->getPredicate(); + + if (Op0LHS == Op1RHS && Op0RHS == Op1LHS) { + // Swap RHS operands to match LHS. + Op1CC = FCmpInst::getSwappedPredicate(Op1CC); + std::swap(Op1LHS, Op1RHS); + } + if (Op0LHS == Op1LHS && Op0RHS == Op1RHS) { + // Simplify (fcmp cc0 x, y) | (fcmp cc1 x, y). 
+ if (Op0CC == Op1CC) + return new FCmpInst(*Context, (FCmpInst::Predicate)Op0CC, + Op0LHS, Op0RHS); + if (Op0CC == FCmpInst::FCMP_TRUE || Op1CC == FCmpInst::FCMP_TRUE) + return ReplaceInstUsesWith(I, Context->getTrue()); + if (Op0CC == FCmpInst::FCMP_FALSE) + return ReplaceInstUsesWith(I, RHS); + if (Op1CC == FCmpInst::FCMP_FALSE) + return ReplaceInstUsesWith(I, LHS); + bool Op0Ordered; + bool Op1Ordered; + unsigned Op0Pred = getFCmpCode(Op0CC, Op0Ordered); + unsigned Op1Pred = getFCmpCode(Op1CC, Op1Ordered); + if (Op0Ordered == Op1Ordered) { + // If both are ordered or unordered, return a new fcmp with + // or'ed predicates. + Value *RV = getFCmpValue(Op0Ordered, Op0Pred|Op1Pred, + Op0LHS, Op0RHS, Context); + if (Instruction *I = dyn_cast(RV)) + return I; + // Otherwise, it's a constant boolean value... + return ReplaceInstUsesWith(I, RV); + } + } + return 0; +} + /// FoldOrWithConstants - This helper function folds: /// /// ((A | B) & C1) | (B & C2) @@ -4514,7 +4723,7 @@ Instruction *InstCombiner::FoldOrWithConstants(BinaryOperator &I, Value *Op, Value *V1 = 0; ConstantInt *CI2 = 0; - if (!match(Op, m_And(m_Value(V1), m_ConstantInt(CI2)))) return 0; + if (!match(Op, m_And(m_Value(V1), m_ConstantInt(CI2)), *Context)) return 0; APInt Xor = CI1->getValue() ^ CI2->getValue(); if (!Xor.isAllOnesValue()) return 0; @@ -4533,7 +4742,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) { Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); if (isa(Op1)) // X | undef -> -1 - return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType())); + return ReplaceInstUsesWith(I, Context->getAllOnesValue(I.getType())); // or X, X = X if (Op0 == Op1) @@ -4541,37 +4750,38 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) { // See if we can simplify any instructions used by the instruction whose sole // purpose is to compute bits we don't care about. 
- if (!isa(I.getType())) { - if (SimplifyDemandedInstructionBits(I)) - return &I; - } else if (isa(Op1)) { - return ReplaceInstUsesWith(I, Op0); // X | <0,0> -> X - } else if (ConstantVector *CP = dyn_cast(Op1)) { - if (CP->isAllOnesValue()) // X | <-1,-1> -> <-1,-1> - return ReplaceInstUsesWith(I, I.getOperand(1)); + if (SimplifyDemandedInstructionBits(I)) + return &I; + if (isa(I.getType())) { + if (isa(Op1)) { + return ReplaceInstUsesWith(I, Op0); // X | <0,0> -> X + } else if (ConstantVector *CP = dyn_cast(Op1)) { + if (CP->isAllOnesValue()) // X | <-1,-1> -> <-1,-1> + return ReplaceInstUsesWith(I, I.getOperand(1)); + } } - - // or X, -1 == -1 if (ConstantInt *RHS = dyn_cast(Op1)) { ConstantInt *C1 = 0; Value *X = 0; // (X & C1) | C2 --> (X | C2) & (C1|C2) - if (match(Op0, m_And(m_Value(X), m_ConstantInt(C1))) && isOnlyUse(Op0)) { + if (match(Op0, m_And(m_Value(X), m_ConstantInt(C1)), *Context) && + isOnlyUse(Op0)) { Instruction *Or = BinaryOperator::CreateOr(X, RHS); InsertNewInstBefore(Or, I); Or->takeName(Op0); return BinaryOperator::CreateAnd(Or, - ConstantInt::get(RHS->getValue() | C1->getValue())); + ConstantInt::get(*Context, RHS->getValue() | C1->getValue())); } // (X ^ C1) | C2 --> (X | C2) ^ (C1&~C2) - if (match(Op0, m_Xor(m_Value(X), m_ConstantInt(C1))) && isOnlyUse(Op0)) { + if (match(Op0, m_Xor(m_Value(X), m_ConstantInt(C1)), *Context) && + isOnlyUse(Op0)) { Instruction *Or = BinaryOperator::CreateOr(X, RHS); InsertNewInstBefore(Or, I); Or->takeName(Op0); return BinaryOperator::CreateXor(Or, - ConstantInt::get(C1->getValue() & ~RHS->getValue())); + ConstantInt::get(*Context, C1->getValue() & ~RHS->getValue())); } // Try to fold constant and into select arguments. @@ -4586,25 +4796,26 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) { Value *A = 0, *B = 0; ConstantInt *C1 = 0, *C2 = 0; - if (match(Op0, m_And(m_Value(A), m_Value(B)))) + if (match(Op0, m_And(m_Value(A), m_Value(B)), *Context)) if (A == Op1 || B == Op1) // (A & ?) | A --> A return ReplaceInstUsesWith(I, Op1); - if (match(Op1, m_And(m_Value(A), m_Value(B)))) + if (match(Op1, m_And(m_Value(A), m_Value(B)), *Context)) if (A == Op0 || B == Op0) // A | (A & ?) --> A return ReplaceInstUsesWith(I, Op0); // (A | B) | C and A | (B | C) -> bswap if possible. // (A >> B) | (C << D) and (A << B) | (B >> C) -> bswap if possible. 
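Aside (not part of the patch): a self-contained check of the two constant-or folds above, using arbitrary illustrative constants and uint32_t in place of i32:

// (X & C1) | C2  ==  (X | C2) & (C1 | C2)
// (X ^ C1) | C2  ==  (X | C2) ^ (C1 & ~C2)
#include <cassert>
#include <cstdint>

int main() {
  const uint32_t C1 = 0x0F0F33CCu, C2 = 0x12345678u;
  for (uint32_t X : {0u, 1u, 0xDEADBEEFu, 0xFFFFFFFFu, C1, ~C2}) {
    assert(((X & C1) | C2) == ((X | C2) & (C1 | C2)));
    assert(((X ^ C1) | C2) == ((X | C2) ^ (C1 & ~C2)));
  }
  return 0;
}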
- if (match(Op0, m_Or(m_Value(), m_Value())) || - match(Op1, m_Or(m_Value(), m_Value())) || - (match(Op0, m_Shift(m_Value(), m_Value())) && - match(Op1, m_Shift(m_Value(), m_Value())))) { + if (match(Op0, m_Or(m_Value(), m_Value()), *Context) || + match(Op1, m_Or(m_Value(), m_Value()), *Context) || + (match(Op0, m_Shift(m_Value(), m_Value()), *Context) && + match(Op1, m_Shift(m_Value(), m_Value()), *Context))) { if (Instruction *BSwap = MatchBSwap(I)) return BSwap; } // (X^C)|Y -> (X|Y)^C iff Y&C == 0 - if (Op0->hasOneUse() && match(Op0, m_Xor(m_Value(A), m_ConstantInt(C1))) && + if (Op0->hasOneUse() && + match(Op0, m_Xor(m_Value(A), m_ConstantInt(C1)), *Context) && MaskedValueIsZero(Op1, C1->getValue())) { Instruction *NOr = BinaryOperator::CreateOr(A, Op1); InsertNewInstBefore(NOr, I); @@ -4613,7 +4824,8 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) { } // Y|(X^C) -> (X|Y)^C iff Y&C == 0 - if (Op1->hasOneUse() && match(Op1, m_Xor(m_Value(A), m_ConstantInt(C1))) && + if (Op1->hasOneUse() && + match(Op1, m_Xor(m_Value(A), m_ConstantInt(C1)), *Context) && MaskedValueIsZero(Op0, C1->getValue())) { Instruction *NOr = BinaryOperator::CreateOr(A, Op0); InsertNewInstBefore(NOr, I); @@ -4623,8 +4835,8 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) { // (A & C)|(B & D) Value *C = 0, *D = 0; - if (match(Op0, m_And(m_Value(A), m_Value(C))) && - match(Op1, m_And(m_Value(B), m_Value(D)))) { + if (match(Op0, m_And(m_Value(A), m_Value(C)), *Context) && + match(Op1, m_And(m_Value(B), m_Value(D)), *Context)) { Value *V1 = 0, *V2 = 0, *V3 = 0; C1 = dyn_cast(C); C2 = dyn_cast(D); @@ -4634,7 +4846,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) { // replace with V+N. if (C1->getValue() == ~C2->getValue()) { if ((C2->getValue() & (C2->getValue()+1)) == 0 && // C2 == 0+1+ - match(A, m_Add(m_Value(V1), m_Value(V2)))) { + match(A, m_Add(m_Value(V1), m_Value(V2)), *Context)) { // Add commutes, try both ways. if (V1 == B && MaskedValueIsZero(V2, C2->getValue())) return ReplaceInstUsesWith(I, A); @@ -4643,7 +4855,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) { } // Or commutes, try both ways. if ((C1->getValue() & (C1->getValue()+1)) == 0 && - match(B, m_Add(m_Value(V1), m_Value(V2)))) { + match(B, m_Add(m_Value(V1), m_Value(V2)), *Context)) { // Add commutes, try both ways. if (V1 == A && MaskedValueIsZero(V2, C1->getValue())) return ReplaceInstUsesWith(I, B); @@ -4674,30 +4886,30 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) { } // (A & (C0?-1:0)) | (B & ~(C0?-1:0)) -> C0 ? 
A : B, and commuted variants - if (Instruction *Match = MatchSelectFromAndOr(A, B, C, D)) + if (Instruction *Match = MatchSelectFromAndOr(A, B, C, D, Context)) return Match; - if (Instruction *Match = MatchSelectFromAndOr(B, A, D, C)) + if (Instruction *Match = MatchSelectFromAndOr(B, A, D, C, Context)) return Match; - if (Instruction *Match = MatchSelectFromAndOr(C, B, A, D)) + if (Instruction *Match = MatchSelectFromAndOr(C, B, A, D, Context)) return Match; - if (Instruction *Match = MatchSelectFromAndOr(D, A, B, C)) + if (Instruction *Match = MatchSelectFromAndOr(D, A, B, C, Context)) return Match; // ((A&~B)|(~A&B)) -> A^B - if ((match(C, m_Not(m_Specific(D))) && - match(B, m_Not(m_Specific(A))))) + if ((match(C, m_Not(m_Specific(D)), *Context) && + match(B, m_Not(m_Specific(A)), *Context))) return BinaryOperator::CreateXor(A, D); // ((~B&A)|(~A&B)) -> A^B - if ((match(A, m_Not(m_Specific(D))) && - match(B, m_Not(m_Specific(C))))) + if ((match(A, m_Not(m_Specific(D)), *Context) && + match(B, m_Not(m_Specific(C)), *Context))) return BinaryOperator::CreateXor(C, D); // ((A&~B)|(B&~A)) -> A^B - if ((match(C, m_Not(m_Specific(B))) && - match(D, m_Not(m_Specific(A))))) + if ((match(C, m_Not(m_Specific(B)), *Context) && + match(D, m_Not(m_Specific(A)), *Context))) return BinaryOperator::CreateXor(A, B); // ((~B&A)|(B&~A)) -> A^B - if ((match(A, m_Not(m_Specific(B))) && - match(D, m_Not(m_Specific(C))))) + if ((match(A, m_Not(m_Specific(B)), *Context) && + match(D, m_Not(m_Specific(C)), *Context))) return BinaryOperator::CreateXor(C, B); } @@ -4717,40 +4929,40 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) { } // ((A|B)&1)|(B&-2) -> (A&1) | B - if (match(Op0, m_And(m_Or(m_Value(A), m_Value(B)), m_Value(C))) || - match(Op0, m_And(m_Value(C), m_Or(m_Value(A), m_Value(B))))) { + if (match(Op0, m_And(m_Or(m_Value(A), m_Value(B)), m_Value(C)), *Context) || + match(Op0, m_And(m_Value(C), m_Or(m_Value(A), m_Value(B))), *Context)) { Instruction *Ret = FoldOrWithConstants(I, Op1, A, B, C); if (Ret) return Ret; } // (B&-2)|((A|B)&1) -> (A&1) | B - if (match(Op1, m_And(m_Or(m_Value(A), m_Value(B)), m_Value(C))) || - match(Op1, m_And(m_Value(C), m_Or(m_Value(A), m_Value(B))))) { + if (match(Op1, m_And(m_Or(m_Value(A), m_Value(B)), m_Value(C)), *Context) || + match(Op1, m_And(m_Value(C), m_Or(m_Value(A), m_Value(B))), *Context)) { Instruction *Ret = FoldOrWithConstants(I, Op0, A, B, C); if (Ret) return Ret; } - if (match(Op0, m_Not(m_Value(A)))) { // ~A | Op1 + if (match(Op0, m_Not(m_Value(A)), *Context)) { // ~A | Op1 if (A == Op1) // ~A | A == -1 - return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType())); + return ReplaceInstUsesWith(I, Context->getAllOnesValue(I.getType())); } else { A = 0; } // Note, A is still live here! 
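Aside (not part of the patch): a quick standalone check of the bitwise identities behind the or-folds in this region, including the xor-recovery patterns above and the De Morgan fold just below:

#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t A : {0u, 0xFF00FF00u, 0xDEADBEEFu, 0xFFFFFFFFu})
    for (uint32_t B : {0u, 0x0F0F0F0Fu, 0x12345678u, 0xFFFFFFFFu}) {
      assert(((A & ~B) | (~A & B)) == (A ^ B));  // ((A&~B)|(~A&B)) -> A^B
      assert((~A | ~B) == ~(A & B));             // De Morgan's Law
      assert((~A | A) == 0xFFFFFFFFu);           // ~A | A -> -1
    }
  return 0;
}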
- if (match(Op1, m_Not(m_Value(B)))) { // Op0 | ~B + if (match(Op1, m_Not(m_Value(B)), *Context)) { // Op0 | ~B if (Op0 == B) - return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType())); + return ReplaceInstUsesWith(I, Context->getAllOnesValue(I.getType())); // (~A | ~B) == (~(A & B)) - De Morgan's Law if (A && isOnlyUse(Op0) && isOnlyUse(Op1)) { Value *And = InsertNewInstBefore(BinaryOperator::CreateAnd(A, B, I.getName()+".demorgan"), I); - return BinaryOperator::CreateNot(And); + return BinaryOperator::CreateNot(*Context, And); } } // (icmp1 A, B) | (icmp2 A, B) --> (icmp3 A, B) if (ICmpInst *RHS = dyn_cast(I.getOperand(1))) { - if (Instruction *R = AssociativeOpt(I, FoldICmpLogical(*this, RHS))) + if (Instruction *R = AssociativeOpt(I, FoldICmpLogical(*this, RHS),Context)) return R; if (ICmpInst *LHS = dyn_cast(I.getOperand(0))) @@ -4765,7 +4977,8 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) { if (!isa(Op0C->getOperand(0)) || !isa(Op1C->getOperand(0))) { const Type *SrcTy = Op0C->getOperand(0)->getType(); - if (SrcTy == Op1C->getOperand(0)->getType() && SrcTy->isInteger() && + if (SrcTy == Op1C->getOperand(0)->getType() && + SrcTy->isIntOrIntVector() && // Only do this if the casts both really cause code to be // generated. ValueRequiresCast(Op0C->getOpcode(), Op0C->getOperand(0), @@ -4785,61 +4998,9 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) { // (fcmp uno x, c) | (fcmp uno y, c) -> (fcmp uno x, y) if (FCmpInst *LHS = dyn_cast(I.getOperand(0))) { - if (FCmpInst *RHS = dyn_cast(I.getOperand(1))) { - if (LHS->getPredicate() == FCmpInst::FCMP_UNO && - RHS->getPredicate() == FCmpInst::FCMP_UNO && - LHS->getOperand(0)->getType() == RHS->getOperand(0)->getType()) { - if (ConstantFP *LHSC = dyn_cast(LHS->getOperand(1))) - if (ConstantFP *RHSC = dyn_cast(RHS->getOperand(1))) { - // If either of the constants are nans, then the whole thing returns - // true. - if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN()) - return ReplaceInstUsesWith(I, ConstantInt::getTrue()); - - // Otherwise, no need to compare the two constants, compare the - // rest. - return new FCmpInst(FCmpInst::FCMP_UNO, LHS->getOperand(0), - RHS->getOperand(0)); - } - } else { - Value *Op0LHS, *Op0RHS, *Op1LHS, *Op1RHS; - FCmpInst::Predicate Op0CC, Op1CC; - if (match(Op0, m_FCmp(Op0CC, m_Value(Op0LHS), m_Value(Op0RHS))) && - match(Op1, m_FCmp(Op1CC, m_Value(Op1LHS), m_Value(Op1RHS)))) { - if (Op0LHS == Op1RHS && Op0RHS == Op1LHS) { - // Swap RHS operands to match LHS. - Op1CC = FCmpInst::getSwappedPredicate(Op1CC); - std::swap(Op1LHS, Op1RHS); - } - if (Op0LHS == Op1LHS && Op0RHS == Op1RHS) { - // Simplify (fcmp cc0 x, y) | (fcmp cc1 x, y). - if (Op0CC == Op1CC) - return new FCmpInst((FCmpInst::Predicate)Op0CC, Op0LHS, Op0RHS); - else if (Op0CC == FCmpInst::FCMP_TRUE || - Op1CC == FCmpInst::FCMP_TRUE) - return ReplaceInstUsesWith(I, ConstantInt::getTrue()); - else if (Op0CC == FCmpInst::FCMP_FALSE) - return ReplaceInstUsesWith(I, Op1); - else if (Op1CC == FCmpInst::FCMP_FALSE) - return ReplaceInstUsesWith(I, Op0); - bool Op0Ordered; - bool Op1Ordered; - unsigned Op0Pred = getFCmpCode(Op0CC, Op0Ordered); - unsigned Op1Pred = getFCmpCode(Op1CC, Op1Ordered); - if (Op0Ordered == Op1Ordered) { - // If both are ordered or unordered, return a new fcmp with - // or'ed predicates. - Value *RV = getFCmpValue(Op0Ordered, Op0Pred|Op1Pred, - Op0LHS, Op0RHS); - if (Instruction *I = dyn_cast(RV)) - return I; - // Otherwise, it's a constant boolean value... 
- return ReplaceInstUsesWith(I, RV); - } - } - } - } - } + if (FCmpInst *RHS = dyn_cast(I.getOperand(1))) + if (Instruction *Res = FoldOrOfFCmps(I, LHS, RHS)) + return Res; } return Changed ? &I : 0; @@ -4867,36 +5028,35 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) { if (isa(Op0)) // Handle undef ^ undef -> 0 special case. This is a common // idiom (misuse). - return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType())); + return ReplaceInstUsesWith(I, Context->getNullValue(I.getType())); return ReplaceInstUsesWith(I, Op1); // X ^ undef -> undef } // xor X, X = 0, even if X is nested in a sequence of Xor's. - if (Instruction *Result = AssociativeOpt(I, XorSelf(Op1))) { + if (Instruction *Result = AssociativeOpt(I, XorSelf(Op1), Context)) { assert(Result == &I && "AssociativeOpt didn't work?"); Result=Result; - return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType())); + return ReplaceInstUsesWith(I, Context->getNullValue(I.getType())); } // See if we can simplify any instructions used by the instruction whose sole // purpose is to compute bits we don't care about. - if (!isa(I.getType())) { - if (SimplifyDemandedInstructionBits(I)) - return &I; - } else if (isa(Op1)) { - return ReplaceInstUsesWith(I, Op0); // X ^ <0,0> -> X - } + if (SimplifyDemandedInstructionBits(I)) + return &I; + if (isa(I.getType())) + if (isa(Op1)) + return ReplaceInstUsesWith(I, Op0); // X ^ <0,0> -> X // Is this a ~ operation? - if (Value *NotOp = dyn_castNotVal(&I)) { + if (Value *NotOp = dyn_castNotVal(&I, Context)) { // ~(~X & Y) --> (X | ~Y) - De Morgan's Law // ~(~X | Y) === (X & ~Y) - De Morgan's Law if (BinaryOperator *Op0I = dyn_cast(NotOp)) { if (Op0I->getOpcode() == Instruction::And || Op0I->getOpcode() == Instruction::Or) { - if (dyn_castNotVal(Op0I->getOperand(1))) Op0I->swapOperands(); - if (Value *Op0NotVal = dyn_castNotVal(Op0I->getOperand(0))) { + if (dyn_castNotVal(Op0I->getOperand(1), Context)) Op0I->swapOperands(); + if (Value *Op0NotVal = dyn_castNotVal(Op0I->getOperand(0), Context)) { Instruction *NotY = - BinaryOperator::CreateNot(Op0I->getOperand(1), + BinaryOperator::CreateNot(*Context, Op0I->getOperand(1), Op0I->getOperand(1)->getName()+".not"); InsertNewInstBefore(NotY, I); if (Op0I->getOpcode() == Instruction::And) @@ -4910,14 +5070,14 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) { if (ConstantInt *RHS = dyn_cast(Op1)) { - if (RHS == ConstantInt::getTrue() && Op0->hasOneUse()) { + if (RHS == Context->getTrue() && Op0->hasOneUse()) { // xor (cmp A, B), true = not (cmp A, B) = !cmp A, B if (ICmpInst *ICI = dyn_cast(Op0)) - return new ICmpInst(ICI->getInversePredicate(), + return new ICmpInst(*Context, ICI->getInversePredicate(), ICI->getOperand(0), ICI->getOperand(1)); if (FCmpInst *FCI = dyn_cast(Op0)) - return new FCmpInst(FCI->getInversePredicate(), + return new FCmpInst(*Context, FCI->getInversePredicate(), FCI->getOperand(0), FCI->getOperand(1)); } @@ -4927,9 +5087,11 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) { if (CI->hasOneUse() && Op0C->hasOneUse()) { Instruction::CastOps Opcode = Op0C->getOpcode(); if (Opcode == Instruction::ZExt || Opcode == Instruction::SExt) { - if (RHS == ConstantExpr::getCast(Opcode, ConstantInt::getTrue(), + if (RHS == Context->getConstantExprCast(Opcode, + Context->getTrue(), Op0C->getDestTy())) { Instruction *NewCI = InsertNewInstBefore(CmpInst::Create( + *Context, CI->getOpcode(), CI->getInversePredicate(), CI->getOperand(0), CI->getOperand(1)), I); NewCI->takeName(CI); @@ -4944,9 +5106,9 @@ 
Instruction *InstCombiner::visitXor(BinaryOperator &I) { // ~(c-X) == X-c-1 == X+(-c-1) if (Op0I->getOpcode() == Instruction::Sub && RHS->isAllOnesValue()) if (Constant *Op0I0C = dyn_cast(Op0I->getOperand(0))) { - Constant *NegOp0I0C = ConstantExpr::getNeg(Op0I0C); - Constant *ConstantRHS = ConstantExpr::getSub(NegOp0I0C, - ConstantInt::get(I.getType(), 1)); + Constant *NegOp0I0C = Context->getConstantExprNeg(Op0I0C); + Constant *ConstantRHS = Context->getConstantExprSub(NegOp0I0C, + ConstantInt::get(I.getType(), 1)); return BinaryOperator::CreateAdd(Op0I->getOperand(1), ConstantRHS); } @@ -4954,26 +5116,27 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) { if (Op0I->getOpcode() == Instruction::Add) { // ~(X-c) --> (-c-1)-X if (RHS->isAllOnesValue()) { - Constant *NegOp0CI = ConstantExpr::getNeg(Op0CI); + Constant *NegOp0CI = Context->getConstantExprNeg(Op0CI); return BinaryOperator::CreateSub( - ConstantExpr::getSub(NegOp0CI, - ConstantInt::get(I.getType(), 1)), - Op0I->getOperand(0)); + Context->getConstantExprSub(NegOp0CI, + ConstantInt::get(I.getType(), 1)), + Op0I->getOperand(0)); } else if (RHS->getValue().isSignBit()) { // (X + C) ^ signbit -> (X + C + signbit) - Constant *C = ConstantInt::get(RHS->getValue() + Op0CI->getValue()); + Constant *C = ConstantInt::get(*Context, + RHS->getValue() + Op0CI->getValue()); return BinaryOperator::CreateAdd(Op0I->getOperand(0), C); } } else if (Op0I->getOpcode() == Instruction::Or) { // (X|C1)^C2 -> X^(C1|C2) iff X&~C1 == 0 if (MaskedValueIsZero(Op0I->getOperand(0), Op0CI->getValue())) { - Constant *NewRHS = ConstantExpr::getOr(Op0CI, RHS); + Constant *NewRHS = Context->getConstantExprOr(Op0CI, RHS); // Anything in both C1 and C2 is known to be zero, remove it from // NewRHS. - Constant *CommonBits = And(Op0CI, RHS); - NewRHS = ConstantExpr::getAnd(NewRHS, - ConstantExpr::getNot(CommonBits)); + Constant *CommonBits = Context->getConstantExprAnd(Op0CI, RHS); + NewRHS = Context->getConstantExprAnd(NewRHS, + Context->getConstantExprNot(CommonBits)); AddToWorkList(Op0I); I.setOperand(0, Op0I->getOperand(0)); I.setOperand(1, NewRHS); @@ -4992,19 +5155,19 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) { return NV; } - if (Value *X = dyn_castNotVal(Op0)) // ~A ^ A == -1 + if (Value *X = dyn_castNotVal(Op0, Context)) // ~A ^ A == -1 if (X == Op1) - return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType())); + return ReplaceInstUsesWith(I, Context->getAllOnesValue(I.getType())); - if (Value *X = dyn_castNotVal(Op1)) // A ^ ~A == -1 + if (Value *X = dyn_castNotVal(Op1, Context)) // A ^ ~A == -1 if (X == Op0) - return ReplaceInstUsesWith(I, Constant::getAllOnesValue(I.getType())); + return ReplaceInstUsesWith(I, Context->getAllOnesValue(I.getType())); BinaryOperator *Op1I = dyn_cast(Op1); if (Op1I) { Value *A, *B; - if (match(Op1I, m_Or(m_Value(A), m_Value(B)))) { + if (match(Op1I, m_Or(m_Value(A), m_Value(B)), *Context)) { if (A == Op0) { // B^(B|A) == (A|B)^B Op1I->swapOperands(); I.swapOperands(); @@ -5013,11 +5176,12 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) { I.swapOperands(); // Simplified below. 
std::swap(Op0, Op1); } - } else if (match(Op1I, m_Xor(m_Specific(Op0), m_Value(B)))) { + } else if (match(Op1I, m_Xor(m_Specific(Op0), m_Value(B)), *Context)) { return ReplaceInstUsesWith(I, B); // A^(A^B) == B - } else if (match(Op1I, m_Xor(m_Value(A), m_Specific(Op0)))) { + } else if (match(Op1I, m_Xor(m_Value(A), m_Specific(Op0)), *Context)) { return ReplaceInstUsesWith(I, A); // A^(B^A) == B - } else if (match(Op1I, m_And(m_Value(A), m_Value(B))) && Op1I->hasOneUse()){ + } else if (match(Op1I, m_And(m_Value(A), m_Value(B)), *Context) && + Op1I->hasOneUse()){ if (A == Op0) { // A^(A&B) -> A^(B&A) Op1I->swapOperands(); std::swap(A, B); @@ -5032,25 +5196,28 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) { BinaryOperator *Op0I = dyn_cast(Op0); if (Op0I) { Value *A, *B; - if (match(Op0I, m_Or(m_Value(A), m_Value(B))) && Op0I->hasOneUse()) { + if (match(Op0I, m_Or(m_Value(A), m_Value(B)), *Context) && + Op0I->hasOneUse()) { if (A == Op1) // (B|A)^B == (A|B)^B std::swap(A, B); if (B == Op1) { // (A|B)^B == A & ~B Instruction *NotB = - InsertNewInstBefore(BinaryOperator::CreateNot(Op1, "tmp"), I); + InsertNewInstBefore(BinaryOperator::CreateNot(*Context, + Op1, "tmp"), I); return BinaryOperator::CreateAnd(A, NotB); } - } else if (match(Op0I, m_Xor(m_Specific(Op1), m_Value(B)))) { + } else if (match(Op0I, m_Xor(m_Specific(Op1), m_Value(B)), *Context)) { return ReplaceInstUsesWith(I, B); // (A^B)^A == B - } else if (match(Op0I, m_Xor(m_Value(A), m_Specific(Op1)))) { + } else if (match(Op0I, m_Xor(m_Value(A), m_Specific(Op1)), *Context)) { return ReplaceInstUsesWith(I, A); // (B^A)^A == B - } else if (match(Op0I, m_And(m_Value(A), m_Value(B))) && Op0I->hasOneUse()){ + } else if (match(Op0I, m_And(m_Value(A), m_Value(B)), *Context) && + Op0I->hasOneUse()){ if (A == Op1) // (A&B)^A -> (B&A)^A std::swap(A, B); if (B == Op1 && // (B&A)^A == ~B & A !isa(Op1)) { // Canonical form is (B&C)^C Instruction *N = - InsertNewInstBefore(BinaryOperator::CreateNot(A, "tmp"), I); + InsertNewInstBefore(BinaryOperator::CreateNot(*Context, A, "tmp"), I); return BinaryOperator::CreateAnd(N, Op1); } } @@ -5072,22 +5239,22 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) { if (Op0I && Op1I) { Value *A, *B, *C, *D; // (A & B)^(A | B) -> A ^ B - if (match(Op0I, m_And(m_Value(A), m_Value(B))) && - match(Op1I, m_Or(m_Value(C), m_Value(D)))) { + if (match(Op0I, m_And(m_Value(A), m_Value(B)), *Context) && + match(Op1I, m_Or(m_Value(C), m_Value(D)), *Context)) { if ((A == C && B == D) || (A == D && B == C)) return BinaryOperator::CreateXor(A, B); } // (A | B)^(A & B) -> A ^ B - if (match(Op0I, m_Or(m_Value(A), m_Value(B))) && - match(Op1I, m_And(m_Value(C), m_Value(D)))) { + if (match(Op0I, m_Or(m_Value(A), m_Value(B)), *Context) && + match(Op1I, m_And(m_Value(C), m_Value(D)), *Context)) { if ((A == C && B == D) || (A == D && B == C)) return BinaryOperator::CreateXor(A, B); } // (A & B)^(C & D) if ((Op0I->hasOneUse() || Op1I->hasOneUse()) && - match(Op0I, m_And(m_Value(A), m_Value(B))) && - match(Op1I, m_And(m_Value(C), m_Value(D)))) { + match(Op0I, m_And(m_Value(A), m_Value(B)), *Context) && + match(Op1I, m_And(m_Value(C), m_Value(D)), *Context)) { // (X & Y)^(X & Y) -> (Y^Z) & X Value *X = 0, *Y = 0, *Z = 0; if (A == C) @@ -5109,7 +5276,7 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) { // (icmp1 A, B) ^ (icmp2 A, B) --> (icmp3 A, B) if (ICmpInst *RHS = dyn_cast(I.getOperand(1))) - if (Instruction *R = AssociativeOpt(I, FoldICmpLogical(*this, RHS))) + if (Instruction *R = AssociativeOpt(I, 
FoldICmpLogical(*this, RHS),Context)) return R; // fold (xor (cast A), (cast B)) -> (cast (xor A, B)) @@ -5135,12 +5302,14 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) { return Changed ? &I : 0; } -/// AddWithOverflow - Compute Result = In1+In2, returning true if the result -/// overflowed for this type. -static bool AddWithOverflow(ConstantInt *&Result, ConstantInt *In1, - ConstantInt *In2, bool IsSigned = false) { - Result = cast(Add(In1, In2)); +static ConstantInt *ExtractElement(Constant *V, Constant *Idx, + LLVMContext *Context) { + return cast(Context->getConstantExprExtractElement(V, Idx)); +} +static bool HasAddOverflow(ConstantInt *Result, + ConstantInt *In1, ConstantInt *In2, + bool IsSigned) { if (IsSigned) if (In2->getValue().isNegative()) return Result->getValue().sgt(In1->getValue()); @@ -5150,12 +5319,33 @@ static bool AddWithOverflow(ConstantInt *&Result, ConstantInt *In1, return Result->getValue().ult(In1->getValue()); } -/// SubWithOverflow - Compute Result = In1-In2, returning true if the result +/// AddWithOverflow - Compute Result = In1+In2, returning true if the result /// overflowed for this type. -static bool SubWithOverflow(ConstantInt *&Result, ConstantInt *In1, - ConstantInt *In2, bool IsSigned = false) { - Result = cast(Subtract(In1, In2)); +static bool AddWithOverflow(Constant *&Result, Constant *In1, + Constant *In2, LLVMContext *Context, + bool IsSigned = false) { + Result = Context->getConstantExprAdd(In1, In2); + + if (const VectorType *VTy = dyn_cast(In1->getType())) { + for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) { + Constant *Idx = ConstantInt::get(Type::Int32Ty, i); + if (HasAddOverflow(ExtractElement(Result, Idx, Context), + ExtractElement(In1, Idx, Context), + ExtractElement(In2, Idx, Context), + IsSigned)) + return true; + } + return false; + } + + return HasAddOverflow(cast(Result), + cast(In1), cast(In2), + IsSigned); +} +static bool HasSubOverflow(ConstantInt *Result, + ConstantInt *In1, ConstantInt *In2, + bool IsSigned) { if (IsSigned) if (In2->getValue().isNegative()) return Result->getValue().slt(In1->getValue()); @@ -5165,14 +5355,39 @@ static bool SubWithOverflow(ConstantInt *&Result, ConstantInt *In1, return Result->getValue().ugt(In1->getValue()); } +/// SubWithOverflow - Compute Result = In1-In2, returning true if the result +/// overflowed for this type. +static bool SubWithOverflow(Constant *&Result, Constant *In1, + Constant *In2, LLVMContext *Context, + bool IsSigned = false) { + Result = Context->getConstantExprSub(In1, In2); + + if (const VectorType *VTy = dyn_cast(In1->getType())) { + for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) { + Constant *Idx = ConstantInt::get(Type::Int32Ty, i); + if (HasSubOverflow(ExtractElement(Result, Idx, Context), + ExtractElement(In1, Idx, Context), + ExtractElement(In2, Idx, Context), + IsSigned)) + return true; + } + return false; + } + + return HasSubOverflow(cast(Result), + cast(In1), cast(In2), + IsSigned); +} + /// EmitGEPOffset - Given a getelementptr instruction/constantexpr, emit the /// code necessary to compute the offset from the base pointer (without adding /// in the base pointer). Return the result as a signed integer of intptr size. 
static Value *EmitGEPOffset(User *GEP, Instruction &I, InstCombiner &IC) { - TargetData &TD = IC.getTargetData(); + TargetData &TD = *IC.getTargetData(); gep_type_iterator GTI = gep_type_begin(GEP); const Type *IntPtrTy = TD.getIntPtrType(); - Value *Result = Constant::getNullValue(IntPtrTy); + LLVMContext *Context = IC.getContext(); + Value *Result = Context->getNullValue(IntPtrTy); // Build a mask for high order bits. unsigned IntPtrWidth = TD.getPointerSizeInBits(); @@ -5181,7 +5396,7 @@ static Value *EmitGEPOffset(User *GEP, Instruction &I, InstCombiner &IC) { for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e; ++i, ++GTI) { Value *Op = *i; - uint64_t Size = TD.getTypePaddedSize(GTI.getIndexedType()) & PtrSizeMask; + uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask; if (ConstantInt *OpC = dyn_cast(Op)) { if (OpC->isZero()) continue; @@ -5190,20 +5405,23 @@ static Value *EmitGEPOffset(User *GEP, Instruction &I, InstCombiner &IC) { Size = TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue()); if (ConstantInt *RC = dyn_cast(Result)) - Result = ConstantInt::get(RC->getValue() + APInt(IntPtrWidth, Size)); + Result = + ConstantInt::get(*Context, + RC->getValue() + APInt(IntPtrWidth, Size)); else Result = IC.InsertNewInstBefore( BinaryOperator::CreateAdd(Result, - ConstantInt::get(IntPtrTy, Size), + ConstantInt::get(IntPtrTy, Size), GEP->getName()+".offs"), I); continue; } Constant *Scale = ConstantInt::get(IntPtrTy, Size); - Constant *OC = ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/); - Scale = ConstantExpr::getMul(OC, Scale); + Constant *OC = + Context->getConstantExprIntegerCast(OpC, IntPtrTy, true /*SExt*/); + Scale = Context->getConstantExprMul(OC, Scale); if (Constant *RC = dyn_cast(Result)) - Result = ConstantExpr::getAdd(RC, Scale); + Result = Context->getConstantExprAdd(RC, Scale); else { // Emit an add instruction. Result = IC.InsertNewInstBefore( @@ -5215,15 +5433,16 @@ static Value *EmitGEPOffset(User *GEP, Instruction &I, InstCombiner &IC) { // Convert to correct type. if (Op->getType() != IntPtrTy) { if (Constant *OpC = dyn_cast(Op)) - Op = ConstantExpr::getSExt(OpC, IntPtrTy); + Op = Context->getConstantExprIntegerCast(OpC, IntPtrTy, true); else - Op = IC.InsertNewInstBefore(new SExtInst(Op, IntPtrTy, - Op->getName()+".c"), I); + Op = IC.InsertNewInstBefore(CastInst::CreateIntegerCast(Op, IntPtrTy, + true, + Op->getName()+".c"), I); } if (Size != 1) { Constant *Scale = ConstantInt::get(IntPtrTy, Size); if (Constant *OpC = dyn_cast(Op)) - Op = ConstantExpr::getMul(OpC, Scale); + Op = Context->getConstantExprMul(OpC, Scale); else // We'll let instcombine(mul) convert this to a shl if possible. Op = IC.InsertNewInstBefore(BinaryOperator::CreateMul(Op, Scale, GEP->getName()+".idx"), I); @@ -5231,7 +5450,7 @@ static Value *EmitGEPOffset(User *GEP, Instruction &I, InstCombiner &IC) { // Emit an add instruction. if (isa(Op) && isa(Result)) - Result = ConstantExpr::getAdd(cast(Op), + Result = Context->getConstantExprAdd(cast(Op), cast(Result)); else Result = IC.InsertNewInstBefore(BinaryOperator::CreateAdd(Op, Result, @@ -5241,19 +5460,19 @@ static Value *EmitGEPOffset(User *GEP, Instruction &I, InstCombiner &IC) { } -/// EvaluateGEPOffsetExpression - Return an value that can be used to compare of -/// the *offset* implied by GEP to zero. For example, if we have &A[i], we want -/// to return 'i' for "icmp ne i, 0". Note that, in general, indices can be -/// complex, and scales are involved. 
The above expression would also be legal -/// to codegen as "icmp ne (i*4), 0" (assuming A is a pointer to i32). This -/// later form is less amenable to optimization though, and we are allowed to -/// generate the first by knowing that pointer arithmetic doesn't overflow. +/// EvaluateGEPOffsetExpression - Return a value that can be used to compare +/// the *offset* implied by a GEP to zero. For example, if we have &A[i], we +/// want to return 'i' for "icmp ne i, 0". Note that, in general, indices can +/// be complex, and scales are involved. The above expression would also be +/// legal to codegen as "icmp ne (i*4), 0" (assuming A is a pointer to i32). +/// This later form is less amenable to optimization though, and we are allowed +/// to generate the first by knowing that pointer arithmetic doesn't overflow. /// /// If we can't emit an optimized form for this expression, this returns null. /// static Value *EvaluateGEPOffsetExpression(User *GEP, Instruction &I, InstCombiner &IC) { - TargetData &TD = IC.getTargetData(); + TargetData &TD = *IC.getTargetData(); gep_type_iterator GTI = gep_type_begin(GEP); // Check to see if this gep only has a single variable index. If so, and if @@ -5272,7 +5491,7 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, Instruction &I, if (const StructType *STy = dyn_cast(*GTI)) { Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue()); } else { - uint64_t Size = TD.getTypePaddedSize(GTI.getIndexedType()); + uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType()); Offset += Size*CI->getSExtValue(); } } else { @@ -5288,7 +5507,7 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, Instruction &I, Value *VariableIdx = GEP->getOperand(i); // Determine the scale factor of the variable element. For example, this is // 4 if the variable index is into an array of i32. - uint64_t VariableScale = TD.getTypePaddedSize(GTI.getIndexedType()); + uint64_t VariableScale = TD.getTypeAllocSize(GTI.getIndexedType()); // Verify that there are no other variable indices. If so, emit the hard way. for (++i, ++GTI; i != e; ++i, ++GTI) { @@ -5302,7 +5521,7 @@ static Value *EvaluateGEPOffsetExpression(User *GEP, Instruction &I, if (const StructType *STy = dyn_cast(*GTI)) { Offset += TD.getStructLayout(STy)->getElementOffset(CI->getZExtValue()); } else { - uint64_t Size = TD.getTypePaddedSize(GTI.getIndexedType()); + uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType()); Offset += Size*CI->getSExtValue(); } } @@ -5359,7 +5578,7 @@ Instruction *InstCombiner::FoldGEPICmp(User *GEPLHS, Value *RHS, RHS = BCI->getOperand(0); Value *PtrBase = GEPLHS->getOperand(0); - if (PtrBase == RHS) { + if (TD && PtrBase == RHS) { // ((gep Ptr, OFFSET) cmp Ptr) ---> (OFFSET cmp 0). // This transformation (ignoring the base and scales) is valid because we // know pointers can't overflow. See if we can output an optimized form. @@ -5368,8 +5587,8 @@ Instruction *InstCombiner::FoldGEPICmp(User *GEPLHS, Value *RHS, // If not, synthesize the offset the hard way. if (Offset == 0) Offset = EmitGEPOffset(GEPLHS, I, *this); - return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Offset, - Constant::getNullValue(Offset->getType())); + return new ICmpInst(*Context, ICmpInst::getSignedPredicate(Cond), Offset, + Context->getNullValue(Offset->getType())); } else if (User *GEPRHS = dyn_castGetElementPtr(RHS)) { // If the base pointers are different, but the indices are the same, just // compare the base pointer. 
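// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the patch: EmitGEPOffset above accumulates
// "index * allocation size" per array step, and FoldGEPICmp then compares the
// accumulated offset (or just the index) against zero rather than materialising
// the pointer. The plain-C++ stand-in below mirrors that arithmetic for an i32
// array; it does not use the TargetData API, and all names are illustrative.
#include <cassert>
#include <cstdint>

int main() {
  int32_t A[16] = {};
  for (int64_t i = 0; i < 16; ++i) {
    // EmitGEPOffset-style arithmetic: offset(&A[i]) = i * alloc-size(i32).
    int64_t Off = i * int64_t(sizeof(int32_t));
    assert((reinterpret_cast<char *>(&A[i]) - reinterpret_cast<char *>(A)) == Off);
    // FoldGEPICmp: "(gep A, i) != A" can be folded to "Off != 0", i.e. "i != 0",
    // because in-bounds pointer arithmetic is assumed not to overflow.
    assert((&A[i] != &A[0]) == (i != 0));
  }
  return 0;
}
// ---------------------------------------------------------------------------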
@@ -5386,7 +5605,7 @@ Instruction *InstCombiner::FoldGEPICmp(User *GEPLHS, Value *RHS, // If all indices are the same, just compare the base pointers. if (IndicesTheSame) - return new ICmpInst(ICmpInst::getSignedPredicate(Cond), + return new ICmpInst(*Context, ICmpInst::getSignedPredicate(Cond), GEPLHS->getOperand(0), GEPRHS->getOperand(0)); // Otherwise, the base pointers are different and the indices are @@ -5443,18 +5662,20 @@ Instruction *InstCombiner::FoldGEPICmp(User *GEPLHS, Value *RHS, Value *LHSV = GEPLHS->getOperand(DiffOperand); Value *RHSV = GEPRHS->getOperand(DiffOperand); // Make sure we do a signed comparison here. - return new ICmpInst(ICmpInst::getSignedPredicate(Cond), LHSV, RHSV); + return new ICmpInst(*Context, + ICmpInst::getSignedPredicate(Cond), LHSV, RHSV); } } // Only lower this if the icmp is the only user of the GEP or if we expect // the result to fold to a constant! - if ((isa(GEPLHS) || GEPLHS->hasOneUse()) && + if (TD && + (isa(GEPLHS) || GEPLHS->hasOneUse()) && (isa(GEPRHS) || GEPRHS->hasOneUse())) { // ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2) ---> (OFFSET1 cmp OFFSET2) Value *L = EmitGEPOffset(GEPLHS, I, *this); Value *R = EmitGEPOffset(GEPRHS, I, *this); - return new ICmpInst(ICmpInst::getSignedPredicate(Cond), L, R); + return new ICmpInst(*Context, ICmpInst::getSignedPredicate(Cond), L, R); } } return 0; @@ -5476,7 +5697,7 @@ Instruction *InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst &I, // Check to see that the input is converted from an integer type that is small // enough that preserves all bits. TODO: check here for "known" sign bits. // This would allow us to handle (fptosi (x >>s 62) to float) if x is i64 f.e. - unsigned InputSize = LHSI->getOperand(0)->getType()->getPrimitiveSizeInBits(); + unsigned InputSize = LHSI->getOperand(0)->getType()->getScalarSizeInBits(); // If this is a uitofp instruction, we need an extra bit to hold the sign. bool LHSUnsigned = isa(LHSI); @@ -5494,7 +5715,7 @@ Instruction *InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst &I, ICmpInst::Predicate Pred; switch (I.getPredicate()) { - default: assert(0 && "Unexpected predicate!"); + default: llvm_unreachable("Unexpected predicate!"); case FCmpInst::FCMP_UEQ: case FCmpInst::FCMP_OEQ: Pred = ICmpInst::ICMP_EQ; @@ -5520,9 +5741,9 @@ Instruction *InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst &I, Pred = ICmpInst::ICMP_NE; break; case FCmpInst::FCMP_ORD: - return ReplaceInstUsesWith(I, ConstantInt::getTrue()); + return ReplaceInstUsesWith(I, Context->getTrue()); case FCmpInst::FCMP_UNO: - return ReplaceInstUsesWith(I, ConstantInt::getFalse()); + return ReplaceInstUsesWith(I, Context->getFalse()); } const IntegerType *IntTy = cast(LHSI->getOperand(0)->getType()); @@ -5531,7 +5752,7 @@ Instruction *InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst &I, // See if the FP constant is too large for the integer. For example, // comparing an i8 to 300.0. - unsigned IntWidth = IntTy->getPrimitiveSizeInBits(); + unsigned IntWidth = IntTy->getScalarSizeInBits(); if (!LHSUnsigned) { // If the RHS value is > SignedMax, fold the comparison. 
This handles +INF @@ -5542,8 +5763,8 @@ Instruction *InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst &I, if (SMax.compare(RHS) == APFloat::cmpLessThan) { // smax < 13123.0 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) - return ReplaceInstUsesWith(I, ConstantInt::getTrue()); - return ReplaceInstUsesWith(I, ConstantInt::getFalse()); + return ReplaceInstUsesWith(I, Context->getTrue()); + return ReplaceInstUsesWith(I, Context->getFalse()); } } else { // If the RHS value is > UnsignedMax, fold the comparison. This handles @@ -5554,8 +5775,8 @@ Instruction *InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst &I, if (UMax.compare(RHS) == APFloat::cmpLessThan) { // umax < 13123.0 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) - return ReplaceInstUsesWith(I, ConstantInt::getTrue()); - return ReplaceInstUsesWith(I, ConstantInt::getFalse()); + return ReplaceInstUsesWith(I, Context->getTrue()); + return ReplaceInstUsesWith(I, Context->getFalse()); } } @@ -5567,8 +5788,8 @@ Instruction *InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst &I, if (SMin.compare(RHS) == APFloat::cmpGreaterThan) { // smin > 12312.0 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) - return ReplaceInstUsesWith(I,ConstantInt::getTrue()); - return ReplaceInstUsesWith(I, ConstantInt::getFalse()); + return ReplaceInstUsesWith(I, Context->getTrue()); + return ReplaceInstUsesWith(I, Context->getFalse()); } } @@ -5576,74 +5797,80 @@ Instruction *InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst &I, // [0, UMAX], but it may still be fractional. See if it is fractional by // casting the FP value to the integer value and back, checking for equality. // Don't do this for zero, because -0.0 is not fractional. - Constant *RHSInt = ConstantExpr::getFPToSI(RHSC, IntTy); - if (!RHS.isZero() && - ConstantExpr::getSIToFP(RHSInt, RHSC->getType()) != RHSC) { - // If we had a comparison against a fractional value, we have to adjust the - // compare predicate and sometimes the value. RHSC is rounded towards zero - // at this point. 
- switch (Pred) { - default: assert(0 && "Unexpected integer comparison!"); - case ICmpInst::ICMP_NE: // (float)int != 4.4 --> true - return ReplaceInstUsesWith(I, ConstantInt::getTrue()); - case ICmpInst::ICMP_EQ: // (float)int == 4.4 --> false - return ReplaceInstUsesWith(I, ConstantInt::getFalse()); - case ICmpInst::ICMP_ULE: - // (float)int <= 4.4 --> int <= 4 - // (float)int <= -4.4 --> false - if (RHS.isNegative()) - return ReplaceInstUsesWith(I, ConstantInt::getFalse()); - break; - case ICmpInst::ICMP_SLE: - // (float)int <= 4.4 --> int <= 4 - // (float)int <= -4.4 --> int < -4 - if (RHS.isNegative()) - Pred = ICmpInst::ICMP_SLT; - break; - case ICmpInst::ICMP_ULT: - // (float)int < -4.4 --> false - // (float)int < 4.4 --> int <= 4 - if (RHS.isNegative()) - return ReplaceInstUsesWith(I, ConstantInt::getFalse()); - Pred = ICmpInst::ICMP_ULE; - break; - case ICmpInst::ICMP_SLT: - // (float)int < -4.4 --> int < -4 - // (float)int < 4.4 --> int <= 4 - if (!RHS.isNegative()) - Pred = ICmpInst::ICMP_SLE; - break; - case ICmpInst::ICMP_UGT: - // (float)int > 4.4 --> int > 4 - // (float)int > -4.4 --> true - if (RHS.isNegative()) - return ReplaceInstUsesWith(I, ConstantInt::getTrue()); - break; - case ICmpInst::ICMP_SGT: - // (float)int > 4.4 --> int > 4 - // (float)int > -4.4 --> int >= -4 - if (RHS.isNegative()) - Pred = ICmpInst::ICMP_SGE; - break; - case ICmpInst::ICMP_UGE: - // (float)int >= -4.4 --> true - // (float)int >= 4.4 --> int > 4 - if (!RHS.isNegative()) - return ReplaceInstUsesWith(I, ConstantInt::getTrue()); - Pred = ICmpInst::ICMP_UGT; - break; - case ICmpInst::ICMP_SGE: - // (float)int >= -4.4 --> int >= -4 - // (float)int >= 4.4 --> int > 4 - if (!RHS.isNegative()) - Pred = ICmpInst::ICMP_SGT; - break; + Constant *RHSInt = LHSUnsigned + ? Context->getConstantExprFPToUI(RHSC, IntTy) + : Context->getConstantExprFPToSI(RHSC, IntTy); + if (!RHS.isZero()) { + bool Equal = LHSUnsigned + ? Context->getConstantExprUIToFP(RHSInt, RHSC->getType()) == RHSC + : Context->getConstantExprSIToFP(RHSInt, RHSC->getType()) == RHSC; + if (!Equal) { + // If we had a comparison against a fractional value, we have to adjust + // the compare predicate and sometimes the value. RHSC is rounded towards + // zero at this point. 
+ switch (Pred) { + default: llvm_unreachable("Unexpected integer comparison!"); + case ICmpInst::ICMP_NE: // (float)int != 4.4 --> true + return ReplaceInstUsesWith(I, Context->getTrue()); + case ICmpInst::ICMP_EQ: // (float)int == 4.4 --> false + return ReplaceInstUsesWith(I, Context->getFalse()); + case ICmpInst::ICMP_ULE: + // (float)int <= 4.4 --> int <= 4 + // (float)int <= -4.4 --> false + if (RHS.isNegative()) + return ReplaceInstUsesWith(I, Context->getFalse()); + break; + case ICmpInst::ICMP_SLE: + // (float)int <= 4.4 --> int <= 4 + // (float)int <= -4.4 --> int < -4 + if (RHS.isNegative()) + Pred = ICmpInst::ICMP_SLT; + break; + case ICmpInst::ICMP_ULT: + // (float)int < -4.4 --> false + // (float)int < 4.4 --> int <= 4 + if (RHS.isNegative()) + return ReplaceInstUsesWith(I, Context->getFalse()); + Pred = ICmpInst::ICMP_ULE; + break; + case ICmpInst::ICMP_SLT: + // (float)int < -4.4 --> int < -4 + // (float)int < 4.4 --> int <= 4 + if (!RHS.isNegative()) + Pred = ICmpInst::ICMP_SLE; + break; + case ICmpInst::ICMP_UGT: + // (float)int > 4.4 --> int > 4 + // (float)int > -4.4 --> true + if (RHS.isNegative()) + return ReplaceInstUsesWith(I, Context->getTrue()); + break; + case ICmpInst::ICMP_SGT: + // (float)int > 4.4 --> int > 4 + // (float)int > -4.4 --> int >= -4 + if (RHS.isNegative()) + Pred = ICmpInst::ICMP_SGE; + break; + case ICmpInst::ICMP_UGE: + // (float)int >= -4.4 --> true + // (float)int >= 4.4 --> int > 4 + if (!RHS.isNegative()) + return ReplaceInstUsesWith(I, Context->getTrue()); + Pred = ICmpInst::ICMP_UGT; + break; + case ICmpInst::ICMP_SGE: + // (float)int >= -4.4 --> int >= -4 + // (float)int >= 4.4 --> int > 4 + if (!RHS.isNegative()) + Pred = ICmpInst::ICMP_SGT; + break; + } } } // Lower this FP comparison into an appropriate integer version of the // comparison. - return new ICmpInst(Pred, LHSI->getOperand(0), RHSInt); + return new ICmpInst(*Context, Pred, LHSI->getOperand(0), RHSInt); } Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) { @@ -5652,22 +5879,22 @@ Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) { // Fold trivial predicates. if (I.getPredicate() == FCmpInst::FCMP_FALSE) - return ReplaceInstUsesWith(I, ConstantInt::getFalse()); + return ReplaceInstUsesWith(I, Context->getFalse()); if (I.getPredicate() == FCmpInst::FCMP_TRUE) - return ReplaceInstUsesWith(I, ConstantInt::getTrue()); + return ReplaceInstUsesWith(I, Context->getTrue()); // Simplify 'fcmp pred X, X' if (Op0 == Op1) { switch (I.getPredicate()) { - default: assert(0 && "Unknown predicate!"); + default: llvm_unreachable("Unknown predicate!"); case FCmpInst::FCMP_UEQ: // True if unordered or equal case FCmpInst::FCMP_UGE: // True if unordered, greater than, or equal case FCmpInst::FCMP_ULE: // True if unordered, less than, or equal - return ReplaceInstUsesWith(I, ConstantInt::getTrue()); + return ReplaceInstUsesWith(I, Context->getTrue()); case FCmpInst::FCMP_OGT: // True if ordered and greater than case FCmpInst::FCMP_OLT: // True if ordered and less than case FCmpInst::FCMP_ONE: // True if ordered and operands are unequal - return ReplaceInstUsesWith(I, ConstantInt::getFalse()); + return ReplaceInstUsesWith(I, Context->getFalse()); case FCmpInst::FCMP_UNO: // True if unordered: isnan(X) | isnan(Y) case FCmpInst::FCMP_ULT: // True if unordered or less than @@ -5675,7 +5902,7 @@ Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) { case FCmpInst::FCMP_UNE: // True if unordered or not equal // Canonicalize these to be 'fcmp uno %X, 0.0'. 
I.setPredicate(FCmpInst::FCMP_UNO); - I.setOperand(1, Constant::getNullValue(Op0->getType())); + I.setOperand(1, Context->getNullValue(Op0->getType())); return &I; case FCmpInst::FCMP_ORD: // True if ordered (no nans) @@ -5684,13 +5911,13 @@ Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) { case FCmpInst::FCMP_OLE: // True if ordered and less than or equal // Canonicalize these to be 'fcmp ord %X, 0.0'. I.setPredicate(FCmpInst::FCMP_ORD); - I.setOperand(1, Constant::getNullValue(Op0->getType())); + I.setOperand(1, Context->getNullValue(Op0->getType())); return &I; } } if (isa(Op1)) // fcmp pred X, undef -> undef - return ReplaceInstUsesWith(I, UndefValue::get(Type::Int1Ty)); + return ReplaceInstUsesWith(I, Context->getUndef(Type::Int1Ty)); // Handle fcmp with constant RHS if (Constant *RHSC = dyn_cast(Op1)) { @@ -5698,11 +5925,11 @@ Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) { if (ConstantFP *CFP = dyn_cast(RHSC)) { if (CFP->getValueAPF().isNaN()) { if (FCmpInst::isOrdered(I.getPredicate())) // True if ordered and... - return ReplaceInstUsesWith(I, ConstantInt::getFalse()); + return ReplaceInstUsesWith(I, Context->getFalse()); assert(FCmpInst::isUnordered(I.getPredicate()) && "Comparison must be either ordered or unordered!"); // True if unordered. - return ReplaceInstUsesWith(I, ConstantInt::getTrue()); + return ReplaceInstUsesWith(I, Context->getTrue()); } } @@ -5729,16 +5956,16 @@ Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) { if (LHSI->hasOneUse()) { if (Constant *C = dyn_cast(LHSI->getOperand(1))) { // Fold the known value into the constant operand. - Op1 = ConstantExpr::getCompare(I.getPredicate(), C, RHSC); + Op1 = Context->getConstantExprCompare(I.getPredicate(), C, RHSC); // Insert a new FCmp of the other select operand. - Op2 = InsertNewInstBefore(new FCmpInst(I.getPredicate(), + Op2 = InsertNewInstBefore(new FCmpInst(*Context, I.getPredicate(), LHSI->getOperand(2), RHSC, I.getName()), I); } else if (Constant *C = dyn_cast(LHSI->getOperand(2))) { // Fold the known value into the constant operand. - Op2 = ConstantExpr::getCompare(I.getPredicate(), C, RHSC); + Op2 = Context->getConstantExprCompare(I.getPredicate(), C, RHSC); // Insert a new FCmp of the other select operand. - Op1 = InsertNewInstBefore(new FCmpInst(I.getPredicate(), + Op1 = InsertNewInstBefore(new FCmpInst(*Context, I.getPredicate(), LHSI->getOperand(1), RHSC, I.getName()), I); } @@ -5764,7 +5991,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) { I.isTrueWhenEqual())); if (isa(Op1)) // X icmp undef -> undef - return ReplaceInstUsesWith(I, UndefValue::get(Type::Int1Ty)); + return ReplaceInstUsesWith(I, Context->getUndef(Type::Int1Ty)); // icmp , - Global/Stack value // addresses never equal each other! We already know that Op0 != Op1. 
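// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the patch: the FoldFCmp_IntToFP_Cst logic a
// few hunks above rewrites "(sitofp x) pred C" as an integer compare, tightening
// the predicate when C is fractional because C is rounded toward zero. The
// standalone check below confirms the predicate table by brute force over a
// small signed range; the constant 4.4 is an assumed example.
#include <cassert>

int main() {
  const double C = 4.4;
  for (int x = -1000; x <= 1000; ++x) {
    assert(((double)x <   C) == (x <=  4));  // (float)int <   4.4 --> int <=  4
    assert(((double)x >   C) == (x >   4));  // (float)int >   4.4 --> int >   4
    assert(((double)x <  -C) == (x <  -4));  // (float)int <  -4.4 --> int <  -4
    assert(((double)x >= -C) == (x >= -4));  // (float)int >= -4.4 --> int >= -4
  }
  return 0;
}
// ---------------------------------------------------------------------------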
@@ -5778,11 +6005,11 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) { // icmp's with boolean values can always be turned into bitwise operations if (Ty == Type::Int1Ty) { switch (I.getPredicate()) { - default: assert(0 && "Invalid icmp instruction!"); + default: llvm_unreachable("Invalid icmp instruction!"); case ICmpInst::ICMP_EQ: { // icmp eq i1 A, B -> ~(A^B) Instruction *Xor = BinaryOperator::CreateXor(Op0, Op1, I.getName()+"tmp"); InsertNewInstBefore(Xor, I); - return BinaryOperator::CreateNot(Xor); + return BinaryOperator::CreateNot(*Context, Xor); } case ICmpInst::ICMP_NE: // icmp eq i1 A, B -> A^B return BinaryOperator::CreateXor(Op0, Op1); @@ -5791,7 +6018,8 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) { std::swap(Op0, Op1); // Change icmp ugt -> icmp ult // FALL THROUGH case ICmpInst::ICMP_ULT:{ // icmp ult i1 A, B -> ~A & B - Instruction *Not = BinaryOperator::CreateNot(Op0, I.getName()+"tmp"); + Instruction *Not = BinaryOperator::CreateNot(*Context, + Op0, I.getName()+"tmp"); InsertNewInstBefore(Not, I); return BinaryOperator::CreateAnd(Not, Op1); } @@ -5799,7 +6027,8 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) { std::swap(Op0, Op1); // Change icmp sgt -> icmp slt // FALL THROUGH case ICmpInst::ICMP_SLT: { // icmp slt i1 A, B -> A & ~B - Instruction *Not = BinaryOperator::CreateNot(Op1, I.getName()+"tmp"); + Instruction *Not = BinaryOperator::CreateNot(*Context, + Op1, I.getName()+"tmp"); InsertNewInstBefore(Not, I); return BinaryOperator::CreateAnd(Not, Op0); } @@ -5807,7 +6036,8 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) { std::swap(Op0, Op1); // Change icmp uge -> icmp ule // FALL THROUGH case ICmpInst::ICMP_ULE: { // icmp ule i1 A, B -> ~A | B - Instruction *Not = BinaryOperator::CreateNot(Op0, I.getName()+"tmp"); + Instruction *Not = BinaryOperator::CreateNot(*Context, + Op0, I.getName()+"tmp"); InsertNewInstBefore(Not, I); return BinaryOperator::CreateOr(Not, Op1); } @@ -5815,22 +6045,31 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) { std::swap(Op0, Op1); // Change icmp sge -> icmp sle // FALL THROUGH case ICmpInst::ICMP_SLE: { // icmp sle i1 A, B -> A | ~B - Instruction *Not = BinaryOperator::CreateNot(Op1, I.getName()+"tmp"); + Instruction *Not = BinaryOperator::CreateNot(*Context, + Op1, I.getName()+"tmp"); InsertNewInstBefore(Not, I); return BinaryOperator::CreateOr(Not, Op0); } } } + unsigned BitWidth = 0; + if (TD) + BitWidth = TD->getTypeSizeInBits(Ty->getScalarType()); + else if (Ty->isIntOrIntVector()) + BitWidth = Ty->getScalarSizeInBits(); + + bool isSignBit = false; + // See if we are doing a comparison with a constant. 
if (ConstantInt *CI = dyn_cast(Op1)) { Value *A = 0, *B = 0; // (icmp ne/eq (sub A B) 0) -> (icmp ne/eq A, B) if (I.isEquality() && CI->isNullValue() && - match(Op0, m_Sub(m_Value(A), m_Value(B)))) { + match(Op0, m_Sub(m_Value(A), m_Value(B)), *Context)) { // (icmp cond A B) if cond is equality - return new ICmpInst(I.getPredicate(), A, B); + return new ICmpInst(*Context, I.getPredicate(), A, B); } // If we have an icmp le or icmp ge instruction, turn it into the @@ -5840,121 +6079,187 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) { default: break; case ICmpInst::ICMP_ULE: if (CI->isMaxValue(false)) // A <=u MAX -> TRUE - return ReplaceInstUsesWith(I, ConstantInt::getTrue()); - return new ICmpInst(ICmpInst::ICMP_ULT, Op0, AddOne(CI)); + return ReplaceInstUsesWith(I, Context->getTrue()); + return new ICmpInst(*Context, ICmpInst::ICMP_ULT, Op0, + AddOne(CI, Context)); case ICmpInst::ICMP_SLE: if (CI->isMaxValue(true)) // A <=s MAX -> TRUE - return ReplaceInstUsesWith(I, ConstantInt::getTrue()); - return new ICmpInst(ICmpInst::ICMP_SLT, Op0, AddOne(CI)); + return ReplaceInstUsesWith(I, Context->getTrue()); + return new ICmpInst(*Context, ICmpInst::ICMP_SLT, Op0, + AddOne(CI, Context)); case ICmpInst::ICMP_UGE: if (CI->isMinValue(false)) // A >=u MIN -> TRUE - return ReplaceInstUsesWith(I, ConstantInt::getTrue()); - return new ICmpInst( ICmpInst::ICMP_UGT, Op0, SubOne(CI)); + return ReplaceInstUsesWith(I, Context->getTrue()); + return new ICmpInst(*Context, ICmpInst::ICMP_UGT, Op0, + SubOne(CI, Context)); case ICmpInst::ICMP_SGE: if (CI->isMinValue(true)) // A >=s MIN -> TRUE - return ReplaceInstUsesWith(I, ConstantInt::getTrue()); - return new ICmpInst(ICmpInst::ICMP_SGT, Op0, SubOne(CI)); + return ReplaceInstUsesWith(I, Context->getTrue()); + return new ICmpInst(*Context, ICmpInst::ICMP_SGT, Op0, + SubOne(CI, Context)); } - // See if we can fold the comparison based on range information we can get - // by checking whether bits are known to be zero or one in the input. - uint32_t BitWidth = cast(Ty)->getBitWidth(); - APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0); - // If this comparison is a normal comparison, it demands all // bits, if it is a sign bit comparison, it only demands the sign bit. bool UnusedBit; - bool isSignBit = isSignBitCheck(I.getPredicate(), CI, UnusedBit); - - if (SimplifyDemandedBits(I.getOperandUse(0), + isSignBit = isSignBitCheck(I.getPredicate(), CI, UnusedBit); + } + + // See if we can fold the comparison based on range information we can get + // by checking whether bits are known to be zero or one in the input. + if (BitWidth != 0) { + APInt Op0KnownZero(BitWidth, 0), Op0KnownOne(BitWidth, 0); + APInt Op1KnownZero(BitWidth, 0), Op1KnownOne(BitWidth, 0); + + if (SimplifyDemandedBits(I.getOperandUse(0), isSignBit ? APInt::getSignBit(BitWidth) : APInt::getAllOnesValue(BitWidth), - KnownZero, KnownOne, 0)) + Op0KnownZero, Op0KnownOne, 0)) return &I; - + if (SimplifyDemandedBits(I.getOperandUse(1), + APInt::getAllOnesValue(BitWidth), + Op1KnownZero, Op1KnownOne, 0)) + return &I; + // Given the known and unknown bits, compute a range that the LHS could be // in. Compute the Min, Max and RHS values based on the known bits. For the // EQ and NE we use unsigned values. 
- APInt Min(BitWidth, 0), Max(BitWidth, 0); - if (ICmpInst::isSignedPredicate(I.getPredicate())) - ComputeSignedMinMaxValuesFromKnownBits(Ty, KnownZero, KnownOne, Min, Max); - else - ComputeUnsignedMinMaxValuesFromKnownBits(Ty, KnownZero, KnownOne,Min,Max); - + APInt Op0Min(BitWidth, 0), Op0Max(BitWidth, 0); + APInt Op1Min(BitWidth, 0), Op1Max(BitWidth, 0); + if (ICmpInst::isSignedPredicate(I.getPredicate())) { + ComputeSignedMinMaxValuesFromKnownBits(Op0KnownZero, Op0KnownOne, + Op0Min, Op0Max); + ComputeSignedMinMaxValuesFromKnownBits(Op1KnownZero, Op1KnownOne, + Op1Min, Op1Max); + } else { + ComputeUnsignedMinMaxValuesFromKnownBits(Op0KnownZero, Op0KnownOne, + Op0Min, Op0Max); + ComputeUnsignedMinMaxValuesFromKnownBits(Op1KnownZero, Op1KnownOne, + Op1Min, Op1Max); + } + // If Min and Max are known to be the same, then SimplifyDemandedBits // figured out that the LHS is a constant. Just constant fold this now so // that code below can assume that Min != Max. - if (Min == Max) - return ReplaceInstUsesWith(I, ConstantExpr::getICmp(I.getPredicate(), - ConstantInt::get(Min), - CI)); - + if (!isa(Op0) && Op0Min == Op0Max) + return new ICmpInst(*Context, I.getPredicate(), + ConstantInt::get(*Context, Op0Min), Op1); + if (!isa(Op1) && Op1Min == Op1Max) + return new ICmpInst(*Context, I.getPredicate(), Op0, + ConstantInt::get(*Context, Op1Min)); + // Based on the range information we know about the LHS, see if we can // simplify this comparison. For example, (x&4) < 8 is always true. - const APInt &RHSVal = CI->getValue(); - switch (I.getPredicate()) { // LE/GE have been folded already. - default: assert(0 && "Unknown icmp opcode!"); + switch (I.getPredicate()) { + default: llvm_unreachable("Unknown icmp opcode!"); case ICmpInst::ICMP_EQ: - if (Max.ult(RHSVal) || Min.ugt(RHSVal)) - return ReplaceInstUsesWith(I, ConstantInt::getFalse()); + if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max)) + return ReplaceInstUsesWith(I, Context->getFalse()); break; case ICmpInst::ICMP_NE: - if (Max.ult(RHSVal) || Min.ugt(RHSVal)) - return ReplaceInstUsesWith(I, ConstantInt::getTrue()); + if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max)) + return ReplaceInstUsesWith(I, Context->getTrue()); break; case ICmpInst::ICMP_ULT: - if (Max.ult(RHSVal)) // A true iff max(A) < C - return ReplaceInstUsesWith(I, ConstantInt::getTrue()); - if (Min.uge(RHSVal)) // A false iff min(A) >= C - return ReplaceInstUsesWith(I, ConstantInt::getFalse()); - if (RHSVal == Max) // A A != MAX - return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1); - if (RHSVal == Min+1) // A A == MIN - return new ICmpInst(ICmpInst::ICMP_EQ, Op0, SubOne(CI)); - - // (x (x >s -1) -> true if sign bit clear - if (CI->isMinValue(true)) - return new ICmpInst(ICmpInst::ICMP_SGT, Op0, - ConstantInt::getAllOnesValue(Op0->getType())); + if (Op0Max.ult(Op1Min)) // A true if max(A) < min(B) + return ReplaceInstUsesWith(I, Context->getTrue()); + if (Op0Min.uge(Op1Max)) // A false if min(A) >= max(B) + return ReplaceInstUsesWith(I, Context->getFalse()); + if (Op1Min == Op0Max) // A A != B if max(A) == min(B) + return new ICmpInst(*Context, ICmpInst::ICMP_NE, Op0, Op1); + if (ConstantInt *CI = dyn_cast(Op1)) { + if (Op1Max == Op0Min+1) // A A == C-1 if min(A)+1 == C + return new ICmpInst(*Context, ICmpInst::ICMP_EQ, Op0, + SubOne(CI, Context)); + + // (x (x >s -1) -> true if sign bit clear + if (CI->isMinValue(true)) + return new ICmpInst(*Context, ICmpInst::ICMP_SGT, Op0, + Context->getAllOnesValue(Op0->getType())); + } break; case ICmpInst::ICMP_UGT: - if (Min.ugt(RHSVal)) // A >u C 
-> true iff min(A) > C - return ReplaceInstUsesWith(I, ConstantInt::getTrue()); - if (Max.ule(RHSVal)) // A >u C -> false iff max(A) <= C - return ReplaceInstUsesWith(I, ConstantInt::getFalse()); - - if (RHSVal == Min) // A >u MIN -> A != MIN - return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1); - if (RHSVal == Max-1) // A >u MAX-1 -> A == MAX - return new ICmpInst(ICmpInst::ICMP_EQ, Op0, AddOne(CI)); - - // (x >u 2147483647) -> (x true if sign bit set - if (CI->isMaxValue(true)) - return new ICmpInst(ICmpInst::ICMP_SLT, Op0, - ConstantInt::getNullValue(Op0->getType())); + if (Op0Min.ugt(Op1Max)) // A >u B -> true if min(A) > max(B) + return ReplaceInstUsesWith(I, Context->getTrue()); + if (Op0Max.ule(Op1Min)) // A >u B -> false if max(A) <= max(B) + return ReplaceInstUsesWith(I, Context->getFalse()); + + if (Op1Max == Op0Min) // A >u B -> A != B if min(A) == max(B) + return new ICmpInst(*Context, ICmpInst::ICMP_NE, Op0, Op1); + if (ConstantInt *CI = dyn_cast(Op1)) { + if (Op1Min == Op0Max-1) // A >u C -> A == C+1 if max(a)-1 == C + return new ICmpInst(*Context, ICmpInst::ICMP_EQ, Op0, + AddOne(CI, Context)); + + // (x >u 2147483647) -> (x true if sign bit set + if (CI->isMaxValue(true)) + return new ICmpInst(*Context, ICmpInst::ICMP_SLT, Op0, + Context->getNullValue(Op0->getType())); + } break; case ICmpInst::ICMP_SLT: - if (Max.slt(RHSVal)) // A true iff max(A) < C - return ReplaceInstUsesWith(I, ConstantInt::getTrue()); - if (Min.sge(RHSVal)) // A false iff min(A) >= C - return ReplaceInstUsesWith(I, ConstantInt::getFalse()); - if (RHSVal == Max) // A A != MAX - return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1); - if (RHSVal == Min+1) // A A == MIN - return new ICmpInst(ICmpInst::ICMP_EQ, Op0, SubOne(CI)); + if (Op0Max.slt(Op1Min)) // A true if max(A) < min(C) + return ReplaceInstUsesWith(I, Context->getTrue()); + if (Op0Min.sge(Op1Max)) // A false if min(A) >= max(C) + return ReplaceInstUsesWith(I, Context->getFalse()); + if (Op1Min == Op0Max) // A A != B if max(A) == min(B) + return new ICmpInst(*Context, ICmpInst::ICMP_NE, Op0, Op1); + if (ConstantInt *CI = dyn_cast(Op1)) { + if (Op1Max == Op0Min+1) // A A == C-1 if min(A)+1 == C + return new ICmpInst(*Context, ICmpInst::ICMP_EQ, Op0, + SubOne(CI, Context)); + } break; - case ICmpInst::ICMP_SGT: - if (Min.sgt(RHSVal)) // A >s C -> true iff min(A) > C - return ReplaceInstUsesWith(I, ConstantInt::getTrue()); - if (Max.sle(RHSVal)) // A >s C -> false iff max(A) <= C - return ReplaceInstUsesWith(I, ConstantInt::getFalse()); - - if (RHSVal == Min) // A >s MIN -> A != MIN - return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1); - if (RHSVal == Max-1) // A >s MAX-1 -> A == MAX - return new ICmpInst(ICmpInst::ICMP_EQ, Op0, AddOne(CI)); + case ICmpInst::ICMP_SGT: + if (Op0Min.sgt(Op1Max)) // A >s B -> true if min(A) > max(B) + return ReplaceInstUsesWith(I, Context->getTrue()); + if (Op0Max.sle(Op1Min)) // A >s B -> false if max(A) <= min(B) + return ReplaceInstUsesWith(I, Context->getFalse()); + + if (Op1Max == Op0Min) // A >s B -> A != B if min(A) == max(B) + return new ICmpInst(*Context, ICmpInst::ICMP_NE, Op0, Op1); + if (ConstantInt *CI = dyn_cast(Op1)) { + if (Op1Min == Op0Max-1) // A >s C -> A == C+1 if max(A)-1 == C + return new ICmpInst(*Context, ICmpInst::ICMP_EQ, Op0, + AddOne(CI, Context)); + } + break; + case ICmpInst::ICMP_SGE: + assert(!isa(Op1) && "ICMP_SGE with ConstantInt not folded!"); + if (Op0Min.sge(Op1Max)) // A >=s B -> true if min(A) >= max(B) + return ReplaceInstUsesWith(I, Context->getTrue()); + if (Op0Max.slt(Op1Min)) // A 
>=s B -> false if max(A) < min(B) + return ReplaceInstUsesWith(I, Context->getFalse()); + break; + case ICmpInst::ICMP_SLE: + assert(!isa(Op1) && "ICMP_SLE with ConstantInt not folded!"); + if (Op0Max.sle(Op1Min)) // A <=s B -> true if max(A) <= min(B) + return ReplaceInstUsesWith(I, Context->getTrue()); + if (Op0Min.sgt(Op1Max)) // A <=s B -> false if min(A) > max(B) + return ReplaceInstUsesWith(I, Context->getFalse()); + break; + case ICmpInst::ICMP_UGE: + assert(!isa(Op1) && "ICMP_UGE with ConstantInt not folded!"); + if (Op0Min.uge(Op1Max)) // A >=u B -> true if min(A) >= max(B) + return ReplaceInstUsesWith(I, Context->getTrue()); + if (Op0Max.ult(Op1Min)) // A >=u B -> false if max(A) < min(B) + return ReplaceInstUsesWith(I, Context->getFalse()); + break; + case ICmpInst::ICMP_ULE: + assert(!isa(Op1) && "ICMP_ULE with ConstantInt not folded!"); + if (Op0Max.ule(Op1Min)) // A <=u B -> true if max(A) <= min(B) + return ReplaceInstUsesWith(I, Context->getTrue()); + if (Op0Min.ugt(Op1Max)) // A <=u B -> false if min(A) > max(B) + return ReplaceInstUsesWith(I, Context->getFalse()); break; } + + // Turn a signed comparison into an unsigned one if both operands + // are known to have the same sign. + if (I.isSignedPredicate() && + ((Op0KnownZero.isNegative() && Op1KnownZero.isNegative()) || + (Op0KnownOne.isNegative() && Op1KnownOne.isNegative()))) + return new ICmpInst(*Context, I.getUnsignedPredicate(), Op0, Op1); } // Test if the ICmpInst instruction is used exclusively by a select as @@ -5996,8 +6301,8 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) { break; } if (isAllZeros) - return new ICmpInst(I.getPredicate(), LHSI->getOperand(0), - Constant::getNullValue(LHSI->getOperand(0)->getType())); + return new ICmpInst(*Context, I.getPredicate(), LHSI->getOperand(0), + Context->getNullValue(LHSI->getOperand(0)->getType())); } break; @@ -6017,16 +6322,16 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) { if (LHSI->hasOneUse()) { if (Constant *C = dyn_cast(LHSI->getOperand(1))) { // Fold the known value into the constant operand. - Op1 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC); + Op1 = Context->getConstantExprICmp(I.getPredicate(), C, RHSC); // Insert a new ICmp of the other select operand. - Op2 = InsertNewInstBefore(new ICmpInst(I.getPredicate(), + Op2 = InsertNewInstBefore(new ICmpInst(*Context, I.getPredicate(), LHSI->getOperand(2), RHSC, I.getName()), I); } else if (Constant *C = dyn_cast(LHSI->getOperand(2))) { // Fold the known value into the constant operand. - Op2 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC); + Op2 = Context->getConstantExprICmp(I.getPredicate(), C, RHSC); // Insert a new ICmp of the other select operand. - Op1 = InsertNewInstBefore(new ICmpInst(I.getPredicate(), + Op1 = InsertNewInstBefore(new ICmpInst(*Context, I.getPredicate(), LHSI->getOperand(1), RHSC, I.getName()), I); } @@ -6075,13 +6380,13 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) { // If Op1 is a constant, we can fold the cast into the constant. 
if (Op0->getType() != Op1->getType()) { if (Constant *Op1C = dyn_cast(Op1)) { - Op1 = ConstantExpr::getBitCast(Op1C, Op0->getType()); + Op1 = Context->getConstantExprBitCast(Op1C, Op0->getType()); } else { // Otherwise, cast the RHS right before the icmp Op1 = InsertBitCastBefore(Op1, Op0->getType(), I); } } - return new ICmpInst(I.getPredicate(), Op0, Op1); + return new ICmpInst(*Context, I.getPredicate(), Op0, Op1); } } @@ -6108,7 +6413,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) { case Instruction::Sub: case Instruction::Xor: if (I.isEquality()) // a+x icmp eq/ne b+x --> a icmp b - return new ICmpInst(I.getPredicate(), Op0I->getOperand(0), + return new ICmpInst(*Context, I.getPredicate(), Op0I->getOperand(0), Op1I->getOperand(0)); // icmp u/s (a ^ signbit), (b ^ signbit) --> icmp s/u a, b if (ConstantInt *CI = dyn_cast(Op0I->getOperand(1))) { @@ -6116,7 +6421,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) { ICmpInst::Predicate Pred = I.isSignedPredicate() ? I.getUnsignedPredicate() : I.getSignedPredicate(); - return new ICmpInst(Pred, Op0I->getOperand(0), + return new ICmpInst(*Context, Pred, Op0I->getOperand(0), Op1I->getOperand(0)); } @@ -6125,7 +6430,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) { ? I.getUnsignedPredicate() : I.getSignedPredicate(); Pred = I.getSwappedPredicate(Pred); - return new ICmpInst(Pred, Op0I->getOperand(0), + return new ICmpInst(*Context, Pred, Op0I->getOperand(0), Op1I->getOperand(0)); } } @@ -6139,7 +6444,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) { // Mask = -1 >> count-trailing-zeros(Cst). if (!CI->isZero() && !CI->isOne()) { const APInt &AP = CI->getValue(); - ConstantInt *Mask = ConstantInt::get( + ConstantInt *Mask = ConstantInt::get(*Context, APInt::getLowBitsSet(AP.getBitWidth(), AP.getBitWidth() - AP.countTrailingZeros())); @@ -6149,7 +6454,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) { Mask); InsertNewInstBefore(And1, I); InsertNewInstBefore(And2, I); - return new ICmpInst(I.getPredicate(), And1, And2); + return new ICmpInst(*Context, I.getPredicate(), And1, And2); } } break; @@ -6160,67 +6465,68 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) { // ~x < ~y --> y < x { Value *A, *B; - if (match(Op0, m_Not(m_Value(A))) && - match(Op1, m_Not(m_Value(B)))) - return new ICmpInst(I.getPredicate(), B, A); + if (match(Op0, m_Not(m_Value(A)), *Context) && + match(Op1, m_Not(m_Value(B)), *Context)) + return new ICmpInst(*Context, I.getPredicate(), B, A); } if (I.isEquality()) { Value *A, *B, *C, *D; // -x == -y --> x == y - if (match(Op0, m_Neg(m_Value(A))) && - match(Op1, m_Neg(m_Value(B)))) - return new ICmpInst(I.getPredicate(), A, B); + if (match(Op0, m_Neg(m_Value(A)), *Context) && + match(Op1, m_Neg(m_Value(B)), *Context)) + return new ICmpInst(*Context, I.getPredicate(), A, B); - if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) { + if (match(Op0, m_Xor(m_Value(A), m_Value(B)), *Context)) { if (A == Op1 || B == Op1) { // (A^B) == A -> B == 0 Value *OtherVal = A == Op1 ? 
B : A; - return new ICmpInst(I.getPredicate(), OtherVal, - Constant::getNullValue(A->getType())); + return new ICmpInst(*Context, I.getPredicate(), OtherVal, + Context->getNullValue(A->getType())); } - if (match(Op1, m_Xor(m_Value(C), m_Value(D)))) { + if (match(Op1, m_Xor(m_Value(C), m_Value(D)), *Context)) { // A^c1 == C^c2 --> A == C^(c1^c2) ConstantInt *C1, *C2; - if (match(B, m_ConstantInt(C1)) && - match(D, m_ConstantInt(C2)) && Op1->hasOneUse()) { - Constant *NC = ConstantInt::get(C1->getValue() ^ C2->getValue()); + if (match(B, m_ConstantInt(C1), *Context) && + match(D, m_ConstantInt(C2), *Context) && Op1->hasOneUse()) { + Constant *NC = + ConstantInt::get(*Context, C1->getValue() ^ C2->getValue()); Instruction *Xor = BinaryOperator::CreateXor(C, NC, "tmp"); - return new ICmpInst(I.getPredicate(), A, + return new ICmpInst(*Context, I.getPredicate(), A, InsertNewInstBefore(Xor, I)); } // A^B == A^D -> B == D - if (A == C) return new ICmpInst(I.getPredicate(), B, D); - if (A == D) return new ICmpInst(I.getPredicate(), B, C); - if (B == C) return new ICmpInst(I.getPredicate(), A, D); - if (B == D) return new ICmpInst(I.getPredicate(), A, C); + if (A == C) return new ICmpInst(*Context, I.getPredicate(), B, D); + if (A == D) return new ICmpInst(*Context, I.getPredicate(), B, C); + if (B == C) return new ICmpInst(*Context, I.getPredicate(), A, D); + if (B == D) return new ICmpInst(*Context, I.getPredicate(), A, C); } } - if (match(Op1, m_Xor(m_Value(A), m_Value(B))) && + if (match(Op1, m_Xor(m_Value(A), m_Value(B)), *Context) && (A == Op0 || B == Op0)) { // A == (A^B) -> B == 0 Value *OtherVal = A == Op0 ? B : A; - return new ICmpInst(I.getPredicate(), OtherVal, - Constant::getNullValue(A->getType())); + return new ICmpInst(*Context, I.getPredicate(), OtherVal, + Context->getNullValue(A->getType())); } // (A-B) == A -> B == 0 - if (match(Op0, m_Sub(m_Specific(Op1), m_Value(B)))) - return new ICmpInst(I.getPredicate(), B, - Constant::getNullValue(B->getType())); + if (match(Op0, m_Sub(m_Specific(Op1), m_Value(B)), *Context)) + return new ICmpInst(*Context, I.getPredicate(), B, + Context->getNullValue(B->getType())); // A == (A-B) -> B == 0 - if (match(Op1, m_Sub(m_Specific(Op0), m_Value(B)))) - return new ICmpInst(I.getPredicate(), B, - Constant::getNullValue(B->getType())); + if (match(Op1, m_Sub(m_Specific(Op0), m_Value(B)), *Context)) + return new ICmpInst(*Context, I.getPredicate(), B, + Context->getNullValue(B->getType())); // (X&Z) == (Y&Z) -> (X^Y) & Z == 0 if (Op0->hasOneUse() && Op1->hasOneUse() && - match(Op0, m_And(m_Value(A), m_Value(B))) && - match(Op1, m_And(m_Value(C), m_Value(D)))) { + match(Op0, m_And(m_Value(A), m_Value(B)), *Context) && + match(Op1, m_And(m_Value(C), m_Value(D)), *Context)) { Value *X = 0, *Y = 0, *Z = 0; if (A == C) { @@ -6237,7 +6543,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) { Op1 = InsertNewInstBefore(BinaryOperator::CreateXor(X, Y, "tmp"), I); Op1 = InsertNewInstBefore(BinaryOperator::CreateAnd(Op1, Z, "tmp"), I); I.setOperand(0, Op1); - I.setOperand(1, Constant::getNullValue(Op1->getType())); + I.setOperand(1, Context->getNullValue(Op1->getType())); return &I; } } @@ -6276,13 +6582,13 @@ Instruction *InstCombiner::FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI, // of form X/C1=C2. We solve for X by multiplying C1 (DivRHS) and // C2 (CI). By solving for X we can turn this into a range check // instead of computing a divide. 
- ConstantInt *Prod = Multiply(CmpRHS, DivRHS); + Constant *Prod = Context->getConstantExprMul(CmpRHS, DivRHS); // Determine if the product overflows by seeing if the product is // not equal to the divide. Make sure we do the same kind of divide // as in the LHS instruction that we're folding. - bool ProdOV = (DivIsSigned ? ConstantExpr::getSDiv(Prod, DivRHS) : - ConstantExpr::getUDiv(Prod, DivRHS)) != CmpRHS; + bool ProdOV = (DivIsSigned ? Context->getConstantExprSDiv(Prod, DivRHS) : + Context->getConstantExprUDiv(Prod, DivRHS)) != CmpRHS; // Get the ICmp opcode ICmpInst::Predicate Pred = ICI.getPredicate(); @@ -6295,54 +6601,57 @@ Instruction *InstCombiner::FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI, // overflow variable is set to 0 if it's corresponding bound variable is valid // -1 if overflowed off the bottom end, or +1 if overflowed off the top end. int LoOverflow = 0, HiOverflow = 0; - ConstantInt *LoBound = 0, *HiBound = 0; + Constant *LoBound = 0, *HiBound = 0; if (!DivIsSigned) { // udiv // e.g. X/5 op 3 --> [15, 20) LoBound = Prod; HiOverflow = LoOverflow = ProdOV; if (!HiOverflow) - HiOverflow = AddWithOverflow(HiBound, LoBound, DivRHS, false); + HiOverflow = AddWithOverflow(HiBound, LoBound, DivRHS, Context, false); } else if (DivRHS->getValue().isStrictlyPositive()) { // Divisor is > 0. if (CmpRHSV == 0) { // (X / pos) op 0 // Can't overflow. e.g. X/2 op 0 --> [-1, 2) - LoBound = cast(ConstantExpr::getNeg(SubOne(DivRHS))); + LoBound = cast(Context->getConstantExprNeg(SubOne(DivRHS, + Context))); HiBound = DivRHS; } else if (CmpRHSV.isStrictlyPositive()) { // (X / pos) op pos LoBound = Prod; // e.g. X/5 op 3 --> [15, 20) HiOverflow = LoOverflow = ProdOV; if (!HiOverflow) - HiOverflow = AddWithOverflow(HiBound, Prod, DivRHS, true); + HiOverflow = AddWithOverflow(HiBound, Prod, DivRHS, Context, true); } else { // (X / pos) op neg // e.g. X/5 op -3 --> [-15-4, -15+1) --> [-19, -14) - HiBound = AddOne(Prod); + HiBound = AddOne(Prod, Context); LoOverflow = HiOverflow = ProdOV ? -1 : 0; if (!LoOverflow) { - ConstantInt* DivNeg = cast(ConstantExpr::getNeg(DivRHS)); - LoOverflow = AddWithOverflow(LoBound, HiBound, DivNeg, + ConstantInt* DivNeg = + cast(Context->getConstantExprNeg(DivRHS)); + LoOverflow = AddWithOverflow(LoBound, HiBound, DivNeg, Context, true) ? -1 : 0; } } } else if (DivRHS->getValue().isNegative()) { // Divisor is < 0. if (CmpRHSV == 0) { // (X / neg) op 0 // e.g. X/-5 op 0 --> [-4, 5) - LoBound = AddOne(DivRHS); - HiBound = cast(ConstantExpr::getNeg(DivRHS)); + LoBound = AddOne(DivRHS, Context); + HiBound = cast(Context->getConstantExprNeg(DivRHS)); if (HiBound == DivRHS) { // -INTMIN = INTMIN HiOverflow = 1; // [INTMIN+1, overflow) HiBound = 0; // e.g. X/INTMIN = 0 --> X > INTMIN } } else if (CmpRHSV.isStrictlyPositive()) { // (X / neg) op pos // e.g. X/-5 op 3 --> [-19, -14) - HiBound = AddOne(Prod); + HiBound = AddOne(Prod, Context); HiOverflow = LoOverflow = ProdOV ? -1 : 0; if (!LoOverflow) - LoOverflow = AddWithOverflow(LoBound, HiBound, DivRHS, true) ? -1 : 0; + LoOverflow = AddWithOverflow(LoBound, HiBound, + DivRHS, Context, true) ? -1 : 0; } else { // (X / neg) op neg LoBound = Prod; // e.g. X/-5 op -3 --> [15, 20) LoOverflow = HiOverflow = ProdOV; if (!HiOverflow) - HiOverflow = SubWithOverflow(HiBound, Prod, DivRHS, true); + HiOverflow = SubWithOverflow(HiBound, Prod, DivRHS, Context, true); } // Dividing by a negative swaps the condition. 
LT <-> GT @@ -6351,46 +6660,46 @@ Instruction *InstCombiner::FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI, Value *X = DivI->getOperand(0); switch (Pred) { - default: assert(0 && "Unhandled icmp opcode!"); + default: llvm_unreachable("Unhandled icmp opcode!"); case ICmpInst::ICMP_EQ: if (LoOverflow && HiOverflow) - return ReplaceInstUsesWith(ICI, ConstantInt::getFalse()); + return ReplaceInstUsesWith(ICI, Context->getFalse()); else if (HiOverflow) - return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE : + return new ICmpInst(*Context, DivIsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE, X, LoBound); else if (LoOverflow) - return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT : + return new ICmpInst(*Context, DivIsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT, X, HiBound); else return InsertRangeTest(X, LoBound, HiBound, DivIsSigned, true, ICI); case ICmpInst::ICMP_NE: if (LoOverflow && HiOverflow) - return ReplaceInstUsesWith(ICI, ConstantInt::getTrue()); + return ReplaceInstUsesWith(ICI, Context->getTrue()); else if (HiOverflow) - return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT : + return new ICmpInst(*Context, DivIsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT, X, LoBound); else if (LoOverflow) - return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE : + return new ICmpInst(*Context, DivIsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE, X, HiBound); else return InsertRangeTest(X, LoBound, HiBound, DivIsSigned, false, ICI); case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_SLT: if (LoOverflow == +1) // Low bound is greater than input range. - return ReplaceInstUsesWith(ICI, ConstantInt::getTrue()); + return ReplaceInstUsesWith(ICI, Context->getTrue()); if (LoOverflow == -1) // Low bound is less than input range. - return ReplaceInstUsesWith(ICI, ConstantInt::getFalse()); - return new ICmpInst(Pred, X, LoBound); + return ReplaceInstUsesWith(ICI, Context->getFalse()); + return new ICmpInst(*Context, Pred, X, LoBound); case ICmpInst::ICMP_UGT: case ICmpInst::ICMP_SGT: if (HiOverflow == +1) // High bound greater than input range. - return ReplaceInstUsesWith(ICI, ConstantInt::getFalse()); + return ReplaceInstUsesWith(ICI, Context->getFalse()); else if (HiOverflow == -1) // High bound less than input range. 
- return ReplaceInstUsesWith(ICI, ConstantInt::getTrue()); + return ReplaceInstUsesWith(ICI, Context->getTrue()); if (Pred == ICmpInst::ICMP_UGT) - return new ICmpInst(ICmpInst::ICMP_UGE, X, HiBound); + return new ICmpInst(*Context, ICmpInst::ICMP_UGE, X, HiBound); else - return new ICmpInst(ICmpInst::ICMP_SGE, X, HiBound); + return new ICmpInst(*Context, ICmpInst::ICMP_SGE, X, HiBound); } } @@ -6419,8 +6728,8 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI, APInt NewRHS(RHS->getValue()); NewRHS.zext(SrcBits); NewRHS |= KnownOne; - return new ICmpInst(ICI.getPredicate(), LHSI->getOperand(0), - ConstantInt::get(NewRHS)); + return new ICmpInst(*Context, ICI.getPredicate(), LHSI->getOperand(0), + ConstantInt::get(*Context, NewRHS)); } } break; @@ -6448,9 +6757,11 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI, isTrueIfPositive ^= true; if (isTrueIfPositive) - return new ICmpInst(ICmpInst::ICMP_SGT, CompareVal, SubOne(RHS)); + return new ICmpInst(*Context, ICmpInst::ICMP_SGT, CompareVal, + SubOne(RHS, Context)); else - return new ICmpInst(ICmpInst::ICMP_SLT, CompareVal, AddOne(RHS)); + return new ICmpInst(*Context, ICmpInst::ICMP_SLT, CompareVal, + AddOne(RHS, Context)); } if (LHSI->hasOneUse()) { @@ -6460,8 +6771,8 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI, ICmpInst::Predicate Pred = ICI.isSignedPredicate() ? ICI.getUnsignedPredicate() : ICI.getSignedPredicate(); - return new ICmpInst(Pred, LHSI->getOperand(0), - ConstantInt::get(RHSV ^ SignBit)); + return new ICmpInst(*Context, Pred, LHSI->getOperand(0), + ConstantInt::get(*Context, RHSV ^ SignBit)); } // (icmp u/s (xor A ~SignBit), C) -> (icmp s/u (xor C ~SignBit), A) @@ -6471,8 +6782,8 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI, ? ICI.getUnsignedPredicate() : ICI.getSignedPredicate(); Pred = ICI.getSwappedPredicate(Pred); - return new ICmpInst(Pred, LHSI->getOperand(0), - ConstantInt::get(RHSV ^ NotSignBit)); + return new ICmpInst(*Context, Pred, LHSI->getOperand(0), + ConstantInt::get(*Context, RHSV ^ NotSignBit)); } } } @@ -6501,10 +6812,10 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI, NewCI.zext(BitWidth); Instruction *NewAnd = BinaryOperator::CreateAnd(Cast->getOperand(0), - ConstantInt::get(NewCST),LHSI->getName()); + ConstantInt::get(*Context, NewCST), LHSI->getName()); InsertNewInstBefore(NewAnd, ICI); - return new ICmpInst(ICI.getPredicate(), NewAnd, - ConstantInt::get(NewCI)); + return new ICmpInst(*Context, ICI.getPredicate(), NewAnd, + ConstantInt::get(*Context, NewCI)); } } @@ -6541,27 +6852,28 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI, if (CanFold) { Constant *NewCst; if (Shift->getOpcode() == Instruction::Shl) - NewCst = ConstantExpr::getLShr(RHS, ShAmt); + NewCst = Context->getConstantExprLShr(RHS, ShAmt); else - NewCst = ConstantExpr::getShl(RHS, ShAmt); + NewCst = Context->getConstantExprShl(RHS, ShAmt); // Check to see if we are shifting out any of the bits being // compared. - if (ConstantExpr::get(Shift->getOpcode(), NewCst, ShAmt) != RHS) { + if (Context->getConstantExpr(Shift->getOpcode(), + NewCst, ShAmt) != RHS) { // If we shifted bits out, the fold is not going to work out. // As a special case, check to see if this means that the // result is always true or false now. 
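//===--- Editor's illustration (not part of this patch) ------------------===//
// Sketch of the "shifted out bits" special case described above: if the
// constant being compared against keeps bits that the shifted-and-masked LHS
// can never produce, an EQ compare is always false and an NE compare is
// always true. CheckShiftedOutBitsSketch is an invented name.
#include <cassert>
#include <cstdint>
static void CheckShiftedOutBitsSketch() {
  const uint32_t RHS = 0x13;                    // low nibble is nonzero
  for (uint32_t X = 0; X < 4096; ++X) {
    uint32_t LHS = (X & 0x0F) << 4;             // low nibble is always zero
    assert(!(LHS == RHS));                      // icmp eq --> false
    assert(LHS != RHS);                         // icmp ne --> true
  }
}
//===----------------------------------------------------------------------===//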
if (ICI.getPredicate() == ICmpInst::ICMP_EQ) - return ReplaceInstUsesWith(ICI, ConstantInt::getFalse()); + return ReplaceInstUsesWith(ICI, Context->getFalse()); if (ICI.getPredicate() == ICmpInst::ICMP_NE) - return ReplaceInstUsesWith(ICI, ConstantInt::getTrue()); + return ReplaceInstUsesWith(ICI, Context->getTrue()); } else { ICI.setOperand(1, NewCst); Constant *NewAndCST; if (Shift->getOpcode() == Instruction::Shl) - NewAndCST = ConstantExpr::getLShr(AndCST, ShAmt); + NewAndCST = Context->getConstantExprLShr(AndCST, ShAmt); else - NewAndCST = ConstantExpr::getShl(AndCST, ShAmt); + NewAndCST = Context->getConstantExprShl(AndCST, ShAmt); LHSI->setOperand(1, NewAndCST); LHSI->setOperand(0, Shift->getOperand(0)); AddToWorkList(Shift); // Shift is dead. @@ -6576,7 +6888,7 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI, // of a loop if Y is invariant and X is not. if (Shift && Shift->hasOneUse() && RHSV == 0 && ICI.isEquality() && !Shift->isArithmeticShift() && - isa(Shift->getOperand(0))) { + !isa(Shift->getOperand(0))) { // Compute C << Y. Value *NS; if (Shift->getOpcode() == Instruction::LShr) { @@ -6616,7 +6928,8 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI, // If we are comparing against bits always shifted out, the // comparison cannot succeed. Constant *Comp = - ConstantExpr::getShl(ConstantExpr::getLShr(RHS, ShAmt), ShAmt); + Context->getConstantExprShl(Context->getConstantExprLShr(RHS, ShAmt), + ShAmt); if (Comp != RHS) {// Comparing against a bit that we know is zero. bool IsICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE; Constant *Cst = ConstantInt::get(Type::Int1Ty, IsICMP_NE); @@ -6627,14 +6940,15 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI, // Otherwise strength reduce the shift into an and. uint32_t ShAmtVal = (uint32_t)ShAmt->getLimitedValue(TypeBits); Constant *Mask = - ConstantInt::get(APInt::getLowBitsSet(TypeBits, TypeBits-ShAmtVal)); + ConstantInt::get(*Context, APInt::getLowBitsSet(TypeBits, + TypeBits-ShAmtVal)); Instruction *AndI = BinaryOperator::CreateAnd(LHSI->getOperand(0), Mask, LHSI->getName()+".mask"); Value *And = InsertNewInstBefore(AndI, ICI); - return new ICmpInst(ICI.getPredicate(), And, - ConstantInt::get(RHSV.lshr(ShAmtVal))); + return new ICmpInst(*Context, ICI.getPredicate(), And, + ConstantInt::get(*Context, RHSV.lshr(ShAmtVal))); } } @@ -6643,15 +6957,16 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI, if (LHSI->hasOneUse() && isSignBitCheck(ICI.getPredicate(), RHS, TrueIfSigned)) { // (X << 31) (X&1) != 0 - Constant *Mask = ConstantInt::get(APInt(TypeBits, 1) << + Constant *Mask = ConstantInt::get(*Context, APInt(TypeBits, 1) << (TypeBits-ShAmt->getZExtValue()-1)); Instruction *AndI = BinaryOperator::CreateAnd(LHSI->getOperand(0), Mask, LHSI->getName()+".mask"); Value *And = InsertNewInstBefore(AndI, ICI); - return new ICmpInst(TrueIfSigned ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ, - And, Constant::getNullValue(And->getType())); + return new ICmpInst(*Context, + TrueIfSigned ? 
ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ, + And, Context->getNullValue(And->getType())); } break; } @@ -6691,21 +7006,21 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI, if (LHSI->hasOneUse() && MaskedValueIsZero(LHSI->getOperand(0), APInt::getLowBitsSet(Comp.getBitWidth(), ShAmtVal))) { - return new ICmpInst(ICI.getPredicate(), LHSI->getOperand(0), - ConstantExpr::getShl(RHS, ShAmt)); + return new ICmpInst(*Context, ICI.getPredicate(), LHSI->getOperand(0), + Context->getConstantExprShl(RHS, ShAmt)); } if (LHSI->hasOneUse()) { // Otherwise strength reduce the shift into an and. APInt Val(APInt::getHighBitsSet(TypeBits, TypeBits - ShAmtVal)); - Constant *Mask = ConstantInt::get(Val); + Constant *Mask = ConstantInt::get(*Context, Val); Instruction *AndI = BinaryOperator::CreateAnd(LHSI->getOperand(0), Mask, LHSI->getName()+".mask"); Value *And = InsertNewInstBefore(AndI, ICI); - return new ICmpInst(ICI.getPredicate(), And, - ConstantExpr::getShl(RHS, ShAmt)); + return new ICmpInst(*Context, ICI.getPredicate(), And, + Context->getConstantExprShl(RHS, ShAmt)); } break; } @@ -6737,19 +7052,19 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI, if (ICI.isSignedPredicate()) { if (CR.getLower().isSignBit()) { - return new ICmpInst(ICmpInst::ICMP_SLT, LHSI->getOperand(0), - ConstantInt::get(CR.getUpper())); + return new ICmpInst(*Context, ICmpInst::ICMP_SLT, LHSI->getOperand(0), + ConstantInt::get(*Context, CR.getUpper())); } else if (CR.getUpper().isSignBit()) { - return new ICmpInst(ICmpInst::ICMP_SGE, LHSI->getOperand(0), - ConstantInt::get(CR.getLower())); + return new ICmpInst(*Context, ICmpInst::ICMP_SGE, LHSI->getOperand(0), + ConstantInt::get(*Context, CR.getLower())); } } else { if (CR.getLower().isMinValue()) { - return new ICmpInst(ICmpInst::ICMP_ULT, LHSI->getOperand(0), - ConstantInt::get(CR.getUpper())); + return new ICmpInst(*Context, ICmpInst::ICMP_ULT, LHSI->getOperand(0), + ConstantInt::get(*Context, CR.getUpper())); } else if (CR.getUpper().isMinValue()) { - return new ICmpInst(ICmpInst::ICMP_UGE, LHSI->getOperand(0), - ConstantInt::get(CR.getLower())); + return new ICmpInst(*Context, ICmpInst::ICMP_UGE, LHSI->getOperand(0), + ConstantInt::get(*Context, CR.getLower())); } } } @@ -6773,8 +7088,8 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI, BinaryOperator::CreateURem(BO->getOperand(0), BO->getOperand(1), BO->getName()); InsertNewInstBefore(NewRem, ICI); - return new ICmpInst(ICI.getPredicate(), NewRem, - Constant::getNullValue(BO->getType())); + return new ICmpInst(*Context, ICI.getPredicate(), NewRem, + Context->getNullValue(BO->getType())); } } break; @@ -6782,22 +7097,22 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI, // Replace ((add A, B) != C) with (A != C-B) if B & C are constants. if (ConstantInt *BOp1C = dyn_cast(BO->getOperand(1))) { if (BO->hasOneUse()) - return new ICmpInst(ICI.getPredicate(), BO->getOperand(0), - Subtract(RHS, BOp1C)); + return new ICmpInst(*Context, ICI.getPredicate(), BO->getOperand(0), + Context->getConstantExprSub(RHS, BOp1C)); } else if (RHSV == 0) { // Replace ((add A, B) != 0) with (A != -B) if A or B is // efficiently invertible, or if the add has just this one use. 
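//===--- Editor's illustration (not part of this patch) ------------------===//
// Sketch of the identity used just above: in two's-complement (modular)
// arithmetic, A + B == 0 exactly when A == -B, so "(add A, B) != 0" can be
// rewritten as "A != -B". CheckAddNegFoldSketch is an invented name.
#include <cassert>
#include <cstdint>
static void CheckAddNegFoldSketch() {
  for (unsigned A = 0; A < 256; ++A)
    for (unsigned B = 0; B < 256; ++B) {
      uint8_t a = static_cast<uint8_t>(A), b = static_cast<uint8_t>(B);
      bool Original = (static_cast<uint8_t>(a + b) != 0);
      bool Folded   = (a != static_cast<uint8_t>(-b));
      assert(Original == Folded);
    }
}
//===----------------------------------------------------------------------===//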
Value *BOp0 = BO->getOperand(0), *BOp1 = BO->getOperand(1); - if (Value *NegVal = dyn_castNegVal(BOp1)) - return new ICmpInst(ICI.getPredicate(), BOp0, NegVal); - else if (Value *NegVal = dyn_castNegVal(BOp0)) - return new ICmpInst(ICI.getPredicate(), NegVal, BOp1); + if (Value *NegVal = dyn_castNegVal(BOp1, Context)) + return new ICmpInst(*Context, ICI.getPredicate(), BOp0, NegVal); + else if (Value *NegVal = dyn_castNegVal(BOp0, Context)) + return new ICmpInst(*Context, ICI.getPredicate(), NegVal, BOp1); else if (BO->hasOneUse()) { - Instruction *Neg = BinaryOperator::CreateNeg(BOp1); + Instruction *Neg = BinaryOperator::CreateNeg(*Context, BOp1); InsertNewInstBefore(Neg, ICI); Neg->takeName(BO); - return new ICmpInst(ICI.getPredicate(), BOp0, Neg); + return new ICmpInst(*Context, ICI.getPredicate(), BOp0, Neg); } } break; @@ -6805,14 +7120,14 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI, // For the xor case, we can xor two constants together, eliminating // the explicit xor. if (Constant *BOC = dyn_cast(BO->getOperand(1))) - return new ICmpInst(ICI.getPredicate(), BO->getOperand(0), - ConstantExpr::getXor(RHS, BOC)); + return new ICmpInst(*Context, ICI.getPredicate(), BO->getOperand(0), + Context->getConstantExprXor(RHS, BOC)); // FALLTHROUGH case Instruction::Sub: // Replace (([sub|xor] A, B) != 0) with (A != B) if (RHSV == 0) - return new ICmpInst(ICI.getPredicate(), BO->getOperand(0), + return new ICmpInst(*Context, ICI.getPredicate(), BO->getOperand(0), BO->getOperand(1)); break; @@ -6820,10 +7135,11 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI, // If bits are being or'd in that are not present in the constant we // are comparing against, then the comparison could never succeed! if (Constant *BOC = dyn_cast(BO->getOperand(1))) { - Constant *NotCI = ConstantExpr::getNot(RHS); - if (!ConstantExpr::getAnd(BOC, NotCI)->isNullValue()) - return ReplaceInstUsesWith(ICI, ConstantInt::get(Type::Int1Ty, - isICMP_NE)); + Constant *NotCI = Context->getConstantExprNot(RHS); + if (!Context->getConstantExprAnd(BOC, NotCI)->isNullValue()) + return ReplaceInstUsesWith(ICI, + ConstantInt::get(Type::Int1Ty, + isICMP_NE)); } break; @@ -6832,31 +7148,32 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI, // If bits are being compared against that are and'd out, then the // comparison can never succeed! if ((RHSV & ~BOC->getValue()) != 0) - return ReplaceInstUsesWith(ICI, ConstantInt::get(Type::Int1Ty, - isICMP_NE)); + return ReplaceInstUsesWith(ICI, + ConstantInt::get(Type::Int1Ty, + isICMP_NE)); // If we have ((X & C) == C), turn it into ((X & C) != 0). if (RHS == BOC && RHSV.isPowerOf2()) - return new ICmpInst(isICMP_NE ? ICmpInst::ICMP_EQ : + return new ICmpInst(*Context, isICMP_NE ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE, LHSI, - Constant::getNullValue(RHS->getType())); + Context->getNullValue(RHS->getType())); // Replace (and X, (1 << size(X)-1) != 0) with x s< 0 if (BOC->getValue().isSignBit()) { Value *X = BO->getOperand(0); - Constant *Zero = Constant::getNullValue(X->getType()); + Constant *Zero = Context->getNullValue(X->getType()); ICmpInst::Predicate pred = isICMP_NE ? 
ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGE; - return new ICmpInst(pred, X, Zero); + return new ICmpInst(*Context, pred, X, Zero); } // ((X & ~7) == 0) --> X < 8 if (RHSV == 0 && isHighOnes(BOC)) { Value *X = BO->getOperand(0); - Constant *NegX = ConstantExpr::getNeg(BOC); + Constant *NegX = Context->getConstantExprNeg(BOC); ICmpInst::Predicate pred = isICMP_NE ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT; - return new ICmpInst(pred, X, NegX); + return new ICmpInst(*Context, pred, X, NegX); } } default: break; @@ -6866,7 +7183,7 @@ Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI, if (II->getIntrinsicID() == Intrinsic::bswap) { AddToWorkList(II); ICI.setOperand(0, II->getOperand(1)); - ICI.setOperand(1, ConstantInt::get(RHSV.byteSwap())); + ICI.setOperand(1, ConstantInt::get(*Context, RHSV.byteSwap())); return &ICI; } } @@ -6886,12 +7203,12 @@ Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) { // Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the // integer type is the same size as the pointer type. - if (LHSCI->getOpcode() == Instruction::PtrToInt && - getTargetData().getPointerSizeInBits() == + if (TD && LHSCI->getOpcode() == Instruction::PtrToInt && + TD->getPointerSizeInBits() == cast(DestTy)->getBitWidth()) { Value *RHSOp = 0; if (Constant *RHSC = dyn_cast(ICI.getOperand(1))) { - RHSOp = ConstantExpr::getIntToPtr(RHSC, SrcTy); + RHSOp = Context->getConstantExprIntToPtr(RHSC, SrcTy); } else if (PtrToIntInst *RHSC = dyn_cast(ICI.getOperand(1))) { RHSOp = RHSC->getOperand(0); // If the pointer types don't match, insert a bitcast. @@ -6900,7 +7217,7 @@ Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) { } if (RHSOp) - return new ICmpInst(ICI.getPredicate(), LHSCIOp, RHSOp); + return new ICmpInst(*Context, ICI.getPredicate(), LHSCIOp, RHSOp); } // The code below only handles extension cast instructions, so far. @@ -6925,15 +7242,15 @@ Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) { // Deal with equality cases early. if (ICI.isEquality()) - return new ICmpInst(ICI.getPredicate(), LHSCIOp, RHSCIOp); + return new ICmpInst(*Context, ICI.getPredicate(), LHSCIOp, RHSCIOp); // A signed comparison of sign extended values simplifies into a // signed comparison. if (isSignedCmp && isSignedExt) - return new ICmpInst(ICI.getPredicate(), LHSCIOp, RHSCIOp); + return new ICmpInst(*Context, ICI.getPredicate(), LHSCIOp, RHSCIOp); // The other three cases all fold into an unsigned comparison. - return new ICmpInst(ICI.getUnsignedPredicate(), LHSCIOp, RHSCIOp); + return new ICmpInst(*Context, ICI.getUnsignedPredicate(), LHSCIOp, RHSCIOp); } // If we aren't dealing with a constant on the RHS, exit early @@ -6943,23 +7260,24 @@ Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) { // Compute the constant that would happen if we truncated to SrcTy then // reextended to DestTy. - Constant *Res1 = ConstantExpr::getTrunc(CI, SrcTy); - Constant *Res2 = ConstantExpr::getCast(LHSCI->getOpcode(), Res1, DestTy); + Constant *Res1 = Context->getConstantExprTrunc(CI, SrcTy); + Constant *Res2 = Context->getConstantExprCast(LHSCI->getOpcode(), + Res1, DestTy); // If the re-extended constant didn't change... if (Res2 == CI) { // Make sure that sign of the Cmp and the sign of the Cast are the same. 
// For example, we might have: - // %A = sext short %X to uint - // %B = icmp ugt uint %A, 1330 + // %A = sext i16 %X to i32 + // %B = icmp ugt i32 %A, 1330 // It is incorrect to transform this into - // %B = icmp ugt short %X, 1330 + // %B = icmp ugt i16 %X, 1330 // because %A may have negative value. // // However, we allow this when the compare is EQ/NE, because they are // signless. if (isSignedExt == isSignedCmp || ICI.isEquality()) - return new ICmpInst(ICI.getPredicate(), LHSCIOp, Res1); + return new ICmpInst(*Context, ICI.getPredicate(), LHSCIOp, Res1); return 0; } @@ -6969,9 +7287,9 @@ Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) { // First, handle some easy cases. We know the result cannot be equal at this // point so handle the ICI.isEquality() cases if (ICI.getPredicate() == ICmpInst::ICMP_EQ) - return ReplaceInstUsesWith(ICI, ConstantInt::getFalse()); + return ReplaceInstUsesWith(ICI, Context->getFalse()); if (ICI.getPredicate() == ICmpInst::ICMP_NE) - return ReplaceInstUsesWith(ICI, ConstantInt::getTrue()); + return ReplaceInstUsesWith(ICI, Context->getTrue()); // Evaluate the comparison for LT (we invert for GT below). LE and GE cases // should have been folded away previously and not enter in here. @@ -6979,20 +7297,20 @@ Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) { if (isSignedCmp) { // We're performing a signed comparison. if (cast(CI)->getValue().isNegative()) - Result = ConstantInt::getFalse(); // X < (small) --> false + Result = Context->getFalse(); // X < (small) --> false else - Result = ConstantInt::getTrue(); // X < (large) --> true + Result = Context->getTrue(); // X < (large) --> true } else { // We're performing an unsigned comparison. if (isSignedExt) { // We're performing an unsigned comp with a sign extended value. // This is true if the input is >= 0. [aka >s -1] - Constant *NegOne = ConstantInt::getAllOnesValue(SrcTy); - Result = InsertNewInstBefore(new ICmpInst(ICmpInst::ICMP_SGT, LHSCIOp, - NegOne, ICI.getName()), ICI); + Constant *NegOne = Context->getAllOnesValue(SrcTy); + Result = InsertNewInstBefore(new ICmpInst(*Context, ICmpInst::ICMP_SGT, + LHSCIOp, NegOne, ICI.getName()), ICI); } else { // Unsigned extend & unsigned compare -> always true. - Result = ConstantInt::getTrue(); + Result = Context->getTrue(); } } @@ -7005,8 +7323,8 @@ Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) { ICI.getPredicate()==ICmpInst::ICMP_SGT) && "ICmp should be folded!"); if (Constant *CI = dyn_cast(Result)) - return ReplaceInstUsesWith(ICI, ConstantExpr::getNot(CI)); - return BinaryOperator::CreateNot(Result); + return ReplaceInstUsesWith(ICI, Context->getConstantExprNot(CI)); + return BinaryOperator::CreateNot(*Context, Result); } Instruction *InstCombiner::visitShl(BinaryOperator &I) { @@ -7027,16 +7345,15 @@ Instruction *InstCombiner::visitAShr(BinaryOperator &I) { if (ConstantInt *CSI = dyn_cast(Op0)) if (CSI->isAllOnesValue()) return ReplaceInstUsesWith(I, CSI); - + // See if we can turn a signed shr into an unsigned shr. - if (!isa(I.getType()) && - MaskedValueIsZero(Op0, - APInt::getSignBit(I.getType()->getPrimitiveSizeInBits()))) + if (MaskedValueIsZero(Op0, + APInt::getSignBit(I.getType()->getScalarSizeInBits()))) return BinaryOperator::CreateLShr(Op0, I.getOperand(1)); // Arithmetic shifting an all-sign-bit value is a no-op. 
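//===--- Editor's illustration (not part of this patch) ------------------===//
// Sketch of the two AShr facts used above: when the sign bit is known zero an
// arithmetic shift right behaves like a logical one, and when every bit is a
// sign bit (the value is 0 or -1) the shift is a no-op. The -1 case assumes
// an arithmetic right shift for signed types, which the usual targets
// provide. CheckAShrSketch is an invented name.
#include <cassert>
#include <cstdint>
static void CheckAShrSketch() {
  for (int32_t X = 0; X < 1024; ++X)            // sign bit known zero
    for (unsigned Sh = 0; Sh < 31; ++Sh)
      assert((X >> Sh) ==
             static_cast<int32_t>(static_cast<uint32_t>(X) >> Sh));
  for (unsigned Sh = 0; Sh < 31; ++Sh) {        // all bits are sign bits
    assert((int32_t(0)  >> Sh) == 0);
    assert((int32_t(-1) >> Sh) == -1);          // assumes arithmetic shift
  }
}
//===----------------------------------------------------------------------===//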
unsigned NumSignBits = ComputeNumSignBits(Op0); - if (NumSignBits == Op0->getType()->getPrimitiveSizeInBits()) + if (NumSignBits == Op0->getType()->getScalarSizeInBits()) return ReplaceInstUsesWith(I, Op0); return 0; @@ -7048,23 +7365,27 @@ Instruction *InstCombiner::commonShiftTransforms(BinaryOperator &I) { // shl X, 0 == X and shr X, 0 == X // shl 0, X == 0 and shr 0, X == 0 - if (Op1 == Constant::getNullValue(Op1->getType()) || - Op0 == Constant::getNullValue(Op0->getType())) + if (Op1 == Context->getNullValue(Op1->getType()) || + Op0 == Context->getNullValue(Op0->getType())) return ReplaceInstUsesWith(I, Op0); if (isa(Op0)) { if (I.getOpcode() == Instruction::AShr) // undef >>s X -> undef return ReplaceInstUsesWith(I, Op0); else // undef << X -> 0, undef >>u X -> 0 - return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType())); + return ReplaceInstUsesWith(I, Context->getNullValue(I.getType())); } if (isa(Op1)) { if (I.getOpcode() == Instruction::AShr) // X >>s undef -> X return ReplaceInstUsesWith(I, Op0); else // X << undef, X >>u undef -> 0 - return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType())); + return ReplaceInstUsesWith(I, Context->getNullValue(I.getType())); } + // See if we can fold away this shift. + if (SimplifyDemandedInstructionBits(I)) + return &I; + // Try to fold constant and into select arguments. if (isa(Op0)) if (SelectInst *SI = dyn_cast(Op1)) @@ -7083,16 +7404,14 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1, // See if we can simplify any instructions used by the instruction whose sole // purpose is to compute bits we don't care about. - uint32_t TypeBits = Op0->getType()->getPrimitiveSizeInBits(); - if (SimplifyDemandedInstructionBits(I)) - return &I; + uint32_t TypeBits = Op0->getType()->getScalarSizeInBits(); - // shl uint X, 32 = 0 and shr ubyte Y, 9 = 0, ... just don't eliminate shr - // of a signed value. + // shl i32 X, 32 = 0 and srl i8 Y, 9 = 0, ... just don't eliminate + // a signed shift. // if (Op1->uge(TypeBits)) { if (I.getOpcode() != Instruction::AShr) - return ReplaceInstUsesWith(I, Constant::getNullValue(Op0->getType())); + return ReplaceInstUsesWith(I, Context->getNullValue(Op0->getType())); else { I.setOperand(1, ConstantInt::get(I.getType(), TypeBits-1)); return &I; @@ -7104,7 +7423,7 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1, if (BO->getOpcode() == Instruction::Mul && isLeftShift) if (Constant *BOOp = dyn_cast(BO->getOperand(1))) return BinaryOperator::CreateMul(BO->getOperand(0), - ConstantExpr::getShl(BOOp, Op1)); + Context->getConstantExprShl(BOOp, Op1)); // Try to fold constant and into select arguments. if (SelectInst *SI = dyn_cast(Op0)) @@ -7125,7 +7444,7 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1, if (TrOp && I.isLogicalShift() && TrOp->isShift() && isa(TrOp->getOperand(1))) { // Okay, we'll do this xform. Make the shift of shift. - Constant *ShAmt = ConstantExpr::getZExt(Op1, TrOp->getType()); + Constant *ShAmt = Context->getConstantExprZExt(Op1, TrOp->getType()); Instruction *NSh = BinaryOperator::Create(I.getOpcode(), TrOp, ShAmt, I.getName()); InsertNewInstBefore(NSh, I); // (shift2 (shift1 & 0x00FF), c2) @@ -7134,8 +7453,8 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1, // part of the register be zeros. Emulate this by inserting an AND to // clear the top bits as needed. This 'and' will usually be zapped by // other xforms later if dead. 
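//===--- Editor's illustration (not part of this patch) ------------------===//
// Sketch of the shift-of-trunc rewrite described above: shifting the wide
// value and then AND'ing with the pre-shifted low-bits mask produces the same
// result as shifting the truncated value. CheckShiftOfTruncSketch is an
// invented name.
#include <cassert>
#include <cstdint>
static void CheckShiftOfTruncSketch() {
  for (uint32_t X = 0; X < (1u << 16); ++X) {
    uint8_t Narrow = static_cast<uint8_t>(static_cast<uint8_t>(X) << 3);
    uint32_t Mask  = 0xFFu << 3;                // low 8 bits, shifted like X
    uint8_t Wide   = static_cast<uint8_t>((X << 3) & Mask);
    assert(Narrow == Wide);
  }
}
//===----------------------------------------------------------------------===//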
- unsigned SrcSize = TrOp->getType()->getPrimitiveSizeInBits(); - unsigned DstSize = TI->getType()->getPrimitiveSizeInBits(); + unsigned SrcSize = TrOp->getType()->getScalarSizeInBits(); + unsigned DstSize = TI->getType()->getScalarSizeInBits(); APInt MaskV(APInt::getLowBitsSet(SrcSize, DstSize)); // The mask we constructed says what the trunc would do if occurring @@ -7149,8 +7468,9 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1, MaskV = MaskV.lshr(Op1->getZExtValue()); } - Instruction *And = BinaryOperator::CreateAnd(NSh, ConstantInt::get(MaskV), - TI->getName()); + Instruction *And = + BinaryOperator::CreateAnd(NSh, ConstantInt::get(*Context, MaskV), + TI->getName()); InsertNewInstBefore(And, I); // shift1 & 0x00FF // Return the value truncated to the interesting size. @@ -7172,7 +7492,8 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1, // These operators commute. // Turn (Y + (X >> C)) << C -> (X + (Y << C)) & (~0 << C) if (isLeftShift && Op0BO->getOperand(1)->hasOneUse() && - match(Op0BO->getOperand(1), m_Shr(m_Value(V1), m_Specific(Op1)))){ + match(Op0BO->getOperand(1), m_Shr(m_Value(V1), + m_Specific(Op1)), *Context)){ Instruction *YS = BinaryOperator::CreateShl( Op0BO->getOperand(0), Op1, Op0BO->getName()); @@ -7182,7 +7503,7 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1, Op0BO->getOperand(1)->getName()); InsertNewInstBefore(X, I); // (X + (Y << C)) uint32_t Op1Val = Op1->getLimitedValue(TypeBits); - return BinaryOperator::CreateAnd(X, ConstantInt::get( + return BinaryOperator::CreateAnd(X, ConstantInt::get(*Context, APInt::getHighBitsSet(TypeBits, TypeBits-Op1Val))); } @@ -7191,14 +7512,15 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1, if (isLeftShift && Op0BOOp1->hasOneUse() && match(Op0BOOp1, m_And(m_Shr(m_Value(V1), m_Specific(Op1)), - m_ConstantInt(CC))) && + m_ConstantInt(CC)), *Context) && cast(Op0BOOp1)->getOperand(0)->hasOneUse()) { Instruction *YS = BinaryOperator::CreateShl( Op0BO->getOperand(0), Op1, Op0BO->getName()); InsertNewInstBefore(YS, I); // (Y << C) Instruction *XM = - BinaryOperator::CreateAnd(V1, ConstantExpr::getShl(CC, Op1), + BinaryOperator::CreateAnd(V1, + Context->getConstantExprShl(CC, Op1), V1->getName()+".mask"); InsertNewInstBefore(XM, I); // X & (CC << C) @@ -7210,7 +7532,8 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1, case Instruction::Sub: { // Turn ((X >> C) + Y) << C -> (X + (Y << C)) & (~0 << C) if (isLeftShift && Op0BO->getOperand(0)->hasOneUse() && - match(Op0BO->getOperand(0), m_Shr(m_Value(V1), m_Specific(Op1)))){ + match(Op0BO->getOperand(0), m_Shr(m_Value(V1), + m_Specific(Op1)), *Context)){ Instruction *YS = BinaryOperator::CreateShl( Op0BO->getOperand(1), Op1, Op0BO->getName()); @@ -7220,7 +7543,7 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1, Op0BO->getOperand(0)->getName()); InsertNewInstBefore(X, I); // (X + (Y << C)) uint32_t Op1Val = Op1->getLimitedValue(TypeBits); - return BinaryOperator::CreateAnd(X, ConstantInt::get( + return BinaryOperator::CreateAnd(X, ConstantInt::get(*Context, APInt::getHighBitsSet(TypeBits, TypeBits-Op1Val))); } @@ -7228,7 +7551,7 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1, if (isLeftShift && Op0BO->getOperand(0)->hasOneUse() && match(Op0BO->getOperand(0), m_And(m_Shr(m_Value(V1), m_Value(V2)), - m_ConstantInt(CC))) && V2 == Op1 && + m_ConstantInt(CC)), *Context) && V2 == Op1 && 
cast(Op0BO->getOperand(0)) ->getOperand(0)->hasOneUse()) { Instruction *YS = BinaryOperator::CreateShl( @@ -7236,7 +7559,8 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1, Op0BO->getName()); InsertNewInstBefore(YS, I); // (Y << C) Instruction *XM = - BinaryOperator::CreateAnd(V1, ConstantExpr::getShl(CC, Op1), + BinaryOperator::CreateAnd(V1, + Context->getConstantExprShl(CC, Op1), V1->getName()+".mask"); InsertNewInstBefore(XM, I); // X & (CC << C) @@ -7278,7 +7602,7 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1, isValid = Op0C->getValue()[TypeBits-1] == highBitSet; if (isValid) { - Constant *NewRHS = ConstantExpr::get(I.getOpcode(), Op0C, Op1); + Constant *NewRHS = Context->getConstantExpr(I.getOpcode(), Op0C, Op1); Instruction *NewShift = BinaryOperator::Create(I.getOpcode(), Op0BO->getOperand(0), Op1); @@ -7306,28 +7630,40 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1, Value *X = ShiftOp->getOperand(0); uint32_t AmtSum = ShiftAmt1+ShiftAmt2; // Fold into one big shift. - if (AmtSum > TypeBits) - AmtSum = TypeBits; const IntegerType *Ty = cast(I.getType()); // Check for (X << c1) << c2 and (X >> c1) >> c2 if (I.getOpcode() == ShiftOp->getOpcode()) { + // If this is oversized composite shift, then unsigned shifts get 0, ashr + // saturates. + if (AmtSum >= TypeBits) { + if (I.getOpcode() != Instruction::AShr) + return ReplaceInstUsesWith(I, Context->getNullValue(I.getType())); + AmtSum = TypeBits-1; // Saturate to 31 for i32 ashr. + } + return BinaryOperator::Create(I.getOpcode(), X, ConstantInt::get(Ty, AmtSum)); } else if (ShiftOp->getOpcode() == Instruction::LShr && I.getOpcode() == Instruction::AShr) { + if (AmtSum >= TypeBits) + return ReplaceInstUsesWith(I, Context->getNullValue(I.getType())); + // ((X >>u C1) >>s C2) -> (X >>u (C1+C2)) since C1 != 0. return BinaryOperator::CreateLShr(X, ConstantInt::get(Ty, AmtSum)); } else if (ShiftOp->getOpcode() == Instruction::AShr && I.getOpcode() == Instruction::LShr) { // ((X >>s C1) >>u C2) -> ((X >>s (C1+C2)) & mask) since C1 != 0. + if (AmtSum >= TypeBits) + AmtSum = TypeBits-1; + Instruction *Shift = BinaryOperator::CreateAShr(X, ConstantInt::get(Ty, AmtSum)); InsertNewInstBefore(Shift, I); APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt2)); - return BinaryOperator::CreateAnd(Shift, ConstantInt::get(Mask)); + return BinaryOperator::CreateAnd(Shift, ConstantInt::get(*Context, Mask)); } // Okay, if we get here, one shift must be left, and the other shift must be @@ -7336,12 +7672,12 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1, // If we have ((X >>? C) << C), turn this into X & (-1 << C). if (I.getOpcode() == Instruction::Shl) { APInt Mask(APInt::getHighBitsSet(TypeBits, TypeBits - ShiftAmt1)); - return BinaryOperator::CreateAnd(X, ConstantInt::get(Mask)); + return BinaryOperator::CreateAnd(X, ConstantInt::get(*Context, Mask)); } // If we have ((X << C) >>u C), turn this into X & (-1 >>u C). if (I.getOpcode() == Instruction::LShr) { APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt1)); - return BinaryOperator::CreateAnd(X, ConstantInt::get(Mask)); + return BinaryOperator::CreateAnd(X, ConstantInt::get(*Context, Mask)); } // We can simplify ((X << C) >>s C) into a trunc + sext. 
// NOTE: we could do this for any C, but that would make 'unusual' integer @@ -7355,7 +7691,7 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1, case 32 : case 64 : case 128: - SExtType = IntegerType::get(Ty->getBitWidth() - ShiftAmt1); + SExtType = Context->getIntegerType(Ty->getBitWidth() - ShiftAmt1); break; default: break; } @@ -7377,7 +7713,8 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1, InsertNewInstBefore(Shift, I); APInt Mask(APInt::getHighBitsSet(TypeBits, TypeBits - ShiftAmt2)); - return BinaryOperator::CreateAnd(Shift, ConstantInt::get(Mask)); + return BinaryOperator::CreateAnd(Shift, + ConstantInt::get(*Context, Mask)); } // (X << C1) >>u C2 --> X >>u (C2-C1) & (-1 >> C2) @@ -7388,7 +7725,8 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1, InsertNewInstBefore(Shift, I); APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt2)); - return BinaryOperator::CreateAnd(Shift, ConstantInt::get(Mask)); + return BinaryOperator::CreateAnd(Shift, + ConstantInt::get(*Context, Mask)); } // We can't handle (X << C1) >>s C2, it shifts arbitrary bits in. @@ -7406,7 +7744,8 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1, InsertNewInstBefore(Shift, I); APInt Mask(APInt::getHighBitsSet(TypeBits, TypeBits - ShiftAmt2)); - return BinaryOperator::CreateAnd(Shift, ConstantInt::get(Mask)); + return BinaryOperator::CreateAnd(Shift, + ConstantInt::get(*Context, Mask)); } // (X << C1) >>u C2 --> X << (C1-C2) & (-1 >> C2) @@ -7417,7 +7756,8 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1, InsertNewInstBefore(Shift, I); APInt Mask(APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt2)); - return BinaryOperator::CreateAnd(Shift, ConstantInt::get(Mask)); + return BinaryOperator::CreateAnd(Shift, + ConstantInt::get(*Context, Mask)); } // We can't handle (X << C1) >>a C2, it shifts arbitrary bits in. @@ -7432,7 +7772,7 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1, /// X*Scale+Offset. /// static Value *DecomposeSimpleLinearExpr(Value *Val, unsigned &Scale, - int &Offset) { + int &Offset, LLVMContext *Context) { assert(Val->getType() == Type::Int32Ty && "Unexpected allocation size type!"); if (ConstantInt *CI = dyn_cast(Val)) { Offset = CI->getZExtValue(); @@ -7455,7 +7795,8 @@ static Value *DecomposeSimpleLinearExpr(Value *Val, unsigned &Scale, // where C1 is divisible by C2. unsigned SubScale; Value *SubVal = - DecomposeSimpleLinearExpr(I->getOperand(0), SubScale, Offset); + DecomposeSimpleLinearExpr(I->getOperand(0), SubScale, + Offset, Context); Offset += RHS->getZExtValue(); Scale = SubScale; return SubVal; @@ -7490,7 +7831,10 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI, EraseInstFromFunction(*User); } } - + + // This requires TargetData to get the alloca alignment and size information. + if (!TD) return 0; + // Get the type really allocated and the type casted to. 
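//===--- Editor's illustration (not part of this patch) ------------------===//
// Sketch of the size bookkeeping behind promoting a cast of an allocation:
// when the allocated element size is a multiple of the cast-to element size,
// scaling the element count keeps the byte count unchanged, so the bitcast
// can be folded into the alloca. The constants model i64 -> i32, and
// CheckAllocaRescaleSketch is an invented name.
#include <cassert>
#include <cstdint>
static void CheckAllocaRescaleSketch() {
  const uint64_t AllocElTySize = 8;                       // e.g. i64
  const uint64_t CastElTySize  = 4;                       // e.g. i32
  assert(AllocElTySize % CastElTySize == 0);
  const uint64_t Scale = AllocElTySize / CastElTySize;    // 2
  for (uint64_t NumElements = 0; NumElements < 100; ++NumElements)
    assert(NumElements * AllocElTySize ==
           (NumElements * Scale) * CastElTySize);         // same storage size
}
//===----------------------------------------------------------------------===//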
const Type *AllocElTy = AI.getAllocatedType(); const Type *CastElTy = PTy->getElementType(); @@ -7507,8 +7851,8 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI, if (!AI.hasOneUse() && !hasOneUsePlusDeclare(&AI) && CastElTyAlign == AllocElTyAlign) return 0; - uint64_t AllocElTySize = TD->getTypePaddedSize(AllocElTy); - uint64_t CastElTySize = TD->getTypePaddedSize(CastElTy); + uint64_t AllocElTySize = TD->getTypeAllocSize(AllocElTy); + uint64_t CastElTySize = TD->getTypeAllocSize(CastElTy); if (CastElTySize == 0 || AllocElTySize == 0) return 0; // See if we can satisfy the modulus by pulling a scale out of the array @@ -7516,7 +7860,8 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI, unsigned ArraySizeScale; int ArrayOffset; Value *NumElements = // See if the array size is a decomposable linear expr. - DecomposeSimpleLinearExpr(AI.getOperand(0), ArraySizeScale, ArrayOffset); + DecomposeSimpleLinearExpr(AI.getOperand(0), ArraySizeScale, + ArrayOffset, Context); // If we can now satisfy the modulus, by using a non-1 scale, we really can // do the xform. @@ -7531,9 +7876,10 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI, // If the allocation size is constant, form a constant mul expression Amt = ConstantInt::get(Type::Int32Ty, Scale); if (isa(NumElements)) - Amt = Multiply(cast(NumElements), cast(Amt)); + Amt = Context->getConstantExprMul(cast(NumElements), + cast(Amt)); // otherwise multiply the amount and the number of elements - else if (Scale != 1) { + else { Instruction *Tmp = BinaryOperator::CreateMul(Amt, NumElements, "tmp"); Amt = InsertNewInstBefore(Tmp, AI); } @@ -7590,17 +7936,17 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI, /// If CastOpc is a sext or zext, we are asking if the low bits of the value can /// bit computed in a larger type, which is then and'd or sext_in_reg'd to get /// the final result. -bool InstCombiner::CanEvaluateInDifferentType(Value *V, const IntegerType *Ty, +bool InstCombiner::CanEvaluateInDifferentType(Value *V, const Type *Ty, unsigned CastOpc, int &NumCastsRemoved){ // We can always evaluate constants in another type. - if (isa(V)) + if (isa(V)) return true; Instruction *I = dyn_cast(V); if (!I) return false; - const IntegerType *OrigTy = cast(V->getType()); + const Type *OrigTy = V->getType(); // If this is an extension or truncate, we can often eliminate it. if (isa(I) || isa(I) || isa(I)) { @@ -7634,12 +7980,29 @@ bool InstCombiner::CanEvaluateInDifferentType(Value *V, const IntegerType *Ty, CanEvaluateInDifferentType(I->getOperand(1), Ty, CastOpc, NumCastsRemoved); + case Instruction::UDiv: + case Instruction::URem: { + // UDiv and URem can be truncated if all the truncated bits are zero. + uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits(); + uint32_t BitWidth = Ty->getScalarSizeInBits(); + if (BitWidth < OrigBitWidth) { + APInt Mask = APInt::getHighBitsSet(OrigBitWidth, OrigBitWidth-BitWidth); + if (MaskedValueIsZero(I->getOperand(0), Mask) && + MaskedValueIsZero(I->getOperand(1), Mask)) { + return CanEvaluateInDifferentType(I->getOperand(0), Ty, CastOpc, + NumCastsRemoved) && + CanEvaluateInDifferentType(I->getOperand(1), Ty, CastOpc, + NumCastsRemoved); + } + } + break; + } case Instruction::Shl: // If we are truncating the result of this SHL, and if it's a shift of a // constant amount, we can always perform a SHL in a smaller type. 
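//===--- Editor's illustration (not part of this patch) ------------------===//
// Sketch of two of the narrowing rules in CanEvaluateInDifferentType: a shl by
// a constant smaller than the narrow width can be done after the truncate, and
// udiv/urem can be narrowed when the truncated-away bits of both operands are
// zero. CheckNarrowEvalSketch is an invented name.
#include <cassert>
#include <cstdint>
static void CheckNarrowEvalSketch() {
  // shl: trunc(X << 3) == trunc(X) << 3 for a shift amount below 8 bits.
  for (uint32_t X = 0; X < (1u << 16); ++X)
    assert(static_cast<uint8_t>(X << 3) ==
           static_cast<uint8_t>(static_cast<uint8_t>(X) << 3));
  // udiv/urem: both operands already fit in the low 8 bits.
  for (uint32_t A = 0; A < 256; ++A)
    for (uint32_t B = 1; B < 256; ++B) {
      assert(static_cast<uint8_t>(A / B) ==
             static_cast<uint8_t>(static_cast<uint8_t>(A) /
                                  static_cast<uint8_t>(B)));
      assert(static_cast<uint8_t>(A % B) ==
             static_cast<uint8_t>(static_cast<uint8_t>(A) %
                                  static_cast<uint8_t>(B)));
    }
}
//===----------------------------------------------------------------------===//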
if (ConstantInt *CI = dyn_cast(I->getOperand(1))) { - uint32_t BitWidth = Ty->getBitWidth(); - if (BitWidth < OrigTy->getBitWidth() && + uint32_t BitWidth = Ty->getScalarSizeInBits(); + if (BitWidth < OrigTy->getScalarSizeInBits() && CI->getLimitedValue(BitWidth) < BitWidth) return CanEvaluateInDifferentType(I->getOperand(0), Ty, CastOpc, NumCastsRemoved); @@ -7650,8 +8013,8 @@ bool InstCombiner::CanEvaluateInDifferentType(Value *V, const IntegerType *Ty, // lshr iff we know that the bits we would otherwise be shifting in are // already zeros. if (ConstantInt *CI = dyn_cast(I->getOperand(1))) { - uint32_t OrigBitWidth = OrigTy->getBitWidth(); - uint32_t BitWidth = Ty->getBitWidth(); + uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits(); + uint32_t BitWidth = Ty->getScalarSizeInBits(); if (BitWidth < OrigBitWidth && MaskedValueIsZero(I->getOperand(0), APInt::getHighBitsSet(OrigBitWidth, OrigBitWidth-BitWidth)) && @@ -7704,7 +8067,8 @@ bool InstCombiner::CanEvaluateInDifferentType(Value *V, const IntegerType *Ty, Value *InstCombiner::EvaluateInDifferentType(Value *V, const Type *Ty, bool isSigned) { if (Constant *C = dyn_cast(V)) - return ConstantExpr::getIntegerCast(C, Ty, isSigned /*Sext or ZExt*/); + return Context->getConstantExprIntegerCast(C, Ty, + isSigned /*Sext or ZExt*/); // Otherwise, it must be an instruction. Instruction *I = cast(V); @@ -7719,7 +8083,9 @@ Value *InstCombiner::EvaluateInDifferentType(Value *V, const Type *Ty, case Instruction::Xor: case Instruction::AShr: case Instruction::LShr: - case Instruction::Shl: { + case Instruction::Shl: + case Instruction::UDiv: + case Instruction::URem: { Value *LHS = EvaluateInDifferentType(I->getOperand(0), Ty, isSigned); Value *RHS = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned); Res = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS); @@ -7756,7 +8122,7 @@ Value *InstCombiner::EvaluateInDifferentType(Value *V, const Type *Ty, } default: // TODO: Can handle more cases here. - assert(0 && "Unreachable!"); + llvm_unreachable("Unreachable!"); break; } @@ -7798,7 +8164,9 @@ Instruction *InstCombiner::commonCastTransforms(CastInst &CI) { /// resultant element type, otherwise return null. static const Type *FindElementAtOffset(const Type *Ty, int64_t Offset, SmallVectorImpl &NewIndices, - const TargetData *TD) { + const TargetData *TD, + LLVMContext *Context) { + if (!TD) return 0; if (!Ty->isSized()) return 0; // Start with the index over the outer type. Note that the type size @@ -7806,7 +8174,7 @@ static const Type *FindElementAtOffset(const Type *Ty, int64_t Offset, // is something like [0 x {int, int}] const Type *IntPtrTy = TD->getIntPtrType(); int64_t FirstIdx = 0; - if (int64_t TySize = TD->getTypePaddedSize(Ty)) { + if (int64_t TySize = TD->getTypeAllocSize(Ty)) { FirstIdx = Offset/TySize; Offset -= FirstIdx*TySize; @@ -7838,7 +8206,7 @@ static const Type *FindElementAtOffset(const Type *Ty, int64_t Offset, Offset -= SL->getElementOffset(Elt); Ty = STy->getElementType(Elt); } else if (const ArrayType *AT = dyn_cast(Ty)) { - uint64_t EltSize = TD->getTypePaddedSize(AT->getElementType()); + uint64_t EltSize = TD->getTypeAllocSize(AT->getElementType()); assert(EltSize && "Cannot index into a zero-sized array"); NewIndices.push_back(ConstantInt::get(IntPtrTy,Offset/EltSize)); Offset %= EltSize; @@ -7872,10 +8240,11 @@ Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) { // GEP computes a constant offset, see if we can convert these three // instructions into fewer. 
This typically happens with unions and other // non-type-safe code. - if (GEP->hasOneUse() && isa(GEP->getOperand(0))) { + if (TD && GEP->hasOneUse() && isa(GEP->getOperand(0))) { if (GEP->hasAllConstantIndices()) { // We are guaranteed to get a constant from EmitGEPOffset. - ConstantInt *OffsetV = cast(EmitGEPOffset(GEP, CI, *this)); + ConstantInt *OffsetV = + cast(EmitGEPOffset(GEP, CI, *this)); int64_t Offset = OffsetV->getSExtValue(); // Get the base pointer input of the bitcast, and the type it points to. @@ -7883,7 +8252,7 @@ Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) { const Type *GEPIdxTy = cast(OrigBase->getType())->getElementType(); SmallVector NewIndices; - if (FindElementAtOffset(GEPIdxTy, Offset, NewIndices, TD)) { + if (FindElementAtOffset(GEPIdxTy, Offset, NewIndices, TD, Context)) { // If we were able to index down into an element, create the GEP // and bitcast the result. This eliminates one bitcast, potentially // two. @@ -7905,11 +8274,25 @@ Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) { return commonCastTransforms(CI); } +/// isSafeIntegerType - Return true if this is a basic integer type, not a crazy +/// type like i42. We don't want to introduce operations on random non-legal +/// integer types where they don't already exist in the code. In the future, +/// we should consider making this based off target-data, so that 32-bit targets +/// won't get i64 operations etc. +static bool isSafeIntegerType(const Type *Ty) { + switch (Ty->getPrimitiveSizeInBits()) { + case 8: + case 16: + case 32: + case 64: + return true; + default: + return false; + } +} -/// Only the TRUNC, ZEXT, SEXT, and BITCAST can both operand and result as -/// integer types. This function implements the common transforms for all those -/// cases. -/// @brief Implement the transforms common to CastInst with integer operands +/// commonIntCastTransforms - This function implements the common transforms +/// for trunc, zext, and sext. Instruction *InstCombiner::commonIntCastTransforms(CastInst &CI) { if (Instruction *Result = commonCastTransforms(CI)) return Result; @@ -7917,8 +8300,8 @@ Instruction *InstCombiner::commonIntCastTransforms(CastInst &CI) { Value *Src = CI.getOperand(0); const Type *SrcTy = Src->getType(); const Type *DestTy = CI.getType(); - uint32_t SrcBitSize = SrcTy->getPrimitiveSizeInBits(); - uint32_t DestBitSize = DestTy->getPrimitiveSizeInBits(); + uint32_t SrcBitSize = SrcTy->getScalarSizeInBits(); + uint32_t DestBitSize = DestTy->getScalarSizeInBits(); // See if we can simplify any instructions used by the LHS whose sole // purpose is to compute bits we don't care about. @@ -7933,8 +8316,12 @@ Instruction *InstCombiner::commonIntCastTransforms(CastInst &CI) { // Attempt to propagate the cast into the instruction for int->int casts. int NumCastsRemoved = 0; - if (!isa(CI) && - CanEvaluateInDifferentType(SrcI, cast(DestTy), + // Only do this if the dest type is a simple type, don't convert the + // expression tree to something weird like i93 unless the source is also + // strange. + if ((isSafeIntegerType(DestTy->getScalarType()) || + !isSafeIntegerType(SrcI->getType()->getScalarType())) && + CanEvaluateInDifferentType(SrcI, DestTy, CI.getOpcode(), NumCastsRemoved)) { // If this cast is a truncate, evaluting in a different type always // eliminates the cast, so it is always a win. 
If this is a zero-extension, @@ -7948,7 +8335,7 @@ Instruction *InstCombiner::commonIntCastTransforms(CastInst &CI) { default: // All the others use floating point so we shouldn't actually // get here because of the check above. - assert(0 && "Unknown cast type"); + llvm_unreachable("Unknown cast type"); case Instruction::Trunc: DoXForm = true; break; @@ -8004,9 +8391,8 @@ Instruction *InstCombiner::commonIntCastTransforms(CastInst &CI) { assert(Res->getType() == DestTy); switch (CI.getOpcode()) { - default: assert(0 && "Unknown cast type!"); + default: llvm_unreachable("Unknown cast type!"); case Instruction::Trunc: - case Instruction::BitCast: // Just replace this cast with the result. return ReplaceInstUsesWith(CI, Res); case Instruction::ZExt: { @@ -8019,8 +8405,8 @@ Instruction *InstCombiner::commonIntCastTransforms(CastInst &CI) { return ReplaceInstUsesWith(CI, Res); // We need to emit an AND to clear the high bits. - Constant *C = ConstantInt::get(APInt::getLowBitsSet(DestBitSize, - SrcBitSize)); + Constant *C = ConstantInt::get(*Context, + APInt::getLowBitsSet(DestBitSize, SrcBitSize)); return BinaryOperator::CreateAnd(Res, C); } case Instruction::SExt: { @@ -8049,16 +8435,12 @@ Instruction *InstCombiner::commonIntCastTransforms(CastInst &CI) { case Instruction::Or: case Instruction::Xor: // If we are discarding information, rewrite. - if (DestBitSize <= SrcBitSize && DestBitSize != 1) { - // Don't insert two casts if they cannot be eliminated. We allow - // two casts to be inserted if the sizes are the same. This could - // only be converting signedness, which is a noop. - if (DestBitSize == SrcBitSize || - !ValueRequiresCast(CI.getOpcode(), Op1, DestTy,TD) || + if (DestBitSize < SrcBitSize && DestBitSize != 1) { + // Don't insert two casts unless at least one can be eliminated. + if (!ValueRequiresCast(CI.getOpcode(), Op1, DestTy, TD) || !ValueRequiresCast(CI.getOpcode(), Op0, DestTy, TD)) { - Instruction::CastOps opcode = CI.getOpcode(); - Value *Op0c = InsertCastBefore(opcode, Op0, DestTy, *SrcI); - Value *Op1c = InsertCastBefore(opcode, Op1, DestTy, *SrcI); + Value *Op0c = InsertCastBefore(Instruction::Trunc, Op0, DestTy, *SrcI); + Value *Op1c = InsertCastBefore(Instruction::Trunc, Op1, DestTy, *SrcI); return BinaryOperator::Create( cast(SrcI)->getOpcode(), Op0c, Op1c); } @@ -8067,61 +8449,25 @@ Instruction *InstCombiner::commonIntCastTransforms(CastInst &CI) { // cast (xor bool X, true) to int --> xor (cast bool X to int), 1 if (isa(CI) && SrcBitSize == 1 && SrcI->getOpcode() == Instruction::Xor && - Op1 == ConstantInt::getTrue() && + Op1 == Context->getTrue() && (!Op0->hasOneUse() || !isa(Op0))) { Value *New = InsertCastBefore(Instruction::ZExt, Op0, DestTy, CI); - return BinaryOperator::CreateXor(New, ConstantInt::get(CI.getType(), 1)); - } - break; - case Instruction::SDiv: - case Instruction::UDiv: - case Instruction::SRem: - case Instruction::URem: - // If we are just changing the sign, rewrite. - if (DestBitSize == SrcBitSize) { - // Don't insert two casts if they cannot be eliminated. We allow - // two casts to be inserted if the sizes are the same. This could - // only be converting signedness, which is a noop. 
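//===--- Editor's illustration (not part of this patch) ------------------===//
// Sketch of the and/or/xor narrowing a few lines up: truncation distributes
// over bitwise operations, so the operation can be evaluated in the smaller
// type when the wide result is about to be truncated anyway.
// CheckNarrowBitwiseSketch is an invented name.
#include <cassert>
#include <cstdint>
static void CheckNarrowBitwiseSketch() {
  for (uint32_t A = 0; A < (1u << 16); A += 77)
    for (uint32_t B = 0; B < (1u << 16); B += 53) {
      uint8_t a = static_cast<uint8_t>(A), b = static_cast<uint8_t>(B);
      assert(static_cast<uint8_t>(A & B) == static_cast<uint8_t>(a & b));
      assert(static_cast<uint8_t>(A | B) == static_cast<uint8_t>(a | b));
      assert(static_cast<uint8_t>(A ^ B) == static_cast<uint8_t>(a ^ b));
    }
}
//===----------------------------------------------------------------------===//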
- if (!ValueRequiresCast(CI.getOpcode(), Op1, DestTy, TD) || - !ValueRequiresCast(CI.getOpcode(), Op0, DestTy, TD)) { - Value *Op0c = InsertCastBefore(Instruction::BitCast, - Op0, DestTy, *SrcI); - Value *Op1c = InsertCastBefore(Instruction::BitCast, - Op1, DestTy, *SrcI); - return BinaryOperator::Create( - cast(SrcI)->getOpcode(), Op0c, Op1c); - } + return BinaryOperator::CreateXor(New, + ConstantInt::get(CI.getType(), 1)); } break; - case Instruction::Shl: - // Allow changing the sign of the source operand. Do not allow - // changing the size of the shift, UNLESS the shift amount is a - // constant. We must not change variable sized shifts to a smaller - // size, because it is undefined to shift more bits out than exist - // in the value. - if (DestBitSize == SrcBitSize || - (DestBitSize < SrcBitSize && isa(Op1))) { - Instruction::CastOps opcode = (DestBitSize == SrcBitSize ? - Instruction::BitCast : Instruction::Trunc); - Value *Op0c = InsertCastBefore(opcode, Op0, DestTy, *SrcI); - Value *Op1c = InsertCastBefore(opcode, Op1, DestTy, *SrcI); + case Instruction::Shl: { + // Canonicalize trunc inside shl, if we can. + ConstantInt *CI = dyn_cast(Op1); + if (CI && DestBitSize < SrcBitSize && + CI->getLimitedValue(DestBitSize) < DestBitSize) { + Value *Op0c = InsertCastBefore(Instruction::Trunc, Op0, DestTy, *SrcI); + Value *Op1c = InsertCastBefore(Instruction::Trunc, Op1, DestTy, *SrcI); return BinaryOperator::CreateShl(Op0c, Op1c); } break; - case Instruction::AShr: - // If this is a signed shr, and if all bits shifted in are about to be - // truncated off, turn it into an unsigned shr to allow greater - // simplifications. - if (DestBitSize < SrcBitSize && - isa(Op1)) { - uint32_t ShiftAmt = cast(Op1)->getLimitedValue(SrcBitSize); - if (SrcBitSize > ShiftAmt && SrcBitSize-ShiftAmt >= DestBitSize) { - // Insert the new logical shift right. - return BinaryOperator::CreateLShr(Op0, Op1); - } - } - break; + } } return 0; } @@ -8132,51 +8478,35 @@ Instruction *InstCombiner::visitTrunc(TruncInst &CI) { Value *Src = CI.getOperand(0); const Type *Ty = CI.getType(); - uint32_t DestBitWidth = Ty->getPrimitiveSizeInBits(); - uint32_t SrcBitWidth = cast(Src->getType())->getBitWidth(); - - if (Instruction *SrcI = dyn_cast(Src)) { - switch (SrcI->getOpcode()) { - default: break; - case Instruction::LShr: - // We can shrink lshr to something smaller if we know the bits shifted in - // are already zeros. - if (ConstantInt *ShAmtV = dyn_cast(SrcI->getOperand(1))) { - uint32_t ShAmt = ShAmtV->getLimitedValue(SrcBitWidth); - - // Get a mask for the bits shifting in. - APInt Mask(APInt::getLowBitsSet(SrcBitWidth, ShAmt).shl(DestBitWidth)); - Value* SrcIOp0 = SrcI->getOperand(0); - if (SrcI->hasOneUse() && MaskedValueIsZero(SrcIOp0, Mask)) { - if (ShAmt >= DestBitWidth) // All zeros. - return ReplaceInstUsesWith(CI, Constant::getNullValue(Ty)); - - // Okay, we can shrink this. Truncate the input, then return a new - // shift. - Value *V1 = InsertCastBefore(Instruction::Trunc, SrcIOp0, Ty, CI); - Value *V2 = InsertCastBefore(Instruction::Trunc, SrcI->getOperand(1), - Ty, CI); - return BinaryOperator::CreateLShr(V1, V2); - } - } else { // This is a variable shr. - - // Turn 'trunc (lshr X, Y) to bool' into '(X & (1 << Y)) != 0'. This is - // more LLVM instructions, but allows '1 << Y' to be hoisted if - // loop-invariant and CSE'd. 
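//===--- Editor's illustration (not part of this patch) ------------------===//
// Sketch of the bit-test equivalence mentioned in the comment above: keeping
// only the low bit of "X >> Y" is the same as testing whether bit Y of X is
// set, which is why the '(X & (1 << Y)) != 0' form is a valid replacement.
// CheckBitTestSketch is an invented name.
#include <cassert>
#include <cstdint>
static void CheckBitTestSketch() {
  for (uint32_t X = 0; X < 1024; ++X)
    for (unsigned Y = 0; Y < 32; ++Y) {
      bool TruncOfShift = ((X >> Y) & 1) != 0;   // trunc (lshr X, Y) to i1
      bool BitTest      = (X & (1u << Y)) != 0;  // (X & (1 << Y)) != 0
      assert(TruncOfShift == BitTest);
    }
}
//===----------------------------------------------------------------------===//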
- if (CI.getType() == Type::Int1Ty && SrcI->hasOneUse()) { - Value *One = ConstantInt::get(SrcI->getType(), 1); - - Value *V = InsertNewInstBefore( - BinaryOperator::CreateShl(One, SrcI->getOperand(1), - "tmp"), CI); - V = InsertNewInstBefore(BinaryOperator::CreateAnd(V, - SrcI->getOperand(0), - "tmp"), CI); - Value *Zero = Constant::getNullValue(V->getType()); - return new ICmpInst(ICmpInst::ICMP_NE, V, Zero); - } - } - break; + uint32_t DestBitWidth = Ty->getScalarSizeInBits(); + uint32_t SrcBitWidth = Src->getType()->getScalarSizeInBits(); + + // Canonicalize trunc x to i1 -> (icmp ne (and x, 1), 0) + if (DestBitWidth == 1) { + Constant *One = ConstantInt::get(Src->getType(), 1); + Src = InsertNewInstBefore(BinaryOperator::CreateAnd(Src, One, "tmp"), CI); + Value *Zero = Context->getNullValue(Src->getType()); + return new ICmpInst(*Context, ICmpInst::ICMP_NE, Src, Zero); + } + + // Optimize trunc(lshr(), c) to pull the shift through the truncate. + ConstantInt *ShAmtV = 0; + Value *ShiftOp = 0; + if (Src->hasOneUse() && + match(Src, m_LShr(m_Value(ShiftOp), m_ConstantInt(ShAmtV)), *Context)) { + uint32_t ShAmt = ShAmtV->getLimitedValue(SrcBitWidth); + + // Get a mask for the bits shifting in. + APInt Mask(APInt::getLowBitsSet(SrcBitWidth, ShAmt).shl(DestBitWidth)); + if (MaskedValueIsZero(ShiftOp, Mask)) { + if (ShAmt >= DestBitWidth) // All zeros. + return ReplaceInstUsesWith(CI, Context->getNullValue(Ty)); + + // Okay, we can shrink this. Truncate the input, then return a new + // shift. + Value *V1 = InsertCastBefore(Instruction::Trunc, ShiftOp, Ty, CI); + Value *V2 = Context->getConstantExprTrunc(ShAmtV, Ty); + return BinaryOperator::CreateLShr(V1, V2); } } @@ -8201,7 +8531,7 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI, Value *In = ICI->getOperand(0); Value *Sh = ConstantInt::get(In->getType(), - In->getType()->getPrimitiveSizeInBits()-1); + In->getType()->getScalarSizeInBits()-1); In = InsertNewInstBefore(BinaryOperator::CreateLShr(In, Sh, In->getName()+".lobit"), CI); @@ -8247,7 +8577,7 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI, // (X&4) == 2 --> false // (X&4) != 2 --> true Constant *Res = ConstantInt::get(Type::Int1Ty, isNE); - Res = ConstantExpr::getZExt(Res, CI.getType()); + Res = Context->getConstantExprZExt(Res, CI.getType()); return ReplaceInstUsesWith(CI, Res); } @@ -8257,7 +8587,7 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, Instruction &CI, // Perform a logical shr by shiftamt. // Insert the shift to put the result in the low bit. In = InsertNewInstBefore(BinaryOperator::CreateLShr(In, - ConstantInt::get(In->getType(), ShiftAmt), + ConstantInt::get(In->getType(), ShiftAmt), In->getName()+".lobit"), CI); } @@ -8292,28 +8622,31 @@ Instruction *InstCombiner::visitZExt(ZExtInst &CI) { // Get the sizes of the types involved. We know that the intermediate type // will be smaller than A or C, but don't know the relation between A and C. 
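//===--- Editor's illustration (not part of this patch) ------------------===//
// Sketch of the zext-of-trunc fold handled here, for the case where source
// and destination widths match: zero-extending a truncated value just masks
// off the high bits, so "zext (trunc A)" is "A & low-bits-mask".
// CheckZExtOfTruncSketch is an invented name.
#include <cassert>
#include <cstdint>
static void CheckZExtOfTruncSketch() {
  for (uint32_t A = 0; A < (1u << 16); ++A) {
    uint32_t ZExtOfTrunc = static_cast<uint32_t>(static_cast<uint8_t>(A));
    assert(ZExtOfTrunc == (A & 0xFFu));          // SrcSize == DstSize case
  }
}
//===----------------------------------------------------------------------===//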
Value *A = CSrc->getOperand(0); - unsigned SrcSize = A->getType()->getPrimitiveSizeInBits(); - unsigned MidSize = CSrc->getType()->getPrimitiveSizeInBits(); - unsigned DstSize = CI.getType()->getPrimitiveSizeInBits(); + unsigned SrcSize = A->getType()->getScalarSizeInBits(); + unsigned MidSize = CSrc->getType()->getScalarSizeInBits(); + unsigned DstSize = CI.getType()->getScalarSizeInBits(); // If we're actually extending zero bits, then if // SrcSize < DstSize: zext(a & mask) // SrcSize == DstSize: a & mask // SrcSize > DstSize: trunc(a) & mask if (SrcSize < DstSize) { APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize)); - Constant *AndConst = ConstantInt::get(AndValue); + Constant *AndConst = ConstantInt::get(A->getType(), AndValue); Instruction *And = BinaryOperator::CreateAnd(A, AndConst, CSrc->getName()+".mask"); InsertNewInstBefore(And, CI); return new ZExtInst(And, CI.getType()); } else if (SrcSize == DstSize) { APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize)); - return BinaryOperator::CreateAnd(A, ConstantInt::get(AndValue)); + return BinaryOperator::CreateAnd(A, ConstantInt::get(A->getType(), + AndValue)); } else if (SrcSize > DstSize) { Instruction *Trunc = new TruncInst(A, CI.getType(), "tmp"); InsertNewInstBefore(Trunc, CI); APInt AndValue(APInt::getLowBitsSet(DstSize, MidSize)); - return BinaryOperator::CreateAnd(Trunc, ConstantInt::get(AndValue)); + return BinaryOperator::CreateAnd(Trunc, + ConstantInt::get(Trunc->getType(), + AndValue)); } } @@ -8335,6 +8668,33 @@ Instruction *InstCombiner::visitZExt(ZExtInst &CI) { } } + // zext(trunc(t) & C) -> (t & zext(C)). + if (SrcI && SrcI->getOpcode() == Instruction::And && SrcI->hasOneUse()) + if (ConstantInt *C = dyn_cast(SrcI->getOperand(1))) + if (TruncInst *TI = dyn_cast(SrcI->getOperand(0))) { + Value *TI0 = TI->getOperand(0); + if (TI0->getType() == CI.getType()) + return + BinaryOperator::CreateAnd(TI0, + Context->getConstantExprZExt(C, CI.getType())); + } + + // zext((trunc(t) & C) ^ C) -> ((t & zext(C)) ^ zext(C)). + if (SrcI && SrcI->getOpcode() == Instruction::Xor && SrcI->hasOneUse()) + if (ConstantInt *C = dyn_cast(SrcI->getOperand(1))) + if (BinaryOperator *And = dyn_cast(SrcI->getOperand(0))) + if (And->getOpcode() == Instruction::And && And->hasOneUse() && + And->getOperand(1) == C) + if (TruncInst *TI = dyn_cast(And->getOperand(0))) { + Value *TI0 = TI->getOperand(0); + if (TI0->getType() == CI.getType()) { + Constant *ZC = Context->getConstantExprZExt(C, CI.getType()); + Instruction *NewAnd = BinaryOperator::CreateAnd(TI0, ZC, "tmp"); + InsertNewInstBefore(NewAnd, *And); + return BinaryOperator::CreateXor(NewAnd, ZC); + } + } + return 0; } @@ -8347,16 +8707,16 @@ Instruction *InstCombiner::visitSExt(SExtInst &CI) { // Canonicalize sign-extend from i1 to a select. if (Src->getType() == Type::Int1Ty) return SelectInst::Create(Src, - ConstantInt::getAllOnesValue(CI.getType()), - Constant::getNullValue(CI.getType())); + Context->getAllOnesValue(CI.getType()), + Context->getNullValue(CI.getType())); // See if the value being truncated is already sign extended. If so, just // eliminate the trunc/sext pair. 
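//===--- Editor's illustration (not part of this patch) ------------------===//
// Sketch of the trunc/sext elimination described above: when the wide value is
// already sign-extended (here, it fits in i8), truncating it and sign-extending
// it back is a no-op. CheckSExtOfTruncSketch is an invented name.
#include <cassert>
#include <cstdint>
static void CheckSExtOfTruncSketch() {
  for (int32_t X = -128; X <= 127; ++X) {        // already sign-extended from i8
    int32_t RoundTrip = static_cast<int32_t>(static_cast<int8_t>(X));
    assert(RoundTrip == X);                      // trunc + sext pair vanishes
  }
}
//===----------------------------------------------------------------------===//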
- if (getOpcode(Src) == Instruction::Trunc) { + if (Operator::getOpcode(Src) == Instruction::Trunc) { Value *Op = cast(Src)->getOperand(0); - unsigned OpBits = cast(Op->getType())->getBitWidth(); - unsigned MidBits = cast(Src->getType())->getBitWidth(); - unsigned DestBits = cast(CI.getType())->getBitWidth(); + unsigned OpBits = Op->getType()->getScalarSizeInBits(); + unsigned MidBits = Src->getType()->getScalarSizeInBits(); + unsigned DestBits = CI.getType()->getScalarSizeInBits(); unsigned NumSignBits = ComputeNumSignBits(Op); if (OpBits == DestBits) { @@ -8393,12 +8753,12 @@ Instruction *InstCombiner::visitSExt(SExtInst &CI) { Value *A = 0; ConstantInt *BA = 0, *CA = 0; if (match(Src, m_AShr(m_Shl(m_Value(A), m_ConstantInt(BA)), - m_ConstantInt(CA))) && + m_ConstantInt(CA)), *Context) && BA == CA && isa(A)) { Value *I = cast(A)->getOperand(0); if (I->getType() == CI.getType()) { - unsigned MidSize = Src->getType()->getPrimitiveSizeInBits(); - unsigned SrcDstSize = CI.getType()->getPrimitiveSizeInBits(); + unsigned MidSize = Src->getType()->getScalarSizeInBits(); + unsigned SrcDstSize = CI.getType()->getScalarSizeInBits(); unsigned ShAmt = CA->getZExtValue()+SrcDstSize-MidSize; Constant *ShAmtV = ConstantInt::get(CI.getType(), ShAmt); I = InsertNewInstBefore(BinaryOperator::CreateShl(I, ShAmtV, @@ -8412,21 +8772,22 @@ Instruction *InstCombiner::visitSExt(SExtInst &CI) { /// FitsInFPType - Return a Constant* for the specified FP constant if it fits /// in the specified FP type without changing its value. -static Constant *FitsInFPType(ConstantFP *CFP, const fltSemantics &Sem) { +static Constant *FitsInFPType(ConstantFP *CFP, const fltSemantics &Sem, + LLVMContext *Context) { bool losesInfo; APFloat F = CFP->getValueAPF(); (void)F.convert(Sem, APFloat::rmNearestTiesToEven, &losesInfo); if (!losesInfo) - return ConstantFP::get(F); + return Context->getConstantFP(F); return 0; } /// LookThroughFPExtensions - If this is an fp extension instruction, look /// through it until we get the source value. -static Value *LookThroughFPExtensions(Value *V) { +static Value *LookThroughFPExtensions(Value *V, LLVMContext *Context) { if (Instruction *I = dyn_cast(V)) if (I->getOpcode() == Instruction::FPExt) - return LookThroughFPExtensions(I->getOperand(0)); + return LookThroughFPExtensions(I->getOperand(0), Context); // If this value is a constant, return the constant in the smallest FP type // that can accurately represent it. This allows us to turn @@ -8435,11 +8796,11 @@ static Value *LookThroughFPExtensions(Value *V) { if (CFP->getType() == Type::PPC_FP128Ty) return V; // No constant folding of this. // See if the value can be truncated to float and then reextended. - if (Value *V = FitsInFPType(CFP, APFloat::IEEEsingle)) + if (Value *V = FitsInFPType(CFP, APFloat::IEEEsingle, Context)) return V; if (CFP->getType() == Type::DoubleTy) return V; // Won't shrink. - if (Value *V = FitsInFPType(CFP, APFloat::IEEEdouble)) + if (Value *V = FitsInFPType(CFP, APFloat::IEEEdouble, Context)) return V; // Don't try to shrink to various long double types. } @@ -8451,29 +8812,29 @@ Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) { if (Instruction *I = commonCastTransforms(CI)) return I; - // If we have fptrunc(add (fpextend x), (fpextend y)), where x and y are + // If we have fptrunc(fadd (fpextend x), (fpextend y)), where x and y are // smaller than the destination type, we can eliminate the truncate by doing - // the add as the smaller type. 
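The trunc/sext elimination above fires when ComputeNumSignBits proves that every bit the truncate discards is a copy of the sign bit, so the round trip is the identity. A standalone i32/i16 spot check of that condition, not taken from the patch:

#include <cassert>
#include <cstdint>

int main() {
  // Values whose top 17 bits are sign copies, i.e. NumSignBits > 32 - 16,
  // which is the condition under which the trunc/sext pair is removable.
  const int32_t Samples[] = {-32768, -1234, -1, 0, 1, 42, 32767};
  for (int32_t X : Samples) {
    int16_t Truncated = (int16_t)X;        // trunc i32 -> i16
    int32_t Extended  = (int32_t)Truncated; // sext i16 -> i32
    assert(Extended == X);                  // the pair is a no-op
  }
  return 0;
}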
This applies to add/sub/mul/div as well as + // the add as the smaller type. This applies to fadd/fsub/fmul/fdiv as well as // many builtins (sqrt, etc). BinaryOperator *OpI = dyn_cast(CI.getOperand(0)); if (OpI && OpI->hasOneUse()) { switch (OpI->getOpcode()) { default: break; - case Instruction::Add: - case Instruction::Sub: - case Instruction::Mul: + case Instruction::FAdd: + case Instruction::FSub: + case Instruction::FMul: case Instruction::FDiv: case Instruction::FRem: const Type *SrcTy = OpI->getType(); - Value *LHSTrunc = LookThroughFPExtensions(OpI->getOperand(0)); - Value *RHSTrunc = LookThroughFPExtensions(OpI->getOperand(1)); + Value *LHSTrunc = LookThroughFPExtensions(OpI->getOperand(0), Context); + Value *RHSTrunc = LookThroughFPExtensions(OpI->getOperand(1), Context); if (LHSTrunc->getType() != SrcTy && RHSTrunc->getType() != SrcTy) { - unsigned DstSize = CI.getType()->getPrimitiveSizeInBits(); + unsigned DstSize = CI.getType()->getScalarSizeInBits(); // If the source types were both smaller than the destination type of // the cast, do this xform. - if (LHSTrunc->getType()->getPrimitiveSizeInBits() <= DstSize && - RHSTrunc->getType()->getPrimitiveSizeInBits() <= DstSize) { + if (LHSTrunc->getType()->getScalarSizeInBits() <= DstSize && + RHSTrunc->getType()->getScalarSizeInBits() <= DstSize) { LHSTrunc = InsertCastBefore(Instruction::FPExt, LHSTrunc, CI.getType(), CI); RHSTrunc = InsertCastBefore(Instruction::FPExt, RHSTrunc, @@ -8504,7 +8865,7 @@ Instruction *InstCombiner::visitFPToUI(FPToUIInst &FI) { // 'X' value would cause an undefined result for the fptoui. if ((isa(OpI) || isa(OpI)) && OpI->getOperand(0)->getType() == FI.getType() && - (int)FI.getType()->getPrimitiveSizeInBits() < /*extra bit for sign */ + (int)FI.getType()->getScalarSizeInBits() < /*extra bit for sign */ OpI->getType()->getFPMantissaWidth()) return ReplaceInstUsesWith(FI, OpI->getOperand(0)); @@ -8524,7 +8885,7 @@ Instruction *InstCombiner::visitFPToSI(FPToSIInst &FI) { // 'X' value would cause an undefined result for the fptoui. if ((isa(OpI) || isa(OpI)) && OpI->getOperand(0)->getType() == FI.getType() && - (int)FI.getType()->getPrimitiveSizeInBits() <= + (int)FI.getType()->getScalarSizeInBits() <= OpI->getType()->getFPMantissaWidth()) return ReplaceInstUsesWith(FI, OpI->getOperand(0)); @@ -8539,61 +8900,41 @@ Instruction *InstCombiner::visitSIToFP(CastInst &CI) { return commonCastTransforms(CI); } -Instruction *InstCombiner::visitPtrToInt(CastInst &CI) { +Instruction *InstCombiner::visitPtrToInt(PtrToIntInst &CI) { + // If the destination integer type is smaller than the intptr_t type for + // this target, do a ptrtoint to intptr_t then do a trunc. This allows the + // trunc to be exposed to other transforms. Don't do this for extending + // ptrtoint's, because we don't know if the target sign or zero extends its + // pointers. + if (TD && + CI.getType()->getScalarSizeInBits() < TD->getPointerSizeInBits()) { + Value *P = InsertNewInstBefore(new PtrToIntInst(CI.getOperand(0), + TD->getIntPtrType(), + "tmp"), CI); + return new TruncInst(P, CI.getType()); + } + return commonPointerCastTransforms(CI); } Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) { + // If the source integer type is larger than the intptr_t type for + // this target, do a trunc to the intptr_t type, then inttoptr of it. This + // allows the trunc to be exposed to other transforms. Don't do this for + // extending inttoptr's, because we don't know if the target sign or zero + // extends to pointers. 
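The fptrunc change narrows fadd/fsub/fmul/fdiv whose operands are fpext'd from a smaller type. For float operands widened to double, narrowing an addition this way is exact because double's 53-bit significand exceeds the 2p+2 bound (p = 24) under which the extra rounding step is harmless. A spot check rather than a proof, assuming ordinary IEEE float and double arithmetic:

#include <cassert>
#include <cstring>

int main() {
  const float Samples[] = {0.0f, 1.0f, -1.5f, 3.3333333f, 1e-30f,
                           1e30f, 1.0000001f, -2.7182818f};
  for (float A : Samples)
    for (float B : Samples) {
      float Narrow  = A + B;                           // fadd float A, B
      float Widened = (float)((double)A + (double)B);  // fptrunc(fadd(fpext A, fpext B))
      // Compare bit patterns so rounding differences could not hide.
      assert(std::memcmp(&Narrow, &Widened, sizeof(float)) == 0);
    }
  return 0;
}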
+ if (TD && + CI.getOperand(0)->getType()->getScalarSizeInBits() > + TD->getPointerSizeInBits()) { + Value *P = InsertNewInstBefore(new TruncInst(CI.getOperand(0), + TD->getIntPtrType(), + "tmp"), CI); + return new IntToPtrInst(P, CI.getType()); + } + if (Instruction *I = commonCastTransforms(CI)) return I; - - const Type *DestPointee = cast(CI.getType())->getElementType(); - if (!DestPointee->isSized()) return 0; - - // If this is inttoptr(add (ptrtoint x), cst), try to turn this into a GEP. - ConstantInt *Cst; - Value *X; - if (match(CI.getOperand(0), m_Add(m_Cast(m_Value(X)), - m_ConstantInt(Cst)))) { - // If the source and destination operands have the same type, see if this - // is a single-index GEP. - if (X->getType() == CI.getType()) { - // Get the size of the pointee type. - uint64_t Size = TD->getTypePaddedSize(DestPointee); - - // Convert the constant to intptr type. - APInt Offset = Cst->getValue(); - Offset.sextOrTrunc(TD->getPointerSizeInBits()); - - // If Offset is evenly divisible by Size, we can do this xform. - if (Size && !APIntOps::srem(Offset, APInt(Offset.getBitWidth(), Size))){ - Offset = APIntOps::sdiv(Offset, APInt(Offset.getBitWidth(), Size)); - return GetElementPtrInst::Create(X, ConstantInt::get(Offset)); - } - } - // TODO: Could handle other cases, e.g. where add is indexing into field of - // struct etc. - } else if (CI.getOperand(0)->hasOneUse() && - match(CI.getOperand(0), m_Add(m_Value(X), m_ConstantInt(Cst)))) { - // Otherwise, if this is inttoptr(add x, cst), try to turn this into an - // "inttoptr+GEP" instead of "add+intptr". - - // Get the size of the pointee type. - uint64_t Size = TD->getTypePaddedSize(DestPointee); - - // Convert the constant to intptr type. - APInt Offset = Cst->getValue(); - Offset.sextOrTrunc(TD->getPointerSizeInBits()); - - // If Offset is evenly divisible by Size, we can do this xform. - if (Size && !APIntOps::srem(Offset, APInt(Offset.getBitWidth(), Size))){ - Offset = APIntOps::sdiv(Offset, APInt(Offset.getBitWidth(), Size)); - - Instruction *P = InsertNewInstBefore(new IntToPtrInst(X, CI.getType(), - "tmp"), CI); - return GetElementPtrInst::Create(P, ConstantInt::get(Offset), "tmp"); - } - } + return 0; } @@ -8604,10 +8945,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) { const Type *SrcTy = Src->getType(); const Type *DestTy = CI.getType(); - if (SrcTy->isInteger() && DestTy->isInteger()) { - if (Instruction *Result = commonIntCastTransforms(CI)) - return Result; - } else if (isa(SrcTy)) { + if (isa(SrcTy)) { if (Instruction *I = commonPointerCastTransforms(CI)) return I; } else { @@ -8640,7 +8978,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) { // If the source and destination are pointers, and this cast is equivalent // to a getelementptr X, 0, 0, 0... turn it into the appropriate gep. // This can enhance SROA and other transforms that want type-safe pointers. 
- Constant *ZeroUInt = Constant::getNullValue(Type::Int32Ty); + Constant *ZeroUInt = Context->getNullValue(Type::Int32Ty); unsigned NumZeros = 0; while (SrcElTy != DstElTy && isa(SrcElTy) && !isa(SrcElTy) && @@ -8657,6 +8995,29 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) { } } + if (const VectorType *DestVTy = dyn_cast(DestTy)) { + if (DestVTy->getNumElements() == 1) { + if (!isa(SrcTy)) { + Value *Elem = InsertCastBefore(Instruction::BitCast, Src, + DestVTy->getElementType(), CI); + return InsertElementInst::Create(Context->getUndef(DestTy), Elem, + Context->getNullValue(Type::Int32Ty)); + } + // FIXME: Canonicalize bitcast(insertelement) -> insertelement(bitcast) + } + } + + if (const VectorType *SrcVTy = dyn_cast(SrcTy)) { + if (SrcVTy->getNumElements() == 1) { + if (!isa(DestTy)) { + Instruction *Elem = + new ExtractElementInst(Src, Context->getNullValue(Type::Int32Ty)); + InsertNewInstBefore(Elem, CI); + return CastInst::Create(Instruction::BitCast, Elem, DestTy); + } + } + } + if (ShuffleVectorInst *SVI = dyn_cast(Src)) { if (SVI->hasOneUse()) { // Okay, we have (bitconvert (shuffle ..)). Check to see if this is @@ -8719,9 +9080,10 @@ static unsigned GetSelectFoldableOperands(Instruction *I) { /// GetSelectFoldableConstant - For the same transformation as the previous /// function, return the identity constant that goes into the select. -static Constant *GetSelectFoldableConstant(Instruction *I) { +static Constant *GetSelectFoldableConstant(Instruction *I, + LLVMContext *Context) { switch (I->getOpcode()) { - default: assert(0 && "This cannot happen!"); abort(); + default: llvm_unreachable("This cannot happen!"); case Instruction::Add: case Instruction::Sub: case Instruction::Or: @@ -8729,9 +9091,9 @@ static Constant *GetSelectFoldableConstant(Instruction *I) { case Instruction::Shl: case Instruction::LShr: case Instruction::AShr: - return Constant::getNullValue(I->getType()); + return Context->getNullValue(I->getType()); case Instruction::And: - return Constant::getAllOnesValue(I->getType()); + return Context->getAllOnesValue(I->getType()); case Instruction::Mul: return ConstantInt::get(I->getType(), 1); } @@ -8803,7 +9165,84 @@ Instruction *InstCombiner::FoldSelectOpOp(SelectInst &SI, Instruction *TI, else return BinaryOperator::Create(BO->getOpcode(), NewSI, MatchOp); } - assert(0 && "Shouldn't get here"); + llvm_unreachable("Shouldn't get here"); + return 0; +} + +static bool isSelect01(Constant *C1, Constant *C2) { + ConstantInt *C1I = dyn_cast(C1); + if (!C1I) + return false; + ConstantInt *C2I = dyn_cast(C2); + if (!C2I) + return false; + return (C1I->isZero() || C1I->isOne()) && (C2I->isZero() || C2I->isOne()); +} + +/// FoldSelectIntoOp - Try fold the select into one of the operands to +/// facilitate further optimization. +Instruction *InstCombiner::FoldSelectIntoOp(SelectInst &SI, Value *TrueVal, + Value *FalseVal) { + // See the comment above GetSelectFoldableOperands for a description of the + // transformation we are doing here. 
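GetSelectFoldableConstant supplies the identity element of the operation so that select C, (X op Y), X can be rewritten as X op (select C, Y, identity), which is what the new FoldSelectIntoOp helper introduced below does. A standalone check of that identity for a few of the listed opcodes (0 for add and shl, all-ones for and, 1 for mul):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t Xs[] = {0u, 1u, 0xDEADBEEFu, 0x80000000u};
  const uint32_t Ys[] = {0u, 3u, 42u, 0xFFFFFFFFu};
  for (int C = 0; C < 2; ++C) {
    const bool Cond = (C != 0);
    for (uint32_t X : Xs)
      for (uint32_t Y : Ys) {
        assert((Cond ? X + Y : X) == X + (Cond ? Y : 0u));            // add, identity 0
        assert((Cond ? (X & Y) : X) == (X & (Cond ? Y : ~0u)));       // and, identity ~0
        assert((Cond ? X * Y : X) == X * (Cond ? Y : 1u));            // mul, identity 1
        assert((Cond ? X << (Y & 31) : X) ==
               (X << (Cond ? (Y & 31) : 0u)));                        // shl, identity 0
      }
  }
  return 0;
}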
+ if (Instruction *TVI = dyn_cast(TrueVal)) { + if (TVI->hasOneUse() && TVI->getNumOperands() == 2 && + !isa(FalseVal)) { + if (unsigned SFO = GetSelectFoldableOperands(TVI)) { + unsigned OpToFold = 0; + if ((SFO & 1) && FalseVal == TVI->getOperand(0)) { + OpToFold = 1; + } else if ((SFO & 2) && FalseVal == TVI->getOperand(1)) { + OpToFold = 2; + } + + if (OpToFold) { + Constant *C = GetSelectFoldableConstant(TVI, Context); + Value *OOp = TVI->getOperand(2-OpToFold); + // Avoid creating select between 2 constants unless it's selecting + // between 0 and 1. + if (!isa(OOp) || isSelect01(C, cast(OOp))) { + Instruction *NewSel = SelectInst::Create(SI.getCondition(), OOp, C); + InsertNewInstBefore(NewSel, SI); + NewSel->takeName(TVI); + if (BinaryOperator *BO = dyn_cast(TVI)) + return BinaryOperator::Create(BO->getOpcode(), FalseVal, NewSel); + llvm_unreachable("Unknown instruction!!"); + } + } + } + } + } + + if (Instruction *FVI = dyn_cast(FalseVal)) { + if (FVI->hasOneUse() && FVI->getNumOperands() == 2 && + !isa(TrueVal)) { + if (unsigned SFO = GetSelectFoldableOperands(FVI)) { + unsigned OpToFold = 0; + if ((SFO & 1) && TrueVal == FVI->getOperand(0)) { + OpToFold = 1; + } else if ((SFO & 2) && TrueVal == FVI->getOperand(1)) { + OpToFold = 2; + } + + if (OpToFold) { + Constant *C = GetSelectFoldableConstant(FVI, Context); + Value *OOp = FVI->getOperand(2-OpToFold); + // Avoid creating select between 2 constants unless it's selecting + // between 0 and 1. + if (!isa(OOp) || isSelect01(C, cast(OOp))) { + Instruction *NewSel = SelectInst::Create(SI.getCondition(), C, OOp); + InsertNewInstBefore(NewSel, SI); + NewSel->takeName(FVI); + if (BinaryOperator *BO = dyn_cast(FVI)) + return BinaryOperator::Create(BO->getOpcode(), TrueVal, NewSel); + llvm_unreachable("Unknown instruction!!"); + } + } + } + } + } + return 0; } @@ -8832,7 +9271,7 @@ Instruction *InstCombiner::visitSelectInstWithICmp(SelectInst &SI, if (CI->isMinValue(Pred == ICmpInst::ICMP_SLT)) return ReplaceInstUsesWith(SI, FalseVal); // X < C ? X : C-1 --> X > C-1 ? C-1 : X - Constant *AdjustedRHS = SubOne(CI); + Constant *AdjustedRHS = SubOne(CI, Context); if ((CmpLHS == TrueVal && AdjustedRHS == FalseVal) || (CmpLHS == FalseVal && AdjustedRHS == TrueVal)) { Pred = ICmpInst::getSwappedPredicate(Pred); @@ -8852,7 +9291,7 @@ Instruction *InstCombiner::visitSelectInstWithICmp(SelectInst &SI, if (CI->isMaxValue(Pred == ICmpInst::ICMP_SGT)) return ReplaceInstUsesWith(SI, FalseVal); // X > C ? X : C+1 --> X < C+1 ? C+1 : X - Constant *AdjustedRHS = AddOne(CI); + Constant *AdjustedRHS = AddOne(CI, Context); if ((CmpLHS == TrueVal && AdjustedRHS == FalseVal) || (CmpLHS == FalseVal && AdjustedRHS == TrueVal)) { Pred = ICmpInst::getSwappedPredicate(Pred); @@ -8871,11 +9310,11 @@ Instruction *InstCombiner::visitSelectInstWithICmp(SelectInst &SI, // (x ashr x, 31 -> all ones if signed // (x >s -1) ? 
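The min/max canonicalization at the end of this hunk, X < C ? X : C-1 becomes X > C-1 ? C-1 : X and symmetrically for the AddOne case, is a pure comparison identity as long as C-1 and C+1 do not wrap, which the preceding isMinValue and isMaxValue checks rule out. A quick check over signed ints with non-extreme C:

#include <cassert>
#include <climits>

int main() {
  const int Xs[] = {INT_MIN, -5, -1, 0, 1, 7, 8, 9, INT_MAX};
  const int Cs[] = {-3, 0, 8, 100};  // chosen so C-1 and C+1 cannot overflow
  for (int X : Xs)
    for (int C : Cs) {
      assert((X < C ? X : C - 1) == (X > C - 1 ? C - 1 : X));  // the SubOne form
      assert((X > C ? X : C + 1) == (X < C + 1 ? C + 1 : X));  // the AddOne form
    }
  return 0;
}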
-1 : 0 -> ashr x, 31 -> all ones if not signed CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE; - if (match(TrueVal, m_ConstantInt<-1>()) && - match(FalseVal, m_ConstantInt<0>())) + if (match(TrueVal, m_ConstantInt<-1>(), *Context) && + match(FalseVal, m_ConstantInt<0>(), *Context)) Pred = ICI->getPredicate(); - else if (match(TrueVal, m_ConstantInt<0>()) && - match(FalseVal, m_ConstantInt<-1>())) + else if (match(TrueVal, m_ConstantInt<0>(), *Context) && + match(FalseVal, m_ConstantInt<-1>(), *Context)) Pred = CmpInst::getInversePredicate(ICI->getPredicate()); if (Pred != CmpInst::BAD_ICMP_PREDICATE) { @@ -8890,7 +9329,7 @@ Instruction *InstCombiner::visitSelectInstWithICmp(SelectInst &SI, (Pred == ICmpInst::ICMP_SGT && Op1CV.isAllOnesValue())) { Value *In = ICI->getOperand(0); Value *Sh = ConstantInt::get(In->getType(), - In->getType()->getPrimitiveSizeInBits()-1); + In->getType()->getScalarSizeInBits()-1); In = InsertNewInstBefore(BinaryOperator::CreateAShr(In, Sh, In->getName()+".lobit"), *ICI); @@ -8899,7 +9338,7 @@ Instruction *InstCombiner::visitSelectInstWithICmp(SelectInst &SI, true/*SExt*/, "tmp", ICI); if (Pred == ICmpInst::ICMP_SGT) - In = InsertNewInstBefore(BinaryOperator::CreateNot(In, + In = InsertNewInstBefore(BinaryOperator::CreateNot(*Context, In, In->getName()+".not"), *ICI); return ReplaceInstUsesWith(SI, In); @@ -8964,7 +9403,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) { } else { // Change: A = select B, false, C --> A = and !B, C Value *NotCond = - InsertNewInstBefore(BinaryOperator::CreateNot(CondVal, + InsertNewInstBefore(BinaryOperator::CreateNot(*Context, CondVal, "not."+CondVal->getName()), SI); return BinaryOperator::CreateAnd(NotCond, FalseVal); } @@ -8975,7 +9414,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) { } else { // Change: A = select B, C, true --> A = or !B, C Value *NotCond = - InsertNewInstBefore(BinaryOperator::CreateNot(CondVal, + InsertNewInstBefore(BinaryOperator::CreateNot(*Context, CondVal, "not."+CondVal->getName()), SI); return BinaryOperator::CreateOr(NotCond, TrueVal); } @@ -8998,32 +9437,12 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) { } else if (TrueValC->isZero() && FalseValC->getValue() == 1) { // select C, 0, 1 -> zext !C to int Value *NotCond = - InsertNewInstBefore(BinaryOperator::CreateNot(CondVal, + InsertNewInstBefore(BinaryOperator::CreateNot(*Context, CondVal, "not."+CondVal->getName()), SI); return CastInst::Create(Instruction::ZExt, NotCond, SI.getType()); } if (ICmpInst *IC = dyn_cast(SI.getCondition())) { - - // (x ashr x, 31 - if (TrueValC->isAllOnesValue() && FalseValC->isZero()) - if (ConstantInt *CmpCst = dyn_cast(IC->getOperand(1))) { - if (IC->getPredicate() == ICmpInst::ICMP_SLT && CmpCst->isZero()) { - // The comparison constant and the result are not neccessarily the - // same width. Make an all-ones value by inserting a AShr. - Value *X = IC->getOperand(0); - uint32_t Bits = X->getType()->getPrimitiveSizeInBits(); - Constant *ShAmt = ConstantInt::get(X->getType(), Bits-1); - Instruction *SRA = BinaryOperator::Create(Instruction::AShr, X, - ShAmt, "ones"); - InsertNewInstBefore(SRA, SI); - - // Then cast to the appropriate width. - return CastInst::CreateIntegerCast(SRA, SI.getType(), true); - } - } - - // If one of the constants is zero (we know they can't both be) and we // have an icmp instruction with zero, and we have an 'and' with the // non-constant value, eliminate this whole mess. 
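The select-with-icmp cases at the top of this hunk turn a sign test into shifts: (X < 0) ? 1 : 0 is a logical shift of the sign bit down to bit 0, and (X < 0) ? -1 : 0 is an arithmetic shift that smears it across the word. A small sketch for i32; the signed >> is assumed to behave as a two's-complement arithmetic shift, which is what LLVM's ashr produces:

#include <cassert>
#include <cstdint>

int main() {
  const int32_t Samples[] = {INT32_MIN, -123456, -1, 0, 1, 42, INT32_MAX};
  for (int32_t X : Samples) {
    // (X < 0) ? 1 : 0  ==  lshr X, 31  (logical shift of the bit pattern).
    assert((uint32_t)(X < 0 ? 1 : 0) == ((uint32_t)X >> 31));
    // (X < 0) ? -1 : 0  ==  ashr X, 31  (arithmetic shift assumption noted above).
    assert((X < 0 ? -1 : 0) == (X >> 31));
  }
  return 0;
}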
This corresponds to @@ -9109,11 +9528,15 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) { // Turn select C, (X+Y), (X-Y) --> (X+(select C, Y, (-Y))). This is // even legal for FP. - if (TI->getOpcode() == Instruction::Sub && - FI->getOpcode() == Instruction::Add) { + if ((TI->getOpcode() == Instruction::Sub && + FI->getOpcode() == Instruction::Add) || + (TI->getOpcode() == Instruction::FSub && + FI->getOpcode() == Instruction::FAdd)) { AddOp = FI; SubOp = TI; - } else if (FI->getOpcode() == Instruction::Sub && - TI->getOpcode() == Instruction::Add) { + } else if ((FI->getOpcode() == Instruction::Sub && + TI->getOpcode() == Instruction::Add) || + (FI->getOpcode() == Instruction::FSub && + TI->getOpcode() == Instruction::FAdd)) { AddOp = TI; SubOp = FI; } @@ -9130,10 +9553,11 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) { // select C, (add X, Y), (sub X, Z) Value *NegVal; // Compute -Z if (Constant *C = dyn_cast(SubOp->getOperand(1))) { - NegVal = ConstantExpr::getNeg(C); + NegVal = Context->getConstantExprNeg(C); } else { NegVal = InsertNewInstBefore( - BinaryOperator::CreateNeg(SubOp->getOperand(1), "tmp"), SI); + BinaryOperator::CreateNeg(*Context, SubOp->getOperand(1), + "tmp"), SI); } Value *NewTrueOp = OtherAddOp; @@ -9152,58 +9576,9 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) { // See if we can fold the select into one of our operands. if (SI.getType()->isInteger()) { - // See the comment above GetSelectFoldableOperands for a description of the - // transformation we are doing here. - if (Instruction *TVI = dyn_cast(TrueVal)) - if (TVI->hasOneUse() && TVI->getNumOperands() == 2 && - !isa(FalseVal)) - if (unsigned SFO = GetSelectFoldableOperands(TVI)) { - unsigned OpToFold = 0; - if ((SFO & 1) && FalseVal == TVI->getOperand(0)) { - OpToFold = 1; - } else if ((SFO & 2) && FalseVal == TVI->getOperand(1)) { - OpToFold = 2; - } - - if (OpToFold) { - Constant *C = GetSelectFoldableConstant(TVI); - Instruction *NewSel = - SelectInst::Create(SI.getCondition(), - TVI->getOperand(2-OpToFold), C); - InsertNewInstBefore(NewSel, SI); - NewSel->takeName(TVI); - if (BinaryOperator *BO = dyn_cast(TVI)) - return BinaryOperator::Create(BO->getOpcode(), FalseVal, NewSel); - else { - assert(0 && "Unknown instruction!!"); - } - } - } - - if (Instruction *FVI = dyn_cast(FalseVal)) - if (FVI->hasOneUse() && FVI->getNumOperands() == 2 && - !isa(TrueVal)) - if (unsigned SFO = GetSelectFoldableOperands(FVI)) { - unsigned OpToFold = 0; - if ((SFO & 1) && TrueVal == FVI->getOperand(0)) { - OpToFold = 1; - } else if ((SFO & 2) && TrueVal == FVI->getOperand(1)) { - OpToFold = 2; - } - - if (OpToFold) { - Constant *C = GetSelectFoldableConstant(FVI); - Instruction *NewSel = - SelectInst::Create(SI.getCondition(), C, - FVI->getOperand(2-OpToFold)); - InsertNewInstBefore(NewSel, SI); - NewSel->takeName(FVI); - if (BinaryOperator *BO = dyn_cast(FVI)) - return BinaryOperator::Create(BO->getOpcode(), TrueVal, NewSel); - else - assert(0 && "Unknown instruction!!"); - } - } + Instruction *FoldI = FoldSelectIntoOp(SI, TrueVal, FalseVal); + if (FoldI) + return FoldI; } if (BinaryOperator::isNot(CondVal)) { @@ -9228,7 +9603,7 @@ static unsigned EnforceKnownAlignment(Value *V, User *U = dyn_cast(V); if (!U) return Align; - switch (getOpcode(U)) { + switch (Operator::getOpcode(U)) { default: break; case Instruction::BitCast: return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign); @@ -9305,7 +9680,8 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) { 
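The select C, (add X, Y), (sub X, Z) rewrite hoists X out and selects between Y and the inserted negation of Z; in wrapping integer arithmetic that is an identity (the hunk extends the same matching to the fadd/fsub forms). A sketch over uint32_t, where 0u - Z plays the role of the CreateNeg the code emits:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t Vals[] = {0u, 1u, 7u, 0x80000000u, 0xFFFFFFFFu};
  for (int C = 0; C < 2; ++C) {
    const bool Cond = (C != 0);
    for (uint32_t X : Vals)
      for (uint32_t Y : Vals)
        for (uint32_t Z : Vals) {
          uint32_t Original = Cond ? X + Y : X - Z;
          uint32_t Folded   = X + (Cond ? Y : 0u - Z);  // 0u - Z is the inserted negation
          assert(Original == Folded);
        }
  }
  return 0;
}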
unsigned CopyAlign = MI->getAlignment(); if (CopyAlign < MinAlign) { - MI->setAlignment(MinAlign); + MI->setAlignment(ConstantInt::get(MI->getAlignmentType(), + MinAlign, false)); return MI; } @@ -9325,7 +9701,8 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) { return 0; // If not 1/2/4/8 bytes, exit. // Use an integer load+store unless we can find something better. - Type *NewPtrTy = PointerType::getUnqual(IntegerType::get(Size<<3)); + Type *NewPtrTy = + Context->getPointerTypeUnqual(Context->getIntegerType(Size<<3)); // Memcpy forces the use of i8* for the source and destination. That means // that if you're using memcpy to move one double around, you'll get a cast @@ -9335,7 +9712,7 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) { // integer datatype. if (Value *Op = getBitCastOperand(MI->getOperand(1))) { const Type *SrcETy = cast(Op->getType())->getElementType(); - if (SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) { + if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) { // The SrcETy might be something like {{{double}}} or [1 x double]. Rip // down through these levels if so. while (!SrcETy->isSingleValueType()) { @@ -9354,7 +9731,7 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) { } if (SrcETy->isSingleValueType()) - NewPtrTy = PointerType::getUnqual(SrcETy); + NewPtrTy = Context->getPointerTypeUnqual(SrcETy); } } @@ -9371,14 +9748,15 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) { InsertNewInstBefore(new StoreInst(L, Dest, false, DstAlign), *MI); // Set the size of the copy to 0, it will be deleted on the next iteration. - MI->setOperand(3, Constant::getNullValue(MemOpLength->getType())); + MI->setOperand(3, Context->getNullValue(MemOpLength->getType())); return MI; } Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) { unsigned Alignment = GetOrEnforceKnownAlignment(MI->getDest()); if (MI->getAlignment() < Alignment) { - MI->setAlignment(Alignment); + MI->setAlignment(ConstantInt::get(MI->getAlignmentType(), + Alignment, false)); return MI; } @@ -9395,21 +9773,21 @@ Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) { // memset(s,c,n) -> store s, c (for n=1,2,4,8) if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) { - const Type *ITy = IntegerType::get(Len*8); // n=1 -> i8. + const Type *ITy = Context->getIntegerType(Len*8); // n=1 -> i8. Value *Dest = MI->getDest(); - Dest = InsertBitCastBefore(Dest, PointerType::getUnqual(ITy), *MI); + Dest = InsertBitCastBefore(Dest, Context->getPointerTypeUnqual(ITy), *MI); // Alignment 0 is identity for alignment 1 for memset, but not store. if (Alignment == 0) Alignment = 1; // Extract the fill value and store. uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL; - InsertNewInstBefore(new StoreInst(ConstantInt::get(ITy, Fill), Dest, false, - Alignment), *MI); + InsertNewInstBefore(new StoreInst(ConstantInt::get(ITy, Fill), + Dest, false, Alignment), *MI); // Set the size of the copy to 0, it will be deleted on the next iteration. - MI->setLength(Constant::getNullValue(LenC->getType())); + MI->setLength(Context->getNullValue(LenC->getType())); return MI; } @@ -9422,6 +9800,16 @@ Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) { /// the heavy lifting. /// Instruction *InstCombiner::visitCallInst(CallInst &CI) { + // If the caller function is nounwind, mark the call as nounwind, even if the + // callee isn't. 
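SimplifyMemSet builds the value to store by multiplying the fill byte by 0x0101010101010101, i.e. replicating it into every byte lane, and the narrower n = 1, 2, 4 cases are truncations of the same pattern; SimplifyMemTransfer similarly rewrites small copies as one integer load and store. A host-side check of the replication trick (independent of byte order, since all bytes are equal):

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  for (unsigned C = 0; C < 256; ++C) {
    unsigned char Buf[8];
    std::memset(Buf, (int)C, sizeof(Buf));

    uint64_t FromMemset = 0;
    std::memcpy(&FromMemset, Buf, sizeof(FromMemset));

    // The fold builds the value directly by replicating the fill byte.
    uint64_t Fill = (uint64_t)C * 0x0101010101010101ULL;
    assert(FromMemset == Fill);

    // Narrower widths just truncate the replicated pattern.
    assert((uint32_t)FromMemset == (uint32_t)Fill);
    assert((uint16_t)FromMemset == (uint16_t)Fill);
  }
  return 0;
}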
+ if (CI.getParent()->getParent()->doesNotThrow() && + !CI.doesNotThrow()) { + CI.setDoesNotThrow(); + return &CI; + } + + + IntrinsicInst *II = dyn_cast(&CI); if (!II) return visitCallSite(&CI); @@ -9492,7 +9880,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) { // Turn X86 loadups -> load if the pointer is known aligned. if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) { Value *Ptr = InsertBitCastBefore(II->getOperand(1), - PointerType::getUnqual(II->getType()), + Context->getPointerTypeUnqual(II->getType()), CI); return new LoadInst(Ptr); } @@ -9502,7 +9890,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) { // Turn stvx -> store if the pointer is known aligned. if (GetOrEnforceKnownAlignment(II->getOperand(2), 16) >= 16) { const Type *OpPtrTy = - PointerType::getUnqual(II->getOperand(1)->getType()); + Context->getPointerTypeUnqual(II->getOperand(1)->getType()); Value *Ptr = InsertBitCastBefore(II->getOperand(2), OpPtrTy, CI); return new StoreInst(II->getOperand(1), Ptr); } @@ -9513,7 +9901,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) { // Turn X86 storeu -> store if the pointer is known aligned. if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) { const Type *OpPtrTy = - PointerType::getUnqual(II->getOperand(2)->getType()); + Context->getPointerTypeUnqual(II->getOperand(2)->getType()); Value *Ptr = InsertBitCastBefore(II->getOperand(1), OpPtrTy, CI); return new StoreInst(II->getOperand(2), Ptr); } @@ -9553,7 +9941,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) { // Cast the input vectors to byte vectors. Value *Op0 =InsertBitCastBefore(II->getOperand(1),Mask->getType(),CI); Value *Op1 =InsertBitCastBefore(II->getOperand(2),Mask->getType(),CI); - Value *Result = UndefValue::get(Op0->getType()); + Value *Result = Context->getUndef(Op0->getType()); // Only extract each element once. Value *ExtractedElts[32]; @@ -9567,14 +9955,16 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) { if (ExtractedElts[Idx] == 0) { Instruction *Elt = - new ExtractElementInst(Idx < 16 ? Op0 : Op1, Idx&15, "tmp"); + new ExtractElementInst(Idx < 16 ? Op0 : Op1, + ConstantInt::get(Type::Int32Ty, Idx&15, false), "tmp"); InsertNewInstBefore(Elt, CI); ExtractedElts[Idx] = Elt; } // Insert this value into the result vector. Result = InsertElementInst::Create(Result, ExtractedElts[Idx], - i, "tmp"); + ConstantInt::get(Type::Int32Ty, i, false), + "tmp"); InsertNewInstBefore(cast(Result), CI); } return CastInst::Create(Instruction::BitCast, Result, CI.getType()); @@ -9655,7 +10045,7 @@ static bool isSafeToEliminateVarargsCast(const CallSite CS, const Type* DstTy = cast(CI->getType())->getElementType(); if (!SrcTy->isSized() || !DstTy->isSized()) return false; - if (TD->getTypePaddedSize(SrcTy) != TD->getTypePaddedSize(DstTy)) + if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy)) return false; return true; } @@ -9676,11 +10066,11 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) { Instruction *OldCall = CS.getInstruction(); // If the call and callee calling conventions don't match, this call must // be unreachable, as the call is undefined. 
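The loadu and storeu intrinsic rewrites rely on an unaligned vector load from a pointer that is in fact 16-byte aligned producing the same value as a plain aligned load, which is the situation GetOrEnforceKnownAlignment establishes. A tiny x86-only illustration with the SSE intrinsics; it requires a target with SSE and is not InstCombine code:

#include <cassert>
#include <cstring>
#include <immintrin.h>

int main() {
  // 16-byte aligned data, the case the alignment analysis proves.
  alignas(16) float Data[4] = {1.0f, 2.0f, 3.0f, 4.0f};

  __m128 Unaligned = _mm_loadu_ps(Data);  // what the loadu form reads
  __m128 Aligned   = _mm_load_ps(Data);   // a plain aligned load

  float A[4], B[4];
  _mm_storeu_ps(A, Unaligned);
  _mm_storeu_ps(B, Aligned);
  assert(std::memcmp(A, B, sizeof(A)) == 0);
  return 0;
}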
- new StoreInst(ConstantInt::getTrue(), - UndefValue::get(PointerType::getUnqual(Type::Int1Ty)), - OldCall); + new StoreInst(Context->getTrue(), + Context->getUndef(Context->getPointerTypeUnqual(Type::Int1Ty)), + OldCall); if (!OldCall->use_empty()) - OldCall->replaceAllUsesWith(UndefValue::get(OldCall->getType())); + OldCall->replaceAllUsesWith(Context->getUndef(OldCall->getType())); if (isa(OldCall)) // Not worth removing an invoke here. return EraseInstFromFunction(*OldCall); return 0; @@ -9690,18 +10080,18 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) { // This instruction is not reachable, just remove it. We insert a store to // undef so that we know that this code is not reachable, despite the fact // that we can't modify the CFG here. - new StoreInst(ConstantInt::getTrue(), - UndefValue::get(PointerType::getUnqual(Type::Int1Ty)), + new StoreInst(Context->getTrue(), + Context->getUndef(Context->getPointerTypeUnqual(Type::Int1Ty)), CS.getInstruction()); if (!CS.getInstruction()->use_empty()) CS.getInstruction()-> - replaceAllUsesWith(UndefValue::get(CS.getInstruction()->getType())); + replaceAllUsesWith(Context->getUndef(CS.getInstruction()->getType())); if (InvokeInst *II = dyn_cast(CS.getInstruction())) { // Don't break the CFG, insert a dummy cond branch. BranchInst::Create(II->getNormalDest(), II->getUnwindDest(), - ConstantInt::getTrue(), II); + Context->getTrue(), II); } return EraseInstFromFunction(*CS.getInstruction()); } @@ -9765,8 +10155,10 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) { if (Callee->isDeclaration() && // Conversion is ok if changing from one pointer type to another or from // a pointer to an integer of the same size. - !((isa(OldRetTy) || OldRetTy == TD->getIntPtrType()) && - (isa(NewRetTy) || NewRetTy == TD->getIntPtrType()))) + !((isa(OldRetTy) || !TD || + OldRetTy == TD->getIntPtrType()) && + (isa(NewRetTy) || !TD || + NewRetTy == TD->getIntPtrType()))) return false; // Cannot transform this return value. if (!Caller->use_empty() && @@ -9812,8 +10204,8 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) { // Converting from one pointer type to another or between a pointer and an // integer of the same size is safe even if we do not have a body. bool isConvertible = ActTy == ParamTy || - ((isa(ParamTy) || ParamTy == TD->getIntPtrType()) && - (isa(ActTy) || ActTy == TD->getIntPtrType())); + (TD && ((isa(ParamTy) || ParamTy == TD->getIntPtrType()) && + (isa(ActTy) || ActTy == TD->getIntPtrType()))); if (Callee->isDeclaration() && !isConvertible) return false; } @@ -9872,7 +10264,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) { // If the function takes more arguments than the call was taking, add them // now... for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i) - Args.push_back(Constant::getNullValue(FT->getParamType(i))); + Args.push_back(Context->getNullValue(FT->getParamType(i))); // If we are removing arguments to the function, emit an obnoxious warning... if (FT->getNumParams() < NumActualArgs) { @@ -9945,7 +10337,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) { } AddUsersToWorkList(*Caller); } else { - NV = UndefValue::get(Caller->getType()); + NV = Context->getUndef(Caller->getType()); } } @@ -10070,9 +10462,12 @@ Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) { // Replace the trampoline call with a direct call. Let the generic // code sort out any function type mismatches. 
FunctionType *NewFTy = - FunctionType::get(FTy->getReturnType(), NewTypes, FTy->isVarArg()); - Constant *NewCallee = NestF->getType() == PointerType::getUnqual(NewFTy) ? - NestF : ConstantExpr::getBitCast(NestF, PointerType::getUnqual(NewFTy)); + Context->getFunctionType(FTy->getReturnType(), NewTypes, + FTy->isVarArg()); + Constant *NewCallee = + NestF->getType() == Context->getPointerTypeUnqual(NewFTy) ? + NestF : Context->getConstantExprBitCast(NestF, + Context->getPointerTypeUnqual(NewFTy)); const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs.begin(),NewAttrs.end()); Instruction *NewCaller; @@ -10104,7 +10499,8 @@ Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) { // parameter, there is no need to adjust the argument list. Let the generic // code sort out any function type mismatches. Constant *NewCallee = - NestF->getType() == PTy ? NestF : ConstantExpr::getBitCast(NestF, PTy); + NestF->getType() == PTy ? NestF : + Context->getConstantExprBitCast(NestF, PTy); CS.setCalledFunction(NewCallee); return CS.getInstruction(); } @@ -10185,8 +10581,8 @@ Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) { if (BinaryOperator *BinOp = dyn_cast(FirstInst)) return BinaryOperator::Create(BinOp->getOpcode(), LHSVal, RHSVal); CmpInst *CIOp = cast(FirstInst); - return CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(), LHSVal, - RHSVal); + return CmpInst::Create(*Context, CIOp->getOpcode(), CIOp->getPredicate(), + LHSVal, RHSVal); } Instruction *InstCombiner::FoldPHIArgGEPIntoPHI(PHINode &PN) { @@ -10432,7 +10828,7 @@ Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) { if (BinaryOperator *BinOp = dyn_cast(FirstInst)) return BinaryOperator::Create(BinOp->getOpcode(), PhiVal, ConstantOp); if (CmpInst *CIOp = dyn_cast(FirstInst)) - return CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(), + return CmpInst::Create(*Context, CIOp->getOpcode(), CIOp->getPredicate(), PhiVal, ConstantOp); assert(isa(FirstInst) && "Unknown operation"); @@ -10525,7 +10921,7 @@ Instruction *InstCombiner::visitPHINode(PHINode &PN) { SmallPtrSet PotentiallyDeadPHIs; PotentiallyDeadPHIs.insert(&PN); if (DeadPHICycle(PU, PotentiallyDeadPHIs)) - return ReplaceInstUsesWith(PN, UndefValue::get(PN.getType())); + return ReplaceInstUsesWith(PN, Context->getUndef(PN.getType())); } // If this phi has a single use, and if that use just computes a value for @@ -10537,7 +10933,7 @@ Instruction *InstCombiner::visitPHINode(PHINode &PN) { if (PHIUser->hasOneUse() && (isa(PHIUser) || isa(PHIUser)) && PHIUser->use_back() == &PN) { - return ReplaceInstUsesWith(PN, UndefValue::get(PN.getType())); + return ReplaceInstUsesWith(PN, Context->getUndef(PN.getType())); } } @@ -10581,8 +10977,8 @@ Instruction *InstCombiner::visitPHINode(PHINode &PN) { static Value *InsertCastToIntPtrTy(Value *V, const Type *DTy, Instruction *InsertPoint, InstCombiner *IC) { - unsigned PtrSize = DTy->getPrimitiveSizeInBits(); - unsigned VTySize = V->getType()->getPrimitiveSizeInBits(); + unsigned PtrSize = DTy->getScalarSizeInBits(); + unsigned VTySize = V->getType()->getScalarSizeInBits(); // We must cast correctly to the pointer type. Ensure that we // sign extend the integer value if it is smaller as this is // used for address computation. 
@@ -10601,7 +10997,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { return ReplaceInstUsesWith(GEP, PtrOp); if (isa(GEP.getOperand(0))) - return ReplaceInstUsesWith(GEP, UndefValue::get(GEP.getType())); + return ReplaceInstUsesWith(GEP, Context->getUndef(GEP.getType())); bool HasZeroPointerIndex = false; if (Constant *C = dyn_cast(GEP.getOperand(1))) @@ -10616,14 +11012,14 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { gep_type_iterator GTI = gep_type_begin(GEP); for (User::op_iterator i = GEP.op_begin() + 1, e = GEP.op_end(); i != e; ++i, ++GTI) { - if (isa(*GTI)) { + if (TD && isa(*GTI)) { if (CastInst *CI = dyn_cast(*i)) { if (CI->getOpcode() == Instruction::ZExt || CI->getOpcode() == Instruction::SExt) { const Type *SrcTy = CI->getOperand(0)->getType(); // We can eliminate a cast from i32 to i64 iff the target // is a 32-bit pointer target. - if (SrcTy->getPrimitiveSizeInBits() >= TD->getPointerSizeInBits()) { + if (SrcTy->getScalarSizeInBits() >= TD->getPointerSizeInBits()) { MadeChange = true; *i = CI->getOperand(0); } @@ -10637,7 +11033,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { Value *Op = *i; if (TD->getTypeSizeInBits(Op->getType()) > TD->getPointerSizeInBits()) { if (Constant *C = dyn_cast(Op)) { - *i = ConstantExpr::getTrunc(C, TD->getIntPtrType()); + *i = Context->getConstantExprTrunc(C, TD->getIntPtrType()); MadeChange = true; } else { Op = InsertCastBefore(Instruction::Trunc, Op, TD->getIntPtrType(), @@ -10647,7 +11043,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { } } else if (TD->getTypeSizeInBits(Op->getType()) < TD->getPointerSizeInBits()) { if (Constant *C = dyn_cast(Op)) { - *i = ConstantExpr::getSExt(C, TD->getIntPtrType()); + *i = Context->getConstantExprSExt(C, TD->getIntPtrType()); MadeChange = true; } else { Op = InsertCastBefore(Instruction::SExt, Op, TD->getIntPtrType(), @@ -10691,19 +11087,21 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { // With: T = long A+B; gep %P, T, ... // Value *Sum, *SO1 = SrcGEPOperands.back(), *GO1 = GEP.getOperand(1); - if (SO1 == Constant::getNullValue(SO1->getType())) { + if (SO1 == Context->getNullValue(SO1->getType())) { Sum = GO1; - } else if (GO1 == Constant::getNullValue(GO1->getType())) { + } else if (GO1 == Context->getNullValue(GO1->getType())) { Sum = SO1; } else { // If they aren't the same type, convert both to an integer of the // target's pointer size. if (SO1->getType() != GO1->getType()) { if (Constant *SO1C = dyn_cast(SO1)) { - SO1 = ConstantExpr::getIntegerCast(SO1C, GO1->getType(), true); + SO1 = + Context->getConstantExprIntegerCast(SO1C, GO1->getType(), true); } else if (Constant *GO1C = dyn_cast(GO1)) { - GO1 = ConstantExpr::getIntegerCast(GO1C, SO1->getType(), true); - } else { + GO1 = + Context->getConstantExprIntegerCast(GO1C, SO1->getType(), true); + } else if (TD) { unsigned PS = TD->getPointerSizeInBits(); if (TD->getTypeSizeInBits(SO1->getType()) == PS) { // Convert GO1 to SO1's type. 
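The gep-of-gep combine replaces two chained single-index GEPs with one GEP over the summed index, after the indices have been normalized to the pointer width (narrow indices sign-extended, wide ones truncated). In C++ pointer arithmetic the corresponding identities look like this:

#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  int Arr[64] = {};
  int *P = Arr;

  // gep(gep P, A), B and gep P, (A + B) address the same element.
  for (std::ptrdiff_t A = 0; A < 8; ++A)
    for (std::ptrdiff_t B = 0; B < 8; ++B)
      assert((P + A) + B == P + (A + B));

  // Indices are first normalized to the pointer width: a narrower index is
  // sign-extended, so a 32-bit index and its 64-bit extension agree.
  int32_t Narrow = -3;
  int64_t Wide = (int64_t)Narrow;
  assert((P + 8) + Narrow == (P + 8) + Wide);
  return 0;
}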
@@ -10720,7 +11118,8 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { } } if (isa(SO1) && isa(GO1)) - Sum = ConstantExpr::getAdd(cast(SO1), cast(GO1)); + Sum = Context->getConstantExprAdd(cast(SO1), + cast(GO1)); else { Sum = BinaryOperator::CreateAdd(SO1, GO1, PtrOp->getName()+".sum"); InsertNewInstBefore(cast(Sum), GEP); @@ -10762,7 +11161,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { Indices.push_back(cast(*I)); if (I == E) { // If they are all constants... - Constant *CE = ConstantExpr::getGetElementPtr(GV, + Constant *CE = Context->getConstantExprGetElementPtr(GV, &Indices[0],Indices.size()); // Replace all uses of the GEP with the new constexpr... @@ -10809,11 +11208,11 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { // into: %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast const Type *SrcElTy = cast(X->getType())->getElementType(); const Type *ResElTy=cast(PtrOp->getType())->getElementType(); - if (isa(SrcElTy) && - TD->getTypePaddedSize(cast(SrcElTy)->getElementType()) == - TD->getTypePaddedSize(ResElTy)) { + if (TD && isa(SrcElTy) && + TD->getTypeAllocSize(cast(SrcElTy)->getElementType()) == + TD->getTypeAllocSize(ResElTy)) { Value *Idx[2]; - Idx[0] = Constant::getNullValue(Type::Int32Ty); + Idx[0] = Context->getNullValue(Type::Int32Ty); Idx[1] = GEP.getOperand(1); Value *V = InsertNewInstBefore( GetElementPtrInst::Create(X, Idx, Idx + 2, GEP.getName()), GEP); @@ -10826,9 +11225,9 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { // (where tmp = 8*tmp2) into: // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast - if (isa(SrcElTy) && ResElTy == Type::Int8Ty) { + if (TD && isa(SrcElTy) && ResElTy == Type::Int8Ty) { uint64_t ArrayEltSize = - TD->getTypePaddedSize(cast(SrcElTy)->getElementType()); + TD->getTypeAllocSize(cast(SrcElTy)->getElementType()); // Check to see if "tmp" is a scale by a multiple of ArrayEltSize. We // allow either a mul, shift, or constant here. @@ -10836,7 +11235,8 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { ConstantInt *Scale = 0; if (ArrayEltSize == 1) { NewIdx = GEP.getOperand(1); - Scale = ConstantInt::get(NewIdx->getType(), 1); + Scale = + ConstantInt::get(cast(NewIdx->getType()), 1); } else if (ConstantInt *CI = dyn_cast(GEP.getOperand(1))) { NewIdx = ConstantInt::get(CI->getType(), 1); Scale = CI; @@ -10845,7 +11245,8 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { isa(Inst->getOperand(1))) { ConstantInt *ShAmt = cast(Inst->getOperand(1)); uint32_t ShAmtVal = ShAmt->getLimitedValue(64); - Scale = ConstantInt::get(Inst->getType(), 1ULL << ShAmtVal); + Scale = ConstantInt::get(cast(Inst->getType()), + 1ULL << ShAmtVal); NewIdx = Inst->getOperand(0); } else if (Inst->getOpcode() == Instruction::Mul && isa(Inst->getOperand(1))) { @@ -10863,7 +11264,8 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { Scale = ConstantInt::get(Scale->getType(), Scale->getZExtValue() / ArrayEltSize); if (Scale->getZExtValue() != 1) { - Constant *C = ConstantExpr::getIntegerCast(Scale, NewIdx->getType(), + Constant *C = + Context->getConstantExprIntegerCast(Scale, NewIdx->getType(), false /*ZExt*/); Instruction *Sc = BinaryOperator::CreateMul(NewIdx, C, "idxscale"); NewIdx = InsertNewInstBefore(Sc, GEP); @@ -10871,7 +11273,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { // Insert the new GEP instruction. 
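The array rewrite at the end of this hunk divides a byte offset by the element size, turning a gep of an i8* bitcast with index 8*K into a typed gep with index K. The address identity it relies on, assuming an 8-byte double as the static_assert states:

#include <cassert>
#include <cstddef>

int main() {
  static_assert(sizeof(double) == 8, "sketch assumes an 8-byte double");
  double Arr[100] = {};

  for (std::size_t K = 0; K < 100; ++K) {
    // Addressing byte 8*K through an i8*-style pointer...
    char *ByteAddr = (char *)Arr + 8 * K;
    // ...is the same address as indexing the double array at K.
    assert(ByteAddr == (char *)&Arr[K]);
  }
  return 0;
}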
Value *Idx[2]; - Idx[0] = Constant::getNullValue(Type::Int32Ty); + Idx[0] = Context->getNullValue(Type::Int32Ty); Idx[1] = NewIdx; Instruction *NewGEP = GetElementPtrInst::Create(X, Idx, Idx + 2, GEP.getName()); @@ -10889,10 +11291,12 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { /// into a gep of the original struct. This is important for SROA and alias /// analysis of unions. If "A" is also a bitcast, wait for A/X to be merged. if (BitCastInst *BCI = dyn_cast(PtrOp)) { - if (!isa(BCI->getOperand(0)) && GEP.hasAllConstantIndices()) { + if (TD && + !isa(BCI->getOperand(0)) && GEP.hasAllConstantIndices()) { // Determine how much the GEP moves the pointer. We are guaranteed to get // a constant back from EmitGEPOffset. - ConstantInt *OffsetV = cast(EmitGEPOffset(&GEP, GEP, *this)); + ConstantInt *OffsetV = + cast(EmitGEPOffset(&GEP, GEP, *this)); int64_t Offset = OffsetV->getSExtValue(); // If this GEP instruction doesn't move the pointer, just replace the GEP @@ -10920,7 +11324,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) { SmallVector NewIndices; const Type *InTy = cast(BCI->getOperand(0)->getType())->getElementType(); - if (FindElementAtOffset(InTy, Offset, NewIndices, TD)) { + if (FindElementAtOffset(InTy, Offset, NewIndices, TD, Context)) { Instruction *NGEP = GetElementPtrInst::Create(BCI->getOperand(0), NewIndices.begin(), NewIndices.end()); @@ -10940,7 +11344,7 @@ Instruction *InstCombiner::visitAllocationInst(AllocationInst &AI) { if (AI.isArrayAllocation()) { // Check C != 1 if (const ConstantInt *C = dyn_cast(AI.getArraySize())) { const Type *NewTy = - ArrayType::get(AI.getAllocatedType(), C->getZExtValue()); + Context->getArrayType(AI.getAllocatedType(), C->getZExtValue()); AllocationInst *New = 0; // Create and insert the replacement instruction... @@ -10962,7 +11366,7 @@ Instruction *InstCombiner::visitAllocationInst(AllocationInst &AI) { // Now that I is pointing to the first non-allocation-inst in the block, // insert our getelementptr instruction... // - Value *NullIdx = Constant::getNullValue(Type::Int32Ty); + Value *NullIdx = Context->getNullValue(Type::Int32Ty); Value *Idx[2]; Idx[0] = NullIdx; Idx[1] = NullIdx; @@ -10973,16 +11377,16 @@ Instruction *InstCombiner::visitAllocationInst(AllocationInst &AI) { // allocation. return ReplaceInstUsesWith(AI, V); } else if (isa(AI.getArraySize())) { - return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType())); + return ReplaceInstUsesWith(AI, Context->getNullValue(AI.getType())); } } - if (isa(AI) && AI.getAllocatedType()->isSized()) { + if (TD && isa(AI) && AI.getAllocatedType()->isSized()) { // If alloca'ing a zero byte object, replace the alloca with a null pointer. - // Note that we only do this for alloca's, because malloc should allocate and - // return a unique pointer, even for a zero byte allocation. - if (TD->getTypePaddedSize(AI.getAllocatedType()) == 0) - return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType())); + // Note that we only do this for alloca's, because malloc should allocate + // and return a unique pointer, even for a zero byte allocation. + if (TD->getTypeAllocSize(AI.getAllocatedType()) == 0) + return ReplaceInstUsesWith(AI, Context->getNullValue(AI.getType())); // If the alignment is 0 (unspecified), assign it the preferred alignment. if (AI.getAlignment() == 0) @@ -10998,8 +11402,8 @@ Instruction *InstCombiner::visitFreeInst(FreeInst &FI) { // free undef -> unreachable. 
if (isa(Op)) { // Insert a new store to null because we cannot modify the CFG here. - new StoreInst(ConstantInt::getTrue(), - UndefValue::get(PointerType::getUnqual(Type::Int1Ty)), &FI); + new StoreInst(Context->getTrue(), + Context->getUndef(Context->getPointerTypeUnqual(Type::Int1Ty)), &FI); return EraseInstFromFunction(FI); } @@ -11039,35 +11443,38 @@ static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI, const TargetData *TD) { User *CI = cast(LI.getOperand(0)); Value *CastOp = CI->getOperand(0); - - if (ConstantExpr *CE = dyn_cast(CI)) { - // Instead of loading constant c string, use corresponding integer value - // directly if string length is small enough. - std::string Str; - if (GetConstantStringInfo(CE->getOperand(0), Str) && !Str.empty()) { - unsigned len = Str.length(); - const Type *Ty = cast(CE->getType())->getElementType(); - unsigned numBits = Ty->getPrimitiveSizeInBits(); - // Replace LI with immediate integer store. - if ((numBits >> 3) == len + 1) { - APInt StrVal(numBits, 0); - APInt SingleChar(numBits, 0); - if (TD->isLittleEndian()) { - for (signed i = len-1; i >= 0; i--) { - SingleChar = (uint64_t) Str[i] & UCHAR_MAX; - StrVal = (StrVal << 8) | SingleChar; - } - } else { - for (unsigned i = 0; i < len; i++) { - SingleChar = (uint64_t) Str[i] & UCHAR_MAX; + LLVMContext *Context = IC.getContext(); + + if (TD) { + if (ConstantExpr *CE = dyn_cast(CI)) { + // Instead of loading constant c string, use corresponding integer value + // directly if string length is small enough. + std::string Str; + if (GetConstantStringInfo(CE->getOperand(0), Str) && !Str.empty()) { + unsigned len = Str.length(); + const Type *Ty = cast(CE->getType())->getElementType(); + unsigned numBits = Ty->getPrimitiveSizeInBits(); + // Replace LI with immediate integer store. + if ((numBits >> 3) == len + 1) { + APInt StrVal(numBits, 0); + APInt SingleChar(numBits, 0); + if (TD->isLittleEndian()) { + for (signed i = len-1; i >= 0; i--) { + SingleChar = (uint64_t) Str[i] & UCHAR_MAX; + StrVal = (StrVal << 8) | SingleChar; + } + } else { + for (unsigned i = 0; i < len; i++) { + SingleChar = (uint64_t) Str[i] & UCHAR_MAX; + StrVal = (StrVal << 8) | SingleChar; + } + // Append NULL at the end. + SingleChar = 0; StrVal = (StrVal << 8) | SingleChar; } - // Append NULL at the end. - SingleChar = 0; - StrVal = (StrVal << 8) | SingleChar; + Value *NL = ConstantInt::get(*Context, StrVal); + return IC.ReplaceInstUsesWith(LI, NL); } - Value *NL = ConstantInt::get(StrVal); - return IC.ReplaceInstUsesWith(LI, NL); } } } @@ -11091,19 +11498,20 @@ static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI, if (Constant *CSrc = dyn_cast(CastOp)) if (ASrcTy->getNumElements() != 0) { Value *Idxs[2]; - Idxs[0] = Idxs[1] = Constant::getNullValue(Type::Int32Ty); - CastOp = ConstantExpr::getGetElementPtr(CSrc, Idxs, 2); + Idxs[0] = Idxs[1] = Context->getNullValue(Type::Int32Ty); + CastOp = Context->getConstantExprGetElementPtr(CSrc, Idxs, 2); SrcTy = cast(CastOp->getType()); SrcPTy = SrcTy->getElementType(); } - if ((SrcPTy->isInteger() || isa(SrcPTy) || + if (IC.getTargetData() && + (SrcPTy->isInteger() || isa(SrcPTy) || isa(SrcPTy)) && // Do not allow turning this into a load of an integer, which is then // casted to a pointer, this pessimizes pointer analysis a lot. 
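InstCombineLoadCast folds a load of a small constant C string into an immediate integer, building the value byte by byte in the target's byte order (hence the TD->isLittleEndian() split and the explicitly appended NUL). A standalone reconstruction of both orderings, checked against what a real 4-byte load of "abc" produces on the host:

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  const char Str[] = "abc";          // 3 chars + NUL = 4 bytes = one i32 load
  const unsigned Len = 3;

  // What an i32 load of the string's memory actually produces on this host.
  uint32_t Loaded = 0;
  std::memcpy(&Loaded, Str, sizeof(Loaded));

  // Rebuild the value the way the fold does, for each byte order.
  uint32_t LittleEndianVal = 0;
  for (int I = Len - 1; I >= 0; --I)
    LittleEndianVal = (LittleEndianVal << 8) | (uint8_t)Str[I];
  // (the NUL ends up in the untouched top byte)

  uint32_t BigEndianVal = 0;
  for (unsigned I = 0; I < Len; ++I)
    BigEndianVal = (BigEndianVal << 8) | (uint8_t)Str[I];
  BigEndianVal = (BigEndianVal << 8) | 0;  // append the NUL explicitly

  // Detect host byte order and check the matching reconstruction.
  uint16_t Probe = 1;
  bool HostIsLittleEndian = *(uint8_t *)&Probe == 1;
  assert(Loaded == (HostIsLittleEndian ? LittleEndianVal : BigEndianVal));
  return 0;
}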
(isa(SrcPTy) == isa(LI.getType())) && - IC.getTargetData().getTypeSizeInBits(SrcPTy) == - IC.getTargetData().getTypeSizeInBits(DestPTy)) { + IC.getTargetData()->getTypeSizeInBits(SrcPTy) == + IC.getTargetData()->getTypeSizeInBits(DestPTy)) { // Okay, we are casting from one integer or pointer type to another of // the same size. Instead of casting the pointer before the load, cast @@ -11119,54 +11527,18 @@ static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI, return 0; } -/// isSafeToLoadUnconditionally - Return true if we know that executing a load -/// from this value cannot trap. If it is not obviously safe to load from the -/// specified pointer, we do a quick local scan of the basic block containing -/// ScanFrom, to determine if the address is already accessed. -static bool isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom) { - // If it is an alloca it is always safe to load from. - if (isa(V)) return true; - - // If it is a global variable it is mostly safe to load from. - if (const GlobalValue *GV = dyn_cast(V)) - // Don't try to evaluate aliases. External weak GV can be null. - return !isa(GV) && !GV->hasExternalWeakLinkage(); - - // Otherwise, be a little bit agressive by scanning the local block where we - // want to check to see if the pointer is already being loaded or stored - // from/to. If so, the previous load or store would have already trapped, - // so there is no harm doing an extra load (also, CSE will later eliminate - // the load entirely). - BasicBlock::iterator BBI = ScanFrom, E = ScanFrom->getParent()->begin(); - - while (BBI != E) { - --BBI; - - // If we see a free or a call (which might do a free) the pointer could be - // marked invalid. - if (isa(BBI) || isa(BBI)) - return false; - - if (LoadInst *LI = dyn_cast(BBI)) { - if (LI->getOperand(0) == V) return true; - } else if (StoreInst *SI = dyn_cast(BBI)) { - if (SI->getOperand(1) == V) return true; - } - - } - return false; -} - Instruction *InstCombiner::visitLoadInst(LoadInst &LI) { Value *Op = LI.getOperand(0); // Attempt to improve the alignment. - unsigned KnownAlign = - GetOrEnforceKnownAlignment(Op, TD->getPrefTypeAlignment(LI.getType())); - if (KnownAlign > - (LI.getAlignment() == 0 ? TD->getABITypeAlignment(LI.getType()) : - LI.getAlignment())) - LI.setAlignment(KnownAlign); + if (TD) { + unsigned KnownAlign = + GetOrEnforceKnownAlignment(Op, TD->getPrefTypeAlignment(LI.getType())); + if (KnownAlign > + (LI.getAlignment() == 0 ? TD->getABITypeAlignment(LI.getType()) : + LI.getAlignment())) + LI.setAlignment(KnownAlign); + } // load (cast X) --> cast (load X) iff safe if (isa(Op)) @@ -11192,9 +11564,9 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) { // that this code is not reachable. We do this instead of inserting // an unreachable instruction directly because we cannot modify the // CFG. - new StoreInst(UndefValue::get(LI.getType()), - Constant::getNullValue(Op->getType()), &LI); - return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType())); + new StoreInst(Context->getUndef(LI.getType()), + Context->getNullValue(Op->getType()), &LI); + return ReplaceInstUsesWith(LI, Context->getUndef(LI.getType())); } } @@ -11206,32 +11578,33 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) { // Insert a new store to null instruction before the load to indicate that // this code is not reachable. We do this instead of inserting an // unreachable instruction directly because we cannot modify the CFG. 
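The load-of-cast transform moves the pointer cast to after the load when the two pointee types have the same store size: loading through the casted pointer and bit-casting the loaded value produce the same bits. A sketch with a 4-byte float and i32 pair, using memcpy for the byte-level reinterpretation:

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  float Stored = 3.14159f;
  unsigned char Mem[sizeof(float)];
  std::memcpy(Mem, &Stored, sizeof(Mem));

  // load i32 through the "casted" pointer ...
  uint32_t LoadedAsInt = 0;
  std::memcpy(&LoadedAsInt, Mem, sizeof(LoadedAsInt));

  // ... versus load float, then bitcast the loaded value to i32.
  float LoadedAsFloat = 0;
  std::memcpy(&LoadedAsFloat, Mem, sizeof(LoadedAsFloat));
  uint32_t CastedValue = 0;
  std::memcpy(&CastedValue, &LoadedAsFloat, sizeof(CastedValue));

  assert(LoadedAsInt == CastedValue);  // same bits, only the cast moved
  return 0;
}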
- new StoreInst(UndefValue::get(LI.getType()), - Constant::getNullValue(Op->getType()), &LI); - return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType())); + new StoreInst(Context->getUndef(LI.getType()), + Context->getNullValue(Op->getType()), &LI); + return ReplaceInstUsesWith(LI, Context->getUndef(LI.getType())); } // Instcombine load (constant global) into the value loaded. if (GlobalVariable *GV = dyn_cast(Op)) - if (GV->isConstant() && !GV->isDeclaration()) + if (GV->isConstant() && GV->hasDefinitiveInitializer()) return ReplaceInstUsesWith(LI, GV->getInitializer()); // Instcombine load (constantexpr_GEP global, 0, ...) into the value loaded. if (ConstantExpr *CE = dyn_cast(Op)) { if (CE->getOpcode() == Instruction::GetElementPtr) { if (GlobalVariable *GV = dyn_cast(CE->getOperand(0))) - if (GV->isConstant() && !GV->isDeclaration()) + if (GV->isConstant() && GV->hasDefinitiveInitializer()) if (Constant *V = - ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE)) + ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE, + *Context)) return ReplaceInstUsesWith(LI, V); if (CE->getOperand(0)->isNullValue()) { // Insert a new store to null instruction before the load to indicate // that this code is not reachable. We do this instead of inserting // an unreachable instruction directly because we cannot modify the // CFG. - new StoreInst(UndefValue::get(LI.getType()), - Constant::getNullValue(Op->getType()), &LI); - return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType())); + new StoreInst(Context->getUndef(LI.getType()), + Context->getNullValue(Op->getType()), &LI); + return ReplaceInstUsesWith(LI, Context->getUndef(LI.getType())); } } else if (CE->isCast()) { @@ -11244,11 +11617,11 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) { // If this load comes from anywhere in a constant global, and if the global // is all undef or zero, we know what it loads. if (GlobalVariable *GV = dyn_cast(Op->getUnderlyingObject())){ - if (GV->isConstant() && GV->hasInitializer()) { + if (GV->isConstant() && GV->hasDefinitiveInitializer()) { if (GV->getInitializer()->isNullValue()) - return ReplaceInstUsesWith(LI, Constant::getNullValue(LI.getType())); + return ReplaceInstUsesWith(LI, Context->getNullValue(LI.getType())); else if (isa(GV->getInitializer())) - return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType())); + return ReplaceInstUsesWith(LI, Context->getUndef(LI.getType())); } } @@ -11298,6 +11671,7 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) { static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) { User *CI = cast(SI.getOperand(1)); Value *CastOp = CI->getOperand(0); + LLVMContext *Context = IC.getContext(); const Type *DestPTy = cast(CI->getType())->getElementType(); const PointerType *SrcTy = dyn_cast(CastOp->getType()); @@ -11319,7 +11693,7 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) { // constants. if (isa(SrcPTy) || isa(SrcPTy)) { // Index through pointer. 
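With the switch to hasDefinitiveInitializer(), a load of a constant global, or of a constant GEP into one, folds to the corresponding initializer element, and an all-zero or undef initializer folds every in-bounds load. A trivial C++ analogue of the constant-table case:

#include <cassert>

// Constant globals with definitive initializers, the case the new
// hasDefinitiveInitializer() check insists on before folding the load.
static const int Table[4] = {10, 20, 30, 40};
static const int Zeros[8] = {};

int main() {
  // load (gep @Table, 0, 2) folds to the initializer element.
  const int *P = &Table[2];
  assert(*P == 30);

  // An all-zero initializer folds every in-bounds load to zero.
  for (int I = 0; I < 8; ++I)
    assert(Zeros[I] == 0);
  return 0;
}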
- Constant *Zero = Constant::getNullValue(Type::Int32Ty); + Constant *Zero = Context->getNullValue(Type::Int32Ty); NewGEPIndices.push_back(Zero); while (1) { @@ -11336,7 +11710,7 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) { } } - SrcTy = PointerType::get(SrcPTy, SrcTy->getAddressSpace()); + SrcTy = Context->getPointerType(SrcPTy, SrcTy->getAddressSpace()); } if (!SrcPTy->isInteger() && !isa(SrcPTy)) @@ -11344,10 +11718,11 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) { // If the pointers point into different address spaces or if they point to // values with different sizes, we can't do the transformation. - if (SrcTy->getAddressSpace() != + if (!IC.getTargetData() || + SrcTy->getAddressSpace() != cast(CI->getType())->getAddressSpace() || - IC.getTargetData().getTypeSizeInBits(SrcPTy) != - IC.getTargetData().getTypeSizeInBits(DestPTy)) + IC.getTargetData()->getTypeSizeInBits(SrcPTy) != + IC.getTargetData()->getTypeSizeInBits(DestPTy)) return 0; // Okay, we are casting from one integer or pointer type to another of @@ -11370,7 +11745,7 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) { // emit a GEP to index into its first field. if (!NewGEPIndices.empty()) { if (Constant *C = dyn_cast(CastOp)) - CastOp = ConstantExpr::getGetElementPtr(C, &NewGEPIndices[0], + CastOp = Context->getConstantExprGetElementPtr(C, &NewGEPIndices[0], NewGEPIndices.size()); else CastOp = IC.InsertNewInstBefore( @@ -11379,7 +11754,7 @@ static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) { } if (Constant *C = dyn_cast(SIOp0)) - NewCast = ConstantExpr::getCast(opcode, C, CastDstTy); + NewCast = Context->getConstantExprCast(opcode, C, CastDstTy); else NewCast = IC.InsertNewInstBefore( CastInst::Create(opcode, SIOp0, CastDstTy, SIOp0->getName()+".c"), @@ -11477,12 +11852,14 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) { } // Attempt to improve the alignment. - unsigned KnownAlign = - GetOrEnforceKnownAlignment(Ptr, TD->getPrefTypeAlignment(Val->getType())); - if (KnownAlign > - (SI.getAlignment() == 0 ? TD->getABITypeAlignment(Val->getType()) : - SI.getAlignment())) - SI.setAlignment(KnownAlign); + if (TD) { + unsigned KnownAlign = + GetOrEnforceKnownAlignment(Ptr, TD->getPrefTypeAlignment(Val->getType())); + if (KnownAlign > + (SI.getAlignment() == 0 ? TD->getABITypeAlignment(Val->getType()) : + SI.getAlignment())) + SI.setAlignment(KnownAlign); + } // Do really simple DSE, to catch cases where there are several consecutive // stores to the same location, separated by a few arithmetic operations. This @@ -11537,9 +11914,10 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) { if (SI.isVolatile()) return 0; // Don't hack volatile stores. // store X, null -> turns into 'unreachable' in SimplifyCFG - if (isa(Ptr)) { + if (isa(Ptr) && + cast(Ptr->getType())->getAddressSpace() == 0) { if (!isa(Val)) { - SI.setOperand(0, UndefValue::get(Val->getType())); + SI.setOperand(0, Context->getUndef(Val->getType())); if (Instruction *U = dyn_cast(Val)) AddToWorkList(U); // Dropped a use. ++NumCombined; @@ -11706,7 +12084,7 @@ Instruction *InstCombiner::visitBranchInst(BranchInst &BI) { Value *X = 0; BasicBlock *TrueDest; BasicBlock *FalseDest; - if (match(&BI, m_Br(m_Not(m_Value(X)), TrueDest, FalseDest)) && + if (match(&BI, m_Br(m_Not(m_Value(X)), TrueDest, FalseDest), *Context) && !isa(X)) { // Swap Destinations and condition... 
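InstCombineStoreToCast may index through leading fields (the NewGEPIndices loop) so the store targets a member of the right size; for a standard-layout type the object and its first member share an address, which is the layout fact being exploited. A minimal illustration:

#include <cassert>

// A standard-layout struct: a pointer to the object is
// pointer-interconvertible with a pointer to its first member.
struct Wrapper { float F; };

int main() {
  Wrapper W;
  // Store through a pointer "cast" to the first field's type...
  *reinterpret_cast<float *>(&W) = 2.5f;
  // ...is a store to that field.
  assert(W.F == 2.5f);
  assert(static_cast<void *>(&W) == static_cast<void *>(&W.F));
  return 0;
}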
     BI.setCondition(X);
@@ -11718,12 +12096,12 @@ Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
   // Cannonicalize fcmp_one -> fcmp_oeq
   FCmpInst::Predicate FPred; Value *Y;
   if (match(&BI, m_Br(m_FCmp(FPred, m_Value(X), m_Value(Y)), 
-                             TrueDest, FalseDest)))
+                             TrueDest, FalseDest), *Context))
     if ((FPred == FCmpInst::FCMP_ONE || FPred == FCmpInst::FCMP_OLE ||
          FPred == FCmpInst::FCMP_OGE) && BI.getCondition()->hasOneUse()) {
       FCmpInst *I = cast<FCmpInst>(BI.getCondition());
       FCmpInst::Predicate NewPred = FCmpInst::getInversePredicate(FPred);
-      Instruction *NewSCC = new FCmpInst(NewPred, X, Y, "", I);
+      Instruction *NewSCC = new FCmpInst(I, NewPred, X, Y, "");
       NewSCC->takeName(I);
       // Swap Destinations and condition...
       BI.setCondition(NewSCC);
@@ -11738,13 +12116,13 @@ Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
   // Cannonicalize icmp_ne -> icmp_eq
   ICmpInst::Predicate IPred;
   if (match(&BI, m_Br(m_ICmp(IPred, m_Value(X), m_Value(Y)),
-                      TrueDest, FalseDest)))
+                      TrueDest, FalseDest), *Context))
     if ((IPred == ICmpInst::ICMP_NE  || IPred == ICmpInst::ICMP_ULE ||
          IPred == ICmpInst::ICMP_SLE || IPred == ICmpInst::ICMP_UGE ||
          IPred == ICmpInst::ICMP_SGE) && BI.getCondition()->hasOneUse()) {
       ICmpInst *I = cast<ICmpInst>(BI.getCondition());
       ICmpInst::Predicate NewPred = ICmpInst::getInversePredicate(IPred);
-      Instruction *NewSCC = new ICmpInst(NewPred, X, Y, "", I);
+      Instruction *NewSCC = new ICmpInst(I, NewPred, X, Y, "");
       NewSCC->takeName(I);
       // Swap Destinations and condition...
       BI.setCondition(NewSCC);
@@ -11766,7 +12144,8 @@ Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
     if (ConstantInt *AddRHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
       // change 'switch (X+4) case 1:' into 'switch (X) case -3'
       for (unsigned i = 2, e = SI.getNumOperands(); i != e; i += 2)
-        SI.setOperand(i,ConstantExpr::getSub(cast<Constant>(SI.getOperand(i)),
+        SI.setOperand(i,
+                   Context->getConstantExprSub(cast<Constant>(SI.getOperand(i)),
                                                 AddRHS));
       SI.setOperand(0, I->getOperand(0));
       AddToWorkList(I);
@@ -11784,10 +12163,10 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
 
   if (Constant *C = dyn_cast<Constant>(Agg)) {
     if (isa<UndefValue>(C))
-      return ReplaceInstUsesWith(EV, UndefValue::get(EV.getType()));
+      return ReplaceInstUsesWith(EV, Context->getUndef(EV.getType()));
 
     if (isa<ConstantAggregateZero>(C))
-      return ReplaceInstUsesWith(EV, Constant::getNullValue(EV.getType()));
+      return ReplaceInstUsesWith(EV, Context->getNullValue(EV.getType()));
 
     if (isa<ConstantArray>(C) || isa<ConstantStruct>(C)) {
       // Extract the element indexed by the first index out of the constant
@@ -11923,17 +12302,18 @@ static std::vector<unsigned> getShuffleMask(const ShuffleVectorInst *SVI) {
 /// FindScalarElement - Given a vector and an element number, see if the scalar
 /// value is already around as a register, for example if it were inserted then
 /// extracted from the vector.
-static Value *FindScalarElement(Value *V, unsigned EltNo) {
+static Value *FindScalarElement(Value *V, unsigned EltNo,
+                                LLVMContext *Context) {
   assert(isa<VectorType>(V->getType()) && "Not looking at a vector?");
   const VectorType *PTy = cast<VectorType>(V->getType());
   unsigned Width = PTy->getNumElements();
   if (EltNo >= Width)  // Out of range access.
-    return UndefValue::get(PTy->getElementType());
+    return Context->getUndef(PTy->getElementType());
 
   if (isa<UndefValue>(V))
-    return UndefValue::get(PTy->getElementType());
+    return Context->getUndef(PTy->getElementType());
   else if (isa<ConstantAggregateZero>(V))
-    return Constant::getNullValue(PTy->getElementType());
+    return Context->getNullValue(PTy->getElementType());
   else if (ConstantVector *CP = dyn_cast<ConstantVector>(V))
     return CP->getOperand(EltNo);
   else if (InsertElementInst *III = dyn_cast<InsertElementInst>(V)) {
@@ -11949,17 +12329,17 @@ static Value *FindScalarElement(Value *V, unsigned EltNo) {
 
     // Otherwise, the insertelement doesn't modify the value, recurse on its
     // vector input.
-    return FindScalarElement(III->getOperand(0), EltNo);
+    return FindScalarElement(III->getOperand(0), EltNo, Context);
   } else if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V)) {
     unsigned LHSWidth =
       cast<VectorType>(SVI->getOperand(0)->getType())->getNumElements();
     unsigned InEl = getShuffleMask(SVI)[EltNo];
     if (InEl < LHSWidth)
-      return FindScalarElement(SVI->getOperand(0), InEl);
+      return FindScalarElement(SVI->getOperand(0), InEl, Context);
     else if (InEl < LHSWidth*2)
-      return FindScalarElement(SVI->getOperand(1), InEl - LHSWidth);
+      return FindScalarElement(SVI->getOperand(1), InEl - LHSWidth, Context);
     else
-      return UndefValue::get(PTy->getElementType());
+      return Context->getUndef(PTy->getElementType());
   }
 
   // Otherwise, we don't know.
@@ -11969,11 +12349,11 @@ static Value *FindScalarElement(Value *V, unsigned EltNo) {
 Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
   // If vector val is undef, replace extract with scalar undef.
   if (isa<UndefValue>(EI.getOperand(0)))
-    return ReplaceInstUsesWith(EI, UndefValue::get(EI.getType()));
+    return ReplaceInstUsesWith(EI, Context->getUndef(EI.getType()));
 
   // If vector val is constant 0, replace extract with scalar 0.
   if (isa<ConstantAggregateZero>(EI.getOperand(0)))
-    return ReplaceInstUsesWith(EI, Constant::getNullValue(EI.getType()));
+    return ReplaceInstUsesWith(EI, Context->getNullValue(EI.getType()));
 
   if (ConstantVector *C = dyn_cast<ConstantVector>(EI.getOperand(0))) {
     // If vector val is constant with all elements the same, replace EI with
@@ -11999,7 +12379,7 @@ Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
     // If this is extracting an invalid index, turn this into undef, to avoid
     // crashing the code below.
     if (IndexVal >= VectorWidth)
-      return ReplaceInstUsesWith(EI, UndefValue::get(EI.getType()));
+      return ReplaceInstUsesWith(EI, Context->getUndef(EI.getType()));
 
     // This instruction only demands the single element from the input vector.
     // If the input vector has a single use, simplify it based on this use
@@ -12014,7 +12394,7 @@ Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
     }
   }
 
-  if (Value *Elt = FindScalarElement(EI.getOperand(0), IndexVal))
+  if (Value *Elt = FindScalarElement(EI.getOperand(0), IndexVal, Context))
     return ReplaceInstUsesWith(EI, Elt);
 
   // If the this extractelement is directly using a bitcast from a vector of
@@ -12024,7 +12404,8 @@ Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
     if (const VectorType *VT = 
             dyn_cast<VectorType>(BCI->getOperand(0)->getType()))
       if (VT->getNumElements() == VectorWidth)
-        if (Value *Elt = FindScalarElement(BCI->getOperand(0), IndexVal))
+        if (Value *Elt = FindScalarElement(BCI->getOperand(0),
+                                           IndexVal, Context))
           return new BitCastInst(Elt, EI.getType());
   }
 
@@ -12050,7 +12431,7 @@ Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
         unsigned AS = 
           cast<PointerType>(I->getOperand(0)->getType())->getAddressSpace();
         Value *Ptr = InsertBitCastBefore(I->getOperand(0),
-                                         PointerType::get(EI.getType(), AS),EI);
+                                  Context->getPointerType(EI.getType(), AS),EI);
         GetElementPtrInst *GEP =
           GetElementPtrInst::Create(Ptr, EI.getOperand(1), I->getName()+".gep");
         InsertNewInstBefore(GEP, EI);
@@ -12084,11 +12465,13 @@ Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
         SrcIdx -= LHSWidth;
         Src = SVI->getOperand(1);
       } else {
-        return ReplaceInstUsesWith(EI, UndefValue::get(EI.getType()));
+        return ReplaceInstUsesWith(EI, Context->getUndef(EI.getType()));
       }
-      return new ExtractElementInst(Src, SrcIdx);
+      return new ExtractElementInst(Src,
+                          ConstantInt::get(Type::Int32Ty, SrcIdx, false));
     }
   }
+    // FIXME: Canonicalize extractelement(bitcast) -> bitcast(extractelement)
   }
   return 0;
 }
@@ -12097,13 +12480,14 @@ Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
 /// elements from either LHS or RHS, return the shuffle mask and true.
 /// Otherwise, return false.
 static bool CollectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
-                                         std::vector<Constant*> &Mask) {
+                                         std::vector<Constant*> &Mask,
+                                         LLVMContext *Context) {
   assert(V->getType() == LHS->getType() && V->getType() == RHS->getType() &&
          "Invalid CollectSingleShuffleElements");
   unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
 
   if (isa<UndefValue>(V)) {
-    Mask.assign(NumElts, UndefValue::get(Type::Int32Ty));
+    Mask.assign(NumElts, Context->getUndef(Type::Int32Ty));
     return true;
   } else if (V == LHS) {
     for (unsigned i = 0; i != NumElts; ++i)
@@ -12126,9 +12510,9 @@ static bool CollectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
       if (isa<UndefValue>(ScalarOp)) {  // inserting undef into vector.
         // Okay, we can handle this if the vector we are insertinting into is
         // transitively ok.
-        if (CollectSingleShuffleElements(VecOp, LHS, RHS, Mask)) {
+        if (CollectSingleShuffleElements(VecOp, LHS, RHS, Mask, Context)) {
          // If so, update the mask to reflect the inserted undef.
-          Mask[InsertedIdx] = UndefValue::get(Type::Int32Ty);
+          Mask[InsertedIdx] = Context->getUndef(Type::Int32Ty);
           return true;
         }
       } else if (ExtractElementInst *EI = dyn_cast<ExtractElementInst>(ScalarOp)){
@@ -12141,7 +12525,7 @@ static bool CollectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
           if (EI->getOperand(0) == LHS || EI->getOperand(0) == RHS) {
             // Okay, we can handle this if the vector we are insertinting into is
             // transitively ok.
-            if (CollectSingleShuffleElements(VecOp, LHS, RHS, Mask)) {
+            if (CollectSingleShuffleElements(VecOp, LHS, RHS, Mask, Context)) {
              // If so, update the mask to reflect the inserted value.
              if (EI->getOperand(0) == LHS) {
                Mask[InsertedIdx % NumElts] = 
@@ -12167,14 +12551,14 @@ static bool CollectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
 /// RHS of the shuffle instruction, if it is not null.  Return a shuffle mask
 /// that computes V and the LHS value of the shuffle.
 static Value *CollectShuffleElements(Value *V, std::vector<Constant*> &Mask,
-                                     Value *&RHS) {
+                                     Value *&RHS, LLVMContext *Context) {
   assert(isa<VectorType>(V->getType()) && 
          (RHS == 0 || V->getType() == RHS->getType()) &&
          "Invalid shuffle!");
   unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
 
   if (isa<UndefValue>(V)) {
-    Mask.assign(NumElts, UndefValue::get(Type::Int32Ty));
+    Mask.assign(NumElts, Context->getUndef(Type::Int32Ty));
     return V;
   } else if (isa<ConstantAggregateZero>(V)) {
     Mask.assign(NumElts, ConstantInt::get(Type::Int32Ty, 0));
@@ -12196,14 +12580,15 @@ static Value *CollectShuffleElements(Value *V, std::vector<Constant*> &Mask,
       // otherwise we'd end up with a shuffle of three inputs.
       if (EI->getOperand(0) == RHS || RHS == 0) {
         RHS = EI->getOperand(0);
-        Value *V = CollectShuffleElements(VecOp, Mask, RHS);
+        Value *V = CollectShuffleElements(VecOp, Mask, RHS, Context);
         Mask[InsertedIdx % NumElts] = 
           ConstantInt::get(Type::Int32Ty, NumElts+ExtractedIdx);
         return V;
       }
 
       if (VecOp == RHS) {
-        Value *V = CollectShuffleElements(EI->getOperand(0), Mask, RHS);
+        Value *V = CollectShuffleElements(EI->getOperand(0), Mask,
+                                          RHS, Context);
         // Everything but the extracted element is replaced with the RHS.
         for (unsigned i = 0; i != NumElts; ++i) {
           if (i != InsertedIdx)
@@ -12214,7 +12599,8 @@ static Value *CollectShuffleElements(Value *V, std::vector<Constant*> &Mask,
 
       // If this insertelement is a chain that comes from exactly these two
       // vectors, return the vector and the effective shuffle.
-      if (CollectSingleShuffleElements(IEI, EI->getOperand(0), RHS, Mask))
+      if (CollectSingleShuffleElements(IEI, EI->getOperand(0), RHS, Mask,
+                                       Context))
         return EI->getOperand(0);
     }
@@ -12251,7 +12637,7 @@ Instruction *InstCombiner::visitInsertElementInst(InsertElementInst &IE) {
       return ReplaceInstUsesWith(IE, VecOp);
 
     if (InsertedIdx >= NumVectorElts)  // Out of range insert.
-      return ReplaceInstUsesWith(IE, UndefValue::get(IE.getType()));
+      return ReplaceInstUsesWith(IE, Context->getUndef(IE.getType()));
 
     // If we are extracting a value from a vector, then inserting it right
    // back into the same place, just use the input vector.
@@ -12268,15 +12654,16 @@ Instruction *InstCombiner::visitInsertElementInst(InsertElementInst &IE) {
 
       // Build a new shuffle mask.
       std::vector<Constant*> Mask;
       if (isa<UndefValue>(VecOp))
-        Mask.assign(NumVectorElts, UndefValue::get(Type::Int32Ty));
+        Mask.assign(NumVectorElts, Context->getUndef(Type::Int32Ty));
       else {
         assert(isa<ConstantAggregateZero>(VecOp) && "Unknown thing");
         Mask.assign(NumVectorElts, ConstantInt::get(Type::Int32Ty,
                                                     NumVectorElts));
       }
-      Mask[InsertedIdx] = ConstantInt::get(Type::Int32Ty, ExtractedIdx);
+      Mask[InsertedIdx] = 
+                         ConstantInt::get(Type::Int32Ty, ExtractedIdx);
       return new ShuffleVectorInst(EI->getOperand(0), VecOp,
-                                   ConstantVector::get(Mask));
+                                   Context->getConstantVector(Mask));
     }
 
     // If this insertelement isn't used by some other insertelement, turn it
@@ -12284,14 +12671,21 @@ Instruction *InstCombiner::visitInsertElementInst(InsertElementInst &IE) {
     if (!IE.hasOneUse() || !isa<InsertElementInst>(IE.use_back())) {
       std::vector<Constant*> Mask;
       Value *RHS = 0;
-      Value *LHS = CollectShuffleElements(&IE, Mask, RHS);
-      if (RHS == 0) RHS = UndefValue::get(LHS->getType());
+      Value *LHS = CollectShuffleElements(&IE, Mask, RHS, Context);
+      if (RHS == 0) RHS = Context->getUndef(LHS->getType());
       // We now have a shuffle of LHS, RHS, Mask.
-      return new ShuffleVectorInst(LHS, RHS, ConstantVector::get(Mask));
+      return new ShuffleVectorInst(LHS, RHS,
+                                   Context->getConstantVector(Mask));
     }
   }
 
+  unsigned VWidth = cast<VectorType>(VecOp->getType())->getNumElements();
+  APInt UndefElts(VWidth, 0);
+  APInt AllOnesEltMask(APInt::getAllOnesValue(VWidth));
+  if (SimplifyDemandedVectorElts(&IE, AllOnesEltMask, UndefElts))
+    return &IE;
+
   return 0;
 }
 
@@ -12305,7 +12699,7 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
 
   // Undefined shuffle mask -> undefined value.
   if (isa<UndefValue>(SVI.getOperand(2)))
-    return ReplaceInstUsesWith(SVI, UndefValue::get(SVI.getType()));
+    return ReplaceInstUsesWith(SVI, Context->getUndef(SVI.getType()));
 
   unsigned VWidth = cast<VectorType>(SVI.getType())->getNumElements();
 
@@ -12332,12 +12726,12 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
     std::vector<Constant*> Elts;
     for (unsigned i = 0, e = Mask.size(); i != e; ++i) {
       if (Mask[i] >= 2*e)
-        Elts.push_back(UndefValue::get(Type::Int32Ty));
+        Elts.push_back(Context->getUndef(Type::Int32Ty));
      else {
         if ((Mask[i] >= e && isa<UndefValue>(RHS)) ||
             (Mask[i] < e && isa<UndefValue>(LHS))) {
           Mask[i] = 2*e;     // Turn into undef.
-          Elts.push_back(UndefValue::get(Type::Int32Ty));
+          Elts.push_back(Context->getUndef(Type::Int32Ty));
         } else {
           Mask[i] = Mask[i] % e;  // Force to LHS.
           Elts.push_back(ConstantInt::get(Type::Int32Ty, Mask[i]));
@@ -12345,8 +12739,8 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
       }
     }
     SVI.setOperand(0, SVI.getOperand(1));
-    SVI.setOperand(1, UndefValue::get(RHS->getType()));
-    SVI.setOperand(2, ConstantVector::get(Elts));
+    SVI.setOperand(1, Context->getUndef(RHS->getType()));
+    SVI.setOperand(2, Context->getConstantVector(Elts));
     LHS = SVI.getOperand(0);
     RHS = SVI.getOperand(1);
     MadeChange = true;
@@ -12396,14 +12790,14 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
       std::vector<Constant*> Elts;
       for (unsigned i = 0, e = NewMask.size(); i != e; ++i) {
         if (NewMask[i] >= LHSInNElts*2) {
-          Elts.push_back(UndefValue::get(Type::Int32Ty));
+          Elts.push_back(Context->getUndef(Type::Int32Ty));
         } else {
           Elts.push_back(ConstantInt::get(Type::Int32Ty, NewMask[i]));
         }
       }
       return new ShuffleVectorInst(LHSSVI->getOperand(0),
                                    LHSSVI->getOperand(1),
-                                   ConstantVector::get(Elts));
+                                   Context->getConstantVector(Elts));
       }
     }
   }
@@ -12422,7 +12816,7 @@ static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {
   assert(I->hasOneUse() && "Invariants didn't hold!");
 
   // Cannot move control-flow-involving, volatile loads, vaarg, etc.
-  if (isa<PHINode>(I) || I->mayWriteToMemory() || isa<TerminatorInst>(I))
+  if (isa<PHINode>(I) || I->mayHaveSideEffects() || isa<TerminatorInst>(I))
     return false;
 
   // Do not sink alloca instructions out of the entry block.
@@ -12484,7 +12878,7 @@ static void AddReachableCodeToWorklist(BasicBlock *BB,
       }
 
       // ConstantProp instruction if trivially constant.
-      if (Constant *C = ConstantFoldInstruction(Inst, TD)) {
+      if (Constant *C = ConstantFoldInstruction(Inst, BB->getContext(), TD)) {
        DOUT << "IC: ConstFold to: " << *C << " from: " << *Inst;
         Inst->replaceAllUsesWith(C);
         ++NumConstProp;
@@ -12544,7 +12938,7 @@ static void AddReachableCodeToWorklist(BasicBlock *BB,
 
 bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
   bool Changed = false;
-  TD = &getAnalysis<TargetData>();
+  TD = getAnalysisIfAvailable<TargetData>();
 
   DEBUG(DOUT << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on "
         << F.getNameStr() << "\n");
@@ -12573,7 +12967,7 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
           Changed = true;
         }
         if (!I->use_empty())
-          I->replaceAllUsesWith(UndefValue::get(I->getType()));
+          I->replaceAllUsesWith(Context->getUndef(I->getType()));
         I->eraseFromParent();
       }
     }
@@ -12599,7 +12993,7 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
     }
 
     // Instruction isn't dead, see if we can constant propagate it.
-    if (Constant *C = ConstantFoldInstruction(I, TD)) {
+    if (Constant *C = ConstantFoldInstruction(I, F.getContext(), TD)) {
       DOUT << "IC: ConstFold to: " << *C << " from: " << *I;
 
       // Add operands to the worklist.
@@ -12613,11 +13007,12 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
       continue;
     }
 
-    if (TD && I->getType()->getTypeID() == Type::VoidTyID) {
+    if (TD) {
      // See if we can constant fold its operands.
      for (User::op_iterator i = I->op_begin(), e = I->op_end(); i != e; ++i)
        if (ConstantExpr *CE = dyn_cast<ConstantExpr>(i))
-          if (Constant *NewC = ConstantFoldConstantExpression(CE, TD))
+          if (Constant *NewC = ConstantFoldConstantExpression(CE, 
+                                                        F.getContext(), TD))
            if (NewC != CE) {
              i->set(NewC);
              Changed = true;
@@ -12725,6 +13120,7 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
 
 bool InstCombiner::runOnFunction(Function &F) {
   MustPreserveLCSSA = mustPreserveAnalysisID(LCSSAID);
+  Context = &F.getContext();
 
   bool EverMadeChange = false;
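
For reference, the visitSwitchInst hunk above ("change 'switch (X+4) case 1:' into 'switch (X) case -3'") only rewrites each case constant by subtracting the add's constant operand (via Context->getConstantExprSub); the successor blocks are untouched. A minimal standalone C++ sketch of that arithmetic, using plain integers and hypothetical case values rather than the LLVM constant-folding API used in the patch:

#include <cstdint>
#include <cstdio>

int main() {
  const int64_t Addend = 4;               // constant folded out of 'X + 4'
  const int64_t OldCases[] = {1, 7, -2};  // hypothetical case values
  for (int64_t OldCase : OldCases) {
    // Same subtraction the pass performs on each case operand.
    int64_t NewCase = OldCase - Addend;
    std::printf("case %lld -> case %lld\n",
                (long long)OldCase, (long long)NewCase);
  }
  return 0;
}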