Analysis: Reformulate WillNotOverflowUnsignedMul for reusability
author    David Majnemer <david.majnemer@gmail.com>
Fri, 2 Jan 2015 07:29:43 +0000 (07:29 +0000)
committer David Majnemer <david.majnemer@gmail.com>
Fri, 2 Jan 2015 07:29:43 +0000 (07:29 +0000)
WillNotOverflowUnsignedMul's smarts will live in ValueTracking as
computeOverflowForUnsignedMul.  It now returns a tri-state result:
never overflows, always overflows, or sometimes overflows.
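
As a minimal sketch (a hypothetical caller, not part of this commit; it reuses
the LHS/RHS/DL/AT/DT/I names from the patch below), a transform that only needs
the "never overflows" answer can consume the tri-state result like so:

    // Strengthen a plain unsigned multiply to "mul nuw" when ValueTracking
    // proves it cannot wrap; the AlwaysOverflows arm is left for future callers.
    OverflowResult OR = computeOverflowForUnsignedMul(LHS, RHS, DL, AT, &I, DT);
    if (OR == OverflowResult::NeverOverflows)
      I.setHasNoUnsignedWrap(true);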

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@225076 91177308-0d34-0410-b5e6-96231b3b80d8

include/llvm/Analysis/ValueTracking.h
lib/Analysis/ValueTracking.cpp
lib/Transforms/InstCombine/InstCombine.h
lib/Transforms/InstCombine/InstCombineCalls.cpp
lib/Transforms/InstCombine/InstCombineMulDivRem.cpp

diff --git a/include/llvm/Analysis/ValueTracking.h b/include/llvm/Analysis/ValueTracking.h
index 6bbf4f46d98ea352fb583587016fe582cc873787..7c139cd06c83ecaa363ad7d72739f7f8a73d73e8 100644
--- a/include/llvm/Analysis/ValueTracking.h
+++ b/include/llvm/Analysis/ValueTracking.h
@@ -217,6 +217,12 @@ namespace llvm {
                                const DataLayout *DL = nullptr,
                                const DominatorTree *DT = nullptr);
 
+  enum class OverflowResult { AlwaysOverflows, MayOverflow, NeverOverflows };
+  OverflowResult computeOverflowForUnsignedMul(Value *LHS, Value *RHS,
+                                               const DataLayout *DL,
+                                               AssumptionTracker *AT,
+                                               const Instruction *CxtI,
+                                               const DominatorTree *DT);
 } // end namespace llvm
 
 #endif
diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp
index 70ee35424eaf9165eb7eef876c4b61807ee37377..cb1e285e8f396cf93d2c2cd1a86f88d1039e12d8 100644
--- a/lib/Analysis/ValueTracking.cpp
+++ b/lib/Analysis/ValueTracking.cpp
@@ -2672,3 +2672,42 @@ bool llvm::isKnownNonNull(const Value *V, const TargetLibraryInfo *TLI) {
 
   return false;
 }
+
+OverflowResult llvm::computeOverflowForUnsignedMul(Value *LHS, Value *RHS,
+                                                   const DataLayout *DL,
+                                                   AssumptionTracker *AT,
+                                                   const Instruction *CxtI,
+                                                   const DominatorTree *DT) {
+  // Multiplying n * m significant bits yields a result of n + m significant
+  // bits. If the total number of significant bits does not exceed the
+  // result bit width (minus 1), there is no overflow.
+  // This means if we have enough leading zero bits in the operands
+  // we can guarantee that the result does not overflow.
+  // Ref: "Hacker's Delight" by Henry Warren
+  unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
+  APInt LHSKnownZero(BitWidth, 0);
+  APInt RHSKnownZero(BitWidth, 0);
+  APInt TmpKnownOne(BitWidth, 0);
+  computeKnownBits(LHS, LHSKnownZero, TmpKnownOne, DL, /*Depth=*/0, AT, CxtI, DT);
+  computeKnownBits(RHS, RHSKnownZero, TmpKnownOne, DL, /*Depth=*/0, AT, CxtI, DT);
+  // Note that underestimating the number of zero bits gives a more
+  // conservative answer.
+  unsigned ZeroBits = LHSKnownZero.countLeadingOnes() +
+                      RHSKnownZero.countLeadingOnes();
+  // First handle the easy case: if we have enough zero bits there's
+  // definitely no overflow.
+  if (ZeroBits >= BitWidth)
+    return OverflowResult::NeverOverflows;
+
+  // Get the largest possible values for each operand.
+  APInt LHSMax = ~LHSKnownZero;
+  APInt RHSMax = ~RHSKnownZero;
+
+  // We know the multiply operation doesn't overflow if the maximum values for
+  // each operand will not overflow after we multiply them together.
+  bool Overflow;
+  LHSMax.umul_ov(RHSMax, Overflow);
+
+  return Overflow ? OverflowResult::MayOverflow
+                  : OverflowResult::NeverOverflows;
+}
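
To make the comment's leading-zero arithmetic concrete, here is a small
self-contained illustration (not part of the patch; plain C++ and
__builtin_clz stand in for APInt and computeKnownBits, and a 32-bit unsigned
int is assumed):

    #include <cassert>
    #include <cstdint>

    // If the operands' leading zero bits together cover the full bit width,
    // an n-bit by m-bit product needs at most n + m <= 32 bits and cannot wrap.
    bool neverOverflowsUMul32(uint32_t A, uint32_t B) {
      unsigned LZ = (A ? __builtin_clz(A) : 32) + (B ? __builtin_clz(B) : 32);
      return LZ >= 32;
    }

    int main() {
      assert(neverOverflowsUMul32(0xFFFFu, 0xFFFFu));   // 16 + 16 zeros: 0xFFFE0001 fits
      assert(!neverOverflowsUMul32(0x1FFFFu, 0xFFFFu)); // 15 + 16 zeros: cannot prove it
      return 0;
    }

As in the patch, the test is deliberately conservative: when it cannot prove
anything it reports the ambiguous case rather than claiming an overflow.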
diff --git a/lib/Transforms/InstCombine/InstCombine.h b/lib/Transforms/InstCombine/InstCombine.h
index b4d1efc1a92680ae6d1ad604b6867f2638e9eba8..96edc793175debbc4752cc6a6c2a7a0260825ccd 100644
--- a/lib/Transforms/InstCombine/InstCombine.h
+++ b/lib/Transforms/InstCombine/InstCombine.h
@@ -286,7 +286,6 @@ private:
   bool WillNotOverflowSignedSub(Value *LHS, Value *RHS, Instruction *CxtI);
   bool WillNotOverflowUnsignedSub(Value *LHS, Value *RHS, Instruction *CxtI);
   bool WillNotOverflowSignedMul(Value *LHS, Value *RHS, Instruction *CxtI);
-  bool WillNotOverflowUnsignedMul(Value *LHS, Value *RHS, Instruction *CxtI);
   Value *EmitGEPOffset(User *GEP);
   Instruction *scalarizePHI(ExtractElementInst &EI, PHINode *PN);
   Value *EvaluateInDifferentElementOrder(Value *V, ArrayRef<int> Mask);
@@ -388,6 +387,10 @@ public:
     return llvm::ComputeSignBit(V, KnownZero, KnownOne, DL, Depth, AT, CxtI,
                                 DT);
   }
+  OverflowResult computeOverflowForUnsignedMul(Value *LHS, Value *RHS,
+                                               const Instruction *CxtI) {
+    return llvm::computeOverflowForUnsignedMul(LHS, RHS, DL, AT, CxtI, DT);
+  }
 
 private:
   /// SimplifyAssociativeOrCommutative - This performs a few simplifications for
diff --git a/lib/Transforms/InstCombine/InstCombineCalls.cpp b/lib/Transforms/InstCombine/InstCombineCalls.cpp
index ec6a61307e1f0a9a49bb5c8d9aa6ad4a868571c2..34caf1a5ab9513f58eacf279f86c77b736c2e1ea 100644
--- a/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -440,24 +440,8 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
   }
   case Intrinsic::umul_with_overflow: {
     Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
-    unsigned BitWidth = cast<IntegerType>(LHS->getType())->getBitWidth();
-
-    APInt LHSKnownZero(BitWidth, 0);
-    APInt LHSKnownOne(BitWidth, 0);
-    computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, 0, II);
-    APInt RHSKnownZero(BitWidth, 0);
-    APInt RHSKnownOne(BitWidth, 0);
-    computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, 0, II);
-
-    // Get the largest possible values for each operand.
-    APInt LHSMax = ~LHSKnownZero;
-    APInt RHSMax = ~RHSKnownZero;
-
-    // If multiplying the maximum values does not overflow then we can turn
-    // this into a plain NUW mul.
-    bool Overflow;
-    LHSMax.umul_ov(RHSMax, Overflow);
-    if (!Overflow) {
+    OverflowResult OR = computeOverflowForUnsignedMul(LHS, RHS, II);
+    if (OR == OverflowResult::NeverOverflows) {
       return CreateOverflowTuple(II, Builder->CreateNUWMul(LHS, RHS), false);
     }
   } // FALL THROUGH
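
For intuition, a hypothetical source-level analogue of the rewrite above (not
code from this commit; it uses the GCC/Clang __builtin_mul_overflow builtin):
once the analysis answers NeverOverflows, the checked multiply degenerates
into an ordinary multiply paired with a constant-false overflow flag, which is
exactly what the nuw mul plus overflow tuple expresses.

    #include <cstdint>
    #include <utility>

    // General case: the overflow flag has to be computed at run time.
    std::pair<uint32_t, bool> checkedMul(uint32_t A, uint32_t B) {
      uint32_t R;
      bool Ov = __builtin_mul_overflow(A, B, &R);
      return {R, Ov};
    }

    // Operands known to fit in 16 bits: their leading zeros cover the full
    // width, so the flag is statically false and only the multiply remains.
    std::pair<uint32_t, bool> checkedMulKnownSmall(uint16_t A, uint16_t B) {
      return {uint32_t(A) * uint32_t(B), false};
    }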
diff --git a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index d444d33ca8a00701ed663189ceb23505a6fbaeca..255e58725830714a461b94d6e9e8273c912a5a1f 100644
--- a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -165,39 +165,6 @@ bool InstCombiner::WillNotOverflowSignedMul(Value *LHS, Value *RHS,
   return false;
 }
 
-/// \brief Return true if we can prove that:
-///    (mul LHS, RHS)  === (mul nuw LHS, RHS)
-bool InstCombiner::WillNotOverflowUnsignedMul(Value *LHS, Value *RHS,
-                                              Instruction *CxtI) {
-  // Multiplying n * m significant bits yields a result of n + m significant
-  // bits. If the total number of significant bits does not exceed the
-  // result bit width (minus 1), there is no overflow.
-  // This means if we have enough leading zero bits in the operands
-  // we can guarantee that the result does not overflow.
-  // Ref: "Hacker's Delight" by Henry Warren
-  unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
-  APInt LHSKnownZero(BitWidth, 0);
-  APInt RHSKnownZero(BitWidth, 0);
-  APInt TmpKnownOne(BitWidth, 0);
-  computeKnownBits(LHS, LHSKnownZero, TmpKnownOne, 0, CxtI);
-  computeKnownBits(RHS, RHSKnownZero, TmpKnownOne, 0, CxtI);
-  // Note that underestimating the number of zero bits gives a more
-  // conservative answer.
-  unsigned ZeroBits = LHSKnownZero.countLeadingOnes() +
-                      RHSKnownZero.countLeadingOnes();
-  // First handle the easy case: if we have enough zero bits there's
-  // definitely no overflow.
-  if (ZeroBits >= BitWidth)
-    return true;
-
-  // There is an ambiguous cases where there can be no overflow:
-  //   ZeroBits == BitWidth - 1
-  // However, determining overflow requires calculating the sign bit of
-  // LHS * RHS/2.
-
-  return false;
-}
-
 Instruction *InstCombiner::visitMul(BinaryOperator &I) {
   bool Changed = SimplifyAssociativeOrCommutative(I);
   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
@@ -413,7 +380,9 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
     I.setHasNoSignedWrap(true);
   }
 
-  if (!I.hasNoUnsignedWrap() && WillNotOverflowUnsignedMul(Op0, Op1, &I)) {
+  if (!I.hasNoUnsignedWrap() &&
+      computeOverflowForUnsignedMul(Op0, Op1, &I) ==
+          OverflowResult::NeverOverflows) {
     Changed = true;
     I.setHasNoUnsignedWrap(true);
   }