From: Sanjoy Das
Date: Sat, 10 Jan 2015 23:41:24 +0000 (+0000)
Subject: Fix PR22179.
X-Git-Url: http://plrg.eecs.uci.edu/git/?p=oota-llvm.git;a=commitdiff_plain;h=7f0da20b979684eccea1a389996804d345fe6a17

Fix PR22179.

We were incorrectly inferring nsw for certain SCEVs. We can be more
aggressive here (see Richard Smith's comment on
http://llvm.org/bugs/show_bug.cgi?id=22179) but this change just
focuses on correctness.

Differential Revision: http://reviews.llvm.org/D6914

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@225591 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index 44d0b11ac72..86852d634ff 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -1737,6 +1737,36 @@ namespace {
 };
 }
 
+// We're trying to construct a SCEV of type `Type' with `Ops' as operands and
+// `OldFlags' as can't-wrap behavior. Infer a more aggressive set of
+// can't-overflow flags for the operation if possible.
+static SCEV::NoWrapFlags
+StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type,
+                      const SmallVectorImpl<const SCEV *> &Ops,
+                      SCEV::NoWrapFlags OldFlags) {
+  using namespace std::placeholders;
+
+  bool CanAnalyze =
+      Type == scAddExpr || Type == scAddRecExpr || Type == scMulExpr;
+  (void)CanAnalyze;
+  assert(CanAnalyze && "don't call from other places!");
+
+  int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
+  SCEV::NoWrapFlags SignOrUnsignWrap =
+      ScalarEvolution::maskFlags(OldFlags, SignOrUnsignMask);
+
+  // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
+  auto IsKnownNonNegative =
+      std::bind(std::mem_fn(&ScalarEvolution::isKnownNonNegative), SE, _1);
+
+  if (SignOrUnsignWrap == SCEV::FlagNSW &&
+      std::all_of(Ops.begin(), Ops.end(), IsKnownNonNegative))
+    return ScalarEvolution::setFlags(OldFlags,
+                                     (SCEV::NoWrapFlags)SignOrUnsignMask);
+
+  return OldFlags;
+}
+
 /// getAddExpr - Get a canonical add expression, or something simpler if
 /// possible.
 const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
@@ -1752,20 +1782,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
            "SCEVAddExpr operand types don't match!");
 #endif
 
-  // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
-  // And vice-versa.
-  int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
-  SCEV::NoWrapFlags SignOrUnsignWrap = maskFlags(Flags, SignOrUnsignMask);
-  if (SignOrUnsignWrap && (SignOrUnsignWrap != SignOrUnsignMask)) {
-    bool All = true;
-    for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(),
-         E = Ops.end(); I != E; ++I)
-      if (!isKnownNonNegative(*I)) {
-        All = false;
-        break;
-      }
-    if (All) Flags = setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
-  }
+  Flags = StrengthenNoWrapFlags(this, scAddExpr, Ops, Flags);
 
   // Sort by complexity, this groups all similar expression types together.
   GroupByComplexity(Ops, LI);
@@ -2174,20 +2191,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
            "SCEVMulExpr operand types don't match!");
 #endif
 
-  // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
-  // And vice-versa.
-  int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
-  SCEV::NoWrapFlags SignOrUnsignWrap = maskFlags(Flags, SignOrUnsignMask);
-  if (SignOrUnsignWrap && (SignOrUnsignWrap != SignOrUnsignMask)) {
-    bool All = true;
-    for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(),
-         E = Ops.end(); I != E; ++I)
-      if (!isKnownNonNegative(*I)) {
-        All = false;
-        break;
-      }
-    if (All) Flags = setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
-  }
+  Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags);
 
   // Sort by complexity, this groups all similar expression types together.
   GroupByComplexity(Ops, LI);
@@ -2653,20 +2657,7 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
   // meaningful BE count at this point (and if we don't, we'd be stuck
   // with a SCEVCouldNotCompute as the cached BE count).
 
-  // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
-  // And vice-versa.
-  int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
-  SCEV::NoWrapFlags SignOrUnsignWrap = maskFlags(Flags, SignOrUnsignMask);
-  if (SignOrUnsignWrap && (SignOrUnsignWrap != SignOrUnsignMask)) {
-    bool All = true;
-    for (SmallVectorImpl<const SCEV *>::const_iterator I = Operands.begin(),
-         E = Operands.end(); I != E; ++I)
-      if (!isKnownNonNegative(*I)) {
-        All = false;
-        break;
-      }
-    if (All) Flags = setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
-  }
+  Flags = StrengthenNoWrapFlags(this, scAddRecExpr, Operands, Flags);
 
   // Canonicalize nested AddRecs in by nesting them in order of loop depth.
   if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
diff --git a/test/Analysis/ScalarEvolution/pr22179.ll b/test/Analysis/ScalarEvolution/pr22179.ll
new file mode 100644
index 00000000000..d9fb5104436
--- /dev/null
+++ b/test/Analysis/ScalarEvolution/pr22179.ll
@@ -0,0 +1,28 @@
+; RUN: opt -analyze -scalar-evolution < %s | FileCheck %s
+
+%struct.anon = type { i8 }
+%struct.S = type { i32 }
+
+@a = common global %struct.anon zeroinitializer, align 1
+@b = common global %struct.S zeroinitializer, align 4
+
+; Function Attrs: nounwind ssp uwtable
+define i32 @main() {
+; CHECK-LABEL: Classifying expressions for: @main
+  store i8 0, i8* getelementptr inbounds (%struct.anon* @a, i64 0, i32 0), align 1
+  br label %loop
+
+loop:
+  %storemerge1 = phi i8 [ 0, %0 ], [ %inc, %loop ]
+  %m = load volatile i32* getelementptr inbounds (%struct.S* @b, i64 0, i32 0), align 4
+  %inc = add nuw i8 %storemerge1, 1
+; CHECK: %inc = add nuw i8 %storemerge1, 1
+; CHECK-NEXT: --> {1,+,1}<nuw><%loop>
+; CHECK-NOT: --> {1,+,1}<nuw><nsw><%loop>
+  %exitcond = icmp eq i8 %inc, -128
+  br i1 %exitcond, label %exit, label %loop
+
+exit:
+  store i8 -128, i8* getelementptr inbounds (%struct.anon* @a, i64 0, i32 0), align 1
+  ret i32 0
+}
diff --git a/test/CodeGen/AArch64/arm64-scaled_iv.ll b/test/CodeGen/AArch64/arm64-scaled_iv.ll
index 987373e542a..63428df9610 100644
--- a/test/CodeGen/AArch64/arm64-scaled_iv.ll
+++ b/test/CodeGen/AArch64/arm64-scaled_iv.ll
@@ -20,7 +20,7 @@ for.body:                                         ; preds = %for.body, %entry
   %arrayidx = getelementptr inbounds double* %b, i64 %tmp
   %tmp1 = load double* %arrayidx, align 8
 ; The induction variable should carry the scaling factor: 1 * 8 = 8.
-; CHECK: [[IVNEXT]] = add nuw nsw i64 [[IV]], 8
+; CHECK: [[IVNEXT]] = add nuw i64 [[IV]], 8
   %indvars.iv.next = add i64 %indvars.iv, 1
   %arrayidx2 = getelementptr inbounds double* %c, i64 %indvars.iv.next
   %tmp2 = load double* %arrayidx2, align 8
diff --git a/test/CodeGen/X86/avoid_complex_am.ll b/test/CodeGen/X86/avoid_complex_am.ll
index 7f095190ab8..e5e7bd23a64 100644
--- a/test/CodeGen/X86/avoid_complex_am.ll
+++ b/test/CodeGen/X86/avoid_complex_am.ll
@@ -22,7 +22,7 @@ for.body:                                         ; preds = %for.body, %entry
   %arrayidx = getelementptr inbounds double* %b, i64 %tmp
   %tmp1 = load double* %arrayidx, align 8
 ; The induction variable should carry the scaling factor: 1.
-; CHECK: [[IVNEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK: [[IVNEXT]] = add nuw i64 [[IV]], 1
   %indvars.iv.next = add i64 %indvars.iv, 1
   %arrayidx2 = getelementptr inbounds double* %c, i64 %indvars.iv.next
   %tmp2 = load double* %arrayidx2, align 8
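
Note (illustrative, not part of the commit above): why the inference is now
one-directional. For operands that are all non-negative, "no signed wrap"
implies "no unsigned wrap", but not the reverse. If a, b >= 0 and a + b does
not wrap signed, the true i8 sum lies in [0, 127], a subset of the unsigned
range [0, 255], so nuw follows for free. The converse fails at exactly the
boundary pr22179.ll exercises: 127 + 1 = 128 stays below 256 (nuw holds) but
exceeds 127, wrapping the signed result to -128 (nsw is false). The removed
"And vice-versa" branch inferred nsw from nuw and so got this case wrong. The
following self-contained C++ sketch (standalone, standard library only; not
from the LLVM tree) walks through the same 8-bit arithmetic:

#include <cstdint>
#include <cstdio>

int main() {
  // Two i8 operands, both non-negative when read as signed values.
  uint8_t A = 127, B = 1;

  // Unsigned view: 127 + 1 = 128 <= 255, so the addition does not wrap
  // unsigned -- an "add nuw" on these operands is legitimate.
  uint8_t USum = static_cast<uint8_t>(A + B); // 128

  // Signed view: 127 + 1 = 128 > INT8_MAX, so in two's complement the
  // result wraps to -128 -- tagging this addition "nsw" would be wrong.
  // (The uint8_t -> int8_t conversion of 128 is implementation-defined
  // before C++20, but yields -128 on all mainstream compilers.)
  int8_t SSum = static_cast<int8_t>(USum); // -128

  std::printf("unsigned: %u, signed: %d\n",
              static_cast<unsigned>(USum), static_cast<int>(SSum));
  return 0;
}

Compiled and run, this prints "unsigned: 128, signed: -128": the nuw fact
survives while the nsw fact does not, which is why StrengthenNoWrapFlags only
strengthens nsw-with-non-negative-operands into nuw and never the other way
around.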