From: Steve King
Date: Tue, 27 Oct 2015 00:14:06 +0000 (+0000)
Subject: Fix llc crash processing S/UREM for -Oz builds caused by rL250825.
X-Git-Url: http://plrg.eecs.uci.edu/git/?a=commitdiff_plain;h=6f257342a1d02b0a8b7a03d5d7c40ad21c20af4f;p=oota-llvm.git

Fix llc crash processing S/UREM for -Oz builds caused by rL250825.

When taking the remainder of a value divided by a constant, visitREM()
attempts to convert the REM into a longer but faster sequence of
instructions. This conversion calls combine() on a speculative DIV
instruction. Since rL250825, that combine() call may return a DIVREM,
corrupting nearby nodes until the flow eventually hits an unreachable().

This patch adds a check that prevents visitREM() from attempting the
conversion in cases where a DIVREM is possible, along with a test case.

See http://reviews.llvm.org/D14035

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@251373 91177308-0d34-0410-b5e6-96231b3b80d8
---
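For background, the conversion visitREM() attempts is the standard rewrite
of a remainder in terms of its quotient. A minimal standalone sketch of the
arithmetic in plain C++ (the name remViaDiv is illustrative only, not part
of the patch):

  #include <cassert>
  #include <cstdint>

  // visitREM() lowers X % C to X - (X / C) * C at the DAG level so that the
  // division-by-constant logic can replace the DIV with multiplies and
  // shifts. The identity holds for any nonzero divisor under C++'s
  // truncating division, which matches the DAG's SDIV/UDIV semantics.
  static int64_t remViaDiv(int64_t x, int64_t c) {
    assert(c != 0 && "remainder by zero is undefined");
    return x - (x / c) * c;
  }

  int main() {
    for (int64_t x = -1000; x <= 1000; ++x)
      assert(remViaDiv(x, 10) == x % 10);
    return 0;
  }

The expanded sequence is faster than a hardware divide but fatter, which is
why the new guard keys off isIntDivCheap(): when the target reports integer
division as cheap (as -Oz builds do on x86 since rL250825), visitREM() now
leaves the REM alone so that useDivRem() can form a DIVREM instead.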
diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 7cc318ec423..957aa7bf90b 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -2329,8 +2329,11 @@ SDValue DAGCombiner::visitSDIV(SDNode *N) {
     return Op;
 
   // sdiv, srem -> sdivrem
-  if (SDValue DivRem = useDivRem(N))
-    return DivRem;
+  // If the divisor is constant, then return DIVREM only if isIntDivCheap() is
+  // true. Otherwise, we break the simplification logic in visitREM().
+  if (!N1C || TLI.isIntDivCheap(N->getValueType(0), Attr))
+    if (SDValue DivRem = useDivRem(N))
+      return DivRem;
 
   // undef / X -> 0
   if (N0.getOpcode() == ISD::UNDEF)
@@ -2390,8 +2393,11 @@ SDValue DAGCombiner::visitUDIV(SDNode *N) {
     return Op;
 
-  // sdiv, srem -> sdivrem
-  if (SDValue DivRem = useDivRem(N))
-    return DivRem;
+  // udiv, urem -> udivrem
+  // If the divisor is constant, then return DIVREM only if isIntDivCheap() is
+  // true. Otherwise, we break the simplification logic in visitREM().
+  if (!N1C || TLI.isIntDivCheap(N->getValueType(0), Attr))
+    if (SDValue DivRem = useDivRem(N))
+      return DivRem;
 
   // undef / X -> 0
   if (N0.getOpcode() == ISD::UNDEF)
@@ -2448,14 +2454,24 @@ SDValue DAGCombiner::visitREM(SDNode *N) {
     }
   }
 
+  AttributeSet Attr = DAG.getMachineFunction().getFunction()->getAttributes();
+
   // If X/C can be simplified by the division-by-constant logic, lower
   // X%C to the equivalent of X-X/C*C.
-  if (N1C && !N1C->isNullValue()) {
+  // To avoid mangling nodes, this simplification requires that the combine()
+  // call for the speculative DIV not cause a DIVREM conversion. We guard
+  // against this by skipping the simplification when isIntDivCheap() is true:
+  // when div is not cheap, combine() will not return a DIVREM. Regardless,
+  // checking cheapness here makes sense, since the simplification results in
+  // fatter code.
+  if (N1C && !N1C->isNullValue() && !TLI.isIntDivCheap(VT, Attr)) {
     unsigned DivOpcode = isSigned ? ISD::SDIV : ISD::UDIV;
     SDValue Div = DAG.getNode(DivOpcode, DL, VT, N0, N1);
     AddToWorklist(Div.getNode());
     SDValue OptimizedDiv = combine(Div.getNode());
     if (OptimizedDiv.getNode() && OptimizedDiv.getNode() != Div.getNode()) {
+      assert((OptimizedDiv.getOpcode() != ISD::UDIVREM) &&
+             (OptimizedDiv.getOpcode() != ISD::SDIVREM));
       SDValue Mul = DAG.getNode(ISD::MUL, DL, VT, OptimizedDiv, N1);
       SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, N0, Mul);
       AddToWorklist(Mul.getNode());
diff --git a/test/CodeGen/X86/rem_crash.ll b/test/CodeGen/X86/rem_crash.ll
new file mode 100644
index 00000000000..8363b22ab65
--- /dev/null
+++ b/test/CodeGen/X86/rem_crash.ll
@@ -0,0 +1,257 @@
+; RUN: llc < %s
+
+define i8 @test_minsize_uu8(i8 %x) minsize optsize {
+entry:
+  %0 = udiv i8 %x, 10
+  %1 = urem i8 %x, 10
+  %res = add i8 %0, %1
+  ret i8 %res
+}
+
+define i8 @test_minsize_ss8(i8 %x) minsize optsize {
+entry:
+  %0 = sdiv i8 %x, 10
+  %1 = srem i8 %x, 10
+  %res = add i8 %0, %1
+  ret i8 %res
+}
+
+define i8 @test_minsize_us8(i8 %x) minsize optsize {
+entry:
+  %0 = udiv i8 %x, 10
+  %1 = srem i8 %x, 10
+  %res = add i8 %0, %1
+  ret i8 %res
+}
+
+define i8 @test_minsize_su8(i8 %x) minsize optsize {
+entry:
+  %0 = sdiv i8 %x, 10
+  %1 = urem i8 %x, 10
+  %res = add i8 %0, %1
+  ret i8 %res
+}
+
+define i16 @test_minsize_uu16(i16 %x) minsize optsize {
+entry:
+  %0 = udiv i16 %x, 10
+  %1 = urem i16 %x, 10
+  %res = add i16 %0, %1
+  ret i16 %res
+}
+
+define i16 @test_minsize_ss16(i16 %x) minsize optsize {
+entry:
+  %0 = sdiv i16 %x, 10
+  %1 = srem i16 %x, 10
+  %res = add i16 %0, %1
+  ret i16 %res
+}
+
+define i16 @test_minsize_us16(i16 %x) minsize optsize {
+entry:
+  %0 = udiv i16 %x, 10
+  %1 = srem i16 %x, 10
+  %res = add i16 %0, %1
+  ret i16 %res
+}
+
+define i16 @test_minsize_su16(i16 %x) minsize optsize {
+entry:
+  %0 = sdiv i16 %x, 10
+  %1 = urem i16 %x, 10
+  %res = add i16 %0, %1
+  ret i16 %res
+}
+
+define i32 @test_minsize_uu32(i32 %x) minsize optsize {
+entry:
+  %0 = udiv i32 %x, 10
+  %1 = urem i32 %x, 10
+  %res = add i32 %0, %1
+  ret i32 %res
+}
+
+define i32 @test_minsize_ss32(i32 %x) minsize optsize {
+entry:
+  %0 = sdiv i32 %x, 10
+  %1 = srem i32 %x, 10
+  %res = add i32 %0, %1
+  ret i32 %res
+}
+
+define i32 @test_minsize_us32(i32 %x) minsize optsize {
+entry:
+  %0 = udiv i32 %x, 10
+  %1 = srem i32 %x, 10
+  %res = add i32 %0, %1
+  ret i32 %res
+}
+
+define i32 @test_minsize_su32(i32 %x) minsize optsize {
+entry:
+  %0 = sdiv i32 %x, 10
+  %1 = urem i32 %x, 10
+  %res = add i32 %0, %1
+  ret i32 %res
+}
+
+define i64 @test_minsize_uu64(i64 %x) minsize optsize {
+entry:
+  %0 = udiv i64 %x, 10
+  %1 = urem i64 %x, 10
+  %res = add i64 %0, %1
+  ret i64 %res
+}
+
+define i64 @test_minsize_ss64(i64 %x) minsize optsize {
+entry:
+  %0 = sdiv i64 %x, 10
+  %1 = srem i64 %x, 10
+  %res = add i64 %0, %1
+  ret i64 %res
+}
+
+define i64 @test_minsize_us64(i64 %x) minsize optsize {
+entry:
+  %0 = udiv i64 %x, 10
+  %1 = srem i64 %x, 10
+  %res = add i64 %0, %1
+  ret i64 %res
+}
+
+define i64 @test_minsize_su64(i64 %x) minsize optsize {
+entry:
+  %0 = sdiv i64 %x, 10
+  %1 = urem i64 %x, 10
+  %res = add i64 %0, %1
+  ret i64 %res
+}
+
+define i8 @test_uu8(i8 %x) optsize {
+entry:
+  %0 = udiv i8 %x, 10
+  %1 = urem i8 %x, 10
+  %res = add i8 %0, %1
+  ret i8 %res
+}
+
+define i8 @test_ss8(i8 %x) optsize {
+entry:
+  %0 = sdiv i8 %x, 10
+  %1 = srem i8 %x, 10
+  %res = add i8 %0, %1
+  ret i8 %res
+}
+
+define i8 @test_us8(i8 %x) optsize {
+entry:
+  %0 = udiv i8 %x, 10
+  %1 = srem i8 %x, 10
+  %res = add i8 %0, %1
+  ret i8 %res
+}
+
+define i8 @test_su8(i8 %x) optsize {
+entry:
+  %0 = sdiv i8 %x, 10
+  %1 = urem i8 %x, 10
+  %res = add i8 %0, %1
+  ret i8 %res
+}
+
+define i16 @test_uu16(i16 %x) optsize {
+entry:
+  %0 = udiv i16 %x, 10
+  %1 = urem i16 %x, 10
+  %res = add i16 %0, %1
+  ret i16 %res
+}
+
+define i16 @test_ss16(i16 %x) optsize {
+entry:
+  %0 = sdiv i16 %x, 10
+  %1 = srem i16 %x, 10
+  %res = add i16 %0, %1
+  ret i16 %res
+}
+
+define i16 @test_us16(i16 %x) optsize {
+entry:
+  %0 = udiv i16 %x, 10
+  %1 = srem i16 %x, 10
+  %res = add i16 %0, %1
+  ret i16 %res
+}
+
+define i16 @test_su16(i16 %x) optsize {
+entry:
+  %0 = sdiv i16 %x, 10
+  %1 = urem i16 %x, 10
+  %res = add i16 %0, %1
+  ret i16 %res
+}
+
+define i32 @test_uu32(i32 %x) optsize {
+entry:
+  %0 = udiv i32 %x, 10
+  %1 = urem i32 %x, 10
+  %res = add i32 %0, %1
+  ret i32 %res
+}
+
+define i32 @test_ss32(i32 %x) optsize {
+entry:
+  %0 = sdiv i32 %x, 10
+  %1 = srem i32 %x, 10
+  %res = add i32 %0, %1
+  ret i32 %res
+}
+
+define i32 @test_us32(i32 %x) optsize {
+entry:
+  %0 = udiv i32 %x, 10
+  %1 = srem i32 %x, 10
+  %res = add i32 %0, %1
+  ret i32 %res
+}
+
+define i32 @test_su32(i32 %x) optsize {
+entry:
+  %0 = sdiv i32 %x, 10
+  %1 = urem i32 %x, 10
+  %res = add i32 %0, %1
+  ret i32 %res
+}
+
+define i64 @test_uu64(i64 %x) optsize {
+entry:
+  %0 = udiv i64 %x, 10
+  %1 = urem i64 %x, 10
+  %res = add i64 %0, %1
+  ret i64 %res
+}
+
+define i64 @test_ss64(i64 %x) optsize {
+entry:
+  %0 = sdiv i64 %x, 10
+  %1 = srem i64 %x, 10
+  %res = add i64 %0, %1
+  ret i64 %res
+}
+
+define i64 @test_us64(i64 %x) optsize {
+entry:
+  %0 = udiv i64 %x, 10
+  %1 = srem i64 %x, 10
+  %res = add i64 %0, %1
+  ret i64 %res
+}
+
+define i64 @test_su64(i64 %x) optsize {
+entry:
+  %0 = sdiv i64 %x, 10
+  %1 = urem i64 %x, 10
+  %res = add i64 %0, %1
+  ret i64 %res
+}
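
A note on the test's structure: each function pairs a div and a rem of the
same operands, which is exactly the shape useDivRem() matches, across both
signednesses and the common integer widths. The minsize variants exercise
the -Oz configuration that crashed (minsize is what makes isIntDivCheap()
return true on x86 as of rL250825), while the optsize-only variants cover
the path where visitREM()'s simplification still runs. The mixed-signedness
pairs (us, su) presumably guard against forming a DIVREM from a mismatched
div/rem pair. The RUN line only checks that llc survives; a crash
regression test needs no FileCheck patterns.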