From: Paul Robinson
Date: Mon, 3 Nov 2014 18:19:26 +0000 (+0000)
Subject: Normally an 'optnone' function goes through fast-isel, which does not
X-Git-Url: http://plrg.eecs.uci.edu/git/?p=oota-llvm.git;a=commitdiff_plain;h=c3e82bf9f5bdf4900d62e2da5136b8f9a99ac45d

Normally an 'optnone' function goes through fast-isel, which does not
call DAGCombiner. But we ran into a case (on Windows) where the
calling convention causes argument lowering to bail out of fast-isel,
and we end up in CodeGenAndEmitDAG() which does run DAGCombiner.
So, we need to make DAGCombiner check for 'optnone' after all.

Commit includes the test that found this, plus another one that
got missed in the original optnone work.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@221168 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 0d5cabaaf27..107f5a12693 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -1155,6 +1155,13 @@ void DAGCombiner::Run(CombineLevel AtLevel) {
   LegalOperations = Level >= AfterLegalizeVectorOps;
   LegalTypes = Level >= AfterLegalizeTypes;
 
+  // Early exit if this basic block is in an optnone function.
+  AttributeSet FnAttrs =
+    DAG.getMachineFunction().getFunction()->getAttributes();
+  if (FnAttrs.hasAttribute(AttributeSet::FunctionIndex,
+                           Attribute::OptimizeNone))
+    return;
+
   // Add all the dag nodes to the worklist.
   for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
        E = DAG.allnodes_end(); I != E; ++I)
diff --git a/test/CodeGen/X86/fastmath-optnone.ll b/test/CodeGen/X86/fastmath-optnone.ll
new file mode 100644
index 00000000000..0caadff8916
--- /dev/null
+++ b/test/CodeGen/X86/fastmath-optnone.ll
@@ -0,0 +1,35 @@
+; RUN: llc < %s -mcpu=corei7 -march=x86-64 -mattr=+sse2 | FileCheck %s
+; Verify that floating-point operations inside 'optnone' functions
+; are not optimized even if unsafe-fp-math is set.
+
+define float @foo(float %x) #0 {
+entry:
+  %add = fadd fast float %x, %x
+  %add1 = fadd fast float %add, %x
+  ret float %add1
+}
+
+; CHECK-LABEL: @foo
+; CHECK-NOT: add
+; CHECK: mul
+; CHECK-NOT: add
+; CHECK: ret
+
+define float @fooWithOptnone(float %x) #1 {
+entry:
+  %add = fadd fast float %x, %x
+  %add1 = fadd fast float %add, %x
+  ret float %add1
+}
+
+; CHECK-LABEL: @fooWithOptnone
+; CHECK-NOT: mul
+; CHECK: add
+; CHECK-NOT: mul
+; CHECK: add
+; CHECK-NOT: mul
+; CHECK: ret
+
+
+attributes #0 = { "unsafe-fp-math"="true" }
+attributes #1 = { noinline optnone "unsafe-fp-math"="true" }
diff --git a/test/Transforms/FunctionAttrs/optnone-simple.ll b/test/Transforms/FunctionAttrs/optnone-simple.ll
new file mode 100644
index 00000000000..9d0f8e3710a
--- /dev/null
+++ b/test/Transforms/FunctionAttrs/optnone-simple.ll
@@ -0,0 +1,135 @@
+; RUN: opt -O3 -S < %s | FileCheck %s
+; Show 'optnone' suppresses optimizations.
+
+; Two attribute groups that differ only by 'optnone'.
+; 'optnone' requires 'noinline' so #0 is 'noinline' by itself,
+; even though it would otherwise be irrelevant to this example.
+attributes #0 = { noinline }
+attributes #1 = { noinline optnone }
+
+; int iadd(int a, int b){ return a + b; }
+
+define i32 @iadd_optimize(i32 %a, i32 %b) #0 {
+entry:
+  %a.addr = alloca i32, align 4
+  %b.addr = alloca i32, align 4
+  store i32 %a, i32* %a.addr, align 4
+  store i32 %b, i32* %b.addr, align 4
+  %0 = load i32* %a.addr, align 4
+  %1 = load i32* %b.addr, align 4
+  %add = add nsw i32 %0, %1
+  ret i32 %add
+}
+
+; CHECK-LABEL: @iadd_optimize
+; CHECK-NOT: alloca
+; CHECK-NOT: store
+; CHECK-NOT: load
+; CHECK: ret
+
+define i32 @iadd_optnone(i32 %a, i32 %b) #1 {
+entry:
+  %a.addr = alloca i32, align 4
+  %b.addr = alloca i32, align 4
+  store i32 %a, i32* %a.addr, align 4
+  store i32 %b, i32* %b.addr, align 4
+  %0 = load i32* %a.addr, align 4
+  %1 = load i32* %b.addr, align 4
+  %add = add nsw i32 %0, %1
+  ret i32 %add
+}
+
+; CHECK-LABEL: @iadd_optnone
+; CHECK: alloca i32
+; CHECK: alloca i32
+; CHECK: store i32
+; CHECK: store i32
+; CHECK: load i32
+; CHECK: load i32
+; CHECK: add nsw i32
+; CHECK: ret i32
+
+; float fsub(float a, float b){ return a - b; }
+
+define float @fsub_optimize(float %a, float %b) #0 {
+entry:
+  %a.addr = alloca float, align 4
+  %b.addr = alloca float, align 4
+  store float %a, float* %a.addr, align 4
+  store float %b, float* %b.addr, align 4
+  %0 = load float* %a.addr, align 4
+  %1 = load float* %b.addr, align 4
+  %sub = fsub float %0, %1
+  ret float %sub
+}
+
+; CHECK-LABEL: @fsub_optimize
+; CHECK-NOT: alloca
+; CHECK-NOT: store
+; CHECK-NOT: load
+; CHECK: ret
+
+define float @fsub_optnone(float %a, float %b) #1 {
+entry:
+  %a.addr = alloca float, align 4
+  %b.addr = alloca float, align 4
+  store float %a, float* %a.addr, align 4
+  store float %b, float* %b.addr, align 4
+  %0 = load float* %a.addr, align 4
+  %1 = load float* %b.addr, align 4
+  %sub = fsub float %0, %1
+  ret float %sub
+}
+
+; CHECK-LABEL: @fsub_optnone
+; CHECK: alloca float
+; CHECK: alloca float
+; CHECK: store float
+; CHECK: store float
+; CHECK: load float
+; CHECK: load float
+; CHECK: fsub float
+; CHECK: ret float
+
+; typedef float __attribute__((ext_vector_type(4))) float4;
+; float4 vmul(float4 a, float4 b){ return a * b; }
+
+define <4 x float> @vmul_optimize(<4 x float> %a, <4 x float> %b) #0 {
+entry:
+  %a.addr = alloca <4 x float>, align 16
+  %b.addr = alloca <4 x float>, align 16
+  store <4 x float> %a, <4 x float>* %a.addr, align 16
+  store <4 x float> %b, <4 x float>* %b.addr, align 16
+  %0 = load <4 x float>* %a.addr, align 16
+  %1 = load <4 x float>* %b.addr, align 16
+  %mul = fmul <4 x float> %0, %1
+  ret <4 x float> %mul
+}
+
+; CHECK-LABEL: @vmul_optimize
+; CHECK-NOT: alloca
+; CHECK-NOT: store
+; CHECK-NOT: load
+; CHECK: ret
+
+define <4 x float> @vmul_optnone(<4 x float> %a, <4 x float> %b) #1 {
+entry:
+  %a.addr = alloca <4 x float>, align 16
+  %b.addr = alloca <4 x float>, align 16
+  store <4 x float> %a, <4 x float>* %a.addr, align 16
+  store <4 x float> %b, <4 x float>* %b.addr, align 16
+  %0 = load <4 x float>* %a.addr, align 16
+  %1 = load <4 x float>* %b.addr, align 16
+  %mul = fmul <4 x float> %0, %1
+  ret <4 x float> %mul
+}
+
+; CHECK-LABEL: @vmul_optnone
+; CHECK: alloca <4 x float>
+; CHECK: alloca <4 x float>
+; CHECK: store <4 x float>
+; CHECK: store <4 x float>
+; CHECK: load <4 x float>
+; CHECK: load <4 x float>
+; CHECK: fmul <4 x float>
+; CHECK: ret
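
Note on the attribute check in the DAGCombiner hunk: the patch goes through
AttributeSet::FunctionIndex to test for OptimizeNone. The same test can also
be written with the Function::hasFnAttribute convenience accessor, assuming
that accessor is available in the tree being patched. The helper below is a
hypothetical sketch of that pattern, not code from this commit; the function
name shouldSkipCombines is invented for illustration.

    // Hypothetical sketch only -- not part of the commit above.
    // Same early-exit idea as the DAGCombiner change, phrased with
    // Function::hasFnAttribute instead of AttributeSet::FunctionIndex.
    #include "llvm/IR/Attributes.h"
    #include "llvm/IR/Function.h"

    using namespace llvm;

    // Return true if codegen-level combines should be skipped for F
    // because the front end marked it 'optnone'.
    static bool shouldSkipCombines(const Function &F) {
      return F.hasFnAttribute(Attribute::OptimizeNone);
    }

Inside DAGCombiner::Run the IR function is reached via
DAG.getMachineFunction().getFunction(), as the diff above shows, and the
check must sit before any nodes are added to the combine worklist so that
an optnone function's DAG is left untouched.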