From: Simon Pilgrim
Date: Sat, 17 Oct 2015 13:04:42 +0000 (+0000)
Subject: [X86][FastISel] Teach how to select SSE4A nontemporal stores.
X-Git-Url: http://plrg.eecs.uci.edu/git/?a=commitdiff_plain;h=1157bfddb957c70ccca787fce406c76efbd50ca2;hp=576416a6905266d5d2e66c6b60f7fd198678b3ea;p=oota-llvm.git

[X86][FastISel] Teach how to select SSE4A nontemporal stores.

Add FastISel support for SSE4A scalar float / double non-temporal stores.

Follow-up to D13698.

Differential Revision: http://reviews.llvm.org/D13773

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@250610 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp
index 0771cbeb4a2..263c133698c 100644
--- a/lib/Target/X86/X86FastISel.cpp
+++ b/lib/Target/X86/X86FastISel.cpp
@@ -434,6 +434,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill,
                                    X86AddressMode &AM,
                                    MachineMemOperand *MMO, bool Aligned) {
   bool HasSSE2 = Subtarget->hasSSE2();
+  bool HasSSE4A = Subtarget->hasSSE4A();
   bool HasAVX = Subtarget->hasAVX();
   bool IsNonTemporal = MMO && MMO->isNonTemporal();
 
@@ -461,12 +462,22 @@ bool X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg, bool ValIsKill,
     Opc = (IsNonTemporal && HasSSE2) ? X86::MOVNTI_64mr : X86::MOV64mr;
     break;
   case MVT::f32:
-    Opc = X86ScalarSSEf32 ?
-          (HasAVX ? X86::VMOVSSmr : X86::MOVSSmr) : X86::ST_Fp32m;
+    if (X86ScalarSSEf32) {
+      if (IsNonTemporal && HasSSE4A)
+        Opc = X86::MOVNTSS;
+      else
+        Opc = HasAVX ? X86::VMOVSSmr : X86::MOVSSmr;
+    } else
+      Opc = X86::ST_Fp32m;
     break;
   case MVT::f64:
-    Opc = X86ScalarSSEf64 ?
-          (HasAVX ? X86::VMOVSDmr : X86::MOVSDmr) : X86::ST_Fp64m;
+    if (X86ScalarSSEf64) {
+      if (IsNonTemporal && HasSSE4A)
+        Opc = X86::MOVNTSD;
+      else
+        Opc = HasAVX ? X86::VMOVSDmr : X86::MOVSDmr;
+    } else
+      Opc = X86::ST_Fp64m;
     break;
   case MVT::v4f32:
     if (Aligned) {
diff --git a/test/CodeGen/X86/fast-isel-nontemporal.ll b/test/CodeGen/X86/fast-isel-nontemporal.ll
index 824d8c30495..b601c4562fe 100644
--- a/test/CodeGen/X86/fast-isel-nontemporal.ll
+++ b/test/CodeGen/X86/fast-isel-nontemporal.ll
@@ -1,4 +1,5 @@
-; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+sse2 -fast-isel -O0 < %s | FileCheck %s --check-prefix=ALL --check-prefix=SSE2
+; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+sse2 -fast-isel -O0 < %s | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+sse4a -fast-isel -O0 < %s | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE4A
 ; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+avx -fast-isel -O0 < %s | FileCheck %s --check-prefix=ALL --check-prefix=AVX
 
 define void @test_nti32(i32* nocapture %ptr, i32 %X) {
@@ -21,12 +22,52 @@ entry:
   ret void
 }
 
-define void @test_nt4xfloat(<4 x float>* nocapture %ptr, <4 x float> %X) {
-; SSE2-LABEL: test_nt4xfloat:
+define void @test_ntfloat(float* nocapture %ptr, float %X) {
+; SSE2-LABEL: test_ntfloat:
+; SSE2:       # BB#0: # %entry
+; SSE2-NEXT:    movss %xmm0, (%rdi)
+; SSE2-NEXT:    retq
+;
+; SSE4A-LABEL: test_ntfloat:
+; SSE4A:       # BB#0: # %entry
+; SSE4A-NEXT:    movntss %xmm0, (%rdi)
+; SSE4A-NEXT:    retq
+;
+; AVX-LABEL: test_ntfloat:
+; AVX:       # BB#0: # %entry
+; AVX-NEXT:    vmovss %xmm0, (%rdi)
+; AVX-NEXT:    retq
+entry:
+  store float %X, float* %ptr, align 4, !nontemporal !1
+  ret void
+}
+
+define void @test_ntdouble(double* nocapture %ptr, double %X) {
+; SSE2-LABEL: test_ntdouble:
 ; SSE2:       # BB#0: # %entry
-; SSE2-NEXT:    movntps %xmm0, (%rdi)
+; SSE2-NEXT:    movsd %xmm0, (%rdi)
 ; SSE2-NEXT:    retq
 ;
+; SSE4A-LABEL: test_ntdouble:
+; SSE4A:       # BB#0: # %entry
+; SSE4A-NEXT:    movntsd %xmm0, (%rdi)
+; SSE4A-NEXT:    retq
+;
+; AVX-LABEL: test_ntdouble:
+; AVX:       # BB#0: # %entry
+; AVX-NEXT:    vmovsd %xmm0, (%rdi)
+; AVX-NEXT:    retq
+entry:
+  store double %X, double* %ptr, align 8, !nontemporal !1
+  ret void
+}
+
+define void @test_nt4xfloat(<4 x float>* nocapture %ptr, <4 x float> %X) {
+; SSE-LABEL: test_nt4xfloat:
+; SSE:       # BB#0: # %entry
+; SSE-NEXT:    movntps %xmm0, (%rdi)
+; SSE-NEXT:    retq
+;
 ; AVX-LABEL: test_nt4xfloat:
 ; AVX:       # BB#0: # %entry
 ; AVX-NEXT:    vmovntps %xmm0, (%rdi)
@@ -37,10 +78,10 @@ entry:
 }
 
 define void @test_nt2xdouble(<2 x double>* nocapture %ptr, <2 x double> %X) {
-; SSE2-LABEL: test_nt2xdouble:
-; SSE2:       # BB#0: # %entry
-; SSE2-NEXT:    movntpd %xmm0, (%rdi)
-; SSE2-NEXT:    retq
+; SSE-LABEL: test_nt2xdouble:
+; SSE:       # BB#0: # %entry
+; SSE-NEXT:    movntpd %xmm0, (%rdi)
+; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_nt2xdouble:
 ; AVX:       # BB#0: # %entry
@@ -52,10 +93,10 @@ entry:
 }
 
 define void @test_nt2xi64(<2 x i64>* nocapture %ptr, <2 x i64> %X) {
-; SSE2-LABEL: test_nt2xi64:
-; SSE2:       # BB#0: # %entry
-; SSE2-NEXT:    movntdq %xmm0, (%rdi)
-; SSE2-NEXT:    retq
+; SSE-LABEL: test_nt2xi64:
+; SSE:       # BB#0: # %entry
+; SSE-NEXT:    movntdq %xmm0, (%rdi)
+; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: test_nt2xi64:
 ; AVX:       # BB#0: # %entry
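
For reference, the !nontemporal metadata exercised by the tests above is ordinary instruction metadata on a scalar store. The C++ sketch below is not part of this patch and uses an illustrative helper name; it only shows roughly how a frontend can emit such a store through IRBuilder (Clang does the equivalent for __builtin_nontemporal_store). FastISel then observes the flag as MachineMemOperand::isNonTemporal() and, with this change, selects MOVNTSS/MOVNTSD on SSE4A targets.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Metadata.h"

using namespace llvm;

// Illustrative helper (not from this patch): store Val to Ptr and mark the
// store non-temporal via !nontemporal !{i32 1}, the same form used by the
// fast-isel-nontemporal.ll tests above.
static void emitNonTemporalStore(IRBuilder<> &B, Value *Val, Value *Ptr) {
  StoreInst *SI = B.CreateStore(Val, Ptr);
  MDNode *NT = MDNode::get(B.getContext(),
                           ConstantAsMetadata::get(B.getInt32(1)));
  SI->setMetadata(B.getContext().getMDKindID("nontemporal"), NT);
}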