diff --git a/test/CodeGen/X86/masked_memop.ll b/test/CodeGen/X86/masked_memop.ll
index 0662d1b22ed..c29933e266b 100644
--- a/test/CodeGen/X86/masked_memop.ll
+++ b/test/CodeGen/X86/masked_memop.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=knl < %s | FileCheck %s --check-prefix=AVX512
 ; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=core-avx2 < %s | FileCheck %s --check-prefix=AVX2
 ; RUN: opt -mtriple=x86_64-apple-darwin -codegenprepare -mcpu=corei7-avx -S < %s | FileCheck %s --check-prefix=AVX_SCALAR
@@ -363,19 +364,99 @@ define <16 x i32*> @test23(<16 x i32*> %trigger, <16 x i32*>* %addr) {
 declare <16 x %mystruct*> @llvm.masked.load.v16p0mystruct(<16 x %mystruct*>*, i32, <16 x i1>, <16 x %mystruct*>)
-; AVX512-LABEL: test24
-; AVX512: vmovdqu64 (%rdi), %zmm0 {%k1} {z}
-; AVX512: kshiftrw $8, %k1, %k1
-; AVX512: vmovdqu64 64(%rdi), %zmm1 {%k1} {z}
-
 define <16 x %mystruct*> @test24(<16 x i1> %mask, <16 x %mystruct*>* %addr) {
+; AVX512-LABEL: test24:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
+; AVX512-NEXT: vpslld $31, %zmm0, %zmm0
+; AVX512-NEXT: vptestmd %zmm0, %zmm0, %k1
+; AVX512-NEXT: vmovdqu64 (%rdi), %zmm0 {%k1} {z}
+; AVX512-NEXT: kshiftrw $8, %k1, %k1
+; AVX512-NEXT: vmovdqu64 64(%rdi), %zmm1 {%k1} {z}
+; AVX512-NEXT: retq
+;
+; AVX2-LABEL: test24:
+; AVX2: ## BB#0:
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX2-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX2-NEXT: vpsrad $31, %xmm1, %xmm1
+; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
+; AVX2-NEXT: vpmaskmovq (%rdi), %ymm1, %ymm4
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; AVX2-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX2-NEXT: vpsrad $31, %xmm1, %xmm1
+; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
+; AVX2-NEXT: vpmaskmovq 96(%rdi), %ymm1, %ymm3
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; AVX2-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX2-NEXT: vpsrad $31, %xmm1, %xmm1
+; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
+; AVX2-NEXT: vpmaskmovq 64(%rdi), %ymm1, %ymm2
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX2-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX2-NEXT: vpsrad $31, %xmm0, %xmm0
+; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
+; AVX2-NEXT: vpmaskmovq 32(%rdi), %ymm0, %ymm1
+; AVX2-NEXT: vmovdqa %ymm4, %ymm0
+; AVX2-NEXT: retq
+;
+; SKX-LABEL: test24:
+; SKX: ## BB#0:
+; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
+; SKX-NEXT: vpmovb2m %xmm0, %k1
+; SKX-NEXT: vmovdqu64 (%rdi), %zmm0 {%k1} {z}
+; SKX-NEXT: kshiftrw $8, %k1, %k1
+; SKX-NEXT: vmovdqu64 64(%rdi), %zmm1 {%k1} {z}
+; SKX-NEXT: retq
 %res = call <16 x %mystruct*> @llvm.masked.load.v16p0mystruct(<16 x %mystruct*>* %addr, i32 4, <16 x i1>%mask, <16 x %mystruct*>zeroinitializer)
 ret <16 x %mystruct*> %res
 }
 define void @test_store_16i64(<16 x i64>* %ptrs, <16 x i1> %mask, <16 x i64> %src0) {
+; AVX512-LABEL: test_store_16i64:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
+; AVX512-NEXT: vpslld $31, %zmm0, %zmm0
+; AVX512-NEXT: vptestmd %zmm0, %zmm0, %k1
+; AVX512-NEXT: vmovdqu64 %zmm1, (%rdi) {%k1}
+; AVX512-NEXT: kshiftrw $8, %k1, %k1
+; AVX512-NEXT: vmovdqu64 %zmm2, 64(%rdi) {%k1}
+; AVX512-NEXT: retq
+;
+; AVX2-LABEL: test_store_16i64:
+; AVX2: ## BB#0:
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm5 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX2-NEXT: vpslld $31, %xmm5, %xmm5
+; AVX2-NEXT: vpsrad $31, %xmm5, %xmm5
+; AVX2-NEXT: vpmovsxdq %xmm5, %ymm5
+; AVX2-NEXT: vpmaskmovq %ymm1, %ymm5, (%rdi)
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; AVX2-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX2-NEXT: vpsrad $31, %xmm1, %xmm1
+; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
+; AVX2-NEXT: vpmaskmovq %ymm4, %ymm1, 96(%rdi)
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; AVX2-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX2-NEXT: vpsrad $31, %xmm1, %xmm1
+; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
+; AVX2-NEXT: vpmaskmovq %ymm3, %ymm1, 64(%rdi)
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX2-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX2-NEXT: vpsrad $31, %xmm0, %xmm0
+; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
+; AVX2-NEXT: vpmaskmovq %ymm2, %ymm0, 32(%rdi)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
 ; SKX-LABEL: test_store_16i64:
 ; SKX: ## BB#0:
+; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
 ; SKX-NEXT: vpmovb2m %xmm0, %k1
 ; SKX-NEXT: vmovdqu64 %zmm1, (%rdi) {%k1}
 ; SKX-NEXT: kshiftrw $8, %k1, %k1
@@ -386,8 +467,47 @@ define void @test_store_16i64(<16 x i64>* %ptrs, <16 x i1> %mask, <16 x i64> %sr
 }
 declare void @llvm.masked.store.v16i64(<16 x i64> %src0, <16 x i64>* %ptrs, i32, <16 x i1> %mask)
 define void @test_store_16f64(<16 x double>* %ptrs, <16 x i1> %mask, <16 x double> %src0) {
+; AVX512-LABEL: test_store_16f64:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
+; AVX512-NEXT: vpslld $31, %zmm0, %zmm0
+; AVX512-NEXT: vptestmd %zmm0, %zmm0, %k1
+; AVX512-NEXT: vmovupd %zmm1, (%rdi) {%k1}
+; AVX512-NEXT: kshiftrw $8, %k1, %k1
+; AVX512-NEXT: vmovupd %zmm2, 64(%rdi) {%k1}
+; AVX512-NEXT: retq
+;
+; AVX2-LABEL: test_store_16f64:
+; AVX2: ## BB#0:
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm5 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX2-NEXT: vpslld $31, %xmm5, %xmm5
+; AVX2-NEXT: vpsrad $31, %xmm5, %xmm5
+; AVX2-NEXT: vpmovsxdq %xmm5, %ymm5
+; AVX2-NEXT: vmaskmovpd %ymm1, %ymm5, (%rdi)
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; AVX2-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX2-NEXT: vpsrad $31, %xmm1, %xmm1
+; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
+; AVX2-NEXT: vmaskmovpd %ymm4, %ymm1, 96(%rdi)
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
+; AVX2-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX2-NEXT: vpsrad $31, %xmm1, %xmm1
+; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
+; AVX2-NEXT: vmaskmovpd %ymm3, %ymm1, 64(%rdi)
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX2-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX2-NEXT: vpsrad $31, %xmm0, %xmm0
+; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
+; AVX2-NEXT: vmaskmovpd %ymm2, %ymm0, 32(%rdi)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
 ; SKX-LABEL: test_store_16f64:
 ; SKX: ## BB#0:
+; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
 ; SKX-NEXT: vpmovb2m %xmm0, %k1
 ; SKX-NEXT: vmovupd %zmm1, (%rdi) {%k1}
 ; SKX-NEXT: kshiftrw $8, %k1, %k1
@@ -398,8 +518,53 @@ define void @test_store_16f64(<16 x double>* %ptrs, <16 x i1> %mask, <16 x doubl
 }
 declare void @llvm.masked.store.v16f64(<16 x double> %src0, <16 x double>* %ptrs, i32, <16 x i1> %mask)
 define <16 x i64> @test_load_16i64(<16 x i64>* %ptrs, <16 x i1> %mask, <16 x i64> %src0) {
+; AVX512-LABEL: test_load_16i64:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
+; AVX512-NEXT: vpslld $31, %zmm0, %zmm0
+; AVX512-NEXT: vptestmd %zmm0, %zmm0, %k1
+; AVX512-NEXT: vmovdqu64 (%rdi), %zmm1 {%k1}
+; AVX512-NEXT: kshiftrw $8, %k1, %k1
+; AVX512-NEXT: vmovdqu64 64(%rdi), %zmm2 {%k1}
+; AVX512-NEXT: vmovaps %zmm1, %zmm0
+; AVX512-NEXT: vmovaps %zmm2, %zmm1
+; AVX512-NEXT: retq
+;
+; AVX2-LABEL: test_load_16i64:
+; AVX2: ## BB#0:
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm5 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX2-NEXT: vpslld $31, %xmm5, %xmm5
+; AVX2-NEXT: vpsrad $31, %xmm5, %xmm5
+; AVX2-NEXT: vpmovsxdq %xmm5, %ymm5
+; AVX2-NEXT: vpmaskmovq (%rdi), %ymm5, %ymm9
+; AVX2-NEXT: vpshufd {{.*#+}} xmm7 = xmm0[1,1,2,3]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
+; AVX2-NEXT: vpslld $31, %xmm7, %xmm7
+; AVX2-NEXT: vpsrad $31, %xmm7, %xmm7
+; AVX2-NEXT: vpmovsxdq %xmm7, %ymm7
+; AVX2-NEXT: vpmaskmovq 32(%rdi), %ymm7, %ymm8
+; AVX2-NEXT: vpshufd {{.*#+}} xmm6 = xmm0[2,3,0,1]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero,xmm6[2],zero,zero,zero,xmm6[3],zero,zero,zero
+; AVX2-NEXT: vpslld $31, %xmm6, %xmm6
+; AVX2-NEXT: vpsrad $31, %xmm6, %xmm6
+; AVX2-NEXT: vpmovsxdq %xmm6, %ymm6
+; AVX2-NEXT: vpmaskmovq 64(%rdi), %ymm6, %ymm10
+; AVX2-NEXT: vblendvpd %ymm5, %ymm9, %ymm1, %ymm5
+; AVX2-NEXT: vblendvpd %ymm7, %ymm8, %ymm2, %ymm1
+; AVX2-NEXT: vblendvpd %ymm6, %ymm10, %ymm3, %ymm2
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX2-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX2-NEXT: vpsrad $31, %xmm0, %xmm0
+; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
+; AVX2-NEXT: vpmaskmovq 96(%rdi), %ymm0, %ymm3
+; AVX2-NEXT: vblendvpd %ymm0, %ymm3, %ymm4, %ymm3
+; AVX2-NEXT: vmovapd %ymm5, %ymm0
+; AVX2-NEXT: retq
+;
 ; SKX-LABEL: test_load_16i64:
 ; SKX: ## BB#0:
+; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
 ; SKX-NEXT: vpmovb2m %xmm0, %k1
 ; SKX-NEXT: vmovdqu64 (%rdi), %zmm1 {%k1}
 ; SKX-NEXT: kshiftrw $8, %k1, %k1
@@ -412,8 +577,53 @@ define <16 x i64> @test_load_16i64(<16 x i64>* %ptrs, <16 x i1> %mask, <16 x i64
 }
 declare <16 x i64> @llvm.masked.load.v16i64(<16 x i64>* %ptrs, i32, <16 x i1> %mask, <16 x i64> %src0)
 define <16 x double> @test_load_16f64(<16 x double>* %ptrs, <16 x i1> %mask, <16 x double> %src0) {
+; AVX512-LABEL: test_load_16f64:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
+; AVX512-NEXT: vpslld $31, %zmm0, %zmm0
+; AVX512-NEXT: vptestmd %zmm0, %zmm0, %k1
+; AVX512-NEXT: vmovupd (%rdi), %zmm1 {%k1}
+; AVX512-NEXT: kshiftrw $8, %k1, %k1
+; AVX512-NEXT: vmovupd 64(%rdi), %zmm2 {%k1}
+; AVX512-NEXT: vmovaps %zmm1, %zmm0
+; AVX512-NEXT: vmovaps %zmm2, %zmm1
+; AVX512-NEXT: retq
+;
+; AVX2-LABEL: test_load_16f64:
+; AVX2: ## BB#0:
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm5 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX2-NEXT: vpslld $31, %xmm5, %xmm5
+; AVX2-NEXT: vpsrad $31, %xmm5, %xmm5
+; AVX2-NEXT: vpmovsxdq %xmm5, %ymm5
+; AVX2-NEXT: vmaskmovpd (%rdi), %ymm5, %ymm9
+; AVX2-NEXT: vpshufd {{.*#+}} xmm7 = xmm0[1,1,2,3]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
+; AVX2-NEXT: vpslld $31, %xmm7, %xmm7
+; AVX2-NEXT: vpsrad $31, %xmm7, %xmm7
+; AVX2-NEXT: vpmovsxdq %xmm7, %ymm7
+; AVX2-NEXT: vmaskmovpd 32(%rdi), %ymm7, %ymm8
+; AVX2-NEXT: vpshufd {{.*#+}} xmm6 = xmm0[2,3,0,1]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm6 = xmm6[0],zero,zero,zero,xmm6[1],zero,zero,zero,xmm6[2],zero,zero,zero,xmm6[3],zero,zero,zero
+; AVX2-NEXT: vpslld $31, %xmm6, %xmm6
+; AVX2-NEXT: vpsrad $31, %xmm6, %xmm6
+; AVX2-NEXT: vpmovsxdq %xmm6, %ymm6
+; AVX2-NEXT: vmaskmovpd 64(%rdi), %ymm6, %ymm10
+; AVX2-NEXT: vblendvpd %ymm5, %ymm9, %ymm1, %ymm5
+; AVX2-NEXT: vblendvpd %ymm7, %ymm8, %ymm2, %ymm1
+; AVX2-NEXT: vblendvpd %ymm6, %ymm10, %ymm3, %ymm2
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX2-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX2-NEXT: vpsrad $31, %xmm0, %xmm0
+; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
+; AVX2-NEXT: vmaskmovpd 96(%rdi), %ymm0, %ymm3
+; AVX2-NEXT: vblendvpd %ymm0, %ymm3, %ymm4, %ymm3
+; AVX2-NEXT: vmovapd %ymm5, %ymm0
+; AVX2-NEXT: retq
+;
 ; SKX-LABEL: test_load_16f64:
 ; SKX: ## BB#0:
+; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
 ; SKX-NEXT: vpmovb2m %xmm0, %k1
 ; SKX-NEXT: vmovupd (%rdi), %zmm1 {%k1}
 ; SKX-NEXT: kshiftrw $8, %k1, %k1
@@ -427,8 +637,112 @@ define <16 x double> @test_load_16f64(<16 x double>* %ptrs, <16 x i1> %mask, <16
 declare <16 x double> @llvm.masked.load.v16f64(<16 x double>* %ptrs, i32, <16 x i1> %mask, <16 x double> %src0)
 define <32 x double> @test_load_32f64(<32 x double>* %ptrs, <32 x i1> %mask, <32 x double> %src0) {
+; AVX512-LABEL: test_load_32f64:
+; AVX512: ## BB#0:
+; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm5
+; AVX512-NEXT: vpmovsxbd %xmm5, %zmm5
+; AVX512-NEXT: vpslld $31, %zmm5, %zmm5
+; AVX512-NEXT: vptestmd %zmm5, %zmm5, %k1
+; AVX512-NEXT: vmovupd 128(%rdi), %zmm3 {%k1}
+; AVX512-NEXT: vpmovsxbd %xmm0, %zmm0
+; AVX512-NEXT: vpslld $31, %zmm0, %zmm0
+; AVX512-NEXT: vptestmd %zmm0, %zmm0, %k2
+; AVX512-NEXT: vmovupd (%rdi), %zmm1 {%k2}
+; AVX512-NEXT: kshiftrw $8, %k1, %k1
+; AVX512-NEXT: vmovupd 192(%rdi), %zmm4 {%k1}
+; AVX512-NEXT: kshiftrw $8, %k2, %k1
+; AVX512-NEXT: vmovupd 64(%rdi), %zmm2 {%k1}
+; AVX512-NEXT: vmovaps %zmm1, %zmm0
+; AVX512-NEXT: vmovaps %zmm2, %zmm1
+; AVX512-NEXT: vmovaps %zmm3, %zmm2
+; AVX512-NEXT: vmovaps %zmm4, %zmm3
+; AVX512-NEXT: retq
+;
+; AVX2-LABEL: test_load_32f64:
+; AVX2: ## BB#0:
+; AVX2-NEXT: pushq %rbp
+; AVX2-NEXT: Ltmp0:
+; AVX2-NEXT: .cfi_def_cfa_offset 16
+; AVX2-NEXT: Ltmp1:
+; AVX2-NEXT: .cfi_offset %rbp, -16
+; AVX2-NEXT: movq %rsp, %rbp
+; AVX2-NEXT: Ltmp2:
+; AVX2-NEXT: .cfi_def_cfa_register %rbp
+; AVX2-NEXT: andq $-32, %rsp
+; AVX2-NEXT: subq $32, %rsp
+; AVX2-NEXT: vpshufd {{.*#+}} xmm8 = xmm0[1,1,2,3]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm8 = xmm8[0],zero,zero,zero,xmm8[1],zero,zero,zero,xmm8[2],zero,zero,zero,xmm8[3],zero,zero,zero
+; AVX2-NEXT: vpslld $31, %xmm8, %xmm8
+; AVX2-NEXT: vpsrad $31, %xmm8, %xmm8
+; AVX2-NEXT: vpmovsxdq %xmm8, %ymm8
+; AVX2-NEXT: vmaskmovpd 32(%rsi), %ymm8, %ymm9
+; AVX2-NEXT: vpshufd {{.*#+}} xmm10 = xmm0[2,3,0,1]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm10 = xmm10[0],zero,zero,zero,xmm10[1],zero,zero,zero,xmm10[2],zero,zero,zero,xmm10[3],zero,zero,zero
+; AVX2-NEXT: vpslld $31, %xmm10, %xmm10
+; AVX2-NEXT: vpsrad $31, %xmm10, %xmm10
+; AVX2-NEXT: vpmovsxdq %xmm10, %ymm10
+; AVX2-NEXT: vmaskmovpd 64(%rsi), %ymm10, %ymm11
+; AVX2-NEXT: vpshufd {{.*#+}} xmm12 = xmm0[3,1,2,3]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm12 = xmm12[0],zero,zero,zero,xmm12[1],zero,zero,zero,xmm12[2],zero,zero,zero,xmm12[3],zero,zero,zero
+; AVX2-NEXT: vpslld $31, %xmm12, %xmm12
+; AVX2-NEXT: vpsrad $31, %xmm12, %xmm12
+; AVX2-NEXT: vpmovsxdq %xmm12, %ymm12
+; AVX2-NEXT: vmaskmovpd 96(%rsi), %ymm12, %ymm13
+; AVX2-NEXT: vblendvpd %ymm8, %ymm9, %ymm2, %ymm8
+; AVX2-NEXT: vblendvpd %ymm10, %ymm11, %ymm3, %ymm9
+; AVX2-NEXT: vblendvpd %ymm12, %ymm13, %ymm4, %ymm11
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,2,3]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero,xmm3[2],zero,zero,zero,xmm3[3],zero,zero,zero
+; AVX2-NEXT: vpslld $31, %xmm3, %xmm3
+; AVX2-NEXT: vpsrad $31, %xmm3, %xmm3
+; AVX2-NEXT: vpmovsxdq %xmm3, %ymm3
+; AVX2-NEXT: vmaskmovpd 160(%rsi), %ymm3, %ymm10
+; AVX2-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[2,3,0,1]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero,xmm4[2],zero,zero,zero,xmm4[3],zero,zero,zero
+; AVX2-NEXT: vpslld $31, %xmm4, %xmm4
+; AVX2-NEXT: vpsrad $31, %xmm4, %xmm4
+; AVX2-NEXT: vpmovsxdq %xmm4, %ymm4
+; AVX2-NEXT: vmaskmovpd 192(%rsi), %ymm4, %ymm12
+; AVX2-NEXT: vblendvpd %ymm3, %ymm10, %ymm6, %ymm3
+; AVX2-NEXT: vmovapd 16(%rbp), %ymm6
+; AVX2-NEXT: vblendvpd %ymm4, %ymm12, %ymm7, %ymm4
+; AVX2-NEXT: vpshufd {{.*#+}} xmm7 = xmm2[3,1,2,3]
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm7 = xmm7[0],zero,zero,zero,xmm7[1],zero,zero,zero,xmm7[2],zero,zero,zero,xmm7[3],zero,zero,zero
+; AVX2-NEXT: vpslld $31, %xmm7, %xmm7
+; AVX2-NEXT: vpsrad $31, %xmm7, %xmm7
+; AVX2-NEXT: vpmovsxdq %xmm7, %ymm7
+; AVX2-NEXT: vmaskmovpd 224(%rsi), %ymm7, %ymm10
+; AVX2-NEXT: vblendvpd %ymm7, %ymm10, %ymm6, %ymm6
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; AVX2-NEXT: vpslld $31, %xmm0, %xmm0
+; AVX2-NEXT: vpsrad $31, %xmm0, %xmm0
+; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
+; AVX2-NEXT: vmaskmovpd (%rsi), %ymm0, %ymm7
+; AVX2-NEXT: vblendvpd %ymm0, %ymm7, %ymm1, %ymm0
+; AVX2-NEXT: vpmovzxbd {{.*#+}} xmm1 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero,xmm2[2],zero,zero,zero,xmm2[3],zero,zero,zero
+; AVX2-NEXT: vpslld $31, %xmm1, %xmm1
+; AVX2-NEXT: vpsrad $31, %xmm1, %xmm1
+; AVX2-NEXT: vpmovsxdq %xmm1, %ymm1
+; AVX2-NEXT: vmaskmovpd 128(%rsi), %ymm1, %ymm2
+; AVX2-NEXT: vblendvpd %ymm1, %ymm2, %ymm5, %ymm1
+; AVX2-NEXT: vmovapd %ymm1, 128(%rdi)
+; AVX2-NEXT: vmovapd %ymm0, (%rdi)
+; AVX2-NEXT: vmovapd %ymm6, 224(%rdi)
+; AVX2-NEXT: vmovapd %ymm4, 192(%rdi)
+; AVX2-NEXT: vmovapd %ymm3, 160(%rdi)
+; AVX2-NEXT: vmovapd %ymm11, 96(%rdi)
+; AVX2-NEXT: vmovapd %ymm9, 64(%rdi)
+; AVX2-NEXT: vmovapd %ymm8, 32(%rdi)
+; AVX2-NEXT: movq %rdi, %rax
+; AVX2-NEXT: movq %rbp, %rsp
+; AVX2-NEXT: popq %rbp
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
 ; SKX-LABEL: test_load_32f64:
 ; SKX: ## BB#0:
+; SKX-NEXT: vpsllw $7, %ymm0, %ymm0
 ; SKX-NEXT: vpmovb2m %ymm0, %k1
 ; SKX-NEXT: vmovupd (%rdi), %zmm1 {%k1}
 ; SKX-NEXT: kshiftrd $16, %k1, %k2