From 26ba182fdf0c07f90cf6cd85850e017c7c38a68f Mon Sep 17 00:00:00 2001
From: Robert Khasanov
Date: Fri, 26 Sep 2014 09:48:50 +0000
Subject: [PATCH] [AVX512] Added load/store from BW/VL subsets to Register2Memory
 opcode tables. Added lowering tests for these instructions.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@218508 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86InstrInfo.cpp    |  50 ++-
 lib/Target/X86/X86InstrSSE.td      |  17 +-
 test/CodeGen/X86/avx512bw-mov.ll   |  81 ++++
 test/CodeGen/X86/avx512bwvl-mov.ll | 162 ++++++++
 test/CodeGen/X86/avx512vl-mov.ll   | 642 +++++++++++++++++++++++++++++
 5 files changed, 946 insertions(+), 6 deletions(-)
 create mode 100644 test/CodeGen/X86/avx512bw-mov.ll
 create mode 100644 test/CodeGen/X86/avx512bwvl-mov.ll
 create mode 100644 test/CodeGen/X86/avx512vl-mov.ll

diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index 4817e8d0bd5..08306419f5a 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -385,8 +385,32 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
     { X86::VMOVDQA64Zrr, X86::VMOVDQA64Zmr, TB_FOLDED_STORE | TB_ALIGN_64 },
     { X86::VMOVUPDZrr, X86::VMOVUPDZmr, TB_FOLDED_STORE },
     { X86::VMOVUPSZrr, X86::VMOVUPSZmr, TB_FOLDED_STORE },
+    { X86::VMOVDQU8Zrr, X86::VMOVDQU8Zmr, TB_FOLDED_STORE },
+    { X86::VMOVDQU16Zrr, X86::VMOVDQU16Zmr, TB_FOLDED_STORE },
     { X86::VMOVDQU32Zrr, X86::VMOVDQU32Zmr, TB_FOLDED_STORE },
-    { X86::VMOVDQU64Zrr, X86::VMOVDQU64Zmr, TB_FOLDED_STORE }
+    { X86::VMOVDQU64Zrr, X86::VMOVDQU64Zmr, TB_FOLDED_STORE },
+    // AVX-512 foldable instructions (256-bit versions)
+    { X86::VMOVAPDZ256rr, X86::VMOVAPDZ256mr, TB_FOLDED_STORE | TB_ALIGN_32 },
+    { X86::VMOVAPSZ256rr, X86::VMOVAPSZ256mr, TB_FOLDED_STORE | TB_ALIGN_32 },
+    { X86::VMOVDQA32Z256rr, X86::VMOVDQA32Z256mr, TB_FOLDED_STORE | TB_ALIGN_32 },
+    { X86::VMOVDQA64Z256rr, X86::VMOVDQA64Z256mr, TB_FOLDED_STORE | TB_ALIGN_32 },
+    { X86::VMOVUPDZ256rr, X86::VMOVUPDZ256mr, TB_FOLDED_STORE },
+    { X86::VMOVUPSZ256rr, X86::VMOVUPSZ256mr, TB_FOLDED_STORE },
+    { X86::VMOVDQU8Z256rr, X86::VMOVDQU8Z256mr, TB_FOLDED_STORE },
+    { X86::VMOVDQU16Z256rr, X86::VMOVDQU16Z256mr, TB_FOLDED_STORE },
+    { X86::VMOVDQU32Z256rr, X86::VMOVDQU32Z256mr, TB_FOLDED_STORE },
+    { X86::VMOVDQU64Z256rr, X86::VMOVDQU64Z256mr, TB_FOLDED_STORE },
+    // AVX-512 foldable instructions (128-bit versions)
+    { X86::VMOVAPDZ128rr, X86::VMOVAPDZ128mr, TB_FOLDED_STORE | TB_ALIGN_16 },
+    { X86::VMOVAPSZ128rr, X86::VMOVAPSZ128mr, TB_FOLDED_STORE | TB_ALIGN_16 },
+    { X86::VMOVDQA32Z128rr, X86::VMOVDQA32Z128mr, TB_FOLDED_STORE | TB_ALIGN_16 },
+    { X86::VMOVDQA64Z128rr, X86::VMOVDQA64Z128mr, TB_FOLDED_STORE | TB_ALIGN_16 },
+    { X86::VMOVUPDZ128rr, X86::VMOVUPDZ128mr, TB_FOLDED_STORE },
+    { X86::VMOVUPSZ128rr, X86::VMOVUPSZ128mr, TB_FOLDED_STORE },
+    { X86::VMOVDQU8Z128rr, X86::VMOVDQU8Z128mr, TB_FOLDED_STORE },
+    { X86::VMOVDQU16Z128rr, X86::VMOVDQU16Z128mr, TB_FOLDED_STORE },
+    { X86::VMOVDQU32Z128rr, X86::VMOVDQU32Z128mr, TB_FOLDED_STORE },
+    { X86::VMOVDQU64Z128rr, X86::VMOVDQU64Z128mr, TB_FOLDED_STORE }
   };
 
   for (unsigned i = 0, e = array_lengthof(OpTbl0); i != e; ++i) {
@@ -614,12 +638,36 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
     { X86::VMOVAPSZrr, X86::VMOVAPSZrm, TB_ALIGN_64 },
     { X86::VMOVDQA32Zrr, X86::VMOVDQA32Zrm, TB_ALIGN_64 },
     { X86::VMOVDQA64Zrr, X86::VMOVDQA64Zrm, TB_ALIGN_64 },
+    { X86::VMOVDQU8Zrr, X86::VMOVDQU8Zrm, 0 },
+    { X86::VMOVDQU16Zrr, X86::VMOVDQU16Zrm, 0 },
     { X86::VMOVDQU32Zrr, X86::VMOVDQU32Zrm, 0 },
     { X86::VMOVDQU64Zrr, X86::VMOVDQU64Zrm, 0 },
     { X86::VMOVUPDZrr, X86::VMOVUPDZrm, 0 },
     { X86::VMOVUPSZrr, X86::VMOVUPSZrm, 0 },
     { X86::VPABSDZrr, X86::VPABSDZrm, 0 },
     { X86::VPABSQZrr, X86::VPABSQZrm, 0 },
+    // AVX-512 foldable instructions (256-bit versions)
+    { X86::VMOVAPDZ256rr, X86::VMOVAPDZ256rm, TB_ALIGN_32 },
+    { X86::VMOVAPSZ256rr, X86::VMOVAPSZ256rm, TB_ALIGN_32 },
+    { X86::VMOVDQA32Z256rr, X86::VMOVDQA32Z256rm, TB_ALIGN_32 },
+    { X86::VMOVDQA64Z256rr, X86::VMOVDQA64Z256rm, TB_ALIGN_32 },
+    { X86::VMOVDQU8Z256rr, X86::VMOVDQU8Z256rm, 0 },
+    { X86::VMOVDQU16Z256rr, X86::VMOVDQU16Z256rm, 0 },
+    { X86::VMOVDQU32Z256rr, X86::VMOVDQU32Z256rm, 0 },
+    { X86::VMOVDQU64Z256rr, X86::VMOVDQU64Z256rm, 0 },
+    { X86::VMOVUPDZ256rr, X86::VMOVUPDZ256rm, 0 },
+    { X86::VMOVUPSZ256rr, X86::VMOVUPSZ256rm, 0 },
+    // AVX-512 foldable instructions (128-bit versions)
+    { X86::VMOVAPDZ128rr, X86::VMOVAPDZ128rm, TB_ALIGN_16 },
+    { X86::VMOVAPSZ128rr, X86::VMOVAPSZ128rm, TB_ALIGN_16 },
+    { X86::VMOVDQA32Z128rr, X86::VMOVDQA32Z128rm, TB_ALIGN_16 },
+    { X86::VMOVDQA64Z128rr, X86::VMOVDQA64Z128rm, TB_ALIGN_16 },
+    { X86::VMOVDQU8Z128rr, X86::VMOVDQU8Z128rm, 0 },
+    { X86::VMOVDQU16Z128rr, X86::VMOVDQU16Z128rm, 0 },
+    { X86::VMOVDQU32Z128rr, X86::VMOVDQU32Z128rm, 0 },
+    { X86::VMOVDQU64Z128rr, X86::VMOVDQU64Z128rm, 0 },
+    { X86::VMOVUPDZ128rr, X86::VMOVUPDZ128rm, 0 },
+    { X86::VMOVUPSZ128rr, X86::VMOVUPSZ128rm, 0 },
 
     // AES foldable instructions
     { X86::AESIMCrr, X86::AESIMCrm, TB_ALIGN_16 },
diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index b6210e7eb0b..923b3dab0f8 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -865,6 +865,7 @@ let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
            Sched<[WriteLoad]>;
 }
 
+let Predicates = [HasAVX, NoVLX] in {
 defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
                                 "movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
                                 PS, VEX;
@@ -890,20 +891,26 @@ defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
 defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
                                 "movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
                                 PD, VEX, VEX_L;
+}
+
+let Predicates = [UseSSE1] in {
 defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
                                "movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
                                PS;
-defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
-                               "movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
-                               PD;
 defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
                                "movups", SSEPackedSingle, SSE_MOVU_ITINS>,
                                PS;
+}
+let Predicates = [UseSSE2] in {
+defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
+                               "movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
+                               PD;
 defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
                                "movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
                                PD;
+}
 
-let SchedRW = [WriteStore] in {
+let SchedRW = [WriteStore], Predicates = [HasAVX, NoVLX] in {
 def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                    "movaps\t{$src, $dst|$dst, $src}",
                    [(alignedstore (v4f32 VR128:$src), addr:$dst)],
@@ -1047,7 +1054,7 @@ let Predicates = [UseSSE2] in
             (MOVUPDmr addr:$dst, VR128:$src)>;
 
 // Use vmovaps/vmovups for AVX integer load/store.
-let Predicates = [HasAVX] in {
+let Predicates = [HasAVX, NoVLX] in {
   // 128-bit load/store
   def : Pat<(alignedloadv2i64 addr:$src),
             (VMOVAPSrm addr:$src)>;
diff --git a/test/CodeGen/X86/avx512bw-mov.ll b/test/CodeGen/X86/avx512bw-mov.ll
new file mode 100644
index 00000000000..2ff6d280ab8
--- /dev/null
+++ b/test/CodeGen/X86/avx512bw-mov.ll
@@ -0,0 +1,81 @@
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw | FileCheck %s
+
+; CHECK-LABEL: test1
+; CHECK: vmovdqu8
+; CHECK: ret
+define <64 x i8> @test1(i8 * %addr) {
+  %vaddr = bitcast i8* %addr to <64 x i8>*
+  %res = load <64 x i8>* %vaddr, align 1
+  ret <64 x i8>%res
+}
+
+; CHECK-LABEL: test2
+; CHECK: vmovdqu8
+; CHECK: ret
+define void @test2(i8 * %addr, <64 x i8> %data) {
+  %vaddr = bitcast i8* %addr to <64 x i8>*
+  store <64 x i8>%data, <64 x i8>* %vaddr, align 1
+  ret void
+}
+
+; CHECK-LABEL: test3
+; CHECK: vmovdqu8{{.*{%k[1-7]}}}
+; CHECK: ret
+define <64 x i8> @test3(i8 * %addr, <64 x i8> %old, <64 x i8> %mask1) {
+  %mask = icmp ne <64 x i8> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <64 x i8>*
+  %r = load <64 x i8>* %vaddr, align 1
+  %res = select <64 x i1> %mask, <64 x i8> %r, <64 x i8> %old
+  ret <64 x i8>%res
+}
+
+; CHECK-LABEL: test4
+; CHECK: vmovdqu8{{.*{%k[1-7]} {z}}}
+; CHECK: ret
+define <64 x i8> @test4(i8 * %addr, <64 x i8> %mask1) {
+  %mask = icmp ne <64 x i8> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <64 x i8>*
+  %r = load <64 x i8>* %vaddr, align 1
+  %res = select <64 x i1> %mask, <64 x i8> %r, <64 x i8> zeroinitializer
+  ret <64 x i8>%res
+}
+
+; CHECK-LABEL: test5
+; CHECK: vmovdqu16
+; CHECK: ret
+define <32 x i16> @test5(i8 * %addr) {
+  %vaddr = bitcast i8* %addr to <32 x i16>*
+  %res = load <32 x i16>* %vaddr, align 1
+  ret <32 x i16>%res
+}
+
+; CHECK-LABEL: test6
+; CHECK: vmovdqu16
+; CHECK: ret
+define void @test6(i8 * %addr, <32 x i16> %data) {
+  %vaddr = bitcast i8* %addr to <32 x i16>*
+  store <32 x i16>%data, <32 x i16>* %vaddr, align 1
+  ret void
+}
+
+; CHECK-LABEL: test7
+; CHECK: vmovdqu16{{.*{%k[1-7]}}}
+; CHECK: ret
+define <32 x i16> @test7(i8 * %addr, <32 x i16> %old, <32 x i16> %mask1) {
+  %mask = icmp ne <32 x i16> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <32 x i16>*
+  %r = load <32 x i16>* %vaddr, align 1
+  %res = select <32 x i1> %mask, <32 x i16> %r, <32 x i16> %old
+  ret <32 x i16>%res
+}
+
+; CHECK-LABEL: test8
+; CHECK: vmovdqu16{{.*{%k[1-7]} {z}}}
+; CHECK: ret
+define <32 x i16> @test8(i8 * %addr, <32 x i16> %mask1) {
+  %mask = icmp ne <32 x i16> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <32 x i16>*
+  %r = load <32 x i16>* %vaddr, align 1
+  %res = select <32 x i1> %mask, <32 x i16> %r, <32 x i16> zeroinitializer
+  ret <32 x i16>%res
+}
diff --git a/test/CodeGen/X86/avx512bwvl-mov.ll b/test/CodeGen/X86/avx512bwvl-mov.ll
new file mode 100644
index 00000000000..835844fc821
--- /dev/null
+++ b/test/CodeGen/X86/avx512bwvl-mov.ll
@@ -0,0 +1,162 @@
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw -mattr=+avx512vl --show-mc-encoding| FileCheck %s
+
+; CHECK-LABEL: test_256_1
+; CHECK: vmovdqu8 {{.*}} ## encoding: [0x62
+; CHECK: ret
+define <32 x i8> @test_256_1(i8 * %addr) {
+  %vaddr = bitcast i8* %addr to <32 x i8>*
+  %res = load <32 x i8>* %vaddr, align 1
+  ret <32 x i8>%res
+}
+
+; CHECK-LABEL: test_256_2
+; CHECK: vmovdqu8{{.*}} ## encoding: [0x62
+; CHECK: ret
+define void @test_256_2(i8 * %addr, <32 x i8> %data) {
+  %vaddr = bitcast i8* %addr to <32 x i8>*
+  store <32 x i8>%data, <32 x i8>* %vaddr, align 1
+  ret void
+}
+
+; CHECK-LABEL: test_256_3
+; CHECK: vmovdqu8{{.*{%k[1-7]} }}## encoding: [0x62
+; CHECK: ret
+define <32 x i8> @test_256_3(i8 * %addr, <32 x i8> %old, <32 x i8> %mask1) {
+  %mask = icmp ne <32 x i8> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <32 x i8>*
+  %r = load <32 x i8>* %vaddr, align 1
+  %res = select <32 x i1> %mask, <32 x i8> %r, <32 x i8> %old
+  ret <32 x i8>%res
+}
+
+; CHECK-LABEL: test_256_4
+; CHECK: vmovdqu8{{.*{%k[1-7]} {z} }}## encoding: [0x62
+; CHECK: ret
+define <32 x i8> @test_256_4(i8 * %addr, <32 x i8> %mask1) {
+  %mask = icmp ne <32 x i8> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <32 x i8>*
+  %r = load <32 x i8>* %vaddr, align 1
+  %res = select <32 x i1> %mask, <32 x i8> %r, <32 x i8> zeroinitializer
+  ret <32 x i8>%res
+}
+
+; CHECK-LABEL: test_256_5
+; CHECK: vmovdqu16{{.*}} ## encoding: [0x62
+; CHECK: ret
+define <16 x i16> @test_256_5(i8 * %addr) {
+  %vaddr = bitcast i8* %addr to <16 x i16>*
+  %res = load <16 x i16>* %vaddr, align 1
+  ret <16 x i16>%res
+}
+
+; CHECK-LABEL: test_256_6
+; CHECK: vmovdqu16{{.*}} ## encoding: [0x62
+; CHECK: ret
+define void @test_256_6(i8 * %addr, <16 x i16> %data) {
+  %vaddr = bitcast i8* %addr to <16 x i16>*
+  store <16 x i16>%data, <16 x i16>* %vaddr, align 1
+  ret void
+}
+
+; CHECK-LABEL: test_256_7
+; CHECK: vmovdqu16{{.*{%k[1-7]} }}## encoding: [0x62
+; CHECK: ret
+define <16 x i16> @test_256_7(i8 * %addr, <16 x i16> %old, <16 x i16> %mask1) {
+  %mask = icmp ne <16 x i16> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <16 x i16>*
+  %r = load <16 x i16>* %vaddr, align 1
+  %res = select <16 x i1> %mask, <16 x i16> %r, <16 x i16> %old
+  ret <16 x i16>%res
+}
+
+; CHECK-LABEL: test_256_8
+; CHECK: vmovdqu16{{.*{%k[1-7]} {z} }}## encoding: [0x62
+; CHECK: ret
+define <16 x i16> @test_256_8(i8 * %addr, <16 x i16> %mask1) {
+  %mask = icmp ne <16 x i16> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <16 x i16>*
+  %r = load <16 x i16>* %vaddr, align 1
+  %res = select <16 x i1> %mask, <16 x i16> %r, <16 x i16> zeroinitializer
+  ret <16 x i16>%res
+}
+
+; CHECK-LABEL: test_128_1
+; CHECK: vmovdqu8 {{.*}} ## encoding: [0x62
+; CHECK: ret
+define <16 x i8> @test_128_1(i8 * %addr) {
+  %vaddr = bitcast i8* %addr to <16 x i8>*
+  %res = load <16 x i8>* %vaddr, align 1
+  ret <16 x i8>%res
+}
+
+; CHECK-LABEL: test_128_2
+; CHECK: vmovdqu8{{.*}} ## encoding: [0x62
+; CHECK: ret
+define void @test_128_2(i8 * %addr, <16 x i8> %data) {
+  %vaddr = bitcast i8* %addr to <16 x i8>*
+  store <16 x i8>%data, <16 x i8>* %vaddr, align 1
+  ret void
+}
+
+; CHECK-LABEL: test_128_3
+; CHECK: vmovdqu8{{.*{%k[1-7]} }}## encoding: [0x62
+; CHECK: ret
+define <16 x i8> @test_128_3(i8 * %addr, <16 x i8> %old, <16 x i8> %mask1) {
+  %mask = icmp ne <16 x i8> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <16 x i8>*
+  %r = load <16 x i8>* %vaddr, align 1
+  %res = select <16 x i1> %mask, <16 x i8> %r, <16 x i8> %old
+  ret <16 x i8>%res
+}
+
+; CHECK-LABEL: test_128_4
+; CHECK: vmovdqu8{{.*{%k[1-7]} {z} }}## encoding: [0x62
+; CHECK: ret
+define <16 x i8> @test_128_4(i8 * %addr, <16 x i8> %mask1) {
+  %mask = icmp ne <16 x i8> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <16 x i8>*
+  %r = load <16 x i8>* %vaddr, align 1
+  %res = select <16 x i1> %mask, <16 x i8> %r, <16 x i8> zeroinitializer
+  ret <16 x i8>%res
+}
+
+; CHECK-LABEL: test_128_5
+; CHECK: vmovdqu16{{.*}} ## encoding: [0x62
+; CHECK: ret
+define <8 x i16> @test_128_5(i8 * %addr) {
+  %vaddr = bitcast i8* %addr to <8 x i16>*
+  %res = load <8 x i16>* %vaddr, align 1
+  ret <8 x i16>%res
+}
+
+; CHECK-LABEL: test_128_6
+; CHECK: vmovdqu16{{.*}} ## encoding: [0x62
+; CHECK: ret
+define void @test_128_6(i8 * %addr, <8 x i16> %data) {
+  %vaddr = bitcast i8* %addr to <8 x i16>*
+  store <8 x i16>%data, <8 x i16>* %vaddr, align 1
+  ret void
+}
+
+; CHECK-LABEL: test_128_7
+; CHECK: vmovdqu16{{.*{%k[1-7]} }}## encoding: [0x62
+; CHECK: ret
+define <8 x i16> @test_128_7(i8 * %addr, <8 x i16> %old, <8 x i16> %mask1) {
+  %mask = icmp ne <8 x i16> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <8 x i16>*
+  %r = load <8 x i16>* %vaddr, align 1
+  %res = select <8 x i1> %mask, <8 x i16> %r, <8 x i16> %old
+  ret <8 x i16>%res
+}
+
+; CHECK-LABEL: test_128_8
+; CHECK: vmovdqu16{{.*{%k[1-7]} {z} }}## encoding: [0x62
+; CHECK: ret
+define <8 x i16> @test_128_8(i8 * %addr, <8 x i16> %mask1) {
+  %mask = icmp ne <8 x i16> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <8 x i16>*
+  %r = load <8 x i16>* %vaddr, align 1
+  %res = select <8 x i1> %mask, <8 x i16> %r, <8 x i16> zeroinitializer
+  ret <8 x i16>%res
+}
+
diff --git a/test/CodeGen/X86/avx512vl-mov.ll b/test/CodeGen/X86/avx512vl-mov.ll
new file mode 100644
index 00000000000..32246568ac2
--- /dev/null
+++ b/test/CodeGen/X86/avx512vl-mov.ll
@@ -0,0 +1,642 @@
+; RUN: llc < %s -march=x86-64 -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512vl --show-mc-encoding| FileCheck %s
+
+; CHECK-LABEL: test_256_1
+; CHECK: vmovdqu32
+; CHECK: ret
+define <8 x i32> @test_256_1(i8 * %addr) {
+  %vaddr = bitcast i8* %addr to <8 x i32>*
+  %res = load <8 x i32>* %vaddr, align 1
+  ret <8 x i32>%res
+}
+
+; CHECK-LABEL: test_256_2
+; CHECK: vmovdqa32
+; CHECK: ret
+define <8 x i32> @test_256_2(i8 * %addr) {
+  %vaddr = bitcast i8* %addr to <8 x i32>*
+  %res = load <8 x i32>* %vaddr, align 32
+  ret <8 x i32>%res
+}
+
+; CHECK-LABEL: test_256_3
+; CHECK: vmovdqa64
+; CHECK: ret
+define void @test_256_3(i8 * %addr, <4 x i64> %data) {
+  %vaddr = bitcast i8* %addr to <4 x i64>*
+  store <4 x i64>%data, <4 x i64>* %vaddr, align 32
+  ret void
+}
+
+; CHECK-LABEL: test_256_4
+; CHECK: vmovdqu32
+; CHECK: ret
+define void @test_256_4(i8 * %addr, <8 x i32> %data) {
+  %vaddr = bitcast i8* %addr to <8 x i32>*
+  store <8 x i32>%data, <8 x i32>* %vaddr, align 1
+  ret void
+}
+
+; CHECK-LABEL: test_256_5
+; CHECK: vmovdqa32
+; CHECK: ret
+define void @test_256_5(i8 * %addr, <8 x i32> %data) {
+  %vaddr = bitcast i8* %addr to <8 x i32>*
+  store <8 x i32>%data, <8 x i32>* %vaddr, align 32
+  ret void
+}
+
+; CHECK-LABEL: test_256_6
+; CHECK: vmovdqa64
+; CHECK: ret
+define <4 x i64> @test_256_6(i8 * %addr) {
+  %vaddr = bitcast i8* %addr to <4 x i64>*
+  %res = load <4 x i64>* %vaddr, align 32
+  ret <4 x i64>%res
+}
+
+; CHECK-LABEL: test_256_7
+; CHECK: vmovdqu64
+; CHECK: ret
+define void @test_256_7(i8 * %addr, <4 x i64> %data) {
+  %vaddr = bitcast i8* %addr to <4 x i64>*
+  store <4 x i64>%data, <4 x i64>* %vaddr, align 1
+  ret void
+}
+
+; CHECK-LABEL: test_256_8
+; CHECK: vmovdqu64
+; CHECK: ret
+define <4 x i64> @test_256_8(i8 * %addr) {
+  %vaddr = bitcast i8* %addr to <4 x i64>*
+  %res = load <4 x i64>* %vaddr, align 1
+  ret <4 x i64>%res
+}
+
+; CHECK-LABEL: test_256_9
+; CHECK: vmovapd {{.*}} ## encoding: [0x62
+; CHECK: ret
+define void @test_256_9(i8 * %addr, <4 x double> %data) {
+  %vaddr = bitcast i8* %addr to <4 x double>*
+  store <4 x double>%data, <4 x double>* %vaddr, align 32
+  ret void
+}
+
+; CHECK-LABEL: test_256_10
+; CHECK: vmovapd {{.*}} ## encoding: [0x62
+; CHECK: ret
+define <4 x double> @test_256_10(i8 * %addr) {
+  %vaddr = bitcast i8* %addr to <4 x double>*
+  %res = load <4 x double>* %vaddr, align 32
+  ret <4 x double>%res
+}
+
+; CHECK-LABEL: test_256_11
+; CHECK: vmovaps {{.*}} ## encoding: [0x62
+; CHECK: ret
+define void @test_256_11(i8 * %addr, <8 x float> %data) {
+  %vaddr = bitcast i8* %addr to <8 x float>*
+  store <8 x float>%data, <8 x float>* %vaddr, align 32
+  ret void
+}
+
+; CHECK-LABEL: test_256_12
+; CHECK: vmovaps {{.*}} ## encoding: [0x62
+; CHECK: ret
+define <8 x float> @test_256_12(i8 * %addr) {
+  %vaddr = bitcast i8* %addr to <8 x float>*
+  %res = load <8 x float>* %vaddr, align 32
+  ret <8 x float>%res
+}
+
+; CHECK-LABEL: test_256_13
+; CHECK: vmovupd {{.*}} ## encoding: [0x62
+; CHECK: ret
+define void @test_256_13(i8 * %addr, <4 x double> %data) {
+  %vaddr = bitcast i8* %addr to <4 x double>*
+  store <4 x double>%data, <4 x double>* %vaddr, align 1
+  ret void
+}
+
+; CHECK-LABEL: test_256_14
+; CHECK: vmovupd {{.*}} ## encoding: [0x62
+; CHECK: ret
+define <4 x double> @test_256_14(i8 * %addr) {
+  %vaddr = bitcast i8* %addr to <4 x double>*
+  %res = load <4 x double>* %vaddr, align 1
+  ret <4 x double>%res
+}
+
+; CHECK-LABEL: test_256_15
+; CHECK: vmovups {{.*}} ## encoding: [0x62
+; CHECK: ret
+define void @test_256_15(i8 * %addr, <8 x float> %data) {
+  %vaddr = bitcast i8* %addr to <8 x float>*
+  store <8 x float>%data, <8 x float>* %vaddr, align 1
+  ret void
+}
+
+; CHECK-LABEL: test_256_16
+; CHECK: vmovups {{.*}} ## encoding: [0x62
+; CHECK: ret
+define <8 x float> @test_256_16(i8 * %addr) {
+  %vaddr = bitcast i8* %addr to <8 x float>*
+  %res = load <8 x float>* %vaddr, align 1
+  ret <8 x float>%res
+}
+
+; CHECK-LABEL: test_256_17
+; CHECK: vmovdqa32{{.*{%k[1-7]} }}
+; CHECK: ret
+define <8 x i32> @test_256_17(i8 * %addr, <8 x i32> %old, <8 x i32> %mask1) {
+  %mask = icmp ne <8 x i32> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <8 x i32>*
+  %r = load <8 x i32>* %vaddr, align 32
+  %res = select <8 x i1> %mask, <8 x i32> %r, <8 x i32> %old
+  ret <8 x i32>%res
+}
+
+; CHECK-LABEL: test_256_18
+; CHECK: vmovdqu32{{.*{%k[1-7]} }}
+; CHECK: ret
+define <8 x i32> @test_256_18(i8 * %addr, <8 x i32> %old, <8 x i32> %mask1) {
+  %mask = icmp ne <8 x i32> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <8 x i32>*
+  %r = load <8 x i32>* %vaddr, align 1
+  %res = select <8 x i1> %mask, <8 x i32> %r, <8 x i32> %old
+  ret <8 x i32>%res
+}
+
+; CHECK-LABEL: test_256_19
+; CHECK: vmovdqa32{{.*{%k[1-7]} {z} }}
+; CHECK: ret
+define <8 x i32> @test_256_19(i8 * %addr, <8 x i32> %mask1) {
+  %mask = icmp ne <8 x i32> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <8 x i32>*
+  %r = load <8 x i32>* %vaddr, align 32
+  %res = select <8 x i1> %mask, <8 x i32> %r, <8 x i32> zeroinitializer
+  ret <8 x i32>%res
+}
+
+; CHECK-LABEL: test_256_20
+; CHECK: vmovdqu32{{.*{%k[1-7]} {z} }}
+; CHECK: ret
+define <8 x i32> @test_256_20(i8 * %addr, <8 x i32> %mask1) {
+  %mask = icmp ne <8 x i32> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <8 x i32>*
+  %r = load <8 x i32>* %vaddr, align 1
+  %res = select <8 x i1> %mask, <8 x i32> %r, <8 x i32> zeroinitializer
+  ret <8 x i32>%res
+}
+
+; CHECK-LABEL: test_256_21
+; CHECK: vmovdqa64{{.*{%k[1-7]} }}
+; CHECK: ret
+define <4 x i64> @test_256_21(i8 * %addr, <4 x i64> %old, <4 x i64> %mask1) {
+  %mask = icmp ne <4 x i64> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <4 x i64>*
+  %r = load <4 x i64>* %vaddr, align 32
+  %res = select <4 x i1> %mask, <4 x i64> %r, <4 x i64> %old
+  ret <4 x i64>%res
+}
+
+; CHECK-LABEL: test_256_22
+; CHECK: vmovdqu64{{.*{%k[1-7]} }}
+; CHECK: ret
+define <4 x i64> @test_256_22(i8 * %addr, <4 x i64> %old, <4 x i64> %mask1) {
+  %mask = icmp ne <4 x i64> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <4 x i64>*
+  %r = load <4 x i64>* %vaddr, align 1
+  %res = select <4 x i1> %mask, <4 x i64> %r, <4 x i64> %old
+  ret <4 x i64>%res
+}
+
+; CHECK-LABEL: test_256_23
+; CHECK: vmovdqa64{{.*{%k[1-7]} {z} }}
+; CHECK: ret
+define <4 x i64> @test_256_23(i8 * %addr, <4 x i64> %mask1) {
+  %mask = icmp ne <4 x i64> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <4 x i64>*
+  %r = load <4 x i64>* %vaddr, align 32
+  %res = select <4 x i1> %mask, <4 x i64> %r, <4 x i64> zeroinitializer
+  ret <4 x i64>%res
+}
+
+; CHECK-LABEL: test_256_24
+; CHECK: vmovdqu64{{.*{%k[1-7]} {z} }}
+; CHECK: ret
+define <4 x i64> @test_256_24(i8 * %addr, <4 x i64> %mask1) {
+  %mask = icmp ne <4 x i64> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <4 x i64>*
+  %r = load <4 x i64>* %vaddr, align 1
+  %res = select <4 x i1> %mask, <4 x i64> %r, <4 x i64> zeroinitializer
+  ret <4 x i64>%res
+}
+
+; CHECK-LABEL: test_256_25
+; CHECK: vmovaps{{.*{%k[1-7]} }}
+; CHECK: ret
+define <8 x float> @test_256_25(i8 * %addr, <8 x float> %old, <8 x float> %mask1) {
+  %mask = fcmp one <8 x float> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <8 x float>*
+  %r = load <8 x float>* %vaddr, align 32
+  %res = select <8 x i1> %mask, <8 x float> %r, <8 x float> %old
+  ret <8 x float>%res
+}
+
+; CHECK-LABEL: test_256_26
+; CHECK: vmovups{{.*{%k[1-7]} }}
+; CHECK: ret
+define <8 x float> @test_256_26(i8 * %addr, <8 x float> %old, <8 x float> %mask1) {
+  %mask = fcmp one <8 x float> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <8 x float>*
+  %r = load <8 x float>* %vaddr, align 1
+  %res = select <8 x i1> %mask, <8 x float> %r, <8 x float> %old
+  ret <8 x float>%res
+}
+
+; CHECK-LABEL: test_256_27
+; CHECK: vmovaps{{.*{%k[1-7]} {z} }}
+; CHECK: ret
+define <8 x float> @test_256_27(i8 * %addr, <8 x float> %mask1) {
+  %mask = fcmp one <8 x float> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <8 x float>*
+  %r = load <8 x float>* %vaddr, align 32
+  %res = select <8 x i1> %mask, <8 x float> %r, <8 x float> zeroinitializer
+  ret <8 x float>%res
+}
+
+; CHECK-LABEL: test_256_28
+; CHECK: vmovups{{.*{%k[1-7]} {z} }}
+; CHECK: ret
+define <8 x float> @test_256_28(i8 * %addr, <8 x float> %mask1) {
+  %mask = fcmp one <8 x float> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <8 x float>*
+  %r = load <8 x float>* %vaddr, align 1
+  %res = select <8 x i1> %mask, <8 x float> %r, <8 x float> zeroinitializer
+  ret <8 x float>%res
+}
+
+; CHECK-LABEL: test_256_29
+; CHECK: vmovapd{{.*{%k[1-7]} }}
+; CHECK: ret
+define <4 x double> @test_256_29(i8 * %addr, <4 x double> %old, <4 x i64> %mask1) {
+  %mask = icmp ne <4 x i64> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <4 x double>*
+  %r = load <4 x double>* %vaddr, align 32
+  %res = select <4 x i1> %mask, <4 x double> %r, <4 x double> %old
+  ret <4 x double>%res
+}
+
+; CHECK-LABEL: test_256_30
+; CHECK: vmovupd{{.*{%k[1-7]} }}
+; CHECK: ret
+define <4 x double> @test_256_30(i8 * %addr, <4 x double> %old, <4 x i64> %mask1) {
+  %mask = icmp ne <4 x i64> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <4 x double>*
+  %r = load <4 x double>* %vaddr, align 1
+  %res = select <4 x i1> %mask, <4 x double> %r, <4 x double> %old
+  ret <4 x double>%res
+}
+
+; CHECK-LABEL: test_256_31
+; CHECK: vmovapd{{.*{%k[1-7]} {z} }}
+; CHECK: ret
+define <4 x double> @test_256_31(i8 * %addr, <4 x i64> %mask1) {
+  %mask = icmp ne <4 x i64> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <4 x double>*
+  %r = load <4 x double>* %vaddr, align 32
+  %res = select <4 x i1> %mask, <4 x double> %r, <4 x double> zeroinitializer
+  ret <4 x double>%res
+}
+
+; CHECK-LABEL: test_256_32
+; CHECK: vmovupd{{.*{%k[1-7]} {z} }}
+; CHECK: ret
+define <4 x double> @test_256_32(i8 * %addr, <4 x i64> %mask1) {
+  %mask = icmp ne <4 x i64> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <4 x double>*
+  %r = load <4 x double>* %vaddr, align 1
+  %res = select <4 x i1> %mask, <4 x double> %r, <4 x double> zeroinitializer
+  ret <4 x double>%res
+}
+
+; CHECK-LABEL: test_128_1
+; CHECK: vmovdqu32
+; CHECK: ret
+define <4 x i32> @test_128_1(i8 * %addr) {
+  %vaddr = bitcast i8* %addr to <4 x i32>*
+  %res = load <4 x i32>* %vaddr, align 1
+  ret <4 x i32>%res
+}
+
+; CHECK-LABEL: test_128_2
+; CHECK: vmovdqa32
+; CHECK: ret
+define <4 x i32> @test_128_2(i8 * %addr) {
+  %vaddr = bitcast i8* %addr to <4 x i32>*
+  %res = load <4 x i32>* %vaddr, align 16
+  ret <4 x i32>%res
+}
+
+; CHECK-LABEL: test_128_3
+; CHECK: vmovdqa64
+; CHECK: ret
+define void @test_128_3(i8 * %addr, <2 x i64> %data) {
+  %vaddr = bitcast i8* %addr to <2 x i64>*
+  store <2 x i64>%data, <2 x i64>* %vaddr, align 16
+  ret void
+}
+
+; CHECK-LABEL: test_128_4
+; CHECK: vmovdqu32
+; CHECK: ret
+define void @test_128_4(i8 * %addr, <4 x i32> %data) {
+  %vaddr = bitcast i8* %addr to <4 x i32>*
+  store <4 x i32>%data, <4 x i32>* %vaddr, align 1
+  ret void
+}
+
+; CHECK-LABEL: test_128_5
+; CHECK: vmovdqa32
+; CHECK: ret
+define void @test_128_5(i8 * %addr, <4 x i32> %data) {
+  %vaddr = bitcast i8* %addr to <4 x i32>*
+  store <4 x i32>%data, <4 x i32>* %vaddr, align 16
+  ret void
+}
+
+; CHECK-LABEL: test_128_6
+; CHECK: vmovdqa64
+; CHECK: ret
+define <2 x i64> @test_128_6(i8 * %addr) {
+  %vaddr = bitcast i8* %addr to <2 x i64>*
+  %res = load <2 x i64>* %vaddr, align 16
+  ret <2 x i64>%res
+}
+
+; CHECK-LABEL: test_128_7
+; CHECK: vmovdqu64
+; CHECK: ret
+define void @test_128_7(i8 * %addr, <2 x i64> %data) {
+  %vaddr = bitcast i8* %addr to <2 x i64>*
+  store <2 x i64>%data, <2 x i64>* %vaddr, align 1
+  ret void
+}
+
+; CHECK-LABEL: test_128_8
+; CHECK: vmovdqu64
+; CHECK: ret
+define <2 x i64> @test_128_8(i8 * %addr) {
+  %vaddr = bitcast i8* %addr to <2 x i64>*
+  %res = load <2 x i64>* %vaddr, align 1
+  ret <2 x i64>%res
+}
+
+; CHECK-LABEL: test_128_9
+; CHECK: vmovapd {{.*}} ## encoding: [0x62
+; CHECK: ret
+define void @test_128_9(i8 * %addr, <2 x double> %data) {
+  %vaddr = bitcast i8* %addr to <2 x double>*
+  store <2 x double>%data, <2 x double>* %vaddr, align 16
+  ret void
+}
+
+; CHECK-LABEL: test_128_10
+; CHECK: vmovapd {{.*}} ## encoding: [0x62
+; CHECK: ret
+define <2 x double> @test_128_10(i8 * %addr) {
+  %vaddr = bitcast i8* %addr to <2 x double>*
+  %res = load <2 x double>* %vaddr, align 16
+  ret <2 x double>%res
+}
+
+; CHECK-LABEL: test_128_11
+; CHECK: vmovaps {{.*}} ## encoding: [0x62
+; CHECK: ret
+define void @test_128_11(i8 * %addr, <4 x float> %data) {
+  %vaddr = bitcast i8* %addr to <4 x float>*
+  store <4 x float>%data, <4 x float>* %vaddr, align 16
+  ret void
+}
+
+; CHECK-LABEL: test_128_12
+; CHECK: vmovaps {{.*}} ## encoding: [0x62
+; CHECK: ret
+define <4 x float> @test_128_12(i8 * %addr) {
+  %vaddr = bitcast i8* %addr to <4 x float>*
+  %res = load <4 x float>* %vaddr, align 16
+  ret <4 x float>%res
+}
+
+; CHECK-LABEL: test_128_13
+; CHECK: vmovupd {{.*}} ## encoding: [0x62
+; CHECK: ret
+define void @test_128_13(i8 * %addr, <2 x double> %data) {
+  %vaddr = bitcast i8* %addr to <2 x double>*
+  store <2 x double>%data, <2 x double>* %vaddr, align 1
+  ret void
+}
+
+; CHECK-LABEL: test_128_14
+; CHECK: vmovupd {{.*}} ## encoding: [0x62
+; CHECK: ret
+define <2 x double> @test_128_14(i8 * %addr) {
+  %vaddr = bitcast i8* %addr to <2 x double>*
+  %res = load <2 x double>* %vaddr, align 1
+  ret <2 x double>%res
+}
+
+; CHECK-LABEL: test_128_15
+; CHECK: vmovups {{.*}} ## encoding: [0x62
+; CHECK: ret
+define void @test_128_15(i8 * %addr, <4 x float> %data) {
+  %vaddr = bitcast i8* %addr to <4 x float>*
+  store <4 x float>%data, <4 x float>* %vaddr, align 1
+  ret void
+}
+
+; CHECK-LABEL: test_128_16
+; CHECK: vmovups {{.*}} ## encoding: [0x62
+; CHECK: ret
+define <4 x float> @test_128_16(i8 * %addr) {
+  %vaddr = bitcast i8* %addr to <4 x float>*
+  %res = load <4 x float>* %vaddr, align 1
+  ret <4 x float>%res
+}
+
+; CHECK-LABEL: test_128_17
+; CHECK: vmovdqa32{{.*{%k[1-7]} }}
+; CHECK: ret
+define <4 x i32> @test_128_17(i8 * %addr, <4 x i32> %old, <4 x i32> %mask1) {
+  %mask = icmp ne <4 x i32> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <4 x i32>*
+  %r = load <4 x i32>* %vaddr, align 16
+  %res = select <4 x i1> %mask, <4 x i32> %r, <4 x i32> %old
+  ret <4 x i32>%res
+}
+
+; CHECK-LABEL: test_128_18
+; CHECK: vmovdqu32{{.*{%k[1-7]} }}
+; CHECK: ret
+define <4 x i32> @test_128_18(i8 * %addr, <4 x i32> %old, <4 x i32> %mask1) {
+  %mask = icmp ne <4 x i32> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <4 x i32>*
+  %r = load <4 x i32>* %vaddr, align 1
+  %res = select <4 x i1> %mask, <4 x i32> %r, <4 x i32> %old
+  ret <4 x i32>%res
+}
+
+; CHECK-LABEL: test_128_19
+; CHECK: vmovdqa32{{.*{%k[1-7]} {z} }}
+; CHECK: ret
+define <4 x i32> @test_128_19(i8 * %addr, <4 x i32> %mask1) {
+  %mask = icmp ne <4 x i32> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <4 x i32>*
+  %r = load <4 x i32>* %vaddr, align 16
+  %res = select <4 x i1> %mask, <4 x i32> %r, <4 x i32> zeroinitializer
+  ret <4 x i32>%res
+}
+
+; CHECK-LABEL: test_128_20
+; CHECK: vmovdqu32{{.*{%k[1-7]} {z} }}
+; CHECK: ret
+define <4 x i32> @test_128_20(i8 * %addr, <4 x i32> %mask1) {
+  %mask = icmp ne <4 x i32> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <4 x i32>*
+  %r = load <4 x i32>* %vaddr, align 1
+  %res = select <4 x i1> %mask, <4 x i32> %r, <4 x i32> zeroinitializer
+  ret <4 x i32>%res
+}
+
+; CHECK-LABEL: test_128_21
+; CHECK: vmovdqa64{{.*{%k[1-7]} }}
+; CHECK: ret
+define <2 x i64> @test_128_21(i8 * %addr, <2 x i64> %old, <2 x i64> %mask1) {
+  %mask = icmp ne <2 x i64> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <2 x i64>*
+  %r = load <2 x i64>* %vaddr, align 16
+  %res = select <2 x i1> %mask, <2 x i64> %r, <2 x i64> %old
+  ret <2 x i64>%res
+}
+
+; CHECK-LABEL: test_128_22
+; CHECK: vmovdqu64{{.*{%k[1-7]} }}
+; CHECK: ret
+define <2 x i64> @test_128_22(i8 * %addr, <2 x i64> %old, <2 x i64> %mask1) {
+  %mask = icmp ne <2 x i64> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <2 x i64>*
+  %r = load <2 x i64>* %vaddr, align 1
+  %res = select <2 x i1> %mask, <2 x i64> %r, <2 x i64> %old
+  ret <2 x i64>%res
+}
+
+; CHECK-LABEL: test_128_23
+; CHECK: vmovdqa64{{.*{%k[1-7]} {z} }}
+; CHECK: ret
+define <2 x i64> @test_128_23(i8 * %addr, <2 x i64> %mask1) {
+  %mask = icmp ne <2 x i64> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <2 x i64>*
+  %r = load <2 x i64>* %vaddr, align 16
+  %res = select <2 x i1> %mask, <2 x i64> %r, <2 x i64> zeroinitializer
+  ret <2 x i64>%res
+}
+
+; CHECK-LABEL: test_128_24
+; CHECK: vmovdqu64{{.*{%k[1-7]} {z} }}
+; CHECK: ret
+define <2 x i64> @test_128_24(i8 * %addr, <2 x i64> %mask1) {
+  %mask = icmp ne <2 x i64> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <2 x i64>*
+  %r = load <2 x i64>* %vaddr, align 1
+  %res = select <2 x i1> %mask, <2 x i64> %r, <2 x i64> zeroinitializer
+  ret <2 x i64>%res
+}
+
+; CHECK-LABEL: test_128_25
+; CHECK: vmovaps{{.*{%k[1-7]} }}
+; CHECK: ret
+define <4 x float> @test_128_25(i8 * %addr, <4 x float> %old, <4 x i32> %mask1) {
+  %mask = icmp ne <4 x i32> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <4 x float>*
+  %r = load <4 x float>* %vaddr, align 16
+  %res = select <4 x i1> %mask, <4 x float> %r, <4 x float> %old
+  ret <4 x float>%res
+}
+
+; CHECK-LABEL: test_128_26
+; CHECK: vmovups{{.*{%k[1-7]} }}
+; CHECK: ret
+define <4 x float> @test_128_26(i8 * %addr, <4 x float> %old, <4 x i32> %mask1) {
+  %mask = icmp ne <4 x i32> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <4 x float>*
+  %r = load <4 x float>* %vaddr, align 1
+  %res = select <4 x i1> %mask, <4 x float> %r, <4 x float> %old
+  ret <4 x float>%res
+}
+
+; CHECK-LABEL: test_128_27
+; CHECK: vmovaps{{.*{%k[1-7]} {z} }}
+; CHECK: ret
+define <4 x float> @test_128_27(i8 * %addr, <4 x i32> %mask1) {
+  %mask = icmp ne <4 x i32> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <4 x float>*
+  %r = load <4 x float>* %vaddr, align 16
+  %res = select <4 x i1> %mask, <4 x float> %r, <4 x float> zeroinitializer
+  ret <4 x float>%res
+}
+
+; CHECK-LABEL: test_128_28
+; CHECK: vmovups{{.*{%k[1-7]} {z} }}
+; CHECK: ret
+define <4 x float> @test_128_28(i8 * %addr, <4 x i32> %mask1) {
+  %mask = icmp ne <4 x i32> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <4 x float>*
+  %r = load <4 x float>* %vaddr, align 1
+  %res = select <4 x i1> %mask, <4 x float> %r, <4 x float> zeroinitializer
+  ret <4 x float>%res
+}
+
+; CHECK-LABEL: test_128_29
+; CHECK: vmovapd{{.*{%k[1-7]} }}
+; CHECK: ret
+define <2 x double> @test_128_29(i8 * %addr, <2 x double> %old, <2 x i64> %mask1) {
+  %mask = icmp ne <2 x i64> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <2 x double>*
+  %r = load <2 x double>* %vaddr, align 16
+  %res = select <2 x i1> %mask, <2 x double> %r, <2 x double> %old
+  ret <2 x double>%res
+}
+
+; CHECK-LABEL: test_128_30
+; CHECK: vmovupd{{.*{%k[1-7]} }}
+; CHECK: ret
+define <2 x double> @test_128_30(i8 * %addr, <2 x double> %old, <2 x i64> %mask1) {
+  %mask = icmp ne <2 x i64> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <2 x double>*
+  %r = load <2 x double>* %vaddr, align 1
+  %res = select <2 x i1> %mask, <2 x double> %r, <2 x double> %old
+  ret <2 x double>%res
+}
+
+; CHECK-LABEL: test_128_31
+; CHECK: vmovapd{{.*{%k[1-7]} {z} }}
+; CHECK: ret
+define <2 x double> @test_128_31(i8 * %addr, <2 x i64> %mask1) {
+  %mask = icmp ne <2 x i64> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <2 x double>*
+  %r = load <2 x double>* %vaddr, align 16
+  %res = select <2 x i1> %mask, <2 x double> %r, <2 x double> zeroinitializer
+  ret <2 x double>%res
+}
+
+; CHECK-LABEL: test_128_32
+; CHECK: vmovupd{{.*{%k[1-7]} {z} }}
+; CHECK: ret
+define <2 x double> @test_128_32(i8 * %addr, <2 x i64> %mask1) {
+  %mask = icmp ne <2 x i64> %mask1, zeroinitializer
+  %vaddr = bitcast i8* %addr to <2 x double>*
+  %r = load <2 x double>* %vaddr, align 1
+  %res = select <2 x i1> %mask, <2 x double> %r, <2 x double> zeroinitializer
+  ret <2 x double>%res
+}
-- 
2.34.1