[X86] AVX512: Add vmovntdqa
author    Adam Nemet <anemet@apple.com>  Tue, 10 Jun 2014 16:39:53 +0000 (16:39 +0000)
committer Adam Nemet <anemet@apple.com>  Tue, 10 Jun 2014 16:39:53 +0000 (16:39 +0000)
This adds the 512-bit form of the non-temporal load, along with the corresponding intrinsic and tests.
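For context, vmovntdqa is a 64-byte, alignment-required vector load with a
non-temporal hint: on suitable (notably write-combining) memory it can bypass
the cache hierarchy. A minimal C sketch of how the instruction is typically
reached from source, assuming the Intel intrinsic _mm512_stream_load_si512;
the helper name and loop are illustrative, not part of this patch:

  #include <immintrin.h>
  #include <stddef.h>

  /* Illustrative only: sum a 64-byte-aligned buffer (bytes assumed to be
     a multiple of 64) with non-temporal 512-bit loads; each
     _mm512_stream_load_si512 is expected to compile to vmovntdqa on
     AVX-512 targets. */
  static __m512i sum_stream(const char *buf, size_t bytes) {
    __m512i acc = _mm512_setzero_si512();
    for (size_t i = 0; i < bytes; i += 64)
      acc = _mm512_add_epi64(acc,
                             _mm512_stream_load_si512((__m512i *)(buf + i)));
    return acc;
  }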

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@210543 91177308-0d34-0410-b5e6-96231b3b80d8

include/llvm/IR/IntrinsicsX86.td
lib/Target/X86/X86InstrAVX512.td
test/CodeGen/X86/avx512-intrinsics.ll
test/MC/X86/avx512-encodings.s

diff --git a/include/llvm/IR/IntrinsicsX86.td b/include/llvm/IR/IntrinsicsX86.td
index 88ee1cec2d585d4c7331a914a79ae7b71dc9cbda..0d224fe40db40f91032faa1a6798813e500870d6 100644
@@ -1939,6 +1939,8 @@ let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
                          llvm_i32_ty], [IntrNoMem, Commutative]>;
   def int_x86_avx2_movntdqa : GCCBuiltin<"__builtin_ia32_movntdqa256">,
               Intrinsic<[llvm_v4i64_ty], [llvm_ptr_ty], [IntrReadMem]>;
+  def int_x86_avx512_movntdqa : GCCBuiltin<"__builtin_ia32_movntdqa512">,
+              Intrinsic<[llvm_v8i64_ty], [llvm_ptr_ty], [IntrReadMem]>;
 }
 
 //===----------------------------------------------------------------------===//
diff --git a/lib/Target/X86/X86InstrAVX512.td b/lib/Target/X86/X86InstrAVX512.td
index 37bcc5235e4f1042457e8ea4862d384ddff12c0e..d18c7ec63134a87c7b9950aa7f9d7bf8d0473c61 100644
@@ -1787,6 +1787,17 @@ def : Pat<(v16i32 (X86Vinsert undef, GR32:$src2, (iPTR 0))),
 def : Pat<(v8i64 (X86Vinsert undef, GR64:$src2, (iPTR 0))),
         (SUBREG_TO_REG (i32 0), (VMOV64toPQIZrr GR64:$src2), sub_xmm)>;
 
+//===----------------------------------------------------------------------===//
+// AVX-512 - Non-temporals
+//===----------------------------------------------------------------------===//
+
+def VMOVNTDQAZrm : AVX5128I<0x2A, MRMSrcMem, (outs VR512:$dst),
+                            (ins i512mem:$src),
+                            "vmovntdqa\t{$src, $dst|$dst, $src}",
+                            [(set VR512:$dst,
+                              (int_x86_avx512_movntdqa addr:$src))]>,
+                   EVEX, EVEX_V512;
+
 //===----------------------------------------------------------------------===//
 // AVX-512 - Integer arithmetic
 //
diff --git a/test/CodeGen/X86/avx512-intrinsics.ll b/test/CodeGen/X86/avx512-intrinsics.ll
index e19841a6f9a8e8555adc298fd394598f3b43e1d9..297fef50936c424fbb2bc68bd6ef2a22d3549dad 100644
@@ -545,3 +545,12 @@ define <16 x float> @test_vpermt2ps(<16 x float>%x, <16 x float>%y, <16 x i32>%p
 }
 
 declare <16 x float> @llvm.x86.avx512.mask.vpermt.ps.512(<16 x i32>, <16 x float>, <16 x float>, i16)
+
+define <8 x i64> @test_vmovntdqa(i8 *%x) {
+; CHECK-LABEL: test_vmovntdqa:
+; CHECK: vmovntdqa (%rdi), %zmm0 ## encoding: [0x62,0xf2,0x7d,0x48,0x2a,0x07]
+  %res = call <8 x i64> @llvm.x86.avx512.movntdqa(i8* %x)
+  ret <8 x i64> %res
+}
+
+declare <8 x i64> @llvm.x86.avx512.movntdqa(i8*)
diff --git a/test/MC/X86/avx512-encodings.s b/test/MC/X86/avx512-encodings.s
index 2915b7a4d0ac4198a644d0fd09dd00484024247b..f38eaf5402fedad3f2a37d309bf1558a120580a0 100644
@@ -3151,3 +3151,11 @@ vaddpd 512(%rdi, %rsi, 8) {1to8}, %zmm20, %zmm30
 // CHECK: vaddps {{.*}}{1to16}
 // CHECK: encoding: [0x62,0x61,0x5c,0x50,0x58,0xb4,0xf7,0x00,0x02,0x00,0x00]
 vaddps 512(%rdi, %rsi, 8) {1to16}, %zmm20, %zmm30
+
+// CHECK: vmovntdqa
+// CHECK: encoding: [0x62,0x72,0x7d,0x48,0x2a,0xab,0x78,0x56,0x34,0x12]
+vmovntdqa 0x12345678(%rbx), %zmm13
+
+// CHECK: vmovntdqa
+// CHECK: encoding: [0x62,0xc2,0x7d,0x48,0x2a,0x14,0x56]
+vmovntdqa (%r14,%rdx,2), %zmm18