[AVX512] Bring vmovq instruction names into alignment with the AVX and SSE names.
author     Craig Topper <craig.topper@gmail.com>
           Mon, 28 Dec 2015 06:11:42 +0000 (06:11 +0000)
committer  Craig Topper <craig.topper@gmail.com>
           Mon, 28 Dec 2015 06:11:42 +0000 (06:11 +0000)
I believe this also fixes a case where a 64-bit memory form that is documented as unsupported in 32-bit mode could be selected there.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@256483 91177308-0d34-0410-b5e6-96231b3b80d8

lib/Target/X86/X86InstrAVX512.td
test/MC/X86/avx512-encodings.s
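
For reference, a minimal sketch of the assembly forms this rename touches (AT&T syntax; the operands are borrowed from the test diff below and are illustrative, not exhaustive):

    // Load form: previously matched as the 66-prefixed 0x6E instruction,
    // now as the F3-prefixed 0x7E instruction, matching the AVX/SSE vmovq load.
    vmovq   (%rcx), %xmm29

    // Store form carrying the real store pattern keeps opcode 0xD6 and is
    // renamed VMOVPQI2QIZmr, in line with the VEX-encoded VMOVPQI2QImr.
    vmovq   %xmm17, (%rcx)

    // The register-to-register 0xD6 form, printed with a ".s" suffix to force
    // the alternate encoding, becomes VMOVPQI2QIZrr.
    vmovq.s %xmm3, %xmm24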

diff --git a/lib/Target/X86/X86InstrAVX512.td b/lib/Target/X86/X86InstrAVX512.td
index 3310d36b84f68e8e184f5474bd1c44af1d0ff00b..625a9f2bbee8ad2ff11e4789ad8c2013d86a669c 100644
@@ -2840,6 +2840,11 @@ def VMOV64toPQIZrr : AVX512BI<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR64:$sr
                         [(set VR128X:$dst,
                           (v2i64 (scalar_to_vector GR64:$src)))],
                           IIC_SSE_MOVDQ>, EVEX, VEX_W, VEX_LIG;
+let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayLoad = 1 in
+def VMOV64toPQIZrm : AVX512BI<0x6E, MRMSrcMem, (outs VR128X:$dst),
+                      (ins i64mem:$src),
+                      "vmovq\t{$src, $dst|$dst, $src}", []>,
+                      EVEX, VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
 let isCodeGenOnly = 1 in {
 def VMOV64toSDZrr : AVX512BI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
                        "vmovq\t{$src, $dst|$dst, $src}",
@@ -2849,12 +2854,12 @@ def VMOVSDto64Zrr : AVX512BI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src)
                          "vmovq\t{$src, $dst|$dst, $src}",
                          [(set GR64:$dst, (bitconvert FR64:$src))],
                          IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteMove]>;
-}
 def VMOVSDto64Zmr : AVX512BI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
                          "vmovq\t{$src, $dst|$dst, $src}",
                          [(store (i64 (bitconvert FR64:$src)), addr:$dst)],
                          IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteStore]>,
                          EVEX_CD8<64, CD8VT1>;
+}
 
 // Move Int Doubleword to Single Scalar
 //
@@ -2893,18 +2898,25 @@ def VMOVPQIto64Zrr : I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128X:$src),
                       IIC_SSE_MOVD_ToGP>, PD, EVEX, VEX_LIG, VEX_W,
                       Requires<[HasAVX512, In64BitMode]>;
 
-def VMOVPQIto64Zmr : I<0xD6, MRMDestMem, (outs),
-                       (ins i64mem:$dst, VR128X:$src),
-                       "vmovq\t{$src, $dst|$dst, $src}",
-                       [(store (extractelt (v2i64 VR128X:$src), (iPTR 0)),
-                               addr:$dst)], IIC_SSE_MOVDQ>,
-                       EVEX, PD, VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>,
-                       Sched<[WriteStore]>, Requires<[HasAVX512, In64BitMode]>;
+let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayStore = 1 in
+def VMOVPQIto64Zmr : I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, VR128X:$src),
+                      "vmovq\t{$src, $dst|$dst, $src}",
+                      [], IIC_SSE_MOVD_ToGP>, PD, EVEX, VEX_LIG, VEX_W,
+                      Requires<[HasAVX512, In64BitMode]>;
 
-def VMOV64toPQIZrr_REV : AVX512BI<0xD6, MRMDestReg, (outs VR128X:$dst),
-                            (ins VR128X:$src),
-                            "vmovq.s\t{$src, $dst|$dst, $src}",[]>,
-                            EVEX, VEX_W, VEX_LIG;
+def VMOVPQI2QIZmr : I<0xD6, MRMDestMem, (outs),
+                      (ins i64mem:$dst, VR128X:$src),
+                      "vmovq\t{$src, $dst|$dst, $src}",
+                      [(store (extractelt (v2i64 VR128X:$src), (iPTR 0)),
+                              addr:$dst)], IIC_SSE_MOVDQ>,
+                      EVEX, PD, VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>,
+                      Sched<[WriteStore]>, Requires<[HasAVX512, In64BitMode]>;
+
+let hasSideEffects = 0 in
+def VMOVPQI2QIZrr : AVX512BI<0xD6, MRMDestReg, (outs VR128X:$dst),
+                             (ins VR128X:$src),
+                             "vmovq.s\t{$src, $dst|$dst, $src}",[]>,
+                             EVEX, VEX_W, VEX_LIG;
 
 // Move Scalar Single to Double Int
 //
@@ -2923,12 +2935,12 @@ def VMOVSS2DIZmr  : AVX512BI<0x7E, MRMDestMem, (outs),
 
 // Move Quadword Int to Packed Quadword Int
 //
-def VMOVQI2PQIZrm : AVX512BI<0x6E, MRMSrcMem, (outs VR128X:$dst),
+def VMOVQI2PQIZrm : AVX512XSI<0x7E, MRMSrcMem, (outs VR128X:$dst),
                       (ins i64mem:$src),
                       "vmovq\t{$src, $dst|$dst, $src}",
                       [(set VR128X:$dst,
                         (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>,
-                      EVEX, VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
+                      EVEX, VEX_W, EVEX_CD8<8, CD8VT8>;
 
 //===----------------------------------------------------------------------===//
 // AVX-512  MOVSS, MOVSD
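
In byte terms, the load change above swaps the EVEX pp field from the 66 prefix to the F3 prefix (the third encoding byte 0xfd becomes 0xfe) and the opcode byte from 0x6e to 0x7e. That is the encoding AVX and SSE already use for the vmovq load, and, unlike the 66-prefixed 0x6e form with a 64-bit operand, it is documented as valid in 32-bit mode, which appears to be the selection bug the commit message mentions. The compressed-disp8 scale is unchanged: EVEX_CD8<64, CD8VT1> and EVEX_CD8<8, CD8VT8> both scale 8-bit displacements by 8 bytes, so 1016 still compresses to 0x7f in the tests below. A before/after sketch using one encoding pair taken from the test diff:

    // Before: EVEX.66.W1 0x6E (the 66-prefixed opcode shared with vmovd)
    //   encoding: [0x62,0x61,0xfd,0x08,0x6e,0x29]
    // After:  EVEX.F3.W1 0x7E (the dedicated vmovq load opcode)
    //   encoding: [0x62,0x61,0xfe,0x08,0x7e,0x29]
    vmovq   (%rcx), %xmm29
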
diff --git a/test/MC/X86/avx512-encodings.s b/test/MC/X86/avx512-encodings.s
index d8806effb0e3baa29fd7d424f83b36f75bf4f0e2..658ca715a32ad160fa96607d0b3f96ba9dc6caf1 100644
@@ -17962,27 +17962,27 @@ vpermilpd $0x23, 0x400(%rbx), %zmm2
           vmovq  %r8, %xmm29
 
 // CHECK: vmovq  (%rcx), %xmm29
-// CHECK:  encoding: [0x62,0x61,0xfd,0x08,0x6e,0x29]
+// CHECK:  encoding: [0x62,0x61,0xfe,0x08,0x7e,0x29]
           vmovq  (%rcx), %xmm29
 
 // CHECK: vmovq  291(%rax,%r14,8), %xmm29
-// CHECK:  encoding: [0x62,0x21,0xfd,0x08,0x6e,0xac,0xf0,0x23,0x01,0x00,0x00]
+// CHECK:  encoding: [0x62,0x21,0xfe,0x08,0x7e,0xac,0xf0,0x23,0x01,0x00,0x00]
           vmovq  291(%rax,%r14,8), %xmm29
 
 // CHECK: vmovq  1016(%rdx), %xmm29
-// CHECK:  encoding: [0x62,0x61,0xfd,0x08,0x6e,0x6a,0x7f]
+// CHECK:  encoding: [0x62,0x61,0xfe,0x08,0x7e,0x6a,0x7f]
           vmovq  1016(%rdx), %xmm29
 
 // CHECK: vmovq  1024(%rdx), %xmm29
-// CHECK:  encoding: [0x62,0x61,0xfd,0x08,0x6e,0xaa,0x00,0x04,0x00,0x00]
+// CHECK:  encoding: [0x62,0x61,0xfe,0x08,0x7e,0xaa,0x00,0x04,0x00,0x00]
           vmovq  1024(%rdx), %xmm29
 
 // CHECK: vmovq  -1024(%rdx), %xmm29
-// CHECK:  encoding: [0x62,0x61,0xfd,0x08,0x6e,0x6a,0x80]
+// CHECK:  encoding: [0x62,0x61,0xfe,0x08,0x7e,0x6a,0x80]
           vmovq  -1024(%rdx), %xmm29
 
 // CHECK: vmovq  -1032(%rdx), %xmm29
-// CHECK:  encoding: [0x62,0x61,0xfd,0x08,0x6e,0xaa,0xf8,0xfb,0xff,0xff]
+// CHECK:  encoding: [0x62,0x61,0xfe,0x08,0x7e,0xaa,0xf8,0xfb,0xff,0xff]
           vmovq  -1032(%rdx), %xmm29
 
 // CHECK: vmovq        %xmm17, (%rcx)          
@@ -18014,27 +18014,27 @@ vpermilpd $0x23, 0x400(%rbx), %zmm2
           vmovq        %xmm3, %xmm24           
 
 // CHECK: vmovq        (%rcx), %xmm24          
-// CHECK: encoding: [0x62,0x61,0xfd,0x08,0x6e,0x01]
+// CHECK: encoding: [0x62,0x61,0xfe,0x08,0x7e,0x01]
           vmovq        (%rcx), %xmm24          
 
 // CHECK: vmovq        291(%rax,%r14,8), %xmm24 
-// CHECK: encoding: [0x62,0x21,0xfd,0x08,0x6e,0x84,0xf0,0x23,0x01,0x00,0x00]
+// CHECK: encoding: [0x62,0x21,0xfe,0x08,0x7e,0x84,0xf0,0x23,0x01,0x00,0x00]
           vmovq        291(%rax,%r14,8), %xmm24 
 
 // CHECK: vmovq        1016(%rdx), %xmm24      
-// CHECK: encoding: [0x62,0x61,0xfd,0x08,0x6e,0x42,0x7f]
+// CHECK: encoding: [0x62,0x61,0xfe,0x08,0x7e,0x42,0x7f]
           vmovq        1016(%rdx), %xmm24      
 
 // CHECK: vmovq        1024(%rdx), %xmm24      
-// CHECK: encoding: [0x62,0x61,0xfd,0x08,0x6e,0x82,0x00,0x04,0x00,0x00]
+// CHECK: encoding: [0x62,0x61,0xfe,0x08,0x7e,0x82,0x00,0x04,0x00,0x00]
           vmovq        1024(%rdx), %xmm24      
 
 // CHECK: vmovq        -1024(%rdx), %xmm24     
-// CHECK: encoding: [0x62,0x61,0xfd,0x08,0x6e,0x42,0x80]
+// CHECK: encoding: [0x62,0x61,0xfe,0x08,0x7e,0x42,0x80]
           vmovq        -1024(%rdx), %xmm24     
 
 // CHECK: vmovq        -1032(%rdx), %xmm24     
-// CHECK: encoding: [0x62,0x61,0xfd,0x08,0x6e,0x82,0xf8,0xfb,0xff,0xff]
+// CHECK: encoding: [0x62,0x61,0xfe,0x08,0x7e,0x82,0xf8,0xfb,0xff,0xff]
           vmovq        -1032(%rdx), %xmm24     
 
 // CHECK: vmovq        %xmm19, (%rcx)
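
A quick way to check the new encodings by hand is the same llvm-mc invocation the test file uses (a sketch, assuming an llvm-mc built at or after this revision; the exact RUN line at the top of avx512-encodings.s may differ in detail):

    // RUN: llvm-mc -triple x86_64-unknown-unknown -mcpu=knl --show-encoding %s | FileCheck %s
    // CHECK: vmovq  (%rcx), %xmm29
    // CHECK: encoding: [0x62,0x61,0xfe,0x08,0x7e,0x29]
              vmovq  (%rcx), %xmm29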