Reorder declarations of vmovmskp* and also put the necessary AVX
predicate and TB encoding fields. This fixes the encoding for the
attached testcase and fixes PR10625.

author    Bruno Cardoso Lopes <bruno.cardoso@gmail.com>
          Mon, 15 Aug 2011 23:36:45 +0000 (23:36 +0000)
committer Bruno Cardoso Lopes <bruno.cardoso@gmail.com>
          Mon, 15 Aug 2011 23:36:45 +0000 (23:36 +0000)

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@137684 91177308-0d34-0410-b5e6-96231b3b80d8

lib/Target/X86/X86InstrSSE.td
test/MC/X86/x86-32-avx.s
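
For reference, a minimal sketch of the encodings this change produces,
mirroring the attached testcase (the VEX.L/pp decomposition in the comments
is an annotation added here, not part of the diff):

    # 256-bit forms exercised by the new test (AT&T syntax):
    vmovmskps %ymm2, %eax    # encoding: c5 fc 50 c2  (VEX.L=1, pp=00)
    vmovmskpd %ymm2, %eax    # encoding: c5 fd 50 c2  (VEX.L=1, pp=01, i.e. 66 prefix)
    # 128-bit form already present in the test context:
    vmovmskpd %xmm2, %eax    # encoding: c5 f9 50 c2  (VEX.L=0, pp=01)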

index 2f8b2a701dea4ba473177a226cddbbc035f8a3d3..ad13fc90cdeb1a1e88beda26320d99f449a85a52 100644 (file)
@@ -1475,17 +1475,6 @@ multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
                 !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>, REX_W;
 }
 
-// Mask creation
-defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
-                                      "movmskps", SSEPackedSingle>, VEX;
-defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
-                                      "movmskpd", SSEPackedDouble>, OpSize,
-                                      VEX;
-defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_ps_256,
-                                      "movmskps", SSEPackedSingle>, VEX;
-defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_pd_256,
-                                      "movmskpd", SSEPackedDouble>, OpSize,
-                                      VEX;
 defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
                                      SSEPackedSingle>, TB;
 defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
@@ -1493,29 +1482,44 @@ defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
 
 // X86fgetsign
 def MOVMSKPDrr32_alt : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins FR64:$src),
-                    "movmskpd\t{$src, $dst|$dst, $src}",
-                    [(set GR32:$dst, (X86fgetsign FR64:$src))], SSEPackedDouble>, TB, OpSize;
+               "movmskpd\t{$src, $dst|$dst, $src}",
+               [(set GR32:$dst, (X86fgetsign FR64:$src))], SSEPackedDouble>, TB,
+               OpSize;
 def MOVMSKPDrr64_alt : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins FR64:$src),
-                    "movmskpd\t{$src, $dst|$dst, $src}",
-                    [(set GR64:$dst, (X86fgetsign FR64:$src))], SSEPackedDouble>, TB, OpSize;
+               "movmskpd\t{$src, $dst|$dst, $src}",
+               [(set GR64:$dst, (X86fgetsign FR64:$src))], SSEPackedDouble>, TB,
+               OpSize;
 def MOVMSKPSrr32_alt : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins FR32:$src),
-                    "movmskps\t{$src, $dst|$dst, $src}",
-                    [(set GR32:$dst, (X86fgetsign FR32:$src))], SSEPackedSingle>, TB;
+               "movmskps\t{$src, $dst|$dst, $src}",
+               [(set GR32:$dst, (X86fgetsign FR32:$src))], SSEPackedSingle>, TB;
 def MOVMSKPSrr64_alt : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins FR32:$src),
-                    "movmskps\t{$src, $dst|$dst, $src}",
-                    [(set GR64:$dst, (X86fgetsign FR32:$src))], SSEPackedSingle>, TB;
-
-// Assembler Only
-def VMOVMSKPSr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
-           "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
-def VMOVMSKPDr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
-           "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
-           VEX;
-def VMOVMSKPSYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
-           "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
-def VMOVMSKPDYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
-           "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
-           VEX;
+               "movmskps\t{$src, $dst|$dst, $src}",
+               [(set GR64:$dst, (X86fgetsign FR32:$src))], SSEPackedSingle>, TB;
+
+let Predicates = [HasAVX] in {
+  defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
+                                        "movmskps", SSEPackedSingle>, TB, VEX;
+  defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
+                                        "movmskpd", SSEPackedDouble>, TB, OpSize,
+                                        VEX;
+  defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_ps_256,
+                                        "movmskps", SSEPackedSingle>, TB, VEX;
+  defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_pd_256,
+                                        "movmskpd", SSEPackedDouble>, TB, OpSize,
+                                        VEX;
+
+  // Assembler Only
+  def VMOVMSKPSr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
+             "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
+  def VMOVMSKPDr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
+             "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
+             VEX;
+  def VMOVMSKPSYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
+             "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
+  def VMOVMSKPDYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
+             "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
+             VEX;
+}
 
 //===----------------------------------------------------------------------===//
 // SSE 1 & 2 - Misc aliasing of packed SSE 1 & 2 instructions
index f56d5763adb96533ffb36f8bbb44b98ed3a06515..e13a8712f7d6ec6cedff6914904827c6feeccb23 100644 (file)
 // CHECK: encoding: [0xc5,0xf9,0x50,0xc2]
           vmovmskpd  %xmm2, %eax
 
+// CHECK: vmovmskps  %ymm2, %eax
+// CHECK: encoding: [0xc5,0xfc,0x50,0xc2]
+          vmovmskps  %ymm2, %eax
+
+// CHECK: vmovmskpd  %ymm2, %eax
+// CHECK: encoding: [0xc5,0xfd,0x50,0xc2]
+          vmovmskpd  %ymm2, %eax
+
 // CHECK: vcmpss  $0, %xmm1, %xmm2, %xmm3
 // CHECK: encoding: [0xc5,0xea,0xc2,0xd9,0x00]
           vcmpeqss   %xmm1, %xmm2, %xmm3
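
The TB class places these instructions in the legacy 0F opcode map, which the
two-byte VEX prefix (c5) implies; without it the VEX forms were mis-encoded.
A quick sketch of the corresponding non-VEX SSE encodings for comparison
(standard opcode-map values, not taken from this diff, so treat the bytes as
an assumption):

    movmskps %xmm2, %eax    # 0f 50 c2      (TB: 0F opcode map)
    movmskpd %xmm2, %eax    # 66 0f 50 c2   (OpSize adds the 66 prefix)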