Add separate intrinsics for MMX / SSE shifts with i32 integer operands. This allow...
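Background on what the new IntId2 operand buys: before this patch the "ri" (immediate-count) forms had to be matched by synthesizing a v1i64 shift count out of the scalar immediate (the vector_shuffle / scalar_to_vector pattern removed below); with separate int_x86_mmx_ps*li_* intrinsics that take the count as a plain i32, the immediate form matches directly. A minimal C sketch of the two source-level shapes involved, using the Intel <mmintrin.h> intrinsics as stand-ins; the exact lowering through llvm.x86.mmx.psrli.w vs. llvm.x86.mmx.psrl.w depends on the front end and compiler version, so treat this as an illustration, not part of the commit:

/* Illustration only (not part of this commit).  Assumes an MMX-capable
   compiler with <mmintrin.h>; modern compilers may emulate MMX via SSE2. */
#include <mmintrin.h>
#include <stdio.h>
#include <string.h>

int main(void) {
  __m64 v = _mm_set_pi16(64, 32, 16, 8);

  /* Immediate shift count: the "ri" encoding (e.g. psrlw mm, imm8),
     which the new i32-operand intrinsics (int_x86_mmx_psrli_w et al.)
     now describe directly. */
  __m64 a = _mm_srli_pi16(v, 2);

  /* Shift count held in an MMX register: the "rr" encoding
     (psrlw mm, mm), still described by int_x86_mmx_psrl_w et al. */
  __m64 b = _mm_srl_pi16(v, _mm_cvtsi32_si64(2));

  short ra[4], rb[4];
  memcpy(ra, &a, sizeof ra);
  memcpy(rb, &b, sizeof rb);
  _mm_empty();                    /* clear MMX state before calling printf */
  printf("%d %d %d %d\n", ra[0], ra[1], ra[2], ra[3]);
  printf("%d %d %d %d\n", rb[0], rb[1], rb[2], rb[3]);
  return 0;
}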
[oota-llvm.git] / lib/Target/X86/X86InstrMMX.td
index 600b1e6e76de25de6c371fc3ad90f3e4f422241b..d484695f60ea2214e70dd6e154cd57e2cdcb8f7b 100644
@@ -118,7 +118,8 @@ let isTwoAddress = 1 in {
   }
 
   multiclass MMXI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
-                                string OpcodeStr, Intrinsic IntId> {
+                                string OpcodeStr, Intrinsic IntId,
+                                Intrinsic IntId2> {
     def rr : MMXI<opc, MRMSrcReg, (outs VR64:$dst),
                                   (ins VR64:$src1, VR64:$src2),
                   !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
@@ -131,11 +132,7 @@ let isTwoAddress = 1 in {
     def ri : MMXIi8<opc2, ImmForm, (outs VR64:$dst),
                                    (ins VR64:$src1, i32i8imm:$src2),
                     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
-           [(set VR64:$dst, (IntId VR64:$src1,
-                             (v1i64 (bitconvert
-                                     (v2i32 (vector_shuffle immAllZerosV,
-                                     (v2i32 (scalar_to_vector (i32 imm:$src2))),
-                                             MMX_MOVL_shuffle_mask))))))]>;
+           [(set VR64:$dst, (IntId2 VR64:$src1, (i32 imm:$src2)))]>;
   }
 }
 
@@ -184,8 +181,9 @@ def MMX_MOVQ64mr : MMXI<0x7F, MRMDestMem, (outs), (ins i64mem:$dst, VR64:$src),
 def MMX_MOVDQ2Qrr : MMXID<0xD6, MRMDestMem, (outs VR64:$dst), (ins VR128:$src),
                           "movdq2q\t{$src, $dst|$dst, $src}",
                           [(set VR64:$dst,
-                            (v1i64 (vector_extract (v2i64 VR128:$src),
-                                  (iPTR 0))))]>;
+                            (v1i64 (bitconvert
+                            (i64 (vector_extract (v2i64 VR128:$src),
+                                  (iPTR 0))))))]>;
 
 def MMX_MOVQ2DQrr : MMXIS<0xD6, MRMDestMem, (outs VR128:$dst), (ins VR64:$src),
                           "movq2dq\t{$src, $dst|$dst, $src}",
@@ -282,23 +280,23 @@ let isTwoAddress = 1 in {
 
 // Shift Instructions
 defm MMX_PSRLW : MMXI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw",
-                                    int_x86_mmx_psrl_w>;
+                                    int_x86_mmx_psrl_w, int_x86_mmx_psrli_w>;
 defm MMX_PSRLD : MMXI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld",
-                                    int_x86_mmx_psrl_d>;
+                                    int_x86_mmx_psrl_d, int_x86_mmx_psrli_d>;
 defm MMX_PSRLQ : MMXI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq",
-                                    int_x86_mmx_psrl_q>;
+                                    int_x86_mmx_psrl_q, int_x86_mmx_psrli_q>;
 
 defm MMX_PSLLW : MMXI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw",
-                                    int_x86_mmx_psll_w>;
+                                    int_x86_mmx_psll_w, int_x86_mmx_pslli_w>;
 defm MMX_PSLLD : MMXI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld",
-                                    int_x86_mmx_psll_d>;
+                                    int_x86_mmx_psll_d, int_x86_mmx_pslli_d>;
 defm MMX_PSLLQ : MMXI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq",
-                                    int_x86_mmx_psll_q>;
+                                    int_x86_mmx_psll_q, int_x86_mmx_pslli_q>;
 
 defm MMX_PSRAW : MMXI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
-                                    int_x86_mmx_psra_w>;
+                                    int_x86_mmx_psra_w, int_x86_mmx_psrai_w>;
 defm MMX_PSRAD : MMXI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
-                                    int_x86_mmx_psra_d>;
+                                    int_x86_mmx_psra_d, int_x86_mmx_psrai_d>;
 
 // Comparison Instructions
 defm MMX_PCMPEQB : MMXI_binop_rm_int<0x74, "pcmpeqb", int_x86_mmx_pcmpeq_b>;
@@ -641,3 +639,15 @@ def : Pat<(v1i64 (and (xor VR64:$src1, (bc_v1i64 (v8i8  immAllOnesV_bc))),
 // Move MMX to lower 64-bit of XMM
 def : Pat<(v2i64 (scalar_to_vector (i64 (bitconvert VR64:$src)))),
           (v2i64 (MMX_MOVQ2DQrr VR64:$src))>;
+
+// Move lower 64-bit of XMM to MMX.
+def : Pat<(v2i32 (bitconvert (i64 (vector_extract (v2i64 VR128:$src),
+                                                  (iPTR 0))))),
+          (v2i32 (MMX_MOVDQ2Qrr VR128:$src))>;
+def : Pat<(v4i16 (bitconvert (i64 (vector_extract (v2i64 VR128:$src),
+                                                  (iPTR 0))))),
+          (v4i16 (MMX_MOVDQ2Qrr VR128:$src))>;
+def : Pat<(v8i8 (bitconvert (i64 (vector_extract (v2i64 VR128:$src),
+                                                  (iPTR 0))))),
+          (v8i8 (MMX_MOVDQ2Qrr VR128:$src))>;
+
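The three patterns added at the end cover the case where only the low quadword of an XMM register is reinterpreted as an MMX vector type (v2i32 / v4i16 / v8i8), so movdq2q can be selected instead of going through memory. A hedged C sketch of source code that exposes this shape, using the SSE2 intrinsic _mm_movepi64_pi64 (which corresponds to movdq2q); whether a given compiler build actually selects MMX_MOVDQ2Qrr here depends on the rest of the selection DAG:

/* Illustration only (not part of this commit).  Requires SSE2 and MMX. */
#include <emmintrin.h>   /* __m128i, _mm_movepi64_pi64; pulls in <mmintrin.h> */
#include <stdio.h>
#include <string.h>

int main(void) {
  __m128i x = _mm_set_epi16(0, 0, 0, 0, 4, 3, 2, 1);

  /* Low 64 bits of the XMM register, viewed as a v4i16 MMX value:
     the (v4i16 (bitconvert (i64 (vector_extract ...)))) shape above. */
  __m64 lo = _mm_movepi64_pi64(x);
  __m64 doubled = _mm_add_pi16(lo, lo);

  short r[4];
  memcpy(r, &doubled, sizeof r);
  _mm_empty();                    /* clear MMX state before calling printf */
  printf("%d %d %d %d\n", r[0], r[1], r[2], r[3]);   /* prints: 2 4 6 8 */
  return 0;
}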