Fixed PALIGNR to take an 8-bit immediate operand in all cases.
author     Sean Callanan <scallanan@apple.com>
           Fri, 20 Nov 2009 21:40:28 +0000
committer  Sean Callanan <scallanan@apple.com>
           Fri, 20 Nov 2009 21:40:28 +0000
Also fixed the corresponding test case and the PALIGNR
  intrinsics (tested for correctness with llvm-gcc).

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@89491 91177308-0d34-0410-b5e6-96231b3b80d8

include/llvm/IntrinsicsX86.td
lib/Target/X86/X86InstrSSE.td
test/CodeGen/X86/palignr-2.ll
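
With this change, both intrinsics take their shift amount as an i8. As the
updated test below shows, the immediate still counts bits and is lowered to
the byte count that the PALIGNR instruction encodes (i8 24 becomes
palignr $3). A minimal sketch of a call against the new signature follows;
the function name @align128 is illustrative, not part of this patch:

    declare <2 x i64> @llvm.x86.ssse3.palign.r.128(<2 x i64>, <2 x i64>, i8) nounwind readnone

    define <2 x i64> @align128(<2 x i64> %a, <2 x i64> %b) nounwind {
    entry:
      ; i8 24 requests a 24-bit (3-byte) shift: palignr $3, %xmm1, %xmm0
      %0 = tail call <2 x i64> @llvm.x86.ssse3.palign.r.128(<2 x i64> %a, <2 x i64> %b, i8 24) nounwind readnone
      ret <2 x i64> %0
    }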

diff --git a/include/llvm/IntrinsicsX86.td b/include/llvm/IntrinsicsX86.td
index 794f4bfe816f2b6a2e7a934cfc6870e572327d2b..2f75ed5200318930630cd12695c09c04c85394b1 100644
--- a/include/llvm/IntrinsicsX86.td
+++ b/include/llvm/IntrinsicsX86.td
@@ -673,10 +673,10 @@ let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
 let TargetPrefix = "x86" in {  // All intrinsics start with "llvm.x86.".
   def int_x86_ssse3_palign_r        : GCCBuiltin<"__builtin_ia32_palignr">,
               Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty,
-                         llvm_v1i64_ty, llvm_i16_ty], [IntrNoMem]>;
+                         llvm_v1i64_ty, llvm_i8_ty], [IntrNoMem]>;
   def int_x86_ssse3_palign_r_128    : GCCBuiltin<"__builtin_ia32_palignr128">,
               Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty,
-                         llvm_v2i64_ty, llvm_i32_ty], [IntrNoMem]>;
+                         llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
 }
 
 //===----------------------------------------------------------------------===//
diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index ee63d56f3f1712fd8b9b41a9c7da29c52400ac78..dfdd4ce36c6d4209a592eef3bc6c46f313819e4b 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -2820,40 +2820,40 @@ defm PSIGND      : SS3I_binop_rm_int_32<0x0A, "psignd",
 
 let Constraints = "$src1 = $dst" in {
   def PALIGNR64rr  : SS3AI<0x0F, MRMSrcReg, (outs VR64:$dst),
-                           (ins VR64:$src1, VR64:$src2, i16imm:$src3),
+                           (ins VR64:$src1, VR64:$src2, i8imm:$src3),
                            "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                            []>;
   def PALIGNR64rm  : SS3AI<0x0F, MRMSrcMem, (outs VR64:$dst),
-                           (ins VR64:$src1, i64mem:$src2, i16imm:$src3),
+                           (ins VR64:$src1, i64mem:$src2, i8imm:$src3),
                            "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                            []>;
 
   def PALIGNR128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
-                           (ins VR128:$src1, VR128:$src2, i32imm:$src3),
+                           (ins VR128:$src1, VR128:$src2, i8imm:$src3),
                            "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                            []>, OpSize;
   def PALIGNR128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
-                           (ins VR128:$src1, i128mem:$src2, i32imm:$src3),
+                           (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
                            "palignr\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                            []>, OpSize;
 }
 
 // palignr patterns.
-def : Pat<(int_x86_ssse3_palign_r VR64:$src1, VR64:$src2, (i16 imm:$src3)),
+def : Pat<(int_x86_ssse3_palign_r VR64:$src1, VR64:$src2, (i8 imm:$src3)),
           (PALIGNR64rr VR64:$src1, VR64:$src2, (BYTE_imm imm:$src3))>,
           Requires<[HasSSSE3]>;
 def : Pat<(int_x86_ssse3_palign_r VR64:$src1,
                                       (memop64 addr:$src2),
-                                      (i16 imm:$src3)),
+                                      (i8 imm:$src3)),
           (PALIGNR64rm VR64:$src1, addr:$src2, (BYTE_imm imm:$src3))>,
           Requires<[HasSSSE3]>;
 
-def : Pat<(int_x86_ssse3_palign_r_128 VR128:$src1, VR128:$src2, (i32 imm:$src3)),
+def : Pat<(int_x86_ssse3_palign_r_128 VR128:$src1, VR128:$src2, (i8 imm:$src3)),
           (PALIGNR128rr VR128:$src1, VR128:$src2, (BYTE_imm imm:$src3))>,
           Requires<[HasSSSE3]>;
 def : Pat<(int_x86_ssse3_palign_r_128 VR128:$src1,
                                       (memopv2i64 addr:$src2),
-                                      (i32 imm:$src3)),
+                                      (i8 imm:$src3)),
           (PALIGNR128rm VR128:$src1, addr:$src2, (BYTE_imm imm:$src3))>,
           Requires<[HasSSSE3]>;
 
diff --git a/test/CodeGen/X86/palignr-2.ll b/test/CodeGen/X86/palignr-2.ll
index 2936641e95d9f95c84e066b0c93186cf5e355a6c..116d4c71814a224035630ad8f0f2bac72a89aec6 100644
--- a/test/CodeGen/X86/palignr-2.ll
+++ b/test/CodeGen/X86/palignr-2.ll
@@ -9,12 +9,12 @@ define void @t1(<2 x i64> %a, <2 x i64> %b) nounwind ssp {
 entry:
 ; CHECK: t1:
 ; palignr $3, %xmm1, %xmm0
-  %0 = tail call <2 x i64> @llvm.x86.ssse3.palign.r.128(<2 x i64> %a, <2 x i64> %b, i32 24) nounwind readnone
+  %0 = tail call <2 x i64> @llvm.x86.ssse3.palign.r.128(<2 x i64> %a, <2 x i64> %b, i8 24) nounwind readnone
   store <2 x i64> %0, <2 x i64>* bitcast ([4 x i32]* @c to <2 x i64>*), align 16
   ret void
 }
 
-declare <2 x i64> @llvm.x86.ssse3.palign.r.128(<2 x i64>, <2 x i64>, i32) nounwind readnone
+declare <2 x i64> @llvm.x86.ssse3.palign.r.128(<2 x i64>, <2 x i64>, i8) nounwind readnone
 
 define void @t2() nounwind ssp {
 entry:
@@ -22,7 +22,7 @@ entry:
 ; palignr $4, _b, %xmm0
   %0 = load <2 x i64>* bitcast ([4 x i32]* @b to <2 x i64>*), align 16 ; <<2 x i64>> [#uses=1]
   %1 = load <2 x i64>* bitcast ([4 x i32]* @a to <2 x i64>*), align 16 ; <<2 x i64>> [#uses=1]
-  %2 = tail call <2 x i64> @llvm.x86.ssse3.palign.r.128(<2 x i64> %1, <2 x i64> %0, i32 32) nounwind readnone
+  %2 = tail call <2 x i64> @llvm.x86.ssse3.palign.r.128(<2 x i64> %1, <2 x i64> %0, i8 32) nounwind readnone
   store <2 x i64> %2, <2 x i64>* bitcast ([4 x i32]* @c to <2 x i64>*), align 16
   ret void
 }
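
The 64-bit (MMX) variant is not exercised by this test, but its signature
changed the same way (the final operand went from i16 to i8). A hypothetical
call under the new declaration, with @align64 as an illustrative name:

    declare <1 x i64> @llvm.x86.ssse3.palign.r(<1 x i64>, <1 x i64>, i8) nounwind readnone

    define <1 x i64> @align64(<1 x i64> %a, <1 x i64> %b) nounwind {
    entry:
      ; assuming the same bits-to-bytes convention, i8 16 would select a 2-byte shift
      %0 = tail call <1 x i64> @llvm.x86.ssse3.palign.r(<1 x i64> %a, <1 x i64> %b, i8 16) nounwind readnone
      ret <1 x i64> %0
    }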