NEON Two-operand assembly aliases for VSRA.
author     Jim Grosbach <grosbach@apple.com>
           Tue, 24 Jan 2012 17:55:36 +0000 (17:55 +0000)
committer  Jim Grosbach <grosbach@apple.com>
           Tue, 24 Jan 2012 17:55:36 +0000 (17:55 +0000)
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@148821 91177308-0d34-0410-b5e6-96231b3b80d8
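
These aliases let the assembler accept the two-operand spelling of VSRA, in
which the destination register also supplies the accumulator input and is not
repeated. A small illustration, using operands exercised in the test below:

	vsra.s8   d16, #7       @ accepted as   vsra.s8   d16, d16, #7
	vsra.u64  q7,  #63      @ accepted as   vsra.u64  q7,  q7,  #63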

lib/Target/ARM/ARMInstrNEON.td
test/MC/ARM/neon-shift-encoding.s

diff --git a/lib/Target/ARM/ARMInstrNEON.td b/lib/Target/ARM/ARMInstrNEON.td
index 76654fbb25c830ce01d88984d9c276c5783ccfec..14d480dd61aef38213e9958e278ee64079c7c560 100644
@@ -6494,6 +6494,45 @@ def : NEONInstAlias<"vpadd${p}.i32 $Vdn, $Vm",
 def : NEONInstAlias<"vpadd${p}.f32 $Vdn, $Vm",
                     (VPADDf DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
 
+// Two-operand variants for VSRA.
+// Signed.
+def : NEONInstAlias<"vsra${p}.s8 $Vdm, $imm",
+                    (VSRAsv8i8 DPR:$Vdm, DPR:$Vdm, shr_imm8:$imm, pred:$p)>;
+def : NEONInstAlias<"vsra${p}.s16 $Vdm, $imm",
+                    (VSRAsv4i16 DPR:$Vdm, DPR:$Vdm, shr_imm16:$imm, pred:$p)>;
+def : NEONInstAlias<"vsra${p}.s32 $Vdm, $imm",
+                    (VSRAsv2i32 DPR:$Vdm, DPR:$Vdm, shr_imm32:$imm, pred:$p)>;
+def : NEONInstAlias<"vsra${p}.s64 $Vdm, $imm",
+                    (VSRAsv1i64 DPR:$Vdm, DPR:$Vdm, shr_imm64:$imm, pred:$p)>;
+
+def : NEONInstAlias<"vsra${p}.s8 $Vdm, $imm",
+                    (VSRAsv16i8 QPR:$Vdm, QPR:$Vdm, shr_imm8:$imm, pred:$p)>;
+def : NEONInstAlias<"vsra${p}.s16 $Vdm, $imm",
+                    (VSRAsv8i16 QPR:$Vdm, QPR:$Vdm, shr_imm16:$imm, pred:$p)>;
+def : NEONInstAlias<"vsra${p}.s32 $Vdm, $imm",
+                    (VSRAsv4i32 QPR:$Vdm, QPR:$Vdm, shr_imm32:$imm, pred:$p)>;
+def : NEONInstAlias<"vsra${p}.s64 $Vdm, $imm",
+                    (VSRAsv2i64 QPR:$Vdm, QPR:$Vdm, shr_imm64:$imm, pred:$p)>;
+
+// Unsigned.
+def : NEONInstAlias<"vsra${p}.u8 $Vdm, $imm",
+                    (VSRAuv8i8 DPR:$Vdm, DPR:$Vdm, shr_imm8:$imm, pred:$p)>;
+def : NEONInstAlias<"vsra${p}.u16 $Vdm, $imm",
+                    (VSRAuv4i16 DPR:$Vdm, DPR:$Vdm, shr_imm16:$imm, pred:$p)>;
+def : NEONInstAlias<"vsra${p}.u32 $Vdm, $imm",
+                    (VSRAuv2i32 DPR:$Vdm, DPR:$Vdm, shr_imm32:$imm, pred:$p)>;
+def : NEONInstAlias<"vsra${p}.u64 $Vdm, $imm",
+                    (VSRAuv1i64 DPR:$Vdm, DPR:$Vdm, shr_imm64:$imm, pred:$p)>;
+
+def : NEONInstAlias<"vsra${p}.u8 $Vdm, $imm",
+                    (VSRAuv16i8 QPR:$Vdm, QPR:$Vdm, shr_imm8:$imm, pred:$p)>;
+def : NEONInstAlias<"vsra${p}.u16 $Vdm, $imm",
+                    (VSRAuv8i16 QPR:$Vdm, QPR:$Vdm, shr_imm16:$imm, pred:$p)>;
+def : NEONInstAlias<"vsra${p}.u32 $Vdm, $imm",
+                    (VSRAuv4i32 QPR:$Vdm, QPR:$Vdm, shr_imm32:$imm, pred:$p)>;
+def : NEONInstAlias<"vsra${p}.u64 $Vdm, $imm",
+                    (VSRAuv2i64 QPR:$Vdm, QPR:$Vdm, shr_imm64:$imm, pred:$p)>;
+
 // Two-operand variants for VSRI.
 def : NEONInstAlias<"vsri${p}.8 $Vdm, $imm",
                     (VSRIv8i8 DPR:$Vdm, DPR:$Vdm, shr_imm8:$imm, pred:$p)>;
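
Note: VSRA shifts each source element right by the immediate and accumulates
the result into the destination (Vd[i] += Vm[i] >> imm), so the destination is
naturally both an input and an output. The aliases above expand the two-operand
spelling by repeating the destination register, e.g. (register choice arbitrary):

	vsra.s32  d11, #31            @ expands to the line below
	vsra.s32  d11, d11, #31       @ d11[i] = d11[i] + (d11[i] >> 31)
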
diff --git a/test/MC/ARM/neon-shift-encoding.s b/test/MC/ARM/neon-shift-encoding.s
index d47eeca8ee4bc7696bf10b6f0be9ac18d226e95d..cd450a8f8bd3d8d1db5c27d1fc8346e215b7a9e5 100644
@@ -105,39 +105,77 @@ _foo:
 @ CHECK: vshr.s32      q8, q8, #31  @ encoding: [0x70,0x00,0xe1,0xf2]
 @ CHECK: vshr.s64      q8, q8, #63  @ encoding: [0xf0,0x00,0xc1,0xf2]
 
-       vsra.u8   d16, d16, #7
-       vsra.u16  d16, d16, #15
-       vsra.u32  d16, d16, #31
-       vsra.u64  d16, d16, #63
-       vsra.u8   q8, q8, #7
-       vsra.u16  q8, q8, #15
-       vsra.u32  q8, q8, #31
-       vsra.u64  q8, q8, #63
-       vsra.s8   d16, d16, #7
-       vsra.s16  d16, d16, #15
-       vsra.s32  d16, d16, #31
-       vsra.s64  d16, d16, #63
-       vsra.s8   q8, q8, #7
-       vsra.s16  q8, q8, #15
-       vsra.s32  q8, q8, #31
-       vsra.s64  q8, q8, #63
-
-@ CHECK: vsra.u8  d16, d16, #7   @ encoding: [0x30,0x01,0xc9,0xf3]
-@ CHECK: vsra.u16 d16, d16, #15  @ encoding: [0x30,0x01,0xd1,0xf3]
-@ CHECK: vsra.u32 d16, d16, #31  @ encoding: [0x30,0x01,0xe1,0xf3]
-@ CHECK: vsra.u64 d16, d16, #63  @ encoding: [0xb0,0x01,0xc1,0xf3]
-@ CHECK: vsra.u8  q8, q8, #7     @ encoding: [0x70,0x01,0xc9,0xf3]
-@ CHECK: vsra.u16 q8, q8, #15    @ encoding: [0x70,0x01,0xd1,0xf3]
-@ CHECK: vsra.u32 q8, q8, #31    @ encoding: [0x70,0x01,0xe1,0xf3]
-@ CHECK: vsra.u64 q8, q8, #63    @ encoding: [0xf0,0x01,0xc1,0xf3]
-@ CHECK: vsra.s8  d16, d16, #7   @ encoding: [0x30,0x01,0xc9,0xf2]
-@ CHECK: vsra.s16 d16, d16, #15  @ encoding: [0x30,0x01,0xd1,0xf2]
-@ CHECK: vsra.s32 d16, d16, #31  @ encoding: [0x30,0x01,0xe1,0xf2]
-@ CHECK: vsra.s64 d16, d16, #63  @ encoding: [0xb0,0x01,0xc1,0xf2]
-@ CHECK: vsra.s8  q8, q8, #7     @ encoding: [0x70,0x01,0xc9,0xf2]
-@ CHECK: vsra.s16 q8, q8, #15    @ encoding: [0x70,0x01,0xd1,0xf2]
-@ CHECK: vsra.s32 q8, q8, #31    @ encoding: [0x70,0x01,0xe1,0xf2]
-@ CHECK: vsra.s64 q8, q8, #63    @ encoding: [0xf0,0x01,0xc1,0xf2]
+
+       vsra.s8   d16, d6, #7
+       vsra.s16  d26, d18, #15
+       vsra.s32  d11, d10, #31
+       vsra.s64  d12, d19, #63
+       vsra.s8   q1, q8, #7
+       vsra.s16  q2, q7, #15
+       vsra.s32  q3, q6, #31
+       vsra.s64  q4, q5, #63
+
+       vsra.s8   d16, #7
+       vsra.s16  d15, #15
+       vsra.s32  d14, #31
+       vsra.s64  d13, #63
+       vsra.s8   q4, #7
+       vsra.s16  q5, #15
+       vsra.s32  q6, #31
+       vsra.s64  q7, #63
+
+@ CHECK: vsra.s8       d16, d6, #7     @ encoding: [0x16,0x01,0xc9,0xf2]
+@ CHECK: vsra.s16      d26, d18, #15   @ encoding: [0x32,0xa1,0xd1,0xf2]
+@ CHECK: vsra.s32      d11, d10, #31   @ encoding: [0x1a,0xb1,0xa1,0xf2]
+@ CHECK: vsra.s64      d12, d19, #63   @ encoding: [0xb3,0xc1,0x81,0xf2]
+@ CHECK: vsra.s8       q1, q8, #7      @ encoding: [0x70,0x21,0x89,0xf2]
+@ CHECK: vsra.s16      q2, q7, #15     @ encoding: [0x5e,0x41,0x91,0xf2]
+@ CHECK: vsra.s32      q3, q6, #31     @ encoding: [0x5c,0x61,0xa1,0xf2]
+@ CHECK: vsra.s64      q4, q5, #63     @ encoding: [0xda,0x81,0x81,0xf2]
+@ CHECK: vsra.s8       d16, d16, #7    @ encoding: [0x30,0x01,0xc9,0xf2]
+@ CHECK: vsra.s16      d15, d15, #15   @ encoding: [0x1f,0xf1,0x91,0xf2]
+@ CHECK: vsra.s32      d14, d14, #31   @ encoding: [0x1e,0xe1,0xa1,0xf2]
+@ CHECK: vsra.s64      d13, d13, #63   @ encoding: [0x9d,0xd1,0x81,0xf2]
+@ CHECK: vsra.s8       q4, q4, #7      @ encoding: [0x58,0x81,0x89,0xf2]
+@ CHECK: vsra.s16      q5, q5, #15     @ encoding: [0x5a,0xa1,0x91,0xf2]
+@ CHECK: vsra.s32      q6, q6, #31     @ encoding: [0x5c,0xc1,0xa1,0xf2]
+@ CHECK: vsra.s64      q7, q7, #63     @ encoding: [0xde,0xe1,0x81,0xf2]
+
+
+       vsra.u8   d16, d6, #7
+       vsra.u16  d26, d18, #15
+       vsra.u32  d11, d10, #31
+       vsra.u64  d12, d19, #63
+       vsra.u8   q1, q8, #7
+       vsra.u16  q2, q7, #15
+       vsra.u32  q3, q6, #31
+       vsra.u64  q4, q5, #63
+
+       vsra.u8   d16, #7
+       vsra.u16  d15, #15
+       vsra.u32  d14, #31
+       vsra.u64  d13, #63
+       vsra.u8   q4, #7
+       vsra.u16  q5, #15
+       vsra.u32  q6, #31
+       vsra.u64  q7, #63
+
+@ CHECK: vsra.u8       d16, d6, #7     @ encoding: [0x16,0x01,0xc9,0xf3]
+@ CHECK: vsra.u16      d26, d18, #15   @ encoding: [0x32,0xa1,0xd1,0xf3]
+@ CHECK: vsra.u32      d11, d10, #31   @ encoding: [0x1a,0xb1,0xa1,0xf3]
+@ CHECK: vsra.u64      d12, d19, #63   @ encoding: [0xb3,0xc1,0x81,0xf3]
+@ CHECK: vsra.u8       q1, q8, #7      @ encoding: [0x70,0x21,0x89,0xf3]
+@ CHECK: vsra.u16      q2, q7, #15     @ encoding: [0x5e,0x41,0x91,0xf3]
+@ CHECK: vsra.u32      q3, q6, #31     @ encoding: [0x5c,0x61,0xa1,0xf3]
+@ CHECK: vsra.u64      q4, q5, #63     @ encoding: [0xda,0x81,0x81,0xf3]
+@ CHECK: vsra.u8       d16, d16, #7    @ encoding: [0x30,0x01,0xc9,0xf3]
+@ CHECK: vsra.u16      d15, d15, #15   @ encoding: [0x1f,0xf1,0x91,0xf3]
+@ CHECK: vsra.u32      d14, d14, #31   @ encoding: [0x1e,0xe1,0xa1,0xf3]
+@ CHECK: vsra.u64      d13, d13, #63   @ encoding: [0x9d,0xd1,0x81,0xf3]
+@ CHECK: vsra.u8       q4, q4, #7      @ encoding: [0x58,0x81,0x89,0xf3]
+@ CHECK: vsra.u16      q5, q5, #15     @ encoding: [0x5a,0xa1,0x91,0xf3]
+@ CHECK: vsra.u32      q6, q6, #31     @ encoding: [0x5c,0xc1,0xa1,0xf3]
+@ CHECK: vsra.u64      q7, q7, #63     @ encoding: [0xde,0xe1,0x81,0xf3]
 
 
        vsri.8   d16, d6, #7