Add a testcase to ensure we continue matching x86-64 rotates.
author     Chris Lattner <sabre@nondot.org>
           Mon, 17 Mar 2008 01:35:03 +0000 (01:35 +0000)
committer  Chris Lattner <sabre@nondot.org>
           Mon, 17 Mar 2008 01:35:03 +0000 (01:35 +0000)
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@48437 91177308-0d34-0410-b5e6-96231b3b80d8

test/CodeGen/X86/rotate2.ll [new file with mode: 0644]

diff --git a/test/CodeGen/X86/rotate2.ll b/test/CodeGen/X86/rotate2.ll
new file mode 100644
index 0000000..40e954c
--- /dev/null
+++ b/test/CodeGen/X86/rotate2.ll
@@ -0,0 +1,19 @@
+; RUN: llvm-as < %s | llc -march=x86-64 | grep rol | count 2
+
+define i64 @test1(i64 %x) nounwind  {
+entry:
+       %tmp2 = lshr i64 %x, 55         ; <i64> [#uses=1]
+       %tmp4 = shl i64 %x, 9           ; <i64> [#uses=1]
+       %tmp5 = or i64 %tmp2, %tmp4             ; <i64> [#uses=1]
+       ret i64 %tmp5
+}
+
+define i64 @test2(i32 %x) nounwind  {
+entry:
+       %tmp2 = lshr i32 %x, 22         ; <i32> [#uses=1]
+       %tmp4 = shl i32 %x, 10          ; <i32> [#uses=1]
+       %tmp5 = or i32 %tmp2, %tmp4             ; <i32> [#uses=1]
+       %tmp56 = zext i32 %tmp5 to i64          ; <i64> [#uses=1]
+       ret i64 %tmp56
+}
+
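For context, a minimal C sketch of the source-level idiom these two tests pin down
(not part of the commit; function names are hypothetical):

    #include <stdint.h>

    /* Mirrors test1: shl i64 by 9 OR'd with lshr i64 by 55 (9 + 55 = 64),
     * i.e. a rotate-left of a 64-bit value by 9. */
    uint64_t rotl64_by9(uint64_t x) {
        return (x << 9) | (x >> 55);
    }

    /* Mirrors test2: shl i32 by 10 OR'd with lshr i32 by 22 (10 + 22 = 32),
     * a 32-bit rotate-left by 10, with the result widened to 64 bits
     * just as the IR's zext does. */
    uint64_t rotl32_by10(uint32_t x) {
        uint32_t r = (x << 10) | (x >> 22);
        return (uint64_t)r;
    }

Because the two shift amounts in each pair sum to the operand's bit width, the or
of the shifts is a rotate, and the X86 backend is expected to select a single rol
for each function; that is what the RUN line's `grep rol | count 2` checks.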