; All of these ands and shifts should be folded into rlwimi's
; RUN: llc < %s -march=ppc32 -o %t
; RUN: not grep mulhwu %t
; RUN: not grep srawi %t
; RUN: not grep add %t
; RUN: grep mulhw %t | count 1
; Signed high-half multiply: (i64)a * (i64)b >> 32, truncated to i32.
; On ppc32 this should select a single mulhw (checked by the RUN lines).
define i32 @mulhs(i32 %a, i32 %b) nounwind {
entry:
        %tmp.1 = sext i32 %a to i64             ; <i64> [#uses=1]
        %tmp.3 = sext i32 %b to i64             ; <i64> [#uses=1]
        %tmp.4 = mul i64 %tmp.3, %tmp.1         ; <i64> [#uses=1]
        %tmp.6 = lshr i64 %tmp.4, 32            ; <i64> [#uses=1]
        %tmp.7 = trunc i64 %tmp.6 to i32        ; <i32> [#uses=1]
        ret i32 %tmp.7
}