; ARM v5TE saturating/halfword multiply test: verifies smulbt/smultt/smlabt
; selection. Diff residue (-/+ markers from the llvm-upgrade -> modern IR
; migration) removed; only the modernized RUN lines and globals are kept.
; RUN: llc -mtriple=arm-eabi -mcpu=generic %s -o /dev/null
; RUN: llc -mtriple=arm-eabi -mcpu=cortex-a8 %s -o - | FileCheck %s

@x = weak global i16 0          ; <i16*> [#uses=1]
@y = weak global i16 0          ; <i16*> [#uses=0]
; smulbt: multiply sign-extended i16 (bottom half) by top half of %y.
define i32 @f1(i32 %y) {
; CHECK: f1
; CHECK: smulbt
  %tmp = load i16, i16* @x              ; <i16> [#uses=1]
  %tmp1 = add i16 %tmp, 2               ; <i16> [#uses=1]
  %tmp2 = sext i16 %tmp1 to i32         ; <i32> [#uses=1]
  %tmp3 = ashr i32 %y, 16               ; <i32> [#uses=1]
  %tmp4 = mul i32 %tmp2, %tmp3          ; <i32> [#uses=1]
  ret i32 %tmp4
}
; smultt: multiply the top halves (ashr 16) of both operands.
define i32 @f2(i32 %x, i32 %y) {
; CHECK: f2
; CHECK: smultt
  %tmp1 = ashr i32 %x, 16               ; <i32> [#uses=1]
  %tmp3 = ashr i32 %y, 16               ; <i32> [#uses=1]
  %tmp4 = mul i32 %tmp3, %tmp1          ; <i32> [#uses=1]
  ret i32 %tmp4
}
; smlabt: multiply-accumulate — bottom half (sext i16 %x) * top half of %y,
; added to %a.
define i32 @f3(i32 %a, i16 %x, i32 %y) {
; CHECK: f3
; CHECK: smlabt
  %tmp = sext i16 %x to i32             ; <i32> [#uses=1]
  %tmp2 = ashr i32 %y, 16               ; <i32> [#uses=1]
  %tmp3 = mul i32 %tmp2, %tmp           ; <i32> [#uses=1]
  %tmp5 = add i32 %tmp3, %a             ; <i32> [#uses=1]
  ret i32 %tmp5
}