--- /dev/null
+; RUN: llvm-as < %s | llc -march=c | grep common | grep X
+
+@X = linkonce global i32 5
+++ /dev/null
-; RUN: llvm-as < %s | llc -march=c | grep common | grep X
-
-@X = linkonce global i32 5
--- /dev/null
+; RUN: llvm-as < %s | llc -march=c | grep builtin_return_address
+
+declare i8* @llvm.returnaddress(i32)
+
+declare i8* @llvm.frameaddress(i32)
+
+define i8* @test1() {
+ %X = call i8* @llvm.returnaddress( i32 0 ) ; <i8*> [#uses=1]
+ ret i8* %X
+}
+
+define i8* @test2() {
+ %X = call i8* @llvm.frameaddress( i32 0 ) ; <i8*> [#uses=1]
+ ret i8* %X
+}
+
+++ /dev/null
-; RUN: llvm-as < %s | llc -march=c | grep builtin_return_address
-
-declare i8* @llvm.returnaddress(i32)
-
-declare i8* @llvm.frameaddress(i32)
-
-define i8* @test1() {
- %X = call i8* @llvm.returnaddress( i32 0 ) ; <i8*> [#uses=1]
- ret i8* %X
-}
-
-define i8* @test2() {
- %X = call i8* @llvm.frameaddress( i32 0 ) ; <i8*> [#uses=1]
- ret i8* %X
-}
-
--- /dev/null
+; The intrinsic lowering pass was lowering intrinsics like llvm.memcpy to
+; explicitly specified prototypes, inserting a new function if the old one
+; didn't exist. This caused there to be two external memcpy functions in
+; this testcase for example, which caused the CBE to mangle one, screwing
+; everything up. :( Test that this does not happen anymore.
+;
+; RUN: llvm-as < %s | llc -march=c | not grep _memcpy
+
+declare void @llvm.memcpy.i32(i8*, i8*, i32, i32)
+
+declare float* @memcpy(i32*, i32, i32)
+
+define i32 @test(i8* %A, i8* %B, i32* %C) {
+ call float* @memcpy( i32* %C, i32 4, i32 17 ) ; <float*>:1 [#uses=0]
+ call void @llvm.memcpy.i32( i8* %A, i8* %B, i32 123, i32 14 )
+ ret i32 7
+}
+
+++ /dev/null
-; The intrinsic lowering pass was lowering intrinsics like llvm.memcpy to
-; explicitly specified prototypes, inserting a new function if the old one
-; didn't exist. This caused there to be two external memcpy functions in
-; this testcase for example, which caused the CBE to mangle one, screwing
-; everything up. :( Test that this does not happen anymore.
-;
-; RUN: llvm-as < %s | llc -march=c | not grep _memcpy
-
-declare void @llvm.memcpy.i32(i8*, i8*, i32, i32)
-
-declare float* @memcpy(i32*, i32, i32)
-
-define i32 @test(i8* %A, i8* %B, i32* %C) {
- call float* @memcpy( i32* %C, i32 4, i32 17 ) ; <float*>:1 [#uses=0]
- call void @llvm.memcpy.i32( i8* %A, i8* %B, i32 123, i32 14 )
- ret i32 7
-}
-
--- /dev/null
+; This is a non-normal FP value
+; RUN: llvm-as < %s | llc -march=c | grep FPConstant | grep static
+
+define float @func() {
+ ret float 0xFFF0000000000000
+}
+
+define double @func2() {
+ ret double 0xFF20000000000000
+}
+
+++ /dev/null
-; This is a non-normal FP value
-; RUN: llvm-as < %s | llc -march=c | grep FPConstant | grep static
-
-define float @func() {
- ret float 0xFFF0000000000000
-}
-
-define double @func2() {
- ret double 0xFF20000000000000
-}
-
--- /dev/null
+; RUN: llvm-as < %s | llc -march=c | grep func1 | grep WEAK
+
+define linkonce i32 @func1() {
+ ret i32 5
+}
+
+++ /dev/null
-; RUN: llvm-as < %s | llc -march=c | grep func1 | grep WEAK
-
-define linkonce i32 @func1() {
- ret i32 5
-}
-
--- /dev/null
+; The CBE should not emit code that casts the function pointer. This causes
+; GCC to get testy and insert trap instructions instead of doing the right
+; thing. :(
+; RUN: llvm-as < %s | llc -march=c
+
+declare void @external(i8*)
+
+define i32 @test(i32* %X) {
+ %RV = call i32 bitcast (void (i8*)* @external to i32 (i32*)*)( i32* %X ) ; <i32> [#uses=1]
+ ret i32 %RV
+}
+
+++ /dev/null
-; The CBE should not emit code that casts the function pointer. This causes
-; GCC to get testy and insert trap instructions instead of doing the right
-; thing. :(
-; RUN: llvm-as < %s | llc -march=c
-
-declare void @external(i8*)
-
-define i32 @test(i32* %X) {
- %RV = call i32 bitcast (void (i8*)* @external to i32 (i32*)*)( i32* %X ) ; <i32> [#uses=1]
- ret i32 %RV
-}
-
--- /dev/null
+; RUN: llvm-as < %s | llc -enable-correct-eh-support
+
+define i32 @test() {
+ unwind
+}
+
+define i32 @main() {
+ %X = invoke i32 @test( )
+ to label %cont unwind label %EH ; <i32> [#uses=0]
+
+cont: ; preds = %0
+ ret i32 1
+
+EH: ; preds = %0
+ ret i32 0
+}
+
+++ /dev/null
-; RUN: llvm-as < %s | llc -enable-correct-eh-support
-
-define i32 @test() {
- unwind
-}
-
-define i32 @main() {
- %X = invoke i32 @test( )
- to label %cont unwind label %EH ; <i32> [#uses=0]
-
-cont: ; preds = %0
- ret i32 1
-
-EH: ; preds = %0
- ret i32 0
-}
-
--- /dev/null
+; RUN: llvm-as < %s | llc
+@global_long_1 = linkonce global i64 7 ; <i64*> [#uses=1]
+@global_long_2 = linkonce global i64 49 ; <i64*> [#uses=1]
+
+define i32 @main() {
+ %l1 = load i64* @global_long_1 ; <i64> [#uses=1]
+ %l2 = load i64* @global_long_2 ; <i64> [#uses=1]
+ %cond = icmp sle i64 %l1, %l2 ; <i1> [#uses=1]
+ %cast2 = zext i1 %cond to i32 ; <i32> [#uses=1]
+ %RV = sub i32 1, %cast2 ; <i32> [#uses=1]
+ ret i32 %RV
+}
+
+++ /dev/null
-; RUN: llvm-as < %s | llc
-@global_long_1 = linkonce global i64 7 ; <i64*> [#uses=1]
-@global_long_2 = linkonce global i64 49 ; <i64*> [#uses=1]
-
-define i32 @main() {
- %l1 = load i64* @global_long_1 ; <i64> [#uses=1]
- %l2 = load i64* @global_long_2 ; <i64> [#uses=1]
- %cond = icmp sle i64 %l1, %l2 ; <i1> [#uses=1]
- %cast2 = zext i1 %cond to i32 ; <i32> [#uses=1]
- %RV = sub i32 1, %cast2 ; <i32> [#uses=1]
- ret i32 %RV
-}
-
--- /dev/null
+; New testcase, this contains a bunch of simple instructions that should be
+; handled by a code generator.
+
+; RUN: llvm-as < %s | llc
+
+define i32 @add(i32 %A, i32 %B) {
+ %R = add i32 %A, %B ; <i32> [#uses=1]
+ ret i32 %R
+}
+
+define i32 @sub(i32 %A, i32 %B) {
+ %R = sub i32 %A, %B ; <i32> [#uses=1]
+ ret i32 %R
+}
+
+define i32 @mul(i32 %A, i32 %B) {
+ %R = mul i32 %A, %B ; <i32> [#uses=1]
+ ret i32 %R
+}
+
+define i32 @sdiv(i32 %A, i32 %B) {
+ %R = sdiv i32 %A, %B ; <i32> [#uses=1]
+ ret i32 %R
+}
+
+define i32 @udiv(i32 %A, i32 %B) {
+ %R = udiv i32 %A, %B ; <i32> [#uses=1]
+ ret i32 %R
+}
+
+define i32 @srem(i32 %A, i32 %B) {
+ %R = srem i32 %A, %B ; <i32> [#uses=1]
+ ret i32 %R
+}
+
+define i32 @urem(i32 %A, i32 %B) {
+ %R = urem i32 %A, %B ; <i32> [#uses=1]
+ ret i32 %R
+}
+
+define i32 @and(i32 %A, i32 %B) {
+ %R = and i32 %A, %B ; <i32> [#uses=1]
+ ret i32 %R
+}
+
+define i32 @or(i32 %A, i32 %B) {
+ %R = or i32 %A, %B ; <i32> [#uses=1]
+ ret i32 %R
+}
+
+define i32 @xor(i32 %A, i32 %B) {
+ %R = xor i32 %A, %B ; <i32> [#uses=1]
+ ret i32 %R
+}
+++ /dev/null
-; New testcase, this contains a bunch of simple instructions that should be
-; handled by a code generator.
-
-; RUN: llvm-as < %s | llc
-
-define i32 @add(i32 %A, i32 %B) {
- %R = add i32 %A, %B ; <i32> [#uses=1]
- ret i32 %R
-}
-
-define i32 @sub(i32 %A, i32 %B) {
- %R = sub i32 %A, %B ; <i32> [#uses=1]
- ret i32 %R
-}
-
-define i32 @mul(i32 %A, i32 %B) {
- %R = mul i32 %A, %B ; <i32> [#uses=1]
- ret i32 %R
-}
-
-define i32 @sdiv(i32 %A, i32 %B) {
- %R = sdiv i32 %A, %B ; <i32> [#uses=1]
- ret i32 %R
-}
-
-define i32 @udiv(i32 %A, i32 %B) {
- %R = udiv i32 %A, %B ; <i32> [#uses=1]
- ret i32 %R
-}
-
-define i32 @srem(i32 %A, i32 %B) {
- %R = srem i32 %A, %B ; <i32> [#uses=1]
- ret i32 %R
-}
-
-define i32 @urem(i32 %A, i32 %B) {
- %R = urem i32 %A, %B ; <i32> [#uses=1]
- ret i32 %R
-}
-
-define i32 @and(i32 %A, i32 %B) {
- %R = and i32 %A, %B ; <i32> [#uses=1]
- ret i32 %R
-}
-
-define i32 @or(i32 %A, i32 %B) {
- %R = or i32 %A, %B ; <i32> [#uses=1]
- ret i32 %R
-}
-
-define i32 @xor(i32 %A, i32 %B) {
- %R = xor i32 %A, %B ; <i32> [#uses=1]
- ret i32 %R
-}
--- /dev/null
+; RUN: llvm-as < %s | llc
+
+@.str_1 = internal constant [16 x i8] c"%d %d %d %d %d\0A\00" ; <[16 x i8]*> [#uses=1]
+@XA = external global i32 ; <i32*> [#uses=1]
+@XB = external global i32 ; <i32*> [#uses=1]
+
+declare i32 @printf(i8*, ...)
+
+define void @test(i32 %A, i32 %B, i32 %C, i32 %D) {
+entry:
+ %t1 = icmp slt i32 %A, 0 ; <i1> [#uses=1]
+ br i1 %t1, label %less, label %not_less
+
+less: ; preds = %entry
+ br label %not_less
+
+not_less: ; preds = %less, %entry
+ %t2 = phi i32 [ sub (i32 ptrtoint (i32* @XA to i32), i32 ptrtoint (i32* @XB to i32)), %less ], [ sub (i32 ptrtoint (i32* @XA to i32), i32 ptrtoint (i32* @XB to i32)), %entry ] ; <i32> [#uses=1]
+ %tmp.39 = call i32 (i8*, ...)* @printf( i8* getelementptr ([16 x i8]* @.str_1, i64 0, i64 0), i32 %t2 ) ; <i32> [#uses=0]
+ ret void
+}
+
+++ /dev/null
-; RUN: llvm-as < %s | llc
-
-@.str_1 = internal constant [16 x i8] c"%d %d %d %d %d\0A\00" ; <[16 x i8]*> [#uses=1]
-@XA = external global i32 ; <i32*> [#uses=1]
-@XB = external global i32 ; <i32*> [#uses=1]
-
-declare i32 @printf(i8*, ...)
-
-define void @test(i32 %A, i32 %B, i32 %C, i32 %D) {
-entry:
- %t1 = icmp slt i32 %A, 0 ; <i1> [#uses=1]
- br i1 %t1, label %less, label %not_less
-
-less: ; preds = %entry
- br label %not_less
-
-not_less: ; preds = %less, %entry
- %t2 = phi i32 [ sub (i32 ptrtoint (i32* @XA to i32), i32 ptrtoint (i32* @XB to i32)), %less ], [ sub (i32 ptrtoint (i32* @XA to i32), i32 ptrtoint (i32* @XB to i32)), %entry ] ; <i32> [#uses=1]
- %tmp.39 = call i32 (i8*, ...)* @printf( i8* getelementptr ([16 x i8]* @.str_1, i64 0, i64 0), i32 %t2 ) ; <i32> [#uses=0]
- ret void
-}
-
--- /dev/null
+; RUN: llvm-as < %s | llc -march=x86 -regalloc=simple
+
+define i32 @main() {
+ ; %A = 0
+ %A = add i32 0, 0 ; <i32> [#uses=1]
+ ; %B = 1
+ %B = add i32 0, 1 ; <i32> [#uses=2]
+ br label %bb1
+bb1: ; preds = %0
+ ; %X = 0*1 = 0
+ %X = mul i32 %A, %B ; <i32> [#uses=0]
+ ; %R = 0
+ %R = sub i32 %B, 1 ; <i32> [#uses=1]
+ ret i32 %R
+}
+++ /dev/null
-; RUN: llvm-as < %s | llc -march=x86 -regalloc=simple
-
-define i32 @main() {
- ; %A = 0
- %A = add i32 0, 0 ; <i32> [#uses=1]
- ; %B = 1
- %B = add i32 0, 1 ; <i32> [#uses=2]
- br label %bb1
-bb1: ; preds = %0
- ; %X = 0*1 = 0
- %X = mul i32 %A, %B ; <i32> [#uses=0]
- ; %r = 0
- %R = sub i32 %B, 1 ; <i32> [#uses=1]
- ret i32 %R
-}
--- /dev/null
+; RUN: llvm-as < %s | llc -march=x86 -regalloc=simple
+
+define i32 @main(i32 %B) {
+ ;%B = add i32 0, 1;
+ %R = sub i32 %B, 1 ; %r = 0
+ ret i32 %R
+}
+++ /dev/null
-; RUN: llvm-as < %s | llc -march=x86 -regalloc=simple
-
-define i32 @main(i32 %B) {
- ;%B = add i32 0, 1;
- %R = sub i32 %B, 1 ; %r = 0
- ret i32 %R
-}
--- /dev/null
+; The old instruction selector used to load all arguments to a call up in
+; registers, then start pushing them all onto the stack. This is bad news as
+; it makes a ton of annoying overlapping live ranges. This code should not
+; cause spills!
+;
+; RUN: llvm-as < %s | llc -march=x86 -stats |& not grep spilled
+
+target datalayout = "e-p:32:32"
+
+define i32 @test(i32, i32, i32, i32, i32, i32, i32, i32, i32, i32) {
+ ret i32 0
+}
+
+define i32 @main() {
+ %X = call i32 @test( i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10 ) ; <i32> [#uses=1]
+ ret i32 %X
+}
+
+++ /dev/null
-; The old instruction selector used to load all arguments to a call up in
-; registers, then start pushing them all onto the stack. This is bad news as
-; it makes a ton of annoying overlapping live ranges. This code should not
-; cause spills!
-;
-; RUN: llvm-as < %s | llc -march=x86 -stats |& not grep spilled
-
-target datalayout = "e-p:32:32"
-
-define i32 @test(i32, i32, i32, i32, i32, i32, i32, i32, i32, i32) {
- ret i32 0
-}
-
-define i32 @main() {
- %X = call i32 @test( i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10 ) ; <i32> [#uses=1]
- ret i32 %X
-}
-
--- /dev/null
+; RUN: llvm-as < %s | llc -march=x86
+
+define i32 @test() {
+entry:
+ ret i32 7
+Test: ; No predecessors!
+ %A = call i32 @test( ) ; <i32> [#uses=1]
+ %B = call i32 @test( ) ; <i32> [#uses=1]
+ %C = add i32 %A, %B ; <i32> [#uses=1]
+ ret i32 %C
+}
+
+++ /dev/null
-; RUN: llvm-as < %s | llc -march=x86
-
-define i32 @test() {
-entry:
- ret i32 7
-Test: ; No predecessors!
- %A = call i32 @test( ) ; <i32> [#uses=1]
- %B = call i32 @test( ) ; <i32> [#uses=1]
- %C = add i32 %A, %B ; <i32> [#uses=1]
- ret i32 %C
-}
-
--- /dev/null
+; RUN: llvm-as < %s | llc -march=x86 | \
+; RUN: not grep {.byte\[\[:space:\]\]*true}
+
+@X = global i1 true ; <i1*> [#uses=0]
+++ /dev/null
-; RUN: llvm-as < %s | llc -march=x86 | \
-; RUN: not grep {.byte\[\[:space:\]\]*true}
-
-@X = global i1 true ; <i1*> [#uses=0]
--- /dev/null
+; RUN: llvm-as < %s | llc -march=x86 -mtriple=i686-pc-linux-gnu | grep movs | count 3
+
+@A = global [32 x i32] zeroinitializer
+@B = global [32 x i32] zeroinitializer
+
+declare void @llvm.memcpy.i32(i8*, i8*, i32, i32)
+
+define void @main() {
+ ; dword copy
+ call void @llvm.memcpy.i32(i8* bitcast ([32 x i32]* @A to i8*),
+ i8* bitcast ([32 x i32]* @B to i8*),
+ i32 128, i32 4 )
+
+ ; word copy
+ call void @llvm.memcpy.i32( i8* bitcast ([32 x i32]* @A to i8*),
+ i8* bitcast ([32 x i32]* @B to i8*),
+ i32 128, i32 2 )
+
+ ; byte copy
+ call void @llvm.memcpy.i32( i8* bitcast ([32 x i32]* @A to i8*),
+ i8* bitcast ([32 x i32]* @B to i8*),
+ i32 128, i32 1 )
+
+ ret void
+}
+++ /dev/null
-; RUN: llvm-as < %s | llc -march=x86 -mtriple=i686-pc-linux-gnu | grep movs | count 3
-
-@A = global [32 x i32] zeroinitializer
-@B = global [32 x i32] zeroinitializer
-
-declare void @llvm.memcpy.i32(i8*, i8*, i32, i32)
-
-define void @main() {
- ; dword copy
- call void @llvm.memcpy.i32(i8* bitcast ([32 x i32]* @A to i8*),
- i8* bitcast ([32 x i32]* @B to i8*),
- i32 128, i32 4 )
-
- ; word copy
- call void @llvm.memcpy.i32( i8* bitcast ([32 x i32]* @A to i8*),
- i8* bitcast ([32 x i32]* @B to i8*),
- i32 128, i32 2 )
-
- ; byte copy
- call void @llvm.memcpy.i32( i8* bitcast ([32 x i32]* @A to i8*),
- i8* bitcast ([32 x i32]* @B to i8*),
- i32 128, i32 1 )
-
- ret void
-}
--- /dev/null
+; RUN: llvm-as < %s | llc -march=x86 | grep {(%esp}
+
+declare i8* @llvm.returnaddress(i32)
+
+declare i8* @llvm.frameaddress(i32)
+
+define i8* @test1() {
+ %X = call i8* @llvm.returnaddress( i32 0 ) ; <i8*> [#uses=1]
+ ret i8* %X
+}
+
+define i8* @test2() {
+ %X = call i8* @llvm.frameaddress( i32 0 ) ; <i8*> [#uses=1]
+ ret i8* %X
+}
+
+++ /dev/null
-; RUN: llvm-as < %s | llc -march=x86 | grep {(%esp}
-
-declare i8* @llvm.returnaddress(i32)
-
-declare i8* @llvm.frameaddress(i32)
-
-define i8* @test1() {
- %X = call i8* @llvm.returnaddress( i32 0 ) ; <i8*> [#uses=1]
- ret i8* %X
-}
-
-define i8* @test2() {
- %X = call i8* @llvm.frameaddress( i32 0 ) ; <i8*> [#uses=1]
- ret i8* %X
-}
-
--- /dev/null
+; RUN: llvm-as < %s | llc -march=x86 | grep -i ESP | not grep sub
+
+define i32 @test(i32 %X) {
+ ret i32 %X
+}
+++ /dev/null
-; RUN: llvm-as < %s | llc -march=x86 | grep -i ESP | not grep sub
-
-define i32 @test(i32 %X) {
- ret i32 %X
-}
--- /dev/null
+; RUN: llvm-as < %s | llc -march=x86
+define i1 @test1(double %X) {
+ %V = fcmp one double %X, 0.000000e+00 ; <i1> [#uses=1]
+ ret i1 %V
+}
+
+define double @test2(i64 %X) {
+ %V = uitofp i64 %X to double ; <double> [#uses=1]
+ ret double %V
+}
+
+
+++ /dev/null
-; RUN: llvm-as < %s | llc -march=x86
-define i1 @test1(double %X) {
- %V = fcmp one double %X, 0.000000e+00 ; <i1> [#uses=1]
- ret i1 %V
-}
-
-define double @test2(i64 %X) {
- %V = uitofp i64 %X to double ; <double> [#uses=1]
- ret double %V
-}
-
-
--- /dev/null
+; RUN: llvm-as < %s | llc -march=x86 | not grep {j\[lgbe\]}
+
+define i32 @max(i32 %A, i32 %B) {
+ %gt = icmp sgt i32 %A, %B ; <i1> [#uses=1]
+ %R = select i1 %gt, i32 %A, i32 %B ; <i32> [#uses=1]
+ ret i32 %R
+}
+
+++ /dev/null
-; RUN: llvm-as < %s | llc -march=x86 | not grep {j\[lgbe\]}
-
-define i32 @max(i32 %A, i32 %B) {
- %gt = icmp sgt i32 %A, %B ; <i1> [#uses=1]
- %R = select i1 %gt, i32 %A, i32 %B ; <i32> [#uses=1]
- ret i32 %R
-}
-
--- /dev/null
+; Linear scan does not currently coalesce any two variables that have
+; overlapping live intervals. When two overlapping intervals have the same
+; value, they can be joined though.
+;
+; RUN: llvm-as < %s | llc -march=x86 -regalloc=linearscan | \
+; RUN: not grep {mov %\[A-Z\]\\\{2,3\\\}, %\[A-Z\]\\\{2,3\\\}}
+
+define i64 @test(i64 %x) {
+entry:
+ %tmp.1 = mul i64 %x, 4294967297 ; <i64> [#uses=1]
+ ret i64 %tmp.1
+}
+
+++ /dev/null
-; Linear scan does not currently coalesce any two variables that have
-; overlapping live intervals. When two overlapping intervals have the same
-; value, they can be joined though.
-;
-; RUN: llvm-as < %s | llc -march=x86 -regalloc=linearscan | \
-; RUN: not grep {mov %\[A-Z\]\\\{2,3\\\}, %\[A-Z\]\\\{2,3\\\}}
-
-define i64 @test(i64 %x) {
-entry:
- %tmp.1 = mul i64 %x, 4294967297 ; <i64> [#uses=1]
- ret i64 %tmp.1
-}
-
--- /dev/null
+; RUN: llvm-as < %s | llc -march=x86
+
+define double @test(double %d) {
+ %X = select i1 false, double %d, double %d ; <double> [#uses=1]
+ ret double %X
+}
+
+++ /dev/null
-; RUN: llvm-as < %s | llc -march=x86
-
-define double @test(double %d) {
- %X = select i1 false, double %d, double %d ; <double> [#uses=1]
- ret double %X
-}
-
--- /dev/null
+; RUN: llvm-as < %s | llc -march=x86
+
+define i1 @T(double %X) {
+ %V = fcmp oeq double %X, %X ; <i1> [#uses=1]
+ ret i1 %V
+}
+++ /dev/null
-; RUN: llvm-as < %s | llc -march=x86
-
-define i1 @T(double %X) {
- %V = fcmp oeq double %X, %X ; <i1> [#uses=1]
- ret i1 %V
-}
--- /dev/null
+; RUN: llvm-as < %s | llc -march=x86
+
+define i1 @test(i1 %C, i1 %D, i32 %X, i32 %Y) {
+ %E = icmp slt i32 %X, %Y ; <i1> [#uses=1]
+ %F = select i1 %C, i1 %D, i1 %E ; <i1> [#uses=1]
+ ret i1 %F
+}
+
+++ /dev/null
-; RUN: llvm-as < %s | llc -march=x86
-
-define i1 @test(i1 %C, i1 %D, i32 %X, i32 %Y) {
- %E = icmp slt i32 %X, %Y ; <i1> [#uses=1]
- %F = select i1 %C, i1 %D, i1 %E ; <i1> [#uses=1]
- ret i1 %F
-}
-
--- /dev/null
+; RUN: llvm-as < %s | llc -march=x86 -mcpu=yonah | \
+; RUN: grep movsd | count 1
+; RUN: llvm-as < %s | llc -march=x86 -mcpu=yonah | \
+; RUN: grep ucomisd
+declare i1 @llvm.isunordered.f64(double, double)
+
+define i1 @test1(double %X, double %Y) {
+ %COM = fcmp uno double %X, %Y ; <i1> [#uses=1]
+ ret i1 %COM
+}
+
+++ /dev/null
-; RUN: llvm-as < %s | llc -march=x86 -mcpu=yonah | \
-; RUN: grep movsd | count 1
-; RUN: llvm-as < %s | llc -march=x86 -mcpu=yonah | \
-; RUN: grep ucomisd
-declare i1 @llvm.isunordered.f64(double, double)
-
-define i1 @test1(double %X, double %Y) {
- %COM = fcmp uno double %X, %Y ; <i1> [#uses=1]
- ret i1 %COM
-}
-
--- /dev/null
+; RUN: llvm-as < %s | llc -march=x86 -x86-asm-syntax=intel -mcpu=i486 | \
+; RUN: grep {fadd\\|fsub\\|fdiv\\|fmul} | not grep -i ST
+
+; Test that the load of the constant is folded into the operation.
+
+
+define double @foo_add(double %P) {
+ %tmp.1 = add double %P, 1.230000e+02 ; <double> [#uses=1]
+ ret double %tmp.1
+}
+
+define double @foo_mul(double %P) {
+ %tmp.1 = mul double %P, 1.230000e+02 ; <double> [#uses=1]
+ ret double %tmp.1
+}
+
+define double @foo_sub(double %P) {
+ %tmp.1 = sub double %P, 1.230000e+02 ; <double> [#uses=1]
+ ret double %tmp.1
+}
+
+define double @foo_subr(double %P) {
+ %tmp.1 = sub double 1.230000e+02, %P ; <double> [#uses=1]
+ ret double %tmp.1
+}
+
+define double @foo_div(double %P) {
+ %tmp.1 = fdiv double %P, 1.230000e+02 ; <double> [#uses=1]
+ ret double %tmp.1
+}
+
+define double @foo_divr(double %P) {
+ %tmp.1 = fdiv double 1.230000e+02, %P ; <double> [#uses=1]
+ ret double %tmp.1
+}
+++ /dev/null
-; RUN: llvm-as < %s | llc -march=x86 -x86-asm-syntax=intel -mcpu=i486 | \
-; RUN: grep {fadd\\|fsub\\|fdiv\\|fmul} | not grep -i ST
-
-; Test that the load of the constant is folded into the operation.
-
-
-define double @foo_add(double %P) {
- %tmp.1 = add double %P, 1.230000e+02 ; <double> [#uses=1]
- ret double %tmp.1
-}
-
-define double @foo_mul(double %P) {
- %tmp.1 = mul double %P, 1.230000e+02 ; <double> [#uses=1]
- ret double %tmp.1
-}
-
-define double @foo_sub(double %P) {
- %tmp.1 = sub double %P, 1.230000e+02 ; <double> [#uses=1]
- ret double %tmp.1
-}
-
-define double @foo_subr(double %P) {
- %tmp.1 = sub double 1.230000e+02, %P ; <double> [#uses=1]
- ret double %tmp.1
-}
-
-define double @foo_div(double %P) {
- %tmp.1 = fdiv double %P, 1.230000e+02 ; <double> [#uses=1]
- ret double %tmp.1
-}
-
-define double @foo_divr(double %P) {
- %tmp.1 = fdiv double 1.230000e+02, %P ; <double> [#uses=1]
- ret double %tmp.1
-}
--- /dev/null
+; RUN: llvm-as < %s | llc -march=x86 | grep fild | not grep ESP
+
+define double @short(i16* %P) {
+ %V = load i16* %P ; <i16> [#uses=1]
+ %V2 = sitofp i16 %V to double ; <double> [#uses=1]
+ ret double %V2
+}
+
+define double @int(i32* %P) {
+ %V = load i32* %P ; <i32> [#uses=1]
+ %V2 = sitofp i32 %V to double ; <double> [#uses=1]
+ ret double %V2
+}
+
+define double @long(i64* %P) {
+ %V = load i64* %P ; <i64> [#uses=1]
+ %V2 = sitofp i64 %V to double ; <double> [#uses=1]
+ ret double %V2
+}
+
+++ /dev/null
-; RUN: llvm-as < %s | llc -march=x86 | grep fild | not grep ESP
-
-define double @short(i16* %P) {
- %V = load i16* %P ; <i16> [#uses=1]
- %V2 = sitofp i16 %V to double ; <double> [#uses=1]
- ret double %V2
-}
-
-define double @int(i32* %P) {
- %V = load i32* %P ; <i32> [#uses=1]
- %V2 = sitofp i32 %V to double ; <double> [#uses=1]
- ret double %V2
-}
-
-define double @long(i64* %P) {
- %V = load i64* %P ; <i64> [#uses=1]
- %V2 = sitofp i64 %V to double ; <double> [#uses=1]
- ret double %V2
-}
-
--- /dev/null
+; RUN: llvm-as < %s | llc -march=x86 -x86-asm-syntax=intel | \
+; RUN: grep -i ST | not grep {fadd\\|fsub\\|fdiv\\|fmul}
+
+; Test that the load of the memory location is folded into the operation.
+
+define double @test_add(double %X, double* %P) {
+ %Y = load double* %P ; <double> [#uses=1]
+ %R = add double %X, %Y ; <double> [#uses=1]
+ ret double %R
+}
+
+define double @test_mul(double %X, double* %P) {
+ %Y = load double* %P ; <double> [#uses=1]
+ %R = mul double %X, %Y ; <double> [#uses=1]
+ ret double %R
+}
+
+define double @test_sub(double %X, double* %P) {
+ %Y = load double* %P ; <double> [#uses=1]
+ %R = sub double %X, %Y ; <double> [#uses=1]
+ ret double %R
+}
+
+define double @test_subr(double %X, double* %P) {
+ %Y = load double* %P ; <double> [#uses=1]
+ %R = sub double %Y, %X ; <double> [#uses=1]
+ ret double %R
+}
+
+define double @test_div(double %X, double* %P) {
+ %Y = load double* %P ; <double> [#uses=1]
+ %R = fdiv double %X, %Y ; <double> [#uses=1]
+ ret double %R
+}
+
+define double @test_divr(double %X, double* %P) {
+ %Y = load double* %P ; <double> [#uses=1]
+ %R = fdiv double %Y, %X ; <double> [#uses=1]
+ ret double %R
+}
+++ /dev/null
-; RUN: llvm-as < %s | llc -march=x86 -x86-asm-syntax=intel | \
-; RUN: grep -i ST | not grep {fadd\\|fsub\\|fdiv\\|fmul}
-
-; Test that the load of the memory location is folded into the operation.
-
-define double @test_add(double %X, double* %P) {
- %Y = load double* %P ; <double> [#uses=1]
- %R = add double %X, %Y ; <double> [#uses=1]
- ret double %R
-}
-
-define double @test_mul(double %X, double* %P) {
- %Y = load double* %P ; <double> [#uses=1]
- %R = mul double %X, %Y ; <double> [#uses=1]
- ret double %R
-}
-
-define double @test_sub(double %X, double* %P) {
- %Y = load double* %P ; <double> [#uses=1]
- %R = sub double %X, %Y ; <double> [#uses=1]
- ret double %R
-}
-
-define double @test_subr(double %X, double* %P) {
- %Y = load double* %P ; <double> [#uses=1]
- %R = sub double %Y, %X ; <double> [#uses=1]
- ret double %R
-}
-
-define double @test_div(double %X, double* %P) {
- %Y = load double* %P ; <double> [#uses=1]
- %R = fdiv double %X, %Y ; <double> [#uses=1]
- ret double %R
-}
-
-define double @test_divr(double %X, double* %P) {
- %Y = load double* %P ; <double> [#uses=1]
- %R = fdiv double %Y, %X ; <double> [#uses=1]
- ret double %R
-}
--- /dev/null
+; RUN: llvm-as < %s | llc -march=x86 -x86-asm-syntax=intel | \
+; RUN: grep {sh\[lr\]d} | count 5
+
+define i64 @test1(i64 %X, i8 %C) {
+ %shift.upgrd.1 = zext i8 %C to i64 ; <i64> [#uses=1]
+ %Y = shl i64 %X, %shift.upgrd.1 ; <i64> [#uses=1]
+ ret i64 %Y
+}
+
+define i64 @test2(i64 %X, i8 %C) {
+ %shift.upgrd.2 = zext i8 %C to i64 ; <i64> [#uses=1]
+ %Y = ashr i64 %X, %shift.upgrd.2 ; <i64> [#uses=1]
+ ret i64 %Y
+}
+
+define i64 @test3(i64 %X, i8 %C) {
+ %shift.upgrd.3 = zext i8 %C to i64 ; <i64> [#uses=1]
+ %Y = lshr i64 %X, %shift.upgrd.3 ; <i64> [#uses=1]
+ ret i64 %Y
+}
+
+define i32 @test4(i32 %A, i32 %B, i8 %C) {
+ %shift.upgrd.4 = zext i8 %C to i32 ; <i32> [#uses=1]
+ %X = shl i32 %A, %shift.upgrd.4 ; <i32> [#uses=1]
+ %Cv = sub i8 32, %C ; <i8> [#uses=1]
+ %shift.upgrd.5 = zext i8 %Cv to i32 ; <i32> [#uses=1]
+ %Y = lshr i32 %B, %shift.upgrd.5 ; <i32> [#uses=1]
+ %Z = or i32 %Y, %X ; <i32> [#uses=1]
+ ret i32 %Z
+}
+
+define i16 @test5(i16 %A, i16 %B, i8 %C) {
+ %shift.upgrd.6 = zext i8 %C to i16 ; <i16> [#uses=1]
+ %X = shl i16 %A, %shift.upgrd.6 ; <i16> [#uses=1]
+ %Cv = sub i8 16, %C ; <i8> [#uses=1]
+ %shift.upgrd.7 = zext i8 %Cv to i16 ; <i16> [#uses=1]
+ %Y = lshr i16 %B, %shift.upgrd.7 ; <i16> [#uses=1]
+ %Z = or i16 %Y, %X ; <i16> [#uses=1]
+ ret i16 %Z
+}
+
+++ /dev/null
-; RUN: llvm-as < %s | llc -march=x86 -x86-asm-syntax=intel | \
-; RUN: grep {sh\[lr\]d} | count 5
-
-define i64 @test1(i64 %X, i8 %C) {
- %shift.upgrd.1 = zext i8 %C to i64 ; <i64> [#uses=1]
- %Y = shl i64 %X, %shift.upgrd.1 ; <i64> [#uses=1]
- ret i64 %Y
-}
-
-define i64 @test2(i64 %X, i8 %C) {
- %shift.upgrd.2 = zext i8 %C to i64 ; <i64> [#uses=1]
- %Y = ashr i64 %X, %shift.upgrd.2 ; <i64> [#uses=1]
- ret i64 %Y
-}
-
-define i64 @test3(i64 %X, i8 %C) {
- %shift.upgrd.3 = zext i8 %C to i64 ; <i64> [#uses=1]
- %Y = lshr i64 %X, %shift.upgrd.3 ; <i64> [#uses=1]
- ret i64 %Y
-}
-
-define i32 @test4(i32 %A, i32 %B, i8 %C) {
- %shift.upgrd.4 = zext i8 %C to i32 ; <i32> [#uses=1]
- %X = shl i32 %A, %shift.upgrd.4 ; <i32> [#uses=1]
- %Cv = sub i8 32, %C ; <i8> [#uses=1]
- %shift.upgrd.5 = zext i8 %Cv to i32 ; <i32> [#uses=1]
- %Y = lshr i32 %B, %shift.upgrd.5 ; <i32> [#uses=1]
- %Z = or i32 %Y, %X ; <i32> [#uses=1]
- ret i32 %Z
-}
-
-define i16 @test5(i16 %A, i16 %B, i8 %C) {
- %shift.upgrd.6 = zext i8 %C to i16 ; <i16> [#uses=1]
- %X = shl i16 %A, %shift.upgrd.6 ; <i16> [#uses=1]
- %Cv = sub i8 16, %C ; <i8> [#uses=1]
- %shift.upgrd.7 = zext i8 %Cv to i16 ; <i16> [#uses=1]
- %Y = lshr i16 %B, %shift.upgrd.7 ; <i16> [#uses=1]
- %Z = or i16 %Y, %X ; <i16> [#uses=1]
- ret i16 %Z
-}
-