-; RUN: llvm-as < %s | llc -march=x86-64 -relocation-model=static -stats -info-output-file - > %t
+; REQUIRES: asserts
+; RUN: llc < %s -mtriple=x86_64-linux -mcpu=corei7 -relocation-model=static -o /dev/null -stats -info-output-file - > %t
; RUN: not grep spill %t
-; RUN: not grep {%rsp} %t
-; RUN: not grep {%rbp} %t
+; RUN: not grep "%rsp" %t
+; RUN: not grep "%rbp" %t
; The register-pressure scheduler should be able to schedule this in a
; way that does not require spills.
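+; All 24 volatile loads of @X below must be preserved, so many values can be
+; live at once. x86-64 has only 16 general-purpose registers; a
+; pressure-unaware schedule would overflow them and force spill code such as
+; the (hypothetical) "movq %rax, 16(%rsp)", which is the pattern the greps
+; above rule out.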
@X = external global i64 ; <i64*> [#uses=25]
define fastcc i64 @foo() nounwind {
- %tmp = volatile load i64* @X ; <i64> [#uses=7]
- %tmp1 = volatile load i64* @X ; <i64> [#uses=5]
- %tmp2 = volatile load i64* @X ; <i64> [#uses=3]
- %tmp3 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp4 = volatile load i64* @X ; <i64> [#uses=5]
- %tmp5 = volatile load i64* @X ; <i64> [#uses=3]
- %tmp6 = volatile load i64* @X ; <i64> [#uses=2]
- %tmp7 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp8 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp9 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp10 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp11 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp12 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp13 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp14 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp15 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp16 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp17 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp18 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp19 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp20 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp21 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp22 = volatile load i64* @X ; <i64> [#uses=1]
- %tmp23 = volatile load i64* @X ; <i64> [#uses=1]
+ %tmp = load volatile i64* @X ; <i64> [#uses=7]
+ %tmp1 = load volatile i64* @X ; <i64> [#uses=5]
+ %tmp2 = load volatile i64* @X ; <i64> [#uses=3]
+ %tmp3 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp4 = load volatile i64* @X ; <i64> [#uses=5]
+ %tmp5 = load volatile i64* @X ; <i64> [#uses=3]
+ %tmp6 = load volatile i64* @X ; <i64> [#uses=2]
+ %tmp7 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp8 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp9 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp10 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp11 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp12 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp13 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp14 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp15 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp16 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp17 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp18 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp19 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp20 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp21 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp22 = load volatile i64* @X ; <i64> [#uses=1]
+ %tmp23 = load volatile i64* @X ; <i64> [#uses=1]
%tmp24 = call i64 @llvm.bswap.i64(i64 %tmp8) ; <i64> [#uses=1]
%tmp25 = add i64 %tmp6, %tmp5 ; <i64> [#uses=1]
%tmp26 = add i64 %tmp25, %tmp4 ; <i64> [#uses=1]
%tmp217 = add i64 %tmp205, %tmp215 ; <i64> [#uses=1]
%tmp218 = add i64 %tmp217, %tmp211 ; <i64> [#uses=1]
%tmp219 = call i64 @llvm.bswap.i64(i64 %tmp23) ; <i64> [#uses=2]
- volatile store i64 %tmp219, i64* @X, align 8
+ store volatile i64 %tmp219, i64* @X, align 8
%tmp220 = add i64 %tmp203, %tmp190 ; <i64> [#uses=1]
%tmp221 = add i64 %tmp220, %tmp216 ; <i64> [#uses=1]
%tmp222 = add i64 %tmp219, %tmp177 ; <i64> [#uses=1]