From a3c533a716d95877ad2daf2326b5375a8fe844d3 Mon Sep 17 00:00:00 2001 From: Reid Spencer Date: Sun, 1 Apr 2007 07:36:28 +0000 Subject: [PATCH] For PR1297: Update these test cases to use proper signatures for bswap which is now an overloaded intrinsic. Its name must be of the form llvm.bswap.i32.i32 since both the parameter and the result are of type "iAny". Also, the bit counting intrinsics changed to always return i32. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@35548 91177308-0d34-0410-b5e6-96231b3b80d8 --- test/CodeGen/Generic/llvm-ct-intrinsics.ll | 66 +++++++++---------- .../PowerPC/2006-07-19-stwbrx-crash.ll | 4 +- test/CodeGen/PowerPC/2007-03-24-cntlzd.ll | 10 ++- test/CodeGen/PowerPC/bswap-load-store.ll | 12 ++-- test/CodeGen/X86/bswap.ll | 12 ++-- test/Feature/intrinsics.ll | 36 +++++----- test/Transforms/InstCombine/bswap-fold.ll | 14 ++-- 7 files changed, 76 insertions(+), 78 deletions(-) diff --git a/test/CodeGen/Generic/llvm-ct-intrinsics.ll b/test/CodeGen/Generic/llvm-ct-intrinsics.ll index 014d261c521..db2e4b19697 100644 --- a/test/CodeGen/Generic/llvm-ct-intrinsics.ll +++ b/test/CodeGen/Generic/llvm-ct-intrinsics.ll @@ -1,59 +1,59 @@ ; Make sure this testcase is supported by all code generators ; RUN: llvm-upgrade < %s | llvm-as | llc -declare ulong %llvm.ctpop.i64(ulong) +declare uint %llvm.ctpop.i64(ulong) declare uint %llvm.ctpop.i32(uint) -declare ushort %llvm.ctpop.i16(ushort) -declare ubyte %llvm.ctpop.i8(ubyte) +declare uint %llvm.ctpop.i16(ushort) +declare uint %llvm.ctpop.i8(ubyte) void %ctpoptest(ubyte %A, ushort %B, uint %C, ulong %D, - ubyte *%AP, ushort* %BP, uint* %CP, ulong* %DP) { - %a = call ubyte %llvm.ctpop.i8(ubyte %A) - %b = call ushort %llvm.ctpop.i16(ushort %B) + uint *%AP, uint* %BP, uint* %CP, uint* %DP) { + %a = call uint %llvm.ctpop.i8(ubyte %A) + %b = call uint %llvm.ctpop.i16(ushort %B) %c = call uint %llvm.ctpop.i32(uint %C) - %d = call ulong %llvm.ctpop.i64(ulong %D) + %d = call uint %llvm.ctpop.i64(ulong 
%D) - store ubyte %a, ubyte* %AP - store ushort %b, ushort* %BP - store uint %c, uint* %CP - store ulong %d, ulong* %DP + store uint %a, uint* %AP + store uint %b, uint* %BP + store uint %c, uint* %CP + store uint %d, uint* %DP ret void } -declare ulong %llvm.ctlz.i64(ulong) +declare uint %llvm.ctlz.i64(ulong) declare uint %llvm.ctlz.i32(uint) -declare ushort %llvm.ctlz.i16(ushort) -declare ubyte %llvm.ctlz.i8(ubyte) +declare uint %llvm.ctlz.i16(ushort) +declare uint %llvm.ctlz.i8(ubyte) void %ctlztest(ubyte %A, ushort %B, uint %C, ulong %D, - ubyte *%AP, ushort* %BP, uint* %CP, ulong* %DP) { - %a = call ubyte %llvm.ctlz.i8(ubyte %A) - %b = call ushort %llvm.ctlz.i16(ushort %B) + uint *%AP, uint* %BP, uint* %CP, uint* %DP) { + %a = call uint %llvm.ctlz.i8(ubyte %A) + %b = call uint %llvm.ctlz.i16(ushort %B) %c = call uint %llvm.ctlz.i32(uint %C) - %d = call ulong %llvm.ctlz.i64(ulong %D) + %d = call uint %llvm.ctlz.i64(ulong %D) - store ubyte %a, ubyte* %AP - store ushort %b, ushort* %BP - store uint %c, uint* %CP - store ulong %d, ulong* %DP + store uint %a, uint* %AP + store uint %b, uint* %BP + store uint %c, uint* %CP + store uint %d, uint* %DP ret void } -declare ulong %llvm.cttz.i64(ulong) +declare uint %llvm.cttz.i64(ulong) declare uint %llvm.cttz.i32(uint) -declare ushort %llvm.cttz.i16(ushort) -declare ubyte %llvm.cttz.i8(ubyte) +declare uint %llvm.cttz.i16(ushort) +declare uint %llvm.cttz.i8(ubyte) void %cttztest(ubyte %A, ushort %B, uint %C, ulong %D, - ubyte *%AP, ushort* %BP, uint* %CP, ulong* %DP) { - %a = call ubyte %llvm.cttz.i8(ubyte %A) - %b = call ushort %llvm.cttz.i16(ushort %B) + uint *%AP, uint* %BP, uint* %CP, uint* %DP) { + %a = call uint %llvm.cttz.i8(ubyte %A) + %b = call uint %llvm.cttz.i16(ushort %B) %c = call uint %llvm.cttz.i32(uint %C) - %d = call ulong %llvm.cttz.i64(ulong %D) + %d = call uint %llvm.cttz.i64(ulong %D) - store ubyte %a, ubyte* %AP - store ushort %b, ushort* %BP - store uint %c, uint* %CP - store ulong %d, ulong* %DP + 
store uint %a, uint* %AP + store uint %b, uint* %BP + store uint %c, uint* %CP + store uint %d, uint* %DP ret void } diff --git a/test/CodeGen/PowerPC/2006-07-19-stwbrx-crash.ll b/test/CodeGen/PowerPC/2006-07-19-stwbrx-crash.ll index d71ba5a3822..1da6dbf1117 100644 --- a/test/CodeGen/PowerPC/2006-07-19-stwbrx-crash.ll +++ b/test/CodeGen/PowerPC/2006-07-19-stwbrx-crash.ll @@ -2,9 +2,9 @@ void %img2buf(int %symbol_size_in_bytes, ushort* %ui16) { %tmp93 = load ushort* null ; [#uses=1] - %tmp99 = call ushort %llvm.bswap.i16( ushort %tmp93 ) ; [#uses=1] + %tmp99 = call ushort %llvm.bswap.i16.i16( ushort %tmp93 ) ; [#uses=1] store ushort %tmp99, ushort* %ui16 ret void } -declare ushort %llvm.bswap.i16(ushort) +declare ushort %llvm.bswap.i16.i16(ushort) diff --git a/test/CodeGen/PowerPC/2007-03-24-cntlzd.ll b/test/CodeGen/PowerPC/2007-03-24-cntlzd.ll index a9808cee55d..1ea61746bb5 100644 --- a/test/CodeGen/PowerPC/2007-03-24-cntlzd.ll +++ b/test/CodeGen/PowerPC/2007-03-24-cntlzd.ll @@ -2,12 +2,10 @@ define i32 @_ZNK4llvm5APInt17countLeadingZerosEv(i64 *%t) { %tmp19 = load i64* %t - %tmp23 = tail call i64 @llvm.ctlz.i64( i64 %tmp19 ) ; [#uses=1] - %tmp2324 = trunc i64 %tmp23 to i32 ; [#uses=1] - %tmp89 = add i32 %tmp2324, -64 ; [#uses=1] - %tmp90 = add i32 %tmp89, 0 ; [#uses=1] + %tmp23 = tail call i32 @llvm.ctlz.i64( i64 %tmp19 ) ; [#uses=1] + %tmp89 = add i32 %tmp23, -64 ; [#uses=1] + %tmp90 = add i32 %tmp89, 0 ; [#uses=1] ret i32 %tmp90 } -declare i64 @llvm.ctlz.i64(i64) - +declare i32 @llvm.ctlz.i64(i64) diff --git a/test/CodeGen/PowerPC/bswap-load-store.ll b/test/CodeGen/PowerPC/bswap-load-store.ll index 853abc42558..664a2aa4bc9 100644 --- a/test/CodeGen/PowerPC/bswap-load-store.ll +++ b/test/CodeGen/PowerPC/bswap-load-store.ll @@ -8,7 +8,7 @@ void %STWBRX(uint %i, sbyte* %ptr, int %off) { %tmp1 = getelementptr sbyte* %ptr, int %off %tmp1 = cast sbyte* %tmp1 to uint* - %tmp13 = tail call uint %llvm.bswap.i32(uint %i) + %tmp13 = tail call uint %llvm.bswap.i32.i32(uint 
%i) store uint %tmp13, uint* %tmp1 ret void } @@ -17,14 +17,14 @@ uint %LWBRX(sbyte* %ptr, int %off) { %tmp1 = getelementptr sbyte* %ptr, int %off %tmp1 = cast sbyte* %tmp1 to uint* %tmp = load uint* %tmp1 - %tmp14 = tail call uint %llvm.bswap.i32( uint %tmp ) + %tmp14 = tail call uint %llvm.bswap.i32.i32( uint %tmp ) ret uint %tmp14 } void %STHBRX(ushort %s, sbyte* %ptr, int %off) { %tmp1 = getelementptr sbyte* %ptr, int %off %tmp1 = cast sbyte* %tmp1 to ushort* - %tmp5 = call ushort %llvm.bswap.i16( ushort %s ) + %tmp5 = call ushort %llvm.bswap.i16.i16( ushort %s ) store ushort %tmp5, ushort* %tmp1 ret void } @@ -33,10 +33,10 @@ ushort %LHBRX(sbyte* %ptr, int %off) { %tmp1 = getelementptr sbyte* %ptr, int %off %tmp1 = cast sbyte* %tmp1 to ushort* %tmp = load ushort* %tmp1 - %tmp6 = call ushort %llvm.bswap.i16(ushort %tmp) + %tmp6 = call ushort %llvm.bswap.i16.i16(ushort %tmp) ret ushort %tmp6 } -declare uint %llvm.bswap.i32(uint) +declare uint %llvm.bswap.i32.i32(uint) -declare ushort %llvm.bswap.i16(ushort) +declare ushort %llvm.bswap.i16.i16(ushort) diff --git a/test/CodeGen/X86/bswap.ll b/test/CodeGen/X86/bswap.ll index 79467e0838b..62c20c354eb 100644 --- a/test/CodeGen/X86/bswap.ll +++ b/test/CodeGen/X86/bswap.ll @@ -3,21 +3,21 @@ ; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | grep bswapl | wc -l | grep 3 && ; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | grep rolw | wc -l | grep 1 -declare ushort %llvm.bswap.i16(ushort) -declare uint %llvm.bswap.i32(uint) -declare ulong %llvm.bswap.i64(ulong) +declare ushort %llvm.bswap.i16.i16(ushort) +declare uint %llvm.bswap.i32.i32(uint) +declare ulong %llvm.bswap.i64.i64(ulong) ushort %W(ushort %A) { - %Z = call ushort %llvm.bswap.i16(ushort %A) + %Z = call ushort %llvm.bswap.i16.i16(ushort %A) ret ushort %Z } uint %X(uint %A) { - %Z = call uint %llvm.bswap.i32(uint %A) + %Z = call uint %llvm.bswap.i32.i32(uint %A) ret uint %Z } ulong %Y(ulong %A) { - %Z = call ulong %llvm.bswap.i64(ulong %A) + %Z = call 
ulong %llvm.bswap.i64.i64(ulong %A) ret ulong %Z } diff --git a/test/Feature/intrinsics.ll b/test/Feature/intrinsics.ll index 1d1575d584a..d46472673cf 100644 --- a/test/Feature/intrinsics.ll +++ b/test/Feature/intrinsics.ll @@ -8,20 +8,20 @@ declare bool %llvm.isunordered.f64(double,double) declare void %llvm.prefetch(sbyte*, uint, uint) -declare ubyte %llvm.ctpop.i8(ubyte) -declare ushort %llvm.ctpop.i16(ushort) +declare uint %llvm.ctpop.i8(ubyte) +declare uint %llvm.ctpop.i16(ushort) declare uint %llvm.ctpop.i32(uint) -declare ulong %llvm.ctpop.i64(ulong) +declare uint %llvm.ctpop.i64(ulong) -declare ubyte %llvm.cttz.i8(ubyte) -declare ushort %llvm.cttz.i16(ushort) +declare uint %llvm.cttz.i8(ubyte) +declare uint %llvm.cttz.i16(ushort) declare uint %llvm.cttz.i32(uint) -declare ulong %llvm.cttz.i64(ulong) +declare uint %llvm.cttz.i64(ulong) -declare ubyte %llvm.ctlz.i8(ubyte) -declare ushort %llvm.ctlz.i16(ushort) +declare uint %llvm.ctlz.i8(ubyte) +declare uint %llvm.ctlz.i16(ushort) declare uint %llvm.ctlz.i32(uint) -declare ulong %llvm.ctlz.i64(ulong) +declare uint %llvm.ctlz.i64(ulong) declare float %llvm.sqrt.f32(float) declare double %llvm.sqrt.f64(double) @@ -39,20 +39,20 @@ void %libm() { call float %llvm.sqrt.f32(float 5.0) call double %llvm.sqrt.f64(double 6.0) - call ubyte %llvm.ctpop.i8(ubyte 10) - call ushort %llvm.ctpop.i16(ushort 11) + call uint %llvm.ctpop.i8(ubyte 10) + call uint %llvm.ctpop.i16(ushort 11) call uint %llvm.ctpop.i32(uint 12) - call ulong %llvm.ctpop.i64(ulong 13) + call uint %llvm.ctpop.i64(ulong 13) - call ubyte %llvm.ctlz.i8(ubyte 14) - call ushort %llvm.ctlz.i16(ushort 15) + call uint %llvm.ctlz.i8(ubyte 14) + call uint %llvm.ctlz.i16(ushort 15) call uint %llvm.ctlz.i32(uint 16) - call ulong %llvm.ctlz.i64(ulong 17) + call uint %llvm.ctlz.i64(ulong 17) - call ubyte %llvm.cttz.i8(ubyte 18) - call ushort %llvm.cttz.i16(ushort 19) + call uint %llvm.cttz.i8(ubyte 18) + call uint %llvm.cttz.i16(ushort 19) call uint 
%llvm.cttz.i32(uint 20) - call ulong %llvm.cttz.i64(ulong 21) + call uint %llvm.cttz.i64(ulong 21) ret void } diff --git a/test/Transforms/InstCombine/bswap-fold.ll b/test/Transforms/InstCombine/bswap-fold.ll index 0ee486ad1aa..146a4495c91 100644 --- a/test/Transforms/InstCombine/bswap-fold.ll +++ b/test/Transforms/InstCombine/bswap-fold.ll @@ -2,25 +2,25 @@ ; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | not grep 'call.*bswap' bool %test1(ushort %tmp2) { - %tmp10 = call ushort %llvm.bswap.i16( ushort %tmp2 ) + %tmp10 = call ushort %llvm.bswap.i16.i16( ushort %tmp2 ) %tmp = seteq ushort %tmp10, 1 ret bool %tmp } bool %test2(uint %tmp) { - %tmp34 = tail call uint %llvm.bswap.i32( uint %tmp ) + %tmp34 = tail call uint %llvm.bswap.i32.i32( uint %tmp ) %tmp = seteq uint %tmp34, 1 ret bool %tmp } -declare uint %llvm.bswap.i32(uint) - bool %test3(ulong %tmp) { - %tmp34 = tail call ulong %llvm.bswap.i64( ulong %tmp ) + %tmp34 = tail call ulong %llvm.bswap.i64.i64( ulong %tmp ) %tmp = seteq ulong %tmp34, 1 ret bool %tmp } -declare ulong %llvm.bswap.i64(ulong) +declare ulong %llvm.bswap.i64.i64(ulong) + +declare ushort %llvm.bswap.i16.i16(ushort) -declare ushort %llvm.bswap.i16(ushort) +declare uint %llvm.bswap.i32.i32(uint) -- 2.34.1