X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTarget%2FREADME.txt;h=284be24b679a61296eb429c38985746b92e33065;hb=a5e90d7b7467f299264535571943ac993ebd80af;hp=437e93c18c7089e7145cefe1a6154ff250e82d2d;hpb=ac9dcb94dde5f166ee29372385c0e3b695227ab4;p=oota-llvm.git

diff --git a/lib/Target/README.txt b/lib/Target/README.txt
index 437e93c18c7..284be24b679 100644
--- a/lib/Target/README.txt
+++ b/lib/Target/README.txt
@@ -18,23 +18,6 @@ This has a number of uses:
 
 //===---------------------------------------------------------------------===//
 
-FreeBench/mason contains code like this:
-
-static p_type m0u(p_type p) {
-  int m[]={0, 8, 1, 2, 16, 5, 13, 7, 14, 9, 3, 4, 11, 12, 15, 10, 17, 6};
-  p_type pu;
-  pu.a = m[p.a];
-  pu.b = m[p.b];
-  pu.c = m[p.c];
-  return pu;
-}
-
-We currently compile this into a memcpy from a static array into 'm', then
-a bunch of loads from m. It would be better to avoid the memcpy and just do
-loads from the static array.
-
-//===---------------------------------------------------------------------===//
-
 Make the PPC branch selector target independent
 
 //===---------------------------------------------------------------------===//
@@ -110,6 +93,8 @@ int foo(int z, int n) {
   return bar(z, n) + bar(2*z, 2*n);
 }
 
+Reassociate should handle the example in GCC PR16157.
+
 //===---------------------------------------------------------------------===//
 
 These two functions should generate the same code on big-endian systems:
@@ -122,10 +107,6 @@ for 1,2,4,8 bytes.
 
 //===---------------------------------------------------------------------===//
 
-Add LSR exit value substitution. It'll probably be a win for Ackermann, etc.
-
-//===---------------------------------------------------------------------===//
-
 It would be nice to revert this patch:
 http://lists.cs.uiuc.edu/pipermail/llvm-commits/Week-of-Mon-20060213/031986.html
 
@@ -150,13 +131,6 @@ v4sf example(float *P) {
 
 //===---------------------------------------------------------------------===//
 
-We should constant fold vector type casts at the LLVM level, regardless of the
-cast. Currently we cannot fold some casts because we don't have TargetData
-information in the constant folder, so we don't know the endianness of the
-target!
-
-//===---------------------------------------------------------------------===//
-
 Add support for conditional increments, and other related patterns. Instead
 of:
 
@@ -189,17 +163,18 @@ http://gcc.gnu.org/bugzilla/show_bug.cgi?id=17687
 
 Scalar Repl cannot currently promote this testcase to 'ret long cst':
 
-  %struct.X = type { int, int }
+  %struct.X = type { i32, i32 }
   %struct.Y = type { %struct.X }
-ulong %bar() {
-  %retval = alloca %struct.Y, align 8
-  %tmp12 = getelementptr %struct.Y* %retval, int 0, uint 0, uint 0
-  store int 0, int* %tmp12
-  %tmp15 = getelementptr %struct.Y* %retval, int 0, uint 0, uint 1
-  store int 1, int* %tmp15
-  %retval = bitcast %struct.Y* %retval to ulong*
-  %retval = load ulong* %retval
-  ret ulong %retval
+
+define i64 @bar() {
+  %retval = alloca %struct.Y, align 8
+  %tmp12 = getelementptr %struct.Y* %retval, i32 0, i32 0, i32 0
+  store i32 0, i32* %tmp12
+  %tmp15 = getelementptr %struct.Y* %retval, i32 0, i32 0, i32 1
+  store i32 1, i32* %tmp15
+  %retval.upgrd.1 = bitcast %struct.Y* %retval to i64*
+  %retval.upgrd.2 = load i64* %retval.upgrd.1
+  ret i64 %retval.upgrd.2
 }
 
 it should be extended to do so.
@@ -210,16 +185,14 @@
 %struct..0anon = type { <4 x float> }
 
-implementation   ; Functions:
-
-void %test1(<4 x float> %V, float* %P) {
+define void @test1(<4 x float> %V, float* %P) {
   %u = alloca %struct..0anon, align 16
-  %tmp = getelementptr %struct..0anon* %u, int 0, uint 0
+  %tmp = getelementptr %struct..0anon* %u, i32 0, i32 0
   store <4 x float> %V, <4 x float>* %tmp
   %tmp1 = bitcast %struct..0anon* %u to [4 x float]*
-  %tmp = getelementptr [4 x float]* %tmp1, int 0, int 1
-  %tmp = load float* %tmp
-  %tmp3 = mul float %tmp, 2.000000e+00
+  %tmp.upgrd.1 = getelementptr [4 x float]* %tmp1, i32 0, i32 1
+  %tmp.upgrd.2 = load float* %tmp.upgrd.1
+  %tmp3 = mul float %tmp.upgrd.2, 2.000000e+00
   store float %tmp3, float* %P
   ret void
 }
 
@@ -348,46 +321,6 @@ pass.
 
 //===---------------------------------------------------------------------===//
 
--predsimplify should transform this:
-
-void bad(unsigned x)
-{
-  if (x > 4)
-    bar(12);
-  else if (x > 3)
-    bar(523);
-  else if (x > 2)
-    bar(36);
-  else if (x > 1)
-    bar(65);
-  else if (x > 0)
-    bar(45);
-  else
-    bar(367);
-}
-
-into:
-
-void good(unsigned x)
-{
-  if (x == 4)
-    bar(523);
-  else if (x == 3)
-    bar(36);
-  else if (x == 2)
-    bar(65);
-  else if (x == 1)
-    bar(45);
-  else if (x == 0)
-    bar(367);
-  else
-    bar(12);
-}
-
-to enable further optimizations.
-
-//===---------------------------------------------------------------------===//
-
 Consider:
 
 typedef unsigned U32;
@@ -419,3 +352,163 @@ Promote for i32 bswap can use i64 bswap + shr. Useful on targets with 64-bit
 regs and bswap, like Itanium.
 
 //===---------------------------------------------------------------------===//
+
+LSR should know what GPR types a target has. This code:
+
+volatile short X, Y; // globals
+
+void foo(int N) {
+  int i;
+  for (i = 0; i < N; i++) { X = i; Y = i*4; }
+}
+
+produces two identical IVs (after promotion) on PPC/ARM:
+
+LBB1_1: @bb.preheader
+        mov r3, #0
+        mov r2, r3
+        mov r1, r3
+LBB1_2: @bb
+        ldr r12, LCPI1_0
+        ldr r12, [r12]
+        strh r2, [r12]
+        ldr r12, LCPI1_1
+        ldr r12, [r12]
+        strh r3, [r12]
+        add r1, r1, #1  <- [0,+,1]
+        add r3, r3, #4
+        add r2, r2, #1  <- [0,+,1]
+        cmp r1, r0
+        bne LBB1_2 @bb
+
+
+//===---------------------------------------------------------------------===//
+
+Tail call elim should be more aggressive, checking to see if the call is
+followed by an uncond branch to an exit block.
+
+; This testcase is due to tail-duplication not wanting to copy the return
+; instruction into the terminating blocks because there was other code
+; optimized out of the function after the taildup happened.
+;RUN: llvm-upgrade < %s | llvm-as | opt -tailcallelim | llvm-dis | not grep call
+
+int %t4(int %a) {
+entry:
+        %tmp.1 = and int %a, 1
+        %tmp.2 = cast int %tmp.1 to bool
+        br bool %tmp.2, label %then.0, label %else.0
+
+then.0:
+        %tmp.5 = add int %a, -1
+        %tmp.3 = call int %t4( int %tmp.5 )
+        br label %return
+
+else.0:
+        %tmp.7 = setne int %a, 0
+        br bool %tmp.7, label %then.1, label %return
+
+then.1:
+        %tmp.11 = add int %a, -2
+        %tmp.9 = call int %t4( int %tmp.11 )
+        br label %return
+
+return:
+        %result.0 = phi int [ 0, %else.0 ], [ %tmp.3, %then.0 ],
+                            [ %tmp.9, %then.1 ]
+        ret int %result.0
+}
+
+//===---------------------------------------------------------------------===//
+
+Tail recursion elimination is not transforming this function, because it is
+returning n, which fails the isDynamicConstant check in the accumulator
+recursion checks.
+
+long long fib(const long long n) {
+  switch(n) {
+    case 0:
+    case 1:
+      return n;
+    default:
+      return fib(n-1) + fib(n-2);
+  }
+}
+
+//===---------------------------------------------------------------------===//
+
+Argument promotion should promote arguments for recursive functions, like
+this:
+
+; RUN: llvm-upgrade < %s | llvm-as | opt -argpromotion | llvm-dis | grep x.val
+
+implementation   ; Functions:
+
+internal int %foo(int* %x) {
+entry:
+        %tmp = load int* %x
+        %tmp.foo = call int %foo(int *%x)
+        ret int %tmp.foo
+}
+
+int %bar(int* %x) {
+entry:
+        %tmp3 = call int %foo( int* %x)         ; [#uses=1]
+        ret int %tmp3
+}
+
+//===---------------------------------------------------------------------===//
+
+"basicaa" should know how to look through "or" instructions that act like add
+instructions. For example in this code, the x*4+1 is turned into x*4 | 1, and
+basicaa can't analyze the array subscript, leading to duplicated loads in the
+generated code:
+
+void test(int X, int Y, int a[]) {
+int i;
+  for (i=2; i<1000; i+=4) {
+      a[i+0] = a[i-1+0]*a[i-2+0];
+      a[i+1] = a[i-1+1]*a[i-2+1];
+      a[i+2] = a[i-1+2]*a[i-2+2];
+      a[i+3] = a[i-1+3]*a[i-2+3];
+  }
+}
+
+//===---------------------------------------------------------------------===//
+
+We should investigate an instruction sinking pass. Consider this silly
+example in pic mode:
+
+#include <assert.h>
+void foo(int x) {
+  assert(x);
+  //...
+}
+
+we compile this to:
+_foo:
+        subl    $28, %esp
+        call    "L1$pb"
+"L1$pb":
+        popl    %eax
+        cmpl    $0, 32(%esp)
+        je      LBB1_2  # cond_true
+LBB1_1: # return
+        # ...
+        addl    $28, %esp
+        ret
+LBB1_2: # cond_true
+...
+
+The PIC base computation (call+popl) is only used on one path through the
+code, but is currently always computed in the entry block. It would be
+better to sink the picbase computation down into the block for the
+assertion, as it is the only one that uses it. This happens for a lot of
+code with early outs.
+
+Another example is loads of arguments, which are usually emitted into the
+entry block on targets like x86. If not used in all paths through a
+function, they should be sunk into the ones that do.
+
+In this case, whole-function-isel would also handle this.
+
+//===---------------------------------------------------------------------===//
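
Regarding the tail recursion note on fib above: purely as an illustration
(hand-written, not the output of any existing pass, and with a made-up name),
the accumulator form it is asking for would look roughly like this. The only
real obstacle is that the base case returns the variable n, which has to
become the accumulator's final addition.

long long fib_sketch(long long n) {
  long long acc = 0;
  while (n > 1) {
    acc += fib_sketch(n - 1);  /* the remaining recursive call */
    n -= 2;                    /* the eliminated fib(n-2) call becomes the loop */
  }
  return acc + n;              /* n is 0 or 1 here: fib(0)=0, fib(1)=1 */
}

//===---------------------------------------------------------------------===//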
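
Regarding the "basicaa" or-vs-add note above: the identity involved is that
"or" behaves exactly like "add" when the two operands share no set bits, since
no carries can occur; x*4 always has its low two bits clear, so
(x*4) | 1 == x*4 + 1. A small standalone check of that identity, for
illustration only (this is not existing basicaa code):

#include <assert.h>
#include <stdint.h>

/* When (a & b) == 0 there are no carries, so a + b == (a | b).  Here
   base = x*4 has its low two bits clear, so or'ing in 1 is an add of 1. */
int main(void) {
  for (uint32_t x = 0; x < 1000; ++x) {
    uint32_t base = x * 4;             /* low two bits are zero */
    assert((base & 1u) == 0);          /* no bit overlap with the constant 1 */
    assert((base | 1u) == base + 1);   /* "or" acts exactly like "add" here */
  }
  return 0;
}

//===---------------------------------------------------------------------===//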